From 36e61230987d6629deca67b54b87b554ded69da1 Mon Sep 17 00:00:00 2001 From: Max Williams <8859277+max-rocket-internet@users.noreply.github.com> Date: Thu, 14 Nov 2024 16:59:38 +0100 Subject: [PATCH] Adding old index.yaml and artifacthub metadata file to repo (#629) Co-authored-by: Max Williams --- artifacthub-repo.yml | 6 + ci/artifacthub-io-sync-metadata.sh | 40 + index.yaml | 10479 +++++++++++++++++++++++++++ 3 files changed, 10525 insertions(+) create mode 100644 artifacthub-repo.yml create mode 100755 ci/artifacthub-io-sync-metadata.sh create mode 100644 index.yaml diff --git a/artifacthub-repo.yml b/artifacthub-repo.yml new file mode 100644 index 00000000..14b2a5d0 --- /dev/null +++ b/artifacthub-repo.yml @@ -0,0 +1,6 @@ +repositoryID: 8ea3e1b3-7f98-4ee9-95d7-58bcca61b846 +owners: + - name: max.williams + email: max.williams@deliveryhero.com + - name: gonzalo.lopez + email: gonzalo.lopez@deliveryhero.com \ No newline at end of file diff --git a/ci/artifacthub-io-sync-metadata.sh b/ci/artifacthub-io-sync-metadata.sh new file mode 100755 index 00000000..521247ff --- /dev/null +++ b/ci/artifacthub-io-sync-metadata.sh @@ -0,0 +1,40 @@ +#!/bin/bash + +# Run this script to push the artifacthub-repo.yml file to every helm chart OCI package +# This should only need to be run once for each packge + +repo_root=$(git rev-parse --show-toplevel 2> /dev/null) + +if [[ $? -ne 0 ]]; then + echo "Error: Not in a Git repository." >&2 + exit 1 +fi + +if [[ ! -d "$repo_root" ]]; then + echo "Error: Could not determine repository root." >&2 + exit 1 +fi + +if [[ "$(pwd)" != "$repo_root" ]]; then + echo "Error: Not at the root of the deliveryhero/helm-charts repository. Current directory is $(pwd), but repo root is $repo_root" >&2 + exit 1 +fi + +if [[ ! -f "artifacthub-repo.yml" ]]; then + echo "Error: artifacthub-repo.yml does not exist." 
>&2 + exit 1 +fi + +for chart in stable/*/; do + chart_name=${chart%/} # Remove the trailing / + chart_name=${chart_name##*/} # Remove everything up to the last / + + echo "Processing chart: $chart_name" + + oras push \ + ghcr.io/deliveryhero/helm-charts/${chart_name}:artifacthub.io \ + --config /dev/null:application/vnd.cncf.artifacthub.config.v1+yaml \ + artifacthub-repo.yml:application/vnd.cncf.artifacthub.repository-metadata.layer.v1.yaml +done + +exit 0 diff --git a/index.yaml b/index.yaml new file mode 100644 index 00000000..22929456 --- /dev/null +++ b/index.yaml @@ -0,0 +1,10479 @@ +apiVersion: v1 +entries: + aws-ebs-csi-driver: + - apiVersion: v2 + appVersion: 1.16.1 + created: "2024-10-04T09:09:20Z" + description: A Helm chart for AWS EBS CSI Driver + digest: 17f959dcc5d08e3d250ecc3f45a941537b195c4fc46ec508ac8dc38f80e86e32 + home: https://github.com/kubernetes-sigs/aws-ebs-csi-driver + keywords: + - aws + - ebs + - csi + kubeVersion: '>=1.17.0-0' + maintainers: + - name: Kubernetes Authors + url: https://github.com/kubernetes-sigs/aws-ebs-csi-driver/ + name: aws-ebs-csi-driver + sources: + - https://github.com/kubernetes-sigs/aws-ebs-csi-driver + urls: + - charts/aws-ebs-csi-driver-2.17.1.tgz + version: 2.17.1 + - annotations: + artifacthub.io/changes: | + - kind: added + description: Custom controller.updateStrategy to set controller deployment strategy. 
+ apiVersion: v2 + appVersion: 1.13.0 + created: "2023-03-13T09:31:30Z" + description: A Helm chart for AWS EBS CSI Driver + digest: 67335febc4b2f79f0f598274e4cae5f10c7afb137812fb4a2d52f4ee1352d140 + home: https://github.com/kubernetes-sigs/aws-ebs-csi-driver + keywords: + - aws + - ebs + - csi + kubeVersion: '>=1.17.0-0' + maintainers: + - email: chengpan@amazon.com + name: leakingtapan + - name: krmichel + url: https://github.com/krmichel + name: aws-ebs-csi-driver + sources: + - https://github.com/kubernetes-sigs/aws-ebs-csi-driver + urls: + - charts/aws-ebs-csi-driver-2.13.0.tgz + version: 2.13.0 + aws-s3-proxy: + - apiVersion: v1 + appVersion: "2.0" + created: "2024-10-04T09:09:20Z" + description: 'Reverse proxy for AWS S3 with basic authentication. See here for + configuration via environment variables: https://github.com/pottava/aws-s3-proxy#usage ' + digest: d91faeda870ddd57b9fa95bf54879634d02e2b1bc95601e219b92836b50547fa + home: https://github.com/pottava/aws-s3-proxy + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: aws-s3-proxy + urls: + - charts/aws-s3-proxy-0.1.5.tgz + version: 0.1.5 + - apiVersion: v1 + appVersion: "2.0" + created: "2024-06-26T11:55:22Z" + description: 'Reverse proxy for AWS S3 with basic authentication. See here for + configuration via environment variables: https://github.com/pottava/aws-s3-proxy#usage ' + digest: 15b612a69c89da15a7a38141d696dab0c487fc9478b26c5abe712e21f56a13fc + home: https://github.com/pottava/aws-s3-proxy + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: aws-s3-proxy + urls: + - charts/aws-s3-proxy-0.1.4.tgz + version: 0.1.4 + - apiVersion: v1 + appVersion: "2.0" + created: "2023-06-12T17:37:57Z" + description: 'Reverse proxy for AWS S3 with basic authentication. 
See here for + configuration via environment variables: https://github.com/pottava/aws-s3-proxy#usage ' + digest: 7eb75563f31d09e0cc18b36e242fcbeabbfbd356acc02d0edd0b44c41351958e + home: https://github.com/pottava/aws-s3-proxy + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: aws-s3-proxy + urls: + - charts/aws-s3-proxy-0.1.3.tgz + version: 0.1.3 + - apiVersion: v1 + appVersion: "2.0" + created: "2021-08-19T13:59:42Z" + description: | + Reverse proxy for AWS S3 with basic authentication. + + See here for configuration via environment variables: https://github.com/pottava/aws-s3-proxy#usage + digest: d3e2a084696e19d5105a961ea0274c166e0c6aa1bbb92eb971d931498af0937c + home: https://github.com/pottava/aws-s3-proxy + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: aws-s3-proxy + urls: + - charts/aws-s3-proxy-0.1.2.tgz + version: 0.1.2 + - apiVersion: v1 + appVersion: "2.0" + created: "2020-11-13T08:31:55Z" + description: | + Reverse proxy for AWS S3 with basic authentication. 
+ + See here for configuration via environment variables: https://github.com/pottava/aws-s3-proxy#usage + digest: 24e59106da5a1548e7d05526ebcf3e27df54b5a8474856b11a2c8a35bf9c1bcc + home: https://github.com/pottava/aws-s3-proxy + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: aws-s3-proxy + urls: + - charts/aws-s3-proxy-0.1.1.tgz + version: 0.1.1 + aws-service-events-exporter: + - apiVersion: v2 + appVersion: 1.0.0 + created: "2024-10-04T09:09:21Z" + description: 'This helm chart exports aws service events to prometheus via aws + SQS queue, this include: - [RDS events](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Events.html) + - [Elasticache events](https://docs.aws.amazon.com/AmazonElastiCache/latest/mem-ug/ECEvents.Viewing.html) ' + digest: f6b604b8e5c8cd65dc191b52f4f340a9bcc9a2d25838202d1f64624f3eb32800 + home: https://github.com/deliveryhero/aws-service-events-exporter + maintainers: + - email: no-reply@deliveryhero.com + name: nyambati + - email: no-reply@deliveryhero.com + name: javad-hajiani + name: aws-service-events-exporter + sources: + - https://github.com/deliveryhero/aws-service-events-exporter + type: application + urls: + - charts/aws-service-events-exporter-1.0.5.tgz + version: 1.0.5 + - apiVersion: v2 + appVersion: 1.0.0 + created: "2021-09-02T12:05:36Z" + description: | + This helm chart exports aws service events to prometheus via aws SQS queue, this include: + + - [RDS events](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Events.html) + - [Elasticache events](https://docs.aws.amazon.com/AmazonElastiCache/latest/mem-ug/ECEvents.Viewing.html) + digest: ea0df8f57d2fd48a7e56f69b5ccc526cd6b677d479a99cc177dd40a0c69bbdc1 + home: https://github.com/deliveryhero/aws-service-events-exporter + maintainers: + - email: no-reply@deliveryhero.com + name: nyambati + name: aws-service-events-exporter + sources: + - https://github.com/deliveryhero/aws-service-events-exporter + type: application + 
urls: + - charts/aws-service-events-exporter-1.0.4.tgz + version: 1.0.4 + - apiVersion: v2 + appVersion: 1.0.0 + created: "2021-08-12T13:34:50Z" + description: | + This helm chart exports aws service events to prometheus via aws SQS queue, this include: + + - [RDS events](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Events.html) + - [Elasticache events](https://docs.aws.amazon.com/AmazonElastiCache/latest/mem-ug/ECEvents.Viewing.html) + digest: 0207c25c4b616b62391f9ded7d0031263ee9c0a048afda7b2d367ceb4edb4ce0 + home: https://github.com/deliveryhero/aws-service-events-exporter + maintainers: + - email: no-reply@deliveryhero.com + name: nyambati + name: aws-service-events-exporter + sources: + - https://github.com/deliveryhero/aws-service-events-exporter + type: application + urls: + - charts/aws-service-events-exporter-1.0.3.tgz + version: 1.0.3 + - apiVersion: v2 + appVersion: 1.0.0 + created: "2021-08-06T19:44:39Z" + description: | + This helm chart exports aws service events to prometheus via aws SQS queue, this include: + + - [RDS events](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Events.html) + - [Elasticache events](https://docs.aws.amazon.com/AmazonElastiCache/latest/mem-ug/ECEvents.Viewing.html) + digest: eebfe161d93447b2e8530e006be169fe552ad707acdf57e8e474cff081989a5f + home: https://github.com/deliveryhero/aws-service-events-exporter + maintainers: + - email: no-reply@deliveryhero.com + name: nyambati + name: aws-service-events-exporter + sources: + - https://github.com/deliveryhero/aws-service-events-exporter + type: application + urls: + - charts/aws-service-events-exporter-1.0.2.tgz + version: 1.0.2 + - apiVersion: v2 + appVersion: 1.0.0 + created: "2021-05-19T08:11:12Z" + description: | + This helm chart exports aws service events to prometheus via aws SQS queue, this include: + + - [RDS events](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Events.html) + - [Elasticache 
events](https://docs.aws.amazon.com/AmazonElastiCache/latest/mem-ug/ECEvents.Viewing.html) + digest: 480d0d6a645a2551d4b04ad85e006e4a5fe91e9de88d3ff6baa1ad8fb1e3509f + home: https://github.com/deliveryhero/aws-service-events-exporter + maintainers: + - email: no-reply@deliveryhero.com + name: nyambati + name: aws-service-events-exporter + sources: + - https://github.com/deliveryhero/aws-service-events-exporter + type: application + urls: + - charts/aws-service-events-exporter-1.0.1.tgz + version: 1.0.1 + - apiVersion: v2 + appVersion: 1.0.0 + created: "2021-04-30T09:30:15Z" + description: | + This helm chart exports aws service events to prometheus via aws SQS queue, this include: + + - [RDS events](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Events.html) + - [Elasticache events](https://docs.aws.amazon.com/AmazonElastiCache/latest/mem-ug/ECEvents.Viewing.html) + digest: 0ee87baf6602ae1c64df8a926270233029bfd34d055fc5449adb1d28c1dd36a3 + home: https://github.com/deliveryhero/aws-service-events-exporter + maintainers: + - email: no-reply@deliveryhero.com + name: nyambati + name: aws-service-events-exporter + sources: + - https://github.com/deliveryhero/aws-service-events-exporter + type: application + urls: + - charts/aws-service-events-exporter-1.0.0.tgz + version: 1.0.0 + aws-service-quotas-exporter: + - apiVersion: v2 + appVersion: v1.3.2 + created: "2024-10-04T09:09:22Z" + description: This exporter exports AWS service quotas and usage as Prometheus + metrics + digest: 855132c7ece746443d08c7adb51a6d1134f571e568e24ac8fcf3f7ceae52ec7b + home: https://github.com/thought-machine/aws-service-quotas-exporter + keywords: + - aws + - service + - quotas + - exporter + maintainers: + - email: no-reply@deliveryhero.com + name: nyambati + name: aws-service-quotas-exporter + sources: + - https://github.com/thought-machine/aws-service-quotas-exporter + type: application + urls: + - charts/aws-service-quotas-exporter-0.1.2.tgz + version: 0.1.2 + - apiVersion: v2 
+ appVersion: v1.3.2 + created: "2023-02-28T16:02:46Z" + description: This exporter exports AWS service quotas and usage as Prometheus + metrics + digest: 2b2f49f1e5fd55f5f0f759c14dfd91e8d1c5b6ef9acb277d53a117a0d240ebbe + home: https://github.com/thought-machine/aws-service-quotas-exporter + keywords: + - aws + - service + - quotas + - exporter + maintainers: + - email: no-reply@deliveryhero.com + name: nyambati + name: aws-service-quotas-exporter + sources: + - https://github.com/thought-machine/aws-service-quotas-exporter + type: application + urls: + - charts/aws-service-quotas-exporter-0.1.1.tgz + version: 0.1.1 + - apiVersion: v2 + appVersion: v1.3.2 + created: "2023-02-10T14:50:25Z" + description: This exporter exports AWS service quotas and usage as Prometheus + metrics + digest: 9dd59fd47b0d9ad822b32679b90781284473715ab9c24bf54404663786fd8c9a + home: https://github.com/thought-machine/aws-service-quotas-exporter + keywords: + - aws + - service + - quotas + - exporter + maintainers: + - email: no-reply@deliveryhero.com + name: nyambati + name: aws-service-quotas-exporter + sources: + - https://github.com/thought-machine/aws-service-quotas-exporter + type: application + urls: + - charts/aws-service-quotas-exporter-0.1.0.tgz + version: 0.1.0 + aws-storage-class: + - apiVersion: v1 + created: "2024-10-04T09:09:22.013103906Z" + description: 'Creates a StorageClass. From here: https://github.com/kubernetes/kubernetes/blob/master/cluster/addons/storage-class/aws/default.yaml' + digest: a5171263e4f3379096a2a74608074d4c31140c76e43a4ce489aa72703c4252cb + home: https://github.com/deliveryhero/helm-charts + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: aws-storage-class + urls: + - charts/aws-storage-class-0.1.7.tgz + version: 0.1.7 + - apiVersion: v1 + created: "2023-06-01T08:35:47Z" + description: 'Creates a StorageClass. 
From here: https://github.com/kubernetes/kubernetes/blob/master/cluster/addons/storage-class/aws/default.yaml' + digest: 51e424b48be5e8e6bc5488b85c76fd54b3aa8927b767e0c20cb9b3a211411b34 + home: https://github.com/deliveryhero/helm-charts + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: aws-storage-class + urls: + - charts/aws-storage-class-0.1.6.tgz + version: 0.1.6 + - apiVersion: v1 + created: "2022-12-20T10:47:27Z" + description: 'Creates a StorageClass. From here: https://github.com/kubernetes/kubernetes/blob/master/cluster/addons/storage-class/aws/default.yaml' + digest: 6c0f2033e009aa92b04a74bdbb1723ca85de12456ed1be9cccb6defc265641a8 + home: https://github.com/deliveryhero/helm-charts + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: aws-storage-class + urls: + - charts/aws-storage-class-0.1.5.tgz + version: 0.1.5 + - apiVersion: v1 + created: "2022-12-08T15:02:26.007297368Z" + description: 'Creates a StorageClass. From here: https://github.com/kubernetes/kubernetes/blob/master/cluster/addons/storage-class/aws/default.yaml' + digest: c4109a153a23e9e8fa419a21048c28dffb172fab2fedb20fc7108d7bb271ce73 + home: https://github.com/deliveryhero/helm-charts + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: aws-storage-class + urls: + - charts/aws-storage-class-0.1.4.tgz + version: 0.1.4 + - apiVersion: v1 + created: "2022-12-01T12:26:50Z" + description: 'Creates a StorageClass. 
From here: https://github.com/kubernetes/kubernetes/blob/master/cluster/addons/storage-class/aws/default.yaml' + digest: 921ff7a8c4c7ad122e9a47f78b62811ddbc7ab1d63e333582091fdc359e3e537 + home: https://github.com/deliveryhero/helm-charts + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: aws-storage-class + urls: + - charts/aws-storage-class-0.1.3.tgz + version: 0.1.3 + - apiVersion: v1 + created: "2022-11-29T12:01:14Z" + description: 'Creates a StorageClass. From here: https://github.com/kubernetes/kubernetes/blob/master/cluster/addons/storage-class/aws/default.yaml' + digest: 44af9d3caab671c2cfbccde63222120a1db85f0f92bbe861e27c5d67847930ed + home: https://github.com/deliveryhero/helm-charts + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: aws-storage-class + urls: + - charts/aws-storage-class-0.1.2.tgz + version: 0.1.2 + - apiVersion: v1 + created: "2021-09-16T08:58:16Z" + description: 'Creates a StorageClass. 
From here: https://github.com/kubernetes/kubernetes/blob/master/cluster/addons/storage-class/aws/default.yaml' + digest: 8768b003cbafd92bb6d06b6059dc03840e19c6df9eb2f6e471ec92670a99f2a4 + home: https://github.com/deliveryhero/helm-charts + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: aws-storage-class + urls: + - charts/aws-storage-class-0.1.1.tgz + version: 0.1.1 + backstage: + - apiVersion: v2 + appVersion: v0.1.1-alpha.23 + created: "2024-10-04T09:09:23Z" + description: A Helm chart for Backstage + digest: 762d39ed584bf263b76ec28c65cc17d0542e04f5ebcf1fdf99169bb1676aca22 + home: https://github.com/backstage/backstage + maintainers: + - email: no-reply@deliveryhero.com + name: javad-hajiani + - email: no-reply@deliveryhero.com + name: nyambati + name: backstage + sources: + - https://github.com/backstage/backstage + - https://github.com/spotify/lighthouse-audit-service + type: application + urls: + - charts/backstage-0.1.13.tgz + version: 0.1.13 + - apiVersion: v2 + appVersion: v0.1.1-alpha.23 + created: "2022-12-02T14:52:10Z" + description: A Helm chart for Backstage + digest: 7345e28b20945c9697af36a86c2892c68685660c6122209b9d433f3879eb4982 + home: https://github.com/backstage/backstage + maintainers: + - email: no-reply@deliveryhero.com + name: javad-hajiani + - email: no-reply@deliveryhero.com + name: nyambati + name: backstage + sources: + - https://github.com/backstage/backstage + - https://github.com/spotify/lighthouse-audit-service + type: application + urls: + - charts/backstage-0.1.12.tgz + version: 0.1.12 + - apiVersion: v2 + appVersion: v0.1.1-alpha.23 + created: "2021-07-08T12:47:12Z" + description: A Helm chart for Backstage + digest: 3a1d47731a412884eeeb749bc667aa56af7a80ec9ab3fe225a3ff380c12e1b1a + home: https://github.com/backstage/backstage + maintainers: + - email: no-reply@deliveryhero.com + name: javad-hajiani + - email: no-reply@deliveryhero.com + name: nyambati + name: backstage + sources: + - 
https://github.com/backstage/backstage + - https://github.com/spotify/lighthouse-audit-service + type: application + urls: + - charts/backstage-0.1.11.tgz + version: 0.1.11 + - apiVersion: v2 + appVersion: v0.1.1-alpha.23 + created: "2021-07-01T10:12:43Z" + description: A Helm chart for Backstage + digest: 75618fed212a2bf30a0c7ae5cbf1a0250ff2b59f6ad6a3fc1c0ddf320956c7ad + home: https://github.com/backstage/backstage + maintainers: + - email: no-reply@deliveryhero.com + name: javad-hajiani + - email: no-reply@deliveryhero.com + name: nyambati + name: backstage + sources: + - https://github.com/backstage/backstage + - https://github.com/spotify/lighthouse-audit-service + type: application + urls: + - charts/backstage-0.1.10.tgz + version: 0.1.10 + - apiVersion: v2 + appVersion: v0.1.1-alpha.23 + created: "2021-06-30T19:42:09Z" + description: A Helm chart for Backstage + digest: bf27369d848f9834a469d041dcd36e4033491cd2de8f5d1ac6b1e19a4fd9721b + home: https://github.com/backstage/backstage + maintainers: + - email: no-reply@deliveryhero.com + name: javad-hajiani + - email: no-reply@deliveryhero.com + name: nyambati + name: backstage + sources: + - https://github.com/backstage/backstage + - https://github.com/spotify/lighthouse-audit-service + type: application + urls: + - charts/backstage-0.1.9.tgz + version: 0.1.9 + - apiVersion: v2 + appVersion: v0.1.1-alpha.23 + created: "2021-06-30T19:12:08Z" + description: A Helm chart for Backstage + digest: 3cf036aecacc1a291731c48d65b4049458d15ccbd46f2431f02905e161aa4afc + home: https://github.com/backstage/backstage + maintainers: + - email: no-reply@deliveryhero.com + name: javad-hajiani + - email: no-reply@deliveryhero.com + name: nyambati + name: backstage + sources: + - https://github.com/backstage/backstage + - https://github.com/spotify/lighthouse-audit-service + type: application + urls: + - charts/backstage-0.1.8.tgz + version: 0.1.8 + - apiVersion: v2 + appVersion: v0.1.1-alpha.23 + created: "2021-06-30T17:24:18Z" + 
description: A Helm chart for Backstage + digest: a44a9ef17ebc4075b43367a6ba2b5d37c02accc816d45d91b43aa14ee540777b + home: https://github.com/backstage/backstage + maintainers: + - email: no-reply@deliveryhero.com + name: javad-hajiani + - email: no-reply@deliveryhero.com + name: nyambati + name: backstage + sources: + - https://github.com/backstage/backstage + - https://github.com/spotify/lighthouse-audit-service + type: application + urls: + - charts/backstage-0.1.7.tgz + version: 0.1.7 + - apiVersion: v2 + appVersion: v0.1.1-alpha.23 + created: "2021-06-30T14:03:22Z" + description: A Helm chart for Backstage + digest: ca75a0cff07ace5365cebe040197003c6d8df34c9bb8b4dbeeb58c5fdc409abc + home: https://github.com/backstage/backstage + maintainers: + - email: no-reply@deliveryhero.com + name: javad-hajiani + - email: no-reply@deliveryhero.com + name: nyambati + name: backstage + sources: + - https://github.com/backstage/backstage + - https://github.com/spotify/lighthouse-audit-service + type: application + urls: + - charts/backstage-0.1.6.tgz + version: 0.1.6 + - apiVersion: v2 + appVersion: v0.1.1-alpha.23 + created: "2021-06-22T18:24:44Z" + description: A Helm chart for Backstage + digest: a751eb26c3d88d4b401eb7d5a726c2dc482540deaea13cdd4ec2244c4c111288 + home: https://github.com/backstage/backstage + maintainers: + - email: no-reply@deliveryhero.com + name: javad-hajiani + - email: no-reply@deliveryhero.com + name: nyambati + name: backstage + sources: + - https://github.com/backstage/backstage + - https://github.com/spotify/lighthouse-audit-service + type: application + urls: + - charts/backstage-0.1.5.tgz + version: 0.1.5 + - apiVersion: v2 + appVersion: v0.1.1-alpha.23 + created: "2021-04-30T09:30:17Z" + description: A Helm chart for Backstage + digest: 06a8349506dc46f2505ff9ee630e8a437833750712b7a718041e3293911bb92a + home: https://github.com/backstage/backstage + maintainers: + - email: no-reply@deliveryhero.com + name: javad-hajiani + - email: 
no-reply@deliveryhero.com + name: nyambati + name: backstage + sources: + - https://github.com/backstage/backstage + - https://github.com/spotify/lighthouse-audit-service + type: application + urls: + - charts/backstage-0.1.4.tgz + version: 0.1.4 + - apiVersion: v2 + appVersion: v0.1.1-alpha.23 + created: "2021-04-20T12:17:45Z" + description: A Helm chart for Backstage + digest: 09ed2791cb9b6deb0341b6c97116b3b2883f5a3ccf7f03110cf0adbaa73f956d + home: https://github.com/backstage/backstage + maintainers: + - email: no-reply@deliveryhero.com + name: javad-hajiani + - email: no-reply@deliveryhero.com + name: nyambati + name: backstage + sources: + - https://github.com/backstage/backstage + - https://github.com/spotify/lighthouse-audit-service + type: application + urls: + - charts/backstage-0.1.3.tgz + version: 0.1.3 + - apiVersion: v2 + appVersion: v0.1.1-alpha.23 + created: "2020-12-31T15:12:58Z" + description: A Helm chart for Backstage + digest: cae2580d6ce510076fb6a6431fd428e8829712c0e8906c02f7247caf1e169a44 + home: https://github.com/backstage/backstage + maintainers: + - email: no-reply@deliveryhero.com + name: javad-hajiani + - email: no-reply@deliveryhero.com + name: nyambati + name: backstage + sources: + - https://github.com/backstage/backstage + - https://github.com/spotify/lighthouse-audit-service + type: application + urls: + - charts/backstage-0.1.2.tgz + version: 0.1.2 + - apiVersion: v2 + appVersion: v0.1.1-alpha.23 + created: "2020-12-30T10:12:55Z" + dependencies: + - condition: postgresql.enabled + name: postgresql + repository: https://charts.bitnami.com/bitnami + version: 9.4.1 + description: A Helm chart for Backstage + digest: 1a87ffaff32b03b7fd5f61d84ff7667c047c294662b34897693c3c606dbb989f + home: https://github.com/backstage/backstage + maintainers: + - email: no-reply@deliveryhero.com + name: javad-hajiani + - email: thomas.nyambati@deliveryhero.com + name: nyambati + name: backstage + sources: + - https://github.com/backstage/backstage + - 
https://github.com/spotify/lighthouse-audit-service + type: application + urls: + - charts/backstage-0.1.1.tgz + version: 0.1.1 + backstage-mono: + - apiVersion: v2 + appVersion: 0.0.1 + created: "2024-10-04T09:09:24Z" + description: A Helm chart for simple backstage deployment. This chart deploys + a single pod for both backstage frontend and backend. + digest: 495432982fb31def845164da663e0337d487b09aed2838bed34220d1f30c4cdd + home: https://github.com/backstage/backstage + maintainers: + - email: no-reply@deliveryhero.com + name: goelankitt + name: backstage-mono + type: application + urls: + - charts/backstage-mono-0.1.1.tgz + version: 0.1.1 + - apiVersion: v2 + appVersion: 0.0.1 + created: "2021-08-16T19:26:31Z" + description: A Helm chart for simple backstage deployment. This chart deploys + a single pod for both backstage frontend and backend. + digest: a02233bd145033f07b78326f5234009d7ff6ce1ace174bf61c08529eb37948c7 + home: https://github.com/backstage/backstage + maintainers: + - email: no-reply@deliveryhero.com + name: goelankitt + name: backstage-mono + type: application + urls: + - charts/backstage-mono-0.1.0.tgz + version: 0.1.0 + cachet: + - apiVersion: v1 + appVersion: 2.3.15 + created: "2024-10-04T09:09:36Z" + dependencies: + - condition: postgresql.enabled + name: postgresql + repository: https://charts.bitnami.com/bitnami + version: 11.6.26 + description: The open source status page system + digest: a0696772f035bc38116578988af90cf4e8b124891f90e1f1725bbb5a54c979ec + home: https://cachethq.io/ + maintainers: + - email: no-reply@deliveryhero.com + name: nyambati + name: cachet + sources: + - https://github.com/CachetHQ/Docker + - https://github.com/CachetHQ/Cachet + urls: + - charts/cachet-1.3.4.tgz + version: 1.3.4 + - apiVersion: v1 + appVersion: 2.3.15 + created: "2024-01-02T15:45:13Z" + dependencies: + - condition: postgresql.enabled + name: postgresql + repository: https://charts.bitnami.com/bitnami + version: 11.6.26 + description: The open 
source status page system + digest: 99dd99c3418a37c5072123a2ec4633049abbf13b3e7e6ba0da69afcce710811d + home: https://cachethq.io/ + maintainers: + - email: no-reply@deliveryhero.com + name: nyambati + name: cachet + sources: + - https://github.com/CachetHQ/Docker + - https://github.com/CachetHQ/Cachet + urls: + - charts/cachet-1.3.3.tgz + version: 1.3.3 + - apiVersion: v1 + appVersion: 2.3.15 + created: "2023-11-16T10:29:43Z" + dependencies: + - condition: postgresql.enabled + name: postgresql + repository: https://charts.bitnami.com/bitnami + version: 11.6.26 + description: The open source status page system + digest: e5083aa11061afe31bd5ebbeebfd899a12cfa1ee0f750ee6cd68d2264dfda674 + home: https://cachethq.io/ + maintainers: + - email: no-reply@deliveryhero.com + name: nyambati + name: cachet + sources: + - https://github.com/CachetHQ/Docker + - https://github.com/CachetHQ/Cachet + urls: + - charts/cachet-1.3.2.tgz + version: 1.3.2 + - apiVersion: v1 + appVersion: 2.3.15 + created: "2022-10-24T14:46:13Z" + dependencies: + - condition: postgresql.enabled + name: postgresql + repository: https://charts.bitnami.com/bitnami + version: 11.6.26 + description: The open source status page system + digest: 25306deb53f994e125459f8d4de10e8a0dde804ed2fc1b6ab56083f88be892c3 + home: https://cachethq.io/ + maintainers: + - email: no-reply@deliveryhero.com + name: nyambati + name: cachet + sources: + - https://github.com/CachetHQ/Docker + - https://github.com/CachetHQ/Cachet + urls: + - charts/cachet-1.3.1.tgz + version: 1.3.1 + - apiVersion: v1 + appVersion: 2.3.15 + created: "2022-05-30T14:18:12Z" + dependencies: + - condition: postgresql.enabled + name: postgresql + repository: https://charts.bitnami.com/bitnami + version: 9.1.4 + description: The open source status page system + digest: 7835b470c04d0660460b2cb1d3c89ba81418f378367bd0885f00d77fda01b200 + home: https://cachethq.io/ + maintainers: + - email: no-reply@deliveryhero.com + name: nyambati + name: cachet + sources: + - 
https://github.com/CachetHQ/Docker + - https://github.com/CachetHQ/Cachet + urls: + - charts/cachet-1.3.0.tgz + version: 1.3.0 + - apiVersion: v1 + appVersion: 2.3.15 + created: "2022-01-13T11:15:33Z" + dependencies: + - condition: postgresql.enabled + name: postgresql + repository: https://charts.bitnami.com/bitnami + version: 9.1.4 + description: The open source status page system + digest: c577790cd1bb33b14e810117fc968eedcc323388b43c82293df1210a70531aea + home: https://cachethq.io/ + maintainers: + - email: no-reply@deliveryhero.com + name: nyambati + name: cachet + sources: + - https://github.com/CachetHQ/Docker + - https://github.com/CachetHQ/Cachet + urls: + - charts/cachet-1.2.9.tgz + version: 1.2.9 + - apiVersion: v1 + appVersion: 2.3.15 + created: "2021-08-02T16:01:57Z" + dependencies: + - condition: postgresql.enabled + name: postgresql + repository: https://charts.bitnami.com/bitnami + version: 9.1.4 + description: The open source status page system + digest: c754996ffcd3fcff0bfa76e2e31127788c69932e4735a6239a2f14a695e7e70d + home: https://cachethq.io/ + maintainers: + - email: no-reply@deliveryhero.com + name: nyambati + name: cachet + sources: + - https://github.com/CachetHQ/Docker + - https://github.com/CachetHQ/Cachet + urls: + - charts/cachet-1.2.8.tgz + version: 1.2.8 + - apiVersion: v1 + appVersion: 2.3.15 + created: "2021-05-03T14:35:52Z" + dependencies: + - condition: postgresql.enabled + name: postgresql + repository: https://charts.bitnami.com/bitnami + version: 9.1.4 + description: The open source status page system + digest: 171f92abb62be3ed94677576340c42547cb35386f03b929343d43a0f323612ee + home: https://cachethq.io/ + maintainers: + - email: no-reply@deliveryhero.com + name: nyambati + name: cachet + sources: + - https://github.com/CachetHQ/Docker + - https://github.com/CachetHQ/Cachet + urls: + - charts/cachet-1.2.7.tgz + version: 1.2.7 + - apiVersion: v1 + appVersion: 2.3.15 + created: "2020-11-25T15:13:03Z" + dependencies: + - condition: 
postgresql.enabled + name: postgresql + repository: https://charts.bitnami.com/bitnami + version: 9.1.4 + description: The open source status page system + digest: 461bb0835bf8b26f6e9811de0a97ba1bbb941791feb7e5f8f1902fecbf904c9a + home: https://cachethq.io/ + maintainers: + - email: no-reply@deliveryhero.com + name: nyambati + name: cachet + sources: + - https://github.com/CachetHQ/Docker + - https://github.com/CachetHQ/Cachet + urls: + - charts/cachet-1.2.6.tgz + version: 1.2.6 + - apiVersion: v1 + appVersion: 2.3.15 + created: "2020-09-01T08:19:05Z" + dependencies: + - condition: postgresql.enabled + name: postgresql + repository: https://charts.bitnami.com/bitnami + version: 9.1.4 + description: The open source status page system + digest: 08d04446dde8acac78042b82eb9a4fcc3ae6bc8bc5b59b768c6288d1423ddb6a + home: https://cachethq.io/ + maintainers: + - email: no-reply@deliveryhero.com + name: nyambati + name: cachet + sources: + - https://github.com/CachetHQ/Docker + - https://github.com/CachetHQ/Cachet + urls: + - charts/cachet-1.2.5.tgz + version: 1.2.5 + cloudhealth-collector: + - apiVersion: v1 + appVersion: "957" + created: "2024-10-04T09:09:37Z" + description: 'Deploys a k8s pod to collect data and generate reports based or + resources usages, costs and other possibilities. Please check more about it + on: https://www.cloudhealthtech.com/solutions/containers Chart based on its + official k8s resources: https://s3.amazonaws.com/cloudhealth-public/containers/kubernetes-collector-pod-template.yaml A + Cloudhealth account is required. 
To install the chart a valid api token should + be generated from adding a new cluster: https://apps.cloudhealthtech.com/containers_clusters ' + digest: 3922906bd2ad2f5f8f3fc29327056282b8f670c6097c24fd8971b6439695f3cc + home: https://github.com/deliveryhero/helm-charts + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: cloudhealth-collector + sources: + - https://s3.amazonaws.com/cloudhealth-public/containers/kubernetes-collector-pod-template.yaml + urls: + - charts/cloudhealth-collector-0.1.6.tgz + version: 0.1.6 + - apiVersion: v1 + appVersion: "957" + created: "2022-10-28T14:31:30Z" + description: 'Deploys a k8s pod to collect data and generate reports based or + resources usages, costs and other possibilities. Please check more about it + on: https://www.cloudhealthtech.com/solutions/containers Chart based on its + official k8s resources: https://s3.amazonaws.com/cloudhealth-public/containers/kubernetes-collector-pod-template.yaml A + Cloudhealth account is required. To install the chart a valid api token should + be generated from adding a new cluster: https://apps.cloudhealthtech.com/containers_clusters ' + digest: 40c8b1bbba969e3cd6e9109afde46633f9ab666538c82b47d04f5a3c29ea6f46 + home: https://github.com/deliveryhero/helm-charts + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: cloudhealth-collector + sources: + - https://s3.amazonaws.com/cloudhealth-public/containers/kubernetes-collector-pod-template.yaml + urls: + - charts/cloudhealth-collector-0.1.5.tgz + version: 0.1.5 + - apiVersion: v1 + appVersion: "957" + created: "2022-10-13T09:30:48Z" + description: 'Deploys a k8s pod to collect data and generate reports based or + resources usages, costs and other possibilities. 
Please check more about it + on: https://www.cloudhealthtech.com/solutions/containers Chart based on its + official k8s resources: https://s3.amazonaws.com/cloudhealth-public/containers/kubernetes-collector-pod-template.yaml A + Cloudhealth account is required. To install the chart a valid api token should + be generated from adding a new cluster: https://apps.cloudhealthtech.com/containers_clusters ' + digest: 47b394b2a3072c4742b55db913c79c302535f47d8f26aaaf49872529ed3ba481 + home: https://github.com/deliveryhero/helm-charts + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: cloudhealth-collector + sources: + - https://s3.amazonaws.com/cloudhealth-public/containers/kubernetes-collector-pod-template.yaml + urls: + - charts/cloudhealth-collector-0.1.4.tgz + version: 0.1.4 + - apiVersion: v1 + appVersion: "957" + created: "2022-08-23T11:45:28Z" + description: 'Deploys a k8s pod to collect data and generate reports based or + resources usages, costs and other possibilities. Please check more about it + on: https://www.cloudhealthtech.com/solutions/containers Chart based on its + official k8s resources: https://s3.amazonaws.com/cloudhealth-public/containers/kubernetes-collector-pod-template.yaml A + Cloudhealth account is required. 
To install the chart a valid api token should + be generated from adding a new cluster: https://apps.cloudhealthtech.com/containers_clusters ' + digest: c767a8105c7007989f958647c96da4e51aa87dda621acab62fe9dab6d84eff0a + home: https://github.com/deliveryhero/helm-charts + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: cloudhealth-collector + sources: + - https://s3.amazonaws.com/cloudhealth-public/containers/kubernetes-collector-pod-template.yaml + urls: + - charts/cloudhealth-collector-0.1.3.tgz + version: 0.1.3 + - apiVersion: v1 + appVersion: "957" + created: "2022-08-20T19:55:46Z" + description: 'Deploys a k8s pod to collect data and generate reports based or + resources usages, costs and other possibilities. Please check more about it + on: https://www.cloudhealthtech.com/solutions/containers Chart based on its + official k8s resources: https://s3.amazonaws.com/cloudhealth-public/containers/kubernetes-collector-pod-template.yaml A + Cloudhealth account is required. To install the chart a valid api token should + be generated from adding a new cluster: https://apps.cloudhealthtech.com/containers_clusters ' + digest: 4aa278a21bff6dd38b3d366b3d67c30592544d4b48dd6b0e1e3744bec1c833a5 + home: https://github.com/deliveryhero/helm-charts + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: cloudhealth-collector + sources: + - https://s3.amazonaws.com/cloudhealth-public/containers/kubernetes-collector-pod-template.yaml + urls: + - charts/cloudhealth-collector-0.1.2.tgz + version: 0.1.2 + - apiVersion: v1 + appVersion: "957" + created: "2022-05-30T07:41:50Z" + description: 'Deploys a k8s pod to collect data and generate reports based or + resources usages, costs and other possibilities. 
Please check more about it + on: https://www.cloudhealthtech.com/solutions/containers Chart based on its + official k8s resources: https://s3.amazonaws.com/cloudhealth-public/containers/kubernetes-collector-pod-template.yaml A + Cloudhealth account is required. To install the chart a valid api token should + be generated from adding a new cluster: https://apps.cloudhealthtech.com/containers_clusters ' + digest: eb43c4afbf84f8862bb4936c6a09b2d52137a56bad8df72e8f3b5017a3138659 + home: https://github.com/deliveryhero/helm-charts + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: cloudhealth-collector + sources: + - https://s3.amazonaws.com/cloudhealth-public/containers/kubernetes-collector-pod-template.yaml + urls: + - charts/cloudhealth-collector-0.1.1.tgz + version: 0.1.1 + - apiVersion: v1 + appVersion: "957" + created: "2020-11-13T08:32:00Z" + description: | + Deploys a k8s pod to collect data and generate reports based or resources usages, costs and other possibilities. Please check more about it on: https://www.cloudhealthtech.com/solutions/containers + + Chart based on its official k8s resources: https://s3.amazonaws.com/cloudhealth-public/containers/kubernetes-collector-pod-template.yaml + + A Cloudhealth account is required. 
+ + To install the chart a valid api token should be generated from adding a new cluster: https://apps.cloudhealthtech.com/containers_clusters + digest: de0a753d596aa41faa5122ebe09bc05ec5bb7060ba87194a6a31cf43235acd95 + home: https://github.com/deliveryhero/helm-charts + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: cloudhealth-collector + sources: + - https://s3.amazonaws.com/cloudhealth-public/containers/kubernetes-collector-pod-template.yaml + urls: + - charts/cloudhealth-collector-0.1.0.tgz + version: 0.1.0 + cluster-overprovisioner: + - apiVersion: v1 + appVersion: "3.9" + created: "2024-10-04T09:09:37Z" + description: 'This chart provide a buffer for cluster autoscaling to allow overprovisioning + of cluster nodes. This is desired when you have work loads that need to scale + up quickly without waiting for the new cluster nodes to be created and join + the cluster. It works by creating a deployment that creates pods of a lower + than default `PriorityClass`. These pods request resources from the cluster + but don''t actually consume any resources. These pods are then evicted allowing + other normal pods to be created while also triggering a scale-up by the [cluster-autoscaler](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler). This + approach is the [current recommended method to achieve overprovisioning](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md#how-can-i-configure-overprovisioning-with-cluster-autoscaler). 
' + digest: 903956b8be8174c3986ec0c8efc700d8b0cd74020ea162f409d72663ec544019 + home: https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler + icon: https://github.com/kubernetes/kubernetes/raw/master/logo/logo.png + keywords: + - cluster + - autoscaling + - overprovision + - cluster-autoscaler + maintainers: + - email: max.williams@deliveryhero.com + name: max-rocket-internet + - email: miguel.mingorance@deliveryhero.com + name: mmingorance-dh + name: cluster-overprovisioner + sources: + - https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md#how-can-i-configure-overprovisioning-with-cluster-autoscaler + - https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler + - https://github.com/kubernetes/kubernetes/tree/master/build/pause + urls: + - charts/cluster-overprovisioner-0.7.11.tgz + version: 0.7.11 + - apiVersion: v1 + appVersion: "3.6" + created: "2022-12-19T16:35:24Z" + description: 'This chart provide a buffer for cluster autoscaling to allow overprovisioning + of cluster nodes. This is desired when you have work loads that need to scale + up quickly without waiting for the new cluster nodes to be created and join + the cluster. It works by creating a deployment that creates pods of a lower + than default `PriorityClass`. These pods request resources from the cluster + but don''t actually consume any resources. These pods are then evicted allowing + other normal pods to be created while also triggering a scale-up by the [cluster-autoscaler](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler). This + approach is the [current recommended method to achieve overprovisioning](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md#how-can-i-configure-overprovisioning-with-cluster-autoscaler). 
' + digest: 773ae21cfb44aa068668a7f4c347f7b3514658be97c917d81582ba3faa36f0b9 + home: https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler + icon: https://github.com/kubernetes/kubernetes/raw/master/logo/logo.png + keywords: + - cluster + - autoscaling + - overprovision + - cluster-autoscaler + maintainers: + - email: max.williams@deliveryhero.com + name: max-rocket-internet + - email: miguel.mingorance@deliveryhero.com + name: mmingorance-dh + name: cluster-overprovisioner + sources: + - https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md#how-can-i-configure-overprovisioning-with-cluster-autoscaler + - https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler + - https://github.com/kubernetes/kubernetes/tree/master/build/pause + urls: + - charts/cluster-overprovisioner-0.7.10.tgz + version: 0.7.10 + - apiVersion: v1 + appVersion: "3.6" + created: "2022-09-30T07:20:16Z" + description: 'This chart provide a buffer for cluster autoscaling to allow overprovisioning + of cluster nodes. This is desired when you have work loads that need to scale + up quickly without waiting for the new cluster nodes to be created and join + the cluster. It works by creating a deployment that creates pods of a lower + than default `PriorityClass`. These pods request resources from the cluster + but don''t actually consume any resources. These pods are then evicted allowing + other normal pods to be created while also triggering a scale-up by the [cluster-autoscaler](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler). This + approach is the [current recommended method to achieve overprovisioning](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md#how-can-i-configure-overprovisioning-with-cluster-autoscaler). 
' + digest: 1dbcab6f44bfb2242e79c2c01babe058bb516e4e9a720bc04d50198862e689b4 + home: https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler + icon: https://github.com/kubernetes/kubernetes/raw/master/logo/logo.png + keywords: + - cluster + - autoscaling + - overprovision + - cluster-autoscaler + maintainers: + - email: max.williams@deliveryhero.com + name: max-rocket-internet + - email: miguel.mingorance@deliveryhero.com + name: mmingorance-dh + name: cluster-overprovisioner + sources: + - https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md#how-can-i-configure-overprovisioning-with-cluster-autoscaler + - https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler + - https://github.com/kubernetes/kubernetes/tree/master/build/pause + urls: + - charts/cluster-overprovisioner-0.7.9.tgz + version: 0.7.9 + - apiVersion: v1 + appVersion: "3.6" + created: "2022-08-08T18:42:40Z" + description: 'This chart provide a buffer for cluster autoscaling to allow overprovisioning + of cluster nodes. This is desired when you have work loads that need to scale + up quickly without waiting for the new cluster nodes to be created and join + the cluster. It works by creating a deployment that creates pods of a lower + than default `PriorityClass`. These pods request resources from the cluster + but don''t actually consume any resources. These pods are then evicted allowing + other normal pods to be created while also triggering a scale-up by the [cluster-autoscaler](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler). This + approach is the [current recommended method to achieve overprovisioning](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md#how-can-i-configure-overprovisioning-with-cluster-autoscaler). 
' + digest: cb12548b87c2ce7de6c2f73a6a942b8544975b02408911c41634a0ac4759c29b + home: https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler + icon: https://github.com/kubernetes/kubernetes/raw/master/logo/logo.png + keywords: + - cluster + - autoscaling + - overprovision + - cluster-autoscaler + maintainers: + - email: max.williams@deliveryhero.com + name: max-rocket-internet + - email: miguel.mingorance@deliveryhero.com + name: mmingorance-dh + name: cluster-overprovisioner + sources: + - https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md#how-can-i-configure-overprovisioning-with-cluster-autoscaler + - https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler + - https://github.com/kubernetes/kubernetes/tree/master/build/pause + urls: + - charts/cluster-overprovisioner-0.7.8.tgz + version: 0.7.8 + - apiVersion: v1 + appVersion: "3.6" + created: "2022-04-07T12:33:38Z" + description: 'This chart provide a buffer for cluster autoscaling to allow overprovisioning + of cluster nodes. This is desired when you have work loads that need to scale + up quickly without waiting for the new cluster nodes to be created and join + the cluster. It works by creating a deployment that creates pods of a lower + than default `PriorityClass`. These pods request resources from the cluster + but don''t actually consume any resources. These pods are then evicted allowing + other normal pods to be created while also triggering a scale-up by the [cluster-autoscaler](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler). This + approach is the [current recommended method to achieve overprovisioning](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md#how-can-i-configure-overprovisioning-with-cluster-autoscaler). 
' + digest: 8f7015e3d0435045ffd18ab42e829f231c31a225e4f5dc12bc5e761a4b335e37 + home: https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler + icon: https://github.com/kubernetes/kubernetes/raw/master/logo/logo.png + keywords: + - cluster + - autoscaling + - overprovision + - cluster-autoscaler + maintainers: + - email: max.williams@deliveryhero.com + name: max-rocket-internet + - email: miguel.mingorance@deliveryhero.com + name: mmingorance-dh + name: cluster-overprovisioner + sources: + - https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md#how-can-i-configure-overprovisioning-with-cluster-autoscaler + - https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler + - https://github.com/kubernetes/kubernetes/tree/master/build/pause + urls: + - charts/cluster-overprovisioner-0.7.7.tgz + version: 0.7.7 + - apiVersion: v1 + appVersion: "3.1" + created: "2022-02-08T10:00:30Z" + description: 'This chart provide a buffer for cluster autoscaling to allow overprovisioning + of cluster nodes. This is desired when you have work loads that need to scale + up quickly without waiting for the new cluster nodes to be created and join + the cluster. It works by creating a deployment that creates pods of a lower + than default `PriorityClass`. These pods request resources from the cluster + but don''t actually consume any resources. These pods are then evicted allowing + other normal pods to be created while also triggering a scale-up by the [cluster-autoscaler](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler). This + approach is the [current recommended method to achieve overprovisioning](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md#how-can-i-configure-overprovisioning-with-cluster-autoscaler). 
' + digest: e0de9267279823a6580d6a179a8f4b2840d37655c23c591a6fed8715c167d09e + home: https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler + icon: https://github.com/kubernetes/kubernetes/raw/master/logo/logo.png + keywords: + - cluster + - autoscaling + - overprovision + - cluster-autoscaler + maintainers: + - email: max.williams@deliveryhero.com + name: max-rocket-internet + - email: miguel.mingorance@deliveryhero.com + name: mmingorance-dh + name: cluster-overprovisioner + sources: + - https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md#how-can-i-configure-overprovisioning-with-cluster-autoscaler + - https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler + - https://github.com/kubernetes/kubernetes/tree/master/build/pause + urls: + - charts/cluster-overprovisioner-0.7.6.tgz + version: 0.7.6 + - apiVersion: v1 + appVersion: "3.1" + created: "2022-01-31T16:23:32Z" + description: 'This chart provide a buffer for cluster autoscaling to allow overprovisioning + of cluster nodes. This is desired when you have work loads that need to scale + up quickly without waiting for the new cluster nodes to be created and join + the cluster. It works by creating a deployment that creates pods of a lower + than default `PriorityClass`. These pods request resources from the cluster + but don''t actually consume any resources. These pods are then evicted allowing + other normal pods to be created while also triggering a scale-up by the [cluster-autoscaler](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler). This + approach is the [current recommended method to achieve overprovisioning](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md#how-can-i-configure-overprovisioning-with-cluster-autoscaler). 
' + digest: a1cebf606ab58385411c46ca36de158add4b6b15816a969d9fd8f7c81ed4f30b + home: https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler + icon: https://github.com/kubernetes/kubernetes/raw/master/logo/logo.png + keywords: + - cluster + - autoscaling + - overprovision + - cluster-autoscaler + maintainers: + - email: max.williams@deliveryhero.com + name: max-rocket-internet + - email: miguel.mingorance@deliveryhero.com + name: mmingorance-dh + name: cluster-overprovisioner + sources: + - https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md#how-can-i-configure-overprovisioning-with-cluster-autoscaler + - https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler + - https://github.com/kubernetes/kubernetes/tree/master/build/pause + urls: + - charts/cluster-overprovisioner-0.7.5.tgz + version: 0.7.5 + - apiVersion: v1 + appVersion: "3.1" + created: "2021-11-30T09:01:18Z" + description: 'This chart provide a buffer for cluster autoscaling to allow overprovisioning + of cluster nodes. This is desired when you have work loads that need to scale + up quickly without waiting for the new cluster nodes to be created and join + the cluster. It works by creating a deployment that creates pods of a lower + than default `PriorityClass`. These pods request resources from the cluster + but don''t actually consume any resources. These pods are then evicted allowing + other normal pods to be created while also triggering a scale-up by the [cluster-autoscaler](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler). This + approach is the [current recommended method to achieve overprovisioning](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md#how-can-i-configure-overprovisioning-with-cluster-autoscaler). 
' + digest: c73791f46a0c9ba7ead7a7798ddeffe1c8f789e04e0ffafec6be87501f0ed223 + home: https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler + icon: https://github.com/kubernetes/kubernetes/raw/master/logo/logo.png + keywords: + - cluster + - autoscaling + - overprovision + - cluster-autoscaler + maintainers: + - email: max.williams@deliveryhero.com + name: max-rocket-internet + - email: miguel.mingorance@deliveryhero.com + name: mmingorance-dh + name: cluster-overprovisioner + sources: + - https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md#how-can-i-configure-overprovisioning-with-cluster-autoscaler + - https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler + - https://github.com/kubernetes/kubernetes/tree/master/build/pause + urls: + - charts/cluster-overprovisioner-0.7.4.tgz + version: 0.7.4 + - apiVersion: v1 + appVersion: "3.1" + created: "2021-11-22T09:05:01Z" + description: 'This chart provide a buffer for cluster autoscaling to allow overprovisioning + of cluster nodes. This is desired when you have work loads that need to scale + up quickly without waiting for the new cluster nodes to be created and join + the cluster. It works by creating a deployment that creates pods of a lower + than default `PriorityClass`. These pods request resources from the cluster + but don''t actually consume any resources. These pods are then evicted allowing + other normal pods are created while also triggering a scale-up by the [cluster-autoscaler](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler). This + approach is the [current recommended method to achieve overprovisioning](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md#how-can-i-configure-overprovisioning-with-cluster-autoscaler). 
' + digest: b3a7bd539c1eb507fa48d8ce7d9898b55ac06738e2641cbf537389af0d6a358e + home: https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler + icon: https://github.com/kubernetes/kubernetes/raw/master/logo/logo.png + keywords: + - cluster + - autoscaling + - overprovision + - cluster-autoscaler + maintainers: + - email: max.williams@deliveryhero.com + name: max-rocket-internet + - email: miguel.mingorance@deliveryhero.com + name: mmingorance-dh + name: cluster-overprovisioner + sources: + - https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md#how-can-i-configure-overprovisioning-with-cluster-autoscaler + - https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler + - https://github.com/kubernetes/kubernetes/tree/master/build/pause + urls: + - charts/cluster-overprovisioner-0.7.3.tgz + version: 0.7.3 + - apiVersion: v1 + appVersion: "3.1" + created: "2021-08-16T13:50:50Z" + description: | + This chart provide a buffer for cluster autoscaling to allow overprovisioning of cluster nodes. This is desired when you have work loads that need to scale up quickly without waiting for the new cluster nodes to be created and join the cluster. + + It works by creating a deployment that creates pods of a lower than default `PriorityClass`. These pods request resources from the cluster but don't actually consume any resources. These pods are then evicted allowing other normal pods are created while also triggering a scale-up by the [cluster-autoscaler](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler). + + This approach is the [current recommended method to achieve overprovisioning](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md#how-can-i-configure-overprovisioning-with-cluster-autoscaler). 
+ digest: bf3dfe466fdc5cc33e9224ff642d07171be248fe8cd444e69052fe669d13443f + home: https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler + icon: https://github.com/kubernetes/kubernetes/raw/master/logo/logo.png + keywords: + - cluster + - autoscaling + - overprovision + - cluster-autoscaler + maintainers: + - email: max.williams@deliveryhero.com + name: max-rocket-internet + - email: miguel.mingorance@deliveryhero.com + name: mmingorance-dh + name: cluster-overprovisioner + sources: + - https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md#how-can-i-configure-overprovisioning-with-cluster-autoscaler + - https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler + - https://github.com/kubernetes/kubernetes/tree/master/build/pause + urls: + - charts/cluster-overprovisioner-0.7.2.tgz + version: 0.7.2 + - apiVersion: v1 + appVersion: "3.1" + created: "2021-05-18T08:39:49Z" + description: | + This chart provide a buffer for cluster autoscaling to allow overprovisioning of cluster nodes. This is desired when you have work loads that need to scale up quickly without waiting for the new cluster nodes to be created and join the cluster. + + It works by creating a deployment that creates pods of a lower than default `PriorityClass`. These pods request resources from the cluster but don't actually consume any resources. These pods are then evicted allowing other normal pods are created while also triggering a scale-up by the [cluster-autoscaler](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler). + + This approach is the [current recommended method to achieve overprovisioning](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md#how-can-i-configure-overprovisioning-with-cluster-autoscaler). 
+ digest: 0a505a40a4dc2de1aa6471485d8e8918392c01c3caf0dface624ddd5e6f9e229 + home: https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler + icon: https://github.com/kubernetes/kubernetes/raw/master/logo/logo.png + keywords: + - cluster + - autoscaling + - overprovision + - cluster-autoscaler + maintainers: + - email: max.williams@deliveryhero.com + name: max-rocket-internet + - email: miguel.mingorance@deliveryhero.com + name: mmingorance-dh + name: cluster-overprovisioner + sources: + - https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md#how-can-i-configure-overprovisioning-with-cluster-autoscaler + - https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler + - https://github.com/kubernetes/kubernetes/tree/master/build/pause + urls: + - charts/cluster-overprovisioner-0.7.1.tgz + version: 0.7.1 + - apiVersion: v1 + appVersion: "3.1" + created: "2021-04-21T18:07:10Z" + description: | + This chart provide a buffer for cluster autoscaling to allow overprovisioning of cluster nodes. This is desired when you have work loads that need to scale up quickly without waiting for the new cluster nodes to be created and join the cluster. + + It works by creating a deployment that creates pods of a lower than default `PriorityClass`. These pods request resources from the cluster but don't actually consume any resources. These pods are then evicted allowing other normal pods are created while also triggering a scale-up by the [cluster-autoscaler](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler). + + This approach is the [current recommended method to achieve overprovisioning](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md#how-can-i-configure-overprovisioning-with-cluster-autoscaler). 
+ digest: 317dcd12ebc745963479f5aa8589bb7863bde0cdbe51eb64245e5362d49057d0 + home: https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler + icon: https://github.com/kubernetes/kubernetes/raw/master/logo/logo.png + keywords: + - cluster + - autoscaling + - overprovision + - cluster-autoscaler + maintainers: + - email: max.williams@deliveryhero.com + name: max-rocket-internet + - email: miguel.mingorance@deliveryhero.com + name: mmingorance-dh + name: cluster-overprovisioner + sources: + - https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md#how-can-i-configure-overprovisioning-with-cluster-autoscaler + - https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler + - https://github.com/kubernetes/kubernetes/tree/master/build/pause + urls: + - charts/cluster-overprovisioner-0.7.0.tgz + version: 0.7.0 + - apiVersion: v1 + appVersion: "1.0" + created: "2021-04-14T08:34:29Z" + description: | + This chart provide a buffer for cluster autoscaling to allow overprovisioning of cluster nodes. This is desired when you have work loads that need to scale up quickly without waiting for the new cluster nodes to be created and join the cluster. + + It works by creating a deployment that creates pods of a lower than default `PriorityClass`. These pods request resources from the cluster but don't actually consume any resources. These pods are then evicted allowing other normal pods are created while also triggering a scale-up by the [cluster-autoscaler](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler). + + This approach is the [current recommended method to achieve overprovisioning](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md#how-can-i-configure-overprovisioning-with-cluster-autoscaler). 
+ digest: 45578d8a5a1b561ca6518acb654556a873e5385f80824d969af7b63e521e456a + home: https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler + icon: https://github.com/kubernetes/kubernetes/raw/master/logo/logo.png + keywords: + - cluster + - autoscaling + - overprovision + - cluster-autoscaler + maintainers: + - email: max.williams@deliveryhero.com + name: max-rocket-internet + - email: miguel.mingorance@deliveryhero.com + name: mmingorance-dh + name: cluster-overprovisioner + sources: + - https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md#how-can-i-configure-overprovisioning-with-cluster-autoscaler + - https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler + - https://github.com/kubernetes/kubernetes/tree/master/build/pause + urls: + - charts/cluster-overprovisioner-0.6.0.tgz + version: 0.6.0 + - apiVersion: v1 + appVersion: "1.0" + created: "2021-03-01T13:20:08Z" + description: | + This chart provide a buffer for cluster autoscaling to allow overprovisioning of cluster nodes. This is desired when you have work loads that need to scale up quickly without waiting for the new cluster nodes to be created and join the cluster. + + It works by creating a deployment that creates pods of a lower than default `PriorityClass`. These pods request resources from the cluster but don't actually consume any resources. These pods are then evicted allowing other normal pods are created while also triggering a scale-up by the [cluster-autoscaler](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler). + + This approach is the [current recommended method to achieve overprovisioning](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md#how-can-i-configure-overprovisioning-with-cluster-autoscaler). 
+ digest: 60c33e65198eb3f5232bef806915eab8851d948c10ac3a3b2fb0ba87eb1c96c5 + home: https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler + icon: https://github.com/kubernetes/kubernetes/raw/master/logo/logo.png + keywords: + - cluster + - autoscaling + - overprovision + - cluster-autoscaler + maintainers: + - email: max.williams@deliveryhero.com + name: max-rocket-internet + - email: miguel.mingorance@deliveryhero.com + name: mmingorance-dh + name: cluster-overprovisioner + sources: + - https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md#how-can-i-configure-overprovisioning-with-cluster-autoscaler + - https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler + - https://github.com/kubernetes/kubernetes/tree/master/build/pause + urls: + - charts/cluster-overprovisioner-0.5.0.tgz + version: 0.5.0 + - apiVersion: v1 + appVersion: "1.0" + created: "2020-12-01T12:34:18Z" + description: | + This chart provide a buffer for cluster autoscaling to allow overprovisioning of cluster nodes. This is desired when you have work loads that need to scale up quickly without waiting for the new cluster nodes to be created and join the cluster. + + It works by creating a deployment that creates pods of a lower than default `PriorityClass`. These pods request resources from the cluster but don't actually consume any resources. These pods are then evicted allowing other normal pods are created while also triggering a scale-up by the [cluster-autoscaler](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler). + + This approach is the [current recommended method to achieve overprovisioning](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md#how-can-i-configure-overprovisioning-with-cluster-autoscaler). 
+ digest: 76a3920c8a5475e3ab998cfa2ae17deffa5087818370ab3c4e2cdec176abc093 + home: https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler + icon: https://github.com/kubernetes/kubernetes/raw/master/logo/logo.png + keywords: + - cluster + - autoscaling + - overprovision + - cluster-autoscaler + maintainers: + - email: max.williams@deliveryhero.com + name: max-rocket-internet + - email: miguel.mingorance@deliveryhero.com + name: mmingorance-dh + name: cluster-overprovisioner + sources: + - https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md#how-can-i-configure-overprovisioning-with-cluster-autoscaler + - https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler + - https://github.com/kubernetes/kubernetes/tree/master/build/pause + urls: + - charts/cluster-overprovisioner-0.4.3.tgz + version: 0.4.3 + - apiVersion: v1 + appVersion: "1.0" + created: "2020-10-16T11:41:32Z" + description: | + This chart provide a buffer for cluster autoscaling to allow overprovisioning of cluster nodes. This is desired when you have work loads that need to scale up quickly without waiting for the new cluster nodes to be created and join the cluster. + + It works but creating a deployment that creates pods of a lower than default `PriorityClass`. These pods request resources from the cluster but don't actually consume any resources. These pods are then evicted allowing other normal pods are created while also triggering a scale-up by the [cluster-autoscaler](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler). + + This approach is the [current recommended method to achieve overprovisioning](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md#how-can-i-configure-overprovisioning-with-cluster-autoscaler). 
+ digest: 1bc6530d77234a78daddb7efa24496443328453c3e7ef7367ada7b132db426cd + home: https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler + icon: https://github.com/kubernetes/kubernetes/raw/master/logo/logo.png + keywords: + - cluster + - autoscaling + - overprovision + - cluster-autoscaler + maintainers: + - email: max.williams@deliveryhero.com + name: max-rocket-internet + - email: miguel.mingorance@deliveryhero.com + name: mmingorance-dh + name: cluster-overprovisioner + sources: + - https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md#how-can-i-configure-overprovisioning-with-cluster-autoscaler + - https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler + - https://github.com/kubernetes/kubernetes/tree/master/build/pause + urls: + - charts/cluster-overprovisioner-0.4.2.tgz + version: 0.4.2 + cortex-gateway: + - apiVersion: v2 + appVersion: v1.1.0 + created: "2024-10-04T09:09:38Z" + description: A Helm chart for cortex-gateway + digest: 8bfba26eb86bebd871b03297cbe5d0c034b753614d27fa218bf6e88b0930e763 + home: https://github.com/rewe-digital/cortex-gateway + maintainers: + - email: no-reply@deliveryhero.com + name: goelankitt + name: cortex-gateway + type: application + urls: + - charts/cortex-gateway-0.1.8.tgz + version: 0.1.8 + - apiVersion: v2 + appVersion: v1.1.0 + created: "2023-07-25T08:10:51Z" + description: A Helm chart for cortex-gateway + digest: 4b3f3d768436b0578d454563e3c31241bed18b2a28acca32697e9e5d2d09ab45 + home: https://github.com/rewe-digital/cortex-gateway + maintainers: + - email: no-reply@deliveryhero.com + name: goelankitt + name: cortex-gateway + type: application + urls: + - charts/cortex-gateway-0.1.7.tgz + version: 0.1.7 + - apiVersion: v2 + appVersion: 1.0.5 + created: "2022-04-05T08:48:03Z" + description: A Helm chart for cortex-gateway + digest: 7c2c8e6e33dbd84502bc3fcf505717019db5b3f2cb2453699a209cf325f74fb6 + home: https://github.com/rewe-digital/cortex-gateway + maintainers: 
+ - email: no-reply@deliveryhero.com + name: goelankitt + name: cortex-gateway + type: application + urls: + - charts/cortex-gateway-0.1.6.tgz + version: 0.1.6 + - apiVersion: v2 + appVersion: 1.0.4 + created: "2022-02-28T10:54:02Z" + description: A Helm chart for cortex-gateway + digest: d730a480817066f2472fa4a5d2f9ae1a7d8d03cfbff260e141f2084c83bf1056 + home: https://github.com/rewe-digital/cortex-gateway + maintainers: + - email: no-reply@deliveryhero.com + name: goelankitt + name: cortex-gateway + type: application + urls: + - charts/cortex-gateway-0.1.5.tgz + version: 0.1.5 + - apiVersion: v2 + appVersion: 1.0.2 + created: "2022-01-24T10:32:51Z" + description: A Helm chart for cortex-gateway + digest: ea106fecc4fe03779fd702cf040e3265471c7950593a46792f806f96ab915994 + home: https://github.com/rewe-digital/cortex-gateway + maintainers: + - email: no-reply@deliveryhero.com + name: goelankitt + name: cortex-gateway + type: application + urls: + - charts/cortex-gateway-0.1.4.tgz + version: 0.1.4 + - apiVersion: v2 + appVersion: 1.0.1 + created: "2021-11-01T15:03:29Z" + description: A Helm chart for cortex-gateway + digest: 38a4badfbd358036e0c23fb0418ff43689b3d4ecbeb4c68d79ffc95daf6ab170 + home: https://github.com/rewe-digital/cortex-gateway + maintainers: + - email: no-reply@deliveryhero.com + name: goelankitt + name: cortex-gateway + type: application + urls: + - charts/cortex-gateway-0.1.1.tgz + version: 0.1.1 + - apiVersion: v2 + appVersion: 1.0.1 + created: "2021-10-05T10:35:29Z" + description: A Helm chart for cortex-gateway + digest: de6e8e3a637ccaafb8daf232ddb73e038b97b56f7a70b970f3549a7126b44728 + home: https://github.com/rewe-digital/cortex-gateway + maintainers: + - email: no-reply@deliveryhero.com + name: goelankitt + name: cortex-gateway + type: application + urls: + - charts/cortex-gateway-0.1.0.tgz + version: 0.1.0 + datadog-controller: + - apiVersion: v1 + appVersion: "0.1" + created: "2024-10-04T09:09:38Z" + description: 'This is a simple [Kubernetes 
Controller](https://kubernetes.io/docs/concepts/architecture/controller/) + to allow [Datadog Monitors](https://docs.datadoghq.com/monitors/) to be created, + updated or deleted from custom resources in Kubernetes. You will need to set + `datadog.client_api_key` and `datadog.client_app_key` when installing the chart. + These keys can be found at https://app.datadoghq.eu/account/settings#api or + https://app.datadoghq.com/account/settings#api Here''s an example `DatadogMonitor` + resource: ```yaml apiVersion: datadoghq.com/v1beta1 kind: DatadogMonitor metadata: name: + apm-error-rate-example spec: name: my-service error rate query: "avg(last_5m):sum:trace.http.request.errors{env:stg,service:my-service} + / sum:trace.http.request.hits{env:stg,service:my-service} > 1" type: "query + alert" message: Service my-service has a high error rate on env:stg ``` ' + digest: 7f69ceacd4f609ce18c143463596eb8540f9b28e5e1636d87e8b06761e67b5d7 + home: https://github.com/max-rocket-internet/datadog-controller + icon: https://github.com/kubernetes/kubernetes/raw/master/logo/logo.png + keywords: + - monitoring + - datadog + - controller + maintainers: + - email: max.williams@deliveryhero.com + name: max-rocket-internet + name: datadog-controller + sources: + - https://github.com/max-rocket-internet/datadog-controller + urls: + - charts/datadog-controller-1.1.tgz + version: "1.1" + - apiVersion: v1 + appVersion: "0.1" + created: "2022-07-20T12:25:29Z" + description: 'This is a simple [Kubernetes Controller](https://kubernetes.io/docs/concepts/architecture/controller/) + to allow [Datadog Monitors](https://docs.datadoghq.com/monitors/) to be created, + updated or deleted from custom resources in Kubernetes. You will need to set + `datadog.client_api_key` and `datadog.client_app_key` when installing the chart. 
+ These keys can be found at https://app.datadoghq.eu/account/settings#api or + https://app.datadoghq.com/account/settings#api Here''s an example `DatadogMonitor` + resource: ```yaml apiVersion: datadoghq.com/v1beta1 kind: DatadogMonitor metadata: name: + apm-error-rate-example spec: name: my-service error rate query: "avg(last_5m):sum:trace.http.request.errors{env:stg,service:my-service} + / sum:trace.http.request.hits{env:stg,service:my-service} > 1" type: "query + alert" message: Service my-service has a high error rate on env:stg ``` ' + digest: 94ca0e9593cb112f9217c98f8f038e49912e48c792b7c1c347a457dd0077ba38 + home: https://github.com/deliveryhero/datadog-controller + icon: https://github.com/kubernetes/kubernetes/raw/master/logo/logo.png + keywords: + - monitoring + - datadog + - controller + maintainers: + - email: max.williams@deliveryhero.com + name: max-rocket-internet + name: datadog-controller + sources: + - https://github.com/deliveryhero/datadog-controller + urls: + - charts/datadog-controller-1.0.tgz + version: "1.0" + dregsy: + - apiVersion: v2 + appVersion: 0.4.3 + created: "2024-10-04T09:09:39Z" + description: Dregsy lets you sync Docker images between registries, public or + private through defined sync tasks can be invoked as one-off or periodic task. + digest: dbbbd77332f7cd7f40591a7dc63a7e9e264ed106a1fc7533d134a708b6f5972a + home: https://github.com/xelalexv/dregsy + maintainers: + - email: no-reply@deliveryhero.com + name: nyambati + name: dregsy + sources: + - https://github.com/xelalexv/dregsy + type: application + urls: + - charts/dregsy-0.1.4.tgz + version: 0.1.4 + - apiVersion: v2 + appVersion: 0.4.3 + created: "2023-05-04T08:56:33Z" + description: Dregsy lets you sync Docker images between registries, public or + private through defined sync tasks can be invoked as one-off or periodic task. 
+ digest: cddc3237d36b950e03c47a9455e30128c54efe42fac3fcdfae311245bcc308a2 + home: https://github.com/xelalexv/dregsy + maintainers: + - email: no-reply@deliveryhero.com + name: nyambati + name: dregsy + sources: + - https://github.com/xelalexv/dregsy + type: application + urls: + - charts/dregsy-0.1.3.tgz + version: 0.1.3 + - apiVersion: v2 + appVersion: 0.4.3 + created: "2023-04-25T07:50:04Z" + description: Dregsy lets you sync Docker images between registries, public or + private through defined sync tasks can be invoked as one-off or periodic task. + digest: d7b40b3afc889c176451c8f80d588f671fc37f9fb9553d294c7919c804b191f9 + home: https://github.com/xelalexv/dregsy + maintainers: + - email: no-reply@deliveryhero.com + name: nyambati + name: dregsy + sources: + - https://github.com/xelalexv/dregsy + type: application + urls: + - charts/dregsy-0.1.2.tgz + version: 0.1.2 + - apiVersion: v2 + appVersion: 0.4.3 + created: "2023-03-24T13:53:01Z" + description: Dregsy lets you sync Docker images between registries, public or + private through defined sync tasks can be invoked as one-off or periodic task. + digest: 7337b15004221dff153f37e308a8973b300cffb3edd847c934c7b7be38b1842f + home: https://github.com/xelalexv/dregsy + maintainers: + - email: no-reply@deliveryhero.com + name: nyambati + name: dregsy + sources: + - https://github.com/xelalexv/dregsy + type: application + urls: + - charts/dregsy-0.1.1.tgz + version: 0.1.1 + - apiVersion: v2 + appVersion: 0.4.3 + created: "2022-07-13T08:09:36Z" + description: Dregsy lets you sync Docker images between registries, public or + private through defined sync tasks can be invoked as one-off or periodic task. 
+ digest: 6a4449f50e7ed8d4c8ccd0b5cf5ebe6c480eb8c9436310b310aff128ebb17e8b + home: https://github.com/xelalexv/dregsy + maintainers: + - email: no-reply@deliveryhero.com + name: nyambati + name: dregsy + sources: + - https://github.com/xelalexv/dregsy + type: application + urls: + - charts/dregsy-0.1.0.tgz + version: 0.1.0 + field-exporter: + - apiVersion: v2 + appVersion: v1.3.1 + created: "2024-10-04T09:09:39Z" + description: 'A chart to install [field-exporter](https://github.com/deliveryhero/field-exporter). + This controller is used to fill the gap in [k8s-config-connector](https://github.com/GoogleCloudPlatform/k8s-config-connector) + for exporting value from Config Connector managed resources into Secrets and + ConfigMaps. ' + digest: ffb7cbf8e82f1aa7557fc50e864d3cfe83a8d0836d5de4017797d5e34e3384c1 + home: https://github.com/deliveryhero/field-exporter + maintainers: + - email: no-reply@deliveryhero.com + name: vzholudev + url: https://github.com/vzholudev + - email: no-reply@deliveryhero.com + name: mtahaahmed + url: https://github.com/mtahaahmed + name: field-exporter + sources: + - https://github.com/deliveryhero/field-exporter + type: application + urls: + - charts/field-exporter-1.3.1.tgz + version: 1.3.1 + - apiVersion: v2 + appVersion: v1.3.0 + created: "2024-02-05T12:27:42Z" + description: 'A chart to install [field-exporter](https://github.com/deliveryhero/field-exporter). + This controller is used to fill the gap in [k8s-config-connector](https://github.com/GoogleCloudPlatform/k8s-config-connector) + for exporting value from Config Connector managed resources into Secrets and + ConfigMaps. 
' + digest: 07dfae5120837c4a3b266ab92ecec60cd7f17980b000b4c6fde6e8f629b8d4a1 + home: https://github.com/deliveryhero/field-exporter + maintainers: + - email: no-reply@deliveryhero.com + name: vzholudev + url: https://github.com/vzholudev + - email: no-reply@deliveryhero.com + name: mtahaahmed + url: https://github.com/mtahaahmed + name: field-exporter + sources: + - https://github.com/deliveryhero/field-exporter + type: application + urls: + - charts/field-exporter-1.3.0.tgz + version: 1.3.0 + - apiVersion: v2 + appVersion: v1.1.0 + created: "2023-12-13T08:34:42Z" + description: 'A chart to install [field-exporter](https://github.com/deliveryhero/field-exporter). + This controller is used to fill the gap in [k8s-config-connector](https://github.com/GoogleCloudPlatform/k8s-config-connector) + for exporting value from Config Connector managed resources into Secrets and + ConfigMaps. ' + digest: c7befc0bc767869c6d0420b7ce5ed1bad7018c3ee68df56d2cdef1f36df159a9 + home: https://github.com/deliveryhero/field-exporter + maintainers: + - email: no-reply@deliveryhero.com + name: vzholudev + url: https://github.com/vzholudev + - email: no-reply@deliveryhero.com + name: mtahaahmed + url: https://github.com/mtahaahmed + name: field-exporter + sources: + - https://github.com/deliveryhero/field-exporter + type: application + urls: + - charts/field-exporter-1.1.0.tgz + version: 1.1.0 + - apiVersion: v2 + appVersion: v1.0.5 + created: "2023-11-28T12:32:14Z" + description: 'A chart to install [field-exporter](https://github.com/deliveryhero/field-exporter). + This controller is used to fill the gap in [k8s-config-connector](https://github.com/GoogleCloudPlatform/k8s-config-connector) + for exporting value from Config Connector managed resources into Secrets and + ConfigMaps. 
' + digest: 1ef86fa3702ade9d3e398f19a31ccccbf0dd867adb25a1ab510cbf87372ccbbb + home: https://github.com/deliveryhero/field-exporter + maintainers: + - email: no-reply@deliveryhero.com + name: vzholudev + url: https://github.com/vzholudev + - email: no-reply@deliveryhero.com + name: mtahaahmed + url: https://github.com/mtahaahmed + name: field-exporter + sources: + - https://github.com/deliveryhero/field-exporter + type: application + urls: + - charts/field-exporter-1.0.5.tgz + version: 1.0.5 + - apiVersion: v2 + appVersion: v1.0.2 + created: "2023-11-15T09:01:40Z" + description: 'A chart to install [field-exporter](https://github.com/deliveryhero/field-exporter). + This controller is used to fill the gap in [k8s-config-connector](https://github.com/GoogleCloudPlatform/k8s-config-connector) + for exporting value from Config Connector managed resources into Secrets and + ConfigMaps. ' + digest: 12b119bd3bd2d9afe95c7ec282db8929c026e2e39832b4e452f90c2eae3c89bf + home: https://github.com/deliveryhero/field-exporter + maintainers: + - email: no-reply@deliveryhero.com + name: vzholudev + url: https://github.com/vzholudev + - email: no-reply@deliveryhero.com + name: mtahaahmed + url: https://github.com/mtahaahmed + name: field-exporter + sources: + - https://github.com/deliveryhero/field-exporter + type: application + urls: + - charts/field-exporter-1.0.4.tgz + version: 1.0.4 + - apiVersion: v2 + appVersion: v1.0.2 + created: "2023-11-09T11:14:29Z" + description: 'A chart to install [field-exporter](https://github.com/deliveryhero/field-exporter). + This controller is used to fill the gap in [k8s-config-connector](https://github.com/GoogleCloudPlatform/k8s-config-connector) + for exporting value from Config Connector managed resources into Secrets and + ConfigMaps. 
' + digest: 27f327f33c3e0b0853fe28bc50100b86249608af8800456724fc5bd80a4e73ca + home: https://github.com/deliveryhero/field-exporter + maintainers: + - email: no-reply@deliveryhero.com + name: vzholudev + url: https://github.com/vzholudev + - email: no-reply@deliveryhero.com + name: mtahaahmed + url: https://github.com/mtahaahmed + name: field-exporter + sources: + - https://github.com/deliveryhero/field-exporter + type: application + urls: + - charts/field-exporter-1.0.3.tgz + version: 1.0.3 + - apiVersion: v2 + appVersion: v1.0.2 + created: "2023-11-08T09:34:56Z" + description: 'A chart to install [field-exporter](https://github.com/deliveryhero/field-exporter). + This controller is used to fill the gap in [k8s-config-connector](https://github.com/GoogleCloudPlatform/k8s-config-connector) + for exporting value from Config Connector managed resources into Secrets and + ConfigMaps. ' + digest: ab2295a07868652b44f2750f174eee2141085183b87fa8299d1fcb5016d7ffc4 + maintainers: + - email: no-reply@deliveryhero.com + name: vzholudev + url: https://github.com/vzholudev + - email: no-reply@deliveryhero.com + name: mtahaahmed + url: https://github.com/mtahaahmed + name: field-exporter + type: application + urls: + - charts/field-exporter-1.0.2.tgz + version: 1.0.2 + - apiVersion: v2 + appVersion: v1.0.0 + created: "2023-11-06T08:41:39Z" + description: 'A chart to install [field-exporter](https://github.com/deliveryhero/field-exporter). + This controller is used to fill the gap in [k8s-config-connector](https://github.com/GoogleCloudPlatform/k8s-config-connector) + for exporting value from Config Connector managed resources into Secrets and + ConfigMaps. 
' + digest: bb58d24e29e705ba3cbbad831f1d2acdafd6f566f5617814fe2be795fd0378ed + maintainers: + - email: no-reply@deliveryhero.com + name: vzholudev + url: https://github.com/vzholudev + - email: no-reply@deliveryhero.com + name: mtahaahmed + url: https://github.com/mtahaahmed + name: field-exporter + type: application + urls: + - charts/field-exporter-1.0.0.tgz + version: 1.0.0 + gripmock: + - apiVersion: v2 + appVersion: 1.10.1 + created: "2024-10-04T09:09:40Z" + description: 'A chart to install [gripmock](https://github.com/tokopedia/gripmock). + A mock server for GRPC services. It uses `.proto` file(s) to generate the implementation + of gRPC service(s) for you. > **Note:** > > The latest version (v1.10 - default) + of gripmock is requiring `go_package` declaration in the `.proto` file. This + is due to the latest update of `protoc` plugin that is used by gripmock is making + the `go_package` declaration mandatory. > > Version v1.11.1-beta release is + available by overriding the `image.tag` in your `values.yaml` file. This version + supports **NO** declaration of `go_package`. ' + digest: 474636cd3a0ebf44c834c8a1a2f2726913d1e14942bd8e9f4c3b8955e532c67e + maintainers: + - email: marcelo.aplanalp@deliveryhero.com + name: MarceloAplanalp + name: gripmock + type: application + urls: + - charts/gripmock-1.1.2.tgz + version: 1.1.2 + - apiVersion: v2 + appVersion: 1.10.1 + created: "2023-09-29T10:00:08.024956337Z" + description: 'A chart to install [gripmock](https://github.com/tokopedia/gripmock). + A mock server for GRPC services. It uses `.proto` file(s) to generate the implementation + of gRPC service(s) for you. > **Note:** > > The latest version (v1.10 - default) + of gripmock is requiring `go_package` declaration in the `.proto` file. This + is due to the latest update of `protoc` plugin that is used by gripmock is making + the `go_package` declaration mandatory. 
> > Version v1.11.1-beta release is + available by overriding the `image.tag` in your `values.yaml` file. This version + supports **NO** declaration of `go_package`. ' + digest: 3f3bf84c7f7e15113d2b2f8fca980a56ee3da0dc44867c275acd96c600955f97 + maintainers: + - email: marcelo.aplanalp@deliveryhero.com + name: MarceloAplanalp + name: gripmock + type: application + urls: + - charts/gripmock-1.1.1.tgz + version: 1.1.1 + - apiVersion: v2 + appVersion: 1.10.1 + created: "2023-08-09T07:38:28Z" + description: 'A chart to install [gripmock](https://github.com/tokopedia/gripmock). + A mock server for GRPC services. It uses `.proto` file(s) to generate the implementation + of gRPC service(s) for you. > **Note:** > > The latest version (v1.10 - default) + of gripmock is requiring `go_package` declaration in the `.proto` file. This + is due to the latest update of `protoc` plugin that is used by gripmock is making + the `go_package` declaration mandatory. > > Version v1.11.1-beta release is + available by overriding the `image.tag` in your `values.yaml` file. This version + supports **NO** declaration of `go_package`. ' + digest: d1eed49a16a902b5ac2d11dfaca9405b4073b7282c4dbecbfded34c75bfc3cf4 + maintainers: + - email: marcelo.aplanalp@deliveryhero.com + name: MarceloAplanalp + name: gripmock + type: application + urls: + - charts/gripmock-1.1.0.tgz + version: 1.1.0 + - apiVersion: v2 + appVersion: 1.10.1 + created: "2023-08-08T08:55:37Z" + description: 'A chart to install [gripmock](https://github.com/tokopedia/gripmock). + A mock server for GRPC services. It uses `.proto` file(s) to generate the implementation + of gRPC service(s) for you. > **Note:** > > The latest version (v1.10 - default) + of gripmock is requiring `go_package` declaration in the `.proto` file. This + is due to the latest update of `protoc` plugin that is used by gripmock is making + the `go_package` declaration mandatory. 
> > Version v1.11.1-beta release is + available by overriding the `image.tag` in your `values.yaml` file. This version + supports **NO** declaration of `go_package`. ' + digest: 530499f020c2c0ee27774d8f9feedd381d1337728c7612a5675e6f318dc7de20 + maintainers: + - email: marcelo.aplanalp@deliveryhero.com + name: MarceloAplanalp + name: gripmock + type: application + urls: + - charts/gripmock-1.0.0.tgz + version: 1.0.0 + hoppscotch: + - apiVersion: v1 + appVersion: 2024.8.2 + created: "2024-10-04T09:09:41Z" + description: A free, fast and beautiful API request builder + digest: 74c09f2d16f88baa9a7ad3d47c7da8f169e29e7d0e6461060d0ad6adefaf8b37 + home: https://github.com/hoppscotch/hoppscotch + maintainers: + - email: no-reply@deliveryhero.com + name: javad-hajiani + name: hoppscotch + sources: + - https://github.com/hoppscotch/hoppscotch + urls: + - charts/hoppscotch-0.3.0.tgz + version: 0.3.0 + - apiVersion: v1 + appVersion: v1.9.9 + created: "2024-09-17T09:55:48Z" + description: A free, fast and beautiful API request builder + digest: 245036d41352b5471552d70d784a327fa5a676f9594a3e65a97a40db4b47ef58 + home: https://github.com/hoppscotch/hoppscotch + maintainers: + - email: no-reply@deliveryhero.com + name: javad-hajiani + name: hoppscotch + sources: + - https://github.com/hoppscotch/hoppscotch + urls: + - charts/hoppscotch-0.2.8.tgz + version: 0.2.8 + - apiVersion: v1 + appVersion: v1.9.7 + created: "2021-02-04T14:56:03Z" + description: A free, fast and beautiful API request builder + digest: afaecbedfdff147ddb5c17e87b3eb5963f49bf97a6c8746ea57a4fe5b06e289d + home: https://github.com/hoppscotch/hoppscotch + maintainers: + - email: no-reply@deliveryhero.com + name: javad-hajiani + name: hoppscotch + sources: + - https://github.com/hoppscotch/hoppscotch + urls: + - charts/hoppscotch-0.2.7.tgz + version: 0.2.7 + - apiVersion: v1 + appVersion: v1.9.7 + created: "2020-09-04T07:50:46Z" + description: A free, fast and beautiful API request builder + digest: 
43473658f1303f1961fdeb97ca9e1ab634d6ef7debf5024d3f72c6c4af572dfc + home: https://github.com/hoppscotch/hoppscotch + maintainers: + - email: no-reply@deliveryhero.com + name: javad-hajiani + name: hoppscotch + sources: + - https://github.com/hoppscotch/hoppscotch + urls: + - charts/hoppscotch-0.2.6.tgz + version: 0.2.6 + k8s-cloudwatch-adapter: + - apiVersion: v1 + appVersion: 0.9.0 + created: "2024-10-04T09:09:41Z" + description: 'An implementation of the Kubernetes Custom Metrics API and External + Metrics API for AWS CloudWatch metrics. This adapter allows you to scale your + Kubernetes deployment using the Horizontal Pod Autoscaler (HPA) with metrics + from AWS CloudWatch. ' + digest: 2d010f1cd223c876109e6d87035fe8909f4d8fdf3405a000ff10a77a0ee5cc3b + home: https://github.com/awslabs/k8s-cloudwatch-adapter + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: k8s-cloudwatch-adapter + sources: + - https://github.com/awslabs/k8s-cloudwatch-adapter + urls: + - charts/k8s-cloudwatch-adapter-0.2.1.tgz + version: 0.2.1 + - apiVersion: v1 + appVersion: 0.9.0 + created: "2023-02-07T09:34:52Z" + description: 'An implementation of the Kubernetes Custom Metrics API and External + Metrics API for AWS CloudWatch metrics. This adapter allows you to scale your + Kubernetes deployment using the Horizontal Pod Autoscaler (HPA) with metrics + from AWS CloudWatch. ' + digest: fe30a62daba6663e063ccc857ac4d5d1cfc84c062758f398cd83fe1553932bc6 + home: https://github.com/awslabs/k8s-cloudwatch-adapter + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: k8s-cloudwatch-adapter + sources: + - https://github.com/awslabs/k8s-cloudwatch-adapter + urls: + - charts/k8s-cloudwatch-adapter-0.2.0.tgz + version: 0.2.0 + - apiVersion: v1 + appVersion: 0.9.0 + created: "2022-12-30T09:05:06Z" + description: 'An implementation of the Kubernetes Custom Metrics API and External + Metrics API for AWS CloudWatch metrics. 
This adapter allows you to scale your + Kubernetes deployment using the Horizontal Pod Autoscaler (HPA) with metrics + from AWS CloudWatch. ' + digest: a1d6dd4fb1761ab298faeec99406d3ac02c4fc7c55f8680e7590aa2c88b1928b + home: https://github.com/awslabs/k8s-cloudwatch-adapter + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: k8s-cloudwatch-adapter + sources: + - https://github.com/awslabs/k8s-cloudwatch-adapter + urls: + - charts/k8s-cloudwatch-adapter-0.1.4.tgz + version: 0.1.4 + - apiVersion: v1 + appVersion: 0.9.0 + created: "2020-11-13T08:32:02Z" + description: | + An implementation of the Kubernetes Custom Metrics API and External Metrics API for AWS CloudWatch metrics. This adapter allows you to scale your Kubernetes deployment using the Horizontal Pod Autoscaler (HPA) with metrics from AWS CloudWatch. + digest: 47a588bd2ff2127e554a68b9e9c717428be7501a619b3ecb292cd82ccc0409d1 + home: https://github.com/awslabs/k8s-cloudwatch-adapter + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: k8s-cloudwatch-adapter + sources: + - https://github.com/awslabs/k8s-cloudwatch-adapter + urls: + - charts/k8s-cloudwatch-adapter-0.1.3.tgz + version: 0.1.3 + k8s-event-logger: + - apiVersion: v1 + appVersion: "2.1" + created: "2024-10-04T09:09:42Z" + description: 'This chart runs a pod that simply watches Kubernetes Events and + logs them to stdout in JSON to be collected and stored by your logging solution, + e.g. [fluentd](https://github.com/helm/charts/tree/master/stable/fluentd) or + [fluent-bit](https://github.com/helm/charts/tree/master/stable/fluent-bit). https://github.com/max-rocket-internet/k8s-event-logger Events + in Kubernetes log very important information. If are trying to understand what + happened in the past then these events show clearly what your Kubernetes cluster + was thinking and doing. 
Some examples: - Pod events like failed probes, crashes, + scheduling related information like `TriggeredScaleUp` or `FailedScheduling` + - HorizontalPodAutoscaler events like scaling up and down - Deployment events + like scaling in and out of ReplicaSets - Ingress events like create and update The + problem is that these events are simply API objects in Kubernetes and are only + stored for about 1 hour. Without some way of storing these events, debugging + a problem in the past very tricky. ' + digest: 4552dabbdbd57582a8273360a14498fccb5b79732eeea4af6efd9860dc92e4f6 + home: https://github.com/max-rocket-internet/k8s-event-logger + icon: https://github.com/kubernetes/kubernetes/raw/master/logo/logo.png + keywords: + - events + - logging + - Auditing + maintainers: + - email: max.williams@deliveryhero.com + name: max-rocket-internet + name: k8s-event-logger + sources: + - https://github.com/max-rocket-internet/k8s-event-logger + urls: + - charts/k8s-event-logger-1.1.8.tgz + version: 1.1.8 + - apiVersion: v1 + appVersion: "2.1" + created: "2024-10-04T07:23:31Z" + description: 'This chart runs a pod that simply watches Kubernetes Events and + logs them to stdout in JSON to be collected and stored by your logging solution, + e.g. [fluentd](https://github.com/helm/charts/tree/master/stable/fluentd) or + [fluent-bit](https://github.com/helm/charts/tree/master/stable/fluent-bit). https://github.com/max-rocket-internet/k8s-event-logger Events + in Kubernetes log very important information. If are trying to understand what + happened in the past then these events show clearly what your Kubernetes cluster + was thinking and doing. 
Some examples: - Pod events like failed probes, crashes, + scheduling related information like `TriggeredScaleUp` or `FailedScheduling` + - HorizontalPodAutoscaler events like scaling up and down - Deployment events + like scaling in and out of ReplicaSets - Ingress events like create and update The + problem is that these events are simply API objects in Kubernetes and are only + stored for about 1 hour. Without some way of storing these events, debugging + a problem in the past very tricky. ' + digest: ee9d42ea9eae106fe65b2b706876bcf12aa6c3d2c9e94e261b9794c0a6cad9d8 + home: https://github.com/max-rocket-internet/k8s-event-logger + icon: https://github.com/kubernetes/kubernetes/raw/master/logo/logo.png + keywords: + - events + - logging + - Auditing + maintainers: + - email: max.williams@deliveryhero.com + name: max-rocket-internet + name: k8s-event-logger + sources: + - https://github.com/max-rocket-internet/k8s-event-logger + urls: + - charts/k8s-event-logger-1.1.7.tgz + version: 1.1.7 + - apiVersion: v1 + appVersion: "2.1" + created: "2024-09-20T10:38:38Z" + description: 'This chart runs a pod that simply watches Kubernetes Events and + logs them to stdout in JSON to be collected and stored by your logging solution, + e.g. [fluentd](https://github.com/helm/charts/tree/master/stable/fluentd) or + [fluent-bit](https://github.com/helm/charts/tree/master/stable/fluent-bit). https://github.com/max-rocket-internet/k8s-event-logger Events + in Kubernetes log very important information. If are trying to understand what + happened in the past then these events show clearly what your Kubernetes cluster + was thinking and doing. 
Some examples: - Pod events like failed probes, crashes, + scheduling related information like `TriggeredScaleUp` or `FailedScheduling` + - HorizontalPodAutoscaler events like scaling up and down - Deployment events + like scaling in and out of ReplicaSets - Ingress events like create and update The + problem is that these events are simply API objects in Kubernetes and are only + stored for about 1 hour. Without some way of storing these events, debugging + a problem in the past very tricky. ' + digest: 95b1eca04ded3bb2ff5edc6143152b53e75836c54c66dc98e1183d9a217f5f0d + home: https://github.com/max-rocket-internet/k8s-event-logger + icon: https://github.com/kubernetes/kubernetes/raw/master/logo/logo.png + keywords: + - events + - logging + - Auditing + maintainers: + - email: max.williams@deliveryhero.com + name: max-rocket-internet + name: k8s-event-logger + sources: + - https://github.com/max-rocket-internet/k8s-event-logger + urls: + - charts/k8s-event-logger-1.1.6.tgz + version: 1.1.6 + - apiVersion: v1 + appVersion: "2.1" + created: "2023-11-03T15:36:13Z" + description: 'This chart runs a pod that simply watches Kubernetes Events and + logs them to stdout in JSON to be collected and stored by your logging solution, + e.g. [fluentd](https://github.com/helm/charts/tree/master/stable/fluentd) or + [fluent-bit](https://github.com/helm/charts/tree/master/stable/fluent-bit). https://github.com/max-rocket-internet/k8s-event-logger Events + in Kubernetes log very important information. If are trying to understand what + happened in the past then these events show clearly what your Kubernetes cluster + was thinking and doing. 
Some examples: - Pod events like failed probes, crashes, + scheduling related information like `TriggeredScaleUp` or `FailedScheduling` + - HorizontalPodAutoscaler events like scaling up and down - Deployment events + like scaling in and out of ReplicaSets - Ingress events like create and update The + problem is that these events are simply API objects in Kubernetes and are only + stored for about 1 hour. Without some way of storing these events, debugging + a problem in the past is very tricky. ' + digest: b42c10a48d1e8be67cedcc9e893e1805c047f216f6343247ecc56645312c6ad3 + home: https://github.com/max-rocket-internet/k8s-event-logger + icon: https://github.com/kubernetes/kubernetes/raw/master/logo/logo.png + keywords: + - events + - logging + - Auditing + maintainers: + - email: max.williams@deliveryhero.com + name: max-rocket-internet + name: k8s-event-logger + sources: + - https://github.com/max-rocket-internet/k8s-event-logger + urls: + - charts/k8s-event-logger-1.1.5.tgz + version: 1.1.5 + - apiVersion: v1 + appVersion: "2.0" + created: "2023-07-05T08:46:11Z" + description: 'This chart runs a pod that simply watches Kubernetes Events and + logs them to stdout in JSON to be collected and stored by your logging solution, + e.g. [fluentd](https://github.com/helm/charts/tree/master/stable/fluentd) or + [fluent-bit](https://github.com/helm/charts/tree/master/stable/fluent-bit). https://github.com/max-rocket-internet/k8s-event-logger Events + in Kubernetes log very important information. If you are trying to understand what + happened in the past then these events show clearly what your Kubernetes cluster + was thinking and doing. 
Some examples: - Pod events like failed probes, crashes, + scheduling related information like `TriggeredScaleUp` or `FailedScheduling` + - HorizontalPodAutoscaler events like scaling up and down - Deployment events + like scaling in and out of ReplicaSets - Ingress events like create and update The + problem is that these events are simply API objects in Kubernetes and are only + stored for about 1 hour. Without some way of storing these events, debugging + a problem in the past is very tricky. ' + digest: 5ec576985b70506c73b6a2fe4c9eaf7572adbc3bddbabb891c501a4ca6c03b74 + home: https://github.com/max-rocket-internet/k8s-event-logger + icon: https://github.com/kubernetes/kubernetes/raw/master/logo/logo.png + keywords: + - events + - logging + - Auditing + maintainers: + - email: max.williams@deliveryhero.com + name: max-rocket-internet + name: k8s-event-logger + sources: + - https://github.com/max-rocket-internet/k8s-event-logger + urls: + - charts/k8s-event-logger-1.1.4.tgz + version: 1.1.4 + - apiVersion: v1 + appVersion: "1.8" + created: "2023-06-21T09:42:59.028982371Z" + description: 'This chart runs a pod that simply watches Kubernetes Events and + logs them to stdout in JSON to be collected and stored by your logging solution, + e.g. [fluentd](https://github.com/helm/charts/tree/master/stable/fluentd) or + [fluent-bit](https://github.com/helm/charts/tree/master/stable/fluent-bit). https://github.com/max-rocket-internet/k8s-event-logger Events + in Kubernetes log very important information. If you are trying to understand what + happened in the past then these events show clearly what your Kubernetes cluster + was thinking and doing. 
Some examples: - Pod events like failed probes, crashes, + scheduling related information like `TriggeredScaleUp` or `FailedScheduling` + - HorizontalPodAutoscaler events like scaling up and down - Deployment events + like scaling in and out of ReplicaSets - Ingress events like create and update The + problem is that these events are simply API objects in Kubernetes and are only + stored for about 1 hour. Without some way of storing these events, debugging + a problem in the past is very tricky. ' + digest: f6610c47552efe77b92b26f7b7a8969283b6f74056121f78f6664ca97215579c + home: https://github.com/max-rocket-internet/k8s-event-logger + icon: https://github.com/kubernetes/kubernetes/raw/master/logo/logo.png + keywords: + - events + - logging + - Auditing + maintainers: + - email: max.williams@deliveryhero.com + name: max-rocket-internet + name: k8s-event-logger + sources: + - https://github.com/max-rocket-internet/k8s-event-logger + urls: + - charts/k8s-event-logger-1.1.3.tgz + version: 1.1.3 + - apiVersion: v1 + appVersion: "1.6" + created: "2023-06-13T11:10:57.002946069Z" + description: 'This chart runs a pod that simply watches Kubernetes Events and + logs them to stdout in JSON to be collected and stored by your logging solution, + e.g. [fluentd](https://github.com/helm/charts/tree/master/stable/fluentd) or + [fluent-bit](https://github.com/helm/charts/tree/master/stable/fluent-bit). https://github.com/max-rocket-internet/k8s-event-logger Events + in Kubernetes log very important information. If you are trying to understand what + happened in the past then these events show clearly what your Kubernetes cluster + was thinking and doing. 
Some examples: - Pod events like failed probes, crashes, + scheduling related information like `TriggeredScaleUp` or `FailedScheduling` + - HorizontalPodAutoscaler events like scaling up and down - Deployment events + like scaling in and out of ReplicaSets - Ingress events like create and update The + problem is that these events are simply API objects in Kubernetes and are only + stored for about 1 hour. Without some way of storing these events, debugging + a problem in the past is very tricky. ' + digest: ea453a98e8bbbb57359798a6d1ed624381a92bb6c44a2f8e190c951bce17e237 + home: https://github.com/max-rocket-internet/k8s-event-logger + icon: https://github.com/kubernetes/kubernetes/raw/master/logo/logo.png + keywords: + - events + - logging + - Auditing + maintainers: + - email: max.williams@deliveryhero.com + name: max-rocket-internet + name: k8s-event-logger + sources: + - https://github.com/max-rocket-internet/k8s-event-logger + urls: + - charts/k8s-event-logger-1.1.2.tgz + version: 1.1.2 + - apiVersion: v1 + appVersion: "1.6" + created: "2022-12-05T16:09:04Z" + description: 'This chart runs a pod that simply watches Kubernetes Events and + logs them to stdout in JSON to be collected and stored by your logging solution, + e.g. [fluentd](https://github.com/helm/charts/tree/master/stable/fluentd) or + [fluent-bit](https://github.com/helm/charts/tree/master/stable/fluent-bit). https://github.com/max-rocket-internet/k8s-event-logger Events + in Kubernetes log very important information. If you are trying to understand what + happened in the past then these events show clearly what your Kubernetes cluster + was thinking and doing. 
Some examples: - Pod events like failed probes, crashes, + scheduling related information like `TriggeredScaleUp` or `FailedScheduling` + - HorizontalPodAutoscaler events like scaling up and down - Deployment events + like scaling in and out of ReplicaSets - Ingress events like create and update The + problem is that these events are simply API objects in Kubernetes and are only + stored for about 1 hour. Without some way of storing these events, debugging + a problem in the past is very tricky. ' + digest: 880511f923c26a3fe8446df077e879327a45aaec481a96e5925d9bc733b48308 + home: https://github.com/max-rocket-internet/k8s-event-logger + icon: https://github.com/kubernetes/kubernetes/raw/master/logo/logo.png + keywords: + - events + - logging + - Auditing + maintainers: + - email: max.williams@deliveryhero.com + name: max-rocket-internet + name: k8s-event-logger + sources: + - https://github.com/max-rocket-internet/k8s-event-logger + urls: + - charts/k8s-event-logger-1.1.1.tgz + version: 1.1.1 + - apiVersion: v1 + appVersion: "1.6" + created: "2022-12-05T10:00:20Z" + description: 'This chart runs a pod that simply watches Kubernetes Events and + logs them to stdout in JSON to be collected and stored by your logging solution, + e.g. [fluentd](https://github.com/helm/charts/tree/master/stable/fluentd) or + [fluent-bit](https://github.com/helm/charts/tree/master/stable/fluent-bit). https://github.com/max-rocket-internet/k8s-event-logger Events + in Kubernetes log very important information. If you are trying to understand what + happened in the past then these events show clearly what your Kubernetes cluster + was thinking and doing. 
Some examples: - Pod events like failed probes, crashes, + scheduling related information like `TriggeredScaleUp` or `FailedScheduling` + - HorizontalPodAutoscaler events like scaling up and down - Deployment events + like scaling in and out of ReplicaSets - Ingress events like create and update The + problem is that these events are simply API objects in Kubernetes and are only + stored for about 1 hour. Without some way of storing these events, debugging + a problem in the past is very tricky. ' + digest: c1bbf3da0cdfb5ee6f3ec1a4e10906e700d905998513b3f5dab564cef2e8c879 + home: https://github.com/max-rocket-internet/k8s-event-logger + icon: https://github.com/kubernetes/kubernetes/raw/master/logo/logo.png + keywords: + - events + - logging + - Auditing + maintainers: + - email: max.williams@deliveryhero.com + name: max-rocket-internet + name: k8s-event-logger + sources: + - https://github.com/max-rocket-internet/k8s-event-logger + urls: + - charts/k8s-event-logger-1.1.tgz + version: "1.1" + - apiVersion: v1 + appVersion: "1.5" + created: "2022-06-08T09:45:21Z" + description: 'This chart runs a pod that simply watches Kubernetes Events and + logs them to stdout in JSON to be collected and stored by your logging solution, + e.g. [fluentd](https://github.com/helm/charts/tree/master/stable/fluentd) or + [fluent-bit](https://github.com/helm/charts/tree/master/stable/fluent-bit). https://github.com/max-rocket-internet/k8s-event-logger Events + in Kubernetes log very important information. If you are trying to understand what + happened in the past then these events show clearly what your Kubernetes cluster + was thinking and doing. 
Some examples: - Pod events like failed probes, crashes, + scheduling related information like `TriggeredScaleUp` or `FailedScheduling` + - HorizontalPodAutoscaler events like scaling up and down - Deployment events + like scaling in and out of ReplicaSets - Ingress events like create and update The + problem is that these events are simply API objects in Kubernetes and are only + stored for about 1 hour. Without some way of storing these events, debugging + a problem in the past is very tricky. ' + digest: 21f82649915a1f6747a0400877269b6e2de9ee3b650e70eed32c12d6b8f7945a + home: https://github.com/max-rocket-internet/k8s-event-logger + icon: https://github.com/kubernetes/kubernetes/raw/master/logo/logo.png + keywords: + - events + - logging + - Auditing + maintainers: + - email: max.williams@deliveryhero.com + name: max-rocket-internet + name: k8s-event-logger + sources: + - https://github.com/max-rocket-internet/k8s-event-logger + urls: + - charts/k8s-event-logger-1.0.tgz + version: "1.0" + k8s-resources: + - apiVersion: v1 + appVersion: 0.0.1 + created: "2024-10-04T09:09:43Z" + description: 'Not an application but a Helm chart to create any and many resources + in Kubernetes. Currently supports: - CronJob - ConfigMap - Custom resources + from CustomResourceDefinition - HorizontalPodAutoscaler - Ingress - Namespace + - Secret - Service - ServiceAccount - ScaledObject (KEDA) Every resource type + can have custom labels, annotations or a `fullnameOverride` set. See default + [values.yaml](https://github.com/deliveryhero/helm-charts/blob/master/stable/k8s-resources/values.yaml) + for examples. 
' + digest: 5aa44e3a6c3c2d08f963cb3db37f0b2fdffec274c1ca13dd482ebbffbf3bf2c8 + home: https://github.com/deliveryhero/helm-charts + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: k8s-resources + urls: + - charts/k8s-resources-0.6.6.tgz + version: 0.6.6 + - apiVersion: v1 + appVersion: 0.0.1 + created: "2024-03-12T13:35:05Z" + description: 'Not an application but a Helm chart to create any and many resources + in Kubernetes. Currently supports: - CronJob - ConfigMap - Custom resources + from CustomResourceDefinition - HorizontalPodAutoscaler - Ingress - Namespace + - Secret - Service - ServiceAccount - ScaledObject (KEDA) Every resource type + can have custom labels, annotations or a `fullnameOverride` set. See default + [values.yaml](https://github.com/deliveryhero/helm-charts/blob/master/stable/k8s-resources/values.yaml) + for examples. ' + digest: 55ca3fc20df1364b6d282949c13d282441bf1539d581332db220faa79d8202b5 + home: https://github.com/deliveryhero/helm-charts + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: k8s-resources + urls: + - charts/k8s-resources-0.6.5.tgz + version: 0.6.5 + - apiVersion: v1 + appVersion: 0.0.1 + created: "2023-08-29T07:11:12Z" + description: 'Not an application but a Helm chart to create any and many resources + in Kubernetes. Currently supports: - CronJob - ConfigMap - Custom resources + from CustomResourceDefinition - HorizontalPodAutoscaler - Ingress - Namespace + - Secret - Service - ServiceAccount - ScaledObject (KEDA) Every resource type + can have custom labels, annotations or a `fullnameOverride` set. See default + [values.yaml](https://github.com/deliveryhero/helm-charts/blob/master/stable/k8s-resources/values.yaml) + for examples. 
' + digest: 17af2dc2cfe36090e92aab998812515d93228e5c11908e02028223719be74ead + home: https://github.com/deliveryhero/helm-charts + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: k8s-resources + urls: + - charts/k8s-resources-0.6.4.tgz + version: 0.6.4 + - apiVersion: v1 + appVersion: 0.0.1 + created: "2023-08-11T13:30:15Z" + description: 'Not an application but a Helm chart to create any and many resources + in Kubernetes. Currently supports: - CronJob - ConfigMap - Custom resources + from CustomResourceDefinition - HorizontalPodAutoscaler - Ingress - Namespace + - Secret - Service - ServiceAccount - ScaledObject (KEDA) Every resource type + can have custom labels, annotations or a `fullnameOverride` set. See default + [values.yaml](https://github.com/deliveryhero/helm-charts/blob/master/stable/k8s-resources/values.yaml) + for examples. ' + digest: b06e68dbd86af03a7f7747f8569f8e13644cc7f5e0fde7fa70439de48ce0816b + home: https://github.com/deliveryhero/helm-charts + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: k8s-resources + urls: + - charts/k8s-resources-0.6.3.tgz + version: 0.6.3 + - apiVersion: v1 + appVersion: 0.0.1 + created: "2023-05-31T12:29:54Z" + description: 'Not an application but a Helm chart to create any and many resources + in Kubernetes. Currently supports: - CronJob - ConfigMap - Custom resources + from CustomResourceDefinition - HorizontalPodAutoscaler - Ingress - Namespace + - Secret - Service - ServiceAccount - ScaledObject (KEDA) Every resource type + can have custom labels, annotations or a `fullnameOverride` set. See default + [values.yaml](https://github.com/deliveryhero/helm-charts/blob/master/stable/k8s-resources/values.yaml) + for examples. 
' + digest: 2692437f6c5b5b7644500244507611bee12603051745ee59bda3aebc9bb05607 + home: https://github.com/deliveryhero/helm-charts + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: k8s-resources + urls: + - charts/k8s-resources-0.6.2.tgz + version: 0.6.2 + - apiVersion: v1 + appVersion: 0.0.1 + created: "2023-05-31T09:41:37Z" + description: 'Not an application but a Helm chart to create any and many resources + in Kubernetes. Currently supports: - CronJob - ConfigMap - Custom resources + from CustomResourceDefinition - HorizontalPodAutoscaler - Ingress - Namespace + - Secret - Service - ServiceAccount Every resource type can have custom labels, + annotations or a `fullnameOverride` set. See default [values.yaml](https://github.com/deliveryhero/helm-charts/blob/master/stable/k8s-resources/values.yaml) + for examples. ' + digest: fc0f20db5ccd038ca124df4d3f812d10c3559aea84e1bfbbdc2848943b3bebc5 + home: https://github.com/deliveryhero/helm-charts + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: k8s-resources + urls: + - charts/k8s-resources-0.6.1.tgz + version: 0.6.1 + - apiVersion: v1 + appVersion: 0.0.1 + created: "2022-09-12T20:22:15Z" + description: 'Not an application but a Helm chart to create any and many resources + in Kubernetes. Currently supports: - CronJob - ConfigMap - Custom resources + from CustomResourceDefinition - HorizontalPodAutoscaler - Ingress - Secret - + Service - ServiceAccount Every resource type can have custom labels, annotations + or a `fullnameOverride` set. See default [values.yaml](https://github.com/deliveryhero/helm-charts/blob/master/stable/k8s-resources/values.yaml) + for examples. 
' + digest: e7c0a8c40d0c241e6dc97587d7cad3d7eb481c9c149f5fbeaec5ab05254c8fee + home: https://github.com/deliveryhero/helm-charts + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: k8s-resources + urls: + - charts/k8s-resources-0.5.5.tgz + version: 0.5.5 + - apiVersion: v1 + appVersion: 0.0.1 + created: "2022-08-10T15:18:34Z" + description: 'Not an application but a Helm chart to create any and many resources + in Kubernetes. Currently supports: - CronJob - ConfigMap - Custom resources + from CustomResourceDefinition - HorizontalPodAutoscaler - Ingress - Secret - + Service - ServiceAccount Every resource type can have custom labels, annotations + or a `fullnameOverride` set. See default [values.yaml](https://github.com/deliveryhero/helm-charts/blob/master/stable/k8s-resources/values.yaml) + for examples. ' + digest: b62fe882bafa3f326ed7505adc7449340adbc8622ed22b56394a8ef5de17f2bf + home: https://github.com/deliveryhero/helm-charts + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: k8s-resources + urls: + - charts/k8s-resources-0.5.4.tgz + version: 0.5.4 + - apiVersion: v1 + appVersion: 0.0.1 + created: "2022-06-08T07:46:48Z" + description: 'Not an application but a Helm chart to create any and many resources + in Kubernetes. Currently supports: - CronJob - ConfigMap - Custom resources + from CustomResourceDefinition - HorizontalPodAutoscaler - Ingress - Secret - + Service - ServiceAccount Every resource type can have custom labels, annotations + or a `fullnameOverride` set. See default [values.yaml](https://github.com/deliveryhero/helm-charts/blob/master/stable/k8s-resources/values.yaml) + for examples. 
' + digest: e430195c6337e04b9a941f7cb6a031310216a7caa1096bafc4c8bcef32d57be7 + home: https://github.com/deliveryhero/helm-charts + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: k8s-resources + urls: + - charts/k8s-resources-0.5.3.tgz + version: 0.5.3 + - apiVersion: v1 + appVersion: 0.0.1 + created: "2022-06-07T15:22:43Z" + description: 'Not an application but a Helm chart to create any and many resources + in Kubernetes. Currently supports: - CronJob - ConfigMap - Custom resources + from CustomResourceDefinition - HorizontalPodAutoscaler - Ingress - Secret - + Service - ServiceAccount Every resource type can have custom labels, annotations + or a `fullnameOverride` set. See default [values.yaml](https://github.com/deliveryhero/helm-charts/blob/master/stable/k8s-resources/values.yaml) + for examples. ' + digest: 5876381f2c54b7ea988f2b7a53632629a76b6e9fca136f9c0f6919dfae6f598f + home: https://github.com/deliveryhero/helm-charts + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: k8s-resources + urls: + - charts/k8s-resources-0.5.2.tgz + version: 0.5.2 + - apiVersion: v1 + appVersion: 0.0.1 + created: "2022-05-30T14:18:17Z" + description: 'Not an application but a Helm chart to create any and many resources + in Kubernetes. Currently supports: - CronJob - ConfigMap - Custom resources + from CustomResourceDefinition - HorizontalPodAutoscaler - Ingress - Secret - + Service - ServiceAccount Every resource type can have custom labels, annotations + or a `fullnameOverride` set. See default [values.yaml](https://github.com/deliveryhero/helm-charts/blob/master/stable/k8s-resources/values.yaml) + for examples. 
' + digest: 3ca8d3bbbe93aeb3640ba0964b1f96b0409d59b49f0e9a56b8740e1355d698fa + home: https://github.com/deliveryhero/helm-charts + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: k8s-resources + urls: + - charts/k8s-resources-0.5.1.tgz + version: 0.5.1 + - apiVersion: v1 + appVersion: 0.0.1 + created: "2022-03-29T08:12:50Z" + description: 'Not an application but a Helm chart to create any and many resources + in Kubernetes. Currently supports: - CronJob - ConfigMap - Custom resources + from CustomResourceDefinition - HorizontalPodAutoscaler - Ingress - Secret - + Service - ServiceAccount Every resource type can have custom labels, annotations + or a `fullnameOverride` set. See default [values.yaml](https://github.com/deliveryhero/helm-charts/blob/master/stable/k8s-resources/values.yaml) + for examples. ' + digest: ebae253701929f340ee55dc054f1f170e3e0f921176c8580d606dd9f6dc2d504 + home: https://github.com/deliveryhero/helm-charts + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: k8s-resources + urls: + - charts/k8s-resources-0.5.0.tgz + version: 0.5.0 + - apiVersion: v1 + appVersion: 0.0.1 + created: "2021-11-09T16:50:07Z" + description: 'Not an application but a Helm chart to create any and many resources + in Kubernetes. Currently supports: - CronJob - ConfigMap - Custom resources + from CustomResourceDefinition - HorizontalPodAutoscaler - Ingress - Secret - + Service - ServiceAccount Every resource type can have custom labels, annotations + or a `fullnameOverride` set. See default [values.yaml](https://github.com/deliveryhero/helm-charts/blob/master/stable/k8s-resources/values.yaml) + for examples. 
' + digest: 9a527b1a64d0702ee4164e2f9325d043ab50e08f83cb1df043c2cd91b7c3a03c + home: https://github.com/deliveryhero/helm-charts + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: k8s-resources + urls: + - charts/k8s-resources-0.4.0.tgz + version: 0.4.0 + - apiVersion: v1 + appVersion: 0.0.1 + created: "2020-12-01T15:52:40Z" + description: | + Not an application but a Helm chart to create any and many resources in Kubernetes. + + Currently supports: + + - ConfigMap + - Custom resources from CustomResourceDefinition + - HorizontalPodAutoscaler + - Ingress + - Secret + - Service + - ServiceAccount + + Every resource type can have custom labels, annotations or a `fullnameOverride` set. See default [values.yaml](https://github.com/deliveryhero/helm-charts/blob/master/stable/k8s-resources/values.yaml) for examples. + digest: 003eb3d06c83f97d20b84d585e4ffc8aa25c08a50fa19d495163c3f9a7e4e806 + home: https://github.com/deliveryhero/helm-charts + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: k8s-resources + urls: + - charts/k8s-resources-0.3.0.tgz + version: 0.3.0 + - apiVersion: v1 + appVersion: 0.0.1 + created: "2020-09-02T14:14:50Z" + description: | + Not an application but a Helm chart to create any and many resources in Kubernetes. + + Currently supports: + + - ConfigMap + - Custom resources from CustomResourceDefinition + - HorizontalPodAutoscaler + - Ingress + - Secret + - Service + - ServiceAccount + + Every resource type can have custom labels, annotations or a `fullnameOverride` set. See default [values.yaml](https://github.com/deliveryhero/helm-charts/blob/master/stable/k8s-resources/values.yaml) for examples. 
+ digest: 74504decd85cedfcd977050338af9f85a5e8dd2d46c9ae66a5513a304343e16a + home: https://github.com/deliveryhero/helm-charts + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: k8s-resources + urls: + - charts/k8s-resources-0.2.0.tgz + version: 0.2.0 + - apiVersion: v1 + appVersion: 0.0.1 + created: "2020-09-02T09:27:23Z" + description: | + Not an application but a Helm chart to create any and many resources in Kubernetes. + + Currently supports: + + - ConfigMap + - Custom resources from CustomResourceDefinition + - HorizontalPodAutoscaler + - Ingress + - Secret + - Service + - ServiceAccount + + Every resource type can have custom labels, annotations or a `fullnameOverride` set. See default `values.yaml` for examples. + digest: 3a27b9d1b1f45ea1ef18730021641aafc3c5edfbf1cefcb65cc01971d87e6efc + home: https://github.com/deliveryhero/helm-charts + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: k8s-resources + urls: + - charts/k8s-resources-0.1.0.tgz + version: 0.1.0 + killgrave: + - apiVersion: v2 + appVersion: 0.4.1 + created: "2024-10-04T09:09:43Z" + description: 'A chart to install [killgrave](https://github.com/friendsofgo/killgrave), + a simulator for HTTP-based APIs. In its more basic setup, this chart requires + a `configmap` including all _imposters_ (the definition of a request-response + pair) ' + digest: 14b46d61e9995f4e9bb3842a077ec3b73dc5871f8401d21b963113bfa7872f2c + icon: https://avatars.githubusercontent.com/u/36798605?v=4&s=160 + maintainers: + - email: marcelo.aplanalp@deliveryhero.com + name: MarceloAplanalp + name: killgrave + type: application + urls: + - charts/killgrave-1.0.1.tgz + version: 1.0.1 + - apiVersion: v2 + appVersion: 0.4.1 + created: "2022-11-21T10:33:43Z" + description: 'A chart to install [killgrave](https://github.com/friendsofgo/killgrave), + a simulator for HTTP-based APIs. 
In its more basic setup, this chart requires + a `configmap` including all _imposters_ (the definition of a request-response + pair) ' + digest: 5101c636674da7149c5ec3a62037ee52c164574b7dc4bcb6667a64175c802ad7 + icon: https://avatars.githubusercontent.com/u/36798605?v=4&s=160 + maintainers: + - email: marcelo.aplanalp@deliveryhero.com + name: MarceloAplanalp + name: killgrave + type: application + urls: + - charts/killgrave-1.0.0.tgz + version: 1.0.0 + kube-bench: + - apiVersion: v2 + appVersion: 0.8.0 + created: "2024-10-04T09:09:43Z" + description: Helm chart to deploy run kube-bench as a cronjob on aks, gke or eks. + digest: a76cc6917f6109149cd9ff6da57758ab1126e0ebb02d80324e10e4e5807c9a3b + home: https://github.com/aquasecurity/kube-bench + icon: https://raw.githubusercontent.com/aquasecurity/kube-bench/0d1bd2bbd95608957be024c12d03a0510325e5e2/docs/images/kube-bench.png + maintainers: + - email: no-reply@deliveryhero.com + name: goelankitt + name: kube-bench + sources: + - https://github.com/aquasecurity/kube-bench + urls: + - charts/kube-bench-0.1.16.tgz + version: 0.1.16 + - apiVersion: v2 + appVersion: 0.7.1 + created: "2024-07-01T13:19:34Z" + description: Helm chart to deploy run kube-bench as a cronjob on aks, gke or eks. + digest: 71b431048ad78e5e682290e49dbb3e014b47b388a0c05a1a0d6b553e23fee0c1 + home: https://github.com/aquasecurity/kube-bench + icon: https://raw.githubusercontent.com/aquasecurity/kube-bench/0d1bd2bbd95608957be024c12d03a0510325e5e2/docs/images/kube-bench.png + maintainers: + - email: no-reply@deliveryhero.com + name: goelankitt + name: kube-bench + sources: + - https://github.com/aquasecurity/kube-bench + urls: + - charts/kube-bench-0.1.15.tgz + version: 0.1.15 + - apiVersion: v2 + appVersion: 0.6.19 + created: "2024-01-19T13:18:02Z" + description: Helm chart to deploy run kube-bench as a cronjob on aks, gke or eks. 
+ digest: 133c183e789fc3aecd9e1ee4a550da8386f7ffc22669bba88babc540e92c307d + home: https://github.com/aquasecurity/kube-bench + icon: https://raw.githubusercontent.com/aquasecurity/kube-bench/0d1bd2bbd95608957be024c12d03a0510325e5e2/docs/images/kube-bench.png + maintainers: + - email: no-reply@deliveryhero.com + name: goelankitt + name: kube-bench + sources: + - https://github.com/aquasecurity/kube-bench + urls: + - charts/kube-bench-0.1.14.tgz + version: 0.1.14 + - apiVersion: v2 + appVersion: 0.6.17 + created: "2023-10-25T17:48:35Z" + description: Helm chart to deploy run kube-bench as a cronjob on aks, gke or eks. + digest: e65be49c4e2c6f379349c1383c6bbe68001ae74644448a2820459ccbc49a65d7 + home: https://github.com/aquasecurity/kube-bench + icon: https://raw.githubusercontent.com/aquasecurity/kube-bench/0d1bd2bbd95608957be024c12d03a0510325e5e2/docs/images/kube-bench.png + maintainers: + - email: no-reply@deliveryhero.com + name: goelankitt + name: kube-bench + sources: + - https://github.com/aquasecurity/kube-bench + urls: + - charts/kube-bench-0.1.13.tgz + version: 0.1.13 + - apiVersion: v2 + appVersion: 0.6.16 + created: "2023-08-08T10:02:03Z" + description: Helm chart to deploy run kube-bench as a cronjob on aks, gke or eks. + digest: 5e330b302cb221697bf5671eee1c100f19498212b04737690ccd267df03d0d04 + home: https://github.com/aquasecurity/kube-bench + icon: https://raw.githubusercontent.com/aquasecurity/kube-bench/0d1bd2bbd95608957be024c12d03a0510325e5e2/docs/images/kube-bench.png + maintainers: + - email: no-reply@deliveryhero.com + name: goelankitt + name: kube-bench + sources: + - https://github.com/aquasecurity/kube-bench + urls: + - charts/kube-bench-0.1.12.tgz + version: 0.1.12 + - apiVersion: v2 + appVersion: 0.6.15 + created: "2023-07-20T07:51:44Z" + description: Helm chart to deploy run kube-bench as a cronjob on aks, gke or eks. 
+ digest: df894083f1d44c03566d32ca47c88c7ba258a6b4e5c8c9de0e6e13b82eefef90 + home: https://github.com/aquasecurity/kube-bench + icon: https://raw.githubusercontent.com/aquasecurity/kube-bench/0d1bd2bbd95608957be024c12d03a0510325e5e2/docs/images/kube-bench.png + maintainers: + - email: no-reply@deliveryhero.com + name: goelankitt + name: kube-bench + sources: + - https://github.com/aquasecurity/kube-bench + urls: + - charts/kube-bench-0.1.11.tgz + version: 0.1.11 + - apiVersion: v2 + appVersion: 0.6.14 + created: "2023-06-12T09:49:36Z" + description: Helm chart to deploy run kube-bench as a cronjob on aks, gke or eks. + digest: ee1957848cf4f6ada250ef54cc8966821746d53736ccbd471c6d0747347db1fe + home: https://github.com/aquasecurity/kube-bench + icon: https://raw.githubusercontent.com/aquasecurity/kube-bench/0d1bd2bbd95608957be024c12d03a0510325e5e2/docs/images/kube-bench.png + maintainers: + - email: no-reply@deliveryhero.com + name: goelankitt + name: kube-bench + sources: + - https://github.com/aquasecurity/kube-bench + urls: + - charts/kube-bench-0.1.10.tgz + version: 0.1.10 + - apiVersion: v2 + appVersion: 0.6.14 + created: "2023-05-19T09:20:43Z" + description: Helm chart to deploy run kube-bench as a cronjob on aks, gke or eks. + digest: aeb374a34d3a835013e9dffdc04636c38b9e07ba4ef6b360d0b80a839c31b5ba + home: https://github.com/aquasecurity/kube-bench + icon: https://raw.githubusercontent.com/aquasecurity/kube-bench/0d1bd2bbd95608957be024c12d03a0510325e5e2/docs/images/kube-bench.png + maintainers: + - email: no-reply@deliveryhero.com + name: goelankitt + name: kube-bench + sources: + - https://github.com/aquasecurity/kube-bench + urls: + - charts/kube-bench-0.1.9.tgz + version: 0.1.9 + - apiVersion: v2 + appVersion: 0.6.13 + created: "2023-05-11T12:10:12Z" + description: Helm chart to deploy run kube-bench as a cronjob on aks, gke or eks. 
+ digest: 796ec7aaf1308105287845458121c2722801ba6e8a660a5b1bc5107aecf08d93 + home: https://github.com/aquasecurity/kube-bench + icon: https://raw.githubusercontent.com/aquasecurity/kube-bench/0d1bd2bbd95608957be024c12d03a0510325e5e2/docs/images/kube-bench.png + maintainers: + - email: no-reply@deliveryhero.com + name: goelankitt + name: kube-bench + sources: + - https://github.com/aquasecurity/kube-bench + urls: + - charts/kube-bench-0.1.8.tgz + version: 0.1.8 + - apiVersion: v2 + appVersion: 0.6.12 + created: "2023-04-13T08:24:54Z" + description: Helm chart to deploy run kube-bench as a cronjob on aks, gke or eks. + digest: 8e1ccd07502c1ea8cbdd20d153912f6a44d23c71efe7e99dee9efe38dfb99412 + home: https://github.com/aquasecurity/kube-bench + icon: https://raw.githubusercontent.com/aquasecurity/kube-bench/0d1bd2bbd95608957be024c12d03a0510325e5e2/docs/images/kube-bench.png + maintainers: + - email: no-reply@deliveryhero.com + name: goelankitt + name: kube-bench + sources: + - https://github.com/aquasecurity/kube-bench + urls: + - charts/kube-bench-0.1.7.tgz + version: 0.1.7 + - apiVersion: v1 + created: "2023-03-14T20:15:34Z" + description: Helm chart to deploy run kube-bench as a cronjob on gke or eks. + digest: 521efc1394459e418dfc3f2270f095da0e4baae8ab82b0f2de52257330ca6681 + home: https://github.com/aquasecurity/kube-bench + icon: https://raw.githubusercontent.com/aquasecurity/kube-bench/0d1bd2bbd95608957be024c12d03a0510325e5e2/docs/images/kube-bench.png + maintainers: + - email: no-reply@deliveryhero.com + name: goelankitt + name: kube-bench + sources: + - https://github.com/aquasecurity/kube-bench + urls: + - charts/kube-bench-0.1.6.tgz + version: 0.1.6 + - apiVersion: v1 + created: "2023-03-13T20:22:08Z" + description: Helm chart to deploy run kube-bench as a cronjob on gke or eks. 
+ digest: 1bca5ad4096af4f06191885e202550a72c372906cb5a431b776a7cf979119fda + home: https://github.com/aquasecurity/kube-bench + icon: https://raw.githubusercontent.com/aquasecurity/kube-bench/0d1bd2bbd95608957be024c12d03a0510325e5e2/docs/images/kube-bench.png + maintainers: + - email: no-reply@deliveryhero.com + name: goelankitt + name: kube-bench + sources: + - https://github.com/aquasecurity/kube-bench + urls: + - charts/kube-bench-0.1.5.tgz + version: 0.1.5 + - apiVersion: v1 + created: "2022-08-23T08:00:02Z" + description: Helm chart to deploy run kube-bench as a cronjob on gke or eks. + digest: b6828b93309b7b19a642b9b3a1ffcf83ef410df17668e6d09336de89ae62cd13 + home: https://github.com/aquasecurity/kube-bench + icon: https://raw.githubusercontent.com/aquasecurity/kube-bench/master/images/kube-bench.png + maintainers: + - email: no-reply@deliveryhero.com + name: goelankitt + name: kube-bench + sources: + - https://github.com/aquasecurity/kube-bench + urls: + - charts/kube-bench-0.1.4.tgz + version: 0.1.4 + - apiVersion: v1 + created: "2022-07-05T10:53:33Z" + description: Helm chart to deploy run kube-bench as a cronjob on gke or eks. + digest: f2d1aca00931a4484a5268581ac4b803456464e90ccb5798225e7d24c7d18941 + home: https://github.com/aquasecurity/kube-bench + icon: https://raw.githubusercontent.com/aquasecurity/kube-bench/master/images/kube-bench.png + maintainers: + - email: no-reply@deliveryhero.com + name: goelankitt + name: kube-bench + sources: + - https://github.com/aquasecurity/kube-bench + urls: + - charts/kube-bench-0.1.3.tgz + version: 0.1.3 + - apiVersion: v1 + created: "2022-07-04T11:56:09Z" + description: Helm chart to deploy run kube-bench as a cronjob on gke or eks. 
+ digest: dc0e369fb10ba5222ae33b55521b570ab097d49cffc2d3928ece71dc53c2bfa2 + home: https://github.com/aquasecurity/kube-bench + icon: https://raw.githubusercontent.com/aquasecurity/kube-bench/master/images/kube-bench.png + maintainers: + - email: no-reply@deliveryhero.com + name: goelankitt + name: kube-bench + sources: + - https://github.com/aquasecurity/kube-bench + urls: + - charts/kube-bench-0.1.2.tgz + version: 0.1.2 + - apiVersion: v1 + created: "2021-11-30T13:12:43Z" + description: Helm chart to deploy run kube-bench as a cronjob on gke or eks. + digest: 1322e12d4afecc848ad772cca8f8c05c3de601ab3e81a3951e8243028a20865f + home: https://github.com/aquasecurity/kube-bench + icon: https://raw.githubusercontent.com/aquasecurity/kube-bench/master/images/kube-bench.png + maintainers: + - email: no-reply@deliveryhero.com + name: goelankitt + name: kube-bench + sources: + - https://github.com/aquasecurity/kube-bench + urls: + - charts/kube-bench-0.1.1.tgz + version: 0.1.1 + - apiVersion: v1 + appVersion: 0.1.0 + created: "2020-10-29T12:35:42Z" + description: Helm chart to deploy run kube-bench as a cronjob on gke or eks. 
+ digest: bf66f561034d66ebe0e4fa87e942d11bb9378892f041ad2e77e3448f2fe5a052 + home: https://github.com/aquasecurity/kube-bench + icon: https://raw.githubusercontent.com/aquasecurity/kube-bench/master/images/kube-bench.png + maintainers: + - email: no-reply@deliveryhero.com + name: goelankitt + name: kube-bench + sources: + - https://github.com/aquasecurity/kube-bench + urls: + - charts/kube-bench-0.1.0.tgz + version: 0.1.0 + kube-downscaler: + - apiVersion: v1 + appVersion: 23.2.0-6-gc9b88e8 + created: "2024-10-04T09:09:44Z" + description: Scale down Kubernetes deployments after work hours + digest: 6cf326a51084ae7ad1fbe6e6ae6fdf1c68e0b4c52da55380dbc813ae2be1bff0 + home: https://codeberg.org/hjacobs/kube-downscaler + keywords: + - k8s pods scheduler + - scheduled lifecycle + maintainers: + - name: hjacobs + url: https://codeberg.org/hjacobs + name: kube-downscaler + sources: + - https://github.com/deliveryhero/helm-charts + - https://codeberg.org/hjacobs/kube-downscaler + urls: + - charts/kube-downscaler-0.7.5.tgz + version: 0.7.5 + - apiVersion: v1 + appVersion: 23.2.0-6-gc9b88e8 + created: "2024-08-18T10:44:53Z" + description: Scale down Kubernetes deployments after work hours + digest: 514e1f697b8f0127df77cf3bc4012418359b303b455883c947e48539230193f6 + home: https://codeberg.org/hjacobs/kube-downscaler + keywords: + - k8s pods scheduler + - scheduled lifecycle + maintainers: + - name: hjacobs + url: https://codeberg.org/hjacobs + name: kube-downscaler + sources: + - https://github.com/deliveryhero/helm-charts + - https://codeberg.org/hjacobs/kube-downscaler + urls: + - charts/kube-downscaler-0.7.4.tgz + version: 0.7.4 + - apiVersion: v1 + appVersion: 23.2.0 + created: "2023-11-10T16:13:56Z" + description: Scale down Kubernetes deployments after work hours + digest: 395f453e2aede2aba87fb748b9c10c5d59f0e27983e4d10f4e82abe680d5e234 + home: https://codeberg.org/hjacobs/kube-downscaler + keywords: + - k8s pods scheduler + - scheduled lifecycle + maintainers: + - name: 
hjacobs + url: https://codeberg.org/hjacobs + name: kube-downscaler + sources: + - https://github.com/deliveryhero/helm-charts + - https://codeberg.org/hjacobs/kube-downscaler + urls: + - charts/kube-downscaler-0.7.3.tgz + version: 0.7.3 + - apiVersion: v1 + appVersion: 23.2.0 + created: "2023-09-14T08:00:07Z" + description: Scale down Kubernetes deployments after work hours + digest: dd28db7cd39f6241e279d6c6f537e19033595d2ca9fec301d1b27ce072ad4530 + home: https://codeberg.org/hjacobs/kube-downscaler + keywords: + - k8s pods scheduler + - scheduled lifecycle + maintainers: + - name: hjacobs + url: https://codeberg.org/hjacobs + name: kube-downscaler + sources: + - https://github.com/deliveryhero/helm-charts + - https://codeberg.org/hjacobs/kube-downscaler + urls: + - charts/kube-downscaler-0.7.2.tgz + version: 0.7.2 + - apiVersion: v1 + appVersion: 22.9.0 + created: "2023-06-13T11:46:09Z" + description: Scale down Kubernetes deployments after work hours + digest: 15dc921cb83be3d0f5b4f29fffca386e70378c719d23f9833eec2b8fc1bea9ce + home: https://codeberg.org/hjacobs/kube-downscaler + keywords: + - k8s pods scheduler + - scheduled lifecycle + maintainers: + - name: hjacobs + url: https://codeberg.org/hjacobs + name: kube-downscaler + sources: + - https://github.com/deliveryhero/helm-charts + - https://codeberg.org/hjacobs/kube-downscaler + urls: + - charts/kube-downscaler-0.7.1.tgz + version: 0.7.1 + - apiVersion: v1 + appVersion: 22.9.0 + created: "2023-02-02T02:18:10Z" + description: Scale down Kubernetes deployments after work hours + digest: b9929c4520338668b4b5c61819988e4ce7fa5f8c4d943b980249d60cbab5797b + home: https://codeberg.org/hjacobs/kube-downscaler + keywords: + - k8s pods scheduler + - scheduled lifecycle + maintainers: + - name: hjacobs + url: https://codeberg.org/hjacobs + name: kube-downscaler + sources: + - https://github.com/deliveryhero/helm-charts + - https://codeberg.org/hjacobs/kube-downscaler + urls: + - charts/kube-downscaler-0.7.0.tgz + 
version: 0.7.0 + - apiVersion: v1 + appVersion: 22.9.0 + created: "2022-11-28T16:03:39Z" + description: Scale down Kubernetes deployments after work hours + digest: b2381f5649c362f681431312becc36b2e4eddae2344b82ec15ea39a96fbbe4fd + home: https://codeberg.org/hjacobs/kube-downscaler + keywords: + - k8s pods scheduler + - scheduled lifecycle + maintainers: + - name: hjacobs + url: https://codeberg.org/hjacobs + name: kube-downscaler + sources: + - https://github.com/deliveryhero/helm-charts + - https://codeberg.org/hjacobs/kube-downscaler + urls: + - charts/kube-downscaler-0.6.0.tgz + version: 0.6.0 + - apiVersion: v1 + appVersion: 22.7.1 + created: "2022-11-11T08:26:39Z" + description: Scale down Kubernetes deployments after work hours + digest: c64e43e91d4ff6f691c87e43ce4bcee123323d78b4e5089a212ff3bcecea2c40 + home: https://codeberg.org/hjacobs/kube-downscaler + keywords: + - k8s pods scheduler + - scheduled lifecycle + maintainers: + - name: hjacobs + url: https://codeberg.org/hjacobs + name: kube-downscaler + sources: + - https://github.com/deliveryhero/helm-charts + - https://codeberg.org/hjacobs/kube-downscaler + urls: + - charts/kube-downscaler-0.5.9.tgz + version: 0.5.9 + - apiVersion: v1 + appVersion: 22.7.1 + created: "2022-10-19T07:20:15Z" + description: Scale down Kubernetes deployments after work hours + digest: e09b6357e2e35bcd763220d1914f55cfe226e0794d8462b0b1c945d57a2dc24c + home: https://codeberg.org/hjacobs/kube-downscaler + keywords: + - k8s pods scheduler + - scheduled lifecycle + maintainers: + - name: hjacobs + url: https://codeberg.org/hjacobs + name: kube-downscaler + sources: + - https://github.com/deliveryhero/helm-charts + - https://codeberg.org/hjacobs/kube-downscaler + urls: + - charts/kube-downscaler-0.5.8.tgz + version: 0.5.8 + - apiVersion: v1 + appVersion: 22.7.1 + created: "2022-09-23T07:45:20Z" + description: Scale down Kubernetes deployments after work hours + digest: cdf96b956bacfb7acef8db4a6e2a4bf8eb950defcf9f376b005db985113899a0 
+ home: https://codeberg.org/hjacobs/kube-downscaler + keywords: + - k8s pods scheduler + - scheduled lifecycle + maintainers: + - name: hjacobs + url: https://codeberg.org/hjacobs + name: kube-downscaler + sources: + - https://github.com/deliveryhero/helm-charts + - https://codeberg.org/hjacobs/kube-downscaler + urls: + - charts/kube-downscaler-0.5.7.tgz + version: 0.5.7 + - apiVersion: v1 + appVersion: 22.7.1 + created: "2022-09-16T13:52:31Z" + description: Scale down Kubernetes deployments after work hours + digest: 691dda9c0c280373302725ffb8521cc985f83fb9c6ea24e7424cbcfc01353508 + home: https://codeberg.org/hjacobs/kube-downscaler + keywords: + - k8s pods scheduler + - scheduled lifecycle + maintainers: + - name: hjacobs + url: https://codeberg.org/hjacobs + name: kube-downscaler + sources: + - https://github.com/deliveryhero/helm-charts + - https://codeberg.org/hjacobs/kube-downscaler + urls: + - charts/kube-downscaler-0.5.6.tgz + version: 0.5.6 + - apiVersion: v1 + appVersion: 22.7.1 + created: "2022-08-24T07:06:39Z" + description: Scale down Kubernetes deployments after work hours + digest: 9529ff839beb5c8f50f69a6b96aa7e6ad2d26a16fc4576c0dd0043010ca0e8c2 + home: https://codeberg.org/hjacobs/kube-downscaler + keywords: + - k8s pods scheduler + - scheduled lifecycle + maintainers: + - name: hjacobs + url: https://codeberg.org/hjacobs + name: kube-downscaler + sources: + - https://github.com/deliveryhero/helm-charts + - https://codeberg.org/hjacobs/kube-downscaler + urls: + - charts/kube-downscaler-0.5.5.tgz + version: 0.5.5 + - apiVersion: v1 + appVersion: 22.7.1 + created: "2022-08-10T16:56:54Z" + description: Scale down Kubernetes deployments after work hours + digest: bfd27ff738c8fe947f0f8df88e2d5e25d68c50f9651d8767cee667f7fd9b49a5 + home: https://codeberg.org/hjacobs/kube-downscaler + keywords: + - k8s pods scheduler + - scheduled lifecycle + maintainers: + - name: hjacobs + url: https://codeberg.org/hjacobs + name: kube-downscaler + sources: + - 
https://github.com/deliveryhero/helm-charts + - https://codeberg.org/hjacobs/kube-downscaler + urls: + - charts/kube-downscaler-0.5.4.tgz + version: 0.5.4 + - apiVersion: v1 + appVersion: 22.2.0 + created: "2022-08-10T09:40:16Z" + description: Scale down Kubernetes deployments after work hours + digest: 02f02c136c97b2c2c94428938a4ac8c495a43b374916c389cf0cdf386e6b10fa + home: https://codeberg.org/hjacobs/kube-downscaler + keywords: + - k8s pods scheduler + - scheduled lifecycle + maintainers: + - name: hjacobs + url: https://codeberg.org/hjacobs + name: kube-downscaler + sources: + - https://github.com/deliveryhero/helm-charts + - https://codeberg.org/hjacobs/kube-downscaler + urls: + - charts/kube-downscaler-0.5.3.tgz + version: 0.5.3 + - apiVersion: v1 + appVersion: 22.2.0 + created: "2022-08-08T18:49:16Z" + description: Scale down Kubernetes deployments after work hours + digest: 689a4da891e2a91da530b2972f1557e1f197db2da3ecdca15aa0f46923fd2e2b + home: https://codeberg.org/hjacobs/kube-downscaler + keywords: + - k8s pods scheduler + - scheduled lifecycle + maintainers: + - name: hjacobs + url: https://codeberg.org/hjacobs + name: kube-downscaler + sources: + - https://github.com/deliveryhero/helm-charts + - https://codeberg.org/hjacobs/kube-downscaler + urls: + - charts/kube-downscaler-0.5.2.tgz + version: 0.5.2 + - apiVersion: v1 + appVersion: 22.2.0 + created: "2022-08-08T12:20:06Z" + description: Scale down Kubernetes deployments after work hours + digest: 392e79045ec4af26116424fd0902f8c1a5d197ba8f6ec0c9041efcf82a36db3c + home: https://codeberg.org/hjacobs/kube-downscaler + keywords: + - k8s pods scheduler + - scheduled lifecycle + maintainers: + - name: hjacobs + url: https://codeberg.org/hjacobs + name: kube-downscaler + sources: + - https://github.com/deliveryhero/helm-charts + - https://codeberg.org/hjacobs/kube-downscaler + urls: + - charts/kube-downscaler-0.5.1.tgz + version: 0.5.1 + - apiVersion: v1 + appVersion: 22.2.0 + created: "2022-07-14T09:05:03Z" 
+ description: Scale down Kubernetes deployments after work hours + digest: 0bc21635487d0a8c2c5b5a2e5a192b31dcef9cd09d57602b852cbddd6dfaa3bd + home: https://codeberg.org/hjacobs/kube-downscaler + keywords: + - k8s pods scheduler + - scheduled lifecycle + maintainers: + - name: hjacobs + url: https://codeberg.org/hjacobs + name: kube-downscaler + sources: + - https://github.com/deliveryhero/helm-charts + - https://codeberg.org/hjacobs/kube-downscaler + urls: + - charts/kube-downscaler-0.5.tgz + version: "0.5" + - apiVersion: v1 + appVersion: 21.2.0 + created: "2022-03-17T08:21:34Z" + description: Scale down Kubernetes deployments after work hours + digest: c78a786a2b1d07f900ba73277edc686dddfaa35291a8f04d13d28f6c4882107f + home: https://codeberg.org/hjacobs/kube-downscaler + keywords: + - k8s pods scheduler + - scheduled lifecycle + maintainers: + - name: hjacobs + url: https://codeberg.org/hjacobs + name: kube-downscaler + sources: + - https://github.com/deliveryhero/helm-charts + - https://codeberg.org/hjacobs/kube-downscaler + urls: + - charts/kube-downscaler-0.4.tgz + version: "0.4" + - apiVersion: v1 + appVersion: 21.2.0 + created: "2021-07-12T16:34:54Z" + description: Scale down Kubernetes deployments after work hours + digest: 9e59de6b6e3ea450a9dc3ec5dba196e54ecda79b617a0fda18fb5966e1cc7fe7 + home: https://codeberg.org/hjacobs/kube-downscaler + keywords: + - k8s pods scheduler + - scheduled lifecycle + maintainers: + - name: hjacobs + url: https://codeberg.org/hjacobs + name: kube-downscaler + sources: + - https://github.com/deliveryhero/helm-charts + - https://codeberg.org/hjacobs/kube-downscaler + urls: + - charts/kube-downscaler-0.3.tgz + version: "0.3" + - apiVersion: v1 + appVersion: 0.5.1 + created: "2021-03-03T07:16:31Z" + description: Scale down Kubernetes deployments after work hours + digest: 9becd8af5a74139c5d5a3ce5a843a969cea80d91a248269964e1760c2851e7f1 + home: https://github.com/hjacobs/kube-downscaler + keywords: + - k8s pods scheduler + - 
scheduled lifecycle + maintainers: + - name: hjacobs + url: https://github.com/hjacobs + name: kube-downscaler + sources: + - https://github.com/hjacobs/kube-downscaler + urls: + - charts/kube-downscaler-0.2.tgz + version: "0.2" + - apiVersion: v1 + appVersion: 0.5.1 + created: "2020-09-16T14:21:25Z" + description: Scale down Kubernetes deployments after work hours + digest: 22a77802470ca74cd9dd126ba7e822129df6ded3f95b44cc91e177200c27bfd1 + home: https://github.com/hjacobs/kube-downscaler + keywords: + - k8s pods scheduler + - scheduled lifecycle + maintainers: + - name: hjacobs + url: https://github.com/hjacobs + name: kube-downscaler + sources: + - https://github.com/hjacobs/kube-downscaler + urls: + - charts/kube-downscaler-0.1.1.tgz + version: 0.1.1 + kubecost-reports-exporter: + - apiVersion: v2 + appVersion: 2.0.0 + created: "2024-10-04T09:09:44Z" + description: 'Helm chart for exporting kubernetes cost reports to AWS s3 bucket. + N/B We have updated chart to use V2 scripts using allocations and assets api. + if you are using old installation please use v1 chart ' + digest: f1829b6f98ea056613bfada21adf80f4fd54a827b0267008288b2f39766e03b5 + home: https://www.kubecost.com + keywords: + - kubecost + - exporter + maintainers: + - email: thomas.nyambati@deliveryhero.com + name: nyambati + name: kubecost-reports-exporter + sources: + - https://www.kubecost.com + type: application + urls: + - charts/kubecost-reports-exporter-2.0.4.tgz + version: 2.0.4 + - apiVersion: v2 + appVersion: 2.0.0 + created: "2024-02-20T10:03:44Z" + description: 'Helm chart for exporting kubernetes cost reports to AWS s3 bucket. + N/B We have updated chart to use V2 scripts using allocations and assets api. 
+ if you are using old installation please use v1 chart ' + digest: e3cfc8304e30ba7b572a6ca1c1cccae63045232ad74d8a418b2015f9a057c757 + home: https://www.kubecost.com + keywords: + - kubecost + - exporter + maintainers: + - email: thomas.nyambati@deliveryhero.com + name: nyambati + name: kubecost-reports-exporter + sources: + - https://www.kubecost.com + type: application + urls: + - charts/kubecost-reports-exporter-2.0.3.tgz + version: 2.0.3 + - apiVersion: v2 + appVersion: 2.0.0 + created: "2023-11-13T12:07:57Z" + description: 'Helm chart for exporting kubernetes cost reports to AWS s3 bucket. + N/B We have updated chart to use V2 scripts using allocations and assets api. + if you are using old installation please use v1 chart ' + digest: aacb93d62515375cd388b45c8b7ca997f949da3a64907865bf28b9251322c13e + home: https://www.kubecost.com + keywords: + - kubecost + - exporter + maintainers: + - email: thomas.nyambati@deliveryhero.com + name: nyambati + name: kubecost-reports-exporter + sources: + - https://www.kubecost.com + type: application + urls: + - charts/kubecost-reports-exporter-2.0.2.tgz + version: 2.0.2 + - apiVersion: v2 + appVersion: 2.0.0 + created: "2023-07-26T11:24:36Z" + description: 'Helm chart for exporting kubernetes cost reports to AWS s3 bucket. + N/B We have updated chart to use V2 scripts using allocations and assets api. + if you are using old installation please use v1 chart ' + digest: 0122c9b769d8baf3238b9ab7ace61def66c24dff9c5c1d06f5303db7c5f27615 + home: https://www.kubecost.com + keywords: + - kubecost + - exporter + maintainers: + - email: thomas.nyambati@deliveryhero.com + name: nyambati + name: kubecost-reports-exporter + sources: + - https://www.kubecost.com + type: application + urls: + - charts/kubecost-reports-exporter-2.0.1.tgz + version: 2.0.1 + - apiVersion: v2 + appVersion: 2.0.0 + created: "2023-06-21T07:37:21.007938128Z" + description: 'Helm chart for exporting kubernetes cost reports to AWS s3 bucket. 
+ N/B We have updated chart to use V2 scripts using allocations and assets api. + if you are using old installation please use v1 chart ' + digest: ec052315d04578d7414e7cc7a87641dc322cb9c97588a5681c2be1c820628b34 + home: https://www.kubecost.com + keywords: + - kubecost + - exporter + maintainers: + - email: thomas.nyambati@deliveryhero.com + name: nyambati + name: kubecost-reports-exporter + sources: + - https://www.kubecost.com + type: application + urls: + - charts/kubecost-reports-exporter-2.0.0.tgz + version: 2.0.0 + - apiVersion: v2 + appVersion: 1.0.5 + created: "2023-06-16T16:30:07Z" + description: Helm chart for exporting kubernetes cost reports to S3 + digest: 7453ce4db203b2f002cb0c11742012ed036e50c1858d73f06de4f9bae0dc7d29 + home: https://www.kubecost.com + keywords: + - kubecost + - exporter + maintainers: + - email: thomas.nyambati@deliveryhero.com + name: nyambati + name: kubecost-reports-exporter + sources: + - https://www.kubecost.com + type: application + urls: + - charts/kubecost-reports-exporter-1.1.0.tgz + version: 1.1.0 + - apiVersion: v2 + appVersion: 1.0.4 + created: "2022-04-05T07:37:55Z" + description: Helm chart for exporting kubernetes cost reports to S3 + digest: c16e9e9afffd449ef492b3733e4143006020594a8c9642828bab4bb796abbdb3 + home: https://www.kubecost.com + keywords: + - kubecost + - exporter + maintainers: + - email: thomas.nyambati@deliveryhero.com + name: nyambati + name: kubecost-reports-exporter + sources: + - https://www.kubecost.com + type: application + urls: + - charts/kubecost-reports-exporter-1.0.9.tgz + version: 1.0.9 + - apiVersion: v2 + appVersion: 1.0.0 + created: "2021-09-15T07:53:54Z" + description: Helm chart for exporting kubernetes cost reports to S3 + digest: a7899454558bf30b62793d3cbd4d875f369359436570b4b5db40ef702219b54f + home: https://www.kubecost.com + keywords: + - kubecost + - exporter + maintainers: + - email: thomas.nyambati@deliveryhero.com + name: nyambati + name: kubecost-reports-exporter + sources: + 
- https://www.kubecost.com + type: application + urls: + - charts/kubecost-reports-exporter-1.0.8.tgz + version: 1.0.8 + - apiVersion: v2 + appVersion: 1.0.0 + created: "2021-09-07T07:46:15Z" + description: Helm chart for exporting kubernetes cost reports to S3 + digest: d8d1ecbbadf7dc72784a9ca82f5b3f0ec50ba2d0db268095ff1c7b63e3c59659 + home: https://www.kubecost.com + keywords: + - kubecost + - exporter + maintainers: + - email: thomas.nyambati@deliveryhero.com + name: nyambati + name: kubecost-reports-exporter + sources: + - https://www.kubecost.com + type: application + urls: + - charts/kubecost-reports-exporter-1.0.7.tgz + version: 1.0.7 + - apiVersion: v2 + appVersion: 1.0.0 + created: "2021-06-17T15:14:00Z" + description: Helm chart for exporting kubernetes cost reports to S3 + digest: c851e5fa0db0372e360547c9437e1005e1a90267eba9d3d9d2392bb2efd02be4 + home: https://www.kubecost.com + keywords: + - kubecost + - exporter + maintainers: + - email: thomas.nyambati@deliveryhero.com + name: nyambati + name: kubecost-reports-exporter + sources: + - https://www.kubecost.com + type: application + urls: + - charts/kubecost-reports-exporter-1.0.6.tgz + version: 1.0.6 + - apiVersion: v2 + appVersion: 1.0.0 + created: "2021-05-19T07:55:28Z" + description: Helm chart for exporting kubernetes cost reports to S3 + digest: 66bcee4ef4215518d4de39bc33bb189c4eb01847adb6ddfb49d19c1847f3c79f + home: https://www.kubecost.com + keywords: + - kubecost + - exporter + maintainers: + - email: thomas.nyambati@deliveryhero.com + name: nyambati + name: kubecost-reports-exporter + sources: + - https://www.kubecost.com + type: application + urls: + - charts/kubecost-reports-exporter-1.0.5.tgz + version: 1.0.5 + - apiVersion: v2 + appVersion: 1.0.0 + created: "2021-04-30T09:30:31Z" + description: Helm chart for exporting kubernetes cost reports to S3 + digest: 0b142d224d53b34598485083983441913f6bd46abff35784dc748078c96c5233 + home: https://www.kubecost.com + keywords: + - kubecost + - 
exporter + maintainers: + - email: thomas.nyambati@deliveryhero.com + name: nyambati + name: kubecost-reports-exporter + sources: + - https://www.kubecost.com + type: application + urls: + - charts/kubecost-reports-exporter-1.0.4.tgz + version: 1.0.4 + - apiVersion: v2 + appVersion: 1.0.0 + created: "2021-04-12T19:01:15Z" + description: Helm chart for exporting kubernetes cost reports to S3 + digest: 5fc0303c72ca44ec67285be19854622e96055ac6ebd99a283127e686272afa19 + home: https://www.kubecost.com + keywords: + - kubecost + - exporter + maintainers: + - email: thomas.nyambati@deliveryhero.com + name: nyambati + name: kubecost-reports-exporter + sources: + - https://www.kubecost.com + type: application + urls: + - charts/kubecost-reports-exporter-1.0.3.tgz + version: 1.0.3 + - apiVersion: v2 + appVersion: 1.0.0 + created: "2021-04-11T18:54:38Z" + description: Helm chart for exporting kubernetes cost reports to S3 + digest: 92feb5c8437e3f256f1b8b2c304351a89730f3ef1efd323f105c582418a925fb + home: https://www.kubecost.com + keywords: + - kubecost + - exporter + maintainers: + - email: thomas.nyambati@deliveryhero.com + name: nyambati + name: kubecost-reports-exporter + sources: + - https://www.kubecost.com + type: application + urls: + - charts/kubecost-reports-exporter-1.0.2.tgz + version: 1.0.2 + - apiVersion: v2 + appVersion: 1.0.0 + created: "2021-04-11T16:55:57Z" + description: Helm chart for exporting kubernetes cost reports to S3 + digest: 977905ecf569b994bab40f2706642ae27a8a2fd1bc69de686339eefc6e9a133f + home: https://www.kubecost.com + keywords: + - kubecost + - exporter + maintainers: + - email: thomas.nyambati@deliveryhero.com + name: nyambati + name: kubecost-reports-exporter + sources: + - https://www.kubecost.com + type: application + urls: + - charts/kubecost-reports-exporter-1.0.1.tgz + version: 1.0.1 + - apiVersion: v2 + appVersion: 1.0.0 + created: "2021-04-10T02:22:35Z" + description: Helm chart for exporting kubernetes cost reports to S3 + digest: 
2e803413bec13ad0c095e649bdeb1dabdfa098c42562b1fbdd0b655e75cf8d9c + home: https://www.kubecost.com + keywords: + - kubecost + - exporter + maintainers: + - email: thomas.nyambati@deliveryhero.com + name: nyambati + name: kubecost-reports-exporter + sources: + - https://www.kubecost.com + type: application + urls: + - charts/kubecost-reports-exporter-1.0.0.tgz + version: 1.0.0 + labelsmanager-controller: + - apiVersion: v2 + appVersion: 1.0.0 + created: "2024-10-04T09:09:44Z" + description: 'This is a simple [Kubernetes Controller](https://kubernetes.io/docs/concepts/architecture/controller/) + that injects required default labels into pods on creation. Here''s an example + `Labels` resource: ```yaml apiVersion: labels.deliveryhero.com/v1beta1 kind: + Labels metadata: name: labels-sample spec: priority: 1 # labels with highest + priority takes precendence labels: country: DE env: stg region: + eu ``` ' + digest: 59558d0c919edec99d68f3566ac557b988f41c375b60a405cc4130fd2b4ef364 + home: https://github.com/deliveryhero/labelsmanager-controller + icon: https://github.com/kubernetes/kubernetes/raw/master/logo/logo.png + keywords: + - monitoring + - labels + - controller + maintainers: + - email: thomas.nyambati@deliveryhero.com + name: nyambati + name: labelsmanager-controller + sources: + - https://github.com/deliveryhero/labelsmanager-controller + type: application + urls: + - charts/labelsmanager-controller-1.0.3.tgz + version: 1.0.3 + - apiVersion: v2 + appVersion: 1.0.0 + created: "2022-01-04T16:19:28Z" + description: 'This is a simple [Kubernetes Controller](https://kubernetes.io/docs/concepts/architecture/controller/) + that injects required default labels into pods on creation. 
Here''s an example + `Labels` resource: ```yaml apiVersion: labels.deliveryhero.com/v1beta1 kind: + Labels metadata: name: labels-sample spec: priority: 1 # labels with highest + priority takes precendence labels: country: DE env: stg region: + eu ``` ' + digest: cdaaacdb9cc054d03c72088c20d568ceff9efeed664bdf5ff367745330026f6b + home: https://github.com/deliveryhero/labelsmanager-controller + icon: https://github.com/kubernetes/kubernetes/raw/master/logo/logo.png + keywords: + - monitoring + - labels + - controller + maintainers: + - email: thomas.nyambati@deliveryhero.com + name: nyambati + name: labelsmanager-controller + sources: + - https://github.com/deliveryhero/labelsmanager-controller + type: application + urls: + - charts/labelsmanager-controller-1.0.2.tgz + version: 1.0.2 + - apiVersion: v2 + appVersion: 1.0.0 + created: "2020-12-18T12:31:40Z" + description: | + This is a simple [Kubernetes Controller](https://kubernetes.io/docs/concepts/architecture/controller/) that injects required default labels into pods on creation. 
+ + Here's an example `Labels` resource: + + ```yaml + apiVersion: labels.deliveryhero.com/v1beta1 + kind: Labels + metadata: + name: labels-sample + spec: + priority: 1 # labels with highest priority takes precendence + labels: + country: DE + env: stg + region: eu + + ``` + digest: e2008a62169a4c001ab9823fa0009bd396e65a65f642fed872195973587c2f60 + home: https://github.com/deliveryhero/labelsmanager-controller + icon: https://github.com/kubernetes/kubernetes/raw/master/logo/logo.png + keywords: + - monitoring + - labels + - controller + maintainers: + - email: thomas.nyambati@deliveryhero.com + name: nyambati + name: labelsmanager-controller + sources: + - https://github.com/deliveryhero/labelsmanager-controller + type: application + urls: + - charts/labelsmanager-controller-1.0.1.tgz + version: 1.0.1 + - apiVersion: v2 + appVersion: 1.0.0 + created: "2020-12-02T09:58:22Z" + description: | + This is a simple [Kubernetes Controller](https://kubernetes.io/docs/concepts/architecture/controller/) that injects required default labels into pods on creation. 
+ + Here's an example `Labels` resource: + + ```yaml + apiVersion: labels.deliveryhero.com/v1beta1 + kind: Labels + metadata: + name: labels-sample + spec: + priority: 1 # labels with highest priority takes precendence + labels: + country: DE + env: stg + region: eu + + ``` + digest: a8d282166464e1c2a177e32735c7a05791c6cae67e5adffcf5c95eabb3daad10 + home: https://github.com/deliveryhero/labelsmanager-controller + icon: https://github.com/kubernetes/kubernetes/raw/master/logo/logo.png + keywords: + - monitoring + - labels + - controller + maintainers: + - email: thomas.nyambati@deliveryhero.com + name: nyambati + name: labelsmanager-controller + sources: + - https://github.com/deliveryhero/labelsmanager-controller + type: application + urls: + - charts/labelsmanager-controller-1.0.0.tgz + version: 1.0.0 + listmonk: + - apiVersion: v2 + appVersion: v2.1.0 + created: "2024-10-04T09:09:52.012366718Z" + dependencies: + - condition: postgresql.enable + name: postgresql + repository: https://charts.bitnami.com/bitnami + version: 11.6.26 + description: A Helm chart for listmonk application + digest: 0afe01d1d616d1a7c1b27bf139040e61dae9b24d581734af055cb9c32b4eb7a8 + home: https://github.com/knadh/listmonk + maintainers: + - email: no-reply@deliveryhero.com + name: javad-hajiani + name: listmonk + sources: + - https://github.com/knadh/listmonk + type: application + urls: + - charts/listmonk-0.1.10.tgz + version: 0.1.10 + - apiVersion: v2 + appVersion: v2.1.0 + created: "2024-07-18T12:29:32Z" + dependencies: + - condition: postgresql.enable + name: postgresql + repository: https://charts.bitnami.com/bitnami + version: 11.6.26 + description: A Helm chart for listmonk application + digest: cc0acf2a47af281679596387f1d95fc0728d7b166649b1700f703576e1a1bd84 + home: https://github.com/knadh/listmonk + maintainers: + - email: no-reply@deliveryhero.com + name: javad-hajiani + name: listmonk + sources: + - https://github.com/knadh/listmonk + type: application + urls: + - 
charts/listmonk-0.1.9.tgz + version: 0.1.9 + - apiVersion: v2 + appVersion: v2.1.0 + created: "2022-05-30T14:20:44Z" + dependencies: + - condition: postgresql.enable + name: postgresql + repository: https://charts.bitnami.com/bitnami + version: 9.1.4 + description: A Helm chart for listmonk application + digest: 84bbc2ebd626a9ae7ef7a3b88976412a7b5e4efbaf95947f4b935af1bbfbc749 + home: https://github.com/knadh/listmonk + maintainers: + - email: no-reply@deliveryhero.com + name: javad-hajiani + name: listmonk + sources: + - https://github.com/knadh/listmonk + type: application + urls: + - charts/listmonk-0.1.8.tgz + version: 0.1.8 + - apiVersion: v2 + appVersion: v2.1.0 + created: "2022-04-26T10:42:10Z" + dependencies: + - condition: postgresql.enable + name: postgresql + repository: https://charts.bitnami.com/bitnami + version: 9.1.4 + description: A Helm chart for listmonk application + digest: 6ffe46b0e7978eaaabe715417c3e15ca441663ff469498fcf73a859744c02df0 + home: https://github.com/knadh/listmonk + maintainers: + - email: no-reply@deliveryhero.com + name: javad-hajiani + name: listmonk + sources: + - https://github.com/knadh/listmonk + type: application + urls: + - charts/listmonk-0.1.7.tgz + version: 0.1.7 + - apiVersion: v2 + appVersion: v2.1.0 + created: "2022-04-26T10:18:43Z" + dependencies: + - condition: postgresql.enable + name: postgresql + repository: https://charts.bitnami.com/bitnami + version: 9.1.4 + description: A Helm chart for listmonk application + digest: 54bdcd54acea25863b48235f3fd804a025b753b8030411f1576b68b9ace661f8 + home: https://github.com/knadh/listmonk + maintainers: + - email: no-reply@deliveryhero.com + name: javad-hajiani + name: listmonk + sources: + - https://github.com/knadh/listmonk + type: application + urls: + - charts/listmonk-0.1.6.tgz + version: 0.1.6 + - apiVersion: v2 + appVersion: v2.1.0 + created: "2022-04-26T09:32:59Z" + dependencies: + - condition: postgresql.enable + name: postgresql + repository: 
https://charts.bitnami.com/bitnami + version: 9.1.4 + description: A Helm chart for listmonk application + digest: 7c3ab4ea30d78267ad911126c2ca608b88ada183aa17b445803fc9aa478d203a + home: https://github.com/knadh/listmonk + maintainers: + - email: no-reply@deliveryhero.com + name: javad-hajiani + name: listmonk + sources: + - https://github.com/knadh/listmonk + type: application + urls: + - charts/listmonk-0.1.5.tgz + version: 0.1.5 + - apiVersion: v2 + appVersion: v2.0.0 + created: "2022-04-20T07:53:25Z" + dependencies: + - condition: postgresql.enable + name: postgresql + repository: https://charts.bitnami.com/bitnami + version: 9.1.4 + description: A Helm chart for listmonk application + digest: be1552346e8db2884d6467753d38a05279ad82e6c816769164e0c8fd1147f5d7 + home: https://github.com/knadh/listmonk + maintainers: + - email: no-reply@deliveryhero.com + name: javad-hajiani + name: listmonk + sources: + - https://github.com/knadh/listmonk + type: application + urls: + - charts/listmonk-0.1.4.tgz + version: 0.1.4 + - apiVersion: v2 + appVersion: v2.0.0 + created: "2021-10-21T10:58:54Z" + dependencies: + - condition: postgresql.enable + name: postgresql + repository: https://charts.bitnami.com/bitnami + version: 9.1.4 + description: A Helm chart for listmonk application + digest: 3fcd2b453f8049e5058247d3ec6471cfbec5103ac5c1be965b0cb4f9ae070d4f + home: https://github.com/knadh/listmonk + maintainers: + - email: no-reply@deliveryhero.com + name: javad-hajiani + name: listmonk + sources: + - https://github.com/knadh/listmonk + type: application + urls: + - charts/listmonk-0.1.3.tgz + version: 0.1.3 + - apiVersion: v2 + appVersion: v2.0.0 + created: "2021-10-21T10:40:56Z" + dependencies: + - condition: postgresql.enable + name: postgresql + repository: https://charts.bitnami.com/bitnami + version: 9.1.4 + description: A Helm chart for listmonk application + digest: 7fab6f05c3d6d2cf7b65a19b1b46724c7fbd89fceff761a0154b1ede500fe895 + home: 
https://github.com/knadh/listmonk + maintainers: + - email: no-reply@deliveryhero.com + name: javad-hajiani + name: listmonk + sources: + - https://github.com/knadh/listmonk + type: application + urls: + - charts/listmonk-0.1.2.tgz + version: 0.1.2 + - apiVersion: v2 + appVersion: v1.1.0 + created: "2021-10-18T10:51:22Z" + dependencies: + - condition: postgresql.enable + name: postgresql + repository: https://charts.bitnami.com/bitnami + version: 9.1.4 + description: A Helm chart for listmonk application + digest: e6458cf194f38b129ba05b582a68e77fbb99277f1c1c57a64125efe7aa51e5d3 + home: https://github.com/knadh/listmonk + maintainers: + - email: no-reply@deliveryhero.com + name: javad-hajiani + name: listmonk + sources: + - https://github.com/knadh/listmonk + type: application + urls: + - charts/listmonk-0.1.1.tgz + version: 0.1.1 + - apiVersion: v2 + appVersion: v1.1.0 + created: "2021-09-13T16:10:16Z" + dependencies: + - condition: postgresql.enable + name: postgresql + repository: https://charts.bitnami.com/bitnami + version: 9.1.4 + description: A Helm chart for listmonk application + digest: ad02552d4cb70c8b86976166d7544d211aa10706044afcec22ce18dba7210816 + home: https://github.com/knadh/listmonk + maintainers: + - email: no-reply@deliveryhero.com + name: javad-hajiani + name: listmonk + sources: + - https://github.com/knadh/listmonk + type: application + urls: + - charts/listmonk-0.1.0.tgz + version: 0.1.0 + locust: + - apiVersion: v1 + appVersion: 2.15.1 + created: "2024-10-04T09:09:53Z" + description: 'A chart to install Locust, a scalable load testing tool written + in Python. This chart will setup everything required to run a full distributed + locust environment with any amount of workers. This chart will also create + configmaps for storing the locust files in Kubernetes, this way there is no + need to build custom docker images. 
By default it will install using an example + locustfile and lib from [stable/locust/locustfiles/example](https://github.com/deliveryhero/helm-charts/tree/master/stable/locust/locustfiles/example). + When you want to provide your own locustfile, you will need to create 2 configmaps + using the structure from that example: ```console kubectl create configmap + my-loadtest-locustfile --from-file path/to/your/main.py kubectl create configmap + my-loadtest-lib --from-file path/to/your/lib/ ``` And then install the chart + passing the names of those configmaps as values: ```console helm install locust + deliveryhero/locust \ --set loadtest.name=my-loadtest \ --set loadtest.locust_locustfile_configmap=my-loadtest-locustfile + \ --set loadtest.locust_lib_configmap=my-loadtest-lib ``` ' + digest: 53c9a3dddfbec33c91212b88008a54575fb4030b6e6c79c8952c8630eb5cf820 + home: https://github.com/locustio/locust + icon: https://locust.io/static/img/logo.png + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: locust + urls: + - charts/locust-0.31.6.tgz + version: 0.31.6 + - apiVersion: v1 + appVersion: 2.15.1 + created: "2024-04-08T14:26:39Z" + description: 'A chart to install Locust, a scalable load testing tool written + in Python. This chart will setup everything required to run a full distributed + locust environment with any amount of workers. This chart will also create + configmaps for storing the locust files in Kubernetes, this way there is no + need to build custom docker images. By default it will install using an example + locustfile and lib from [stable/locust/locustfiles/example](https://github.com/deliveryhero/helm-charts/tree/master/stable/locust/locustfiles/example). 
+ When you want to provide your own locustfile, you will need to create 2 configmaps + using the structure from that example: ```console kubectl create configmap + my-loadtest-locustfile --from-file path/to/your/main.py kubectl create configmap + my-loadtest-lib --from-file path/to/your/lib/ ``` And then install the chart + passing the names of those configmaps as values: ```console helm install locust + deliveryhero/locust \ --set loadtest.name=my-loadtest \ --set loadtest.locust_locustfile_configmap=my-loadtest-locustfile + \ --set loadtest.locust_lib_configmap=my-loadtest-lib ``` ' + digest: 9a9cf9a23be5d92b5d872915cf1df41af39e4703e84b7d6070ab7605862936de + home: https://github.com/locustio/locust + icon: https://locust.io/static/img/logo.png + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: locust + urls: + - charts/locust-0.31.5.tgz + version: 0.31.5 + - apiVersion: v1 + appVersion: 2.15.1 + created: "2023-07-05T07:25:15Z" + description: 'A chart to install Locust, a scalable load testing tool written + in Python. This chart will setup everything required to run a full distributed + locust environment with any amount of workers. This chart will also create + configmaps for storing the locust files in Kubernetes, this way there is no + need to build custom docker images. By default it will install using an example + locustfile and lib from [stable/locust/locustfiles/example](https://github.com/deliveryhero/helm-charts/tree/master/stable/locust/locustfiles/example). 
+ When you want to provide your own locustfile, you will need to create 2 configmaps + using the structure from that example: ```console kubectl create configmap + my-loadtest-locustfile --from-file path/to/your/main.py kubectl create configmap + my-loadtest-lib --from-file path/to/your/lib/ ``` And then install the chart + passing the names of those configmaps as values: ```console helm install locust + deliveryhero/locust \ --set loadtest.name=my-loadtest \ --set loadtest.locust_locustfile_configmap=my-loadtest-locustfile + \ --set loadtest.locust_lib_configmap=my-loadtest-lib ``` ' + digest: 054bb8d1867c059d0493b71b2cf572127f4396fc31165abda59492fe2a83d8ad + home: https://github.com/locustio/locust + icon: https://locust.io/static/img/logo.png + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: locust + urls: + - charts/locust-0.31.4.tgz + version: 0.31.4 + - apiVersion: v1 + appVersion: 2.15.1 + created: "2023-05-04T21:51:50Z" + description: 'A chart to install Locust, a scalable load testing tool written + in Python. This chart will setup everything required to run a full distributed + locust environment with any amount of workers. This chart will also create + configmaps for storing the locust files in Kubernetes, this way there is no + need to build custom docker images. By default it will install using an example + locustfile and lib from [stable/locust/locustfiles/example](https://github.com/deliveryhero/helm-charts/tree/master/stable/locust/locustfiles/example). 
+ When you want to provide your own locustfile, you will need to create 2 configmaps + using the structure from that example: ```console kubectl create configmap + my-loadtest-locustfile --from-file path/to/your/main.py kubectl create configmap + my-loadtest-lib --from-file path/to/your/lib/ ``` And then install the chart + passing the names of those configmaps as values: ```console helm install locust + deliveryhero/locust \ --set loadtest.name=my-loadtest \ --set loadtest.locust_locustfile_configmap=my-loadtest-locustfile + \ --set loadtest.locust_lib_configmap=my-loadtest-lib ``` ' + digest: fdc854b46dbc39bfb329add19ff5183aa2470e235559538dff1d6af6e8deed0d + home: https://github.com/locustio/locust + icon: https://locust.io/static/img/logo.png + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: locust + urls: + - charts/locust-0.31.3.tgz + version: 0.31.3 + - apiVersion: v1 + appVersion: 2.15.1 + created: "2023-03-20T04:15:48Z" + description: 'A chart to install Locust, a scalable load testing tool written + in Python. This chart will setup everything required to run a full distributed + locust environment with any amount of workers. This chart will also create + configmaps for storing the locust files in Kubernetes, this way there is no + need to build custom docker images. By default it will install using an example + locustfile and lib from [stable/locust/locustfiles/example](https://github.com/deliveryhero/helm-charts/tree/master/stable/locust/locustfiles/example). 
+ When you want to provide your own locustfile, you will need to create 2 configmaps + using the structure from that example: ```console kubectl create configmap + my-loadtest-locustfile --from-file path/to/your/main.py kubectl create configmap + my-loadtest-lib --from-file path/to/your/lib/ ``` And then install the chart + passing the names of those configmaps as values: ```console helm install locust + deliveryhero/locust \ --set loadtest.name=my-loadtest \ --set loadtest.locust_locustfile_configmap=my-loadtest-locustfile + \ --set loadtest.locust_lib_configmap=my-loadtest-lib ``` ' + digest: e6b3f7342509fe8865cfc06fbb7bf11e071a2c66f0ddf3b7cbcbc205c74ea73c + home: https://github.com/locustio/locust + icon: https://locust.io/static/img/logo.png + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: locust + urls: + - charts/locust-0.31.2.tgz + version: 0.31.2 + - apiVersion: v1 + appVersion: 2.13.1 + created: "2023-03-16T08:27:37Z" + description: 'A chart to install Locust, a scalable load testing tool written + in Python. This chart will setup everything required to run a full distributed + locust environment with any amount of workers. This chart will also create + configmaps for storing the locust files in Kubernetes, this way there is no + need to build custom docker images. By default it will install using an example + locustfile and lib from [stable/locust/locustfiles/example](https://github.com/deliveryhero/helm-charts/tree/master/stable/locust/locustfiles/example). 
+ When you want to provide your own locustfile, you will need to create 2 configmaps + using the structure from that example: ```console kubectl create configmap + my-loadtest-locustfile --from-file path/to/your/main.py kubectl create configmap + my-loadtest-lib --from-file path/to/your/lib/ ``` And then install the chart + passing the names of those configmaps as values: ```console helm install locust + deliveryhero/locust \ --set loadtest.name=my-loadtest \ --set loadtest.locust_locustfile_configmap=my-loadtest-locustfile + \ --set loadtest.locust_lib_configmap=my-loadtest-lib ``` ' + digest: daae89120ec0fed91c6d22b67b3e31f6d22022d1b85d50151c7ddd371b0abd81 + home: https://github.com/locustio/locust + icon: https://locust.io/static/img/logo.png + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: locust + urls: + - charts/locust-0.31.1.tgz + version: 0.31.1 + - apiVersion: v1 + appVersion: 2.13.1 + created: "2022-12-20T08:43:51Z" + description: 'A chart to install Locust, a scalable load testing tool written + in Python. This chart will setup everything required to run a full distributed + locust environment with any amount of workers. This chart will also create + configmaps for storing the locust files in Kubernetes, this way there is no + need to build custom docker images. By default it will install using an example + locustfile and lib from [stable/locust/locustfiles/example](https://github.com/deliveryhero/helm-charts/tree/master/stable/locust/locustfiles/example). 
+ When you want to provide your own locustfile, you will need to create 2 configmaps + using the structure from that example: ```console kubectl create configmap + my-loadtest-locustfile --from-file path/to/your/main.py kubectl create configmap + my-loadtest-lib --from-file path/to/your/lib/ ``` And then install the chart + passing the names of those configmaps as values: ```console helm install locust + deliveryhero/locust \ --set loadtest.name=my-loadtest \ --set loadtest.locust_locustfile_configmap=my-loadtest-locustfile + \ --set loadtest.locust_lib_configmap=my-loadtest-lib ``` ' + digest: c7d9e9b2cbd5a129a89b7ca4095daa6425e5bc820e5798d7f0564cc09effc436 + home: https://github.com/locustio/locust + icon: https://locust.io/static/img/logo.png + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: locust + urls: + - charts/locust-0.31.0.tgz + version: 0.31.0 + - apiVersion: v1 + appVersion: 2.13.1 + created: "2022-12-14T16:33:31Z" + description: 'A chart to install Locust, a scalable load testing tool written + in Python. This chart will setup everything required to run a full distributed + locust environment with any amount of workers. This chart will also create + configmaps for storing the locust files in Kubernetes, this way there is no + need to build custom docker images. By default it will install using an example + locustfile and lib from [stable/locust/locustfiles/example](https://github.com/deliveryhero/helm-charts/tree/master/stable/locust/locustfiles/example). 
+ When you want to provide your own locustfile, you will need to create 2 configmaps + using the structure from that example: ```console kubectl create configmap + my-loadtest-locustfile --from-file path/to/your/main.py kubectl create configmap + my-loadtest-lib --from-file path/to/your/lib/ ``` And then install the chart + passing the names of those configmaps as values: ```console helm install locust + deliveryhero/locust \ --set loadtest.name=my-loadtest \ --set loadtest.locust_locustfile_configmap=my-loadtest-locustfile + \ --set loadtest.locust_lib_configmap=my-loadtest-lib ``` ' + digest: 59dc7278e50601e589e8ff7d35789397411efbf6ff9abd83bd7305b4ebdb250e + home: https://github.com/locustio/locust + icon: https://locust.io/static/img/logo.png + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: locust + urls: + - charts/locust-0.30.1.tgz + version: 0.30.1 + - apiVersion: v1 + appVersion: 2.12.1 + created: "2022-12-06T09:15:40Z" + description: 'A chart to install Locust, a scalable load testing tool written + in Python. This chart will setup everything required to run a full distributed + locust environment with any amount of workers. This chart will also create + configmaps for storing the locust files in Kubernetes, this way there is no + need to build custom docker images. By default it will install using an example + locustfile and lib from [stable/locust/locustfiles/example](https://github.com/deliveryhero/helm-charts/tree/master/stable/locust/locustfiles/example). 
+ When you want to provide your own locustfile, you will need to create 2 configmaps + using the structure from that example: ```console kubectl create configmap + my-loadtest-locustfile --from-file path/to/your/main.py kubectl create configmap + my-loadtest-lib --from-file path/to/your/lib/ ``` And then install the chart + passing the names of those configmaps as values: ```console helm install locust + deliveryhero/locust \ --set loadtest.name=my-loadtest \ --set loadtest.locust_locustfile_configmap=my-loadtest-locustfile + \ --set loadtest.locust_lib_configmap=my-loadtest-lib ``` ' + digest: 6238ec5ab2506ca38e710a5759e0bf92caf36ba14724d4d9123e8ce20c488dc2 + home: https://github.com/locustio/locust + icon: https://locust.io/static/img/logo.png + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: locust + urls: + - charts/locust-0.30.0.tgz + version: 0.30.0 + - apiVersion: v1 + appVersion: 2.11.0 + created: "2022-09-22T07:54:41.027875566Z" + description: 'A chart to install Locust, a scalable load testing tool written + in Python. This chart will setup everything required to run a full distributed + locust environment with any amount of workers. This chart will also create + configmaps for storing the locust files in Kubernetes, this way there is no + need to build custom docker images. By default it will install using an example + locustfile and lib from [stable/locust/locustfiles/example](https://github.com/deliveryhero/helm-charts/tree/master/stable/locust/locustfiles/example). 
+ When you want to provide your own locustfile, you will need to create 2 configmaps + using the structure from that example: ```console kubectl create configmap + my-loadtest-locustfile --from-file path/to/your/main.py kubectl create configmap + my-loadtest-lib --from-file path/to/your/lib/ ``` And then install the chart + passing the names of those configmaps as values: ```console helm install locust + deliveryhero/locust \ --set loadtest.name=my-loadtest \ --set loadtest.locust_locustfile_configmap=my-loadtest-locustfile + \ --set loadtest.locust_lib_configmap=my-loadtest-lib ``` ' + digest: 4829eecc7d6a730fa4701720f068de1e59f52608d8ef2a2c49ea3ee7e9f0ed15 + home: https://github.com/locustio/locust + icon: https://locust.io/static/img/logo.png + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: locust + urls: + - charts/locust-0.29.0.tgz + version: 0.29.0 + - apiVersion: v1 + appVersion: 2.11.0 + created: "2022-08-26T11:39:59Z" + description: 'A chart to install Locust, a scalable load testing tool written + in Python. This chart will setup everything required to run a full distributed + locust environment with any amount of workers. This chart will also create + configmaps for storing the locust files in Kubernetes, this way there is no + need to build custom docker images. By default it will install using an example + locustfile and lib from [stable/locust/locustfiles/example](https://github.com/deliveryhero/helm-charts/tree/master/stable/locust/locustfiles/example). 
+ When you want to provide your own locustfile, you will need to create 2 configmaps + using the structure from that example: ```console kubectl create configmap + my-loadtest-locustfile --from-file path/to/your/main.py kubectl create configmap + my-loadtest-lib --from-file path/to/your/lib/ ``` And then install the chart + passing the names of those configmaps as values: ```console helm install locust + deliveryhero/locust \ --set loadtest.name=my-loadtest \ --set loadtest.locust_locustfile_configmap=my-loadtest-locustfile + \ --set loadtest.locust_lib_configmap=my-loadtest-lib ``` ' + digest: 655af48fb01d7e7a4d9590d0b6cae3f21722e00edc089c9029c9d2824949b602 + home: https://github.com/locustio/locust + icon: https://locust.io/static/img/logo.png + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: locust + urls: + - charts/locust-0.28.0.tgz + version: 0.28.0 + - apiVersion: v1 + appVersion: 2.8.6 + created: "2022-08-12T07:23:39Z" + description: 'A chart to install Locust, a scalable load testing tool written + in Python. This chart will setup everything required to run a full distributed + locust environment with any amount of workers. This chart will also create + configmaps for storing the locust files in Kubernetes, this way there is no + need to build custom docker images. By default it will install using an example + locustfile and lib from [stable/locust/locustfiles/example](https://github.com/deliveryhero/helm-charts/tree/master/stable/locust/locustfiles/example). 
+ When you want to provide your own locustfile, you will need to create 2 configmaps + using the structure from that example: ```console kubectl create configmap + my-loadtest-locustfile --from-file path/to/your/main.py kubectl create configmap + my-loadtest-lib --from-file path/to/your/lib/ ``` And then install the chart + passing the names of those configmaps as values: ```console helm install locust + deliveryhero/locust \ --set loadtest.name=my-loadtest \ --set loadtest.locust_locustfile_configmap=my-loadtest-locustfile + \ --set loadtest.locust_lib_configmap=my-loadtest-lib ``` ' + digest: 0969dee8ad4fd19da65492de3fa59a39c1c23ceb1c3ccceb9e8e5990e950207e + home: https://github.com/locustio/locust + icon: https://locust.io/static/img/logo.png + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: locust + urls: + - charts/locust-0.27.1.tgz + version: 0.27.1 + - apiVersion: v1 + appVersion: 2.8.6 + created: "2022-06-30T08:58:06Z" + description: 'A chart to install Locust, a scalable load testing tool written + in Python. This chart will setup everything required to run a full distributed + locust environment with any amount of workers. This chart will also create + configmaps for storing the locust files in Kubernetes, this way there is no + need to build custom docker images. By default it will install using an example + locustfile and lib from [stable/locust/locustfiles/example](https://github.com/deliveryhero/helm-charts/tree/master/stable/locust/locustfiles/example). 
+ When you want to provide your own locustfile, you will need to create 2 configmaps + using the structure from that example: ```console kubectl create configmap + my-loadtest-locustfile --from-file path/to/your/main.py kubectl create configmap + my-loadtest-lib --from-file path/to/your/lib/ ``` And then install the chart + passing the names of those configmaps as values: ```console helm install locust + deliveryhero/locust \ --set loadtest.name=my-loadtest \ --set loadtest.locust_locustfile_configmap=my-loadtest-locustfile + \ --set loadtest.locust_lib_configmap=my-loadtest-lib ``` ' + digest: c07a89f55d1dc867dad7bcbfe888a4aacb7220766f1626a7d60138346e4b482f + home: https://github.com/locustio/locust + icon: https://locust.io/static/img/logo.png + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: locust + urls: + - charts/locust-0.27.0.tgz + version: 0.27.0 + - apiVersion: v1 + appVersion: 2.1.0 + created: "2022-05-19T17:44:13Z" + description: 'A chart to install Locust, a scalable load testing tool written + in Python. This chart will setup everything required to run a full distributed + locust environment with any amount of workers. This chart will also create + configmaps for storing the locust files in Kubernetes, this way there is no + need to build custom docker images. By default it will install using an example + locustfile and lib from [stable/locust/locustfiles/example](https://github.com/deliveryhero/helm-charts/tree/master/stable/locust/locustfiles/example). 
+ When you want to provide your own locustfile, you will need to create 2 configmaps + using the structure from that example: ```console kubectl create configmap + my-loadtest-locustfile --from-file path/to/your/main.py kubectl create configmap + my-loadtest-lib --from-file path/to/your/lib/ ``` And then install the chart + passing the names of those configmaps as values: ```console helm install locust + deliveryhero/locust \ --set loadtest.name=my-loadtest \ --set loadtest.locust_locustfile_configmap=my-loadtest-locustfile + \ --set loadtest.locust_lib_configmap=my-loadtest-lib ``` ' + digest: 7037246c535187e414d11db96c88fb39ff3badb64a3dc8672accfb500f8314c3 + home: https://github.com/locustio/locust + icon: https://locust.io/static/img/logo.png + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: locust + urls: + - charts/locust-0.26.1.tgz + version: 0.26.1 + - apiVersion: v1 + appVersion: 2.1.0 + created: "2022-03-03T12:25:47Z" + description: 'A chart to install Locust, a scalable load testing tool written + in Python. This chart will setup everything required to run a full distributed + locust environment with any amount of workers. This chart will also create + configmaps for storing the locust files in Kubernetes, this way there is no + need to build custom docker images. By default it will install using an example + locustfile and lib from [stable/locust/locustfiles/example](https://github.com/deliveryhero/helm-charts/tree/master/stable/locust/locustfiles/example). 
+ When you want to provide your own locustfile, you will need to create 2 configmaps + using the structure from that example: ```console kubectl create configmap + my-loadtest-locustfile --from-file path/to/your/main.py kubectl create configmap + my-loadtest-lib --from-file path/to/your/lib/ ``` And then install the chart + passing the names of those configmaps as values: ```console helm install locust + deliveryhero/locust \ --set loadtest.name=my-loadtest \ --set loadtest.locust_locustfile_configmap=my-loadtest-locustfile + \ --set loadtest.locust_lib_configmap=my-loadtest-lib ``` ' + digest: 40f55b38c7489b885d6ffd50b769fc45fee7b22b67db30354527ba564599b513 + home: https://github.com/locustio/locust + icon: https://locust.io/static/img/logo.png + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: locust + urls: + - charts/locust-0.26.0.tgz + version: 0.26.0 + - apiVersion: v1 + appVersion: 2.1.0 + created: "2022-02-28T08:20:47Z" + description: 'A chart to install Locust, a scalable load testing tool written + in Python. This chart will setup everything required to run a full distributed + locust environment with any amount of workers. This chart will also create + configmaps for storing the locust files in Kubernetes, this way there is no + need to build custom docker images. By default it will install using an example + locustfile and lib from [stable/locust/locustfiles/example](https://github.com/deliveryhero/helm-charts/tree/master/stable/locust/locustfiles/example). 
+ When you want to provide your own locustfile, you will need to create 2 configmaps + using the structure from that example: ```console kubectl create configmap + my-loadtest-locustfile --from-file path/to/your/main.py kubectl create configmap + my-loadtest-lib --from-file path/to/your/lib/ ``` And then install the chart + passing the names of those configmaps as values: ```console helm install locust + deliveryhero/locust \ --set loadtest.name=my-loadtest \ --set loadtest.locust_locustfile_configmap=my-loadtest-locustfile + \ --set loadtest.locust_lib_configmap=my-loadtest-lib ``` ' + digest: 61b7244d1529b9350b629be0ba7529a96c332dde77322ec065ce509ecc36c899 + home: https://github.com/locustio/locust + icon: https://locust.io/static/img/logo.png + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: locust + urls: + - charts/locust-0.25.0.tgz + version: 0.25.0 + - apiVersion: v1 + appVersion: 2.1.0 + created: "2022-02-24T12:45:32Z" + description: 'A chart to install Locust, a scalable load testing tool written + in Python. This chart will setup everything required to run a full distributed + locust environment with any amount of workers. This chart will also create + configmaps for storing the locust files in Kubernetes, this way there is no + need to build custom docker images. By default it will install using an example + locustfile and lib from [stable/locust/locustfiles/example](https://github.com/deliveryhero/helm-charts/tree/master/stable/locust/locustfiles/example). 
+ When you want to provide your own locustfile, you will need to create 2 configmaps + using the structure from that example: ```console kubectl create configmap + my-loadtest-locustfile --from-file path/to/your/main.py kubectl create configmap + my-loadtest-lib --from-file path/to/your/lib/ ``` And then install the chart + passing the names of those configmaps as values: ```console helm install locust + deliveryhero/locust \ --set loadtest.name=my-loadtest \ --set loadtest.locust_locustfile_configmap=my-loadtest-locustfile + \ --set loadtest.locust_lib_configmap=my-loadtest-lib ``` ' + digest: b68f57c23d799e534067b2cea8803d01bb2b619dbeb2560cae09ec57e73420ac + home: https://github.com/locustio/locust + icon: https://locust.io/static/img/logo.png + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: locust + urls: + - charts/locust-0.24.0.tgz + version: 0.24.0 + - apiVersion: v1 + appVersion: 2.1.0 + created: "2022-02-18T17:37:35Z" + description: 'A chart to install Locust, a scalable load testing tool written + in Python. This chart will setup everything required to run a full distributed + locust environment with any amount of workers. This chart will also create + configmaps for storing the locust files in Kubernetes, this way there is no + need to build custom docker images. By default it will install using an example + locustfile and lib from [stable/locust/locustfiles/example](https://github.com/deliveryhero/helm-charts/tree/master/stable/locust/locustfiles/example). 
+ When you want to provide your own locustfile, you will need to create 2 configmaps + using the structure from that example: ```console kubectl create configmap + my-loadtest-locustfile --from-file path/to/your/main.py kubectl create configmap + my-loadtest-lib --from-file path/to/your/lib/ ``` And then install the chart + passing the names of those configmaps as values: ```console helm install locust + deliveryhero/locust \ --set loadtest.name=my-loadtest \ --set loadtest.locust_locustfile_configmap=my-loadtest-locustfile + \ --set loadtest.locust_lib_configmap=my-loadtest-lib ``` ' + digest: 17d6ed9ced2953de51501c5c7a15a4a4a28ee9bbd620191eb8d404e1a2f8f01b + home: https://github.com/locustio/locust + icon: https://locust.io/static/img/logo.png + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: locust + urls: + - charts/locust-0.23.0.tgz + version: 0.23.0 + - apiVersion: v1 + appVersion: 2.1.0 + created: "2022-01-27T14:09:51Z" + description: 'A chart to install Locust, a scalable load testing tool written + in Python. This chart will setup everything required to run a full distributed + locust environment with any amount of workers. This chart will also create + configmaps for storing the locust files in Kubernetes, this way there is no + need to build custom docker images. By default it will install using an example + locustfile and lib from [stable/locust/locustfiles/example](https://github.com/deliveryhero/helm-charts/tree/master/stable/locust/locustfiles/example). 
+ When you want to provide your own locustfile, you will need to create 2 configmaps + using the structure from that example: ```console kubectl create configmap + my-loadtest-locustfile --from-file path/to/your/main.py kubectl create configmap + my-loadtest-lib --from-file path/to/your/lib/ ``` And then install the chart + passing the names of those configmaps as values: ```console helm install locust + deliveryhero/locust \ --set loadtest.name=my-loadtest \ --set loadtest.locust_locustfile_configmap=my-loadtest-locustfile + \ --set loadtest.locust_lib_configmap=my-loadtest-lib ``` ' + digest: a3189d86e9e19321e2137058d551150f66bfa81e2304144132481b72cbb0aa69 + home: https://github.com/locustio/locust + icon: https://locust.io/static/img/logo.png + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: locust + urls: + - charts/locust-0.22.0.tgz + version: 0.22.0 + - apiVersion: v1 + appVersion: 2.1.0 + created: "2022-01-19T13:26:05Z" + description: 'A chart to install Locust, a scalable load testing tool written + in Python. This chart will setup everything required to run a full distributed + locust environment with any amount of workers. This chart will also create + configmaps for storing the locust files in Kubernetes, this way there is no + need to build custom docker images. By default it will install using an example + locustfile and lib from [stable/locust/locustfiles/example](https://github.com/deliveryhero/helm-charts/tree/master/stable/locust/locustfiles/example). 
+ When you want to provide your own locustfile, you will need to create 2 configmaps + using the structure from that example: ```console kubectl create configmap + my-loadtest-locustfile --from-file path/to/your/main.py kubectl create configmap + my-loadtest-lib --from-file path/to/your/lib/ ``` And then install the chart + passing the names of those configmaps as values: ```console helm install locust + deliveryhero/locust \ --set loadtest.name=my-loadtest \ --set loadtest.locust_locustfile_configmap=my-loadtest-locustfile + \ --set loadtest.locust_lib_configmap=my-loadtest-lib ``` ' + digest: c59271be5d9e2181aec9b928bde5485a6d5b2cf8ae330e1f28fdc178c36ab685 + home: https://github.com/locustio/locust + icon: https://locust.io/static/img/logo.png + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: locust + urls: + - charts/locust-0.21.1.tgz + version: 0.21.1 + - apiVersion: v1 + appVersion: 2.1.0 + created: "2022-01-10T13:29:47Z" + description: 'A chart to install Locust, a scalable load testing tool written + in Python. This chart will setup everything required to run a full distributed + locust environment with any amount of workers. This chart will also create + configmaps for storing the locust files in Kubernetes, this way there is no + need to build custom docker images. By default it will install using an example + locustfile and lib from [stable/locust/locustfiles/example](https://github.com/deliveryhero/helm-charts/tree/master/stable/locust/locustfiles/example). 
+ When you want to provide your own locustfile, you will need to create 2 configmaps + using the structure from that example: ```console kubectl create configmap + my-loadtest-locustfile --from-file path/to/your/main.py kubectl create configmap + my-loadtest-lib --from-file path/to/your/lib/ ``` And then install the chart + passing the names of those configmaps as values: ```console helm install locust + deliveryhero/locust \ --set loadtest.name=my-loadtest \ --set loadtest.locust_locustfile_configmap=my-loadtest-locustfile + \ --set loadtest.locust_lib_configmap=my-loadtest-lib ``` ' + digest: c79ef8db4c6b1d6996ecea2e96f3f6dbed4f11cef51f3221098b43ca947bb61b + home: https://github.com/locustio/locust + icon: https://locust.io/static/img/logo.png + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: locust + urls: + - charts/locust-0.21.0.tgz + version: 0.21.0 + - apiVersion: v1 + appVersion: 2.1.0 + created: "2021-10-11T06:59:48Z" + description: 'A chart to install Locust, a scalable load testing tool written + in Python. This chart will setup everything required to run a full distributed + locust environment with any amount of workers. This chart will also create + configmaps for storing the locust files in Kubernetes, this way there is no + need to build custom docker images. By default it will install using an example + locustfile and lib from [stable/locust/locustfiles/example](https://github.com/deliveryhero/helm-charts/tree/master/stable/locust/locustfiles/example). 
+ When you want to provide your own locustfile, you will need to create 2 configmaps + using the structure from that example: ```console kubectl create configmap + my-loadtest-locustfile --from-file path/to/your/main.py kubectl create configmap + my-loadtest-lib --from-file path/to/your/lib/ ``` And then install the chart + passing the names of those configmaps as values: ```console helm install locust + deliveryhero/locust \ --set loadtest.name=my-loadtest \ --set loadtest.locust_locustfile_configmap=my-loadtest-locustfile + \ --set loadtest.locust_lib_configmap=my-loadtest-lib ``` ' + digest: b97d360d283527b24eccb0411dce15c6c7e7992a0996eda0f71b15c8cd14dc78 + home: https://github.com/locustio/locust + icon: https://locust.io/static/img/logo.png + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: locust + urls: + - charts/locust-0.20.3.tgz + version: 0.20.3 + - apiVersion: v1 + appVersion: 2.1.0 + created: "2021-09-16T18:12:46Z" + description: | + A chart to install Locust, a scalable load testing tool written in Python. + + This chart will setup everything required to run a full distributed locust environment with any amount of workers. + + This chart will also create configmaps for storing the locust files in Kubernetes, this way there is no need to build custom docker images. + + By default it will install using an example locustfile and lib from [stable/locust/locustfiles/example](https://github.com/deliveryhero/helm-charts/tree/master/stable/locust/locustfiles/example). 
When you want to provide your own locustfile, you will need to create 2 configmaps using the structure from that example: + + ```console + kubectl create configmap my-loadtest-locustfile --from-file path/to/your/main.py + kubectl create configmap my-loadtest-lib --from-file path/to/your/lib/ + ``` + + And then install the chart passing the names of those configmaps as values: + + ```console + helm install locust deliveryhero/locust \ + --set loadtest.name=my-loadtest \ + --set loadtest.locust_locustfile_configmap=my-loadtest-locustfile \ + --set loadtest.locust_lib_configmap=my-loadtest-lib + ``` + digest: 327fd9bc98249e7b0df68b5db822519aa7b85745af8d93c4860b641b4f5b6c2c + home: https://github.com/locustio/locust + icon: https://locust.io/static/img/logo.png + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: locust + urls: + - charts/locust-0.20.2.tgz + version: 0.20.2 + - apiVersion: v1 + appVersion: 2.1.0 + created: "2021-08-23T10:56:48Z" + description: | + A chart to install Locust, a scalable load testing tool written in Python. + + This chart will setup everything required to run a full distributed locust environment with any amount of workers. + + This chart will also create configmaps for storing the locust files in Kubernetes, this way there is no need to build custom docker images. + + By default it will install using an example locustfile and lib from [stable/locust/locustfiles/example](https://github.com/deliveryhero/helm-charts/tree/master/stable/locust/locustfiles/example). 
When you want to provide your own locustfile, you will need to create 2 configmaps using the structure from that example: + + ```console + kubectl create configmap my-loadtest-locustfile --from-file path/to/your/main.py + kubectl create configmap my-loadtest-lib --from-file path/to/your/lib/ + ``` + + And then install the chart passing the names of those configmaps as values: + + ```console + helm install locust deliveryhero/locust \ + --set loadtest.name=my-loadtest \ + --set loadtest.locust_locustfile_configmap=my-loadtest-locustfile \ + --set loadtest.locust_lib_configmap=my-loadtest-lib + ``` + digest: 64db38174c27891a2123d67cafab5b977defa24c8bef01a2c4dcd1a89d90f391 + home: https://github.com/locustio/locust + icon: https://locust.io/static/img/logo.png + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: locust + urls: + - charts/locust-0.20.1.tgz + version: 0.20.1 + - apiVersion: v1 + appVersion: 2.1.0 + created: "2021-08-20T08:10:16Z" + description: | + A chart to install Locust, a scalable load testing tool written in Python. + + This chart will setup everything required to run a full distributed locust environment with any amount of workers. + + This chart will also create configmaps for storing the locust files in Kubernetes, this way there is no need to build custom docker images. + + By default it will install using an example locustfile and lib from [stable/locust/locustfiles/example](https://github.com/deliveryhero/helm-charts/tree/master/stable/locust/locustfiles/example). 
When you want to provide your own locustfile, you will need to create 2 configmaps using the structure from that example: + + ```console + kubectl create configmap my-loadtest-locustfile --from-file path/to/your/main.py + kubectl create configmap my-loadtest-lib --from-file path/to/your/lib/ + ``` + + And then install the chart passing the names of those configmaps as values: + + ```console + helm install locust deliveryhero/locust \ + --set loadtest.name=my-loadtest \ + --set loadtest.locust_locustfile_configmap=my-loadtest-locustfile \ + --set loadtest.locust_lib_configmap=my-loadtest-lib + ``` + digest: 093b8dbe5a50a5589491f7d4a9445d33beb6f3d617c3ed6db0dfca5713245833 + home: https://github.com/locustio/locust + icon: https://locust.io/static/img/logo.png + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: locust + urls: + - charts/locust-0.20.tgz + version: "0.20" + - apiVersion: v1 + appVersion: 1.4.4 + created: "2021-08-17T11:32:41Z" + description: | + A chart to install Locust, a scalable load testing tool written in Python. + + This chart will setup everything required to run a full distributed locust environment with any amount of workers. + + This chart will also create configmaps for storing the locust files in Kubernetes, this way there is no need to build custom docker images. + + By default it will install using an example locustfile and lib from [stable/locust/locustfiles/example](https://github.com/deliveryhero/helm-charts/tree/master/stable/locust/locustfiles/example). 
When you want to provide your own locustfile, you will need to create 2 configmaps using the structure from that example: + + ```console + kubectl create configmap my-loadtest-locustfile --from-file path/to/your/main.py + kubectl create configmap my-loadtest-lib --from-file path/to/your/lib/ + ``` + + And then install the chart passing the names of those configmaps as values: + + ```console + helm install locust deliveryhero/locust \ + --set loadtest.name=my-loadtest \ + --set loadtest.locust_locustfile_configmap=my-loadtest-locustfile \ + --set loadtest.locust_lib_configmap=my-loadtest-lib + ``` + digest: d657b9d4c47d89869f10fab9adf12c57bfa36b1a9b570f02a16a556b6549affc + home: https://github.com/locustio/locust + icon: https://locust.io/static/img/logo.png + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: locust + urls: + - charts/locust-0.19.25.tgz + version: 0.19.25 + - apiVersion: v1 + appVersion: 1.4.4 + created: "2021-08-16T18:30:51Z" + description: | + A chart to install Locust, a scalable load testing tool written in Python. + + This chart will setup everything required to run a full distributed locust environment with any amount of workers. + + This chart will also create configmaps for storing the locust files in Kubernetes, this way there is no need to build custom docker images. + + By default it will install using an example locustfile and lib from [stable/locust/locustfiles/example](https://github.com/deliveryhero/helm-charts/tree/master/stable/locust/locustfiles/example). 
When you want to provide your own locustfile, you will need to create 2 configmaps using the structure from that example: + + ```console + kubectl create configmap my-loadtest-locustfile --from-file path/to/your/main.py + kubectl create configmap my-loadtest-lib --from-file path/to/your/lib/ + ``` + + And then install the chart passing the names of those configmaps as values: + + ```console + helm install locust deliveryhero/locust \ + --set loadtest.name=my-loadtest \ + --set loadtest.locust_locustfile_configmap=my-loadtest-locustfile \ + --set loadtest.locust_lib_configmap=my-loadtest-lib + ``` + digest: a110cce0964c04c285c8f42e38c85d3a1fe93181a6617048f8767f40c214d944 + home: https://github.com/locustio/locust + icon: https://locust.io/static/img/logo.png + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: locust + urls: + - charts/locust-0.19.24.tgz + version: 0.19.24 + - apiVersion: v1 + appVersion: 1.4.4 + created: "2021-08-06T16:51:14Z" + description: | + A chart to install Locust, a scalable load testing tool written in Python. + + This chart will setup everything required to run a full distributed locust environment with any amount of workers. + + This chart will also create configmaps for storing the locust files in Kubernetes, this way there is no need to build custom docker images. + + By default it will install using an example locustfile and lib from [stable/locust/locustfiles/example](https://github.com/deliveryhero/helm-charts/tree/master/stable/locust/locustfiles/example). 
When you want to provide your own locustfile, you will need to create 2 configmaps using the structure from that example: + + ```console + kubectl create configmap my-loadtest-locustfile --from-file path/to/your/main.py + kubectl create configmap my-loadtest-lib --from-file path/to/your/lib/ + ``` + + And then install the chart passing the names of those configmaps as values: + + ```console + helm install locust deliveryhero/locust \ + --set loadtest.name=my-loadtest \ + --set loadtest.locust_locustfile_configmap=my-loadtest-locustfile \ + --set loadtest.locust_lib_configmap=my-loadtest-lib + ``` + digest: 7325e2627ae22fa5656a18f7b8ed1d215a9a808afb13bf9aba0dae53691e33ba + home: https://github.com/locustio/locust + icon: https://locust.io/static/img/logo.png + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: locust + urls: + - charts/locust-0.19.23.tgz + version: 0.19.23 + - apiVersion: v1 + appVersion: 1.4.4 + created: "2021-07-29T20:02:02Z" + description: | + A chart to install Locust, a scalable load testing tool written in Python. + + This chart will setup everything required to run a full distributed locust environment with any amount of workers. + + This chart will also create configmaps for storing the locust files in Kubernetes, this way there is no need to build custom docker images. + + By default it will install using an example locustfile and lib from [stable/locust/locustfiles/example](https://github.com/deliveryhero/helm-charts/tree/master/stable/locust/locustfiles/example). 
When you want to provide your own locustfile, you will need to create 2 configmaps using the structure from that example: + + ```console + kubectl create configmap my-loadtest-locustfile --from-file path/to/your/main.py + kubectl create configmap my-loadtest-lib --from-file path/to/your/lib/ + ``` + + And then install the chart passing the names of those configmaps as values: + + ```console + helm install locust deliveryhero/locust \ + --set loadtest.name=my-loadtest \ + --set loadtest.locust_locustfile_configmap=my-loadtest-locustfile \ + --set loadtest.locust_lib_configmap=my-loadtest-lib + ``` + digest: d6cf76b64339c82234dc93ca5ecd9cccd57bde8b41a561dd75bf023ac9dea5d1 + home: https://github.com/locustio/locust + icon: https://locust.io/static/img/logo.png + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: locust + urls: + - charts/locust-0.19.22.tgz + version: 0.19.22 + - apiVersion: v1 + appVersion: 1.4.4 + created: "2021-07-14T19:22:25Z" + description: | + A chart to install Locust, a scalable load testing tool written in Python. + + This chart will setup everything required to run a full distributed locust environment with any amount of workers. + + This chart will also create configmaps for storing the locust files in Kubernetes, this way there is no need to build custom docker images. + + By default it will install using an example locustfile and lib from [stable/locust/locustfiles/example](https://github.com/deliveryhero/helm-charts/tree/master/stable/locust/locustfiles/example). 
When you want to provide your own locustfile, you will need to create 2 configmaps using the structure from that example: + + ```console + kubectl create configmap my-loadtest-locustfile --from-file path/to/your/main.py + kubectl create configmap my-loadtest-lib --from-file path/to/your/lib/ + ``` + + And then install the chart passing the names of those configmaps as values: + + ```console + helm install locust deliveryhero/locust \ + --set loadtest.name=my-loadtest \ + --set loadtest.locust_locustfile_configmap=my-loadtest-locustfile \ + --set loadtest.locust_lib_configmap=my-loadtest-lib + ``` + digest: 46f200870257c36a9de2068326cfe8cc76de6af7ae6ba294d9025cfa42780746 + home: https://github.com/locustio/locust + icon: https://locust.io/static/img/logo.png + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: locust + urls: + - charts/locust-0.19.21.tgz + version: 0.19.21 + - apiVersion: v1 + appVersion: 1.4.4 + created: "2021-07-13T10:16:18Z" + description: | + A chart to install Locust, a scalable load testing tool written in Python. + + This chart will setup everything required to run a full distributed locust environment with any amount of workers. + + This chart will also create configmaps for storing the locust files in Kubernetes, this way there is no need to build custom docker images. + + By default it will install using an example locustfile and lib from [stable/locust/locustfiles/example](https://github.com/deliveryhero/helm-charts/tree/master/stable/locust/locustfiles/example). 
When you want to provide your own locustfile, you will need to create 2 configmaps using the structure from that example: + + ```console + kubectl create configmap my-loadtest-locustfile --from-file path/to/your/main.py + kubectl create configmap my-loadtest-lib --from-file path/to/your/lib/ + ``` + + And then install the chart passing the names of those configmaps as values: + + ```console + helm install locust deliveryhero/locust \ + --set loadtest.name=my-loadtest \ + --set loadtest.locust_locustfile_configmap=my-loadtest-locustfile \ + --set loadtest.locust_lib_configmap=my-loadtest-lib + ``` + digest: 0785fd89123dbbb075a8d43e79233bf0c66f1d0a053cc9d9d8234c1a7870ef55 + home: https://github.com/locustio/locust + icon: https://locust.io/static/img/logo.png + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: locust + urls: + - charts/locust-0.19.20.tgz + version: 0.19.20 + - apiVersion: v1 + appVersion: 1.4.4 + created: "2021-07-12T17:18:01Z" + description: | + A chart to install Locust, a scalable load testing tool written in Python. + + This chart will setup everything required to run a full distributed locust environment with any amount of workers. + + This chart will also create configmaps for storing the locust files in Kubernetes, this way there is no need to build custom docker images. + + By default it will install using an example locustfile and lib from [stable/locust/locustfiles/example](https://github.com/deliveryhero/helm-charts/tree/master/stable/locust/locustfiles/example). 
When you want to provide your own locustfile, you will need to create 2 configmaps using the structure from that example: + + ```console + kubectl create configmap my-loadtest-locustfile --from-file path/to/your/main.py + kubectl create configmap my-loadtest-lib --from-file path/to/your/lib/ + ``` + + And then install the chart passing the names of those configmaps as values: + + ```console + helm install locust deliveryhero/locust \ + --set loadtest.name=my-loadtest \ + --set loadtest.locust_locustfile_configmap=my-loadtest-locustfile \ + --set loadtest.locust_lib_configmap=my-loadtest-lib + ``` + digest: 552be1cc31e960d0e2cd61ee600482c431f0c42e532ad13f2ca63c928158bf4d + home: https://github.com/locustio/locust + icon: https://locust.io/static/img/logo.png + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: locust + urls: + - charts/locust-0.9.19.tgz + version: 0.9.19 + - apiVersion: v1 + appVersion: 1.4.4 + created: "2021-05-11T13:39:01Z" + description: | + A chart to install Locust, a scalable load testing tool written in Python. + + This chart will setup everything required to run a full distributed locust environment with any amount of workers. + + This chart will also create configmaps for storing the locust files in Kubernetes, this way there is no need to build custom docker images. + + By default it will install using an example locustfile and lib from [stable/locust/locustfiles/example](https://github.com/deliveryhero/helm-charts/tree/master/stable/locust/locustfiles/example). 
When you want to provide your own locustfile, you will need to create 2 configmaps using the structure from that example: + + ```console + kubectl create configmap my-loadtest-locustfile --from-file path/to/your/main.py + kubectl create configmap my-loadtest-lib --from-file path/to/your/lib/ + ``` + + And then install the chart passing the names of those configmaps as values: + + ```console + helm install locust deliveryhero/locust \ + --set loadtest.name=my-loadtest \ + --set loadtest.locust_locustfile_configmap=my-loadtest-locustfile \ + --set loadtest.locust_lib_configmap=my-loadtest-lib + ``` + digest: 15fc5e158929ff8120baac2817f0af85e10a752c7c0e1b7926168eb0907be9a4 + home: https://github.com/locustio/locust + icon: https://locust.io/static/img/logo.png + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: locust + urls: + - charts/locust-0.9.18.tgz + version: 0.9.18 + - apiVersion: v1 + appVersion: 1.4.4 + created: "2021-04-29T08:32:54Z" + description: | + A chart to install Locust, a scalable load testing tool written in Python. + + This chart will setup everything required to run a full distributed locust environment with any amount of workers. + + This chart will also create configmaps for storing the locust files in Kubernetes, this way there is no need to build custom docker images. + + By default it will install using an example locustfile and lib from [stable/locust/locustfiles/example](https://github.com/deliveryhero/helm-charts/tree/master/stable/locust/locustfiles/example). 
When you want to provide your own locustfile, you will need to create 2 configmaps using the structure from that example: + + ```console + kubectl create configmap my-loadtest-locustfile --from-file path/to/your/main.py + kubectl create configmap my-loadtest-lib --from-file path/to/your/lib/ + ``` + + And then install the chart passing the names of those configmaps as values: + + ```console + helm install locust deliveryhero/locust \ + --set loadtest.name=my-loadtest \ + --set loadtest.locust_locustfile_configmap=my-loadtest-locustfile \ + --set loadtest.locust_lib_configmap=my-loadtest-lib + ``` + digest: 9f0bd73243abe17792102eb9c121b983c851cfbda078cf023189a8f05fabc66b + home: https://github.com/locustio/locust + icon: https://locust.io/static/img/logo.png + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: locust + urls: + - charts/locust-0.9.17.tgz + version: 0.9.17 + - apiVersion: v1 + appVersion: 1.4.4 + created: "2021-04-29T08:33:21Z" + description: | + A chart to install Locust, a scalable load testing tool written in Python. + + This chart will setup everything required to run a full distributed locust environment with any amount of workers. + + This chart will also create configmaps for storing the locust files in Kubernetes, this way there is no need to build custom docker images. + + By default it will install using an example locustfile and lib from [stable/locust/locustfiles/example](https://github.com/deliveryhero/helm-charts/tree/master/stable/locust/locustfiles/example). 
When you want to provide your own locustfile, you will need to create 2 configmaps using the structure from that example: + + ```console + kubectl create configmap my-loadtest-locustfile --from-file path/to/your/main.py + kubectl create configmap my-loadtest-lib --from-file path/to/your/lib/ + ``` + + And then install the chart passing the names of those configmaps as values: + + ```console + helm install locust deliveryhero/locust \ + --set loadtest.name=my-loadtest \ + --set loadtest.locust_locustfile_configmap=my-loadtest-locustfile \ + --set loadtest.locust_lib_configmap=my-loadtest-lib + ``` + digest: 99174908b24979376185a9240a00a41ba4536e4b31d49d631fe9fae7129bd9d6 + home: https://github.com/locustio/locust + icon: https://locust.io/static/img/logo.png + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: locust + urls: + - charts/locust-0.9.16.tgz + version: 0.9.16 + - apiVersion: v1 + appVersion: 1.4.4 + created: "2021-04-28T09:52:59Z" + description: | + A chart to install Locust, a scalable load testing tool written in Python. + + This chart will setup everything required to run a full distributed locust environment with any amount of workers. + + This chart will also create configmaps for storing the locust files in Kubernetes, this way there is no need to build custom docker images. + + By default it will install using an example locustfile and lib from [stable/locust/locustfiles/example](https://github.com/deliveryhero/helm-charts/tree/master/stable/locust/locustfiles/example). 
When you want to provide your own locustfile, you will need to create 2 configmaps using the structure from that example: + + ```console + kubectl create configmap my-loadtest-locustfile --from-file path/to/your/main.py + kubectl create configmap my-loadtest-lib --from-file path/to/your/lib/ + ``` + + And then install the chart passing the names of those configmaps as values: + + ```console + helm install locust deliveryhero/locust \ + --set loadtest.name=my-loadtest \ + --set loadtest.locust_locustfile_configmap=my-loadtest-locustfile \ + --set loadtest.locust_lib_configmap=my-loadtest-lib + ``` + digest: 4f6f08b14e413e7f1dffc20ec8e340a64662b38db4b157513a136bcea537f490 + home: https://github.com/locustio/locust + icon: https://locust.io/static/img/logo.png + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: locust + urls: + - charts/locust-0.9.15.tgz + version: 0.9.15 + - apiVersion: v1 + appVersion: 1.4.3 + created: "2021-04-28T09:10:01Z" + description: | + A chart to install Locust, a scalable load testing tool written in Python. + + This chart will setup everything required to run a full distributed locust environment with any amount of workers. + + This chart will also create configmaps for storing the locust files in Kubernetes, this way there is no need to build custom docker images. + + By default it will install using an example locustfile and lib from [stable/locust/locustfiles/example](https://github.com/deliveryhero/helm-charts/tree/master/stable/locust/locustfiles/example). 
When you want to provide your own locustfile, you will need to create 2 configmaps using the structure from that example: + + ```console + kubectl create configmap my-loadtest-locustfile --from-file path/to/your/main.py + kubectl create configmap my-loadtest-lib --from-file path/to/your/lib/ + ``` + + And then install the chart passing the names of those configmaps as values: + + ```console + helm install locust deliveryhero/locust \ + --set loadtest.name=my-loadtest \ + --set loadtest.locust_locustfile_configmap=my-loadtest-locustfile \ + --set loadtest.locust_lib_configmap=my-loadtest-lib + ``` + digest: 112a5ab5422531a610ac858c09e4d19b5eb7cfb1c068dab79d44d85cd6d8516a + home: https://github.com/locustio/locust + icon: https://locust.io/static/img/logo.png + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: locust + urls: + - charts/locust-0.9.14.tgz + version: 0.9.14 + - apiVersion: v1 + appVersion: 1.4.3 + created: "2021-04-26T16:09:17Z" + description: | + A chart to install Locust, a scalable load testing tool written in Python. + + This chart will setup everything required to run a full distributed locust environment with any amount of workers. + + This chart will also create configmaps for storing the locust files in Kubernetes, this way there is no need to build custom docker images. + + By default it will install using an example locustfile and lib from [stable/locust/locustfiles/example](https://github.com/deliveryhero/helm-charts/tree/master/stable/locust/locustfiles/example). 
When you want to provide your own locustfile, you will need to create 2 configmaps using the structure from that example: + + ```console + kubectl create configmap my-loadtest-locustfile --from-file path/to/your/main.py + kubectl create configmap my-loadtest-lib --from-file path/to/your/lib/ + ``` + + And then install the chart passing the names of those configmaps as values: + + ```console + helm install locust deliveryhero/locust \ + --set loadtest.name=my-loadtest \ + --set loadtest.locust_locustfile_configmap=my-loadtest-locustfile \ + --set loadtest.locust_lib_configmap=my-loadtest-lib + ``` + digest: e20de87b86e6f886198cfa257ffda96d0fbdb8e737bb84cc4f516c2db3972b2b + home: https://github.com/locustio/locust + icon: https://locust.io/static/img/logo.png + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: locust + urls: + - charts/locust-0.9.13.tgz + version: 0.9.13 + - apiVersion: v1 + appVersion: 1.4.3 + created: "2021-04-20T10:25:07Z" + description: | + A chart to install Locust, a scalable load testing tool written in Python. + + This chart will setup everything required to run a full distributed locust environment with any amount of workers. + + This chart will also create configmaps for storing the locust files in Kubernetes, this way there is no need to build custom docker images. + + By default it will install using an example locustfile and lib from [stable/locust/locustfiles/example](https://github.com/deliveryhero/helm-charts/tree/master/stable/locust/locustfiles/example). 
When you want to provide your own locustfile, you will need to create 2 configmaps using the structure from that example: + + ```console + kubectl create configmap my-loadtest-locustfile --from-file path/to/your/main.py + kubectl create configmap my-loadtest-lib --from-file path/to/your/lib/ + ``` + + And then install the chart passing the names of those configmaps as values: + + ```console + helm install locust deliveryhero/locust \ + --set loadtest.name=my-loadtest \ + --set loadtest.locust_locustfile_configmap=my-loadtest-locustfile \ + --set loadtest.locust_lib_configmap=my-loadtest-lib + ``` + digest: 34781420e763fad3e0671120c46354054748e762b1b6f73afea7db67cf2c72b7 + home: https://github.com/locustio/locust + icon: https://locust.io/static/img/logo.png + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: locust + urls: + - charts/locust-0.9.12.tgz + version: 0.9.12 + - apiVersion: v1 + appVersion: 1.4.3 + created: "2021-04-16T11:41:40Z" + description: | + A chart to install Locust, a scalable load testing tool written in Python. + + This chart will setup everything required to run a full distributed locust environment with any amount of workers. + + This chart will also create configmaps for storing the locust files in Kubernetes, this way there is no need to build custom docker images. + + By default it will install using an example locustfile and lib from [stable/locust/locustfiles/example](https://github.com/deliveryhero/helm-charts/tree/master/stable/locust/locustfiles/example). 
When you want to provide your own locustfile, you will need to create 2 configmaps using the structure from that example: + + ```console + kubectl create configmap my-loadtest-locustfile --from-file path/to/your/main.py + kubectl create configmap my-loadtest-lib --from-file path/to/your/lib/ + ``` + + And then install the chart passing the names of those configmaps as values: + + ```console + helm install locust deliveryhero/locust \ + --set loadtest.name=my-loadtest \ + --set loadtest.locust_locustfile_configmap=my-loadtest-locustfile \ + --set loadtest.locust_lib_configmap=my-loadtest-lib + ``` + digest: 18c84cb3ec07e6284f0a350bb179d2465d8b591753c2d69c4d4f6f63001bdb5e + home: https://github.com/locustio/locust + icon: https://locust.io/static/img/logo.png + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: locust + urls: + - charts/locust-0.9.11.tgz + version: 0.9.11 + - apiVersion: v1 + appVersion: 1.4.3 + created: "2021-03-17T10:22:49Z" + description: | + A chart to install Locust, a scalable load testing tool written in Python. + + This chart will setup everything required to run a full distributed locust environment with any amount of workers. + + This chart will also create configmaps for storing the locust files in Kubernetes, this way there is no need to build custom docker images. + + By default it will install using an example locustfile and lib from [stable/locust/locustfiles/example](https://github.com/deliveryhero/helm-charts/tree/master/stable/locust/locustfiles/example). 
When you want to provide your own locustfile, you will need to create 2 configmaps using the structure from that example: + + ```console + kubectl create configmap my-loadtest-locustfile --from-file path/to/your/main.py + kubectl create configmap my-loadtest-lib --from-file path/to/your/lib/ + ``` + + And then install the chart passing the names of those configmaps as values: + + ```console + helm install locust deliveryhero/locust \ + --set loadtest.name=my-loadtest \ + --set loadtest.locust_locustfile_configmap=my-loadtest-locustfile \ + --set loadtest.locust_lib_configmap=my-loadtest-lib + ``` + digest: d524c2a4d87bf6371af5ba7fc3423616d11f05d4a0edf1384a1a7fb2ee7d9ead + home: https://github.com/locustio/locust + icon: https://locust.io/static/img/logo.png + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: locust + urls: + - charts/locust-0.9.10.tgz + version: 0.9.10 + - apiVersion: v1 + appVersion: 1.4.1 + created: "2021-03-09T12:04:45Z" + description: | + A chart to install Locust, a scalable load testing tool written in Python. + + This chart will setup everything required to run a full distributed locust environment with any amount of workers. + + This chart will also create configmaps for storing the locust files in Kubernetes, this way there is no need to build custom docker images. + + By default it will install using an example locustfile and lib from [stable/locust/locustfiles/example](https://github.com/deliveryhero/helm-charts/tree/master/stable/locust/locustfiles/example). 
When you want to provide your own locustfile, you will need to create 2 configmaps using the structure from that example: + + ```console + kubectl create configmap my-loadtest-locustfile --from-file path/to/your/main.py + kubectl create configmap my-loadtest-lib --from-file path/to/your/lib/ + ``` + + And then install the chart passing the names of those configmaps as values: + + ```console + helm install locust deliveryhero/locust \ + --set loadtest.name=my-loadtest \ + --set loadtest.locust_locustfile_configmap=my-loadtest-locustfile \ + --set loadtest.locust_lib_configmap=my-loadtest-lib + ``` + digest: c2fa649d7fa6a3f75efdd8e3cf79f7cebe461a57e59bec240a71f694dcd530bd + home: https://github.com/locustio/locust + icon: https://locust.io/static/img/logo.png + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: locust + urls: + - charts/locust-0.9.9.tgz + version: 0.9.9 + - apiVersion: v1 + appVersion: 1.4.1 + created: "2021-01-12T10:09:40Z" + description: | + A chart to install Locust, a scalable load testing tool written in Python. + + This chart will setup everything required to run a full distributed locust environment with any amount of workers. + + This chart will also create configmaps for storing the locust files in Kubernetes, this way there is no need to build custom docker images. + + By default it will install using an example locustfile and lib from [stable/locust/locustfiles/example](https://github.com/deliveryhero/helm-charts/tree/master/stable/locust/locustfiles/example). 
When you want to provide your own locustfile, you will need to create 2 configmaps using the structure from that example: + + ```console + kubectl create configmap my-loadtest-locustfile --from-file path/to/your/main.py + kubectl create configmap my-loadtest-lib --from-file path/to/your/lib/ + ``` + + And then install the chart passing the names of those configmaps as values: + + ```console + helm install locust deliveryhero/locust \ + --set loadtest.name=my-loadtest \ + --set loadtest.locust_locustfile_configmap=my-loadtest-locustfile \ + --set loadtest.locust_lib_configmap=my-loadtest-lib + ``` + digest: 3edc70585ef54eb77732ada4218eb68222386bae57307affd9c464506aa89ed0 + home: https://github.com/locustio/locust + icon: https://locust.io/static/img/logo.png + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: locust + urls: + - charts/locust-0.9.8.tgz + version: 0.9.8 + - apiVersion: v1 + appVersion: 1.4.1 + created: "2021-01-04T13:30:05Z" + description: | + A chart to install Locust, a scalable load testing tool written in Python. + + This chart will setup everything required to run a full distributed locust environment with any amount of workers. + + This chart will also create configmaps for storing the locust files in Kubernetes, this way there is no need to build custom docker images. + + By default it will install using an example locustfile and lib from [stable/locust/locustfiles/example](https://github.com/deliveryhero/helm-charts/tree/master/stable/locust/locustfiles/example). 
When you want to provide your own locustfile, you will need to create 2 configmaps using the structure from that example: + + ```console + kubectl create configmap my-loadtest-locustfile --from-file path/to/your/main.py + kubectl create configmap my-loadtest-lib --from-file path/to/your/lib/ + ``` + + And then install the chart passing the names of those configmaps as values: + + ```console + helm install locust deliveryhero/locust \ + --set loadtest.name=my-loadtest \ + --set loadtest.locust_locustfile_configmap=my-loadtest-locustfile \ + --set loadtest.locust_lib_configmap=my-loadtest-lib + ``` + digest: 2759896a2ab2ac049e2b1aef4435c1695e467094012b811f56cd6fec6404a8d9 + home: https://github.com/locustio/locust + icon: https://locust.io/static/img/logo.png + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: locust + urls: + - charts/locust-0.9.7.tgz + version: 0.9.7 + - apiVersion: v1 + appVersion: 1.4.1 + created: "2020-12-18T11:43:36Z" + description: | + A chart to install Locust, a scalable load testing tool written in Python. + + This chart will setup everything required to run a full distributed locust environment with any amount of workers. + + This chart will also create configmaps for storing the locust files in Kubernetes, this way there is no need to build custom docker images. + + By default it will install using an example locustfile and lib from [stable/locust/locustfiles/example](https://github.com/deliveryhero/helm-charts/tree/master/stable/locust/locustfiles/example). 
When you want to provide your own locustfile, you will need to create 2 configmaps using the structure from that example: + + ```console + kubectl create configmap my-loadtest-locustfile --from-file path/to/your/main.py + kubectl create configmap my-loadtest-lib --from-file path/to/your/lib/ + ``` + + And then install the chart passing the names of those configmaps as values: + + ```console + helm install locust deliveryhero/locust \ + --set loadtest.name=my-loadtest \ + --set loadtest.locust_locustfile_configmap=my-loadtest-locustfile \ + --set loadtest.locust_lib_configmap=my-loadtest-lib + ``` + digest: 4604e81f4f4b55f6e0fd9952a2a72f3974bb3babac49207de924de20abb8ca27 + home: https://github.com/locustio/locust + icon: https://locust.io/static/img/logo.png + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: locust + urls: + - charts/locust-0.9.6.tgz + version: 0.9.6 + - apiVersion: v1 + appVersion: 1.4.1 + created: "2020-12-08T08:20:50Z" + description: | + A chart to install Locust, a scalable load testing tool written in Python. + + This chart will setup everything required to run a full distributed locust environment with any amount of workers. + + This chart will also create configmaps for storing the locust files in Kubernetes, this way there is no need to build custom docker images. + + By default it will install using an example locustfile and lib from [stable/locust/locustfiles/example](https://github.com/deliveryhero/helm-charts/tree/master/stable/locust/locustfiles/example). 
When you want to provide your own locustfile, you will need to create 2 configmaps using the structure from that example: + + ```console + kubectl create configmap my-loadtest-locustfile --from-file path/to/your/main.py + kubectl create configmap my-loadtest-lib --from-file path/to/your/lib/ + ``` + + And then install the chart passing the names of those configmaps as values: + + ```console + helm install locust deliveryhero/locust \ + --set loadtest.name=my-loadtest \ + --set loadtest.locust_locustfile_configmap=my-loadtest-locustfile \ + --set loadtest.locust_lib_configmap=my-loadtest-lib + ``` + digest: 5c82d7fff2801e7c792269909ca5f4472116fda7fb9b9135c31d39d3eaaad70c + home: https://github.com/locustio/locust + icon: https://locust.io/static/img/logo.png + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: locust + urls: + - charts/locust-0.9.5.tgz + version: 0.9.5 + - apiVersion: v1 + appVersion: 1.4.1 + created: "2020-12-01T16:07:06Z" + description: | + A chart to install Locust, a scalable load testing tool written in Python. + + This chart will setup everything required to run a full distributed locust environment with any amount of workers. + + This chart will also create configmaps for storing the locust files in Kubernetes, this way there is no need to build custom docker images. + + By default it will install using an example locustfile and lib from [stable/locust/locustfiles/example](https://github.com/deliveryhero/helm-charts/tree/master/stable/locust/locustfiles/example). 
When you want to provide your own locustfile, you will need to create 2 configmaps using the structure from that example: + + ```console + kubectl create configmap my-loadtest-locustfile --from-file path/to/your/main.py + kubectl create configmap my-loadtest-lib --from-file path/to/your/lib/ + ``` + + And then install the chart passing the names of those configmaps as values: + + ```console + helm install locust deliveryhero/locust \ + --set loadtest.name=my-loadtest \ + --set loadtest.locust_locustfile_configmap=my-loadtest-locustfile \ + --set loadtest.locust_lib_configmap=my-loadtest-lib + ``` + digest: e7bcdb71b59ef8e6c03737e8e62217ae40671d8f2179462913da263ed2a1ae9d + home: https://github.com/locustio/locust + icon: https://locust.io/static/img/logo.png + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: locust + urls: + - charts/locust-0.9.4.tgz + version: 0.9.4 + - apiVersion: v1 + appVersion: 1.4.1 + created: "2020-11-27T10:30:40Z" + description: | + A chart to install Locust, a scalable load testing tool written in Python. + + This chart will setup everything required to run a full distributed locust environment with any amount of workers. + + This chart will also create configmaps for storing the locust files in Kubernetes, this way there is no need to build custom docker images. + + By default it will install using an example locustfile and lib from [stable/locust/locustfiles/example](https://github.com/deliveryhero/helm-charts/tree/master/stable/locust/locustfiles/example). 
When you want to provide your own locustfile, you will need to create 2 configmaps using the structure from that example: + + ```console + kubectl create configmap my-loadtest-locustfile --from-file path/to/your/main.py + kubectl create configmap my-loadtest-lib --from-file path/to/your/lib/ + ``` + + And then install the chart passing the names of those configmaps as values: + + ```console + helm install locust deliveryhero/locust \ + --set loadtest.name=my-loadtest \ + --set loadtest.locust_locustfile_configmap=my-loadtest-locustfile \ + --set loadtest.locust_lib_configmap=my-loadtest-lib + ``` + digest: a0585e1e10e36e46423fffda5492a6d11c854bfa04c0f48cc91f7dd886ca7dad + home: https://github.com/locustio/locust + icon: https://locust.io/static/img/logo.png + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: locust + urls: + - charts/locust-0.9.3.tgz + version: 0.9.3 + - apiVersion: v1 + appVersion: 1.4.1 + created: "2020-11-26T09:40:53Z" + description: | + A chart to install Locust, a scalable load testing tool written in Python. + + This chart will setup everything required to run a full distributed locust environment with any amount of workers. + + This chart will also create configmaps for storing the locust files in Kubernetes, this way there is no need to build custom docker images. + + By default it will install using an example locustfile and lib from [stable/locust/locustfiles/example](https://github.com/deliveryhero/helm-charts/tree/master/stable/locust/locustfiles/example). 
When you want to provide your own locustfile, you will need to create 2 configmaps using the structure from that example: + + ```console + kubectl create configmap my-loadtest-locustfile --from-file path/to/your/main.py + kubectl create configmap my-loadtest-lib --from-file path/to/your/lib/ + ``` + + And then install the chart passing the names of those configmaps as values: + + ```console + helm install locust deliveryhero/locust \ + --set loadtest.name=my-loadtest \ + --set loadtest.locust_locustfile_configmap=my-loadtest-locustfile \ + --set loadtest.locust_lib_configmap=my-loadtest-lib + ``` + digest: 6a81ccae6867e0e18f8d38b26401f14aa64d9877834aec04d6e808db52ff15f5 + home: https://github.com/locustio/locust + icon: https://locust.io/static/img/logo.png + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: locust + urls: + - charts/locust-0.9.2.tgz + version: 0.9.2 + - apiVersion: v1 + appVersion: 1.4.1 + created: "2020-11-25T13:24:37Z" + description: | + A chart to install Locust, a scalable load testing tool written in Python. + + This chart will setup everything required to run a full distributed locust environment with any amount of workers. + + This chart will also create configmaps for storing the locust files in Kubernetes, this way there is no need to build custom docker images. + + By default it will install using an example locustfile and lib from [stable/locust/locustfiles/example](https://github.com/deliveryhero/helm-charts/tree/master/stable/locust/locustfiles/example). 
When you want to provide your own locustfile, you will need to create 2 configmaps using the structure from that example: + + ```console + kubectl create configmap my-loadtest-locustfile --from-file path/to/your/main.py + kubectl create configmap my-loadtest-lib --from-file path/to/your/lib/ + ``` + + And then install the chart passing the names of those configmaps as values: + + ```console + helm install locust deliveryhero/locust \ + --set loadtest.name=my-loadtest \ + --set loadtest.locust_locustfile_configmap=my-loadtest-locustfile \ + --set loadtest.locust_lib_configmap=my-loadtest-lib + ``` + digest: 68a6d33ac3024767b6d9f2d9df3d9b2be840c5699923882c1060efa56a4e7010 + home: https://github.com/locustio/locust + icon: https://locust.io/static/img/logo.png + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: locust + urls: + - charts/locust-0.9.1.tgz + version: 0.9.1 + - apiVersion: v1 + appVersion: 1.4.1 + created: "2020-11-24T14:13:12Z" + description: | + A chart to install Locust, a scalable load testing tool written in Python. + + This chart will setup everything required to run a full distributed locust environment with any amount of workers. + + This chart will also create configmaps for storing the locust files in Kubernetes, this way there is no need to build custom docker images. + + By default it will install using an example locustfile and lib from [stable/locust/locustfiles/example](https://github.com/deliveryhero/helm-charts/tree/master/stable/locust/locustfiles/example). 
When you want to provide your own locustfile, you will need to create 2 configmaps using the structure from that example: + + ```console + kubectl create configmap my-loadtest-locustfile --from-file path/to/your/main.py + kubectl create configmap my-loadtest-lib --from-file path/to/your/lib/ + ``` + + And then install the chart passing the names of those configmaps as values: + + ```console + helm install locust deliveryhero/locust \ + --set loadtest.name=my-loadtest \ + --set loadtest.locust_locustfile_configmap=my-loadtest-locustfile \ + --set loadtest.locust_lib_configmap=my-loadtest-lib + ``` + digest: 4b0d45d73de15c67049bd5eab7bfe16b5bd7d31647bb8141b5da6aa963370d09 + home: https://github.com/locustio/locust + icon: https://locust.io/static/img/logo.png + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: locust + urls: + - charts/locust-0.9.tgz + version: "0.9" + - apiVersion: v1 + appVersion: 1.3.0 + created: "2020-11-23T17:15:56Z" + description: | + A chart to install Locust, a scalable load testing tool written in Python. + + This chart will setup everything required to run a full distributed locust environment with any amount of workers. + + This chart will also create configmaps for storing the locust files in Kubernetes, this way there is no need to build custom docker images. + + By default it will install using an example locustfile and lib from [stable/locust/locustfiles/example](https://github.com/deliveryhero/helm-charts/tree/master/stable/locust/locustfiles/example). 
When you want to provide your own locustfile, you will need to create 2 configmaps using the structure from that example: + + ```console + kubectl create configmap my-loadtest-locustfile --from-file path/to/your/main.py + kubectl create configmap my-loadtest-lib --from-file path/to/your/lib/ + ``` + + And then install the chart passing the names of those configmaps as values: + + ```console + helm install locust deliveryhero/locust \ + --set loadtest.name=my-loadtest \ + --set loadtest.locust_locustfile_configmap=my-loadtest-locustfile \ + --set loadtest.locust_lib_configmap=my-loadtest-lib + ``` + digest: cac8197a60d2469318d544ac2f6fe3ac310f367d88feded35c000600cf5ee36c + home: https://github.com/locustio/locust + icon: https://locust.io/static/img/logo.png + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: locust + urls: + - charts/locust-0.8.tgz + version: "0.8" + - apiVersion: v1 + appVersion: 1.3.0 + created: "2020-11-17T11:31:21Z" + description: | + A chart to install Locust, a scalable load testing tool written in Python. + + This chart will setup everything required to run a full distributed locust environment with any amount of workers. + + This chart will also create configmaps for storing the locust files in Kubernetes, this way there is no need to build custom docker images. + + By default it will install using an example locustfile and lib from [stable/locust/locustfiles/example](https://github.com/deliveryhero/helm-charts/tree/master/stable/locust/locustfiles/example). 
When you want to provide your own locustfile, you will need to create 2 configmaps using the structure from that example: + + ```console + kubectl create configmap my-loadtest-locustfile --from-file path/to/your/main.py + kubectl create configmap my-loadtest-lib --from-file path/to/your/lib/ + ``` + + And then install the chart passing the names of those configmaps as values: + + ```console + helm install locust deliveryhero/locust \ + --set loadtest.name=my-loadtest \ + --set loadtest.locust_locustfile_configmap=my-loadtest-locustfile \ + --set loadtest.locust_lib_configmap=my-loadtest-lib + ``` + digest: 0c4c16cdccdfea1d2c863248138eb7ec43f27b2b51f403f7acba6a7431d66271 + home: https://github.com/locustio/locust + icon: https://locust.io/static/img/logo.png + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: locust + urls: + - charts/locust-0.7.tgz + version: "0.7" + - apiVersion: v1 + appVersion: 1.3.0 + created: "2020-11-13T08:32:05Z" + description: | + A chart to install Locust, a scalable load testing tool written in Python. + + This chart will setup everything required to run a full distributed locust environment with any amount of workers. + + This chart will also create configmaps for storing the locust files in Kubernetes, this way there is no need to build custom docker images. + + By default it will install using an example locustfile and lib from [stable/locust/locustfiles/example](https://github.com/deliveryhero/helm-charts/tree/master/stable/locust/locustfiles/example). 
When you want to provide your own locustfile, you will need to create 2 configmaps using the structure from that example: + + ```console + kubectl create configmap my-loadtest-locustfile --from-file path/to/your/main.py + kubectl create configmap my-loadtest-lib --from-file path/to/your/lib/ + ``` + + And then install the chart passing the names of those configmaps as values: + + ```console + helm install locust deliveryhero/locust \ + --set loadtest.name=my-loadtest \ + --set loadtest.locust_locustfile_configmap=my-loadtest-locustfile \ + --set loadtest.locust_lib_configmap=my-loadtest-lib + ``` + digest: 368c420ae7d87209cd01f97dee69c2df41192402801baa1e33965a1641dfbe39 + home: https://github.com/locustio/locust + icon: https://locust.io/static/img/logo.png + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: locust + urls: + - charts/locust-0.6.tgz + version: "0.6" + - apiVersion: v1 + appVersion: 1.2.3 + created: "2020-10-29T10:51:19Z" + description: | + A chart to install Locust, a scalable load testing tool written in Python. + + This chart will setup everything required to run a full distributed locust environment with any amount of workers. + + This chart will also create configmaps for storing the locust files in Kubernetes, this way there is no need to build custom docker images. + + By default it will install using an example locustfile and lib from [stable/locust/locustfiles/example](https://github.com/deliveryhero/helm-charts/tree/master/stable/locust/locustfiles/example). 
When you want to provide your own locustfile, you will need to create 2 configmaps using the structure from that example: + + ```console + kubectl create configmap my-loadtest-locustfile --from-file path/to/your/main.py + kubectl create configmap my-loadtest-lib --from-file path/to/your/lib/ + ``` + + And then install the chart passing the names of those configmaps as values: + + ```console + helm install locust deliveryhero/locust \ + --set loadtest.name=my-loadtest \ + --set loadtest.locust_locustfile_configmap=my-loadtest-locustfile \ + --set loadtest.locust_lib_configmap=my-loadtest-lib + ``` + digest: 4b23cbbfc4946294d089dfd9b62b4d13751db799a7bdebaddbd973072a71db52 + home: https://github.com/locustio/locust + icon: https://locust.io/static/img/logo.png + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: locust + urls: + - charts/locust-0.5.tgz + version: "0.5" + - apiVersion: v1 + appVersion: 1.2.3 + created: "2020-10-14T15:39:28Z" + description: | + A chart to install Locust, a scalable load testing tool written in Python. + + This chart will setup everything required to run a full distributed locust environment with any amount of workers. + + This chart will also create configmaps for storing the locust files in Kubernetes, this way there is no need to build custom docker images. + + By default it will install using an example locustfile and lib from [stable/locust/locustfiles/example](https://github.com/deliveryhero/helm-charts/tree/master/stable/locust/locustfiles/example). 
When you want to provide your own locustfile, you will need to create 2 configmaps using the structure from that example: + + ```console + kubectl create configmap my-loadtest-locustfile --from-file path/to/your/main.py + kubectl create configmap my-loadtest-lib --from-file path/to/your/lib/ + ``` + + And then install the chart passing the names of those configmaps as values: + + ```console + helm install locust deliveryhero/locust \ + --set loadtest.name=my-loadtest \ + --set loadtest.locust_locustfile_configmap=my-loadtest-locustfile \ + --set loadtest.locust_lib_configmap=my-loadtest-lib + ``` + digest: cbe3b1d712ebcd6b2d281018524577bccf98b42d421185dc37385bfa32c2e7a9 + home: https://github.com/locustio/locust + icon: https://locust.io/static/img/logo.png + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: locust + urls: + - charts/locust-0.4.tgz + version: "0.4" + - apiVersion: v1 + appVersion: 1.2.3 + created: "2020-10-09T16:37:54Z" + description: | + A chart to install Locust, a scalable load testing tool written in Python. + + This chart will setup everything required to run a full distributed locust environment with any amount of workers. + + This chart will also create configmaps for storing the locust files in Kubernetes, this way there is no need to build custom docker images. + + By default it will install using an example locustfile and lib from [stable/locust/locustfiles/example](https://github.com/deliveryhero/helm-charts/tree/master/stable/locust/locustfiles/example). 
When you want to provide your own locustfile, you will need to create 2 configmaps using the structure from that example: + + ```console + kubectl create configmap my-loadtest-locustfile --from-file path/to/your/main.py + kubectl create configmap my-loadtest-lib --from-file path/to/your/lib/ + ``` + + And then install the chart passing the names of those configmaps as values: + + ```console + helm install locust deliveryhero/locust \ + --set loadtest.name=my-loadtest \ + --set loadtest.locust_locustfile_configmap=my-loadtest-locustfile \ + --set loadtest.locust_lib_configmap=my-loadtest-lib + ``` + digest: dc9befe254a6ca29ead9aa4166b34c954cf4fb4a873bcc0613a51a9a1805c968 + home: https://github.com/locustio/locust + icon: https://locust.io/static/img/logo.png + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: locust + urls: + - charts/locust-0.3.tgz + version: "0.3" + - apiVersion: v1 + appVersion: 1.2.3 + created: "2020-09-21T09:08:55Z" + description: | + A chart to install Locust, a scalable load testing tool written in Python. + + This chart will setup everything required to run a full distributed locust environment with any amount of workers. + + This chart will also create configmaps for storing the locust files in Kubernetes, this way there is no need to build custom docker images. + + By default it will install using an example locustfile and lib from [stable/locust/locustfiles/example](https://github.com/deliveryhero/helm-charts/tree/master/stable/locust/locustfiles/example). 
When you want to provide your own locustfile, you will need to create 2 configmaps using the structure from that example: + + ```console + kubectl create configmap my-loadtest-locustfile --from-file path/to/your/main.py + kubectl create configmap my-loadtest-lib --from-file path/to/your/lib/ + ``` + + And then install the chart passing the names of those configmaps as values: + + ```console + helm install locust deliveryhero/locust \ + --set loadtest.name=my-loadtest \ + --set loadtest.locust_locustfile_configmap=my-loadtest-locustfile \ + --set loadtest.locust_lib_configmap=my-loadtest-lib + ``` + digest: 265c11e444581a14d610d784de19343ce2a95db42995ad8e82d611f681a2278d + home: https://github.com/locustio/locust + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: locust + urls: + - charts/locust-0.2.tgz + version: "0.2" + - apiVersion: v1 + appVersion: 1.2.1 + created: "2020-08-31T14:10:44Z" + description: | + A chart to install Locust, a scalable user load testing tool written in Python. + + This chart will setup everything required to run a full distributed locust environment with any amount of workers. + + This chart will also create configmaps for storing the locust files in Kubernetes, this way there is no need to build custom docker images. + digest: 5b6ef0d512165b35b29b126d6a243b9e1a8d5b79bab1dd570342d3a058d3395d + home: https://github.com/locustio/locust + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: locust + urls: + - charts/locust-0.0.2.tgz + version: 0.0.2 + metabase: + - apiVersion: v2 + appVersion: v0.45.2 + created: "2024-10-04T09:09:53Z" + description: The easy, open source way for everyone in your company to ask questions + and learn from data. 
+ digest: 5262b2ced088511cbe6cf230a856947e2ef3061aa488dbc965971d5a667d1c10 + home: http://www.metabase.com/ + icon: http://www.metabase.com/images/logo.svg + maintainers: + - email: no-reply@deliveryhero.com + name: nyambati + name: metabase + sources: + - https://github.com/metabase/metabase + urls: + - charts/metabase-0.14.3.tgz + version: 0.14.3 + - apiVersion: v2 + appVersion: v0.45.2 + created: "2023-05-04T09:43:28Z" + description: The easy, open source way for everyone in your company to ask questions + and learn from data. + digest: 52d184558a1c8f5d09e956df1dc59168b8e2de5862f4c182ea17c6675afc4892 + home: http://www.metabase.com/ + icon: http://www.metabase.com/images/logo.svg + maintainers: + - email: no-reply@deliveryhero.com + name: nyambati + name: metabase + sources: + - https://github.com/metabase/metabase + urls: + - charts/metabase-0.14.2.tgz + version: 0.14.2 + - apiVersion: v2 + appVersion: v0.45.2 + created: "2023-01-27T10:44:43Z" + description: The easy, open source way for everyone in your company to ask questions + and learn from data. + digest: ee051598f93d0fc20f26892a7430682aee4da8397dcaba3ceb66dfcea69eddac + home: http://www.metabase.com/ + icon: http://www.metabase.com/images/logo.svg + maintainers: + - email: no-reply@deliveryhero.com + name: nyambati + name: metabase + sources: + - https://github.com/metabase/metabase + urls: + - charts/metabase-0.14.1.tgz + version: 0.14.1 + - apiVersion: v2 + appVersion: v0.45.2 + created: "2023-01-23T13:31:59Z" + description: The easy, open source way for everyone in your company to ask questions + and learn from data. 
+ digest: 2f0d98584bc94f57f8918ec2baf8c2d4dfa76c85104119fc2d6278daabe07fc2 + home: http://www.metabase.com/ + icon: http://www.metabase.com/images/logo.svg + maintainers: + - email: no-reply@deliveryhero.com + name: nyambati + name: metabase + sources: + - https://github.com/metabase/metabase + urls: + - charts/metabase-0.14.0.tgz + version: 0.14.0 + - apiVersion: v1 + appVersion: v0.43 + created: "2023-01-19T15:18:26Z" + description: The easy, open source way for everyone in your company to ask questions + and learn from data. + digest: ec3bdd184c4cf894c3dee0b9056e7c4715b2f1bf4becbd1b4e03fbb2fb325c0e + home: http://www.metabase.com/ + icon: http://www.metabase.com/images/logo.svg + maintainers: + - email: no-reply@deliveryhero.com + name: nyambati + name: metabase + sources: + - https://github.com/metabase/metabase + urls: + - charts/metabase-0.13.5.tgz + version: 0.13.5 + - apiVersion: v1 + appVersion: v0.43 + created: "2022-12-05T16:32:13Z" + description: The easy, open source way for everyone in your company to ask questions + and learn from data. + digest: d4f917fbbb26a5e67d6cc55ec437c9870c73679c92b738e1df416153659b3069 + home: http://www.metabase.com/ + icon: http://www.metabase.com/images/logo.svg + maintainers: + - email: no-reply@deliveryhero.com + name: nyambati + name: metabase + sources: + - https://github.com/metabase/metabase + urls: + - charts/metabase-0.13.4.tgz + version: 0.13.4 + - apiVersion: v1 + appVersion: v0.43 + created: "2022-09-16T14:25:33Z" + description: The easy, open source way for everyone in your company to ask questions + and learn from data. 
+ digest: 82d6a8173b0d6220d3db330ec7d53cfafc1cde271969b7aeccdacf1240b95244 + home: http://www.metabase.com/ + icon: http://www.metabase.com/images/logo.svg + maintainers: + - email: no-reply@deliveryhero.com + name: nyambati + name: metabase + sources: + - https://github.com/metabase/metabase + urls: + - charts/metabase-0.13.3.tgz + version: 0.13.3 + - apiVersion: v1 + appVersion: v0.43 + created: "2022-07-07T08:43:18Z" + description: The easy, open source way for everyone in your company to ask questions + and learn from data. + digest: f99b75fa7ce58bdb58a5b79fecc8922e75f01ab885fe45fc97292c86102827ca + home: http://www.metabase.com/ + icon: http://www.metabase.com/images/logo.svg + maintainers: + - email: no-reply@deliveryhero.com + name: nyambati + name: metabase + sources: + - https://github.com/metabase/metabase + urls: + - charts/metabase-0.13.2.tgz + version: 0.13.2 + mlflow: + - apiVersion: v2 + appVersion: 1.9.1 + created: "2024-10-04T09:09:54Z" + description: 'A Helm chart to install MLflow tracking, a tool to track Machine + Learning experiments. This Helm chart is using Postgresql as backend and S3 + as artifact store. Contributions for other backends and artifacts store are + welcome. ' + digest: 8958a6e177b4cb80e860cdccbe1206457a143ab1e3213ad0e8af863298c1d035 + home: https://www.mlflow.org/ + icon: https://www.mlflow.org/docs/latest/_static/MLflow-logo-final-black.png + keywords: + - mlflow + - mlops + - datascience + - machinelearning + maintainers: + - email: no-reply@deliveryhero.com + name: mkuhn + url: https://github.com/magdalenakuhn17 + name: mlflow + sources: + - https://github.com/mlflow + type: application + urls: + - charts/mlflow-1.0.9.tgz + version: 1.0.9 + - apiVersion: v2 + appVersion: 1.9.1 + created: "2023-11-14T10:58:34Z" + description: 'A Helm chart to install MLflow tracking, a tool to track Machine + Learning experiments. This Helm chart is using Postgresql as backend and S3 + as artifact store. 
Contributions for other backends and artifacts store are + welcome. ' + digest: 1014fc78fc525817aecb477f53d1a749a15d32559971c9594577493fa41eaa7a + home: https://www.mlflow.org/ + icon: https://www.mlflow.org/docs/latest/_static/MLflow-logo-final-black.png + keywords: + - mlflow + - mlops + - datascience + - machinelearning + maintainers: + - email: magdalena.kuhn@deliveryhero.com + name: mkuhn + url: https://github.com/lena-kuhn + name: mlflow + sources: + - https://github.com/mlflow + type: application + urls: + - charts/mlflow-1.0.8.tgz + version: 1.0.8 + - apiVersion: v2 + appVersion: 1.9.1 + created: "2022-06-30T12:01:34Z" + description: 'A Helm chart to install MLflow tracking, a tool to track Machine + Learning experiments. This Helm chart is using Postgresql as backend and S3 + as artifact store. Contributions for other backends and artifacts store are + welcome. ' + digest: b8a501e40b3ffb35a1524fd34c6851cee89841c701a5881efe3a991901325b3f + home: https://www.mlflow.org/ + icon: https://www.mlflow.org/docs/latest/_static/MLflow-logo-final-black.png + keywords: + - mlflow + - mlops + - datascience + - machinelearning + maintainers: + - email: magdalena.kuhn@deliveryhero.com + name: mkuhn + url: https://github.com/lena-kuhn + name: mlflow + sources: + - https://github.com/mlflow + type: application + urls: + - charts/mlflow-1.0.7.tgz + version: 1.0.7 + - apiVersion: v2 + appVersion: 1.9.1 + created: "2022-06-30T11:40:41Z" + description: 'A Helm chart to install MLflow tracking, a tool to track Machine + Learning experiments. This Helm chart is using Postgresql as backend and S3 + as artifact store. Contributions for other backends and artifacts store are + welcome. 
' + digest: ef64aea9d6eff2b952806397125b856e25a0132bead72a30fcde5729e82fe0be + home: https://www.mlflow.org/ + icon: https://www.mlflow.org/docs/latest/_static/MLflow-logo-final-black.png + keywords: + - mlflow + - mlops + - datascience + - machinelearning + maintainers: + - email: magdalena.kuhn@deliveryhero.com + name: mkuhn + url: https://github.com/lena-kuhn + name: mlflow + sources: + - https://github.com/mlflow + type: application + urls: + - charts/mlflow-1.0.6.tgz + version: 1.0.6 + - apiVersion: v2 + appVersion: 1.9.1 + created: "2022-06-29T14:22:30Z" + description: 'A Helm chart to install MLflow tracking, a tool to track Machine + Learning experiments. This Helm chart is using Postgresql as backend and S3 + as artifact store. Contributions for other backends and artifacts store are + welcome. ' + digest: db7e10932b5d3c497c96d225dbaec863dd0db9bc513b7b30d47fbcec462b0d87 + home: https://www.mlflow.org/ + icon: https://www.mlflow.org/docs/latest/_static/MLflow-logo-final-black.png + keywords: + - mlflow + - mlops + - datascience + - machinelearning + maintainers: + - email: magdalena.kuhn@deliveryhero.com + name: mkuhn + url: https://github.com/lena-kuhn + name: mlflow + sources: + - https://github.com/mlflow + type: application + urls: + - charts/mlflow-1.0.5.tgz + version: 1.0.5 + - apiVersion: v2 + appVersion: 1.9.1 + created: "2022-01-28T11:49:58Z" + description: 'A Helm chart to install MLflow tracking, a tool to track Machine + Learning experiments. This Helm chart is using Postgresql as backend and S3 + as artifact store. Contributions for other backends and artifacts store are + welcome. 
' + digest: 2f621d85ec2e1591d0e69b4555d34b4d617b7b34b0331d0583e4ba5d9f14fcbc + home: https://www.mlflow.org/ + icon: https://www.mlflow.org/docs/latest/_static/MLflow-logo-final-black.png + keywords: + - mlflow + - mlops + - datascience + - machinelearning + maintainers: + - email: magdalena.kuhn@deliveryhero.com + name: mkuhn + url: https://github.com/lena-kuhn + name: mlflow + sources: + - https://github.com/mlflow + type: application + urls: + - charts/mlflow-1.0.4.tgz + version: 1.0.4 + - apiVersion: v2 + appVersion: 1.9.1 + created: "2021-09-02T11:46:34Z" + description: | + A Helm chart to install MLflow tracking, a tool to track Machine Learning experiments. + + This Helm chart is using Postgresql as backend and S3 as artifact store. + Contributions for other backends and artifacts store are welcome. + digest: 333e5724edf949009ad33de81581b2f92ce21bfc261c7101a18805d34a4e2911 + home: https://www.mlflow.org/ + icon: https://www.mlflow.org/docs/latest/_static/MLflow-logo-final-black.png + keywords: + - mlflow + - mlops + - datascience + - machinelearning + maintainers: + - email: magdalena.kuhn@deliveryhero.com + name: mkuhn + url: https://github.com/lena-kuhn + name: mlflow + sources: + - https://github.com/mlflow + type: application + urls: + - charts/mlflow-1.0.3.tgz + version: 1.0.3 + - apiVersion: v2 + appVersion: 1.9.1 + created: "2021-09-01T07:23:37Z" + description: | + A Helm chart to install MLflow tracking, a tool to track Machine Learning experiments. + + This Helm chart is using Postgresql as backend and S3 as artifact store. + Contributions for other backends and artifacts store are welcome. 
+ digest: 059b037d101d746825c47104353550e5e89e713ddede0773ca0e0f088e04326b + home: https://www.mlflow.org/ + icon: https://www.mlflow.org/docs/latest/_static/MLflow-logo-final-black.png + keywords: + - mlflow + - mlops + - datascience + - machinelearning + maintainers: + - email: magdalena.kuhn@deliveryhero.com + name: mkuhn + url: https://github.com/lena-kuhn + name: mlflow + sources: + - https://github.com/mlflow + type: application + urls: + - charts/mlflow-1.0.2.tgz + version: 1.0.2 + - apiVersion: v2 + appVersion: 1.9.1 + created: "2021-08-20T15:54:05Z" + description: | + A Helm chart to install MLflow tracking, a tool to track Machine Learning experiments. + + This Helm chart is using Postgresql as backend and S3 as artifact store. + Contributions for other backends and artifacts store are welcome. + digest: 47a26b9df9f8c6b3f66e86cc8f1b972251a34f9f2672e2e6ec876d7e3cc1d065 + home: https://www.mlflow.org/ + icon: https://www.mlflow.org/docs/latest/_static/MLflow-logo-final-black.png + keywords: + - mlflow + - mlops + - datascience + - machinelearning + maintainers: + - email: magdalena.kuhn@deliveryhero.com + name: mkuhn + url: https://github.com/lena-kuhn + name: mlflow + sources: + - https://github.com/mlflow + type: application + urls: + - charts/mlflow-1.0.1.tgz + version: 1.0.1 + - apiVersion: v2 + appVersion: latest + created: "2021-08-19T11:05:09Z" + description: | + A Helm chart to install MLflow tracking, a tool to track Machine Learning experiments. + + This Helm chart is using Postgresql as backend and S3 as artifact store. + Contributions for other backends and artifacts store are welcome. 
+ digest: 3f170463303feb228af3b6a4b7962695520ba17a90dc93679bed686821814644 + home: https://www.mlflow.org/ + icon: https://www.mlflow.org/docs/latest/_static/MLflow-logo-final-black.png + keywords: + - mlflow + - mlops + - datascience + - machinelearning + maintainers: + - email: magdalena.kuhn@deliveryhero.com + name: mkuhn + url: https://github.com/lena-kuhn + name: mlflow + sources: + - https://github.com/mlflow + type: application + urls: + - charts/mlflow-1.0.0.tgz + version: 1.0.0 + net-exporter: + - annotations: + config.giantswarm.io/version: 1.x.x + apiVersion: v1 + appVersion: 1.10.3 + created: "2024-10-04T09:09:54Z" + description: Helm chart for net-exporter. + digest: 8eeebc421a7190188439d6ba5202e8110c7fd51262c44694778988dc9430091d + home: https://github.com/giantswarm/net-exporter + maintainers: + - name: pciang + url: https://github.com/pciang + name: net-exporter + urls: + - charts/net-exporter-1.10.4.tgz + version: 1.10.4 + - annotations: + config.giantswarm.io/version: 1.x.x + apiVersion: v1 + appVersion: 1.10.3 + created: "2022-02-18T11:50:01Z" + description: Helm chart for net-exporter. 
+ digest: 2b3117c0f37d224cb56192ae0994000ebe5b6fa320090bba9a4e5770239bcd5f + home: https://github.com/giantswarm/net-exporter + maintainers: + - name: pciang + url: https://github.com/pciang + name: net-exporter + urls: + - charts/net-exporter-1.10.3.tgz + version: 1.10.3 + newrelic-controller: + - apiVersion: v1 + appVersion: "0.8" + created: "2024-10-04T09:09:54Z" + description: A controller for managing New Relic settings + digest: a0663f5b9b6f05f558c2d82e7354faccbccc60cdd1a9be020037505ccc462ada + home: https://github.com/max-rocket-internet/newrelic-controller + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: newrelic-controller + sources: + - https://github.com/max-rocket-internet/newrelic-controller + urls: + - charts/newrelic-controller-1.1.tgz + version: "1.1" + - apiVersion: v1 + appVersion: "0.8" + created: "2021-04-30T09:30:34Z" + description: A controller for managing New Relic settings + digest: d936a58b5db097c6a8e24e02d928275f35346992b5a6f0ee99e6e976290274e6 + home: https://github.com/max-rocket-internet/newrelic-controller + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: newrelic-controller + sources: + - https://github.com/max-rocket-internet/newrelic-controller + urls: + - charts/newrelic-controller-1.0.tgz + version: "1.0" + node-local-dns: + - apiVersion: v2 + appVersion: 1.23.1 + created: "2024-10-04T09:09:55Z" + description: 'A chart to install node-local-dns. NodeLocal DNSCache improves + Cluster DNS performance by running a DNS caching agent on cluster nodes as a + DaemonSet. In today''s architecture, Pods in ''ClusterFirst'' DNS mode reach + out to a kube-dns serviceIP for DNS queries. This is translated to a kube-dns/CoreDNS + endpoint via iptables rules added by kube-proxy. With this new architecture, + Pods will reach out to the DNS caching agent running on the same node, thereby + avoiding iptables DNAT rules and connection tracking. 
The local caching agent + will query kube-dns service for cache misses of cluster hostnames ("cluster.local" + suffix by default). Further documentation is [here](https://kubernetes.io/docs/tasks/administer-cluster/nodelocaldns/) This + helm chart works for both kube-proxy setups (iptables or ipvs). ' + digest: 52cefb77069ede9c8ebe1b37cb5cd62f762e8f623ccaad7b1611e2257a01b40d + icon: https://miro.medium.com/max/641/1*q3vcbyVXcvaRnFLEQEHrXw.png + maintainers: + - email: no-reply@deliveryhero.com + name: gabrieladt + name: node-local-dns + urls: + - charts/node-local-dns-2.1.0.tgz + version: 2.1.0 + - apiVersion: v2 + appVersion: 1.23.1 + created: "2024-09-19T08:00:31Z" + description: 'A chart to install node-local-dns. NodeLocal DNSCache improves + Cluster DNS performance by running a DNS caching agent on cluster nodes as a + DaemonSet. In today''s architecture, Pods in ''ClusterFirst'' DNS mode reach + out to a kube-dns serviceIP for DNS queries. This is translated to a kube-dns/CoreDNS + endpoint via iptables rules added by kube-proxy. With this new architecture, + Pods will reach out to the DNS caching agent running on the same node, thereby + avoiding iptables DNAT rules and connection tracking. The local caching agent + will query kube-dns service for cache misses of cluster hostnames ("cluster.local" + suffix by default). Further documentation is [here](https://kubernetes.io/docs/tasks/administer-cluster/nodelocaldns/) This + helm chart works for both kube-proxy setups (iptables or ipvs). ' + digest: 7b0d9287a38c6b49e9f3ce62ac49c1146438797be3ef6c5bd0d007e9fce575a3 + icon: https://miro.medium.com/max/641/1*q3vcbyVXcvaRnFLEQEHrXw.png + maintainers: + - email: no-reply@deliveryhero.com + name: gabrieladt + name: node-local-dns + urls: + - charts/node-local-dns-2.0.14.tgz + version: 2.0.14 + - apiVersion: v2 + appVersion: 1.23.1 + created: "2024-07-09T09:29:02Z" + description: 'A chart to install node-local-dns. 
NodeLocal DNSCache improves + Cluster DNS performance by running a DNS caching agent on cluster nodes as a + DaemonSet. In today''s architecture, Pods in ''ClusterFirst'' DNS mode reach + out to a kube-dns serviceIP for DNS queries. This is translated to a kube-dns/CoreDNS + endpoint via iptables rules added by kube-proxy. With this new architecture, + Pods will reach out to the DNS caching agent running on the same node, thereby + avoiding iptables DNAT rules and connection tracking. The local caching agent + will query kube-dns service for cache misses of cluster hostnames ("cluster.local" + suffix by default). Further documentation is [here](https://kubernetes.io/docs/tasks/administer-cluster/nodelocaldns/) This + helm chart works for both kube-proxy setups (iptables or ipvs). ' + digest: 0e42610928db691b58b9efbbcedec4774cb963a69be6046510036a0527370438 + icon: https://miro.medium.com/max/641/1*q3vcbyVXcvaRnFLEQEHrXw.png + maintainers: + - email: no-reply@deliveryhero.com + name: gabrieladt + name: node-local-dns + urls: + - charts/node-local-dns-2.0.13.tgz + version: 2.0.13 + - apiVersion: v2 + appVersion: 1.23.1 + created: "2024-06-19T08:30:54Z" + description: 'A chart to install node-local-dns. NodeLocal DNSCache improves + Cluster DNS performance by running a DNS caching agent on cluster nodes as a + DaemonSet. In today''s architecture, Pods in ''ClusterFirst'' DNS mode reach + out to a kube-dns serviceIP for DNS queries. This is translated to a kube-dns/CoreDNS + endpoint via iptables rules added by kube-proxy. With this new architecture, + Pods will reach out to the DNS caching agent running on the same node, thereby + avoiding iptables DNAT rules and connection tracking. The local caching agent + will query kube-dns service for cache misses of cluster hostnames ("cluster.local" + suffix by default). 
Further documentation is [here](https://kubernetes.io/docs/tasks/administer-cluster/nodelocaldns/) This + helm chart works for both kube-proxy setups (iptables or ipvs). ' + digest: 99a4223d30a00ea7321bcb1a018e77423101e2edf79ac2bf3e4d1277d8aec885 + icon: https://miro.medium.com/max/641/1*q3vcbyVXcvaRnFLEQEHrXw.png + maintainers: + - email: no-reply@deliveryhero.com + name: gabrieladt + name: node-local-dns + urls: + - charts/node-local-dns-2.0.12.tgz + version: 2.0.12 + - apiVersion: v2 + appVersion: 1.23.1 + created: "2024-06-18T11:36:26Z" + description: 'A chart to install node-local-dns. NodeLocal DNSCache improves + Cluster DNS performance by running a DNS caching agent on cluster nodes as a + DaemonSet. In today''s architecture, Pods in ''ClusterFirst'' DNS mode reach + out to a kube-dns serviceIP for DNS queries. This is translated to a kube-dns/CoreDNS + endpoint via iptables rules added by kube-proxy. With this new architecture, + Pods will reach out to the DNS caching agent running on the same node, thereby + avoiding iptables DNAT rules and connection tracking. The local caching agent + will query kube-dns service for cache misses of cluster hostnames ("cluster.local" + suffix by default). Further documentation is [here](https://kubernetes.io/docs/tasks/administer-cluster/nodelocaldns/) This + helm chart works for both kube-proxy setups (iptables or ipvs). ' + digest: 1111824cec1d45c242bd7d6ec8c2de72b69595ec27a6aa7b28c8782f0dc68278 + icon: https://miro.medium.com/max/641/1*q3vcbyVXcvaRnFLEQEHrXw.png + maintainers: + - email: no-reply@deliveryhero.com + name: gabrieladt + name: node-local-dns + urls: + - charts/node-local-dns-2.0.11.tgz + version: 2.0.11 + - apiVersion: v2 + appVersion: 1.23.1 + created: "2024-06-13T18:08:21Z" + description: 'A chart to install node-local-dns. NodeLocal DNSCache improves + Cluster DNS performance by running a DNS caching agent on cluster nodes as a + DaemonSet. 
In today''s architecture, Pods in ''ClusterFirst'' DNS mode reach + out to a kube-dns serviceIP for DNS queries. This is translated to a kube-dns/CoreDNS + endpoint via iptables rules added by kube-proxy. With this new architecture, + Pods will reach out to the DNS caching agent running on the same node, thereby + avoiding iptables DNAT rules and connection tracking. The local caching agent + will query kube-dns service for cache misses of cluster hostnames ("cluster.local" + suffix by default). Further documentation is [here](https://kubernetes.io/docs/tasks/administer-cluster/nodelocaldns/) This + helm chart works for both kube-proxy setups (iptables or ipvs). ' + digest: ed249e44c931d584888499daf8d1e9247b7b98459e76abfd50e5d2c6870bca65 + icon: https://miro.medium.com/max/641/1*q3vcbyVXcvaRnFLEQEHrXw.png + maintainers: + - email: no-reply@deliveryhero.com + name: gabrieladt + name: node-local-dns + urls: + - charts/node-local-dns-2.0.10.tgz + version: 2.0.10 + - apiVersion: v2 + appVersion: 1.23.0 + created: "2024-05-16T12:04:30Z" + description: 'A chart to install node-local-dns. NodeLocal DNSCache improves + Cluster DNS performance by running a DNS caching agent on cluster nodes as a + DaemonSet. In today''s architecture, Pods in ''ClusterFirst'' DNS mode reach + out to a kube-dns serviceIP for DNS queries. This is translated to a kube-dns/CoreDNS + endpoint via iptables rules added by kube-proxy. With this new architecture, + Pods will reach out to the DNS caching agent running on the same node, thereby + avoiding iptables DNAT rules and connection tracking. The local caching agent + will query kube-dns service for cache misses of cluster hostnames ("cluster.local" + suffix by default). Further documentation is [here](https://kubernetes.io/docs/tasks/administer-cluster/nodelocaldns/) This + helm chart works for both kube-proxy setups (iptables or ipvs). 
' + digest: ccf94637d2457f4a248810ab93965be083c2b9fa3fe4bd2dae740c5bafd5aa0a + icon: https://miro.medium.com/max/641/1*q3vcbyVXcvaRnFLEQEHrXw.png + maintainers: + - email: no-reply@deliveryhero.com + name: gabrieladt + name: node-local-dns + urls: + - charts/node-local-dns-2.0.9.tgz + version: 2.0.9 + - apiVersion: v2 + appVersion: 1.22.23 + created: "2024-04-24T09:47:16Z" + description: 'A chart to install node-local-dns. NodeLocal DNSCache improves + Cluster DNS performance by running a DNS caching agent on cluster nodes as a + DaemonSet. In today''s architecture, Pods in ''ClusterFirst'' DNS mode reach + out to a kube-dns serviceIP for DNS queries. This is translated to a kube-dns/CoreDNS + endpoint via iptables rules added by kube-proxy. With this new architecture, + Pods will reach out to the DNS caching agent running on the same node, thereby + avoiding iptables DNAT rules and connection tracking. The local caching agent + will query kube-dns service for cache misses of cluster hostnames ("cluster.local" + suffix by default). Further documentation is [here](https://kubernetes.io/docs/tasks/administer-cluster/nodelocaldns/) This + helm chart works for both kube-proxy setups (iptables or ipvs). ' + digest: 5c3a1c08b284bec7b808db2717cc166104703aa8c33bdad7f88fa092bcd96843 + icon: https://miro.medium.com/max/641/1*q3vcbyVXcvaRnFLEQEHrXw.png + maintainers: + - email: no-reply@deliveryhero.com + name: gabrieladt + name: node-local-dns + urls: + - charts/node-local-dns-2.0.8.tgz + version: 2.0.8 + - apiVersion: v2 + appVersion: 1.22.23 + created: "2024-04-09T11:54:01Z" + description: 'A chart to install node-local-dns. NodeLocal DNSCache improves + Cluster DNS performance by running a DNS caching agent on cluster nodes as a + DaemonSet. In today''s architecture, Pods in ''ClusterFirst'' DNS mode reach + out to a kube-dns serviceIP for DNS queries. This is translated to a kube-dns/CoreDNS + endpoint via iptables rules added by kube-proxy. 
With this new architecture, + Pods will reach out to the DNS caching agent running on the same node, thereby + avoiding iptables DNAT rules and connection tracking. The local caching agent + will query kube-dns service for cache misses of cluster hostnames ("cluster.local" + suffix by default). Further documentation is [here](https://kubernetes.io/docs/tasks/administer-cluster/nodelocaldns/) This + helm chart works for both kube-proxy setups (iptables or ipvs). ' + digest: cb672ad17635d29c06a64b0b32e0411a1b6d6732ca4e848dba9ee27f2a1c5eaa + icon: https://miro.medium.com/max/641/1*q3vcbyVXcvaRnFLEQEHrXw.png + maintainers: + - email: no-reply@deliveryhero.com + name: gabrieladt + name: node-local-dns + urls: + - charts/node-local-dns-2.0.7.tgz + version: 2.0.7 + - apiVersion: v2 + appVersion: 1.22.23 + created: "2024-04-07T22:31:32Z" + description: 'A chart to install node-local-dns. NodeLocal DNSCache improves + Cluster DNS performance by running a DNS caching agent on cluster nodes as a + DaemonSet. In today''s architecture, Pods in ''ClusterFirst'' DNS mode reach + out to a kube-dns serviceIP for DNS queries. This is translated to a kube-dns/CoreDNS + endpoint via iptables rules added by kube-proxy. With this new architecture, + Pods will reach out to the DNS caching agent running on the same node, thereby + avoiding iptables DNAT rules and connection tracking. The local caching agent + will query kube-dns service for cache misses of cluster hostnames ("cluster.local" + suffix by default). Further documentation is [here](https://kubernetes.io/docs/tasks/administer-cluster/nodelocaldns/) This + helm chart works for both kube-proxy setups (iptables or ipvs). 
' + digest: fca139a86164aeb424e78a4b345b6134a538a4d491927e3acad26035268064b2 + icon: https://miro.medium.com/max/641/1*q3vcbyVXcvaRnFLEQEHrXw.png + maintainers: + - email: no-reply@deliveryhero.com + name: gabrieladt + name: node-local-dns + urls: + - charts/node-local-dns-2.0.6.tgz + version: 2.0.6 + - apiVersion: v2 + appVersion: 1.22.23 + created: "2024-02-20T08:42:32Z" + description: 'A chart to install node-local-dns. NodeLocal DNSCache improves + Cluster DNS performance by running a DNS caching agent on cluster nodes as a + DaemonSet. In today''s architecture, Pods in ''ClusterFirst'' DNS mode reach + out to a kube-dns serviceIP for DNS queries. This is translated to a kube-dns/CoreDNS + endpoint via iptables rules added by kube-proxy. With this new architecture, + Pods will reach out to the DNS caching agent running on the same node, thereby + avoiding iptables DNAT rules and connection tracking. The local caching agent + will query kube-dns service for cache misses of cluster hostnames ("cluster.local" + suffix by default). Further documentation is [here](https://kubernetes.io/docs/tasks/administer-cluster/nodelocaldns/) This + helm chart works for both kube-proxy setups (iptables or ipvs). ' + digest: c605f5ab4d39772b5b005a6d7654f9853779419ecd921211a30a3d214b2cc825 + icon: https://miro.medium.com/max/641/1*q3vcbyVXcvaRnFLEQEHrXw.png + maintainers: + - email: no-reply@deliveryhero.com + name: gabrieladt + name: node-local-dns + urls: + - charts/node-local-dns-2.0.5.tgz + version: 2.0.5 + - apiVersion: v2 + appVersion: 1.22.23 + created: "2024-02-09T10:15:04Z" + description: 'A chart to install node-local-dns. NodeLocal DNSCache improves + Cluster DNS performance by running a DNS caching agent on cluster nodes as a + DaemonSet. In today''s architecture, Pods in ''ClusterFirst'' DNS mode reach + out to a kube-dns serviceIP for DNS queries. This is translated to a kube-dns/CoreDNS + endpoint via iptables rules added by kube-proxy. 
With this new architecture, + Pods will reach out to the DNS caching agent running on the same node, thereby + avoiding iptables DNAT rules and connection tracking. The local caching agent + will query kube-dns service for cache misses of cluster hostnames ("cluster.local" + suffix by default). Further documentation is [here](https://kubernetes.io/docs/tasks/administer-cluster/nodelocaldns/) This + helm chart works for both kube-proxy setups (iptables or ipvs). ' + digest: 6373363b2ed13b03fa4d22ef4a2d0e4487f9c1e6a4d663c2809a70fd4778c023 + icon: https://miro.medium.com/max/641/1*q3vcbyVXcvaRnFLEQEHrXw.png + maintainers: + - email: no-reply@deliveryhero.com + name: gabrieladt + name: node-local-dns + urls: + - charts/node-local-dns-2.0.4.tgz + version: 2.0.4 + - apiVersion: v2 + appVersion: 1.22.23 + created: "2023-12-07T11:52:53Z" + description: 'A chart to install node-local-dns. NodeLocal DNSCache improves + Cluster DNS performance by running a DNS caching agent on cluster nodes as a + DaemonSet. In today''s architecture, Pods in ''ClusterFirst'' DNS mode reach + out to a kube-dns serviceIP for DNS queries. This is translated to a kube-dns/CoreDNS + endpoint via iptables rules added by kube-proxy. With this new architecture, + Pods will reach out to the DNS caching agent running on the same node, thereby + avoiding iptables DNAT rules and connection tracking. The local caching agent + will query kube-dns service for cache misses of cluster hostnames ("cluster.local" + suffix by default). Further documentation is [here](https://kubernetes.io/docs/tasks/administer-cluster/nodelocaldns/) This + helm chart works for both kube-proxy setups (iptables or ipvs). 
' + digest: 5733ac14c7144c9f08a5deb5dfd002bcf5912f1185139b726a7f9a6a2ef12848 + icon: https://miro.medium.com/max/641/1*q3vcbyVXcvaRnFLEQEHrXw.png + maintainers: + - email: no-reply@deliveryhero.com + name: gabrieladt + name: node-local-dns + urls: + - charts/node-local-dns-2.0.3.tgz + version: 2.0.3 + - apiVersion: v2 + appVersion: 1.22.23 + created: "2023-09-29T13:11:37.02343221Z" + description: 'A chart to install node-local-dns. NodeLocal DNSCache improves + Cluster DNS performance by running a DNS caching agent on cluster nodes as a + DaemonSet. In today''s architecture, Pods in ''ClusterFirst'' DNS mode reach + out to a kube-dns serviceIP for DNS queries. This is translated to a kube-dns/CoreDNS + endpoint via iptables rules added by kube-proxy. With this new architecture, + Pods will reach out to the DNS caching agent running on the same node, thereby + avoiding iptables DNAT rules and connection tracking. The local caching agent + will query kube-dns service for cache misses of cluster hostnames ("cluster.local" + suffix by default). Further documentation is [here](https://kubernetes.io/docs/tasks/administer-cluster/nodelocaldns/) This + helm chart works for both kube-proxy setups (iptables or ipvs). ' + digest: a934dddd5f635ffd67addeaffca8d961d46538f6e4993c0d97aeb7b5819ef871 + icon: https://miro.medium.com/max/641/1*q3vcbyVXcvaRnFLEQEHrXw.png + maintainers: + - email: no-reply@deliveryhero.com + name: gabrieladt + name: node-local-dns + urls: + - charts/node-local-dns-2.0.2.tgz + version: 2.0.2 + - apiVersion: v2 + appVersion: 1.22.23 + created: "2023-09-26T07:02:51Z" + description: 'A chart to install node-local-dns. NodeLocal DNSCache improves + Cluster DNS performance by running a DNS caching agent on cluster nodes as a + DaemonSet. In today''s architecture, Pods in ''ClusterFirst'' DNS mode reach + out to a kube-dns serviceIP for DNS queries. This is translated to a kube-dns/CoreDNS + endpoint via iptables rules added by kube-proxy. 
With this new architecture, + Pods will reach out to the DNS caching agent running on the same node, thereby + avoiding iptables DNAT rules and connection tracking. The local caching agent + will query kube-dns service for cache misses of cluster hostnames ("cluster.local" + suffix by default). Further documentation is [here](https://kubernetes.io/docs/tasks/administer-cluster/nodelocaldns/) This + helm chart works for both kube-proxy setups (iptables or ipvs). ' + digest: afd2d422b5d96d8df45d77f093d98597cadc04c6d47ada81626cadcbae8e3fb1 + icon: https://miro.medium.com/max/641/1*q3vcbyVXcvaRnFLEQEHrXw.png + maintainers: + - email: no-reply@deliveryhero.com + name: gabrieladt + name: node-local-dns + urls: + - charts/node-local-dns-2.0.1.tgz + version: 2.0.1 + - apiVersion: v2 + appVersion: 1.22.23 + created: "2023-09-21T06:44:24Z" + description: 'A chart to install node-local-dns. NodeLocal DNSCache improves + Cluster DNS performance by running a DNS caching agent on cluster nodes as a + DaemonSet. In today''s architecture, Pods in ''ClusterFirst'' DNS mode reach + out to a kube-dns serviceIP for DNS queries. This is translated to a kube-dns/CoreDNS + endpoint via iptables rules added by kube-proxy. With this new architecture, + Pods will reach out to the DNS caching agent running on the same node, thereby + avoiding iptables DNAT rules and connection tracking. The local caching agent + will query kube-dns service for cache misses of cluster hostnames ("cluster.local" + suffix by default). Further documentation is [here](https://kubernetes.io/docs/tasks/administer-cluster/nodelocaldns/) This + helm chart works for both kube-proxy setups (iptables or ipvs). 
' + digest: 969719dc416574b36f5bbf9b24b01cce1c715b9f686201f2380752412b5a7190 + icon: https://miro.medium.com/max/641/1*q3vcbyVXcvaRnFLEQEHrXw.png + maintainers: + - email: no-reply@deliveryhero.com + name: gabrieladt + name: node-local-dns + urls: + - charts/node-local-dns-2.0.0.tgz + version: 2.0.0 + - apiVersion: v2 + appVersion: 1.22.20 + created: "2023-08-29T17:42:18Z" + description: 'A chart to install node-local-dns. NodeLocal DNSCache improves + Cluster DNS performance by running a DNS caching agent on cluster nodes as a + DaemonSet. In today''s architecture, Pods in ''ClusterFirst'' DNS mode reach + out to a kube-dns serviceIP for DNS queries. This is translated to a kube-dns/CoreDNS + endpoint via iptables rules added by kube-proxy. With this new architecture, + Pods will reach out to the DNS caching agent running on the same node, thereby + avoiding iptables DNAT rules and connection tracking. The local caching agent + will query kube-dns service for cache misses of cluster hostnames ("cluster.local" + suffix by default). Further documentation is [here](https://kubernetes.io/docs/tasks/administer-cluster/nodelocaldns/) This + helm chart works for both kube-proxy setups (iptables or ipvs). ' + digest: fe1e4aba9a40a68ce8fbc85da459661f0692bc1f17ae25373fa407727e03499b + icon: https://miro.medium.com/max/641/1*q3vcbyVXcvaRnFLEQEHrXw.png + maintainers: + - email: no-reply@deliveryhero.com + name: gabrieladt + name: node-local-dns + urls: + - charts/node-local-dns-1.1.5.tgz + version: 1.1.5 + - apiVersion: v2 + appVersion: 1.22.20 + created: "2023-08-15T16:07:28Z" + description: 'A chart to install node-local-dns. NodeLocal DNSCache improves + Cluster DNS performance by running a DNS caching agent on cluster nodes as a + DaemonSet. In today''s architecture, Pods in ''ClusterFirst'' DNS mode reach + out to a kube-dns serviceIP for DNS queries. This is translated to a kube-dns/CoreDNS + endpoint via iptables rules added by kube-proxy. 
With this new architecture, + Pods will reach out to the DNS caching agent running on the same node, thereby + avoiding iptables DNAT rules and connection tracking. The local caching agent + will query kube-dns service for cache misses of cluster hostnames ("cluster.local" + suffix by default). Further documentation is [here](https://kubernetes.io/docs/tasks/administer-cluster/nodelocaldns/) This + helm chart works for both kube-proxy setups (iptables or ipvs). ' + digest: 8f901b0019cd4920542577de52dc91b0302ffd992c98a983644cfbf18421ab1f + icon: https://miro.medium.com/max/641/1*q3vcbyVXcvaRnFLEQEHrXw.png + maintainers: + - email: no-reply@deliveryhero.com + name: gabrieladt + name: node-local-dns + urls: + - charts/node-local-dns-1.1.4.tgz + version: 1.1.4 + - apiVersion: v2 + appVersion: 1.22.20 + created: "2023-08-09T10:25:15Z" + description: 'A chart to install node-local-dns. NodeLocal DNSCache improves + Cluster DNS performance by running a DNS caching agent on cluster nodes as a + DaemonSet. In today''s architecture, Pods in ''ClusterFirst'' DNS mode reach + out to a kube-dns serviceIP for DNS queries. This is translated to a kube-dns/CoreDNS + endpoint via iptables rules added by kube-proxy. With this new architecture, + Pods will reach out to the DNS caching agent running on the same node, thereby + avoiding iptables DNAT rules and connection tracking. The local caching agent + will query kube-dns service for cache misses of cluster hostnames ("cluster.local" + suffix by default). Further documentation is [here](https://kubernetes.io/docs/tasks/administer-cluster/nodelocaldns/) This + helm chart works for both kube-proxy setups (iptables or ipvs). 
' + digest: bccb4e8f54ecec73b6d1cc2dc848af9fd871badce44e06437cee7c0ca8584ecb + icon: https://miro.medium.com/max/641/1*q3vcbyVXcvaRnFLEQEHrXw.png + maintainers: + - email: no-reply@deliveryhero.com + name: gabrieladt + name: node-local-dns + urls: + - charts/node-local-dns-1.1.3.tgz + version: 1.1.3 + - apiVersion: v2 + appVersion: 1.22.20 + created: "2023-07-13T12:25:47Z" + description: 'A chart to install node-local-dns. NodeLocal DNSCache improves + Cluster DNS performance by running a DNS caching agent on cluster nodes as a + DaemonSet. In today''s architecture, Pods in ''ClusterFirst'' DNS mode reach + out to a kube-dns serviceIP for DNS queries. This is translated to a kube-dns/CoreDNS + endpoint via iptables rules added by kube-proxy. With this new architecture, + Pods will reach out to the DNS caching agent running on the same node, thereby + avoiding iptables DNAT rules and connection tracking. The local caching agent + will query kube-dns service for cache misses of cluster hostnames ("cluster.local" + suffix by default). Further documentation is [here](https://kubernetes.io/docs/tasks/administer-cluster/nodelocaldns/) This + helm chart works for both kube-proxy setups (iptables or ipvs). ' + digest: b0efc70de2c3b31352150080b824ff20e8d9b566533a2529149888eb47edd32e + icon: https://miro.medium.com/max/641/1*q3vcbyVXcvaRnFLEQEHrXw.png + maintainers: + - email: no-reply@deliveryhero.com + name: gabrieladt + name: node-local-dns + urls: + - charts/node-local-dns-1.1.2.tgz + version: 1.1.2 + - apiVersion: v2 + appVersion: 1.22.20 + created: "2023-07-06T11:19:02Z" + description: 'A chart to install node-local-dns. NodeLocal DNSCache improves + Cluster DNS performance by running a DNS caching agent on cluster nodes as a + DaemonSet. In today''s architecture, Pods in ''ClusterFirst'' DNS mode reach + out to a kube-dns serviceIP for DNS queries. This is translated to a kube-dns/CoreDNS + endpoint via iptables rules added by kube-proxy. 
With this new architecture, + Pods will reach out to the DNS caching agent running on the same node, thereby + avoiding iptables DNAT rules and connection tracking. The local caching agent + will query kube-dns service for cache misses of cluster hostnames ("cluster.local" + suffix by default). Further documentation is [here](https://kubernetes.io/docs/tasks/administer-cluster/nodelocaldns/) This + helm chart works for both kube-proxy setups (iptables or ipvs). ' + digest: aac41d8a16b7cea6ba5b9f5a0370dd1be988981cb95bda732fcc5c94148722ce + icon: https://miro.medium.com/max/641/1*q3vcbyVXcvaRnFLEQEHrXw.png + maintainers: + - email: no-reply@deliveryhero.com + name: gabrieladt + name: node-local-dns + urls: + - charts/node-local-dns-1.1.1.tgz + version: 1.1.1 + - apiVersion: v2 + appVersion: 1.22.20 + created: "2023-06-21T10:01:00Z" + description: 'A chart to install node-local-dns. NodeLocal DNSCache improves + Cluster DNS performance by running a DNS caching agent on cluster nodes as a + DaemonSet. In today''s architecture, Pods in ''ClusterFirst'' DNS mode reach + out to a kube-dns serviceIP for DNS queries. This is translated to a kube-dns/CoreDNS + endpoint via iptables rules added by kube-proxy. With this new architecture, + Pods will reach out to the DNS caching agent running on the same node, thereby + avoiding iptables DNAT rules and connection tracking. The local caching agent + will query kube-dns service for cache misses of cluster hostnames ("cluster.local" + suffix by default). Further documentation is [here](https://kubernetes.io/docs/tasks/administer-cluster/nodelocaldns/) This + helm chart works for both kube-proxy setups (iptables or ipvs). 
' + digest: f82df23c96002d4c42982520f47b7f713d57226bed549ce14aa3ffe4d16043d8 + icon: https://miro.medium.com/max/641/1*q3vcbyVXcvaRnFLEQEHrXw.png + maintainers: + - email: no-reply@deliveryhero.com + name: gabrieladt + name: node-local-dns + urls: + - charts/node-local-dns-1.1.0.tgz + version: 1.1.0 + - apiVersion: v2 + appVersion: 1.22.20 + created: "2023-06-13T13:06:35Z" + description: 'A chart to install node-local-dns. NodeLocal DNSCache improves + Cluster DNS performance by running a DNS caching agent on cluster nodes as a + DaemonSet. In today''s architecture, Pods in ''ClusterFirst'' DNS mode reach + out to a kube-dns serviceIP for DNS queries. This is translated to a kube-dns/CoreDNS + endpoint via iptables rules added by kube-proxy. With this new architecture, + Pods will reach out to the DNS caching agent running on the same node, thereby + avoiding iptables DNAT rules and connection tracking. The local caching agent + will query kube-dns service for cache misses of cluster hostnames ("cluster.local" + suffix by default). Further documentation is [here](https://kubernetes.io/docs/tasks/administer-cluster/nodelocaldns/) This + helm chart works for both kube-proxy setups (iptables or ipvs). ' + digest: 4ac279d56e630b325d3a647551b15dc777e5986f46e3267da9d2bcc21e417711 + icon: https://miro.medium.com/max/641/1*q3vcbyVXcvaRnFLEQEHrXw.png + maintainers: + - email: no-reply@deliveryhero.com + name: gabrieladt + name: node-local-dns + urls: + - charts/node-local-dns-1.0.1.tgz + version: 1.0.1 + - apiVersion: v2 + appVersion: 1.22.20 + created: "2023-05-30T14:47:38Z" + description: 'A chart to install node-local-dns. NodeLocal DNSCache improves + Cluster DNS performance by running a DNS caching agent on cluster nodes as a + DaemonSet. In today''s architecture, Pods in ''ClusterFirst'' DNS mode reach + out to a kube-dns serviceIP for DNS queries. This is translated to a kube-dns/CoreDNS + endpoint via iptables rules added by kube-proxy. 
With this new architecture, + Pods will reach out to the DNS caching agent running on the same node, thereby + avoiding iptables DNAT rules and connection tracking. The local caching agent + will query kube-dns service for cache misses of cluster hostnames ("cluster.local" + suffix by default). Further documentation is [here](https://kubernetes.io/docs/tasks/administer-cluster/nodelocaldns/) This + helm chart works for both kube-proxy setups (iptables or ipvs). ' + digest: 6aba4b62b063b077b32c1104ef51978ed2d9fec13621996cdf162b5d7270f34e + icon: https://miro.medium.com/max/641/1*q3vcbyVXcvaRnFLEQEHrXw.png + maintainers: + - email: no-reply@deliveryhero.com + name: gabrieladt + name: node-local-dns + urls: + - charts/node-local-dns-1.0.0.tgz + version: 1.0.0 + - apiVersion: v2 + appVersion: 1.22.20 + created: "2023-04-05T13:47:51Z" + description: 'A chart to install node-local-dns. NodeLocal DNSCache improves + Cluster DNS performance by running a DNS caching agent on cluster nodes as a + DaemonSet. In today''s architecture, Pods in ''ClusterFirst'' DNS mode reach + out to a kube-dns serviceIP for DNS queries. This is translated to a kube-dns/CoreDNS + endpoint via iptables rules added by kube-proxy. With this new architecture, + Pods will reach out to the DNS caching agent running on the same node, thereby + avoiding iptables DNAT rules and connection tracking. The local caching agent + will query kube-dns service for cache misses of cluster hostnames ("cluster.local" + suffix by default). Further documentation is [here](https://kubernetes.io/docs/tasks/administer-cluster/nodelocaldns/) This + helm chart works for both kube-proxy setups (iptables or ipvs). 
' + digest: cd496dfae05f9725304c099ead94759c5e9c56fad8a54ba79ba1473ba93a7d89 + icon: https://miro.medium.com/max/641/1*q3vcbyVXcvaRnFLEQEHrXw.png + maintainers: + - email: no-reply@deliveryhero.com + name: gabrieladt + name: node-local-dns + urls: + - charts/node-local-dns-0.2.4.tgz + version: 0.2.4 + - apiVersion: v2 + appVersion: 1.22.20 + created: "2023-03-20T04:13:57.033846305Z" + description: 'A chart to install node-local-dns. NodeLocal DNSCache improves + Cluster DNS performance by running a DNS caching agent on cluster nodes as a + DaemonSet. In today''s architecture, Pods in ''ClusterFirst'' DNS mode reach + out to a kube-dns serviceIP for DNS queries. This is translated to a kube-dns/CoreDNS + endpoint via iptables rules added by kube-proxy. With this new architecture, + Pods will reach out to the DNS caching agent running on the same node, thereby + avoiding iptables DNAT rules and connection tracking. The local caching agent + will query kube-dns service for cache misses of cluster hostnames ("cluster.local" + suffix by default). Further documentation is [here](https://kubernetes.io/docs/tasks/administer-cluster/nodelocaldns/) This + helm chart works for both kube-proxy setups (iptables or ipvs). ' + digest: 68e12ef39c370eac5768e3df2e1d8815bae6b92aebbea0235640403dcefa9026 + icon: https://miro.medium.com/max/641/1*q3vcbyVXcvaRnFLEQEHrXw.png + maintainers: + - email: no-reply@deliveryhero.com + name: gabrieladt + name: node-local-dns + urls: + - charts/node-local-dns-0.2.3.tgz + version: 0.2.3 + - apiVersion: v2 + appVersion: 1.21.1 + created: "2023-03-02T09:40:51Z" + description: 'A chart to install node-local-dns. NodeLocal DNSCache improves + Cluster DNS performance by running a DNS caching agent on cluster nodes as a + DaemonSet. In today''s architecture, Pods in ''ClusterFirst'' DNS mode reach + out to a kube-dns serviceIP for DNS queries. This is translated to a kube-dns/CoreDNS + endpoint via iptables rules added by kube-proxy. 
With this new architecture, + Pods will reach out to the DNS caching agent running on the same node, thereby + avoiding iptables DNAT rules and connection tracking. The local caching agent + will query kube-dns service for cache misses of cluster hostnames ("cluster.local" + suffix by default). Further documentation is [here](https://kubernetes.io/docs/tasks/administer-cluster/nodelocaldns/) This + helm chart works for both kube-proxy setups (iptables or ipvs). ' + digest: 69808807386b476848a13b61263f9654b2694ce4eff2a5a1a0cdc73877dce275 + icon: https://miro.medium.com/max/641/1*q3vcbyVXcvaRnFLEQEHrXw.png + maintainers: + - email: no-reply@deliveryhero.com + name: gabrieladt + name: node-local-dns + urls: + - charts/node-local-dns-0.2.2.tgz + version: 0.2.2 + - apiVersion: v2 + appVersion: 1.21.1 + created: "2023-01-30T15:57:43Z" + description: 'A chart to install node-local-dns. NodeLocal DNSCache improves + Cluster DNS performance by running a DNS caching agent on cluster nodes as a + DaemonSet. In today''s architecture, Pods in ''ClusterFirst'' DNS mode reach + out to a kube-dns serviceIP for DNS queries. This is translated to a kube-dns/CoreDNS + endpoint via iptables rules added by kube-proxy. With this new architecture, + Pods will reach out to the DNS caching agent running on the same node, thereby + avoiding iptables DNAT rules and connection tracking. The local caching agent + will query kube-dns service for cache misses of cluster hostnames ("cluster.local" + suffix by default). Further documentation is [here](https://kubernetes.io/docs/tasks/administer-cluster/nodelocaldns/) This + helm chart works for both kube-proxy setups (iptables or ipvs). 
' + digest: f4346cd647b1b15d910339179c92d31c3f8dc58be5e98f22e8019dd58a38e7a8 + icon: https://miro.medium.com/max/641/1*q3vcbyVXcvaRnFLEQEHrXw.png + maintainers: + - email: no-reply@deliveryhero.com + name: gabrieladt + name: node-local-dns + urls: + - charts/node-local-dns-0.2.1.tgz + version: 0.2.1 + - apiVersion: v2 + appVersion: 1.21.1 + created: "2022-10-13T09:53:02Z" + description: 'A chart to install node-local-dns. NodeLocal DNSCache improves + Cluster DNS performance by running a DNS caching agent on cluster nodes as a + DaemonSet. In today''s architecture, Pods in ''ClusterFirst'' DNS mode reach + out to a kube-dns serviceIP for DNS queries. This is translated to a kube-dns/CoreDNS + endpoint via iptables rules added by kube-proxy. With this new architecture, + Pods will reach out to the DNS caching agent running on the same node, thereby + avoiding iptables DNAT rules and connection tracking. The local caching agent + will query kube-dns service for cache misses of cluster hostnames ("cluster.local" + suffix by default). Further documentation is [here](https://kubernetes.io/docs/tasks/administer-cluster/nodelocaldns/) This + helm chart works for both kube-proxy setups (iptables or ipvs). ' + digest: b361be33072419430b789fa3bd5509765c1e4b5f03f79bf8fe7c8fed5488f109 + icon: https://miro.medium.com/max/641/1*q3vcbyVXcvaRnFLEQEHrXw.png + maintainers: + - email: no-reply@deliveryhero.com + name: gabrieladt + name: node-local-dns + urls: + - charts/node-local-dns-0.2.0.tgz + version: 0.2.0 + - apiVersion: v2 + appVersion: 1.21.1 + created: "2022-09-19T08:56:35Z" + description: 'A chart to install node-local-dns. NodeLocal DNSCache improves + Cluster DNS performance by running a DNS caching agent on cluster nodes as a + DaemonSet. In today''s architecture, Pods in ''ClusterFirst'' DNS mode reach + out to a kube-dns serviceIP for DNS queries. This is translated to a kube-dns/CoreDNS + endpoint via iptables rules added by kube-proxy. 
With this new architecture, + Pods will reach out to the DNS caching agent running on the same node, thereby + avoiding iptables DNAT rules and connection tracking. The local caching agent + will query kube-dns service for cache misses of cluster hostnames ("cluster.local" + suffix by default). Further documentation is [here](https://kubernetes.io/docs/tasks/administer-cluster/nodelocaldns/) This + helm chart works for both kube-proxy setups (iptables or ipvs). ' + digest: b934511e6dbec25604cf248de016c33e7158604497b1aba6ed537f64b296eb09 + icon: https://miro.medium.com/max/641/1*q3vcbyVXcvaRnFLEQEHrXw.png + maintainers: + - email: no-reply@deliveryhero.com + name: gabrieladt + name: node-local-dns + urls: + - charts/node-local-dns-0.1.1.tgz + version: 0.1.1 + - apiVersion: v2 + created: "2022-05-19T13:43:41Z" + description: 'A chart to install node-local-dns. NodeLocal DNSCache improves + Cluster DNS performance by running a DNS caching agent on cluster nodes as a + DaemonSet. In today''s architecture, Pods in ''ClusterFirst'' DNS mode reach + out to a kube-dns serviceIP for DNS queries. This is translated to a kube-dns/CoreDNS + endpoint via iptables rules added by kube-proxy. With this new architecture, + Pods will reach out to the DNS caching agent running on the same node, thereby + avoiding iptables DNAT rules and connection tracking. The local caching agent + will query kube-dns service for cache misses of cluster hostnames ("cluster.local" + suffix by default). Further documentation is [here](https://kubernetes.io/docs/tasks/administer-cluster/nodelocaldns/) This + helm chart works for both kube-proxy setups (iptables or ipvs). 
' + digest: 91a62f256aa8652b18b2cfa83f00f251d3ceac62eef8e9e19b00827fd296ff7c + icon: https://miro.medium.com/max/641/1*q3vcbyVXcvaRnFLEQEHrXw.png + maintainers: + - email: no-reply@deliveryhero.com + name: gabrieladt + name: node-local-dns + urls: + - charts/node-local-dns-0.1.0.tgz + version: 0.1.0 + node-problem-detector: + - apiVersion: v1 + appVersion: v0.8.19 + created: "2024-10-04T09:09:55Z" + description: 'This chart installs a [node-problem-detector](https://github.com/kubernetes/node-problem-detector) + daemonset. This tool aims to make various node problems visible to the upstream + layers in cluster management stack. It is a daemon which runs on each node, + detects node problems and reports them to apiserver. ' + digest: 192248313bcda86b7d21db4c7d435754dea0c6a41ee950d83e45c37b99c2609c + home: https://github.com/kubernetes/node-problem-detector + icon: https://github.com/kubernetes/kubernetes/raw/master/logo/logo.png + keywords: + - node + - problem + - detector + - monitoring + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: node-problem-detector + sources: + - https://github.com/kubernetes/node-problem-detector + - https://kubernetes.io/docs/concepts/architecture/nodes/#condition + urls: + - charts/node-problem-detector-2.3.14.tgz + version: 2.3.14 + - apiVersion: v1 + appVersion: v0.8.18 + created: "2024-09-25T16:49:40Z" + description: 'This chart installs a [node-problem-detector](https://github.com/kubernetes/node-problem-detector) + daemonset. This tool aims to make various node problems visible to the upstream + layers in cluster management stack. It is a daemon which runs on each node, + detects node problems and reports them to apiserver. 
' + digest: fce3d5170bd4a7b18fe9114d0e1a5a8049908fb8191733d11cbd25bb1b255c2f + home: https://github.com/kubernetes/node-problem-detector + icon: https://github.com/kubernetes/kubernetes/raw/master/logo/logo.png + keywords: + - node + - problem + - detector + - monitoring + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: node-problem-detector + sources: + - https://github.com/kubernetes/node-problem-detector + - https://kubernetes.io/docs/concepts/architecture/nodes/#condition + urls: + - charts/node-problem-detector-2.3.13.tgz + version: 2.3.13 + - apiVersion: v1 + appVersion: v0.8.15 + created: "2024-04-07T22:35:27Z" + description: 'This chart installs a [node-problem-detector](https://github.com/kubernetes/node-problem-detector) + daemonset. This tool aims to make various node problems visible to the upstream + layers in cluster management stack. It is a daemon which runs on each node, + detects node problems and reports them to apiserver. ' + digest: 892b11c93a28c0a0c742272c0a963debc8907f62707b28c6ceb5888e07fbeac3 + home: https://github.com/kubernetes/node-problem-detector + icon: https://github.com/kubernetes/kubernetes/raw/master/logo/logo.png + keywords: + - node + - problem + - detector + - monitoring + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: node-problem-detector + sources: + - https://github.com/kubernetes/node-problem-detector + - https://kubernetes.io/docs/concepts/architecture/nodes/#condition + urls: + - charts/node-problem-detector-2.3.12.tgz + version: 2.3.12 + - apiVersion: v1 + appVersion: v0.8.14 + created: "2024-01-08T09:25:12Z" + description: 'This chart installs a [node-problem-detector](https://github.com/kubernetes/node-problem-detector) + daemonset. This tool aims to make various node problems visible to the upstream + layers in cluster management stack. It is a daemon which runs on each node, + detects node problems and reports them to apiserver. 
' + digest: c328637c97ea890fbb930bb9fe741e137fa6d769b3215e30b6baf395e47494e3 + home: https://github.com/kubernetes/node-problem-detector + icon: https://github.com/kubernetes/kubernetes/raw/master/logo/logo.png + keywords: + - node + - problem + - detector + - monitoring + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: node-problem-detector + sources: + - https://github.com/kubernetes/node-problem-detector + - https://kubernetes.io/docs/concepts/architecture/nodes/#condition + urls: + - charts/node-problem-detector-2.3.11.tgz + version: 2.3.11 + - apiVersion: v1 + appVersion: v0.8.13 + created: "2023-09-27T13:56:43Z" + description: 'This chart installs a [node-problem-detector](https://github.com/kubernetes/node-problem-detector) + daemonset. This tool aims to make various node problems visible to the upstream + layers in cluster management stack. It is a daemon which runs on each node, + detects node problems and reports them to apiserver. ' + digest: 6d3efd61e3cc2b6d575c9c7881a53d47e9b6590974f071360cceb646cc01ebb7 + home: https://github.com/kubernetes/node-problem-detector + icon: https://github.com/kubernetes/kubernetes/raw/master/logo/logo.png + keywords: + - node + - problem + - detector + - monitoring + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: node-problem-detector + sources: + - https://github.com/kubernetes/node-problem-detector + - https://kubernetes.io/docs/concepts/architecture/nodes/#condition + urls: + - charts/node-problem-detector-2.3.10.tgz + version: 2.3.10 + - apiVersion: v1 + appVersion: v0.8.13 + created: "2023-09-07T07:39:45Z" + description: 'This chart installs a [node-problem-detector](https://github.com/kubernetes/node-problem-detector) + daemonset. This tool aims to make various node problems visible to the upstream + layers in cluster management stack. It is a daemon which runs on each node, + detects node problems and reports them to apiserver. 
' + digest: a925d2887ddca72bef95af94829369cc2668a28df729c1f3ea9ccccb25eaf773 + home: https://github.com/kubernetes/node-problem-detector + icon: https://github.com/kubernetes/kubernetes/raw/master/logo/logo.png + keywords: + - node + - problem + - detector + - monitoring + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: node-problem-detector + sources: + - https://github.com/kubernetes/node-problem-detector + - https://kubernetes.io/docs/concepts/architecture/nodes/#condition + urls: + - charts/node-problem-detector-2.3.9.tgz + version: 2.3.9 + - apiVersion: v1 + appVersion: v0.8.13 + created: "2023-09-06T07:49:36Z" + description: 'This chart installs a [node-problem-detector](https://github.com/kubernetes/node-problem-detector) + daemonset. This tool aims to make various node problems visible to the upstream + layers in cluster management stack. It is a daemon which runs on each node, + detects node problems and reports them to apiserver. ' + digest: d1e4f8db6d619bfb6a208c310d4d8d0545d2769b928d813e18bd45dd7c157c2b + home: https://github.com/kubernetes/node-problem-detector + icon: https://github.com/kubernetes/kubernetes/raw/master/logo/logo.png + keywords: + - node + - problem + - detector + - monitoring + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: node-problem-detector + sources: + - https://github.com/kubernetes/node-problem-detector + - https://kubernetes.io/docs/concepts/architecture/nodes/#condition + urls: + - charts/node-problem-detector-2.3.8.tgz + version: 2.3.8 + - apiVersion: v1 + appVersion: v0.8.13 + created: "2023-09-05T15:20:25.020637502Z" + description: 'This chart installs a [node-problem-detector](https://github.com/kubernetes/node-problem-detector) + daemonset. This tool aims to make various node problems visible to the upstream + layers in cluster management stack. It is a daemon which runs on each node, + detects node problems and reports them to apiserver. 
' + digest: e8f24227b4030dd40f90fbf35a918b87e060f13a7910c21563337390becd39ae + home: https://github.com/kubernetes/node-problem-detector + icon: https://github.com/kubernetes/kubernetes/raw/master/logo/logo.png + keywords: + - node + - problem + - detector + - monitoring + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: node-problem-detector + sources: + - https://github.com/kubernetes/node-problem-detector + - https://kubernetes.io/docs/concepts/architecture/nodes/#condition + urls: + - charts/node-problem-detector-2.3.7.tgz + version: 2.3.7 + - apiVersion: v1 + appVersion: v0.8.13 + created: "2023-08-31T12:17:23Z" + description: 'This chart installs a [node-problem-detector](https://github.com/kubernetes/node-problem-detector) + daemonset. This tool aims to make various node problems visible to the upstream + layers in cluster management stack. It is a daemon which runs on each node, + detects node problems and reports them to apiserver. ' + digest: 6ec52b5b49e707b252c7dfb4c330b16ca9d42b8f74fd6c768a6692d1aa7fc96c + home: https://github.com/kubernetes/node-problem-detector + icon: https://github.com/kubernetes/kubernetes/raw/master/logo/logo.png + keywords: + - node + - problem + - detector + - monitoring + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: node-problem-detector + sources: + - https://github.com/kubernetes/node-problem-detector + - https://kubernetes.io/docs/concepts/architecture/nodes/#condition + urls: + - charts/node-problem-detector-2.3.6.tgz + version: 2.3.6 + - apiVersion: v1 + appVersion: v0.8.13 + created: "2023-08-21T07:55:31Z" + description: 'This chart installs a [node-problem-detector](https://github.com/kubernetes/node-problem-detector) + daemonset. This tool aims to make various node problems visible to the upstream + layers in cluster management stack. It is a daemon which runs on each node, + detects node problems and reports them to apiserver. 
' + digest: 1961568fa15ddb09b67cac7cda7e77c8070049864fdf89c2a2aee64fd3a8bca6 + home: https://github.com/kubernetes/node-problem-detector + icon: https://github.com/kubernetes/kubernetes/raw/master/logo/logo.png + keywords: + - node + - problem + - detector + - monitoring + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: node-problem-detector + sources: + - https://github.com/kubernetes/node-problem-detector + - https://kubernetes.io/docs/concepts/architecture/nodes/#condition + urls: + - charts/node-problem-detector-2.3.5.tgz + version: 2.3.5 + - apiVersion: v1 + appVersion: v0.8.12 + created: "2023-06-20T15:05:12Z" + description: 'This chart installs a [node-problem-detector](https://github.com/kubernetes/node-problem-detector) + daemonset. This tool aims to make various node problems visible to the upstream + layers in cluster management stack. It is a daemon which runs on each node, + detects node problems and reports them to apiserver. ' + digest: a68ef338169d4d95112616eb1ab7f099fca7c4f8d6302e73148aba4cdbb51611 + home: https://github.com/kubernetes/node-problem-detector + icon: https://github.com/kubernetes/kubernetes/raw/master/logo/logo.png + keywords: + - node + - problem + - detector + - monitoring + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: node-problem-detector + sources: + - https://github.com/kubernetes/node-problem-detector + - https://kubernetes.io/docs/concepts/architecture/nodes/#condition + urls: + - charts/node-problem-detector-2.3.4.tgz + version: 2.3.4 + - apiVersion: v1 + appVersion: v0.8.12 + created: "2023-03-16T21:38:03Z" + description: 'This chart installs a [node-problem-detector](https://github.com/kubernetes/node-problem-detector) + daemonset. This tool aims to make various node problems visible to the upstream + layers in cluster management stack. It is a daemon which runs on each node, + detects node problems and reports them to apiserver. 
' + digest: 5aeb43b6dca2b2752f3340032201ba2e952c0b2730e99f9df9d9b6cec67ce2b8 + home: https://github.com/kubernetes/node-problem-detector + icon: https://github.com/kubernetes/kubernetes/raw/master/logo/logo.png + keywords: + - node + - problem + - detector + - monitoring + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: node-problem-detector + sources: + - https://github.com/kubernetes/node-problem-detector + - https://kubernetes.io/docs/concepts/architecture/nodes/#condition + urls: + - charts/node-problem-detector-2.3.3.tgz + version: 2.3.3 + - apiVersion: v1 + appVersion: v0.8.12 + created: "2023-02-05T20:05:16Z" + description: 'This chart installs a [node-problem-detector](https://github.com/kubernetes/node-problem-detector) + daemonset. This tool aims to make various node problems visible to the upstream + layers in cluster management stack. It is a daemon which runs on each node, + detects node problems and reports them to apiserver. ' + digest: f60c780af20ce81bbe6ad9ccaa6cea3566eebe05198eb81f3ee1ebedc06b9cd7 + home: https://github.com/kubernetes/node-problem-detector + icon: https://github.com/kubernetes/kubernetes/raw/master/logo/logo.png + keywords: + - node + - problem + - detector + - monitoring + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: node-problem-detector + sources: + - https://github.com/kubernetes/node-problem-detector + - https://kubernetes.io/docs/concepts/architecture/nodes/#condition + urls: + - charts/node-problem-detector-2.3.2.tgz + version: 2.3.2 + - apiVersion: v1 + appVersion: v0.8.12 + created: "2022-12-01T16:58:06Z" + description: 'This chart installs a [node-problem-detector](https://github.com/kubernetes/node-problem-detector) + daemonset. This tool aims to make various node problems visible to the upstream + layers in cluster management stack. It is a daemon which runs on each node, + detects node problems and reports them to apiserver. 
' + digest: e4c17d8536d63cce04ea57bb84099839d1ccfbf4adc6a9a826fbe0ba4294f65e + home: https://github.com/kubernetes/node-problem-detector + icon: https://github.com/kubernetes/kubernetes/raw/master/logo/logo.png + keywords: + - node + - problem + - detector + - monitoring + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: node-problem-detector + sources: + - https://github.com/kubernetes/node-problem-detector + - https://kubernetes.io/docs/concepts/architecture/nodes/#condition + urls: + - charts/node-problem-detector-2.3.1.tgz + version: 2.3.1 + - apiVersion: v1 + appVersion: v0.8.12 + created: "2022-11-07T13:27:22Z" + description: 'This chart installs a [node-problem-detector](https://github.com/kubernetes/node-problem-detector) + daemonset. This tool aims to make various node problems visible to the upstream + layers in cluster management stack. It is a daemon which runs on each node, + detects node problems and reports them to apiserver. ' + digest: eb702a47a19f802dd2e52b9bea6b11a17702060773b9be085274cba698d634db + home: https://github.com/kubernetes/node-problem-detector + icon: https://github.com/kubernetes/kubernetes/raw/master/logo/logo.png + keywords: + - node + - problem + - detector + - monitoring + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: node-problem-detector + sources: + - https://github.com/kubernetes/node-problem-detector + - https://kubernetes.io/docs/concepts/architecture/nodes/#condition + urls: + - charts/node-problem-detector-2.3.0.tgz + version: 2.3.0 + - apiVersion: v1 + appVersion: v0.8.12 + created: "2022-10-24T10:56:37Z" + description: 'This chart installs a [node-problem-detector](https://github.com/kubernetes/node-problem-detector) + daemonset. This tool aims to make various node problems visible to the upstream + layers in cluster management stack. It is a daemon which runs on each node, + detects node problems and reports them to apiserver. 
' + digest: a2c48483e193812dc7b07dbf07a499b3f796602fc9500735a3e19d8836d2c712 + home: https://github.com/kubernetes/node-problem-detector + icon: https://github.com/kubernetes/kubernetes/raw/master/logo/logo.png + keywords: + - node + - problem + - detector + - monitoring + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: node-problem-detector + sources: + - https://github.com/kubernetes/node-problem-detector + - https://kubernetes.io/docs/concepts/architecture/nodes/#condition + urls: + - charts/node-problem-detector-2.2.6.tgz + version: 2.2.6 + - apiVersion: v1 + appVersion: v0.8.12 + created: "2022-09-13T13:00:09Z" + description: 'This chart installs a [node-problem-detector](https://github.com/kubernetes/node-problem-detector) + daemonset. This tool aims to make various node problems visible to the upstream + layers in cluster management stack. It is a daemon which runs on each node, + detects node problems and reports them to apiserver. ' + digest: f4a7613350d4056d1ea83baa5486c611f0f1b05608c95cac2a7fa5ec126f9be2 + home: https://github.com/kubernetes/node-problem-detector + icon: https://github.com/kubernetes/kubernetes/raw/master/logo/logo.png + keywords: + - node + - problem + - detector + - monitoring + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: node-problem-detector + sources: + - https://github.com/kubernetes/node-problem-detector + - https://kubernetes.io/docs/concepts/architecture/nodes/#condition + urls: + - charts/node-problem-detector-2.2.5.tgz + version: 2.2.5 + - apiVersion: v1 + appVersion: v0.8.11 + created: "2022-09-02T11:01:08Z" + description: 'This chart installs a [node-problem-detector](https://github.com/kubernetes/node-problem-detector) + daemonset. This tool aims to make various node problems visible to the upstream + layers in cluster management stack. It is a daemon which runs on each node, + detects node problems and reports them to apiserver. 
' + digest: 0f4b85bf0872cca8bfb1defeb7c8b36673cb5eff3bfb48c3538acdd45043c9f2 + home: https://github.com/kubernetes/node-problem-detector + icon: https://github.com/kubernetes/kubernetes/raw/master/logo/logo.png + keywords: + - node + - problem + - detector + - monitoring + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: node-problem-detector + sources: + - https://github.com/kubernetes/node-problem-detector + - https://kubernetes.io/docs/concepts/architecture/nodes/#condition + urls: + - charts/node-problem-detector-2.2.4.tgz + version: 2.2.4 + - apiVersion: v1 + appVersion: v0.8.11 + created: "2022-08-29T07:17:28Z" + description: 'This chart installs a [node-problem-detector](https://github.com/kubernetes/node-problem-detector) + daemonset. This tool aims to make various node problems visible to the upstream + layers in cluster management stack. It is a daemon which runs on each node, + detects node problems and reports them to apiserver. ' + digest: 6304e854aab75c88879cff54432c33416f38bcb5243716029a6f8648eb025487 + home: https://github.com/kubernetes/node-problem-detector + icon: https://github.com/kubernetes/kubernetes/raw/master/logo/logo.png + keywords: + - node + - problem + - detector + - monitoring + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: node-problem-detector + sources: + - https://github.com/kubernetes/node-problem-detector + - https://kubernetes.io/docs/concepts/architecture/nodes/#condition + urls: + - charts/node-problem-detector-2.2.3.tgz + version: 2.2.3 + - apiVersion: v1 + appVersion: v0.8.10 + created: "2022-07-29T12:07:45Z" + description: 'This chart installs a [node-problem-detector](https://github.com/kubernetes/node-problem-detector) + daemonset. This tool aims to make various node problems visible to the upstream + layers in cluster management stack. It is a daemon which runs on each node, + detects node problems and reports them to apiserver. 
' + digest: f8267df75275f522c83962f0484fefb16e5756eb0fa5166d98525a5b4280c61c + home: https://github.com/kubernetes/node-problem-detector + icon: https://github.com/kubernetes/kubernetes/raw/master/logo/logo.png + keywords: + - node + - problem + - detector + - monitoring + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: node-problem-detector + sources: + - https://github.com/kubernetes/node-problem-detector + - https://kubernetes.io/docs/concepts/architecture/nodes/#condition + urls: + - charts/node-problem-detector-2.2.2.tgz + version: 2.2.2 + - apiVersion: v1 + appVersion: v0.8.10 + created: "2022-05-20T07:40:58Z" + description: 'This chart installs a [node-problem-detector](https://github.com/kubernetes/node-problem-detector) + daemonset. This tool aims to make various node problems visible to the upstream + layers in cluster management stack. It is a daemon which runs on each node, + detects node problems and reports them to apiserver. ' + digest: 69dc372267cc692a2c495ed8c261cbc7c28d6768504504383009b194524447ee + home: https://github.com/kubernetes/node-problem-detector + icon: https://github.com/kubernetes/kubernetes/raw/master/logo/logo.png + keywords: + - node + - problem + - detector + - monitoring + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: node-problem-detector + sources: + - https://github.com/kubernetes/node-problem-detector + - https://kubernetes.io/docs/concepts/architecture/nodes/#condition + urls: + - charts/node-problem-detector-2.2.1.tgz + version: 2.2.1 + - apiVersion: v1 + appVersion: v0.8.10 + created: "2022-04-14T10:58:51Z" + description: 'This chart installs a [node-problem-detector](https://github.com/kubernetes/node-problem-detector) + daemonset. This tool aims to make various node problems visible to the upstream + layers in cluster management stack. It is a daemon which runs on each node, + detects node problems and reports them to apiserver. 
' + digest: f167de2f8d4ac8b19696db74f21a0f5914ba5a0439cebdcd445787d164e885c3 + home: https://github.com/kubernetes/node-problem-detector + icon: https://github.com/kubernetes/kubernetes/raw/master/logo/logo.png + keywords: + - node + - problem + - detector + - monitoring + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: node-problem-detector + sources: + - https://github.com/kubernetes/node-problem-detector + - https://kubernetes.io/docs/concepts/architecture/nodes/#condition + urls: + - charts/node-problem-detector-2.2.0.tgz + version: 2.2.0 + - apiVersion: v1 + appVersion: v0.8.10 + created: "2022-03-23T13:23:17Z" + description: 'This chart installs a [node-problem-detector](https://github.com/kubernetes/node-problem-detector) + daemonset. This tool aims to make various node problems visible to the upstream + layers in cluster management stack. It is a daemon which runs on each node, + detects node problems and reports them to apiserver. ' + digest: 6d3b8040de82d62e3dc6cf9abd6f51e3bd709eaca01c735f22033e4d4fcfa2c3 + home: https://github.com/kubernetes/node-problem-detector + icon: https://github.com/kubernetes/kubernetes/raw/master/logo/logo.png + keywords: + - node + - problem + - detector + - monitoring + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: node-problem-detector + sources: + - https://github.com/kubernetes/node-problem-detector + - https://kubernetes.io/docs/concepts/architecture/nodes/#condition + urls: + - charts/node-problem-detector-2.1.0.tgz + version: 2.1.0 + - apiVersion: v1 + appVersion: v0.8.10 + created: "2022-03-22T13:27:13Z" + description: 'This chart installs a [node-problem-detector](https://github.com/kubernetes/node-problem-detector) + daemonset. This tool aims to make various node problems visible to the upstream + layers in cluster management stack. It is a daemon which runs on each node, + detects node problems and reports them to apiserver. 
' + digest: 9956bea98f7248239be5edabb398f011a7ae68aded6377520400ee414abe6f1b + home: https://github.com/kubernetes/node-problem-detector + icon: https://github.com/kubernetes/kubernetes/raw/master/logo/logo.png + keywords: + - node + - problem + - detector + - monitoring + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: node-problem-detector + sources: + - https://github.com/kubernetes/node-problem-detector + - https://kubernetes.io/docs/concepts/architecture/nodes/#condition + urls: + - charts/node-problem-detector-2.0.9.tgz + version: 2.0.9 + - apiVersion: v1 + appVersion: v0.8.10 + created: "2021-10-06T15:11:34Z" + description: 'This chart installs a [node-problem-detector](https://github.com/kubernetes/node-problem-detector) + daemonset. This tool aims to make various node problems visible to the upstream + layers in cluster management stack. It is a daemon which runs on each node, + detects node problems and reports them to apiserver. ' + digest: d16e910873fb10650b75994786035d18ef14140dbd54b6afce7b3e8a9ae8eff4 + home: https://github.com/kubernetes/node-problem-detector + icon: https://github.com/kubernetes/kubernetes/raw/master/logo/logo.png + keywords: + - node + - problem + - detector + - monitoring + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: node-problem-detector + sources: + - https://github.com/kubernetes/node-problem-detector + - https://kubernetes.io/docs/concepts/architecture/nodes/#condition + urls: + - charts/node-problem-detector-2.0.8.tgz + version: 2.0.8 + - apiVersion: v1 + appVersion: v0.8.10 + created: "2021-09-07T07:46:18Z" + description: | + This chart installs a [node-problem-detector](https://github.com/kubernetes/node-problem-detector) daemonset. This tool aims to make various node problems visible to the upstream layers in cluster management stack. It is a daemon which runs on each node, detects node problems and reports them to apiserver. 
+ digest: ba2890e4cc8923bb79b833c57c4ef08314f4bdd70b923d3cf7d857c000ae6090 + home: https://github.com/kubernetes/node-problem-detector + icon: https://github.com/kubernetes/kubernetes/raw/master/logo/logo.png + keywords: + - node + - problem + - detector + - monitoring + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: node-problem-detector + sources: + - https://github.com/kubernetes/node-problem-detector + - https://kubernetes.io/docs/concepts/architecture/nodes/#condition + urls: + - charts/node-problem-detector-2.0.7.tgz + version: 2.0.7 + - apiVersion: v1 + appVersion: v0.8.10 + created: "2021-09-02T14:34:16Z" + description: | + This chart installs a [node-problem-detector](https://github.com/kubernetes/node-problem-detector) daemonset. This tool aims to make various node problems visible to the upstream layers in cluster management stack. It is a daemon which runs on each node, detects node problems and reports them to apiserver. + digest: a0b813b8de17ec7491492a32193ab3e6840eb90a636b45bc9902449ee6d909f5 + home: https://github.com/kubernetes/node-problem-detector + icon: https://github.com/kubernetes/kubernetes/raw/master/logo/logo.png + keywords: + - node + - problem + - detector + - monitoring + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: node-problem-detector + sources: + - https://github.com/kubernetes/node-problem-detector + - https://kubernetes.io/docs/concepts/architecture/nodes/#condition + urls: + - charts/node-problem-detector-2.0.6.tgz + version: 2.0.6 + - apiVersion: v1 + appVersion: v0.8.9 + created: "2021-08-30T15:48:54Z" + description: | + This chart installs a [node-problem-detector](https://github.com/kubernetes/node-problem-detector) daemonset. This tool aims to make various node problems visible to the upstream layers in cluster management stack. It is a daemon which runs on each node, detects node problems and reports them to apiserver. 
+ digest: 9e2c1430f6fcd83741e2b20b8498c504927a4e12a7b201182084f065143a22bd + home: https://github.com/kubernetes/node-problem-detector + icon: https://github.com/kubernetes/kubernetes/raw/master/logo/logo.png + keywords: + - node + - problem + - detector + - monitoring + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: node-problem-detector + sources: + - https://github.com/kubernetes/node-problem-detector + - https://kubernetes.io/docs/concepts/architecture/nodes/#condition + urls: + - charts/node-problem-detector-2.0.5.tgz + version: 2.0.5 + - apiVersion: v1 + appVersion: v0.8.9 + created: "2021-08-20T15:51:21Z" + description: | + This chart installs a [node-problem-detector](https://github.com/kubernetes/node-problem-detector) daemonset. This tool aims to make various node problems visible to the upstream layers in cluster management stack. It is a daemon which runs on each node, detects node problems and reports them to apiserver. + digest: f447ef7863d0fdfa80c2e7779c8a950990638a7c9f7bb009303056435d676a99 + home: https://github.com/kubernetes/node-problem-detector + icon: https://github.com/kubernetes/kubernetes/raw/master/logo/logo.png + keywords: + - node + - problem + - detector + - monitoring + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: node-problem-detector + sources: + - https://github.com/kubernetes/node-problem-detector + - https://kubernetes.io/docs/concepts/architecture/nodes/#condition + urls: + - charts/node-problem-detector-2.0.4.tgz + version: 2.0.4 + - apiVersion: v1 + appVersion: v0.8.9 + created: "2021-07-20T10:55:13Z" + description: | + This chart installs a [node-problem-detector](https://github.com/kubernetes/node-problem-detector) daemonset. This tool aims to make various node problems visible to the upstream layers in cluster management stack. It is a daemon which runs on each node, detects node problems and reports them to apiserver. 
+ digest: 22086f3227343a26fbb091ee72f9051d1e280af516b4b58a90d8ae8bf9af6256 + home: https://github.com/kubernetes/node-problem-detector + icon: https://github.com/kubernetes/kubernetes/raw/master/logo/logo.png + keywords: + - node + - problem + - detector + - monitoring + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: node-problem-detector + sources: + - https://github.com/kubernetes/node-problem-detector + - https://kubernetes.io/docs/concepts/architecture/nodes/#condition + urls: + - charts/node-problem-detector-2.0.3.tgz + version: 2.0.3 + - apiVersion: v1 + appVersion: v0.8.8 + created: "2021-07-12T09:37:30Z" + description: | + This chart installs a [node-problem-detector](https://github.com/kubernetes/node-problem-detector) daemonset. This tool aims to make various node problems visible to the upstream layers in cluster management stack. It is a daemon which runs on each node, detects node problems and reports them to apiserver. + digest: 2ff6c8796bff51f25df6e74d817e1be5f9fb71239696ce9cf12469d294139810 + home: https://github.com/kubernetes/node-problem-detector + icon: https://github.com/kubernetes/kubernetes/raw/master/logo/logo.png + keywords: + - node + - problem + - detector + - monitoring + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: node-problem-detector + sources: + - https://github.com/kubernetes/node-problem-detector + - https://kubernetes.io/docs/concepts/architecture/nodes/#condition + urls: + - charts/node-problem-detector-2.0.2.tgz + version: 2.0.2 + - apiVersion: v1 + appVersion: v0.8.7 + created: "2021-05-12T07:31:57Z" + description: | + This chart installs a [node-problem-detector](https://github.com/kubernetes/node-problem-detector) daemonset. This tool aims to make various node problems visible to the upstream layers in cluster management stack. It is a daemon which runs on each node, detects node problems and reports them to apiserver. 
+ digest: dceacecb867c53cd55818bef667878d5048e2e4956266a0195d986b54702d188 + home: https://github.com/kubernetes/node-problem-detector + icon: https://github.com/kubernetes/kubernetes/raw/master/logo/logo.png + keywords: + - node + - problem + - detector + - monitoring + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: node-problem-detector + sources: + - https://github.com/kubernetes/node-problem-detector + - https://kubernetes.io/docs/concepts/architecture/nodes/#condition + urls: + - charts/node-problem-detector-2.0.1.tgz + version: 2.0.1 + - apiVersion: v1 + appVersion: v0.8.7 + created: "2021-03-09T11:09:14Z" + description: | + This chart installs a [node-problem-detector](https://github.com/kubernetes/node-problem-detector) daemonset. This tool aims to make various node problems visible to the upstream layers in cluster management stack. It is a daemon which runs on each node, detects node problems and reports them to apiserver. + digest: b2efd18fafa653160b972ef8d920e0160c550eeefb58226364a16ac38aab97a7 + home: https://github.com/kubernetes/node-problem-detector + icon: https://github.com/kubernetes/kubernetes/raw/master/logo/logo.png + keywords: + - node + - problem + - detector + - monitoring + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: node-problem-detector + sources: + - https://github.com/kubernetes/node-problem-detector + - https://kubernetes.io/docs/concepts/architecture/nodes/#condition + urls: + - charts/node-problem-detector-2.0.0.tgz + version: 2.0.0 + - apiVersion: v1 + appVersion: v0.8.7 + created: "2021-03-09T10:46:10Z" + description: | + This chart installs a [node-problem-detector](https://github.com/kubernetes/node-problem-detector) daemonset. This tool aims to make various node problems visible to the upstream layers in cluster management stack. It is a daemon which runs on each node, detects node problems and reports them to apiserver. 
+ digest: 08c6d83a5fdabde6dd8ed82346f6b6f138208e451a33b410d50e9cd6724dcb5c + home: https://github.com/kubernetes/node-problem-detector + icon: https://github.com/kubernetes/kubernetes/raw/master/logo/logo.png + keywords: + - node + - problem + - detector + - monitoring + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: node-problem-detector + sources: + - https://github.com/kubernetes/node-problem-detector + - https://kubernetes.io/docs/concepts/architecture/nodes/#condition + urls: + - charts/node-problem-detector-1.8.7.tgz + version: 1.8.7 + - apiVersion: v1 + appVersion: v0.8.5 + created: "2021-02-24T09:54:50Z" + description: | + This chart installs a [node-problem-detector](https://github.com/kubernetes/node-problem-detector) daemonset. This tool aims to make various node problems visible to the upstream layers in cluster management stack. It is a daemon which runs on each node, detects node problems and reports them to apiserver. + digest: 417df86c4e41d7c40f2b4c2ad41570caff0fc66618adc67ce9fce9d01995dc4b + home: https://github.com/kubernetes/node-problem-detector + icon: https://github.com/kubernetes/kubernetes/raw/master/logo/logo.png + keywords: + - node + - problem + - detector + - monitoring + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: node-problem-detector + sources: + - https://github.com/kubernetes/node-problem-detector + - https://kubernetes.io/docs/concepts/architecture/nodes/#condition + urls: + - charts/node-problem-detector-1.8.6.tgz + version: 1.8.6 + - apiVersion: v1 + appVersion: v0.8.5 + created: "2020-12-07T16:26:15Z" + description: | + This chart installs a [node-problem-detector](https://github.com/kubernetes/node-problem-detector) daemonset. This tool aims to make various node problems visible to the upstream layers in cluster management stack. It is a daemon which runs on each node, detects node problems and reports them to apiserver. 
+ digest: 908fa2702b09cddc9b02ad76efc55230f65aed81c25f3137abea5a6075eb1177 + home: https://github.com/kubernetes/node-problem-detector + icon: https://github.com/kubernetes/kubernetes/raw/master/logo/logo.png + keywords: + - node + - problem + - detector + - monitoring + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: node-problem-detector + sources: + - https://github.com/kubernetes/node-problem-detector + - https://kubernetes.io/docs/concepts/architecture/nodes/#condition + urls: + - charts/node-problem-detector-1.8.5.tgz + version: 1.8.5 + - apiVersion: v1 + appVersion: v0.8.5 + created: "2020-12-04T14:45:55Z" + description: | + This chart installs a [node-problem-detector](https://github.com/kubernetes/node-problem-detector) daemonset. This tool aims to make various node problems visible to the upstream layers in cluster management stack. It is a daemon which runs on each node, detects node problems and reports them to apiserver. + digest: d67f41c0283163c7254a35af3302f67adfec834d4a2e2055ed75692a7f457205 + home: https://github.com/kubernetes/node-problem-detector + icon: https://github.com/kubernetes/kubernetes/raw/master/logo/logo.png + keywords: + - node + - problem + - detector + - monitoring + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: node-problem-detector + sources: + - https://github.com/kubernetes/node-problem-detector + - https://kubernetes.io/docs/concepts/architecture/nodes/#condition + urls: + - charts/node-problem-detector-1.8.4.tgz + version: 1.8.4 + - apiVersion: v1 + appVersion: v0.8.5 + created: "2020-12-04T13:27:57Z" + description: | + This chart installs a [node-problem-detector](https://github.com/kubernetes/node-problem-detector) daemonset. This tool aims to make various node problems visible to the upstream layers in cluster management stack. It is a daemon which runs on each node, detects node problems and reports them to apiserver. 
+ digest: 5d752483cc2206efaeadef82a60dc56f3b1f1240437b1340cbccbec7342c46b6 + home: https://github.com/kubernetes/node-problem-detector + icon: https://github.com/kubernetes/kubernetes/raw/master/logo/logo.png + keywords: + - node + - problem + - detector + - monitoring + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: node-problem-detector + sources: + - https://github.com/kubernetes/node-problem-detector + - https://kubernetes.io/docs/concepts/architecture/nodes/#condition + urls: + - charts/node-problem-detector-1.8.3.tgz + version: 1.8.3 + - apiVersion: v1 + appVersion: v0.8.4 + created: "2020-11-18T12:00:35Z" + description: | + This chart installs a [node-problem-detector](https://github.com/kubernetes/node-problem-detector) daemonset. This tool aims to make various node problems visible to the upstream layers in cluster management stack. It is a daemon which runs on each node, detects node problems and reports them to apiserver. + digest: 6b0427b5620efd6fa1d46da1e1aef08d1c07389005859ce69d45a57f2ad364c4 + home: https://github.com/kubernetes/node-problem-detector + icon: https://github.com/kubernetes/kubernetes/raw/master/logo/logo.png + keywords: + - node + - problem + - detector + - monitoring + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: node-problem-detector + sources: + - https://github.com/kubernetes/node-problem-detector + - https://kubernetes.io/docs/concepts/architecture/nodes/#condition + urls: + - charts/node-problem-detector-1.8.2.tgz + version: 1.8.2 + - apiVersion: v1 + appVersion: v0.8.4 + created: "2020-11-16T22:07:51Z" + description: | + This chart installs a [node-problem-detector](https://github.com/kubernetes/node-problem-detector) daemonset. This tool aims to make various node problems visible to the upstream layers in cluster management stack. It is a daemon which runs on each node, detects node problems and reports them to apiserver. 
+ digest: 6944f1e727f1f39da343825ec1cc0e48eee3a5b23b173cbf89acd0ea77e3e5d7 + home: https://github.com/kubernetes/node-problem-detector + icon: https://github.com/kubernetes/kubernetes/raw/master/logo/logo.png + keywords: + - node + - problem + - detector + - monitoring + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: node-problem-detector + sources: + - https://github.com/kubernetes/node-problem-detector + - https://kubernetes.io/docs/concepts/architecture/nodes/#condition + urls: + - charts/node-problem-detector-1.8.1.tgz + version: 1.8.1 + - apiVersion: v1 + appVersion: v0.8.1 + created: "2020-11-02T18:26:50Z" + description: | + This chart installs a [node-problem-detector](https://github.com/kubernetes/node-problem-detector) daemonset. This tool aims to make various node problems visible to the upstream layers in cluster management stack. It is a daemon which runs on each node, detects node problems and reports them to apiserver. + digest: b43c43c62423755ca89ae60c5a7bc4b7492e824a6844647b815313f505ffbbed + home: https://github.com/kubernetes/node-problem-detector + icon: https://github.com/kubernetes/kubernetes/raw/master/logo/logo.png + keywords: + - node + - problem + - detector + - monitoring + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: node-problem-detector + sources: + - https://github.com/kubernetes/node-problem-detector + - https://kubernetes.io/docs/concepts/architecture/nodes/#condition + urls: + - charts/node-problem-detector-1.8.0.tgz + version: 1.8.0 + pg-repack-scheduler: + - apiVersion: v1 + appVersion: "1.0" + created: "2024-10-04T09:09:55Z" + description: 'For running [pg_repack](https://github.com/reorg/pg_repack) as a + `Job` or `CronJob`. 
By default the `CronJob` is disabled but a new `Job` can + be created adhoc like this: ```console kubectl create job pg-repack --from=cronjob/pg-repack-scheduler + ``` ' + digest: 2d16202338a4c3d4c242b2dc5f9022e287965bca778796de28e95015742439d2 + home: https://github.com/deliveryhero/helm-charts + icon: https://github.com/kubernetes/kubernetes/raw/master/logo/logo.png + keywords: + - scaling + - cost saving + maintainers: + - email: max.williams@deliveryhero.com + name: max-rocket-internet + name: pg-repack-scheduler + sources: + - https://github.com/reorg/pg_repack + urls: + - charts/pg-repack-scheduler-1.0.tgz + version: "1.0" + postgres-controller: + - apiVersion: v1 + appVersion: "0.5" + created: "2024-10-04T09:09:56Z" + description: A controller for managing PostgreSQL databases, roles and more + digest: d3c1dad3faa63b359fa7f68e6fc7c210ad5423679dc118d0717682880f0a3838 + home: https://github.com/max-rocket-internet/postgres-controller + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: postgres-controller + sources: + - https://github.com/max-rocket-internet/postgres-controller + urls: + - charts/postgres-controller-1.3.tgz + version: "1.3" + - apiVersion: v1 + appVersion: "0.5" + created: "2023-02-10T16:01:24Z" + description: A controller for managing PostgreSQL databases, roles and more + digest: d843e45fb1bb30fa7c28516576d141c743c4a95f20de263baadbde86445ddc45 + home: https://github.com/max-rocket-internet/postgres-controller + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: postgres-controller + sources: + - https://github.com/max-rocket-internet/postgres-controller + urls: + - charts/postgres-controller-1.2.tgz + version: "1.2" + - apiVersion: v1 + appVersion: "0.5" + created: "2021-04-30T09:30:36Z" + description: A controller for managing PostgreSQL databases, roles and more + digest: fd55b741dcbf46663111da3165fce0baa0e7e7e20e5c28b28454a9b3369cc2ee + home: 
https://github.com/max-rocket-internet/postgres-controller + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: postgres-controller + sources: + - https://github.com/max-rocket-internet/postgres-controller + urls: + - charts/postgres-controller-1.1.tgz + version: "1.1" + postwoman: + - apiVersion: v1 + appVersion: v1.9.7 + created: "2020-09-02T08:52:28Z" + description: A free, fast and beautiful API request builder + digest: 1f4f44d1b8d3f332e5b524049d7fb8822dc74dab55f8f4dc93f992c681cb741f + home: https://github.com/postwoman-io/postwoman + maintainers: + - email: no-reply@deliveryhero.com + name: javad-hajiani + name: postwoman + sources: + - https://github.com/postwoman-io/postwoman + urls: + - charts/postwoman-0.2.5.tgz + version: 0.2.5 + priority-class: + - apiVersion: v1 + appVersion: "1.0" + created: "2024-10-04T09:09:56Z" + description: A very simple chart that creates priority classes + digest: 3020fb2caccfcc3eca1e3ad4f8298741c4fe7f144a83f4d9f74dbcdd511fc174 + home: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: priority-class + urls: + - charts/priority-class-0.1.1.tgz + version: 0.1.1 + prometheus-aws-costs-exporter: + - apiVersion: v1 + appVersion: "1.0" + created: "2024-10-04T09:09:57Z" + description: Exporter for AWS Cost Explorer daily costs + digest: 3953fa62c41888ee632cc79d863cde1d2a18909c4a2dea3a400be587631a9e21 + home: https://github.com/nachomillangarcia/prometheus_aws_cost_exporter + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: prometheus-aws-costs-exporter + sources: + - https://github.com/nachomillangarcia/prometheus_aws_cost_exporter + urls: + - charts/prometheus-aws-costs-exporter-0.1.4.tgz + version: 0.1.4 + - apiVersion: v1 + appVersion: "1.0" + created: "2021-04-20T12:18:02Z" + description: Exporter for AWS Cost Explorer daily costs + digest: 
ec63694097440fe3c4e90e276f457bb01d61e43eef97af4ef49fc49d3c8658e8 + home: https://github.com/nachomillangarcia/prometheus_aws_cost_exporter + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: prometheus-aws-costs-exporter + sources: + - https://github.com/nachomillangarcia/prometheus_aws_cost_exporter + urls: + - charts/prometheus-aws-costs-exporter-0.1.3.tgz + version: 0.1.3 + - apiVersion: v1 + appVersion: "1.0" + created: "2020-11-13T08:32:08Z" + description: Exporter for AWS Cost Explorer daily costs + digest: ef4faeda544cb7354c4ad205371287dc85ad094e884ce2f12bb0ebe01395b13a + home: https://github.com/nachomillangarcia/prometheus_aws_cost_exporter + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: prometheus-aws-costs-exporter + sources: + - https://github.com/nachomillangarcia/prometheus_aws_cost_exporter + urls: + - charts/prometheus-aws-costs-exporter-0.1.2.tgz + version: 0.1.2 + prometheus-aws-health-exporter: + - apiVersion: v1 + appVersion: "1.0" + created: "2024-10-04T09:09:57Z" + description: AWS Health API Exporter for Prometheus + digest: 78984d07faee1f6d4c057439160deb77251433fd8fbf56ffe2911aa467d69f9e + home: https://github.com/Jimdo/aws-health-exporter + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + - email: no-reply@deliveryhero.com + name: javad-hajiani + name: prometheus-aws-health-exporter + sources: + - https://github.com/Jimdo/aws-health-exporter + urls: + - charts/prometheus-aws-health-exporter-0.1.5.tgz + version: 0.1.5 + - apiVersion: v1 + appVersion: "1.0" + created: "2022-05-10T14:52:41Z" + description: AWS Health API Exporter for Prometheus + digest: 11931caa55621904b3319982c0f33e2477acd9093e6c4de52dc21812259feece + home: https://github.com/Jimdo/aws-health-exporter + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: prometheus-aws-health-exporter + sources: + - 
https://github.com/Jimdo/aws-health-exporter + urls: + - charts/prometheus-aws-health-exporter-0.1.4.tgz + version: 0.1.4 + - apiVersion: v1 + appVersion: "1.0" + created: "2022-05-04T10:09:28Z" + description: AWS Health API Exporter for Prometheus + digest: deb0fba0612f20368cbb4b7a358a6682aaa6f70a6265e7545b1e5e5289a2f958 + home: https://github.com/Jimdo/aws-health-exporter + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: prometheus-aws-health-exporter + sources: + - https://github.com/Jimdo/aws-health-exporter + urls: + - charts/prometheus-aws-health-exporter-0.1.3.tgz + version: 0.1.3 + - apiVersion: v1 + appVersion: "1.0" + created: "2020-11-13T08:32:09Z" + description: AWS Health API Exporter for Prometheus + digest: 856e90be6a3e182cafe1ffb3b32a4eb2be530b4cf854e88e86cc192f4c615a72 + home: https://github.com/Jimdo/aws-health-exporter + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: prometheus-aws-health-exporter + sources: + - https://github.com/Jimdo/aws-health-exporter + urls: + - charts/prometheus-aws-health-exporter-0.1.2.tgz + version: 0.1.2 + prometheus-aws-limits-exporter: + - apiVersion: v2 + appVersion: 0.6.0 + created: "2024-10-04T09:09:58Z" + description: 'This helmchart provides a Prometheus metrics endpoint that exposes + AWS usage and limits as reported by the AWS Trusted Advisor API. 
' + digest: 5c840664246f53d8d2e72d7027dc2dc44550da7f5f1aceffc6949bbcc8fb0c53 + home: https://github.com/danielfm/aws-limits-exporter + maintainers: + - email: no-reply@deliveryhero.com + name: javad-hajiani + name: prometheus-aws-limits-exporter + sources: + - https://github.com/danielfm/aws-limits-exporter + type: application + urls: + - charts/prometheus-aws-limits-exporter-0.2.1.tgz + version: 0.2.1 + - apiVersion: v2 + appVersion: 0.6.0 + created: "2022-04-26T10:53:42Z" + description: 'This helmchart provides a Prometheus metrics endpoint that exposes + AWS usage and limits as reported by the AWS Trusted Advisor API. ' + digest: 1bd77c3f39e0126aab3afe5824359b94bef02e570314c9fc3710183d82a9c9c6 + home: https://github.com/danielfm/aws-limits-exporter + maintainers: + - email: no-reply@deliveryhero.com + name: javad-hajiani + name: prometheus-aws-limits-exporter + sources: + - https://github.com/danielfm/aws-limits-exporter + type: application + urls: + - charts/prometheus-aws-limits-exporter-0.2.0.tgz + version: 0.2.0 + - apiVersion: v2 + appVersion: 0.4.0 + created: "2021-11-09T10:00:16Z" + description: 'This helmchart provides a Prometheus metrics endpoint that exposes + AWS usage and limits as reported by the AWS Trusted Advisor API. ' + digest: 80099ff43e0ce6759d2e1689655afb3ff518114e9c7c41bfe060c9eff1d95654 + home: https://github.com/danielfm/aws-limits-exporter + maintainers: + - email: no-reply@deliveryhero.com + name: javad-hajiani + name: prometheus-aws-limits-exporter + sources: + - https://github.com/danielfm/aws-limits-exporter + type: application + urls: + - charts/prometheus-aws-limits-exporter-0.1.4.tgz + version: 0.1.4 + - apiVersion: v2 + appVersion: 0.4.0 + created: "2021-10-21T11:39:52Z" + description: 'This helmchart provides a Prometheus metrics endpoint that exposes + AWS usage and limits as reported by the AWS Trusted Advisor API. 
' + digest: e42b35e1b23bf39ee4a36333c8161819fdccfaf29cc92771b65caefe9e89ecf7 + home: https://github.com/danielfm/aws-limits-exporter + maintainers: + - email: no-reply@deliveryhero.com + name: javad-hajiani + name: prometheus-aws-limits-exporter + sources: + - https://github.com/danielfm/aws-limits-exporter + type: application + urls: + - charts/prometheus-aws-limits-exporter-0.1.3.tgz + version: 0.1.3 + - apiVersion: v2 + appVersion: 0.4.0 + created: "2021-08-16T13:22:56Z" + description: | + This helmchart provides a Prometheus metrics endpoint that exposes AWS usage and limits as reported by the AWS Trusted Advisor API. + digest: e8c477cdf36f5508ddb293d9d0f9aa398f30d1a38db36a5c04cd0a88633b6202 + home: https://github.com/danielfm/aws-limits-exporter + maintainers: + - email: no-reply@deliveryhero.com + name: javad-hajiani + name: prometheus-aws-limits-exporter + sources: + - https://github.com/danielfm/aws-limits-exporter + type: application + urls: + - charts/prometheus-aws-limits-exporter-0.1.2.tgz + version: 0.1.2 + - apiVersion: v2 + appVersion: 0.4.0 + created: "2021-08-16T13:06:49Z" + description: | + This helmchart provides a Prometheus metrics endpoint that exposes AWS usage and limits as reported by the AWS Trusted Advisor API. + digest: 6ce0c9ee3bfa4856fd97897a4cae45b33016bf03136f3ee7321a62e4a2daf814 + home: https://github.com/danielfm/aws-limits-exporter + maintainers: + - email: no-reply@deliveryhero.com + name: javad-hajiani + name: prometheus-aws-limits-exporter + sources: + - https://github.com/danielfm/aws-limits-exporter + type: application + urls: + - charts/prometheus-aws-limits-exporter-0.1.1.tgz + version: 0.1.1 + - apiVersion: v2 + appVersion: 0.4.0 + created: "2021-08-16T07:57:20Z" + description: | + This helmchart provides a Prometheus metrics endpoint that exposes AWS usage and limits as reported by the AWS Trusted Advisor API. 
+ digest: 751920851ebac888d30b35066329a3116409a3fba574973a5941c8b495a32c40 + home: https://github.com/danielfm/aws-limits-exporter + maintainers: + - email: no-reply@deliveryhero.com + name: javad-hajiani + name: prometheus-aws-limits-exporter + sources: + - https://github.com/danielfm/aws-limits-exporter + type: application + urls: + - charts/prometheus-aws-limits-exporter-0.1.0.tgz + version: 0.1.0 + prometheus-cloudflare-exporter: + - apiVersion: v1 + appVersion: 1.2.0 + created: "2024-10-04T09:09:58Z" + description: Cloudflare metrics exporter + digest: b981adc393f332b2751c265093766cc17bbfbe6666c0e58c40b0c4bf5807ff1d + home: https://github.com/wehkamp/docker-prometheus-cloudflare-exporter + maintainers: + - email: no-reply@deliveryhero.com + name: javad-hajiani + name: prometheus-cloudflare-exporter + sources: + - https://github.com/wehkamp/docker-prometheus-cloudflare-exporter + urls: + - charts/prometheus-cloudflare-exporter-0.0.1.tgz + version: 0.0.1 + prometheus-darksky-exporter: + - apiVersion: v1 + appVersion: v0.0.4 + created: "2021-04-30T09:30:40Z" + description: A Helm chart to export Dark Sky results + digest: a412cca44975d25cbaa94c27eb43b4abd618bc32e0ea1f240af87723d385a4b6 + home: https://github.com/billykwooten/darksky-exporter + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: prometheus-darksky-exporter + sources: + - https://github.com/billykwooten/darksky-exporter + urls: + - charts/prometheus-darksky-exporter-0.1.3.tgz + version: 0.1.3 + - apiVersion: v1 + appVersion: v0.0.4 + created: "2020-11-13T08:32:09Z" + description: A Helm chart to export Dark Sky results + digest: 4c3e0a49add2b953102503d610849fd7fe48a48559bbb14e1c02d2f723e0af2a + home: https://github.com/billykwooten/darksky-exporter + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: prometheus-darksky-exporter + sources: + - https://github.com/billykwooten/darksky-exporter + urls: + - 
charts/prometheus-darksky-exporter-0.1.2.tgz + version: 0.1.2 + prometheus-k8s-events-exporter: + - apiVersion: v1 + appVersion: v1.0.0 + created: "2024-10-04T09:09:59Z" + description: Exporter for kubernetes events + digest: e04fa5217af92d8309c2f0af575e0162d7e73625df69009678183f51574ffd85 + home: https://github.com/caicloud/event_exporter + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: prometheus-k8s-events-exporter + sources: + - https://github.com/caicloud/event_exporter + urls: + - charts/prometheus-k8s-events-exporter-0.2.1.tgz + version: 0.2.1 + - apiVersion: v1 + appVersion: v0.2.0 + created: "2023-03-29T09:28:21Z" + description: Exporter for kubernetes events + digest: 7689637cbdaa24f7c4fa1d2452ab1a2febce04446abf61d6a58e1a9f847a1402 + home: https://github.com/caicloud/event_exporter + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: prometheus-k8s-events-exporter + sources: + - https://github.com/caicloud/event_exporter + urls: + - charts/prometheus-k8s-events-exporter-0.2.0.tgz + version: 0.2.0 + - apiVersion: v1 + appVersion: v0.2.0 + created: "2023-01-17T19:55:06Z" + description: Exporter for kubernetes events + digest: ddc7807078897973d7a0740cfd38b05bd68a542d51e56be841485259bc02e8fa + home: https://github.com/caicloud/event_exporter + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: prometheus-k8s-events-exporter + sources: + - https://github.com/caicloud/event_exporter + urls: + - charts/prometheus-k8s-events-exporter-0.1.3.tgz + version: 0.1.3 + - apiVersion: v1 + appVersion: v0.2.0 + created: "2020-11-13T08:32:10Z" + description: Exporter for kubernetes events + digest: 4ba22a501f519ddb40b4bb38476da4ace2c0dda4c506e10fe980b23ae91aa230 + home: https://github.com/caicloud/event_exporter + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: prometheus-k8s-events-exporter + sources: + - 
https://github.com/caicloud/event_exporter + urls: + - charts/prometheus-k8s-events-exporter-0.1.2.tgz + version: 0.1.2 + prometheus-locust-exporter: + - apiVersion: v1 + appVersion: v0.4.1 + created: "2024-10-04T09:09:59Z" + description: A Helm chart a prometheus exporter locust load test metrics + digest: 35611aaff1b3ffd11fbd3d0d07ef258e6fbd4078dda31328140ada0071d9fc1f + home: https://github.com/ContainerSolutions/locust_exporter + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: prometheus-locust-exporter + sources: + - https://github.com/ContainerSolutions/locust_exporter + urls: + - charts/prometheus-locust-exporter-1.2.2.tgz + version: 1.2.2 + - apiVersion: v1 + appVersion: v0.4.1 + created: "2024-04-22T13:11:10Z" + description: A Helm chart a prometheus exporter locust load test metrics + digest: f30b184949b69e990a7f1aa5d087df75c72780831422d816e4fe6872a736039c + home: https://github.com/ContainerSolutions/locust_exporter + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: prometheus-locust-exporter + sources: + - https://github.com/ContainerSolutions/locust_exporter + urls: + - charts/prometheus-locust-exporter-1.2.1.tgz + version: 1.2.1 + - apiVersion: v1 + appVersion: v0.4.1 + created: "2023-09-27T07:46:13Z" + description: A Helm chart a prometheus exporter locust load test metrics + digest: 3158c873002d8c8e2e330742d9952e560428fb592f3efce7cbf28b3c4d01ece2 + home: https://github.com/ContainerSolutions/locust_exporter + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: prometheus-locust-exporter + sources: + - https://github.com/ContainerSolutions/locust_exporter + urls: + - charts/prometheus-locust-exporter-1.2.0.tgz + version: 1.2.0 + - apiVersion: v1 + appVersion: v0.4.1 + created: "2023-08-30T07:22:34Z" + description: A Helm chart a prometheus exporter locust load test metrics + digest: 
9d9e6fac22b441a82471743f871be46fcd630e0e34d9362fa30eac7bd833f47a + home: https://github.com/ContainerSolutions/locust_exporter + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: prometheus-locust-exporter + sources: + - https://github.com/ContainerSolutions/locust_exporter + urls: + - charts/prometheus-locust-exporter-1.1.0.tgz + version: 1.1.0 + - apiVersion: v1 + appVersion: v0.4.1 + created: "2022-12-29T11:48:20Z" + description: A Helm chart a prometheus exporter locust load test metrics + digest: 078e4e9a45bf3f08ea9c78ffdf847c6340d8c75c13474e3393bff3fece9cd8ab + home: https://github.com/ContainerSolutions/locust_exporter + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: prometheus-locust-exporter + sources: + - https://github.com/ContainerSolutions/locust_exporter + urls: + - charts/prometheus-locust-exporter-1.0.1.tgz + version: 1.0.1 + - apiVersion: v1 + appVersion: v0.3.0 + created: "2021-02-24T14:16:47Z" + description: A Helm chart a prometheus exporter locust load test metrics + digest: a944fd12d0513b4e52235ca272f01512cfd66cb31dec13d070becb5eb5e8d3ab + home: https://github.com/ContainerSolutions/locust_exporter + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: prometheus-locust-exporter + sources: + - https://github.com/ContainerSolutions/locust_exporter + urls: + - charts/prometheus-locust-exporter-1.0.0.tgz + version: 1.0.0 + prometheus-new-relic-app-exporter: + - apiVersion: v1 + appVersion: 0.0.2 + created: "2024-10-04T09:09:59Z" + description: A Helm chart a prometheus exporter for a single New Relic application + digest: 47fe0173e3bd32a72e25a75fee0c9cb93046cce313afe9ab35ac8b7c03973f8d + home: https://github.com/previousnext/newrelic-exporter + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: prometheus-new-relic-app-exporter + sources: + - https://github.com/previousnext/newrelic-exporter + urls: + - 
charts/prometheus-new-relic-app-exporter-1.0.0.tgz + version: 1.0.0 + prometheus-new-relic-exporter: + - apiVersion: v1 + appVersion: "1.0" + created: "2024-10-04T09:10:00Z" + description: Prometheus exporter for New Relic data. Requires a New Relic account. + digest: d0d10ef6cb6d35653ec50d8db8b4877e90f01bf8255e41d80a90726f72ab7497 + home: https://github.com/jfindley/newrelic_exporter + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: prometheus-new-relic-exporter + sources: + - https://github.com/jfindley/newrelic_exporter + urls: + - charts/prometheus-new-relic-exporter-0.1.3.tgz + version: 0.1.3 + - apiVersion: v1 + appVersion: "1.0" + created: "2020-11-13T08:32:10Z" + description: Prometheus exporter for New Relic data. Requires a New Relic account. + digest: 4695429b45dd2b9dddd77818b047cbb940834d949cc12c668f03fde2e8c1ce1e + home: https://github.com/jfindley/newrelic_exporter + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: prometheus-new-relic-exporter + sources: + - https://github.com/jfindley/newrelic_exporter + urls: + - charts/prometheus-new-relic-exporter-0.1.2.tgz + version: 0.1.2 + prometheus-sentry-exporter: + - apiVersion: v1 + appVersion: 0.5.0 + created: "2024-10-04T09:10:00Z" + description: Exports sentry project metrics for prometheus. + digest: 8d12a3e7693fe1e5f08d4bacc6ecaa2e320747989639f698ae0f6a6c9c304487 + home: https://github.com/ujamii/prometheus-sentry-exporter + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: prometheus-sentry-exporter + sources: + - https://github.com/ujamii/prometheus-sentry-exporter + urls: + - charts/prometheus-sentry-exporter-0.1.4.tgz + version: 0.1.4 + - apiVersion: v1 + appVersion: 0.5.0 + created: "2021-07-01T12:50:14Z" + description: Exports sentry project metrics for prometheus. 
+ digest: 68e72dff52df4392dd199c72f69dfcc66d0659d26f8e2fce9956a7f52b97f3f9 + home: https://github.com/ujamii/prometheus-sentry-exporter + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: prometheus-sentry-exporter + sources: + - https://github.com/ujamii/prometheus-sentry-exporter + urls: + - charts/prometheus-sentry-exporter-0.1.3.tgz + version: 0.1.3 + - apiVersion: v1 + appVersion: 0.5.0 + created: "2020-11-13T08:32:11Z" + description: Exports sentry project metrics for prometheus. + digest: 4a246cefe0c58a2753480c877e0c9255d842aa9798d012c55e1cb51fc2dae33d + home: https://github.com/ujamii/prometheus-sentry-exporter + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: prometheus-sentry-exporter + sources: + - https://github.com/ujamii/prometheus-sentry-exporter + urls: + - charts/prometheus-sentry-exporter-0.1.2.tgz + version: 0.1.2 + prometheus-soti-mobicontrol-exporter: + - apiVersion: v1 + appVersion: "0.6" + created: "2024-10-04T09:10:00Z" + description: A Helm chart a prometheus exporter for SOTI MobiControl metrics + digest: 2a1a0ca11f5f9ad391e5fc7d58525860ec63a0180f5a7471c35f929b16455036 + home: https://github.com/max-rocket-internet/soti-mobicontrol-exporter + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: prometheus-soti-mobicontrol-exporter + sources: + - https://github.com/max-rocket-internet/soti-mobicontrol-exporter + urls: + - charts/prometheus-soti-mobicontrol-exporter-1.0.2.tgz + version: 1.0.2 + prometheus-spot-termination-exporter: + - apiVersion: v1 + appVersion: 0.0.2 + created: "2024-10-04T09:10:01Z" + description: Spot instance termination exporter for Prometheus + digest: 39df1c2e61b5238cf0a4649050101b7c79028d9192a55a93f8ef56e78c436b7f + home: https://github.com/banzaicloud/spot-termination-exporter + maintainers: + - email: no-reply@deliveryhero.com + name: sietevecesmal + name: prometheus-spot-termination-exporter + sources: + - 
https://github.com/banzaicloud/spot-termination-exporter + urls: + - charts/prometheus-spot-termination-exporter-0.2.10.tgz + version: 0.2.10 + - apiVersion: v1 + appVersion: 0.0.2 + created: "2023-07-12T14:52:08Z" + description: Spot instance termination exporter for Prometheus + digest: 225c5642891edf8a987126c68e0f02420198740e0721168abdb8bc43357d67a7 + home: https://github.com/banzaicloud/spot-termination-exporter + maintainers: + - email: no-reply@deliveryhero.com + name: sietevecesmal + name: prometheus-spot-termination-exporter + sources: + - https://github.com/banzaicloud/spot-termination-exporter + urls: + - charts/prometheus-spot-termination-exporter-0.2.9.tgz + version: 0.2.9 + - apiVersion: v1 + appVersion: 0.0.1 + created: "2023-07-06T12:33:41Z" + description: Spot instance termination exporter for Prometheus + digest: 21c8bb2af77b7784380a0fc25a2b807799332e4a3d132176ee61e981473f3e01 + home: https://github.com/banzaicloud/spot-termination-exporter + maintainers: + - email: no-reply@deliveryhero.com + name: sietevecesmal + name: prometheus-spot-termination-exporter + sources: + - https://github.com/banzaicloud/spot-termination-exporter + urls: + - charts/prometheus-spot-termination-exporter-0.2.8.tgz + version: 0.2.8 + - apiVersion: v1 + appVersion: 0.0.1 + created: "2020-10-15T14:01:50Z" + description: Spot instance termination exporter for Prometheus + digest: 277b8bb26613b3b6fcc79bfac234690bbea8c717bbc8400ade45dff7885d031f + home: https://github.com/banzaicloud/spot-termination-exporter + maintainers: + - email: no-reply@deliveryhero.com + name: sietevecesmal + name: prometheus-spot-termination-exporter + sources: + - https://github.com/banzaicloud/spot-termination-exporter + urls: + - charts/prometheus-spot-termination-exporter-0.2.7.tgz + version: 0.2.7 + - apiVersion: v1 + appVersion: 0.0.1 + created: "2020-10-15T08:05:20Z" + description: Spot instance termination exporter for Prometheus + digest: 
5c19febf0f185d724b2f2ccdac6c2acf2ef900aaa09da1d728fa404ca0d5a5ef + home: https://github.com/banzaicloud/spot-termination-exporter + maintainers: + - email: no-reply@deliveryhero.com + name: sietevecesmal + name: prometheus-spot-termination-exporter + sources: + - https://github.com/banzaicloud/spot-termination-exporter + urls: + - charts/prometheus-spot-termination-exporter-0.2.6.tgz + version: 0.2.6 + prometheus-statsd-exporter: + - apiVersion: v1 + appVersion: v0.18.0 + created: "2024-10-04T09:10:01Z" + description: StatsD to Prometheus metrics exporter + digest: 9e7eea48a77c6de3e784adbfcb3100378fcf3aa4f8fb95db322da45cd5b008da + home: https://github.com/prometheus/statsd_exporter + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: prometheus-statsd-exporter + sources: + - https://github.com/prometheus/statsd_exporter + urls: + - charts/prometheus-statsd-exporter-0.1.4.tgz + version: 0.1.4 + - apiVersion: v1 + appVersion: v0.18.0 + created: "2024-04-11T09:30:27Z" + description: StatsD to Prometheus metrics exporter + digest: 9642a743e36bdd3f7aea3a42d563f5a6760dd8af6f2f7c3255c53b8d297963b8 + home: https://github.com/prometheus/statsd_exporter + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: prometheus-statsd-exporter + sources: + - https://github.com/prometheus/statsd_exporter + urls: + - charts/prometheus-statsd-exporter-0.1.3.tgz + version: 0.1.3 + - apiVersion: v1 + appVersion: v0.18.0 + created: "2020-11-13T08:32:13Z" + description: StatsD to Prometheus metrics exporter + digest: 429450d7c81787d08806c33547f2c95b128a39179fe199e1ea717fda23098056 + home: https://github.com/prometheus/statsd_exporter + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: prometheus-statsd-exporter + sources: + - https://github.com/prometheus/statsd_exporter + urls: + - charts/prometheus-statsd-exporter-0.1.2.tgz + version: 0.1.2 + rds-downscaler: + - apiVersion: v1 + 
appVersion: "1.0" + created: "2024-10-04T09:10:02Z" + description: 'A small python script that runs on a cron schedule and periodically + downscales AWS RDS instances. It will filter RDS instances/clusters by tag + key and value or a particular cluster specified with cluster identifier. ' + digest: 0926de159a423a27e02582932de4b89a6a75dbc0f0c4ff343d91a10ef7813ba3 + home: https://github.com/deliveryhero/helm-charts + icon: https://github.com/kubernetes/kubernetes/raw/master/logo/logo.png + keywords: + - scaling + - cost saving + maintainers: + - email: max.williams@deliveryhero.com + name: max-rocket-internet + name: rds-downscaler + urls: + - charts/rds-downscaler-1.0.4.tgz + version: 1.0.4 + - apiVersion: v1 + appVersion: "1.0" + created: "2021-01-11T16:10:54Z" + description: | + A small python script that runs on a cron schedule and periodically downscales AWS RDS instances. + + It will filter RDS instances/clusters by tag key and value or a particular instance specified with rds instance identifier. + digest: 1a44ac40ee5ab7f138b890eff74795fbcee02f22cdf0b863e16c57bd18d385a9 + home: https://github.com/deliveryhero/helm-charts + icon: https://github.com/kubernetes/kubernetes/raw/master/logo/logo.png + keywords: + - scaling + - cost saving + maintainers: + - email: max.williams@deliveryhero.com + name: max-rocket-internet + name: rds-downscaler + urls: + - charts/rds-downscaler-1.0.3.tgz + version: 1.0.3 + - apiVersion: v1 + appVersion: "1.0" + created: "2021-01-11T14:13:36Z" + description: | + A small python script that runs on a cron schedule and periodically downscales AWS RDS instances. + + It will filter RDS instances/clusters by tag key and value or a particular instance specified with rds instance identifier. 
+ digest: 805b91d3603e778090965b171528d862514a55f4c3ad617711c2b2e97bf41435 + home: https://github.com/deliveryhero/helm-charts + icon: https://github.com/kubernetes/kubernetes/raw/master/logo/logo.png + keywords: + - scaling + - cost saving + maintainers: + - email: max.williams@deliveryhero.com + name: max-rocket-internet + name: rds-downscaler + urls: + - charts/rds-downscaler-1.0.2.tgz + version: 1.0.2 + - apiVersion: v1 + appVersion: "1.0" + created: "2021-01-07T15:18:53Z" + description: | + A small python script that runs on a cron schedule and periodically downscales AWS RDS instances. + + It will filter RDS instances/clusters by tag key and value. + digest: 1790c6016df252c7bcac1a67764d2ae64de782b39527b154ad3f62ca02226f5c + home: https://github.com/deliveryhero/helm-charts + icon: https://github.com/kubernetes/kubernetes/raw/master/logo/logo.png + keywords: + - scaling + - cost saving + maintainers: + - email: max.williams@deliveryhero.com + name: max-rocket-internet + name: rds-downscaler + urls: + - charts/rds-downscaler-1.0.1.tgz + version: 1.0.1 + service-account: + - apiVersion: v1 + created: "2024-10-04T09:10:02Z" + description: 'Creates a ServiceAccount, ClusterRoleBinding and a ClusterRole with + some provided rules. This is useful when used with [IAM roles for service accounts](https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html) ' + digest: 3d73c2da52789f3ae5d0d18bd80fa4fccb0133946e5d4d80db11614ce1dc67fa + home: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: service-account + urls: + - charts/service-account-1.1.0.tgz + version: 1.1.0 + - apiVersion: v1 + created: "2022-11-21T14:53:35Z" + description: 'Creates a ServiceAccount, ClusterRoleBinding and a ClusterRole with + some provided rules. 
This is useful when used with [IAM roles for service accounts](https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html) ' + digest: bbd0eb4b900ceb57995568dfff1476273a2d06e1ba9a64abe3dfed1f823f9ed9 + home: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: service-account + urls: + - charts/service-account-1.0.2.tgz + version: 1.0.2 + - apiVersion: v1 + created: "2021-04-26T12:17:11Z" + description: | + Creates a ServiceAccount, ClusterRoleBinding and a ClusterRole with some provided rules. + + This is useful when used with [IAM roles for service accounts](https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html) + digest: e312468f667f88b474533e9400a35bae6fe55bec59195d08aadb21d0683b6463 + home: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ + maintainers: + - email: no-reply@deliveryhero.com + name: max-rocket-internet + name: service-account + urls: + - charts/service-account-1.0.1.tgz + version: 1.0.1 + superset: + - apiVersion: v2 + appVersion: latest + created: "2024-10-04T09:10:03Z" + description: A Helm chart for Apache Superset + digest: 7d76255e6a0e3641ed3d7c5049f5d9b9893bf43fcf52af5ed130ae8a82993ad9 + home: https://superset.apache.org/ + keywords: + - apache + - supertest + maintainers: + - email: thomas.nyambati@deliveryhero.com + name: nyambati + name: superset + sources: + - https://github.com/apache/superset + type: application + urls: + - charts/superset-1.1.2.tgz + version: 1.1.2 + - apiVersion: v2 + appVersion: latest + created: "2023-09-07T15:34:45Z" + description: A Helm chart for Apache Superset + digest: 19d78be7cc472182106576b9905a24cf28c799a19d89eaea4956dc0b285cf29b + home: https://superset.apache.org/ + keywords: + - apache + - supertest + maintainers: + - email: thomas.nyambati@deliveryhero.com + name: nyambati + name: superset + 
sources: + - https://github.com/apache/superset + type: application + urls: + - charts/superset-1.1.1.tgz + version: 1.1.1 + - apiVersion: v2 + appVersion: latest + created: "2023-09-07T13:50:44Z" + description: A Helm chart for Apache Superset + digest: 028cc4277cae2bc3fba3b37e568e983201f084e31580e23b784703df95036e61 + home: https://superset.apache.org/ + keywords: + - apache + - supertest + maintainers: + - email: thomas.nyambati@deliveryhero.com + name: nyambati + name: superset + sources: + - https://github.com/apache/superset + type: application + urls: + - charts/superset-1.1.0.tgz + version: 1.1.0 + - apiVersion: v2 + appVersion: latest + created: "2023-05-08T14:35:37Z" + description: A Helm chart for Apache Superset + digest: e49a65197e3a9e20ffc6ad65a93ea912886f308e08c02fba5d38c03edab66dd0 + home: https://superset.apache.org/ + keywords: + - apache + - supertest + maintainers: + - email: thomas.nyambati@deliveryhero.com + name: nyambati + name: superset + sources: + - https://github.com/apache/superset + type: application + urls: + - charts/superset-1.0.15.tgz + version: 1.0.15 + - apiVersion: v2 + appVersion: latest + created: "2023-01-19T09:41:08Z" + description: A Helm chart for Apache Superset + digest: 006de0f6b3e0f6346b31f28df00dc7a0497ac50e04135df580a8083f30c49d73 + home: https://superset.apache.org/ + keywords: + - apache + - supertest + maintainers: + - email: thomas.nyambati@deliveryhero.com + name: nyambati + name: superset + sources: + - https://github.com/apache/superset + type: application + urls: + - charts/superset-1.0.14.tgz + version: 1.0.14 + - apiVersion: v2 + appVersion: latest + created: "2022-07-05T19:14:43Z" + description: A Helm chart for Apache Superset + digest: 48b3d2cbb8b7a1d46cbaba8418f4bdacbd8bb3e9995b15f2867d4c80828a0531 + home: https://superset.apache.org/ + keywords: + - apache + - supertest + maintainers: + - email: thomas.nyambati@deliveryhero.com + name: nyambati + name: superset + sources: + - 
https://github.com/apache/superset + type: application + urls: + - charts/superset-1.0.13.tgz + version: 1.0.13 + - apiVersion: v2 + appVersion: latest + created: "2022-07-05T10:14:12Z" + description: A Helm chart for Apache Superset + digest: ff7db8c9bf2e83b552ade16235d8ba1c90cc0579e459afced29076885c9a47bc + home: https://superset.apache.org/ + keywords: + - apache + - supertest + maintainers: + - email: thomas.nyambati@deliveryhero.com + name: nyambati + name: superset + sources: + - https://github.com/apache/superset + type: application + urls: + - charts/superset-1.0.12.tgz + version: 1.0.12 + - apiVersion: v2 + appVersion: latest + created: "2022-07-01T15:30:07Z" + description: A Helm chart for Apache Superset + digest: 231dc559c172e99a26cb541eb9e34d7426afcfd7e43f9faa05946ea6fc5adf13 + home: https://superset.apache.org/ + keywords: + - apache + - supertest + maintainers: + - email: thomas.nyambati@deliveryhero.com + name: nyambati + name: superset + sources: + - https://github.com/apache/superset + type: application + urls: + - charts/superset-1.0.11.tgz + version: 1.0.11 + - apiVersion: v2 + appVersion: latest + created: "2022-07-01T12:22:54Z" + description: A Helm chart for Apache Superset + digest: 28bcd46eaf9c7ea3a98e45914122efb41f871ac17ab33e632d3666dbd872140c + home: https://superset.apache.org/ + keywords: + - apache + - supertest + maintainers: + - email: thomas.nyambati@deliveryhero.com + name: nyambati + name: superset + sources: + - https://github.com/apache/superset + type: application + urls: + - charts/superset-1.0.10.tgz + version: 1.0.10 + - apiVersion: v2 + appVersion: latest + created: "2022-06-10T10:36:26Z" + description: A Helm chart for Apache Superset + digest: 4ecd024d9a1e8afc17ae600bf725e25c101c6ba3fa1d87c3cfa2433af26be61f + home: https://superset.apache.org/ + keywords: + - apache + - supertest + maintainers: + - email: thomas.nyambati@deliveryhero.com + name: nyambati + name: superset + sources: + - https://github.com/apache/superset 
+ type: application + urls: + - charts/superset-1.0.9.tgz + version: 1.0.9 + - apiVersion: v2 + created: "2021-04-30T09:30:49Z" + description: A Helm chart for Apache Superset + digest: 05def5c4bdf0d4c8e07d3d8378fd5b7feb9091aaf87c74b12813c126d5487d82 + home: https://superset.apache.org/ + keywords: + - apache + - supertest + maintainers: + - email: thomas.nyambati@deliveryhero.com + name: nyambati + name: superset + sources: + - https://github.com/apache/superset + type: application + urls: + - charts/superset-1.0.8.tgz + version: 1.0.8 + - apiVersion: v2 + created: "2021-04-20T07:56:36Z" + description: A Helm chart for Apache Superset + digest: 3870403f4872a539a764a8002e98823daa2d906bdc3d115a70b842b7f2dfdaff + home: https://superset.apache.org/ + keywords: + - apache + - supertest + maintainers: + - email: thomas.nyambati@deliveryhero.com + name: nyambati + name: superset + sources: + - https://github.com/apache/superset + type: application + urls: + - charts/superset-1.0.7.tgz + version: 1.0.7 + - apiVersion: v2 + created: "2021-04-16T08:19:10Z" + description: A Helm chart for Apache Superset + digest: 6f45ddfba86f2a4159acb78ece52d969d2c49a89ddc3c7f6042177cd342bd49d + home: https://superset.apache.org/ + keywords: + - apache + - supertest + maintainers: + - email: thomas.nyambati@deliveryhero.com + name: nyambati + name: superset + sources: + - https://github.com/apache/superset + type: application + urls: + - charts/superset-1.0.6.tgz + version: 1.0.6 + - apiVersion: v2 + created: "2021-04-15T09:51:45Z" + description: A Helm chart for Apache Superset + digest: faad8f8288b569378eb06fc5cbf0cf69baee11abefde6541426d2ca1543ea538 + home: https://superset.apache.org/ + keywords: + - apache + - supertest + maintainers: + - email: thomas.nyambati@deliveryhero.com + name: nyambati + name: superset + sources: + - https://github.com/apache/superset + type: application + urls: + - charts/superset-1.0.5.tgz + version: 1.0.5 + - apiVersion: v2 + created: "2021-04-15T07:55:07Z" 
+ description: A Helm chart for Apache Superset + digest: fef6d0fa438c2bd64f0fefc08144d1f87b829451d19ae107dad857ce1962b33b + home: https://superset.apache.org/ + keywords: + - apache + - supertest + maintainers: + - email: thomas.nyambati@deliveryhero.com + name: nyambati + name: superset + sources: + - https://github.com/apache/superset + type: application + urls: + - charts/superset-1.0.4.tgz + version: 1.0.4 + - apiVersion: v2 + created: "2021-04-12T13:12:11Z" + description: A Helm chart for Apache Superset + digest: b626b90f88bb09dbad442ecdbbe0958a6b308b1afbd9e0b64444d1483d96bc14 + home: https://superset.apache.org/ + keywords: + - apache + - supertest + maintainers: + - email: thomas.nyambati@deliveryhero.com + name: nyambati + name: superset + sources: + - https://github.com/apache/superset + type: application + urls: + - charts/superset-1.0.3.tgz + version: 1.0.3 + - apiVersion: v2 + created: "2021-04-11T19:54:49Z" + description: A Helm chart for Apache Superset + digest: e7689ab6a80b5823520ac671e677da60954b57dad9c11da12e90e454e27bebd1 + home: https://superset.apache.org/ + keywords: + - apache + - supertest + maintainers: + - email: thomas.nyambati@deliveryhero.com + name: nyambati + name: superset + sources: + - https://github.com/apache/superset + type: application + urls: + - charts/superset-1.0.2.tgz + version: 1.0.2 + - apiVersion: v2 + created: "2021-04-09T18:50:49Z" + description: A Helm chart for Apache Superset + digest: a8bed773328309803d9bd65d4adf9fee695152b7e36c96f07eb8964c342ab8a5 + home: https://superset.apache.org/ + keywords: + - apache + - supertest + maintainers: + - email: thomas.nyambati@deliveryhero.com + name: nyambati + name: superset + sources: + - https://github.com/apache/superset + type: application + urls: + - charts/superset-1.0.1.tgz + version: 1.0.1 + - apiVersion: v2 + created: "2021-04-09T08:26:18Z" + description: A Helm chart for Apache Superset + digest: 3bfce9b0f8e9e7a4073a384d5718750e0bcdd033668f6c340ee1b43090f9774a + 
home: https://superset.apache.org/ + keywords: + - apache + - supertest + maintainers: + - email: thomas.nyambati@deliveryhero.com + name: nyambati + name: superset + sources: + - https://github.com/apache/superset + type: application + urls: + - charts/superset-1.0.0.tgz + version: 1.0.0 + toxiproxy: + - apiVersion: v1 + appVersion: 2.7.0 + created: "2024-10-04T09:10:03Z" + description: 'A TCP proxy to simulate network and system conditions for chaos + and resiliency testing. By default the chart will install toxiproxy with blank + configuration. You can add [toxics](https://github.com/Shopify/toxiproxy#toxics) + to the running configuration using the [API](https://github.com/Shopify/toxiproxy#http-api). For + large configurations it is easier to store your toxics in a JSON file, in a + `ConfigMap` and pass this to the chart to be used by toxiproxy: ```console + kubectl create configmap my-toxiproxy-config --from-file path/to/your/toxiproxy.json + ``` And then install the chart passing the name of the `ConfigMap` as a value: ```console + helm install toxiproxy deliveryhero/toxiproxy --set toxiproxyConfig=my-toxiproxy-config + ``` ' + digest: ebbf20aaa053bacec64cc7073ff52f24b3a733279f50822c045f881d24a8792e + home: https://github.com/Shopify/toxiproxy + maintainers: + - email: no-reply@deliveryhero.com + name: nreymundo + name: toxiproxy + sources: + - https://github.com/Shopify/toxiproxy + - https://github.com/buckle/toxiproxy-frontend + urls: + - charts/toxiproxy-1.3.8.tgz + version: 1.3.8 + - apiVersion: v1 + appVersion: 2.1.2 + created: "2023-10-17T18:09:02Z" + description: 'A TCP proxy to simulate network and system conditions for chaos + and resiliency testing. By default the chart will install toxiproxy with blank + configuration. You can add [toxics](https://github.com/Shopify/toxiproxy#toxics) + to the running configuration using the [API](https://github.com/Shopify/toxiproxy#http-api). 
For + large configurations it is easier to store your toxics in a JSON file, in a + `ConfigMap` and pass this to the chart to be used by toxiproxy: ```console + kubectl create configmap my-toxiproxy-config --from-file path/to/your/toxiproxy.json + ``` And then install the chart passing the name of the `ConfigMap` as a value: ```console + helm install toxiproxy deliveryhero/toxiproxy --set toxiproxyConfig=my-toxiproxy-config + ``` ' + digest: 92b43af3e9f73dda234d1fc83a73def8f1ca93c2ff38b8b697a89050a3ecc3e0 + home: https://github.com/Shopify/toxiproxy + maintainers: + - email: no-reply@deliveryhero.com + name: nreymundo + name: toxiproxy + sources: + - https://github.com/Shopify/toxiproxy + - https://github.com/buckle/toxiproxy-frontend + urls: + - charts/toxiproxy-1.3.7.tgz + version: 1.3.7 + - apiVersion: v1 + appVersion: 2.1.2 + created: "2023-01-27T10:19:52Z" + description: 'A TCP proxy to simulate network and system conditions for chaos + and resiliency testing. By default the chart will install toxiproxy with blank + configuration. You can add [toxics](https://github.com/Shopify/toxiproxy#toxics) + to the running configuration using the [API](https://github.com/Shopify/toxiproxy#http-api). 
For + large configurations it is easier to store your toxics in a JSON file, in a + `ConfigMap` and pass this to the chart to be used by toxiproxy: ```console + kubectl create configmap my-toxiproxy-config --from-file path/to/your/toxiproxy.json + ``` And then install the chart passing the name of the `ConfigMap` as a value: ```console + helm install toxiproxy deliveryhero/toxiproxy --set toxiproxyConfig=my-toxiproxy-config + ``` ' + digest: 88bb0e2917a6279963ccd518fcefd18646c3317e4a0b59ef5e5983798e17e8e6 + home: https://github.com/Shopify/toxiproxy + maintainers: + - email: no-reply@deliveryhero.com + name: nreymundo + name: toxiproxy + sources: + - https://github.com/Shopify/toxiproxy + - https://github.com/buckle/toxiproxy-frontend + urls: + - charts/toxiproxy-1.3.6.tgz + version: 1.3.6 + - apiVersion: v1 + appVersion: 2.1.2 + created: "2021-11-01T14:43:55Z" + description: 'A TCP proxy to simulate network and system conditions for chaos + and resiliency testing. By default the chart will install toxiproxy with blank + configuration. You can add [toxics](https://github.com/Shopify/toxiproxy#toxics) + to the running configuration using the [API](https://github.com/Shopify/toxiproxy#http-api). 
For + large configurations it is easier to store your toxics in a JSON file, in a + `ConfigMap` and pass this to the chart to be used by toxiproxy: ```console + kubectl create configmap my-toxiproxy-config --from-file path/to/your/toxiproxy.json + ``` And then install the chart passing the name of the `ConfigMap` as a value: ```console + helm install toxiproxy deliveryhero/toxiproxy --set toxiproxyConfig=my-toxiproxy-config + ``` ' + digest: f4c74eaaff206cbd8223e600f4d895a50e1ecffc2acc1672516e5353022890fc + home: https://github.com/Shopify/toxiproxy + maintainers: + - email: no-reply@deliveryhero.com + name: nreymundo + name: toxiproxy + sources: + - https://github.com/Shopify/toxiproxy + - https://github.com/buckle/toxiproxy-frontend + urls: + - charts/toxiproxy-1.3.5.tgz + version: 1.3.5 + - apiVersion: v1 + appVersion: 2.1.2 + created: "2021-07-20T09:27:40Z" + description: | + A TCP proxy to simulate network and system conditions for chaos and resiliency testing. + + By default the chart will install toxiproxy with blank configuration. You can add [toxics](https://github.com/Shopify/toxiproxy#toxics) to the running configuration using the [API](https://github.com/Shopify/toxiproxy#http-api). 
+ + For large configurations it is easier to store your toxics in a JSON file, in a `ConfigMap` and pass this to the chart to be used by toxiproxy: + + ```console + kubectl create configmap my-toxiproxy-config --from-file path/to/your/toxiproxy.json + ``` + + And then install the chart passing the name of the `ConfigMap` as a value: + + ```console + helm install toxiproxy deliveryhero/toxiproxy --set toxiproxyConfig=my-toxiproxy-config + ``` + digest: fec59bcbfcff5194d3a95b63836b2f88950890f09d925666d8cd90ff7ec58e4f + home: https://github.com/Shopify/toxiproxy + maintainers: + - email: no-reply@deliveryhero.com + name: nreymundo + name: toxiproxy + sources: + - https://github.com/Shopify/toxiproxy + - https://github.com/buckle/toxiproxy-frontend + urls: + - charts/toxiproxy-1.3.4.tgz + version: 1.3.4 + - apiVersion: v1 + appVersion: 2.1.2 + created: "2021-05-03T11:31:19Z" + description: | + A TCP proxy to simulate network and system conditions for chaos and resiliency testing. + + By default the chart will install toxiproxy with blank configuration. You can add [toxics](https://github.com/Shopify/toxiproxy#toxics) to the running configuration using the [API](https://github.com/Shopify/toxiproxy#http-api). 
+ + For large configurations it is easier to store your toxics in a JSON file, in a `ConfigMap` and pass this to the chart to be used by toxiproxy: + + ```console + kubectl create configmap my-toxiproxy-config --from-file path/to/your/toxiproxy.json + ``` + + And then install the chart passing the name of the `ConfigMap` as a value: + + ```console + helm install toxiproxy deliveryhero/toxiproxy --set toxiproxyConfig=my-toxiproxy-config + ``` + digest: 7c65b938b6e1a06e535b8c6b01f76a9c9600a5f54b38f191b0e08f34e5d6be84 + home: https://github.com/Shopify/toxiproxy + maintainers: + - email: no-reply@deliveryhero.com + name: nreymundo + name: toxiproxy + sources: + - https://github.com/Shopify/toxiproxy + - https://github.com/buckle/toxiproxy-frontend + urls: + - charts/toxiproxy-1.3.3.tgz + version: 1.3.3 + - apiVersion: v1 + appVersion: 2.1.2 + created: "2021-04-30T09:56:02Z" + description: | + A TCP proxy to simulate network and system conditions for chaos and resiliency testing. + + By default the chart will install toxiproxy with blank configuration. You can add [toxics](https://github.com/Shopify/toxiproxy#toxics) to the running configuration using the [API](https://github.com/Shopify/toxiproxy#http-api). 
+ + For large configurations it is easier to store your toxics in a JSON file, in a `ConfigMap` and pass this to the chart to be used by toxiproxy: + + ```console + kubectl create configmap my-toxiproxy-config --from-file path/to/your/toxiproxy.json + ``` + + And then install the chart passing the name of the `ConfigMap` as a value: + + ```console + helm install toxiproxy deliveryhero/toxiproxy --set toxiproxyConfig=my-toxiproxy-config + ``` + digest: cadb8304e5c49803392ed06b299945fe429d2d76b38399ce6d4e7b93f6fabce9 + home: https://github.com/Shopify/toxiproxy + maintainers: + - email: no-reply@deliveryhero.com + name: nreymundo + name: toxiproxy + sources: + - https://github.com/Shopify/toxiproxy + - https://github.com/buckle/toxiproxy-frontend + urls: + - charts/toxiproxy-1.3.2.tgz + version: 1.3.2 + - apiVersion: v1 + appVersion: 2.1.2 + created: "2021-04-26T10:30:45Z" + description: | + A TCP proxy to simulate network and system conditions for chaos and resiliency testing. + + By default the chart will install toxiproxy with blank configuration. You can add [toxics](https://github.com/Shopify/toxiproxy#toxics) to the running configuration using the [API](https://github.com/Shopify/toxiproxy#http-api). 
+ + For large configurations it is easier to store your toxics in a JSON file, in a `ConfigMap` and pass this to the chart to be used by toxiproxy: + + ```console + kubectl create configmap my-toxiproxy-config --from-file path/to/your/toxiproxy.json + ``` + + And then install the chart passing the name of the `ConfigMap` as a value: + + ```console + helm install toxiproxy deliveryhero/toxiproxy --set toxiproxyConfig=my-toxiproxy-config + ``` + digest: 8d6092420f7d94d8d67b0f6e17440f4605dd399210ffc66649f786023e5c78dd + home: https://github.com/Shopify/toxiproxy + maintainers: + - email: no-reply@deliveryhero.com + name: nreymundo + name: toxiproxy + sources: + - https://github.com/Shopify/toxiproxy + - https://github.com/buckle/toxiproxy-frontend + urls: + - charts/toxiproxy-1.3.1.tgz + version: 1.3.1 + - apiVersion: v1 + appVersion: 2.1.2 + created: "2021-03-18T09:30:37Z" + description: | + A TCP proxy to simulate network and system conditions for chaos and resiliency testing. + + By default the chart will install toxiproxy with blank configuration. You can add [toxics](https://github.com/Shopify/toxiproxy#toxics) to the running configuration using the [API](https://github.com/Shopify/toxiproxy#http-api). 
+ + For large configurations it is easier to store your toxics in a JSON file, in a `ConfigMap` and pass this to the chart to be used by toxiproxy: + + ```console + kubectl create configmap my-toxiproxy-config --from-file path/to/your/toxiproxy.json + ``` + + And then install the chart passing the name of the `ConfigMap` as a value: + + ```console + helm install locust deliveryhero/toxiproxy \ + --set toxiproxyConfig=my-toxiproxy-config + ``` + digest: 2c679976cd34ab29f3864818d2a75b844f1664619009978331b50a703d29ec58 + home: https://github.com/Shopify/toxiproxy + maintainers: + - email: no-reply@deliveryhero.com + name: nreymundo + name: toxiproxy + sources: + - https://github.com/Shopify/toxiproxy + - https://github.com/buckle/toxiproxy-frontend + urls: + - charts/toxiproxy-1.3.0.tgz + version: 1.3.0 + - apiVersion: v1 + appVersion: 2.1.2 + created: "2020-12-31T15:35:50Z" + description: | + A TCP proxy to simulate network and system conditions for chaos and resiliency testing. + + By default the chart will install toxiproxy with blank configuration. You can add [toxics](https://github.com/Shopify/toxiproxy#toxics) to the running configuration using the [API](https://github.com/Shopify/toxiproxy#http-api). 
+ + For large configurations it is easier to store your toxics in a JSON file, in a `ConfigMap` and pass this to the chart to be used by toxiproxy: + + ```console + kubectl create configmap my-toxiproxy-config --from-file path/to/your/toxiproxy.json + ``` + + And then install the chart passing the name of the `ConfigMap` as a value: + + ```console + helm install locust deliveryhero/toxiproxy \ + --set toxiproxyConfig=my-toxiproxy-config + ``` + digest: 7af5ff7828a1fdd30cc519aa77b72d4d25de05513da3c2b47900c8d2d5871b23 + home: https://github.com/Shopify/toxiproxy + maintainers: + - email: no-reply@deliveryhero.com + name: nreymundo + name: toxiproxy + sources: + - https://github.com/Shopify/toxiproxy + urls: + - charts/toxiproxy-1.2.tgz + version: "1.2" + - apiVersion: v1 + appVersion: 2.1.2 + created: "2020-10-19T14:46:41Z" + description: A TCP proxy to simulate network and system conditions for chaos and + resiliency testing. + digest: f53fe15356fbc782590094457f308742ff2648cbb2c95340560cc997c873089e + home: https://github.com/Shopify/toxiproxy + maintainers: + - email: no-reply@deliveryhero.com + name: nreymundo + name: toxiproxy + sources: + - https://github.com/Shopify/toxiproxy + urls: + - charts/toxiproxy-1.1.tgz + version: "1.1" + - apiVersion: v1 + appVersion: 2.1.2 + created: "2020-09-17T15:04:53Z" + description: A TCP proxy to simulate network and system conditions for chaos and + resiliency testing. 
+ digest: 7188e3e333e95a6a3c7423a053db3f50696f90f44a3944ec06c56f979936c88a + home: https://github.com/Shopify/toxiproxy + maintainers: + - email: no-reply@deliveryhero.com + name: nreymundo + name: toxiproxy + sources: + - https://github.com/Shopify/toxiproxy + urls: + - charts/toxiproxy-1.0.tgz + version: "1.0" + weblate: + - apiVersion: v1 + appVersion: 4.2-1 + created: "2024-10-04T09:10:17Z" + dependencies: + - condition: postgresql.enabled + name: postgresql + repository: https://charts.bitnami.com/bitnami + version: 11.6.26 + - condition: redis.enabled + name: redis + repository: https://charts.bitnami.com/bitnami + version: 17.0.10 + description: Free web-based translation management system. + digest: 40b6753622384162e2521ad67c2d06d172fd70023126340474ce5c2cfb0bc294 + home: https://weblate.org + maintainers: + - email: no-reply@deliveryhero.com + name: sietevecesmal + name: weblate + sources: + - https://github.com/WeblateOrg/weblate + urls: + - charts/weblate-0.3.0.tgz + version: 0.3.0 + - apiVersion: v1 + appVersion: 4.2-1 + created: "2023-01-24T14:04:20Z" + dependencies: + - condition: postgresql.enabled + name: postgresql + repository: https://charts.bitnami.com/bitnami + version: 11.6.26 + - condition: redis.enabled + name: redis + repository: https://charts.bitnami.com/bitnami + version: 17.0.10 + description: Free web-based translation management system. 
+ digest: f58010146991e3c04f3f2c969e01e9072bb33e8e613b7f8b68068b0779dd16e2 + home: https://weblate.org + maintainers: + - email: no-reply@deliveryhero.com + name: sietevecesmal + name: weblate + sources: + - https://github.com/WeblateOrg/weblate + urls: + - charts/weblate-0.2.4.tgz + version: 0.2.4 + - apiVersion: v1 + appVersion: 4.2-1 + created: "2022-05-30T14:22:56Z" + dependencies: + - condition: postgresql.enabled + name: postgresql + repository: https://charts.bitnami.com/bitnami + version: 9.1.4 + - condition: redis.enabled + name: redis + repository: https://charts.bitnami.com/bitnami + version: 11.1.0 + description: Free web-based translation management system. + digest: 23e9a8fc01fb7de6b875ae50c6ad86c07e805dcb9adbc2b5640f192a38c1a386 + home: https://weblate.org + maintainers: + - email: no-reply@deliveryhero.com + name: sietevecesmal + name: weblate + sources: + - https://github.com/WeblateOrg/weblate + urls: + - charts/weblate-0.2.3.tgz + version: 0.2.3 + - apiVersion: v1 + appVersion: 4.2-1 + created: "2021-05-19T08:27:41Z" + dependencies: + - condition: postgresql.enabled + name: postgresql + repository: https://charts.bitnami.com/bitnami + version: 9.1.4 + - condition: redis.enabled + name: redis + repository: https://charts.bitnami.com/bitnami + version: 11.1.0 + description: Free web-based translation management system. 
+ digest: 90446717ba48bb5acfb19c80b6757ec779edbb39951e51e52069e6b8b6e09a3b + home: https://weblate.org + maintainers: + - email: no-reply@deliveryhero.com + name: sietevecesmal + name: weblate + sources: + - https://github.com/WeblateOrg/weblate + urls: + - charts/weblate-0.2.2.tgz + version: 0.2.2 + - apiVersion: v1 + appVersion: 4.2-1 + created: "2021-04-30T09:30:59Z" + dependencies: + - condition: postgresql.enabled + name: postgresql + repository: https://charts.bitnami.com/bitnami + version: 9.1.4 + - condition: redis.enabled + name: redis + repository: https://charts.bitnami.com/bitnami + version: 11.1.0 + description: Free web-based translation management system. + digest: cb2d358ec79641282d8a10bc84ada7cf33e9d46b0cad6ba8dc2e031fcc8e2cde + home: https://weblate.org + maintainers: + - email: no-reply@deliveryhero.com + name: sietevecesmal + name: weblate + sources: + - https://github.com/WeblateOrg/weblate + urls: + - charts/weblate-0.2.1.tgz + version: 0.2.1 + - apiVersion: v1 + appVersion: 4.2-1 + created: "2021-04-20T12:18:18Z" + dependencies: + - condition: postgresql.enabled + name: postgresql + repository: https://charts.bitnami.com/bitnami + version: 9.1.4 + - condition: redis.enabled + name: redis + repository: https://charts.bitnami.com/bitnami + version: 11.1.0 + description: Free web-based translation management system. 
+ digest: 30d829777feae4e553caf80f95b85369a5d5ef404e6de0f76368b27a34d42e85 + home: https://weblate.org + maintainers: + - email: no-reply@deliveryhero.com + name: sietevecesmal + name: weblate + sources: + - https://github.com/WeblateOrg/weblate + urls: + - charts/weblate-0.2.0.tgz + version: 0.2.0 + - apiVersion: v1 + appVersion: 4.2-1 + created: "2020-10-08T16:36:02Z" + dependencies: + - condition: postgresql.enabled + name: postgresql + repository: https://charts.bitnami.com/bitnami + version: 9.1.4 + - condition: redis.enabled + name: redis + repository: https://charts.bitnami.com/bitnami + version: 11.1.0 + description: Free web-based translation management system. + digest: e8c8d625681153626417094b0467c57e5966ab033d42fd4aef5436b8ee1c4157 + home: https://weblate.org + maintainers: + - email: no-reply@deliveryhero.com + name: sietevecesmal + name: weblate + sources: + - https://github.com/WeblateOrg/weblate + urls: + - charts/weblate-0.1.0.tgz + version: 0.1.0 + wiremock: + - apiVersion: v1 + appVersion: 2.26.0 + created: "2024-10-04T09:10:18Z" + description: 'A service virtualization tool (some call it mock server) for testing + purposes. This is a templated deployment of [WireMock](http://wiremock.org/) + for mocking services during test scenario execution for load tests as well as + for manual and automated QA purposes. By default the chart will install WireMock + with only a `/status` mapping for readiness probes. One can utilize it''s HTTP + API as well as the file configuration documented in the [Running as a Standalone + Process](http://wiremock.org/docs/running-standalone/) described in the "Configuring + via JSON over HTTP" and "JSON file configuration" chapters. The JSON file configuration + is the recommended setup and the [stub mappings](http://wiremock.org/docs/stubbing/) + should be provided in `ConfigMap`s one per folder. > :warning: both folders + `mappings` and `__files` are optional but each folder requires it''s own `ConfigMap`. 
+ The `-mappings` and `-files` suffixes are obligate. ```console kubectl create + configmap my-service1-stubs-mappings --from-file=path/to/your/service1/mappings + kubectl create configmap my-service1-stubs-files --from-file=path/to/your/service1/__files kubectl + create configmap my-service2-stubs-mappings --from-file=path/to/your/service2/mappings + kubectl create configmap my-service2-stubs-files --from-file=path/to/your/service2/__files + ``` Install the chart passing the `stubs` as a value omitting the suffixes + as both `mappings` and `__files` folders are handled transparently during initialization + depending on their existence. ```console helm install my-wiremock deliveryhero/wiremock + \ --set consumer=my-consumer --set "consumer.stubs.my-service1-stubs=/mnt/my-service1-stubs" + \ --set "consumer.stubs.my-service2-stubs=/mnt/my-service2-stubs" ``` WireMock''s + [admin API](http://wiremock.org/docs/api/) is not publicly exposed, but can + be accessed using port forwarding. ```console kubectl port-forward my-wiremock-123456789a-bcdef + 8080 ``` The HTTP API can then be accessed using `http://localhost:8080/__admin/docs/` + where a swagger UI is availabe. > :warning: this does not work out for multi + instance setups as there is no synchronization of mappings created using the + HTTP API between multiple instances. In case JSON files need to be provided, + which are too large for usual `ConfigMap`s, one can define a binary config map + with a zip archive that contains the file in question. ```console gzip large.json + kubectl create configmap my-binary-stub --from-file=large.json.gz ``` The resulting + archive can be best installed in the wiremock using a `values.yaml` file. 
```yaml + consumer: initContainer: - name: unzip-large-file image: busybox:latest command: + ["sh", "-c", "cp /archive/large.json.gz /working/mappings; gunzip /working/mappings/large.json.gz"] volumeMounts: - + mountPath: /working name: working - mountPath: /archive name: + my-binary-stub initVolume: - name: my-binary-stub configMap: name: + my-binary-stub ``` ' + digest: 7c5f2303f4bae69a2760452b1e324f5e7974d7078364b5541b6a00e049314f3a + home: http://wiremock.org/ + icon: http://wiremock.org/images/wiremock-concept-icon-01.png + maintainers: + - email: no-reply@deliveryhero.com + name: mshero + name: wiremock + sources: + - https://github.com/tomakehurst/wiremock + - https://github.com/rodolpheche/wiremock-docker + urls: + - charts/wiremock-1.4.2.tgz + version: 1.4.2 + - apiVersion: v1 + appVersion: 2.26.0 + created: "2024-10-04T07:26:57Z" + description: 'A service virtualization tool (some call it mock server) for testing + purposes. This is a templated deployment of [WireMock](http://wiremock.org/) + for mocking services during test scenario execution for load tests as well as + for manual and automated QA purposes. By default the chart will install WireMock + with only a `/status` mapping for readiness probes. One can utilize it''s HTTP + API as well as the file configuration documented in the [Running as a Standalone + Process](http://wiremock.org/docs/running-standalone/) described in the "Configuring + via JSON over HTTP" and "JSON file configuration" chapters. The JSON file configuration + is the recommended setup and the [stub mappings](http://wiremock.org/docs/stubbing/) + should be provided in `ConfigMap`s one per folder. > :warning: both folders + `mappings` and `__files` are optional but each folder requires it''s own `ConfigMap`. + The `-mappings` and `-files` suffixes are obligate. 
```console kubectl create + configmap my-service1-stubs-mappings --from-file=path/to/your/service1/mappings + kubectl create configmap my-service1-stubs-files --from-file=path/to/your/service1/__files kubectl + create configmap my-service2-stubs-mappings --from-file=path/to/your/service2/mappings + kubectl create configmap my-service2-stubs-files --from-file=path/to/your/service2/__files + ``` Install the chart passing the `stubs` as a value omitting the suffixes + as both `mappings` and `__files` folders are handled transparently during initialization + depending on their existence. ```console helm install my-wiremock deliveryhero/wiremock + \ --set consumer=my-consumer --set "consumer.stubs.my-service1-stubs=/mnt/my-service1-stubs" + \ --set "consumer.stubs.my-service2-stubs=/mnt/my-service2-stubs" ``` WireMock''s + [admin API](http://wiremock.org/docs/api/) is not publicly exposed, but can + be accessed using port forwarding. ```console kubectl port-forward my-wiremock-123456789a-bcdef + 8080 ``` The HTTP API can then be accessed using `http://localhost:8080/__admin/docs/` + where a swagger UI is availabe. > :warning: this does not work out for multi + instance setups as there is no synchronization of mappings created using the + HTTP API between multiple instances. In case JSON files need to be provided, + which are too large for usual `ConfigMap`s, one can define a binary config map + with a zip archive that contains the file in question. ```console gzip large.json + kubectl create configmap my-binary-stub --from-file=large.json.gz ``` The resulting + archive can be best installed in the wiremock using a `values.yaml` file. 
```yaml + consumer: initContainer: - name: unzip-large-file image: busybox:latest command: + ["sh", "-c", "cp /archive/large.json.gz /working/mappings; gunzip /working/mappings/large.json.gz"] volumeMounts: - + mountPath: /working name: working - mountPath: /archive name: + my-binary-stub initVolume: - name: my-binary-stub configMap: name: + my-binary-stub ``` ' + digest: 697c6c12b4f16d36902292ab8c9f98ccc12f3ecf6a4972d2adaa64d0805d5082 + home: http://wiremock.org/ + icon: http://wiremock.org/images/wiremock-concept-icon-01.png + maintainers: + - email: no-reply@deliveryhero.com + name: mshero + name: wiremock + sources: + - https://github.com/tomakehurst/wiremock + - https://github.com/rodolpheche/wiremock-docker + urls: + - charts/wiremock-1.4.1.tgz + version: 1.4.1 + - apiVersion: v1 + appVersion: 2.26.0 + created: "2023-09-11T11:17:04Z" + description: 'A service virtualization tool (some call it mock server) for testing + purposes. This is a templated deployment of [WireMock](http://wiremock.org/) + for mocking services during test scenario execution for load tests as well as + for manual and automated QA purposes. By default the chart will install WireMock + with only a `/status` mapping for readiness probes. One can utilize it''s HTTP + API as well as the file configuration documented in the [Running as a Standalone + Process](http://wiremock.org/docs/running-standalone/) described in the "Configuring + via JSON over HTTP" and "JSON file configuration" chapters. The JSON file configuration + is the recommended setup and the [stub mappings](http://wiremock.org/docs/stubbing/) + should be provided in `ConfigMap`s one per folder. > :warning: both folders + `mappings` and `__files` are optional but each folder requires it''s own `ConfigMap`. + The `-mappings` and `-files` suffixes are obligate. 
```console kubectl create + configmap my-service1-stubs-mappings --from-file=path/to/your/service1/mappings + kubectl create configmap my-service1-stubs-files --from-file=path/to/your/service1/__files kubectl + create configmap my-service2-stubs-mappings --from-file=path/to/your/service2/mappings + kubectl create configmap my-service2-stubs-files --from-file=path/to/your/service2/__files + ``` Install the chart passing the `stubs` as a value omitting the suffixes + as both `mappings` and `__files` folders are handled transparently during initialization + depending on their existence. ```console helm install my-wiremock deliveryhero/wiremock + \ --set consumer=my-consumer --set "consumer.stubs.my-service1-stubs=/mnt/my-service1-stubs" + \ --set "consumer.stubs.my-service2-stubs=/mnt/my-service2-stubs" ``` WireMock''s + [admin API](http://wiremock.org/docs/api/) is not publicly exposed, but can + be accessed using port forwarding. ```console kubectl port-forward my-wiremock-123456789a-bcdef + 8080 ``` The HTTP API can then be accessed using `http://localhost:8080/__admin/docs/` + where a swagger UI is availabe. > :warning: this does not work out for multi + instance setups as there is no synchronization of mappings created using the + HTTP API between multiple instances. In case JSON files need to be provided, + which are too large for usual `ConfigMap`s, one can define a binary config map + with a zip archive that contains the file in question. ```console gzip large.json + kubectl create configmap my-binary-stub --from-file=large.json.gz ``` The resulting + archive can be best installed in the wiremock using a `values.yaml` file. 
```yaml + consumer: initContainer: - name: unzip-large-file image: busybox:latest command: + ["sh", "-c", "cp /archive/large.json.gz /working/mappings; gunzip /working/mappings/large.json.gz"] volumeMounts: - + mountPath: /working name: working - mountPath: /archive name: + my-binary-stub initVolume: - name: my-binary-stub configMap: name: + my-binary-stub ``` ' + digest: bc43048458081d99d4d1c97b0bcabf25e633be92cfb2c865505dd1acfe1f474f + home: http://wiremock.org/ + icon: http://wiremock.org/images/wiremock-concept-icon-01.png + maintainers: + - email: no-reply@deliveryhero.com + name: mshero + name: wiremock + sources: + - https://github.com/tomakehurst/wiremock + - https://github.com/rodolpheche/wiremock-docker + urls: + - charts/wiremock-1.4.0.tgz + version: 1.4.0 + - apiVersion: v1 + appVersion: 2.26.0 + created: "2023-01-27T10:33:20Z" + description: 'A service virtualization tool (some call it mock server) for testing + purposes. This is a templated deployment of [WireMock](http://wiremock.org/) + for mocking services during test scenario execution for load tests as well as + for manual and automated QA purposes. By default the chart will install WireMock + with only a `/status` mapping for readiness probes. One can utilize it''s HTTP + API as well as the file configuration documented in the [Running as a Standalone + Process](http://wiremock.org/docs/running-standalone/) described in the "Configuring + via JSON over HTTP" and "JSON file configuration" chapters. The JSON file configuration + is the recommended setup and the [stub mappings](http://wiremock.org/docs/stubbing/) + should be provided in `ConfigMap`s one per folder. > :warning: both folders + `mappings` and `__files` are optional but each folder requires it''s own `ConfigMap`. + The `-mappings` and `-files` suffixes are obligate. 
```console kubectl create + configmap my-service1-stubs-mappings --from-file=path/to/your/service1/mappings + kubectl create configmap my-service1-stubs-files --from-file=path/to/your/service1/__files kubectl + create configmap my-service2-stubs-mappings --from-file=path/to/your/service2/mappings + kubectl create configmap my-service2-stubs-files --from-file=path/to/your/service2/__files + ``` Install the chart passing the `stubs` as a value omitting the suffixes + as both `mappings` and `__files` folders are handled transparently during initialization + depending on their existence. ```console helm install my-wiremock deliveryhero/wiremock + \ --set consumer=my-consumer --set "consumer.stubs.my-service1-stubs=/mnt/my-service1-stubs" + \ --set "consumer.stubs.my-service2-stubs=/mnt/my-service2-stubs" ``` WireMock''s + [admin API](http://wiremock.org/docs/api/) is not publicly exposed, but can + be accessed using port forwarding. ```console kubectl port-forward my-wiremock-123456789a-bcdef + 8080 ``` The HTTP API can then be accessed using `http://localhost:8080/__admin/docs/` + where a swagger UI is availabe. > :warning: this does not work out for multi + instance setups as there is no synchronization of mappings created using the + HTTP API between multiple instances. In case JSON files need to be provided, + which are too large for usual `ConfigMap`s, one can define a binary config map + with a zip archive that contains the file in question. ```console gzip large.json + kubectl create configmap my-binary-stub --from-file=large.json.gz ``` The resulting + archive can be best installed in the wiremock using a `values.yaml` file. 
```yaml + consumer: initContainer: - name: unzip-large-file image: busybox:latest command: + ["sh", "-c", "cp /archive/large.json.gz /working/mappings; gunzip /working/mappings/large.json.gz"] volumeMounts: - + mountPath: /working name: working - mountPath: /archive name: + my-binary-stub initVolume: - name: my-binary-stub configMap: name: + my-binary-stub ``` ' + digest: 043e66092e0fe6654f556b963a3ae8975db424bfa92e50a7a9500f612e370b18 + home: http://wiremock.org/ + icon: http://wiremock.org/images/wiremock-concept-icon-01.png + maintainers: + - email: no-reply@deliveryhero.com + name: mshero + name: wiremock + sources: + - https://github.com/tomakehurst/wiremock + - https://github.com/rodolpheche/wiremock-docker + urls: + - charts/wiremock-1.3.0.tgz + version: 1.3.0 + - apiVersion: v1 + appVersion: 2.26.0 + created: "2022-07-18T07:36:07Z" + description: 'A service virtualization tool (some call it mock server) for testing + purposes. This is a templated deployment of [WireMock](http://wiremock.org/) + for mocking services during test scenario execution for load tests as well as + for manual and automated QA purposes. By default the chart will install WireMock + with only a `/status` mapping for readiness probes. One can utilize it''s HTTP + API as well as the file configuration documented in the [Running as a Standalone + Process](http://wiremock.org/docs/running-standalone/) described in the "Configuring + via JSON over HTTP" and "JSON file configuration" chapters. The JSON file configuration + is the recommended setup and the [stub mappings](http://wiremock.org/docs/stubbing/) + should be provided in `ConfigMap`s one per folder. > :warning: both folders + `mappings` and `__files` are optional but each folder requires it''s own `ConfigMap`. + The `-mappings` and `-files` suffixes are obligate. 
```console kubectl create + configmap my-service1-stubs-mappings --from-file=path/to/your/service1/mappings + kubectl create configmap my-service1-stubs-files --from-file=path/to/your/service1/__files kubectl + create configmap my-service2-stubs-mappings --from-file=path/to/your/service2/mappings + kubectl create configmap my-service2-stubs-files --from-file=path/to/your/service2/__files + ``` Install the chart passing the `stubs` as a value omitting the suffixes + as both `mappings` and `__files` folders are handled transparently during initialization + depending on their existence. ```console helm install my-wiremock deliveryhero/wiremock + \ --set consumer=my-consumer --set "consumer.stubs.my-service1-stubs=/mnt/my-service1-stubs" + \ --set "consumer.stubs.my-service2-stubs=/mnt/my-service2-stubs" ``` WireMock''s + [admin API](http://wiremock.org/docs/api/) is not publicly exposed, but can + be accessed using port forwarding. ```console kubectl port-forward my-wiremock-123456789a-bcdef + 8080 ``` The HTTP API can then be accessed using `http://localhost:8080/__admin/docs/` + where a swagger UI is availabe. > :warning: this does not work out for multi + instance setups as there is no synchronization of mappings created using the + HTTP API between multiple instances. In case JSON files need to be provided, + which are too large for usual `ConfigMap`s, one can define a binary config map + with a zip archive that contains the file in question. ```console gzip large.json + kubectl create configmap my-binary-stub --from-file=large.json.gz ``` The resulting + archive can be best installed in the wiremock using a `values.yaml` file. 
```yaml + consumer: initContainer: - name: unzip-large-file image: busybox:latest command: + ["sh", "-c", "cp /archive/large.json.gz /working/mappings; gunzip /working/mappings/large.json.gz"] volumeMounts: - + mountPath: /working name: working - mountPath: /archive name: + my-binary-stub initVolume: - name: my-binary-stub configMap: name: + my-binary-stub ``` ' + digest: 61c4ff25bd7e41041a4e964bf687962f351a3d9140719ab32485ba052a0bd6ca + home: http://wiremock.org/ + icon: http://wiremock.org/images/wiremock-concept-icon-01.png + maintainers: + - email: no-reply@deliveryhero.com + name: mshero + name: wiremock + sources: + - https://github.com/tomakehurst/wiremock + - https://github.com/rodolpheche/wiremock-docker + urls: + - charts/wiremock-1.2.0.tgz + version: 1.2.0 + - apiVersion: v1 + appVersion: 2.26.0 + created: "2022-07-18T07:24:55Z" + description: 'A service virtualization tool (some call it mock server) for testing + purposes. This is a templated deployment of [WireMock](http://wiremock.org/) + for mocking services during test scenario execution for load tests as well as + for manual and automated QA purposes. By default the chart will install WireMock + with only a `/status` mapping for readiness probes. One can utilize it''s HTTP + API as well as the file configuration documented in the [Running as a Standalone + Process](http://wiremock.org/docs/running-standalone/) described in the "Configuring + via JSON over HTTP" and "JSON file configuration" chapters. The JSON file configuration + is the recommended setup and the [stub mappings](http://wiremock.org/docs/stubbing/) + should be provided in `ConfigMap`s one per folder. > :warning: both folders + `mappings` and `__files` are optional but each folder requires it''s own `ConfigMap`. + The `-mappings` and `-files` suffixes are obligate. 
```console kubectl create + configmap my-service1-stubs-mappings --from-file=path/to/your/service1/mappings + kubectl create configmap my-service1-stubs-files --from-file=path/to/your/service1/__files kubectl + create configmap my-service2-stubs-mappings --from-file=path/to/your/service2/mappings + kubectl create configmap my-service2-stubs-files --from-file=path/to/your/service2/__files + ``` Install the chart passing the `stubs` as a value omitting the suffixes + as both `mappings` and `__files` folders are handled transparently during initialization + depending on their existence. ```console helm install my-wiremock deliveryhero/wiremock + \ --set consumer=my-consumer --set "consumer.stubs.my-service1-stubs=/mnt/my-service1-stubs" + \ --set "consumer.stubs.my-service2-stubs=/mnt/my-service2-stubs" ``` WireMock''s + [admin API](http://wiremock.org/docs/api/) is not publicly exposed, but can + be accessed using port forwarding. ```console kubectl port-forward my-wiremock-123456789a-bcdef + 8080 ``` The HTTP API can then be accessed using `http://localhost:8080/__admin/docs/` + where a swagger UI is availabe. > :warning: this does not work out for multi + instance setups as there is no synchronization of mappings created using the + HTTP API between multiple instances. In case JSON files need to be provided, + which are too large for usual `ConfigMap`s, one can define a binary config map + with a zip archive that contains the file in question. ```console gzip large.json + kubectl create configmap my-binary-stub --from-file=large.json.gz ``` The resulting + archive can be best installed in the wiremock using a `values.yaml` file. 
```yaml + consumer: initContainer: - name: unzip-large-file image: busybox:latest command: + ["sh", "-c", "cp /archive/large.json.gz /working/mappings; gunzip /working/mappings/large.json.gz"] volumeMounts: - + mountPath: /working name: working - mountPath: /archive name: + my-binary-stub initVolume: - name: my-binary-stub configMap: name: + my-binary-stub ``` ' + digest: 86eabd39e24e111bf0b3bb252677e1b61fb3c75f1693fc2cb771bb31235b7966 + home: http://wiremock.org/ + icon: http://wiremock.org/images/wiremock-concept-icon-01.png + maintainers: + - email: no-reply@deliveryhero.com + name: mshero + name: wiremock + sources: + - https://github.com/tomakehurst/wiremock + - https://github.com/rodolpheche/wiremock-docker + urls: + - charts/wiremock-1.1.3.tgz + version: 1.1.3 + - apiVersion: v1 + appVersion: 2.26.0 + created: "2021-07-16T07:39:31Z" + description: | + A service virtualization tool (some call it mock server) for testing purposes. + + This is a templated deployment of [WireMock](http://wiremock.org/) for mocking services during test scenario execution + for load tests as well as for manual and automated QA purposes. + + By default the chart will install WireMock with only a `/status` mapping for readiness probes. + + One can utilize it's HTTP API as well as the file configuration documented in the + [Running as a Standalone Process](http://wiremock.org/docs/running-standalone/) described in the "Configuring via JSON + over HTTP" and "JSON file configuration" chapters. + + The JSON file configuration is the recommended setup and the [stub mappings](http://wiremock.org/docs/stubbing/) + should be provided in `ConfigMap`s one per folder. + + > :warning: both folders `mappings` and `__files` are optional but each folder requires it's own `ConfigMap`. The + `-mappings` and `-files` suffixes are obligate. 
+ + ```console + kubectl create configmap my-service1-stubs-mappings --from-file=path/to/your/service1/mappings + kubectl create configmap my-service1-stubs-files --from-file=path/to/your/service1/__files + + kubectl create configmap my-service2-stubs-mappings --from-file=path/to/your/service2/mappings + kubectl create configmap my-service2-stubs-files --from-file=path/to/your/service2/__files + ``` + + Install the chart passing the `stubs` as a value omitting the suffixes as both `mappings` and `__files` folders are + handled transparently during initialization depending on their existence. + + ```console + helm install my-wiremock deliveryhero/wiremock \ + --set consumer=my-consumer + --set "consumer.stubs.my-service1-stubs=/mnt/my-service1-stubs" \ + --set "consumer.stubs.my-service2-stubs=/mnt/my-service2-stubs" + ``` + + WireMock's [admin API](http://wiremock.org/docs/api/) is not publicly exposed, but can be accessed using port forwarding. + + ```console + kubectl port-forward my-wiremock-123456789a-bcdef 8080 + ``` + + The HTTP API can then be accessed using `http://localhost:8080/__admin/docs/` where a swagger UI is availabe. + + > :warning: this does not work out for multi instance setups as there is no synchronization of mappings created using + the HTTP API between multiple instances. + + In case JSON files need to be provided, which are too large for usual `ConfigMap`s, one can define a binary config map + with a zip archive that contains the file in question. + + ```console + gzip large.json + kubectl create configmap my-binary-stub --from-file=large.json.gz + ``` + + The resulting archive can be best installed in the wiremock using a `values.yaml` file. 
+ + ```yaml + consumer: + initContainer: + - name: unzip-large-file + image: busybox:latest + command: ["sh", "-c", "cp /archive/large.json.gz /working/mappings; gunzip /working/mappings/large.json.gz"] + volumeMounts: + - mountPath: /working + name: working + - mountPath: /archive + name: my-binary-stub + initVolume: + - name: my-binary-stub + configMap: + name: my-binary-stub + ``` + digest: 9dd9d0c15f75f77458a9baef3bfa3e2cdce76549d7b79b932fcd95e62c30a9d9 + home: http://wiremock.org/ + icon: http://wiremock.org/images/wiremock-concept-icon-01.png + maintainers: + - email: no-reply@deliveryhero.com + name: mshero + name: wiremock + sources: + - https://github.com/tomakehurst/wiremock + - https://github.com/rodolpheche/wiremock-docker + urls: + - charts/wiremock-1.1.2.tgz + version: 1.1.2 + - apiVersion: v1 + appVersion: 2.26.0 + created: "2021-04-23T13:52:18Z" + description: | + A service virtualization tool (some call it mock server) for testing purposes. + + This is a templated deployment of [WireMock](http://wiremock.org/) for mocking services during test scenario execution + for load tests as well as for manual and automated QA purposes. + + By default the chart will install WireMock with only a `/status` mapping for readiness probes. + + One can utilize it's HTTP API as well as the file configuration documented in the + [Running as a Standalone Process](http://wiremock.org/docs/running-standalone/) described in the "Configuring via JSON + over HTTP" and "JSON file configuration" chapters. + + The JSON file configuration is the recommended setup and the [stub mappings](http://wiremock.org/docs/stubbing/) + should be provided in `ConfigMap`s one per folder. + + > :warning: both folders `mappings` and `__files` are optional but each folder requires it's own `ConfigMap`. The + `-mappings` and `-files` suffixes are obligate. 
+ + ```console + kubectl create configmap my-service1-stubs-mappings --from-file=path/to/your/service1/mappings + kubectl create configmap my-service1-stubs-files --from-file=path/to/your/service1/__files + + kubectl create configmap my-service2-stubs-mappings --from-file=path/to/your/service2/mappings + kubectl create configmap my-service2-stubs-files --from-file=path/to/your/service2/__files + ``` + + Install the chart passing the `stubs` as a value omitting the suffixes as both `mappings` and `__files` folders are + handled transparently during initialization depending on their existence. + + ```console + helm install my-wiremock deliveryhero/wiremock \ + --set consumer=my-consumer + --set "consumer.stubs.my-service1-stubs=/mnt/my-service1-stubs" \ + --set "consumer.stubs.my-service2-stubs=/mnt/my-service2-stubs" + ``` + + WireMock's [admin API](http://wiremock.org/docs/api/) is not publicly exposed, but can be accessed using port forwarding. + + ```console + kubectl port-forward my-wiremock-123456789a-bcdef 8080 + ``` + + The HTTP API can then be accessed using `http://localhost:8080/__admin/docs/` where a swagger UI is availabe. + + > :warning: this does not work out for multi instance setups as there is no synchronization of mappings created using + the HTTP API between multiple instances. + + In case JSON files need to be provided, which are too large for usual `ConfigMap`s, one can define a binary config map + with a zip archive that contains the file in question. + + ```console + gzip large.json + kubectl create configmap my-binary-stub --from-file=large.json.gz + ``` + + The resulting archive can be best installed in the wiremock using a `values.yaml` file. 
+ + ```yaml + consumer: + initContainer: + - name: unzip-large-file + image: busybox:latest + command: ["sh", "-c", "cp /archive/large.json.gz /working/mappings; gunzip /working/mappings/large.json.gz"] + volumeMounts: + - mountPath: /working + name: working + - mountPath: /archive + name: my-binary-stub + initVolume: + - name: my-binary-stub + configMap: + name: my-binary-stub + ``` + digest: 86d4fe40ef48ab6da389c175e470828e39e90ce27db3f7a97ee3d32a7d348974 + home: http://wiremock.org/ + icon: http://wiremock.org/images/wiremock-concept-icon-01.png + maintainers: + - email: no-reply@deliveryhero.com + name: mshero + name: wiremock + sources: + - https://github.com/tomakehurst/wiremock + - https://github.com/rodolpheche/wiremock-docker + urls: + - charts/wiremock-1.1.1.tgz + version: 1.1.1 + - apiVersion: v1 + appVersion: 2.26.0 + created: "2021-04-23T07:40:13Z" + description: | + A service virtualization tool (some call it mock server) for testing purposes. + + This is a templated deployment of [WireMock](http://wiremock.org/) for mocking services during test scenario execution + for load tests as well as for manual and automated QA purposes. + + By default the chart will install WireMock with only a `/status` mapping for readiness probes. + + One can utilize it's HTTP API as well as the file configuration documented in the + [Running as a Standalone Process](http://wiremock.org/docs/running-standalone/) described in the "Configuring via JSON + over HTTP" and "JSON file configuration" chapters. + + The JSON file configuration is the recommended setup and the [stub mappings](http://wiremock.org/docs/stubbing/) + should be provided in `ConfigMap`s one per folder. + + > :warning: both folders `mappings` and `__files` are optional but each folder requires it's own `ConfigMap`. The + `-mappings` and `-files` suffixes are obligate. 
+ + ```console + kubectl create configmap my-service1-stubs-mappings --from-file=path/to/your/service1/mappings + kubectl create configmap my-service1-stubs-files --from-file=path/to/your/service1/__files + + kubectl create configmap my-service2-stubs-mappings --from-file=path/to/your/service2/mappings + kubectl create configmap my-service2-stubs-files --from-file=path/to/your/service2/__files + ``` + + Install the chart passing the `stubs` as a value omitting the suffixes as both `mappings` and `__files` folders are + handled transparently during initialization depending on their existence. + + ```console + helm install my-wiremock deliveryhero/wiremock \ + --set consumer=my-consumer + --set "consumer.stubs.my-service1-stubs=/mnt/my-service1-stubs" \ + --set "consumer.stubs.my-service2-stubs=/mnt/my-service2-stubs" + ``` + + WireMock's [admin API](http://wiremock.org/docs/api/) is not publicly exposed, but can be accessed using port forwarding. + + ```console + kubectl port-forward my-wiremock-123456789a-bcdef 8080 + ``` + + The HTTP API can then be accessed using `http://localhost:8080/__admin/docs/` where a swagger UI is availabe. + + > :warning: this does not work out for multi instance setups as there is no synchronization of mappings created using + the HTTP API between multiple instances. + + In case JSON files need to be provided, which are too large for usual `ConfigMap`s, one can define a binary config map + with a zip archive that contains the file in question. + + ```console + gzip large.json + kubectl create configmap my-binary-stub --from-file=large.json.gz + ``` + + The resulting archive can be best installed in the wiremock using a `values.yaml` file. 
+ + ```yaml + consumer: + initContainer: + - name: unzip-large-file + image: busybox:latest + command: ["sh", "-c", "cp /archive/large.json.gz /working/mappings; gunzip /working/mappings/large.json.gz"] + volumeMounts: + - mountPath: /working + name: working + - mountPath: /archive + name: my-binary-stub + initVolume: + - name: my-binary-stub + configMap: + name: my-binary-stub + ``` + digest: 3ee2acfa0d3c96c549411762faf26708aec0c8a639939dca38f4d9a624af5dcb + home: http://wiremock.org/ + icon: http://wiremock.org/images/wiremock-concept-icon-01.png + maintainers: + - email: no-reply@deliveryhero.com + name: mshero + name: wiremock + sources: + - https://github.com/tomakehurst/wiremock + - https://github.com/rodolpheche/wiremock-docker + urls: + - charts/wiremock-1.1.0.tgz + version: 1.1.0 +generated: "2024-10-04T09:10:22Z" +serverInfo: {}