diff --git a/charts/argocd/Chart.lock b/charts/argocd/Chart.lock index f8b65043..39d542b4 100644 --- a/charts/argocd/Chart.lock +++ b/charts/argocd/Chart.lock @@ -1,6 +1,6 @@ dependencies: - name: argo-cd repository: https://argoproj.github.io/argo-helm - version: 8.0.1 -digest: sha256:ba6c49d64851ea12a80e5c30e96ce38ebff712aa90678955595479f613e12089 -generated: "2025-05-14T10:23:53.65818767Z" + version: 9.1.4 +digest: sha256:cc401eb6b6d70a7b656a9c6bb6b235204238dcd7f15263e72cfa77f67225d188 +generated: "2025-11-26T10:26:00.694755019Z" diff --git a/charts/argocd/Chart.yaml b/charts/argocd/Chart.yaml index b29e4f01..a87a1514 100644 --- a/charts/argocd/Chart.yaml +++ b/charts/argocd/Chart.yaml @@ -2,11 +2,11 @@ apiVersion: v2 name: argocd description: A Helm chart for Kubernetes type: application -version: 0.1.3 +version: 0.1.4 appVersion: "2.14.4" dependencies: - name: argo-cd - version: 8.0.1 + version: 9.1.4 repository: "https://argoproj.github.io/argo-helm" alias: argocd maintainers: diff --git a/charts/argocd/README.md b/charts/argocd/README.md index c88faadb..1c88031d 100644 --- a/charts/argocd/README.md +++ b/charts/argocd/README.md @@ -1,6 +1,6 @@ # argocd -![Version: 0.1.3](https://img.shields.io/badge/Version-0.1.3-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 2.14.4](https://img.shields.io/badge/AppVersion-2.14.4-informational?style=flat-square) +![Version: 0.1.4](https://img.shields.io/badge/Version-0.1.4-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 2.14.4](https://img.shields.io/badge/AppVersion-2.14.4-informational?style=flat-square) ## Prerequisites @@ -11,7 +11,7 @@ | Repository | Name | Version | |------------|------|---------| -| https://argoproj.github.io/argo-helm | argocd(argo-cd) | 8.0.1 | +| https://argoproj.github.io/argo-helm | argocd(argo-cd) | 9.1.4 | ## Maintainers @@ -49,6 +49,7 @@ A Helm chart for Kubernetes | argocd.applicationSet.containerPorts.webhook | int | `7000` | Webhook container port | | argocd.applicationSet.containerSecurityContext | object | See [values.yaml] | ApplicationSet controller container-level security context | | argocd.applicationSet.deploymentAnnotations | object | `{}` | Annotations to be added to ApplicationSet controller Deployment | +| argocd.applicationSet.deploymentLabels | object | `{}` | Labels for the ApplicationSet controller Deployment | | argocd.applicationSet.deploymentStrategy | object | `{}` | Deployment strategy to be added to the ApplicationSet controller Deployment | | argocd.applicationSet.dnsConfig | object | `{}` | [DNS configuration] | | argocd.applicationSet.dnsPolicy | string | `"ClusterFirst"` | Alternative DNS policy for ApplicationSet controller pods | @@ -102,6 +103,7 @@ A Helm chart for Kubernetes | argocd.applicationSet.metrics.serviceMonitor.selector | object | `{}` | Prometheus ServiceMonitor selector | | argocd.applicationSet.metrics.serviceMonitor.tlsConfig | object | `{}` | Prometheus ServiceMonitor tlsConfig | | argocd.applicationSet.name | string | `"applicationset-controller"` | ApplicationSet controller name string | +| argocd.applicationSet.networkPolicy.create | bool | `false` (defaults to global.networkPolicy.create) | Default network policy rules used by ApplicationSet controller | | argocd.applicationSet.nodeSelector | object | `{}` (defaults to global.nodeSelector) | [Node selector] | | 
argocd.applicationSet.pdb.annotations | object | `{}` | Annotations to be added to ApplicationSet controller pdb | | argocd.applicationSet.pdb.enabled | bool | `false` | Deploy a [PodDisruptionBudget] for the ApplicationSet controller | @@ -137,6 +139,7 @@ A Helm chart for Kubernetes | argocd.commitServer.automountServiceAccountToken | bool | `false` | Automount API credentials for the Service Account into the pod. | | argocd.commitServer.containerSecurityContext | object | See [values.yaml] | commit server container-level security context | | argocd.commitServer.deploymentAnnotations | object | `{}` | Annotations to be added to commit server Deployment | +| argocd.commitServer.deploymentLabels | object | `{}` | Labels for the commit server Deployment | | argocd.commitServer.deploymentStrategy | object | `{}` | Deployment strategy to be added to the commit server Deployment | | argocd.commitServer.dnsConfig | object | `{}` | [DNS configuration] | | argocd.commitServer.dnsPolicy | string | `"ClusterFirst"` | Alternative DNS policy for commit server pods | @@ -162,6 +165,7 @@ A Helm chart for Kubernetes | argocd.commitServer.metrics.service.servicePort | int | `8087` | Metrics service port | | argocd.commitServer.metrics.service.type | string | `"ClusterIP"` | Metrics service type | | argocd.commitServer.name | string | `"commit-server"` | Commit server name | +| argocd.commitServer.networkPolicy.create | bool | `false` (defaults to global.networkPolicy.create) | Default network policy rules used by commit server | | argocd.commitServer.nodeSelector | object | `{}` (defaults to global.nodeSelector) | [Node selector] | | argocd.commitServer.podAnnotations | object | `{}` | Annotations for the commit server pods | | argocd.commitServer.podLabels | object | `{}` | Labels for the commit server pods | @@ -175,6 +179,8 @@ A Helm chart for Kubernetes | argocd.commitServer.runtimeClassName | string | `""` (defaults to global.runtimeClassName) | Runtime class name for the commit server | | argocd.commitServer.service.annotations | object | `{}` | commit server service annotations | | argocd.commitServer.service.labels | object | `{}` | commit server service labels | +| argocd.commitServer.service.port | int | `8086` | commit server service port | +| argocd.commitServer.service.portName | string | `"server"` | commit server service port name | | argocd.commitServer.serviceAccount.annotations | object | `{}` | Annotations applied to created service account | | argocd.commitServer.serviceAccount.automountServiceAccountToken | bool | `true` | Automount API credentials for the Service Account | | argocd.commitServer.serviceAccount.create | bool | `true` | Create commit server service account | @@ -197,7 +203,6 @@ A Helm chart for Kubernetes | argocd.configs.cm."resource.customizations.ignoreResourceUpdates.autoscaling_HorizontalPodAutoscaler" | string | See [values.yaml] | Legacy annotations used on HPA autoscaling/v1 | | argocd.configs.cm."resource.customizations.ignoreResourceUpdates.discovery.k8s.io_EndpointSlice" | string | See [values.yaml] | Ignores update if EndpointSlice is not excluded globally | | argocd.configs.cm."resource.exclusions" | string | See [values.yaml] | Resource Exclusion/Inclusion | -| argocd.configs.cm."server.rbac.log.enforce.enable" | bool | `false` | Enable logs RBAC enforcement # Ref: https://argo-cd.readthedocs.io/en/latest/operator-manual/upgrading/2.3-2.4/#enable-logs-rbac-enforcement | | argocd.configs.cm."statusbadge.enabled" | bool | `false` | Enable Status Badge # Ref: 
https://argo-cd.readthedocs.io/en/stable/user-guide/status-badge/ | | argocd.configs.cm."timeout.hard.reconciliation" | string | `"0s"` | Timeout to refresh application data as well as target manifests cache | | argocd.configs.cm."timeout.reconciliation" | string | `"180s"` | Timeout to discover if a new manifests version got published to the repository | @@ -210,26 +215,6 @@ A Helm chart for Kubernetes | argocd.configs.credentialTemplatesAnnotations | object | `{}` | Annotations to be added to `configs.credentialTemplates` Secret | | argocd.configs.gpg.annotations | object | `{}` | Annotations to be added to argocd-gpg-keys-cm configmap | | argocd.configs.gpg.keys | object | `{}` (See [values.yaml]) | [GnuPG] public keys to add to the keyring # Note: Public keys should be exported with `gpg --export --armor ` | -| argocd.configs.params."application.namespaces" | string | `""` | Enables [Applications in any namespace] # List of additional namespaces where applications may be created in and reconciled from. # The namespace where Argo CD is installed to will always be allowed. # Set comma-separated list. (e.g. app-team-one, app-team-two) | -| argocd.configs.params."applicationsetcontroller.enable.progressive.syncs" | bool | `false` | Enables use of the Progressive Syncs capability | -| argocd.configs.params."applicationsetcontroller.namespaces" | string | `""` (default is only the ns where the controller is installed) | A list of glob patterns specifying where to look for ApplicationSet resources. (e.g. `"argocd,argocd-appsets-*"`) # For more information: https://argo-cd.readthedocs.io/en/stable/operator-manual/applicationset/Appset-Any-Namespace/ | -| argocd.configs.params."applicationsetcontroller.policy" | string | `"sync"` | Modify how application is synced between the generator and the cluster. One of: `sync`, `create-only`, `create-update`, `create-delete` | -| argocd.configs.params."controller.ignore.normalizer.jq.timeout" | string | `"1s"` | JQ Path expression timeout # By default, the evaluation of a JQPathExpression is limited to one second. # If you encounter a "JQ patch execution timed out" error message due to a complex JQPathExpression # that requires more time to evaluate, you can extend the timeout period. | -| argocd.configs.params."controller.operation.processors" | int | `10` | Number of application operation processors | -| argocd.configs.params."controller.repo.server.timeout.seconds" | int | `60` | Repo server RPC call timeout seconds. | -| argocd.configs.params."controller.self.heal.timeout.seconds" | int | `5` | Specifies timeout between application self heal attempts | -| argocd.configs.params."controller.status.processors" | int | `20` | Number of application status processors | -| argocd.configs.params."controller.sync.timeout.seconds" | int | `0` | Specifies the timeout after which a sync would be terminated. 0 means no timeout | -| argocd.configs.params."otlp.address" | string | `""` | Open-Telemetry collector address: (e.g. "otel-collector:4317") | -| argocd.configs.params."reposerver.parallelism.limit" | int | `0` | Limit on number of concurrent manifests generate requests. Any value less the 1 means no limit. | -| argocd.configs.params."server.basehref" | string | `"/"` | Value for base href in index.html. 
Used if Argo CD is running behind reverse proxy under subpath different from / | -| argocd.configs.params."server.disable.auth" | bool | `false` | Disable Argo CD RBAC for user authentication | -| argocd.configs.params."server.enable.gzip" | bool | `true` | Enable GZIP compression | -| argocd.configs.params."server.enable.proxy.extension" | bool | `false` | Enable proxy extension feature. (proxy extension is in Alpha phase) | -| argocd.configs.params."server.insecure" | bool | `false` | Run server without TLS # NOTE: This value should be set when you generate params by other means as it changes ports used by ingress template. | -| argocd.configs.params."server.rootpath" | string | `""` | Used if Argo CD is running behind reverse proxy under subpath different from / | -| argocd.configs.params."server.staticassets" | string | `"/shared/app"` | Directory path that contains additional static assets | -| argocd.configs.params."server.x.frame.options" | string | `"sameorigin"` | Set X-Frame-Options header in HTTP responses to value. To disable, set to "". | | argocd.configs.params.annotations | object | `{}` | Annotations to be added to the argocd-cmd-params-cm ConfigMap | | argocd.configs.params.create | bool | `true` | Create the argocd-cmd-params-cm configmap If false, it is expected the configmap will be created by something else. | | argocd.configs.rbac."policy.csv" | string | `''` (See [values.yaml]) | File containing user-defined policies and role definitions. | @@ -268,6 +253,7 @@ A Helm chart for Kubernetes | argocd.controller.containerPorts.metrics | int | `8082` | Metrics container port | | argocd.controller.containerSecurityContext | object | See [values.yaml] | Application controller container-level security context | | argocd.controller.deploymentAnnotations | object | `{}` | Annotations for the application controller Deployment | +| argocd.controller.deploymentLabels | object | `{}` | Labels for the application controller Deployment | | argocd.controller.dnsConfig | object | `{}` | [DNS configuration] | | argocd.controller.dnsPolicy | string | `"ClusterFirst"` | Alternative DNS policy for application controller pods | | argocd.controller.dynamicClusterDistribution | bool | `false` | Enable dynamic cluster distribution (alpha) Ref: https://argo-cd.readthedocs.io/en/stable/operator-manual/dynamic-cluster-distribution # This is done using a deployment instead of a statefulSet # When replicas are added or removed, the sharding algorithm is re-run to ensure that the # clusters are distributed according to the algorithm. If the algorithm is well-balanced, # like round-robin, then the shards will be well-balanced. 
| @@ -311,6 +297,7 @@ A Helm chart for Kubernetes | argocd.controller.metrics.serviceMonitor.selector | object | `{}` | Prometheus ServiceMonitor selector | | argocd.controller.metrics.serviceMonitor.tlsConfig | object | `{}` | Prometheus ServiceMonitor tlsConfig | | argocd.controller.name | string | `"application-controller"` | Application controller name string | +| argocd.controller.networkPolicy.create | bool | `false` (defaults to global.networkPolicy.create) | Default network policy rules used by application controller | | argocd.controller.nodeSelector | object | `{}` (defaults to global.nodeSelector) | [Node selector] | | argocd.controller.pdb.annotations | object | `{}` | Annotations to be added to application controller pdb | | argocd.controller.pdb.enabled | bool | `false` | Deploy a [PodDisruptionBudget] for the application controller | @@ -328,6 +315,7 @@ A Helm chart for Kubernetes | argocd.controller.replicas | int | `1` | The number of application controller pods to run. Additional replicas will cause sharding of managed clusters across number of replicas. # With dynamic cluster distribution turned on, sharding of the clusters will gracefully # rebalance if the number of replica's changes or one becomes unhealthy. (alpha) | | argocd.controller.resources | object | `{}` | Resource limits and requests for the application controller pods | | argocd.controller.revisionHistoryLimit | int | `5` | Maximum number of controller revisions that will be maintained in StatefulSet history | +| argocd.controller.roleRules | list | `[]` | List of custom rules for the application controller's Role resource | | argocd.controller.runtimeClassName | string | `""` (defaults to global.runtimeClassName) | Runtime class name for the application controller | | argocd.controller.serviceAccount.annotations | object | `{}` | Annotations applied to created service account | | argocd.controller.serviceAccount.automountServiceAccountToken | bool | `true` | Automount API credentials for the Service Account | @@ -340,7 +328,12 @@ A Helm chart for Kubernetes | argocd.controller.topologySpreadConstraints | list | `[]` (defaults to global.topologySpreadConstraints) | Assign custom [TopologySpreadConstraints] rules to the application controller # Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/ # If labelSelector is left out, it will default to the labelSelector configuration of the deployment | | argocd.controller.volumeMounts | list | `[]` | Additional volumeMounts to the application controller main container | | argocd.controller.volumes | list | `[]` | Additional volumes to the application controller pod | -| argocd.crds.additionalLabels | object | `{}` | Addtional labels to be added to all CRDs | +| argocd.controller.vpa.annotations | object | `{}` | Annotations to be added to application controller vpa | +| argocd.controller.vpa.containerPolicy | object | `{}` | Controls how VPA computes the recommended resources for application controller container # Ref: https://github.com/kubernetes/autoscaler/blob/master/vertical-pod-autoscaler/examples/hamster.yaml | +| argocd.controller.vpa.enabled | bool | `false` | Deploy a [VerticalPodAutoscaler](https://kubernetes.io/docs/concepts/workloads/autoscaling/#scaling-workloads-vertically/) for the application controller | +| argocd.controller.vpa.labels | object | `{}` | Labels to be added to application controller vpa | +| argocd.controller.vpa.updateMode | string | `"Initial"` | One of the VPA operation modes # Ref: 
https://kubernetes.io/docs/concepts/workloads/autoscaling/#scaling-workloads-vertically # Note: Recreate update mode requires more than one replica unless the min-replicas VPA controller flag is overridden | +| argocd.crds.additionalLabels | object | `{}` | Additional labels to be added to all CRDs | | argocd.crds.annotations | object | `{}` | Annotations to be added to all CRDs | | argocd.crds.install | bool | `true` | Install and upgrade CRDs | | argocd.crds.keep | bool | `true` | Keep CRDs on chart uninstall | @@ -359,6 +352,7 @@ A Helm chart for Kubernetes | argocd.dex.containerPorts.metrics | int | `5558` | Metrics container port | | argocd.dex.containerSecurityContext | object | See [values.yaml] | Dex container-level security context | | argocd.dex.deploymentAnnotations | object | `{}` | Annotations to be added to the Dex server Deployment | +| argocd.dex.deploymentLabels | object | `{}` | Labels for the Dex server Deployment | | argocd.dex.deploymentStrategy | object | `{}` | Deployment strategy to be added to the Dex server Deployment | | argocd.dex.dnsConfig | object | `{}` | [DNS configuration] | | argocd.dex.dnsPolicy | string | `"ClusterFirst"` | Alternative DNS policy for Dex server pods | @@ -370,7 +364,7 @@ A Helm chart for Kubernetes | argocd.dex.extraContainers | list | `[]` | Additional containers to be added to the dex pod # Note: Supports use of custom Helm templates | | argocd.dex.image.imagePullPolicy | string | `""` (defaults to global.image.imagePullPolicy) | Dex imagePullPolicy | | argocd.dex.image.repository | string | `"ghcr.io/dexidp/dex"` | Dex image repository | -| argocd.dex.image.tag | string | `"v2.42.1"` | Dex image tag | +| argocd.dex.image.tag | string | `"v2.44.0"` | Dex image tag | | argocd.dex.imagePullSecrets | list | `[]` (defaults to global.imagePullSecrets) | Secrets with credentials to pull images from a private registry | | argocd.dex.initContainers | list | `[]` | Init containers to add to the dex pod # Note: Supports use of custom Helm templates | | argocd.dex.initImage.imagePullPolicy | string | `""` (defaults to global.image.imagePullPolicy) | Argo CD init image imagePullPolicy | @@ -402,6 +396,7 @@ A Helm chart for Kubernetes | argocd.dex.metrics.serviceMonitor.selector | object | `{}` | Prometheus ServiceMonitor selector | | argocd.dex.metrics.serviceMonitor.tlsConfig | object | `{}` | Prometheus ServiceMonitor tlsConfig | | argocd.dex.name | string | `"dex-server"` | Dex name | +| argocd.dex.networkPolicy.create | bool | `false` (defaults to global.networkPolicy.create) | Default network policy rules used by Dex server | | argocd.dex.nodeSelector | object | `{}` (defaults to global.nodeSelector) | [Node selector] | | argocd.dex.pdb.annotations | object | `{}` | Annotations to be added to Dex server pdb | | argocd.dex.pdb.enabled | bool | `false` | Deploy a [PodDisruptionBudget] for the Dex server | @@ -436,7 +431,7 @@ A Helm chart for Kubernetes | argocd.dex.topologySpreadConstraints | list | `[]` (defaults to global.topologySpreadConstraints) | Assign custom [TopologySpreadConstraints] rules to dex # Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/ # If labelSelector is left out, it will default to the labelSelector configuration of the deployment | | argocd.dex.volumeMounts | list | `[]` | Additional volumeMounts to the dex main container | | argocd.dex.volumes | list | `[]` | Additional volumes to the dex pod | -| argocd.externalRedis.existingSecret | string | `""` | The name of an existing 
secret with Redis (must contain key `redis-password`) and Sentinel credentials. When it's set, the `externalRedis.password` parameter is ignored | +| argocd.externalRedis.existingSecret | string | `""` | The name of an existing secret with Redis (must contain key `redis-password`. And should contain `redis-username` if username is not `default`) and Sentinel credentials. When it's set, the `externalRedis.username` and `externalRedis.password` parameters are ignored | | argocd.externalRedis.host | string | `""` | External Redis server host | | argocd.externalRedis.password | string | `""` | External Redis password | | argocd.externalRedis.port | int | `6379` | External Redis server port | @@ -451,6 +446,7 @@ A Helm chart for Kubernetes | argocd.global.affinity.podAntiAffinity | string | `"soft"` | Default pod anti-affinity rules. Either: `none`, `soft` or `hard` | | argocd.global.certificateAnnotations | object | `{}` | Annotations for the all deployed Certificates | | argocd.global.deploymentAnnotations | object | `{}` | Annotations for the all deployed Deployments | +| argocd.global.deploymentLabels | object | `{}` | Labels for the all deployed Deployments | | argocd.global.deploymentStrategy | object | `{}` | Deployment strategy for the all deployed Deployments | | argocd.global.domain | string | `"argocd.example.com"` | Default domain used by all components # Used for ingresses, certificates, SSO, notifications, etc. | | argocd.global.dualStack.ipFamilies | list | `[]` | IP families that should be supported and the order in which they should be applied to ClusterIP as well. Can be IPv4 and/or IPv6. | @@ -487,6 +483,7 @@ A Helm chart for Kubernetes | argocd.notifications.containerSecurityContext | object | See [values.yaml] | Notification controller container-level security Context | | argocd.notifications.context | object | `{}` | Define user-defined context # For more information: https://argo-cd.readthedocs.io/en/stable/operator-manual/notifications/templates/#defining-user-defined-context | | argocd.notifications.deploymentAnnotations | object | `{}` | Annotations to be applied to the notifications controller Deployment | +| argocd.notifications.deploymentLabels | object | `{}` | Labels for the notifications controller Deployment | | argocd.notifications.deploymentStrategy | object | `{"type":"Recreate"}` | Deployment strategy to be added to the notifications controller Deployment | | argocd.notifications.dnsConfig | object | `{}` | [DNS configuration] | | argocd.notifications.dnsPolicy | string | `"ClusterFirst"` | Alternative DNS policy for notifications controller Pods | @@ -525,6 +522,7 @@ A Helm chart for Kubernetes | argocd.notifications.metrics.serviceMonitor.selector | object | `{}` | Prometheus ServiceMonitor selector | | argocd.notifications.metrics.serviceMonitor.tlsConfig | object | `{}` | Prometheus ServiceMonitor tlsConfig | | argocd.notifications.name | string | `"notifications-controller"` | Notifications controller name string | +| argocd.notifications.networkPolicy.create | bool | `false` (defaults to global.networkPolicy.create) | Default network policy rules used by notifications controller | | argocd.notifications.nodeSelector | object | `{}` (defaults to global.nodeSelector) | [Node selector] | | argocd.notifications.notifiers | object | See [values.yaml] | Configures notification services such as slack, email or custom webhook # For more information: https://argo-cd.readthedocs.io/en/stable/operator-manual/notifications/services/overview/ | | 
argocd.notifications.pdb.annotations | object | `{}` | Annotations to be added to notifications controller pdb | @@ -568,18 +566,19 @@ A Helm chart for Kubernetes | argocd.redis-ha.existingSecret | string | `"argocd-redis"` | Existing Secret to use for redis-ha authentication. By default the redis-secret-init Job is generating this Secret. | | argocd.redis-ha.exporter.enabled | bool | `false` | Enable Prometheus redis-exporter sidecar | | argocd.redis-ha.exporter.image | string | `"ghcr.io/oliver006/redis_exporter"` | Repository to use for the redis-exporter | -| argocd.redis-ha.exporter.tag | string | `"v1.69.0"` | Tag to use for the redis-exporter | +| argocd.redis-ha.exporter.tag | string | `"v1.75.0"` | Tag to use for the redis-exporter | | argocd.redis-ha.haproxy.additionalAffinities | object | `{}` | Additional affinities to add to the haproxy pods. | | argocd.redis-ha.haproxy.affinity | string | `""` | Assign custom [affinity] rules to the haproxy pods. | | argocd.redis-ha.haproxy.containerSecurityContext | object | See [values.yaml] | HAProxy container-level security context | | argocd.redis-ha.haproxy.enabled | bool | `true` | Enabled HAProxy LoadBalancing/Proxy | | argocd.redis-ha.haproxy.hardAntiAffinity | bool | `true` | Whether the haproxy pods should be forced to run on separate nodes. | +| argocd.redis-ha.haproxy.image.repository | string | `"ecr-public.aws.com/docker/library/haproxy"` | HAProxy Image Repository | | argocd.redis-ha.haproxy.labels | object | `{"app.kubernetes.io/name":"argocd-redis-ha-haproxy"}` | Custom labels for the haproxy pod. This is relevant for Argo CD CLI. | | argocd.redis-ha.haproxy.metrics.enabled | bool | `true` | HAProxy enable prometheus metric scraping | | argocd.redis-ha.haproxy.tolerations | list | `[]` | [Tolerations] for use with node taints for haproxy pods. | | argocd.redis-ha.hardAntiAffinity | bool | `true` | Whether the Redis server pods should be forced to run on separate nodes. | -| argocd.redis-ha.image.repository | string | `"public.ecr.aws/docker/library/redis"` | Redis repository | -| argocd.redis-ha.image.tag | string | `"7.2.8-alpine"` | Redis tag # Do not upgrade to >= 7.4.0, otherwise you are no longer using an open source version of Redis | +| argocd.redis-ha.image.repository | string | `"ecr-public.aws.com/docker/library/redis"` | Redis repository | +| argocd.redis-ha.image.tag | string | `"8.2.2-alpine"` | Redis tag # Do not upgrade to >= 7.4.0, otherwise you are no longer using an open source version of Redis | | argocd.redis-ha.persistentVolume.enabled | bool | `false` | Configures persistence on Redis nodes | | argocd.redis-ha.redis.config | object | See [values.yaml] | Any valid redis config options in this section will be applied to each server (see `redis-ha` chart) | | argocd.redis-ha.redis.config.save | string | `'""'` | Will save the DB if both the given number of seconds and the given number of write operations against the DB occurred. 
`""` is disabled | @@ -596,6 +595,7 @@ A Helm chart for Kubernetes | argocd.redis.containerPorts.redis | int | `6379` | Redis container port | | argocd.redis.containerSecurityContext | object | See [values.yaml] | Redis container-level security context | | argocd.redis.deploymentAnnotations | object | `{}` | Annotations to be added to the Redis server Deployment | +| argocd.redis.deploymentLabels | object | `{}` | Labels for the Redis server Deployment | | argocd.redis.dnsConfig | object | `{}` | [DNS configuration] | | argocd.redis.dnsPolicy | string | `"ClusterFirst"` | Alternative DNS policy for Redis server pods | | argocd.redis.enabled | bool | `true` | Enable redis | @@ -606,7 +606,7 @@ A Helm chart for Kubernetes | argocd.redis.exporter.env | list | `[]` | Environment variables to pass to the Redis exporter | | argocd.redis.exporter.image.imagePullPolicy | string | `""` (defaults to global.image.imagePullPolicy) | Image pull policy for the redis-exporter | | argocd.redis.exporter.image.repository | string | `"ghcr.io/oliver006/redis_exporter"` | Repository to use for the redis-exporter | -| argocd.redis.exporter.image.tag | string | `"v1.71.0"` | Tag to use for the redis-exporter | +| argocd.redis.exporter.image.tag | string | `"v1.80.1"` | Tag to use for the redis-exporter | | argocd.redis.exporter.livenessProbe.enabled | bool | `false` | Enable Kubernetes liveness probe for Redis exporter | | argocd.redis.exporter.livenessProbe.failureThreshold | int | `5` | Minimum consecutive failures for the [probe] to be considered failed after having succeeded | | argocd.redis.exporter.livenessProbe.initialDelaySeconds | int | `30` | Number of seconds after the container has started before [probe] is initiated | @@ -623,8 +623,8 @@ A Helm chart for Kubernetes | argocd.redis.extraArgs | list | `[]` | Additional command line arguments to pass to redis-server | | argocd.redis.extraContainers | list | `[]` | Additional containers to be added to the redis pod # Note: Supports use of custom Helm templates | | argocd.redis.image.imagePullPolicy | string | `""` (defaults to global.image.imagePullPolicy) | Redis image pull policy | -| argocd.redis.image.repository | string | `"public.ecr.aws/docker/library/redis"` | Redis repository | -| argocd.redis.image.tag | string | `"7.2.8-alpine"` | Redis tag # Do not upgrade to >= 7.4.0, otherwise you are no longer using an open source version of Redis | +| argocd.redis.image.repository | string | `"ecr-public.aws.com/docker/library/redis"` | Redis repository | +| argocd.redis.image.tag | string | `"8.2.2-alpine"` | Redis tag # Do not upgrade to >= 7.4.0, otherwise you are no longer using an open source version of Redis | | argocd.redis.imagePullSecrets | list | `[]` (defaults to global.imagePullSecrets) | Secrets with credentials to pull images from a private registry | | argocd.redis.initContainers | list | `[]` | Init containers to add to the redis pod # Note: Supports use of custom Helm templates | | argocd.redis.livenessProbe.enabled | bool | `false` | Enable Kubernetes liveness probe for Redis server | @@ -652,6 +652,7 @@ A Helm chart for Kubernetes | argocd.redis.metrics.serviceMonitor.selector | object | `{}` | Prometheus ServiceMonitor selector | | argocd.redis.metrics.serviceMonitor.tlsConfig | object | `{}` | Prometheus ServiceMonitor tlsConfig | | argocd.redis.name | string | `"redis"` | Redis name | +| argocd.redis.networkPolicy.create | bool | `false` (defaults to global.networkPolicy.create) | Default network policy rules used by redis | | 
argocd.redis.nodeSelector | object | `{}` (defaults to global.nodeSelector) | [Node selector] | | argocd.redis.pdb.annotations | object | `{}` | Annotations to be added to Redis pdb | | argocd.redis.pdb.enabled | bool | `false` | Deploy a [PodDisruptionBudget] for the Redis | @@ -685,6 +686,7 @@ A Helm chart for Kubernetes | argocd.redisSecretInit.affinity | object | `{}` | Assign custom [affinity] rules to the Redis secret-init Job | | argocd.redisSecretInit.containerSecurityContext | object | See [values.yaml] | Application controller container-level security context | | argocd.redisSecretInit.enabled | bool | `true` | Enable Redis secret initialization. If disabled, secret must be provisioned by alternative methods | +| argocd.redisSecretInit.extraArgs | list | `[]` | Additional command line arguments for the Redis secret-init Job | | argocd.redisSecretInit.image.imagePullPolicy | string | `""` (defaults to global.image.imagePullPolicy) | Image pull policy for the Redis secret-init Job | | argocd.redisSecretInit.image.repository | string | `""` (defaults to global.image.repository) | Repository to use for the Redis secret-init Job | | argocd.redisSecretInit.image.tag | string | `""` (defaults to global.image.tag) | Tag to use for the Redis secret-init Job | @@ -696,6 +698,7 @@ A Helm chart for Kubernetes | argocd.redisSecretInit.podLabels | object | `{}` | Labels to be added to the Redis secret-init Job | | argocd.redisSecretInit.priorityClassName | string | `""` (defaults to global.priorityClassName) | Priority class for Redis secret-init Job | | argocd.redisSecretInit.resources | object | `{}` | Resource limits and requests for Redis secret-init Job | +| argocd.redisSecretInit.runtimeClassName | string | `""` (defaults to global.runtimeClassName) | Runtime class name for the Redis secret-init Job | | argocd.redisSecretInit.securityContext | object | `{}` | Redis secret-init Job pod-level security context | | argocd.redisSecretInit.serviceAccount.annotations | object | `{}` | Annotations applied to created service account | | argocd.redisSecretInit.serviceAccount.automountServiceAccountToken | bool | `true` | Automount API credentials for the Service Account | @@ -722,7 +725,9 @@ A Helm chart for Kubernetes | argocd.repoServer.containerPorts.metrics | int | `8084` | Metrics container port | | argocd.repoServer.containerPorts.server | int | `8081` | Repo server container port | | argocd.repoServer.containerSecurityContext | object | See [values.yaml] | Repo server container-level security context | +| argocd.repoServer.copyutil.resources | object | `{}` | Resource limits and requests for the repo server copyutil initContainer | | argocd.repoServer.deploymentAnnotations | object | `{}` | Annotations to be added to repo server Deployment | +| argocd.repoServer.deploymentLabels | object | `{}` | Labels for the repo server Deployment | | argocd.repoServer.deploymentStrategy | object | `{}` | Deployment strategy to be added to the repo server Deployment | | argocd.repoServer.dnsConfig | object | `{}` | [DNS configuration] | | argocd.repoServer.dnsPolicy | string | `"ClusterFirst"` | Alternative DNS policy for Repo server pods | @@ -764,6 +769,7 @@ A Helm chart for Kubernetes | argocd.repoServer.metrics.serviceMonitor.selector | object | `{}` | Prometheus ServiceMonitor selector | | argocd.repoServer.metrics.serviceMonitor.tlsConfig | object | `{}` | Prometheus ServiceMonitor tlsConfig | | argocd.repoServer.name | string | `"repo-server"` | Repo server name | +| 
argocd.repoServer.networkPolicy.create | bool | `false` (defaults to global.networkPolicy.create) | Default network policy rules used by repo server | | argocd.repoServer.nodeSelector | object | `{}` (defaults to global.nodeSelector) | [Node selector] | | argocd.repoServer.pdb.annotations | object | `{}` | Annotations to be added to repo server pdb | | argocd.repoServer.pdb.enabled | bool | `false` | Deploy a [PodDisruptionBudget] for the repo server | @@ -786,6 +792,7 @@ A Helm chart for Kubernetes | argocd.repoServer.service.labels | object | `{}` | Repo server service labels | | argocd.repoServer.service.port | int | `8081` | Repo server service port | | argocd.repoServer.service.portName | string | `"tcp-repo-server"` | Repo server service port name | +| argocd.repoServer.service.trafficDistribution | string | `""` | Traffic distribution preference for the repo server service. If the field is not set, the implementation will apply its default routing strategy. | | argocd.repoServer.serviceAccount.annotations | object | `{}` | Annotations applied to created service account | | argocd.repoServer.serviceAccount.automountServiceAccountToken | bool | `true` | Automount API credentials for the Service Account | | argocd.repoServer.serviceAccount.create | bool | `true` | Create repo server service account | @@ -806,6 +813,11 @@ A Helm chart for Kubernetes | argocd.server.autoscaling.minReplicas | int | `1` | Minimum number of replicas for the Argo CD server [HPA] | | argocd.server.autoscaling.targetCPUUtilizationPercentage | int | `50` | Average CPU utilization percentage for the Argo CD server [HPA] | | argocd.server.autoscaling.targetMemoryUtilizationPercentage | int | `50` | Average memory utilization percentage for the Argo CD server [HPA] | +| argocd.server.backendTLSPolicy.annotations | object | `{}` | Additional BackendTLSPolicy annotations | +| argocd.server.backendTLSPolicy.enabled | bool | `false` | Enable BackendTLSPolicy resource for Argo CD server (Gateway API) | +| argocd.server.backendTLSPolicy.labels | object | `{}` | Additional BackendTLSPolicy labels | +| argocd.server.backendTLSPolicy.targetRefs | list | `[]` (See [values.yaml]) | Target references for the BackendTLSPolicy | +| argocd.server.backendTLSPolicy.validation | object | `{}` (See [values.yaml]) | TLS validation configuration | | argocd.server.certificate.additionalHosts | list | `[]` | Certificate Subject Alternate Names (SANs) | | argocd.server.certificate.annotations | object | `{}` | Annotations to be applied to the Server Certificate | | argocd.server.certificate.domain | string | `""` (defaults to global.domain) | Certificate primary domain (commonName) | @@ -832,6 +844,7 @@ A Helm chart for Kubernetes | argocd.server.containerPorts.server | int | `8080` | Server container port | | argocd.server.containerSecurityContext | object | See [values.yaml] | Server container-level security context | | argocd.server.deploymentAnnotations | object | `{}` | Annotations to be added to server Deployment | +| argocd.server.deploymentLabels | object | `{}` | Labels for the server Deployment | | argocd.server.deploymentStrategy | object | `{}` | Deployment strategy to be added to the server Deployment | | argocd.server.dnsConfig | object | `{}` | [DNS configuration] | | argocd.server.dnsPolicy | string | `"ClusterFirst"` | Alternative DNS policy for Server pods | @@ -847,13 +860,26 @@ A Helm chart for Kubernetes | argocd.server.extensions.resources | object | `{}` | Resource limits and requests for the argocd-extensions 
container | | argocd.server.extraArgs | list | `[]` | Additional command line arguments to pass to Argo CD server | | argocd.server.extraContainers | list | `[]` | Additional containers to be added to the server pod # Note: Supports use of custom Helm templates | +| argocd.server.grpcroute.annotations | object | `{}` | Additional GRPCRoute annotations | +| argocd.server.grpcroute.enabled | bool | `false` | Enable GRPCRoute resource for Argo CD server (Gateway API) | +| argocd.server.grpcroute.hostnames | list | `[]` (See [values.yaml]) | List of hostnames for the GRPCRoute | +| argocd.server.grpcroute.labels | object | `{}` | Additional GRPCRoute labels | +| argocd.server.grpcroute.parentRefs | list | `[]` (See [values.yaml]) | Gateway API parentRefs for the GRPCRoute # Must reference an existing Gateway | +| argocd.server.grpcroute.rules | list | `[]` (See [values.yaml]) | GRPCRoute rules configuration | | argocd.server.hostNetwork | bool | `false` | Host Network for Server pods | +| argocd.server.httproute.annotations | object | `{}` | Additional HTTPRoute annotations | +| argocd.server.httproute.enabled | bool | `false` | Enable HTTPRoute resource for Argo CD server (Gateway API) | +| argocd.server.httproute.hostnames | list | `[]` (See [values.yaml]) | List of hostnames for the HTTPRoute | +| argocd.server.httproute.labels | object | `{}` | Additional HTTPRoute labels | +| argocd.server.httproute.parentRefs | list | `[]` (See [values.yaml]) | Gateway API parentRefs for the HTTPRoute # Must reference an existing Gateway | +| argocd.server.httproute.rules | list | `[]` (See [values.yaml]) | HTTPRoute rules configuration | | argocd.server.image.imagePullPolicy | string | `""` (defaults to global.image.imagePullPolicy) | Image pull policy for the Argo CD server | | argocd.server.image.repository | string | `""` (defaults to global.image.repository) | Repository to use for the Argo CD server | | argocd.server.image.tag | string | `""` (defaults to global.image.tag) | Tag to use for the Argo CD server | | argocd.server.imagePullSecrets | list | `[]` (defaults to global.imagePullSecrets) | Secrets with credentials to pull images from a private registry | | argocd.server.ingress.annotations | object | `{}` | Additional ingress annotations # Ref: https://argo-cd.readthedocs.io/en/stable/operator-manual/ingress/#option-1-ssl-passthrough | | argocd.server.ingress.aws.backendProtocolVersion | string | `"GRPC"` | Backend protocol version for the AWS ALB gRPC service # This tells AWS to send traffic from the ALB using gRPC. # For more information: https://docs.aws.amazon.com/elasticloadbalancing/latest/application/target-group-health-checks.html#health-check-settings | +| argocd.server.ingress.aws.serviceAnnotations | object | `{}` | Annotations for the AWS ALB gRPC service # Allows adding custom annotations to the gRPC service for integrations like DataDog, Prometheus, etc. | | argocd.server.ingress.aws.serviceType | string | `"NodePort"` | Service type for the AWS ALB gRPC service # Can be of type NodePort or ClusterIP depending on which mode you are running. # Instance mode needs type NodePort, IP mode needs type ClusterIP # Ref: https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.2/how-it-works/#ingress-traffic | | argocd.server.ingress.controller | string | `"generic"` | Specific implementation for ingress controller. 
One of `generic`, `aws` or `gke` # Additional configuration might be required in related configuration sections | | argocd.server.ingress.enabled | bool | `false` | Enable an ingress resource for the Argo CD server | @@ -910,6 +936,7 @@ A Helm chart for Kubernetes | argocd.server.metrics.serviceMonitor.selector | object | `{}` | Prometheus ServiceMonitor selector | | argocd.server.metrics.serviceMonitor.tlsConfig | object | `{}` | Prometheus ServiceMonitor tlsConfig | | argocd.server.name | string | `"server"` | Argo CD server name | +| argocd.server.networkPolicy.create | bool | `false` (defaults to global.networkPolicy.create) | Default network policy rules used by ArgoCD Server | | argocd.server.nodeSelector | object | `{}` (defaults to global.nodeSelector) | [Node selector] | | argocd.server.pdb.annotations | object | `{}` | Annotations to be added to Argo CD server pdb | | argocd.server.pdb.enabled | bool | `false` | Deploy a [PodDisruptionBudget] for the Argo CD server | @@ -989,7 +1016,7 @@ spec: source: repoURL: "https://edixos.github.io/ekp-helm" - targetRevision: "0.1.3" + targetRevision: "0.1.4" chart: argocd path: '' helm: diff --git a/charts/argocd/charts/argo-cd-8.0.1.tgz b/charts/argocd/charts/argo-cd-8.0.1.tgz deleted file mode 100644 index cd15af30..00000000 Binary files a/charts/argocd/charts/argo-cd-8.0.1.tgz and /dev/null differ diff --git a/charts/argocd/charts/argo-cd-9.1.4.tgz b/charts/argocd/charts/argo-cd-9.1.4.tgz new file mode 100644 index 00000000..912bf8bc Binary files /dev/null and b/charts/argocd/charts/argo-cd-9.1.4.tgz differ diff --git a/charts/argocd/values.yaml b/charts/argocd/values.yaml index 25e7fd2f..cc2e1b8e 100644 --- a/charts/argocd/values.yaml +++ b/charts/argocd/values.yaml @@ -54,7 +54,7 @@ argocd: keep: true # -- Annotations to be added to all CRDs annotations: {} - # -- Addtional labels to be added to all CRDs + # -- Additional labels to be added to all CRDs additionalLabels: {} ## Globally shared configuration @@ -98,6 +98,9 @@ argocd: # -- Annotations for the all deployed Deployments deploymentAnnotations: {} + # -- Labels for the all deployed Deployments + deploymentLabels: {} + # -- Annotations for the all deployed pods podAnnotations: {} @@ -199,10 +202,6 @@ argocd: ## Ref: https://argo-cd.readthedocs.io/en/stable/operator-manual/app-sync-using-impersonation/ application.sync.impersonation.enabled: false - # -- Enable logs RBAC enforcement - ## Ref: https://argo-cd.readthedocs.io/en/latest/operator-manual/upgrading/2.3-2.4/#enable-logs-rbac-enforcement - server.rbac.log.enforce.enable: false - # -- Enable exec feature in Argo UI ## Ref: https://argo-cd.readthedocs.io/en/latest/operator-manual/rbac/#exec-resource exec.enabled: false @@ -238,20 +237,45 @@ argocd: # oidc.config: | # name: AzureAD # issuer: https://login.microsoftonline.com/TENANT_ID/v2.0 - # clientID: CLIENT_ID + # clientID: aaaabbbbccccddddeee # clientSecret: $oidc.azuread.clientSecret + + # Some OIDC providers require a separate clientID for different callback URLs. + # For example, if configuring Argo CD with self-hosted Dex, you will need a separate client ID + # for the 'localhost' (CLI) client to Dex. This field is optional. If omitted, the CLI will + # use the same clientID as the Argo CD server + # cliClientID: vvvvwwwwxxxxyyyyzzzz + # rootCA: | # -----BEGIN CERTIFICATE----- # ... encoded certificate data here ... # -----END CERTIFICATE----- + + # Optional list of allowed aud claims. 
If omitted or empty, defaults to the clientID value above (and the + # cliClientID, if that is also specified). If you specify a list and want the clientID to be allowed, you must + # explicitly include it in the list. + # Token verification will pass if any of the token's audiences matches any of the audiences in this list. + # allowedAudiences: + # - aaaabbbbccccddddeee + # - qqqqwwwweeeerrrrttt + + # Optional set of OIDC claims to request on the ID token. # requestedIDTokenClaims: # groups: # essential: true + + # Optional set of OIDC scopes to request. If omitted, defaults to: ["openid", "profile", "email", "groups"] # requestedScopes: # - openid # - profile # - email + # PKCE authentication flow processes authorization flow from browser only - default false + # uses the clientID + # make sure the Identity Provider (IdP) is public and doesn't need clientSecret + # make sure the Identity Provider (IdP) has this redirect URI registered: https://argocd.example.com/pkce/verify + # enablePKCEAuthentication: true + # Extension Configuration ## Ref: https://argo-cd.readthedocs.io/en/latest/developer-guide/extensions/proxy-extensions/ # extension.config: | @@ -406,66 +430,9 @@ argocd: # -- Annotations to be added to the argocd-cmd-params-cm ConfigMap annotations: {} - ## Generic parameters - # -- Open-Telemetry collector address: (e.g. "otel-collector:4317") - otlp.address: '' - - ## Controller Properties - # -- Number of application status processors - controller.status.processors: 20 - # -- Number of application operation processors - controller.operation.processors: 10 - # -- Specifies timeout between application self heal attempts - controller.self.heal.timeout.seconds: 5 - # -- Repo server RPC call timeout seconds. - controller.repo.server.timeout.seconds: 60 - # -- Specifies the timeout after which a sync would be terminated. 0 means no timeout - controller.sync.timeout.seconds: 0 - - ## Server properties - # -- Run server without TLS - ## NOTE: This value should be set when you generate params by other means as it changes ports used by ingress template. - server.insecure: false - # -- Value for base href in index.html. Used if Argo CD is running behind reverse proxy under subpath different from / - server.basehref: / - # -- Used if Argo CD is running behind reverse proxy under subpath different from / - server.rootpath: '' - # -- Directory path that contains additional static assets - server.staticassets: /shared/app - # -- Disable Argo CD RBAC for user authentication - server.disable.auth: false - # -- Enable GZIP compression - server.enable.gzip: true - # -- Enable proxy extension feature. (proxy extension is in Alpha phase) - server.enable.proxy.extension: false - # -- Set X-Frame-Options header in HTTP responses to value. To disable, set to "". - server.x.frame.options: sameorigin - - ## Repo-server properties - # -- Limit on number of concurrent manifests generate requests. Any value less the 1 means no limit. - reposerver.parallelism.limit: 0 - - ## ApplicationSet Properties - # -- Modify how application is synced between the generator and the cluster. One of: `sync`, `create-only`, `create-update`, `create-delete` - applicationsetcontroller.policy: sync - # -- Enables use of the Progressive Syncs capability - applicationsetcontroller.enable.progressive.syncs: false - # -- A list of glob patterns specifying where to look for ApplicationSet resources. (e.g. 
`"argocd,argocd-appsets-*"`) - # @default -- `""` (default is only the ns where the controller is installed) - ## For more information: https://argo-cd.readthedocs.io/en/stable/operator-manual/applicationset/Appset-Any-Namespace/ - applicationsetcontroller.namespaces: "" - - # -- Enables [Applications in any namespace] - ## List of additional namespaces where applications may be created in and reconciled from. - ## The namespace where Argo CD is installed to will always be allowed. - ## Set comma-separated list. (e.g. app-team-one, app-team-two) - application.namespaces: "" - - # -- JQ Path expression timeout - ## By default, the evaluation of a JQPathExpression is limited to one second. - ## If you encounter a "JQ patch execution timed out" error message due to a complex JQPathExpression - ## that requires more time to evaluate, you can extend the timeout period. - controller.ignore.normalizer.jq.timeout: "1s" + # You can customize parameters by adding parameters here. + # (e.g.) + # otlp.address: '' # Argo CD RBAC policy configuration ## Ref: https://github.com/argoproj/argo-cd/blob/master/docs/operator-manual/rbac.md @@ -831,6 +798,31 @@ argocd: ## Has higher precedence over `controller.pdb.minAvailable` maxUnavailable: "" + ## Application controller Vertical Pod Autoscaler + ## Ref: https://kubernetes.io/docs/concepts/workloads/autoscaling/#scaling-workloads-vertically/ + vpa: + # -- Deploy a [VerticalPodAutoscaler](https://kubernetes.io/docs/concepts/workloads/autoscaling/#scaling-workloads-vertically/) for the application controller + enabled: false + # -- Labels to be added to application controller vpa + labels: {} + # -- Annotations to be added to application controller vpa + annotations: {} + # -- One of the VPA operation modes + ## Ref: https://kubernetes.io/docs/concepts/workloads/autoscaling/#scaling-workloads-vertically + ## Note: Recreate update mode requires more than one replica unless the min-replicas VPA controller flag is overridden + updateMode: Initial + # -- Controls how VPA computes the recommended resources for application controller container + ## Ref: https://github.com/kubernetes/autoscaler/blob/master/vertical-pod-autoscaler/examples/hamster.yaml + containerPolicy: {} + # controlledResources: ["cpu", "memory"] + # minAllowed: + # cpu: 250m + # memory: 256Mi + # maxAllowed: + # cpu: 1 + # memory: 1Gi + + ## Application controller image image: # -- Repository to use for the application controller @@ -906,6 +898,9 @@ argocd: # -- Annotations for the application controller Deployment deploymentAnnotations: {} + # -- Labels for the application controller Deployment + deploymentLabels: {} + # -- Annotations to be added to application controller pods podAnnotations: {} @@ -1101,6 +1096,17 @@ argocd: # -- List of custom rules for the application controller's ClusterRole resource rules: [] + ## Enable this and set the rules: to whatever custom rules you want for the Role resource. 
+ ## Defaults to off + # -- List of custom rules for the application controller's Role resource + roleRules: [] + + # Default application controller's network policy + networkPolicy: + # -- Default network policy rules used by application controller + # @default -- `false` (defaults to global.networkPolicy.create) + create: false + ## Dex dex: # -- Enable dex @@ -1172,7 +1178,7 @@ argocd: # -- Dex image repository repository: ghcr.io/dexidp/dex # -- Dex image tag - tag: v2.42.1 + tag: v2.44.0 # -- Dex imagePullPolicy # @default -- `""` (defaults to global.image.imagePullPolicy) imagePullPolicy: "" @@ -1254,6 +1260,9 @@ argocd: # -- Annotations to be added to the Dex server Deployment deploymentAnnotations: {} + # -- Labels for the Dex server Deployment + deploymentLabels: {} + # -- Annotations to be added to the Dex server pods podAnnotations: {} @@ -1397,6 +1406,12 @@ argocd: # maxSurge: 25% # maxUnavailable: 25% + # Default Dex server's network policy + networkPolicy: + # -- Default network policy rules used by Dex server + # @default -- `false` (defaults to global.networkPolicy.create) + create: false + # DEPRECATED - Use configs.params to override # -- Dex log format. Either `text` or `json` # @default -- `""` (defaults to global.logging.format) @@ -1435,10 +1450,10 @@ argocd: ## Redis image image: # -- Redis repository - repository: public.ecr.aws/docker/library/redis + repository: ecr-public.aws.com/docker/library/redis # -- Redis tag ## Do not upgrade to >= 7.4.0, otherwise you are no longer using an open source version of Redis - tag: 7.2.8-alpine + tag: 8.2.2-alpine # -- Redis image pull policy # @default -- `""` (defaults to global.image.imagePullPolicy) imagePullPolicy: "" @@ -1454,7 +1469,7 @@ argocd: # -- Repository to use for the redis-exporter repository: ghcr.io/oliver006/redis_exporter # -- Tag to use for the redis-exporter - tag: v1.71.0 + tag: v1.80.1 # -- Image pull policy for the redis-exporter # @default -- `""` (defaults to global.image.imagePullPolicy) imagePullPolicy: "" @@ -1575,6 +1590,9 @@ argocd: # -- Annotations to be added to the Redis server Deployment deploymentAnnotations: {} + # -- Labels for the Redis server Deployment + deploymentLabels: {} + # -- Annotations to be added to the Redis server pods podAnnotations: {} @@ -1714,6 +1732,12 @@ argocd: # -- Prometheus ServiceMonitor annotations annotations: {} + # Default redis's network policy + networkPolicy: + # -- Default network policy rules used by redis + # @default -- `false` (defaults to global.networkPolicy.create) + create: false + ## Redis-HA subchart replaces custom redis deployment when `redis-ha.enabled=true` # Ref: https://github.com/DandyDeveloper/charts/blob/master/charts/redis-ha/values.yaml redis-ha: @@ -1722,10 +1746,10 @@ argocd: ## Redis image image: # -- Redis repository - repository: public.ecr.aws/docker/library/redis + repository: ecr-public.aws.com/docker/library/redis # -- Redis tag ## Do not upgrade to >= 7.4.0, otherwise you are no longer using an open source version of Redis - tag: 7.2.8-alpine + tag: 8.2.2-alpine ## Prometheus redis-exporter sidecar exporter: # -- Enable Prometheus redis-exporter sidecar @@ -1733,7 +1757,7 @@ argocd: # -- Repository to use for the redis-exporter image: ghcr.io/oliver006/redis_exporter # -- Tag to use for the redis-exporter - tag: v1.69.0 + tag: v1.75.0 persistentVolume: # -- Configures persistence on Redis nodes enabled: false @@ -1754,6 +1778,9 @@ argocd: # -- Custom labels for the haproxy pod. This is relevant for Argo CD CLI. 
labels: app.kubernetes.io/name: argocd-redis-ha-haproxy + image: + # -- HAProxy Image Repository + repository: ecr-public.aws.com/docker/library/haproxy metrics: # -- HAProxy enable prometheus metric scraping enabled: true @@ -1818,8 +1845,8 @@ argocd: password: "" # -- External Redis server port port: 6379 - # -- The name of an existing secret with Redis (must contain key `redis-password`) and Sentinel credentials. - # When it's set, the `externalRedis.password` parameter is ignored + # -- The name of an existing secret with Redis (must contain key `redis-password`. And should contain `redis-username` if username is not `default`) and Sentinel credentials. + # When it's set, the `externalRedis.username` and `externalRedis.password` parameters are ignored existingSecret: "" # -- External Redis Secret annotations secretAnnotations: {} @@ -1841,10 +1868,17 @@ argocd: # @default -- `""` (defaults to global.image.imagePullPolicy) imagePullPolicy: "" # IfNotPresent + # -- Additional command line arguments for the Redis secret-init Job + extraArgs: [] + # -- Secrets with credentials to pull images from a private registry # @default -- `[]` (defaults to global.imagePullSecrets) imagePullSecrets: [] + # -- Runtime class name for the Redis secret-init Job + # @default -- `""` (defaults to global.runtimeClassName) + runtimeClassName: "" + # -- Annotations to be added to the Redis secret-init Job jobAnnotations: {} @@ -2107,6 +2141,9 @@ argocd: # -- Annotations to be added to server Deployment deploymentAnnotations: {} + # -- Labels for the server Deployment + deploymentLabels: {} + # -- Annotations to be added to server pods podAnnotations: {} @@ -2454,6 +2491,9 @@ argocd: ## Instance mode needs type NodePort, IP mode needs type ClusterIP ## Ref: https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.2/how-it-works/#ingress-traffic serviceType: NodePort + # -- Annotations for the AWS ALB gRPC service + ## Allows adding custom annotations to the gRPC service for integrations like DataDog, Prometheus, etc. 
+ serviceAnnotations: {} # Google specific options for Google Application Load Balancer # Applies only when `server.ingress.controller` is set to `gke` @@ -2566,6 +2606,106 @@ argocd: # -- Termination policy of Openshift Route termination_policy: None + # Gateway API HTTPRoute configuration + # NOTE: Gateway API support is in EXPERIMENTAL status + # Support depends on your Gateway controller implementation + # Some controllers may require additional configuration (e.g., BackendTLSPolicy for HTTPS backends) + # Refer to https://gateway-api.sigs.k8s.io/implementations/ for controller-specific details + httproute: + # -- Enable HTTPRoute resource for Argo CD server (Gateway API) + enabled: false + # -- Additional HTTPRoute labels + labels: {} + # -- Additional HTTPRoute annotations + annotations: {} + # -- Gateway API parentRefs for the HTTPRoute + ## Must reference an existing Gateway + # @default -- `[]` (See [values.yaml]) + parentRefs: [] + # - name: example-gateway + # namespace: example-gateway-namespace + # sectionName: https + # -- List of hostnames for the HTTPRoute + # @default -- `[]` (See [values.yaml]) + hostnames: [] + # - argocd.example.com + # -- HTTPRoute rules configuration + # @default -- `[]` (See [values.yaml]) + rules: + - matches: + - path: + type: PathPrefix + value: / + # filters: [] + # - type: RequestHeaderModifier + # requestHeaderModifier: + # add: + # - name: X-Custom-Header + # value: custom-value + + # Gateway API GRPCRoute configuration + # NOTE: Gateway API support is in EXPERIMENTAL status + # Support depends on your Gateway controller implementation + # Refer to https://gateway-api.sigs.k8s.io/implementations/ for controller-specific details + grpcroute: + # -- Enable GRPCRoute resource for Argo CD server (Gateway API) + enabled: false + # -- Additional GRPCRoute labels + labels: {} + # -- Additional GRPCRoute annotations + annotations: {} + # -- Gateway API parentRefs for the GRPCRoute + ## Must reference an existing Gateway + # @default -- `[]` (See [values.yaml]) + parentRefs: [] + # - name: example-gateway + # namespace: example-gateway-namespace + # sectionName: grpc + # -- List of hostnames for the GRPCRoute + # @default -- `[]` (See [values.yaml]) + hostnames: [] + # - grpc.argocd.example.com + # -- GRPCRoute rules configuration + # @default -- `[]` (See [values.yaml]) + rules: + - matches: + - method: + type: Exact + # filters: [] + # - type: RequestHeaderModifier + # requestHeaderModifier: + # add: + # - name: X-Custom-Header + # value: custom-value + + # Gateway API BackendTLSPolicy configuration + # NOTE: BackendTLSPolicy is in EXPERIMENTAL status (v1alpha3) + # Required for HTTPS backends when using Gateway API + # Not all Gateway controllers support this resource (e.g., Cilium does not support it yet) + backendTLSPolicy: + # -- Enable BackendTLSPolicy resource for Argo CD server (Gateway API) + enabled: false + # -- Additional BackendTLSPolicy labels + labels: {} + # -- Additional BackendTLSPolicy annotations + annotations: {} + # -- Target references for the BackendTLSPolicy + # @default -- `[]` (See [values.yaml]) + targetRefs: [] + # - group: "" + # kind: Service + # name: argocd-server + # sectionName: https + # -- TLS validation configuration + # @default -- `{}` (See [values.yaml]) + validation: {} + # hostname: argocd-server.argocd.svc.cluster.local + # caCertificateRefs: + # - name: example-ca-cert + # group: "" + # kind: ConfigMap + # wellKnownCACertificates: System + ## Enable this and set the rules: to whatever custom rules you want 
for the Cluster Role resource. ## Defaults to off clusterRoleRules: @@ -2574,6 +2714,12 @@ argocd: # -- List of custom rules for the server's ClusterRole resource rules: [] + # Default ArgoCD Server's network policy + networkPolicy: + # -- Default network policy rules used by ArgoCD Server + # @default -- `false` (defaults to global.networkPolicy.create) + create: false + ## Repo Server repoServer: # -- Repo server name @@ -2713,6 +2859,16 @@ argocd: # -- Init containers to add to the repo server pods initContainers: [] + copyutil: + # -- Resource limits and requests for the repo server copyutil initContainer + resources: {} + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 50m + # memory: 64Mi + # -- Additional volumeMounts to the repo server main container volumeMounts: [] @@ -2755,6 +2911,9 @@ argocd: # -- Annotations to be added to repo server Deployment deploymentAnnotations: {} + # -- Labels for the repo server Deployment + deploymentLabels: {} + # -- Annotations to be added to repo server pods podAnnotations: {} @@ -2885,6 +3044,8 @@ argocd: port: 8081 # -- Repo server service port name portName: tcp-repo-server + # -- Traffic distribution preference for the repo server service. If the field is not set, the implementation will apply its default routing strategy. + trafficDistribution: "" ## Repo server metrics service configuration metrics: @@ -2968,6 +3129,12 @@ argocd: # - list # - watch + # Default repo server's network policy + networkPolicy: + # -- Default network policy rules used by repo server + # @default -- `false` (defaults to global.networkPolicy.create) + create: false + ## ApplicationSet controller applicationSet: # -- ApplicationSet controller name string @@ -3125,6 +3292,9 @@ argocd: # -- Annotations to be added to ApplicationSet controller Deployment deploymentAnnotations: {} + # -- Labels for the ApplicationSet controller Deployment + deploymentLabels: {} + # -- Annotations for the ApplicationSet controller pods podAnnotations: {} @@ -3337,6 +3507,13 @@ argocd: # - argocd-applicationset.example.com # -- Enable ApplicationSet in any namespace feature allowAnyNamespace: false + + # Default ApplicationSet controller's network policy + networkPolicy: + # -- Default network policy rules used by ApplicationSet controller + # @default -- `false` (defaults to global.networkPolicy.create) + create: false + ## Notifications controller notifications: # -- Enable notifications controller @@ -3507,6 +3684,9 @@ argocd: # -- Annotations to be applied to the notifications controller Deployment deploymentAnnotations: {} + # -- Labels for the notifications controller Deployment + deploymentLabels: {} + # -- Annotations to be applied to the notifications controller Pods podAnnotations: {} @@ -3903,6 +4083,12 @@ argocd: # defaultTriggers: | # - on-sync-status-unknown + # Default notifications controller's network policy + networkPolicy: + # -- Default network policy rules used by notifications controller + # @default -- `false` (defaults to global.networkPolicy.create) + create: false + commitServer: # -- Enable commit server enabled: false @@ -3971,6 +4157,10 @@ argocd: annotations: {} # -- commit server service labels labels: {} + # -- commit server service port + port: 8086 + # -- commit server service port name + portName: server # -- Automount API credentials for the Service Account into the pod. 
automountServiceAccountToken: false @@ -3990,6 +4180,9 @@ argocd: # -- Annotations to be added to commit server Deployment deploymentAnnotations: {} + # -- Labels for the commit server Deployment + deploymentLabels: {} + # -- Annotations for the commit server pods podAnnotations: {} @@ -4082,3 +4275,9 @@ argocd: # -- Priority class for the commit server pods # @default -- `""` (defaults to global.priorityClassName) priorityClassName: "" + + # Default commit server's network policy + networkPolicy: + # -- Default network policy rules used by commit server + # @default -- `false` (defaults to global.networkPolicy.create) + create: false diff --git a/charts/cert-manager/Chart.lock b/charts/cert-manager/Chart.lock index ba361324..14a941d6 100644 --- a/charts/cert-manager/Chart.lock +++ b/charts/cert-manager/Chart.lock @@ -1,12 +1,12 @@ dependencies: - name: cert-manager repository: https://charts.jetstack.io - version: v1.17.2 + version: v1.19.1 - name: gcp-workload-identity repository: https://edixos.github.io/ekp-helm version: 0.1.1 - name: gcp-iam-policy-members repository: https://edixos.github.io/ekp-helm version: 0.1.2 -digest: sha256:332d9476ee0ae270e6ab49c0a8474c4a9ded472b0198920ab2f457119509c2f8 -generated: "2025-05-07T10:23:12.154607043Z" +digest: sha256:da99755d669db105cefe55543db5618f1ef1adaccf4009e3f030bcba665b87f6 +generated: "2025-11-26T10:26:15.933751003Z" diff --git a/charts/cert-manager/Chart.yaml b/charts/cert-manager/Chart.yaml index e45f8d04..8e2ddc90 100644 --- a/charts/cert-manager/Chart.yaml +++ b/charts/cert-manager/Chart.yaml @@ -2,7 +2,7 @@ apiVersion: v2 name: cert-manager description: A Helm chart for cert-manager type: application -version: 0.1.3 +version: 0.1.4 appVersion: "1.17.1" maintainers: - name: wiemaouadi @@ -13,7 +13,7 @@ maintainers: url: https://github.com/smileisak dependencies: - name: cert-manager - version: "v1.17.2" + version: "v1.19.1" repository: "https://charts.jetstack.io" alias: certmanager - name: gcp-workload-identity diff --git a/charts/cert-manager/README.md b/charts/cert-manager/README.md index 3e99aee1..492efbc3 100644 --- a/charts/cert-manager/README.md +++ b/charts/cert-manager/README.md @@ -1,6 +1,6 @@ # cert-manager -![Version: 0.1.3](https://img.shields.io/badge/Version-0.1.3-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 1.17.1](https://img.shields.io/badge/AppVersion-1.17.1-informational?style=flat-square) +![Version: 0.1.4](https://img.shields.io/badge/Version-0.1.4-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 1.17.1](https://img.shields.io/badge/AppVersion-1.17.1-informational?style=flat-square) ## Prerequisites @@ -11,7 +11,7 @@ | Repository | Name | Version | |------------|------|---------| -| https://charts.jetstack.io | certmanager(cert-manager) | v1.17.2 | +| https://charts.jetstack.io | certmanager(cert-manager) | v1.19.1 | | https://edixos.github.io/ekp-helm | iamPolicyMembers(gcp-iam-policy-members) | 0.1.2 | | https://edixos.github.io/ekp-helm | workloadIdentity(gcp-workload-identity) | 0.1.1 | @@ -84,6 +84,7 @@ A Helm chart for cert-manager | certmanager.global.imagePullSecrets | list | `[]` | | | certmanager.global.leaderElection.namespace | string | `"kube-system"` | | | certmanager.global.logLevel | int | `2` | | +| certmanager.global.nodeSelector | object | `{}` | | | certmanager.global.podSecurityPolicy.enabled | 
bool | `false` | | | certmanager.global.podSecurityPolicy.useAppArmor | bool | `true` | | | certmanager.global.priorityClassName | string | `""` | | @@ -124,7 +125,7 @@ A Helm chart for cert-manager | certmanager.prometheus.servicemonitor.path | string | `"/metrics"` | | | certmanager.prometheus.servicemonitor.prometheusInstance | string | `"default"` | | | certmanager.prometheus.servicemonitor.scrapeTimeout | string | `"30s"` | | -| certmanager.prometheus.servicemonitor.targetPort | int | `9402` | | +| certmanager.prometheus.servicemonitor.targetPort | string | `"http-metrics"` | | | certmanager.replicaCount | int | `1` | | | certmanager.resources | object | `{}` | | | certmanager.securityContext.runAsNonRoot | bool | `true` | | @@ -196,8 +197,10 @@ A Helm chart for cert-manager | certmanager.webhook.networkPolicy.egress[0].ports[4].port | int | `6443` | | | certmanager.webhook.networkPolicy.egress[0].ports[4].protocol | string | `"TCP"` | | | certmanager.webhook.networkPolicy.egress[0].to[0].ipBlock.cidr | string | `"0.0.0.0/0"` | | +| certmanager.webhook.networkPolicy.egress[0].to[1].ipBlock.cidr | string | `"::/0"` | | | certmanager.webhook.networkPolicy.enabled | bool | `false` | | | certmanager.webhook.networkPolicy.ingress[0].from[0].ipBlock.cidr | string | `"0.0.0.0/0"` | | +| certmanager.webhook.networkPolicy.ingress[0].from[1].ipBlock.cidr | string | `"::/0"` | | | certmanager.webhook.nodeSelector."kubernetes.io/os" | string | `"linux"` | | | certmanager.webhook.podDisruptionBudget.enabled | bool | `false` | | | certmanager.webhook.podLabels | object | `{}` | | @@ -273,7 +276,7 @@ spec: source: repoURL: "https://edixos.github.io/ekp-helm" - targetRevision: "0.1.3" + targetRevision: "0.1.4" chart: cert-manager path: '' helm: diff --git a/charts/cert-manager/charts/cert-manager-v1.17.2.tgz b/charts/cert-manager/charts/cert-manager-v1.17.2.tgz deleted file mode 100644 index 770113d1..00000000 Binary files a/charts/cert-manager/charts/cert-manager-v1.17.2.tgz and /dev/null differ diff --git a/charts/cert-manager/charts/cert-manager-v1.19.1.tgz b/charts/cert-manager/charts/cert-manager-v1.19.1.tgz new file mode 100644 index 00000000..f452a3f5 Binary files /dev/null and b/charts/cert-manager/charts/cert-manager-v1.19.1.tgz differ diff --git a/charts/cert-manager/values.yaml b/charts/cert-manager/values.yaml index 8d554e22..b5748970 100644 --- a/charts/cert-manager/values.yaml +++ b/charts/cert-manager/values.yaml @@ -29,6 +29,16 @@ certmanager: # - name: "image-pull-secret" imagePullSecrets: [] + # Global node selector + # + # The nodeSelector on Pods tells Kubernetes to schedule Pods on the nodes with + # matching labels. + # For more information, see [Assigning Pods to Nodes](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/). + # + # If a component-specific nodeSelector is also set, it will take precedence. + # +docs:property + nodeSelector: {} + # Labels to apply to all resources. # Please note that this does not add labels to the resources created dynamically by the controllers. # For these resources, you have to add the labels in the template in the cert-manager custom resource: @@ -45,6 +55,19 @@ certmanager: # The optional priority class to be used for the cert-manager pods. priorityClassName: "" + # Set all pods to run in a user namespace without host access. + # Experimental: may be removed once the Kubernetes User Namespaces feature is GA. 
+ # + # Requirements: + # - Kubernetes ≥ 1.33, or + # - Kubernetes 1.27–1.32 with UserNamespacesSupport feature gate enabled. + # + # Set to false to run pods in a user namespace without host access. + # + # See [limitations](https://kubernetes.io/docs/concepts/workloads/pods/user-namespaces/#limitations) for details. + # +docs:property + # hostUsers: false + rbac: # Create required ClusterRoles and ClusterRoleBindings for cert-manager. create: true @@ -134,14 +157,14 @@ certmanager: enabled: false # This configures the minimum available pods for disruptions. It can either be set to - # an integer (e.g. 1) or a percentage value (e.g. 25%). + # an integer (e.g., 1) or a percentage value (e.g., 25%). # It cannot be used if `maxUnavailable` is set. # +docs:property # +docs:type=unknown # minAvailable: 1 # This configures the maximum unavailable pods for disruptions. It can either be set to - # an integer (e.g. 1) or a percentage value (e.g. 25%). + # an integer (e.g., 1) or a percentage value (e.g., 25%). # it cannot be used if `minAvailable` is set. # +docs:property # +docs:type=unknown @@ -193,7 +216,7 @@ certmanager: # Override the "cert-manager.name" value, which is used to annotate some of # the resources that are created by this Chart (using "app.kubernetes.io/name"). # NOTE: There are some inconsistencies in the Helm chart when it comes to - # these annotations (some resources use eg. "cainjector.name" which resolves + # these annotations (some resources use, e.g., "cainjector.name" which resolves # to the value "cainjector"). # +docs:property # nameOverride: "my-cert-manager" @@ -248,10 +271,10 @@ certmanager: # kubernetesAPIBurst: 9000 # numberOfConcurrentWorkers: 200 # enableGatewayAPI: true - # # Feature gates as of v1.17.0. Listed with their default values. + # # Feature gates as of v1.18.1. Listed with their default values. # # See https://cert-manager.io/docs/cli/controller/ # featureGates: - # AdditionalCertificateOutputFormats: true # BETA - default=true + # AdditionalCertificateOutputFormats: true # GA - default=true # AllAlpha: false # ALPHA - default=false # AllBeta: false # BETA - default=false # ExperimentalCertificateSigningRequestControllers: false # ALPHA - default=false @@ -263,8 +286,10 @@ certmanager: # ServerSideApply: false # ALPHA - default=false # StableCertificateRequestName: true # BETA - default=true # UseCertificateRequestBasicConstraints: false # ALPHA - default=false - # UseDomainQualifiedFinalizer: true # BETA - default=false + # UseDomainQualifiedFinalizer: true # GA - default=true # ValidateCAA: false # ALPHA - default=false + # DefaultPrivateKeyRotationPolicyAlways: true # BETA - default=true + # ACMEHTTP01IngressPathTypeExact: true # BETA - default=true # # Configure the metrics server for TLS # # See https://cert-manager.io/docs/devops-tips/prometheus-metrics/#tls # metricsTLSConfig: @@ -295,7 +320,7 @@ certmanager: # referencing these signer names will be auto-approved by cert-manager. Defaults to just # approving the cert-manager.io Issuer and ClusterIssuer issuers. When set to an empty # array, ALL issuers will be auto-approved by cert-manager. To disable the auto-approval, - # because eg. you are using approver-policy, you can enable 'disableAutoApproval'. + # because, e.g., you are using approver-policy, you can enable 'disableAutoApproval'. 
# ref: https://cert-manager.io/docs/concepts/certificaterequest/#approval # +docs:property approveSignerNames: @@ -451,7 +476,6 @@ certmanager: # +docs:property # no_proxy: 127.0.0.1,localhost - # A Kubernetes Affinity, if required. For more information, see [Affinity v1 core](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#affinity-v1-core). # # For example: @@ -519,7 +543,7 @@ certmanager: # ServiceMonitor resource. # Otherwise, 'prometheus.io' annotations are added to the cert-manager and # cert-manager-webhook Deployments. - # Note that you can not enable both PodMonitor and ServiceMonitor as they are + # Note that you cannot enable both PodMonitor and ServiceMonitor as they are # mutually exclusive. Enabling both will result in an error. enabled: true @@ -539,7 +563,8 @@ certmanager: # The target port to set on the ServiceMonitor. This must match the port that the # cert-manager controller is listening on for metrics. - targetPort: 9402 + # +docs:type=string,integer + targetPort: http-metrics # The path to scrape for metrics. path: /metrics @@ -573,7 +598,7 @@ certmanager: # +docs:property endpointAdditionalProperties: {} - # Note that you can not enable both PodMonitor and ServiceMonitor as they are mutually exclusive. Enabling both will result in an error. + # Note that you cannot enable both PodMonitor and ServiceMonitor as they are mutually exclusive. Enabling both will result in an error. podmonitor: # Create a PodMonitor to add cert-manager to Prometheus. enabled: false @@ -723,14 +748,14 @@ certmanager: enabled: false # This property configures the minimum available pods for disruptions. Can either be set to - # an integer (e.g. 1) or a percentage value (e.g. 25%). + # an integer (e.g., 1) or a percentage value (e.g., 25%). # It cannot be used if `maxUnavailable` is set. # +docs:property # +docs:type=unknown # minAvailable: 1 # This property configures the maximum unavailable pods for disruptions. Can either be set to - # an integer (e.g. 1) or a percentage value (e.g. 25%). + # an integer (e.g., 1) or a percentage value (e.g., 25%). # It cannot be used if `minAvailable` is set. # +docs:property # +docs:type=unknown @@ -976,6 +1001,8 @@ certmanager: - from: - ipBlock: cidr: 0.0.0.0/0 + - ipBlock: + cidr: "::/0" # Egress rule for the webhook network policy. By default, it allows all # outbound traffic to ports 80 and 443, as well as DNS ports. @@ -997,6 +1024,8 @@ certmanager: to: - ipBlock: cidr: 0.0.0.0/0 + - ipBlock: + cidr: "::/0" # Additional volumes to add to the cert-manager controller pod. volumes: [] @@ -1090,14 +1119,14 @@ certmanager: enabled: false # `minAvailable` configures the minimum available pods for disruptions. It can either be set to - # an integer (e.g. 1) or a percentage value (e.g. 25%). + # an integer (e.g., 1) or a percentage value (e.g., 25%). # Cannot be used if `maxUnavailable` is set. # +docs:property # +docs:type=unknown # minAvailable: 1 # `maxUnavailable` configures the maximum unavailable pods for disruptions. It can either be set to - # an integer (e.g. 1) or a percentage value (e.g. 25%). + # an integer (e.g., 1) or a percentage value (e.g., 25%). # Cannot be used if `minAvailable` is set. 
# +docs:property # +docs:type=unknown diff --git a/charts/dex/Chart.lock b/charts/dex/Chart.lock index 09b5cdf8..c023a7aa 100644 --- a/charts/dex/Chart.lock +++ b/charts/dex/Chart.lock @@ -1,12 +1,12 @@ dependencies: - name: dex repository: https://charts.dexidp.io - version: 0.23.0 + version: 0.24.0 - name: gcp-workload-identity repository: https://edixos.github.io/ekp-helm version: 0.1.1 - name: gcp-iam-policy-members repository: https://edixos.github.io/ekp-helm version: 0.1.2 -digest: sha256:33de3c86abf097766978c659379862374f824ee040c17dd22afac6b98bf07c5c -generated: "2025-04-28T15:46:20.243117+02:00" +digest: sha256:ee18d7f42c735677da0e9aba79f1050aa71aad98c79a067c20f2029ee077c5a8 +generated: "2025-11-26T10:27:29.468809248Z" diff --git a/charts/dex/Chart.yaml b/charts/dex/Chart.yaml index b13e0a46..18ab1619 100644 --- a/charts/dex/Chart.yaml +++ b/charts/dex/Chart.yaml @@ -2,7 +2,7 @@ apiVersion: v2 name: dex description: A Helm chart for Dex - OpenID Connect Identity (OIDC) and OAuth 2.0 Provider with Pluggable Connectors type: application -version: 0.1.4 +version: 0.1.5 appVersion: "2.42.0" maintainers: - name: wiemaouadi @@ -13,7 +13,7 @@ maintainers: url: https://github.com/smileisak dependencies: - name: dex - version: 0.23.0 + version: 0.24.0 repository: https://charts.dexidp.io alias: dex - name: gcp-workload-identity diff --git a/charts/dex/README.md b/charts/dex/README.md index 4385a656..64e180e6 100644 --- a/charts/dex/README.md +++ b/charts/dex/README.md @@ -1,6 +1,6 @@ # dex -![Version: 0.1.4](https://img.shields.io/badge/Version-0.1.4-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 2.42.0](https://img.shields.io/badge/AppVersion-2.42.0-informational?style=flat-square) +![Version: 0.1.5](https://img.shields.io/badge/Version-0.1.5-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 2.42.0](https://img.shields.io/badge/AppVersion-2.42.0-informational?style=flat-square) ## Prerequisites @@ -11,7 +11,7 @@ | Repository | Name | Version | |------------|------|---------| -| https://charts.dexidp.io | dex(dex) | 0.23.0 | +| https://charts.dexidp.io | dex(dex) | 0.24.0 | | https://edixos.github.io/ekp-helm | iamPolicyMembers(gcp-iam-policy-members) | 0.1.2 | | https://edixos.github.io/ekp-helm | workloadIdentity(gcp-workload-identity) | 0.1.1 | @@ -41,10 +41,11 @@ A Helm chart for Dex - OpenID Connect Identity (OIDC) and OAuth 2.0 Provider wit | dex.env | object | `{}` | Additional environment variables passed directly to containers. See the [API reference](https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#environment-variables) for details. | | dex.envFrom | list | `[]` | Additional environment variables mounted from [secrets](https://kubernetes.io/docs/concepts/configuration/secret/#using-secrets-as-environment-variables) or [config maps](https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/#configure-all-key-value-pairs-in-a-configmap-as-container-environment-variables). See the [API reference](https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#environment-variables) for details. | | dex.envVars | list | `[]` | Similar to env but with support for all possible configurations. 
See the [API reference](https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#environment-variables) for details. | -| dex.fullnameOverride | string | `"test"` | A name to substitute for the full names of resources. | +| dex.fullnameOverride | string | `""` | A name to substitute for the full names of resources. | | dex.grpc.enabled | bool | `false` | Enable the gRPC endpoint. Read more in the [documentation](https://dexidp.io/docs/api/). | | dex.hostAliases | list | `[]` | A list of hosts and IPs that will be injected into the pod's hosts file if specified. See the [API reference](https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#hostname-and-name-resolution) | | dex.https.enabled | bool | `false` | Enable the HTTPS endpoint. | +| dex.image.digest | string | `""` | When digest is set to a non-empty value, images will be pulled by digest (regardless of tag value). | | dex.image.pullPolicy | string | `"IfNotPresent"` | [Image pull policy](https://kubernetes.io/docs/concepts/containers/images/#updating-images) for updating already existing images on a node. | | dex.image.repository | string | `"ghcr.io/dexidp/dex"` | Name of the image repository to pull the container image from. | | dex.image.tag | string | `""` | Image tag override for the default value (chart appVersion). | @@ -145,7 +146,7 @@ spec: source: repoURL: "https://edixos.github.io/ekp-helm" - targetRevision: "0.1.4" + targetRevision: "0.1.5" chart: dex path: '' diff --git a/charts/dex/charts/dex-0.23.0.tgz b/charts/dex/charts/dex-0.23.0.tgz deleted file mode 100644 index 881f77dc..00000000 Binary files a/charts/dex/charts/dex-0.23.0.tgz and /dev/null differ diff --git a/charts/dex/charts/dex-0.24.0.tgz b/charts/dex/charts/dex-0.24.0.tgz new file mode 100644 index 00000000..5f58895c Binary files /dev/null and b/charts/dex/charts/dex-0.24.0.tgz differ diff --git a/charts/dex/values.yaml b/charts/dex/values.yaml index 8f5f7ce7..27c1ab01 100644 --- a/charts/dex/values.yaml +++ b/charts/dex/values.yaml @@ -19,6 +19,10 @@ prometheus: # This is a YAML-formatted file. # Declare variables to be passed into your templates. dex: + # Default values for dex. + # This is a YAML-formatted file. + # Declare variables to be passed into your templates. + # -- Number of replicas (pods) to launch. replicaCount: 1 @@ -36,6 +40,9 @@ dex: # -- Image tag override for the default value (chart appVersion). tag: "" + # -- When digest is set to a non-empty value, images will be pulled by digest (regardless of tag value). + digest: "" + # -- Reference to one or more secrets to be used when [pulling images](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/#create-a-pod-that-uses-your-secret) (from private registries). imagePullSecrets: [] @@ -46,7 +53,7 @@ dex: nameOverride: "" # -- A name to substitute for the full names of resources. - fullnameOverride: "test" + fullnameOverride: "" # -- A list of hosts and IPs that will be injected into the pod's hosts file if specified. 
# See the [API reference](https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#hostname-and-name-resolution) @@ -357,7 +364,6 @@ dex: # ports: # - port: 636 # protocol: TCP - tags: # -- Enables Config Connector features configConnector: false diff --git a/charts/eso/Chart.lock b/charts/eso/Chart.lock index 5be1b4bf..cad3fe0c 100644 --- a/charts/eso/Chart.lock +++ b/charts/eso/Chart.lock @@ -1,6 +1,6 @@ dependencies: - name: external-secrets repository: https://charts.external-secrets.io - version: 0.16.2 -digest: sha256:94cbf896c19437687c2804fc14c7937cd9b12f6d70cc32b1a78aa323777580cb -generated: "2025-05-14T10:23:28.800416977Z" + version: 1.1.0 +digest: sha256:e6de466da84336d35dfff1335d40d10c1d143b562abc1b8877ea41eb87df4575 +generated: "2025-11-26T10:25:50.880872793Z" diff --git a/charts/eso/Chart.yaml b/charts/eso/Chart.yaml index 31b9c963..df3cef7b 100644 --- a/charts/eso/Chart.yaml +++ b/charts/eso/Chart.yaml @@ -2,11 +2,11 @@ apiVersion: v2 name: eso description: A Helm chart ESO for Kubernetes type: application -version: 0.1.4 +version: 0.1.5 appVersion: "0.14.2" dependencies: - name: external-secrets - version: 0.16.2 + version: 1.1.0 repository: https://charts.external-secrets.io alias: eso maintainers: diff --git a/charts/eso/README.md b/charts/eso/README.md index fb6aba26..05f4448d 100644 --- a/charts/eso/README.md +++ b/charts/eso/README.md @@ -1,6 +1,6 @@ # eso -![Version: 0.1.4](https://img.shields.io/badge/Version-0.1.4-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 0.14.2](https://img.shields.io/badge/AppVersion-0.14.2-informational?style=flat-square) +![Version: 0.1.5](https://img.shields.io/badge/Version-0.1.5-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 0.14.2](https://img.shields.io/badge/AppVersion-0.14.2-informational?style=flat-square) ## Prerequisites @@ -11,7 +11,7 @@ | Repository | Name | Version | |------------|------|---------| -| https://charts.external-secrets.io | eso(external-secrets) | 0.16.2 | +| https://charts.external-secrets.io | eso(external-secrets) | 1.1.0 | ## Maintainers @@ -30,29 +30,29 @@ A Helm chart ESO for Kubernetes |-----|------|---------|-------------| | eso.affinity | object | `{}` | | | eso.bitwarden-sdk-server.enabled | bool | `false` | | +| eso.bitwarden-sdk-server.namespaceOverride | string | `""` | | | eso.certController.affinity | object | `{}` | | | eso.certController.create | bool | `true` | Specifies whether a certificate controller deployment be created. 
| | eso.certController.deploymentAnnotations | object | `{}` | Annotations to add to Deployment | | eso.certController.extraArgs | object | `{}` | | | eso.certController.extraEnv | list | `[]` | | +| eso.certController.extraInitContainers | list | `[]` | | | eso.certController.extraVolumeMounts | list | `[]` | | | eso.certController.extraVolumes | list | `[]` | | -| eso.certController.fullnameOverride | string | `""` | | | eso.certController.hostNetwork | bool | `false` | Run the certController on the host network | | eso.certController.image.flavour | string | `""` | | | eso.certController.image.pullPolicy | string | `"IfNotPresent"` | | -| eso.certController.image.repository | string | `"oci.external-secrets.io/external-secrets/external-secrets"` | | +| eso.certController.image.repository | string | `"ghcr.io/external-secrets/external-secrets"` | | | eso.certController.image.tag | string | `""` | | | eso.certController.imagePullSecrets | list | `[]` | | -| eso.certController.log | object | `{"level":"info","timeEncoding":"epoch"}` | Specifices Log Params to the Certificate Controller | +| eso.certController.log | object | `{"level":"info","timeEncoding":"epoch"}` | Specifies Log Params to the Certificate Controller | | eso.certController.metrics.listen.port | int | `8080` | | | eso.certController.metrics.service.annotations | object | `{}` | Additional service annotations | | eso.certController.metrics.service.enabled | bool | `false` | Enable if you use another monitoring tool than Prometheus to scrape the metrics | | eso.certController.metrics.service.port | int | `8080` | Metrics service port to scrape | -| eso.certController.nameOverride | string | `""` | | | eso.certController.nodeSelector | object | `{}` | | | eso.certController.podAnnotations | object | `{}` | Annotations to add to Pod | -| eso.certController.podDisruptionBudget | object | `{"enabled":false,"minAvailable":1}` | Pod disruption budget - for more details see https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ | +| eso.certController.podDisruptionBudget | object | `{"enabled":false,"minAvailable":1,"nameOverride":""}` | Pod disruption budget - for more details see https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ | | eso.certController.podLabels | object | `{}` | | | eso.certController.podSecurityContext.enabled | bool | `true` | | | eso.certController.priorityClassName | string | `""` | Pod priority class name. | @@ -75,6 +75,10 @@ A Helm chart ESO for Kubernetes | eso.certController.serviceAccount.create | bool | `true` | Specifies whether a service account should be created. | | eso.certController.serviceAccount.extraLabels | object | `{}` | Extra Labels to add to the service account. | | eso.certController.serviceAccount.name | string | `""` | The name of the service account to use. If not set and create is true, a name is generated using the fullname template. | +| eso.certController.startupProbe.enabled | bool | `false` | Enabled determines if the startup probe should be used or not. By default it's enabled | +| eso.certController.startupProbe.port | string | `""` | Port for startup probe. | +| eso.certController.startupProbe.useReadinessProbePort | bool | `true` | whether to use the readiness probe port for startup probe. 
| +| eso.certController.strategy | object | `{}` | Set deployment strategy | | eso.certController.tolerations | list | `[]` | | | eso.certController.topologySpreadConstraints | list | `[]` | | | eso.commonLabels | object | `{}` | Additional labels added to all helm chart resources. | @@ -91,14 +95,19 @@ A Helm chart ESO for Kubernetes | eso.deploymentAnnotations | object | `{}` | Annotations to add to Deployment | | eso.dnsConfig | object | `{}` | Specifies `dnsOptions` to deployment | | eso.dnsPolicy | string | `"ClusterFirst"` | Specifies `dnsPolicy` to deployment | +| eso.enableHTTP2 | bool | `false` | if true, HTTP2 will be enabled for the services created by all controllers, currently metrics and webhook. | | eso.extendedMetricLabels | bool | `false` | If true external secrets will use recommended kubernetes annotations as prometheus metric labels. | | eso.extraArgs | object | `{}` | | | eso.extraContainers | list | `[]` | | | eso.extraEnv | list | `[]` | | +| eso.extraInitContainers | list | `[]` | | | eso.extraObjects | list | `[]` | | | eso.extraVolumeMounts | list | `[]` | | | eso.extraVolumes | list | `[]` | | | eso.fullnameOverride | string | `""` | | +| eso.genericTargets | object | `{"enabled":false,"resources":[]}` | Enable support for generic targets (ConfigMaps, Custom Resources). Warning: Using generic target. Make sure access policies and encryption are properly configured. When enabled, this grants the controller permissions to create/update/delete ConfigMaps and optionally other resource types specified in generic.resources. | +| eso.genericTargets.enabled | bool | `false` | Enable generic target support | +| eso.genericTargets.resources | list | `[]` | List of additional resource types to grant permissions for. Each entry should specify apiGroup, resources, and verbs. Example: resources: - apiGroup: "argoproj.io" resources: ["applications"] verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] | | eso.global.affinity | object | `{}` | | | eso.global.compatibility.openshift.adaptSecurityContext | string | `"auto"` | Manages the securityContext properties to make them compatible with OpenShift. Possible values: auto - Apply configurations if it is detected that OpenShift is the target platform. force - Always apply configurations. disabled - No modification applied. | | eso.global.nodeSelector | object | `{}` | | @@ -106,18 +115,34 @@ | eso.global.topologySpreadConstraints | list | `[]` | | | eso.grafanaDashboard.annotations | object | `{}` | Annotations that ConfigMaps can have to get configured in Grafana, See: sidecar.dashboards.folderAnnotation for specifying the dashboard folder. https://github.com/grafana/helm-charts/tree/main/charts/grafana | | eso.grafanaDashboard.enabled | bool | `false` | If true creates a Grafana dashboard. | +| eso.grafanaDashboard.extraLabels | object | `{}` | Extra labels to add to the Grafana dashboard ConfigMap. | | eso.grafanaDashboard.sidecarLabel | string | `"grafana_dashboard"` | Label that ConfigMaps should have to be loaded as dashboards. | | eso.grafanaDashboard.sidecarLabelValue | string | `"1"` | Label value that ConfigMaps should have to be loaded as dashboards. | | eso.hostNetwork | bool | `false` | Run the controller on the host network | | eso.image.flavour | string | `""` | The flavour of tag you want to use There are different image flavours available, like distroless and ubi. Please see GitHub release notes for image tags for these flavors.
By default, the distroless image is used. | | eso.image.pullPolicy | string | `"IfNotPresent"` | | -| eso.image.repository | string | `"oci.external-secrets.io/external-secrets/external-secrets"` | | +| eso.image.repository | string | `"ghcr.io/external-secrets/external-secrets"` | | | eso.image.tag | string | `""` | The image tag to use. The default is the chart appVersion. | | eso.imagePullSecrets | list | `[]` | | | eso.installCRDs | bool | `true` | If set, install and upgrade CRDs through helm chart. | | eso.leaderElect | bool | `false` | If true, external-secrets will perform leader election between instances to ensure no more than one instance of external-secrets operates at a time. | -| eso.log | object | `{"level":"info","timeEncoding":"epoch"}` | Specifices Log Params to the External Secrets Operator | +| eso.livenessProbe.enabled | bool | `false` | Enabled determines if the liveness probe should be used or not. By default it's disabled. | +| eso.livenessProbe.spec | object | `{"address":"","failureThreshold":5,"httpGet":{"path":"/healthz","port":8082},"initialDelaySeconds":10,"periodSeconds":10,"successThreshold":1,"timeoutSeconds":5}` | The body of the liveness probe settings. | +| eso.livenessProbe.spec.address | string | `""` | Address for liveness probe. | +| eso.livenessProbe.spec.failureThreshold | int | `5` | Number of consecutive probe failures that should occur before considering the probe as failed. | +| eso.livenessProbe.spec.httpGet | object | `{"path":"/healthz","port":8082}` | Handler for liveness probe. | +| eso.livenessProbe.spec.httpGet.path | string | `"/healthz"` | Path for liveness probe. | +| eso.livenessProbe.spec.httpGet.port | int | `8082` | Set this value to 8082 to activate liveness probes. @schema type: [string, integer] | +| eso.livenessProbe.spec.initialDelaySeconds | int | `10` | Delay in seconds for the container to start before performing the initial probe. | +| eso.livenessProbe.spec.periodSeconds | int | `10` | Period in seconds for K8s to start performing probes. | +| eso.livenessProbe.spec.successThreshold | int | `1` | Number of successful probes to mark probe successful. | +| eso.livenessProbe.spec.timeoutSeconds | int | `5` | Specify the maximum amount of time to wait for a probe to respond before considering it fails.
| +| eso.log | object | `{"level":"info","timeEncoding":"epoch"}` | Specifies Log Params to the External Secrets Operator | | eso.metrics.listen.port | int | `8080` | | +| eso.metrics.listen.secure.certDir | string | `"/etc/tls"` | TLS cert directory path | +| eso.metrics.listen.secure.certFile | string | `"/etc/tls/tls.crt"` | TLS cert file path | +| eso.metrics.listen.secure.enabled | bool | `false` | | +| eso.metrics.listen.secure.keyFile | string | `"/etc/tls/tls.key"` | TLS key file path | | eso.metrics.service.annotations | object | `{}` | Additional service annotations | | eso.metrics.service.enabled | bool | `false` | Enable if you use another monitoring tool than Prometheus to scrape the metrics | | eso.metrics.service.port | int | `8080` | Metrics service port to scrape | @@ -126,12 +151,13 @@ A Helm chart ESO for Kubernetes | eso.nodeSelector | object | `{}` | | | eso.openshiftFinalizers | bool | `true` | If true the OpenShift finalizer permissions will be added to RBAC | | eso.podAnnotations | object | `{}` | Annotations to add to Pod | -| eso.podDisruptionBudget | object | `{"enabled":false,"minAvailable":1}` | Pod disruption budget - for more details see https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ | +| eso.podDisruptionBudget | object | `{"enabled":false,"minAvailable":1,"nameOverride":""}` | Pod disruption budget - for more details see https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ | | eso.podLabels | object | `{}` | | | eso.podSecurityContext.enabled | bool | `true` | | | eso.podSpecExtra | object | `{}` | Any extra pod spec on the deployment | | eso.priorityClassName | string | `""` | Pod priority class name. | -| eso.processClusterExternalSecret | bool | `true` | if true, the operator will process cluster external secret. Else, it will ignore them. | +| eso.processClusterExternalSecret | bool | `true` | if true, the operator will process cluster external secret. Else, it will ignore them. When enabled, this adds update/patch permissions on namespaces to handle finalizers for proper cleanup during namespace deletion, preventing race conditions with ExternalSecrets. | +| eso.processClusterGenerator | bool | `true` | if true, the operator will process cluster generator. Else, it will ignore them. | | eso.processClusterPushSecret | bool | `true` | if true, the operator will process cluster push secret. Else, it will ignore them. | | eso.processClusterStore | bool | `true` | if true, the operator will process cluster store. Else, it will ignore them. | | eso.processPushSecret | bool | `true` | if true, the operator will process push secret. Else, it will ignore them. | @@ -165,17 +191,20 @@ A Helm chart ESO for Kubernetes | eso.serviceMonitor.metricRelabelings | list | `[]` | Metric relabel configs to apply to samples before ingestion. [Metric Relabeling](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs) | | eso.serviceMonitor.namespace | string | `""` | namespace where you want to install ServiceMonitors | | eso.serviceMonitor.relabelings | list | `[]` | Relabel configs to apply to samples before ingestion. [Relabeling](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config) | +| eso.serviceMonitor.renderMode | string | `"skipIfMissing"` | How should we react to missing CRD "`monitoring.coreos.com/v1/ServiceMonitor`" Possible values: - `skipIfMissing`: Only render ServiceMonitor resources if CRD is present, skip if missing. 
- `failIfMissing`: Fail Helm install if CRD is not present. - `alwaysRender` : Always render ServiceMonitor resources, do not check for CRD. @schema enum: - skipIfMissing - failIfMissing - alwaysRender @schema | | eso.serviceMonitor.scrapeTimeout | string | `"25s"` | Timeout if metrics can't be retrieved in given time interval | +| eso.strategy | object | `{}` | Set deployment strategy | +| eso.systemAuthDelegator | bool | `false` | If true the system:auth-delegator ClusterRole will be added to RBAC | | eso.tolerations | list | `[]` | | | eso.topologySpreadConstraints | list | `[]` | | | eso.webhook.affinity | object | `{}` | | | eso.webhook.annotations | object | `{}` | Annotations to place on validating webhook configuration. | -| eso.webhook.certCheckInterval | string | `"5m"` | Specifices the time to check if the cert is valid | +| eso.webhook.certCheckInterval | string | `"5m"` | Specifies the time to check if the cert is valid | | eso.webhook.certDir | string | `"/tmp/certs"` | | | eso.webhook.certManager.addInjectorAnnotations | bool | `true` | Automatically add the cert-manager.io/inject-ca-from annotation to the webhooks and CRDs. As long as you have the cert-manager CA Injector enabled, this will automatically setup your webhook's CA to the one used by cert-manager. See https://cert-manager.io/docs/concepts/ca-injector | | eso.webhook.certManager.cert.annotations | object | `{}` | Add extra annotations to the Certificate resource. | | eso.webhook.certManager.cert.create | bool | `true` | Create a certificate resource within this chart. See https://cert-manager.io/docs/usage/certificate/ | -| eso.webhook.certManager.cert.duration | string | `"8760h"` | Set the requested duration (i.e. lifetime) of the Certificate. See https://cert-manager.io/docs/reference/api-docs/#cert-manager.io/v1.CertificateSpec One year by default. | +| eso.webhook.certManager.cert.duration | string | `"8760h0m0s"` | Set the requested duration (i.e. lifetime) of the Certificate. See https://cert-manager.io/docs/reference/api-docs/#cert-manager.io/v1.CertificateSpec One year by default. | | eso.webhook.certManager.cert.issuerRef | object | `{"group":"cert-manager.io","kind":"Issuer","name":"my-issuer"}` | For the Certificate created by this chart, setup the issuer. See https://cert-manager.io/docs/reference/api-docs/#cert-manager.io/v1.IssuerSpec | | eso.webhook.certManager.cert.renewBefore | string | `""` | How long before the currently issued certificate’s expiry cert-manager should renew the certificate. See https://cert-manager.io/docs/reference/api-docs/#cert-manager.io/v1.CertificateSpec Note that renewBefore should be greater than .webhook.lookaheadInterval since the webhook will check this far in advance that the certificate is valid. | | eso.webhook.certManager.cert.revisionHistoryLimit | int | `0` | Set the revisionHistoryLimit on the Certificate. See https://cert-manager.io/docs/reference/api-docs/#cert-manager.io/v1.CertificateSpec Defaults to 0 (ignored). 
| @@ -184,31 +213,29 @@ A Helm chart ESO for Kubernetes | eso.webhook.deploymentAnnotations | object | `{}` | Annotations to add to Deployment | | eso.webhook.extraArgs | object | `{}` | | | eso.webhook.extraEnv | list | `[]` | | +| eso.webhook.extraInitContainers | list | `[]` | | | eso.webhook.extraVolumeMounts | list | `[]` | | | eso.webhook.extraVolumes | list | `[]` | | | eso.webhook.failurePolicy | string | `"Fail"` | Specifies whether validating webhooks should be created with failurePolicy: Fail or Ignore | -| eso.webhook.fullnameOverride | string | `""` | | | eso.webhook.hostNetwork | bool | `false` | Specifies if webhook pod should use hostNetwork or not. | | eso.webhook.image.flavour | string | `""` | The flavour of tag you want to use | | eso.webhook.image.pullPolicy | string | `"IfNotPresent"` | | -| eso.webhook.image.repository | string | `"oci.external-secrets.io/external-secrets/external-secrets"` | | +| eso.webhook.image.repository | string | `"ghcr.io/external-secrets/external-secrets"` | | | eso.webhook.image.tag | string | `""` | The image tag to use. The default is the chart appVersion. | | eso.webhook.imagePullSecrets | list | `[]` | | -| eso.webhook.log | object | `{"level":"info","timeEncoding":"epoch"}` | Specifices Log Params to the Webhook | -| eso.webhook.lookaheadInterval | string | `""` | Specifices the lookaheadInterval for certificate validity | +| eso.webhook.log | object | `{"level":"info","timeEncoding":"epoch"}` | Specifies Log Params to the Webhook | +| eso.webhook.lookaheadInterval | string | `""` | Specifies the lookaheadInterval for certificate validity | | eso.webhook.metrics.listen.port | int | `8080` | | | eso.webhook.metrics.service.annotations | object | `{}` | Additional service annotations | | eso.webhook.metrics.service.enabled | bool | `false` | Enable if you use another monitoring tool than Prometheus to scrape the metrics | | eso.webhook.metrics.service.port | int | `8080` | Metrics service port to scrape | -| eso.webhook.nameOverride | string | `""` | | | eso.webhook.nodeSelector | object | `{}` | | | eso.webhook.podAnnotations | object | `{}` | Annotations to add to Pod | -| eso.webhook.podDisruptionBudget | object | `{"enabled":false,"minAvailable":1}` | Pod disruption budget - for more details see https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ | +| eso.webhook.podDisruptionBudget | object | `{"enabled":false,"minAvailable":1,"nameOverride":""}` | Pod disruption budget - for more details see https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ | | eso.webhook.podLabels | object | `{}` | | | eso.webhook.podSecurityContext.enabled | bool | `true` | | | eso.webhook.port | int | `10250` | The port the webhook will listen to | | eso.webhook.priorityClassName | string | `""` | Pod priority class name. | -| eso.webhook.rbac.create | bool | `true` | Specifies whether role and rolebinding resources should be created. | | eso.webhook.readinessProbe.address | string | `""` | Address for readiness probe | | eso.webhook.readinessProbe.port | int | `8081` | ReadinessProbe port for kubelet | | eso.webhook.replicaCount | int | `1` | | @@ -233,6 +260,7 @@ A Helm chart ESO for Kubernetes | eso.webhook.serviceAccount.create | bool | `true` | Specifies whether a service account should be created. | | eso.webhook.serviceAccount.extraLabels | object | `{}` | Extra Labels to add to the service account. | | eso.webhook.serviceAccount.name | string | `""` | The name of the service account to use. 
If not set and create is true, a name is generated using the fullname template. | +| eso.webhook.strategy | object | `{}` | Set deployment strategy | | eso.webhook.tolerations | list | `[]` | | | eso.webhook.topologySpreadConstraints | list | `[]` | | | prometheus.enabled | bool | `false` | Enables Prometheus Operator monitoring | @@ -266,7 +294,7 @@ spec: source: repoURL: "https://edixos.github.io/ekp-helm" - targetRevision: "0.1.4" + targetRevision: "0.1.5" chart: eso path: '' diff --git a/charts/eso/charts/external-secrets-0.16.2.tgz b/charts/eso/charts/external-secrets-0.16.2.tgz deleted file mode 100644 index 110c6b39..00000000 Binary files a/charts/eso/charts/external-secrets-0.16.2.tgz and /dev/null differ diff --git a/charts/eso/charts/external-secrets-1.1.0.tgz b/charts/eso/charts/external-secrets-1.1.0.tgz new file mode 100644 index 00000000..a63fa30a Binary files /dev/null and b/charts/eso/charts/external-secrets-1.1.0.tgz differ diff --git a/charts/eso/values.yaml b/charts/eso/values.yaml index 328a4aa0..1bc0412e 100644 --- a/charts/eso/values.yaml +++ b/charts/eso/values.yaml @@ -33,12 +33,13 @@ eso: bitwarden-sdk-server: enabled: false + namespaceOverride: "" # -- Specifies the amount of historic ReplicaSets k8s should keep (see https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#clean-up-policy) revisionHistoryLimit: 10 image: - repository: oci.external-secrets.io/external-secrets/external-secrets + repository: ghcr.io/external-secrets/external-secrets pullPolicy: IfNotPresent # -- The image tag to use. The default is the chart appVersion. tag: "" @@ -98,7 +99,12 @@ eso: # -- If true the OpenShift finalizer permissions will be added to RBAC openshiftFinalizers: true + # -- If true the system:auth-delegator ClusterRole will be added to RBAC + systemAuthDelegator: false + # -- if true, the operator will process cluster external secret. Else, it will ignore them. + # When enabled, this adds update/patch permissions on namespaces to handle finalizers for proper + # cleanup during namespace deletion, preventing race conditions with ExternalSecrets. processClusterExternalSecret: true # -- if true, the operator will process cluster push secret. Else, it will ignore them. @@ -107,16 +113,38 @@ eso: # -- if true, the operator will process cluster store. Else, it will ignore them. processClusterStore: true + # -- if true, the operator will process cluster generator. Else, it will ignore them. + processClusterGenerator: true + # -- if true, the operator will process push secret. Else, it will ignore them. processPushSecret: true + # -- Enable support for generic targets (ConfigMaps, Custom Resources). + # Warning: Using generic target. Make sure access policies and encryption are properly configured. + # When enabled, this grants the controller permissions to create/update/delete + # ConfigMaps and optionally other resource types specified in generic.resources. + genericTargets: + # -- Enable generic target support + enabled: false + # -- List of additional resource types to grant permissions for. + # Each entry should specify apiGroup, resources, and verbs. + # Example: + # resources: + # - apiGroup: "argoproj.io" + # resources: ["applications"] + # verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] + resources: [] + # -- Specifies whether an external secret operator deployment be created. createOperator: true + # -- if true, HTTP2 will be enabled for the services created by all controllers, currently metrics and webhook.
+ enableHTTP2: false + # -- Specifies the number of concurrent ExternalSecret Reconciles external-secret executes at # a time. concurrent: 1 - # -- Specifices Log Params to the External Secrets Operator + # -- Specifies Log Params to the External Secrets Operator log: level: info timeEncoding: epoch @@ -168,12 +196,18 @@ eso: ## -- Extra volumes to mount to the container. extraVolumeMounts: [] + ## -- Extra init containers to add to the pod. + extraInitContainers: [] + ## -- Extra containers to add to the pod. extraContainers: [] # -- Annotations to add to Deployment deploymentAnnotations: {} + # -- Set deployment strategy + strategy: {} + # -- Annotations to add to Pod podAnnotations: {} @@ -204,6 +238,21 @@ eso: # -- Specifies whether to create a ServiceMonitor resource for collecting Prometheus metrics enabled: false + # -- How should we react to missing CRD "`monitoring.coreos.com/v1/ServiceMonitor`" + # + # Possible values: + # - `skipIfMissing`: Only render ServiceMonitor resources if CRD is present, skip if missing. + # - `failIfMissing`: Fail Helm install if CRD is not present. + # - `alwaysRender` : Always render ServiceMonitor resources, do not check for CRD. + + # @schema + # enum: + # - skipIfMissing + # - failIfMissing + # - alwaysRender + # @schema + renderMode: skipIfMissing # @schema enum: [skipIfMissing, failIfMissing, alwaysRender] + # -- namespace where you want to install ServiceMonitors namespace: "" @@ -241,6 +290,15 @@ eso: listen: port: 8080 + secure: + enabled: false + # -- if those are not set or invalid, self-signed certs will be generated + # -- TLS cert directory path + certDir: /etc/tls + # -- TLS cert file path + certFile: /etc/tls/tls.crt + # -- TLS key file path + keyFile: /etc/tls/tls.key service: # -- Enable if you use another monitoring tool than Prometheus to scrape the metrics @@ -267,6 +325,34 @@ eso: # https://github.com/grafana/helm-charts/tree/main/charts/grafana annotations: {} + # -- Extra labels to add to the Grafana dashboard ConfigMap. + extraLabels: {} + + livenessProbe: + # -- Enabled determines if the liveness probe should be used or not. By default it's disabled. + enabled: false + # -- The body of the liveness probe settings. + spec: + # -- Address for liveness probe. + address: "" + # -- Specify the maximum amount of time to wait for a probe to respond before considering it fails. + timeoutSeconds: 5 + # -- Number of consecutive probe failures that should occur before considering the probe as failed. + failureThreshold: 5 + # -- Period in seconds for K8s to start performing probes. + periodSeconds: 10 + # -- Number of successful probes to mark probe successful. + successThreshold: 1 + # -- Delay in seconds for the container to start before performing the initial probe. + initialDelaySeconds: 10 + # -- Handler for liveness probe. + httpGet: + # -- Set this value to 8082 to activate liveness probes. + # @schema type: [string, integer] + port: 8082 + # -- Path for liveness probe. + path: /healthz + nodeSelector: {} tolerations: [] @@ -281,8 +367,9 @@ eso: # -- Pod disruption budget - for more details see https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ podDisruptionBudget: enabled: false - minAvailable: 1 - # maxUnavailable: 1 + minAvailable: 1 # @schema type:[integer, string] + nameOverride: "" + # maxUnavailable: "50%" # -- Run the controller on the host network hostNetwork: false @@ -292,12 +379,12 @@ eso: annotations: {} # -- Specifies whether a webhook deployment be created.
If set to false, crds.conversion.enabled should also be set to false otherwise the kubeapi will be hammered because the conversion is looking for a webhook endpoint. create: true - # -- Specifices the time to check if the cert is valid + # -- Specifies the time to check if the cert is valid certCheckInterval: "5m" - # -- Specifices the lookaheadInterval for certificate validity + # -- Specifies the lookaheadInterval for certificate validity lookaheadInterval: "" replicaCount: 1 - # -- Specifices Log Params to the Webhook + # -- Specifies Log Params to the Webhook log: level: info timeEncoding: epoch @@ -310,20 +397,15 @@ eso: # -- Specifies if webhook pod should use hostNetwork or not. hostNetwork: false image: - repository: oci.external-secrets.io/external-secrets/external-secrets + repository: ghcr.io/external-secrets/external-secrets pullPolicy: IfNotPresent # -- The image tag to use. The default is the chart appVersion. tag: "" # -- The flavour of tag you want to use flavour: "" imagePullSecrets: [] - nameOverride: "" - fullnameOverride: "" # -- The port the webhook will listen to port: 10250 - rbac: - # -- Specifies whether role and rolebinding resources should be created. - create: true serviceAccount: # -- Specifies whether a service account should be created. create: true @@ -362,7 +444,7 @@ eso: # -- Set the requested duration (i.e. lifetime) of the Certificate. See # https://cert-manager.io/docs/reference/api-docs/#cert-manager.io/v1.CertificateSpec # One year by default. - duration: "8760h" + duration: "8760h0m0s" # -- Set the revisionHistoryLimit on the Certificate. See # https://cert-manager.io/docs/reference/api-docs/#cert-manager.io/v1.CertificateSpec # Defaults to 0 (ignored). @@ -383,14 +465,18 @@ eso: affinity: {} + # -- Set deployment strategy + strategy: {} + # -- Pod priority class name. priorityClassName: "" # -- Pod disruption budget - for more details see https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ podDisruptionBudget: enabled: false - minAvailable: 1 - # maxUnavailable: 1 + minAvailable: 1 # @schema type:[integer, string] + nameOverride: "" + # maxUnavailable: "50%" metrics: @@ -421,6 +507,9 @@ eso: ## -- Map of extra arguments to pass to container. extraArgs: {} + ## -- Extra init containers to add to the pod. + extraInitContainers: [] + ## -- Extra volumes to pass to pod. extraVolumes: [] @@ -478,7 +567,7 @@ eso: create: true requeueInterval: "5m" replicaCount: 1 - # -- Specifices Log Params to the Certificate Controller + # -- Specifies Log Params to the Certificate Controller log: level: info timeEncoding: epoch @@ -486,13 +575,11 @@ eso: revisionHistoryLimit: 10 image: - repository: oci.external-secrets.io/external-secrets/external-secrets + repository: ghcr.io/external-secrets/external-secrets pullPolicy: IfNotPresent tag: "" flavour: "" imagePullSecrets: [] - nameOverride: "" - fullnameOverride: "" rbac: # -- Specifies whether role and rolebinding resources should be created. 
create: true @@ -516,6 +603,9 @@ eso: affinity: {} + # -- Set deployment strategy + strategy: {} + # -- Run the certController on the host network hostNetwork: false @@ -525,8 +615,9 @@ eso: # -- Pod disruption budget - for more details see https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ podDisruptionBudget: enabled: false - minAvailable: 1 - # maxUnavailable: 1 + minAvailable: 1 # @schema type:[integer, string] + nameOverride: "" + # maxUnavailable: "50%" metrics: @@ -549,12 +640,22 @@ eso: # -- ReadinessProbe port for kubelet port: 8081 + startupProbe: + # -- Enabled determines if the startup probe should be used or not. By default it's enabled + enabled: false + # -- whether to use the readiness probe port for startup probe. + useReadinessProbePort: true + # -- Port for startup probe. + port: "" + ## -- Extra environment variables to add to container. extraEnv: [] ## -- Map of extra arguments to pass to container. extraArgs: {} + ## -- Extra init containers to add to the pod. + extraInitContainers: [] ## -- Extra volumes to pass to pod. extraVolumes: [] diff --git a/charts/ingress-nginx/Chart.lock b/charts/ingress-nginx/Chart.lock index 17b9b6c0..09702f1e 100644 --- a/charts/ingress-nginx/Chart.lock +++ b/charts/ingress-nginx/Chart.lock @@ -1,6 +1,6 @@ dependencies: - name: ingress-nginx repository: https://kubernetes.github.io/ingress-nginx - version: 4.12.2 -digest: sha256:b58107199720c48a5d00da482ca4cfef20f3971db28ac19aa2158d8f3ee70158 -generated: "2025-05-07T10:25:43.915827482Z" + version: 4.14.0 +digest: sha256:6363c4361b356dade8eff51b95dcc8808371a6af64f902ae7a64c38b732fbe70 +generated: "2025-11-26T10:27:13.797530112Z" diff --git a/charts/ingress-nginx/Chart.yaml b/charts/ingress-nginx/Chart.yaml index dd3bf33f..54a9a92c 100644 --- a/charts/ingress-nginx/Chart.yaml +++ b/charts/ingress-nginx/Chart.yaml @@ -2,7 +2,7 @@ apiVersion: v2 name: ingress-nginx description: A Helm chart for Kubernetes type: application -version: 0.1.3 +version: 0.1.4 appVersion: "1.12.1" maintainers: - name: ilyasabdellaoui @@ -10,6 +10,6 @@ maintainers: url: https://github.com/ilyasabdellaoui dependencies: - name: ingress-nginx - version: 4.12.2 + version: 4.14.0 repository: "https://kubernetes.github.io/ingress-nginx" alias: ingressNginx diff --git a/charts/ingress-nginx/README.md b/charts/ingress-nginx/README.md index 8f5d6dbb..c082d0d2 100644 --- a/charts/ingress-nginx/README.md +++ b/charts/ingress-nginx/README.md @@ -1,6 +1,6 @@ # ingress-nginx -![Version: 0.1.3](https://img.shields.io/badge/Version-0.1.3-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 1.12.1](https://img.shields.io/badge/AppVersion-1.12.1-informational?style=flat-square) +![Version: 0.1.4](https://img.shields.io/badge/Version-0.1.4-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 1.12.1](https://img.shields.io/badge/AppVersion-1.12.1-informational?style=flat-square) ## Prerequisites @@ -11,7 +11,7 @@ | Repository | Name | Version | |------------|------|---------| -| https://kubernetes.github.io/ingress-nginx | ingressNginx(ingress-nginx) | 4.12.2 | +| https://kubernetes.github.io/ingress-nginx | ingressNginx(ingress-nginx) | 4.14.0 | ## Maintainers @@ -31,12 +31,17 @@ A Helm chart for Kubernetes | ingressNginx.controller.addHeaders | object | `{}` | Will add custom headers before sending response traffic to the client 
according to: https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/#add-headers | | ingressNginx.controller.admissionWebhooks.annotations | object | `{}` | | | ingressNginx.controller.admissionWebhooks.certManager.admissionCert.duration | string | `""` | | +| ingressNginx.controller.admissionWebhooks.certManager.admissionCert.revisionHistoryLimit | int | `0` | Revision history limit of the webhook certificate. Ref.: https://cert-manager.io/docs/reference/api-docs/#cert-manager.io/v1.CertificateSpec | | ingressNginx.controller.admissionWebhooks.certManager.enabled | bool | `false` | | | ingressNginx.controller.admissionWebhooks.certManager.rootCert.duration | string | `""` | | +| ingressNginx.controller.admissionWebhooks.certManager.rootCert.revisionHistoryLimit | int | `0` | Revision history limit of the root certificate. Ref.: https://cert-manager.io/docs/reference/api-docs/#cert-manager.io/v1.CertificateSpec | | ingressNginx.controller.admissionWebhooks.certificate | string | `"/usr/local/certificates/cert"` | | +| ingressNginx.controller.admissionWebhooks.createSecretJob.activeDeadlineSeconds | int | `0` | Deadline in seconds for the job to complete. Must be greater than 0 to enforce. If unset or 0, no deadline is enforced. | | ingressNginx.controller.admissionWebhooks.createSecretJob.name | string | `"create"` | | | ingressNginx.controller.admissionWebhooks.createSecretJob.resources | object | `{}` | | | ingressNginx.controller.admissionWebhooks.createSecretJob.securityContext | object | `{"allowPrivilegeEscalation":false,"capabilities":{"drop":["ALL"]},"readOnlyRootFilesystem":true,"runAsGroup":65532,"runAsNonRoot":true,"runAsUser":65532,"seccompProfile":{"type":"RuntimeDefault"}}` | Security context for secret creation containers | +| ingressNginx.controller.admissionWebhooks.createSecretJob.volumeMounts | list | `[]` | Volume mounts for secret creation containers | +| ingressNginx.controller.admissionWebhooks.createSecretJob.volumes | list | `[]` | Volumes for secret creation pod | | ingressNginx.controller.admissionWebhooks.enabled | bool | `true` | | | ingressNginx.controller.admissionWebhooks.extraEnvs | list | `[]` | Additional environment variables to set | | ingressNginx.controller.admissionWebhooks.failurePolicy | string | `"Fail"` | Admission Webhook failure policy to use | @@ -46,10 +51,10 @@ A Helm chart for Kubernetes | ingressNginx.controller.admissionWebhooks.namespaceSelector | object | `{}` | | | ingressNginx.controller.admissionWebhooks.objectSelector | object | `{}` | | | ingressNginx.controller.admissionWebhooks.patch.enabled | bool | `true` | | -| ingressNginx.controller.admissionWebhooks.patch.image.digest | string | `"sha256:2cf4ebfa82a37c357455458f6dfc334aea1392d508270b2517795a9933a02524"` | | +| ingressNginx.controller.admissionWebhooks.patch.image.digest | string | `"sha256:bcfc926ed57831edf102d62c5c0e259572591df4796ef1420b87f9cf6092497f"` | | | ingressNginx.controller.admissionWebhooks.patch.image.image | string | `"ingress-nginx/kube-webhook-certgen"` | | | ingressNginx.controller.admissionWebhooks.patch.image.pullPolicy | string | `"IfNotPresent"` | | -| ingressNginx.controller.admissionWebhooks.patch.image.tag | string | `"v1.5.3"` | | +| ingressNginx.controller.admissionWebhooks.patch.image.tag | string | `"v1.6.4"` | | | ingressNginx.controller.admissionWebhooks.patch.labels | object | `{}` | Labels to be added to patch job resources | | ingressNginx.controller.admissionWebhooks.patch.networkPolicy.enabled | bool | `false` 
| Enable 'networkPolicy' or not | | ingressNginx.controller.admissionWebhooks.patch.nodeSelector."kubernetes.io/os" | string | `"linux"` | | @@ -57,15 +62,19 @@ A Helm chart for Kubernetes | ingressNginx.controller.admissionWebhooks.patch.priorityClassName | string | `""` | Provide a priority class name to the webhook patching job # | | ingressNginx.controller.admissionWebhooks.patch.rbac | object | `{"create":true}` | Admission webhook patch job RBAC | | ingressNginx.controller.admissionWebhooks.patch.rbac.create | bool | `true` | Create RBAC or not | +| ingressNginx.controller.admissionWebhooks.patch.runtimeClassName | string | `""` | Instruct the kubelet to use the named RuntimeClass to run the pod | | ingressNginx.controller.admissionWebhooks.patch.securityContext | object | `{}` | Security context for secret creation & webhook patch pods | | ingressNginx.controller.admissionWebhooks.patch.serviceAccount | object | `{"automountServiceAccountToken":true,"create":true,"name":""}` | Admission webhook patch job service account | | ingressNginx.controller.admissionWebhooks.patch.serviceAccount.automountServiceAccountToken | bool | `true` | Auto-mount service account token or not | | ingressNginx.controller.admissionWebhooks.patch.serviceAccount.create | bool | `true` | Create a service account or not | | ingressNginx.controller.admissionWebhooks.patch.serviceAccount.name | string | `""` | Custom service account name | | ingressNginx.controller.admissionWebhooks.patch.tolerations | list | `[]` | | +| ingressNginx.controller.admissionWebhooks.patchWebhookJob.activeDeadlineSeconds | int | `0` | Deadline in seconds for the job to complete. Must be greater than 0 to enforce. If unset or 0, no deadline is enforced. | | ingressNginx.controller.admissionWebhooks.patchWebhookJob.name | string | `"patch"` | | | ingressNginx.controller.admissionWebhooks.patchWebhookJob.resources | object | `{}` | | | ingressNginx.controller.admissionWebhooks.patchWebhookJob.securityContext | object | `{"allowPrivilegeEscalation":false,"capabilities":{"drop":["ALL"]},"readOnlyRootFilesystem":true,"runAsGroup":65532,"runAsNonRoot":true,"runAsUser":65532,"seccompProfile":{"type":"RuntimeDefault"}}` | Security context for webhook patch containers | +| ingressNginx.controller.admissionWebhooks.patchWebhookJob.volumeMounts | list | `[]` | Volume mounts for webhook patch containers | +| ingressNginx.controller.admissionWebhooks.patchWebhookJob.volumes | list | `[]` | Volumes for webhook patch pod | | ingressNginx.controller.admissionWebhooks.port | int | `8443` | | | ingressNginx.controller.admissionWebhooks.service.annotations | object | `{}` | | | ingressNginx.controller.admissionWebhooks.service.externalIPs | list | `[]` | | @@ -102,7 +111,7 @@ A Helm chart for Kubernetes | ingressNginx.controller.extraArgs | object | `{}` | Additional command line arguments to pass to Ingress-Nginx Controller E.g. to specify the default SSL certificate you can use | | ingressNginx.controller.extraContainers | list | `[]` | Additional containers to be added to the controller pod. See https://github.com/lemonldap-ng-controller/lemonldap-ng-controller as example. | | ingressNginx.controller.extraEnvs | list | `[]` | Additional environment variables to set | -| ingressNginx.controller.extraInitContainers | list | `[]` | Containers, which are run before the app containers are started. | +| ingressNginx.controller.extraInitContainers | list | `[]` | Containers, which are run before the app containers are started. 
Values may contain Helm templates. | | ingressNginx.controller.extraModules | list | `[]` | Modules, which are mounted into the core nginx image. | | ingressNginx.controller.extraVolumeMounts | list | `[]` | Additional volumeMounts to the controller main container. | | ingressNginx.controller.extraVolumes | list | `[]` | Additional volumes to the controller pod. | @@ -116,8 +125,8 @@ A Helm chart for Kubernetes | ingressNginx.controller.hostname | object | `{}` | Optionally customize the pod hostname. | | ingressNginx.controller.image.allowPrivilegeEscalation | bool | `false` | | | ingressNginx.controller.image.chroot | bool | `false` | | -| ingressNginx.controller.image.digest | string | `"sha256:03497ee984628e95eca9b2279e3f3a3c1685dd48635479e627d219f00c8eefa9"` | | -| ingressNginx.controller.image.digestChroot | string | `"sha256:a697e2bfa419768315250d079ccbbca45f6099c60057769702b912d20897a574"` | | +| ingressNginx.controller.image.digest | string | `"sha256:e4127065d0317bd11dc64c4dd38dcf7fb1c3d72e468110b4086e636dbaac943d"` | | +| ingressNginx.controller.image.digestChroot | string | `"sha256:d0158a50630981a945325c15a638e52c2d0691bc528caf5c04d2cf2051c5665f"` | | | ingressNginx.controller.image.image | string | `"ingress-nginx/controller"` | | | ingressNginx.controller.image.pullPolicy | string | `"IfNotPresent"` | | | ingressNginx.controller.image.readOnlyRootFilesystem | bool | `false` | | @@ -125,7 +134,7 @@ A Helm chart for Kubernetes | ingressNginx.controller.image.runAsNonRoot | bool | `true` | | | ingressNginx.controller.image.runAsUser | int | `101` | This value must not be changed using the official image. uid=101(www-data) gid=82(www-data) groups=82(www-data) | | ingressNginx.controller.image.seccompProfile.type | string | `"RuntimeDefault"` | | -| ingressNginx.controller.image.tag | string | `"v1.12.2"` | | +| ingressNginx.controller.image.tag | string | `"v1.14.0"` | | | ingressNginx.controller.ingressClass | string | `"nginx"` | For backwards compatibility with ingress.class annotation, use ingressClass. Algorithm is as follows, first ingressClassName is considered, if not present, controller looks for ingress.class annotation | | ingressNginx.controller.ingressClassByName | bool | `false` | Process IngressClass per name (additionally as per spec.controller). | | ingressNginx.controller.ingressClassResource | object | `{"aliases":[],"annotations":{},"controllerValue":"k8s.io/ingress-nginx","default":false,"enabled":true,"name":"nginx","parameters":{}}` | This section refers to the creation of the IngressClass resource. IngressClasses are immutable and cannot be changed after creation. We do not support namespaced IngressClasses, yet, so a ClusterRole and a ClusterRoleBinding is required. | @@ -175,12 +184,18 @@ A Helm chart for Kubernetes | ingressNginx.controller.metrics.serviceMonitor.additionalLabels | object | `{}` | | | ingressNginx.controller.metrics.serviceMonitor.annotations | object | `{}` | Annotations to be added to the ServiceMonitor. | | ingressNginx.controller.metrics.serviceMonitor.enabled | bool | `false` | | +| ingressNginx.controller.metrics.serviceMonitor.labelLimit | int | `0` | Per-scrape limit on number of labels that will be accepted for a sample. | +| ingressNginx.controller.metrics.serviceMonitor.labelNameLengthLimit | int | `0` | Per-scrape limit on length of labels name that will be accepted for a sample. 
| +| ingressNginx.controller.metrics.serviceMonitor.labelValueLengthLimit | int | `0` | Per-scrape limit on length of labels value that will be accepted for a sample. | | ingressNginx.controller.metrics.serviceMonitor.metricRelabelings | list | `[]` | | | ingressNginx.controller.metrics.serviceMonitor.namespace | string | `""` | | | ingressNginx.controller.metrics.serviceMonitor.namespaceSelector | object | `{}` | | | ingressNginx.controller.metrics.serviceMonitor.relabelings | list | `[]` | | +| ingressNginx.controller.metrics.serviceMonitor.sampleLimit | int | `0` | Defines a per-scrape limit on the number of scraped samples that will be accepted. | | ingressNginx.controller.metrics.serviceMonitor.scrapeInterval | string | `"30s"` | | +| ingressNginx.controller.metrics.serviceMonitor.scrapeTimeout | string | `""` | Timeout after which the scrape is ended. Not being set if empty and therefore defaults to the global Prometheus scrape timeout. | | ingressNginx.controller.metrics.serviceMonitor.targetLabels | list | `[]` | | +| ingressNginx.controller.metrics.serviceMonitor.targetLimit | int | `0` | Defines a limit on the number of scraped targets that will be accepted. | | ingressNginx.controller.minAvailable | int | `1` | Minimum available pods set in PodDisruptionBudget. Define either 'minAvailable' or 'maxUnavailable', never both. | | ingressNginx.controller.minReadySeconds | int | `0` | `minReadySeconds` to avoid killing pods before we are ready # | | ingressNginx.controller.name | string | `"controller"` | | @@ -205,28 +220,34 @@ A Helm chart for Kubernetes | ingressNginx.controller.readinessProbe.timeoutSeconds | int | `1` | | | ingressNginx.controller.replicaCount | int | `1` | | | ingressNginx.controller.reportNodeInternalIp | bool | `false` | Bare-metal considerations via the host network https://kubernetes.github.io/ingress-nginx/deploy/baremetal/#via-the-host-network Ingress status was blank because there is no Service exposing the Ingress-Nginx Controller in a configuration using the host network, the default --publish-service flag used in standard cloud setups does not apply | +| ingressNginx.controller.resizePolicy | list | `[]` | Resize policy for controller containers. Ref: https://kubernetes.io/docs/tasks/configure-pod-container/resize-container-resources | | ingressNginx.controller.resources.requests.cpu | string | `"100m"` | | | ingressNginx.controller.resources.requests.memory | string | `"90Mi"` | | +| ingressNginx.controller.runtimeClassName | string | `""` | Instruct the kubelet to use the named RuntimeClass to run the pod | | ingressNginx.controller.scope.enabled | bool | `false` | Enable 'scope' or not | | ingressNginx.controller.scope.namespace | string | `""` | Namespace to limit the controller to; defaults to $(POD_NAMESPACE) | | ingressNginx.controller.scope.namespaceSelector | string | `""` | When scope.enabled == false, instead of watching all namespaces, we watching namespaces whose labels only match with namespaceSelector. Format like foo=bar. Defaults to empty, means watching all namespaces. | | ingressNginx.controller.service.annotations | object | `{}` | Annotations to be added to the external controller service. See `controller.service.internal.annotations` for annotations to be added to the internal controller service. | | ingressNginx.controller.service.appProtocol | bool | `true` | Declare the app protocol of the external HTTP and HTTPS listeners or not. Supersedes provider-specific annotations for declaring the backend protocol. 
Ref: https://kubernetes.io/docs/concepts/services-networking/service/#application-protocol | | ingressNginx.controller.service.clusterIP | string | `""` | Pre-defined cluster internal IP address of the external controller service. Take care of collisions with existing services. This value is immutable. Set once, it can not be changed without deleting and re-creating the service. Ref: https://kubernetes.io/docs/concepts/services-networking/service/#choosing-your-own-ip-address | +| ingressNginx.controller.service.clusterIPs | list | `[]` | Pre-defined cluster internal IP addresses of the external controller service. Take care of collisions with existing services. This value is immutable. Set once, it can not be changed without deleting and re-creating the service. Ref: https://kubernetes.io/docs/concepts/services-networking/service/#choosing-your-own-ip-address | | ingressNginx.controller.service.enableHttp | bool | `true` | Enable the HTTP listener on both controller services or not. | | ingressNginx.controller.service.enableHttps | bool | `true` | Enable the HTTPS listener on both controller services or not. | | ingressNginx.controller.service.enabled | bool | `true` | Enable controller services or not. This does not influence the creation of either the admission webhook or the metrics service. | | ingressNginx.controller.service.external.enabled | bool | `true` | Enable the external controller service or not. Useful for internal-only deployments. | +| ingressNginx.controller.service.external.labels | object | `{}` | Labels to be added to the external controller service. | | ingressNginx.controller.service.externalIPs | list | `[]` | List of node IP addresses at which the external controller service is available. Ref: https://kubernetes.io/docs/concepts/services-networking/service/#external-ips | | ingressNginx.controller.service.externalTrafficPolicy | string | `""` | External traffic policy of the external controller service. Set to "Local" to preserve source IP on providers supporting it. Ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip | | ingressNginx.controller.service.internal.annotations | object | `{}` | Annotations to be added to the internal controller service. Mandatory for the internal controller service to be created. Varies with the cloud service. Ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer | | ingressNginx.controller.service.internal.appProtocol | bool | `true` | Declare the app protocol of the internal HTTP and HTTPS listeners or not. Supersedes provider-specific annotations for declaring the backend protocol. Ref: https://kubernetes.io/docs/concepts/services-networking/service/#application-protocol | | ingressNginx.controller.service.internal.clusterIP | string | `""` | Pre-defined cluster internal IP address of the internal controller service. Take care of collisions with existing services. This value is immutable. Set once, it can not be changed without deleting and re-creating the service. Ref: https://kubernetes.io/docs/concepts/services-networking/service/#choosing-your-own-ip-address | +| ingressNginx.controller.service.internal.clusterIPs | list | `[]` | Pre-defined cluster internal IP addresses of the internal controller service. Take care of collisions with existing services. This value is immutable. Set once, it can not be changed without deleting and re-creating the service. 
Ref: https://kubernetes.io/docs/concepts/services-networking/service/#choosing-your-own-ip-address | | ingressNginx.controller.service.internal.enabled | bool | `false` | Enable the internal controller service or not. Remember to configure `controller.service.internal.annotations` when enabling this. | | ingressNginx.controller.service.internal.externalIPs | list | `[]` | List of node IP addresses at which the internal controller service is available. Ref: https://kubernetes.io/docs/concepts/services-networking/service/#external-ips | | ingressNginx.controller.service.internal.externalTrafficPolicy | string | `""` | External traffic policy of the internal controller service. Set to "Local" to preserve source IP on providers supporting it. Ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip | | ingressNginx.controller.service.internal.ipFamilies | list | `["IPv4"]` | List of IP families (e.g. IPv4, IPv6) assigned to the internal controller service. This field is usually assigned automatically based on cluster configuration and the `ipFamilyPolicy` field. Ref: https://kubernetes.io/docs/concepts/services-networking/dual-stack/#services | | ingressNginx.controller.service.internal.ipFamilyPolicy | string | `"SingleStack"` | Represents the dual-stack capabilities of the internal controller service. Possible values are SingleStack, PreferDualStack or RequireDualStack. Fields `ipFamilies` and `clusterIP` depend on the value of this field. Ref: https://kubernetes.io/docs/concepts/services-networking/dual-stack/#services | +| ingressNginx.controller.service.internal.labels | object | `{}` | Labels to be added to the internal controller service. | | ingressNginx.controller.service.internal.loadBalancerClass | string | `""` | Load balancer class of the internal controller service. Used by cloud providers to select a load balancer implementation other than the cloud provider default. Ref: https://kubernetes.io/docs/concepts/services-networking/service/#load-balancer-class | | ingressNginx.controller.service.internal.loadBalancerIP | string | `""` | Deprecated: Pre-defined IP address of the internal controller service. Used by cloud providers to connect the resulting load balancer service to a pre-existing static IP. Ref: https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer | | ingressNginx.controller.service.internal.loadBalancerSourceRanges | list | `[]` | Restrict access to the internal controller service. Values must be CIDRs. Allows any source address by default. | @@ -237,6 +258,7 @@ A Helm chart for Kubernetes | ingressNginx.controller.service.internal.ports | object | `{}` | | | ingressNginx.controller.service.internal.sessionAffinity | string | `""` | Session affinity of the internal controller service. Must be either "None" or "ClientIP" if set. Defaults to "None". Ref: https://kubernetes.io/docs/reference/networking/virtual-ips/#session-affinity | | ingressNginx.controller.service.internal.targetPorts | object | `{}` | | +| ingressNginx.controller.service.internal.trafficDistribution | string | `""` | Traffic distribution policy of the internal controller service. Set to "PreferClose" to route traffic to endpoints that are topologically closer to the client. Ref: https://kubernetes.io/docs/concepts/services-networking/service/#traffic-distribution | | ingressNginx.controller.service.internal.type | string | `""` | Type of the internal controller service. 
Defaults to the value of `controller.service.type`. Ref: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types | | ingressNginx.controller.service.ipFamilies | list | `["IPv4"]` | List of IP families (e.g. IPv4, IPv6) assigned to the external controller service. This field is usually assigned automatically based on cluster configuration and the `ipFamilyPolicy` field. Ref: https://kubernetes.io/docs/concepts/services-networking/dual-stack/#services | | ingressNginx.controller.service.ipFamilyPolicy | string | `"SingleStack"` | Represents the dual-stack capabilities of the external controller service. Possible values are SingleStack, PreferDualStack or RequireDualStack. Fields `ipFamilies` and `clusterIP` depend on the value of this field. Ref: https://kubernetes.io/docs/concepts/services-networking/dual-stack/#services | @@ -253,6 +275,7 @@ A Helm chart for Kubernetes | ingressNginx.controller.service.sessionAffinity | string | `""` | Session affinity of the external controller service. Must be either "None" or "ClientIP" if set. Defaults to "None". Ref: https://kubernetes.io/docs/reference/networking/virtual-ips/#session-affinity | | ingressNginx.controller.service.targetPorts.http | string | `"http"` | Port of the ingress controller the external HTTP listener is mapped to. | | ingressNginx.controller.service.targetPorts.https | string | `"https"` | Port of the ingress controller the external HTTPS listener is mapped to. | +| ingressNginx.controller.service.trafficDistribution | string | `""` | Traffic distribution policy of the external controller service. Set to "PreferClose" to route traffic to endpoints that are topologically closer to the client. Ref: https://kubernetes.io/docs/concepts/services-networking/service/#traffic-distribution | | ingressNginx.controller.service.type | string | `"LoadBalancer"` | Type of the external controller service. Ref: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types | | ingressNginx.controller.shareProcessNamespace | bool | `false` | | | ingressNginx.controller.sysctls | object | `{}` | sysctls for controller pods # Ref: https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ | @@ -312,7 +335,9 @@ A Helm chart for Kubernetes | ingressNginx.defaultBackend.readinessProbe.timeoutSeconds | int | `5` | | | ingressNginx.defaultBackend.replicaCount | int | `1` | | | ingressNginx.defaultBackend.resources | object | `{}` | | +| ingressNginx.defaultBackend.runtimeClassName | string | `""` | Instruct the kubelet to use the named RuntimeClass to run the pod | | ingressNginx.defaultBackend.service.annotations | object | `{}` | | +| ingressNginx.defaultBackend.service.clusterIPs | list | `[]` | Pre-defined cluster internal IP addresses of the default backend service. Take care of collisions with existing services. This value is immutable. Set once, it can not be changed without deleting and re-creating the service. 
Ref: https://kubernetes.io/docs/concepts/services-networking/service/#choosing-your-own-ip-address | | ingressNginx.defaultBackend.service.externalIPs | list | `[]` | List of IP addresses at which the default backend service is available # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#external-ips # | | ingressNginx.defaultBackend.service.loadBalancerSourceRanges | list | `[]` | | | ingressNginx.defaultBackend.service.servicePort | int | `80` | | @@ -368,7 +393,7 @@ spec: source: repoURL: "https://edixos.github.io/ekp-helm" - targetRevision: "0.1.3" + targetRevision: "0.1.4" chart: ingress-nginx path: '' helm: diff --git a/charts/ingress-nginx/charts/ingress-nginx-4.12.2.tgz b/charts/ingress-nginx/charts/ingress-nginx-4.12.2.tgz deleted file mode 100644 index 937ad18d..00000000 Binary files a/charts/ingress-nginx/charts/ingress-nginx-4.12.2.tgz and /dev/null differ diff --git a/charts/ingress-nginx/charts/ingress-nginx-4.14.0.tgz b/charts/ingress-nginx/charts/ingress-nginx-4.14.0.tgz new file mode 100644 index 00000000..824f35dd Binary files /dev/null and b/charts/ingress-nginx/charts/ingress-nginx-4.14.0.tgz differ diff --git a/charts/ingress-nginx/values.yaml b/charts/ingress-nginx/values.yaml index 4022f0da..8c131e28 100644 --- a/charts/ingress-nginx/values.yaml +++ b/charts/ingress-nginx/values.yaml @@ -47,9 +47,9 @@ ingressNginx: ## for backwards compatibility consider setting the full image url via the repository value below ## use *either* current default registry/image or repository format or installing chart by providing the values.yaml will fail ## repository: - tag: "v1.12.2" - digest: sha256:03497ee984628e95eca9b2279e3f3a3c1685dd48635479e627d219f00c8eefa9 - digestChroot: sha256:a697e2bfa419768315250d079ccbbca45f6099c60057769702b912d20897a574 + tag: "v1.14.0" + digest: sha256:e4127065d0317bd11dc64c4dd38dcf7fb1c3d72e468110b4086e636dbaac943d + digestChroot: sha256:d0158a50630981a945325c15a638e52c2d0691bc528caf5c04d2cf2051c5665f pullPolicy: IfNotPresent runAsNonRoot: true # -- This value must not be changed using the official image. @@ -95,6 +95,8 @@ ingressNginx: # By default, while using host network, name resolution uses the host's DNS. If you wish nginx-controller # to keep resolving names inside the k8s network, use ClusterFirstWithHostNet. dnsPolicy: ClusterFirst + # -- Instruct the kubelet to use the named RuntimeClass to run the pod + runtimeClassName: "" # -- Bare-metal considerations via the host network https://kubernetes.github.io/ingress-nginx/deploy/baremetal/#via-the-host-network # Ingress status was blank because there is no Service exposing the Ingress-Nginx Controller in a configuration using the host network, the default --publish-service flag used in standard cloud setups does not apply reportNodeInternalIp: false @@ -416,6 +418,13 @@ ingressNginx: requests: cpu: 100m memory: 90Mi + # -- Resize policy for controller containers. + # Ref: https://kubernetes.io/docs/tasks/configure-pod-container/resize-container-resources + resizePolicy: [] + # - resourceName: cpu + # restartPolicy: NotRequired + # - resourceName: memory + # restartPolicy: RestartContainer # Mutually exclusive with keda autoscaling autoscaling: enabled: false @@ -503,6 +512,8 @@ ingressNginx: external: # -- Enable the external controller service or not. Useful for internal-only deployments. enabled: true + # -- Labels to be added to the external controller service. + labels: {} # -- Annotations to be added to the external controller service. 
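Editorial note: a short sketch of the new `resizePolicy` list added to the controller values above, mirroring the commented example in the upstream chart; it assumes a cluster where in-place pod resize is available and is illustrative only.

```yaml
# Illustrative override; mirrors the commented-out example in values.yaml.
ingressNginx:
  controller:
    resizePolicy:
      - resourceName: cpu
        restartPolicy: NotRequired      # resize CPU without restarting the container
      - resourceName: memory
        restartPolicy: RestartContainer # memory changes restart the container
```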
See `controller.service.internal.annotations` for annotations to be added to the internal controller service. annotations: {} # -- Labels to be added to both controller services. @@ -514,6 +525,10 @@ ingressNginx: # This value is immutable. Set once, it can not be changed without deleting and re-creating the service. # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#choosing-your-own-ip-address clusterIP: "" + # -- Pre-defined cluster internal IP addresses of the external controller service. Take care of collisions with existing services. + # This value is immutable. Set once, it can not be changed without deleting and re-creating the service. + # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#choosing-your-own-ip-address + clusterIPs: [] # -- List of node IP addresses at which the external controller service is available. # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#external-ips externalIPs: [] @@ -540,6 +555,9 @@ ingressNginx: # Ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip # healthCheckNodePort: 0 + # -- Traffic distribution policy of the external controller service. Set to "PreferClose" to route traffic to endpoints that are topologically closer to the client. + # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#traffic-distribution + trafficDistribution: "" # -- Represents the dual-stack capabilities of the external controller service. Possible values are SingleStack, PreferDualStack or RequireDualStack. # Fields `ipFamilies` and `clusterIP` depend on the value of this field. # Ref: https://kubernetes.io/docs/concepts/services-networking/dual-stack/#services @@ -583,6 +601,8 @@ ingressNginx: internal: # -- Enable the internal controller service or not. Remember to configure `controller.service.internal.annotations` when enabling this. enabled: false + # -- Labels to be added to the internal controller service. + labels: {} # -- Annotations to be added to the internal controller service. Mandatory for the internal controller service to be created. Varies with the cloud service. # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer annotations: {} @@ -594,6 +614,10 @@ ingressNginx: # This value is immutable. Set once, it can not be changed without deleting and re-creating the service. # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#choosing-your-own-ip-address clusterIP: "" + # -- Pre-defined cluster internal IP addresses of the internal controller service. Take care of collisions with existing services. + # This value is immutable. Set once, it can not be changed without deleting and re-creating the service. + # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#choosing-your-own-ip-address + clusterIPs: [] # -- List of node IP addresses at which the internal controller service is available. # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#external-ips externalIPs: [] @@ -620,6 +644,9 @@ ingressNginx: # Ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip # healthCheckNodePort: 0 + # -- Traffic distribution policy of the internal controller service. Set to "PreferClose" to route traffic to endpoints that are topologically closer to the client. 
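Editorial note: an illustrative combination of the new external-service fields introduced here (`service.external.labels`, `service.clusterIPs`, `service.trafficDistribution`); the label and address below are placeholders.

```yaml
# Illustrative values override only; adjust addresses to your cluster.
ingressNginx:
  controller:
    service:
      external:
        labels:
          service-tier: public       # hypothetical label, applied to the external service only
      clusterIPs:
        - 10.96.0.50                 # example address; must not collide with existing services
      trafficDistribution: PreferClose
```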
+ # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#traffic-distribution + trafficDistribution: "" # -- Represents the dual-stack capabilities of the internal controller service. Possible values are SingleStack, PreferDualStack or RequireDualStack. # Fields `ipFamilies` and `clusterIP` depend on the value of this field. # Ref: https://kubernetes.io/docs/concepts/services-networking/dual-stack/#services @@ -699,11 +726,17 @@ ingressNginx: # - name: copy-portal-skins # emptyDir: {} - # -- Containers, which are run before the app containers are started. + # -- Containers, which are run before the app containers are started. Values may contain Helm templates. extraInitContainers: [] # - name: init-myservice # image: busybox # command: ['sh', '-c', 'until nslookup myservice; do echo waiting for myservice; sleep 2; done;'] + # - name: init-dynamic + # image: busybox + # command: + # - sh + # - -c + # - echo "Release={{ .Release.Name }} Namespace={{ .Release.Namespace }}" # -- Modules, which are mounted into the core nginx image. extraModules: [] @@ -771,6 +804,8 @@ ingressNginx: type: ClusterIP createSecretJob: name: create + # -- Deadline in seconds for the job to complete. Must be greater than 0 to enforce. If unset or 0, no deadline is enforced. + activeDeadlineSeconds: 0 # -- Security context for secret creation containers securityContext: runAsNonRoot: true @@ -790,8 +825,20 @@ ingressNginx: # requests: # cpu: 10m # memory: 20Mi + # -- Volume mounts for secret creation containers + volumeMounts: [] + # - name: certs + # mountPath: /etc/webhook/certs + # readOnly: true + # -- Volumes for secret creation pod + volumes: [] + # - name: certs + # secret: + # secretName: my-webhook-secret patchWebhookJob: name: patch + # -- Deadline in seconds for the job to complete. Must be greater than 0 to enforce. If unset or 0, no deadline is enforced. + activeDeadlineSeconds: 0 # -- Security context for webhook patch containers securityContext: runAsNonRoot: true @@ -805,6 +852,16 @@ ingressNginx: - ALL readOnlyRootFilesystem: true resources: {} + # -- Volume mounts for webhook patch containers + volumeMounts: [] + # - name: certs + # mountPath: /etc/webhook/certs + # readOnly: true + # -- Volumes for webhook patch pod + volumes: [] + # - name: certs + # secret: + # secretName: my-webhook-secret patch: enabled: true image: @@ -813,12 +870,14 @@ ingressNginx: ## for backwards compatibility consider setting the full image url via the repository value below ## use *either* current default registry/image or repository format or installing chart by providing the values.yaml will fail ## repository: - tag: v1.5.3 - digest: sha256:2cf4ebfa82a37c357455458f6dfc334aea1392d508270b2517795a9933a02524 + tag: v1.6.4 + digest: sha256:bcfc926ed57831edf102d62c5c0e259572591df4796ef1420b87f9cf6092497f pullPolicy: IfNotPresent # -- Provide a priority class name to the webhook patching job ## priorityClassName: "" + # -- Instruct the kubelet to use the named RuntimeClass to run the pod + runtimeClassName: "" podAnnotations: {} # NetworkPolicy for webhook patch networkPolicy: @@ -850,9 +909,15 @@ ingressNginx: rootCert: # default to be 5y duration: "" + # -- Revision history limit of the root certificate. + # Ref.: https://cert-manager.io/docs/reference/api-docs/#cert-manager.io/v1.CertificateSpec + revisionHistoryLimit: 0 admissionCert: # default to be 1y duration: "" + # -- Revision history limit of the webhook certificate. 
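Editorial note: a hedged example of the new admission-webhook job knobs (`activeDeadlineSeconds` plus per-job `volumes`/`volumeMounts`), reusing the hypothetical secret name from the upstream comments above.

```yaml
# Illustrative hardening of the cert-gen jobs; secret name and mount path are hypothetical.
ingressNginx:
  controller:
    admissionWebhooks:
      createSecretJob:
        # fail the job if it has not completed within 5 minutes
        activeDeadlineSeconds: 300
      patchWebhookJob:
        activeDeadlineSeconds: 300
        volumes:
          - name: certs
            secret:
              secretName: my-webhook-secret
        volumeMounts:
          - name: certs
            mountPath: /etc/webhook/certs
            readOnly: true
```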
+ # Ref.: https://cert-manager.io/docs/reference/api-docs/#cert-manager.io/v1.CertificateSpec + revisionHistoryLimit: 0 # issuerRef: # name: "issuer" # kind: "ClusterIssuer" @@ -895,10 +960,22 @@ ingressNginx: ## namespaceSelector: ## any: true scrapeInterval: 30s + # -- Timeout after which the scrape is ended. Not being set if empty and therefore defaults to the global Prometheus scrape timeout. + scrapeTimeout: "" # honorLabels: true targetLabels: [] relabelings: [] metricRelabelings: [] + # -- Per-scrape limit on number of labels that will be accepted for a sample. + labelLimit: 0 + # -- Per-scrape limit on length of labels name that will be accepted for a sample. + labelNameLengthLimit: 0 + # -- Per-scrape limit on length of labels value that will be accepted for a sample. + labelValueLengthLimit: 0 + # -- Defines a per-scrape limit on the number of scraped samples that will be accepted. + sampleLimit: 0 + # -- Defines a limit on the number of scraped targets that will be accepted. + targetLimit: 0 prometheusRule: enabled: false additionalLabels: {} @@ -1162,7 +1239,10 @@ ingressNginx: service: annotations: {} # clusterIP: "" - + # -- Pre-defined cluster internal IP addresses of the default backend service. Take care of collisions with existing services. + # This value is immutable. Set once, it can not be changed without deleting and re-creating the service. + # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#choosing-your-own-ip-address + clusterIPs: [] # -- List of IP addresses at which the default backend service is available ## Ref: https://kubernetes.io/docs/concepts/services-networking/service/#external-ips ## @@ -1172,6 +1252,8 @@ ingressNginx: servicePort: 80 type: ClusterIP priorityClassName: "" + # -- Instruct the kubelet to use the named RuntimeClass to run the pod + runtimeClassName: "" # -- Labels to be added to the default backend resources labels: {} ## Enable RBAC as per https://github.com/kubernetes/ingress-nginx/blob/main/docs/deploy/rbac.md and https://github.com/kubernetes/ingress-nginx/issues/266 diff --git a/charts/kube-prometheus-stack/Chart.lock b/charts/kube-prometheus-stack/Chart.lock index f988a640..ab88582e 100644 --- a/charts/kube-prometheus-stack/Chart.lock +++ b/charts/kube-prometheus-stack/Chart.lock @@ -1,6 +1,6 @@ dependencies: - name: kube-prometheus-stack repository: https://prometheus-community.github.io/helm-charts - version: 72.3.1 -digest: sha256:0fa4db9176dd8b6927926ad48aefd95ae8ca6c7205f0b6fda94c18841017b934 -generated: "2025-05-14T10:23:41.25331317Z" + version: 79.8.2 +digest: sha256:44478599dd1f16d9eb0b5387383113e37b702b8d9b6e270096851ce663f8576c +generated: "2025-11-26T10:27:41.22696534Z" diff --git a/charts/kube-prometheus-stack/Chart.yaml b/charts/kube-prometheus-stack/Chart.yaml index aa159653..adeba22a 100644 --- a/charts/kube-prometheus-stack/Chart.yaml +++ b/charts/kube-prometheus-stack/Chart.yaml @@ -15,7 +15,7 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.2 +version: 0.1.3 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. 
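Editorial note: a sketch of the new ServiceMonitor scrape limits exposed for the controller metrics endpoint; the numbers are arbitrary examples, not recommendations, and `metrics.enabled` is assumed to be the upstream toggle for the metrics service.

```yaml
# Illustrative scrape-limit settings; tune limits to your Prometheus capacity.
ingressNginx:
  controller:
    metrics:
      enabled: true
      serviceMonitor:
        enabled: true
        scrapeTimeout: 10s     # must stay below the scrape interval
        sampleLimit: 10000
        labelLimit: 60
```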
Versions are not expected to @@ -24,6 +24,6 @@ version: 0.1.2 appVersion: "v0.80.1" dependencies: - name: kube-prometheus-stack - version: 72.3.1 + version: 79.8.2 repository: "https://prometheus-community.github.io/helm-charts" alias: kubePrometheusStack diff --git a/charts/kube-prometheus-stack/README.md b/charts/kube-prometheus-stack/README.md index d124e9e5..04cb58ff 100644 --- a/charts/kube-prometheus-stack/README.md +++ b/charts/kube-prometheus-stack/README.md @@ -1,6 +1,6 @@ # kube-prometheus-stack -![Version: 0.1.2](https://img.shields.io/badge/Version-0.1.2-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v0.80.1](https://img.shields.io/badge/AppVersion-v0.80.1-informational?style=flat-square) +![Version: 0.1.3](https://img.shields.io/badge/Version-0.1.3-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v0.80.1](https://img.shields.io/badge/AppVersion-v0.80.1-informational?style=flat-square) ## Prerequisites @@ -11,7 +11,7 @@ | Repository | Name | Version | |------------|------|---------| -| https://prometheus-community.github.io/helm-charts | kubePrometheusStack(kube-prometheus-stack) | 72.3.1 | +| https://prometheus-community.github.io/helm-charts | kubePrometheusStack(kube-prometheus-stack) | 79.8.2 | ## Description @@ -22,6 +22,8 @@ A Helm chart for Kubernetes | Key | Type | Default | Description | |-----|------|---------|-------------| | kubePrometheusStack.additionalPrometheusRulesMap | object | `{}` | | +| kubePrometheusStack.alertmanager.additionalLabels | object | `{}` | | +| kubePrometheusStack.alertmanager.alertmanagerSpec.additionalArgs | list | `[]` | | | kubePrometheusStack.alertmanager.alertmanagerSpec.additionalConfig | object | `{}` | | | kubePrometheusStack.alertmanager.alertmanagerSpec.additionalConfigString | string | `""` | | | kubePrometheusStack.alertmanager.alertmanagerSpec.additionalPeers | list | `[]` | | @@ -38,12 +40,15 @@ A Helm chart for Kubernetes | kubePrometheusStack.alertmanager.alertmanagerSpec.clusterPushpullInterval | string | `""` | | | kubePrometheusStack.alertmanager.alertmanagerSpec.configMaps | list | `[]` | | | kubePrometheusStack.alertmanager.alertmanagerSpec.containers | list | `[]` | | +| kubePrometheusStack.alertmanager.alertmanagerSpec.dnsConfig | object | `{}` | | +| kubePrometheusStack.alertmanager.alertmanagerSpec.dnsPolicy | string | `""` | | | kubePrometheusStack.alertmanager.alertmanagerSpec.externalUrl | string | `nil` | | | kubePrometheusStack.alertmanager.alertmanagerSpec.forceEnableClusterMode | bool | `false` | | +| kubePrometheusStack.alertmanager.alertmanagerSpec.image.pullPolicy | string | `"IfNotPresent"` | | | kubePrometheusStack.alertmanager.alertmanagerSpec.image.registry | string | `"quay.io"` | | | kubePrometheusStack.alertmanager.alertmanagerSpec.image.repository | string | `"prometheus/alertmanager"` | | | kubePrometheusStack.alertmanager.alertmanagerSpec.image.sha | string | `""` | | -| kubePrometheusStack.alertmanager.alertmanagerSpec.image.tag | string | `"v0.28.1"` | | +| kubePrometheusStack.alertmanager.alertmanagerSpec.image.tag | string | `"v0.29.0"` | | | kubePrometheusStack.alertmanager.alertmanagerSpec.initContainers | list | `[]` | | | kubePrometheusStack.alertmanager.alertmanagerSpec.listenLocal | bool | `false` | | | kubePrometheusStack.alertmanager.alertmanagerSpec.logFormat | string | `"logfmt"` | | @@ 
-109,6 +114,7 @@ A Helm chart for Kubernetes | kubePrometheusStack.alertmanager.ingress.annotations | object | `{}` | | | kubePrometheusStack.alertmanager.ingress.enabled | bool | `false` | | | kubePrometheusStack.alertmanager.ingress.hosts | list | `[]` | | +| kubePrometheusStack.alertmanager.ingress.ingressClassName | string | `""` | | | kubePrometheusStack.alertmanager.ingress.labels | object | `{}` | | | kubePrometheusStack.alertmanager.ingress.paths | list | `[]` | | | kubePrometheusStack.alertmanager.ingress.tls | list | `[]` | | @@ -116,6 +122,7 @@ A Helm chart for Kubernetes | kubePrometheusStack.alertmanager.ingressPerReplica.enabled | bool | `false` | | | kubePrometheusStack.alertmanager.ingressPerReplica.hostDomain | string | `""` | | | kubePrometheusStack.alertmanager.ingressPerReplica.hostPrefix | string | `""` | | +| kubePrometheusStack.alertmanager.ingressPerReplica.ingressClassName | string | `""` | | | kubePrometheusStack.alertmanager.ingressPerReplica.labels | object | `{}` | | | kubePrometheusStack.alertmanager.ingressPerReplica.paths | list | `[]` | | | kubePrometheusStack.alertmanager.ingressPerReplica.tlsSecretName | string | `""` | | @@ -136,7 +143,6 @@ A Helm chart for Kubernetes | kubePrometheusStack.alertmanager.networkPolicy.monitoringRules.prometheus | bool | `true` | Enable ingress from Prometheus # | | kubePrometheusStack.alertmanager.networkPolicy.policyTypes | list | `["Ingress"]` | Define policy types. If egress is enabled, both Ingress and Egress will be used Valid values are ["Ingress"] or ["Ingress", "Egress"] # | | kubePrometheusStack.alertmanager.podDisruptionBudget.enabled | bool | `false` | | -| kubePrometheusStack.alertmanager.podDisruptionBudget.maxUnavailable | string | `""` | | | kubePrometheusStack.alertmanager.podDisruptionBudget.minAvailable | int | `1` | | | kubePrometheusStack.alertmanager.podDisruptionBudget.unhealthyPodEvictionPolicy | string | `"AlwaysAllow"` | | | kubePrometheusStack.alertmanager.route | object | `{"main":{"additionalRules":[],"annotations":{},"apiVersion":"gateway.networking.k8s.io/v1","enabled":false,"filters":[],"hostnames":[],"httpsRedirect":false,"kind":"HTTPRoute","labels":{},"matches":[{"path":{"type":"PathPrefix","value":"/"}}],"parentRefs":[]}}` | BETA: Configure the gateway routes for the chart here. More routes can be added by adding a dictionary key like the 'main' route. Be aware that this is an early beta of this feature, kube-prometheus-stack does not guarantee this works and is subject to change. 
Being BETA this can/will change in the future without notice, do not use unless you want to take that risk [[ref]](https://gateway-api.sigs.k8s.io/reference/spec/#gateway.networking.k8s.io%2fv1alpha2) | @@ -327,6 +333,7 @@ A Helm chart for Kubernetes | kubePrometheusStack.defaultRules.additionalRuleGroupLabels.prometheusOperator | object | `{}` | | | kubePrometheusStack.defaultRules.additionalRuleLabels | object | `{}` | | | kubePrometheusStack.defaultRules.annotations | object | `{}` | | +| kubePrometheusStack.defaultRules.appNamespacesOperator | string | `"=~"` | | | kubePrometheusStack.defaultRules.appNamespacesTarget | string | `".*"` | | | kubePrometheusStack.defaultRules.create | bool | `true` | | | kubePrometheusStack.defaultRules.disabled | object | `{}` | | @@ -374,10 +381,10 @@ A Helm chart for Kubernetes | kubePrometheusStack.global.imageRegistry | string | `""` | | | kubePrometheusStack.global.rbac.create | bool | `true` | | | kubePrometheusStack.global.rbac.createAggregateClusterRoles | bool | `false` | | -| kubePrometheusStack.global.rbac.pspAnnotations | object | `{}` | | -| kubePrometheusStack.global.rbac.pspEnabled | bool | `false` | | | kubePrometheusStack.grafana.additionalDataSources | list | `[]` | | -| kubePrometheusStack.grafana.adminPassword | string | `"prom-operator"` | | +| kubePrometheusStack.grafana.admin.existingSecret | string | `""` | | +| kubePrometheusStack.grafana.admin.passwordKey | string | `"admin-password"` | | +| kubePrometheusStack.grafana.admin.userKey | string | `"admin-user"` | | | kubePrometheusStack.grafana.adminUser | string | `"admin"` | | | kubePrometheusStack.grafana.defaultDashboardsEditable | bool | `true` | | | kubePrometheusStack.grafana.defaultDashboardsEnabled | bool | `true` | | @@ -441,23 +448,11 @@ A Helm chart for Kubernetes | kubePrometheusStack.grafana.sidecar.datasources.name | string | `"Prometheus"` | | | kubePrometheusStack.grafana.sidecar.datasources.prometheusServiceName | string | `"prometheus-operated"` | | | kubePrometheusStack.grafana.sidecar.datasources.uid | string | `"prometheus"` | | -| kubePrometheusStack.kube-state-metrics.namespaceOverride | string | `""` | | | kubePrometheusStack.kube-state-metrics.prometheus.monitor.enabled | bool | `true` | | -| kubePrometheusStack.kube-state-metrics.prometheus.monitor.honorLabels | bool | `true` | | -| kubePrometheusStack.kube-state-metrics.prometheus.monitor.interval | string | `""` | | -| kubePrometheusStack.kube-state-metrics.prometheus.monitor.labelLimit | int | `0` | | -| kubePrometheusStack.kube-state-metrics.prometheus.monitor.labelNameLengthLimit | int | `0` | | -| kubePrometheusStack.kube-state-metrics.prometheus.monitor.labelValueLengthLimit | int | `0` | | -| kubePrometheusStack.kube-state-metrics.prometheus.monitor.metricRelabelings | list | `[]` | | -| kubePrometheusStack.kube-state-metrics.prometheus.monitor.proxyUrl | string | `""` | | -| kubePrometheusStack.kube-state-metrics.prometheus.monitor.relabelings | list | `[]` | | -| kubePrometheusStack.kube-state-metrics.prometheus.monitor.sampleLimit | int | `0` | | -| kubePrometheusStack.kube-state-metrics.prometheus.monitor.scrapeTimeout | string | `""` | | -| kubePrometheusStack.kube-state-metrics.prometheus.monitor.targetLimit | int | `0` | | +| kubePrometheusStack.kube-state-metrics.prometheus.monitor.http.honorLabels | bool | `true` | | +| kubePrometheusStack.kube-state-metrics.prometheus.monitor.metrics.honorLabels | bool | `true` | | | kubePrometheusStack.kube-state-metrics.prometheusScrape | bool | 
`false` | | -| kubePrometheusStack.kube-state-metrics.rbac.create | bool | `true` | | | kubePrometheusStack.kube-state-metrics.releaseLabel | bool | `true` | | -| kubePrometheusStack.kube-state-metrics.selfMonitor.enabled | bool | `false` | | | kubePrometheusStack.kubeApiServer.enabled | bool | `true` | | | kubePrometheusStack.kubeApiServer.serviceMonitor.additionalLabels | object | `{}` | | | kubePrometheusStack.kubeApiServer.serviceMonitor.enabled | bool | `true` | | @@ -481,6 +476,7 @@ A Helm chart for Kubernetes | kubePrometheusStack.kubeApiServer.tlsConfig.serverName | string | `"kubernetes"` | | | kubePrometheusStack.kubeControllerManager.enabled | bool | `true` | | | kubePrometheusStack.kubeControllerManager.endpoints | list | `[]` | | +| kubePrometheusStack.kubeControllerManager.jobNameOverride | string | `""` | | | kubePrometheusStack.kubeControllerManager.service.enabled | bool | `true` | | | kubePrometheusStack.kubeControllerManager.service.ipDualStack.enabled | bool | `false` | | | kubePrometheusStack.kubeControllerManager.service.ipDualStack.ipFamilies[0] | string | `"IPv6"` | | @@ -562,6 +558,7 @@ A Helm chart for Kubernetes | kubePrometheusStack.kubeEtcd.serviceMonitor.targetLimit | int | `0` | | | kubePrometheusStack.kubeProxy.enabled | bool | `true` | | | kubePrometheusStack.kubeProxy.endpoints | list | `[]` | | +| kubePrometheusStack.kubeProxy.jobNameOverride | string | `""` | | | kubePrometheusStack.kubeProxy.service.enabled | bool | `true` | | | kubePrometheusStack.kubeProxy.service.ipDualStack.enabled | bool | `false` | | | kubePrometheusStack.kubeProxy.service.ipDualStack.ipFamilies[0] | string | `"IPv6"` | | @@ -587,6 +584,7 @@ A Helm chart for Kubernetes | kubePrometheusStack.kubeProxy.serviceMonitor.targetLimit | int | `0` | | | kubePrometheusStack.kubeScheduler.enabled | bool | `true` | | | kubePrometheusStack.kubeScheduler.endpoints | list | `[]` | | +| kubePrometheusStack.kubeScheduler.jobNameOverride | string | `""` | | | kubePrometheusStack.kubeScheduler.service.enabled | bool | `true` | | | kubePrometheusStack.kubeScheduler.service.ipDualStack.enabled | bool | `false` | | | kubePrometheusStack.kubeScheduler.service.ipDualStack.ipFamilies[0] | string | `"IPv6"` | | @@ -616,6 +614,7 @@ A Helm chart for Kubernetes | kubePrometheusStack.kubeTargetVersionOverride | string | `""` | | | kubePrometheusStack.kubeVersionOverride | string | `""` | | | kubePrometheusStack.kubelet.enabled | bool | `true` | | +| kubePrometheusStack.kubelet.jobNameOverride | string | `""` | | | kubePrometheusStack.kubelet.namespace | string | `"kube-system"` | | | kubePrometheusStack.kubelet.serviceMonitor.additionalLabels | object | `{}` | | | kubePrometheusStack.kubelet.serviceMonitor.attachMetadata.node | bool | `false` | | @@ -692,8 +691,8 @@ A Helm chart for Kubernetes | kubePrometheusStack.nodeExporter.operatingSystems.aix.enabled | bool | `true` | | | kubePrometheusStack.nodeExporter.operatingSystems.darwin.enabled | bool | `true` | | | kubePrometheusStack.nodeExporter.operatingSystems.linux.enabled | bool | `true` | | -| kubePrometheusStack.prometheus-node-exporter.extraArgs[0] | string | `"--collector.filesystem.mount-points-exclude=^/(dev|proc|sys|var/lib/docker/.+|var/lib/kubelet/.+)($|/)"` | | -| kubePrometheusStack.prometheus-node-exporter.extraArgs[1] | string | 
`"--collector.filesystem.fs-types-exclude=^(autofs|binfmt_misc|bpf|cgroup2?|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|iso9660|mqueue|nsfs|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|selinuxfs|squashfs|sysfs|tracefs)$"` | | +| kubePrometheusStack.prometheus-node-exporter.extraArgs[0] | string | `"--collector.filesystem.mount-points-exclude=^/(dev|proc|sys|run/containerd/.+|var/lib/docker/.+|var/lib/kubelet/.+)($|/)"` | | +| kubePrometheusStack.prometheus-node-exporter.extraArgs[1] | string | `"--collector.filesystem.fs-types-exclude=^(autofs|binfmt_misc|bpf|cgroup2?|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|iso9660|mqueue|nsfs|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|selinuxfs|squashfs|sysfs|tracefs|erofs)$"` | | | kubePrometheusStack.prometheus-node-exporter.namespaceOverride | string | `""` | | | kubePrometheusStack.prometheus-node-exporter.podLabels.jobLabel | string | `"node-exporter"` | | | kubePrometheusStack.prometheus-node-exporter.prometheus.monitor.enabled | bool | `true` | | @@ -708,6 +707,8 @@ A Helm chart for Kubernetes | kubePrometheusStack.prometheus-node-exporter.prometheus.monitor.sampleLimit | int | `0` | | | kubePrometheusStack.prometheus-node-exporter.prometheus.monitor.scrapeTimeout | string | `""` | | | kubePrometheusStack.prometheus-node-exporter.prometheus.monitor.targetLimit | int | `0` | | +| kubePrometheusStack.prometheus-node-exporter.prometheus.podMonitor.enabled | bool | `false` | | +| kubePrometheusStack.prometheus-node-exporter.prometheus.podMonitor.jobLabel | string | `"jobLabel"` | | | kubePrometheusStack.prometheus-node-exporter.rbac.pspEnabled | bool | `false` | | | kubePrometheusStack.prometheus-node-exporter.releaseLabel | bool | `true` | | | kubePrometheusStack.prometheus-node-exporter.service.ipDualStack.enabled | bool | `false` | | @@ -721,6 +722,7 @@ A Helm chart for Kubernetes | kubePrometheusStack.prometheus-windows-exporter.prometheus.monitor.enabled | bool | `true` | | | kubePrometheusStack.prometheus-windows-exporter.prometheus.monitor.jobLabel | string | `"jobLabel"` | | | kubePrometheusStack.prometheus-windows-exporter.releaseLabel | bool | `true` | | +| kubePrometheusStack.prometheus.additionalLabels | object | `{}` | | | kubePrometheusStack.prometheus.additionalPodMonitors | list | `[]` | | | kubePrometheusStack.prometheus.additionalRulesForClusterRole | list | `[]` | | | kubePrometheusStack.prometheus.additionalServiceMonitors | list | `[]` | | @@ -732,6 +734,7 @@ A Helm chart for Kubernetes | kubePrometheusStack.prometheus.ingress.annotations | object | `{}` | | | kubePrometheusStack.prometheus.ingress.enabled | bool | `false` | | | kubePrometheusStack.prometheus.ingress.hosts | list | `[]` | | +| kubePrometheusStack.prometheus.ingress.ingressClassName | string | `""` | | | kubePrometheusStack.prometheus.ingress.labels | object | `{}` | | | kubePrometheusStack.prometheus.ingress.paths | list | `[]` | | | kubePrometheusStack.prometheus.ingress.tls | list | `[]` | | @@ -739,6 +742,7 @@ A Helm chart for Kubernetes | kubePrometheusStack.prometheus.ingressPerReplica.enabled | bool | `false` | | | kubePrometheusStack.prometheus.ingressPerReplica.hostDomain | string | `""` | | | kubePrometheusStack.prometheus.ingressPerReplica.hostPrefix | string | `""` | | +| kubePrometheusStack.prometheus.ingressPerReplica.ingressClassName | string | `""` | | | kubePrometheusStack.prometheus.ingressPerReplica.labels | object | `{}` | | | kubePrometheusStack.prometheus.ingressPerReplica.paths | list | `[]` | | | 
kubePrometheusStack.prometheus.ingressPerReplica.tlsSecretName | string | `""` | | @@ -746,13 +750,10 @@ A Helm chart for Kubernetes | kubePrometheusStack.prometheus.ingressPerReplica.tlsSecretPerReplica.prefix | string | `"prometheus"` | | | kubePrometheusStack.prometheus.networkPolicy.enabled | bool | `false` | | | kubePrometheusStack.prometheus.networkPolicy.flavor | string | `"kubernetes"` | | +| kubePrometheusStack.prometheus.networkPolicy.namespace | string | `nil` | | | kubePrometheusStack.prometheus.podDisruptionBudget.enabled | bool | `false` | | -| kubePrometheusStack.prometheus.podDisruptionBudget.maxUnavailable | string | `""` | | | kubePrometheusStack.prometheus.podDisruptionBudget.minAvailable | int | `1` | | | kubePrometheusStack.prometheus.podDisruptionBudget.unhealthyPodEvictionPolicy | string | `"AlwaysAllow"` | | -| kubePrometheusStack.prometheus.podSecurityPolicy.allowedCapabilities | list | `[]` | | -| kubePrometheusStack.prometheus.podSecurityPolicy.allowedHostPaths | list | `[]` | | -| kubePrometheusStack.prometheus.podSecurityPolicy.volumes | list | `[]` | | | kubePrometheusStack.prometheus.prometheusSpec.additionalAlertManagerConfigs | list | `[]` | | | kubePrometheusStack.prometheus.prometheusSpec.additionalAlertManagerConfigsSecret | object | `{}` | | | kubePrometheusStack.prometheus.prometheusSpec.additionalAlertRelabelConfigs | list | `[]` | | @@ -774,8 +775,11 @@ A Helm chart for Kubernetes | kubePrometheusStack.prometheus.prometheusSpec.configMaps | list | `[]` | | | kubePrometheusStack.prometheus.prometheusSpec.containers | list | `[]` | | | kubePrometheusStack.prometheus.prometheusSpec.disableCompaction | bool | `false` | | +| kubePrometheusStack.prometheus.prometheusSpec.dnsConfig | object | `{}` | | +| kubePrometheusStack.prometheus.prometheusSpec.dnsPolicy | string | `""` | | | kubePrometheusStack.prometheus.prometheusSpec.enableAdminAPI | bool | `false` | | | kubePrometheusStack.prometheus.prometheusSpec.enableFeatures | list | `[]` | | +| kubePrometheusStack.prometheus.prometheusSpec.enableOTLPReceiver | bool | `false` | | | kubePrometheusStack.prometheus.prometheusSpec.enableRemoteWriteReceiver | bool | `false` | | | kubePrometheusStack.prometheus.prometheusSpec.enforcedKeepDroppedTargets | int | `0` | | | kubePrometheusStack.prometheus.prometheusSpec.enforcedLabelLimit | bool | `false` | | @@ -792,10 +796,11 @@ A Helm chart for Kubernetes | kubePrometheusStack.prometheus.prometheusSpec.hostAliases | list | `[]` | | | kubePrometheusStack.prometheus.prometheusSpec.hostNetwork | bool | `false` | | | kubePrometheusStack.prometheus.prometheusSpec.ignoreNamespaceSelectors | bool | `false` | | +| kubePrometheusStack.prometheus.prometheusSpec.image.pullPolicy | string | `"IfNotPresent"` | | | kubePrometheusStack.prometheus.prometheusSpec.image.registry | string | `"quay.io"` | | | kubePrometheusStack.prometheus.prometheusSpec.image.repository | string | `"prometheus/prometheus"` | | | kubePrometheusStack.prometheus.prometheusSpec.image.sha | string | `""` | | -| kubePrometheusStack.prometheus.prometheusSpec.image.tag | string | `"v3.3.1"` | | +| kubePrometheusStack.prometheus.prometheusSpec.image.tag | string | `"v3.7.3"` | | | kubePrometheusStack.prometheus.prometheusSpec.initContainers | list | `[]` | | | kubePrometheusStack.prometheus.prometheusSpec.listenLocal | bool | `false` | | | kubePrometheusStack.prometheus.prometheusSpec.logFormat | string | `"logfmt"` | | @@ -804,6 +809,7 @@ A Helm chart for Kubernetes | 
kubePrometheusStack.prometheus.prometheusSpec.minReadySeconds | int | `0` | | | kubePrometheusStack.prometheus.prometheusSpec.nameValidationScheme | string | `""` | | | kubePrometheusStack.prometheus.prometheusSpec.nodeSelector | object | `{}` | | +| kubePrometheusStack.prometheus.prometheusSpec.otlp | object | `{}` | | | kubePrometheusStack.prometheus.prometheusSpec.overrideHonorLabels | bool | `false` | | | kubePrometheusStack.prometheus.prometheusSpec.overrideHonorTimestamps | bool | `false` | | | kubePrometheusStack.prometheus.prometheusSpec.paused | bool | `false` | | @@ -814,6 +820,7 @@ A Helm chart for Kubernetes | kubePrometheusStack.prometheus.prometheusSpec.podMonitorNamespaceSelector | object | `{}` | | | kubePrometheusStack.prometheus.prometheusSpec.podMonitorSelector | object | `{}` | | | kubePrometheusStack.prometheus.prometheusSpec.podMonitorSelectorNilUsesHelmValues | bool | `true` | | +| kubePrometheusStack.prometheus.prometheusSpec.podTargetLabels | list | `[]` | | | kubePrometheusStack.prometheus.prometheusSpec.portName | string | `"http-web"` | | | kubePrometheusStack.prometheus.prometheusSpec.priorityClassName | string | `""` | | | kubePrometheusStack.prometheus.prometheusSpec.probeNamespaceSelector | object | `{}` | | @@ -844,6 +851,7 @@ A Helm chart for Kubernetes | kubePrometheusStack.prometheus.prometheusSpec.scrapeConfigSelectorNilUsesHelmValues | bool | `true` | | | kubePrometheusStack.prometheus.prometheusSpec.scrapeFailureLogFile | string | `""` | | | kubePrometheusStack.prometheus.prometheusSpec.scrapeInterval | string | `""` | | +| kubePrometheusStack.prometheus.prometheusSpec.scrapeProtocols | list | `[]` | | | kubePrometheusStack.prometheus.prometheusSpec.scrapeTimeout | string | `""` | | | kubePrometheusStack.prometheus.prometheusSpec.secrets | list | `[]` | | | kubePrometheusStack.prometheus.prometheusSpec.securityContext.fsGroup | int | `2000` | | @@ -927,6 +935,7 @@ A Helm chart for Kubernetes | kubePrometheusStack.prometheus.thanosIngress.annotations | object | `{}` | | | kubePrometheusStack.prometheus.thanosIngress.enabled | bool | `false` | | | kubePrometheusStack.prometheus.thanosIngress.hosts | list | `[]` | | +| kubePrometheusStack.prometheus.thanosIngress.ingressClassName | string | `""` | | | kubePrometheusStack.prometheus.thanosIngress.labels | object | `{}` | | | kubePrometheusStack.prometheus.thanosIngress.nodePort | int | `30901` | | | kubePrometheusStack.prometheus.thanosIngress.paths | list | `[]` | | @@ -972,7 +981,6 @@ A Helm chart for Kubernetes | kubePrometheusStack.prometheus.thanosServiceMonitor.metricRelabelings | list | `[]` | | | kubePrometheusStack.prometheus.thanosServiceMonitor.relabelings | list | `[]` | | | kubePrometheusStack.prometheus.thanosServiceMonitor.scheme | string | `""` | | -| kubePrometheusStack.prometheus.thanosServiceMonitor.scrapeProtocols | list | `[]` | | | kubePrometheusStack.prometheus.thanosServiceMonitor.tlsConfig | object | `{}` | | | kubePrometheusStack.prometheusOperator.admissionWebhooks.annotations | object | `{}` | | | kubePrometheusStack.prometheusOperator.admissionWebhooks.caBundle | string | `""` | | @@ -1008,7 +1016,6 @@ A Helm chart for Kubernetes | kubePrometheusStack.prometheusOperator.admissionWebhooks.deployment.nodeSelector | object | `{}` | | | kubePrometheusStack.prometheusOperator.admissionWebhooks.deployment.podAnnotations | object | `{}` | | | kubePrometheusStack.prometheusOperator.admissionWebhooks.deployment.podDisruptionBudget.enabled | bool | `false` | | -| 
kubePrometheusStack.prometheusOperator.admissionWebhooks.deployment.podDisruptionBudget.maxUnavailable | string | `""` | | | kubePrometheusStack.prometheusOperator.admissionWebhooks.deployment.podDisruptionBudget.minAvailable | int | `1` | | | kubePrometheusStack.prometheusOperator.admissionWebhooks.deployment.podDisruptionBudget.unhealthyPodEvictionPolicy | string | `"AlwaysAllow"` | | | kubePrometheusStack.prometheusOperator.admissionWebhooks.deployment.podLabels | object | `{}` | | @@ -1052,6 +1059,7 @@ A Helm chart for Kubernetes | kubePrometheusStack.prometheusOperator.admissionWebhooks.deployment.tolerations | list | `[]` | | | kubePrometheusStack.prometheusOperator.admissionWebhooks.enabled | bool | `true` | | | kubePrometheusStack.prometheusOperator.admissionWebhooks.failurePolicy | string | `""` | | +| kubePrometheusStack.prometheusOperator.admissionWebhooks.matchConditions | object | `{}` | | | kubePrometheusStack.prometheusOperator.admissionWebhooks.mutatingWebhookConfiguration.annotations | object | `{}` | | | kubePrometheusStack.prometheusOperator.admissionWebhooks.namespaceSelector | object | `{}` | | | kubePrometheusStack.prometheusOperator.admissionWebhooks.objectSelector | object | `{}` | | @@ -1062,7 +1070,7 @@ A Helm chart for Kubernetes | kubePrometheusStack.prometheusOperator.admissionWebhooks.patch.image.registry | string | `"registry.k8s.io"` | | | kubePrometheusStack.prometheusOperator.admissionWebhooks.patch.image.repository | string | `"ingress-nginx/kube-webhook-certgen"` | | | kubePrometheusStack.prometheusOperator.admissionWebhooks.patch.image.sha | string | `""` | | -| kubePrometheusStack.prometheusOperator.admissionWebhooks.patch.image.tag | string | `"v1.5.3"` | | +| kubePrometheusStack.prometheusOperator.admissionWebhooks.patch.image.tag | string | `"v1.6.4"` | | | kubePrometheusStack.prometheusOperator.admissionWebhooks.patch.nodeSelector | object | `{}` | | | kubePrometheusStack.prometheusOperator.admissionWebhooks.patch.podAnnotations | object | `{}` | | | kubePrometheusStack.prometheusOperator.admissionWebhooks.patch.priorityClassName | string | `""` | | @@ -1124,7 +1132,6 @@ A Helm chart for Kubernetes | kubePrometheusStack.prometheusOperator.nodeSelector | object | `{}` | | | kubePrometheusStack.prometheusOperator.podAnnotations | object | `{}` | | | kubePrometheusStack.prometheusOperator.podDisruptionBudget.enabled | bool | `false` | | -| kubePrometheusStack.prometheusOperator.podDisruptionBudget.maxUnavailable | string | `""` | | | kubePrometheusStack.prometheusOperator.podDisruptionBudget.minAvailable | int | `1` | | | kubePrometheusStack.prometheusOperator.podDisruptionBudget.unhealthyPodEvictionPolicy | string | `"AlwaysAllow"` | | | kubePrometheusStack.prometheusOperator.podLabels | object | `{}` | | @@ -1185,7 +1192,7 @@ A Helm chart for Kubernetes | kubePrometheusStack.prometheusOperator.thanosImage.registry | string | `"quay.io"` | | | kubePrometheusStack.prometheusOperator.thanosImage.repository | string | `"thanos/thanos"` | | | kubePrometheusStack.prometheusOperator.thanosImage.sha | string | `""` | | -| kubePrometheusStack.prometheusOperator.thanosImage.tag | string | `"v0.38.0"` | | +| kubePrometheusStack.prometheusOperator.thanosImage.tag | string | `"v0.40.1"` | | | kubePrometheusStack.prometheusOperator.thanosRulerInstanceNamespaces | list | `[]` | | | kubePrometheusStack.prometheusOperator.thanosRulerInstanceSelector | string | `""` | | | kubePrometheusStack.prometheusOperator.tls.enabled | bool | `true` | | @@ -1204,11 +1211,11 @@ A 
Helm chart for Kubernetes | kubePrometheusStack.thanosRuler.ingress.annotations | object | `{}` | | | kubePrometheusStack.thanosRuler.ingress.enabled | bool | `false` | | | kubePrometheusStack.thanosRuler.ingress.hosts | list | `[]` | | +| kubePrometheusStack.thanosRuler.ingress.ingressClassName | string | `""` | | | kubePrometheusStack.thanosRuler.ingress.labels | object | `{}` | | | kubePrometheusStack.thanosRuler.ingress.paths | list | `[]` | | | kubePrometheusStack.thanosRuler.ingress.tls | list | `[]` | | | kubePrometheusStack.thanosRuler.podDisruptionBudget.enabled | bool | `false` | | -| kubePrometheusStack.thanosRuler.podDisruptionBudget.maxUnavailable | string | `""` | | | kubePrometheusStack.thanosRuler.podDisruptionBudget.minAvailable | int | `1` | | | kubePrometheusStack.thanosRuler.podDisruptionBudget.unhealthyPodEvictionPolicy | string | `"AlwaysAllow"` | | | kubePrometheusStack.thanosRuler.route | object | `{"main":{"additionalRules":[],"annotations":{},"apiVersion":"gateway.networking.k8s.io/v1","enabled":false,"filters":[],"hostnames":[],"httpsRedirect":false,"kind":"HTTPRoute","labels":{},"matches":[{"path":{"type":"PathPrefix","value":"/"}}],"parentRefs":[]}}` | BETA: Configure the gateway routes for the chart here. More routes can be added by adding a dictionary key like the 'main' route. Be aware that this is an early beta of this feature, kube-prometheus-stack does not guarantee this works and is subject to change. Being BETA this can/will change in the future without notice, do not use unless you want to take that risk [[ref]](https://gateway-api.sigs.k8s.io/reference/spec/#gateway.networking.k8s.io%2fv1alpha2) | @@ -1265,7 +1272,7 @@ A Helm chart for Kubernetes | kubePrometheusStack.thanosRuler.thanosRulerSpec.image.registry | string | `"quay.io"` | | | kubePrometheusStack.thanosRuler.thanosRulerSpec.image.repository | string | `"thanos/thanos"` | | | kubePrometheusStack.thanosRuler.thanosRulerSpec.image.sha | string | `""` | | -| kubePrometheusStack.thanosRuler.thanosRulerSpec.image.tag | string | `"v0.38.0"` | | +| kubePrometheusStack.thanosRuler.thanosRulerSpec.image.tag | string | `"v0.40.1"` | | | kubePrometheusStack.thanosRuler.thanosRulerSpec.initContainers | list | `[]` | | | kubePrometheusStack.thanosRuler.thanosRulerSpec.labels | object | `{}` | | | kubePrometheusStack.thanosRuler.thanosRulerSpec.listenLocal | bool | `false` | | @@ -1329,7 +1336,7 @@ spec: source: repoURL: "https://edixos.github.io/ekp-helm" - targetRevision: "0.1.2" + targetRevision: "0.1.3" chart: kube-prometheus-stack path: '' helm: diff --git a/charts/kube-prometheus-stack/charts/kube-prometheus-stack-72.3.1.tgz b/charts/kube-prometheus-stack/charts/kube-prometheus-stack-72.3.1.tgz deleted file mode 100644 index ea520468..00000000 Binary files a/charts/kube-prometheus-stack/charts/kube-prometheus-stack-72.3.1.tgz and /dev/null differ diff --git a/charts/kube-prometheus-stack/charts/kube-prometheus-stack-79.8.2.tgz b/charts/kube-prometheus-stack/charts/kube-prometheus-stack-79.8.2.tgz new file mode 100644 index 00000000..8cdfba10 Binary files /dev/null and b/charts/kube-prometheus-stack/charts/kube-prometheus-stack-79.8.2.tgz differ diff --git a/charts/kube-prometheus-stack/values.yaml b/charts/kube-prometheus-stack/values.yaml index ac16c284..d736266b 100644 --- a/charts/kube-prometheus-stack/values.yaml +++ b/charts/kube-prometheus-stack/values.yaml @@ -36,7 +36,7 @@ kubePrometheusStack: enabled: true ## The CRD upgrade job mitigates the limitation of helm not being able to upgrade 
CRDs. ## The job will apply the CRDs to the cluster before the operator is deployed, using helm hooks. - ## It deploy a corresponding clusterrole, clusterrolebinding and serviceaccount to apply the CRDs. + ## It deploys a corresponding clusterrole, clusterrolebinding and serviceaccount to apply the CRDs. ## This feature is in preview, off by default and may change in the future. upgradeJob: enabled: false @@ -156,7 +156,7 @@ kubePrometheusStack: seccompProfile: type: RuntimeDefault - ## custom Rules to override "for" and "severity" in defaultRules + ## Custom rules to override "for" and "severity" in defaultRules ## customRules: {} # AlertmanagerFailedReload: @@ -205,6 +205,11 @@ kubePrometheusStack: prometheusOperator: true windows: true + # Defines the operator for namespace selection in rules + # Use "=~" to include namespaces matching the pattern (default) + # Use "!~" to exclude namespaces matching the pattern + appNamespacesOperator: "=~" + ## Reduce app namespace alert scope appNamespacesTarget: ".*" @@ -257,7 +262,7 @@ kubePrometheusStack: prometheus: {} prometheusOperator: {} - ## Additional annotations for specific PrometheusRule alerts groups + ## Additional annotations for specific PrometheusRule alert groups additionalRuleGroupAnnotations: alertmanager: {} etcd: {} @@ -334,18 +339,8 @@ kubePrometheusStack: ## Create ClusterRoles that extend the existing view, edit and admin ClusterRoles to interact with prometheus-operator CRDs ## Ref: https://kubernetes.io/docs/reference/access-authn-authz/rbac/#aggregated-clusterroles createAggregateClusterRoles: false - pspEnabled: false - pspAnnotations: {} - ## Specify pod annotations - ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor - ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp - ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl - ## - # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*' - # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default' - # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' - ## Global image registry to use if it needs to be overridden for some specific use cases (e.g local registries, custom images, ...) + ## Global image registry to use if it needs to be overridden for some specific use cases (e.g. local registries, custom images, ...) ## imageRegistry: "" @@ -401,7 +396,11 @@ kubePrometheusStack: ## annotations: {} - ## Api that prometheus will use to communicate with alertmanager. Possible values are v1, v2 + ## Additional labels for Alertmanager + ## + additionalLabels: {} + + ## API that Prometheus will use to communicate with alertmanager. 
Possible values are v1, v2 ## apiVersion: v2 @@ -501,7 +500,7 @@ kubePrometheusStack: podDisruptionBudget: enabled: false minAvailable: 1 - maxUnavailable: "" + # maxUnavailable: "" unhealthyPodEvictionPolicy: AlwaysAllow ## Alertmanager configuration directives @@ -550,7 +549,7 @@ kubePrometheusStack: - '/etc/alertmanager/config/*.tmpl' ## Alertmanager configuration directives (as string type, preferred over the config hash map) - ## stringConfig will be used only, if tplConfig is true + ## stringConfig will be used only if tplConfig is true ## ref: https://prometheus.io/docs/alerting/configuration/#configuration-file ## https://prometheus.io/webtools/alerting/routing-tree-editor/ ## @@ -597,9 +596,7 @@ kubePrometheusStack: ingress: enabled: false - # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName - # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress - # ingressClassName: nginx + ingressClassName: "" annotations: {} @@ -685,9 +682,7 @@ kubePrometheusStack: ingressPerReplica: enabled: false - # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName - # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress - # ingressClassName: nginx + ingressClassName: "" annotations: {} labels: {} @@ -920,8 +915,9 @@ kubePrometheusStack: image: registry: quay.io repository: prometheus/alertmanager - tag: v0.28.1 + tag: v0.29.0 sha: "" + pullPolicy: IfNotPresent ## If true then the user will be responsible to provide a secret with alertmanager configuration ## So when true the config part will be ignored (including templateFiles) and the one in the secret will be used @@ -1001,6 +997,9 @@ kubePrometheusStack: # alertmanagerConfigMatcherStrategy: # type: OnNamespace + ## Additional command line arguments to pass to Alertmanager (in addition to those generated by the chart) + additionalArgs: [] + ## Define Log Format # Use logfmt (default) or json logging logFormat: logfmt @@ -1029,7 +1028,7 @@ kubePrometheusStack: # resources: # requests: # storage: 50Gi - # selector: {} + # selector: {} ## The external URL the Alertmanager instances will be available under. This is necessary to generate correct URLs. This is necessary if Alertmanager is not served from root of a DNS name. string false @@ -1122,6 +1121,14 @@ kubePrometheusStack: seccompProfile: type: RuntimeDefault + ## DNS configuration for Alertmanager. + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#monitoring.coreos.com/v1.PodDNSConfig + dnsConfig: {} + + ## DNS policy for Alertmanager. + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#dnspolicystring-alias + dnsPolicy: "" + ## ListenLocal makes the Alertmanager server listen on loopback, so that it does not bind against the Pod IP. ## Note this is only for the Alertmanager UI, not the gossip communication. ## @@ -1132,7 +1139,7 @@ kubePrometheusStack: containers: [] # containers: # - name: oauth-proxy - # image: quay.io/oauth2-proxy/oauth2-proxy:v7.9.0 + # image: quay.io/oauth2-proxy/oauth2-proxy:v7.13.0 # args: # - --upstream=http://127.0.0.1:9093 # - --http-address=0.0.0.0:8081 @@ -1174,15 +1181,15 @@ kubePrometheusStack: clusterAdvertiseAddress: false ## clusterGossipInterval determines interval between gossip attempts. 
- ## Needs to be specified as GoDuration, a time duration that can be parsed by Go’s time.ParseDuration() (e.g. 45ms, 30s, 1m, 1h20m15s) + ## Needs to be specified as GoDuration, a time duration that can be parsed by Go's time.ParseDuration() (e.g. 45ms, 30s, 1m, 1h20m15s) clusterGossipInterval: "" ## clusterPeerTimeout determines timeout for cluster peering. - ## Needs to be specified as GoDuration, a time duration that can be parsed by Go’s time.ParseDuration() (e.g. 45ms, 30s, 1m, 1h20m15s) + ## Needs to be specified as GoDuration, a time duration that can be parsed by Go's time.ParseDuration() (e.g. 45ms, 30s, 1m, 1h20m15s) clusterPeerTimeout: "" ## clusterPushpullInterval determines interval between pushpull attempts. - ## Needs to be specified as GoDuration, a time duration that can be parsed by Go’s time.ParseDuration() (e.g. 45ms, 30s, 1m, 1h20m15s) + ## Needs to be specified as GoDuration, a time duration that can be parsed by Go's time.ParseDuration() (e.g. 45ms, 30s, 1m, 1h20m15s) clusterPushpullInterval: "" ## clusterLabel defines the identifier that uniquely identifies the Alertmanager cluster. @@ -1292,8 +1299,16 @@ kubePrometheusStack: ## defaultDashboardsInterval: 1m + # Administrator credentials when not using an existing secret (see below) adminUser: admin - adminPassword: prom-operator + # adminPassword: strongpassword + + # Use an existing secret for the admin user. + admin: + ## Name of the secret. Can be templated. + existingSecret: "" + userKey: admin-user + passwordKey: admin-password rbac: ## If true, Grafana PSPs will be created @@ -1383,6 +1398,10 @@ kubePrometheusStack: name: Prometheus uid: prometheus + ## Extra jsonData properties to add to the datasource + # extraJsonData: + # prometheusType: Prometheus + ## URL of prometheus datasource ## # url: http://prometheus-stack-prometheus:9090/ @@ -1390,6 +1409,10 @@ kubePrometheusStack: ## Prometheus request timeout in seconds # timeout: 30 + ## Query parameters to add, as a URL-encoded string, + ## to query Prometheus + # customQueryParameters: "" + # If not defined, will use prometheus.prometheusSpec.scrapeInterval or its default # defaultDatasourceScrapeInterval: 15s @@ -1403,7 +1426,7 @@ kubePrometheusStack: ## Create datasource for each Pod of Prometheus StatefulSet; ## this uses by default the headless service `prometheus-operated` which is ## created by Prometheus Operator. 
In case you deployed your own Service for your - ## Prometheus instance, you can specifiy it with the field `prometheusServiceName` + ## Prometheus instance, you can specify it with the field `prometheusServiceName` ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/0fee93e12dc7c2ea1218f19ae25ec6b893460590/pkg/prometheus/statefulset.go#L255-L286 createPrometheusReplicasDatasources: false prometheusServiceName: prometheus-operated @@ -1585,6 +1608,10 @@ kubePrometheusStack: enabled: true namespace: kube-system + # Overrides the job selector in Grafana dashboards and Prometheus rules + # For k3s clusters, change to k3s-server + jobNameOverride: "" + serviceMonitor: enabled: true ## Enable scraping /metrics from kubelet's service @@ -1825,6 +1852,10 @@ kubePrometheusStack: kubeControllerManager: enabled: true + # Overrides the job selector in Grafana dashboards and Prometheus rules + # For k3s clusters, change to k3s-server + jobNameOverride: "" + ## If your kube controller manager is not deployed as a pod, specify IPs it can be found on ## endpoints: [] @@ -2221,6 +2252,10 @@ kubePrometheusStack: kubeScheduler: enabled: true + # Overrides the job selector in Grafana dashboards and Prometheus rules + # For k3s clusters, change to k3s-server + jobNameOverride: "" + ## If your kube scheduler is not deployed as a pod, specify IPs it can be found on ## endpoints: [] @@ -2327,6 +2362,10 @@ kubePrometheusStack: kubeProxy: enabled: true + # Overrides the job selector in Grafana dashboards and Prometheus rules + # For k3s clusters, change to k3s-server + jobNameOverride: "" + ## If your kube proxy is not deployed as a pod, specify IPs it can be found on ## endpoints: [] @@ -2422,9 +2461,7 @@ kubePrometheusStack: ## Configuration for kube-state-metrics subchart ## kube-state-metrics: - namespaceOverride: "" - rbac: - create: true + ## set to true to add the release label so scraping of the servicemonitor with kube-prometheus-stack works out of the box releaseLabel: true ## Enable scraping via kubernetes-service-endpoints @@ -2434,69 +2471,19 @@ kubePrometheusStack: prometheus: monitor: - ## Enable scraping via service monitor ## Disable to prevent duplication if you enable prometheusScrape above - ## enabled: true - ## Scrape interval. If not set, the Prometheus default scrape interval is used. - ## - interval: "" - - ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted. - ## - sampleLimit: 0 - - ## TargetLimit defines a limit on the number of scraped targets that will be accepted. - ## - targetLimit: 0 + ## kube-state-metrics endpoint + http: + ## Keep labels from scraped data, overriding server-side labels + honorLabels: true - ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. - ## - labelLimit: 0 - - ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. - ## - labelNameLengthLimit: 0 - - ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. - ## - labelValueLengthLimit: 0 - - ## Scrape Timeout. If not set, the Prometheus default scrape timeout is used. - ## - scrapeTimeout: "" - - ## proxyUrl: URL of a proxy that should be used for scraping. 
- ## - proxyUrl: "" - - # Keep labels from scraped data, overriding server-side labels - ## - honorLabels: true - - ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. - ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig - ## - metricRelabelings: [] - # - action: keep - # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' - # sourceLabels: [__name__] - - ## RelabelConfigs to apply to samples before scraping - ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig - ## - relabelings: [] - # - sourceLabels: [__meta_kubernetes_pod_node_name] - # separator: ; - # regex: ^(.*)$ - # targetLabel: nodename - # replacement: $1 - # action: replace - - selfMonitor: - enabled: false + ## selfMonitor endpoint + metrics: + ## Keep labels from scraped data, overriding server-side labels + honorLabels: true ## Deploy node exporter as a daemonset to all nodes ## @@ -2519,13 +2506,13 @@ kubePrometheusStack: prometheus-node-exporter: namespaceOverride: "" podLabels: - ## Add the 'node-exporter' label to be used by serviceMonitor to match standard common usage in rules and grafana dashboards + ## Add the 'node-exporter' label to be used by serviceMonitor and podMonitor to match standard common usage in rules and grafana dashboards ## jobLabel: node-exporter releaseLabel: true extraArgs: - - --collector.filesystem.mount-points-exclude=^/(dev|proc|sys|var/lib/docker/.+|var/lib/kubelet/.+)($|/) - - --collector.filesystem.fs-types-exclude=^(autofs|binfmt_misc|bpf|cgroup2?|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|iso9660|mqueue|nsfs|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|selinuxfs|squashfs|sysfs|tracefs)$ + - --collector.filesystem.mount-points-exclude=^/(dev|proc|sys|run/containerd/.+|var/lib/docker/.+|var/lib/kubelet/.+)($|/) + - --collector.filesystem.fs-types-exclude=^(autofs|binfmt_misc|bpf|cgroup2?|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|iso9660|mqueue|nsfs|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|selinuxfs|squashfs|sysfs|tracefs|erofs)$ service: portName: http-metrics ipDualStack: @@ -2599,6 +2586,11 @@ kubePrometheusStack: # attachMetadata: # node: false + podMonitor: + enabled: false + + jobLabel: jobLabel + rbac: ## If true, create PSPs for node-exporter ## @@ -2670,6 +2662,7 @@ kubePrometheusStack: namespaceSelector: {} objectSelector: {} + matchConditions: {} mutatingWebhookConfiguration: annotations: {} @@ -2694,7 +2687,7 @@ kubePrometheusStack: podDisruptionBudget: enabled: false minAvailable: 1 - maxUnavailable: "" + # maxUnavailable: "" unhealthyPodEvictionPolicy: AlwaysAllow ## Number of old replicasets to retain ## @@ -2905,7 +2898,7 @@ kubePrometheusStack: image: registry: registry.k8s.io repository: ingress-nginx/kube-webhook-certgen - tag: v1.5.3 # latest tag: https://github.com/kubernetes/ingress-nginx/blob/main/images/kube-webhook-certgen/TAG + tag: v1.6.4 # latest tag: https://github.com/kubernetes/ingress-nginx/blob/main/images/kube-webhook-certgen/TAG sha: "" pullPolicy: IfNotPresent resources: {} @@ -3096,7 +3089,7 @@ kubePrometheusStack: podDisruptionBudget: enabled: false minAvailable: 1 - maxUnavailable: "" + # maxUnavailable: "" unhealthyPodEvictionPolicy: AlwaysAllow ## Assign a PriorityClassName to pods if set @@ -3343,7 +3336,7 @@ kubePrometheusStack: thanosImage: registry: quay.io repository: thanos/thanos - tag: v0.38.0 + tag: v0.40.1 sha: "" ## Set 
a Label Selector to filter watched prometheus and prometheusAgent @@ -3388,6 +3381,10 @@ kubePrometheusStack: ## annotations: {} + ## Additional labels for Prometheus + ## + additionalLabels: {} + ## Configure network policy for the prometheus networkPolicy: enabled: false @@ -3398,6 +3395,8 @@ kubePrometheusStack: # * cilium for cilium.io/v2/CiliumNetworkPolicy flavor: kubernetes + namespace: + # cilium: # endpointSelector: # egress: @@ -3489,9 +3488,6 @@ kubePrometheusStack: ## relabel configs to apply to samples before ingestion. relabelings: [] - ## Set default scrapeProtocols for Prometheus instances - ## https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#scrapeprotocolstring-alias - scrapeProtocols: [] # Service for external access to sidecar # Enabling this creates a service to expose thanos-sidecar outside the cluster. thanosServiceExternal: @@ -3640,16 +3636,14 @@ kubePrometheusStack: podDisruptionBudget: enabled: false minAvailable: 1 - maxUnavailable: "" + # maxUnavailable: "" unhealthyPodEvictionPolicy: AlwaysAllow # Ingress exposes thanos sidecar outside the cluster thanosIngress: enabled: false - # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName - # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress - # ingressClassName: nginx + ingressClassName: "" annotations: {} labels: {} @@ -3697,9 +3691,7 @@ kubePrometheusStack: ingress: enabled: false - # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName - # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress - # ingressClassName: nginx + ingressClassName: "" annotations: {} labels: {} @@ -3778,9 +3770,7 @@ kubePrometheusStack: ingressPerReplica: enabled: false - # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName - # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress - # ingressClassName: nginx + ingressClassName: "" annotations: {} labels: {} @@ -3816,13 +3806,6 @@ kubePrometheusStack: ## prefix: "prometheus" - ## Configure additional options for default pod security policy for Prometheus - ## ref: https://kubernetes.io/docs/concepts/security/pod-security-policy/ - podSecurityPolicy: - allowedCapabilities: [] - allowedHostPaths: [] - volumes: [] - serviceMonitor: ## If true, create a serviceMonitor for prometheus ## @@ -3907,9 +3890,9 @@ kubePrometheusStack: disableCompaction: false ## AutomountServiceAccountToken indicates whether a service account token should be automatically mounted in the pod, - ## If the field isn’t set, the operator mounts the service account token by default. + ## If the field isn't set, the operator mounts the service account token by default. ## Warning: be aware that by default, Prometheus requires the service account token for Kubernetes service discovery, - ## It is possible to use strategic merge patch to project the service account token into the ‘prometheus’ container. + ## It is possible to use strategic merge patch to project the service account token into the 'prometheus' container. 
automountServiceAccountToken: true ## APIServerConfig @@ -3948,6 +3931,11 @@ kubePrometheusStack: # caFile: /etc/prometheus/secrets/istio.default/root-cert.pem # certFile: /etc/prometheus/secrets/istio.default/cert-chain.pem + ## PodTargetLabels are appended to the `spec.podTargetLabels` field of all PodMonitor and ServiceMonitor objects. + ## + podTargetLabels: [] + # - customlabel + ## Interval between consecutive evaluations. ## evaluationInterval: "" @@ -3956,6 +3944,9 @@ kubePrometheusStack: ## listenLocal: false + ## enableOTLPReceiver enables the OTLP receiver for Prometheus. + enableOTLPReceiver: false + ## EnableAdminAPI enables Prometheus the administrative HTTP API which includes functionality such as deleting time series. ## This is disabled by default. ## ref: https://prometheus.io/docs/prometheus/latest/querying/api/#tsdb-admin-apis @@ -3983,6 +3974,14 @@ kubePrometheusStack: enableFeatures: [] # - exemplar-storage + ## https://prometheus.io/docs/guides/opentelemetry + ## + otlp: {} + # promoteResourceAttributes: [] + # keepIdentifyingResourceAttributes: false + # translationStrategy: NoUTF8EscapingWithSuffixes + # convertHistogramsToNHCB: false + ## serviceName: @@ -3991,8 +3990,9 @@ kubePrometheusStack: image: registry: quay.io repository: prometheus/prometheus - tag: v3.3.1 + tag: v3.7.3 sha: "" + pullPolicy: IfNotPresent ## Tolerations for use with node taints ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ @@ -4209,7 +4209,7 @@ kubePrometheusStack: retention: 10d ## Maximum size of metrics - ## + ## Unit format should be in the form of "50GiB" retentionSize: "" ## Allow out-of-order/out-of-bounds samples ingested into Prometheus for a specified duration @@ -4321,7 +4321,7 @@ kubePrometheusStack: # resources: # requests: # storage: 50Gi - # selector: {} + # selector: {} ## Using tmpfs volume ## @@ -4360,7 +4360,7 @@ kubePrometheusStack: # regex: __meta_kubernetes_node_label_(.+) # - source_labels: [__address__] # action: replace - # targetLabel: __address__ + # target_label: __address__ # regex: ([^:;]+):(\d+) # replacement: ${1}:2379 # - source_labels: [__meta_kubernetes_node_name] @@ -4368,7 +4368,7 @@ kubePrometheusStack: # regex: .*mst.* # - source_labels: [__meta_kubernetes_node_name] # action: replace - # targetLabel: node + # target_label: node # regex: (.*) # replacement: ${1} # metric_relabel_configs: @@ -4459,6 +4459,14 @@ kubePrometheusStack: seccompProfile: type: RuntimeDefault + ## DNS configuration for Prometheus. + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#monitoring.coreos.com/v1.PodDNSConfig + dnsConfig: {} + + ## DNS policy for Prometheus. + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#dnspolicystring-alias + dnsPolicy: "" + ## Priority class assigned to the Pods ## priorityClassName: "" @@ -4498,7 +4506,7 @@ kubePrometheusStack: containers: [] # containers: # - name: oauth-proxy - # image: quay.io/oauth2-proxy/oauth2-proxy:v7.9.0 + # image: quay.io/oauth2-proxy/oauth2-proxy:v7.13.0 # args: # - --upstream=http://127.0.0.1:9090 # - --http-address=0.0.0.0:8081 @@ -4615,7 +4623,7 @@ kubePrometheusStack: hostNetwork: false # HostAlias holds the mapping between IP and hostnames that will be injected - # as an entry in the pod’s hosts file. + # as an entry in the pod's hosts file. 
hostAliases: [] # - ip: 10.10.0.100 # hostnames: @@ -4627,7 +4635,7 @@ kubePrometheusStack: tracingConfig: {} ## Defines the service discovery role used to discover targets from ServiceMonitor objects and Alertmanager endpoints. - ## If set, the value should be either “Endpoints” or “EndpointSlice”. If unset, the operator assumes the “Endpoints” role. + ## If set, the value should be either "Endpoints" or "EndpointSlice". If unset, the operator assumes the "Endpoints" role. serviceDiscoveryRole: "" ## Additional configuration which is not covered by the properties above. (passed through tpl) @@ -4645,6 +4653,10 @@ kubePrometheusStack: ## minutes). maximumStartupDurationSeconds: 0 + ## Set default scrapeProtocols for Prometheus instances + ## https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#scrapeprotocolstring-alias + scrapeProtocols: [] + additionalRulesForClusterRole: [] # - apiGroups: [ "" ] # resources: @@ -4776,6 +4788,12 @@ kubePrometheusStack: ## # fallbackScrapeProtocol: "" + ## Attaches node metadata to the discovered targets + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#monitoring.coreos.com/v1.AttachMetadata + ## + # attachMetadata: + # node: true + additionalPodMonitors: [] ## Name of the PodMonitor to create ## @@ -4836,6 +4854,12 @@ kubePrometheusStack: ## # fallbackScrapeProtocol: "" + ## Attaches node metadata to the discovered targets + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#monitoring.coreos.com/v1.AttachMetadata + ## + # attachMetadata: + # node: true + ## Configuration for thanosRuler ## ref: https://thanos.io/tip/components/rule.md/ ## @@ -4863,15 +4887,13 @@ kubePrometheusStack: podDisruptionBudget: enabled: false minAvailable: 1 - maxUnavailable: "" + # maxUnavailable: "" unhealthyPodEvictionPolicy: AlwaysAllow ingress: enabled: false - # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName - # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress - # ingressClassName: nginx + ingressClassName: "" annotations: {} @@ -5071,7 +5093,7 @@ kubePrometheusStack: image: registry: quay.io repository: thanos/thanos - tag: v0.38.0 + tag: v0.40.1 sha: "" ## Namespaces to be selected for PrometheusRules discovery. @@ -5137,7 +5159,7 @@ kubePrometheusStack: # resources: # requests: # storage: 50Gi - # selector: {} + # selector: {} ## AlertmanagerConfig define configuration for connecting to alertmanager. ## Only available with Thanos v0.10.0 and higher. Maps to the alertmanagers.config Thanos Ruler arg. 
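For orientation before the kyverno-policies hunks below: the kube-prometheus-stack values.yaml changes above surface several new knobs through this wrapper chart, including `defaultRules.appNamespacesOperator`, a Grafana `admin.existingSecret` block, per-component `jobNameOverride` (useful on k3s), first-class `ingressClassName` fields, `prometheusSpec.enableOTLPReceiver` with its `otlp` tuning block, and `prometheusSpec.scrapeProtocols`. The override below is a minimal, illustrative sketch of how a consumer of the chart might set them; the secret name, hostname, ingress class, and promoted attribute are placeholders, not chart defaults.

```yaml
# Illustrative values override against this wrapper chart (top-level key: kubePrometheusStack).
# Only keys introduced or relocated in the diff above are shown; placeholder values are marked.
kubePrometheusStack:
  defaultRules:
    # Include ("=~", the default) or exclude ("!~") namespaces matched by appNamespacesTarget
    appNamespacesOperator: "=~"
    appNamespacesTarget: ".*"
  grafana:
    # Read admin credentials from an existing secret instead of a plain adminPassword
    admin:
      existingSecret: "grafana-admin"      # placeholder secret name
      userKey: admin-user
      passwordKey: admin-password
  kubelet:
    # Override the job selector used by dashboards and rules (e.g. "k3s-server" on k3s)
    jobNameOverride: ""
  prometheus:
    ingress:
      enabled: true
      # ingressClassName is now a first-class field rather than a commented-out example
      ingressClassName: "nginx"            # placeholder ingress class
      hosts:
        - prometheus.example.com           # placeholder hostname
    prometheusSpec:
      # Native OTLP ingestion plus its tuning block
      enableOTLPReceiver: true
      otlp:
        promoteResourceAttributes:
          - service.namespace              # example resource attribute to promote
      # Default scrape protocols now live here instead of under the thanosServiceMonitor block
      scrapeProtocols: []
```

Keys not shown keep the defaults visible in the diff; the `podSecurityPolicy` block and the explicit `maxUnavailable: ""` defaults are dropped from the chart's values in this change and need not be carried in overrides.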
diff --git a/charts/kyverno-policies/Chart.lock b/charts/kyverno-policies/Chart.lock index 4ce80734..b17c16cd 100644 --- a/charts/kyverno-policies/Chart.lock +++ b/charts/kyverno-policies/Chart.lock @@ -1,6 +1,6 @@ dependencies: - name: kyverno-policies repository: https://kyverno.github.io/kyverno/ - version: 3.4.1 -digest: sha256:b89431a68f4f8f139e462342b965ceac69e2e75b17a53008e94b61ecfd3f79c1 -generated: "2025-05-07T10:22:57.488368538Z" + version: 3.6.0 +digest: sha256:3d2d171eb6179825f9414e937c92faaa237d57554e6f055081b293fe2a6d5344 +generated: "2025-11-26T10:26:46.534075853Z" diff --git a/charts/kyverno-policies/Chart.yaml b/charts/kyverno-policies/Chart.yaml index 8628e595..b55628e5 100644 --- a/charts/kyverno-policies/Chart.yaml +++ b/charts/kyverno-policies/Chart.yaml @@ -15,7 +15,7 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.1 +version: 0.1.2 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to @@ -24,7 +24,7 @@ version: 0.1.1 appVersion: "1.13.4" dependencies: - name: kyverno-policies - version: 3.4.1 + version: 3.6.0 repository: "https://kyverno.github.io/kyverno/" alias: kyvernopolicies maintainers: diff --git a/charts/kyverno-policies/README.md b/charts/kyverno-policies/README.md index 24023ee5..bbfbd750 100644 --- a/charts/kyverno-policies/README.md +++ b/charts/kyverno-policies/README.md @@ -1,6 +1,6 @@ # kyverno-policies -![Version: 0.1.1](https://img.shields.io/badge/Version-0.1.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 1.13.4](https://img.shields.io/badge/AppVersion-1.13.4-informational?style=flat-square) +![Version: 0.1.2](https://img.shields.io/badge/Version-0.1.2-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 1.13.4](https://img.shields.io/badge/AppVersion-1.13.4-informational?style=flat-square) ## Prerequisites @@ -11,7 +11,7 @@ | Repository | Name | Version | |------------|------|---------| -| https://kyverno.github.io/kyverno/ | kyvernopolicies(kyverno-policies) | 3.4.1 | +| https://kyverno.github.io/kyverno/ | kyvernopolicies(kyverno-policies) | 3.6.0 | ## Maintainers @@ -28,11 +28,12 @@ A Helm chart for Kubernetes | Key | Type | Default | Description | |-----|------|---------|-------------| -| kyvernopolicies.autogenControllers | string | `""` | Customize the target Pod controllers for the auto-generated rules. (Eg. `none`, `Deployment`, `DaemonSet,Deployment,StatefulSet`) For more info https://kyverno.io/docs/writing-policies/autogen/. | +| kyvernopolicies.autogenControllers | string | `""` | Customize the target Pod controllers for the auto-generated rules. (Eg. `none`, `Deployment`, `DaemonSet,Deployment,StatefulSet`) For more info https://kyverno.io/docs/policy-types/cluster-policy/autogen/. | | kyvernopolicies.background | bool | `true` | Policies background mode | +| kyvernopolicies.customAnnotations | object | `{}` | Additional Annotations. | | kyvernopolicies.customLabels | object | `{}` | Additional labels. | | kyvernopolicies.customPolicies | list | `[]` | Additional custom policies to include. 
| -| kyvernopolicies.failurePolicy | string | `"Fail"` | API server behavior if the webhook fails to respond ('Ignore', 'Fail') For more info: https://kyverno.io/docs/writing-policies/policy-settings/ | +| kyvernopolicies.failurePolicy | string | `"Fail"` | API server behavior if the webhook fails to respond ('Ignore', 'Fail') For more info: https://kyverno.io/docs/policy-types/cluster-policy/policy-settings/ | | kyvernopolicies.includeOtherPolicies | list | `[]` | Additional policies to include from `other`. | | kyvernopolicies.includeRestrictedPolicies | list | `[]` | Additional policies to include from `restricted`. | | kyvernopolicies.kubeVersionOverride | string | `nil` | Kubernetes version override Override default value of kubeVersion set by release team taken from Chart.yaml with custom value. Ideally range of versions no more than two prior (ex., 1.28-1.31), must be enclosed in quotes. | @@ -45,8 +46,8 @@ A Helm chart for Kubernetes | kyvernopolicies.policyKind | string | `"ClusterPolicy"` | Policy kind (`ClusterPolicy`, `Policy`) Set to `Policy` if you need namespaced policies and not cluster policies | | kyvernopolicies.policyPreconditions | object | `{}` | Add preconditions to individual policies. Policies with multiple rules can have individual rules excluded by using the name of the rule as the key in the `policyPreconditions` map. | | kyvernopolicies.skipBackgroundRequests | bool | `nil` | SkipBackgroundRequests bypasses admission requests that are sent by the background controller | -| kyvernopolicies.validationAllowExistingViolations | bool | `true` | Validate already existing resources. For more info https://kyverno.io/docs/writing-policies/validate. | -| kyvernopolicies.validationFailureAction | string | `"Audit"` | Validation failure action (`Audit`, `Enforce`). For more info https://kyverno.io/docs/writing-policies/validate. | +| kyvernopolicies.validationAllowExistingViolations | bool | `true` | Validate already existing resources. For more info https://kyverno.io/docs/policy-types/. | +| kyvernopolicies.validationFailureAction | string | `"Audit"` | Validation failure action (`Audit`, `Enforce`). For more info https://kyverno.io/docs/policy-types/cluster-policy/validate. | | kyvernopolicies.validationFailureActionByPolicy | object | `{}` | Define validationFailureActionByPolicy for specific policies. Override the defined `validationFailureAction` with a individual validationFailureAction for individual Policies. | | kyvernopolicies.validationFailureActionOverrides | object | `{"all":[]}` | Define validationFailureActionOverrides for specific policies. The overrides for `all` will apply to all policies. 
| @@ -75,7 +76,7 @@ spec: source: repoURL: "https://edixos.github.io/ekp-helm" - targetRevision: "0.1.1" + targetRevision: "0.1.2" chart: kyverno-policies path: '' helm: diff --git a/charts/kyverno-policies/charts/kyverno-policies-3.4.1.tgz b/charts/kyverno-policies/charts/kyverno-policies-3.4.1.tgz deleted file mode 100644 index f9a948ad..00000000 Binary files a/charts/kyverno-policies/charts/kyverno-policies-3.4.1.tgz and /dev/null differ diff --git a/charts/kyverno-policies/charts/kyverno-policies-3.6.0.tgz b/charts/kyverno-policies/charts/kyverno-policies-3.6.0.tgz new file mode 100644 index 00000000..011b9fc6 Binary files /dev/null and b/charts/kyverno-policies/charts/kyverno-policies-3.6.0.tgz differ diff --git a/charts/kyverno-policies/values.yaml b/charts/kyverno-policies/values.yaml index 507647bd..47276e32 100644 --- a/charts/kyverno-policies/values.yaml +++ b/charts/kyverno-policies/values.yaml @@ -30,11 +30,11 @@ kyvernopolicies: # spec: # spec # -- API server behavior if the webhook fails to respond ('Ignore', 'Fail') - # For more info: https://kyverno.io/docs/writing-policies/policy-settings/ + # For more info: https://kyverno.io/docs/policy-types/cluster-policy/policy-settings/ failurePolicy: Fail # -- Validation failure action (`Audit`, `Enforce`). - # For more info https://kyverno.io/docs/writing-policies/validate. + # For more info https://kyverno.io/docs/policy-types/cluster-policy/validate. validationFailureAction: Audit # -- Define validationFailureActionByPolicy for specific policies. @@ -58,7 +58,7 @@ kyvernopolicies: # - fluent # -- Validate already existing resources. - # For more info https://kyverno.io/docs/writing-policies/validate. + # For more info https://kyverno.io/docs/policy-types/. validationAllowExistingViolations: true # -- Exclude resources from individual policies. @@ -102,12 +102,15 @@ kyvernopolicies: # value: "dcgm-exporter*" # -- Customize the target Pod controllers for the auto-generated rules. (Eg. `none`, `Deployment`, `DaemonSet,Deployment,StatefulSet`) - # For more info https://kyverno.io/docs/writing-policies/autogen/. + # For more info https://kyverno.io/docs/policy-types/cluster-policy/autogen/. autogenControllers: "" # -- Name override. nameOverride: + # -- Additional Annotations. + customAnnotations: {} + # -- Additional labels. customLabels: {} diff --git a/charts/kyverno/Chart.lock b/charts/kyverno/Chart.lock index 405959e7..917a36b1 100644 --- a/charts/kyverno/Chart.lock +++ b/charts/kyverno/Chart.lock @@ -1,6 +1,6 @@ dependencies: - name: kyverno repository: https://kyverno.github.io/kyverno/ - version: 3.4.1 -digest: sha256:91a0bea17ffa77211290f7a569dc9e5f9383814f736c25caea2a07a2b500c2ff -generated: "2025-05-07T10:25:24.475931183Z" + version: 3.6.0 +digest: sha256:b55e0649bfb39ce8f327dd0d85d7e242d926b2f8d67e9a6378b9a01b50735e62 +generated: "2025-11-26T10:25:24.754386225Z" diff --git a/charts/kyverno/Chart.yaml b/charts/kyverno/Chart.yaml index dbee9a78..5881c082 100644 --- a/charts/kyverno/Chart.yaml +++ b/charts/kyverno/Chart.yaml @@ -15,7 +15,7 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.2 +version: 0.1.3 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. 
Versions are not expected to @@ -24,7 +24,7 @@ version: 0.1.2 appVersion: "1.13.4" dependencies: - name: kyverno - version: 3.4.1 + version: 3.6.0 repository: "https://kyverno.github.io/kyverno/" maintainers: - name: wiemaouadi diff --git a/charts/kyverno/README.md b/charts/kyverno/README.md index 9c95965b..1a34c818 100644 --- a/charts/kyverno/README.md +++ b/charts/kyverno/README.md @@ -1,6 +1,6 @@ # kyverno -![Version: 0.1.2](https://img.shields.io/badge/Version-0.1.2-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 1.13.4](https://img.shields.io/badge/AppVersion-1.13.4-informational?style=flat-square) +![Version: 0.1.3](https://img.shields.io/badge/Version-0.1.3-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 1.13.4](https://img.shields.io/badge/AppVersion-1.13.4-informational?style=flat-square) ## Prerequisites @@ -11,7 +11,7 @@ | Repository | Name | Version | |------------|------|---------| -| https://kyverno.github.io/kyverno/ | kyverno | 3.4.1 | +| https://kyverno.github.io/kyverno/ | kyverno | 3.6.0 | ## Maintainers @@ -48,6 +48,7 @@ A Helm chart for kyverno | kyverno.admissionController.container.resources.limits | object | `{"memory":"384Mi"}` | Pod resource limits | | kyverno.admissionController.container.resources.requests | object | `{"cpu":"100m","memory":"128Mi"}` | Pod resource requests | | kyverno.admissionController.container.securityContext | object | `{"allowPrivilegeEscalation":false,"capabilities":{"drop":["ALL"]},"privileged":false,"readOnlyRootFilesystem":true,"runAsNonRoot":true,"seccompProfile":{"type":"RuntimeDefault"}}` | Container security context | +| kyverno.admissionController.crdWatcher | bool | `false` | Enable/Disable custom resource watcher to invalidate cache | | kyverno.admissionController.createSelfSignedCert | bool | `false` | Create self-signed certificates at deployment time. The certificates won't be automatically renewed if this is set to `true`. | | kyverno.admissionController.dnsConfig | object | `{}` | `dnsConfig` allows to specify DNS configuration for the pod. For further reference: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-dns-config. | | kyverno.admissionController.dnsPolicy | string | `"ClusterFirst"` | `dnsPolicy` determines the manner in which DNS resolution happens in the cluster. In case of `hostNetwork: true`, usually, the `dnsPolicy` is suitable to be `ClusterFirstWithHostNet`. For further reference: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy. | @@ -77,6 +78,7 @@ A Helm chart for kyverno | kyverno.admissionController.metricsService.create | bool | `true` | Create service. | | kyverno.admissionController.metricsService.nodePort | string | `nil` | Service node port. Only used if `type` is `NodePort`. | | kyverno.admissionController.metricsService.port | int | `8000` | Service port. Kyverno's metrics server will be exposed at this port. | +| kyverno.admissionController.metricsService.trafficDistribution | string | `nil` | Service traffic distribution policy. Set to `PreferClose` to route traffic to nearby endpoints, reducing latency and cross-zone costs. | | kyverno.admissionController.metricsService.type | string | `"ClusterIP"` | Service type. 
| | kyverno.admissionController.networkPolicy.enabled | bool | `false` | When true, use a NetworkPolicy to allow ingress to the webhook This is useful on clusters using Calico and/or native k8s network policies in a default-deny setup. | | kyverno.admissionController.networkPolicy.ingressFrom | list | `[]` | A list of valid from selectors according to https://kubernetes.io/docs/concepts/services-networking/network-policies. | @@ -88,6 +90,7 @@ A Helm chart for kyverno | kyverno.admissionController.podDisruptionBudget.enabled | bool | `false` | Enable PodDisruptionBudget. Will always be enabled if replicas > 1. This non-declarative behavior should ideally be avoided, but changing it now would be breaking. | | kyverno.admissionController.podDisruptionBudget.maxUnavailable | string | `nil` | Configures the maximum unavailable pods for disruptions. Cannot be used if `minAvailable` is set. | | kyverno.admissionController.podDisruptionBudget.minAvailable | int | `1` | Configures the minimum available pods for disruptions. Cannot be used if `maxUnavailable` is set. | +| kyverno.admissionController.podDisruptionBudget.unhealthyPodEvictionPolicy | string | `nil` | Unhealthy pod eviction policy to be used. Possible values are `IfHealthyBudget` or `AlwaysAllow`. | | kyverno.admissionController.podLabels | object | `{}` | Additional labels to add to each pod | | kyverno.admissionController.podSecurityContext | object | `{}` | Security context for the pod | | kyverno.admissionController.priorityClassName | string | `""` | Optional priority class | @@ -101,6 +104,7 @@ A Helm chart for kyverno | kyverno.admissionController.rbac.create | bool | `true` | Create RBAC resources | | kyverno.admissionController.rbac.createViewRoleBinding | bool | `true` | Create rolebinding to view role | | kyverno.admissionController.rbac.serviceAccount.annotations | object | `{}` | Annotations for the ServiceAccount | +| kyverno.admissionController.rbac.serviceAccount.automountServiceAccountToken | bool | `true` | Toggle automounting of the ServiceAccount | | kyverno.admissionController.rbac.serviceAccount.name | string | `nil` | The ServiceAccount name | | kyverno.admissionController.rbac.viewRoleName | string | `"view"` | The view role to use in the rolebinding | | kyverno.admissionController.readinessProbe | object | See [values.yaml](values.yaml) | Readiness Probe. The block is directly forwarded into the deployment, so you can use whatever readinessProbe configuration you want. ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/ | @@ -110,7 +114,9 @@ A Helm chart for kyverno | kyverno.admissionController.service.annotations | object | `{}` | Service annotations. | | kyverno.admissionController.service.nodePort | string | `nil` | Service node port. Only used if `type` is `NodePort`. | | kyverno.admissionController.service.port | int | `443` | Service port. | +| kyverno.admissionController.service.trafficDistribution | string | `nil` | Service traffic distribution policy. Set to `PreferClose` to route traffic to nearby endpoints, reducing latency and cross-zone costs. | | kyverno.admissionController.service.type | string | `"ClusterIP"` | Service type. 
| +| kyverno.admissionController.serviceMonitor.additionalAnnotations | object | `{}` | Additional annotations | | kyverno.admissionController.serviceMonitor.additionalLabels | object | `{}` | Additional labels | | kyverno.admissionController.serviceMonitor.enabled | bool | `false` | Create a `ServiceMonitor` to collect Prometheus metrics. | | kyverno.admissionController.serviceMonitor.interval | string | `"30s"` | Interval to scrape metrics | @@ -158,6 +164,7 @@ A Helm chart for kyverno | kyverno.backgroundController.metricsService.create | bool | `true` | Create service. | | kyverno.backgroundController.metricsService.nodePort | string | `nil` | Service node port. Only used if `metricsService.type` is `NodePort`. | | kyverno.backgroundController.metricsService.port | int | `8000` | Service port. Metrics server will be exposed at this port. | +| kyverno.backgroundController.metricsService.trafficDistribution | string | `nil` | Service traffic distribution policy. Set to `PreferClose` to route traffic to nearby endpoints, reducing latency and cross-zone costs. | | kyverno.backgroundController.metricsService.type | string | `"ClusterIP"` | Service type. | | kyverno.backgroundController.networkPolicy.enabled | bool | `false` | When true, use a NetworkPolicy to allow ingress to the webhook This is useful on clusters using Calico and/or native k8s network policies in a default-deny setup. | | kyverno.backgroundController.networkPolicy.ingressFrom | list | `[]` | A list of valid from selectors according to https://kubernetes.io/docs/concepts/services-networking/network-policies. | @@ -169,6 +176,7 @@ A Helm chart for kyverno | kyverno.backgroundController.podDisruptionBudget.enabled | bool | `false` | Enable PodDisruptionBudget. Will always be enabled if replicas > 1. This non-declarative behavior should ideally be avoided, but changing it now would be breaking. | | kyverno.backgroundController.podDisruptionBudget.maxUnavailable | string | `nil` | Configures the maximum unavailable pods for disruptions. Cannot be used if `minAvailable` is set. | | kyverno.backgroundController.podDisruptionBudget.minAvailable | int | `1` | Configures the minimum available pods for disruptions. Cannot be used if `maxUnavailable` is set. | +| kyverno.backgroundController.podDisruptionBudget.unhealthyPodEvictionPolicy | string | `nil` | Unhealthy pod eviction policy to be used. Possible values are `IfHealthyBudget` or `AlwaysAllow`. 
| | kyverno.backgroundController.podLabels | object | `{}` | Additional labels to add to each pod | | kyverno.backgroundController.podSecurityContext | object | `{}` | Security context for the pod | | kyverno.backgroundController.priorityClassName | string | `""` | Optional priority class | @@ -181,6 +189,7 @@ A Helm chart for kyverno | kyverno.backgroundController.rbac.create | bool | `true` | Create RBAC resources | | kyverno.backgroundController.rbac.createViewRoleBinding | bool | `true` | Create rolebinding to view role | | kyverno.backgroundController.rbac.serviceAccount.annotations | object | `{}` | Annotations for the ServiceAccount | +| kyverno.backgroundController.rbac.serviceAccount.automountServiceAccountToken | bool | `true` | Toggle automounting of the ServiceAccount | | kyverno.backgroundController.rbac.serviceAccount.name | string | `nil` | Service account name | | kyverno.backgroundController.rbac.viewRoleName | string | `"view"` | The view role to use in the rolebinding | | kyverno.backgroundController.replicas | int | `nil` | Desired number of pods | @@ -190,6 +199,7 @@ A Helm chart for kyverno | kyverno.backgroundController.revisionHistoryLimit | int | `10` | The number of revisions to keep | | kyverno.backgroundController.securityContext | object | `{"allowPrivilegeEscalation":false,"capabilities":{"drop":["ALL"]},"privileged":false,"readOnlyRootFilesystem":true,"runAsNonRoot":true,"seccompProfile":{"type":"RuntimeDefault"}}` | Security context for the containers | | kyverno.backgroundController.server | object | `{"port":9443}` | backgroundController server port in case you are using hostNetwork: true, you might want to change the port the backgroundController is listening to | +| kyverno.backgroundController.serviceMonitor.additionalAnnotations | object | `{}` | Additional annotations | | kyverno.backgroundController.serviceMonitor.additionalLabels | object | `{}` | Additional labels | | kyverno.backgroundController.serviceMonitor.enabled | bool | `false` | Create a `ServiceMonitor` to collect Prometheus metrics. | | kyverno.backgroundController.serviceMonitor.interval | string | `"30s"` | Interval to scrape metrics | @@ -232,6 +242,7 @@ A Helm chart for kyverno | kyverno.cleanupController.metricsService.create | bool | `true` | Create service. | | kyverno.cleanupController.metricsService.nodePort | string | `nil` | Service node port. Only used if `metricsService.type` is `NodePort`. | | kyverno.cleanupController.metricsService.port | int | `8000` | Service port. Metrics server will be exposed at this port. | +| kyverno.cleanupController.metricsService.trafficDistribution | string | `nil` | Service traffic distribution policy. Set to `PreferClose` to route traffic to nearby endpoints, reducing latency and cross-zone costs. | | kyverno.cleanupController.metricsService.type | string | `"ClusterIP"` | Service type. | | kyverno.cleanupController.networkPolicy.enabled | bool | `false` | When true, use a NetworkPolicy to allow ingress to the webhook This is useful on clusters using Calico and/or native k8s network policies in a default-deny setup. | | kyverno.cleanupController.networkPolicy.ingressFrom | list | `[]` | A list of valid from selectors according to https://kubernetes.io/docs/concepts/services-networking/network-policies. | @@ -243,6 +254,7 @@ A Helm chart for kyverno | kyverno.cleanupController.podDisruptionBudget.enabled | bool | `false` | Enable PodDisruptionBudget. Will always be enabled if replicas > 1. 
This non-declarative behavior should ideally be avoided, but changing it now would be breaking. | | kyverno.cleanupController.podDisruptionBudget.maxUnavailable | string | `nil` | Configures the maximum unavailable pods for disruptions. Cannot be used if `minAvailable` is set. | | kyverno.cleanupController.podDisruptionBudget.minAvailable | int | `1` | Configures the minimum available pods for disruptions. Cannot be used if `maxUnavailable` is set. | +| kyverno.cleanupController.podDisruptionBudget.unhealthyPodEvictionPolicy | string | `nil` | Unhealthy pod eviction policy to be used. Possible values are `IfHealthyBudget` or `AlwaysAllow`. | | kyverno.cleanupController.podLabels | object | `{}` | Additional labels to add to each pod | | kyverno.cleanupController.podSecurityContext | object | `{}` | Security context for the pod | | kyverno.cleanupController.priorityClassName | string | `""` | Optional priority class | @@ -253,6 +265,7 @@ A Helm chart for kyverno | kyverno.cleanupController.rbac.clusterRole.extraResources | list | `[]` | Extra resource permissions to add in the cluster role | | kyverno.cleanupController.rbac.create | bool | `true` | Create RBAC resources | | kyverno.cleanupController.rbac.serviceAccount.annotations | object | `{}` | Annotations for the ServiceAccount | +| kyverno.cleanupController.rbac.serviceAccount.automountServiceAccountToken | bool | `true` | Toggle automounting of the ServiceAccount | | kyverno.cleanupController.rbac.serviceAccount.name | string | `nil` | Service account name | | kyverno.cleanupController.readinessProbe | object | See [values.yaml](values.yaml) | Readiness Probe. The block is directly forwarded into the deployment, so you can use whatever readinessProbe configuration you want. ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/ | | kyverno.cleanupController.replicas | int | `nil` | Desired number of pods | @@ -265,7 +278,9 @@ A Helm chart for kyverno | kyverno.cleanupController.service.annotations | object | `{}` | Service annotations. | | kyverno.cleanupController.service.nodePort | string | `nil` | Service node port. Only used if `service.type` is `NodePort`. | | kyverno.cleanupController.service.port | int | `443` | Service port. | +| kyverno.cleanupController.service.trafficDistribution | string | `nil` | Service traffic distribution policy. Set to `PreferClose` to route traffic to nearby endpoints, reducing latency and cross-zone costs. | | kyverno.cleanupController.service.type | string | `"ClusterIP"` | Service type. | +| kyverno.cleanupController.serviceMonitor.additionalAnnotations | object | `{}` | Additional annotations | | kyverno.cleanupController.serviceMonitor.additionalLabels | object | `{}` | Additional labels | | kyverno.cleanupController.serviceMonitor.enabled | bool | `false` | Create a `ServiceMonitor` to collect Prometheus metrics. | | kyverno.cleanupController.serviceMonitor.interval | string | `"30s"` | Interval to scrape metrics | @@ -283,7 +298,6 @@ A Helm chart for kyverno | kyverno.cleanupController.tracing.enabled | bool | `false` | Enable tracing | | kyverno.cleanupController.tracing.port | string | `nil` | Traces receiver port | | kyverno.cleanupController.updateStrategy | object | See [values.yaml](values.yaml) | Deployment update strategy. 
Ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy | -| kyverno.cleanupController.webhookServer | object | `{"port":9443}` | cleanupController webhook server port in case you are using hostNetwork: true, you might want to change the port the webhookServer is listening to | | kyverno.config.annotations | object | `{}` | Additional annotations to add to the configmap. | | kyverno.config.create | bool | `true` | Create the configmap. | | kyverno.config.defaultRegistry | string | `"docker.io"` | The registry hostname used for the image mutation. | @@ -308,8 +322,8 @@ A Helm chart for kyverno | kyverno.config.webhooks | object | `{"namespaceSelector":{"matchExpressions":[{"key":"kubernetes.io/metadata.name","operator":"NotIn","values":["kube-system"]}]}}` | Defines the `namespaceSelector`/`objectSelector` in the webhook configurations. The Kyverno namespace is excluded if `excludeKyvernoNamespace` is `true` (default) | | kyverno.crds.annotations | object | `{}` | Additional CRDs annotations | | kyverno.crds.customLabels | object | `{}` | Additional CRDs labels | -| kyverno.crds.groups.kyverno | object | `{"cleanuppolicies":true,"clustercleanuppolicies":true,"clusterpolicies":true,"globalcontextentries":true,"policies":true,"policyexceptions":true,"updaterequests":true,"validatingpolicies":true}` | Install CRDs in group `kyverno.io` | -| kyverno.crds.groups.policies | object | `{"imagevalidatingpolicies":true,"policyexceptions":true,"validatingpolicies":true}` | Install CRDs in group `policies.kyverno.io` | +| kyverno.crds.groups.kyverno | object | `{"cleanuppolicies":true,"clustercleanuppolicies":true,"clusterpolicies":true,"globalcontextentries":true,"policies":true,"policyexceptions":true,"updaterequests":true}` | Install CRDs in group `kyverno.io` | +| kyverno.crds.groups.policies | object | `{"deletingpolicies":true,"generatingpolicies":true,"imagevalidatingpolicies":true,"mutatingpolicies":true,"namespaceddeletingpolicies":true,"namespacedimagevalidatingpolicies":true,"namespacedvalidatingpolicies":true,"policyexceptions":true,"validatingpolicies":true}` | Install CRDs in group `policies.kyverno.io` | | kyverno.crds.groups.reports | object | `{"clusterephemeralreports":true,"ephemeralreports":true}` | Install CRDs in group `reports.kyverno.io` | | kyverno.crds.groups.wgpolicyk8s | object | `{"clusterpolicyreports":true,"policyreports":true}` | Install CRDs in group `wgpolicyk8s.io` | | kyverno.crds.install | bool | `true` | Whether to have Helm install the Kyverno CRDs, if the CRDs are not installed by Helm, they must be added before policies can be created | @@ -329,9 +343,11 @@ A Helm chart for kyverno | kyverno.crds.migration.podResources.limits | object | `{"cpu":"100m","memory":"256Mi"}` | Pod resource limits | | kyverno.crds.migration.podResources.requests | object | `{"cpu":"10m","memory":"64Mi"}` | Pod resource requests | | kyverno.crds.migration.podSecurityContext | object | `{}` | Security context for the pod | -| kyverno.crds.migration.resources | list | `["cleanuppolicies.kyverno.io","clustercleanuppolicies.kyverno.io","clusterpolicies.kyverno.io","globalcontextentries.kyverno.io","policies.kyverno.io","policyexceptions.kyverno.io","updaterequests.kyverno.io"]` | Resources to migrate | +| kyverno.crds.migration.resources | list | 
`["cleanuppolicies.kyverno.io","clustercleanuppolicies.kyverno.io","clusterpolicies.kyverno.io","globalcontextentries.kyverno.io","policies.kyverno.io","policyexceptions.kyverno.io","updaterequests.kyverno.io","deletingpolicies.policies.kyverno.io","generatingpolicies.policies.kyverno.io","imagevalidatingpolicies.policies.kyverno.io","namespacedimagevalidatingpolicies.policies.kyverno.io","mutatingpolicies.policies.kyverno.io","namespaceddeletingpolicies.policies.kyverno.io","namespacedvalidatingpolicies.policies.kyverno.io","policyexceptions.policies.kyverno.io","validatingpolicies.policies.kyverno.io"]` | Resources to migrate | | kyverno.crds.migration.securityContext | object | `{"allowPrivilegeEscalation":false,"capabilities":{"drop":["ALL"]},"privileged":false,"readOnlyRootFilesystem":true,"runAsGroup":65534,"runAsNonRoot":true,"runAsUser":65534,"seccompProfile":{"type":"RuntimeDefault"}}` | Security context for the hook containers | +| kyverno.crds.migration.serviceAccount.automountServiceAccountToken | bool | `true` | Toggle automounting of the ServiceAccount | | kyverno.crds.migration.tolerations | list | `[]` | List of node taints to tolerate | +| kyverno.crds.reportsServer.enabled | bool | `false` | Kyverno reports-server is used in your cluster | | kyverno.customLabels | object | `{}` | Additional labels | | kyverno.existingImagePullSecrets | list | `[]` | Existing Image pull secrets for image verification policies, this will define the `--imagePullSecrets` argument | | kyverno.features.admissionReports.enabled | bool | `true` | Enables the feature | @@ -342,14 +358,17 @@ A Helm chart for kyverno | kyverno.features.backgroundScan.enabled | bool | `true` | Enables the feature | | kyverno.features.backgroundScan.skipResourceFilters | bool | `true` | Skips resource filters in background scan | | kyverno.features.configMapCaching.enabled | bool | `true` | Enables the feature | +| kyverno.features.controllerRuntimeMetrics.bindAddress | string | `":8080"` | Bind address for controller-runtime metrics (use "0" to disable it) | | kyverno.features.deferredLoading.enabled | bool | `true` | Enables the feature | | kyverno.features.dumpPatches.enabled | bool | `false` | Enables the feature | | kyverno.features.dumpPayload.enabled | bool | `false` | Enables the feature | | kyverno.features.forceFailurePolicyIgnore.enabled | bool | `false` | Enables the feature | -| kyverno.features.generateValidatingAdmissionPolicy.enabled | bool | `false` | Enables the feature | +| kyverno.features.generateMutatingAdmissionPolicy.enabled | bool | `false` | Enables the feature | +| kyverno.features.generateValidatingAdmissionPolicy.enabled | bool | `true` | Enables the feature | | kyverno.features.globalContext.maxApiCallResponseLength | int | `2000000` | Maximum allowed response size from API Calls. 
A value of 0 bypasses checks (not recommended) | | kyverno.features.logging.format | string | `"text"` | Logging format | | kyverno.features.logging.verbosity | int | `2` | Logging verbosity | +| kyverno.features.mutatingAdmissionPolicyReports.enabled | bool | `false` | Enables the feature | | kyverno.features.omitEvents.eventTypes | list | `["PolicyApplied","PolicySkipped"]` | Events which should not be emitted (possible values `PolicyViolation`, `PolicyApplied`, `PolicyError`, and `PolicySkipped`) | | kyverno.features.policyExceptions.enabled | bool | `false` | Enables the feature | | kyverno.features.policyExceptions.namespace | string | `""` | Restrict policy exceptions to a single namespace Set to "*" to allow exceptions in all namespaces | @@ -367,10 +386,11 @@ A Helm chart for kyverno | kyverno.features.tuf.mirror | string | `nil` | Tuf mirror | | kyverno.features.tuf.root | string | `nil` | Path to Tuf root | | kyverno.features.tuf.rootRaw | string | `nil` | Raw Tuf root | -| kyverno.features.validatingAdmissionPolicyReports.enabled | bool | `false` | Enables the feature | +| kyverno.features.validatingAdmissionPolicyReports.enabled | bool | `true` | Enables the feature | | kyverno.fullnameOverride | string | `nil` | Override the expanded name of the chart | | kyverno.global.caCertificates.data | string | `nil` | Global CA certificates to use with Kyverno deployments This value is expected to be one large string of CA certificates Individual controller values will override this global value | | kyverno.global.caCertificates.volume | object | `{}` | Global value to set single volume to be mounted for CA certificates for all deployments. Not used when `.Values.global.caCertificates.data` is defined Individual controller values will override this global value | +| kyverno.global.crdWatcher | bool | `false` | Enable/Disable custom resource watcher to invalidate cache | | kyverno.global.extraEnvVars | list | `[]` | Additional container environment variables to apply to all containers and init containers | | kyverno.global.image.registry | string | `nil` | Global value that allows to set a single image registry across all deployments. When set, it will override any values set under `.image.registry` across the chart. | | kyverno.global.imagePullSecrets | list | `[]` | Global list of Image pull secrets When set, it will override any values set under `imagePullSecrets` under different components across the chart. | @@ -387,30 +407,15 @@ A Helm chart for kyverno | kyverno.metricsConfig.annotations | object | `{}` | Additional annotations to add to the configmap. | | kyverno.metricsConfig.bucketBoundaries | list | `[0.005,0.01,0.025,0.05,0.1,0.25,0.5,1,2.5,5,10,15,20,25,30]` | Configures the bucket boundaries for all Histogram metrics, changing this configuration requires restart of the kyverno admission controller | | kyverno.metricsConfig.create | bool | `true` | Create the configmap. 
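As a sketch only, the new and re-defaulted feature flags listed above could be tuned in a values override like this; the flag names come from the table, and enabling MutatingAdmissionPolicy generation is assumed to require a cluster where that API is available:

```yaml
# Sketch: adjusting the feature flags documented above.
kyverno:
  features:
    controllerRuntimeMetrics:
      bindAddress: "0"        # "0" disables the controller-runtime metrics endpoint
    generateMutatingAdmissionPolicy:
      enabled: true           # opt in; assumes MutatingAdmissionPolicy support in the cluster
    mutatingAdmissionPolicyReports:
      enabled: false          # chart default
```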
| -| kyverno.metricsConfig.metricsExposure | map | `{"kyverno_admission_requests_total":{"disabledLabelDimensions":["resource_namespace"]},"kyverno_admission_review_duration_seconds":{"disabledLabelDimensions":["resource_namespace"]},"kyverno_cleanup_controller_deletedobjects_total":{"disabledLabelDimensions":["resource_namespace","policy_namespace"]},"kyverno_policy_execution_duration_seconds":{"disabledLabelDimensions":["resource_namespace","resource_request_operation"]},"kyverno_policy_results_total":{"disabledLabelDimensions":["resource_namespace","policy_namespace"]},"kyverno_policy_rule_info_total":{"disabledLabelDimensions":["resource_namespace","policy_namespace"]}}` | Configures the exposure of individual metrics, by default all metrics and all labels are exported, changing this configuration requires restart of the kyverno admission controller | +| kyverno.metricsConfig.metricsExposure | map | `{"kyverno_admission_requests_total":{"disabledLabelDimensions":["resource_namespace"]},"kyverno_admission_review_duration_seconds":{"disabledLabelDimensions":["resource_namespace"]},"kyverno_cleanup_controller_deletedobjects_total":{"disabledLabelDimensions":["resource_namespace","policy_namespace"]},"kyverno_generating_policy_execution_duration_seconds":{"disabledLabelDimensions":["resource_namespace","resource_request_operation"]},"kyverno_image_validating_policy_execution_duration_seconds":{"disabledLabelDimensions":["resource_namespace","resource_request_operation"]},"kyverno_mutating_policy_execution_duration_seconds":{"disabledLabelDimensions":["resource_namespace","resource_request_operation"]},"kyverno_policy_execution_duration_seconds":{"disabledLabelDimensions":["resource_namespace","resource_request_operation"]},"kyverno_policy_results_total":{"disabledLabelDimensions":["resource_namespace","policy_namespace"]},"kyverno_policy_rule_info_total":{"disabledLabelDimensions":["resource_namespace","policy_namespace"]},"kyverno_validating_policy_execution_duration_seconds":{"disabledLabelDimensions":["resource_namespace","resource_request_operation"]}}` | Configures the exposure of individual metrics, by default all metrics and all labels are exported, changing this configuration requires restart of the kyverno admission controller | | kyverno.metricsConfig.metricsRefreshInterval | string | `nil` | Rate at which metrics should reset so as to clean up the memory footprint of kyverno metrics, if you might be expecting high memory footprint of Kyverno's metrics. Default: 0, no refresh of metrics. WARNING: This flag is not working since Kyverno 1.8.0 | | kyverno.metricsConfig.name | string | `nil` | The configmap name (required if `create` is `false`). | | kyverno.metricsConfig.namespaces.exclude | list | `[]` | list of namespaces to NOT capture metrics for. | | kyverno.metricsConfig.namespaces.include | list | `[]` | List of namespaces to capture metrics for. | | kyverno.nameOverride | string | `nil` | Override the name of the chart | | kyverno.namespaceOverride | string | `nil` | Override the namespace the chart deploys to | -| kyverno.policyReportsCleanup.enabled | bool | `true` | Create a helm post-upgrade hook to cleanup the old policy reports. 
| -| kyverno.policyReportsCleanup.image.pullPolicy | string | `nil` | Image pull policy Defaults to image.pullPolicy if omitted | -| kyverno.policyReportsCleanup.image.registry | string | `nil` | Image registry | -| kyverno.policyReportsCleanup.image.repository | string | `"bitnami/kubectl"` | Image repository | -| kyverno.policyReportsCleanup.image.tag | string | `"1.32.3"` | Image tag Defaults to `latest` if omitted | -| kyverno.policyReportsCleanup.imagePullSecrets | list | `[]` | Image pull secrets | -| kyverno.policyReportsCleanup.nodeAffinity | object | `{}` | Node affinity constraints. | -| kyverno.policyReportsCleanup.nodeSelector | object | `{}` | Node labels for pod assignment | -| kyverno.policyReportsCleanup.podAffinity | object | `{}` | Pod affinity constraints. | -| kyverno.policyReportsCleanup.podAnnotations | object | `{}` | Pod annotations. | -| kyverno.policyReportsCleanup.podAntiAffinity | object | `{}` | Pod anti affinity constraints. | -| kyverno.policyReportsCleanup.podLabels | object | `{}` | Pod labels. | -| kyverno.policyReportsCleanup.podSecurityContext | object | `{}` | Security context for the pod | -| kyverno.policyReportsCleanup.resources.limits | object | `{"cpu":"100m","memory":"256Mi"}` | Pod resource limits | -| kyverno.policyReportsCleanup.resources.requests | object | `{"cpu":"10m","memory":"64Mi"}` | Pod resource requests | -| kyverno.policyReportsCleanup.securityContext | object | `{"allowPrivilegeEscalation":false,"capabilities":{"drop":["ALL"]},"privileged":false,"readOnlyRootFilesystem":true,"runAsGroup":65534,"runAsNonRoot":true,"runAsUser":65534,"seccompProfile":{"type":"RuntimeDefault"}}` | Security context for the hook containers | -| kyverno.policyReportsCleanup.tolerations | list | `[]` | List of node taints to tolerate | +| kyverno.openreports.enabled | bool | `false` | Enable OpenReports feature in controllers | +| kyverno.openreports.installCrds | bool | `false` | Whether to install CRDs from the upstream OpenReports chart. Setting this to true requires enabled to also be true. | | kyverno.rbac.roles.aggregate | object | `{"admin":true,"view":true}` | Aggregate ClusterRoles to Kubernetes default user-facing roles. For more information, see [User-facing roles](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles) | | kyverno.reportsController.annotations | object | `{}` | Deployment annotations. | | kyverno.reportsController.antiAffinity.enabled | bool | `true` | Pod antiAffinities toggle. Enabled by default but can be disabled if you want to schedule pods to the same node. | @@ -439,6 +444,7 @@ A Helm chart for kyverno | kyverno.reportsController.metricsService.create | bool | `true` | Create service. | | kyverno.reportsController.metricsService.nodePort | string | `nil` | Service node port. Only used if `type` is `NodePort`. | | kyverno.reportsController.metricsService.port | int | `8000` | Service port. Metrics server will be exposed at this port. | +| kyverno.reportsController.metricsService.trafficDistribution | string | `nil` | Service traffic distribution policy. Set to `PreferClose` to route traffic to nearby endpoints, reducing latency and cross-zone costs. | | kyverno.reportsController.metricsService.type | string | `"ClusterIP"` | Service type. | | kyverno.reportsController.networkPolicy.enabled | bool | `false` | When true, use a NetworkPolicy to allow ingress to the webhook This is useful on clusters using Calico and/or native k8s network policies in a default-deny setup. 
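Purely as an illustration of the new reporting options above (`openreports` and `crds.reportsServer`), a values override might look like the following; the exact interaction with an external reports-server deployment is an assumption based on the value descriptions:

```yaml
# Sketch: use the openreports.io API group and signal that a reports-server is present.
kyverno:
  openreports:
    enabled: true          # switch controllers to the openreports.io API group
    installCrds: false     # only meaningful together with enabled: true
  crds:
    reportsServer:
      enabled: true        # per the table: a Kyverno reports-server is used in the cluster
```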
| | kyverno.reportsController.networkPolicy.ingressFrom | list | `[]` | A list of valid from selectors according to https://kubernetes.io/docs/concepts/services-networking/network-policies. | @@ -450,6 +456,7 @@ A Helm chart for kyverno | kyverno.reportsController.podDisruptionBudget.enabled | bool | `false` | Enable PodDisruptionBudget. Will always be enabled if replicas > 1. This non-declarative behavior should ideally be avoided, but changing it now would be breaking. | | kyverno.reportsController.podDisruptionBudget.maxUnavailable | string | `nil` | Configures the maximum unavailable pods for disruptions. Cannot be used if `minAvailable` is set. | | kyverno.reportsController.podDisruptionBudget.minAvailable | int | `1` | Configures the minimum available pods for disruptions. Cannot be used if `maxUnavailable` is set. | +| kyverno.reportsController.podDisruptionBudget.unhealthyPodEvictionPolicy | string | `nil` | Unhealthy pod eviction policy to be used. Possible values are `IfHealthyBudget` or `AlwaysAllow`. | | kyverno.reportsController.podLabels | object | `{}` | Additional labels to add to each pod | | kyverno.reportsController.podSecurityContext | object | `{}` | Security context for the pod | | kyverno.reportsController.priorityClassName | string | `""` | Optional priority class | @@ -463,6 +470,7 @@ A Helm chart for kyverno | kyverno.reportsController.rbac.create | bool | `true` | Create RBAC resources | | kyverno.reportsController.rbac.createViewRoleBinding | bool | `true` | Create rolebinding to view role | | kyverno.reportsController.rbac.serviceAccount.annotations | object | `{}` | Annotations for the ServiceAccount | +| kyverno.reportsController.rbac.serviceAccount.automountServiceAccountToken | bool | `true` | Toggle automounting of the ServiceAccount | | kyverno.reportsController.rbac.serviceAccount.name | string | `nil` | Service account name | | kyverno.reportsController.rbac.viewRoleName | string | `"view"` | The view role to use in the rolebinding | | kyverno.reportsController.replicas | int | `nil` | Desired number of pods | @@ -473,6 +481,7 @@ A Helm chart for kyverno | kyverno.reportsController.sanityChecks | bool | `true` | Enable sanity check for reports CRDs | | kyverno.reportsController.securityContext | object | `{"allowPrivilegeEscalation":false,"capabilities":{"drop":["ALL"]},"privileged":false,"readOnlyRootFilesystem":true,"runAsNonRoot":true,"seccompProfile":{"type":"RuntimeDefault"}}` | Security context for the containers | | kyverno.reportsController.server | object | `{"port":9443}` | reportsController server port in case you are using hostNetwork: true, you might want to change the port the reportsController is listening to | +| kyverno.reportsController.serviceMonitor.additionalAnnotations | object | `{}` | Additional annotations | | kyverno.reportsController.serviceMonitor.additionalLabels | object | `{}` | Additional labels | | kyverno.reportsController.serviceMonitor.enabled | bool | `false` | Create a `ServiceMonitor` to collect Prometheus metrics. | | kyverno.reportsController.serviceMonitor.interval | string | `"30s"` | Interval to scrape metrics | @@ -491,22 +500,26 @@ A Helm chart for kyverno | kyverno.reportsController.tracing.port | string | `nil` | Traces receiver port | | kyverno.reportsController.tufRootMountPath | string | `"/.sigstore"` | A writable volume to use for the TUF root initialization. | | kyverno.reportsController.updateStrategy | object | See [values.yaml](values.yaml) | Deployment update strategy. 
Ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy | +| kyverno.test.automountServiceAccountToken | bool | `true` | Toggle automounting of the ServiceAccount | | kyverno.test.image.pullPolicy | string | `nil` | Image pull policy Defaults to image.pullPolicy if omitted | -| kyverno.test.image.registry | string | `nil` | Image registry | -| kyverno.test.image.repository | string | `"busybox"` | Image repository | -| kyverno.test.image.tag | string | `"1.35"` | Image tag Defaults to `latest` if omitted | +| kyverno.test.image.registry | string | `"curlimages"` | Image registry | +| kyverno.test.image.repository | string | `"curl"` | Image repository | +| kyverno.test.image.tag | string | `"8.10.1"` | Image tag Defaults to `latest` if omitted | | kyverno.test.imagePullSecrets | list | `[]` | Image pull secrets | +| kyverno.test.nodeSelector | object | `{}` | Node labels for pod assignment | +| kyverno.test.podAnnotations | object | `{}` | Additional Pod annotations | | kyverno.test.resources.limits | object | `{"cpu":"100m","memory":"256Mi"}` | Pod resource limits | | kyverno.test.resources.requests | object | `{"cpu":"10m","memory":"64Mi"}` | Pod resource requests | | kyverno.test.securityContext | object | `{"allowPrivilegeEscalation":false,"capabilities":{"drop":["ALL"]},"privileged":false,"readOnlyRootFilesystem":true,"runAsGroup":65534,"runAsNonRoot":true,"runAsUser":65534,"seccompProfile":{"type":"RuntimeDefault"}}` | Security context for the test containers | | kyverno.test.sleep | int | `20` | Sleep time before running test | +| kyverno.test.tolerations | list | `[]` | List of node taints to tolerate | | kyverno.upgrade.fromV2 | bool | `false` | Upgrading from v2 to v3 is not allowed by default, set this to true once changes have been reviewed. | | kyverno.webhooksCleanup.autoDeleteWebhooks.enabled | bool | `false` | Allow webhooks controller to delete webhooks using finalizers | | kyverno.webhooksCleanup.enabled | bool | `true` | Create a helm pre-delete hook to cleanup webhooks. | | kyverno.webhooksCleanup.image.pullPolicy | string | `nil` | Image pull policy Defaults to image.pullPolicy if omitted | -| kyverno.webhooksCleanup.image.registry | string | `nil` | Image registry | -| kyverno.webhooksCleanup.image.repository | string | `"bitnami/kubectl"` | Image repository | -| kyverno.webhooksCleanup.image.tag | string | `"1.32.3"` | Image tag Defaults to `latest` if omitted | +| kyverno.webhooksCleanup.image.registry | string | `"registry.k8s.io"` | Image registry | +| kyverno.webhooksCleanup.image.repository | string | `"kubectl"` | Image repository | +| kyverno.webhooksCleanup.image.tag | string | `"v1.32.7"` | Image tag Defaults to `latest` if omitted | | kyverno.webhooksCleanup.imagePullSecrets | list | `[]` | Image pull secrets | | kyverno.webhooksCleanup.nodeAffinity | object | `{}` | Node affinity constraints. 
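Since the hook and test images above move away from Bitnami (`registry.k8s.io/kubectl` and `curlimages/curl`), clusters that pull through a private mirror may want an override along these lines; `registry.example.com` is a hypothetical mirror, not part of the chart:

```yaml
# Sketch: point the new hook/test images at a private mirror (hypothetical registry).
kyverno:
  webhooksCleanup:
    image:
      registry: registry.example.com   # hypothetical mirror of registry.k8s.io
      repository: kubectl
      tag: v1.32.7
  test:
    image:
      registry: registry.example.com   # hypothetical mirror of curlimages
      repository: curl
      tag: "8.10.1"
```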
| | kyverno.webhooksCleanup.nodeSelector | object | `{}` | Node labels for pod assignment | @@ -518,6 +531,7 @@ A Helm chart for kyverno | kyverno.webhooksCleanup.resources.limits | object | `{"cpu":"100m","memory":"256Mi"}` | Pod resource limits | | kyverno.webhooksCleanup.resources.requests | object | `{"cpu":"10m","memory":"64Mi"}` | Pod resource requests | | kyverno.webhooksCleanup.securityContext | object | `{"allowPrivilegeEscalation":false,"capabilities":{"drop":["ALL"]},"privileged":false,"readOnlyRootFilesystem":true,"runAsGroup":65534,"runAsNonRoot":true,"runAsUser":65534,"seccompProfile":{"type":"RuntimeDefault"}}` | Security context for the hook containers | +| kyverno.webhooksCleanup.serviceAccount.automountServiceAccountToken | bool | `true` | Toggle automounting of the ServiceAccount | | kyverno.webhooksCleanup.tolerations | list | `[]` | List of node taints to tolerate | | prometheus.enabled | bool | `false` | Enables Prometheus Operator monitoring | | prometheus.grafanaDashboard.enabled | bool | `true` | Add grafana dashboard as a configmap | @@ -550,7 +564,7 @@ spec: source: repoURL: "https://edixos.github.io/ekp-helm" - targetRevision: "0.1.2" + targetRevision: "0.1.3" chart: kyverno path: '' helm: diff --git a/charts/kyverno/charts/kyverno-3.4.1.tgz b/charts/kyverno/charts/kyverno-3.4.1.tgz deleted file mode 100644 index 4cc88c26..00000000 Binary files a/charts/kyverno/charts/kyverno-3.4.1.tgz and /dev/null differ diff --git a/charts/kyverno/charts/kyverno-3.6.0.tgz b/charts/kyverno/charts/kyverno-3.6.0.tgz new file mode 100644 index 00000000..d441bf47 Binary files /dev/null and b/charts/kyverno/charts/kyverno-3.6.0.tgz differ diff --git a/charts/kyverno/values.yaml b/charts/kyverno/values.yaml index 98935898..55e717ab 100644 --- a/charts/kyverno/values.yaml +++ b/charts/kyverno/values.yaml @@ -17,15 +17,15 @@ prometheus: # Default values for kyverno. kyverno: - # -- Internal settings used with `helm template` to generate install manifest - # @ignored - templating: - enabled: false - debug: false - version: ~ - global: + # -- Internal settings used with `helm template` to generate install manifest + # @ignored + templating: + enabled: false + debug: false + version: ~ + image: # -- (string) Global value that allows to set a single image registry across all deployments. # When set, it will override any values set under `.image.registry` across the chart. @@ -37,6 +37,9 @@ kyverno: # -- Resync period for informers resyncPeriod: 15m + # -- Enable/Disable custom resource watcher to invalidate cache + crdWatcher: false + caCertificates: # -- Global CA certificates to use with Kyverno deployments # This value is expected to be one large string of CA certificates @@ -91,12 +94,23 @@ kyverno: admin: true view: true + # Use openreports.io as the API group for reporting + openreports: + # -- Enable OpenReports feature in controllers + enabled: false + # -- Whether to install CRDs from the upstream OpenReports chart. Setting this to true requires enabled to also be true. 
+ installCrds: false + # CRDs configuration crds: # -- Whether to have Helm install the Kyverno CRDs, if the CRDs are not installed by Helm, they must be added before policies can be created install: true + reportsServer: + # -- Kyverno reports-server is used in your cluster + enabled: false + groups: # -- Install CRDs in group `kyverno.io` @@ -108,13 +122,18 @@ kyverno: policies: true policyexceptions: true updaterequests: true - validatingpolicies: true # -- Install CRDs in group `policies.kyverno.io` policies: validatingpolicies: true policyexceptions: true imagevalidatingpolicies: true + namespacedimagevalidatingpolicies: true + mutatingpolicies: true + generatingpolicies: true + deletingpolicies: true + namespaceddeletingpolicies: true + namespacedvalidatingpolicies: true # -- Install CRDs in group `reports.kyverno.io` reports: @@ -148,6 +167,15 @@ kyverno: - policies.kyverno.io - policyexceptions.kyverno.io - updaterequests.kyverno.io + - deletingpolicies.policies.kyverno.io + - generatingpolicies.policies.kyverno.io + - imagevalidatingpolicies.policies.kyverno.io + - namespacedimagevalidatingpolicies.policies.kyverno.io + - mutatingpolicies.policies.kyverno.io + - namespaceddeletingpolicies.policies.kyverno.io + - namespacedvalidatingpolicies.policies.kyverno.io + - policyexceptions.policies.kyverno.io + - validatingpolicies.policies.kyverno.io image: # -- (string) Image registry @@ -213,6 +241,10 @@ kyverno: cpu: 10m memory: 64Mi + serviceAccount: + # -- Toggle automounting of the ServiceAccount + automountServiceAccountToken: true + # Configuration config: @@ -261,16 +293,16 @@ kyverno: - '[*/*,kube-public,*]' - '[*/*,kube-node-lease,*]' - '[Node,*,*]' - - '[Node/*,*,*]' + - '[Node/?*,*,*]' - '[APIService,*,*]' - - '[APIService/*,*,*]' + - '[APIService/?*,*,*]' - '[TokenReview,*,*]' - '[SubjectAccessReview,*,*]' - '[SelfSubjectAccessReview,*,*]' - '[Binding,*,*]' - '[Pod/binding,*,*]' - '[ReplicaSet,*,*]' - - '[ReplicaSet/*,*,*]' + - '[ReplicaSet/?*,*,*]' - '[EphemeralReport,*,*]' - '[ClusterEphemeralReport,*,*]' # exclude resources from the chart @@ -291,13 +323,13 @@ kyverno: - '[ClusterRoleBinding,*,{{ template "kyverno.cleanup-controller.roleName" . }}]' - '[ClusterRoleBinding,*,{{ template "kyverno.reports-controller.roleName" . }}]' - '[ServiceAccount,{{ include "kyverno.namespace" . }},{{ template "kyverno.admission-controller.serviceAccountName" . }}]' - - '[ServiceAccount/*,{{ include "kyverno.namespace" . }},{{ template "kyverno.admission-controller.serviceAccountName" . }}]' + - '[ServiceAccount/?*,{{ include "kyverno.namespace" . }},{{ template "kyverno.admission-controller.serviceAccountName" . }}]' - '[ServiceAccount,{{ include "kyverno.namespace" . }},{{ template "kyverno.background-controller.serviceAccountName" . }}]' - - '[ServiceAccount/*,{{ include "kyverno.namespace" . }},{{ template "kyverno.background-controller.serviceAccountName" . }}]' + - '[ServiceAccount/?*,{{ include "kyverno.namespace" . }},{{ template "kyverno.background-controller.serviceAccountName" . }}]' - '[ServiceAccount,{{ include "kyverno.namespace" . }},{{ template "kyverno.cleanup-controller.serviceAccountName" . }}]' - - '[ServiceAccount/*,{{ include "kyverno.namespace" . }},{{ template "kyverno.cleanup-controller.serviceAccountName" . }}]' + - '[ServiceAccount/?*,{{ include "kyverno.namespace" . }},{{ template "kyverno.cleanup-controller.serviceAccountName" . }}]' - '[ServiceAccount,{{ include "kyverno.namespace" . }},{{ template "kyverno.reports-controller.serviceAccountName" . 
}}]' - - '[ServiceAccount/*,{{ include "kyverno.namespace" . }},{{ template "kyverno.reports-controller.serviceAccountName" . }}]' + - '[ServiceAccount/?*,{{ include "kyverno.namespace" . }},{{ template "kyverno.reports-controller.serviceAccountName" . }}]' - '[Role,{{ include "kyverno.namespace" . }},{{ template "kyverno.admission-controller.roleName" . }}]' - '[Role,{{ include "kyverno.namespace" . }},{{ template "kyverno.background-controller.roleName" . }}]' - '[Role,{{ include "kyverno.namespace" . }},{{ template "kyverno.cleanup-controller.roleName" . }}]' @@ -309,51 +341,51 @@ kyverno: - '[ConfigMap,{{ include "kyverno.namespace" . }},{{ template "kyverno.config.configMapName" . }}]' - '[ConfigMap,{{ include "kyverno.namespace" . }},{{ template "kyverno.config.metricsConfigMapName" . }}]' - '[Deployment,{{ include "kyverno.namespace" . }},{{ template "kyverno.admission-controller.name" . }}]' - - '[Deployment/*,{{ include "kyverno.namespace" . }},{{ template "kyverno.admission-controller.name" . }}]' + - '[Deployment/?*,{{ include "kyverno.namespace" . }},{{ template "kyverno.admission-controller.name" . }}]' - '[Deployment,{{ include "kyverno.namespace" . }},{{ template "kyverno.background-controller.name" . }}]' - - '[Deployment/*,{{ include "kyverno.namespace" . }},{{ template "kyverno.background-controller.name" . }}]' + - '[Deployment/?*,{{ include "kyverno.namespace" . }},{{ template "kyverno.background-controller.name" . }}]' - '[Deployment,{{ include "kyverno.namespace" . }},{{ template "kyverno.cleanup-controller.name" . }}]' - - '[Deployment/*,{{ include "kyverno.namespace" . }},{{ template "kyverno.cleanup-controller.name" . }}]' + - '[Deployment/?*,{{ include "kyverno.namespace" . }},{{ template "kyverno.cleanup-controller.name" . }}]' - '[Deployment,{{ include "kyverno.namespace" . }},{{ template "kyverno.reports-controller.name" . }}]' - - '[Deployment/*,{{ include "kyverno.namespace" . }},{{ template "kyverno.reports-controller.name" . }}]' + - '[Deployment/?*,{{ include "kyverno.namespace" . }},{{ template "kyverno.reports-controller.name" . }}]' - '[Pod,{{ include "kyverno.namespace" . }},{{ template "kyverno.admission-controller.name" . }}-*]' - - '[Pod/*,{{ include "kyverno.namespace" . }},{{ template "kyverno.admission-controller.name" . }}-*]' + - '[Pod/?*,{{ include "kyverno.namespace" . }},{{ template "kyverno.admission-controller.name" . }}-*]' - '[Pod,{{ include "kyverno.namespace" . }},{{ template "kyverno.background-controller.name" . }}-*]' - - '[Pod/*,{{ include "kyverno.namespace" . }},{{ template "kyverno.background-controller.name" . }}-*]' + - '[Pod/?*,{{ include "kyverno.namespace" . }},{{ template "kyverno.background-controller.name" . }}-*]' - '[Pod,{{ include "kyverno.namespace" . }},{{ template "kyverno.cleanup-controller.name" . }}-*]' - - '[Pod/*,{{ include "kyverno.namespace" . }},{{ template "kyverno.cleanup-controller.name" . }}-*]' + - '[Pod/?*,{{ include "kyverno.namespace" . }},{{ template "kyverno.cleanup-controller.name" . }}-*]' - '[Pod,{{ include "kyverno.namespace" . }},{{ template "kyverno.reports-controller.name" . }}-*]' - - '[Pod/*,{{ include "kyverno.namespace" . }},{{ template "kyverno.reports-controller.name" . }}-*]' + - '[Pod/?*,{{ include "kyverno.namespace" . }},{{ template "kyverno.reports-controller.name" . }}-*]' - '[Job,{{ include "kyverno.namespace" . }},{{ template "kyverno.fullname" . }}-hook-pre-delete]' - - '[Job/*,{{ include "kyverno.namespace" . }},{{ template "kyverno.fullname" . 
}}-hook-pre-delete]' + - '[Job/?*,{{ include "kyverno.namespace" . }},{{ template "kyverno.fullname" . }}-hook-pre-delete]' - '[NetworkPolicy,{{ include "kyverno.namespace" . }},{{ template "kyverno.admission-controller.name" . }}]' - - '[NetworkPolicy/*,{{ include "kyverno.namespace" . }},{{ template "kyverno.admission-controller.name" . }}]' + - '[NetworkPolicy/?*,{{ include "kyverno.namespace" . }},{{ template "kyverno.admission-controller.name" . }}]' - '[NetworkPolicy,{{ include "kyverno.namespace" . }},{{ template "kyverno.background-controller.name" . }}]' - - '[NetworkPolicy/*,{{ include "kyverno.namespace" . }},{{ template "kyverno.background-controller.name" . }}]' + - '[NetworkPolicy/?*,{{ include "kyverno.namespace" . }},{{ template "kyverno.background-controller.name" . }}]' - '[NetworkPolicy,{{ include "kyverno.namespace" . }},{{ template "kyverno.cleanup-controller.name" . }}]' - - '[NetworkPolicy/*,{{ include "kyverno.namespace" . }},{{ template "kyverno.cleanup-controller.name" . }}]' + - '[NetworkPolicy/?*,{{ include "kyverno.namespace" . }},{{ template "kyverno.cleanup-controller.name" . }}]' - '[NetworkPolicy,{{ include "kyverno.namespace" . }},{{ template "kyverno.reports-controller.name" . }}]' - - '[NetworkPolicy/*,{{ include "kyverno.namespace" . }},{{ template "kyverno.reports-controller.name" . }}]' + - '[NetworkPolicy/?*,{{ include "kyverno.namespace" . }},{{ template "kyverno.reports-controller.name" . }}]' - '[PodDisruptionBudget,{{ include "kyverno.namespace" . }},{{ template "kyverno.admission-controller.name" . }}]' - - '[PodDisruptionBudget/*,{{ include "kyverno.namespace" . }},{{ template "kyverno.admission-controller.name" . }}]' + - '[PodDisruptionBudget/?*,{{ include "kyverno.namespace" . }},{{ template "kyverno.admission-controller.name" . }}]' - '[PodDisruptionBudget,{{ include "kyverno.namespace" . }},{{ template "kyverno.background-controller.name" . }}]' - - '[PodDisruptionBudget/*,{{ include "kyverno.namespace" . }},{{ template "kyverno.background-controller.name" . }}]' + - '[PodDisruptionBudget/?*,{{ include "kyverno.namespace" . }},{{ template "kyverno.background-controller.name" . }}]' - '[PodDisruptionBudget,{{ include "kyverno.namespace" . }},{{ template "kyverno.cleanup-controller.name" . }}]' - - '[PodDisruptionBudget/*,{{ include "kyverno.namespace" . }},{{ template "kyverno.cleanup-controller.name" . }}]' + - '[PodDisruptionBudget/?*,{{ include "kyverno.namespace" . }},{{ template "kyverno.cleanup-controller.name" . }}]' - '[PodDisruptionBudget,{{ include "kyverno.namespace" . }},{{ template "kyverno.reports-controller.name" . }}]' - - '[PodDisruptionBudget/*,{{ include "kyverno.namespace" . }},{{ template "kyverno.reports-controller.name" . }}]' + - '[PodDisruptionBudget/?*,{{ include "kyverno.namespace" . }},{{ template "kyverno.reports-controller.name" . }}]' - '[Service,{{ include "kyverno.namespace" . }},{{ template "kyverno.admission-controller.serviceName" . }}]' - - '[Service/*,{{ include "kyverno.namespace" . }},{{ template "kyverno.admission-controller.serviceName" . }}]' + - '[Service/?*,{{ include "kyverno.namespace" . }},{{ template "kyverno.admission-controller.serviceName" . }}]' - '[Service,{{ include "kyverno.namespace" . }},{{ template "kyverno.admission-controller.serviceName" . }}-metrics]' - - '[Service/*,{{ include "kyverno.namespace" . }},{{ template "kyverno.admission-controller.serviceName" . }}-metrics]' + - '[Service/?*,{{ include "kyverno.namespace" . }},{{ template "kyverno.admission-controller.serviceName" . 
}}-metrics]' - '[Service,{{ include "kyverno.namespace" . }},{{ template "kyverno.background-controller.name" . }}-metrics]' - - '[Service/*,{{ include "kyverno.namespace" . }},{{ template "kyverno.background-controller.name" . }}-metrics]' + - '[Service/?*,{{ include "kyverno.namespace" . }},{{ template "kyverno.background-controller.name" . }}-metrics]' - '[Service,{{ include "kyverno.namespace" . }},{{ template "kyverno.cleanup-controller.name" . }}]' - - '[Service/*,{{ include "kyverno.namespace" . }},{{ template "kyverno.cleanup-controller.name" . }}]' + - '[Service/?*,{{ include "kyverno.namespace" . }},{{ template "kyverno.cleanup-controller.name" . }}]' - '[Service,{{ include "kyverno.namespace" . }},{{ template "kyverno.cleanup-controller.name" . }}-metrics]' - - '[Service/*,{{ include "kyverno.namespace" . }},{{ template "kyverno.cleanup-controller.name" . }}-metrics]' + - '[Service/?*,{{ include "kyverno.namespace" . }},{{ template "kyverno.cleanup-controller.name" . }}-metrics]' - '[Service,{{ include "kyverno.namespace" . }},{{ template "kyverno.reports-controller.name" . }}-metrics]' - - '[Service/*,{{ include "kyverno.namespace" . }},{{ template "kyverno.reports-controller.name" . }}-metrics]' + - '[Service/?*,{{ include "kyverno.namespace" . }},{{ template "kyverno.reports-controller.name" . }}-metrics]' - '[ServiceMonitor,{{ if .Values.admissionController.serviceMonitor.namespace }}{{ .Values.admissionController.serviceMonitor.namespace }}{{ else }}{{ template "kyverno.namespace" . }}{{ end }},{{ template "kyverno.admission-controller.name" . }}]' - '[ServiceMonitor,{{ if .Values.admissionController.serviceMonitor.namespace }}{{ .Values.admissionController.serviceMonitor.namespace }}{{ else }}{{ template "kyverno.namespace" . }}{{ end }},{{ template "kyverno.background-controller.name" . }}]' - '[ServiceMonitor,{{ if .Values.admissionController.serviceMonitor.namespace }}{{ .Values.admissionController.serviceMonitor.namespace }}{{ else }}{{ template "kyverno.namespace" . }}{{ end }},{{ template "kyverno.cleanup-controller.name" . }}]' @@ -375,10 +407,10 @@ kyverno: values: - kube-system # Exclude objects - # - objectSelector: - # matchExpressions: - # - key: webhooks.kyverno.io/exclude - # operator: DoesNotExist + # objectSelector: + # matchExpressions: + # - key: webhooks.kyverno.io/exclude + # operator: DoesNotExist # -- Defines annotations to set on webhook configurations. 
webhookAnnotations: @@ -445,6 +477,18 @@ kyverno: kyverno_policy_execution_duration_seconds: # bucketBoundaries: [0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5] disabledLabelDimensions: ["resource_namespace", "resource_request_operation"] + kyverno_validating_policy_execution_duration_seconds: + # bucketBoundaries: [0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5] + disabledLabelDimensions: ["resource_namespace", "resource_request_operation"] + kyverno_image_validating_policy_execution_duration_seconds: + # bucketBoundaries: [0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5] + disabledLabelDimensions: ["resource_namespace", "resource_request_operation"] + kyverno_mutating_policy_execution_duration_seconds: + # bucketBoundaries: [0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5] + disabledLabelDimensions: ["resource_namespace", "resource_request_operation"] + kyverno_generating_policy_execution_duration_seconds: + # bucketBoundaries: [0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5] + disabledLabelDimensions: ["resource_namespace", "resource_request_operation"] kyverno_admission_review_duration_seconds: # enabled: false disabledLabelDimensions: ["resource_namespace"] @@ -480,12 +524,12 @@ kyverno: image: # -- (string) Image registry - registry: ~ + registry: curlimages # -- Image repository - repository: busybox + repository: curl # -- Image tag # Defaults to `latest` if omitted - tag: '1.35' + tag: '8.10.1' # -- (string) Image pull policy # Defaults to image.pullPolicy if omitted pullPolicy: ~ @@ -518,9 +562,22 @@ kyverno: seccompProfile: type: RuntimeDefault + # -- Toggle automounting of the ServiceAccount + automountServiceAccountToken: true + + # -- Node labels for pod assignment + nodeSelector: {} + + # -- Additional Pod annotations + podAnnotations: {} + + # -- List of node taints to tolerate + tolerations: [] + # -- Additional labels customLabels: {} + webhooksCleanup: # -- Create a helm pre-delete hook to cleanup webhooks. enabled: true @@ -531,12 +588,12 @@ kyverno: image: # -- (string) Image registry - registry: ~ + registry: registry.k8s.io # -- Image repository - repository: bitnami/kubectl + repository: kubectl # -- Image tag # Defaults to `latest` if omitted - tag: '1.32.3' + tag: 'v1.32.7' # -- (string) Image pull policy # Defaults to image.pullPolicy if omitted pullPolicy: ~ @@ -592,73 +649,9 @@ kyverno: cpu: 10m memory: 64Mi - policyReportsCleanup: - # -- Create a helm post-upgrade hook to cleanup the old policy reports. - enabled: true - - image: - # -- (string) Image registry - registry: ~ - # -- Image repository - repository: bitnami/kubectl - # -- Image tag - # Defaults to `latest` if omitted - tag: '1.32.3' - # -- (string) Image pull policy - # Defaults to image.pullPolicy if omitted - pullPolicy: ~ - - # -- Image pull secrets - imagePullSecrets: [] - # - name: secretName - - # -- Security context for the pod - podSecurityContext: {} - - # -- Node labels for pod assignment - nodeSelector: {} - - # -- List of node taints to tolerate - tolerations: [] - - # -- Pod anti affinity constraints. - podAntiAffinity: {} - - # -- Pod affinity constraints. - podAffinity: {} - - # -- Pod labels. - podLabels: {} - - # -- Pod annotations. - podAnnotations: {} - - # -- Node affinity constraints. 
- nodeAffinity: {} - - # -- Security context for the hook containers - securityContext: - runAsUser: 65534 - runAsGroup: 65534 - runAsNonRoot: true - privileged: false - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - capabilities: - drop: - - ALL - seccompProfile: - type: RuntimeDefault - - resources: - # -- Pod resource limits - limits: - cpu: 100m - memory: 256Mi - # -- Pod resource requests - requests: - cpu: 10m - memory: 64Mi + serviceAccount: + # -- Toggle automounting of the ServiceAccount + automountServiceAccountToken: true grafana: # -- Enable grafana dashboard creation. @@ -699,6 +692,9 @@ kyverno: # -- Enables the feature enabled: true validatingAdmissionPolicyReports: + # -- Enables the feature + enabled: true + mutatingAdmissionPolicyReports: # -- Enables the feature enabled: false reporting: @@ -727,6 +723,9 @@ kyverno: configMapCaching: # -- Enables the feature enabled: true + controllerRuntimeMetrics: + # -- Bind address for controller-runtime metrics (use "0" to disable it) + bindAddress: ":8080" deferredLoading: # -- Enables the feature enabled: true @@ -737,6 +736,9 @@ kyverno: # -- Enables the feature enabled: false generateValidatingAdmissionPolicy: + # -- Enables the feature + enabled: true + generateMutatingAdmissionPolicy: # -- Enables the feature enabled: false dumpPatches: @@ -831,6 +833,9 @@ kyverno: annotations: {} # example.com/annotation: value + # -- Toggle automounting of the ServiceAccount + automountServiceAccountToken: true + coreClusterRole: # -- Extra resource permissions to add in the core cluster role. # This was introduced to avoid breaking change in the chart but should ideally be moved in `clusterRole.extraResources`. @@ -862,6 +867,9 @@ kyverno: # -- Resync period for informers resyncPeriod: 15m + # -- Enable/Disable custom resource watcher to invalidate cache + crdWatcher: false + # -- Additional labels to add to each pod podLabels: {} # example.com/label: foo @@ -1015,6 +1023,9 @@ kyverno: # -- Configures the maximum unavailable pods for disruptions. # Cannot be used if `minAvailable` is set. maxUnavailable: + # -- Unhealthy pod eviction policy to be used. + # Possible values are `IfHealthyBudget` or `AlwaysAllow`. + unhealthyPodEvictionPolicy: # -- A writable volume to use for the TUF root initialization. tufRootMountPath: /.sigstore @@ -1153,6 +1164,9 @@ kyverno: nodePort: # -- Service annotations. annotations: {} + # -- (string) Service traffic distribution policy. + # Set to `PreferClose` to route traffic to nearby endpoints, reducing latency and cross-zone costs. + trafficDistribution: ~ metricsService: # -- Create service. @@ -1167,6 +1181,9 @@ kyverno: nodePort: # -- Service annotations. annotations: {} + # -- (string) Service traffic distribution policy. + # Set to `PreferClose` to route traffic to nearby endpoints, reducing latency and cross-zone costs. + trafficDistribution: ~ networkPolicy: # -- When true, use a NetworkPolicy to allow ingress to the webhook @@ -1178,6 +1195,8 @@ kyverno: serviceMonitor: # -- Create a `ServiceMonitor` to collect Prometheus metrics. enabled: false + # -- Additional annotations + additionalAnnotations: {} # -- Additional labels additionalLabels: {} # -- (string) Override namespace @@ -1255,6 +1274,9 @@ kyverno: annotations: {} # example.com/annotation: value + # -- Toggle automounting of the ServiceAccount + automountServiceAccountToken: true + coreClusterRole: # -- Extra resource permissions to add in the core cluster role. 
# This was introduced to avoid breaking change in the chart but should ideally be moved in `clusterRole.extraResources`. @@ -1292,7 +1314,17 @@ kyverno: - update - patch - delete - + - apiGroups: + - resource.k8s.io + resources: + - resourceclaims + - resourceclaimtemplates + verbs: + - create + - delete + - update + - patch + - deletecollection clusterRole: # -- Extra resource permissions to add in the cluster role extraResources: [] @@ -1449,6 +1481,9 @@ kyverno: # -- Configures the maximum unavailable pods for disruptions. # Cannot be used if `minAvailable` is set. maxUnavailable: + # -- Unhealthy pod eviction policy to be used. + # Possible values are `IfHealthyBudget` or `AlwaysAllow`. + unhealthyPodEvictionPolicy: caCertificates: # -- CA certificates to use with Kyverno deployments @@ -1475,6 +1510,9 @@ kyverno: nodePort: # -- Service annotations. annotations: {} + # -- (string) Service traffic distribution policy. + # Set to `PreferClose` to route traffic to nearby endpoints, reducing latency and cross-zone costs. + trafficDistribution: ~ networkPolicy: @@ -1488,6 +1526,8 @@ kyverno: serviceMonitor: # -- Create a `ServiceMonitor` to collect Prometheus metrics. enabled: false + # -- Additional annotations + additionalAnnotations: {} # -- Additional labels additionalLabels: {} # -- (string) Override namespace @@ -1564,6 +1604,9 @@ kyverno: annotations: {} # example.com/annotation: value + # -- Toggle automounting of the ServiceAccount + automountServiceAccountToken: true + clusterRole: # -- Extra resource permissions to add in the cluster role extraResources: [] @@ -1637,10 +1680,6 @@ kyverno: # in case you are using hostNetwork: true, you might want to change the port the cleanupController is listening to server: port: 9443 - # -- cleanupController webhook server port - # in case you are using hostNetwork: true, you might want to change the port the webhookServer is listening to - webhookServer: - port: 9443 # -- `dnsPolicy` determines the manner in which DNS resolution happens in the cluster. # In case of `hostNetwork: true`, usually, the `dnsPolicy` is suitable to be `ClusterFirstWithHostNet`. @@ -1775,6 +1814,9 @@ kyverno: # -- Configures the maximum unavailable pods for disruptions. # Cannot be used if `minAvailable` is set. maxUnavailable: + # -- Unhealthy pod eviction policy to be used. + # Possible values are `IfHealthyBudget` or `AlwaysAllow`. + unhealthyPodEvictionPolicy: service: # -- Service port. @@ -1786,6 +1828,9 @@ kyverno: nodePort: # -- Service annotations. annotations: {} + # -- (string) Service traffic distribution policy. + # Set to `PreferClose` to route traffic to nearby endpoints, reducing latency and cross-zone costs. + trafficDistribution: ~ metricsService: # -- Create service. @@ -1800,6 +1845,9 @@ kyverno: nodePort: # -- Service annotations. annotations: {} + # -- (string) Service traffic distribution policy. + # Set to `PreferClose` to route traffic to nearby endpoints, reducing latency and cross-zone costs. + trafficDistribution: ~ networkPolicy: @@ -1813,6 +1861,8 @@ kyverno: serviceMonitor: # -- Create a `ServiceMonitor` to collect Prometheus metrics. enabled: false + # -- Additional annotations + additionalAnnotations: {} # -- Additional labels additionalLabels: {} # -- (string) Override namespace @@ -1890,6 +1940,9 @@ kyverno: annotations: {} # example.com/annotation: value + # -- Toggle automounting of the ServiceAccount + automountServiceAccountToken: true + coreClusterRole: # -- Extra resource permissions to add in the core cluster role. 
# This was introduced to avoid breaking change in the chart but should ideally be moved in `clusterRole.extraResources`. @@ -2065,6 +2118,9 @@ kyverno: # -- Configures the maximum unavailable pods for disruptions. # Cannot be used if `minAvailable` is set. maxUnavailable: + # -- Unhealthy pod eviction policy to be used. + # Possible values are `IfHealthyBudget` or `AlwaysAllow`. + unhealthyPodEvictionPolicy: # -- A writable volume to use for the TUF root initialization. tufRootMountPath: /.sigstore @@ -2099,6 +2155,9 @@ kyverno: nodePort: ~ # -- Service annotations. annotations: {} + # -- (string) Service traffic distribution policy. + # Set to `PreferClose` to route traffic to nearby endpoints, reducing latency and cross-zone costs. + trafficDistribution: ~ networkPolicy: @@ -2112,6 +2171,8 @@ kyverno: serviceMonitor: # -- Create a `ServiceMonitor` to collect Prometheus metrics. enabled: false + # -- Additional annotations + additionalAnnotations: {} # -- Additional labels additionalLabels: {} # -- (string) Override namespace diff --git a/charts/velero/Chart.lock b/charts/velero/Chart.lock index b32a7e61..e5123722 100644 --- a/charts/velero/Chart.lock +++ b/charts/velero/Chart.lock @@ -1,7 +1,7 @@ dependencies: - name: velero repository: https://vmware-tanzu.github.io/helm-charts - version: 9.1.2 + version: 11.2.0 - name: gcp-workload-identity repository: https://edixos.github.io/ekp-helm version: 0.1.1 @@ -14,5 +14,5 @@ dependencies: - name: gcp-bucket repository: https://edixos.github.io/ekp-helm version: 0.1.0 -digest: sha256:56dafcc28b5517504b03be7a9549166c131b26251d03d0d55a63954e2c5bf30a -generated: "2025-05-14T10:23:09.920610947Z" +digest: sha256:27b6615d6bba5872fe5bce97c1973e6aa1e89663711f7a648b94263768d9e772 +generated: "2025-11-26T10:26:36.080943765Z" diff --git a/charts/velero/Chart.yaml b/charts/velero/Chart.yaml index 2bca7d40..1efd1fb8 100644 --- a/charts/velero/Chart.yaml +++ b/charts/velero/Chart.yaml @@ -2,11 +2,11 @@ apiVersion: v2 name: velero description: A Helm chart for velero type: application -version: 0.1.4 +version: 0.1.5 appVersion: "1.15.2" dependencies: - name: velero - version: 9.1.2 + version: 11.2.0 repository: "https://vmware-tanzu.github.io/helm-charts" - name: gcp-workload-identity version: 0.1.1 diff --git a/charts/velero/README.md b/charts/velero/README.md index c13ca208..ac68d133 100644 --- a/charts/velero/README.md +++ b/charts/velero/README.md @@ -1,6 +1,6 @@ # velero -![Version: 0.1.4](https://img.shields.io/badge/Version-0.1.4-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 1.15.2](https://img.shields.io/badge/AppVersion-1.15.2-informational?style=flat-square) +![Version: 0.1.5](https://img.shields.io/badge/Version-0.1.5-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 1.15.2](https://img.shields.io/badge/AppVersion-1.15.2-informational?style=flat-square) ## Prerequisites @@ -15,7 +15,7 @@ | https://edixos.github.io/ekp-helm | iamCustomRole(gcp-iam-custom-role) | 0.1.0 | | https://edixos.github.io/ekp-helm | iamPolicyMembers(gcp-iam-policy-members) | 0.1.2 | | https://edixos.github.io/ekp-helm | workloadIdentity(gcp-workload-identity) | 0.1.1 | -| https://vmware-tanzu.github.io/helm-charts | velero | 9.1.2 | +| https://vmware-tanzu.github.io/helm-charts | velero | 11.2.0 | ## Maintainers @@ -53,20 +53,21 @@ A Helm chart for velero | velero.configMaps | 
object | `{}` | | | velero.configuration.backupStorageLocation[0].accessMode | string | `"ReadWrite"` | | | velero.configuration.backupStorageLocation[0].annotations | object | `{}` | | -| velero.configuration.backupStorageLocation[0].bucket | string | `nil` | | +| velero.configuration.backupStorageLocation[0].bucket | string | `""` | | | velero.configuration.backupStorageLocation[0].caCert | string | `nil` | | | velero.configuration.backupStorageLocation[0].config | object | `{}` | | | velero.configuration.backupStorageLocation[0].credential.key | string | `nil` | | | velero.configuration.backupStorageLocation[0].credential.name | string | `nil` | | -| velero.configuration.backupStorageLocation[0].default | string | `nil` | | +| velero.configuration.backupStorageLocation[0].default | bool | `false` | | | velero.configuration.backupStorageLocation[0].name | string | `nil` | | | velero.configuration.backupStorageLocation[0].prefix | string | `nil` | | -| velero.configuration.backupStorageLocation[0].provider | string | `nil` | | +| velero.configuration.backupStorageLocation[0].provider | string | `""` | | | velero.configuration.backupStorageLocation[0].validationFrequency | string | `nil` | | | velero.configuration.backupSyncPeriod | string | `nil` | | | velero.configuration.clientBurst | string | `nil` | | | velero.configuration.clientPageSize | string | `nil` | | | velero.configuration.clientQPS | string | `nil` | | +| velero.configuration.dataMoverPrepareTimeout | string | `nil` | | | velero.configuration.defaultBackupStorageLocation | string | `nil` | | | velero.configuration.defaultBackupTTL | string | `nil` | | | velero.configuration.defaultItemOperationTimeout | string | `nil` | | @@ -77,7 +78,7 @@ A Helm chart for velero | velero.configuration.disableControllers | string | `nil` | | | velero.configuration.disableInformerCache | bool | `false` | | | velero.configuration.extraArgs | list | `[]` | | -| velero.configuration.extraEnvVars | object | `{}` | | +| velero.configuration.extraEnvVars | list | `[]` | | | velero.configuration.features | string | `nil` | | | velero.configuration.fsBackupTimeout | string | `nil` | | | velero.configuration.garbageCollectionFrequency | string | `nil` | | @@ -88,9 +89,9 @@ A Helm chart for velero | velero.configuration.namespace | string | `nil` | | | velero.configuration.pluginDir | string | `nil` | | | velero.configuration.profilerAddress | string | `nil` | | -| velero.configuration.repositoryMaintenanceJob.latestJobsCount | int | `3` | | -| velero.configuration.repositoryMaintenanceJob.limits | string | `nil` | | -| velero.configuration.repositoryMaintenanceJob.requests | string | `nil` | | +| velero.configuration.repositoryMaintenanceJob.repositoryConfigData.global.keepLatestMaintenanceJobs | int | `3` | | +| velero.configuration.repositoryMaintenanceJob.repositoryConfigData.name | string | `"velero-repo-maintenance"` | | +| velero.configuration.repositoryMaintenanceJob.repositoryConfigData.repositories | object | `{}` | | | velero.configuration.restoreOnlyMode | string | `nil` | | | velero.configuration.restoreResourcePriorities | string | `nil` | | | velero.configuration.storeValidationFrequency | string | `nil` | | @@ -101,7 +102,7 @@ A Helm chart for velero | velero.configuration.volumeSnapshotLocation[0].credential.key | string | `nil` | | | velero.configuration.volumeSnapshotLocation[0].credential.name | string | `nil` | | | velero.configuration.volumeSnapshotLocation[0].name | string | `nil` | | -| 
+| velero.configuration.volumeSnapshotLocation[0].provider | string | `""` | |
 | velero.containerSecurityContext | object | `{}` | |
 | velero.credentials.existingSecret | string | `nil` | |
 | velero.credentials.extraEnvVars | object | `{}` | |
@@ -116,16 +117,17 @@ A Helm chart for velero
 | velero.extraVolumeMounts | list | `[]` | |
 | velero.extraVolumes | list | `[]` | |
 | velero.fullnameOverride | string | `""` | |
+| velero.hostAliases | list | `[]` | |
 | velero.image.imagePullSecrets | list | `[]` | |
 | velero.image.pullPolicy | string | `"IfNotPresent"` | |
 | velero.image.repository | string | `"velero/velero"` | |
-| velero.image.tag | string | `"v1.16.0"` | |
+| velero.image.tag | string | `"v1.17.1"` | |
 | velero.initContainers | string | `nil` | |
 | velero.kubectl.annotations | object | `{}` | |
 | velero.kubectl.containerSecurityContext | object | `{}` | |
 | velero.kubectl.extraVolumeMounts | list | `[]` | |
 | velero.kubectl.extraVolumes | list | `[]` | |
-| velero.kubectl.image.repository | string | `"docker.io/bitnami/kubectl"` | |
+| velero.kubectl.image.repository | string | `"docker.io/bitnamilegacy/kubectl"` | |
 | velero.kubectl.labels | object | `{}` | |
 | velero.kubectl.resources | object | `{}` | |
 | velero.labels | object | `{}` | |
@@ -153,7 +155,13 @@ A Helm chart for velero
 | velero.metrics.scrapeInterval | string | `"30s"` | |
 | velero.metrics.scrapeTimeout | string | `"10s"` | |
 | velero.metrics.service.annotations | object | `{}` | |
+| velero.metrics.service.externalTrafficPolicy | string | `""` | |
+| velero.metrics.service.internalTrafficPolicy | string | `""` | |
+| velero.metrics.service.ipFamilies | list | `[]` | |
+| velero.metrics.service.ipFamilyPolicy | string | `""` | |
 | velero.metrics.service.labels | object | `{}` | |
+| velero.metrics.service.nodePort | string | `nil` | |
+| velero.metrics.service.type | string | `"ClusterIP"` | |
 | velero.metrics.serviceMonitor.additionalLabels | object | `{}` | |
 | velero.metrics.serviceMonitor.annotations | object | `{}` | |
 | velero.metrics.serviceMonitor.autodetect | bool | `true` | |
@@ -166,9 +174,10 @@ A Helm chart for velero
 | velero.nodeAgent.dnsConfig | object | `{}` | |
 | velero.nodeAgent.dnsPolicy | string | `"ClusterFirst"` | |
 | velero.nodeAgent.extraArgs | list | `[]` | |
-| velero.nodeAgent.extraEnvVars | object | `{}` | |
+| velero.nodeAgent.extraEnvVars | list | `[]` | |
 | velero.nodeAgent.extraVolumeMounts | list | `[]` | |
 | velero.nodeAgent.extraVolumes | list | `[]` | |
+| velero.nodeAgent.hostAliases | list | `[]` | |
 | velero.nodeAgent.labels | object | `{}` | |
 | velero.nodeAgent.lifecycle | object | `{}` | |
 | velero.nodeAgent.nodeSelector | object | `{}` | |
@@ -177,6 +186,7 @@ A Helm chart for velero
 | velero.nodeAgent.podSecurityContext.runAsUser | int | `0` | |
 | velero.nodeAgent.podVolumePath | string | `"/var/lib/kubelet/pods"` | |
 | velero.nodeAgent.priorityClassName | string | `""` | |
+| velero.nodeAgent.resizePolicy | list | `[]` | |
 | velero.nodeAgent.resources | object | `{}` | |
 | velero.nodeAgent.runtimeClassName | string | `""` | |
 | velero.nodeAgent.tolerations | list | `[]` | |
@@ -198,6 +208,7 @@ A Helm chart for velero
 | velero.readinessProbe.periodSeconds | int | `30` | |
 | velero.readinessProbe.successThreshold | int | `1` | |
 | velero.readinessProbe.timeoutSeconds | int | `5` | |
+| velero.resizePolicy | list | `[]` | |
 | velero.resources | object | `{}` | |
 | velero.runtimeClassName | string | `""` | |
| `""` | | | velero.schedules | object | `{}` | | @@ -213,7 +224,7 @@ A Helm chart for velero | velero.tolerations | list | `[]` | | | velero.upgradeCRDs | bool | `true` | | | velero.upgradeCRDsJob.automountServiceAccountToken | bool | `true` | | -| velero.upgradeCRDsJob.extraEnvVars | object | `{}` | | +| velero.upgradeCRDsJob.extraEnvVars | list | `[]` | | | velero.upgradeCRDsJob.extraVolumeMounts | list | `[]` | | | velero.upgradeCRDsJob.extraVolumes | list | `[]` | | | velero.upgradeJobResources | object | `{}` | | @@ -244,7 +255,7 @@ spec: source: repoURL: "https://edixos.github.io/ekp-helm" - targetRevision: "0.1.4" + targetRevision: "0.1.5" chart: velero path: '' helm: diff --git a/charts/velero/charts/velero-11.2.0.tgz b/charts/velero/charts/velero-11.2.0.tgz new file mode 100644 index 00000000..fbd9452c Binary files /dev/null and b/charts/velero/charts/velero-11.2.0.tgz differ diff --git a/charts/velero/charts/velero-9.1.2.tgz b/charts/velero/charts/velero-9.1.2.tgz deleted file mode 100644 index 14de8687..00000000 Binary files a/charts/velero/charts/velero-9.1.2.tgz and /dev/null differ diff --git a/charts/velero/values.yaml b/charts/velero/values.yaml index 33574af6..a20918f6 100644 --- a/charts/velero/values.yaml +++ b/charts/velero/values.yaml @@ -17,18 +17,12 @@ velero: labels: {} # Enforce Pod Security Standards with Namespace Labels # https://kubernetes.io/docs/tasks/configure-pod-container/enforce-standards-namespace-labels/ - # - key: pod-security.kubernetes.io/enforce - # value: privileged - # - key: pod-security.kubernetes.io/enforce-version - # value: latest - # - key: pod-security.kubernetes.io/audit - # value: privileged - # - key: pod-security.kubernetes.io/audit-version - # value: latest - # - key: pod-security.kubernetes.io/warn - # value: privileged - # - key: pod-security.kubernetes.io/warn-version - # value: latest + # pod-security.kubernetes.io/enforce: privileged + # pod-security.kubernetes.io/enforce-version: latest + # pod-security.kubernetes.io/audit: privileged + # pod-security.kubernetes.io/audit-version: latest + # pod-security.kubernetes.io/warn: privileged + # pod-security.kubernetes.io/warn-version: latest ## ## End of namespace-related settings. @@ -43,7 +37,7 @@ velero: # enabling node-agent). Required. image: repository: velero/velero - tag: v1.16.0 + tag: v1.17.1 # Digest value example: sha256:d238835e151cec91c6a811fe3a89a66d3231d9f64d09e5f3c49552672d271f38. # If used, it will take precedence over the image.tag. # digest: @@ -91,6 +85,22 @@ velero: # cpu: 1000m # memory: 512Mi + # Container resize policy for the Velero deployment. + # See: https://kubernetes.io/docs/tasks/configure-pod-container/resize-container-resources/ + resizePolicy: [] + # - resourceName: cpu + # restartPolicy: NotRequired + # - resourceName: memory + # restartPolicy: RestartContainer + + # Configure hostAliases for Velero deployment. Optional + # For more information, check: https://kubernetes.io/docs/tasks/network/customize-hosts-file-for-pods/ + hostAliases: [] + # - ip: "127.0.0.1" + # hostnames: + # - "foo.local" + # - "bar.local" + # Resource requests/limits to specify for the upgradeCRDs job pod. Need to be adjusted by user accordingly. upgradeJobResources: {} # requests: @@ -104,8 +114,18 @@ velero: extraVolumes: [] # Extra volumeMounts for the Upgrade CRDs Job. Optional. extraVolumeMounts: [] - # Extra key/value pairs to be used as environment variables. Optional. - extraEnvVars: {} + # Additional values to be used as environment variables. Optional. 
@@ -104,8 +114,18 @@
     extraVolumes: []
     # Extra volumeMounts for the Upgrade CRDs Job. Optional.
     extraVolumeMounts: []
-    # Extra key/value pairs to be used as environment variables. Optional.
-    extraEnvVars: {}
+    # Additional values to be used as environment variables. Optional.
+    extraEnvVars: []
+    # Simple value
+    # - name: SIMPLE_VAR
+    #   value: "simple-value"
+
+    # FieldRef example
+    # - name: MY_POD_LABEL
+    #   valueFrom:
+    #     fieldRef:
+    #       fieldPath: metadata.labels['my_label']
+
     # Configure if API credential for Service Account is automounted.
     automountServiceAccountToken: true
     # Configure the shell cmd in case you are using custom image
@@ -120,7 +140,7 @@
   # If the value is a string then it is evaluated as a template.
   initContainers:
   #  - name: velero-plugin-for-aws
-  #    image: velero/velero-plugin-for-aws:v1.10.0
+  #    image: velero/velero-plugin-for-aws:v1.13.1
   #    imagePullPolicy: IfNotPresent
   #    volumeMounts:
   #      - mountPath: /target
@@ -230,7 +250,19 @@
     # service metdata if metrics are enabled
     service:
       annotations: {}
+      type: ClusterIP
       labels: {}
+      nodePort: null
+
+      # External/Internal traffic policy setting (Cluster, Local)
+      # https://kubernetes.io/docs/reference/networking/virtual-ips/#traffic-policies
+      externalTrafficPolicy: ""
+      internalTrafficPolicy: ""
+
+      # the IP family policy for the metrics Service to be able to configure dual-stack; see [Configure dual-stack](https://kubernetes.io/docs/concepts/services-networking/dual-stack/#services).
+      ipFamilyPolicy: ""
+      # a list of IP families for the metrics Service that should be supported, in the order in which they should be applied to ClusterIP. Can be "IPv4" and/or "IPv6".
+      ipFamilies: []

     # Pod annotations for Prometheus
     podAnnotations:
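The new `metrics.service` fields in the hunk above ship with empty defaults. A hedged sketch of how they could be set through this wrapper chart; the port and policy values are illustrative choices, not chart defaults:

```yaml
velero:
  metrics:
    service:
      type: NodePort                  # chart default stays ClusterIP
      nodePort: 30900                 # illustrative; only honoured for NodePort/LoadBalancer services
      externalTrafficPolicy: Local
      ipFamilyPolicy: PreferDualStack
      ipFamilies:
        - IPv4
        - IPv6
```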
@@ -260,11 +292,16 @@
       enabled: false
       annotations: {}
       additionalLabels: {}
-      # ServiceMonitor namespace. Default to Velero namespace.
+      # metrics.nodeAgentPodMonitor.metricRelabelings Specify Metric Relabelings to add to the scrape endpoint
+      # ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#relabelconfig
+      # metricRelabelings: []
+      # metrics.nodeAgentPodMonitor.relabelings [array] Prometheus relabeling rules
+      # relabelings: []
+      # PodMonitor namespace. Default to Velero namespace.
       # namespace:
-      # ServiceMonitor connection scheme. Defaults to HTTP.
+      # PodMonitor connection scheme. Defaults to HTTP.
       # scheme: ""
-      # ServiceMonitor connection tlsConfig. Defaults to {}.
+      # PodMonitor connection tlsConfig. Defaults to {}.
       # tlsConfig: {}

     prometheusRule:
@@ -276,26 +313,47 @@
       # namespace: ""
       # Rules to be deployed
       spec: []
-      # - alert: VeleroBackupPartialFailures
+      # - alert: VeleroBackupFailed
       #   annotations:
-      #     message: Velero backup {{ $labels.schedule }} has {{ $value | humanizePercentage }} partialy failed backups.
+      #     message: Velero backup {{ $labels.schedule }} has failed
       #   expr: |-
-      #     velero_backup_partial_failure_total{schedule!=""} / velero_backup_attempt_total{schedule!=""} > 0.25
+      #     velero_backup_last_status{schedule!=""} != 1
       #   for: 15m
       #   labels:
       #     severity: warning
-      # - alert: VeleroBackupFailures
+      # - alert: VeleroBackupFailing
+      #   annotations:
+      #     message: Velero backup {{ $labels.schedule }} has been failing for the last 12h
+      #   expr: |-
+      #     velero_backup_last_status{schedule!=""} != 1
+      #   for: 12h
+      #   labels:
+      #     severity: critical
+      # - alert: VeleroNoNewBackup
       #   annotations:
-      #     message: Velero backup {{ $labels.schedule }} has {{ $value | humanizePercentage }} failed backups.
+      #     message: Velero backup {{ $labels.schedule }} has not run successfully in the last 25h
       #   expr: |-
-      #     velero_backup_failure_total{schedule!=""} / velero_backup_attempt_total{schedule!=""} > 0.25
+      #     (
+      #       (time() - velero_backup_last_successful_timestamp{schedule!=""}) >bool (25 * 3600)
+      #       or
+      #       absent(velero_backup_last_successful_timestamp{schedule!=""})
+      #     ) == 1
+      #   for: 1h
+      #   labels:
+      #     severity: critical
+      # - alert: VeleroBackupPartialFailures
+      #   annotations:
+      #     message: Velero backup {{ $labels.schedule }} has {{ $value | humanizePercentage }} partially failed backups
+      #   expr: |-
+      #     rate(velero_backup_partial_failure_total{schedule!=""}[25m])
+      #       / rate(velero_backup_attempt_total{schedule!=""}[25m]) > 0.5
       #   for: 15m
       #   labels:
       #     severity: warning

   kubectl:
     image:
-      repository: docker.io/bitnami/kubectl
+      repository: docker.io/bitnamilegacy/kubectl
       # Digest value example: sha256:d238835e151cec91c6a811fe3a89a66d3231d9f64d09e5f3c49552672d271f38.
       # If used, it will take precedence over the kubectl.image.tag.
       # digest:
@@ -339,15 +397,15 @@
       # a backup storage location will be created with the name "default". Optional.
       - name:
         # provider is the name for the backup storage location provider.
-        provider:
+        provider: ""
         # bucket is the name of the bucket to store backups in. Required.
-        bucket:
+        bucket: ""
         # caCert defines a base64 encoded CA bundle to use when verifying TLS connections to the provider. Optional.
         caCert:
         # prefix is the directory under which all Velero data should be stored within the bucket. Optional.
         prefix:
         # default indicates this location is the default backup storage location. Optional.
-        default:
+        default: false
         # validationFrequency defines how frequently Velero should validate the object storage. Optional.
         validationFrequency:
         # accessMode determines if velero can write to this backup storage location. Optional.
@@ -383,10 +441,11 @@
     # Parameters for the VolumeSnapshotLocation(s). Configure multiple by adding other element(s) to the volumeSnapshotLocation slice.
     # See https://velero.io/docs/v1.6/api-types/volumesnapshotlocation/
     volumeSnapshotLocation:
-      # name is the name of the volume snapshot location where snapshots are being taken. Required.
+      # name is the name of the volume snapshot location where snapshots are being taken. If a name is not provided,
+      # a volume snapshot location will be created with the name "default". Optional.
       - name:
         # provider is the name for the volume snapshot provider.
-        provider:
+        provider: ""
         credential:
           # name of the secret used by this volumeSnapshotLocation.
           name:
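With `provider`, `bucket` and `default` now carrying explicit empty/false defaults, a usable location still has to be supplied by the consumer of this chart. A minimal sketch for a GCS-backed setup, in line with this chart's GCP dependencies; the bucket name and prefix are hypothetical, and the `gcp` provider assumes the velero-plugin-for-gcp init container is configured:

```yaml
velero:
  configuration:
    backupStorageLocation:
      - name: default
        provider: gcp
        bucket: my-velero-backups   # hypothetical bucket name
        prefix: prod-cluster        # hypothetical prefix
        default: true
    volumeSnapshotLocation:
      - name: default
        provider: gcp
```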
@@ -463,25 +522,70 @@
     # Comma separated list of velero feature flags. default: empty
     # features: EnableCSI
     features:
+    # Configures the timeout for provisioning the volume created from the CSI snapshot. Default: 30m
+    dataMoverPrepareTimeout:
     # Resource requests/limits to specify for the repository-maintenance job. Optional.
     # https://velero.io/docs/v1.14/repository-maintenance/#resource-limitation
     repositoryMaintenanceJob:
-      requests:
-      #   cpu: 500m
-      #   memory: 512Mi
-      limits:
-      #   cpu: 1000m
-      #   memory: 1024Mi
-      # Number of latest maintenance jobs to keep for each repository
-      latestJobsCount: 3
+      # Per-repository resource settings ConfigMap
+      # This ConfigMap allows specifying different settings for different repositories
+      # See: https://velero.io/docs/main/repository-maintenance/
+      repositoryConfigData:
+        # Name of the ConfigMap to create. If not provided, will use "velero-repo-maintenance"
+        name: "velero-repo-maintenance"
+        # Global configuration applied to all repositories
+        # This configuration is used when no specific repository configuration is found
+        # global:
+        #   podResources:
+        #     cpuRequest: "100m"
+        #     cpuLimit: "200m"
+        #     memoryRequest: "100Mi"
+        #     memoryLimit: "200Mi"
+        #   keepLatestMaintenanceJobs: 1
+        #   loadAffinity:
+        #     - nodeSelector:
+        #         matchExpressions:
+        #           - key: "cloud.google.com/machine-family"
+        #             operator: "In"
+        #             values: ["e2"]
+        #     - nodeSelector:
+        #         matchExpressions:
+        #           - key: "topology.kubernetes.io/zone"
+        #             operator: "In"
+        #             values: ["us-central1-a", "us-central1-b", "us-central1-c"]
+        #   priorityClassName: "low-priority"  # Note: priorityClassName is only supported in global configuration
+        global:
+          keepLatestMaintenanceJobs: 3
+        # Repository-specific configurations
+        # Repository keys are formed as: "{namespace}-{storageLocation}-{repositoryType}"
+        # For example: "default-default-kopia" or "prod-s3-backup-kopia"
+        # Note: priorityClassName is NOT supported in repository-specific configurations
+        # repositories:
+        #   "kibishii-default-kopia":
+        #     podResources:
+        #       cpuRequest: "200m"
+        #       cpuLimit: "400m"
+        #       memoryRequest: "200Mi"
+        #       memoryLimit: "400Mi"
+        #     keepLatestMaintenanceJobs: 2
+        repositories: {}
     # `velero server` default: velero
     namespace:
     # additional command-line arguments that will be passed to the `velero server`
     # e.g.: extraArgs: ["--foo=bar"]
     extraArgs: []
-    # additional key/value pairs to be used as environment variables such as "AWS_CLUSTER_NAME: 'yourcluster.domain.tld'"
-    extraEnvVars: {}
+    # Additional values to be used as environment variables. Optional.
+    extraEnvVars: []
+    # Simple value
+    # - name: SIMPLE_VAR
+    #   value: "simple-value"
+
+    # FieldRef example
+    # - name: MY_POD_LABEL
+    #   valueFrom:
+    #     fieldRef:
+    #       fieldPath: metadata.labels['my_label']
     # Set true for backup all pod volumes without having to apply annotation on the pod when used file system backup Default: false.
     defaultVolumesToFsBackup:
@@ -575,6 +679,13 @@
   #   limits:
   #     cpu: 1000m
   #     memory: 1024Mi
+  # Container resize policy for the node-agent daemonset.
+  # See: https://kubernetes.io/docs/tasks/configure-pod-container/resize-container-resources/
+  resizePolicy: []
+  #  - resourceName: cpu
+  #    restartPolicy: NotRequired
+  #  - resourceName: memory
+  #    restartPolicy: RestartContainer

   # Tolerations to use for the node-agent daemonset. Optional.
   tolerations: []
@@ -600,8 +711,17 @@

   # Extra volumeMounts for the node-agent daemonset. Optional.
   extraVolumeMounts: []
-  # Key/value pairs to be used as environment variables for the node-agent daemonset. Optional.
-  extraEnvVars: {}
+  # Additional values to be used as environment variables for node-agent daemonset. Optional.
+  extraEnvVars: []
+  # Simple key/value
+  # - name: SIMPLE_VAR
+  #   value: "simple-value"
+
+  # FieldRef example
+  # - name: MY_POD_LABEL
+  #   valueFrom:
+  #     fieldRef:
+  #       fieldPath: metadata.labels['my_label']

   # Additional command-line arguments that will be passed to the node-agent. Optional.
   # e.g.: extraArgs: ["--foo=bar"]
@@ -611,6 +731,14 @@
   # See: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy
   dnsPolicy: ClusterFirst
+
+  # Configure hostAliases for node-agent daemonset. Optional
+  # For more information, check: https://kubernetes.io/docs/tasks/network/customize-hosts-file-for-pods/
+  hostAliases: []
+  #  - ip: "127.0.0.1"
+  #    hostnames:
+  #      - "foo.local"
+  #      - "bar.local"
+
   # SecurityContext to use for the Velero deployment. Optional.
   # Set fsGroup for `AWS IAM Roles for Service Accounts`
   # see more informations at: https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html
@@ -671,7 +799,7 @@
   #      velero.io/plugin-config: ""
   #      velero.io/pod-volume-restore: RestoreItemAction
   #    data:
-  #      image: velero/velero-restore-helper:v1.10.2
+  #      image: velero/velero:v1.17.1
   #      cpuRequest: 200m
   #      memRequest: 128Mi
   #      cpuLimit: 200m