diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
index 4c690bc8..db2b6b9f 100644
--- a/.github/workflows/main.yml
+++ b/.github/workflows/main.yml
@@ -125,7 +125,7 @@ jobs:
- id: install-pytest-html
run: kclvm -mpip install pytest-html pytest-xdist ruamel.yaml kclvm -i http://mirrors.aliyun.com/pypi/simple/ --trusted-host mirrors.aliyun.com
- id: test
- run: kclvm -mpytest -v -n 5 hack/test_konfig.py --junitxml ./hack/report/TEST.xml --html=./hack/report/test.html
+ run: kclvm -mpytest -vv -n 5 hack/test_konfig.py --junitxml ./hack/report/TEST.xml --html=./hack/report/test.html
- id: upload-test-report
if: always()
uses: actions/upload-artifact@v2
diff --git a/Makefile b/Makefile
index d52de40a..54cca8b1 100644
--- a/Makefile
+++ b/Makefile
@@ -35,4 +35,4 @@ install-hooks: ## Install git hooks; currently mainly the pre-commit hook (commit
uninstall-hooks: ## Uninstall git hooks
@rm -rf .git/hooks/pre-commit
- @echo 'Successfully uninstall pre-commit hooks!'
\ No newline at end of file
+ @echo 'Successfully uninstalled pre-commit hooks!'
diff --git a/appops/clickhouse-operator/prod/ci-test/settings.yaml b/appops/clickhouse-operator/prod/ci-test/settings.yaml
index a1d83a13..71415894 100644
--- a/appops/clickhouse-operator/prod/ci-test/settings.yaml
+++ b/appops/clickhouse-operator/prod/ci-test/settings.yaml
@@ -1,3 +1,5 @@
kcl_options:
- # - key: __konfig_output_format__
- # value: raw
\ No newline at end of file
+ - key: app
+ value: clickhouse-operator
+ - key: env
+ value: prod
diff --git a/appops/clickhouse-operator/prod/ci-test/stdout.golden.yaml b/appops/clickhouse-operator/prod/ci-test/stdout.golden.yaml
index 34580ee7..b2c19e00 100644
--- a/appops/clickhouse-operator/prod/ci-test/stdout.golden.yaml
+++ b/appops/clickhouse-operator/prod/ci-test/stdout.golden.yaml
@@ -1,869 +1,3617 @@
-id: apps/v1:Deployment:clickhouse-operator:clickhouse-operatorprod
-type: Kubernetes
-dependsOn:
-- rbac.authorization.k8s.io/v1:ClusterRole:clickhouse-operator-kube-system
-- rbac.authorization.k8s.io/v1:ClusterRoleBinding:clickhouse-operator-kube-system
-- v1:Namespace:clickhouse-operator
-- v1:ServiceAccount:clickhouse-operator:clickhouse-operator
-- v1:ConfigMap:clickhouse-operator:etc-clickhouse-operator-files
-- v1:ConfigMap:clickhouse-operator:etc-clickhouse-operator-confd-files
-- v1:ConfigMap:clickhouse-operator:etc-clickhouse-operator-configd-files
-- v1:ConfigMap:clickhouse-operator:etc-clickhouse-operator-templatesd-files
-- v1:ConfigMap:clickhouse-operator:etc-clickhouse-operator-usersd-files
-- v1:Service:clickhouse-operator:clickhouse-operator-metrics
-attributes:
- apiVersion: apps/v1
- kind: Deployment
- metadata:
- labels:
- clickhouse.altinity.com/chop: 0.19.2
- app: clickhouse-operator
- name: clickhouse-operatorprod
- namespace: clickhouse-operator
- spec:
- replicas: 1
- selector:
- matchLabels:
- app: clickhouse-operator
- template:
- metadata:
- annotations:
- prometheus.io/port: '8888'
- prometheus.io/scrape: 'true'
+- id: apps/v1:Deployment:clickhouse-operator:clickhouse-operatorprod
+ type: Kubernetes
+ attributes:
+ apiVersion: apps/v1
+ kind: Deployment
+ metadata:
labels:
- app: clickhouse-operator
- spec:
- containers:
- - env:
- - name: OPERATOR_POD_NODE_NAME
- valueFrom:
- fieldRef:
- apiVersion: v1
- fieldPath: spec.nodeName
- - name: OPERATOR_POD_NAME
- valueFrom:
- fieldRef:
- apiVersion: v1
- fieldPath: metadata.name
- - name: OPERATOR_POD_NAMESPACE
- valueFrom:
- fieldRef:
- apiVersion: v1
- fieldPath: metadata.namespace
- - name: OPERATOR_POD_IP
- valueFrom:
- fieldRef:
- apiVersion: v1
- fieldPath: status.podIP
- - name: OPERATOR_POD_SERVICE_ACCOUNT
- valueFrom:
- fieldRef:
- apiVersion: v1
- fieldPath: spec.serviceAccountName
- - name: OPERATOR_CONTAINER_CPU_REQUEST
- valueFrom:
- resourceFieldRef:
- containerName: clickhouse-operator
- divisor: '1'
- resource: requests.cpu
- - name: OPERATOR_CONTAINER_CPU_LIMIT
- valueFrom:
- resourceFieldRef:
- containerName: clickhouse-operator
- divisor: '1'
- resource: limits.cpu
- - name: OPERATOR_CONTAINER_MEM_REQUEST
- valueFrom:
- resourceFieldRef:
- containerName: clickhouse-operator
- divisor: '1'
- resource: requests.memory
- - name: OPERATOR_CONTAINER_MEM_LIMIT
- valueFrom:
- resourceFieldRef:
- containerName: clickhouse-operator
- divisor: '1'
- resource: limits.memory
- image: altinity/clickhouse-operator:0.19.2
- name: clickhouse-operator
- volumeMounts:
- - mountPath: /etc/clickhouse-operator
- name: etc-clickhouse-operator-folder
- - mountPath: /etc/clickhouse-operator/conf.d
- name: etc-clickhouse-operator-confd-folder
- - mountPath: /etc/clickhouse-operator/config.d
- name: etc-clickhouse-operator-configd-folder
- - mountPath: /etc/clickhouse-operator/templates.d
- name: etc-clickhouse-operator-templatesd-folder
- - mountPath: /etc/clickhouse-operator/users.d
- name: etc-clickhouse-operator-usersd-folder
- - image: altinity/metrics-exporter:0.19.2
- name: metrics-exporter
- volumeMounts:
- - mountPath: /etc/clickhouse-operator
- name: etc-clickhouse-operator-folder
- - mountPath: /etc/clickhouse-operator/conf.d
- name: etc-clickhouse-operator-confd-folder
- - mountPath: /etc/clickhouse-operator/config.d
- name: etc-clickhouse-operator-configd-folder
- - mountPath: /etc/clickhouse-operator/templates.d
- name: etc-clickhouse-operator-templatesd-folder
- - mountPath: /etc/clickhouse-operator/users.d
- name: etc-clickhouse-operator-usersd-folder
- serviceAccountName: clickhouse-operator
- volumes:
- - name: etc-clickhouse-operator-folder
- configMap:
- name: etc-clickhouse-operator-files
- - name: etc-clickhouse-operator-confd-folder
- configMap:
- name: etc-clickhouse-operator-confd-files
- - name: etc-clickhouse-operator-configd-folder
- configMap:
- name: etc-clickhouse-operator-configd-files
- - name: etc-clickhouse-operator-templatesd-folder
- configMap:
- name: etc-clickhouse-operator-templatesd-files
- - name: etc-clickhouse-operator-usersd-folder
- configMap:
- name: etc-clickhouse-operator-usersd-files
----
-id: v1:Namespace:clickhouse-operator
-type: Kubernetes
-dependsOn:
-- rbac.authorization.k8s.io/v1:ClusterRole:clickhouse-operator-kube-system
-- rbac.authorization.k8s.io/v1:ClusterRoleBinding:clickhouse-operator-kube-system
-attributes:
- apiVersion: v1
- kind: Namespace
- metadata:
- name: clickhouse-operator
----
-id: v1:ConfigMap:clickhouse-operator:etc-clickhouse-operator-files
-type: Kubernetes
-dependsOn:
-- rbac.authorization.k8s.io/v1:ClusterRole:clickhouse-operator-kube-system
-- rbac.authorization.k8s.io/v1:ClusterRoleBinding:clickhouse-operator-kube-system
-- v1:Namespace:clickhouse-operator
-- v1:ServiceAccount:clickhouse-operator:clickhouse-operator
-attributes:
- apiVersion: v1
- data:
- config.yaml: |
- # IMPORTANT
- # This file is auto-generated
- # Do not edit this file - all changes would be lost
- # Edit appropriate template in the following folder:
- # deploy/builder/templates-config
- # IMPORTANT
- #
- # Template parameters available:
- # watchNamespaces
- # chUsername
- # chPassword
- # password_sha256_hex
+ app: clickhouse-operator
+ clickhouse.altinity.com/chop: 0.19.2
+ name: clickhouse-operatorprod
+ namespace: clickhouse-operator
+ spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: clickhouse-operator
+ template:
+ metadata:
+ annotations:
+ prometheus.io/port: "8888"
+ prometheus.io/scrape: "true"
+ labels:
+ app: clickhouse-operator
+ spec:
+ containers:
+ - env:
+ - name: OPERATOR_POD_NODE_NAME
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: spec.nodeName
+ - name: OPERATOR_POD_NAME
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: metadata.name
+ - name: OPERATOR_POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: metadata.namespace
+ - name: OPERATOR_POD_IP
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: status.podIP
+ - name: OPERATOR_POD_SERVICE_ACCOUNT
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: spec.serviceAccountName
+ - name: OPERATOR_CONTAINER_CPU_REQUEST
+ valueFrom:
+ resourceFieldRef:
+ containerName: clickhouse-operator
+ divisor: "1"
+ resource: requests.cpu
+ - name: OPERATOR_CONTAINER_CPU_LIMIT
+ valueFrom:
+ resourceFieldRef:
+ containerName: clickhouse-operator
+ divisor: "1"
+ resource: limits.cpu
+ - name: OPERATOR_CONTAINER_MEM_REQUEST
+ valueFrom:
+ resourceFieldRef:
+ containerName: clickhouse-operator
+ divisor: "1"
+ resource: requests.memory
+ - name: OPERATOR_CONTAINER_MEM_LIMIT
+ valueFrom:
+ resourceFieldRef:
+ containerName: clickhouse-operator
+ divisor: "1"
+ resource: limits.memory
+ image: altinity/clickhouse-operator:0.19.2
+ name: clickhouse-operator
+ volumeMounts:
+ - mountPath: /etc/clickhouse-operator
+ name: etc-clickhouse-operator-folder
+ - mountPath: /etc/clickhouse-operator/conf.d
+ name: etc-clickhouse-operator-confd-folder
+ - mountPath: /etc/clickhouse-operator/config.d
+ name: etc-clickhouse-operator-configd-folder
+ - mountPath: /etc/clickhouse-operator/templates.d
+ name: etc-clickhouse-operator-templatesd-folder
+ - mountPath: /etc/clickhouse-operator/users.d
+ name: etc-clickhouse-operator-usersd-folder
+ - image: altinity/metrics-exporter:0.19.2
+ name: metrics-exporter
+ volumeMounts:
+ - mountPath: /etc/clickhouse-operator
+ name: etc-clickhouse-operator-folder
+ - mountPath: /etc/clickhouse-operator/conf.d
+ name: etc-clickhouse-operator-confd-folder
+ - mountPath: /etc/clickhouse-operator/config.d
+ name: etc-clickhouse-operator-configd-folder
+ - mountPath: /etc/clickhouse-operator/templates.d
+ name: etc-clickhouse-operator-templatesd-folder
+ - mountPath: /etc/clickhouse-operator/users.d
+ name: etc-clickhouse-operator-usersd-folder
+ serviceAccountName: clickhouse-operator
+ volumes:
+ - configMap:
+ name: etc-clickhouse-operator-files
+ name: etc-clickhouse-operator-folder
+ - configMap:
+ name: etc-clickhouse-operator-confd-files
+ name: etc-clickhouse-operator-confd-folder
+ - configMap:
+ name: etc-clickhouse-operator-configd-files
+ name: etc-clickhouse-operator-configd-folder
+ - configMap:
+ name: etc-clickhouse-operator-templatesd-files
+ name: etc-clickhouse-operator-templatesd-folder
+ - configMap:
+ name: etc-clickhouse-operator-usersd-files
+ name: etc-clickhouse-operator-usersd-folder
+ dependsOn:
+ - rbac.authorization.k8s.io/v1:ClusterRole:clickhouse-operator-kube-system
+ - rbac.authorization.k8s.io/v1:ClusterRoleBinding:clickhouse-operator-kube-system
+ - v1:Namespace:clickhouse-operator
+ - v1:ServiceAccount:clickhouse-operator:clickhouse-operator
+ - v1:ConfigMap:clickhouse-operator:etc-clickhouse-operator-files
+ - v1:ConfigMap:clickhouse-operator:etc-clickhouse-operator-confd-files
+ - v1:ConfigMap:clickhouse-operator:etc-clickhouse-operator-configd-files
+ - v1:ConfigMap:clickhouse-operator:etc-clickhouse-operator-templatesd-files
+ - v1:ConfigMap:clickhouse-operator:etc-clickhouse-operator-usersd-files
+ - v1:Service:clickhouse-operator:clickhouse-operator-metrics
+- id: v1:Namespace:clickhouse-operator
+ type: Kubernetes
+ attributes:
+ apiVersion: v1
+ kind: Namespace
+ metadata:
+ name: clickhouse-operator
+ dependsOn:
+ - rbac.authorization.k8s.io/v1:ClusterRole:clickhouse-operator-kube-system
+ - rbac.authorization.k8s.io/v1:ClusterRoleBinding:clickhouse-operator-kube-system
+- id: v1:ConfigMap:clickhouse-operator:etc-clickhouse-operator-files
+ type: Kubernetes
+ attributes:
+ apiVersion: v1
+ data:
+ config.yaml: |
+ # IMPORTANT
+ # This file is auto-generated
+ # Do not edit this file - all changes would be lost
+ # Edit appropriate template in the following folder:
+ # deploy/builder/templates-config
+ # IMPORTANT
+ #
+ # Template parameters available:
+ # watchNamespaces
+ # chUsername
+ # chPassword
+ # password_sha256_hex
- ################################################
- ##
- ## Watch Section
- ##
- ################################################
- watch:
- # List of namespaces where clickhouse-operator watches for events.
- # Concurrently running operators should watch on different namespaces
- #namespaces: ["dev", "test"]
- namespaces: []
+ ################################################
+ ##
+ ## Watch Section
+ ##
+ ################################################
+ watch:
+ # List of namespaces where clickhouse-operator watches for events.
+ # Concurrently running operators should watch on different namespaces
+ #namespaces: ["dev", "test"]
+ namespaces: []
- clickhouse:
- configuration:
- ################################################
- ##
- ## Configuration Files Section
- ##
- ################################################
- file:
- path:
- # Path to the folder where ClickHouse configuration files common for all instances within a CHI are located.
- common: config.d
- # Path to the folder where ClickHouse configuration files unique for each instance (host) within a CHI are located.
- host: conf.d
- # Path to the folder where ClickHouse configuration files with users settings are located.
- # Files are common for all instances within a CHI.
- user: users.d
- ################################################
- ##
- ## Configuration Users Section
- ##
- ################################################
- user:
- default:
- # Default values for ClickHouse user configuration
- # 1. user/profile - string
- # 2. user/quota - string
- # 3. user/networks/ip - multiple strings
- # 4. user/password - string
- profile: default
- quota: default
- networksIP:
- - "::1"
- - "127.0.0.1"
- password: "default"
- ################################################
- ##
- ## Configuration Network Section
- ##
- ################################################
- network:
- # Default host_regexp to limit network connectivity from outside
- hostRegexpTemplate: "(chi-{chi}-[^.]+\d+-\d+|clickhouse\-{chi})\.{namespace}\.svc\.cluster\.local$"
- ################################################
- ##
- ## Access to ClickHouse instances
- ##
- ################################################
- access:
- # ClickHouse credentials (username, password and port) to be used by operator to connect to ClickHouse instances
- # for:
- # 1. Metrics requests
- # 2. Schema maintenance
- # 3. DROP DNS CACHE
- # User with such credentials can be specified in additional ClickHouse .xml config files,
- # located in `chUsersConfigsPath` folder
- username: "clickhouse_operator"
- password: "clickhouse_operator_password"
- secret:
- # Location of k8s Secret with username and password to be used by operator to connect to ClickHouse instances
- # Can be used instead of explicitly specified username and password
- namespace: ""
- name: ""
- # Port where to connect to ClickHouse instances to
- port: 8123
+ clickhouse:
+ configuration:
+ ################################################
+ ##
+ ## Configuration Files Section
+ ##
+ ################################################
+ file:
+ path:
+ # Path to the folder where ClickHouse configuration files common for all instances within a CHI are located.
+ common: config.d
+ # Path to the folder where ClickHouse configuration files unique for each instance (host) within a CHI are located.
+ host: conf.d
+ # Path to the folder where ClickHouse configuration files with users settings are located.
+ # Files are common for all instances within a CHI.
+ user: users.d
+ ################################################
+ ##
+ ## Configuration Users Section
+ ##
+ ################################################
+ user:
+ default:
+ # Default values for ClickHouse user configuration
+ # 1. user/profile - string
+ # 2. user/quota - string
+ # 3. user/networks/ip - multiple strings
+ # 4. user/password - string
+ profile: default
+ quota: default
+ networksIP:
+ - "::1"
+ - "127.0.0.1"
+ password: "default"
+ ################################################
+ ##
+ ## Configuration Network Section
+ ##
+ ################################################
+ network:
+ # Default host_regexp to limit network connectivity from outside
+ hostRegexpTemplate: "(chi-{chi}-[^.]+\d+-\d+|clickhouse\-{chi})\.{namespace}\.svc\.cluster\.local$"
+ ################################################
+ ##
+ ## Access to ClickHouse instances
+ ##
+ ################################################
+ access:
+ # ClickHouse credentials (username, password and port) to be used by operator to connect to ClickHouse instances
+ # for:
+ # 1. Metrics requests
+ # 2. Schema maintenance
+ # 3. DROP DNS CACHE
+ # User with such credentials can be specified in additional ClickHouse .xml config files,
+ # located in `chUsersConfigsPath` folder
+ username: "clickhouse_operator"
+ password: "clickhouse_operator_password"
+ secret:
+ # Location of k8s Secret with username and password to be used by operator to connect to ClickHouse instances
+ # Can be used instead of explicitly specified username and password
+ namespace: ""
+ name: ""
+ # Port where to connect to ClickHouse instances to
+ port: 8123
- ################################################
- ##
- ## Templates Section
- ##
- ################################################
- template:
- chi:
- # Path to the folder where ClickHouseInstallation .yaml manifests are located.
- # Manifests are applied in sorted alpha-numeric order.
- path: templates.d
+ ################################################
+ ##
+ ## Templates Section
+ ##
+ ################################################
+ template:
+ chi:
+ # Path to the folder where ClickHouseInstallation .yaml manifests are located.
+ # Manifests are applied in sorted alpha-numeric order.
+ path: templates.d
- ################################################
- ##
- ## Reconcile Section
- ##
- ################################################
- reconcile:
- runtime:
- # Max number of concurrent reconciles in progress
- threadsNumber: 10
+ ################################################
+ ##
+ ## Reconcile Section
+ ##
+ ################################################
+ reconcile:
+ runtime:
+ # Max number of concurrent reconciles in progress
+ threadsNumber: 10
- statefulSet:
- create:
- # What to do in case created StatefulSet is not in Ready after `statefulSetUpdateTimeout` seconds
- # Possible options:
- # 1. abort - do nothing, just break the process and wait for admin
- # 2. delete - delete newly created problematic StatefulSet
- # 3. ignore - ignore error, pretend nothing happened and move on to the next StatefulSet
- onFailure: ignore
+ statefulSet:
+ create:
+ # What to do in case created StatefulSet is not in Ready after `statefulSetUpdateTimeout` seconds
+ # Possible options:
+ # 1. abort - do nothing, just break the process and wait for admin
+ # 2. delete - delete newly created problematic StatefulSet
+ # 3. ignore - ignore error, pretend nothing happened and move on to the next StatefulSet
+ onFailure: ignore
- update:
- # How many seconds to wait for created/updated StatefulSet to be Ready
- timeout: 300
- # How many seconds to wait between checks for created/updated StatefulSet status
- pollInterval: 5
- # What to do in case updated StatefulSet is not in Ready after `statefulSetUpdateTimeout` seconds
- # Possible options:
- # 1. abort - do nothing, just break the process and wait for admin
- # 2. rollback - delete Pod and rollback StatefulSet to previous Generation.
- # Pod would be recreated by StatefulSet based on rollback-ed configuration
- # 3. ignore - ignore error, pretend nothing happened and move on to the next StatefulSet
- onFailure: rollback
+ update:
+ # How many seconds to wait for created/updated StatefulSet to be Ready
+ timeout: 300
+ # How many seconds to wait between checks for created/updated StatefulSet status
+ pollInterval: 5
+ # What to do in case updated StatefulSet is not in Ready after `statefulSetUpdateTimeout` seconds
+ # Possible options:
+ # 1. abort - do nothing, just break the process and wait for admin
+ # 2. rollback - delete Pod and rollback StatefulSet to previous Generation.
+ # Pod would be recreated by StatefulSet based on rollback-ed configuration
+ # 3. ignore - ignore error, pretend nothing happened and move on to the next StatefulSet
+ onFailure: rollback
- host:
- # Whether reconciler should wait for host:
- # to be excluded from cluster OR
- # to be included into cluster
- # respectfully
- wait:
- exclude: true
- include: false
+ host:
+ # Whether reconciler should wait for host:
+ # to be excluded from cluster OR
+ # to be included into cluster
+ # respectfully
+ wait:
+ exclude: true
+ include: false
- ################################################
- ##
- ## Annotations management
- ##
- ################################################
- annotation:
- # Applied when:
- # 1. Propagating annotations from the CHI's `metadata.annotations` to child objects' `metadata.annotations`,
- # 2. Propagating annotations from the CHI Template's `metadata.annotations` to CHI's `metadata.annotations`,
- # Include annotations from the following list:
- # Applied only when not empty. Empty list means "include all, no selection"
- include: []
- # Exclude annotations from the following list:
- exclude: []
+ ################################################
+ ##
+ ## Annotations management
+ ##
+ ################################################
+ annotation:
+ # Applied when:
+ # 1. Propagating annotations from the CHI's `metadata.annotations` to child objects' `metadata.annotations`,
+ # 2. Propagating annotations from the CHI Template's `metadata.annotations` to CHI's `metadata.annotations`,
+ # Include annotations from the following list:
+ # Applied only when not empty. Empty list means "include all, no selection"
+ include: []
+ # Exclude annotations from the following list:
+ exclude: []
- ################################################
- ##
- ## Labels management
- ##
- ################################################
- label:
- # Applied when:
- # 1. Propagating labels from the CHI's `metadata.labels` to child objects' `metadata.labels`,
- # 2. Propagating labels from the CHI Template's `metadata.labels` to CHI's `metadata.labels`,
- # Include labels from the following list:
- # Applied only when not empty. Empty list means "include all, no selection"
- include: []
- # Exclude labels from the following list:
- exclude: []
- # Whether to append *Scope* labels to StatefulSet and Pod.
- # Full list of available *scope* labels check in labeler.go
- # LabelShardScopeIndex
- # LabelReplicaScopeIndex
- # LabelCHIScopeIndex
- # LabelCHIScopeCycleSize
- # LabelCHIScopeCycleIndex
- # LabelCHIScopeCycleOffset
- # LabelClusterScopeIndex
- # LabelClusterScopeCycleSize
- # LabelClusterScopeCycleIndex
- # LabelClusterScopeCycleOffset
- appendScope: "no"
+ ################################################
+ ##
+ ## Labels management
+ ##
+ ################################################
+ label:
+ # Applied when:
+ # 1. Propagating labels from the CHI's `metadata.labels` to child objects' `metadata.labels`,
+ # 2. Propagating labels from the CHI Template's `metadata.labels` to CHI's `metadata.labels`,
+ # Include labels from the following list:
+ # Applied only when not empty. Empty list means "include all, no selection"
+ include: []
+ # Exclude labels from the following list:
+ exclude: []
+ # Whether to append *Scope* labels to StatefulSet and Pod.
+ # Full list of available *scope* labels check in labeler.go
+ # LabelShardScopeIndex
+ # LabelReplicaScopeIndex
+ # LabelCHIScopeIndex
+ # LabelCHIScopeCycleSize
+ # LabelCHIScopeCycleIndex
+ # LabelCHIScopeCycleOffset
+ # LabelClusterScopeIndex
+ # LabelClusterScopeCycleSize
+ # LabelClusterScopeCycleIndex
+ # LabelClusterScopeCycleOffset
+ appendScope: "no"
- ################################################
- ##
- ## StatefulSet management
- ##
- ################################################
- statefulSet:
- revisionHistoryLimit: 0
+ ################################################
+ ##
+ ## StatefulSet management
+ ##
+ ################################################
+ statefulSet:
+ revisionHistoryLimit: 0
- ################################################
- ##
- ## Pod management
- ##
- ################################################
- pod:
- # Grace period for Pod termination.
- # How many seconds to wait between sending
- # SIGTERM and SIGKILL during Pod termination process.
- # Increase this number is case of slow shutdown.
- terminationGracePeriod: 30
+ ################################################
+ ##
+ ## Pod management
+ ##
+ ################################################
+ pod:
+ # Grace period for Pod termination.
+ # How many seconds to wait between sending
+ # SIGTERM and SIGKILL during Pod termination process.
+ # Increase this number is case of slow shutdown.
+ terminationGracePeriod: 30
- ################################################
- ##
- ## Log parameters
- ##
- ################################################
- logger:
- logtostderr: "true"
- alsologtostderr: "false"
- v: "1"
- stderrthreshold: ""
- vmodule: ""
- log_backtrace_at: ""
- kind: ConfigMap
- metadata:
- labels:
- clickhouse.altinity.com/chop: 0.19.2
- app: clickhouse-operator
- name: etc-clickhouse-operator-files
- namespace: clickhouse-operator
----
-id: v1:ConfigMap:clickhouse-operator:etc-clickhouse-operator-confd-files
-type: Kubernetes
-dependsOn:
-- rbac.authorization.k8s.io/v1:ClusterRole:clickhouse-operator-kube-system
-- rbac.authorization.k8s.io/v1:ClusterRoleBinding:clickhouse-operator-kube-system
-- v1:Namespace:clickhouse-operator
-- v1:ServiceAccount:clickhouse-operator:clickhouse-operator
-attributes:
- apiVersion: v1
- kind: ConfigMap
- metadata:
- labels:
- clickhouse.altinity.com/chop: 0.19.2
- app: clickhouse-operator
- name: etc-clickhouse-operator-confd-files
- namespace: clickhouse-operator
----
-id: v1:ConfigMap:clickhouse-operator:etc-clickhouse-operator-configd-files
-type: Kubernetes
-dependsOn:
-- rbac.authorization.k8s.io/v1:ClusterRole:clickhouse-operator-kube-system
-- rbac.authorization.k8s.io/v1:ClusterRoleBinding:clickhouse-operator-kube-system
-- v1:Namespace:clickhouse-operator
-- v1:ServiceAccount:clickhouse-operator:clickhouse-operator
-attributes:
- apiVersion: v1
- data:
- 01-clickhouse-01-listen.xml: |
-
-
-
-
-
-
-
-
- ::
- 0.0.0.0
- 1
-
- 01-clickhouse-02-logger.xml: |
-
-
-
-
-
-
-
-
-
- debug
- /var/log/clickhouse-server/clickhouse-server.log
- /var/log/clickhouse-server/clickhouse-server.err.log
- 1000M
- 10
-
- 1
-
-
- 01-clickhouse-03-query_log.xml: |
-
-
-
-
-
-
-
-
- system
-
- Engine = MergeTree PARTITION BY event_date ORDER BY event_time TTL event_date + interval 30 day
- 7500
-
-
-
- 01-clickhouse-04-part_log.xml: |
-
-
-
-
-
-
-
-
- system
-
- Engine = MergeTree PARTITION BY event_date ORDER BY event_time TTL event_date + interval 30 day
- 7500
-
-
- kind: ConfigMap
- metadata:
- labels:
- clickhouse.altinity.com/chop: 0.19.2
- app: clickhouse-operator
- name: etc-clickhouse-operator-configd-files
- namespace: clickhouse-operator
----
-id: v1:ConfigMap:clickhouse-operator:etc-clickhouse-operator-templatesd-files
-type: Kubernetes
-dependsOn:
-- rbac.authorization.k8s.io/v1:ClusterRole:clickhouse-operator-kube-system
-- rbac.authorization.k8s.io/v1:ClusterRoleBinding:clickhouse-operator-kube-system
-- v1:Namespace:clickhouse-operator
-- v1:ServiceAccount:clickhouse-operator:clickhouse-operator
-attributes:
- apiVersion: v1
- data:
- 001-templates.json.example: |
- {
- "apiVersion": "clickhouse.altinity.com/v1",
- "kind": "ClickHouseInstallationTemplate",
- "metadata": {
- "name": "01-default-volumeclaimtemplate"
- },
- "spec": {
- "templates": {
- "volumeClaimTemplates": [
- {
- "name": "chi-default-volume-claim-template",
- "spec": {
- "accessModes": [
- "ReadWriteOnce"
- ],
- "resources": {
- "requests": {
- "storage": "2Gi"
- }
- }
- }
- }
- ],
- "podTemplates": [
- {
- "name": "chi-default-oneperhost-pod-template",
- "distribution": "OnePerHost",
- "spec": {
- "containers" : [
+ ################################################
+ ##
+ ## Log parameters
+ ##
+ ################################################
+ logger:
+ logtostderr: "true"
+ alsologtostderr: "false"
+ v: "1"
+ stderrthreshold: ""
+ vmodule: ""
+ log_backtrace_at: ""
+ kind: ConfigMap
+ metadata:
+ labels:
+ app: clickhouse-operator
+ clickhouse.altinity.com/chop: 0.19.2
+ name: etc-clickhouse-operator-files
+ namespace: clickhouse-operator
+ dependsOn:
+ - rbac.authorization.k8s.io/v1:ClusterRole:clickhouse-operator-kube-system
+ - rbac.authorization.k8s.io/v1:ClusterRoleBinding:clickhouse-operator-kube-system
+ - v1:Namespace:clickhouse-operator
+ - v1:ServiceAccount:clickhouse-operator:clickhouse-operator
+- id: v1:ConfigMap:clickhouse-operator:etc-clickhouse-operator-confd-files
+ type: Kubernetes
+ attributes:
+ apiVersion: v1
+ kind: ConfigMap
+ metadata:
+ labels:
+ app: clickhouse-operator
+ clickhouse.altinity.com/chop: 0.19.2
+ name: etc-clickhouse-operator-confd-files
+ namespace: clickhouse-operator
+ dependsOn:
+ - rbac.authorization.k8s.io/v1:ClusterRole:clickhouse-operator-kube-system
+ - rbac.authorization.k8s.io/v1:ClusterRoleBinding:clickhouse-operator-kube-system
+ - v1:Namespace:clickhouse-operator
+ - v1:ServiceAccount:clickhouse-operator:clickhouse-operator
+- id: v1:ConfigMap:clickhouse-operator:etc-clickhouse-operator-configd-files
+ type: Kubernetes
+ attributes:
+ apiVersion: v1
+ data:
+ 01-clickhouse-01-listen.xml: |
+
+
+
+
+
+
+
+
+ ::
+ 0.0.0.0
+ 1
+
+ 01-clickhouse-02-logger.xml: |
+
+
+
+
+
+
+
+
+
+ debug
+ /var/log/clickhouse-server/clickhouse-server.log
+ /var/log/clickhouse-server/clickhouse-server.err.log
+ 1000M
+ 10
+
+ 1
+
+
+ 01-clickhouse-03-query_log.xml: |
+
+
+
+
+
+
+
+
+ system
+
+ Engine = MergeTree PARTITION BY event_date ORDER BY event_time TTL event_date + interval 30 day
+ 7500
+
+
+
+ 01-clickhouse-04-part_log.xml: |
+
+
+
+
+
+
+
+
+ system
+
+ Engine = MergeTree PARTITION BY event_date ORDER BY event_time TTL event_date + interval 30 day
+ 7500
+
+
+ kind: ConfigMap
+ metadata:
+ labels:
+ app: clickhouse-operator
+ clickhouse.altinity.com/chop: 0.19.2
+ name: etc-clickhouse-operator-configd-files
+ namespace: clickhouse-operator
+ dependsOn:
+ - rbac.authorization.k8s.io/v1:ClusterRole:clickhouse-operator-kube-system
+ - rbac.authorization.k8s.io/v1:ClusterRoleBinding:clickhouse-operator-kube-system
+ - v1:Namespace:clickhouse-operator
+ - v1:ServiceAccount:clickhouse-operator:clickhouse-operator
+- id: v1:ConfigMap:clickhouse-operator:etc-clickhouse-operator-templatesd-files
+ type: Kubernetes
+ attributes:
+ apiVersion: v1
+ data:
+ 001-templates.json.example: |
+ {
+ "apiVersion": "clickhouse.altinity.com/v1",
+ "kind": "ClickHouseInstallationTemplate",
+ "metadata": {
+ "name": "01-default-volumeclaimtemplate"
+ },
+ "spec": {
+ "templates": {
+ "volumeClaimTemplates": [
{
- "name": "clickhouse",
- "image": "clickhouse/clickhouse-server:22.3",
- "ports": [
- {
- "name": "http",
- "containerPort": 8123
- },
- {
- "name": "client",
- "containerPort": 9000
- },
- {
- "name": "interserver",
- "containerPort": 9009
+ "name": "chi-default-volume-claim-template",
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "2Gi"
+ }
}
- ]
+ }
+ }
+ ],
+ "podTemplates": [
+ {
+ "name": "chi-default-oneperhost-pod-template",
+ "distribution": "OnePerHost",
+ "spec": {
+ "containers" : [
+ {
+ "name": "clickhouse",
+ "image": "clickhouse/clickhouse-server:22.3",
+ "ports": [
+ {
+ "name": "http",
+ "containerPort": 8123
+ },
+ {
+ "name": "client",
+ "containerPort": 9000
+ },
+ {
+ "name": "interserver",
+ "containerPort": 9009
+ }
+ ]
+ }
+ ]
+ }
}
]
}
}
- ]
- }
- }
- }
- default-pod-template.yaml.example: |
- apiVersion: "clickhouse.altinity.com/v1"
- kind: "ClickHouseInstallationTemplate"
- metadata:
- name: "default-oneperhost-pod-template"
- spec:
- templates:
- podTemplates:
- - name: default-oneperhost-pod-template
- distribution: "OnePerHost"
- default-storage-template.yaml.example: |
- apiVersion: "clickhouse.altinity.com/v1"
- kind: "ClickHouseInstallationTemplate"
- metadata:
- name: "default-storage-template-2Gi"
- spec:
- templates:
- volumeClaimTemplates:
- - name: default-storage-template-2Gi
- spec:
- accessModes:
- - ReadWriteOnce
- resources:
- requests:
- storage: 2Gi
- readme: |
- Templates in this folder are packaged with an operator and available via 'useTemplate'
- kind: ConfigMap
- metadata:
- labels:
- clickhouse.altinity.com/chop: 0.19.2
- app: clickhouse-operator
- name: etc-clickhouse-operator-templatesd-files
- namespace: clickhouse-operator
----
-id: v1:ConfigMap:clickhouse-operator:etc-clickhouse-operator-usersd-files
-type: Kubernetes
-dependsOn:
-- rbac.authorization.k8s.io/v1:ClusterRole:clickhouse-operator-kube-system
-- rbac.authorization.k8s.io/v1:ClusterRoleBinding:clickhouse-operator-kube-system
-- v1:Namespace:clickhouse-operator
-- v1:ServiceAccount:clickhouse-operator:clickhouse-operator
-attributes:
- apiVersion: v1
- data:
- 01-clickhouse-user.xml: |
-
-
-
-
-
-
-
-
-
-
- 127.0.0.1
-
- 716b36073a90c6fe1d445ac1af85f4777c5b7a155cea359961826a030513e448
- clickhouse_operator
- default
-
-
-
-
- 0
- 1
- 10
-
-
-
- 02-clickhouse-default-profile.xml: |
-
-
-
-
-
-
-
-
-
- 1
- 1000
- 1
- 1
-
-
-
- 03-database-ordinary.xml: |
-
-
-
-
-
-
-
-
-
-
- Ordinary
-
-
-
- kind: ConfigMap
- metadata:
- labels:
- clickhouse.altinity.com/chop: 0.19.2
- app: clickhouse-operator
- name: etc-clickhouse-operator-usersd-files
- namespace: clickhouse-operator
----
-id: v1:Service:clickhouse-operator:clickhouse-operator-metrics
-type: Kubernetes
-dependsOn:
-- rbac.authorization.k8s.io/v1:ClusterRole:clickhouse-operator-kube-system
-- rbac.authorization.k8s.io/v1:ClusterRoleBinding:clickhouse-operator-kube-system
-- v1:Namespace:clickhouse-operator
-- v1:ServiceAccount:clickhouse-operator:clickhouse-operator
-- v1:ConfigMap:clickhouse-operator:etc-clickhouse-operator-files
-- v1:ConfigMap:clickhouse-operator:etc-clickhouse-operator-confd-files
-- v1:ConfigMap:clickhouse-operator:etc-clickhouse-operator-configd-files
-- v1:ConfigMap:clickhouse-operator:etc-clickhouse-operator-templatesd-files
-- v1:ConfigMap:clickhouse-operator:etc-clickhouse-operator-usersd-files
-attributes:
- apiVersion: v1
- kind: Service
- metadata:
- labels:
- clickhouse.altinity.com/chop: 0.19.2
- app: clickhouse-operator
- name: clickhouse-operator-metrics
- namespace: clickhouse-operator
- spec:
- ports:
- - name: clickhouse-operator-metrics
- port: 8888
- selector:
- app: clickhouse-operator
----
-id: v1:ServiceAccount:clickhouse-operator:clickhouse-operator
-type: Kubernetes
-dependsOn:
-- rbac.authorization.k8s.io/v1:ClusterRole:clickhouse-operator-kube-system
-- rbac.authorization.k8s.io/v1:ClusterRoleBinding:clickhouse-operator-kube-system
-- v1:Namespace:clickhouse-operator
-attributes:
- apiVersion: v1
- kind: ServiceAccount
- metadata:
- labels:
- clickhouse.altinity.com/chop: 0.19.2
- name: clickhouse-operator
- namespace: clickhouse-operator
----
-id: rbac.authorization.k8s.io/v1:ClusterRoleBinding:clickhouse-operator-kube-system
-type: Kubernetes
-dependsOn:
-- rbac.authorization.k8s.io/v1:ClusterRole:clickhouse-operator-kube-system
-attributes:
- apiVersion: rbac.authorization.k8s.io/v1
- kind: ClusterRoleBinding
- subjects:
- - kind: ServiceAccount
- name: clickhouse-operator
- namespace: kube-system
- metadata:
- labels:
- clickhouse.altinity.com/chop: 0.19.2
- name: clickhouse-operator-kube-system
- roleRef:
- apiGroup: rbac.authorization.k8s.io
+ }
+ default-pod-template.yaml.example: |
+ apiVersion: "clickhouse.altinity.com/v1"
+ kind: "ClickHouseInstallationTemplate"
+ metadata:
+ name: "default-oneperhost-pod-template"
+ spec:
+ templates:
+ podTemplates:
+ - name: default-oneperhost-pod-template
+ distribution: "OnePerHost"
+ default-storage-template.yaml.example: |
+ apiVersion: "clickhouse.altinity.com/v1"
+ kind: "ClickHouseInstallationTemplate"
+ metadata:
+ name: "default-storage-template-2Gi"
+ spec:
+ templates:
+ volumeClaimTemplates:
+ - name: default-storage-template-2Gi
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 2Gi
+ readme: |
+ Templates in this folder are packaged with an operator and available via 'useTemplate'
+ kind: ConfigMap
+ metadata:
+ labels:
+ app: clickhouse-operator
+ clickhouse.altinity.com/chop: 0.19.2
+ name: etc-clickhouse-operator-templatesd-files
+ namespace: clickhouse-operator
+ dependsOn:
+ - rbac.authorization.k8s.io/v1:ClusterRole:clickhouse-operator-kube-system
+ - rbac.authorization.k8s.io/v1:ClusterRoleBinding:clickhouse-operator-kube-system
+ - v1:Namespace:clickhouse-operator
+ - v1:ServiceAccount:clickhouse-operator:clickhouse-operator
+- id: v1:ConfigMap:clickhouse-operator:etc-clickhouse-operator-usersd-files
+ type: Kubernetes
+ attributes:
+ apiVersion: v1
+ data:
+ 01-clickhouse-user.xml: |
+
+
+
+
+
+
+
+
+
+
+ 127.0.0.1
+
+ 716b36073a90c6fe1d445ac1af85f4777c5b7a155cea359961826a030513e448
+ clickhouse_operator
+ default
+
+
+
+
+ 0
+ 1
+ 10
+
+
+
+ 02-clickhouse-default-profile.xml: |
+
+
+
+
+
+
+
+
+
+ 1
+ 1000
+ 1
+ 1
+
+
+
+ 03-database-ordinary.xml: |
+
+
+
+
+
+
+
+
+
+
+ Ordinary
+
+
+
+ kind: ConfigMap
+ metadata:
+ labels:
+ app: clickhouse-operator
+ clickhouse.altinity.com/chop: 0.19.2
+ name: etc-clickhouse-operator-usersd-files
+ namespace: clickhouse-operator
+ dependsOn:
+ - rbac.authorization.k8s.io/v1:ClusterRole:clickhouse-operator-kube-system
+ - rbac.authorization.k8s.io/v1:ClusterRoleBinding:clickhouse-operator-kube-system
+ - v1:Namespace:clickhouse-operator
+ - v1:ServiceAccount:clickhouse-operator:clickhouse-operator
+- id: v1:Service:clickhouse-operator:clickhouse-operator-metrics
+ type: Kubernetes
+ attributes:
+ apiVersion: v1
+ kind: Service
+ metadata:
+ labels:
+ app: clickhouse-operator
+ clickhouse.altinity.com/chop: 0.19.2
+ name: clickhouse-operator-metrics
+ namespace: clickhouse-operator
+ spec:
+ ports:
+ - name: clickhouse-operator-metrics
+ port: 8888
+ selector:
+ app: clickhouse-operator
+ dependsOn:
+ - rbac.authorization.k8s.io/v1:ClusterRole:clickhouse-operator-kube-system
+ - rbac.authorization.k8s.io/v1:ClusterRoleBinding:clickhouse-operator-kube-system
+ - v1:Namespace:clickhouse-operator
+ - v1:ServiceAccount:clickhouse-operator:clickhouse-operator
+ - v1:ConfigMap:clickhouse-operator:etc-clickhouse-operator-files
+ - v1:ConfigMap:clickhouse-operator:etc-clickhouse-operator-confd-files
+ - v1:ConfigMap:clickhouse-operator:etc-clickhouse-operator-configd-files
+ - v1:ConfigMap:clickhouse-operator:etc-clickhouse-operator-templatesd-files
+ - v1:ConfigMap:clickhouse-operator:etc-clickhouse-operator-usersd-files
+- id: v1:ServiceAccount:clickhouse-operator:clickhouse-operator
+ type: Kubernetes
+ attributes:
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ labels:
+ clickhouse.altinity.com/chop: 0.19.2
+ name: clickhouse-operator
+ namespace: clickhouse-operator
+ dependsOn:
+ - rbac.authorization.k8s.io/v1:ClusterRole:clickhouse-operator-kube-system
+ - rbac.authorization.k8s.io/v1:ClusterRoleBinding:clickhouse-operator-kube-system
+ - v1:Namespace:clickhouse-operator
+- id: rbac.authorization.k8s.io/v1:ClusterRoleBinding:clickhouse-operator-kube-system
+ type: Kubernetes
+ attributes:
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ labels:
+ clickhouse.altinity.com/chop: 0.19.2
+ name: clickhouse-operator-kube-system
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: clickhouse-operator-kube-system
+ subjects:
+ - kind: ServiceAccount
+ name: clickhouse-operator
+ namespace: kube-system
+ dependsOn:
+ - rbac.authorization.k8s.io/v1:ClusterRole:clickhouse-operator-kube-system
+- id: rbac.authorization.k8s.io/v1:ClusterRole:clickhouse-operator-kube-system
+ type: Kubernetes
+ attributes:
+ apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
- name: clickhouse-operator-kube-system
----
-id: rbac.authorization.k8s.io/v1:ClusterRole:clickhouse-operator-kube-system
-type: Kubernetes
-attributes:
- apiVersion: rbac.authorization.k8s.io/v1
- kind: ClusterRole
- rules:
- - apiGroups:
- - ''
- resources:
- - configmaps
- - services
- verbs:
- - get
- - list
- - patch
- - update
- - watch
- - create
- - delete
- - apiGroups:
- - ''
- resources:
- - endpoints
- verbs:
- - get
- - list
- - watch
- - apiGroups:
- - ''
- resources:
- - events
- verbs:
- - create
- - apiGroups:
- - ''
- resources:
- - persistentvolumeclaims
- verbs:
- - get
- - list
- - patch
- - update
- - watch
- - delete
- - apiGroups:
- - ''
- resources:
- - persistentvolumes
- - pods
- verbs:
- - get
- - list
- - patch
- - update
- - watch
- - apiGroups:
- - apps
- resources:
- - statefulsets
- verbs:
- - get
- - list
- - patch
- - update
- - watch
- - create
- - delete
- - apiGroups:
- - apps
- resources:
- - replicasets
- verbs:
- - get
- - patch
- - update
- - delete
- - apiGroups:
- - apps
- resourceNames:
- - clickhouse-operator
- resources:
- - deployments
- verbs:
- - get
- - patch
- - update
- - delete
- - apiGroups:
- - policy
- resources:
- - poddisruptionbudgets
- verbs:
- - get
- - list
- - patch
- - update
- - watch
- - create
- - delete
- - apiGroups:
- - clickhouse.altinity.com
- resources:
- - clickhouseinstallations
- verbs:
- - get
- - patch
- - update
- - delete
- - apiGroups:
- - clickhouse.altinity.com
- resources:
- - clickhouseinstallations
- - clickhouseinstallationtemplates
- - clickhouseoperatorconfigurations
- verbs:
- - get
- - list
- - watch
- - apiGroups:
- - clickhouse.altinity.com
- resources:
- - clickhouseinstallations/finalizers
- - clickhouseinstallationtemplates/finalizers
- - clickhouseoperatorconfigurations/finalizers
- verbs:
- - update
- - apiGroups:
- - clickhouse.altinity.com
- resources:
- - clickhouseinstallations/status
- - clickhouseinstallationtemplates/status
- - clickhouseoperatorconfigurations/status
- verbs:
- - get
- - update
- - patch
- - create
- - delete
- - apiGroups:
- - ''
- resources:
- - secrets
- verbs:
- - get
- - list
- - apiGroups:
- - apiextensions.k8s.io
- resources:
- - customresourcedefinitions
- verbs:
- - get
- - list
- metadata:
- labels:
- clickhouse.altinity.com/chop: 0.19.2
- name: clickhouse-operator-kube-system
+ metadata:
+ labels:
+ clickhouse.altinity.com/chop: 0.19.2
+ name: clickhouse-operator-kube-system
+ rules:
+ - apiGroups:
+ - ""
+ resources:
+ - configmaps
+ - services
+ verbs:
+ - get
+ - list
+ - patch
+ - update
+ - watch
+ - create
+ - delete
+ - apiGroups:
+ - ""
+ resources:
+ - endpoints
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - events
+ verbs:
+ - create
+ - apiGroups:
+ - ""
+ resources:
+ - persistentvolumeclaims
+ verbs:
+ - get
+ - list
+ - patch
+ - update
+ - watch
+ - delete
+ - apiGroups:
+ - ""
+ resources:
+ - persistentvolumes
+ - pods
+ verbs:
+ - get
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - apps
+ resources:
+ - statefulsets
+ verbs:
+ - get
+ - list
+ - patch
+ - update
+ - watch
+ - create
+ - delete
+ - apiGroups:
+ - apps
+ resources:
+ - replicasets
+ verbs:
+ - get
+ - patch
+ - update
+ - delete
+ - apiGroups:
+ - apps
+ resourceNames:
+ - clickhouse-operator
+ resources:
+ - deployments
+ verbs:
+ - get
+ - patch
+ - update
+ - delete
+ - apiGroups:
+ - policy
+ resources:
+ - poddisruptionbudgets
+ verbs:
+ - get
+ - list
+ - patch
+ - update
+ - watch
+ - create
+ - delete
+ - apiGroups:
+ - clickhouse.altinity.com
+ resources:
+ - clickhouseinstallations
+ verbs:
+ - get
+ - patch
+ - update
+ - delete
+ - apiGroups:
+ - clickhouse.altinity.com
+ resources:
+ - clickhouseinstallations
+ - clickhouseinstallationtemplates
+ - clickhouseoperatorconfigurations
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - clickhouse.altinity.com
+ resources:
+ - clickhouseinstallations/finalizers
+ - clickhouseinstallationtemplates/finalizers
+ - clickhouseoperatorconfigurations/finalizers
+ verbs:
+ - update
+ - apiGroups:
+ - clickhouse.altinity.com
+ resources:
+ - clickhouseinstallations/status
+ - clickhouseinstallationtemplates/status
+ - clickhouseoperatorconfigurations/status
+ verbs:
+ - get
+ - update
+ - patch
+ - create
+ - delete
+ - apiGroups:
+ - ""
+ resources:
+ - secrets
+ verbs:
+ - get
+ - list
+ - apiGroups:
+ - apiextensions.k8s.io
+ resources:
+ - customresourcedefinitions
+ verbs:
+ - get
+ - list
+- id: apiextensions.k8s.io/v1:CustomResourceDefinition:clickhouseinstallations.clickhouse.altinity.com
+ type: Kubernetes
+ attributes:
+ apiVersion: apiextensions.k8s.io/v1
+ kind: CustomResourceDefinition
+ metadata:
+ labels:
+ clickhouse.altinity.com/chop: 0.19.2
+ name: clickhouseinstallations.clickhouse.altinity.com
+ spec:
+ group: clickhouse.altinity.com
+ names:
+ kind: ClickHouseInstallation
+ plural: clickhouseinstallations
+ shortNames:
+ - chi
+ singular: clickhouseinstallation
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - description: Operator version
+ jsonPath: .status.chop-version
+ name: version
+ priority: 1
+ type: string
+ - description: Clusters count
+ jsonPath: .status.clusters
+ name: clusters
+ priority: 0
+ type: integer
+ - description: Shards count
+ jsonPath: .status.shards
+ name: shards
+ priority: 1
+ type: integer
+ - description: Hosts count
+ jsonPath: .status.hosts
+ name: hosts
+ priority: 0
+ type: integer
+ - description: TaskID
+ jsonPath: .status.taskID
+ name: taskID
+ priority: 1
+ type: string
+ - description: CHI status
+ jsonPath: .status.status
+ name: status
+ priority: 0
+ type: string
+ - description: Updated hosts count
+ jsonPath: .status.updated
+ name: updated
+ priority: 1
+ type: integer
+ - description: Added hosts count
+ jsonPath: .status.added
+ name: added
+ priority: 1
+ type: integer
+ - description: Hosts deleted count
+ jsonPath: .status.deleted
+ name: deleted
+ priority: 1
+ type: integer
+ - description: Hosts to be deleted count
+ jsonPath: .status.delete
+ name: delete
+ priority: 1
+ type: integer
+ - description: Client access endpoint
+ jsonPath: .status.endpoint
+ name: endpoint
+ priority: 1
+ type: string
+ - description: Age of the resource
+ jsonPath: .metadata.creationTimestamp
+ name: age
+ type: date
+ name: v1
+ schema:
+ openAPIV3Schema:
+ description: define a set of Kubernetes resources (StatefulSet, PVC, Service, ConfigMap) which describe behavior one or more ClickHouse clusters
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: |
+ Specification of the desired behavior of one or more ClickHouse clusters
+ More info: https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md"
+ properties:
+ configuration:
+ description: allows configure multiple aspects and behavior for `clickhouse-server` instance and also allows describe multiple `clickhouse-server` clusters inside one `chi` resource
+ properties:
+ clusters:
+ description: |
+ describes ClickHouse clusters layout and allows change settings on cluster-level, shard-level and replica-level
+ every cluster is a set of StatefulSet, one StatefulSet contains only one Pod with `clickhouse-server`
+ all Pods will rendered in part of ClickHouse configs, mounted from ConfigMap as `/etc/clickhouse-server/config.d/chop-generated-remote_servers.xml`
+ Clusters will use for Distributed table engine, more details: https://clickhouse.tech/docs/en/engines/table-engines/special/distributed/
+ If `cluster` contains zookeeper settings (could be inherited from top `chi` level), when you can create *ReplicatedMergeTree tables
+ items:
+ properties:
+ files:
+ description: |
+ optional, allows define content of any setting file inside each `Pod` on current cluster during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+ override top-level `chi.spec.configuration.files`
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ layout:
+ description: |
+ describe current cluster layout, how much shards in cluster, how much replica in shard
+ allows override settings on each shard and replica separatelly
+ properties:
+ replicas:
+ description: optional, allows override top-level `chi.spec.configuration` and cluster-level `chi.spec.configuration.clusters` configuration for each replica and each shard relates to selected replica, use it only if you fully understand what you do
+ items:
+ properties:
+ files:
+ description: |
+ optional, allows define content of any setting file inside each `Pod` only in one replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+ override top-level `chi.spec.configuration.files` and cluster-level `chi.spec.configuration.clusters.files`, will ignore if `chi.spec.configuration.clusters.layout.shards` presents
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ name:
+ description: optional, by default replica name is generated, but you can override it and setup custom name
+ maxLength: 15
+ minLength: 1
+ pattern: ^[a-zA-Z0-9-]{0,15}$
+ type: string
+ settings:
+ description: |
+ optional, allows configure `clickhouse-server` settings inside ... tag in `Pod` only in one replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/conf.d/`
+ override top-level `chi.spec.configuration.settings`, cluster-level `chi.spec.configuration.clusters.settings` and will ignore if shard-level `chi.spec.configuration.clusters.layout.shards` present
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ shards:
+ description: optional, list of shards related to current replica, will ignore if `chi.spec.configuration.clusters.layout.shards` presents
+ items:
+ properties:
+ files:
+ description: |
+ optional, allows define content of any setting file inside each `Pod` only in one shard related to current replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+ override top-level `chi.spec.configuration.files` and cluster-level `chi.spec.configuration.clusters.files`, will ignore if `chi.spec.configuration.clusters.layout.shards` presents
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ httpPort:
+ description: |
+ optional, setup `Pod.spec.containers.ports` with name `http` for selected shard, override `chi.spec.templates.hostTemplates.spec.httpPort`
+ allows connect to `clickhouse-server` via HTTP protocol via kubernetes `Service`
+ maximum: 65535
+ minimum: 1
+ type: integer
+ interserverHTTPPort:
+ description: |
+ optional, setup `Pod.spec.containers.ports` with name `interserver` for selected shard, override `chi.spec.templates.hostTemplates.spec.interserverHTTPPort`
+ allows connect between replicas inside same shard during fetch replicated data parts HTTP protocol
+ maximum: 65535
+ minimum: 1
+ type: integer
+ name:
+ description: optional, by default shard name is generated, but you can override it and setup custom name
+ maxLength: 15
+ minLength: 1
+ pattern: ^[a-zA-Z0-9-]{0,15}$
+ type: string
+ settings:
+ description: |
+ optional, allows configure `clickhouse-server` settings inside ... tag in `Pod` only in one shard related to current replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/conf.d/`
+ override top-level `chi.spec.configuration.settings`, cluster-level `chi.spec.configuration.clusters.settings` and replica-level `chi.spec.configuration.clusters.layout.replicas.settings`
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ tcpPort:
+ description: |
+ optional, setup `Pod.spec.containers.ports` with name `tcp` for selected shard, override `chi.spec.templates.hostTemplates.spec.tcpPort`
+ allows connect to `clickhouse-server` via TCP Native protocol via kubernetes `Service`
+ maximum: 65535
+ minimum: 1
+ type: integer
+ templates:
+ description: |
+ optional, configuration of the templates names which will use for generate Kubernetes resources according to selected replica
+ override top-level `chi.spec.configuration.templates`, cluster-level `chi.spec.configuration.clusters.templates`, replica-level `chi.spec.configuration.clusters.layout.replicas.templates`
+ properties:
+ clusterServiceTemplate:
+ description: optional, fully ignores for shard-level
+ type: string
+ dataVolumeClaimTemplate:
+ description: optional, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse data directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters` only for one shard
+ type: string
+ hostTemplate:
+ description: optional, template name from chi.spec.templates.hostTemplates, which will apply to configure each `clickhouse-server` instance during render ConfigMap resources which will mount into `Pod` only for one shard
+ type: string
+ logVolumeClaimTemplate:
+ description: optional, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse log directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters` only for one shard
+ type: string
+ podTemplate:
+ description: optional, template name from chi.spec.templates.podTemplates, allows customization each `Pod` resource during render and reconcile each StatefulSet.spec resource described in `chi.spec.configuration.clusters` only for one shard
+ type: string
+ replicaServiceTemplate:
+ description: optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each replica inside each shard inside clickhouse cluster described in `chi.spec.configuration.clusters` only for one shard
+ type: string
+ serviceTemplate:
+ description: optional, fully ignores for shard-level
+ type: string
+ shardServiceTemplate:
+ description: optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover shard inside clickhouse cluster described in `chi.spec.configuration.clusters` only for one shard
+ type: string
+ volumeClaimTemplate:
+ description: DEPRECATED! VolumeClaimTemplate is deprecated in favor of DataVolumeClaimTemplate and LogVolumeClaimTemplate
+ type: string
+ type: object
+ type: object
+ type: array
+ shardsCount:
+ description: optional, count of shards related to current replica, you can override each shard behavior on low-level `chi.spec.configuration.clusters.layout.replicas.shards`
+ minimum: 1
+ type: integer
+ templates:
+ description: |
+ optional, configuration of the templates names which will use for generate Kubernetes resources according to selected replica
+ override top-level `chi.spec.configuration.templates`, cluster-level `chi.spec.configuration.clusters.templates`
+ properties:
+ clusterServiceTemplate:
+ description: optional, fully ignores for replica-level
+ type: string
+ dataVolumeClaimTemplate:
+ description: optional, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse data directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters` only for one shard
+ type: string
+ hostTemplate:
+ description: optional, template name from chi.spec.templates.hostTemplates, which will apply to configure `clickhouse-server` instance during render ConfigMap resources which will mount into `Pod` only for one replica
+ type: string
+ logVolumeClaimTemplate:
+ description: optional, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse log directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters` only for one shard
+ type: string
+ podTemplate:
+ description: optional, template name from chi.spec.templates.podTemplates, allows customization each `Pod` resource during render and reconcile each StatefulSet.spec resource described in `chi.spec.configuration.clusters` only for one replica
+ type: string
+ replicaServiceTemplate:
+ description: optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each replica inside each shard inside clickhouse cluster described in `chi.spec.configuration.clusters` only for one replica
+ type: string
+ serviceTemplate:
+ description: optional, fully ignores for replica-level
+ type: string
+ shardServiceTemplate:
+ description: optional, fully ignores for replica-level
+ type: string
+ volumeClaimTemplate:
+ description: DEPRECATED! VolumeClaimTemplate is deprecated in favor of DataVolumeClaimTemplate and LogVolumeClaimTemplate
+ type: string
+ type: object
+ type: object
+ type: array
+ replicasCount:
+ description: how many replicas in each shard of the current ClickHouse cluster will run in Kubernetes; each replica is a separate `StatefulSet` which contains only one `Pod` with a `clickhouse-server` instance, every shard contains 1 replica by default
+ type: integer
+ shards:
+ description: optional, allows override top-level `chi.spec.configuration`, cluster-level `chi.spec.configuration.clusters` settings for each shard separately, use it only if you fully understand what you do
+ items:
+ properties:
+ definitionType:
+ description: DEPRECATED - to be removed soon
+ type: string
+ files:
+ description: |
+ optional, allows define content of any setting file inside each `Pod` only in one shard during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+ override top-level `chi.spec.configuration.files` and cluster-level `chi.spec.configuration.clusters.files`
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ internalReplication:
+ description: |
+ optional, `true` by default when `chi.spec.configuration.clusters[].layout.ReplicaCount` > 1 and 0 otherwise
+ allows setup setting which will use during insert into tables with `Distributed` engine for insert only in one live replica and other replicas will download inserted data during replication,
+ will apply in inside ConfigMap which will mount in /etc/clickhouse-server/config.d/chop-generated-remote_servers.xml
+ More details: https://clickhouse.tech/docs/en/engines/table-engines/special/distributed/
+ enum:
+ - ""
+ - "0"
+ - "1"
+ - "False"
+ - "false"
+ - "True"
+ - "true"
+ - "No"
+ - "no"
+ - "Yes"
+ - "yes"
+ - "Off"
+ - "off"
+ - "On"
+ - "on"
+ - Disable
+ - disable
+ - Enable
+ - enable
+ - Disabled
+ - disabled
+ - Enabled
+ - enabled
+ type: string
+ name:
+ description: optional, by default shard name is generated, but you can override it and setup custom name
+ maxLength: 15
+ minLength: 1
+ pattern: ^[a-zA-Z0-9-]{0,15}$
+ type: string
+ replicas:
+ description: |
+ optional, allows override behavior for selected replicas from cluster-level `chi.spec.configuration.clusters` and shard-level `chi.spec.configuration.clusters.layout.shards`
+ items:
+ properties:
+ files:
+ description: |
+ optional, allows define content of any setting file inside `Pod` only in one replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+ override top-level `chi.spec.configuration.files`, cluster-level `chi.spec.configuration.clusters.files` and shard-level `chi.spec.configuration.clusters.layout.shards.files`
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ httpPort:
+ description: |
+ optional, setup `Pod.spec.containers.ports` with name `http` for selected replica, override `chi.spec.templates.hostTemplates.spec.httpPort`
+ allows connect to `clickhouse-server` via HTTP protocol via kubernetes `Service`
+ maximum: 65535
+ minimum: 1
+ type: integer
+ interserverHTTPPort:
+ description: |
+ optional, setup `Pod.spec.containers.ports` with name `interserver` for selected replica, override `chi.spec.templates.hostTemplates.spec.interserverHTTPPort`
+ allows connect between replicas inside same shard during fetch replicated data parts HTTP protocol
+ maximum: 65535
+ minimum: 1
+ type: integer
+ name:
+ description: optional, by default replica name is generated, but you can override it and setup custom name
+ maxLength: 15
+ minLength: 1
+ pattern: ^[a-zA-Z0-9-]{0,15}$
+ type: string
+ settings:
+ description: |
+ optional, allows configure `clickhouse-server` settings inside ... tag in `Pod` only in one replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/conf.d/`
+ override top-level `chi.spec.configuration.settings`, cluster-level `chi.spec.configuration.clusters.settings` and shard-level `chi.spec.configuration.clusters.layout.shards.settings`
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ tcpPort:
+ description: |
+ optional, setup `Pod.spec.containers.ports` with name `tcp` for selected replica, override `chi.spec.templates.hostTemplates.spec.tcpPort`
+ allows connect to `clickhouse-server` via TCP Native protocol via kubernetes `Service`
+ maximum: 65535
+ minimum: 1
+ type: integer
+ templates:
+ description: |
+ optional, configuration of the templates names which will use for generate Kubernetes resources according to selected replica
+ override top-level `chi.spec.configuration.templates`, cluster-level `chi.spec.configuration.clusters.templates` and shard-level `chi.spec.configuration.clusters.layout.shards.templates`
+ properties:
+ clusterServiceTemplate:
+ description: optional, fully ignores for replica-level
+ type: string
+ dataVolumeClaimTemplate:
+ description: optional, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse data directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters` only for one shard
+ type: string
+ hostTemplate:
+ description: optional, template name from chi.spec.templates.hostTemplates, which will apply to configure `clickhouse-server` instance during render ConfigMap resources which will mount into `Pod` only for one replica
+ type: string
+ logVolumeClaimTemplate:
+ description: optional, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse log directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters` only for one shard
+ type: string
+ podTemplate:
+ description: optional, template name from chi.spec.templates.podTemplates, allows customization each `Pod` resource during render and reconcile each StatefulSet.spec resource described in `chi.spec.configuration.clusters` only for one replica
+ type: string
+ replicaServiceTemplate:
+ description: optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each replica inside each shard inside clickhouse cluster described in `chi.spec.configuration.clusters` only for one replica
+ type: string
+ serviceTemplate:
+ description: optional, fully ignores for replica-level
+ type: string
+ shardServiceTemplate:
+ description: optional, fully ignores for replica-level
+ type: string
+ volumeClaimTemplate:
+ description: DEPRECATED! VolumeClaimTemplate is deprecated in favor of DataVolumeClaimTemplate and LogVolumeClaimTemplate
+ type: string
+ type: object
+ type: object
+ type: array
+ replicasCount:
+ description: |
+ optional, how many replicas in the selected shard of the selected ClickHouse cluster will run in Kubernetes; each replica is a separate `StatefulSet` which contains only one `Pod` with a `clickhouse-server` instance,
+ shard contains 1 replica by default
+ override cluster-level `chi.spec.configuration.clusters.layout.replicasCount`
+ minimum: 1
+ type: integer
+ settings:
+ description: |
+ optional, allows configure `clickhouse-server` settings inside ... tag in each `Pod` only in one shard during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/`
+ override top-level `chi.spec.configuration.settings` and cluster-level `chi.spec.configuration.clusters.settings`
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ templates:
+ description: |
+ optional, configuration of the templates names which will use for generate Kubernetes resources according to selected shard
+ override top-level `chi.spec.configuration.templates` and cluster-level `chi.spec.configuration.clusters.templates`
+ properties:
+ clusterServiceTemplate:
+ description: optional, fully ignores for shard-level
+ type: string
+ dataVolumeClaimTemplate:
+ description: optional, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse data directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters` only for one shard
+ type: string
+ hostTemplate:
+ description: optional, template name from chi.spec.templates.hostTemplates, which will apply to configure each `clickhouse-server` instance during render ConfigMap resources which will mount into `Pod` only for one shard
+ type: string
+ logVolumeClaimTemplate:
+ description: optional, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse log directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters` only for one shard
+ type: string
+ podTemplate:
+ description: optional, template name from chi.spec.templates.podTemplates, allows customization each `Pod` resource during render and reconcile each StatefulSet.spec resource described in `chi.spec.configuration.clusters` only for one shard
+ type: string
+ replicaServiceTemplate:
+ description: optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each replica inside each shard inside clickhouse cluster described in `chi.spec.configuration.clusters` only for one shard
+ type: string
+ serviceTemplate:
+ description: optional, fully ignores for shard-level
+ type: string
+ shardServiceTemplate:
+ description: optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover shard inside clickhouse cluster described in `chi.spec.configuration.clusters` only for one shard
+ type: string
+ volumeClaimTemplate:
+ description: DEPRECATED! VolumeClaimTemplate is deprecated in favor of DataVolumeClaimTemplate and LogVolumeClaimTemplate
+ type: string
+ type: object
+ weight:
+ description: |
+ optional, 1 by default, allows setup shard setting which will use during insert into tables with `Distributed` engine,
+ will apply in inside ConfigMap which will mount in /etc/clickhouse-server/config.d/chop-generated-remote_servers.xml
+ More details: https://clickhouse.tech/docs/en/engines/table-engines/special/distributed/
+ type: integer
+ type: object
+ type: array
+ shardsCount:
+ description: how many shards of the current ClickHouse cluster will run in Kubernetes; each shard contains a shared-nothing part of the data and a set of replicas, a cluster contains 1 shard by default
+ type: integer
+ type:
+ description: DEPRECATED - to be removed soon
+ type: string
+ type: object
+ name:
+ description: cluster name, used to identify the set of ClickHouse servers and widely used when generating names of related Kubernetes resources
+ maxLength: 15
+ minLength: 1
+ pattern: ^[a-zA-Z0-9-]{0,15}$
+ type: string
+ schemaPolicy:
+ description: |
+ describes how schema is propagated within replicas and shards
+ properties:
+ replica:
+ description: how schema is propagated within a replica
+ enum:
+ - None
+ - All
+ type: string
+ shard:
+ description: how schema is propagated between shards
+ enum:
+ - None
+ - All
+ - DistributedTablesOnly
+ type: string
+ type: object
+ settings:
+ description: |
+ optional, allows configure `clickhouse-server` settings inside ... tag in each `Pod` only in one cluster during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/`
+ override top-level `chi.spec.configuration.settings`
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ templates:
+ description: |
+ optional, configuration of the templates names which will use for generate Kubernetes resources according to selected cluster
+ override top-level `chi.spec.configuration.templates`
+ properties:
+ clusterServiceTemplate:
+ description: optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each clickhouse cluster described in `chi.spec.configuration.clusters` only for one cluster
+ type: string
+ dataVolumeClaimTemplate:
+ description: optional, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse data directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters` only for one cluster
+ type: string
+ hostTemplate:
+ description: optional, template name from chi.spec.templates.hostTemplates, which will apply to configure each `clickhouse-server` instance during render ConfigMap resources which will mount into `Pod` only for one cluster
+ type: string
+ logVolumeClaimTemplate:
+ description: optional, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse log directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters` only for one cluster
+ type: string
+ podTemplate:
+ description: optional, template name from chi.spec.templates.podTemplates, allows customization each `Pod` resource during render and reconcile each StatefulSet.spec resource described in `chi.spec.configuration.clusters` only for one cluster
+ type: string
+ replicaServiceTemplate:
+ description: optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each replica inside each shard inside each clickhouse cluster described in `chi.spec.configuration.clusters` only for one cluster
+ type: string
+ serviceTemplate:
+ description: optional, fully ignores for cluster-level
+ type: string
+ shardServiceTemplate:
+ description: optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each shard inside clickhouse cluster described in `chi.spec.configuration.clusters` only for one cluster
+ type: string
+ volumeClaimTemplate:
+ description: DEPRECATED! VolumeClaimTemplate is deprecated in favor of DataVolumeClaimTemplate and LogVolumeClaimTemplate
+ type: string
+ type: object
+ zookeeper:
+ description: |
+ optional, allows configure .. section in each `Pod` only in current ClickHouse cluster, during generate `ConfigMap` which will mounted in `/etc/clickhouse-server/config.d/`
+ override top-level `chi.spec.configuration.zookeeper` settings
+ properties:
+ identity:
+ description: optional access credentials string with `user:password` format used when use digest authorization in Zookeeper
+ type: string
+ nodes:
+ description: describe every available zookeeper cluster node for interaction
+ items:
+ properties:
+ host:
+ description: dns name or ip address for Zookeeper node
+ type: string
+ port:
+ description: TCP port which used to connect to Zookeeper node
+ maximum: 65535
+ minimum: 0
+ type: integer
+ type: object
+ type: array
+ operation_timeout_ms:
+ description: one operation timeout during Zookeeper transactions
+ type: integer
+ root:
+ description: optional root znode path inside zookeeper to store ClickHouse related data (replication queue or distributed DDL)
+ type: string
+ session_timeout_ms:
+ description: session timeout during connect to Zookeeper
+ type: integer
+ type: object
+ type: object
+ type: array
+ files:
+ description: |
+ allows define content of any setting file inside each `Pod` during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+ every key in this object is the file name
+ every value in this object is the file content
+ you can use `!!binary |` and base64 for binary files, see details here https://yaml.org/type/binary.html
+ each key may contain a prefix like USERS, COMMON, HOST or config.d, users.d, conf.d; wrong prefixes will be ignored, subfolders will also be ignored
+ More details: https://github.com/Altinity/clickhouse-operator/blob/master/docs/chi-examples/05-settings-05-files-nested.yaml
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ profiles:
+ description: |
+ allows configure .. section in each `Pod` during generate `ConfigMap` which will mount in `/etc/clickhouse-server/users.d/`
+ you can configure any aspect of settings profile
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings-profiles/
+ Your yaml code will convert to XML, see examples https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specconfigurationprofiles
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ quotas:
+ description: |
+ allows configure .. section in each `Pod` during generate `ConfigMap` which will mount in `/etc/clickhouse-server/users.d/`
+ you can configure any aspect of resource quotas
+ More details: https://clickhouse.tech/docs/en/operations/quotas/
+ Your yaml code will convert to XML, see examples https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specconfigurationquotas
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ settings:
+ description: |
+ allows configure `clickhouse-server` settings inside ... tag in each `Pod` during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/`
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+ Your yaml code will convert to XML, see examples https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specconfigurationsettings
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ users:
+ description: |
+ allows configure .. section in each `Pod` during generate `ConfigMap` which will mount in `/etc/clickhouse-server/users.d/`
+ you can configure password hashed, authorization restrictions, database level security row filters etc.
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings-users/
+ Your yaml code will convert to XML, see examples https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specconfigurationusers
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ zookeeper:
+ description: |
+ allows configure .. section in each `Pod` during generate `ConfigMap` which will mounted in `/etc/clickhouse-server/config.d/`
+ `clickhouse-operator` itself doesn't manage Zookeeper, please install Zookeeper separately, see examples at https://github.com/Altinity/clickhouse-operator/tree/master/deploy/zookeeper/
+ currently, zookeeper (or its clickhouse-keeper replacement) is used for *ReplicatedMergeTree table engines and for `distributed_ddl`
+ More details: https://clickhouse.tech/docs/en/operations/server-configuration-parameters/settings/#server-settings_zookeeper
+ properties:
+ identity:
+ description: optional access credentials string with `user:password` format used when use digest authorization in Zookeeper
+ type: string
+ nodes:
+ description: describe every available zookeeper cluster node for interaction
+ items:
+ properties:
+ host:
+ description: dns name or ip address for Zookeeper node
+ type: string
+ port:
+ description: TCP port which used to connect to Zookeeper node
+ maximum: 65535
+ minimum: 0
+ type: integer
+ type: object
+ type: array
+ operation_timeout_ms:
+ description: one operation timeout during Zookeeper transactions
+ type: integer
+ root:
+ description: optional root znode path inside zookeeper to store ClickHouse related data (replication queue or distributed DDL)
+ type: string
+ session_timeout_ms:
+ description: session timeout during connect to Zookeeper
+ type: integer
+ type: object
+ type: object
+ defaults:
+ description: |
+ define default behavior for whole ClickHouseInstallation, some behavior can be re-define on cluster, shard and replica level
+ More info: https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specdefaults
+ properties:
+ distributedDDL:
+ description: |
+ allows change `` settings
+ More info: https://clickhouse.tech/docs/en/operations/server-configuration-parameters/settings/#server-settings-distributed_ddl
+ properties:
+ profile:
+ description: Settings from this profile will be used to execute DDL queries
+ type: string
+ type: object
+ replicasUseFQDN:
+ description: |
+ defines whether replicas should be specified by FQDN in ``; when "no", a short hostname is used and clickhouse-server relies on the Kubernetes default DNS suffixes for proper DNS lookup
+ "yes" by default
+ enum:
+ - ""
+ - "0"
+ - "1"
+ - "False"
+ - "false"
+ - "True"
+ - "true"
+ - "No"
+ - "no"
+ - "Yes"
+ - "yes"
+ - "Off"
+ - "off"
+ - "On"
+ - "on"
+ - Disable
+ - disable
+ - Enable
+ - enable
+ - Disabled
+ - disabled
+ - Enabled
+ - enabled
+ type: string
+ templates:
+ description: optional, configuration of the templates names which will use for generate Kubernetes resources according to one or more ClickHouse clusters described in current ClickHouseInstallation (chi) resource
+ properties:
+ clusterServiceTemplate:
+ description: optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each clickhouse cluster described in `chi.spec.configuration.clusters`
+ type: string
+ dataVolumeClaimTemplate:
+ description: optional, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse data directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters`
+ type: string
+ hostTemplate:
+ description: optional, template name from chi.spec.templates.hostTemplates, which will apply to configure every `clickhouse-server` instance during render ConfigMap resources which will mount into `Pod`
+ type: string
+ logVolumeClaimTemplate:
+ description: optional, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse log directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters`
+ type: string
+ podTemplate:
+ description: optional, template name from chi.spec.templates.podTemplates, allows customization each `Pod` resource during render and reconcile each StatefulSet.spec resource described in `chi.spec.configuration.clusters`
+ type: string
+ replicaServiceTemplate:
+ description: optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each replica inside each shard inside each clickhouse cluster described in `chi.spec.configuration.clusters`
+ type: string
+ serviceTemplate:
+ description: optional, template name from chi.spec.templates.serviceTemplates, allows customization for one `Service` resource which will created by `clickhouse-operator` which cover all clusters in whole `chi` resource
+ type: string
+ shardServiceTemplate:
+ description: optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each shard inside clickhouse cluster described in `chi.spec.configuration.clusters`
+ type: string
+ volumeClaimTemplate:
+ description: DEPRECATED! VolumeClaimTemplate is deprecated in favor of DataVolumeClaimTemplate and LogVolumeClaimTemplate
+ type: string
+ type: object
+ type: object
+ namespaceDomainPattern:
+ description: custom domain suffix which will be added to the end of `Service` or `Pod` names, use it when you use a custom cluster domain in your Kubernetes cluster
+ type: string
+ reconciling:
+ description: optional, allows tuning reconciling cycle for ClickhouseInstallation from clickhouse-operator side
+ properties:
+ cleanup:
+ description: optional, define behavior for cleanup Kubernetes resources during reconcile cycle
+ properties:
+ reconcileFailedObjects:
+ description: what clickhouse-operator shall do when reconciling of Kubernetes resources fails, default behavior is `Retain`
+ properties:
+ configMap:
+ description: behavior policy for failed ConfigMap reconciling, Retain by default
+ enum:
+ - Retain
+ - Delete
+ type: string
+ pvc:
+ description: behavior policy for failed PVC reconciling, Retain by default
+ enum:
+ - Retain
+ - Delete
+ type: string
+ service:
+ description: behavior policy for failed Service reconciling, Retain by default
+ enum:
+ - Retain
+ - Delete
+ type: string
+ statefulSet:
+ description: behavior policy for failed StatefulSet reconciling, Retain by default
+ enum:
+ - Retain
+ - Delete
+ type: string
+ type: object
+ unknownObjects:
+ description: what clickhouse-operator shall do when it finds Kubernetes resources which should be managed by clickhouse-operator but do not have an `ownerReference` to any currently managed `ClickHouseInstallation` resource, default behavior is `Delete`
+ properties:
+ configMap:
+ description: behavior policy for unknown ConfigMap, Delete by default
+ enum:
+ - Retain
+ - Delete
+ type: string
+ pvc:
+ description: behavior policy for unknown PVC, Delete by default
+ enum:
+ - Retain
+ - Delete
+ type: string
+ service:
+ description: behavior policy for unknown Service, Delete by default
+ enum:
+ - Retain
+ - Delete
+ type: string
+ statefulSet:
+ description: behavior policy for unknown StatefulSet, Delete by default
+ enum:
+ - Retain
+ - Delete
+ type: string
+ type: object
+ type: object
+ configMapPropagationTimeout:
+ description: |
+ timeout in seconds that `clickhouse-operator` will wait during `ClickhouseInstallation` reconcile for an applied `ConfigMap` to be updated in the pods from the cache
+ see details: https://kubernetes.io/docs/concepts/configuration/configmap/#mounted-configmaps-are-updated-automatically
+ maximum: 3600
+ minimum: 0
+ type: integer
+ policy:
+ description: DEPRECATED
+ type: string
+ type: object
+ restart:
+ description: This is a 'soft restart' button. When set to 'RollingUpdate' the operator will restart ClickHouse pods in a graceful way. Remove it after use in order to avoid unneeded restarts
+ enum:
+ - ""
+ - RollingUpdate
+ type: string
+ stop:
+ description: |
+ Allows stopping all ClickHouse clusters described in the current chi.
+ The stop mechanism works as follows:
+ - When `stop` is `1`, `Replicas: 0` is set in each StatefulSet resource related to the current `chi`; all `Pods` and `Service` resources are destroyed, but the PVCs stay alive
+ - When `stop` is `0`, the `Pods` are created again, attach the retained PVCs, and the `Service` resources are also created again
+ enum:
+ - ""
+ - "0"
+ - "1"
+ - "False"
+ - "false"
+ - "True"
+ - "true"
+ - "No"
+ - "no"
+ - "Yes"
+ - "yes"
+ - "Off"
+ - "off"
+ - "On"
+ - "on"
+ - Disable
+ - disable
+ - Enable
+ - enable
+ - Disabled
+ - disabled
+ - Enabled
+ - enabled
+ type: string
+ taskID:
+ description: Allows defining a custom taskID for a named update and watching the status of this update execution in the .status.taskIDs field; by default every update of the chi manifest will generate a random taskID
+ type: string
+ templates:
+ description: allows defining templates which will be used to render Kubernetes resources like StatefulSet, ConfigMap, Service, PVC; by default clickhouse-operator has its own templates, but you can override them
+ properties:
+ hostTemplates:
+ description: hostTemplate is applied to generate `clickhouse-server` config files
+ items:
+ properties:
+ name:
+ description: template name, can be used to link inside top-level `chi.spec.defaults.templates.hostTemplate`, cluster-level `chi.spec.configuration.clusters.templates.hostTemplate`, shard-level `chi.spec.configuration.clusters.layout.shards.templates.hostTemplate`, replica-level `chi.spec.configuration.clusters.layout.replicas.templates.hostTemplate`
+ type: string
+ portDistribution:
+ description: defines how numeric values of named ports in `Pod.spec.containers.ports` and clickhouse-server configs will be distributed
+ items:
+ properties:
+ type:
+ description: type of distribution; when `Unspecified` (the default) all listen ports in the clickhouse-server configuration of all Pods will have the same value, when `ClusterScopeIndex` ports are incremented by an offset from the base value depending on the shard and replica index inside the cluster; in combination with `chi.spec.templates.podTemplates.spec.HostNetwork` this allows setting up a ClickHouse cluster inside Kubernetes and providing access via an external network, bypassing the Kubernetes internal network
+ enum:
+ - ""
+ - Unspecified
+ - ClusterScopeIndex
+ type: string
+ type: object
+ type: array
+ spec:
+ properties:
+ files:
+ description: |
+ optional, allows define content of any setting file inside each `Pod` where this template will apply during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ httpPort:
+ description: |
+ optional, setup `http_port` inside `clickhouse-server` settings for each Pod where current template will apply
+ if specified, should have equal value with `chi.spec.templates.podTemplates.spec.containers.ports[name=http]`
+ More info: https://clickhouse.tech/docs/en/interfaces/http/
+ maximum: 65535
+ minimum: 1
+ type: integer
+ interserverHTTPPort:
+ description: |
+ optional, setup `interserver_http_port` inside `clickhouse-server` settings for each Pod where current template will apply
+ if specified, should have equal value with `chi.spec.templates.podTemplates.spec.containers.ports[name=interserver]`
+ More info: https://clickhouse.tech/docs/en/operations/server-configuration-parameters/settings/#interserver-http-port
+ maximum: 65535
+ minimum: 1
+ type: integer
+ name:
+ description: by default the hostname is generated, but this allows defining a custom name for each `clickhouse-server`
+ maxLength: 15
+ minLength: 1
+ pattern: ^[a-zA-Z0-9-]{0,15}$
+ type: string
+ settings:
+ description: |
+ optional, allows configure `clickhouse-server` settings inside ... tag in each `Pod` where this template will apply during generate `ConfigMap` which will mount in `/etc/clickhouse-server/conf.d/`
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ tcpPort:
+ description: |
+ optional, setup `tcp_port` inside `clickhouse-server` settings for each Pod where current template will apply
+ if specified, should have equal value with `chi.spec.templates.podTemplates.spec.containers.ports[name=tcp]`
+ More info: https://clickhouse.tech/docs/en/interfaces/tcp/
+ maximum: 65535
+ minimum: 1
+ type: integer
+ templates:
+ description: be careful, this part of the CRD allows overriding a template inside a template, don't use it if you don't understand what you are doing
+ properties:
+ clusterServiceTemplate:
+ type: string
+ dataVolumeClaimTemplate:
+ type: string
+ hostTemplate:
+ type: string
+ logVolumeClaimTemplate:
+ type: string
+ podTemplate:
+ type: string
+ replicaServiceTemplate:
+ type: string
+ serviceTemplate:
+ type: string
+ shardServiceTemplate:
+ type: string
+ type: object
+ type: object
+ type: object
+ type: array
+ podTemplates:
+ description: |
+ podTemplate is used when rendering the `Pod` inside `StatefulSet.spec` and allows defining the rendered `Pod.spec`, pod scheduling distribution and pod zone
+ More information: https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#spectemplatespodtemplates
+ items:
+ properties:
+ distribution:
+ description: DEPRECATED, shortcut for `chi.spec.templates.podTemplates.spec.affinity.podAntiAffinity`
+ enum:
+ - ""
+ - Unspecified
+ - OnePerHost
+ type: string
+ generateName:
+ description: allows defining the format for the generated `Pod` name, see https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#spectemplatesservicetemplates for details about available template variables
+ type: string
+ metadata:
+ description: |
+ allows pass standard object's metadata from template to Pod
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ name:
+ description: template name, can be used to link inside top-level `chi.spec.defaults.templates.podTemplate`, cluster-level `chi.spec.configuration.clusters.templates.podTemplate`, shard-level `chi.spec.configuration.clusters.layout.shards.templates.podTemplate`, replica-level `chi.spec.configuration.clusters.layout.replicas.templates.podTemplate`
+ type: string
+ podDistribution:
+ description: defines the ClickHouse Pod distribution policy between Kubernetes Nodes inside a Shard, Replica, Namespace, CHI, or another ClickHouse cluster
+ items:
+ properties:
+ number:
+ description: defines how many ClickHouse Pods can be inside the selected scope with the selected distribution type
+ maximum: 65535
+ minimum: 0
+ type: integer
+ scope:
+ description: scope for apply each podDistribution
+ enum:
+ - ""
+ - Unspecified
+ - Shard
+ - Replica
+ - Cluster
+ - ClickHouseInstallation
+ - Namespace
+ type: string
+ topologyKey:
+ description: 'use for inter-pod affinity look to `pod.spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.topologyKey`, More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity'
+ type: string
+ type:
+ description: you can define multiple affinity policy types
+ enum:
+ - ""
+ - Unspecified
+ - ClickHouseAntiAffinity
+ - ShardAntiAffinity
+ - ReplicaAntiAffinity
+ - AnotherNamespaceAntiAffinity
+ - AnotherClickHouseInstallationAntiAffinity
+ - AnotherClusterAntiAffinity
+ - MaxNumberPerNode
+ - NamespaceAffinity
+ - ClickHouseInstallationAffinity
+ - ClusterAffinity
+ - ShardAffinity
+ - ReplicaAffinity
+ - PreviousTailAffinity
+ - CircularReplication
+ type: string
+ type: object
+ type: array
+ spec:
+ description: allows defining the whole Pod.spec inside StatefulSet.spec, see https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates for details
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ zone:
+ description: allows defining a custom zone name to separate ClickHouse `Pods` between nodes, shortcut for `chi.spec.templates.podTemplates.spec.affinity.podAntiAffinity`
+ properties:
+ key:
+ description: optional, if defined, allows select kubernetes nodes by label with `name` equal `key`
+ type: string
+ values:
+ description: optional, if defined, allows select kubernetes nodes by label with `value` in `values`
+ items:
+ type: string
+ type: array
+ type: object
+ type: object
+ type: array
+ serviceTemplates:
+ description: |
+ allows defining templates for rendering a `Service` which gets endpoints from Pods scoped at the chi-wide, cluster-wide, shard-wide or replica-wide level
+ items:
+ properties:
+ generateName:
+ description: allows defining the format for the generated `Service` name, see https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#spectemplatesservicetemplates for details about available template variables
+ type: string
+ metadata:
+ description: |
+ allows pass standard object's metadata from template to Service
+ Can be used to define Cloud-Provider-specific metadata which impacts the behavior of the Service
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ name:
+ description: |
+ template name, can be used to link inside
+ chi-level `chi.spec.defaults.templates.serviceTemplate`
+ cluster-level `chi.spec.configuration.clusters.templates.clusterServiceTemplate`
+ shard-level `chi.spec.configuration.clusters.layout.shards.templates.shardServiceTemplate`
+ replica-level `chi.spec.configuration.clusters.layout.replicas.templates.replicaServiceTemplate` or `chi.spec.configuration.clusters.layout.shards.replicas.replicaServiceTemplate`
+ type: string
+ spec:
+ description: |
+ describe behavior of generated Service
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ type: object
+ type: array
+ volumeClaimTemplates:
+ description: allows defining templates for rendering a `PVC` Kubernetes resource, which will be used inside a `Pod` to mount clickhouse `data`, clickhouse `logs` or something else
+ items:
+ properties:
+ metadata:
+ description: |
+ allows pass standard object's metadata from template to PVC
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ name:
+ description: |
+ template name, can be used to link inside
+ top-level `chi.spec.defaults.templates.dataVolumeClaimTemplate` or `chi.spec.defaults.templates.logVolumeClaimTemplate`,
+ cluster-level `chi.spec.configuration.clusters.templates.dataVolumeClaimTemplate` or `chi.spec.configuration.clusters.templates.logVolumeClaimTemplate`,
+ shard-level `chi.spec.configuration.clusters.layout.shards.templates.dataVolumeClaimTemplate` or `chi.spec.configuration.clusters.layout.shards.templates.logVolumeClaimTemplate`
+ replica-level `chi.spec.configuration.clusters.layout.replicas.templates.dataVolumeClaimTemplate` or `chi.spec.configuration.clusters.layout.replicas.templates.logVolumeClaimTemplate`
+ type: string
+ reclaimPolicy:
+ description: defines the `PVC` deletion policy when a `Pod` is deleted, `Delete` by default; when `Retain`, the `PVC` stays alive even after the `Pod` is deleted
+ enum:
+ - ""
+ - Retain
+ - Delete
+ type: string
+ spec:
+ description: |
+ allows define all aspects of `PVC` resource
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ type: object
+ type: array
+ type: object
+ templating:
+ description: optional, define policy for auto applying ClickHouseInstallationTemplate inside ClickHouseInstallation
+ properties:
+ policy:
+ description: when defined as `auto` inside a ClickhouseInstallationTemplate, it will be automatically added into every ClickHouseInstallation; `manual` is the default value
+ enum:
+ - auto
+ - manual
+ type: string
+ type: object
+ troubleshoot:
+ description: allows troubleshooting Pods in CrashLoopBackOff state; when you apply a wrong configuration, `clickhouse-server` won't start up
+ enum:
+ - ""
+ - "0"
+ - "1"
+ - "False"
+ - "false"
+ - "True"
+ - "true"
+ - "No"
+ - "no"
+ - "Yes"
+ - "yes"
+ - "Off"
+ - "off"
+ - "On"
+ - "on"
+ - Disable
+ - disable
+ - Enable
+ - enable
+ - Disabled
+ - disabled
+ - Enabled
+ - enabled
+ type: string
+ useTemplates:
+ description: list of `ClickHouseInstallationTemplate` (chit) resource names which will be merged with the current `Chi` manifest when rendering Kubernetes resources to create the related ClickHouse clusters
+ items:
+ properties:
+ name:
+ description: name of `ClickHouseInstallationTemplate` (chit) resource
+ type: string
+ namespace:
+ description: Kubernetes namespace in which to search for the `chit` resource, depending on the `watchNamespaces` setting in `clickhouse-operator`
+ type: string
+ useType:
+ description: optional, currently the only strategy is merge, and the current `chi` settings take priority over the merged `chit` template
+ enum:
+ - ""
+ - merge
+ type: string
+ type: object
+ type: array
+ type: object
+ status:
+ description: Current ClickHouseInstallation manifest status, contains many fields such as the normalized configuration, the clickhouse-operator version, the current action and all applied actions, the current taskID and all applied taskIDs, and others
+ properties:
+ action:
+ description: Action
+ type: string
+ actions:
+ description: Actions
+ items:
+ type: string
+ type: array
+ added:
+ description: Added Hosts count
+ minimum: 0
+ type: integer
+ chop-commit:
+ description: ClickHouse operator git commit SHA
+ type: string
+ chop-date:
+ description: ClickHouse operator build date
+ type: string
+ chop-ip:
+ description: IP address of the operator's pod which managed this CHI
+ type: string
+ chop-version:
+ description: ClickHouse operator version
+ type: string
+ clusters:
+ description: Clusters count
+ minimum: 0
+ type: integer
+ delete:
+ description: About to delete Hosts count
+ minimum: 0
+ type: integer
+ deleted:
+ description: Deleted Hosts count
+ minimum: 0
+ type: integer
+ endpoint:
+ description: Endpoint
+ type: string
+ error:
+ description: Last error
+ type: string
+ errors:
+ description: Errors
+ items:
+ type: string
+ type: array
+ fqdns:
+ description: Pods FQDNs
+ items:
+ type: string
+ type: array
+ generation:
+ description: Generation
+ minimum: 0
+ type: integer
+ hosts:
+ description: Hosts count
+ minimum: 0
+ type: integer
+ normalized:
+ description: Normalized CHI
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ pod-ips:
+ description: Pod IPs
+ items:
+ type: string
+ type: array
+ pods:
+ description: Pods
+ items:
+ type: string
+ type: array
+ replicas:
+ description: Replicas count
+ minimum: 0
+ type: integer
+ shards:
+ description: Shards count
+ minimum: 0
+ type: integer
+ status:
+ description: Status
+ type: string
+ taskID:
+ description: Current task id
+ type: string
+ taskIDsCompleted:
+ description: Completed task ids
+ items:
+ type: string
+ type: array
+ taskIDsStarted:
+ description: Started task ids
+ items:
+ type: string
+ type: array
+ updated:
+ description: Updated Hosts count
+ minimum: 0
+ type: integer
+ type: object
+ required:
+ - spec
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+- id: apiextensions.k8s.io/v1:CustomResourceDefinition:clickhouseinstallationtemplates.clickhouse.altinity.com
+ type: Kubernetes
+ attributes:
+ apiVersion: apiextensions.k8s.io/v1
+ kind: CustomResourceDefinition
+ metadata:
+ labels:
+ clickhouse.altinity.com/chop: 0.19.2
+ name: clickhouseinstallationtemplates.clickhouse.altinity.com
+ spec:
+ group: clickhouse.altinity.com
+ names:
+ kind: ClickHouseInstallationTemplate
+ plural: clickhouseinstallationtemplates
+ shortNames:
+ - chit
+ singular: clickhouseinstallationtemplate
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - description: Operator version
+ jsonPath: .status.chop-version
+ name: version
+ priority: 1
+ type: string
+ - description: Clusters count
+ jsonPath: .status.clusters
+ name: clusters
+ priority: 0
+ type: integer
+ - description: Shards count
+ jsonPath: .status.shards
+ name: shards
+ priority: 1
+ type: integer
+ - description: Hosts count
+ jsonPath: .status.hosts
+ name: hosts
+ priority: 0
+ type: integer
+ - description: TaskID
+ jsonPath: .status.taskID
+ name: taskID
+ priority: 1
+ type: string
+ - description: CHI status
+ jsonPath: .status.status
+ name: status
+ priority: 0
+ type: string
+ - description: Updated hosts count
+ jsonPath: .status.updated
+ name: updated
+ priority: 1
+ type: integer
+ - description: Added hosts count
+ jsonPath: .status.added
+ name: added
+ priority: 1
+ type: integer
+ - description: Hosts deleted count
+ jsonPath: .status.deleted
+ name: deleted
+ priority: 1
+ type: integer
+ - description: Hosts to be deleted count
+ jsonPath: .status.delete
+ name: delete
+ priority: 1
+ type: integer
+ - description: Client access endpoint
+ jsonPath: .status.endpoint
+ name: endpoint
+ priority: 1
+ type: string
+ - description: Age of the resource
+ jsonPath: .metadata.creationTimestamp
+ name: age
+ type: date
+ name: v1
+ schema:
+ openAPIV3Schema:
+ description: defines a set of Kubernetes resources (StatefulSet, PVC, Service, ConfigMap) which describe the behavior of one or more ClickHouse clusters
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: |
+ Specification of the desired behavior of one or more ClickHouse clusters
+ More info: https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md
+ properties:
+ configuration:
+ description: allows configuring multiple aspects of the behavior of a `clickhouse-server` instance and also allows describing multiple `clickhouse-server` clusters inside one `chi` resource
+ properties:
+ clusters:
+ description: |
+ describes the ClickHouse clusters layout and allows changing settings on cluster-level, shard-level and replica-level
+ every cluster is a set of StatefulSets, one StatefulSet contains only one Pod with `clickhouse-server`
+ all Pods are rendered into a part of the ClickHouse configs, mounted from a ConfigMap as `/etc/clickhouse-server/config.d/chop-generated-remote_servers.xml`
+ Clusters are used for the Distributed table engine, more details: https://clickhouse.tech/docs/en/engines/table-engines/special/distributed/
+ If a `cluster` contains zookeeper settings (could be inherited from the top `chi` level), then you can create *ReplicatedMergeTree tables
+ items:
+ properties:
+ files:
+ description: |
+ optional, allows define content of any setting file inside each `Pod` on current cluster during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+ override top-level `chi.spec.configuration.files`
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ layout:
+ description: |
+ describes the current cluster layout, how many shards are in the cluster, how many replicas are in a shard
+ allows overriding settings on each shard and replica separately
+ properties:
+ replicas:
+ description: optional, allows override top-level `chi.spec.configuration` and cluster-level `chi.spec.configuration.clusters` configuration for each replica and each shard relates to selected replica, use it only if you fully understand what you do
+ items:
+ properties:
+ files:
+ description: |
+ optional, allows define content of any setting file inside each `Pod` only in one replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+ override top-level `chi.spec.configuration.files` and cluster-level `chi.spec.configuration.clusters.files`, will ignore if `chi.spec.configuration.clusters.layout.shards` presents
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ name:
+ description: optional, by default replica name is generated, but you can override it and setup custom name
+ maxLength: 15
+ minLength: 1
+ pattern: ^[a-zA-Z0-9-]{0,15}$
+ type: string
+ settings:
+ description: |
+ optional, allows configure `clickhouse-server` settings inside ... tag in `Pod` only in one replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/conf.d/`
+ override top-level `chi.spec.configuration.settings`, cluster-level `chi.spec.configuration.clusters.settings` and will ignore if shard-level `chi.spec.configuration.clusters.layout.shards` present
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ shards:
+ description: optional, list of shards related to current replica, will ignore if `chi.spec.configuration.clusters.layout.shards` presents
+ items:
+ properties:
+ files:
+ description: |
+ optional, allows define content of any setting file inside each `Pod` only in one shard related to current replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+ override top-level `chi.spec.configuration.files` and cluster-level `chi.spec.configuration.clusters.files`, will ignore if `chi.spec.configuration.clusters.layout.shards` presents
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ httpPort:
+ description: |
+ optional, setup `Pod.spec.containers.ports` with name `http` for selected shard, override `chi.spec.templates.hostTemplates.spec.httpPort`
+ allows connect to `clickhouse-server` via HTTP protocol via kubernetes `Service`
+ maximum: 65535
+ minimum: 1
+ type: integer
+ interserverHTTPPort:
+ description: |
+ optional, setup `Pod.spec.containers.ports` with name `interserver` for selected shard, override `chi.spec.templates.hostTemplates.spec.interserverHTTPPort`
+ allows connect between replicas inside same shard during fetch replicated data parts HTTP protocol
+ maximum: 65535
+ minimum: 1
+ type: integer
+ name:
+ description: optional, by default shard name is generated, but you can override it and setup custom name
+ maxLength: 15
+ minLength: 1
+ pattern: ^[a-zA-Z0-9-]{0,15}$
+ type: string
+ settings:
+ description: |
+ optional, allows configure `clickhouse-server` settings inside ... tag in `Pod` only in one shard related to current replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/conf.d/`
+ override top-level `chi.spec.configuration.settings`, cluster-level `chi.spec.configuration.clusters.settings` and replica-level `chi.spec.configuration.clusters.layout.replicas.settings`
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ tcpPort:
+ description: |
+ optional, setup `Pod.spec.containers.ports` with name `tcp` for selected shard, override `chi.spec.templates.hostTemplates.spec.tcpPort`
+ allows connect to `clickhouse-server` via TCP Native protocol via kubernetes `Service`
+ maximum: 65535
+ minimum: 1
+ type: integer
+ templates:
+ description: |
+ optional, configuration of the templates names which will use for generate Kubernetes resources according to selected replica
+ override top-level `chi.spec.configuration.templates`, cluster-level `chi.spec.configuration.clusters.templates`, replica-level `chi.spec.configuration.clusters.layout.replicas.templates`
+ properties:
+ clusterServiceTemplate:
+ description: optional, fully ignores for shard-level
+ type: string
+ dataVolumeClaimTemplate:
+ description: optional, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse data directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters` only for one shard
+ type: string
+ hostTemplate:
+ description: optional, template name from chi.spec.templates.hostTemplates, which will apply to configure each `clickhouse-server` instance during render ConfigMap resources which will mount into `Pod` only for one shard
+ type: string
+ logVolumeClaimTemplate:
+ description: optional, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse log directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters` only for one shard
+ type: string
+ podTemplate:
+ description: optional, template name from chi.spec.templates.podTemplates, allows customization each `Pod` resource during render and reconcile each StatefulSet.spec resource described in `chi.spec.configuration.clusters` only for one shard
+ type: string
+ replicaServiceTemplate:
+ description: optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each replica inside each shard inside clickhouse cluster described in `chi.spec.configuration.clusters` only for one shard
+ type: string
+ serviceTemplate:
+ description: optional, fully ignores for shard-level
+ type: string
+ shardServiceTemplate:
+ description: optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover shard inside clickhouse cluster described in `chi.spec.configuration.clusters` only for one shard
+ type: string
+ volumeClaimTemplate:
+ description: DEPRECATED! VolumeClaimTemplate is deprecated in favor of DataVolumeClaimTemplate and LogVolumeClaimTemplate
+ type: string
+ type: object
+ type: object
+ type: array
+ shardsCount:
+ description: optional, count of shards related to current replica, you can override each shard behavior on low-level `chi.spec.configuration.clusters.layout.replicas.shards`
+ minimum: 1
+ type: integer
+ templates:
+ description: |
+ optional, configuration of the templates names which will use for generate Kubernetes resources according to selected replica
+ override top-level `chi.spec.configuration.templates`, cluster-level `chi.spec.configuration.clusters.templates`
+ properties:
+ clusterServiceTemplate:
+ description: optional, fully ignores for replica-level
+ type: string
+ dataVolumeClaimTemplate:
+ description: optional, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse data directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters` only for one shard
+ type: string
+ hostTemplate:
+ description: optional, template name from chi.spec.templates.hostTemplates, which will apply to configure `clickhouse-server` instance during render ConfigMap resources which will mount into `Pod` only for one replica
+ type: string
+ logVolumeClaimTemplate:
+ description: optional, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse log directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters` only for one shard
+ type: string
+ podTemplate:
+ description: optional, template name from chi.spec.templates.podTemplates, allows customization each `Pod` resource during render and reconcile each StatefulSet.spec resource described in `chi.spec.configuration.clusters` only for one replica
+ type: string
+ replicaServiceTemplate:
+ description: optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each replica inside each shard inside clickhouse cluster described in `chi.spec.configuration.clusters` only for one replica
+ type: string
+ serviceTemplate:
+ description: optional, fully ignores for replica-level
+ type: string
+ shardServiceTemplate:
+ description: optional, fully ignores for replica-level
+ type: string
+ volumeClaimTemplate:
+ description: DEPRECATED! VolumeClaimTemplate is deprecated in favor of DataVolumeClaimTemplate and LogVolumeClaimTemplate
+ type: string
+ type: object
+ type: object
+ type: array
+ replicasCount:
+ description: how many replicas in each shard of the current ClickHouse cluster will run in Kubernetes; each replica is a separate `StatefulSet` which contains only one `Pod` with a `clickhouse-server` instance, every shard contains 1 replica by default
+ type: integer
+ shards:
+ description: optional, allows override top-level `chi.spec.configuration`, cluster-level `chi.spec.configuration.clusters` settings for each shard separately, use it only if you fully understand what you do
+ items:
+ properties:
+ definitionType:
+ description: DEPRECATED - to be removed soon
+ type: string
+ files:
+ description: |
+ optional, allows define content of any setting file inside each `Pod` only in one shard during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+ override top-level `chi.spec.configuration.files` and cluster-level `chi.spec.configuration.clusters.files`
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ internalReplication:
+ description: |
+ optional, `true` by default when `chi.spec.configuration.clusters[].layout.ReplicaCount` > 1 and 0 otherwise
+ allows setup setting which will use during insert into tables with `Distributed` engine for insert only in one live replica and other replicas will download inserted data during replication,
+ will apply in inside ConfigMap which will mount in /etc/clickhouse-server/config.d/chop-generated-remote_servers.xml
+ More details: https://clickhouse.tech/docs/en/engines/table-engines/special/distributed/
+ enum:
+ - ""
+ - "0"
+ - "1"
+ - "False"
+ - "false"
+ - "True"
+ - "true"
+ - "No"
+ - "no"
+ - "Yes"
+ - "yes"
+ - "Off"
+ - "off"
+ - "On"
+ - "on"
+ - Disable
+ - disable
+ - Enable
+ - enable
+ - Disabled
+ - disabled
+ - Enabled
+ - enabled
+ type: string
+ name:
+ description: optional, by default shard name is generated, but you can override it and setup custom name
+ maxLength: 15
+ minLength: 1
+ pattern: ^[a-zA-Z0-9-]{0,15}$
+ type: string
+ replicas:
+ description: |
+ optional, allows override behavior for selected replicas from cluster-level `chi.spec.configuration.clusters` and shard-level `chi.spec.configuration.clusters.layout.shards`
+ items:
+ properties:
+ files:
+ description: |
+ optional, allows define content of any setting file inside `Pod` only in one replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+ override top-level `chi.spec.configuration.files`, cluster-level `chi.spec.configuration.clusters.files` and shard-level `chi.spec.configuration.clusters.layout.shards.files`
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ httpPort:
+ description: |
+ optional, setup `Pod.spec.containers.ports` with name `http` for selected replica, override `chi.spec.templates.hostTemplates.spec.httpPort`
+ allows connect to `clickhouse-server` via HTTP protocol via kubernetes `Service`
+ maximum: 65535
+ minimum: 1
+ type: integer
+ interserverHTTPPort:
+ description: |
+ optional, setup `Pod.spec.containers.ports` with name `interserver` for selected replica, override `chi.spec.templates.hostTemplates.spec.interserverHTTPPort`
+ allows connect between replicas inside same shard during fetch replicated data parts HTTP protocol
+ maximum: 65535
+ minimum: 1
+ type: integer
+ name:
+ description: optional, by default replica name is generated, but you can override it and setup custom name
+ maxLength: 15
+ minLength: 1
+ pattern: ^[a-zA-Z0-9-]{0,15}$
+ type: string
+ settings:
+ description: |
+ optional, allows configure `clickhouse-server` settings inside ... tag in `Pod` only in one replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/conf.d/`
+ override top-level `chi.spec.configuration.settings`, cluster-level `chi.spec.configuration.clusters.settings` and shard-level `chi.spec.configuration.clusters.layout.shards.settings`
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ tcpPort:
+ description: |
+ optional, setup `Pod.spec.containers.ports` with name `tcp` for selected replica, override `chi.spec.templates.hostTemplates.spec.tcpPort`
+ allows connect to `clickhouse-server` via TCP Native protocol via kubernetes `Service`
+ maximum: 65535
+ minimum: 1
+ type: integer
+ templates:
+ description: |
+ optional, configuration of the templates names which will use for generate Kubernetes resources according to selected replica
+ override top-level `chi.spec.configuration.templates`, cluster-level `chi.spec.configuration.clusters.templates` and shard-level `chi.spec.configuration.clusters.layout.shards.templates`
+ properties:
+ clusterServiceTemplate:
+ description: optional, fully ignores for replica-level
+ type: string
+ dataVolumeClaimTemplate:
+ description: optional, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse data directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters` only for one shard
+ type: string
+ hostTemplate:
+ description: optional, template name from chi.spec.templates.hostTemplates, which will apply to configure `clickhouse-server` instance during render ConfigMap resources which will mount into `Pod` only for one replica
+ type: string
+ logVolumeClaimTemplate:
+ description: optional, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse log directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters` only for one shard
+ type: string
+ podTemplate:
+ description: optional, template name from chi.spec.templates.podTemplates, allows customization each `Pod` resource during render and reconcile each StatefulSet.spec resource described in `chi.spec.configuration.clusters` only for one replica
+ type: string
+ replicaServiceTemplate:
+ description: optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each replica inside each shard inside clickhouse cluster described in `chi.spec.configuration.clusters` only for one replica
+ type: string
+ serviceTemplate:
+ description: optional, fully ignores for replica-level
+ type: string
+ shardServiceTemplate:
+ description: optional, fully ignores for replica-level
+ type: string
+ volumeClaimTemplate:
+ description: DEPRECATED! VolumeClaimTemplate is deprecated in favor of DataVolumeClaimTemplate and LogVolumeClaimTemplate
+ type: string
+ type: object
+ type: object
+ type: array
+ replicasCount:
+ description: |
+                          optional, how many replicas in the selected shard of the selected ClickHouse cluster will run in Kubernetes, each replica is a separate `StatefulSet` which contains only one `Pod` with a `clickhouse-server` instance,
+ shard contains 1 replica by default
+ override cluster-level `chi.spec.configuration.clusters.layout.replicasCount`
+ minimum: 1
+ type: integer
+ settings:
+ description: |
+ optional, allows configure `clickhouse-server` settings inside ... tag in each `Pod` only in one shard during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/`
+ override top-level `chi.spec.configuration.settings` and cluster-level `chi.spec.configuration.clusters.settings`
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ templates:
+ description: |
+ optional, configuration of the templates names which will use for generate Kubernetes resources according to selected shard
+ override top-level `chi.spec.configuration.templates` and cluster-level `chi.spec.configuration.clusters.templates`
+ properties:
+ clusterServiceTemplate:
+ description: optional, fully ignores for shard-level
+ type: string
+ dataVolumeClaimTemplate:
+ description: optional, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse data directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters` only for one shard
+ type: string
+ hostTemplate:
+ description: optional, template name from chi.spec.templates.hostTemplates, which will apply to configure each `clickhouse-server` instance during render ConfigMap resources which will mount into `Pod` only for one shard
+ type: string
+ logVolumeClaimTemplate:
+ description: optional, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse log directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters` only for one shard
+ type: string
+ podTemplate:
+ description: optional, template name from chi.spec.templates.podTemplates, allows customization each `Pod` resource during render and reconcile each StatefulSet.spec resource described in `chi.spec.configuration.clusters` only for one shard
+ type: string
+ replicaServiceTemplate:
+ description: optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each replica inside each shard inside clickhouse cluster described in `chi.spec.configuration.clusters` only for one shard
+ type: string
+ serviceTemplate:
+ description: optional, fully ignores for shard-level
+ type: string
+ shardServiceTemplate:
+ description: optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover shard inside clickhouse cluster described in `chi.spec.configuration.clusters` only for one shard
+ type: string
+ volumeClaimTemplate:
+ description: DEPRECATED! VolumeClaimTemplate is deprecated in favor of DataVolumeClaimTemplate and LogVolumeClaimTemplate
+ type: string
+ type: object
+ weight:
+ description: |
+ optional, 1 by default, allows setup shard setting which will use during insert into tables with `Distributed` engine,
+ will apply in inside ConfigMap which will mount in /etc/clickhouse-server/config.d/chop-generated-remote_servers.xml
+ More details: https://clickhouse.tech/docs/en/engines/table-engines/special/distributed/
+ type: integer
+ type: object
+ type: array
+ shardsCount:
+                  description: how many shards of the current ClickHouse cluster will run in Kubernetes, each shard contains a shared-nothing part of the data and a set of replicas, the cluster contains 1 shard by default
+ type: integer
+ type:
+ description: DEPRECATED - to be removed soon
+ type: string
+ type: object
+ name:
+                    description: cluster name, used to identify a set of ClickHouse servers and widely used when generating names of related Kubernetes resources
+ maxLength: 15
+ minLength: 1
+ pattern: ^[a-zA-Z0-9-]{0,15}$
+ type: string
+ schemaPolicy:
+ description: |
+ describes how schema is propagated within replicas and shards
+ properties:
+ replica:
+ description: how schema is propagated within a replica
+ enum:
+ - None
+ - All
+ type: string
+ shard:
+ description: how schema is propagated between shards
+ enum:
+ - None
+ - All
+ - DistributedTablesOnly
+ type: string
+ type: object
+ settings:
+ description: |
+ optional, allows configure `clickhouse-server` settings inside ... tag in each `Pod` only in one cluster during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/`
+ override top-level `chi.spec.configuration.settings`
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ templates:
+ description: |
+ optional, configuration of the templates names which will use for generate Kubernetes resources according to selected cluster
+ override top-level `chi.spec.configuration.templates`
+ properties:
+ clusterServiceTemplate:
+ description: optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each clickhouse cluster described in `chi.spec.configuration.clusters` only for one cluster
+ type: string
+ dataVolumeClaimTemplate:
+ description: optional, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse data directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters` only for one cluster
+ type: string
+ hostTemplate:
+ description: optional, template name from chi.spec.templates.hostTemplates, which will apply to configure each `clickhouse-server` instance during render ConfigMap resources which will mount into `Pod` only for one cluster
+ type: string
+ logVolumeClaimTemplate:
+ description: optional, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse log directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters` only for one cluster
+ type: string
+ podTemplate:
+ description: optional, template name from chi.spec.templates.podTemplates, allows customization each `Pod` resource during render and reconcile each StatefulSet.spec resource described in `chi.spec.configuration.clusters` only for one cluster
+ type: string
+ replicaServiceTemplate:
+ description: optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each replica inside each shard inside each clickhouse cluster described in `chi.spec.configuration.clusters` only for one cluster
+ type: string
+ serviceTemplate:
+ description: optional, fully ignores for cluster-level
+ type: string
+ shardServiceTemplate:
+ description: optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each shard inside clickhouse cluster described in `chi.spec.configuration.clusters` only for one cluster
+ type: string
+ volumeClaimTemplate:
+ description: DEPRECATED! VolumeClaimTemplate is deprecated in favor of DataVolumeClaimTemplate and LogVolumeClaimTemplate
+ type: string
+ type: object
+ zookeeper:
+ description: |
+ optional, allows configure .. section in each `Pod` only in current ClickHouse cluster, during generate `ConfigMap` which will mounted in `/etc/clickhouse-server/config.d/`
+ override top-level `chi.spec.configuration.zookeeper` settings
+ properties:
+ identity:
+ description: optional access credentials string with `user:password` format used when use digest authorization in Zookeeper
+ type: string
+ nodes:
+ description: describe every available zookeeper cluster node for interaction
+ items:
+ properties:
+ host:
+ description: dns name or ip address for Zookeeper node
+ type: string
+ port:
+ description: TCP port which used to connect to Zookeeper node
+ maximum: 65535
+ minimum: 0
+ type: integer
+ type: object
+ type: array
+ operation_timeout_ms:
+ description: one operation timeout during Zookeeper transactions
+ type: integer
+ root:
+ description: optional root znode path inside zookeeper to store ClickHouse related data (replication queue or distributed DDL)
+ type: string
+ session_timeout_ms:
+ description: session timeout during connect to Zookeeper
+ type: integer
+ type: object
+ type: object
+ type: array
+ files:
+ description: |
+ allows define content of any setting file inside each `Pod` during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+ every key in this object is the file name
+ every value in this object is the file content
+ you can use `!!binary |` and base64 for binary files, see details here https://yaml.org/type/binary.html
+                    each key could contain a prefix like USERS, COMMON, HOST or config.d, users.d, conf.d; wrong prefixes will be ignored, subfolders also will be ignored
+ More details: https://github.com/Altinity/clickhouse-operator/blob/master/docs/chi-examples/05-settings-05-files-nested.yaml
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ profiles:
+ description: |
+ allows configure .. section in each `Pod` during generate `ConfigMap` which will mount in `/etc/clickhouse-server/users.d/`
+ you can configure any aspect of settings profile
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings-profiles/
+ Your yaml code will convert to XML, see examples https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specconfigurationprofiles
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ quotas:
+ description: |
+ allows configure .. section in each `Pod` during generate `ConfigMap` which will mount in `/etc/clickhouse-server/users.d/`
+ you can configure any aspect of resource quotas
+ More details: https://clickhouse.tech/docs/en/operations/quotas/
+ Your yaml code will convert to XML, see examples https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specconfigurationquotas
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ settings:
+ description: |
+ allows configure `clickhouse-server` settings inside ... tag in each `Pod` during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/`
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+ Your yaml code will convert to XML, see examples https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specconfigurationsettings
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ users:
+ description: |
+ allows configure .. section in each `Pod` during generate `ConfigMap` which will mount in `/etc/clickhouse-server/users.d/`
+ you can configure password hashed, authorization restrictions, database level security row filters etc.
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings-users/
+ Your yaml code will convert to XML, see examples https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specconfigurationusers
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ zookeeper:
+ description: |
+ allows configure .. section in each `Pod` during generate `ConfigMap` which will mounted in `/etc/clickhouse-server/config.d/`
+                    `clickhouse-operator` itself doesn't manage Zookeeper, please install Zookeeper separately, see examples at https://github.com/Altinity/clickhouse-operator/tree/master/deploy/zookeeper/
+ currently, zookeeper (or clickhouse-keeper replacement) used for *ReplicatedMergeTree table engines and for `distributed_ddl`
+ More details: https://clickhouse.tech/docs/en/operations/server-configuration-parameters/settings/#server-settings_zookeeper
+ properties:
+ identity:
+ description: optional access credentials string with `user:password` format used when use digest authorization in Zookeeper
+ type: string
+ nodes:
+ description: describe every available zookeeper cluster node for interaction
+ items:
+ properties:
+ host:
+ description: dns name or ip address for Zookeeper node
+ type: string
+ port:
+ description: TCP port which used to connect to Zookeeper node
+ maximum: 65535
+ minimum: 0
+ type: integer
+ type: object
+ type: array
+ operation_timeout_ms:
+ description: one operation timeout during Zookeeper transactions
+ type: integer
+ root:
+ description: optional root znode path inside zookeeper to store ClickHouse related data (replication queue or distributed DDL)
+ type: string
+ session_timeout_ms:
+ description: session timeout during connect to Zookeeper
+ type: integer
+ type: object
+ type: object
+ defaults:
+ description: |
+ define default behavior for whole ClickHouseInstallation, some behavior can be re-define on cluster, shard and replica level
+ More info: https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specdefaults
+ properties:
+ distributedDDL:
+ description: |
+ allows change `` settings
+ More info: https://clickhouse.tech/docs/en/operations/server-configuration-parameters/settings/#server-settings-distributed_ddl
+ properties:
+ profile:
+ description: Settings from this profile will be used to execute DDL queries
+ type: string
+ type: object
+ replicasUseFQDN:
+ description: |
+                defines whether replicas should be specified by FQDN in ``; when "no", a short hostname is used and clickhouse-server relies on the Kubernetes default suffixes for proper DNS lookup
+ "yes" by default
+ enum:
+ - ""
+ - "0"
+ - "1"
+ - "False"
+ - "false"
+ - "True"
+ - "true"
+ - "No"
+ - "no"
+ - "Yes"
+ - "yes"
+ - "Off"
+ - "off"
+ - "On"
+ - "on"
+ - Disable
+ - disable
+ - Enable
+ - enable
+ - Disabled
+ - disabled
+ - Enabled
+ - enabled
+ type: string
+ templates:
+ description: optional, configuration of the templates names which will use for generate Kubernetes resources according to one or more ClickHouse clusters described in current ClickHouseInstallation (chi) resource
+ properties:
+ clusterServiceTemplate:
+ description: optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each clickhouse cluster described in `chi.spec.configuration.clusters`
+ type: string
+ dataVolumeClaimTemplate:
+ description: optional, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse data directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters`
+ type: string
+ hostTemplate:
+ description: optional, template name from chi.spec.templates.hostTemplates, which will apply to configure every `clickhouse-server` instance during render ConfigMap resources which will mount into `Pod`
+ type: string
+ logVolumeClaimTemplate:
+ description: optional, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse log directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters`
+ type: string
+ podTemplate:
+ description: optional, template name from chi.spec.templates.podTemplates, allows customization each `Pod` resource during render and reconcile each StatefulSet.spec resource described in `chi.spec.configuration.clusters`
+ type: string
+ replicaServiceTemplate:
+ description: optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each replica inside each shard inside each clickhouse cluster described in `chi.spec.configuration.clusters`
+ type: string
+ serviceTemplate:
+ description: optional, template name from chi.spec.templates.serviceTemplates, allows customization for one `Service` resource which will created by `clickhouse-operator` which cover all clusters in whole `chi` resource
+ type: string
+ shardServiceTemplate:
+ description: optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each shard inside clickhouse cluster described in `chi.spec.configuration.clusters`
+ type: string
+ volumeClaimTemplate:
+ description: DEPRECATED! VolumeClaimTemplate is deprecated in favor of DataVolumeClaimTemplate and LogVolumeClaimTemplate
+ type: string
+ type: object
+ type: object
+ namespaceDomainPattern:
+ description: custom domain suffix which will add to end of `Service` or `Pod` name, use it when you use custom cluster domain in your Kubernetes cluster
+ type: string
+ reconciling:
+ description: optional, allows tuning reconciling cycle for ClickhouseInstallation from clickhouse-operator side
+ properties:
+ cleanup:
+ description: optional, define behavior for cleanup Kubernetes resources during reconcile cycle
+ properties:
+ reconcileFailedObjects:
+                    description: what clickhouse-operator shall do when reconciling of Kubernetes resources fails, default behavior is `Retain`
+ properties:
+ configMap:
+ description: behavior policy for failed ConfigMap reconciling, Retain by default
+ enum:
+ - Retain
+ - Delete
+ type: string
+ pvc:
+ description: behavior policy for failed PVC reconciling, Retain by default
+ enum:
+ - Retain
+ - Delete
+ type: string
+ service:
+ description: behavior policy for failed Service reconciling, Retain by default
+ enum:
+ - Retain
+ - Delete
+ type: string
+ statefulSet:
+ description: behavior policy for failed StatefulSet reconciling, Retain by default
+ enum:
+ - Retain
+ - Delete
+ type: string
+ type: object
+ unknownObjects:
+                  description: what clickhouse-operator shall do when it finds Kubernetes resources which should be managed by clickhouse-operator but do not have an `ownerReference` to any currently managed `ClickHouseInstallation` resource, default behavior is `Delete`
+ properties:
+ configMap:
+ description: behavior policy for unknown ConfigMap, Delete by default
+ enum:
+ - Retain
+ - Delete
+ type: string
+ pvc:
+ description: behavior policy for unknown PVC, Delete by default
+ enum:
+ - Retain
+ - Delete
+ type: string
+ service:
+ description: behavior policy for unknown Service, Delete by default
+ enum:
+ - Retain
+ - Delete
+ type: string
+ statefulSet:
+ description: behavior policy for unknown StatefulSet, Delete by default
+ enum:
+ - Retain
+ - Delete
+ type: string
+ type: object
+ type: object
+ configMapPropagationTimeout:
+ description: |
+                timeout in seconds that `clickhouse-operator` will wait, during reconcile of a `ClickhouseInstallation`, for an applied `ConfigMap` to be updated from cache in the pods
+ see details: https://kubernetes.io/docs/concepts/configuration/configmap/#mounted-configmaps-are-updated-automatically
+ maximum: 3600
+ minimum: 0
+ type: integer
+ policy:
+ description: DEPRECATED
+ type: string
+ type: object
+ restart:
+ description: This is a 'soft restart' button. When set to 'RollingUpdate' operator will restart ClickHouse pods in a graceful way. Remove it after the use in order to avoid unneeded restarts
+ enum:
+ - ""
+ - RollingUpdate
+ type: string
+ stop:
+ description: |
+ Allow stop all ClickHouse clusters described in current chi.
+ Stop mechanism works as follows:
+              - When `stop` is `1`, `Replicas: 0` is set in each StatefulSet resource related to the current `chi`; all `Pods` and `Service` resources will be destroyed, but PVCs stay alive
+              - When `stop` is `0`, `Pods` will be created again, will attach the retained PVCs, and `Service` resources will also be created again
+ enum:
+ - ""
+ - "0"
+ - "1"
+ - "False"
+ - "false"
+ - "True"
+ - "true"
+ - "No"
+ - "no"
+ - "Yes"
+ - "yes"
+ - "Off"
+ - "off"
+ - "On"
+ - "on"
+ - Disable
+ - disable
+ - Enable
+ - enable
+ - Disabled
+ - disabled
+ - Enabled
+ - enabled
+ type: string
+ taskID:
+ description: Allow define custom taskID for named update and watch status of this update execution in .status.taskIDs field, by default every update of chi manifest will generate random taskID
+ type: string
+ templates:
+          description: allows defining templates which will be used to render Kubernetes resources like StatefulSet, ConfigMap, Service, PVC; by default, clickhouse-operator has its own templates, but you can override them
+ properties:
+ hostTemplates:
+              description: hostTemplate is used during apply to generate `clickhouse-server` config files
+ items:
+ properties:
+ name:
+                    description: template name, could use to link inside top-level `chi.spec.defaults.templates.hostTemplate`, cluster-level `chi.spec.configuration.clusters.templates.hostTemplate`, shard-level `chi.spec.configuration.clusters.layout.shards.templates.hostTemplate`, replica-level `chi.spec.configuration.clusters.layout.replicas.templates.hostTemplate`
+ type: string
+ portDistribution:
+                    description: defines how the numeric values of named ports in `Pod.spec.containers.ports` and clickhouse-server configs will be distributed
+ items:
+ properties:
+ type:
+                          description: type of distribution; when `Unspecified` (default value) all listen ports in the clickhouse-server configuration of all Pods will have the same value, when `ClusterScopeIndex` ports will be incremented by an offset from the base value depending on the shard and replica index inside the cluster; in combination with `chi.spec.templates.podTemplates.spec.HostNetwork` it allows setting up a ClickHouse cluster inside Kubernetes and providing access via an external network, bypassing the Kubernetes internal network
+ enum:
+ - ""
+ - Unspecified
+ - ClusterScopeIndex
+ type: string
+ type: object
+ type: array
+ spec:
+ properties:
+ files:
+ description: |
+ optional, allows define content of any setting file inside each `Pod` where this template will apply during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ httpPort:
+ description: |
+ optional, setup `http_port` inside `clickhouse-server` settings for each Pod where current template will apply
+ if specified, should have equal value with `chi.spec.templates.podTemplates.spec.containers.ports[name=http]`
+ More info: https://clickhouse.tech/docs/en/interfaces/http/
+ maximum: 65535
+ minimum: 1
+ type: integer
+ interserverHTTPPort:
+ description: |
+ optional, setup `interserver_http_port` inside `clickhouse-server` settings for each Pod where current template will apply
+ if specified, should have equal value with `chi.spec.templates.podTemplates.spec.containers.ports[name=interserver]`
+ More info: https://clickhouse.tech/docs/en/operations/server-configuration-parameters/settings/#interserver-http-port
+ maximum: 65535
+ minimum: 1
+ type: integer
+ name:
+                        description: by default, the hostname will be generated, but this allows defining a custom name for each `clickhouse-server`
+ maxLength: 15
+ minLength: 1
+ pattern: ^[a-zA-Z0-9-]{0,15}$
+ type: string
+ settings:
+ description: |
+ optional, allows configure `clickhouse-server` settings inside ... tag in each `Pod` where this template will apply during generate `ConfigMap` which will mount in `/etc/clickhouse-server/conf.d/`
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ tcpPort:
+ description: |
+ optional, setup `tcp_port` inside `clickhouse-server` settings for each Pod where current template will apply
+ if specified, should have equal value with `chi.spec.templates.podTemplates.spec.containers.ports[name=tcp]`
+ More info: https://clickhouse.tech/docs/en/interfaces/tcp/
+ maximum: 65535
+ minimum: 1
+ type: integer
+ templates:
+                        description: be careful, this part of the CRD allows overriding a template inside a template, don't use it if you don't understand what you are doing
+ properties:
+ clusterServiceTemplate:
+ type: string
+ dataVolumeClaimTemplate:
+ type: string
+ hostTemplate:
+ type: string
+ logVolumeClaimTemplate:
+ type: string
+ podTemplate:
+ type: string
+ replicaServiceTemplate:
+ type: string
+ serviceTemplate:
+ type: string
+ shardServiceTemplate:
+ type: string
+ type: object
+ type: object
+ type: object
+ type: array
+ podTemplates:
+ description: |
+ podTemplate will use during render `Pod` inside `StatefulSet.spec` and allows define rendered `Pod.spec`, pod scheduling distribution and pod zone
+ More information: https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#spectemplatespodtemplates
+ items:
+ properties:
+ distribution:
+ description: DEPRECATED, shortcut for `chi.spec.templates.podTemplates.spec.affinity.podAntiAffinity`
+ enum:
+ - ""
+ - Unspecified
+ - OnePerHost
+ type: string
+ generateName:
+                    description: allows defining the format for the generated `Pod` name, see https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#spectemplatesservicetemplates for details about available template variables
+ type: string
+ metadata:
+ description: |
+ allows pass standard object's metadata from template to Pod
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ name:
+                    description: template name, could use to link inside top-level `chi.spec.defaults.templates.podTemplate`, cluster-level `chi.spec.configuration.clusters.templates.podTemplate`, shard-level `chi.spec.configuration.clusters.layout.shards.templates.podTemplate`, replica-level `chi.spec.configuration.clusters.layout.replicas.templates.podTemplate`
+ type: string
+ podDistribution:
+                    description: define ClickHouse Pod distribution policy between Kubernetes Nodes inside Shard, Replica, Namespace, CHI, another ClickHouse cluster
+ items:
+ properties:
+ number:
+ description: define, how much ClickHouse Pods could be inside selected scope with selected distribution type
+ maximum: 65535
+ minimum: 0
+ type: integer
+ scope:
+ description: scope for apply each podDistribution
+ enum:
+ - ""
+ - Unspecified
+ - Shard
+ - Replica
+ - Cluster
+ - ClickHouseInstallation
+ - Namespace
+ type: string
+ topologyKey:
+ description: 'use for inter-pod affinity look to `pod.spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.topologyKey`, More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity'
+ type: string
+ type:
+ description: you can define multiple affinity policy types
+ enum:
+ - ""
+ - Unspecified
+ - ClickHouseAntiAffinity
+ - ShardAntiAffinity
+ - ReplicaAntiAffinity
+ - AnotherNamespaceAntiAffinity
+ - AnotherClickHouseInstallationAntiAffinity
+ - AnotherClusterAntiAffinity
+ - MaxNumberPerNode
+ - NamespaceAffinity
+ - ClickHouseInstallationAffinity
+ - ClusterAffinity
+ - ShardAffinity
+ - ReplicaAffinity
+ - PreviousTailAffinity
+ - CircularReplication
+ type: string
+ type: object
+ type: array
+ spec:
+                    description: allows defining the whole Pod.spec inside StatefulSet.spec, see https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates for details
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ zone:
+ description: allows define custom zone name and will separate ClickHouse `Pods` between nodes, shortcut for `chi.spec.templates.podTemplates.spec.affinity.podAntiAffinity`
+ properties:
+ key:
+ description: optional, if defined, allows select kubernetes nodes by label with `name` equal `key`
+ type: string
+ values:
+ description: optional, if defined, allows select kubernetes nodes by label with `value` in `values`
+ items:
+ type: string
+ type: array
+ type: object
+ type: object
+ type: array
+ serviceTemplates:
+ description: |
+ allows define template for rendering `Service` which would get endpoint from Pods which scoped chi-wide, cluster-wide, shard-wide, replica-wide level
+ items:
+ properties:
+ generateName:
+                    description: allows defining the format for the generated `Service` name, see https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#spectemplatesservicetemplates for details about available template variables
+ type: string
+ metadata:
+ description: |
+ allows pass standard object's metadata from template to Service
+                      Can be used to define Cloud Provider specific metadata which impacts the behavior of the service
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ name:
+ description: |
+ template name, could use to link inside
+ chi-level `chi.spec.defaults.templates.serviceTemplate`
+ cluster-level `chi.spec.configuration.clusters.templates.clusterServiceTemplate`
+                      shard-level `chi.spec.configuration.clusters.layout.shards.templates.shardServiceTemplate`
+ replica-level `chi.spec.configuration.clusters.layout.replicas.templates.replicaServiceTemplate` or `chi.spec.configuration.clusters.layout.shards.replicas.replicaServiceTemplate`
+ type: string
+ spec:
+ description: |
+ describe behavior of generated Service
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ type: object
+ type: array
+ volumeClaimTemplates:
+ description: allows define template for rendering `PVC` kubernetes resource, which would use inside `Pod` for mount clickhouse `data`, clickhouse `logs` or something else
+ items:
+ properties:
+ metadata:
+ description: |
+ allows pass standard object's metadata from template to PVC
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ name:
+ description: |
+ template name, could use to link inside
+ top-level `chi.spec.defaults.templates.dataVolumeClaimTemplate` or `chi.spec.defaults.templates.logVolumeClaimTemplate`,
+ cluster-level `chi.spec.configuration.clusters.templates.dataVolumeClaimTemplate` or `chi.spec.configuration.clusters.templates.logVolumeClaimTemplate`,
+                      shard-level `chi.spec.configuration.clusters.layout.shards.templates.dataVolumeClaimTemplate` or `chi.spec.configuration.clusters.layout.shards.templates.logVolumeClaimTemplate`
+ replica-level `chi.spec.configuration.clusters.layout.replicas.templates.dataVolumeClaimTemplate` or `chi.spec.configuration.clusters.layout.replicas.templates.logVolumeClaimTemplate`
+ type: string
+ reclaimPolicy:
+                    description: defines the `PVC` deletion policy when a `Pod` is deleted, `Delete` by default; when `Retain`, the `PVC` stays alive even after the `Pod` is deleted
+ enum:
+ - ""
+ - Retain
+ - Delete
+ type: string
+ spec:
+ description: |
+ allows define all aspects of `PVC` resource
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ type: object
+ type: array
+ type: object
+ templating:
+ description: optional, define policy for auto applying ClickHouseInstallationTemplate inside ClickHouseInstallation
+ properties:
+ policy:
+ description: when defined as `auto` inside ClickhouseInstallationTemplate, it will auto add into all ClickHouseInstallation, manual value is default
+ enum:
+ - auto
+ - manual
+ type: string
+ type: object
+ troubleshoot:
+            description: allows troubleshooting Pods in CrashLoopBackOff state, when a wrong configuration was applied and `clickhouse-server` wouldn't start up
+ enum:
+ - ""
+ - "0"
+ - "1"
+ - "False"
+ - "false"
+ - "True"
+ - "true"
+ - "No"
+ - "no"
+ - "Yes"
+ - "yes"
+ - "Off"
+ - "off"
+ - "On"
+ - "on"
+ - Disable
+ - disable
+ - Enable
+ - enable
+ - Disabled
+ - disabled
+ - Enabled
+ - enabled
+ type: string
+ useTemplates:
+ description: list of `ClickHouseInstallationTemplate` (chit) resource names which will merge with current `Chi` manifest during render Kubernetes resources to create related ClickHouse clusters
+ items:
+ properties:
+ name:
+ description: name of `ClickHouseInstallationTemplate` (chit) resource
+ type: string
+ namespace:
+                description: Kubernetes namespace where the `chit` resource needs to be searched, depending on the `watchNamespaces` settings in `clickhouse-operator`
+ type: string
+ useType:
+ description: optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`
+ enum:
+ - ""
+ - merge
+ type: string
+ type: object
+ type: array
+ type: object
+ status:
+ description: Current ClickHouseInstallation manifest status, contains many fields like a normalized configuration, clickhouse-operator version, current action and all applied action list, current taskID and all applied taskIDs and other
+ properties:
+ action:
+ description: Action
+ type: string
+ actions:
+ description: Actions
+ items:
+ type: string
+ type: array
+ added:
+ description: Added Hosts count
+ minimum: 0
+ type: integer
+ chop-commit:
+ description: ClickHouse operator git commit SHA
+ type: string
+ chop-date:
+ description: ClickHouse operator build date
+ type: string
+ chop-ip:
+ description: IP address of the operator's pod which managed this CHI
+ type: string
+ chop-version:
+ description: ClickHouse operator version
+ type: string
+ clusters:
+ description: Clusters count
+ minimum: 0
+ type: integer
+ delete:
+ description: About to delete Hosts count
+ minimum: 0
+ type: integer
+ deleted:
+ description: Deleted Hosts count
+ minimum: 0
+ type: integer
+ endpoint:
+ description: Endpoint
+ type: string
+ error:
+ description: Last error
+ type: string
+ errors:
+ description: Errors
+ items:
+ type: string
+ type: array
+ fqdns:
+ description: Pods FQDNs
+ items:
+ type: string
+ type: array
+ generation:
+ description: Generation
+ minimum: 0
+ type: integer
+ hosts:
+ description: Hosts count
+ minimum: 0
+ type: integer
+ normalized:
+ description: Normalized CHI
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ pod-ips:
+ description: Pod IPs
+ items:
+ type: string
+ type: array
+ pods:
+ description: Pods
+ items:
+ type: string
+ type: array
+ replicas:
+ description: Replicas count
+ minimum: 0
+ type: integer
+ shards:
+ description: Shards count
+ minimum: 0
+ type: integer
+ status:
+ description: Status
+ type: string
+ taskID:
+ description: Current task id
+ type: string
+ taskIDsCompleted:
+ description: Completed task ids
+ items:
+ type: string
+ type: array
+ taskIDsStarted:
+ description: Started task ids
+ items:
+ type: string
+ type: array
+ updated:
+ description: Updated Hosts count
+ minimum: 0
+ type: integer
+ type: object
+ required:
+ - spec
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+- id: apiextensions.k8s.io/v1:CustomResourceDefinition:clickhouseoperatorconfigurations.clickhouse.altinity.com
+ type: Kubernetes
+ attributes:
+ apiVersion: apiextensions.k8s.io/v1
+ kind: CustomResourceDefinition
+ metadata:
+ labels:
+ clickhouse.altinity.com/chop: 0.19.2
+ name: clickhouseoperatorconfigurations.clickhouse.altinity.com
+ spec:
+ group: clickhouse.altinity.com
+ names:
+ kind: ClickHouseOperatorConfiguration
+ plural: clickhouseoperatorconfigurations
+ shortNames:
+ - chopconf
+ singular: clickhouseoperatorconfiguration
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - description: Watch namespaces
+ jsonPath: .status
+ name: namespaces
+ priority: 0
+ type: string
+ - description: Age of the resource
+ jsonPath: .metadata.creationTimestamp
+ name: age
+ type: date
+ name: v1
+ schema:
+ openAPIV3Schema:
+              description: allows customizing `clickhouse-operator` settings, the clickhouse-operator pod needs a restart after adding it, more details https://github.com/Altinity/clickhouse-operator/blob/master/docs/operator_configuration.md
+ properties:
+ spec:
+ description: |
+ Allows to define settings of the clickhouse-operator.
+ More info: https://github.com/Altinity/clickhouse-operator/blob/master/config/config.yaml
+ Check into etc-clickhouse-operator* ConfigMaps if you need more control
+ properties:
+ annotation:
+ properties:
+ exclude:
+ items:
+ type: string
+ type: array
+ include:
+ items:
+ type: string
+ type: array
+ type: object
+ clickhouse:
+ properties:
+ access:
+ properties:
+ password:
+ description: ClickHouse password to be used by operator to connect to ClickHouse instances, deprecated, use chCredentialsSecretName
+ type: string
+ port:
+ description: port to be used by operator to connect to ClickHouse instances
+ maximum: 65535
+ minimum: 1
+ type: integer
+ rootCA:
+ description: Root certificate authority that clients use when verifying server certificates. Used for https connection to ClickHouse
+ type: string
+ scheme:
+                        description: The scheme to use for connecting to ClickHouse. One of http or https
+ type: string
+ secret:
+ properties:
+ name:
+ description: Name of k8s Secret with username and password to be used by operator to connect to ClickHouse instances
+ type: string
+ namespace:
+ description: Location of k8s Secret with username and password to be used by operator to connect to ClickHouse instances
+ type: string
+ type: object
+ username:
+ description: ClickHouse username to be used by operator to connect to ClickHouse instances, deprecated, use chCredentialsSecretName
+ type: string
+ type: object
+ configuration:
+ properties:
+ file:
+ properties:
+ path:
+ properties:
+ common:
+ description: Path to the folder where ClickHouse configuration files common for all instances within a CHI are located. Default - config.d
+ type: string
+ host:
+ description: Path to the folder where ClickHouse configuration files unique for each instance (host) within a CHI are located. Default - conf.d
+ type: string
+ user:
+ description: Path to the folder where ClickHouse configuration files with users settings are located. Files are common for all instances within a CHI.
+ type: string
+ type: object
+ type: object
+ network:
+ properties:
+ hostRegexpTemplate:
+ description: ClickHouse server configuration `...` for any
+ type: string
+ type: object
+ user:
+ properties:
+ default:
+ properties:
+ networksIP:
+ description: ClickHouse server configuration `...` for any
+ items:
+ type: string
+ type: array
+ password:
+ description: ClickHouse server configuration `...` for any
+ type: string
+ profile:
+ description: ClickHouse server configuration `...` for any
+ type: string
+ quota:
+ description: ClickHouse server configuration `...` for any
+ type: string
+ type: object
+ type: object
+ type: object
+ type: object
+ label:
+ properties:
+ appendScope:
+ description: |
+ Whether to append *Scope* labels to StatefulSet and Pod
+ - "LabelShardScopeIndex"
+ - "LabelReplicaScopeIndex"
+ - "LabelCHIScopeIndex"
+ - "LabelCHIScopeCycleSize"
+ - "LabelCHIScopeCycleIndex"
+ - "LabelCHIScopeCycleOffset"
+ - "LabelClusterScopeIndex"
+ - "LabelClusterScopeCycleSize"
+ - "LabelClusterScopeCycleIndex"
+ - "LabelClusterScopeCycleOffset"
+ enum:
+ - ""
+ - "0"
+ - "1"
+ - "False"
+ - "false"
+ - "True"
+ - "true"
+ - "No"
+ - "no"
+ - "Yes"
+ - "yes"
+ - "Off"
+ - "off"
+ - "On"
+ - "on"
+ - Disable
+ - disable
+ - Enable
+ - enable
+ - Disabled
+ - disabled
+ - Enabled
+ - enabled
+ type: string
+ exclude:
+ description: |
+ When propagating labels from the chi's `metadata.labels` section to child objects' `metadata.labels`,
+ exclude labels from the following list
+ items:
+ type: string
+ type: array
+ include:
+ items:
+ type: string
+ type: array
+ type: object
+ logger:
+ properties:
+ alsologtostderr:
+ description: boolean allows logs to stderr and files both
+ type: string
+ log_backtrace_at:
+ description: |
+ It can be set to a file and line number with a logging line.
+ Ex.: file.go:123
+ Each time when this line is being executed, a stack trace will be written to the Info log.
+ type: string
+ logtostderr:
+ description: boolean, allows logs to stderr
+ type: string
+ stderrthreshold:
+ type: string
+ v:
+ description: verbosity level of clickhouse-operator log, default - 1 max - 9
+ type: string
+ vmodule:
+ description: |
+ Comma-separated list of filename=N, where filename (can be a pattern) must have no .go ext, and N is a V level.
+ Ex.: file*=2 sets the 'V' to 2 in all files with names like file*.
+ type: string
+ type: object
+ pod:
+ properties:
+ terminationGracePeriod:
+ type: integer
+ type: object
+ reconcile:
+ properties:
+ host:
+ properties:
+ wait:
+ properties:
+ exclude:
+ type: boolean
+ include:
+ type: boolean
+ type: object
+ type: object
+ runtime:
+ properties:
+ threadsNumber:
+ description: How many goroutines will be used to reconcile in parallel, 10 by default
+ maximum: 65535
+ minimum: 1
+ type: integer
+ type: object
+ statefulSet:
+ properties:
+ create:
+ properties:
+ onFailure:
+ description: |
+ What to do in case created StatefulSet is not in Ready after `statefulSetUpdateTimeout` seconds
+ Possible options:
+ 1. abort - do nothing, just break the process and wait for admin.
+ 2. delete - delete newly created problematic StatefulSet.
+ 3. ignore (default) - ignore error, pretend nothing happened and move on to the next StatefulSet.
+ type: string
+ type: object
+ update:
+ properties:
+ onFailure:
+ description: |
+ What to do in case updated StatefulSet is not in Ready after `statefulSetUpdateTimeout` seconds
+ Possible options:
+ 1. abort - do nothing, just break the process and wait for admin.
+ 2. rollback (default) - delete Pod and rollback StatefulSet to previous Generation. Pod would be recreated by StatefulSet based on rollback-ed configuration.
+ 3. ignore - ignore error, pretend nothing happened and move on to the next StatefulSet.
+ type: string
+ pollInterval:
+ description: How many seconds to wait between checks for created/updated StatefulSet status
+ type: integer
+ timeout:
+ description: How many seconds to wait for created/updated StatefulSet to be Ready
+ type: integer
+ type: object
+ type: object
+ type: object
+ statefulSet:
+ properties:
+ revisionHistoryLimit:
+ type: integer
+ type: object
+ template:
+ properties:
+ chi:
+ properties:
+ path:
+ description: Path to folder where ClickHouseInstallationTemplate .yaml manifests are located.
+ type: string
+ type: object
+ type: object
+ watch:
+ properties:
+ namespaces:
+ description: List of namespaces where clickhouse-operator watches for events.
+ items:
+ type: string
+ type: array
+ type: object
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ status:
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ served: true
+ storage: true
diff --git a/appops/guestbook/dev/ci-test/settings.yaml b/appops/guestbook/dev/ci-test/settings.yaml
index a8e3d924..713821bf 100644
--- a/appops/guestbook/dev/ci-test/settings.yaml
+++ b/appops/guestbook/dev/ci-test/settings.yaml
@@ -1,3 +1,7 @@
kcl_options:
- key: cluster
value: demo-cluster-name
+ - key: app
+ value: guestbook
+ - key: env
+ value: dev
diff --git a/appops/guestbook/dev/ci-test/stdout.golden.yaml b/appops/guestbook/dev/ci-test/stdout.golden.yaml
index f72753a7..bf656629 100644
--- a/appops/guestbook/dev/ci-test/stdout.golden.yaml
+++ b/appops/guestbook/dev/ci-test/stdout.golden.yaml
@@ -1,211 +1,205 @@
-id: apps/v1:Deployment:guestbook:redis-leader
-type: Kubernetes
-dependsOn:
-- v1:Namespace:guestbook
-- v1:Service:guestbook:redis-leader
-- v1:Service:guestbook:redis-follower
-- v1:Service:guestbook:frontend
-attributes:
- apiVersion: apps/v1
- kind: Deployment
- metadata:
- name: redis-leader
- namespace: guestbook
- spec:
- replicas: 1
- selector:
- matchLabels:
- app.kubernetes.io/name: guestbook
- app.kubernetes.io/env: dev
- app.kubernetes.io/instance: guestbook-dev
- cluster.x-k8s.io/cluster-name: demo-cluster-name
- app.kubernetes.io/component: redis-leader
- template:
- metadata:
- labels:
- app.kubernetes.io/name: guestbook
- app.kubernetes.io/env: dev
- app.kubernetes.io/instance: guestbook-dev
- cluster.x-k8s.io/cluster-name: demo-cluster-name
- app.kubernetes.io/component: redis-leader
- spec:
- containers:
- - image: docker.io/redis:6.0.5
- name: main
- ports:
- - containerPort: 6379
- protocol: TCP
- resources:
- limits:
- cpu: 100m
- memory: 100Mi
- ephemeral-storage: 1Gi
- requests:
- cpu: 100m
- memory: 100Mi
- ephemeral-storage: 1Gi
----
-id: apps/v1:Deployment:guestbook:redis-follower
-type: Kubernetes
-dependsOn:
-- v1:Namespace:guestbook
-- v1:Service:guestbook:redis-leader
-- v1:Service:guestbook:redis-follower
-- v1:Service:guestbook:frontend
-attributes:
- apiVersion: apps/v1
- kind: Deployment
- metadata:
- name: redis-follower
- namespace: guestbook
- spec:
- replicas: 1
- selector:
- matchLabels:
- app.kubernetes.io/name: guestbook
- app.kubernetes.io/env: dev
- app.kubernetes.io/instance: guestbook-dev
- cluster.x-k8s.io/cluster-name: demo-cluster-name
- app.kubernetes.io/component: redis-follower
- template:
- metadata:
- labels:
- app.kubernetes.io/name: guestbook
- app.kubernetes.io/env: dev
- app.kubernetes.io/instance: guestbook-dev
- cluster.x-k8s.io/cluster-name: demo-cluster-name
- app.kubernetes.io/component: redis-follower
- spec:
- containers:
- - image: gcr.io/google_samples/gb-redis-follower:v2
- name: main
- ports:
- - containerPort: 6379
- protocol: TCP
- resources:
- limits:
- cpu: 100m
- memory: 100Mi
- ephemeral-storage: 1Gi
- requests:
- cpu: 100m
- memory: 100Mi
- ephemeral-storage: 1Gi
----
-id: apps/v1:Deployment:guestbook:frontend
-type: Kubernetes
-dependsOn:
-- v1:Namespace:guestbook
-- v1:Service:guestbook:redis-leader
-- v1:Service:guestbook:redis-follower
-- v1:Service:guestbook:frontend
-attributes:
- apiVersion: apps/v1
- kind: Deployment
- metadata:
- name: frontend
- namespace: guestbook
- spec:
- replicas: 1
- selector:
- matchLabels:
- app.kubernetes.io/name: guestbook
- app.kubernetes.io/env: dev
- app.kubernetes.io/instance: guestbook-dev
- cluster.x-k8s.io/cluster-name: demo-cluster-name
- app.kubernetes.io/component: frontend
- template:
- metadata:
- labels:
- app.kubernetes.io/name: guestbook
- app.kubernetes.io/env: dev
- app.kubernetes.io/instance: guestbook-dev
- cluster.x-k8s.io/cluster-name: demo-cluster-name
- app.kubernetes.io/component: frontend
- spec:
- containers:
- - env:
- - name: GET_HOSTS_FROM
- value: dns
- image: gcr.io/google-samples/gb-frontend:v5
- name: php-redis
- ports:
- - containerPort: 80
- protocol: TCP
- resources:
- limits:
- cpu: 100m
- memory: 100Mi
- ephemeral-storage: 1Gi
- requests:
- cpu: 100m
- memory: 100Mi
- ephemeral-storage: 1Gi
----
-id: v1:Namespace:guestbook
-type: Kubernetes
-attributes:
- apiVersion: v1
- kind: Namespace
- metadata:
- name: guestbook
----
-id: v1:Service:guestbook:redis-leader
-type: Kubernetes
-dependsOn:
-- v1:Namespace:guestbook
-attributes:
- apiVersion: v1
- kind: Service
- metadata:
- name: redis-leader
- namespace: guestbook
- spec:
- ports:
- - port: 6379
- selector:
- app.kubernetes.io/name: guestbook
- app.kubernetes.io/env: dev
- app.kubernetes.io/instance: guestbook-dev
- cluster.x-k8s.io/cluster-name: demo-cluster-name
- app.kubernetes.io/component: redis-leader
----
-id: v1:Service:guestbook:redis-follower
-type: Kubernetes
-dependsOn:
-- v1:Namespace:guestbook
-attributes:
- apiVersion: v1
- kind: Service
- metadata:
- name: redis-follower
- namespace: guestbook
- spec:
- ports:
- - port: 6379
- selector:
- app.kubernetes.io/name: guestbook
- app.kubernetes.io/env: dev
- app.kubernetes.io/instance: guestbook-dev
- cluster.x-k8s.io/cluster-name: demo-cluster-name
- app.kubernetes.io/component: redis-follower
----
-id: v1:Service:guestbook:frontend
-type: Kubernetes
-dependsOn:
-- v1:Namespace:guestbook
-attributes:
- apiVersion: v1
- kind: Service
- metadata:
- name: frontend
- namespace: guestbook
- spec:
- ports:
- - port: 80
- selector:
- app.kubernetes.io/name: guestbook
- app.kubernetes.io/env: dev
- app.kubernetes.io/instance: guestbook-dev
- cluster.x-k8s.io/cluster-name: demo-cluster-name
- app.kubernetes.io/component: frontend
+- id: apps/v1:Deployment:guestbook:redis-leader
+ type: Kubernetes
+ attributes:
+ apiVersion: apps/v1
+ kind: Deployment
+ metadata:
+ name: redis-leader
+ namespace: guestbook
+ spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app.kubernetes.io/component: redis-leader
+ app.kubernetes.io/env: dev
+ app.kubernetes.io/instance: guestbook-dev
+ app.kubernetes.io/name: guestbook
+ cluster.x-k8s.io/cluster-name: demo-cluster-name
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/component: redis-leader
+ app.kubernetes.io/env: dev
+ app.kubernetes.io/instance: guestbook-dev
+ app.kubernetes.io/name: guestbook
+ cluster.x-k8s.io/cluster-name: demo-cluster-name
+ spec:
+ containers:
+ - image: docker.io/redis:6.0.5
+ name: main
+ ports:
+ - containerPort: 6379
+ protocol: TCP
+ resources:
+ limits:
+ cpu: 100m
+ ephemeral-storage: 1Gi
+ memory: 100Mi
+ requests:
+ cpu: 100m
+ ephemeral-storage: 1Gi
+ memory: 100Mi
+ dependsOn:
+ - v1:Namespace:guestbook
+ - v1:Service:guestbook:redis-leader
+ - v1:Service:guestbook:redis-follower
+ - v1:Service:guestbook:frontend
+- id: apps/v1:Deployment:guestbook:redis-follower
+ type: Kubernetes
+ attributes:
+ apiVersion: apps/v1
+ kind: Deployment
+ metadata:
+ name: redis-follower
+ namespace: guestbook
+ spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app.kubernetes.io/component: redis-follower
+ app.kubernetes.io/env: dev
+ app.kubernetes.io/instance: guestbook-dev
+ app.kubernetes.io/name: guestbook
+ cluster.x-k8s.io/cluster-name: demo-cluster-name
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/component: redis-follower
+ app.kubernetes.io/env: dev
+ app.kubernetes.io/instance: guestbook-dev
+ app.kubernetes.io/name: guestbook
+ cluster.x-k8s.io/cluster-name: demo-cluster-name
+ spec:
+ containers:
+ - image: gcr.io/google_samples/gb-redis-follower:v2
+ name: main
+ ports:
+ - containerPort: 6379
+ protocol: TCP
+ resources:
+ limits:
+ cpu: 100m
+ ephemeral-storage: 1Gi
+ memory: 100Mi
+ requests:
+ cpu: 100m
+ ephemeral-storage: 1Gi
+ memory: 100Mi
+ dependsOn:
+ - v1:Namespace:guestbook
+ - v1:Service:guestbook:redis-leader
+ - v1:Service:guestbook:redis-follower
+ - v1:Service:guestbook:frontend
+- id: apps/v1:Deployment:guestbook:frontend
+ type: Kubernetes
+ attributes:
+ apiVersion: apps/v1
+ kind: Deployment
+ metadata:
+ name: frontend
+ namespace: guestbook
+ spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app.kubernetes.io/component: frontend
+ app.kubernetes.io/env: dev
+ app.kubernetes.io/instance: guestbook-dev
+ app.kubernetes.io/name: guestbook
+ cluster.x-k8s.io/cluster-name: demo-cluster-name
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/component: frontend
+ app.kubernetes.io/env: dev
+ app.kubernetes.io/instance: guestbook-dev
+ app.kubernetes.io/name: guestbook
+ cluster.x-k8s.io/cluster-name: demo-cluster-name
+ spec:
+ containers:
+ - env:
+ - name: GET_HOSTS_FROM
+ value: dns
+ image: gcr.io/google-samples/gb-frontend:v5
+ name: php-redis
+ ports:
+ - containerPort: 80
+ protocol: TCP
+ resources:
+ limits:
+ cpu: 100m
+ ephemeral-storage: 1Gi
+ memory: 100Mi
+ requests:
+ cpu: 100m
+ ephemeral-storage: 1Gi
+ memory: 100Mi
+ dependsOn:
+ - v1:Namespace:guestbook
+ - v1:Service:guestbook:redis-leader
+ - v1:Service:guestbook:redis-follower
+ - v1:Service:guestbook:frontend
+- id: v1:Namespace:guestbook
+ type: Kubernetes
+ attributes:
+ apiVersion: v1
+ kind: Namespace
+ metadata:
+ name: guestbook
+- id: v1:Service:guestbook:redis-leader
+ type: Kubernetes
+ attributes:
+ apiVersion: v1
+ kind: Service
+ metadata:
+ name: redis-leader
+ namespace: guestbook
+ spec:
+ ports:
+ - port: 6379
+ selector:
+ app.kubernetes.io/component: redis-leader
+ app.kubernetes.io/env: dev
+ app.kubernetes.io/instance: guestbook-dev
+ app.kubernetes.io/name: guestbook
+ cluster.x-k8s.io/cluster-name: demo-cluster-name
+ dependsOn:
+ - v1:Namespace:guestbook
+- id: v1:Service:guestbook:redis-follower
+ type: Kubernetes
+ attributes:
+ apiVersion: v1
+ kind: Service
+ metadata:
+ name: redis-follower
+ namespace: guestbook
+ spec:
+ ports:
+ - port: 6379
+ selector:
+ app.kubernetes.io/component: redis-follower
+ app.kubernetes.io/env: dev
+ app.kubernetes.io/instance: guestbook-dev
+ app.kubernetes.io/name: guestbook
+ cluster.x-k8s.io/cluster-name: demo-cluster-name
+ dependsOn:
+ - v1:Namespace:guestbook
+- id: v1:Service:guestbook:frontend
+ type: Kubernetes
+ attributes:
+ apiVersion: v1
+ kind: Service
+ metadata:
+ name: frontend
+ namespace: guestbook
+ spec:
+ ports:
+ - port: 80
+ selector:
+ app.kubernetes.io/component: frontend
+ app.kubernetes.io/env: dev
+ app.kubernetes.io/instance: guestbook-dev
+ app.kubernetes.io/name: guestbook
+ cluster.x-k8s.io/cluster-name: demo-cluster-name
+ dependsOn:
+ - v1:Namespace:guestbook
diff --git a/appops/guestbook/prod/ci-test/settings.yaml b/appops/guestbook/prod/ci-test/settings.yaml
index a8e3d924..2dbe55e2 100644
--- a/appops/guestbook/prod/ci-test/settings.yaml
+++ b/appops/guestbook/prod/ci-test/settings.yaml
@@ -1,3 +1,7 @@
kcl_options:
- key: cluster
value: demo-cluster-name
+ - key: app
+ value: guestbook
+ - key: env
+ value: prod
diff --git a/appops/guestbook/prod/ci-test/stdout.golden.yaml b/appops/guestbook/prod/ci-test/stdout.golden.yaml
index 99c25d86..d8ec24ce 100644
--- a/appops/guestbook/prod/ci-test/stdout.golden.yaml
+++ b/appops/guestbook/prod/ci-test/stdout.golden.yaml
@@ -1,211 +1,205 @@
-id: apps/v1:Deployment:guestbook:redis-leader
-type: Kubernetes
-dependsOn:
-- v1:Namespace:guestbook
-- v1:Service:guestbook:redis-leader
-- v1:Service:guestbook:redis-follower
-- v1:Service:guestbook:frontend
-attributes:
- apiVersion: apps/v1
- kind: Deployment
- metadata:
- name: redis-leader
- namespace: guestbook
- spec:
- replicas: 1
- selector:
- matchLabels:
- app.kubernetes.io/name: guestbook
- app.kubernetes.io/env: prod
- app.kubernetes.io/instance: guestbook-prod
- cluster.x-k8s.io/cluster-name: demo-cluster-name
- app.kubernetes.io/component: redis-leader
- template:
- metadata:
- labels:
- app.kubernetes.io/name: guestbook
- app.kubernetes.io/env: prod
- app.kubernetes.io/instance: guestbook-prod
- cluster.x-k8s.io/cluster-name: demo-cluster-name
- app.kubernetes.io/component: redis-leader
- spec:
- containers:
- - image: docker.io/redis:6.0.5
- name: main
- ports:
- - containerPort: 6379
- protocol: TCP
- resources:
- limits:
- cpu: 100m
- memory: 100Mi
- ephemeral-storage: 1Gi
- requests:
- cpu: 100m
- memory: 100Mi
- ephemeral-storage: 1Gi
----
-id: apps/v1:Deployment:guestbook:redis-follower
-type: Kubernetes
-dependsOn:
-- v1:Namespace:guestbook
-- v1:Service:guestbook:redis-leader
-- v1:Service:guestbook:redis-follower
-- v1:Service:guestbook:frontend
-attributes:
- apiVersion: apps/v1
- kind: Deployment
- metadata:
- name: redis-follower
- namespace: guestbook
- spec:
- replicas: 1
- selector:
- matchLabels:
- app.kubernetes.io/name: guestbook
- app.kubernetes.io/env: prod
- app.kubernetes.io/instance: guestbook-prod
- cluster.x-k8s.io/cluster-name: demo-cluster-name
- app.kubernetes.io/component: redis-follower
- template:
- metadata:
- labels:
- app.kubernetes.io/name: guestbook
- app.kubernetes.io/env: prod
- app.kubernetes.io/instance: guestbook-prod
- cluster.x-k8s.io/cluster-name: demo-cluster-name
- app.kubernetes.io/component: redis-follower
- spec:
- containers:
- - image: gcr.io/google_samples/gb-redis-follower:v2
- name: main
- ports:
- - containerPort: 6379
- protocol: TCP
- resources:
- limits:
- cpu: 100m
- memory: 100Mi
- ephemeral-storage: 1Gi
- requests:
- cpu: 100m
- memory: 100Mi
- ephemeral-storage: 1Gi
----
-id: apps/v1:Deployment:guestbook:frontend
-type: Kubernetes
-dependsOn:
-- v1:Namespace:guestbook
-- v1:Service:guestbook:redis-leader
-- v1:Service:guestbook:redis-follower
-- v1:Service:guestbook:frontend
-attributes:
- apiVersion: apps/v1
- kind: Deployment
- metadata:
- name: frontend
- namespace: guestbook
- spec:
- replicas: 1
- selector:
- matchLabels:
- app.kubernetes.io/name: guestbook
- app.kubernetes.io/env: prod
- app.kubernetes.io/instance: guestbook-prod
- cluster.x-k8s.io/cluster-name: demo-cluster-name
- app.kubernetes.io/component: frontend
- template:
- metadata:
- labels:
- app.kubernetes.io/name: guestbook
- app.kubernetes.io/env: prod
- app.kubernetes.io/instance: guestbook-prod
- cluster.x-k8s.io/cluster-name: demo-cluster-name
- app.kubernetes.io/component: frontend
- spec:
- containers:
- - env:
- - name: GET_HOSTS_FROM
- value: dns
- image: gcr.io/google-samples/gb-frontend:v3
- name: php-redis
- ports:
- - containerPort: 80
- protocol: TCP
- resources:
- limits:
- cpu: 100m
- memory: 100Mi
- ephemeral-storage: 1Gi
- requests:
- cpu: 100m
- memory: 100Mi
- ephemeral-storage: 1Gi
----
-id: v1:Namespace:guestbook
-type: Kubernetes
-attributes:
- apiVersion: v1
- kind: Namespace
- metadata:
- name: guestbook
----
-id: v1:Service:guestbook:redis-leader
-type: Kubernetes
-dependsOn:
-- v1:Namespace:guestbook
-attributes:
- apiVersion: v1
- kind: Service
- metadata:
- name: redis-leader
- namespace: guestbook
- spec:
- ports:
- - port: 6379
- selector:
- app.kubernetes.io/name: guestbook
- app.kubernetes.io/env: prod
- app.kubernetes.io/instance: guestbook-prod
- cluster.x-k8s.io/cluster-name: demo-cluster-name
- app.kubernetes.io/component: redis-leader
----
-id: v1:Service:guestbook:redis-follower
-type: Kubernetes
-dependsOn:
-- v1:Namespace:guestbook
-attributes:
- apiVersion: v1
- kind: Service
- metadata:
- name: redis-follower
- namespace: guestbook
- spec:
- ports:
- - port: 6379
- selector:
- app.kubernetes.io/name: guestbook
- app.kubernetes.io/env: prod
- app.kubernetes.io/instance: guestbook-prod
- cluster.x-k8s.io/cluster-name: demo-cluster-name
- app.kubernetes.io/component: redis-follower
----
-id: v1:Service:guestbook:frontend
-type: Kubernetes
-dependsOn:
-- v1:Namespace:guestbook
-attributes:
- apiVersion: v1
- kind: Service
- metadata:
- name: frontend
- namespace: guestbook
- spec:
- ports:
- - port: 80
- selector:
- app.kubernetes.io/name: guestbook
- app.kubernetes.io/env: prod
- app.kubernetes.io/instance: guestbook-prod
- cluster.x-k8s.io/cluster-name: demo-cluster-name
- app.kubernetes.io/component: frontend
+- id: apps/v1:Deployment:guestbook:redis-leader
+ type: Kubernetes
+ attributes:
+ apiVersion: apps/v1
+ kind: Deployment
+ metadata:
+ name: redis-leader
+ namespace: guestbook
+ spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app.kubernetes.io/component: redis-leader
+ app.kubernetes.io/env: prod
+ app.kubernetes.io/instance: guestbook-prod
+ app.kubernetes.io/name: guestbook
+ cluster.x-k8s.io/cluster-name: demo-cluster-name
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/component: redis-leader
+ app.kubernetes.io/env: prod
+ app.kubernetes.io/instance: guestbook-prod
+ app.kubernetes.io/name: guestbook
+ cluster.x-k8s.io/cluster-name: demo-cluster-name
+ spec:
+ containers:
+ - image: docker.io/redis:6.0.5
+ name: main
+ ports:
+ - containerPort: 6379
+ protocol: TCP
+ resources:
+ limits:
+ cpu: 100m
+ ephemeral-storage: 1Gi
+ memory: 100Mi
+ requests:
+ cpu: 100m
+ ephemeral-storage: 1Gi
+ memory: 100Mi
+ dependsOn:
+ - v1:Namespace:guestbook
+ - v1:Service:guestbook:redis-leader
+ - v1:Service:guestbook:redis-follower
+ - v1:Service:guestbook:frontend
+- id: apps/v1:Deployment:guestbook:redis-follower
+ type: Kubernetes
+ attributes:
+ apiVersion: apps/v1
+ kind: Deployment
+ metadata:
+ name: redis-follower
+ namespace: guestbook
+ spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app.kubernetes.io/component: redis-follower
+ app.kubernetes.io/env: prod
+ app.kubernetes.io/instance: guestbook-prod
+ app.kubernetes.io/name: guestbook
+ cluster.x-k8s.io/cluster-name: demo-cluster-name
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/component: redis-follower
+ app.kubernetes.io/env: prod
+ app.kubernetes.io/instance: guestbook-prod
+ app.kubernetes.io/name: guestbook
+ cluster.x-k8s.io/cluster-name: demo-cluster-name
+ spec:
+ containers:
+ - image: gcr.io/google_samples/gb-redis-follower:v2
+ name: main
+ ports:
+ - containerPort: 6379
+ protocol: TCP
+ resources:
+ limits:
+ cpu: 100m
+ ephemeral-storage: 1Gi
+ memory: 100Mi
+ requests:
+ cpu: 100m
+ ephemeral-storage: 1Gi
+ memory: 100Mi
+ dependsOn:
+ - v1:Namespace:guestbook
+ - v1:Service:guestbook:redis-leader
+ - v1:Service:guestbook:redis-follower
+ - v1:Service:guestbook:frontend
+- id: apps/v1:Deployment:guestbook:frontend
+ type: Kubernetes
+ attributes:
+ apiVersion: apps/v1
+ kind: Deployment
+ metadata:
+ name: frontend
+ namespace: guestbook
+ spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app.kubernetes.io/component: frontend
+ app.kubernetes.io/env: prod
+ app.kubernetes.io/instance: guestbook-prod
+ app.kubernetes.io/name: guestbook
+ cluster.x-k8s.io/cluster-name: demo-cluster-name
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/component: frontend
+ app.kubernetes.io/env: prod
+ app.kubernetes.io/instance: guestbook-prod
+ app.kubernetes.io/name: guestbook
+ cluster.x-k8s.io/cluster-name: demo-cluster-name
+ spec:
+ containers:
+ - env:
+ - name: GET_HOSTS_FROM
+ value: dns
+ image: gcr.io/google-samples/gb-frontend:v3
+ name: php-redis
+ ports:
+ - containerPort: 80
+ protocol: TCP
+ resources:
+ limits:
+ cpu: 100m
+ ephemeral-storage: 1Gi
+ memory: 100Mi
+ requests:
+ cpu: 100m
+ ephemeral-storage: 1Gi
+ memory: 100Mi
+ dependsOn:
+ - v1:Namespace:guestbook
+ - v1:Service:guestbook:redis-leader
+ - v1:Service:guestbook:redis-follower
+ - v1:Service:guestbook:frontend
+- id: v1:Namespace:guestbook
+ type: Kubernetes
+ attributes:
+ apiVersion: v1
+ kind: Namespace
+ metadata:
+ name: guestbook
+- id: v1:Service:guestbook:redis-leader
+ type: Kubernetes
+ attributes:
+ apiVersion: v1
+ kind: Service
+ metadata:
+ name: redis-leader
+ namespace: guestbook
+ spec:
+ ports:
+ - port: 6379
+ selector:
+ app.kubernetes.io/component: redis-leader
+ app.kubernetes.io/env: prod
+ app.kubernetes.io/instance: guestbook-prod
+ app.kubernetes.io/name: guestbook
+ cluster.x-k8s.io/cluster-name: demo-cluster-name
+ dependsOn:
+ - v1:Namespace:guestbook
+- id: v1:Service:guestbook:redis-follower
+ type: Kubernetes
+ attributes:
+ apiVersion: v1
+ kind: Service
+ metadata:
+ name: redis-follower
+ namespace: guestbook
+ spec:
+ ports:
+ - port: 6379
+ selector:
+ app.kubernetes.io/component: redis-follower
+ app.kubernetes.io/env: prod
+ app.kubernetes.io/instance: guestbook-prod
+ app.kubernetes.io/name: guestbook
+ cluster.x-k8s.io/cluster-name: demo-cluster-name
+ dependsOn:
+ - v1:Namespace:guestbook
+- id: v1:Service:guestbook:frontend
+ type: Kubernetes
+ attributes:
+ apiVersion: v1
+ kind: Service
+ metadata:
+ name: frontend
+ namespace: guestbook
+ spec:
+ ports:
+ - port: 80
+ selector:
+ app.kubernetes.io/component: frontend
+ app.kubernetes.io/env: prod
+ app.kubernetes.io/instance: guestbook-prod
+ app.kubernetes.io/name: guestbook
+ cluster.x-k8s.io/cluster-name: demo-cluster-name
+ dependsOn:
+ - v1:Namespace:guestbook
diff --git a/appops/guestbook/test/ci-test/settings.yaml b/appops/guestbook/test/ci-test/settings.yaml
index a8e3d924..216187d1 100644
--- a/appops/guestbook/test/ci-test/settings.yaml
+++ b/appops/guestbook/test/ci-test/settings.yaml
@@ -1,3 +1,7 @@
kcl_options:
- key: cluster
value: demo-cluster-name
+ - key: app
+ value: guestbook
+ - key: env
+ value: test
diff --git a/appops/guestbook/test/ci-test/stdout.golden.yaml b/appops/guestbook/test/ci-test/stdout.golden.yaml
index 195c7017..3644ca22 100644
--- a/appops/guestbook/test/ci-test/stdout.golden.yaml
+++ b/appops/guestbook/test/ci-test/stdout.golden.yaml
@@ -1,211 +1,205 @@
-id: apps/v1:Deployment:guestbook:redis-leader
-type: Kubernetes
-dependsOn:
-- v1:Namespace:guestbook
-- v1:Service:guestbook:redis-leader
-- v1:Service:guestbook:redis-follower
-- v1:Service:guestbook:frontend
-attributes:
- apiVersion: apps/v1
- kind: Deployment
- metadata:
- name: redis-leader
- namespace: guestbook
- spec:
- replicas: 1
- selector:
- matchLabels:
- app.kubernetes.io/name: guestbook
- app.kubernetes.io/env: test
- app.kubernetes.io/instance: guestbook-test
- cluster.x-k8s.io/cluster-name: demo-cluster-name
- app.kubernetes.io/component: redis-leader
- template:
- metadata:
- labels:
- app.kubernetes.io/name: guestbook
- app.kubernetes.io/env: test
- app.kubernetes.io/instance: guestbook-test
- cluster.x-k8s.io/cluster-name: demo-cluster-name
- app.kubernetes.io/component: redis-leader
- spec:
- containers:
- - image: docker.io/redis:6.0.5
- name: main
- ports:
- - containerPort: 6379
- protocol: TCP
- resources:
- limits:
- cpu: 100m
- memory: 100Mi
- ephemeral-storage: 1Gi
- requests:
- cpu: 100m
- memory: 100Mi
- ephemeral-storage: 1Gi
----
-id: apps/v1:Deployment:guestbook:redis-follower
-type: Kubernetes
-dependsOn:
-- v1:Namespace:guestbook
-- v1:Service:guestbook:redis-leader
-- v1:Service:guestbook:redis-follower
-- v1:Service:guestbook:frontend
-attributes:
- apiVersion: apps/v1
- kind: Deployment
- metadata:
- name: redis-follower
- namespace: guestbook
- spec:
- replicas: 1
- selector:
- matchLabels:
- app.kubernetes.io/name: guestbook
- app.kubernetes.io/env: test
- app.kubernetes.io/instance: guestbook-test
- cluster.x-k8s.io/cluster-name: demo-cluster-name
- app.kubernetes.io/component: redis-follower
- template:
- metadata:
- labels:
- app.kubernetes.io/name: guestbook
- app.kubernetes.io/env: test
- app.kubernetes.io/instance: guestbook-test
- cluster.x-k8s.io/cluster-name: demo-cluster-name
- app.kubernetes.io/component: redis-follower
- spec:
- containers:
- - image: gcr.io/google_samples/gb-redis-follower:v2
- name: main
- ports:
- - containerPort: 6379
- protocol: TCP
- resources:
- limits:
- cpu: 100m
- memory: 100Mi
- ephemeral-storage: 1Gi
- requests:
- cpu: 100m
- memory: 100Mi
- ephemeral-storage: 1Gi
----
-id: apps/v1:Deployment:guestbook:frontend
-type: Kubernetes
-dependsOn:
-- v1:Namespace:guestbook
-- v1:Service:guestbook:redis-leader
-- v1:Service:guestbook:redis-follower
-- v1:Service:guestbook:frontend
-attributes:
- apiVersion: apps/v1
- kind: Deployment
- metadata:
- name: frontend
- namespace: guestbook
- spec:
- replicas: 1
- selector:
- matchLabels:
- app.kubernetes.io/name: guestbook
- app.kubernetes.io/env: test
- app.kubernetes.io/instance: guestbook-test
- cluster.x-k8s.io/cluster-name: demo-cluster-name
- app.kubernetes.io/component: frontend
- template:
- metadata:
- labels:
- app.kubernetes.io/name: guestbook
- app.kubernetes.io/env: test
- app.kubernetes.io/instance: guestbook-test
- cluster.x-k8s.io/cluster-name: demo-cluster-name
- app.kubernetes.io/component: frontend
- spec:
- containers:
- - env:
- - name: GET_HOSTS_FROM
- value: dns
- image: gcr.io/google-samples/gb-frontend:v4
- name: php-redis
- ports:
- - containerPort: 80
- protocol: TCP
- resources:
- limits:
- cpu: 100m
- memory: 100Mi
- ephemeral-storage: 1Gi
- requests:
- cpu: 100m
- memory: 100Mi
- ephemeral-storage: 1Gi
----
-id: v1:Namespace:guestbook
-type: Kubernetes
-attributes:
- apiVersion: v1
- kind: Namespace
- metadata:
- name: guestbook
----
-id: v1:Service:guestbook:redis-leader
-type: Kubernetes
-dependsOn:
-- v1:Namespace:guestbook
-attributes:
- apiVersion: v1
- kind: Service
- metadata:
- name: redis-leader
- namespace: guestbook
- spec:
- ports:
- - port: 6379
- selector:
- app.kubernetes.io/name: guestbook
- app.kubernetes.io/env: test
- app.kubernetes.io/instance: guestbook-test
- cluster.x-k8s.io/cluster-name: demo-cluster-name
- app.kubernetes.io/component: redis-leader
----
-id: v1:Service:guestbook:redis-follower
-type: Kubernetes
-dependsOn:
-- v1:Namespace:guestbook
-attributes:
- apiVersion: v1
- kind: Service
- metadata:
- name: redis-follower
- namespace: guestbook
- spec:
- ports:
- - port: 6379
- selector:
- app.kubernetes.io/name: guestbook
- app.kubernetes.io/env: test
- app.kubernetes.io/instance: guestbook-test
- cluster.x-k8s.io/cluster-name: demo-cluster-name
- app.kubernetes.io/component: redis-follower
----
-id: v1:Service:guestbook:frontend
-type: Kubernetes
-dependsOn:
-- v1:Namespace:guestbook
-attributes:
- apiVersion: v1
- kind: Service
- metadata:
- name: frontend
- namespace: guestbook
- spec:
- ports:
- - port: 80
- selector:
- app.kubernetes.io/name: guestbook
- app.kubernetes.io/env: test
- app.kubernetes.io/instance: guestbook-test
- cluster.x-k8s.io/cluster-name: demo-cluster-name
- app.kubernetes.io/component: frontend
+- id: apps/v1:Deployment:guestbook:redis-leader
+ type: Kubernetes
+ attributes:
+ apiVersion: apps/v1
+ kind: Deployment
+ metadata:
+ name: redis-leader
+ namespace: guestbook
+ spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app.kubernetes.io/component: redis-leader
+ app.kubernetes.io/env: test
+ app.kubernetes.io/instance: guestbook-test
+ app.kubernetes.io/name: guestbook
+ cluster.x-k8s.io/cluster-name: demo-cluster-name
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/component: redis-leader
+ app.kubernetes.io/env: test
+ app.kubernetes.io/instance: guestbook-test
+ app.kubernetes.io/name: guestbook
+ cluster.x-k8s.io/cluster-name: demo-cluster-name
+ spec:
+ containers:
+ - image: docker.io/redis:6.0.5
+ name: main
+ ports:
+ - containerPort: 6379
+ protocol: TCP
+ resources:
+ limits:
+ cpu: 100m
+ ephemeral-storage: 1Gi
+ memory: 100Mi
+ requests:
+ cpu: 100m
+ ephemeral-storage: 1Gi
+ memory: 100Mi
+ dependsOn:
+ - v1:Namespace:guestbook
+ - v1:Service:guestbook:redis-leader
+ - v1:Service:guestbook:redis-follower
+ - v1:Service:guestbook:frontend
+- id: apps/v1:Deployment:guestbook:redis-follower
+ type: Kubernetes
+ attributes:
+ apiVersion: apps/v1
+ kind: Deployment
+ metadata:
+ name: redis-follower
+ namespace: guestbook
+ spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app.kubernetes.io/component: redis-follower
+ app.kubernetes.io/env: test
+ app.kubernetes.io/instance: guestbook-test
+ app.kubernetes.io/name: guestbook
+ cluster.x-k8s.io/cluster-name: demo-cluster-name
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/component: redis-follower
+ app.kubernetes.io/env: test
+ app.kubernetes.io/instance: guestbook-test
+ app.kubernetes.io/name: guestbook
+ cluster.x-k8s.io/cluster-name: demo-cluster-name
+ spec:
+ containers:
+ - image: gcr.io/google_samples/gb-redis-follower:v2
+ name: main
+ ports:
+ - containerPort: 6379
+ protocol: TCP
+ resources:
+ limits:
+ cpu: 100m
+ ephemeral-storage: 1Gi
+ memory: 100Mi
+ requests:
+ cpu: 100m
+ ephemeral-storage: 1Gi
+ memory: 100Mi
+ dependsOn:
+ - v1:Namespace:guestbook
+ - v1:Service:guestbook:redis-leader
+ - v1:Service:guestbook:redis-follower
+ - v1:Service:guestbook:frontend
+- id: apps/v1:Deployment:guestbook:frontend
+ type: Kubernetes
+ attributes:
+ apiVersion: apps/v1
+ kind: Deployment
+ metadata:
+ name: frontend
+ namespace: guestbook
+ spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app.kubernetes.io/component: frontend
+ app.kubernetes.io/env: test
+ app.kubernetes.io/instance: guestbook-test
+ app.kubernetes.io/name: guestbook
+ cluster.x-k8s.io/cluster-name: demo-cluster-name
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/component: frontend
+ app.kubernetes.io/env: test
+ app.kubernetes.io/instance: guestbook-test
+ app.kubernetes.io/name: guestbook
+ cluster.x-k8s.io/cluster-name: demo-cluster-name
+ spec:
+ containers:
+ - env:
+ - name: GET_HOSTS_FROM
+ value: dns
+ image: gcr.io/google-samples/gb-frontend:v4
+ name: php-redis
+ ports:
+ - containerPort: 80
+ protocol: TCP
+ resources:
+ limits:
+ cpu: 100m
+ ephemeral-storage: 1Gi
+ memory: 100Mi
+ requests:
+ cpu: 100m
+ ephemeral-storage: 1Gi
+ memory: 100Mi
+ dependsOn:
+ - v1:Namespace:guestbook
+ - v1:Service:guestbook:redis-leader
+ - v1:Service:guestbook:redis-follower
+ - v1:Service:guestbook:frontend
+- id: v1:Namespace:guestbook
+ type: Kubernetes
+ attributes:
+ apiVersion: v1
+ kind: Namespace
+ metadata:
+ name: guestbook
+- id: v1:Service:guestbook:redis-leader
+ type: Kubernetes
+ attributes:
+ apiVersion: v1
+ kind: Service
+ metadata:
+ name: redis-leader
+ namespace: guestbook
+ spec:
+ ports:
+ - port: 6379
+ selector:
+ app.kubernetes.io/component: redis-leader
+ app.kubernetes.io/env: test
+ app.kubernetes.io/instance: guestbook-test
+ app.kubernetes.io/name: guestbook
+ cluster.x-k8s.io/cluster-name: demo-cluster-name
+ dependsOn:
+ - v1:Namespace:guestbook
+- id: v1:Service:guestbook:redis-follower
+ type: Kubernetes
+ attributes:
+ apiVersion: v1
+ kind: Service
+ metadata:
+ name: redis-follower
+ namespace: guestbook
+ spec:
+ ports:
+ - port: 6379
+ selector:
+ app.kubernetes.io/component: redis-follower
+ app.kubernetes.io/env: test
+ app.kubernetes.io/instance: guestbook-test
+ app.kubernetes.io/name: guestbook
+ cluster.x-k8s.io/cluster-name: demo-cluster-name
+ dependsOn:
+ - v1:Namespace:guestbook
+- id: v1:Service:guestbook:frontend
+ type: Kubernetes
+ attributes:
+ apiVersion: v1
+ kind: Service
+ metadata:
+ name: frontend
+ namespace: guestbook
+ spec:
+ ports:
+ - port: 80
+ selector:
+ app.kubernetes.io/component: frontend
+ app.kubernetes.io/env: test
+ app.kubernetes.io/instance: guestbook-test
+ app.kubernetes.io/name: guestbook
+ cluster.x-k8s.io/cluster-name: demo-cluster-name
+ dependsOn:
+ - v1:Namespace:guestbook
diff --git a/appops/http-echo/dev/ci-test/settings.yaml b/appops/http-echo/dev/ci-test/settings.yaml
index c3c9d406..cda1166f 100644
--- a/appops/http-echo/dev/ci-test/settings.yaml
+++ b/appops/http-echo/dev/ci-test/settings.yaml
@@ -1,3 +1,7 @@
kcl_options:
- key: cluster
value: default
+ - key: app
+ value: http-echo
+ - key: env
+ value: dev
diff --git a/appops/http-echo/dev/ci-test/stdout.golden.yaml b/appops/http-echo/dev/ci-test/stdout.golden.yaml
index 1333a487..849ed6f9 100644
--- a/appops/http-echo/dev/ci-test/stdout.golden.yaml
+++ b/appops/http-echo/dev/ci-test/stdout.golden.yaml
@@ -1,96 +1,93 @@
-id: apps/v1:Deployment:http-echo:http-echodev
-type: Kubernetes
-dependsOn:
-- v1:Namespace:http-echo
-- v1:Service:http-echo:apple-service
-attributes:
- apiVersion: apps/v1
- kind: Deployment
- metadata:
- name: http-echodev
- namespace: http-echo
- spec:
- replicas: 1
- selector:
- matchLabels:
- app.kubernetes.io/name: http-echo
- app.kubernetes.io/env: dev
- app.kubernetes.io/instance: http-echo-dev
- cluster.x-k8s.io/cluster-name: default
- app.kubernetes.io/component: http-echodev
- template:
- metadata:
- labels:
- app.kubernetes.io/name: http-echo
- app.kubernetes.io/env: dev
- app.kubernetes.io/instance: http-echo-dev
- cluster.x-k8s.io/cluster-name: default
- app.kubernetes.io/component: http-echodev
- spec:
- containers:
- - args:
- - -text=apple
- image: hashicorp/http-echo
- name: apple-app
- resources:
- limits:
- cpu: 100m
- memory: 100Mi
- ephemeral-storage: 1Gi
- requests:
- cpu: 100m
- memory: 100Mi
- ephemeral-storage: 1Gi
----
-id: v1:Namespace:http-echo
-type: Kubernetes
-attributes:
- apiVersion: v1
- kind: Namespace
- metadata:
- name: http-echo
----
-id: v1:Service:http-echo:apple-service
-type: Kubernetes
-dependsOn:
-- v1:Namespace:http-echo
-attributes:
- apiVersion: v1
- kind: Service
- metadata:
- name: apple-service
- namespace: http-echo
- spec:
- ports:
- - port: 5678
- selector:
- app.kubernetes.io/name: http-echo
- app.kubernetes.io/env: dev
- app.kubernetes.io/instance: http-echo-dev
- cluster.x-k8s.io/cluster-name: default
- app.kubernetes.io/component: http-echodev
- type: NodePort
----
-id: networking.k8s.io/v1:Ingress:http-echo:example-ingress
-type: Kubernetes
-dependsOn:
-- v1:Namespace:http-echo
-- v1:Service:http-echo:apple-service
-- apps/v1:Deployment:http-echo:http-echodev
-attributes:
- apiVersion: networking.k8s.io/v1
- kind: Ingress
- metadata:
- name: example-ingress
- namespace: http-echo
- spec:
- rules:
- - http:
- paths:
- - path: /apple
- pathType: Prefix
- backend:
- service:
- name: apple-service
- port:
- number: 5678
+- id: apps/v1:Deployment:http-echo:http-echodev
+ type: Kubernetes
+ attributes:
+ apiVersion: apps/v1
+ kind: Deployment
+ metadata:
+ name: http-echodev
+ namespace: http-echo
+ spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app.kubernetes.io/component: http-echodev
+ app.kubernetes.io/env: dev
+ app.kubernetes.io/instance: http-echo-dev
+ app.kubernetes.io/name: http-echo
+ cluster.x-k8s.io/cluster-name: default
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/component: http-echodev
+ app.kubernetes.io/env: dev
+ app.kubernetes.io/instance: http-echo-dev
+ app.kubernetes.io/name: http-echo
+ cluster.x-k8s.io/cluster-name: default
+ spec:
+ containers:
+ - args:
+ - -text=apple
+ image: hashicorp/http-echo
+ name: apple-app
+ resources:
+ limits:
+ cpu: 100m
+ ephemeral-storage: 1Gi
+ memory: 100Mi
+ requests:
+ cpu: 100m
+ ephemeral-storage: 1Gi
+ memory: 100Mi
+ dependsOn:
+ - v1:Namespace:http-echo
+ - v1:Service:http-echo:apple-service
+- id: v1:Namespace:http-echo
+ type: Kubernetes
+ attributes:
+ apiVersion: v1
+ kind: Namespace
+ metadata:
+ name: http-echo
+- id: v1:Service:http-echo:apple-service
+ type: Kubernetes
+ attributes:
+ apiVersion: v1
+ kind: Service
+ metadata:
+ name: apple-service
+ namespace: http-echo
+ spec:
+ ports:
+ - port: 5678
+ selector:
+ app.kubernetes.io/component: http-echodev
+ app.kubernetes.io/env: dev
+ app.kubernetes.io/instance: http-echo-dev
+ app.kubernetes.io/name: http-echo
+ cluster.x-k8s.io/cluster-name: default
+ type: NodePort
+ dependsOn:
+ - v1:Namespace:http-echo
+- id: networking.k8s.io/v1:Ingress:http-echo:example-ingress
+ type: Kubernetes
+ attributes:
+ apiVersion: networking.k8s.io/v1
+ kind: Ingress
+ metadata:
+ name: example-ingress
+ namespace: http-echo
+ spec:
+ rules:
+ - http:
+ paths:
+ - backend:
+ service:
+ name: apple-service
+ port:
+ number: 5678
+ path: /apple
+ pathType: Prefix
+ dependsOn:
+ - v1:Namespace:http-echo
+ - v1:Service:http-echo:apple-service
+ - apps/v1:Deployment:http-echo:http-echodev
diff --git a/appops/nginx-example/dev/ci-test/settings.yaml b/appops/nginx-example/dev/ci-test/settings.yaml
index c3c9d406..16b466ff 100644
--- a/appops/nginx-example/dev/ci-test/settings.yaml
+++ b/appops/nginx-example/dev/ci-test/settings.yaml
@@ -1,3 +1,7 @@
kcl_options:
- key: cluster
value: default
+ - key: app
+ value: nginx-example
+ - key: env
+ value: dev
diff --git a/appops/nginx-example/dev/ci-test/stdout.golden.yaml b/appops/nginx-example/dev/ci-test/stdout.golden.yaml
index f7642d0f..435760a1 100644
--- a/appops/nginx-example/dev/ci-test/stdout.golden.yaml
+++ b/appops/nginx-example/dev/ci-test/stdout.golden.yaml
@@ -1,75 +1,73 @@
-id: apps/v1:Deployment:nginx-example:nginx-exampledev
-type: Kubernetes
-dependsOn:
-- v1:Namespace:nginx-example
-- v1:Service:nginx-example:nginx-example
-attributes:
- apiVersion: apps/v1
- kind: Deployment
- metadata:
- name: nginx-exampledev
- namespace: nginx-example
- spec:
- replicas: 1
- selector:
- matchLabels:
- app.kubernetes.io/name: nginx-example
- app.kubernetes.io/env: dev
- app.kubernetes.io/instance: nginx-example-dev
- cluster.x-k8s.io/cluster-name: default
- app.kubernetes.io/component: nginx-exampledev
- template:
- metadata:
- labels:
- app.kubernetes.io/name: nginx-example
- app.kubernetes.io/env: dev
- app.kubernetes.io/instance: nginx-example-dev
- cluster.x-k8s.io/cluster-name: default
- app.kubernetes.io/component: nginx-exampledev
- spec:
- containers:
- - image: nginx:1.7.8
- name: main
- ports:
- - containerPort: 80
- protocol: TCP
- resources:
- limits:
- cpu: 100m
- memory: 100Mi
- ephemeral-storage: 1Gi
- requests:
- cpu: 100m
- memory: 100Mi
- ephemeral-storage: 1Gi
----
-id: v1:Namespace:nginx-example
-type: Kubernetes
-attributes:
- apiVersion: v1
- kind: Namespace
- metadata:
- name: nginx-example
----
-id: v1:Service:nginx-example:nginx-example
-type: Kubernetes
-dependsOn:
-- v1:Namespace:nginx-example
-attributes:
- apiVersion: v1
- kind: Service
- metadata:
- name: nginx-example
- namespace: nginx-example
- spec:
- ports:
- - nodePort: 30201
- port: 80
- targetPort: 80
- selector:
- app.kubernetes.io/name: nginx-example
- app.kubernetes.io/env: dev
- app.kubernetes.io/instance: nginx-example-dev
- cluster.x-k8s.io/cluster-name: default
- app.kubernetes.io/component: nginx-exampledev
- type: NodePort
+- id: apps/v1:Deployment:nginx-example:nginx-exampledev
+ type: Kubernetes
+ attributes:
+ apiVersion: apps/v1
+ kind: Deployment
+ metadata:
+ name: nginx-exampledev
+ namespace: nginx-example
+ spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app.kubernetes.io/component: nginx-exampledev
+ app.kubernetes.io/env: dev
+ app.kubernetes.io/instance: nginx-example-dev
+ app.kubernetes.io/name: nginx-example
+ cluster.x-k8s.io/cluster-name: default
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/component: nginx-exampledev
+ app.kubernetes.io/env: dev
+ app.kubernetes.io/instance: nginx-example-dev
+ app.kubernetes.io/name: nginx-example
+ cluster.x-k8s.io/cluster-name: default
+ spec:
+ containers:
+ - image: nginx:1.7.8
+ name: main
+ ports:
+ - containerPort: 80
+ protocol: TCP
+ resources:
+ limits:
+ cpu: 100m
+ ephemeral-storage: 1Gi
+ memory: 100Mi
+ requests:
+ cpu: 100m
+ ephemeral-storage: 1Gi
+ memory: 100Mi
+ dependsOn:
+ - v1:Namespace:nginx-example
+ - v1:Service:nginx-example:nginx-example
+- id: v1:Namespace:nginx-example
+ type: Kubernetes
+ attributes:
+ apiVersion: v1
+ kind: Namespace
+ metadata:
+ name: nginx-example
+- id: v1:Service:nginx-example:nginx-example
+ type: Kubernetes
+ attributes:
+ apiVersion: v1
+ kind: Service
+ metadata:
+ name: nginx-example
+ namespace: nginx-example
+ spec:
+ ports:
+ - nodePort: 30201
+ port: 80
+ targetPort: 80
+ selector:
+ app.kubernetes.io/component: nginx-exampledev
+ app.kubernetes.io/env: dev
+ app.kubernetes.io/instance: nginx-example-dev
+ app.kubernetes.io/name: nginx-example
+ cluster.x-k8s.io/cluster-name: default
+ type: NodePort
+ dependsOn:
+ - v1:Namespace:nginx-example
diff --git a/appops/wordpress/dev/ci-test/settings.yaml b/appops/wordpress/dev/ci-test/settings.yaml
index c3c9d406..23fbfc51 100644
--- a/appops/wordpress/dev/ci-test/settings.yaml
+++ b/appops/wordpress/dev/ci-test/settings.yaml
@@ -1,3 +1,7 @@
kcl_options:
- key: cluster
value: default
+ - key: app
+ value: wordpress
+ - key: env
+ value: dev
diff --git a/appops/wordpress/dev/ci-test/stdout.golden.yaml b/appops/wordpress/dev/ci-test/stdout.golden.yaml
index 083835fe..8ba8562c 100644
--- a/appops/wordpress/dev/ci-test/stdout.golden.yaml
+++ b/appops/wordpress/dev/ci-test/stdout.golden.yaml
@@ -1,182 +1,176 @@
-id: aliyun:alicloud:alicloud_oss_bucket:kusion-oss
-type: Terraform
-attributes:
- acl: public-read
- bucket: kusion-oss
-extensions:
- provider: registry.terraform.io/aliyun/alicloud/1.153.0
- resourceType: alicloud_oss_bucket
- providerMeta:
- region: cn-beijing
----
-id: apps/v1:Deployment:wordpress-example:wordpress-mysql
-type: Kubernetes
-dependsOn:
-- v1:Namespace:wordpress-example
-- v1:Secret:wordpress-example:mysql-pass
-- v1:Service:wordpress-example:wordpress-mysql
-- v1:Service:wordpress-example:wordpress
-attributes:
- apiVersion: apps/v1
- kind: Deployment
- metadata:
- name: wordpress-mysql
- namespace: wordpress-example
- spec:
- replicas: 1
- selector:
- matchLabels:
- app.kubernetes.io/name: wordpress-example
- app.kubernetes.io/env: dev
- app.kubernetes.io/instance: wordpress-example-dev
- cluster.x-k8s.io/cluster-name: default
- app: wordpress-mysql
- tier: database
- app.kubernetes.io/component: wordpress-mysql
- template:
- metadata:
- labels:
- app.kubernetes.io/name: wordpress-example
- app.kubernetes.io/env: dev
- app.kubernetes.io/instance: wordpress-example-dev
- cluster.x-k8s.io/cluster-name: default
- app.kubernetes.io/component: wordpress-mysql
- app: wordpress-mysql
- tier: database
- spec:
- containers:
- - env:
- - name: MYSQL_ROOT_PASSWORD
- valueFrom:
- secretKeyRef:
- key: password
- name: mysql-pass
- image: mysql:5.6
- name: mysql
- ports:
- - containerPort: 3306
- name: mysql
- protocol: TCP
- volumeMounts:
- - mountPath: /var/lib/mysql
- name: mysql-persistent-storage
- volumes:
- - name: mysql-persistent-storage
- emptyDir: {}
----
-id: apps/v1:Deployment:wordpress-example:wordpress-deployment
-type: Kubernetes
-dependsOn:
-- v1:Namespace:wordpress-example
-- v1:Secret:wordpress-example:mysql-pass
-- v1:Service:wordpress-example:wordpress-mysql
-- v1:Service:wordpress-example:wordpress
-attributes:
- apiVersion: apps/v1
- kind: Deployment
- metadata:
- name: wordpress-deployment
- namespace: wordpress-example
- spec:
- replicas: 1
- selector:
- matchLabels:
- app.kubernetes.io/name: wordpress-example
- app.kubernetes.io/env: dev
- app.kubernetes.io/instance: wordpress-example-dev
- cluster.x-k8s.io/cluster-name: default
- app: wordpress-mysql
- tier: frontend
- app.kubernetes.io/component: wordpress-deployment
- template:
- metadata:
- labels:
- app.kubernetes.io/name: wordpress-example
- app.kubernetes.io/env: dev
- app.kubernetes.io/instance: wordpress-example-dev
- cluster.x-k8s.io/cluster-name: default
- app.kubernetes.io/component: wordpress-deployment
- app: wordpress-mysql
- tier: frontend
- spec:
- containers:
- - env:
- - name: WORDPRESS_DB_HOST
- value: wordpress-mysql
- - name: WORDPRESS_DB_PASSWORD
- valueFrom:
- secretKeyRef:
- key: password
- name: mysql-pass
- image: wordpress:4.8-apache
- name: wordpress
- ports:
- - containerPort: 80
- name: wordpress
- protocol: TCP
- volumeMounts:
- - mountPath: /var/www/html
- name: wordpress-persistent-storage
- volumes:
- - name: wordpress-persistent-storage
- emptyDir: {}
----
-id: v1:Namespace:wordpress-example
-type: Kubernetes
-attributes:
- apiVersion: v1
- kind: Namespace
- metadata:
- name: wordpress-example
----
-id: v1:Secret:wordpress-example:mysql-pass
-type: Kubernetes
-dependsOn:
-- v1:Namespace:wordpress-example
-attributes:
- apiVersion: v1
- data:
- password: MTIzNDU2
- kind: Secret
- type: Opaque
- metadata:
- name: mysql-pass
- namespace: wordpress-example
----
-id: v1:Service:wordpress-example:wordpress-mysql
-type: Kubernetes
-dependsOn:
-- v1:Namespace:wordpress-example
-- v1:Secret:wordpress-example:mysql-pass
-attributes:
- apiVersion: v1
- kind: Service
- metadata:
- name: wordpress-mysql
- namespace: wordpress-example
- spec:
- clusterIP: None
- ports:
- - port: 3306
- selector:
- app: wordpress-mysql
- tier: database
----
-id: v1:Service:wordpress-example:wordpress
-type: Kubernetes
-dependsOn:
-- v1:Namespace:wordpress-example
-- v1:Secret:wordpress-example:mysql-pass
-attributes:
- apiVersion: v1
- kind: Service
- metadata:
- name: wordpress
- namespace: wordpress-example
- spec:
- ports:
- - port: 80
- selector:
- app: wordpress-mysql
- tier: frontend
- type: LoadBalancer
+- id: aliyun:alicloud:alicloud_oss_bucket:kusion-oss
+ type: Terraform
+ attributes:
+ acl: public-read
+ bucket: kusion-oss
+ extensions:
+ provider: registry.terraform.io/aliyun/alicloud/1.153.0
+ providerMeta:
+ region: cn-beijing
+ resourceType: alicloud_oss_bucket
+- id: apps/v1:Deployment:wordpress:wordpress-mysql
+ type: Kubernetes
+ attributes:
+ apiVersion: apps/v1
+ kind: Deployment
+ metadata:
+ name: wordpress-mysql
+ namespace: wordpress
+ spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: wordpress-mysql
+ app.kubernetes.io/component: wordpress-mysql
+ app.kubernetes.io/env: dev
+ app.kubernetes.io/instance: wordpress-dev
+ app.kubernetes.io/name: wordpress
+ cluster.x-k8s.io/cluster-name: default
+ tier: database
+ template:
+ metadata:
+ labels:
+ app: wordpress-mysql
+ app.kubernetes.io/component: wordpress-mysql
+ app.kubernetes.io/env: dev
+ app.kubernetes.io/instance: wordpress-dev
+ app.kubernetes.io/name: wordpress
+ cluster.x-k8s.io/cluster-name: default
+ tier: database
+ spec:
+ containers:
+ - env:
+ - name: MYSQL_ROOT_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ key: password
+ name: mysql-pass
+ image: mysql:5.6
+ name: mysql
+ ports:
+ - containerPort: 3306
+ name: mysql
+ protocol: TCP
+ volumeMounts:
+ - mountPath: /var/lib/mysql
+ name: mysql-persistent-storage
+ volumes:
+ - emptyDir: {}
+ name: mysql-persistent-storage
+ dependsOn:
+ - v1:Namespace:wordpress
+ - v1:Secret:wordpress:mysql-pass
+ - v1:Service:wordpress:wordpress-mysql
+ - v1:Service:wordpress:wordpress
+- id: apps/v1:Deployment:wordpress:wordpress-deployment
+ type: Kubernetes
+ attributes:
+ apiVersion: apps/v1
+ kind: Deployment
+ metadata:
+ name: wordpress-deployment
+ namespace: wordpress
+ spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: wordpress-mysql
+ app.kubernetes.io/component: wordpress-deployment
+ app.kubernetes.io/env: dev
+ app.kubernetes.io/instance: wordpress-dev
+ app.kubernetes.io/name: wordpress
+ cluster.x-k8s.io/cluster-name: default
+ tier: frontend
+ template:
+ metadata:
+ labels:
+ app: wordpress-mysql
+ app.kubernetes.io/component: wordpress-deployment
+ app.kubernetes.io/env: dev
+ app.kubernetes.io/instance: wordpress-dev
+ app.kubernetes.io/name: wordpress
+ cluster.x-k8s.io/cluster-name: default
+ tier: frontend
+ spec:
+ containers:
+ - env:
+ - name: WORDPRESS_DB_HOST
+ value: wordpress-mysql
+ - name: WORDPRESS_DB_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ key: password
+ name: mysql-pass
+ image: wordpress:4.8-apache
+ name: wordpress
+ ports:
+ - containerPort: 80
+ name: wordpress
+ protocol: TCP
+ volumeMounts:
+ - mountPath: /var/www/html
+ name: wordpress-persistent-storage
+ volumes:
+ - emptyDir: {}
+ name: wordpress-persistent-storage
+ dependsOn:
+ - v1:Namespace:wordpress
+ - v1:Secret:wordpress:mysql-pass
+ - v1:Service:wordpress:wordpress-mysql
+ - v1:Service:wordpress:wordpress
+- id: v1:Namespace:wordpress
+ type: Kubernetes
+ attributes:
+ apiVersion: v1
+ kind: Namespace
+ metadata:
+ name: wordpress
+- id: v1:Secret:wordpress:mysql-pass
+ type: Kubernetes
+ attributes:
+ apiVersion: v1
+ data:
+ password: MTIzNDU2
+ kind: Secret
+ metadata:
+ name: mysql-pass
+ namespace: wordpress
+ type: Opaque
+ dependsOn:
+ - v1:Namespace:wordpress
+- id: v1:Service:wordpress:wordpress-mysql
+ type: Kubernetes
+ attributes:
+ apiVersion: v1
+ kind: Service
+ metadata:
+ name: wordpress-mysql
+ namespace: wordpress
+ spec:
+ clusterIP: None
+ ports:
+ - port: 3306
+ selector:
+ app: wordpress-mysql
+ tier: database
+ dependsOn:
+ - v1:Namespace:wordpress
+ - v1:Secret:wordpress:mysql-pass
+- id: v1:Service:wordpress:wordpress
+ type: Kubernetes
+ attributes:
+ apiVersion: v1
+ kind: Service
+ metadata:
+ name: wordpress
+ namespace: wordpress
+ spec:
+ ports:
+ - port: 80
+ selector:
+ app: wordpress-mysql
+ tier: frontend
+ type: LoadBalancer
+ dependsOn:
+ - v1:Namespace:wordpress
+ - v1:Secret:wordpress:mysql-pass
diff --git a/base/examples/job-example/dev/ci-test/settings.yaml b/base/examples/job-example/dev/ci-test/settings.yaml
index 4cafb256..b85440de 100644
--- a/base/examples/job-example/dev/ci-test/settings.yaml
+++ b/base/examples/job-example/dev/ci-test/settings.yaml
@@ -1,3 +1,7 @@
kcl_options:
- key: cluster
value: minikube
+ - key: app
+ value: job-example
+ - key: env
+ value: dev
diff --git a/base/examples/job-example/dev/ci-test/stdout.golden.yaml b/base/examples/job-example/dev/ci-test/stdout.golden.yaml
index 072460f6..93e87931 100644
--- a/base/examples/job-example/dev/ci-test/stdout.golden.yaml
+++ b/base/examples/job-example/dev/ci-test/stdout.golden.yaml
@@ -1,89 +1,86 @@
-id: batch/v1:Job:job-example:job-example-dev
-type: Kubernetes
-dependsOn:
-- v1:Namespace:job-example
-- v1:ServiceAccount:job-example:job-example-sa
-- v1:ConfigMap:job-example:job-log-config
-attributes:
- apiVersion: batch/v1
- kind: Job
- metadata:
- name: job-example-dev
- namespace: job-example
- spec:
- activeDeadlineSeconds: 100
- backoffLimit: 5
- completionMode: NonIndexed
- completions: 5
- manualSelector: true
- parallelism: 1
- ttlSecondsAfterFinished: 100
- selector:
- matchLabels:
- app.kubernetes.io/name: job-example
- app.kubernetes.io/env: dev
- app.kubernetes.io/instance: job-example-dev
- cluster.x-k8s.io/cluster-name: minikube
- foo: bar
- template:
- metadata:
- labels:
- app.kubernetes.io/name: job-example
- app.kubernetes.io/env: dev
- app.kubernetes.io/instance: job-example-dev
- cluster.x-k8s.io/cluster-name: minikube
- foo: bar
- spec:
- containers:
- - command:
- - perl
- - -Mbignum=bpi
- - -wle
- - print bpi(2000)
- image: perl
- name: pi
- volumeMounts:
- - mountPath: /etc/config
- name: config-vol
- restartPolicy: Never
- serviceAccountName: job-example-sa
- volumes:
- - name: config-vol
- configMap:
- name: job-log-config
- items:
- - key: log_level
- path: log_level
----
-id: v1:Namespace:job-example
-type: Kubernetes
-attributes:
- apiVersion: v1
- kind: Namespace
- metadata:
- name: job-example
----
-id: v1:ConfigMap:job-example:job-log-config
-type: Kubernetes
-dependsOn:
-- v1:Namespace:job-example
-- v1:ServiceAccount:job-example:job-example-sa
-attributes:
- apiVersion: v1
- data:
- log_level: INFO
- kind: ConfigMap
- metadata:
- name: job-log-config
- namespace: job-example
----
-id: v1:ServiceAccount:job-example:job-example-sa
-type: Kubernetes
-dependsOn:
-- v1:Namespace:job-example
-attributes:
- apiVersion: v1
- kind: ServiceAccount
- metadata:
- name: job-example-sa
- namespace: job-example
+- id: batch/v1:Job:job-example:job-example-dev
+ type: Kubernetes
+ attributes:
+ apiVersion: batch/v1
+ kind: Job
+ metadata:
+ name: job-example-dev
+ namespace: job-example
+ spec:
+ activeDeadlineSeconds: 100
+ backoffLimit: 5
+ completionMode: NonIndexed
+ completions: 5
+ manualSelector: true
+ parallelism: 1
+ selector:
+ matchLabels:
+ app.kubernetes.io/env: dev
+ app.kubernetes.io/instance: job-example-dev
+ app.kubernetes.io/name: job-example
+ cluster.x-k8s.io/cluster-name: minikube
+ foo: bar
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/env: dev
+ app.kubernetes.io/instance: job-example-dev
+ app.kubernetes.io/name: job-example
+ cluster.x-k8s.io/cluster-name: minikube
+ foo: bar
+ spec:
+ containers:
+ - command:
+ - perl
+ - -Mbignum=bpi
+ - -wle
+ - print bpi(2000)
+ image: perl
+ name: pi
+ volumeMounts:
+ - mountPath: /etc/config
+ name: config-vol
+ restartPolicy: Never
+ serviceAccountName: job-example-sa
+ volumes:
+ - configMap:
+ items:
+ - key: log_level
+ path: log_level
+ name: job-log-config
+ name: config-vol
+ ttlSecondsAfterFinished: 100
+ dependsOn:
+ - v1:Namespace:job-example
+ - v1:ServiceAccount:job-example:job-example-sa
+ - v1:ConfigMap:job-example:job-log-config
+- id: v1:Namespace:job-example
+ type: Kubernetes
+ attributes:
+ apiVersion: v1
+ kind: Namespace
+ metadata:
+ name: job-example
+- id: v1:ConfigMap:job-example:job-log-config
+ type: Kubernetes
+ attributes:
+ apiVersion: v1
+ data:
+ log_level: INFO
+ kind: ConfigMap
+ metadata:
+ name: job-log-config
+ namespace: job-example
+ dependsOn:
+ - v1:Namespace:job-example
+ - v1:ServiceAccount:job-example:job-example-sa
+- id: v1:ServiceAccount:job-example:job-example-sa
+ type: Kubernetes
+ attributes:
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: job-example-sa
+ namespace: job-example
+ dependsOn:
+ - v1:Namespace:job-example
diff --git a/base/examples/kcl-vault-agent/dev/ci-test/settings.yaml b/base/examples/kcl-vault-agent/dev/ci-test/settings.yaml
index c3c9d406..7854b8a0 100644
--- a/base/examples/kcl-vault-agent/dev/ci-test/settings.yaml
+++ b/base/examples/kcl-vault-agent/dev/ci-test/settings.yaml
@@ -1,3 +1,7 @@
kcl_options:
- key: cluster
value: default
+ - key: app
+ value: kcl-vault-agent
+ - key: env
+ value: dev
diff --git a/base/examples/kcl-vault-agent/dev/ci-test/stdout.golden.yaml b/base/examples/kcl-vault-agent/dev/ci-test/stdout.golden.yaml
index f6b8482f..d19d1126 100644
--- a/base/examples/kcl-vault-agent/dev/ci-test/stdout.golden.yaml
+++ b/base/examples/kcl-vault-agent/dev/ci-test/stdout.golden.yaml
@@ -1,48 +1,46 @@
-id: apps/v1:Deployment:kcl-vault-agent:kcl-vault-agentdev
-type: Kubernetes
-dependsOn:
-- v1:Namespace:kcl-vault-agent
-- v1:ServiceAccount:kcl-vault-agent:kcl-vault-agent-sa
-attributes:
- apiVersion: apps/v1
- kind: Deployment
- metadata:
- name: kcl-vault-agentdev
- namespace: kcl-vault-agent
- spec:
- replicas: 1
- selector:
- matchLabels:
- app: kcl-vault-agent-test
- template:
- metadata:
- annotations:
- vault.hashicorp.com/agent-inject: 'true'
- vault.hashicorp.com/role: kcl-vault-agent-role
- vault.hashicorp.com/agent-inject-secret-database-config.txt: internal/data/database/config
- labels:
- app: kcl-vault-agent-test
- spec:
- containers:
- - image: jweissig/app:0.0.1
- name: kcl-vault-test
- serviceAccountName: kcl-vault-agent-sa
----
-id: v1:Namespace:kcl-vault-agent
-type: Kubernetes
-attributes:
- apiVersion: v1
- kind: Namespace
- metadata:
- name: kcl-vault-agent
----
-id: v1:ServiceAccount:kcl-vault-agent:kcl-vault-agent-sa
-type: Kubernetes
-dependsOn:
-- v1:Namespace:kcl-vault-agent
-attributes:
- apiVersion: v1
- kind: ServiceAccount
- metadata:
- name: kcl-vault-agent-sa
- namespace: kcl-vault-agent
+- id: apps/v1:Deployment:kcl-vault-agent:kcl-vault-agentdev
+ type: Kubernetes
+ attributes:
+ apiVersion: apps/v1
+ kind: Deployment
+ metadata:
+ name: kcl-vault-agentdev
+ namespace: kcl-vault-agent
+ spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: kcl-vault-agent-test
+ template:
+ metadata:
+ annotations:
+ vault.hashicorp.com/agent-inject: "true"
+ vault.hashicorp.com/agent-inject-secret-database-config.txt: internal/data/database/config
+ vault.hashicorp.com/role: kcl-vault-agent-role
+ labels:
+ app: kcl-vault-agent-test
+ spec:
+ containers:
+ - image: jweissig/app:0.0.1
+ name: kcl-vault-test
+ serviceAccountName: kcl-vault-agent-sa
+ dependsOn:
+ - v1:Namespace:kcl-vault-agent
+ - v1:ServiceAccount:kcl-vault-agent:kcl-vault-agent-sa
+- id: v1:Namespace:kcl-vault-agent
+ type: Kubernetes
+ attributes:
+ apiVersion: v1
+ kind: Namespace
+ metadata:
+ name: kcl-vault-agent
+- id: v1:ServiceAccount:kcl-vault-agent:kcl-vault-agent-sa
+ type: Kubernetes
+ attributes:
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: kcl-vault-agent-sa
+ namespace: kcl-vault-agent
+ dependsOn:
+ - v1:Namespace:kcl-vault-agent
diff --git a/base/examples/kcl-vault-csi/dev/ci-test/settings.yaml b/base/examples/kcl-vault-csi/dev/ci-test/settings.yaml
index c3c9d406..7afbdb01 100644
--- a/base/examples/kcl-vault-csi/dev/ci-test/settings.yaml
+++ b/base/examples/kcl-vault-csi/dev/ci-test/settings.yaml
@@ -1,3 +1,7 @@
kcl_options:
- key: cluster
value: default
+ - key: app
+ value: kcl-vault-csi
+ - key: env
+ value: dev
diff --git a/base/examples/kcl-vault-csi/dev/ci-test/stdout.golden.yaml b/base/examples/kcl-vault-csi/dev/ci-test/stdout.golden.yaml
index 8ad9ccf2..f55ef736 100644
--- a/base/examples/kcl-vault-csi/dev/ci-test/stdout.golden.yaml
+++ b/base/examples/kcl-vault-csi/dev/ci-test/stdout.golden.yaml
@@ -1,70 +1,56 @@
-id: apps/v1:Deployment:kcl-vault-csi:kcl-vault-csidev
-type: Kubernetes
-dependsOn:
-- v1:Namespace:kcl-vault-csi
-- v1:ServiceAccount:kcl-vault-csi:kcl-vault-csi-sa
-attributes:
- apiVersion: apps/v1
- kind: Deployment
- metadata:
- name: kcl-vault-csidev
- namespace: kcl-vault-csi
- spec:
- replicas: 1
- selector:
- matchLabels:
- app: kcl-vault-csi
- template:
- metadata:
- labels:
- app: kcl-vault-csi
- spec:
- containers:
- - image: jweissig/app:0.0.1
- name: kcl-vault-csi
- volumeMounts:
- - mountPath: /mnt/secrets-store
- name: secrets-store-inline
- readOnly: true
- serviceAccountName: kcl-vault-csi-sa
- volumes:
- - name: secrets-store-inline
- csi:
- driver: secrets-store.csi.k8s.io
- readOnly: true
- volumeAttributes:
- secretProviderClass: kcl-vault-csi-database
----
-id: v1:Namespace:kcl-vault-csi
-type: Kubernetes
-attributes:
- apiVersion: v1
- kind: Namespace
- metadata:
- name: kcl-vault-csi
----
-id: v1:ServiceAccount:kcl-vault-csi:kcl-vault-csi-sa
-type: Kubernetes
-dependsOn:
-- v1:Namespace:kcl-vault-csi
-attributes:
- apiVersion: v1
- kind: ServiceAccount
- metadata:
- name: kcl-vault-csi-sa
- namespace: kcl-vault-csi
----
-apiVersion: secrets-store.csi.x-k8s.io/v1
-kind: SecretProviderClass
-metadata:
- name: kcl-vault-csi-database
- namespace: kcl-vault-csi
-spec:
- provider: vault
- parameters:
- vaultAddress: http://vault.default:8200
- roleName: kcl-vault-csi-role
- objects: |
- - objectName: "db-password"
- secretPath: "secret/data/db-pass"
- secretKey: "password"
+- id: apps/v1:Deployment:kcl-vault-csi:kcl-vault-csidev
+ type: Kubernetes
+ attributes:
+ apiVersion: apps/v1
+ kind: Deployment
+ metadata:
+ name: kcl-vault-csidev
+ namespace: kcl-vault-csi
+ spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: kcl-vault-csi
+ template:
+ metadata:
+ labels:
+ app: kcl-vault-csi
+ spec:
+ containers:
+ - image: jweissig/app:0.0.1
+ name: kcl-vault-csi
+ volumeMounts:
+ - mountPath: /mnt/secrets-store
+ name: secrets-store-inline
+ readOnly: true
+ serviceAccountName: kcl-vault-csi-sa
+ volumes:
+ - csi:
+ driver: secrets-store.csi.k8s.io
+ readOnly: true
+ volumeAttributes:
+ secretProviderClass: kcl-vault-csi-database
+ name: secrets-store-inline
+ dependsOn:
+ - v1:Namespace:kcl-vault-csi
+ - v1:ServiceAccount:kcl-vault-csi:kcl-vault-csi-sa
+- id: v1:Namespace:kcl-vault-csi
+ type: Kubernetes
+ attributes:
+ apiVersion: v1
+ kind: Namespace
+ metadata:
+ name: kcl-vault-csi
+- id: v1:ServiceAccount:kcl-vault-csi:kcl-vault-csi-sa
+ type: Kubernetes
+ attributes:
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: kcl-vault-csi-sa
+ namespace: kcl-vault-csi
+ dependsOn:
+ - v1:Namespace:kcl-vault-csi
+- id: ""
+ type: ""
+ attributes: {}
diff --git a/base/examples/monitoring/prometheus-example-app/prod/ci-test/settings.yaml b/base/examples/monitoring/prometheus-example-app/prod/ci-test/settings.yaml
index 4e71a293..dc795877 100644
--- a/base/examples/monitoring/prometheus-example-app/prod/ci-test/settings.yaml
+++ b/base/examples/monitoring/prometheus-example-app/prod/ci-test/settings.yaml
@@ -1 +1,5 @@
kcl_options:
+ - key: app
+ value: prometheus-example-app
+ - key: env
+ value: prod
diff --git a/base/examples/monitoring/prometheus-example-app/prod/ci-test/stdout.golden.yaml b/base/examples/monitoring/prometheus-example-app/prod/ci-test/stdout.golden.yaml
index 718c7ba5..8de74910 100644
--- a/base/examples/monitoring/prometheus-example-app/prod/ci-test/stdout.golden.yaml
+++ b/base/examples/monitoring/prometheus-example-app/prod/ci-test/stdout.golden.yaml
@@ -1,73 +1,71 @@
-id: apps/v1:Deployment:prometheus-example-app:prometheus-example-appprod
-type: Kubernetes
-dependsOn:
-- v1:Namespace:prometheus-example-app
-attributes:
- apiVersion: apps/v1
- kind: Deployment
- metadata:
- name: prometheus-example-appprod
- namespace: prometheus-example-app
- spec:
- replicas: 1
- selector:
- matchLabels:
- app.kubernetes.io/name: prometheus-example-app
- app.kubernetes.io/env: prod
- app.kubernetes.io/instance: prometheus-example-app-prod
- app.kubernetes.io/component: prometheus-example-appprod
- template:
- metadata:
+- id: apps/v1:Deployment:prometheus-example-app:prometheus-example-appprod
+ type: Kubernetes
+ attributes:
+ apiVersion: apps/v1
+ kind: Deployment
+ metadata:
+ name: prometheus-example-appprod
+ namespace: prometheus-example-app
+ spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app.kubernetes.io/component: prometheus-example-appprod
+ app.kubernetes.io/env: prod
+ app.kubernetes.io/instance: prometheus-example-app-prod
+ app.kubernetes.io/name: prometheus-example-app
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/component: prometheus-example-appprod
+ app.kubernetes.io/env: prod
+ app.kubernetes.io/instance: prometheus-example-app-prod
+ app.kubernetes.io/name: prometheus-example-app
+ spec:
+ containers:
+ - image: quay.io/brancz/prometheus-example-app:v0.3.0
+ name: prometheus-example-app
+ ports:
+ - containerPort: 8080
+ name: web
+ protocol: TCP
+ resources:
+ limits:
+ cpu: 100m
+ ephemeral-storage: 1Gi
+ memory: 100Mi
+ requests:
+ cpu: 100m
+ ephemeral-storage: 1Gi
+ memory: 100Mi
+ dependsOn:
+ - v1:Namespace:prometheus-example-app
+- id: v1:Namespace:prometheus-example-app
+ type: Kubernetes
+ attributes:
+ apiVersion: v1
+ kind: Namespace
+ metadata:
+ name: prometheus-example-app
+- id: monitoring.coreos.com/v1:PodMonitor:prometheus-example-app:prometheus-example-appprod
+ type: Kubernetes
+ attributes:
+ apiVersion: monitoring.coreos.com/v1
+ kind: PodMonitor
+ metadata:
labels:
- app.kubernetes.io/name: prometheus-example-app
- app.kubernetes.io/env: prod
- app.kubernetes.io/instance: prometheus-example-app-prod
- app.kubernetes.io/component: prometheus-example-appprod
- spec:
- containers:
- - image: quay.io/brancz/prometheus-example-app:v0.3.0
- name: prometheus-example-app
- ports:
- - containerPort: 8080
- name: web
- protocol: TCP
- resources:
- limits:
- cpu: 100m
- memory: 100Mi
- ephemeral-storage: 1Gi
- requests:
- cpu: 100m
- memory: 100Mi
- ephemeral-storage: 1Gi
----
-id: v1:Namespace:prometheus-example-app
-type: Kubernetes
-attributes:
- apiVersion: v1
- kind: Namespace
- metadata:
- name: prometheus-example-app
----
-id: monitoring.coreos.com/v1:PodMonitor:prometheus-example-app:prometheus-example-appprod
-type: Kubernetes
-attributes:
- apiVersion: monitoring.coreos.com/v1
- kind: PodMonitor
- metadata:
- labels:
- app.kubernetes.io/name: prometheus-example-app
- app.kubernetes.io/env: prod
- app.kubernetes.io/instance: prometheus-example-app-prod
- app.kubernetes.io/component: prometheus-example-appprod
- name: prometheus-example-appprod
- namespace: prometheus-example-app
- spec:
- podMetricsEndpoints:
- - port: web
- selector:
- matchLabels:
- app.kubernetes.io/name: prometheus-example-app
- app.kubernetes.io/env: prod
- app.kubernetes.io/instance: prometheus-example-app-prod
- app.kubernetes.io/component: prometheus-example-appprod
+ app.kubernetes.io/component: prometheus-example-appprod
+ app.kubernetes.io/env: prod
+ app.kubernetes.io/instance: prometheus-example-app-prod
+ app.kubernetes.io/name: prometheus-example-app
+ name: prometheus-example-appprod
+ namespace: prometheus-example-app
+ spec:
+ podMetricsEndpoints:
+ - port: web
+ selector:
+ matchLabels:
+ app.kubernetes.io/component: prometheus-example-appprod
+ app.kubernetes.io/env: prod
+ app.kubernetes.io/instance: prometheus-example-app-prod
+ app.kubernetes.io/name: prometheus-example-app
diff --git a/base/examples/monitoring/prometheus-install/prod/ci-test/settings.yaml b/base/examples/monitoring/prometheus-install/prod/ci-test/settings.yaml
index 4e71a293..ef632abb 100644
--- a/base/examples/monitoring/prometheus-install/prod/ci-test/settings.yaml
+++ b/base/examples/monitoring/prometheus-install/prod/ci-test/settings.yaml
@@ -1 +1,5 @@
kcl_options:
+ - key: app
+ value: prometheus-install
+ - key: env
+ value: prod
diff --git a/base/examples/monitoring/prometheus-install/prod/ci-test/stdout.golden.yaml b/base/examples/monitoring/prometheus-install/prod/ci-test/stdout.golden.yaml
index 5af71223..0419207d 100644
--- a/base/examples/monitoring/prometheus-install/prod/ci-test/stdout.golden.yaml
+++ b/base/examples/monitoring/prometheus-install/prod/ci-test/stdout.golden.yaml
@@ -1,181 +1,174 @@
-id: rbac.authorization.k8s.io/v1:ClusterRoleBinding:default:prometheus
-type: Kubernetes
-dependsOn:
-- rbac.authorization.k8s.io/v1:ClusterRole:default:prometheus
-attributes:
- apiVersion: rbac.authorization.k8s.io/v1
- kind: ClusterRoleBinding
- subjects:
- - kind: ServiceAccount
- name: prometheus
- namespace: default
- metadata:
- name: prometheus
- namespace: default
- roleRef:
- apiGroup: rbac.authorization.k8s.io
+- id: rbac.authorization.k8s.io/v1:ClusterRoleBinding:default:prometheus
+ type: Kubernetes
+ attributes:
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ name: prometheus
+ namespace: default
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: prometheus
+ subjects:
+ - kind: ServiceAccount
+ name: prometheus
+ namespace: default
+ dependsOn:
+ - rbac.authorization.k8s.io/v1:ClusterRole:default:prometheus
+- id: rbac.authorization.k8s.io/v1:ClusterRole:default:prometheus
+ type: Kubernetes
+ attributes:
+ apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
- name: prometheus
----
-id: rbac.authorization.k8s.io/v1:ClusterRole:default:prometheus
-type: Kubernetes
-attributes:
- apiVersion: rbac.authorization.k8s.io/v1
- kind: ClusterRole
- rules:
- - apiGroups:
- - ''
- resources:
- - nodes
- - nodes/metrics
- - services
- - endpoints
- - pods
- verbs:
- - get
- - list
- - watch
- - apiGroups:
- - ''
- resources:
- - configmaps
- verbs:
- - get
- - apiGroups:
- - networking.k8s.io
- resources:
- - ingresses
- verbs:
- - get
- - list
- - watch
- - nonResourceURLs:
- - /metrics
- verbs:
- - get
- metadata:
- name: prometheus
- namespace: default
----
-id: monitoring.coreos.com/v1alpha1:AlertmanagerConfig:default:main
-type: Kubernetes
-attributes:
- apiVersion: monitoring.coreos.com/v1alpha1
- kind: AlertmanagerConfig
- metadata:
- labels:
- alertmanagerConfig: main
- name: main
- namespace: default
- spec:
- receivers:
- - name: webhook
- webhookConfigs:
- - url: http://example.com/
- route:
- groupBy:
- - job
- groupInterval: 5m
- groupWait: 30s
- receiver: webhook
- repeatInterval: 12h
----
-id: monitoring.coreos.com/v1:Prometheus:default:main
-type: Kubernetes
-attributes:
- apiVersion: monitoring.coreos.com/v1
- kind: Prometheus
- metadata:
- name: main
- namespace: default
- spec:
- serviceMonitorSelector:
- matchLabels:
- prometheus: main
- replicas: 2
- scrapeInterval: 30s
- serviceAccountName: prometheus
- evaluationInterval: 30s
- alerting:
- alertmanagers:
- - name: alertmanager
+ metadata:
+ name: prometheus
+ namespace: default
+ rules:
+ - apiGroups:
+ - ""
+ resources:
+ - nodes
+ - nodes/metrics
+ - services
+ - endpoints
+ - pods
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - configmaps
+ verbs:
+ - get
+ - apiGroups:
+ - networking.k8s.io
+ resources:
+ - ingresses
+ verbs:
+ - get
+ - list
+ - watch
+ - nonResourceURLs:
+ - /metrics
+ verbs:
+ - get
+- id: monitoring.coreos.com/v1alpha1:AlertmanagerConfig:default:main
+ type: Kubernetes
+ attributes:
+ apiVersion: monitoring.coreos.com/v1alpha1
+ kind: AlertmanagerConfig
+ metadata:
+ labels:
+ alertmanagerConfig: main
+ name: main
+ namespace: default
+ spec:
+ receivers:
+ - name: webhook
+ webhookConfigs:
+ - url: http://example.com/
+ route:
+ groupBy:
+ - job
+ groupInterval: 5m
+ groupWait: 30s
+ receiver: webhook
+ repeatInterval: 12h
+- id: monitoring.coreos.com/v1:Prometheus:default:main
+ type: Kubernetes
+ attributes:
+ apiVersion: monitoring.coreos.com/v1
+ kind: Prometheus
+ metadata:
+ name: main
+ namespace: default
+ spec:
+ alerting:
+ alertmanagers:
+ - name: alertmanager
+ namespace: default
+ port: web
+ evaluationInterval: 30s
+ replicas: 2
+ ruleSelector:
+ matchLabels:
+ prometheus: main
+ role: alert-rules
+ scrapeInterval: 30s
+ serviceAccountName: prometheus
+ serviceMonitorSelector:
+ matchLabels:
+ prometheus: main
+- id: monitoring.coreos.com/v1:Alertmanager:default:main
+ type: Kubernetes
+ attributes:
+ apiVersion: monitoring.coreos.com/v1
+ kind: Alertmanager
+ metadata:
+ name: main
+ namespace: default
+ spec:
+ alertmanagerConfiguration:
+ name: main
+ replicas: 3
+ retention: 120h
+- id: v1:Service:default:alertmanager
+ type: Kubernetes
+ attributes:
+ apiVersion: v1
+ kind: Service
+ metadata:
+ name: alertmanager
+ namespace: default
+ spec:
+ ports:
+ - name: web
+ port: 9093
+ targetPort: web
+ - name: reloader-web
+ port: 8080
+ targetPort: reloader-web
+ selector:
+ alertmanager: main
+ sessionAffinity: ClientIP
+ dependsOn:
+ - rbac.authorization.k8s.io/v1:ClusterRole:default:prometheus
+ - rbac.authorization.k8s.io/v1:ClusterRoleBinding:default:prometheus
+ - v1:ServiceAccount:default:prometheus
+- id: v1:Service:default:prometheus
+ type: Kubernetes
+ attributes:
+ apiVersion: v1
+ kind: Service
+ metadata:
+ name: prometheus
+ namespace: default
+ spec:
+ ports:
+ - name: web
+ port: 9090
+ targetPort: web
+ - name: reloader-web
+ port: 8080
+ targetPort: reloader-web
+ selector:
+ prometheus: main
+ sessionAffinity: ClientIP
+ dependsOn:
+ - rbac.authorization.k8s.io/v1:ClusterRole:default:prometheus
+ - rbac.authorization.k8s.io/v1:ClusterRoleBinding:default:prometheus
+ - v1:ServiceAccount:default:prometheus
+- id: v1:ServiceAccount:default:prometheus
+ type: Kubernetes
+ attributes:
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: prometheus
namespace: default
- port: web
- ruleSelector:
- matchLabels:
- role: alert-rules
- prometheus: main
----
-id: monitoring.coreos.com/v1:Alertmanager:default:main
-type: Kubernetes
-attributes:
- apiVersion: monitoring.coreos.com/v1
- kind: Alertmanager
- metadata:
- name: main
- namespace: default
- spec:
- replicas: 3
- retention: 120h
- alertmanagerConfiguration:
- name: main
----
-id: v1:Service:default:alertmanager
-type: Kubernetes
-dependsOn:
-- rbac.authorization.k8s.io/v1:ClusterRole:default:prometheus
-- rbac.authorization.k8s.io/v1:ClusterRoleBinding:default:prometheus
-- v1:ServiceAccount:default:prometheus
-attributes:
- apiVersion: v1
- kind: Service
- metadata:
- name: alertmanager
- namespace: default
- spec:
- ports:
- - name: web
- port: 9093
- targetPort: web
- - name: reloader-web
- port: 8080
- targetPort: reloader-web
- selector:
- alertmanager: main
- sessionAffinity: ClientIP
----
-id: v1:Service:default:prometheus
-type: Kubernetes
-dependsOn:
-- rbac.authorization.k8s.io/v1:ClusterRole:default:prometheus
-- rbac.authorization.k8s.io/v1:ClusterRoleBinding:default:prometheus
-- v1:ServiceAccount:default:prometheus
-attributes:
- apiVersion: v1
- kind: Service
- metadata:
- name: prometheus
- namespace: default
- spec:
- ports:
- - name: web
- port: 9090
- targetPort: web
- - name: reloader-web
- port: 8080
- targetPort: reloader-web
- selector:
- prometheus: main
- sessionAffinity: ClientIP
----
-id: v1:ServiceAccount:default:prometheus
-type: Kubernetes
-dependsOn:
-- rbac.authorization.k8s.io/v1:ClusterRole:default:prometheus
-- rbac.authorization.k8s.io/v1:ClusterRoleBinding:default:prometheus
-attributes:
- apiVersion: v1
- kind: ServiceAccount
- metadata:
- name: prometheus
- namespace: default
+ dependsOn:
+ - rbac.authorization.k8s.io/v1:ClusterRole:default:prometheus
+ - rbac.authorization.k8s.io/v1:ClusterRoleBinding:default:prometheus
diff --git a/base/examples/monitoring/prometheus-rules/alert/ci-test/settings.yaml b/base/examples/monitoring/prometheus-rules/alert/ci-test/settings.yaml
index 2beb8a49..4e71a293 100644
--- a/base/examples/monitoring/prometheus-rules/alert/ci-test/settings.yaml
+++ b/base/examples/monitoring/prometheus-rules/alert/ci-test/settings.yaml
@@ -1 +1 @@
-kcl_options:
\ No newline at end of file
+kcl_options:
diff --git a/base/examples/monitoring/prometheus-rules/alert/ci-test/stdout.golden.yaml b/base/examples/monitoring/prometheus-rules/alert/ci-test/stdout.golden.yaml
index 65767f10..9bec6b5e 100644
--- a/base/examples/monitoring/prometheus-rules/alert/ci-test/stdout.golden.yaml
+++ b/base/examples/monitoring/prometheus-rules/alert/ci-test/stdout.golden.yaml
@@ -1,17 +1,17 @@
-id: monitoring.coreos.com/v1:PrometheusRule:default:alert
-type: Kubernetes
-attributes:
- apiVersion: monitoring.coreos.com/v1
- kind: PrometheusRule
- metadata:
- labels:
- prometheus: main
- role: alert-rules
- name: alert
- namespace: default
- spec:
- groups:
- - name: alert.rules
- rules:
- - alert: WebhookAlert
- expr: vector(1)
+- id: monitoring.coreos.com/v1:PrometheusRule:default:alert
+ type: Kubernetes
+ attributes:
+ apiVersion: monitoring.coreos.com/v1
+ kind: PrometheusRule
+ metadata:
+ labels:
+ prometheus: main
+ role: alert-rules
+ name: alert
+ namespace: default
+ spec:
+ groups:
+ - name: alert.rules
+ rules:
+ - alert: WebhookAlert
+ expr: vector(1)
diff --git a/base/examples/monitoring/prometheus-rules/record/ci-test/stdout.golden.yaml b/base/examples/monitoring/prometheus-rules/record/ci-test/stdout.golden.yaml
index 0cd17a2c..849ae4ad 100644
--- a/base/examples/monitoring/prometheus-rules/record/ci-test/stdout.golden.yaml
+++ b/base/examples/monitoring/prometheus-rules/record/ci-test/stdout.golden.yaml
@@ -1,30 +1,30 @@
-id: monitoring.coreos.com/v1:PrometheusRule:default:k8s-rules
-type: Kubernetes
-attributes:
- apiVersion: monitoring.coreos.com/v1
- kind: PrometheusRule
- metadata:
- labels:
- prometheus: main
- role: alert-rules
- name: k8s-rules
- namespace: default
- spec:
- groups:
- - name: node.rules
- rules:
- - expr: |
- sum(
- node_memory_MemAvailable_bytes{job="node-exporter"} or
- (
- node_memory_Buffers_bytes{job="node-exporter"} +
- node_memory_Cached_bytes{job="node-exporter"} +
- node_memory_MemFree_bytes{job="node-exporter"} +
- node_memory_Slab_bytes{job="node-exporter"}
- )
- ) by (cluster)
- record: :node_memory_MemAvailable_bytes:sum
- - expr: |
- sum(rate(node_cpu_seconds_total{job="node-exporter",mode!="idle",mode!="iowait",mode!="steal"}[5m])) /
- count(sum(node_cpu_seconds_total{job="node-exporter"}) by (cluster, instance, cpu))
- record: cluster:node_cpu:ratio_rate5m
+- id: monitoring.coreos.com/v1:PrometheusRule:default:k8s-rules
+ type: Kubernetes
+ attributes:
+ apiVersion: monitoring.coreos.com/v1
+ kind: PrometheusRule
+ metadata:
+ labels:
+ prometheus: main
+ role: alert-rules
+ name: k8s-rules
+ namespace: default
+ spec:
+ groups:
+ - name: node.rules
+ rules:
+ - expr: |
+ sum(
+ node_memory_MemAvailable_bytes{job="node-exporter"} or
+ (
+ node_memory_Buffers_bytes{job="node-exporter"} +
+ node_memory_Cached_bytes{job="node-exporter"} +
+ node_memory_MemFree_bytes{job="node-exporter"} +
+ node_memory_Slab_bytes{job="node-exporter"}
+ )
+ ) by (cluster)
+ record: :node_memory_MemAvailable_bytes:sum
+ - expr: |
+ sum(rate(node_cpu_seconds_total{job="node-exporter",mode!="idle",mode!="iowait",mode!="steal"}[5m])) /
+ count(sum(node_cpu_seconds_total{job="node-exporter"}) by (cluster, instance, cpu))
+ record: cluster:node_cpu:ratio_rate5m
diff --git a/base/examples/secret-as-code/dev/ci-test/settings.yaml b/base/examples/secret-as-code/dev/ci-test/settings.yaml
index c3c9d406..2459bae4 100644
--- a/base/examples/secret-as-code/dev/ci-test/settings.yaml
+++ b/base/examples/secret-as-code/dev/ci-test/settings.yaml
@@ -1,3 +1,7 @@
kcl_options:
- key: cluster
value: default
+ - key: app
+ value: secret-as-code
+ - key: env
+ value: dev
diff --git a/base/examples/secret-as-code/dev/ci-test/stdout.golden.yaml b/base/examples/secret-as-code/dev/ci-test/stdout.golden.yaml
index 87286bc3..585dd689 100644
--- a/base/examples/secret-as-code/dev/ci-test/stdout.golden.yaml
+++ b/base/examples/secret-as-code/dev/ci-test/stdout.golden.yaml
@@ -1,46 +1,45 @@
-id: apps/v1:Deployment:secret-as-code:secret-as-codedev
-type: Kubernetes
-dependsOn:
-- v1:Namespace:secret-as-code
-attributes:
- apiVersion: apps/v1
- kind: Deployment
- metadata:
- annotations:
- secret-store: vault
- foo: ref+vault://secret/foo#/foo
- bar: ref+vault://secret/bar#/bar
- name: secret-as-codedev
- namespace: secret-as-code
- spec:
- replicas: 1
- selector:
- matchLabels:
- app.kubernetes.io/name: secret-as-code
- app.kubernetes.io/env: dev
- app.kubernetes.io/instance: secret-as-code-dev
- cluster.x-k8s.io/cluster-name: default
- app.kubernetes.io/component: secret-as-codedev
- template:
- metadata:
- labels:
- app.kubernetes.io/name: secret-as-code
- app.kubernetes.io/env: dev
- app.kubernetes.io/instance: secret-as-code-dev
- cluster.x-k8s.io/cluster-name: default
- app.kubernetes.io/component: secret-as-codedev
- spec:
- containers:
- - image: nginx:1.7.8
- name: main
- ports:
- - containerPort: 80
- protocol: TCP
----
-id: v1:Namespace:secret-as-code
-type: Kubernetes
-attributes:
- apiVersion: v1
- kind: Namespace
- metadata:
- name: secret-as-code
+- id: apps/v1:Deployment:secret-as-code:secret-as-codedev
+ type: Kubernetes
+ attributes:
+ apiVersion: apps/v1
+ kind: Deployment
+ metadata:
+ annotations:
+ bar: ref+vault://secret/bar#/bar
+ foo: ref+vault://secret/foo#/foo
+ secret-store: vault
+ name: secret-as-codedev
+ namespace: secret-as-code
+ spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app.kubernetes.io/component: secret-as-codedev
+ app.kubernetes.io/env: dev
+ app.kubernetes.io/instance: secret-as-code-dev
+ app.kubernetes.io/name: secret-as-code
+ cluster.x-k8s.io/cluster-name: default
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/component: secret-as-codedev
+ app.kubernetes.io/env: dev
+ app.kubernetes.io/instance: secret-as-code-dev
+ app.kubernetes.io/name: secret-as-code
+ cluster.x-k8s.io/cluster-name: default
+ spec:
+ containers:
+ - image: nginx:1.7.8
+ name: main
+ ports:
+ - containerPort: 80
+ protocol: TCP
+ dependsOn:
+ - v1:Namespace:secret-as-code
+- id: v1:Namespace:secret-as-code
+ type: Kubernetes
+ attributes:
+ apiVersion: v1
+ kind: Namespace
+ metadata:
+ name: secret-as-code
diff --git a/base/examples/server/app_config_map/prod/ci-test/settings.yaml b/base/examples/server/app_config_map/prod/ci-test/settings.yaml
index a8e3d924..7765ac24 100644
--- a/base/examples/server/app_config_map/prod/ci-test/settings.yaml
+++ b/base/examples/server/app_config_map/prod/ci-test/settings.yaml
@@ -1,3 +1,7 @@
kcl_options:
- key: cluster
value: demo-cluster-name
+ - key: app
+ value: sampleapp
+ - key: env
+ value: prod
diff --git a/base/examples/server/app_config_map/prod/ci-test/stdout.golden.yaml b/base/examples/server/app_config_map/prod/ci-test/stdout.golden.yaml
index 5dfef0f3..6d3d4001 100644
--- a/base/examples/server/app_config_map/prod/ci-test/stdout.golden.yaml
+++ b/base/examples/server/app_config_map/prod/ci-test/stdout.golden.yaml
@@ -1,76 +1,73 @@
-id: apps/v1:Deployment:sampleapp:sampleappprod
-type: Kubernetes
-dependsOn:
-- v1:Namespace:sampleapp
-- v1:ConfigMap:sampleapp:sampleappprod0
-- v1:ConfigMap:sampleapp:sampleappprod1
-attributes:
- apiVersion: apps/v1
- kind: Deployment
- metadata:
- name: sampleappprod
- namespace: sampleapp
- spec:
- replicas: 1
- selector:
- matchLabels:
- app.kubernetes.io/name: sampleapp
- app.kubernetes.io/env: prod
- app.kubernetes.io/instance: sampleapp-prod
- cluster.x-k8s.io/cluster-name: demo-cluster-name
- app.kubernetes.io/component: sampleappprod
- template:
- metadata:
- labels:
- app.kubernetes.io/name: sampleapp
- app.kubernetes.io/env: prod
- app.kubernetes.io/instance: sampleapp-prod
- cluster.x-k8s.io/cluster-name: demo-cluster-name
- app.kubernetes.io/component: sampleappprod
- spec:
- containers:
- - image: gcr.io/google-samples/gb-frontend:v4
- name: main
- resources:
- limits:
- cpu: 100m
- memory: 100Mi
- ephemeral-storage: 1Gi
- requests:
- cpu: 100m
- memory: 100Mi
- ephemeral-storage: 1Gi
----
-id: v1:Namespace:sampleapp
-type: Kubernetes
-attributes:
- apiVersion: v1
- kind: Namespace
- metadata:
- name: sampleapp
----
-id: v1:ConfigMap:sampleapp:sampleappprod0
-type: Kubernetes
-dependsOn:
-- v1:Namespace:sampleapp
-attributes:
- apiVersion: v1
- data:
- app.name: sampleapp
- kind: ConfigMap
- metadata:
- name: sampleappprod0
- namespace: sampleapp
----
-id: v1:ConfigMap:sampleapp:sampleappprod1
-type: Kubernetes
-dependsOn:
-- v1:Namespace:sampleapp
-attributes:
- apiVersion: v1
- data:
- nginx.conf: configuration_content
- kind: ConfigMap
- metadata:
- name: sampleappprod1
- namespace: sampleapp
+- id: apps/v1:Deployment:sampleapp:sampleappprod
+ type: Kubernetes
+ attributes:
+ apiVersion: apps/v1
+ kind: Deployment
+ metadata:
+ name: sampleappprod
+ namespace: sampleapp
+ spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app.kubernetes.io/component: sampleappprod
+ app.kubernetes.io/env: prod
+ app.kubernetes.io/instance: sampleapp-prod
+ app.kubernetes.io/name: sampleapp
+ cluster.x-k8s.io/cluster-name: demo-cluster-name
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/component: sampleappprod
+ app.kubernetes.io/env: prod
+ app.kubernetes.io/instance: sampleapp-prod
+ app.kubernetes.io/name: sampleapp
+ cluster.x-k8s.io/cluster-name: demo-cluster-name
+ spec:
+ containers:
+ - image: gcr.io/google-samples/gb-frontend:v4
+ name: main
+ resources:
+ limits:
+ cpu: 100m
+ ephemeral-storage: 1Gi
+ memory: 100Mi
+ requests:
+ cpu: 100m
+ ephemeral-storage: 1Gi
+ memory: 100Mi
+ dependsOn:
+ - v1:Namespace:sampleapp
+ - v1:ConfigMap:sampleapp:sampleappprod0
+ - v1:ConfigMap:sampleapp:sampleappprod1
+- id: v1:Namespace:sampleapp
+ type: Kubernetes
+ attributes:
+ apiVersion: v1
+ kind: Namespace
+ metadata:
+ name: sampleapp
+- id: v1:ConfigMap:sampleapp:sampleappprod0
+ type: Kubernetes
+ attributes:
+ apiVersion: v1
+ data:
+ app.name: sampleapp
+ kind: ConfigMap
+ metadata:
+ name: sampleappprod0
+ namespace: sampleapp
+ dependsOn:
+ - v1:Namespace:sampleapp
+- id: v1:ConfigMap:sampleapp:sampleappprod1
+ type: Kubernetes
+ attributes:
+ apiVersion: v1
+ data:
+ nginx.conf: configuration_content
+ kind: ConfigMap
+ metadata:
+ name: sampleappprod1
+ namespace: sampleapp
+ dependsOn:
+ - v1:Namespace:sampleapp
diff --git a/base/examples/server/app_label_selector/prod/ci-test/settings.yaml b/base/examples/server/app_label_selector/prod/ci-test/settings.yaml
index a8e3d924..7765ac24 100644
--- a/base/examples/server/app_label_selector/prod/ci-test/settings.yaml
+++ b/base/examples/server/app_label_selector/prod/ci-test/settings.yaml
@@ -1,3 +1,7 @@
kcl_options:
- key: cluster
value: demo-cluster-name
+ - key: app
+ value: sampleapp
+ - key: env
+ value: prod
diff --git a/base/examples/server/app_label_selector/prod/ci-test/stdout.golden.yaml b/base/examples/server/app_label_selector/prod/ci-test/stdout.golden.yaml
index 9f0ae357..8efef4aa 100644
--- a/base/examples/server/app_label_selector/prod/ci-test/stdout.golden.yaml
+++ b/base/examples/server/app_label_selector/prod/ci-test/stdout.golden.yaml
@@ -1,52 +1,51 @@
-id: apps/v1:Deployment:sampleapp:sampleappprod
-type: Kubernetes
-dependsOn:
-- v1:Namespace:sampleapp
-attributes:
- apiVersion: apps/v1
- kind: Deployment
- metadata:
- name: sampleappprod
- namespace: sampleapp
- spec:
- replicas: 1
- selector:
- matchLabels:
- app.kubernetes.io/name: sampleapp
- app.kubernetes.io/env: prod
- app.kubernetes.io/instance: sampleapp-prod
- cluster.x-k8s.io/cluster-name: demo-cluster-name
- app.kubernetes.io/version: v1.0.0
- app.kubernetes.io/component: sampleappprod
- template:
- metadata:
- annotations:
- owner: team-iac
- labels:
- app.kubernetes.io/name: sampleapp
- app.kubernetes.io/env: prod
- app.kubernetes.io/instance: sampleapp-prod
- cluster.x-k8s.io/cluster-name: demo-cluster-name
- app.kubernetes.io/component: sampleappprod
- app.kubernetes.io/version: v1.0.0
- spec:
- containers:
- - image: gcr.io/google-samples/gb-frontend:v4
- name: main
- resources:
- limits:
- cpu: 100m
- memory: 100Mi
- ephemeral-storage: 1Gi
- requests:
- cpu: 100m
- memory: 100Mi
- ephemeral-storage: 1Gi
----
-id: v1:Namespace:sampleapp
-type: Kubernetes
-attributes:
- apiVersion: v1
- kind: Namespace
- metadata:
- name: sampleapp
+- id: apps/v1:Deployment:sampleapp:sampleappprod
+ type: Kubernetes
+ attributes:
+ apiVersion: apps/v1
+ kind: Deployment
+ metadata:
+ name: sampleappprod
+ namespace: sampleapp
+ spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app.kubernetes.io/component: sampleappprod
+ app.kubernetes.io/env: prod
+ app.kubernetes.io/instance: sampleapp-prod
+ app.kubernetes.io/name: sampleapp
+ app.kubernetes.io/version: v1.0.0
+ cluster.x-k8s.io/cluster-name: demo-cluster-name
+ template:
+ metadata:
+ annotations:
+ owner: team-iac
+ labels:
+ app.kubernetes.io/component: sampleappprod
+ app.kubernetes.io/env: prod
+ app.kubernetes.io/instance: sampleapp-prod
+ app.kubernetes.io/name: sampleapp
+ app.kubernetes.io/version: v1.0.0
+ cluster.x-k8s.io/cluster-name: demo-cluster-name
+ spec:
+ containers:
+ - image: gcr.io/google-samples/gb-frontend:v4
+ name: main
+ resources:
+ limits:
+ cpu: 100m
+ ephemeral-storage: 1Gi
+ memory: 100Mi
+ requests:
+ cpu: 100m
+ ephemeral-storage: 1Gi
+ memory: 100Mi
+ dependsOn:
+ - v1:Namespace:sampleapp
+- id: v1:Namespace:sampleapp
+ type: Kubernetes
+ attributes:
+ apiVersion: v1
+ kind: Namespace
+ metadata:
+ name: sampleapp
diff --git a/base/examples/server/app_main_container/prod/ci-test/settings.yaml b/base/examples/server/app_main_container/prod/ci-test/settings.yaml
index a8e3d924..7765ac24 100644
--- a/base/examples/server/app_main_container/prod/ci-test/settings.yaml
+++ b/base/examples/server/app_main_container/prod/ci-test/settings.yaml
@@ -1,3 +1,7 @@
kcl_options:
- key: cluster
value: demo-cluster-name
+ - key: app
+ value: sampleapp
+ - key: env
+ value: prod
diff --git a/base/examples/server/app_main_container/prod/ci-test/stdout.golden.yaml b/base/examples/server/app_main_container/prod/ci-test/stdout.golden.yaml
index 245554b5..010cc203 100644
--- a/base/examples/server/app_main_container/prod/ci-test/stdout.golden.yaml
+++ b/base/examples/server/app_main_container/prod/ci-test/stdout.golden.yaml
@@ -1,101 +1,100 @@
-id: apps/v1:Deployment:sampleapp:sampleappprod
-type: Kubernetes
-dependsOn:
-- v1:Namespace:sampleapp
-attributes:
- apiVersion: apps/v1
- kind: Deployment
- metadata:
- name: sampleappprod
- namespace: sampleapp
- spec:
- replicas: 1
- selector:
- matchLabels:
- app.kubernetes.io/name: sampleapp
- app.kubernetes.io/env: prod
- app.kubernetes.io/instance: sampleapp-prod
- cluster.x-k8s.io/cluster-name: demo-cluster-name
- app.kubernetes.io/component: sampleappprod
- template:
- metadata:
- labels:
- app.kubernetes.io/name: sampleapp
- app.kubernetes.io/env: prod
- app.kubernetes.io/instance: sampleapp-prod
- cluster.x-k8s.io/cluster-name: demo-cluster-name
- app.kubernetes.io/component: sampleappprod
- spec:
- containers:
- - args:
- - start
- command:
- - /home/admin/server.sh
- env:
- - name: app.version
- value: v1.0.0
- envFrom:
- - configMapRef:
- name: my-configmap
- image: gcr.io/google-samples/gb-frontend:v4
- name: main
- ports:
- - containerPort: 12201
- protocol: TCP
- lifecycle:
- preStop:
- exec:
- command:
- - /bin/sh
- - -c
- - echo 1
- livenessProbe:
- failureThreshold: 3
- initialDelaySeconds: 30
- periodSeconds: 5
- successThreshold: 1
- timeoutSeconds: 10
- exec:
- command:
- - /bin/sh
- - -c
- - echo livenessProbe
- readinessProbe:
- failureThreshold: 3
- initialDelaySeconds: 30
- periodSeconds: 5
- successThreshold: 2
- timeoutSeconds: 10
- exec:
- command:
- - /bin/sh
- - -c
- - echo readinessProbe
- resources:
- limits:
- cpu: 100m
- memory: 100Mi
- ephemeral-storage: 1Gi
- requests:
- cpu: 100m
- memory: 100Mi
- ephemeral-storage: 1Gi
- startupProbe:
- failureThreshold: 3
- initialDelaySeconds: 30
- periodSeconds: 5
- successThreshold: 2
- timeoutSeconds: 10
- exec:
- command:
- - /bin/sh
- - -c
- - echo startupProbe
----
-id: v1:Namespace:sampleapp
-type: Kubernetes
-attributes:
- apiVersion: v1
- kind: Namespace
- metadata:
- name: sampleapp
+- id: apps/v1:Deployment:sampleapp:sampleappprod
+ type: Kubernetes
+ attributes:
+ apiVersion: apps/v1
+ kind: Deployment
+ metadata:
+ name: sampleappprod
+ namespace: sampleapp
+ spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app.kubernetes.io/component: sampleappprod
+ app.kubernetes.io/env: prod
+ app.kubernetes.io/instance: sampleapp-prod
+ app.kubernetes.io/name: sampleapp
+ cluster.x-k8s.io/cluster-name: demo-cluster-name
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/component: sampleappprod
+ app.kubernetes.io/env: prod
+ app.kubernetes.io/instance: sampleapp-prod
+ app.kubernetes.io/name: sampleapp
+ cluster.x-k8s.io/cluster-name: demo-cluster-name
+ spec:
+ containers:
+ - args:
+ - start
+ command:
+ - /home/admin/server.sh
+ env:
+ - name: app.version
+ value: v1.0.0
+ envFrom:
+ - configMapRef:
+ name: my-configmap
+ image: gcr.io/google-samples/gb-frontend:v4
+ lifecycle:
+ preStop:
+ exec:
+ command:
+ - /bin/sh
+ - -c
+ - echo 1
+ livenessProbe:
+ exec:
+ command:
+ - /bin/sh
+ - -c
+ - echo livenessProbe
+ failureThreshold: 3
+ initialDelaySeconds: 30
+ periodSeconds: 5
+ successThreshold: 1
+ timeoutSeconds: 10
+ name: main
+ ports:
+ - containerPort: 12201
+ protocol: TCP
+ readinessProbe:
+ exec:
+ command:
+ - /bin/sh
+ - -c
+ - echo readinessProbe
+ failureThreshold: 3
+ initialDelaySeconds: 30
+ periodSeconds: 5
+ successThreshold: 2
+ timeoutSeconds: 10
+ resources:
+ limits:
+ cpu: 100m
+ ephemeral-storage: 1Gi
+ memory: 100Mi
+ requests:
+ cpu: 100m
+ ephemeral-storage: 1Gi
+ memory: 100Mi
+ startupProbe:
+ exec:
+ command:
+ - /bin/sh
+ - -c
+ - echo startupProbe
+ failureThreshold: 3
+ initialDelaySeconds: 30
+ periodSeconds: 5
+ successThreshold: 2
+ timeoutSeconds: 10
+ dependsOn:
+ - v1:Namespace:sampleapp
+- id: v1:Namespace:sampleapp
+ type: Kubernetes
+ attributes:
+ apiVersion: v1
+ kind: Namespace
+ metadata:
+ name: sampleapp
diff --git a/base/examples/server/app_need_namespace/prod/ci-test/settings.yaml b/base/examples/server/app_need_namespace/prod/ci-test/settings.yaml
index a8e3d924..7765ac24 100644
--- a/base/examples/server/app_need_namespace/prod/ci-test/settings.yaml
+++ b/base/examples/server/app_need_namespace/prod/ci-test/settings.yaml
@@ -1,3 +1,7 @@
kcl_options:
- key: cluster
value: demo-cluster-name
+ - key: app
+ value: sampleapp
+ - key: env
+ value: prod
diff --git a/base/examples/server/app_need_namespace/prod/ci-test/stdout.golden.yaml b/base/examples/server/app_need_namespace/prod/ci-test/stdout.golden.yaml
index 956cad48..90b659e0 100644
--- a/base/examples/server/app_need_namespace/prod/ci-test/stdout.golden.yaml
+++ b/base/examples/server/app_need_namespace/prod/ci-test/stdout.golden.yaml
@@ -1,48 +1,47 @@
-id: apps/v1:Deployment:sampleapp:sampleappprod
-type: Kubernetes
-dependsOn:
-- v1:Namespace:sampleapp
-attributes:
- apiVersion: apps/v1
- kind: Deployment
- metadata:
- name: sampleappprod
- namespace: sampleapp
- spec:
- replicas: 1
- selector:
- matchLabels:
- app.kubernetes.io/name: sampleapp
- app.kubernetes.io/env: prod
- app.kubernetes.io/instance: sampleapp-prod
- cluster.x-k8s.io/cluster-name: demo-cluster-name
- app.kubernetes.io/component: sampleappprod
- template:
- metadata:
- labels:
- app.kubernetes.io/name: sampleapp
- app.kubernetes.io/env: prod
- app.kubernetes.io/instance: sampleapp-prod
- cluster.x-k8s.io/cluster-name: demo-cluster-name
- app.kubernetes.io/component: sampleappprod
- spec:
- containers:
- - image: gcr.io/google-samples/gb-frontend:v4
- name: main
- resources:
- limits:
- cpu: 100m
- memory: 100Mi
- ephemeral-storage: 1Gi
- requests:
- cpu: 100m
- memory: 100Mi
- ephemeral-storage: 1Gi
----
-id: v1:Namespace:sampleapp
-type: Kubernetes
-attributes:
- apiVersion: v1
- kind: Namespace
- metadata:
- name: sampleapp
+- id: apps/v1:Deployment:sampleapp:sampleappprod
+ type: Kubernetes
+ attributes:
+ apiVersion: apps/v1
+ kind: Deployment
+ metadata:
+ name: sampleappprod
+ namespace: sampleapp
+ spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app.kubernetes.io/component: sampleappprod
+ app.kubernetes.io/env: prod
+ app.kubernetes.io/instance: sampleapp-prod
+ app.kubernetes.io/name: sampleapp
+ cluster.x-k8s.io/cluster-name: demo-cluster-name
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/component: sampleappprod
+ app.kubernetes.io/env: prod
+ app.kubernetes.io/instance: sampleapp-prod
+ app.kubernetes.io/name: sampleapp
+ cluster.x-k8s.io/cluster-name: demo-cluster-name
+ spec:
+ containers:
+ - image: gcr.io/google-samples/gb-frontend:v4
+ name: main
+ resources:
+ limits:
+ cpu: 100m
+ ephemeral-storage: 1Gi
+ memory: 100Mi
+ requests:
+ cpu: 100m
+ ephemeral-storage: 1Gi
+ memory: 100Mi
+ dependsOn:
+ - v1:Namespace:sampleapp
+- id: v1:Namespace:sampleapp
+ type: Kubernetes
+ attributes:
+ apiVersion: v1
+ kind: Namespace
+ metadata:
+ name: sampleapp
diff --git a/base/examples/server/app_scheduling_strategy/prod/ci-test/settings.yaml b/base/examples/server/app_scheduling_strategy/prod/ci-test/settings.yaml
index a8e3d924..7765ac24 100644
--- a/base/examples/server/app_scheduling_strategy/prod/ci-test/settings.yaml
+++ b/base/examples/server/app_scheduling_strategy/prod/ci-test/settings.yaml
@@ -1,3 +1,7 @@
kcl_options:
- key: cluster
value: demo-cluster-name
+ - key: app
+ value: sampleapp
+ - key: env
+ value: prod
diff --git a/base/examples/server/app_scheduling_strategy/prod/ci-test/stdout.golden.yaml b/base/examples/server/app_scheduling_strategy/prod/ci-test/stdout.golden.yaml
index 346d5e8b..a33fa0c5 100644
--- a/base/examples/server/app_scheduling_strategy/prod/ci-test/stdout.golden.yaml
+++ b/base/examples/server/app_scheduling_strategy/prod/ci-test/stdout.golden.yaml
@@ -1,48 +1,47 @@
-id: apps/v1:Deployment:sampleapp:sampleappprod
-type: Kubernetes
-dependsOn:
-- v1:Namespace:sampleapp
-attributes:
- apiVersion: apps/v1
- kind: Deployment
- metadata:
- name: sampleappprod
- namespace: sampleapp
- spec:
- replicas: 1
- selector:
- matchLabels:
- app.kubernetes.io/name: sampleapp
- app.kubernetes.io/env: prod
- app.kubernetes.io/instance: sampleapp-prod
- cluster.x-k8s.io/cluster-name: demo-cluster-name
- app.kubernetes.io/component: sampleappprod
- template:
- metadata:
- labels:
- app.kubernetes.io/name: sampleapp
- app.kubernetes.io/env: prod
- app.kubernetes.io/instance: sampleapp-prod
- cluster.x-k8s.io/cluster-name: demo-cluster-name
- app.kubernetes.io/component: sampleappprod
- spec:
- containers:
- - image: gcr.io/google-samples/gb-frontend:v4
- name: main
- resources:
- limits:
- cpu: '1'
- memory: 1Gi
- ephemeral-storage: 30Gi
- requests:
- cpu: '1'
- memory: 1Gi
- ephemeral-storage: 30Gi
----
-id: v1:Namespace:sampleapp
-type: Kubernetes
-attributes:
- apiVersion: v1
- kind: Namespace
- metadata:
- name: sampleapp
+- id: apps/v1:Deployment:sampleapp:sampleappprod
+ type: Kubernetes
+ attributes:
+ apiVersion: apps/v1
+ kind: Deployment
+ metadata:
+ name: sampleappprod
+ namespace: sampleapp
+ spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app.kubernetes.io/component: sampleappprod
+ app.kubernetes.io/env: prod
+ app.kubernetes.io/instance: sampleapp-prod
+ app.kubernetes.io/name: sampleapp
+ cluster.x-k8s.io/cluster-name: demo-cluster-name
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/component: sampleappprod
+ app.kubernetes.io/env: prod
+ app.kubernetes.io/instance: sampleapp-prod
+ app.kubernetes.io/name: sampleapp
+ cluster.x-k8s.io/cluster-name: demo-cluster-name
+ spec:
+ containers:
+ - image: gcr.io/google-samples/gb-frontend:v4
+ name: main
+ resources:
+ limits:
+ cpu: "1"
+ ephemeral-storage: 30Gi
+ memory: 1Gi
+ requests:
+ cpu: "1"
+ ephemeral-storage: 30Gi
+ memory: 1Gi
+ dependsOn:
+ - v1:Namespace:sampleapp
+- id: v1:Namespace:sampleapp
+ type: Kubernetes
+ attributes:
+ apiVersion: v1
+ kind: Namespace
+ metadata:
+ name: sampleapp
diff --git a/base/examples/server/app_secret/prod/ci-test/settings.yaml b/base/examples/server/app_secret/prod/ci-test/settings.yaml
index a8e3d924..7765ac24 100644
--- a/base/examples/server/app_secret/prod/ci-test/settings.yaml
+++ b/base/examples/server/app_secret/prod/ci-test/settings.yaml
@@ -1,3 +1,7 @@
kcl_options:
- key: cluster
value: demo-cluster-name
+ - key: app
+ value: sampleapp
+ - key: env
+ value: prod
diff --git a/base/examples/server/app_secret/prod/ci-test/stdout.golden.yaml b/base/examples/server/app_secret/prod/ci-test/stdout.golden.yaml
index eb87514a..1317f9ac 100644
--- a/base/examples/server/app_secret/prod/ci-test/stdout.golden.yaml
+++ b/base/examples/server/app_secret/prod/ci-test/stdout.golden.yaml
@@ -1,78 +1,75 @@
-id: apps/v1:Deployment:sampleapp:sampleappprod
-type: Kubernetes
-dependsOn:
-- v1:Namespace:sampleapp
-- v1:Secret:sampleapp:sampleappprod0
-- v1:Secret:sampleapp:sampleappprod1
-attributes:
- apiVersion: apps/v1
- kind: Deployment
- metadata:
- name: sampleappprod
- namespace: sampleapp
- spec:
- replicas: 1
- selector:
- matchLabels:
- app.kubernetes.io/name: sampleapp
- app.kubernetes.io/env: prod
- app.kubernetes.io/instance: sampleapp-prod
- cluster.x-k8s.io/cluster-name: demo-cluster-name
- app.kubernetes.io/component: sampleappprod
- template:
- metadata:
- labels:
- app.kubernetes.io/name: sampleapp
- app.kubernetes.io/env: prod
- app.kubernetes.io/instance: sampleapp-prod
- cluster.x-k8s.io/cluster-name: demo-cluster-name
- app.kubernetes.io/component: sampleappprod
- spec:
- containers:
- - image: gcr.io/google-samples/gb-frontend:v4
- name: main
- resources:
- limits:
- cpu: 100m
- memory: 100Mi
- ephemeral-storage: 1Gi
- requests:
- cpu: 100m
- memory: 100Mi
- ephemeral-storage: 1Gi
----
-id: v1:Namespace:sampleapp
-type: Kubernetes
-attributes:
- apiVersion: v1
- kind: Namespace
- metadata:
- name: sampleapp
----
-id: v1:Secret:sampleapp:sampleappprod0
-type: Kubernetes
-dependsOn:
-- v1:Namespace:sampleapp
-attributes:
- apiVersion: v1
- data:
- ca.conf: ZG9tYWluSWQ9CmdhdGV3YXk9aHR0cDovL2xhYi5nYXRld2F5LmdhbGF4eS5teWJhbmsuY2
- kind: Secret
- type: Opaque
- metadata:
- name: sampleappprod0
- namespace: sampleapp
----
-id: v1:Secret:sampleapp:sampleappprod1
-type: Kubernetes
-dependsOn:
-- v1:Namespace:sampleapp
-attributes:
- apiVersion: v1
- data:
- token: cVZFZGowOGg1Zm5nbWlJS0FzMWw2OUhPUVdEd1pNeHo=
- kind: Secret
- type: Opaque
- metadata:
- name: sampleappprod1
- namespace: sampleapp
+- id: apps/v1:Deployment:sampleapp:sampleappprod
+ type: Kubernetes
+ attributes:
+ apiVersion: apps/v1
+ kind: Deployment
+ metadata:
+ name: sampleappprod
+ namespace: sampleapp
+ spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app.kubernetes.io/component: sampleappprod
+ app.kubernetes.io/env: prod
+ app.kubernetes.io/instance: sampleapp-prod
+ app.kubernetes.io/name: sampleapp
+ cluster.x-k8s.io/cluster-name: demo-cluster-name
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/component: sampleappprod
+ app.kubernetes.io/env: prod
+ app.kubernetes.io/instance: sampleapp-prod
+ app.kubernetes.io/name: sampleapp
+ cluster.x-k8s.io/cluster-name: demo-cluster-name
+ spec:
+ containers:
+ - image: gcr.io/google-samples/gb-frontend:v4
+ name: main
+ resources:
+ limits:
+ cpu: 100m
+ ephemeral-storage: 1Gi
+ memory: 100Mi
+ requests:
+ cpu: 100m
+ ephemeral-storage: 1Gi
+ memory: 100Mi
+ dependsOn:
+ - v1:Namespace:sampleapp
+ - v1:Secret:sampleapp:sampleappprod0
+ - v1:Secret:sampleapp:sampleappprod1
+- id: v1:Namespace:sampleapp
+ type: Kubernetes
+ attributes:
+ apiVersion: v1
+ kind: Namespace
+ metadata:
+ name: sampleapp
+- id: v1:Secret:sampleapp:sampleappprod0
+ type: Kubernetes
+ attributes:
+ apiVersion: v1
+ data:
+ ca.conf: ZG9tYWluSWQ9CmdhdGV3YXk9aHR0cDovL2xhYi5nYXRld2F5LmdhbGF4eS5teWJhbmsuY2
+ kind: Secret
+ metadata:
+ name: sampleappprod0
+ namespace: sampleapp
+ type: Opaque
+ dependsOn:
+ - v1:Namespace:sampleapp
+- id: v1:Secret:sampleapp:sampleappprod1
+ type: Kubernetes
+ attributes:
+ apiVersion: v1
+ data:
+ token: cVZFZGowOGg1Zm5nbWlJS0FzMWw2OUhPUVdEd1pNeHo=
+ kind: Secret
+ metadata:
+ name: sampleappprod1
+ namespace: sampleapp
+ type: Opaque
+ dependsOn:
+ - v1:Namespace:sampleapp
diff --git a/base/examples/server/app_service/prod/ci-test/settings.yaml b/base/examples/server/app_service/prod/ci-test/settings.yaml
index a8e3d924..7765ac24 100644
--- a/base/examples/server/app_service/prod/ci-test/settings.yaml
+++ b/base/examples/server/app_service/prod/ci-test/settings.yaml
@@ -1,3 +1,7 @@
kcl_options:
- key: cluster
value: demo-cluster-name
+ - key: app
+ value: sampleapp
+ - key: env
+ value: prod
diff --git a/base/examples/server/app_service/prod/ci-test/stdout.golden.yaml b/base/examples/server/app_service/prod/ci-test/stdout.golden.yaml
index 9d49d781..148eb6ca 100644
--- a/base/examples/server/app_service/prod/ci-test/stdout.golden.yaml
+++ b/base/examples/server/app_service/prod/ci-test/stdout.golden.yaml
@@ -1,70 +1,68 @@
-id: apps/v1:Deployment:sampleapp:sampleappprod
-type: Kubernetes
-dependsOn:
-- v1:Namespace:sampleapp
-- v1:Service:sampleapp:frontend
-attributes:
- apiVersion: apps/v1
- kind: Deployment
- metadata:
- name: sampleappprod
- namespace: sampleapp
- spec:
- replicas: 1
- selector:
- matchLabels:
- app.kubernetes.io/name: sampleapp
- app.kubernetes.io/env: prod
- app.kubernetes.io/instance: sampleapp-prod
- cluster.x-k8s.io/cluster-name: demo-cluster-name
- app.kubernetes.io/component: sampleappprod
- template:
- metadata:
- labels:
- app.kubernetes.io/name: sampleapp
- app.kubernetes.io/env: prod
- app.kubernetes.io/instance: sampleapp-prod
- cluster.x-k8s.io/cluster-name: demo-cluster-name
- app.kubernetes.io/component: sampleappprod
- spec:
- containers:
- - image: gcr.io/google-samples/gb-frontend:v4
- name: main
- resources:
- limits:
- cpu: 100m
- memory: 100Mi
- ephemeral-storage: 1Gi
- requests:
- cpu: 100m
- memory: 100Mi
- ephemeral-storage: 1Gi
----
-id: v1:Namespace:sampleapp
-type: Kubernetes
-attributes:
- apiVersion: v1
- kind: Namespace
- metadata:
- name: sampleapp
----
-id: v1:Service:sampleapp:frontend
-type: Kubernetes
-dependsOn:
-- v1:Namespace:sampleapp
-attributes:
- apiVersion: v1
- kind: Service
- metadata:
- name: frontend
- namespace: sampleapp
- spec:
- ports:
- - port: 80
- selector:
- app.kubernetes.io/name: sampleapp
- app.kubernetes.io/env: prod
- app.kubernetes.io/instance: sampleapp-prod
- cluster.x-k8s.io/cluster-name: demo-cluster-name
- app.kubernetes.io/component: sampleappprod
- type: NodePort
+- id: apps/v1:Deployment:sampleapp:sampleappprod
+ type: Kubernetes
+ attributes:
+ apiVersion: apps/v1
+ kind: Deployment
+ metadata:
+ name: sampleappprod
+ namespace: sampleapp
+ spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app.kubernetes.io/component: sampleappprod
+ app.kubernetes.io/env: prod
+ app.kubernetes.io/instance: sampleapp-prod
+ app.kubernetes.io/name: sampleapp
+ cluster.x-k8s.io/cluster-name: demo-cluster-name
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/component: sampleappprod
+ app.kubernetes.io/env: prod
+ app.kubernetes.io/instance: sampleapp-prod
+ app.kubernetes.io/name: sampleapp
+ cluster.x-k8s.io/cluster-name: demo-cluster-name
+ spec:
+ containers:
+ - image: gcr.io/google-samples/gb-frontend:v4
+ name: main
+ resources:
+ limits:
+ cpu: 100m
+ ephemeral-storage: 1Gi
+ memory: 100Mi
+ requests:
+ cpu: 100m
+ ephemeral-storage: 1Gi
+ memory: 100Mi
+ dependsOn:
+ - v1:Namespace:sampleapp
+ - v1:Service:sampleapp:frontend
+- id: v1:Namespace:sampleapp
+ type: Kubernetes
+ attributes:
+ apiVersion: v1
+ kind: Namespace
+ metadata:
+ name: sampleapp
+- id: v1:Service:sampleapp:frontend
+ type: Kubernetes
+ attributes:
+ apiVersion: v1
+ kind: Service
+ metadata:
+ name: frontend
+ namespace: sampleapp
+ spec:
+ ports:
+ - port: 80
+ selector:
+ app.kubernetes.io/component: sampleappprod
+ app.kubernetes.io/env: prod
+ app.kubernetes.io/instance: sampleapp-prod
+ app.kubernetes.io/name: sampleapp
+ cluster.x-k8s.io/cluster-name: demo-cluster-name
+ type: NodePort
+ dependsOn:
+ - v1:Namespace:sampleapp
diff --git a/base/examples/server/app_sidecar/prod/ci-test/settings.yaml b/base/examples/server/app_sidecar/prod/ci-test/settings.yaml
index a8e3d924..7765ac24 100644
--- a/base/examples/server/app_sidecar/prod/ci-test/settings.yaml
+++ b/base/examples/server/app_sidecar/prod/ci-test/settings.yaml
@@ -1,3 +1,7 @@
kcl_options:
- key: cluster
value: demo-cluster-name
+ - key: app
+ value: sampleapp
+ - key: env
+ value: prod
diff --git a/base/examples/server/app_sidecar/prod/ci-test/stdout.golden.yaml b/base/examples/server/app_sidecar/prod/ci-test/stdout.golden.yaml
index 2789a7a5..837acbe6 100644
--- a/base/examples/server/app_sidecar/prod/ci-test/stdout.golden.yaml
+++ b/base/examples/server/app_sidecar/prod/ci-test/stdout.golden.yaml
@@ -1,69 +1,68 @@
-id: apps/v1:Deployment:sampleapp:sampleappprod
-type: Kubernetes
-dependsOn:
-- v1:Namespace:sampleapp
-attributes:
- apiVersion: apps/v1
- kind: Deployment
- metadata:
- name: sampleappprod
- namespace: sampleapp
- spec:
- replicas: 1
- selector:
- matchLabels:
- app.kubernetes.io/name: sampleapp
- app.kubernetes.io/env: prod
- app.kubernetes.io/instance: sampleapp-prod
- cluster.x-k8s.io/cluster-name: demo-cluster-name
- app.kubernetes.io/component: sampleappprod
- template:
- metadata:
- labels:
- app.kubernetes.io/name: sampleapp
- app.kubernetes.io/env: prod
- app.kubernetes.io/instance: sampleapp-prod
- cluster.x-k8s.io/cluster-name: demo-cluster-name
- app.kubernetes.io/component: sampleappprod
- spec:
- containers:
- - image: gcr.io/google-samples/gb-frontend:v4
- name: main
- resources:
- limits:
- cpu: 100m
- memory: 100Mi
- ephemeral-storage: 1Gi
- requests:
- cpu: 100m
- memory: 100Mi
- ephemeral-storage: 1Gi
- - env:
- - name: FILES_TO_COLLECT
- value: /mnt/log/synthetic-count.log /mnt/log/synthetic-dates.log
- image: gcr.io/google_containers/fluentd-sidecar-es:1.0
- name: sidecar-log-collector
- volumeMounts:
- - mountPath: /mnt/log
- name: log-storage
- readOnly: true
- resources:
- limits:
- cpu: '1'
- memory: 2Gi
- ephemeral-storage: 20Gi
- requests:
- cpu: '1'
- memory: 2Gi
- ephemeral-storage: 20Gi
- volumes:
- - name: log-storage
- emptyDir: {}
----
-id: v1:Namespace:sampleapp
-type: Kubernetes
-attributes:
- apiVersion: v1
- kind: Namespace
- metadata:
- name: sampleapp
+- id: apps/v1:Deployment:sampleapp:sampleappprod
+ type: Kubernetes
+ attributes:
+ apiVersion: apps/v1
+ kind: Deployment
+ metadata:
+ name: sampleappprod
+ namespace: sampleapp
+ spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app.kubernetes.io/component: sampleappprod
+ app.kubernetes.io/env: prod
+ app.kubernetes.io/instance: sampleapp-prod
+ app.kubernetes.io/name: sampleapp
+ cluster.x-k8s.io/cluster-name: demo-cluster-name
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/component: sampleappprod
+ app.kubernetes.io/env: prod
+ app.kubernetes.io/instance: sampleapp-prod
+ app.kubernetes.io/name: sampleapp
+ cluster.x-k8s.io/cluster-name: demo-cluster-name
+ spec:
+ containers:
+ - image: gcr.io/google-samples/gb-frontend:v4
+ name: main
+ resources:
+ limits:
+ cpu: 100m
+ ephemeral-storage: 1Gi
+ memory: 100Mi
+ requests:
+ cpu: 100m
+ ephemeral-storage: 1Gi
+ memory: 100Mi
+ - env:
+ - name: FILES_TO_COLLECT
+ value: /mnt/log/synthetic-count.log /mnt/log/synthetic-dates.log
+ image: gcr.io/google_containers/fluentd-sidecar-es:1.0
+ name: sidecar-log-collector
+ resources:
+ limits:
+ cpu: "1"
+ ephemeral-storage: 20Gi
+ memory: 2Gi
+ requests:
+ cpu: "1"
+ ephemeral-storage: 20Gi
+ memory: 2Gi
+ volumeMounts:
+ - mountPath: /mnt/log
+ name: log-storage
+ readOnly: true
+ volumes:
+ - emptyDir: {}
+ name: log-storage
+ dependsOn:
+ - v1:Namespace:sampleapp
+- id: v1:Namespace:sampleapp
+ type: Kubernetes
+ attributes:
+ apiVersion: v1
+ kind: Namespace
+ metadata:
+ name: sampleapp
diff --git a/base/examples/server/app_stateful_set/prod/ci-test/settings.yaml b/base/examples/server/app_stateful_set/prod/ci-test/settings.yaml
index a8e3d924..7765ac24 100644
--- a/base/examples/server/app_stateful_set/prod/ci-test/settings.yaml
+++ b/base/examples/server/app_stateful_set/prod/ci-test/settings.yaml
@@ -1,3 +1,7 @@
kcl_options:
- key: cluster
value: demo-cluster-name
+ - key: app
+ value: sampleapp
+ - key: env
+ value: prod
diff --git a/base/examples/server/app_stateful_set/prod/ci-test/stdout.golden.yaml b/base/examples/server/app_stateful_set/prod/ci-test/stdout.golden.yaml
index 9fcf8c7d..6787f6f5 100644
--- a/base/examples/server/app_stateful_set/prod/ci-test/stdout.golden.yaml
+++ b/base/examples/server/app_stateful_set/prod/ci-test/stdout.golden.yaml
@@ -1,69 +1,67 @@
-id: apps/v1:StatefulSet:sampleapp:sampleappprod
-type: Kubernetes
-dependsOn:
-- v1:Namespace:sampleapp
-- v1:Service:sampleapp:sampleappprod
-attributes:
- apiVersion: apps/v1
- kind: StatefulSet
- metadata:
- name: sampleappprod
- namespace: sampleapp
- spec:
- replicas: 1
- serviceName: sampleappprod
- selector:
- matchLabels:
- app.kubernetes.io/name: sampleapp
- app.kubernetes.io/env: prod
- app.kubernetes.io/instance: sampleapp-prod
- cluster.x-k8s.io/cluster-name: demo-cluster-name
- app.kubernetes.io/component: sampleappprod
- template:
- metadata:
- labels:
- app.kubernetes.io/name: sampleapp
- app.kubernetes.io/env: prod
- app.kubernetes.io/instance: sampleapp-prod
- cluster.x-k8s.io/cluster-name: demo-cluster-name
- app.kubernetes.io/component: sampleappprod
- spec:
- containers:
- - image: gcr.io/google-samples/gb-frontend:v4
- name: main
- resources:
- limits:
- cpu: 100m
- memory: 100Mi
- ephemeral-storage: 1Gi
- requests:
- cpu: 100m
- memory: 100Mi
- ephemeral-storage: 1Gi
----
-id: v1:Service:sampleapp:sampleappprod
-type: Kubernetes
-dependsOn:
-- v1:Namespace:sampleapp
-attributes:
- apiVersion: v1
- kind: Service
- metadata:
- name: sampleappprod
- namespace: sampleapp
- spec:
- clusterIP: None
- selector:
- app.kubernetes.io/name: sampleapp
- app.kubernetes.io/env: prod
- app.kubernetes.io/instance: sampleapp-prod
- cluster.x-k8s.io/cluster-name: demo-cluster-name
- app.kubernetes.io/component: sampleappprod
----
-id: v1:Namespace:sampleapp
-type: Kubernetes
-attributes:
- apiVersion: v1
- kind: Namespace
- metadata:
- name: sampleapp
+- id: apps/v1:StatefulSet:sampleapp:sampleappprod
+ type: Kubernetes
+ attributes:
+ apiVersion: apps/v1
+ kind: StatefulSet
+ metadata:
+ name: sampleappprod
+ namespace: sampleapp
+ spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app.kubernetes.io/component: sampleappprod
+ app.kubernetes.io/env: prod
+ app.kubernetes.io/instance: sampleapp-prod
+ app.kubernetes.io/name: sampleapp
+ cluster.x-k8s.io/cluster-name: demo-cluster-name
+ serviceName: sampleappprod
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/component: sampleappprod
+ app.kubernetes.io/env: prod
+ app.kubernetes.io/instance: sampleapp-prod
+ app.kubernetes.io/name: sampleapp
+ cluster.x-k8s.io/cluster-name: demo-cluster-name
+ spec:
+ containers:
+ - image: gcr.io/google-samples/gb-frontend:v4
+ name: main
+ resources:
+ limits:
+ cpu: 100m
+ ephemeral-storage: 1Gi
+ memory: 100Mi
+ requests:
+ cpu: 100m
+ ephemeral-storage: 1Gi
+ memory: 100Mi
+ dependsOn:
+ - v1:Namespace:sampleapp
+ - v1:Service:sampleapp:sampleappprod
+- id: v1:Service:sampleapp:sampleappprod
+ type: Kubernetes
+ attributes:
+ apiVersion: v1
+ kind: Service
+ metadata:
+ name: sampleappprod
+ namespace: sampleapp
+ spec:
+ clusterIP: None
+ selector:
+ app.kubernetes.io/component: sampleappprod
+ app.kubernetes.io/env: prod
+ app.kubernetes.io/instance: sampleapp-prod
+ app.kubernetes.io/name: sampleapp
+ cluster.x-k8s.io/cluster-name: demo-cluster-name
+ dependsOn:
+ - v1:Namespace:sampleapp
+- id: v1:Namespace:sampleapp
+ type: Kubernetes
+ attributes:
+ apiVersion: v1
+ kind: Namespace
+ metadata:
+ name: sampleapp
diff --git a/base/examples/server/app_volume/prod/ci-test/settings.yaml b/base/examples/server/app_volume/prod/ci-test/settings.yaml
index a8e3d924..7765ac24 100644
--- a/base/examples/server/app_volume/prod/ci-test/settings.yaml
+++ b/base/examples/server/app_volume/prod/ci-test/settings.yaml
@@ -1,3 +1,7 @@
kcl_options:
- key: cluster
value: demo-cluster-name
+ - key: app
+ value: sampleapp
+ - key: env
+ value: prod
diff --git a/base/examples/server/app_volume/prod/ci-test/stdout.golden.yaml b/base/examples/server/app_volume/prod/ci-test/stdout.golden.yaml
index f1ecfa18..ffd7de0d 100644
--- a/base/examples/server/app_volume/prod/ci-test/stdout.golden.yaml
+++ b/base/examples/server/app_volume/prod/ci-test/stdout.golden.yaml
@@ -1,54 +1,53 @@
-id: apps/v1:Deployment:sampleapp:sampleappprod
-type: Kubernetes
-dependsOn:
-- v1:Namespace:sampleapp
-attributes:
- apiVersion: apps/v1
- kind: Deployment
- metadata:
- name: sampleappprod
- namespace: sampleapp
- spec:
- replicas: 1
- selector:
- matchLabels:
- app.kubernetes.io/name: sampleapp
- app.kubernetes.io/env: prod
- app.kubernetes.io/instance: sampleapp-prod
- cluster.x-k8s.io/cluster-name: demo-cluster-name
- app.kubernetes.io/component: sampleappprod
- template:
- metadata:
- labels:
- app.kubernetes.io/name: sampleapp
- app.kubernetes.io/env: prod
- app.kubernetes.io/instance: sampleapp-prod
- cluster.x-k8s.io/cluster-name: demo-cluster-name
- app.kubernetes.io/component: sampleappprod
- spec:
- containers:
- - image: gcr.io/google-samples/gb-frontend:v4
- name: main
- volumeMounts:
- - mountPath: /home/admin/logs
- name: log-volume
- resources:
- limits:
- cpu: 100m
- memory: 100Mi
- ephemeral-storage: 1Gi
- requests:
- cpu: 100m
- memory: 100Mi
- ephemeral-storage: 1Gi
- volumes:
- - name: log-volume
- emptyDir: {}
----
-id: v1:Namespace:sampleapp
-type: Kubernetes
-attributes:
- apiVersion: v1
- kind: Namespace
- metadata:
- name: sampleapp
+- id: apps/v1:Deployment:sampleapp:sampleappprod
+ type: Kubernetes
+ attributes:
+ apiVersion: apps/v1
+ kind: Deployment
+ metadata:
+ name: sampleappprod
+ namespace: sampleapp
+ spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app.kubernetes.io/component: sampleappprod
+ app.kubernetes.io/env: prod
+ app.kubernetes.io/instance: sampleapp-prod
+ app.kubernetes.io/name: sampleapp
+ cluster.x-k8s.io/cluster-name: demo-cluster-name
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/component: sampleappprod
+ app.kubernetes.io/env: prod
+ app.kubernetes.io/instance: sampleapp-prod
+ app.kubernetes.io/name: sampleapp
+ cluster.x-k8s.io/cluster-name: demo-cluster-name
+ spec:
+ containers:
+ - image: gcr.io/google-samples/gb-frontend:v4
+ name: main
+ resources:
+ limits:
+ cpu: 100m
+ ephemeral-storage: 1Gi
+ memory: 100Mi
+ requests:
+ cpu: 100m
+ ephemeral-storage: 1Gi
+ memory: 100Mi
+ volumeMounts:
+ - mountPath: /home/admin/logs
+ name: log-volume
+ volumes:
+ - emptyDir: {}
+ name: log-volume
+ dependsOn:
+ - v1:Namespace:sampleapp
+- id: v1:Namespace:sampleapp
+ type: Kubernetes
+ attributes:
+ apiVersion: v1
+ kind: Namespace
+ metadata:
+ name: sampleapp
diff --git a/base/pkg/kusion_models/kube/metadata/metadata.k b/base/pkg/kusion_models/kube/metadata/metadata.k
index f25f2fe0..742f5d74 100644
--- a/base/pkg/kusion_models/kube/metadata/metadata.k
+++ b/base/pkg/kusion_models/kube/metadata/metadata.k
@@ -1,6 +1,3 @@
-import base.pkg.kusion_models.metadata.project
-import base.pkg.kusion_models.metadata.stack
-
-__META_APP_NAME = project.__META_PROJECT_NAME
-__META_ENV_TYPE_NAME = stack.__META_STACK_NAME
+__META_APP_NAME = option("app") or "sampleapp"
+__META_ENV_TYPE_NAME = option("env") or "prod"
__META_CLUSTER_NAME = option("cluster") or Undefined
diff --git a/base/pkg/kusion_models/metadata/project.k b/base/pkg/kusion_models/metadata/project.k
deleted file mode 100644
index c4c06e74..00000000
--- a/base/pkg/kusion_models/metadata/project.k
+++ /dev/null
@@ -1,6 +0,0 @@
-import kcl_plugin.project_context as ctx
-
-# Get project information from project.yaml by kcl plugin
-_projectContext = ctx.get_project_context()
-
-__META_PROJECT_NAME = _projectContext['name']
diff --git a/base/pkg/kusion_models/metadata/stack.k b/base/pkg/kusion_models/metadata/stack.k
deleted file mode 100644
index 959ae16f..00000000
--- a/base/pkg/kusion_models/metadata/stack.k
+++ /dev/null
@@ -1,6 +0,0 @@
-import kcl_plugin.project_context as ctx
-
-# Get stack information from stack.yaml by kcl plugin
-_stackContext = ctx.get_stack_context()
-
-__META_STACK_NAME = _stackContext['name']
diff --git a/clouds/alicloud/alicloud_instance/instance/ci-test/stdout.golden.yaml b/clouds/alicloud/alicloud_instance/instance/ci-test/stdout.golden.yaml
index 9bcb2264..7743cffd 100644
--- a/clouds/alicloud/alicloud_instance/instance/ci-test/stdout.golden.yaml
+++ b/clouds/alicloud/alicloud_instance/instance/ci-test/stdout.golden.yaml
@@ -1,73 +1,69 @@
-id: aliyun:alicloud:alicloud_vpc:alicloud_vpc
-type: Terraform
-attributes:
- cidr_block: 172.16.0.0/16
- name: alicloud_vpc
-extensions:
- provider: registry.terraform.io/aliyun/alicloud/1.153.0
- resourceType: alicloud_vpc
- providerMeta:
- region: cn-beijing
----
-id: aliyun:alicloud:alicloud_vswitch:alicloud_vswitch
-type: Terraform
-attributes:
- availability_zone: cn-beijing-b
- cidr_block: 172.16.0.0/21
- name: alicloud_vswitch
- vpc_id: $kusion_path.aliyun:alicloud:alicloud_vpc:alicloud_vpc.id
-extensions:
- provider: registry.terraform.io/aliyun/alicloud/1.153.0
- resourceType: alicloud_vswitch
- providerMeta:
- region: cn-beijing
----
-id: aliyun:alicloud:alicloud_security_group:alicloud_security_group
-type: Terraform
-attributes:
- description: alicloud-security-group
- name: alicloud_security_group
- vpc_id: $kusion_path.aliyun:alicloud:alicloud_vpc:alicloud_vpc.id
-extensions:
- provider: registry.terraform.io/aliyun/alicloud/1.153.0
- resourceType: alicloud_security_group
- providerMeta:
- region: cn-beijing
----
-id: aliyun:alicloud:alicloud_instance:kusion-alicloud-instance
-type: Terraform
-attributes:
- availability_zone: cn-beijing-b
- image_id: ubuntu_18_04_64_20G_alibase_20190624.vhd
- instance_name: kusion-alicloud-instance
- instance_type: ecs.n2.small
- internet_charge_type: PayByTraffic
- password: Demokusion
- security_groups:
- - $kusion_path.aliyun:alicloud:alicloud_security_group:alicloud_security_group.id
- system_disk_category: cloud_efficiency
- vswitch_id: $kusion_path.aliyun:alicloud:alicloud_vswitch:alicloud_vswitch.id
-extensions:
- provider: registry.terraform.io/aliyun/alicloud/1.153.0
- resourceType: alicloud_instance
- providerMeta:
- region: cn-beijing
----
-id: aliyun:alicloud:alicloud_instance:kusion-alicloud-instance2
-type: Terraform
-attributes:
- availability_zone: cn-beijing-b
- image_id: ubuntu_18_04_64_20G_alibase_20190624.vhd
- instance_name: kusion-alicloud-instance2
- instance_type: ecs.n2.small
- internet_charge_type: PayByTraffic
- password: Demokusion
- security_groups:
- - $kusion_path.aliyun:alicloud:alicloud_security_group:alicloud_security_group.id
- system_disk_category: cloud_efficiency
- vswitch_id: $kusion_path.aliyun:alicloud:alicloud_vswitch:alicloud_vswitch.id
-extensions:
- provider: registry.terraform.io/aliyun/alicloud/1.153.0
- resourceType: alicloud_instance
- providerMeta:
- region: cn-beijing
+- id: aliyun:alicloud:alicloud_vpc:alicloud_vpc
+ type: Terraform
+ attributes:
+ cidr_block: 172.16.0.0/16
+ name: alicloud_vpc
+ extensions:
+ provider: registry.terraform.io/aliyun/alicloud/1.153.0
+ providerMeta:
+ region: cn-beijing
+ resourceType: alicloud_vpc
+- id: aliyun:alicloud:alicloud_vswitch:alicloud_vswitch
+ type: Terraform
+ attributes:
+ availability_zone: cn-beijing-b
+ cidr_block: 172.16.0.0/21
+ name: alicloud_vswitch
+ vpc_id: $kusion_path.aliyun:alicloud:alicloud_vpc:alicloud_vpc.id
+ extensions:
+ provider: registry.terraform.io/aliyun/alicloud/1.153.0
+ providerMeta:
+ region: cn-beijing
+ resourceType: alicloud_vswitch
+- id: aliyun:alicloud:alicloud_security_group:alicloud_security_group
+ type: Terraform
+ attributes:
+ description: alicloud-security-group
+ name: alicloud_security_group
+ vpc_id: $kusion_path.aliyun:alicloud:alicloud_vpc:alicloud_vpc.id
+ extensions:
+ provider: registry.terraform.io/aliyun/alicloud/1.153.0
+ providerMeta:
+ region: cn-beijing
+ resourceType: alicloud_security_group
+- id: aliyun:alicloud:alicloud_instance:kusion-alicloud-instance
+ type: Terraform
+ attributes:
+ availability_zone: cn-beijing-b
+ image_id: ubuntu_18_04_64_20G_alibase_20190624.vhd
+ instance_name: kusion-alicloud-instance
+ instance_type: ecs.n2.small
+ internet_charge_type: PayByTraffic
+ password: Demokusion
+ security_groups:
+ - $kusion_path.aliyun:alicloud:alicloud_security_group:alicloud_security_group.id
+ system_disk_category: cloud_efficiency
+ vswitch_id: $kusion_path.aliyun:alicloud:alicloud_vswitch:alicloud_vswitch.id
+ extensions:
+ provider: registry.terraform.io/aliyun/alicloud/1.153.0
+ providerMeta:
+ region: cn-beijing
+ resourceType: alicloud_instance
+- id: aliyun:alicloud:alicloud_instance:kusion-alicloud-instance2
+ type: Terraform
+ attributes:
+ availability_zone: cn-beijing-b
+ image_id: ubuntu_18_04_64_20G_alibase_20190624.vhd
+ instance_name: kusion-alicloud-instance2
+ instance_type: ecs.n2.small
+ internet_charge_type: PayByTraffic
+ password: Demokusion
+ security_groups:
+ - $kusion_path.aliyun:alicloud:alicloud_security_group:alicloud_security_group.id
+ system_disk_category: cloud_efficiency
+ vswitch_id: $kusion_path.aliyun:alicloud:alicloud_vswitch:alicloud_vswitch.id
+ extensions:
+ provider: registry.terraform.io/aliyun/alicloud/1.153.0
+ providerMeta:
+ region: cn-beijing
+ resourceType: alicloud_instance
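
The golden output above changes shape: each resource used to be emitted as its own YAML document separated by "---", whereas the file now holds a single document whose top level is a list of resources. Below is a minimal sketch of how the two shapes load with ruamel.yaml, the library the test harness already uses; the sample data is illustrative, not copied from the golden file:

from ruamel.yaml import YAML

yaml = YAML(typ="safe", pure=True)

# Old golden format: one YAML document per resource, separated by "---".
old_golden = "id: alicloud_vpc\ntype: Terraform\n---\nid: alicloud_vswitch\ntype: Terraform\n"
# New golden format: a single document that is already a list of resources.
new_golden = "- id: alicloud_vpc\n  type: Terraform\n- id: alicloud_vswitch\n  type: Terraform\n"

# load_all yields one mapping per document for the old layout,
# and a single list of mappings for the new layout.
old_resources = list(yaml.load_all(old_golden))
new_resources = list(yaml.load_all(new_golden))[0]
assert old_resources == new_resources
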
diff --git a/clouds/alicloud/alicloud_oss/oss/ci-test/stdout.golden.yaml b/clouds/alicloud/alicloud_oss/oss/ci-test/stdout.golden.yaml
index 16f09743..3844dba8 100644
--- a/clouds/alicloud/alicloud_oss/oss/ci-test/stdout.golden.yaml
+++ b/clouds/alicloud/alicloud_oss/oss/ci-test/stdout.golden.yaml
@@ -1,10 +1,10 @@
-id: aliyun:alicloud:alicloud_oss_bucket:kusion-oss
-type: Terraform
-attributes:
- acl: private
- bucket: kusion-oss
-extensions:
- provider: registry.terraform.io/aliyun/alicloud/1.153.0
- resourceType: alicloud_oss_bucket
- providerMeta:
- region: cn-beijing
+- id: aliyun:alicloud:alicloud_oss_bucket:kusion-oss
+ type: Terraform
+ attributes:
+ acl: private
+ bucket: kusion-oss
+ extensions:
+ provider: registry.terraform.io/aliyun/alicloud/1.153.0
+ providerMeta:
+ region: cn-beijing
+ resourceType: alicloud_oss_bucket
diff --git a/hack/test_konfig.py b/hack/test_konfig.py
index a93fc274..c1e77534 100644
--- a/hack/test_konfig.py
+++ b/hack/test_konfig.py
@@ -10,15 +10,56 @@
import pytest
from ruamel.yaml import YAML
+from collections.abc import Mapping, Sequence
from lib.common import *
from lib import utils
+TEST_FILE = "kcl.yaml"
+ROOT = str(Path(__file__).parent.parent)
+
yaml = YAML(typ="unsafe", pure=True)
+def find_test_dirs():
+ result = []
+ root_dirs = [ROOT]
+ for root_dir in root_dirs:
+ for root, _, files in os.walk(root_dir):
+ for name in files:
+ if name == TEST_FILE:
+ result.append(root)
+ return result
+
def compare_results(result, golden_result):
- # Convert result and golden_result string to string lines with line ending stripped, then compare.
- assert list(yaml.load_all(result)) == list(yaml.load_all(golden_result))
+ """Convert result and golden_result string to string lines with line ending stripped, then compare."""
+
+ assert compare_unordered_yaml_objects(
+ list(yaml.load_all(result)), list(yaml.load_all(golden_result))
+ )
+
+
+def compare_unordered_yaml_objects(result, golden_result):
+ """Comparing the contents of two YAML objects for equality in an unordered manner."""
+ if isinstance(result, Mapping) and isinstance(golden_result, Mapping):
+ if result.keys() != golden_result.keys():
+ return False
+ for key in result.keys():
+ if not compare_unordered_yaml_objects(result[key], golden_result[key]):
+ return False
+
+ return True
+    elif isinstance(result, Sequence) and isinstance(golden_result, Sequence) and not isinstance(result, str) and not isinstance(golden_result, str):
+ if len(result) != len(golden_result):
+ return False
+ for item in result:
+ if item not in golden_result:
+ return False
+ for item in golden_result:
+ if item not in result:
+ return False
+ return True
+ else:
+ return result == golden_result
print("##### K Language Grammar Test Suite #####")
@@ -60,7 +101,7 @@ def test_konfigs(test_dir):
if process.returncode == 0 and len(stderr) == 0:
try:
golden = open(golden_file, "r")
- test = open(test_yaml)
+ test = open(test_yaml, "r")
compare_results(test, golden)
os.remove(test_yaml)
except FileNotFoundError:
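
For reference, a sketch of how the new unordered comparison behaves. The import assumes hack/ is on sys.path, and the sample data is made up for illustration rather than taken from a golden file:

from test_konfig import compare_unordered_yaml_objects

# Top-level resource lists compare equal regardless of their ordering.
a = [{"id": "alicloud_vpc", "type": "Terraform"},
     {"id": "alicloud_vswitch", "type": "Terraform"}]
b = [{"id": "alicloud_vswitch", "type": "Terraform"},
     {"id": "alicloud_vpc", "type": "Terraform"}]
assert compare_unordered_yaml_objects(a, b)

# Sequences nested inside mappings are also compared without regard to order.
assert compare_unordered_yaml_objects(
    {"security_groups": ["sg-a", "sg-b"]},
    {"security_groups": ["sg-b", "sg-a"]},
)

# Differing scalar values are still reported as a mismatch.
assert not compare_unordered_yaml_objects(
    {"region": "cn-beijing"}, {"region": "cn-hangzhou"}
)
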