diff --git a/bootstrap/kubeadm/config/manager/manager_image_patch.yaml b/bootstrap/kubeadm/config/manager/manager_image_patch.yaml index b30b919bdbfd..7e792790c1be 100644 --- a/bootstrap/kubeadm/config/manager/manager_image_patch.yaml +++ b/bootstrap/kubeadm/config/manager/manager_image_patch.yaml @@ -7,5 +7,5 @@ spec: template: spec: containers: - - image: gcr.io/k8s-staging-cluster-api/kubeadm-bootstrap-controller:master + - image: gcr.io/k8s-staging-cluster-api/kubeadm-bootstrap-controller-amd64:dev name: manager diff --git a/config/manager/manager_image_patch.yaml b/config/manager/manager_image_patch.yaml index 472f75963741..acaecdad5257 100644 --- a/config/manager/manager_image_patch.yaml +++ b/config/manager/manager_image_patch.yaml @@ -7,5 +7,5 @@ spec: template: spec: containers: - - image: gcr.io/k8s-staging-cluster-api/cluster-api-controller:master + - image: gcr.io/k8s-staging-cluster-api/cluster-api-controller-amd64:dev name: manager diff --git a/controlplane/kubeadm/config/manager/manager_image_patch.yaml b/controlplane/kubeadm/config/manager/manager_image_patch.yaml index 46ae15ec140c..ee85cda73fa8 100644 --- a/controlplane/kubeadm/config/manager/manager_image_patch.yaml +++ b/controlplane/kubeadm/config/manager/manager_image_patch.yaml @@ -7,5 +7,5 @@ spec: template: spec: containers: - - image: gcr.io/k8s-staging-cluster-api/kubeadm-control-plane-controller:master + - image: gcr.io/k8s-staging-cluster-api/kubeadm-control-plane-controller-amd64:dev name: manager diff --git a/test/e2e/common.go b/test/e2e/common.go index 7104b2a8c075..baad2876cca9 100644 --- a/test/e2e/common.go +++ b/test/e2e/common.go @@ -20,15 +20,28 @@ import ( "context" "fmt" "path/filepath" + "strings" . 
"github.com/onsi/ginkgo" + "github.com/blang/semver" + "github.com/onsi/gomega/types" corev1 "k8s.io/api/core/v1" clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3" "sigs.k8s.io/cluster-api/test/framework" "sigs.k8s.io/cluster-api/util" ) +// Test suite constants for e2e config variables +const ( + KubernetesVersion = "KUBERNETES_VERSION" + CNIPath = "CNI" + KubernetesVersionUpgradeFrom = "KUBERNETES_VERSION_UPGRADE_FROM" + KubernetesVersionUpgradeTo = "KUBERNETES_VERSION_UPGRADE_TO" + EtcdVersionUpgradeTo = "ETCD_VERSION_UPGRADE_TO" + CoreDNSVersionUpgradeTo = "COREDNS_VERSION_UPGRADE_TO" +) + func Byf(format string, a ...interface{}) { By(fmt.Sprintf(format, a...)) } @@ -72,3 +85,25 @@ func dumpSpecResourcesAndCleanup(ctx context.Context, specName string, clusterPr } cancelWatches() } + +// HaveValidVersion succeeds if version is a valid semver version +func HaveValidVersion(version string) types.GomegaMatcher { + return &validVersionMatcher{version: version} +} + +type validVersionMatcher struct{ version string } + +func (m *validVersionMatcher) Match(actual interface{}) (success bool, err error) { + if _, err := semver.Parse(strings.TrimPrefix(strings.TrimSpace(m.version), "v")); err != nil { + return false, err + } + return true, nil +} + +func (m *validVersionMatcher) FailureMessage(actual interface{}) (message string) { + return fmt.Sprintf("Expected\n%s\n%s", m.version, " to be a valid version ") +} + +func (m *validVersionMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return fmt.Sprintf("Expected\n%s\n%s", m.version, " not to be a valid version ") +} diff --git a/test/e2e/config/docker-ci.yaml b/test/e2e/config/docker-ci.yaml index cd5528189af1..6b2ce86ee039 100644 --- a/test/e2e/config/docker-ci.yaml +++ b/test/e2e/config/docker-ci.yaml @@ -62,10 +62,10 @@ providers: variables: KUBERNETES_VERSION: "v1.18.2" - ETCD_VERSION_CURRENT: "3.4.3-0" - COREDNS_VERSION_CURRENT: "1.6.7" + ETCD_VERSION_UPGRADE_TO: "3.4.3-0" + 
COREDNS_VERSION_UPGRADE_TO: "1.6.7" + KUBERNETES_VERSION_UPGRADE_TO: "v1.18.2" KUBERNETES_VERSION_UPGRADE_FROM: "v1.17.2" - KUBERNETES_VERSION_MINIMUM_CAPI_SUPPORTED: "v1.16.4" DOCKER_SERVICE_DOMAIN: "cluster.local" DOCKER_SERVICE_CIDRS: "10.128.0.0/12" # IMPORTANT! This values should match the one used by the CNI provider @@ -79,4 +79,4 @@ intervals: default/wait-worker-nodes: ["5m", "10s"] default/wait-delete-cluster: ["3m", "10s"] default/wait-machine-upgrade: ["20m", "10s"] - + default/wait-machine-remediation: ["3m", "10s"] diff --git a/test/e2e/config/docker-dev.yaml b/test/e2e/config/docker-dev.yaml index 9c5849873d5e..56159d633588 100644 --- a/test/e2e/config/docker-dev.yaml +++ b/test/e2e/config/docker-dev.yaml @@ -92,10 +92,10 @@ providers: variables: KUBERNETES_VERSION: "v1.18.2" - ETCD_VERSION_CURRENT: "3.4.3-0" - COREDNS_VERSION_CURRENT: "1.6.7" + ETCD_VERSION_UPGRADE_TO: "3.4.3-0" + COREDNS_VERSION_UPGRADE_TO: "1.6.7" + KUBERNETES_VERSION_UPGRADE_TO: "v1.18.2" KUBERNETES_VERSION_UPGRADE_FROM: "v1.17.2" - KUBERNETES_VERSION_MINIMUM_CAPI_SUPPORTED: "v1.16.4" DOCKER_SERVICE_DOMAIN: "cluster.local" DOCKER_SERVICE_CIDRS: "10.128.0.0/12" # IMPORTANT! 
This values should match the one used by the CNI provider @@ -110,3 +110,4 @@ intervals: default/wait-worker-nodes: ["3m", "10s"] default/wait-delete-cluster: ["3m", "10s"] default/wait-machine-upgrade: ["15m", "10s"] + default/wait-machine-remediation: ["3m", "10s"] \ No newline at end of file diff --git a/test/e2e/data/infrastructure-aws/cluster-template.yaml b/test/e2e/data/infrastructure-aws/cluster-template.yaml deleted file mode 100644 index 0bafd4746ebc..000000000000 --- a/test/e2e/data/infrastructure-aws/cluster-template.yaml +++ /dev/null @@ -1,113 +0,0 @@ ---- -apiVersion: cluster.x-k8s.io/v1alpha3 -kind: Cluster -metadata: - name: "${CLUSTER_NAME}" -spec: - clusterNetwork: - pods: - cidrBlocks: ["192.168.0.0/16"] - infrastructureRef: - apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 - kind: AWSCluster - name: "${CLUSTER_NAME}" - controlPlaneRef: - kind: KubeadmControlPlane - apiVersion: controlplane.cluster.x-k8s.io/v1alpha3 - name: "${CLUSTER_NAME}-control-plane" ---- -apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 -kind: AWSCluster -metadata: - name: "${CLUSTER_NAME}" -spec: - region: "${AWS_REGION}" - sshKeyName: "${AWS_SSH_KEY_NAME}" ---- -kind: KubeadmControlPlane -apiVersion: controlplane.cluster.x-k8s.io/v1alpha3 -metadata: - name: "${CLUSTER_NAME}-control-plane" -spec: - replicas: ${CONTROL_PLANE_MACHINE_COUNT} - infrastructureTemplate: - kind: AWSMachineTemplate - apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 - name: "${CLUSTER_NAME}-control-plane" - kubeadmConfigSpec: - initConfiguration: - nodeRegistration: - name: '{{ ds.meta_data.local_hostname }}' - kubeletExtraArgs: - cloud-provider: aws - clusterConfiguration: - apiServer: - extraArgs: - cloud-provider: aws - controllerManager: - extraArgs: - cloud-provider: aws - joinConfiguration: - nodeRegistration: - name: '{{ ds.meta_data.local_hostname }}' - kubeletExtraArgs: - cloud-provider: aws - version: "${KUBERNETES_VERSION}" ---- -kind: AWSMachineTemplate -apiVersion: 
infrastructure.cluster.x-k8s.io/v1alpha3 -metadata: - name: "${CLUSTER_NAME}-control-plane" -spec: - template: - spec: - instanceType: "${AWS_CONTROL_PLANE_MACHINE_TYPE}" - iamInstanceProfile: "control-plane.cluster-api-provider-aws.sigs.k8s.io" - sshKeyName: "${AWS_SSH_KEY_NAME}" ---- -apiVersion: cluster.x-k8s.io/v1alpha3 -kind: MachineDeployment -metadata: - name: "${CLUSTER_NAME}-md-0" -spec: - clusterName: "${CLUSTER_NAME}" - replicas: ${WORKER_MACHINE_COUNT} - selector: - matchLabels: - template: - spec: - clusterName: "${CLUSTER_NAME}" - version: "${KUBERNETES_VERSION}" - bootstrap: - configRef: - name: "${CLUSTER_NAME}-md-0" - apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3 - kind: KubeadmConfigTemplate - infrastructureRef: - name: "${CLUSTER_NAME}-md-0" - apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 - kind: AWSMachineTemplate ---- -apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 -kind: AWSMachineTemplate -metadata: - name: "${CLUSTER_NAME}-md-0" -spec: - template: - spec: - instanceType: "${AWS_NODE_MACHINE_TYPE}" - iamInstanceProfile: "nodes.cluster-api-provider-aws.sigs.k8s.io" - sshKeyName: "${AWS_SSH_KEY_NAME}" ---- -apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3 -kind: KubeadmConfigTemplate -metadata: - name: "${CLUSTER_NAME}-md-0" -spec: - template: - spec: - joinConfiguration: - nodeRegistration: - name: '{{ ds.meta_data.local_hostname }}' - kubeletExtraArgs: - cloud-provider: aws diff --git a/test/e2e/data/infrastructure-docker/cluster-template-ci.yaml b/test/e2e/data/infrastructure-docker/cluster-template-ci.yaml index fbc0f5a2acea..03fd6f3390f8 100644 --- a/test/e2e/data/infrastructure-docker/cluster-template-ci.yaml +++ b/test/e2e/data/infrastructure-docker/cluster-template-ci.yaml @@ -105,3 +105,19 @@ spec: name: "${ CLUSTER_NAME }-md-0" apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 kind: DockerMachineTemplate +--- + +apiVersion: cluster.x-k8s.io/v1alpha3 +kind: MachineHealthCheck +metadata: + name: "${CLUSTER_NAME}-mhc-0" 
+spec: + clusterName: "${ CLUSTER_NAME }" + maxUnhealthy: 40% + selector: + matchLabels: + mhc: unhealthy + unhealthyConditions: + - type: E2ENodeUnhealthy + status: "True" + timeout: 30s \ No newline at end of file diff --git a/test/e2e/data/infrastructure-docker/cluster-template.yaml b/test/e2e/data/infrastructure-docker/cluster-template.yaml index fbc0f5a2acea..03fd6f3390f8 100644 --- a/test/e2e/data/infrastructure-docker/cluster-template.yaml +++ b/test/e2e/data/infrastructure-docker/cluster-template.yaml @@ -105,3 +105,19 @@ spec: name: "${ CLUSTER_NAME }-md-0" apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3 kind: DockerMachineTemplate +--- + +apiVersion: cluster.x-k8s.io/v1alpha3 +kind: MachineHealthCheck +metadata: + name: "${CLUSTER_NAME}-mhc-0" +spec: + clusterName: "${ CLUSTER_NAME }" + maxUnhealthy: 40% + selector: + matchLabels: + mhc: unhealthy + unhealthyConditions: + - type: E2ENodeUnhealthy + status: "True" + timeout: 30s \ No newline at end of file diff --git a/test/e2e/e2e_suite_test.go b/test/e2e/e2e_suite_test.go index 19ac7c6744b7..6ec76fc11a73 100644 --- a/test/e2e/e2e_suite_test.go +++ b/test/e2e/e2e_suite_test.go @@ -26,6 +26,7 @@ import ( "path/filepath" "strings" "testing" + "time" . "github.com/onsi/ginkgo" "github.com/onsi/ginkgo/config" @@ -78,11 +79,12 @@ func init() { } func TestE2E(t *testing.T) { // If running in prow, make sure to use the artifacts folder that will be reported in test grid (ignoring the value provided by flag).
if prowArtifactFolder, exists := os.LookupEnv("ARTIFACTS"); exists { artifactFolder = prowArtifactFolder } - + SetDefaultEventuallyTimeout(2 * time.Minute) + SetDefaultEventuallyPollingInterval(10 * time.Second) RegisterFailHandler(Fail) junitPath := filepath.Join(artifactFolder, fmt.Sprintf("junit.e2e_suite.%d.xml", config.GinkgoConfig.ParallelNode)) junitReporter := reporters.NewJUnitReporter(junitPath) diff --git a/test/e2e/kcp_upgrade.go b/test/e2e/kcp_upgrade.go index a82b4d6817af..4e4880e74f6c 100644 --- a/test/e2e/kcp_upgrade.go +++ b/test/e2e/kcp_upgrade.go @@ -34,12 +34,6 @@ import ( "sigs.k8s.io/cluster-api/util" ) -const ( - PreviousKubernetesVersion = "KUBERNETES_VERSION_UPGRADE_FROM" - EtcdCurrentVersion = "ETCD_VERSION_CURRENT" - CoreDNSCurrentVersion = "COREDNS_VERSION_CURRENT" -) - // KCPUpgradeSpecInput is the input for KCPUpgradeSpec. type KCPUpgradeSpecInput struct { E2EConfig *clusterctl.E2EConfig @@ -67,9 +61,11 @@ func KCPUpgradeSpec(ctx context.Context, inputGetter func() KCPUpgradeSpecInput) Expect(input.ClusterctlConfigPath).To(BeAnExistingFile(), "Invalid argument. input.ClusterctlConfigPath must be an existing file when calling %s spec", specName) Expect(input.BootstrapClusterProxy).ToNot(BeNil(), "Invalid argument. input.BootstrapClusterProxy can't be nil when calling %s spec", specName) Expect(os.MkdirAll(input.ArtifactFolder, 0755)).To(Succeed(), "Invalid argument. 
input.ArtifactFolder can't be created for %s spec", specName) - Expect(input.E2EConfig.Variables).To(HaveKey(PreviousKubernetesVersion)) - Expect(input.E2EConfig.Variables).To(HaveKey(EtcdCurrentVersion)) - Expect(input.E2EConfig.Variables).To(HaveKey(CoreDNSCurrentVersion)) + Expect(input.E2EConfig.Variables).To(HaveKey(KubernetesVersionUpgradeTo)) + Expect(input.E2EConfig.Variables).To(HaveKey(CNIPath)) + Expect(input.E2EConfig.Variables).To(HaveKey(KubernetesVersionUpgradeFrom)) + Expect(input.E2EConfig.Variables).To(HaveKey(EtcdVersionUpgradeTo)) + Expect(input.E2EConfig.Variables).To(HaveKey(CoreDNSVersionUpgradeTo)) // Setup a Namespace where to host objects for this spec and create a watcher for the namespace events. namespace, cancelWatches = setupSpecNamespace(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder) @@ -78,9 +74,6 @@ func KCPUpgradeSpec(ctx context.Context, inputGetter func() KCPUpgradeSpecInput) It("Should successfully upgrade Kubernetes, DNS, kube-proxy, and etcd in a single control plane cluster", func() { By("Creating a workload cluster") - Expect(input.E2EConfig.Variables).To(HaveKey(clusterctl.KubernetesVersion)) - Expect(input.E2EConfig.Variables).To(HaveKey(clusterctl.CNIPath)) - cluster, controlPlane, _ = clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{ ClusterProxy: input.BootstrapClusterProxy, ConfigCluster: clusterctl.ConfigClusterInput{ @@ -91,11 +84,11 @@ func KCPUpgradeSpec(ctx context.Context, inputGetter func() KCPUpgradeSpecInput) Flavor: clusterctl.DefaultFlavor, Namespace: namespace.Name, ClusterName: fmt.Sprintf("cluster-%s", util.RandomString(6)), - KubernetesVersion: input.GetPreviousKubernetesVersion(), + KubernetesVersion: input.E2EConfig.GetVariable(KubernetesVersionUpgradeFrom), ControlPlaneMachineCount: pointer.Int64Ptr(1), WorkerMachineCount: pointer.Int64Ptr(1), }, - CNIManifestPath: input.E2EConfig.GetCNIPath(), + CNIManifestPath: 
input.E2EConfig.GetVariable(CNIPath), WaitForClusterIntervals: input.E2EConfig.GetIntervals(specName, "wait-cluster"), WaitForControlPlaneIntervals: input.E2EConfig.GetIntervals(specName, "wait-control-plane"), WaitForMachineDeployments: input.E2EConfig.GetIntervals(specName, "wait-worker-nodes"), @@ -103,13 +96,12 @@ func KCPUpgradeSpec(ctx context.Context, inputGetter func() KCPUpgradeSpecInput) By("Upgrading Kubernetes, DNS, kube-proxy, and etcd versions") framework.UpgradeControlPlaneAndWaitForUpgrade(ctx, framework.UpgradeControlPlaneAndWaitForUpgradeInput{ - ClusterProxy: input.BootstrapClusterProxy, - Cluster: cluster, - ControlPlane: controlPlane, - //Valid image tags for v1.17.2 - EtcdImageTag: "3.4.3-0", - DNSImageTag: "1.6.7", - KubernetesUpgradeVersion: input.E2EConfig.GetKubernetesVersion(), + ClusterProxy: input.BootstrapClusterProxy, + Cluster: cluster, + ControlPlane: controlPlane, + EtcdImageTag: input.E2EConfig.GetVariable(EtcdVersionUpgradeTo), + DNSImageTag: input.E2EConfig.GetVariable(CoreDNSVersionUpgradeTo), + KubernetesUpgradeVersion: input.E2EConfig.GetVariable(KubernetesVersionUpgradeTo), WaitForMachinesToBeUpgraded: input.E2EConfig.GetIntervals(specName, "wait-machine-upgrade"), WaitForDNSUpgrade: input.E2EConfig.GetIntervals(specName, "wait-machine-upgrade"), WaitForEtcdUpgrade: input.E2EConfig.GetIntervals(specName, "wait-machine-upgrade"), @@ -121,8 +113,6 @@ func KCPUpgradeSpec(ctx context.Context, inputGetter func() KCPUpgradeSpecInput) It("Should successfully upgrade Kubernetes, DNS, kube-proxy, and etcd in a HA cluster", func() { By("Creating a workload cluster") - Expect(input.E2EConfig.Variables).To(HaveKey(clusterctl.KubernetesVersion)) - Expect(input.E2EConfig.Variables).To(HaveKey(clusterctl.CNIPath)) cluster, controlPlane, _ = clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{ ClusterProxy: input.BootstrapClusterProxy, @@ -134,24 +124,24 @@ func KCPUpgradeSpec(ctx context.Context, 
inputGetter func() KCPUpgradeSpecInput) Flavor: clusterctl.DefaultFlavor, Namespace: namespace.Name, ClusterName: fmt.Sprintf("cluster-%s", util.RandomString(6)), - KubernetesVersion: input.GetPreviousKubernetesVersion(), + KubernetesVersion: input.E2EConfig.GetVariable(KubernetesVersionUpgradeFrom), ControlPlaneMachineCount: pointer.Int64Ptr(3), WorkerMachineCount: pointer.Int64Ptr(1), }, - CNIManifestPath: input.E2EConfig.GetCNIPath(), + CNIManifestPath: input.E2EConfig.GetVariable(CNIPath), WaitForClusterIntervals: input.E2EConfig.GetIntervals(specName, "wait-cluster"), WaitForControlPlaneIntervals: input.E2EConfig.GetIntervals(specName, "wait-control-plane"), WaitForMachineDeployments: input.E2EConfig.GetIntervals(specName, "wait-worker-nodes"), }) - By("Upgrading Kubernetes") + By("Upgrading Kubernetes, DNS, kube-proxy, and etcd versions") framework.UpgradeControlPlaneAndWaitForUpgrade(ctx, framework.UpgradeControlPlaneAndWaitForUpgradeInput{ ClusterProxy: input.BootstrapClusterProxy, Cluster: cluster, ControlPlane: controlPlane, - EtcdImageTag: input.GetEtcdCurrentVersion(), - DNSImageTag: input.GetCoreDNSCurrentVersion(), - KubernetesUpgradeVersion: input.E2EConfig.GetKubernetesVersion(), + EtcdImageTag: input.E2EConfig.GetVariable(EtcdVersionUpgradeTo), + DNSImageTag: input.E2EConfig.GetVariable(CoreDNSVersionUpgradeTo), + KubernetesUpgradeVersion: input.E2EConfig.GetVariable(KubernetesVersionUpgradeTo), WaitForMachinesToBeUpgraded: input.E2EConfig.GetIntervals(specName, "wait-machine-upgrade"), WaitForDNSUpgrade: input.E2EConfig.GetIntervals(specName, "wait-machine-upgrade"), WaitForEtcdUpgrade: input.E2EConfig.GetIntervals(specName, "wait-machine-upgrade"), @@ -165,18 +155,3 @@ func KCPUpgradeSpec(ctx context.Context, inputGetter func() KCPUpgradeSpecInput) dumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, namespace, cancelWatches, cluster, input.E2EConfig.GetIntervals, input.SkipCleanup) }) } - -// 
GetPreviousKubernetesVersion returns the previous kubernetes version to test an upgrade from -func (k KCPUpgradeSpecInput) GetPreviousKubernetesVersion() string { - return k.E2EConfig.Variables[PreviousKubernetesVersion] -} - -// GetEtcdCurrentVersion returns the version of etcd to upgrade to -func (k KCPUpgradeSpecInput) GetEtcdCurrentVersion() string { - return k.E2EConfig.Variables[EtcdCurrentVersion] -} - -// GetCoreDNSUpgradeVersion returns the version of etcd to upgrade to -func (k KCPUpgradeSpecInput) GetCoreDNSCurrentVersion() string { - return k.E2EConfig.Variables[CoreDNSCurrentVersion] -} diff --git a/test/e2e/md_upgrades.go b/test/e2e/md_upgrades.go new file mode 100644 index 000000000000..2f772c4aad4c --- /dev/null +++ b/test/e2e/md_upgrades.go @@ -0,0 +1,120 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + "context" + "fmt" + "os" + "path/filepath" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + corev1 "k8s.io/api/core/v1" + "k8s.io/utils/pointer" + clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3" + "sigs.k8s.io/cluster-api/test/framework" + "sigs.k8s.io/cluster-api/test/framework/clusterctl" + "sigs.k8s.io/cluster-api/util" +) + +// MachineDeploymentUpgradesSpecInput is the input for MachineDeploymentUpgradesSpec. 
+type MachineDeploymentUpgradesSpecInput struct { + E2EConfig *clusterctl.E2EConfig + ClusterctlConfigPath string + BootstrapClusterProxy framework.ClusterProxy + ArtifactFolder string + SkipCleanup bool +} + +// MachineDeploymentUpgradesSpec implements a test that verifies that MachineDeployment upgrades are successful. +func MachineDeploymentUpgradesSpec(ctx context.Context, inputGetter func() MachineDeploymentUpgradesSpecInput) { + var ( + specName = "md-upgrades" + input MachineDeploymentUpgradesSpecInput + namespace *corev1.Namespace + cancelWatches context.CancelFunc + cluster *clusterv1.Cluster + ) + + BeforeEach(func() { + Expect(ctx).NotTo(BeNil(), "ctx is required for %s spec", specName) + input = inputGetter() + Expect(input.E2EConfig).ToNot(BeNil(), "Invalid argument. input.E2EConfig can't be nil when calling %s spec", specName) + Expect(input.ClusterctlConfigPath).To(BeAnExistingFile(), "Invalid argument. input.ClusterctlConfigPath must be an existing file when calling %s spec", specName) + Expect(input.BootstrapClusterProxy).ToNot(BeNil(), "Invalid argument. input.BootstrapClusterProxy can't be nil when calling %s spec", specName) + Expect(os.MkdirAll(input.ArtifactFolder, 0755)).To(Succeed(), "Invalid argument. input.ArtifactFolder can't be created for %s spec", specName) + Expect(input.E2EConfig.Variables).To(HaveKey(KubernetesVersion)) + Expect(input.E2EConfig.Variables).To(HaveValidVersion(input.E2EConfig.GetVariable(KubernetesVersion))) + Expect(input.E2EConfig.Variables).To(HaveKey(KubernetesVersionUpgradeFrom)) + Expect(input.E2EConfig.Variables).To(HaveValidVersion(input.E2EConfig.GetVariable(KubernetesVersionUpgradeFrom))) + Expect(input.E2EConfig.Variables).To(HaveKey(CNIPath)) + + // Setup a Namespace where to host objects for this spec and create a watcher for the namespace events. 
+ namespace, cancelWatches = setupSpecNamespace(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder) + }) + + It("Should successfully upgrade Machines upon changes in relevant MachineDeployment fields", func() { + + By("Creating a workload cluster") + + var mds []*clusterv1.MachineDeployment + cluster, _, mds = clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{ + ClusterProxy: input.BootstrapClusterProxy, + ConfigCluster: clusterctl.ConfigClusterInput{ + LogFolder: filepath.Join(input.ArtifactFolder, "clusters", input.BootstrapClusterProxy.GetName()), + ClusterctlConfigPath: input.ClusterctlConfigPath, + KubeconfigPath: input.BootstrapClusterProxy.GetKubeconfigPath(), + InfrastructureProvider: clusterctl.DefaultInfrastructureProvider, + Flavor: clusterctl.DefaultFlavor, + Namespace: namespace.Name, + ClusterName: fmt.Sprintf("cluster-%s", util.RandomString(6)), + KubernetesVersion: input.E2EConfig.GetVariable(KubernetesVersionUpgradeFrom), + ControlPlaneMachineCount: pointer.Int64Ptr(1), + WorkerMachineCount: pointer.Int64Ptr(1), + }, + CNIManifestPath: input.E2EConfig.GetVariable(CNIPath), + WaitForClusterIntervals: input.E2EConfig.GetIntervals(specName, "wait-cluster"), + WaitForControlPlaneIntervals: input.E2EConfig.GetIntervals(specName, "wait-control-plane"), + WaitForMachineDeployments: input.E2EConfig.GetIntervals(specName, "wait-worker-nodes"), + }) + + By("Upgrading MachineDeployment's Kubernetes version to a valid version") + framework.UpgradeMachineDeploymentsAndWait(context.TODO(), framework.UpgradeMachineDeploymentsAndWaitInput{ + ClusterProxy: input.BootstrapClusterProxy, + Cluster: cluster, + UpgradeVersion: input.E2EConfig.GetVariable(KubernetesVersion), + WaitForMachinesToBeUpgraded: input.E2EConfig.GetIntervals(specName, "wait-machine-upgrade"), + MachineDeployments: mds, + }) + + By("Upgrading MachineDeployment Infrastructure ref and wait for rolling upgrade") + 
framework.UpgradeMachineDeploymentInfrastructureRefAndWait(context.TODO(), framework.UpgradeMachineDeploymentInfrastructureRefAndWaitInput{ + ClusterProxy: input.BootstrapClusterProxy, + Cluster: cluster, + WaitForMachinesToBeUpgraded: input.E2EConfig.GetIntervals(specName, "wait-machine-upgrade"), + MachineDeployments: mds, + }) + By("PASSED!") + }) + + AfterEach(func() { + // Dumps all the resources in the spec namespace, then cleanups the cluster object and the spec namespace itself. + dumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, namespace, cancelWatches, cluster, input.E2EConfig.GetIntervals, input.SkipCleanup) + }) +} diff --git a/test/e2e/md_upgrades_test.go b/test/e2e/md_upgrades_test.go new file mode 100644 index 000000000000..cf6409789e38 --- /dev/null +++ b/test/e2e/md_upgrades_test.go @@ -0,0 +1,40 @@ +// +build e2e + +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +// +//import ( +// "context" +// +// . 
"github.com/onsi/ginkgo" +//) +// +//var _ = Describe("When testing MachineDeployment upgrades", func() { +// +// MachineDeploymentUpgradesSpec(context.TODO(), func() MachineDeploymentUpgradesSpecInput { +// return MachineDeploymentUpgradesSpecInput{ +// E2EConfig: e2eConfig, +// ClusterctlConfigPath: clusterctlConfigPath, +// BootstrapClusterProxy: bootstrapClusterProxy, +// ArtifactFolder: artifactFolder, +// SkipCleanup: skipCleanup, +// } +// }) +// +//}) diff --git a/test/e2e/mhc_remediations.go b/test/e2e/mhc_remediations.go new file mode 100644 index 000000000000..a0511649780d --- /dev/null +++ b/test/e2e/mhc_remediations.go @@ -0,0 +1,109 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + "context" + "fmt" + "os" + "path/filepath" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + corev1 "k8s.io/api/core/v1" + "k8s.io/utils/pointer" + clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3" + "sigs.k8s.io/cluster-api/test/framework" + "sigs.k8s.io/cluster-api/test/framework/clusterctl" + "sigs.k8s.io/cluster-api/util" +) + +// MachineRemediationSpecInput is the input for MachineRemediationSpec. 
+type MachineRemediationSpecInput struct { + E2EConfig *clusterctl.E2EConfig + ClusterctlConfigPath string + BootstrapClusterProxy framework.ClusterProxy + ArtifactFolder string + SkipCleanup bool +} + +// MachineRemediationSpec implements a test that verifies that Machines are remediated by MHC during unhealthy conditions. +func MachineRemediationSpec(ctx context.Context, inputGetter func() MachineRemediationSpecInput) { + var ( + specName = "mhc-remediation" + input MachineRemediationSpecInput + namespace *corev1.Namespace + cancelWatches context.CancelFunc + cluster *clusterv1.Cluster + ) + + BeforeEach(func() { + Expect(ctx).NotTo(BeNil(), "ctx is required for %s spec", specName) + input = inputGetter() + Expect(input.E2EConfig).ToNot(BeNil(), "Invalid argument. input.E2EConfig can't be nil when calling %s spec", specName) + Expect(input.ClusterctlConfigPath).To(BeAnExistingFile(), "Invalid argument. input.ClusterctlConfigPath must be an existing file when calling %s spec", specName) + Expect(input.BootstrapClusterProxy).ToNot(BeNil(), "Invalid argument. input.BootstrapClusterProxy can't be nil when calling %s spec", specName) + Expect(os.MkdirAll(input.ArtifactFolder, 0755)).To(Succeed(), "Invalid argument. input.ArtifactFolder can't be created for %s spec", specName) + Expect(input.E2EConfig.Variables).To(HaveKey(KubernetesVersion)) + Expect(input.E2EConfig.Variables).To(HaveKey(CNIPath)) + + // Setup a Namespace where to host objects for this spec and create a watcher for the namespace events. 
+ namespace, cancelWatches = setupSpecNamespace(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder) + }) + + It("Should successfully remediate unhealthy machines with MachineHealthCheck", func() { + + By("Creating a workload cluster") + + var mds []*clusterv1.MachineDeployment + cluster, _, mds = clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{ + ClusterProxy: input.BootstrapClusterProxy, + ConfigCluster: clusterctl.ConfigClusterInput{ + LogFolder: filepath.Join(input.ArtifactFolder, "clusters", input.BootstrapClusterProxy.GetName()), + ClusterctlConfigPath: input.ClusterctlConfigPath, + KubeconfigPath: input.BootstrapClusterProxy.GetKubeconfigPath(), + InfrastructureProvider: clusterctl.DefaultInfrastructureProvider, + Flavor: clusterctl.DefaultFlavor, + Namespace: namespace.Name, + ClusterName: fmt.Sprintf("cluster-%s", util.RandomString(6)), + KubernetesVersion: input.E2EConfig.GetVariable(KubernetesVersion), + ControlPlaneMachineCount: pointer.Int64Ptr(1), + WorkerMachineCount: pointer.Int64Ptr(1), + }, + CNIManifestPath: input.E2EConfig.GetVariable(CNIPath), + WaitForClusterIntervals: input.E2EConfig.GetIntervals(specName, "wait-cluster"), + WaitForControlPlaneIntervals: input.E2EConfig.GetIntervals(specName, "wait-control-plane"), + WaitForMachineDeployments: input.E2EConfig.GetIntervals(specName, "wait-worker-nodes"), + }) + + By("Creating a MachineHealthCheck and wait for remediation") + framework.DiscoverMachineHealthChecksAndWait(context.TODO(), framework.DiscoverMachineHealthCheckAndWaitInput{ + ClusterProxy: input.BootstrapClusterProxy, + Cluster: cluster, + MachineDeployments: mds, + WaitForMachineRemediation: input.E2EConfig.GetIntervals(specName, "wait-machine-remediation"), + }) + + By("PASSED!") + }) + + AfterEach(func() { + // Dumps all the resources in the spec namespace, then cleanups the cluster object and the spec namespace itself. 
+ dumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, namespace, cancelWatches, cluster, input.E2EConfig.GetIntervals, input.SkipCleanup) + }) +} diff --git a/test/e2e/mhc_remediations_test.go b/test/e2e/mhc_remediations_test.go new file mode 100644 index 000000000000..2c7d534210d5 --- /dev/null +++ b/test/e2e/mhc_remediations_test.go @@ -0,0 +1,39 @@ +// +build e2e + +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + "context" + + . "github.com/onsi/ginkgo" +) + +var _ = Describe("When testing unhealthy machines remediation", func() { + + MachineRemediationSpec(context.TODO(), func() MachineRemediationSpecInput { + return MachineRemediationSpecInput{ + E2EConfig: e2eConfig, + ClusterctlConfigPath: clusterctlConfigPath, + BootstrapClusterProxy: bootstrapClusterProxy, + ArtifactFolder: artifactFolder, + SkipCleanup: skipCleanup, + } + }) + +}) diff --git a/test/e2e/quick_start.go b/test/e2e/quick_start.go index 75cc48828e81..a0b2959b0865 100644 --- a/test/e2e/quick_start.go +++ b/test/e2e/quick_start.go @@ -62,6 +62,9 @@ func QuickStartSpec(ctx context.Context, inputGetter func() QuickStartSpecInput) Expect(input.BootstrapClusterProxy).ToNot(BeNil(), "Invalid argument. input.BootstrapClusterProxy can't be nil when calling %s spec", specName) Expect(os.MkdirAll(input.ArtifactFolder, 0755)).To(Succeed(), "Invalid argument. 
input.ArtifactFolder can't be created for %s spec", specName) + Expect(input.E2EConfig.Variables).To(HaveKey(KubernetesVersion)) + Expect(input.E2EConfig.Variables).To(HaveKey(CNIPath)) + // Setup a Namespace where to host objects for this spec and create a watcher for the namespace events. namespace, cancelWatches = setupSpecNamespace(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder) }) @@ -69,8 +72,6 @@ func QuickStartSpec(ctx context.Context, inputGetter func() QuickStartSpecInput) It("Should create a workload cluster", func() { By("Creating a workload cluster") - Expect(input.E2EConfig.Variables).To(HaveKey(clusterctl.KubernetesVersion)) - Expect(input.E2EConfig.Variables).To(HaveKey(clusterctl.CNIPath)) cluster, _, _ = clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{ ClusterProxy: input.BootstrapClusterProxy, @@ -82,11 +83,11 @@ func QuickStartSpec(ctx context.Context, inputGetter func() QuickStartSpecInput) Flavor: clusterctl.DefaultFlavor, Namespace: namespace.Name, ClusterName: fmt.Sprintf("cluster-%s", util.RandomString(6)), - KubernetesVersion: input.E2EConfig.GetKubernetesVersion(), + KubernetesVersion: input.E2EConfig.GetVariable(KubernetesVersion), ControlPlaneMachineCount: pointer.Int64Ptr(1), WorkerMachineCount: pointer.Int64Ptr(1), }, - CNIManifestPath: input.E2EConfig.GetCNIPath(), + CNIManifestPath: input.E2EConfig.GetVariable(CNIPath), WaitForClusterIntervals: input.E2EConfig.GetIntervals(specName, "wait-cluster"), WaitForControlPlaneIntervals: input.E2EConfig.GetIntervals(specName, "wait-control-plane"), WaitForMachineDeployments: input.E2EConfig.GetIntervals(specName, "wait-worker-nodes"), diff --git a/test/e2e/self_hosted.go b/test/e2e/self_hosted.go index 450a8975457f..8070de1210ec 100644 --- a/test/e2e/self_hosted.go +++ b/test/e2e/self_hosted.go @@ -34,8 +34,6 @@ import ( "sigs.k8s.io/cluster-api/util" ) -const MinimumCAPISupportedKubernetesVersion = 
"KUBERNETES_VERSION_MINIMUM_CAPI_SUPPORTED" - // SelfHostedSpecInput is the input for SelfHostedSpec. type SelfHostedSpecInput struct { E2EConfig *clusterctl.E2EConfig @@ -67,17 +65,16 @@ func SelfHostedSpec(ctx context.Context, inputGetter func() SelfHostedSpecInput) Expect(input.ClusterctlConfigPath).To(BeAnExistingFile(), "Invalid argument. input.ClusterctlConfigPath must be an existing file when calling %s spec", specName) Expect(input.BootstrapClusterProxy).ToNot(BeNil(), "Invalid argument. input.BootstrapClusterProxy can't be nil when calling %s spec", specName) Expect(os.MkdirAll(input.ArtifactFolder, 0755)).To(Succeed(), "Invalid argument. input.ArtifactFolder can't be created for %s spec", specName) - Expect(input.E2EConfig.Variables).To(HaveKey(MinimumCAPISupportedKubernetesVersion)) + Expect(input.E2EConfig.Variables).To(HaveKey(CNIPath)) + Expect(input.E2EConfig.Variables).To(HaveKey(KubernetesVersion)) // Setup a Namespace where to host objects for this spec and create a watcher for the namespace events. 
namespace, cancelWatches = setupSpecNamespace(context.TODO(), specName, input.BootstrapClusterProxy, input.ArtifactFolder) }) - It("Should create a workload cluster", func() { + It("Should pivot the bootstrap cluster to a self-hosted cluster", func() { By("Creating a workload cluster") - Expect(input.E2EConfig.Variables).To(HaveKey(clusterctl.KubernetesVersion)) - Expect(input.E2EConfig.Variables).To(HaveKey(clusterctl.CNIPath)) cluster, _, _ = clusterctl.ApplyClusterTemplateAndWait(context.TODO(), clusterctl.ApplyClusterTemplateAndWaitInput{ ClusterProxy: input.BootstrapClusterProxy, @@ -89,11 +86,11 @@ func SelfHostedSpec(ctx context.Context, inputGetter func() SelfHostedSpecInput) Flavor: clusterctl.DefaultFlavor, Namespace: namespace.Name, ClusterName: fmt.Sprintf("cluster-%s", util.RandomString(6)), - KubernetesVersion: input.GetMinimumCAPISupportedKubernetesVersion(), + KubernetesVersion: input.E2EConfig.GetVariable(KubernetesVersion), ControlPlaneMachineCount: pointer.Int64Ptr(1), WorkerMachineCount: pointer.Int64Ptr(1), }, - CNIManifestPath: input.E2EConfig.GetCNIPath(), + CNIManifestPath: input.E2EConfig.GetVariable(CNIPath), WaitForClusterIntervals: input.E2EConfig.GetIntervals(specName, "wait-cluster"), WaitForControlPlaneIntervals: input.E2EConfig.GetIntervals(specName, "wait-control-plane"), WaitForMachineDeployments: input.E2EConfig.GetIntervals(specName, "wait-worker-nodes"), @@ -193,7 +190,3 @@ func SelfHostedSpec(ctx context.Context, inputGetter func() SelfHostedSpecInput) dumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, namespace, cancelWatches, cluster, input.E2EConfig.GetIntervals, input.SkipCleanup) }) } - -func (k SelfHostedSpecInput) GetMinimumCAPISupportedKubernetesVersion() string { - return k.E2EConfig.Variables[MinimumCAPISupportedKubernetesVersion] -} diff --git a/test/framework/alltypes_helpers.go b/test/framework/alltypes_helpers.go index 87c4d0ee3df8..bb8a79d17ed1 100644 --- 
a/test/framework/alltypes_helpers.go +++ b/test/framework/alltypes_helpers.go @@ -24,6 +24,8 @@ import ( "path" "path/filepath" + "k8s.io/apimachinery/pkg/labels" + . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" @@ -172,3 +174,20 @@ func CreateRelatedResources(ctx context.Context, input CreateRelatedResourcesInp }, intervals...).Should(Succeed()) } } + +// hasMatchingLabels verifies that the Label Selector matches the given Labels +func hasMatchingLabels(matchSelector metav1.LabelSelector, matchLabels map[string]string) bool { + // This should never fail, validating webhook should catch this first + selector, err := metav1.LabelSelectorAsSelector(&matchSelector) + if err != nil { + return false + } + // If selector is nil or empty , it should match all. + if selector.Empty() { + return true + } + if !selector.Matches(labels.Set(matchLabels)) { + return false + } + return true +} diff --git a/test/framework/clusterctl/client.go b/test/framework/clusterctl/client.go index 9b4eb216f726..ce6ce60b050b 100644 --- a/test/framework/clusterctl/client.go +++ b/test/framework/clusterctl/client.go @@ -19,6 +19,7 @@ package clusterctl import ( "context" "fmt" + "os" "strings" . "github.com/onsi/ginkgo" @@ -146,6 +147,12 @@ type MoveInput struct { // Move moves workload clusters. func Move(ctx context.Context, input MoveInput) { + Expect(ctx).NotTo(BeNil(), "ctx is required for Move") + Expect(input.ClusterctlConfigPath).To(BeAnExistingFile(), "Invalid argument. input.ClusterctlConfigPath must be an existing file when calling Move") + Expect(input.FromKubeconfigPath).To(BeAnExistingFile(), "Invalid argument. input.FromKubeconfigPath must be an existing file when calling Move") + Expect(input.ToKubeconfigPath).To(BeAnExistingFile(), "Invalid argument. input.ToKubeconfigPath must be an existing file when calling Move") + Expect(os.MkdirAll(input.LogFolder, 0755)).To(Succeed(), "Invalid argument. 
input.LogFolder can't be created for Move") + By("Moving workload clusters") clusterctlClient, log := getClusterctlClientWithLogger(input.ClusterctlConfigPath, "clusterctl-move.log", input.LogFolder) diff --git a/test/framework/clusterctl/e2e_config.go b/test/framework/clusterctl/e2e_config.go index dd93b919f131..7934ce939720 100644 --- a/test/framework/clusterctl/e2e_config.go +++ b/test/framework/clusterctl/e2e_config.go @@ -40,12 +40,6 @@ import ( // Provides access to the configuration for an e2e test. -// Define constants for e2e config variables -const ( - KubernetesVersion = "KUBERNETES_VERSION" - CNIPath = "CNI" -) - // LoadE2EConfigInput is the input for LoadE2EConfig. type LoadE2EConfigInput struct { // ConfigPath for the e2e test. @@ -192,8 +186,6 @@ func errEmptyArg(argName string) error { // - There should be one InfraProvider (pick your own). // - Image should have name and loadBehavior be one of [mustload, tryload]. // - Intervals should be valid ginkgo intervals. -// - KubernetesVersion is not nil and valid. -// - CNIPath is not nil. func (c *E2EConfig) Validate() error { // ManagementClusterName should not be empty. if c.ManagementClusterName == "" { @@ -232,20 +224,6 @@ func (c *E2EConfig) Validate() error { } } } - - // If KubernetesVersion is nil or not valid, return error. - k8sVersion := c.GetKubernetesVersion() - if k8sVersion == "" { - return errEmptyArg(fmt.Sprintf("Variables[%s]", KubernetesVersion)) - } else if _, err := version.ParseSemantic(k8sVersion); err != nil { - return errInvalidArg("Variables[%s]=%q", KubernetesVersion, k8sVersion) - } - - // If CniPath is nil, return error. - cniPath := c.GetCNIPath() - if cniPath == "" { - return errEmptyArg(fmt.Sprintf("Variables[%s]", CNIPath)) - } return nil } @@ -394,9 +372,7 @@ func (c *E2EConfig) GetIntervals(spec, key string) []interface{} { // GetVariable returns a variable from the e2e config file. 
func (c *E2EConfig) GetVariable(varName string) string { version, ok := c.Variables[varName] - if !ok { - return "" - } + Expect(ok).NotTo(BeFalse()) return version } @@ -411,21 +387,3 @@ func (c *E2EConfig) GetInt64PtrVariable(varName string) *int64 { Expect(err).NotTo(HaveOccurred()) return pointer.Int64Ptr(wCount) } - -// GetKubernetesVersion returns the kubernetes version provided in e2e config. -func (c *E2EConfig) GetKubernetesVersion() string { - version, ok := c.Variables[KubernetesVersion] - if !ok { - return "" - } - return version -} - -// GetCNIPath returns the CNI path provided in e2e config. -func (c *E2EConfig) GetCNIPath() string { - path, ok := c.Variables[CNIPath] - if !ok { - return "" - } - return path -} diff --git a/test/framework/controlpane_helpers.go b/test/framework/controlpane_helpers.go index 537c504cdc54..128589fd3054 100644 --- a/test/framework/controlpane_helpers.go +++ b/test/framework/controlpane_helpers.go @@ -116,6 +116,10 @@ type WaitForOneKubeadmControlPlaneMachineToExistInput struct { // WaitForKubeadmControlPlaneMachineToExist will wait until all control plane machines have node refs. func WaitForOneKubeadmControlPlaneMachineToExist(ctx context.Context, input WaitForOneKubeadmControlPlaneMachineToExistInput, intervals ...interface{}) { + Expect(ctx).NotTo(BeNil(), "ctx is required for WaitForOneKubeadmControlPlaneMachineToExist") + Expect(input.Lister).ToNot(BeNil(), "Invalid argument. input.Getter can't be nil when calling WaitForOneKubeadmControlPlaneMachineToExist") + Expect(input.ControlPlane).ToNot(BeNil(), "Invalid argument. 
input.ControlPlane can't be nil when calling WaitForOneKubeadmControlPlaneMachineToExist") + By("waiting for one control plane node to exist") inClustersNamespaceListOption := client.InNamespace(input.Cluster.Namespace) // ControlPlane labels @@ -205,31 +209,6 @@ func AssertControlPlaneFailureDomains(ctx context.Context, input AssertControlPl Expect(failureDomainCounts).To(Equal(input.ExpectedFailureDomains)) } -type GetMachinesByClusterInput struct { - Lister Lister - ClusterName string - Namespace string -} - -// GetControlPlaneMachinesByCluster returns the Machine objects for a cluster. -// Important! this method relies on labels that are created by the CAPI controllers during the first reconciliation, so -// it is necessary to ensure this is already happened before calling it. -func GetControlPlaneMachinesByCluster(ctx context.Context, input GetMachinesByClusterInput) []clusterv1.Machine { - options := append(byClusterOptions(input.ClusterName, input.Namespace), controlPlaneMachineOptions()...) - - machineList := &clusterv1.MachineList{} - Expect(input.Lister.List(ctx, machineList, options...)).To(Succeed(), "Failed to list MachineList object for Cluster %s/%s", input.Namespace, input.ClusterName) - - return machineList.Items -} - -// controlPlaneMachineOptions returns a set of ListOptions that allows to get all machine objects belonging to control plane. -func controlPlaneMachineOptions() []client.ListOption { - return []client.ListOption{ - client.HasLabels{clusterv1.MachineControlPlaneLabelName}, - } -} - // DiscoveryAndWaitForControlPlaneInitializedInput is the input type for DiscoveryAndWaitForControlPlaneInitialized. 
type DiscoveryAndWaitForControlPlaneInitializedInput struct { Lister Lister @@ -336,7 +315,7 @@ func UpgradeControlPlaneAndWaitForUpgrade(ctx context.Context, input UpgradeCont Expect(patchHelper.Patch(ctx, input.ControlPlane)).To(Succeed()) fmt.Fprintf(GinkgoWriter, "Waiting for machines to have the upgraded kubernetes version\n") - WaitForMachinesToBeUpgraded(ctx, WaitForMachinesToBeUpgradedInput{ + WaitForControlPlaneMachinesToBeUpgraded(ctx, WaitForControlPlaneMachinesToBeUpgradedInput{ Lister: mgmtClient, Cluster: input.Cluster, MachineCount: int(*input.ControlPlane.Spec.Replicas), @@ -366,3 +345,10 @@ func UpgradeControlPlaneAndWaitForUpgrade(ctx context.Context, input UpgradeCont Condition: EtcdImageTagCondition(input.EtcdImageTag, int(*input.ControlPlane.Spec.Replicas)), }, input.WaitForEtcdUpgrade...) } + +// controlPlaneMachineOptions returns a set of ListOptions that allows to get all machine objects belonging to control plane. +func controlPlaneMachineOptions() []client.ListOption { + return []client.ListOption{ + client.HasLabels{clusterv1.MachineControlPlaneLabelName}, + } +} diff --git a/test/framework/deployment_helpers.go b/test/framework/deployment_helpers.go index 26cec38dbbf9..dc913077efd1 100644 --- a/test/framework/deployment_helpers.go +++ b/test/framework/deployment_helpers.go @@ -80,7 +80,7 @@ type WatchDeploymentLogsInput struct { func WatchDeploymentLogs(ctx context.Context, input WatchDeploymentLogsInput) { Expect(ctx).NotTo(BeNil(), "ctx is required for WatchControllerLogs") Expect(input.ClientSet).NotTo(BeNil(), "input.ClientSet is required for WatchControllerLogs") - Expect(input.Deployment).NotTo(BeNil(), "input.Name is required for WatchControllerLogs") + Expect(input.Deployment).NotTo(BeNil(), "input.Deployment is required for WatchControllerLogs") deployment := &appsv1.Deployment{} key, err := client.ObjectKeyFromObject(input.Deployment) diff --git a/test/framework/machine_helpers.go b/test/framework/machine_helpers.go index 
a49921f8c2e3..6025ba701d40 100644 --- a/test/framework/machine_helpers.go +++ b/test/framework/machine_helpers.go @@ -18,27 +18,116 @@ package framework import ( "context" + "fmt" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" - "github.com/pkg/errors" + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3" + "sigs.k8s.io/cluster-api/util/patch" ) -// WaitForMachinesToBeUpgradedInput is the input for WaitForMachinesToBeUpgraded. -type WaitForMachinesToBeUpgradedInput struct { +// GetMachinesByMachineDeploymentsInput is the input for GetMachinesByMachineDeployments. +type GetMachinesByMachineDeploymentsInput struct { + Lister Lister + ClusterName string + Namespace string + MachineDeployment clusterv1.MachineDeployment +} + +// GetMachinesByMachineDeployments returns Machine objects for a cluster belonging to a machine deployment. +// Important! this method relies on labels that are created by the CAPI controllers during the first reconciliation, so +// it is necessary to ensure this is already happened before calling it. +func GetMachinesByMachineDeployments(ctx context.Context, input GetMachinesByMachineDeploymentsInput) []clusterv1.Machine { + Expect(ctx).NotTo(BeNil(), "ctx is required for GetMachinesByMachineDeployments") + Expect(input.Lister).ToNot(BeNil(), "Invalid argument. input.Lister can't be nil when calling GetMachinesByMachineDeployments") + Expect(input.ClusterName).ToNot(BeEmpty(), "Invalid argument. input.ClusterName can't be empty when calling GetMachinesByMachineDeployments") + Expect(input.Namespace).ToNot(BeEmpty(), "Invalid argument. input.Namespace can't be empty when calling GetMachinesByMachineDeployments") + Expect(input.MachineDeployment).ToNot(BeNil(), "Invalid argument. 
input.MachineDeployment can't be nil when calling GetMachinesByMachineDeployments") + + opts := byClusterOptions(input.ClusterName, input.Namespace) + opts = append(opts, machineDeploymentOptions(input.MachineDeployment)...) + + machineList := &clusterv1.MachineList{} + Expect(input.Lister.List(ctx, machineList, opts...)).To(Succeed(), "Failed to list MachineList object for Cluster %s/%s", input.Namespace, input.ClusterName) + + return machineList.Items +} + +// GetMachinesByMachineHealthCheckInput is the input for GetMachinesByMachineHealthCheck. +type GetMachinesByMachineHealthCheckInput struct { + Lister Lister + ClusterName string + Namespace string + MachineHealthCheck clusterv1.MachineHealthCheck +} + +// GetMachinesByMachineHealthCheck returns Machine objects for a cluster that match with MachineHealthCheck selector. +// Important! this method relies on labels that are created by the CAPI controllers during the first reconciliation, so +// it is necessary to ensure this is already happened before calling it. +func GetMachinesByMachineHealthCheck(ctx context.Context, input GetMachinesByMachineHealthCheckInput) []clusterv1.Machine { + Expect(ctx).NotTo(BeNil(), "ctx is required for GetMachinesByMachineHealthCheck") + Expect(input.Lister).ToNot(BeNil(), "Invalid argument. input.Lister can't be nil when calling GetMachinesByMachineHealthCheck") + Expect(input.ClusterName).ToNot(BeEmpty(), "Invalid argument. input.ClusterName can't be empty when calling GetMachinesByMachineHealthCheck") + Expect(input.Namespace).ToNot(BeEmpty(), "Invalid argument. input.Namespace can't be empty when calling GetMachinesByMachineHealthCheck") + Expect(input.MachineHealthCheck).ToNot(BeNil(), "Invalid argument. input.MachineHealthCheck can't be nil when calling GetMachinesByMachineHealthCheck") + + opts := byClusterOptions(input.ClusterName, input.Namespace) + opts = append(opts, machineHealthCheckOptions(input.MachineHealthCheck)...)
+ + machineList := &clusterv1.MachineList{} + Expect(input.Lister.List(ctx, machineList, opts...)).To(Succeed(), "Failed to list MachineList object for Cluster %s/%s", input.Namespace, input.ClusterName) + + return machineList.Items +} + +// GetControlPlaneMachinesByClusterInput is the input for GetControlPlaneMachinesByCluster. +type GetControlPlaneMachinesByClusterInput struct { + Lister Lister + ClusterName string + Namespace string +} + +// GetControlPlaneMachinesByCluster returns the Machine objects for a cluster. +// Important! this method relies on labels that are created by the CAPI controllers during the first reconciliation, so +// it is necessary to ensure this is already happened before calling it. +func GetControlPlaneMachinesByCluster(ctx context.Context, input GetControlPlaneMachinesByClusterInput) []clusterv1.Machine { + Expect(ctx).NotTo(BeNil(), "ctx is required for GetControlPlaneMachinesByCluster") + Expect(input.Lister).ToNot(BeNil(), "Invalid argument. input.Lister can't be nil when calling GetControlPlaneMachinesByCluster") + Expect(input.ClusterName).ToNot(BeEmpty(), "Invalid argument. input.ClusterName can't be empty when calling GetControlPlaneMachinesByCluster") + Expect(input.Namespace).ToNot(BeEmpty(), "Invalid argument. input.Namespace can't be empty when calling GetControlPlaneMachinesByCluster") + + options := append(byClusterOptions(input.ClusterName, input.Namespace), controlPlaneMachineOptions()...) + + machineList := &clusterv1.MachineList{} + Expect(input.Lister.List(ctx, machineList, options...)).To(Succeed(), "Failed to list MachineList object for Cluster %s/%s", input.Namespace, input.ClusterName) + + return machineList.Items +} + +// WaitForControlPlaneMachinesToBeUpgradedInput is the input for WaitForControlPlaneMachinesToBeUpgraded. 
+type WaitForControlPlaneMachinesToBeUpgradedInput struct { Lister Lister Cluster *clusterv1.Cluster KubernetesUpgradeVersion string MachineCount int } -// WaitForMachinesToBeUpgraded waits until all machines are upgraded to the correct kubernetes version. -func WaitForMachinesToBeUpgraded(ctx context.Context, input WaitForMachinesToBeUpgradedInput, intervals ...interface{}) { +// WaitForControlPlaneMachinesToBeUpgraded waits until all machines are upgraded to the correct kubernetes version. +func WaitForControlPlaneMachinesToBeUpgraded(ctx context.Context, input WaitForControlPlaneMachinesToBeUpgradedInput, intervals ...interface{}) { + Expect(ctx).NotTo(BeNil(), "ctx is required for WaitForControlPlaneMachinesToBeUpgraded") + Expect(input.Lister).ToNot(BeNil(), "Invalid argument. input.Lister can't be nil when calling WaitForControlPlaneMachinesToBeUpgraded") + Expect(input.KubernetesUpgradeVersion).ToNot(BeEmpty(), "Invalid argument. input.KubernetesUpgradeVersion can't be empty when calling WaitForControlPlaneMachinesToBeUpgraded") + Expect(input.MachineCount).To(BeNumerically(">", 0), "Invalid argument. 
input.MachineCount can't be smaller than 1 when calling WaitForControlPlaneMachinesToBeUpgraded") + By("ensuring all machines have upgraded kubernetes version") + fmt.Fprintf(GinkgoWriter, "Ensuring all control plane Machines have upgraded kubernetes version %s\n", input.KubernetesUpgradeVersion) + Eventually(func() (int, error) { - machines := GetControlPlaneMachinesByCluster(context.TODO(), GetMachinesByClusterInput{ + machines := GetControlPlaneMachinesByCluster(context.TODO(), GetControlPlaneMachinesByClusterInput{ Lister: input.Lister, ClusterName: input.Cluster.Name, Namespace: input.Cluster.Namespace, @@ -56,3 +145,69 @@ func WaitForMachinesToBeUpgraded(ctx context.Context, input WaitForMachinesToBeU return upgraded, nil }, intervals...).Should(Equal(input.MachineCount)) } + +// WaitForMachineDeploymentMachinesToBeUpgradedInput is the input for WaitForMachineDeploymentMachinesToBeUpgraded. +type WaitForMachineDeploymentMachinesToBeUpgradedInput struct { + Lister Lister + Cluster *clusterv1.Cluster + KubernetesUpgradeVersion string + MachineCount int + MachineDeployment clusterv1.MachineDeployment +} + +// WaitForMachineDeploymentMachinesToBeUpgraded waits until all machines belonging to a MachineDeployment are upgraded to the correct kubernetes version. +func WaitForMachineDeploymentMachinesToBeUpgraded(ctx context.Context, input WaitForMachineDeploymentMachinesToBeUpgradedInput, intervals ...interface{}) { + Expect(ctx).NotTo(BeNil(), "ctx is required for WaitForMachineDeploymentMachinesToBeUpgraded") + Expect(input.Lister).ToNot(BeNil(), "Invalid argument. input.Lister can't be nil when calling WaitForMachineDeploymentMachinesToBeUpgraded") + Expect(input.Cluster).ToNot(BeNil(), "Invalid argument. input.Cluster can't be nil when calling WaitForMachineDeploymentMachinesToBeUpgraded") + Expect(input.KubernetesUpgradeVersion).ToNot(BeNil(), "Invalid argument.
input.KubernetesUpgradeVersion can't be nil when calling WaitForMachineDeploymentMachinesToBeUpgraded") + Expect(input.MachineDeployment).ToNot(BeNil(), "Invalid argument. input.MachineDeployment can't be nil when calling WaitForMachineDeploymentMachinesToBeUpgraded") + Expect(input.MachineCount).To(BeNumerically(">", 0), "Invalid argument. input.MachineCount can't be smaller than 1 when calling WaitForMachineDeploymentMachinesToBeUpgraded") + + fmt.Fprintf(GinkgoWriter, "Ensuring all MachineDeployment Machines have upgraded kubernetes version %s\n", input.KubernetesUpgradeVersion) + Eventually(func() (int, error) { + machines := GetMachinesByMachineDeployments(context.TODO(), GetMachinesByMachineDeploymentsInput{ + Lister: input.Lister, + ClusterName: input.Cluster.Name, + Namespace: input.Cluster.Namespace, + MachineDeployment: input.MachineDeployment, + }) + + upgraded := 0 + for _, machine := range machines { + if *machine.Spec.Version == input.KubernetesUpgradeVersion { + upgraded++ + } + } + if len(machines) > upgraded { + return 0, errors.New("old nodes remain") + } + return upgraded, nil + }, intervals...).Should(Equal(input.MachineCount)) +} + +// PatchNodeConditionInput is the input for PatchNodeCondition. +type PatchNodeConditionInput struct { + ClusterProxy ClusterProxy + Cluster *clusterv1.Cluster + NodeCondition corev1.NodeCondition + Machine clusterv1.Machine +} + +// PatchNodeCondition patches a node condition to any one of the machines with a node ref. +func PatchNodeCondition(ctx context.Context, input PatchNodeConditionInput) { + Expect(ctx).NotTo(BeNil(), "ctx is required for PatchNodeConditions") + Expect(input.ClusterProxy).ToNot(BeNil(), "Invalid argument. input.ClusterProxy can't be nil when calling PatchNodeConditions") + Expect(input.Cluster).ToNot(BeNil(), "Invalid argument. input.Cluster can't be nil when calling PatchNodeConditions") + Expect(input.NodeCondition).ToNot(BeNil(), "Invalid argument. 
input.NodeCondition can't be nil when calling PatchNodeConditions") + Expect(input.Machine).ToNot(BeNil(), "Invalid argument. input.Machine can't be nil when calling PatchNodeConditions") + + fmt.Fprintf(GinkgoWriter, "Patching the node condition to the node\n") + Expect(input.Machine.Status.NodeRef).ToNot(BeNil()) + node := &corev1.Node{} + Expect(input.ClusterProxy.GetWorkloadCluster(ctx, input.Cluster.Namespace, input.Cluster.Name).GetClient().Get(ctx, types.NamespacedName{Name: input.Machine.Status.NodeRef.Name, Namespace: input.Machine.Status.NodeRef.Namespace}, node)).To(Succeed()) + patchHelper, err := patch.NewHelper(node, input.ClusterProxy.GetWorkloadCluster(ctx, input.Cluster.Namespace, input.Cluster.Name).GetClient()) + Expect(err).ToNot(HaveOccurred()) + node.Status.Conditions = append(node.Status.Conditions, input.NodeCondition) + Expect(patchHelper.Patch(ctx, node)).To(Succeed()) +} diff --git a/test/framework/machinedeployment_helpers.go b/test/framework/machinedeployment_helpers.go index cd10dc1ab878..d3f8f176f231 100644 --- a/test/framework/machinedeployment_helpers.go +++ b/test/framework/machinedeployment_helpers.go @@ -18,14 +18,18 @@ package framework import ( "context" + "fmt" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" - "github.com/pkg/errors" + "github.com/pkg/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3" + "sigs.k8s.io/cluster-api/util" + "sigs.k8s.io/cluster-api/util/patch" "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -79,6 +83,10 @@ type WaitForMachineDeploymentNodesToExistInput struct { // WaitForMachineDeploymentNodesToExist waits until all nodes associated with a machine deployment exist. 
func WaitForMachineDeploymentNodesToExist(ctx context.Context, input WaitForMachineDeploymentNodesToExistInput, intervals ...interface{}) { + Expect(ctx).NotTo(BeNil(), "ctx is required for WaitForMachineDeploymentNodesToExist") + Expect(input.Lister).ToNot(BeNil(), "Invalid argument. input.Lister can't be nil when calling WaitForMachineDeploymentNodesToExist") + Expect(input.MachineDeployment).ToNot(BeNil(), "Invalid argument. input.MachineDeployment can't be nil when calling WaitForMachineDeploymentNodesToExist") + By("waiting for the workload nodes to exist") Eventually(func() (int, error) { selectorMap, err := metav1.LabelSelectorAsMap(&input.MachineDeployment.Spec.Selector) @@ -137,3 +145,147 @@ func DiscoveryAndWaitForMachineDeployments(ctx context.Context, input DiscoveryA } return machineDeployments } + +// UpgradeMachineDeploymentsAndWaitInput is the input type for UpgradeMachineDeploymentsAndWait. +type UpgradeMachineDeploymentsAndWaitInput struct { + ClusterProxy ClusterProxy + Cluster *clusterv1.Cluster + UpgradeVersion string + MachineDeployments []*clusterv1.MachineDeployment + WaitForMachinesToBeUpgraded []interface{} +} + +// UpgradeMachineDeploymentsAndWait upgrades a machine deployment and waits for its machines to be upgraded. +func UpgradeMachineDeploymentsAndWait(ctx context.Context, input UpgradeMachineDeploymentsAndWaitInput) { + Expect(ctx).NotTo(BeNil(), "ctx is required for UpgradeMachineDeploymentsAndWait") + Expect(input.ClusterProxy).ToNot(BeNil(), "Invalid argument. input.ClusterProxy can't be nil when calling UpgradeMachineDeploymentsAndWait") + Expect(input.Cluster).ToNot(BeNil(), "Invalid argument. input.Cluster can't be nil when calling UpgradeMachineDeploymentsAndWait") + Expect(input.UpgradeVersion).ToNot(BeNil(), "Invalid argument. input.UpgradeVersion can't be nil when calling UpgradeMachineDeploymentsAndWait") + Expect(input.MachineDeployments).ToNot(BeEmpty(), "Invalid argument. 
input.MachineDeployments can't be empty when calling UpgradeMachineDeploymentsAndWait") + + mgmtClient := input.ClusterProxy.GetClient() + + for _, deployment := range input.MachineDeployments { + fmt.Fprintf(GinkgoWriter, "Patching the new kubernetes version to Machine Deployment %s/%s\n", deployment.Namespace, deployment.Name) + patchHelper, err := patch.NewHelper(deployment, mgmtClient) + Expect(err).ToNot(HaveOccurred()) + + oldVersion := deployment.Spec.Template.Spec.Version + deployment.Spec.Template.Spec.Version = &input.UpgradeVersion + Expect(patchHelper.Patch(context.TODO(), deployment)).To(Succeed()) + + fmt.Fprintf(GinkgoWriter, "Waiting for Kubernetes versions of machines in MachineDeployment %s/%s to be upgraded from %s to %s\n", + deployment.Namespace, deployment.Name, *oldVersion, input.UpgradeVersion) + WaitForMachineDeploymentMachinesToBeUpgraded(ctx, WaitForMachineDeploymentMachinesToBeUpgradedInput{ + Lister: mgmtClient, + Cluster: input.Cluster, + MachineCount: int(*deployment.Spec.Replicas), + KubernetesUpgradeVersion: input.UpgradeVersion, + MachineDeployment: *deployment, + }, input.WaitForMachinesToBeUpgraded...) + } +} + +// WaitForMachineDeploymentRollingUpgradeToStartInput is the input for WaitForMachineDeploymentRollingUpgradeToStart. +type WaitForMachineDeploymentRollingUpgradeToStartInput struct { + Getter Getter + MachineDeployment *clusterv1.MachineDeployment +} + +// WaitForMachineDeploymentRollingUpgradeToStart waits until rolling upgrade starts. +func WaitForMachineDeploymentRollingUpgradeToStart(ctx context.Context, input WaitForMachineDeploymentRollingUpgradeToStartInput, intervals ...interface{}) { + Expect(ctx).NotTo(BeNil(), "ctx is required for WaitForMachineDeploymentRollingUpgradeToStart") + Expect(input.Getter).ToNot(BeNil(), "Invalid argument. input.Getter can't be nil when calling WaitForMachineDeploymentRollingUpgradeToStart") + Expect(input.MachineDeployment).ToNot(BeNil(), "Invalid argument. 
input.MachineDeployment can't be nil when calling WaitForMachineDeploymentRollingUpgradeToStarts") + + fmt.Fprintf(GinkgoWriter, "Waiting for MachineDeployment rolling upgrade to start\n") + Eventually(func() bool { + md := &clusterv1.MachineDeployment{} + Expect(input.Getter.Get(ctx, client.ObjectKey{Namespace: input.MachineDeployment.Namespace, Name: input.MachineDeployment.Name}, md)).To(Succeed()) + return md.Status.Replicas != md.Status.AvailableReplicas + }, intervals...).Should(BeTrue()) +} + +// WaitForMachineDeploymentRollingUpgradeToCompleteInput is the input for WaitForMachineDeploymentRollingUpgradeToComplete. +type WaitForMachineDeploymentRollingUpgradeToCompleteInput struct { + Getter Getter + MachineDeployment *clusterv1.MachineDeployment +} + +// WaitForMachineDeploymentRollingUpgradeToComplete waits until rolling upgrade is complete. +func WaitForMachineDeploymentRollingUpgradeToComplete(ctx context.Context, input WaitForMachineDeploymentRollingUpgradeToCompleteInput, intervals ...interface{}) { + Expect(ctx).NotTo(BeNil(), "ctx is required for WaitForMachineDeploymentRollingUpgradeToComplete") + Expect(input.Getter).ToNot(BeNil(), "Invalid argument. input.Getter can't be nil when calling WaitForMachineDeploymentRollingUpgradeToComplete") + Expect(input.MachineDeployment).ToNot(BeNil(), "Invalid argument. input.MachineDeployment can't be nil when calling WaitForMachineDeploymentRollingUpgradeToComplete") + + fmt.Fprintf(GinkgoWriter, "Waiting for MachineDeployment rolling upgrade to complete\n") + Eventually(func() bool { + md := &clusterv1.MachineDeployment{} + Expect(input.Getter.Get(ctx, client.ObjectKey{Namespace: input.MachineDeployment.Namespace, Name: input.MachineDeployment.Name}, md)).To(Succeed()) + return md.Status.Replicas == md.Status.AvailableReplicas + }, intervals...).Should(BeTrue()) +} + +// UpgradeMachineDeploymentInfrastructureRefAndWaitInput is the input type for UpgradeMachineDeploymentInfrastructureRefAndWait.
+type UpgradeMachineDeploymentInfrastructureRefAndWaitInput struct { + ClusterProxy ClusterProxy + Cluster *clusterv1.Cluster + MachineDeployments []*clusterv1.MachineDeployment + WaitForMachinesToBeUpgraded []interface{} +} + +// UpgradeMachineDeploymentInfrastructureRefAndWait upgrades a machine deployment infrastructure ref and waits for its machines to be upgraded. +func UpgradeMachineDeploymentInfrastructureRefAndWait(ctx context.Context, input UpgradeMachineDeploymentInfrastructureRefAndWaitInput) { + Expect(ctx).NotTo(BeNil(), "ctx is required for UpgradeMachineDeploymentInfrastructureRefAndWait") + Expect(input.ClusterProxy).ToNot(BeNil(), "Invalid argument. input.ClusterProxy can't be nil when calling UpgradeMachineDeploymentInfrastructureRefAndWait") + Expect(input.Cluster).ToNot(BeNil(), "Invalid argument. input.Cluster can't be nil when calling UpgradeMachineDeploymentInfrastructureRefAndWait") + Expect(input.MachineDeployments).ToNot(BeEmpty(), "Invalid argument. input.MachineDeployments can't be empty when calling UpgradeMachineDeploymentInfrastructureRefAndWait") + + mgmtClient := input.ClusterProxy.GetClient() + + for _, deployment := range input.MachineDeployments { + fmt.Fprintf(GinkgoWriter, "Patching the new infrastructure ref to Machine Deployment %s/%s\n", deployment.Namespace, deployment.Name) + // Retrieve infra object + infraRef := deployment.Spec.Template.Spec.InfrastructureRef + infraObj := &unstructured.Unstructured{} + infraObj.SetGroupVersionKind(infraRef.GroupVersionKind()) + key := client.ObjectKey{ + Namespace: input.Cluster.Namespace, + Name: infraRef.Name, + } + Expect(mgmtClient.Get(ctx, key, infraObj)).NotTo(HaveOccurred()) + + // Creates a new infra object + newInfraObj := infraObj + newInfraObjName := fmt.Sprintf("%s-%s", infraRef.Name, util.RandomString(6)) + newInfraObj.SetName(newInfraObjName) + newInfraObj.SetResourceVersion("") + Expect(mgmtClient.Create(ctx, newInfraObj)).NotTo(HaveOccurred()) + + // Patch the new infra 
object's ref to the machine deployment + patchHelper, err := patch.NewHelper(deployment, mgmtClient) + Expect(err).ToNot(HaveOccurred()) + infraRef.Name = newInfraObjName + deployment.Spec.Template.Spec.InfrastructureRef = infraRef + Expect(patchHelper.Patch(context.TODO(), deployment)).To(Succeed()) + + fmt.Fprintf(GinkgoWriter, "Waiting for rolling upgrade to start.\n") + WaitForMachineDeploymentRollingUpgradeToStart(ctx, WaitForMachineDeploymentRollingUpgradeToStartInput{ + Getter: mgmtClient, + MachineDeployment: deployment, + }, input.WaitForMachinesToBeUpgraded...) + + fmt.Fprintf(GinkgoWriter, "Waiting for rolling upgrade to complete.\n") + WaitForMachineDeploymentRollingUpgradeToComplete(ctx, WaitForMachineDeploymentRollingUpgradeToCompleteInput{ + Getter: mgmtClient, + MachineDeployment: deployment, + }, input.WaitForMachinesToBeUpgraded...) + } +} + +// machineDeploymentOptions returns a set of ListOptions that allows to get all machine objects belonging to a machine deployment. +func machineDeploymentOptions(deployment clusterv1.MachineDeployment) []client.ListOption { + return []client.ListOption{ + client.MatchingLabels(deployment.Spec.Selector.MatchLabels), + } +} diff --git a/test/framework/machinehealthcheck_helpers.go b/test/framework/machinehealthcheck_helpers.go new file mode 100644 index 000000000000..b1abed05f35c --- /dev/null +++ b/test/framework/machinehealthcheck_helpers.go @@ -0,0 +1,191 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package framework + +import ( + "context" + "fmt" + "time" + + "sigs.k8s.io/cluster-api/util/patch" + + "k8s.io/apimachinery/pkg/types" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// DiscoverMachineHealthCheckAndWaitInput is the input for DiscoverMachineHealthCheckAndWait. +type DiscoverMachineHealthCheckAndWaitInput struct { + ClusterProxy ClusterProxy + Cluster *clusterv1.Cluster + MachineDeployments []*clusterv1.MachineDeployment + WaitForMachineRemediation []interface{} +} + +// DiscoverMachineHealthChecksAndWait patches an unhealthy node condition to one node in each MachineDeployment and then waits for remediation. +func DiscoverMachineHealthChecksAndWait(ctx context.Context, input DiscoverMachineHealthCheckAndWaitInput) { + Expect(ctx).NotTo(BeNil(), "ctx is required for DiscoverMachineHealthChecksAndWait") + Expect(input.ClusterProxy).ToNot(BeNil(), "Invalid argument. input.ClusterProxy can't be nil when calling DiscoverMachineHealthChecksAndWait") + Expect(input.Cluster).ToNot(BeNil(), "Invalid argument. input.Cluster can't be nil when calling DiscoverMachineHealthChecksAndWait") + Expect(input.MachineDeployments).ToNot(BeEmpty(), "Invalid argument.
input.MachineDeployments can't be empty when calling DiscoverMachineHealthChecksAndWait") + + machineHealthChecks := GetMachineHealthChecksByCluster(ctx, GetMachineHealthChecksByClusterInput{ + Lister: input.ClusterProxy.GetClient(), + ClusterName: input.Cluster.Name, + Namespace: input.Cluster.Namespace, + }) + + fmt.Fprintf(GinkgoWriter, "Patching an unhealthy condition to nodes and waiting for remediation\n") + for _, mhc := range machineHealthChecks { + if len(mhc.Spec.UnhealthyConditions) < 1 { + continue + } + fmt.Fprintf(GinkgoWriter, "Patching MachineDeployments with MachinehealthCheckLabel\n") + for _, md := range input.MachineDeployments { + if len(mhc.Spec.Selector.MatchLabels) < 0 { + continue + } + selectorMap, err := metav1.LabelSelectorAsMap(&md.Spec.Selector) + Expect(err).ToNot(HaveOccurred()) + + ms := &clusterv1.MachineSetList{} + Expect(input.ClusterProxy.GetClient().List(ctx, ms, client.InNamespace(input.Cluster.Namespace), client.MatchingLabels(selectorMap))).To(Succeed()) + + if len(ms.Items) == 0 { + continue + } + machineSet := ms.Items[0] + selectorMap, err = metav1.LabelSelectorAsMap(&machineSet.Spec.Selector) + Expect(err).ToNot(HaveOccurred()) + + machines := &clusterv1.MachineList{} + Expect(input.ClusterProxy.GetClient().List(ctx, machines, client.InNamespace(machineSet.Namespace), client.MatchingLabels(selectorMap))).To(Succeed()) + + for _, machine := range machines.Items { + patchHelper, err := patch.NewHelper(&machine, input.ClusterProxy.GetClient()) + Expect(err).ToNot(HaveOccurred()) + machine.SetLabels(mhc.Spec.Selector.MatchLabels) + Expect(patchHelper.Patch(ctx, &machine)).To(Succeed()) + } + } + + machines := GetMachinesByMachineHealthCheck(context.TODO(), GetMachinesByMachineHealthCheckInput{ + Lister: input.ClusterProxy.GetClient(), + ClusterName: input.Cluster.Name, + Namespace: input.Cluster.Namespace, + MachineHealthCheck: *mhc, + }) + if len(machines) == 0 { + continue + } + + fmt.Fprintf(GinkgoWriter, "Patching 
MachineHealthCheck unhealthy condition to one of the nodes\n") + unhealthyNodeCondition := corev1.NodeCondition{ + Type: mhc.Spec.UnhealthyConditions[0].Type, + Status: mhc.Spec.UnhealthyConditions[0].Status, + LastTransitionTime: metav1.Time{Time: time.Now()}, + } + PatchNodeCondition(ctx, PatchNodeConditionInput{ + ClusterProxy: input.ClusterProxy, + Cluster: input.Cluster, + NodeCondition: unhealthyNodeCondition, + Machine: machines[0], + }) + } + + fmt.Fprintf(GinkgoWriter, "Waiting for remediation\n") + WaitForMachineHealthCheckToRemediateUnhealthyNodeCondition(ctx, WaitForMachineHealthCheckToRemediateUnhealthyNodeConditionInput{ + ClusterProxy: input.ClusterProxy, + Cluster: input.Cluster, + MachineHealthChecks: machineHealthChecks, + }, input.WaitForMachineRemediation...) +} + +// GetMachineHealthChecksByClusterInput is the input for GetMachineHealthChecksByCluster. +type GetMachineHealthChecksByClusterInput struct { + Lister Lister + ClusterName string + Namespace string +} + +// GetMachineHealthChecksByCluster returns the MachineHealthCheck objects for a cluster. +// Important! This method relies on labels that are created by the CAPI controllers during the first reconciliation, so +// it is necessary to ensure this has already happened before calling it.
+func GetMachineHealthChecksByCluster(ctx context.Context, input GetMachineHealthChecksByClusterInput) []*clusterv1.MachineHealthCheck { + machineHealthCheckList := &clusterv1.MachineHealthCheckList{} + Expect(input.Lister.List(ctx, machineHealthCheckList, byClusterOptions(input.ClusterName, input.Namespace)...)).To(Succeed(), "Failed to list MachineDeployments object for Cluster %s/%s", input.Namespace, input.ClusterName) + + machineHealthChecks := make([]*clusterv1.MachineHealthCheck, len(machineHealthCheckList.Items)) + for i := range machineHealthCheckList.Items { + machineHealthChecks[i] = &machineHealthCheckList.Items[i] + } + return machineHealthChecks +} + +// machineHealthCheckOptions returns a set of ListOptions that allows to get all machine objects belonging to a MachineHealthCheck. +func machineHealthCheckOptions(machineHealthCheck clusterv1.MachineHealthCheck) []client.ListOption { + return []client.ListOption{ + client.MatchingLabels(machineHealthCheck.Spec.Selector.MatchLabels), + } +} + +// WaitForMachineHealthCheckToRemediateUnhealthyNodeConditionInput is the input for WaitForMachineHealthCheckToRemediateUnhealthyNodeCondition. +type WaitForMachineHealthCheckToRemediateUnhealthyNodeConditionInput struct { + ClusterProxy ClusterProxy + Cluster *clusterv1.Cluster + MachineHealthChecks []*clusterv1.MachineHealthCheck +} + +// WaitForMachineHealthCheckToRemediateUnhealthyNodeCondition waits until the unhealthy node condition is remediated on the machines selected by each given MachineHealthCheck. +func WaitForMachineHealthCheckToRemediateUnhealthyNodeCondition(ctx context.Context, input WaitForMachineHealthCheckToRemediateUnhealthyNodeConditionInput, intervals ...interface{}) { + Expect(ctx).NotTo(BeNil(), "ctx is required for PatchNodeConditions") + Expect(input.ClusterProxy).ToNot(BeNil(), "Invalid argument. input.ClusterProxy can't be nil when calling PatchNodeConditions") + Expect(input.Cluster).ToNot(BeNil(), "Invalid argument.
input.Cluster can't be nil when calling PatchNodeConditions") + + for _, mhc := range input.MachineHealthChecks { + fmt.Fprintf(GinkgoWriter, "Waiting until the node with unhealthy node condition is remediated\n") + Eventually(func() bool { + machines := GetMachinesByMachineHealthCheck(context.TODO(), GetMachinesByMachineHealthCheckInput{ + Lister: input.ClusterProxy.GetClient(), + ClusterName: input.Cluster.Name, + Namespace: input.Cluster.Namespace, + MachineHealthCheck: *mhc, + }) + if len(machines) == 0 { + return true + } + + for _, machine := range machines { + if machine.Status.NodeRef == nil { + return false + } + node := &corev1.Node{} + Expect(input.ClusterProxy.GetWorkloadCluster(ctx, input.Cluster.Namespace, input.Cluster.Name).GetClient().Get(ctx, types.NamespacedName{Name: machine.Status.NodeRef.Name, Namespace: machine.Status.NodeRef.Namespace}, node)).To(Succeed()) + if hasMatchingLabels(mhc.Spec.Selector, node.Labels) { + return false + } + } + return true + }, intervals...).Should(BeTrue()) + } +} diff --git a/test/infrastructure/docker/config/default/manager_image_patch.yaml b/test/infrastructure/docker/config/default/manager_image_patch.yaml index a2be79bb164a..078b7b393ae4 100644 --- a/test/infrastructure/docker/config/default/manager_image_patch.yaml +++ b/test/infrastructure/docker/config/default/manager_image_patch.yaml @@ -8,5 +8,5 @@ spec: spec: containers: # Change the value of image field below to your controller image URL - - image: gcr.io/k8s-staging-capi-docker/capd-manager:dev + - image: gcr.io/k8s-staging-cluster-api/capd-manager-amd64:dev name: manager