From d55c54a8685252c2c8066e27e654bd037b924cff Mon Sep 17 00:00:00 2001 From: Peter Hunt Date: Fri, 5 Jul 2024 12:49:59 -0400 Subject: [PATCH 1/4] UPSTREAM: : kubelet/cm: fix bug where kubelet restarts from missing cpuset cgroup on None cpumanager policy, cgroupv2, and systemd cgroup manager, kubelet could get into a situation where it believes the cpuset cgroup was created (by libcontainer in the cgroupfs) but systemd has deleted it, as it wasn't requested to create it. This causes one unnecessary restart, as kubelet fails with `failed to initialize top level QOS containers: root container [kubepods] doesn't exist.` This only causes one restart because the kubelet skips recreating the cgroup if it already exists, but it's still a bother and is fixed this way adapted version of https://github.com/kubernetes/kubernetes/pull/125923/commits/c51195dbd02abcab28821a77f657c78857c43519, avoid merge conflict this can be dropped in the 4.19 rebase Signed-off-by: Peter Hunt --- pkg/kubelet/cm/cgroup_manager_linux.go | 3 ++ pkg/kubelet/cm/container_manager_linux.go | 5 ++ .../cm/node_container_manager_linux.go | 50 +++++++++++++++---- pkg/kubelet/cm/types.go | 3 ++ 4 files changed, 52 insertions(+), 9 deletions(-) diff --git a/pkg/kubelet/cm/cgroup_manager_linux.go b/pkg/kubelet/cm/cgroup_manager_linux.go index 11267c96db198..0a668a76895d6 100644 --- a/pkg/kubelet/cm/cgroup_manager_linux.go +++ b/pkg/kubelet/cm/cgroup_manager_linux.go @@ -383,6 +383,9 @@ func (m *cgroupManagerImpl) toResources(resourceConfig *ResourceConfig) *libcont if resourceConfig.PidsLimit != nil { resources.PidsLimit = *resourceConfig.PidsLimit } + if !resourceConfig.CPUSet.IsEmpty() { + resources.CpusetCpus = resourceConfig.CPUSet.String() + } m.maybeSetHugetlb(resourceConfig, resources) diff --git a/pkg/kubelet/cm/container_manager_linux.go b/pkg/kubelet/cm/container_manager_linux.go index 7b6899c776b95..2120f06db82e1 100644 --- a/pkg/kubelet/cm/container_manager_linux.go +++ b/pkg/kubelet/cm/container_manager_linux.go @@ -34,6 +34,7 @@ import ( "github.com/opencontainers/runc/libcontainer/configs" "k8s.io/klog/v2" "k8s.io/mount-utils" + "k8s.io/utils/cpuset" utilpath "k8s.io/utils/path" libcontaineruserns "github.com/opencontainers/runc/libcontainer/userns" @@ -131,6 +132,10 @@ type containerManagerImpl struct { topologyManager topologymanager.Manager // Interface for Dynamic Resource Allocation management. draManager dra.Manager + // The full set of CPUs on the node. This field is set lazily, and is used to make sure + // the `cpuset` cgroup hierarchy is created on cgroup v2 when cpumanager is using a + // None policy. 
+ allCPUs cpuset.CPUSet } type features struct { diff --git a/pkg/kubelet/cm/node_container_manager_linux.go b/pkg/kubelet/cm/node_container_manager_linux.go index 74221c67047b4..31d44d9db3845 100644 --- a/pkg/kubelet/cm/node_container_manager_linux.go +++ b/pkg/kubelet/cm/node_container_manager_linux.go @@ -31,9 +31,12 @@ import ( utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/klog/v2" kubefeatures "k8s.io/kubernetes/pkg/features" + "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager" + "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology" "k8s.io/kubernetes/pkg/kubelet/events" "k8s.io/kubernetes/pkg/kubelet/stats/pidlimit" kubetypes "k8s.io/kubernetes/pkg/kubelet/types" + "k8s.io/utils/cpuset" ) const ( @@ -52,7 +55,7 @@ func (cm *containerManagerImpl) createNodeAllocatableCgroups() error { cgroupConfig := &CgroupConfig{ Name: cm.cgroupRoot, // The default limits for cpu shares can be very low which can lead to CPU starvation for pods. - ResourceParameters: getCgroupConfig(nodeAllocatable), + ResourceParameters: cm.getCgroupConfig(nodeAllocatable), } if cm.cgroupManager.Exists(cgroupConfig.Name) { return nil @@ -80,7 +83,7 @@ func (cm *containerManagerImpl) enforceNodeAllocatableCgroups() error { cgroupConfig := &CgroupConfig{ Name: cm.cgroupRoot, - ResourceParameters: getCgroupConfig(nodeAllocatable), + ResourceParameters: cm.getCgroupConfig(nodeAllocatable), } // Using ObjectReference for events as the node maybe not cached; refer to #42701 for detail. @@ -114,7 +117,7 @@ func (cm *containerManagerImpl) enforceNodeAllocatableCgroups() error { // Now apply kube reserved and system reserved limits if required. if nc.EnforceNodeAllocatable.Has(kubetypes.SystemReservedEnforcementKey) { klog.V(2).InfoS("Enforcing system reserved on cgroup", "cgroupName", nc.SystemReservedCgroupName, "limits", nc.SystemReserved) - if err := enforceExistingCgroup(cm.cgroupManager, cm.cgroupManager.CgroupName(nc.SystemReservedCgroupName), nc.SystemReserved); err != nil { + if err := cm.enforceExistingCgroup(nc.SystemReservedCgroupName, nc.SystemReserved); err != nil { message := fmt.Sprintf("Failed to enforce System Reserved Cgroup Limits on %q: %v", nc.SystemReservedCgroupName, err) cm.recorder.Event(nodeRef, v1.EventTypeWarning, events.FailedNodeAllocatableEnforcement, message) return fmt.Errorf(message) @@ -123,7 +126,7 @@ func (cm *containerManagerImpl) enforceNodeAllocatableCgroups() error { } if nc.EnforceNodeAllocatable.Has(kubetypes.KubeReservedEnforcementKey) { klog.V(2).InfoS("Enforcing kube reserved on cgroup", "cgroupName", nc.KubeReservedCgroupName, "limits", nc.KubeReserved) - if err := enforceExistingCgroup(cm.cgroupManager, cm.cgroupManager.CgroupName(nc.KubeReservedCgroupName), nc.KubeReserved); err != nil { + if err := cm.enforceExistingCgroup(nc.KubeReservedCgroupName, nc.KubeReserved); err != nil { message := fmt.Sprintf("Failed to enforce Kube Reserved Cgroup Limits on %q: %v", nc.KubeReservedCgroupName, err) cm.recorder.Event(nodeRef, v1.EventTypeWarning, events.FailedNodeAllocatableEnforcement, message) return fmt.Errorf(message) @@ -134,8 +137,9 @@ func (cm *containerManagerImpl) enforceNodeAllocatableCgroups() error { } // enforceExistingCgroup updates the limits `rl` on existing cgroup `cName` using `cgroupManager` interface. 
-func enforceExistingCgroup(cgroupManager CgroupManager, cName CgroupName, rl v1.ResourceList) error { - rp := getCgroupConfig(rl) +func (cm *containerManagerImpl) enforceExistingCgroup(cNameStr string, rl v1.ResourceList) error { + cName := cm.cgroupManager.CgroupName(cNameStr) + rp := cm.getCgroupConfig(rl) if rp == nil { return fmt.Errorf("%q cgroup is not configured properly", cName) } @@ -156,17 +160,17 @@ func enforceExistingCgroup(cgroupManager CgroupManager, cName CgroupName, rl v1. ResourceParameters: rp, } klog.V(4).InfoS("Enforcing limits on cgroup", "cgroupName", cName, "cpuShares", cgroupConfig.ResourceParameters.CPUShares, "memory", cgroupConfig.ResourceParameters.Memory, "pidsLimit", cgroupConfig.ResourceParameters.PidsLimit) - if err := cgroupManager.Validate(cgroupConfig.Name); err != nil { + if err := cm.cgroupManager.Validate(cgroupConfig.Name); err != nil { return err } - if err := cgroupManager.Update(cgroupConfig); err != nil { + if err := cm.cgroupManager.Update(cgroupConfig); err != nil { return err } return nil } // getCgroupConfig returns a ResourceConfig object that can be used to create or update cgroups via CgroupManager interface. -func getCgroupConfig(rl v1.ResourceList) *ResourceConfig { +func (cm *containerManagerImpl) getCgroupConfig(rl v1.ResourceList) *ResourceConfig { // TODO(vishh): Set CPU Quota if necessary. if rl == nil { return nil @@ -188,9 +192,37 @@ func getCgroupConfig(rl v1.ResourceList) *ResourceConfig { } rc.HugePageLimit = HugePageLimits(rl) + // In the case of a None policy, cgroupv2 and systemd cgroup manager, we must make sure systemd is aware of the cpuset cgroup. + // By default, systemd will not create it, as we've not chosen to delegate it, and we haven't included it in the Apply() request. + // However, this causes a bug where kubelet restarts unnecessarily (cpuset cgroup is created in the cgroupfs, but systemd + // doesn't know about it and deletes it, and then kubelet doesn't continue because the cgroup isn't configured as expected). + // An alternative is to delegate the `cpuset` cgroup to the kubelet, but that would require some plumbing in libcontainer, + // and this is sufficient. + // Only do so on None policy, as Static policy will do its own updating of the cpuset. + if cm.NodeConfig.CPUManagerPolicy == string(cpumanager.PolicyNone) { + if cm.allCPUs.IsEmpty() { + cm.allCPUs = cm.getAllCPUs() + } + rc.CPUSet = cm.allCPUs + } + return &rc } +func (cm *containerManagerImpl) getAllCPUs() cpuset.CPUSet { + machineInfo, err := cm.cadvisorInterface.MachineInfo() + if err != nil { + klog.V(4).InfoS("Failed to get machine info to get default cpuset", "error", err) + return cpuset.CPUSet{} + } + topo, err := topology.Discover(machineInfo) + if err != nil { + klog.V(4).InfoS("Failed to get topology info to get default cpuset", "error", err) + return cpuset.CPUSet{} + } + return topo.CPUDetails.CPUs() +} + // GetNodeAllocatableAbsolute returns the absolute value of Node Allocatable which is primarily useful for enforcement. // Note that not all resources that are available on the node are included in the returned list of resources. // Returns a ResourceList. diff --git a/pkg/kubelet/cm/types.go b/pkg/kubelet/cm/types.go index 01011900a6051..85dcbcf9bc517 100644 --- a/pkg/kubelet/cm/types.go +++ b/pkg/kubelet/cm/types.go @@ -19,12 +19,15 @@ package cm import ( v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/utils/cpuset" ) // ResourceConfig holds information about all the supported cgroup resource parameters. 
type ResourceConfig struct { // Memory limit (in bytes). Memory *int64 + // CPU set (number of cpus the cgroup has access to). + CPUSet cpuset.CPUSet // CPU shares (relative weight vs. other containers). CPUShares *uint64 // CPU hardcap limit (in usecs). Allowed cpu time in a given period. From 24b582dc81d51853ffde8e6af44ecde17e4efad1 Mon Sep 17 00:00:00 2001 From: Peter Hunt Date: Mon, 8 Jul 2024 10:33:47 -0400 Subject: [PATCH 2/4] UPSTREAM: : kubelet/cm: move CPU reading from cm to cm/cpumanager Adapted from https://github.com/kubernetes/kubernetes/pull/125923/commits/77d03e42cdaf7482c81f5406505a608a04691a8d to avoid merge conflict This can be dropped in the 4.19 rebase Authored-by: Francesco Romani Signed-off-by: Peter Hunt --- pkg/kubelet/cm/container_manager_linux.go | 5 ---- pkg/kubelet/cm/cpumanager/cpu_manager.go | 24 ++++++++++++++---- pkg/kubelet/cm/cpumanager/cpu_manager_test.go | 11 ++------ pkg/kubelet/cm/cpumanager/fake_cpu_manager.go | 5 ++++ .../cm/node_container_manager_linux.go | 25 +++---------------- 5 files changed, 29 insertions(+), 41 deletions(-) diff --git a/pkg/kubelet/cm/container_manager_linux.go b/pkg/kubelet/cm/container_manager_linux.go index 2120f06db82e1..7b6899c776b95 100644 --- a/pkg/kubelet/cm/container_manager_linux.go +++ b/pkg/kubelet/cm/container_manager_linux.go @@ -34,7 +34,6 @@ import ( "github.com/opencontainers/runc/libcontainer/configs" "k8s.io/klog/v2" "k8s.io/mount-utils" - "k8s.io/utils/cpuset" utilpath "k8s.io/utils/path" libcontaineruserns "github.com/opencontainers/runc/libcontainer/userns" @@ -132,10 +131,6 @@ type containerManagerImpl struct { topologyManager topologymanager.Manager // Interface for Dynamic Resource Allocation management. draManager dra.Manager - // The full set of CPUs on the node. This field is set lazily, and is used to make sure - // the `cpuset` cgroup hierarchy is created on cgroup v2 when cpumanager is using a - // None policy. - allCPUs cpuset.CPUSet } type features struct { diff --git a/pkg/kubelet/cm/cpumanager/cpu_manager.go b/pkg/kubelet/cm/cpumanager/cpu_manager.go index d2ca5f5ade202..69b3690695e40 100644 --- a/pkg/kubelet/cm/cpumanager/cpu_manager.go +++ b/pkg/kubelet/cm/cpumanager/cpu_manager.go @@ -94,6 +94,10 @@ type Manager interface { // GetCPUAffinity returns cpuset which includes cpus from shared pools // as well as exclusively allocated cpus GetCPUAffinity(podUID, containerName string) cpuset.CPUSet + + // GetAllCPUs returns all the CPUs known by cpumanager, as reported by the + // hardware discovery. Maps to the CPU capacity. + GetAllCPUs() cpuset.CPUSet } type manager struct { @@ -137,7 +141,11 @@ type manager struct { // stateFileDirectory holds the directory where the state file for checkpoints is held. 
stateFileDirectory string - // allocatableCPUs is the set of online CPUs as reported by the system + // allCPUs is the set of online CPUs as reported by the system + allCPUs cpuset.CPUSet + + // allocatableCPUs is the set of online CPUs as reported by the system, + // and available for allocation, minus the reserved set allocatableCPUs cpuset.CPUSet // pendingAdmissionPod contain the pod during the admission phase @@ -157,6 +165,11 @@ func NewManager(cpuPolicyName string, cpuPolicyOptions map[string]string, reconc var policy Policy var err error + topo, err = topology.Discover(machineInfo) + if err != nil { + return nil, err + } + switch policyName(cpuPolicyName) { case PolicyNone: @@ -166,10 +179,6 @@ func NewManager(cpuPolicyName string, cpuPolicyOptions map[string]string, reconc } case PolicyStatic: - topo, err = topology.Discover(machineInfo) - if err != nil { - return nil, err - } klog.InfoS("Detected CPU topology", "topology", topo) reservedCPUs, ok := nodeAllocatableReservation[v1.ResourceCPU] @@ -206,6 +215,7 @@ func NewManager(cpuPolicyName string, cpuPolicyOptions map[string]string, reconc topology: topo, nodeAllocatableReservation: nodeAllocatableReservation, stateFileDirectory: stateFileDirectory, + allCPUs: topo.CPUDetails.CPUs(), } manager.sourcesReady = &sourcesReadyStub{} return manager, nil @@ -340,6 +350,10 @@ func (m *manager) GetAllocatableCPUs() cpuset.CPUSet { return m.allocatableCPUs.Clone() } +func (m *manager) GetAllCPUs() cpuset.CPUSet { + return m.allCPUs.Clone() +} + type reconciledContainer struct { podName string containerName string diff --git a/pkg/kubelet/cm/cpumanager/cpu_manager_test.go b/pkg/kubelet/cm/cpumanager/cpu_manager_test.go index 250f1eb014a36..e60db6a00489c 100644 --- a/pkg/kubelet/cm/cpumanager/cpu_manager_test.go +++ b/pkg/kubelet/cm/cpumanager/cpu_manager_test.go @@ -645,15 +645,8 @@ func TestCPUManagerGenerate(t *testing.T) { if rawMgr.policy.Name() != testCase.expectedPolicy { t.Errorf("Unexpected policy name. Have: %q wants %q", rawMgr.policy.Name(), testCase.expectedPolicy) } - if rawMgr.policy.Name() == string(PolicyNone) { - if rawMgr.topology != nil { - t.Errorf("Expected topology to be nil for 'none' policy. Have: %q", rawMgr.topology) - } - } - if rawMgr.policy.Name() != string(PolicyNone) { - if rawMgr.topology == nil { - t.Errorf("Expected topology to be non-nil for policy '%v'. Have: %q", rawMgr.policy.Name(), rawMgr.topology) - } + if rawMgr.topology == nil { + t.Errorf("Expected topology to be non-nil for policy '%v'. 
Have: %q", rawMgr.policy.Name(), rawMgr.topology) } } }) diff --git a/pkg/kubelet/cm/cpumanager/fake_cpu_manager.go b/pkg/kubelet/cm/cpumanager/fake_cpu_manager.go index 933697051355e..9a0329db443bb 100644 --- a/pkg/kubelet/cm/cpumanager/fake_cpu_manager.go +++ b/pkg/kubelet/cm/cpumanager/fake_cpu_manager.go @@ -85,6 +85,11 @@ func (m *fakeManager) GetCPUAffinity(podUID, containerName string) cpuset.CPUSet return cpuset.CPUSet{} } +func (m *fakeManager) GetAllCPUs() cpuset.CPUSet { + klog.InfoS("GetAllCPUs") + return cpuset.CPUSet{} +} + // NewFakeManager creates empty/fake cpu manager func NewFakeManager() Manager { return &fakeManager{ diff --git a/pkg/kubelet/cm/node_container_manager_linux.go b/pkg/kubelet/cm/node_container_manager_linux.go index 31d44d9db3845..d90279a01124d 100644 --- a/pkg/kubelet/cm/node_container_manager_linux.go +++ b/pkg/kubelet/cm/node_container_manager_linux.go @@ -31,12 +31,9 @@ import ( utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/klog/v2" kubefeatures "k8s.io/kubernetes/pkg/features" - "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager" - "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology" "k8s.io/kubernetes/pkg/kubelet/events" "k8s.io/kubernetes/pkg/kubelet/stats/pidlimit" kubetypes "k8s.io/kubernetes/pkg/kubelet/types" - "k8s.io/utils/cpuset" ) const ( @@ -199,30 +196,14 @@ func (cm *containerManagerImpl) getCgroupConfig(rl v1.ResourceList) *ResourceCon // An alternative is to delegate the `cpuset` cgroup to the kubelet, but that would require some plumbing in libcontainer, // and this is sufficient. // Only do so on None policy, as Static policy will do its own updating of the cpuset. - if cm.NodeConfig.CPUManagerPolicy == string(cpumanager.PolicyNone) { - if cm.allCPUs.IsEmpty() { - cm.allCPUs = cm.getAllCPUs() - } - rc.CPUSet = cm.allCPUs + // Please see the comment on policy none's GetAllocatableCPUs + if cm.cpuManager.GetAllocatableCPUs().IsEmpty() { + rc.CPUSet = cm.cpuManager.GetAllCPUs() } return &rc } -func (cm *containerManagerImpl) getAllCPUs() cpuset.CPUSet { - machineInfo, err := cm.cadvisorInterface.MachineInfo() - if err != nil { - klog.V(4).InfoS("Failed to get machine info to get default cpuset", "error", err) - return cpuset.CPUSet{} - } - topo, err := topology.Discover(machineInfo) - if err != nil { - klog.V(4).InfoS("Failed to get topology info to get default cpuset", "error", err) - return cpuset.CPUSet{} - } - return topo.CPUDetails.CPUs() -} - // GetNodeAllocatableAbsolute returns the absolute value of Node Allocatable which is primarily useful for enforcement. // Note that not all resources that are available on the node are included in the returned list of resources. // Returns a ResourceList. From f733719ace371a1ea8b78dfc56d2ebbdff821edd Mon Sep 17 00:00:00 2001 From: Francesco Romani Date: Tue, 10 Sep 2024 14:29:38 +0200 Subject: [PATCH 3/4] UPSTREAM: : e2e_node: add a test to verify the kubelet starts with systemd cgroup driver and cpumanager none policy. This was originally planned to be a correctness check for https://issues.k8s.io/125923, but it was difficult to reproduce the bug, so it's now a regression test against it. 
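For context while reading the test, the behaviour it guards boils down to the decision introduced by the previous two patches: under the none policy the cpumanager reports an empty allocatable set, so the kubelet pins the node-level cgroups to the full machine cpuset; under the static policy the set is non-empty and the cpumanager keeps managing cpuset.cpus itself. Below is a minimal standalone sketch of that decision; pickNodeCPUSet and the literal cpusets are illustrative, not kubelet code, and only k8s.io/utils/cpuset is used as-is.

package main

import (
	"fmt"

	"k8s.io/utils/cpuset"
)

// pickNodeCPUSet sketches the check added in getCgroupConfig: an empty
// allocatable set is how the none policy reports itself (see the comment on
// policy none's GetAllocatableCPUs), so fall back to the full machine cpuset
// to keep systemd from dropping the cpuset hierarchy on cgroup v2.
func pickNodeCPUSet(allocatable, all cpuset.CPUSet) cpuset.CPUSet {
	if allocatable.IsEmpty() {
		return all
	}
	// Static policy: leave cpuset management to the cpumanager itself.
	return cpuset.CPUSet{}
}

func main() {
	all := cpuset.New(0, 1, 2, 3) // hypothetical 4-CPU node
	fmt.Println(pickNodeCPUSet(cpuset.New(), all))     // none policy: "0-3"
	fmt.Println(pickNodeCPUSet(cpuset.New(2, 3), all)) // static policy: ""
}
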
Signed-off-by: Francesco Romani Signed-off-by: Peter Hunt --- test/e2e_node/node_container_manager_test.go | 75 ++++++++++++++++++++ 1 file changed, 75 insertions(+) diff --git a/test/e2e_node/node_container_manager_test.go b/test/e2e_node/node_container_manager_test.go index ae57fa75e3025..2fb614e12166a 100644 --- a/test/e2e_node/node_container_manager_test.go +++ b/test/e2e_node/node_container_manager_test.go @@ -71,6 +71,81 @@ var _ = SIGDescribe("Node Container Manager [Serial]", func() { framework.ExpectNoError(runTest(ctx, f)) }) }) + ginkgo.Context("Validate CGroup management", func() { + // Regression test for https://issues.k8s.io/125923 + // In this issue there's a race involved with systemd which seems to manifest most likely, or perhaps only + // (data gathered so far seems inconclusive) on the very first boot of the machine, so restarting the kubelet + // seems not sufficient. OTOH, the exact reproducer seems to require a dedicate lane with only this test, or + // to reboot the machine before to run this test. Both are practically unrealistic in CI. + // The closest approximation is this test in this current form, using a kubelet restart. This at least + // acts as non regression testing, so it still brings value. + ginkgo.It("should correctly start with cpumanager none policy in use with systemd", func(ctx context.Context) { + if !IsCgroup2UnifiedMode() { + ginkgo.Skip("this test requires cgroups v2") + } + + var err error + var oldCfg *kubeletconfig.KubeletConfiguration + // Get current kubelet configuration + oldCfg, err = getCurrentKubeletConfig(ctx) + framework.ExpectNoError(err) + + ginkgo.DeferCleanup(func(ctx context.Context) { + if oldCfg != nil { + // Update the Kubelet configuration. + ginkgo.By("Stopping the kubelet") + startKubelet := stopKubelet() + + // wait until the kubelet health check will fail + gomega.Eventually(ctx, func() bool { + return kubeletHealthCheck(kubeletHealthCheckURL) + }).WithTimeout(time.Minute).WithPolling(time.Second).Should(gomega.BeFalseBecause("expected kubelet health check to be failed")) + ginkgo.By("Stopped the kubelet") + + framework.ExpectNoError(e2enodekubelet.WriteKubeletConfigFile(oldCfg)) + + ginkgo.By("Starting the kubelet") + startKubelet() + + // wait until the kubelet health check will succeed + gomega.Eventually(ctx, func(ctx context.Context) bool { + return kubeletHealthCheck(kubeletHealthCheckURL) + }).WithTimeout(2 * time.Minute).WithPolling(5 * time.Second).Should(gomega.BeTrueBecause("expected kubelet to be in healthy state")) + ginkgo.By("Started the kubelet") + } + }) + + newCfg := oldCfg.DeepCopy() + // Change existing kubelet configuration + newCfg.CPUManagerPolicy = "none" + newCfg.CgroupDriver = "systemd" + + // Update the Kubelet configuration. 
+ ginkgo.By("Stopping the kubelet") + startKubelet := stopKubelet() + + // wait until the kubelet health check will fail + gomega.Eventually(ctx, func() bool { + return kubeletHealthCheck(kubeletHealthCheckURL) + }).WithTimeout(time.Minute).WithPolling(time.Second).Should(gomega.BeFalseBecause("expected kubelet health check to be failed")) + ginkgo.By("Stopped the kubelet") + + framework.ExpectNoError(e2enodekubelet.WriteKubeletConfigFile(newCfg)) + + ginkgo.By("Starting the kubelet") + startKubelet() + + // wait until the kubelet health check will succeed + gomega.Eventually(ctx, func() bool { + return getNodeReadyStatus(ctx, f) && kubeletHealthCheck(kubeletHealthCheckURL) + }).WithTimeout(2 * time.Minute).WithPolling(5 * time.Second).Should(gomega.BeTrueBecause("expected kubelet to be in healthy state")) + ginkgo.By("Started the kubelet") + + gomega.Consistently(ctx, func(ctx context.Context) bool { + return getNodeReadyStatus(ctx, f) && kubeletHealthCheck(kubeletHealthCheckURL) + }).WithTimeout(2 * time.Minute).WithPolling(2 * time.Second).Should(gomega.BeTrueBecause("node keeps reporting ready status")) + }) + }) }) func expectFileValToEqual(filePath string, expectedValue, delta int64) error { From 60d07b32b0f07c7fa41d8b2ed3cffb3d9a62bd81 Mon Sep 17 00:00:00 2001 From: Peter Hunt Date: Fri, 11 Oct 2024 16:53:33 -0400 Subject: [PATCH 4/4] UPSTREAM: : e2e_node: use restart instead of start stop Signed-off-by: Peter Hunt --- test/e2e_node/node_container_manager_test.go | 36 +++++--------------- 1 file changed, 9 insertions(+), 27 deletions(-) diff --git a/test/e2e_node/node_container_manager_test.go b/test/e2e_node/node_container_manager_test.go index 2fb614e12166a..9cad91d8b6363 100644 --- a/test/e2e_node/node_container_manager_test.go +++ b/test/e2e_node/node_container_manager_test.go @@ -93,24 +93,15 @@ var _ = SIGDescribe("Node Container Manager [Serial]", func() { ginkgo.DeferCleanup(func(ctx context.Context) { if oldCfg != nil { // Update the Kubelet configuration. - ginkgo.By("Stopping the kubelet") - startKubelet := stopKubelet() - - // wait until the kubelet health check will fail - gomega.Eventually(ctx, func() bool { - return kubeletHealthCheck(kubeletHealthCheckURL) - }).WithTimeout(time.Minute).WithPolling(time.Second).Should(gomega.BeFalseBecause("expected kubelet health check to be failed")) - ginkgo.By("Stopped the kubelet") - framework.ExpectNoError(e2enodekubelet.WriteKubeletConfigFile(oldCfg)) - ginkgo.By("Starting the kubelet") - startKubelet() + ginkgo.By("Restarting the kubelet") + restartKubelet(true) // wait until the kubelet health check will succeed gomega.Eventually(ctx, func(ctx context.Context) bool { return kubeletHealthCheck(kubeletHealthCheckURL) - }).WithTimeout(2 * time.Minute).WithPolling(5 * time.Second).Should(gomega.BeTrueBecause("expected kubelet to be in healthy state")) + }).WithTimeout(2 * time.Minute).WithPolling(5 * time.Second).Should(gomega.BeTrue()) ginkgo.By("Started the kubelet") } }) @@ -121,29 +112,20 @@ var _ = SIGDescribe("Node Container Manager [Serial]", func() { newCfg.CgroupDriver = "systemd" // Update the Kubelet configuration. 
- ginkgo.By("Stopping the kubelet") - startKubelet := stopKubelet() - - // wait until the kubelet health check will fail - gomega.Eventually(ctx, func() bool { - return kubeletHealthCheck(kubeletHealthCheckURL) - }).WithTimeout(time.Minute).WithPolling(time.Second).Should(gomega.BeFalseBecause("expected kubelet health check to be failed")) - ginkgo.By("Stopped the kubelet") - framework.ExpectNoError(e2enodekubelet.WriteKubeletConfigFile(newCfg)) - ginkgo.By("Starting the kubelet") - startKubelet() + ginkgo.By("Restarting the kubelet") + restartKubelet(true) // wait until the kubelet health check will succeed - gomega.Eventually(ctx, func() bool { - return getNodeReadyStatus(ctx, f) && kubeletHealthCheck(kubeletHealthCheckURL) - }).WithTimeout(2 * time.Minute).WithPolling(5 * time.Second).Should(gomega.BeTrueBecause("expected kubelet to be in healthy state")) + gomega.Eventually(ctx, func(ctx context.Context) bool { + return kubeletHealthCheck(kubeletHealthCheckURL) + }).WithTimeout(2 * time.Minute).WithPolling(5 * time.Second).Should(gomega.BeTrue()) ginkgo.By("Started the kubelet") gomega.Consistently(ctx, func(ctx context.Context) bool { return getNodeReadyStatus(ctx, f) && kubeletHealthCheck(kubeletHealthCheckURL) - }).WithTimeout(2 * time.Minute).WithPolling(2 * time.Second).Should(gomega.BeTrueBecause("node keeps reporting ready status")) + }).WithTimeout(2 * time.Minute).WithPolling(2 * time.Second).Should(gomega.BeTrue()) }) }) })
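
A note for readers skimming the series: the core mechanism is simply that ResourceConfig grows a CPUSet field and cgroupManagerImpl.toResources renders it into libcontainer's CpusetCpus, which the systemd cgroup driver in turn hands to systemd, so systemd tracks the cpuset hierarchy instead of deleting it. The snippet below is a minimal standalone sketch of that conversion; resourceConfig and cpusetCpus are trimmed stand-ins rather than the real kubelet types, and only k8s.io/utils/cpuset is used as-is.

package main

import (
	"fmt"

	"k8s.io/utils/cpuset"
)

// resourceConfig is a trimmed stand-in for the kubelet's ResourceConfig,
// keeping only the field added by PATCH 1/4.
type resourceConfig struct {
	CPUSet cpuset.CPUSet
}

// cpusetCpus mirrors the toResources change: only emit a cpuset list when
// one was explicitly requested, so configs without a CPUSet stay untouched.
func cpusetCpus(rc resourceConfig) string {
	if rc.CPUSet.IsEmpty() {
		return ""
	}
	// String() renders the canonical kernel list format, e.g. "0-3,8",
	// which is the form libcontainer's CpusetCpus field expects.
	return rc.CPUSet.String()
}

func main() {
	all := cpuset.New(0, 1, 2, 3, 8) // hypothetical machine: CPUs 0-3 and 8
	fmt.Printf("%q\n", cpusetCpus(resourceConfig{CPUSet: all})) // "0-3,8"
	fmt.Printf("%q\n", cpusetCpus(resourceConfig{}))            // ""
}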
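
To spot-check the fix on a live node (cgroup v2, systemd driver, none policy), the kubepods slice's cpuset.cpus should now list every online CPU rather than the cgroup going missing. A small sketch, assuming the default /sys/fs/cgroup/kubepods.slice layout; a non-default --cgroup-root changes the path.

package main

import (
	"fmt"
	"os"
	"strings"

	"k8s.io/utils/cpuset"
)

func main() {
	// Default location of the kubepods cgroup with the systemd driver on
	// cgroup v2; adjust for a non-default cgroup root.
	const path = "/sys/fs/cgroup/kubepods.slice/cpuset.cpus"

	raw, err := os.ReadFile(path)
	if err != nil {
		// A missing directory is the symptom the fix avoids.
		fmt.Fprintln(os.Stderr, "reading cpuset.cpus:", err)
		os.Exit(1)
	}
	set, err := cpuset.Parse(strings.TrimSpace(string(raw)))
	if err != nil {
		fmt.Fprintln(os.Stderr, "parsing cpuset list:", err)
		os.Exit(1)
	}
	// With this series applied and the none policy, the set should match
	// the machine's full online cpuset rather than being empty.
	fmt.Printf("kubepods cpuset: %q (%d CPUs)\n", set.String(), set.Size())
}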