
Commit 1ced6d4

Support old control-plane label
This queries both the new (/control-plane) and old (/master) labels. The old label is only queried if no nodes carrying the newer label are found.
1 parent 25c19b5 commit 1ced6d4
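
For orientation before the full diffs: the change boils down to the fallback pattern sketched below. This is a condensed, hypothetical illustration of the lookup order (the two label values come from controllers/cluster.go in this commit; the helper name controlPlaneNodes is made up here), not a verbatim copy of the new code.

package controllers

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// controlPlaneNodes lists nodes carrying the new control-plane label first,
// and only falls back to the deprecated master label when that query
// returns nothing.
func controlPlaneNodes(ctx context.Context, cl client.Client) ([]corev1.Node, error) {
	nodes := &corev1.NodeList{}
	if err := cl.List(ctx, nodes, client.HasLabels([]string{"node-role.kubernetes.io/control-plane"})); err != nil {
		return nil, err
	}
	if len(nodes.Items) > 0 {
		return nodes.Items, nil
	}
	// No nodes matched the new label, so consult the deprecated one.
	if err := cl.List(ctx, nodes, client.HasLabels([]string{"node-role.kubernetes.io/master"})); err != nil {
		return nil, err
	}
	return nodes.Items, nil
}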

File tree

4 files changed (+80 −15 lines)

config/default/kustomization.yaml
controllers/cluster.go
controllers/clusterbootstrapconfig_controller.go
controllers/clusterbootstrapconfig_controller_test.go

config/default/kustomization.yaml

Lines changed: 1 addition & 1 deletion
@@ -1,5 +1,5 @@
 # Adds namespace to all resources.
-namespace: cluster-bootstrap-controller-system
+namespace: flux-system
 
 # Value of this field is prepended to the
 # names of all resources, e.g. a deployment named

controllers/cluster.go

Lines changed: 36 additions & 13 deletions
@@ -4,32 +4,34 @@ import (
 	"context"
 	"fmt"
 
+	"github.com/go-logr/logr"
 	corev1 "k8s.io/api/core/v1"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/log"
 )
 
+const (
+	deprecatedControlPlaneLabel = "node-role.kubernetes.io/master"
+	controlPlaneLabel           = "node-role.kubernetes.io/control-plane"
+)
+
 // IsControlPlaneReady takes a client connected to a cluster and reports whether or
 // not the control-plane for the cluster is "ready".
 func IsControlPlaneReady(ctx context.Context, cl client.Client) (bool, error) {
 	logger := log.FromContext(ctx)
-	nodes := &corev1.NodeList{}
-	// https://github.com/kubernetes/enhancements/blob/master/keps/sig-cluster-lifecycle/kubeadm/2067-rename-master-label-taint/README.md#design-details
-	err := cl.List(ctx, nodes, client.HasLabels([]string{"node-role.kubernetes.io/control-plane"}))
+	readiness := []bool{}
+	readyNodes, err := listReadyNodesWithLabel(ctx, logger, cl, controlPlaneLabel)
 	if err != nil {
-		return false, fmt.Errorf("failed to query cluster node list: %w", err)
+		return false, err
 	}
-	logger.Info("listed nodes", "count", len(nodes.Items))
+	readiness = append(readiness, readyNodes...)
 
-	readiness := []bool{}
-	for _, node := range nodes.Items {
-		for _, c := range node.Status.Conditions {
-			logger.Info("node status", "type", c.Type, "status", c.Status)
-			switch c.Type {
-			case corev1.NodeReady:
-				readiness = append(readiness, c.Status == corev1.ConditionTrue)
-			}
+	if len(readyNodes) == 0 {
+		readyNodes, err := listReadyNodesWithLabel(ctx, logger, cl, deprecatedControlPlaneLabel)
+		if err != nil {
+			return false, err
 		}
+		readiness = append(readiness, readyNodes...)
 	}
 
 	isReady := func(bools []bool) bool {
@@ -45,3 +47,24 @@ func IsControlPlaneReady(ctx context.Context, cl client.Client) (bool, error) {
 	// If we have no statuses, then we really don't know if we're ready or not.
 	return (len(readiness) > 0 && isReady(readiness)), nil
 }
+
+func listReadyNodesWithLabel(ctx context.Context, logger logr.Logger, cl client.Client, label string) ([]bool, error) {
+	nodes := &corev1.NodeList{}
+	// https://github.com/kubernetes/enhancements/blob/master/keps/sig-cluster-lifecycle/kubeadm/2067-rename-master-label-taint/README.md#design-details
+	err := cl.List(ctx, nodes, client.HasLabels([]string{label}))
+	if err != nil {
+		return nil, fmt.Errorf("failed to query cluster node list: %w", err)
+	}
+	logger.Info("listed nodes with control plane label", "label", label, "count", len(nodes.Items))
+
+	readiness := []bool{}
+	for _, node := range nodes.Items {
+		for _, c := range node.Status.Conditions {
+			switch c.Type {
+			case corev1.NodeReady:
+				readiness = append(readiness, c.Status == corev1.ConditionTrue)
+			}
+		}
+	}
+	return readiness, nil
+}
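
For context, IsControlPlaneReady is called by the ClusterBootstrapConfig reconciler (next diff) with a client for the remote cluster, built from the "<cluster>-kubeconfig" secret seen in the test further down. A hypothetical illustration of such a caller — the helper name and error wrapping are assumed here, not taken from the repository — could look like:

package controllers

import (
	"context"
	"fmt"

	"k8s.io/client-go/tools/clientcmd"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// checkRemoteControlPlane builds a controller-runtime client from raw
// kubeconfig bytes and reports whether that cluster's control plane is ready.
func checkRemoteControlPlane(ctx context.Context, kubeconfig []byte) (bool, error) {
	cfg, err := clientcmd.RESTConfigFromKubeConfig(kubeconfig)
	if err != nil {
		return false, fmt.Errorf("parsing kubeconfig: %w", err)
	}
	cl, err := client.New(cfg, client.Options{})
	if err != nil {
		return false, fmt.Errorf("creating client: %w", err)
	}
	return IsControlPlaneReady(ctx, cl)
}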

controllers/clusterbootstrapconfig_controller.go

Lines changed: 1 addition & 1 deletion
@@ -104,7 +104,7 @@ func (r *ClusterBootstrapConfigReconciler) Reconcile(ctx context.Context, req ct
 		if !ready {
 			logger.Info("waiting for control plane to be ready", "cluster", clusterName)
 
-			return ctrl.Result{RequeueAfter: time.Minute * 2}, nil
+			return ctrl.Result{RequeueAfter: requeueAfterTime}, nil
 		}
 	}
 	if err := bootstrapClusterWithConfig(ctx, logger, r.Client, c, &clusterBootstrapConfig); err != nil {
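
The requeueAfterTime constant that replaces the inline duration is defined elsewhere in the package and is not part of this commit. Assuming it simply preserves the previous behaviour, its declaration would look something like:

package controllers

import "time"

// Hypothetical declaration: requeueAfterTime is not shown in this commit;
// the inline value it replaces was 2 minutes.
const requeueAfterTime = time.Minute * 2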

controllers/clusterbootstrapconfig_controller_test.go

Lines changed: 42 additions & 0 deletions
@@ -131,6 +131,48 @@ func TestReconcile_when_cluster_ready(t *testing.T) {
 	}
 }
 
+func TestReconcile_when_cluster_ready_and_old_label(t *testing.T) {
+	bc := makeTestClusterBootstrapConfig(func(c *capiv1alpha1.ClusterBootstrapConfig) {
+		c.Spec.RequireClusterReady = true
+	})
+	readyNode := makeNode(map[string]string{
+		"node-role.kubernetes.io/master": "",
+	}, corev1.NodeCondition{Type: "Ready", Status: "True", LastHeartbeatTime: metav1.Now(), LastTransitionTime: metav1.Now(), Reason: "KubeletReady", Message: "kubelet is posting ready status"})
+
+	cl := makeTestCluster(func(c *clusterv1.Cluster) {
+		c.ObjectMeta.Labels = bc.Spec.ClusterSelector.MatchLabels
+		c.Status.Phase = string(clusterv1.ClusterPhaseProvisioned)
+	})
+	secret := makeTestSecret(types.NamespacedName{
+		Name:      cl.GetName() + "-kubeconfig",
+		Namespace: cl.GetNamespace(),
+	}, map[string][]byte{"value": []byte("testing")})
+	// This cheats by using the local client as the remote client to simplify
+	// getting the value from the remote client.
+	reconciler := makeTestReconciler(t, bc, cl, secret, readyNode)
+	reconciler.configParser = func(b []byte) (client.Client, error) {
+		return reconciler.Client, nil
+	}
+
+	result, err := reconciler.Reconcile(context.TODO(), ctrl.Request{NamespacedName: types.NamespacedName{
+		Name:      bc.GetName(),
+		Namespace: bc.GetNamespace(),
+	}})
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !result.IsZero() {
+		t.Fatalf("want empty result, got %v", result)
+	}
+	var jobs batchv1.JobList
+	if err := reconciler.List(context.TODO(), &jobs, client.InNamespace(testNamespace)); err != nil {
+		t.Fatal(err)
+	}
+	if l := len(jobs.Items); l != 1 {
+		t.Fatalf("found %d jobs, want %d", l, 1)
+	}
+}
+
 func makeTestReconciler(t *testing.T, objs ...runtime.Object) *ClusterBootstrapConfigReconciler {
 	s, tc := makeTestClientAndScheme(t, objs...)
 	return NewClusterBootstrapConfigReconciler(tc, s)