23 changes: 23 additions & 0 deletions controllers/clustercache/cluster_cache.go
@@ -60,13 +60,22 @@ type Options struct {
// will never be created.
WatchFilterValue string

// ClusterFilter is a function that can be used to filter which clusters should be handled
// by the ClusterCache. If nil, all clusters will be handled. If set, only clusters for which
// the filter returns true will be handled.
ClusterFilter ClusterFilter
@sbueringer (Member) commented on Aug 22, 2025:

Let's add some simple unit test coverage (for the reconciler logic)


// Cache are the cache options for the caches that are created per cluster.
Cache CacheOptions

// Client are the client options for the clients that are created per cluster.
Client ClientOptions
}

// ClusterFilter is a function that filters which clusters should be handled by the ClusterCache.
// It returns true if the cluster should be handled, false otherwise.
type ClusterFilter func(cluster *clusterv1.Cluster) bool

// CacheOptions are the cache options for the caches that are created per cluster.
type CacheOptions struct {
// SyncPeriod is the sync period of the cache.
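
For orientation, a hedged sketch of how a consumer could wire the new option when setting up the ClusterCache. The SetupWithManager call, the import paths, and the opt-in label key are illustrative assumptions, not part of this diff:

package main

import (
	"context"

	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/controller"

	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" // assumed core API version
	"sigs.k8s.io/cluster-api/controllers/clustercache"
)

// setupClusterCache is a hypothetical helper showing only the ClusterFilter wiring.
func setupClusterCache(ctx context.Context, mgr ctrl.Manager) (clustercache.ClusterCache, error) {
	return clustercache.SetupWithManager(ctx, mgr, clustercache.Options{
		SecretClient: mgr.GetClient(),
		// Only handle Clusters that carry this (made-up) opt-in label;
		// all other Clusters are ignored by the ClusterCache.
		ClusterFilter: func(cluster *clusterv1.Cluster) bool {
			return cluster.Labels["example.com/clustercache-enabled"] == "true"
		},
		Cache:  clustercache.CacheOptions{},
		Client: clustercache.ClientOptions{},
	}, controller.Options{MaxConcurrentReconciles: 10})
}

With wiring like this, only labeled Clusters get an accessor and a connection; the Reconcile change below handles the case where a previously included Cluster later stops matching the filter.
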
@@ -357,6 +366,11 @@ type clusterCache struct {

// cacheCtxCancel is used during Shutdown to stop caches.
cacheCtxCancel context.CancelCauseFunc

// clusterFilter is a function that can be used to filter which clusters should be handled
// by the ClusterCache. If nil, all clusters will be handled. If set, only clusters for which
// the filter returns true will be handled.
clusterFilter ClusterFilter
}

// clusterSource stores the necessary information so we can enqueue reconcile.Requests for reconcilers that
@@ -451,6 +465,15 @@ func (cc *clusterCache) Reconcile(ctx context.Context, req reconcile.Request) (r
return ctrl.Result{RequeueAfter: defaultRequeueAfter}, nil
}

// Apply cluster filter if set
if cc.clusterFilter != nil && !cc.clusterFilter(cluster) {
@fabriziopandini (Member) commented on Sep 22, 2025:

Wondering if we can avoid creating an accessor entirely if the cluster is filtered out (~like what we can achieve with predicates).

@sbueringer (Member) replied on Sep 22, 2025:

Not sure what this would look like. The problem is that we should also handle the case where a clusterFilter starts filtering out a cluster that it didn't filter out before.

Creating the clusterAccessor is super cheap.

@sbueringer (Member) added on Sep 22, 2025:

But maybe there's a better option; I assume it requires some refactoring though (and accordingly maybe some risk of introducing new issues).

A Member added:

Maybe we can move getOrCreateClusterAccessor to line 476 and use getClusterAccessor (+ a nil check) instead "locally" in the if branches where we use accessor above line 476 (a hedged sketch of this suggestion follows after this file's diff).

log.V(6).Info("Cluster filtered out by ClusterFilter, not connecting")
accessor.Disconnect(ctx)
cc.deleteClusterAccessor(clusterKey)
cc.cleanupClusterSourcesForCluster(clusterKey)
return ctrl.Result{}, nil
}

// Return if infrastructure is not ready yet to avoid trying to open a connection when it cannot succeed.
// Requeue is not needed as there will be a new reconcile.Request when Cluster.status.initialization.infrastructureProvisioned is set.
if !ptr.Deref(cluster.Status.Initialization.InfrastructureProvisioned, false) {
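A hedged sketch of the refactor suggested in the thread above (not the code in this PR): create the accessor only after the filter check, and use getClusterAccessor plus a nil check in the filtered-out branch, so filtered-out Clusters never allocate an accessor. It assumes getClusterAccessor returns nil when no accessor exists for the key:

// Filtered-out Clusters are handled before any accessor is created.
if cc.clusterFilter != nil && !cc.clusterFilter(cluster) {
	log.V(6).Info("Cluster filtered out by ClusterFilter, not connecting")
	// Disconnect only if an accessor exists from an earlier reconcile,
	// i.e. the filter started excluding a previously included Cluster.
	if accessor := cc.getClusterAccessor(clusterKey); accessor != nil {
		accessor.Disconnect(ctx)
		cc.deleteClusterAccessor(clusterKey)
	}
	cc.cleanupClusterSourcesForCluster(clusterKey)
	return ctrl.Result{}, nil
}

// Only reached for Clusters the filter includes.
accessor := cc.getOrCreateClusterAccessor(clusterKey)
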
31 changes: 31 additions & 0 deletions controllers/clustercache/cluster_cache_test.go
@@ -57,6 +57,9 @@ func TestReconcile(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{
Name: "test-cluster",
Namespace: metav1.NamespaceDefault,
Labels: map[string]string{
"cluster.x-k8s.io/included-in-clustercache-tests": "true",
},
},
Spec: clusterv1.ClusterSpec{
ControlPlaneRef: clusterv1.ContractVersionedObjectReference{
@@ -87,6 +90,9 @@
clusterAccessorConfig: accessorConfig,
clusterAccessors: make(map[client.ObjectKey]*clusterAccessor),
cacheCtx: context.Background(),
clusterFilter: func(cluster *clusterv1.Cluster) bool {
return cluster.ObjectMeta.Labels["cluster.x-k8s.io/included-in-clustercache-tests"] == "true"
},
}

// Add a Cluster source and start it (queue will be later used to verify the source works correctly)
@@ -110,6 +116,31 @@
testCluster.Status.Initialization.InfrastructureProvisioned = ptr.To(true)
g.Expect(env.Status().Patch(ctx, testCluster, patch)).To(Succeed())

// Exclude from clustercache by changing the label
patch = client.MergeFrom(testCluster.DeepCopy())
testCluster.ObjectMeta.Labels = map[string]string{
"cluster.x-k8s.io/included-in-clustercache-tests": "false",
}
g.Expect(env.Patch(ctx, testCluster, patch)).To(Succeed())
// Sanity check that the clusterFilter does not include the cluster now
g.Expect(cc.clusterFilter(testCluster)).To(BeFalse())

// Reconcile, cluster should be ignored now
// => no requeue, no cluster accessor created
res, err = cc.Reconcile(ctx, reconcile.Request{NamespacedName: clusterKey})
g.Expect(err).ToNot(HaveOccurred())
g.Expect(res).To(Equal(ctrl.Result{}))
g.Expect(res.IsZero()).To(BeTrue())

// Put the label back
patch = client.MergeFrom(testCluster.DeepCopy())
testCluster.ObjectMeta.Labels = map[string]string{
"cluster.x-k8s.io/included-in-clustercache-tests": "true",
}
g.Expect(env.Patch(ctx, testCluster, patch)).To(Succeed())
// Sanity check that the clusterFilter does include the cluster now
g.Expect(cc.clusterFilter(testCluster)).To(BeTrue())

// Reconcile, kubeconfig Secret doesn't exist
// => accessor.Connect will fail so we expect a retry with ConnectionCreationRetryInterval.
res, err = cc.Reconcile(ctx, reconcile.Request{NamespacedName: clusterKey})