Skip to content

Commit ed97655

Browse files
committed
Add ClusterFilter to ClusterCache Options
This allows filtering the Clusters that are handled by the cache. It can be used for example by providers that only want to cache Clusters of the relevant type to them. Signed-off-by: Lennart Jern <[email protected]>
1 parent e4fd578 commit ed97655

File tree

2 files changed

+54
-0
lines changed

2 files changed

+54
-0
lines changed

controllers/clustercache/cluster_cache.go

Lines changed: 23 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -60,13 +60,22 @@ type Options struct {
6060
// will never be created.
6161
WatchFilterValue string
6262

63+
// ClusterFilter is a function that can be used to filter which clusters should be handled
64+
// by the ClusterCache. If nil, all clusters will be handled. If set, only clusters for which
65+
// the filter returns true will be handled.
66+
ClusterFilter ClusterFilter
67+
6368
// Cache are the cache options for the caches that are created per cluster.
6469
Cache CacheOptions
6570

6671
// Client are the client options for the clients that are created per cluster.
6772
Client ClientOptions
6873
}
6974

75+
// ClusterFilter is a function that filters which clusters should be handled by the ClusterCache.
76+
// It returns true if the cluster should be handled, false otherwise.
77+
type ClusterFilter func(cluster *clusterv1.Cluster) bool
78+
7079
// CacheOptions are the cache options for the caches that are created per cluster.
7180
type CacheOptions struct {
7281
// SyncPeriod is the sync period of the cache.
@@ -357,6 +366,11 @@ type clusterCache struct {
357366

358367
// cacheCtxCancel is used during Shutdown to stop caches.
359368
cacheCtxCancel context.CancelCauseFunc
369+
370+
// clusterFilter is a function that can be used to filter which clusters should be handled
371+
// by the ClusterCache. If nil, all clusters will be handled. If set, only clusters for which
372+
// the filter returns true will be handled.
373+
clusterFilter ClusterFilter
360374
}
361375

362376
// clusterSource stores the necessary information so we can enqueue reconcile.Requests for reconcilers that
@@ -451,6 +465,15 @@ func (cc *clusterCache) Reconcile(ctx context.Context, req reconcile.Request) (r
451465
return ctrl.Result{RequeueAfter: defaultRequeueAfter}, nil
452466
}
453467

468+
// Apply cluster filter if set
469+
if cc.clusterFilter != nil && !cc.clusterFilter(cluster) {
470+
log.V(6).Info("Cluster filtered out by ClusterFilter, not connecting")
471+
accessor.Disconnect(ctx)
472+
cc.deleteClusterAccessor(clusterKey)
473+
cc.cleanupClusterSourcesForCluster(clusterKey)
474+
return ctrl.Result{}, nil
475+
}
476+
454477
// Return if infrastructure is not ready yet to avoid trying to open a connection when it cannot succeed.
455478
// Requeue is not needed as there will be a new reconcile.Request when Cluster.status.initialization.infrastructureProvisioned is set.
456479
if !ptr.Deref(cluster.Status.Initialization.InfrastructureProvisioned, false) {

controllers/clustercache/cluster_cache_test.go

Lines changed: 31 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -57,6 +57,9 @@ func TestReconcile(t *testing.T) {
5757
ObjectMeta: metav1.ObjectMeta{
5858
Name: "test-cluster",
5959
Namespace: metav1.NamespaceDefault,
60+
Labels: map[string]string{
61+
"cluster.x-k8s.io/included-in-clustercache-tests": "true",
62+
},
6063
},
6164
Spec: clusterv1.ClusterSpec{
6265
ControlPlaneRef: clusterv1.ContractVersionedObjectReference{
@@ -87,6 +90,9 @@ func TestReconcile(t *testing.T) {
8790
clusterAccessorConfig: accessorConfig,
8891
clusterAccessors: make(map[client.ObjectKey]*clusterAccessor),
8992
cacheCtx: context.Background(),
93+
clusterFilter: func(cluster *clusterv1.Cluster) bool {
94+
return (cluster.ObjectMeta.Labels["cluster.x-k8s.io/included-in-clustercache-tests"] == "true")
95+
},
9096
}
9197

9298
// Add a Cluster source and start it (queue will be later used to verify the source works correctly)
@@ -110,6 +116,31 @@ func TestReconcile(t *testing.T) {
110116
testCluster.Status.Initialization.InfrastructureProvisioned = ptr.To(true)
111117
g.Expect(env.Status().Patch(ctx, testCluster, patch)).To(Succeed())
112118

119+
// Exclude from clustercache by changing the label
120+
patch = client.MergeFrom(testCluster.DeepCopy())
121+
testCluster.ObjectMeta.Labels = map[string]string{
122+
"cluster.x-k8s.io/included-in-clustercache-tests": "false",
123+
}
124+
g.Expect(env.Patch(ctx, testCluster, patch)).To(Succeed())
125+
// Sanity check that the clusterFilter does not include the cluster now
126+
g.Expect(cc.clusterFilter(testCluster)).To((BeFalse()))
127+
128+
// Reconcile, cluster should be ignored now
129+
// => no requeue, no cluster accessor created
130+
res, err = cc.Reconcile(ctx, reconcile.Request{NamespacedName: clusterKey})
131+
g.Expect(err).ToNot(HaveOccurred())
132+
g.Expect(res).To(Equal(ctrl.Result{}))
133+
g.Expect(res.IsZero()).To(BeTrue())
134+
135+
// Put the label back
136+
patch = client.MergeFrom(testCluster.DeepCopy())
137+
testCluster.ObjectMeta.Labels = map[string]string{
138+
"cluster.x-k8s.io/included-in-clustercache-tests": "true",
139+
}
140+
g.Expect(env.Patch(ctx, testCluster, patch)).To(Succeed())
141+
// Sanity check that the clusterFilter does include the cluster now
142+
g.Expect(cc.clusterFilter(testCluster)).To((BeTrue()))
143+
113144
// Reconcile, kubeconfig Secret doesn't exist
114145
// => accessor.Connect will fail so we expect a retry with ConnectionCreationRetryInterval.
115146
res, err = cc.Reconcile(ctx, reconcile.Request{NamespacedName: clusterKey})

0 commit comments

Comments
 (0)