Skip to content

Commit 6997617

Browse files
committed
Add --node-selector-label flag to VKC controller
When creating VirtualKubernetesClusters in particular, it will sometimes be useful to limit the nodes used by the vCluster with a node selector. The setting `fromHost.nodes.selector.labels` both limits which nodes are visible from inside the vcluster, and which nodes pods can be scheduled on. I've represented this as a flag `--node-selector-label` -- "label" because the _value_ is going to be particular to the vcluster. It has a counterpart in the Helm chart values.yaml.
1 parent af3a975 commit 6997617

File tree

4 files changed

+33
-13
lines changed

4 files changed

+33
-13
lines changed

charts/kubernetes/templates/virtualcluster-controller/deployment.yaml

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -26,6 +26,9 @@ spec:
2626
{{- with $domain := .Values.virtualClusterController.virtualKubernetesClusterDomain }}
2727
- --virtual-kubernetes-cluster-domain={{ $domain }}
2828
{{- end }}
29+
{{- with $label := .Values.virtualClusterController.nodeSelectorLabel }}{{ if $label}}
30+
- --node-selector-label={{ $label }}
31+
{{- end }}{{- end }}
2932
ports:
3033
- name: prometheus
3134
containerPort: 8080

charts/kubernetes/values.yaml

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -30,6 +30,8 @@ virtualClusterController:
3030
image: ~
3131
# Sets the DDNS domain virtual clusters will be part of
3232
virtualKubernetesClusterDomain: ~
33+
# Tells the controller to use a nodeSelector when creating vClusters
34+
nodeSelectorLabel: ""
3335

3436
# Monitor specific configuration.
3537
monitor:

pkg/provisioners/helmapplications/virtualcluster/provisioner.go

Lines changed: 23 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -48,14 +48,19 @@ func init() {
4848
metrics.Registry.MustRegister(durationMetric)
4949
}
5050

51+
type ProvisionerOptions struct {
52+
Domain string
53+
NodeSelectorLabel string
54+
}
55+
5156
type Provisioner struct {
52-
domain string
57+
Options ProvisionerOptions
5358
}
5459

5560
// New returns a new initialized provisioner object.
56-
func New(getApplication application.GetterFunc, domain string) *application.Provisioner {
61+
func New(getApplication application.GetterFunc, options ProvisionerOptions) *application.Provisioner {
5762
p := &Provisioner{
58-
domain: domain,
63+
Options: options,
5964
}
6065

6166
return application.New(getApplication).WithGenerator(p)
@@ -84,7 +89,8 @@ func (p *Provisioner) Values(ctx context.Context, version unikornv1core.Semantic
8489
// and the cost is "what you use", we'll need to worry about billing, so it may
8590
// be prudent to add organization, project and cluster labels to pods.
8691
// We use SNI to demultiplex at the ingress to the correct vcluster instance.
87-
hostname := p.ReleaseName(ctx) + "." + p.domain
92+
releaseName := p.ReleaseName(ctx)
93+
hostname := releaseName + "." + p.Options.Domain
8894

8995
// Allow users to actually hit the cluster.
9096
ingress := map[string]any{
@@ -132,12 +138,21 @@ func (p *Provisioner) Values(ctx context.Context, version unikornv1core.Semantic
132138
"statefulSet": statefulSet,
133139
}
134140

141+
syncNodes := map[string]any{
142+
"enabled": true,
143+
"clearImageStatus": true,
144+
}
145+
if nodeSelectorLabel := p.Options.NodeSelectorLabel; nodeSelectorLabel != "" {
146+
syncNodes["selector"] = map[string]any{
147+
"labels": map[string]string{
148+
nodeSelectorLabel: releaseName,
149+
},
150+
}
151+
}
152+
135153
sync := map[string]any{
136154
"fromHost": map[string]any{
137-
"nodes": map[string]any{
138-
"enabled": true,
139-
"clearImageStatus": true,
140-
},
155+
"nodes": syncNodes,
141156
"runtimeClasses": map[string]any{
142157
"enabled": true,
143158
},

pkg/provisioners/managers/virtualcluster/provisioner.go

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -117,9 +117,8 @@ type Options struct {
117117
// we need to talk to identity to get a token, and then to region
118118
// to ensure cloud identities and networks are provisioned, as well
119119
// as deprovisioning them.
120-
clientOptions coreclient.HTTPClientOptions
121-
// domain vclusters should appear in.
122-
domain string
120+
clientOptions coreclient.HTTPClientOptions
121+
provisionerOptions virtualcluster.ProvisionerOptions
123122
}
124123

125124
func (o *Options) AddFlags(f *pflag.FlagSet) {
@@ -135,7 +134,8 @@ func (o *Options) AddFlags(f *pflag.FlagSet) {
135134
o.regionOptions.AddFlags(f)
136135
o.clientOptions.AddFlags(f)
137136

138-
f.StringVar(&o.domain, "virtual-kubernetes-cluster-domain", "virtual-kubernetes.example.com", "DNS domain for vclusters to be hosts of.")
137+
f.StringVar(&o.provisionerOptions.Domain, "virtual-kubernetes-cluster-domain", "virtual-kubernetes.example.com", "DNS domain for vclusters to be hosts of.")
138+
f.StringVar(&o.provisionerOptions.NodeSelectorLabel, "node-selector-label", "", "Label to use for vCluster node selectors (with the value of the vcluster name).")
139139
}
140140

141141
// Provisioner encapsulates control plane provisioning.
@@ -245,7 +245,7 @@ func (p *Provisioner) getProvisioner(kubeconfig []byte) provisioners.Provisioner
245245
// from the workload pool. This information and the scheduling
246246
// stuff needs passing into the provisioner.
247247
provisioner := remoteCluster.ProvisionOn(
248-
virtualcluster.New(apps.vCluster, p.options.domain).InNamespace(p.cluster.Name),
248+
virtualcluster.New(apps.vCluster, p.options.provisionerOptions).InNamespace(p.cluster.Name),
249249
// NOTE: If you are using a unikorn-provisioned physical cluster as a region
250250
// then you'll end up with two remotes for the same thing, and the
251251
// secrets will alias (aka split brain), so override the secret name

0 commit comments

Comments
 (0)