
Commit c911719

Feature: Optionally retain user PVCs

* Add a retainPVCs configuration to the userdata spec. When the value is set
  to true, the desktop controller avoids placing owner references on the PVC
  object and finalizers on the Desktop Session.
* Update Go to 1.17
* Update dependencies
* Makefile/actions cleanup
* Update the GitHub link in the main layout

1 parent 113a9fd, commit c911719


41 files changed: +1449, -4748 lines

.github/workflows/build.yml

Lines changed: 0 additions & 8 deletions

@@ -33,11 +33,3 @@ jobs:
       - name: Push the manager, app, and kvdi-proxy docker images
         run: VERSION=${{ steps.version.outputs.tag }} make -j 3 push-manager push-app push-kvdi-proxy
         if: ${{ github.event_name != 'pull_request' }}
-
-      - name: Force pkg.go.dev to refresh main
-        run: curl "https://proxy.golang.org/github.com/kvdi/kvdi/@v/${GITHUB_REF##*/}.info"
-        if: ${{ steps.version.outputs.tag == 'latest' && github.event_name != 'pull_request' }}
-
-      - name: Force pkg.go.dev to refresh version
-        run: curl https://proxy.golang.org/github.com/kvdi/kvdi/@v/${{ steps.version.outputs.tag }}.info
-        if: ${{ steps.version.outputs.tag != 'latest' && github.event_name != 'pull_request' }}

Makefile

Lines changed: 52 additions & 71 deletions

@@ -213,30 +213,6 @@ test-in-docker:
 lint-in-docker:
 	$(MAKE) run-in-docker TEST_CMD="make lint"
 
-##
-## # Helm Generation
-##
-
-## make helm-chart  # Generates the templates for the helm chart.
-helm-chart: bundle chart-yaml
-	bash hack/gen-helm-templates.sh
-
-## make chart-yaml  # Generate the Chart.yaml from the template in hack/Makevars.mk.
-chart-yaml:
-	echo "$$CHART_YAML" > deploy/charts/kvdi/Chart.yaml
-
-## make package-chart  # Packages the helm chart.
-package-chart: ${HELM} helm-chart
-	cd deploy/charts && helm package kvdi
-
-## make package-index  # Create the helm repo package index.
-package-index:
-	cd deploy/charts && helm repo index .
-
-## make helm-docs  # Generates the helm chart documentation.
-helm-docs: helm-chart
-	docker run --rm -v "$(PWD)/deploy/charts/kvdi:/helm-docs" -u $(shell id -u) jnorwood/helm-docs:latest
-
 ##
 ## # Local Testing with k3d
 ##
@@ -246,21 +222,22 @@ $(K3D):
 	curl -s https://raw.githubusercontent.com/rancher/k3d/main/install.sh | K3D_INSTALL_DIR=$(GOBIN) bash -s -- --no-sudo
 
 # Ensures a repo-local installation of kubectl
-${KUBECTL}:
-	$(call download_bin,${KUBECTL},${KUBECTL_DOWNLOAD_URL})
+$(KUBECTL):
+	$(call download_bin,$(KUBECTL),${KUBECTL_DOWNLOAD_URL})
 
 # Ensures a repo-local installation of helm
-${HELM}:
+$(HELM):
 	$(call get_helm)
 
 ## make test-cluster  # Make a local k3d cluster for testing.
 test-cluster: $(K3D)
 	$(K3D) cluster create $(CLUSTER_NAME) \
 		--kubeconfig-update-default=false \
-		--k3s-server-arg="--disable=traefik" \
-		--volume="/dev/shm:/dev/shm@server[0]" \
-		--volume="/dev/kvm:/dev/kvm@server[0]" \
+		--k3s-arg --disable=traefik@server:0 \
+		--volume="/dev/shm:/dev/shm@server:0" \
+		--volume="/dev/kvm:/dev/kvm@server:0" \
 		-p 443:443@loadbalancer -p 5556:5556@loadbalancer
+	mkdir -p $(shell dirname $(CLUSTER_KUBECONFIG))
 	$(K3D) kubeconfig get $(CLUSTER_NAME) > $(CLUSTER_KUBECONFIG)
 
 ##
@@ -279,60 +256,64 @@ load-app: $(K3D) build-app
 load-kvdi-proxy: $(K3D) build-kvdi-proxy
 	$(call load_image,${KVDI_PROXY_IMAGE})
 
-KUBECTL_K3D = ${KUBECTL} --kubeconfig ${CLUSTER_KUBECONFIG}
-HELM_K3D = ${HELM} --kubeconfig ${CLUSTER_KUBECONFIG}
+KUBECTL_K3D = $(KUBECTL) --kubeconfig ${CLUSTER_KUBECONFIG}
+HELM_K3D = $(HELM) --kubeconfig ${CLUSTER_KUBECONFIG}
 
 ## make test-vault  # Deploys a vault instance into the k3d cluster.
-test-vault: ${KUBECTL} ${HELM}
-	${HELM} repo add hashicorp https://helm.releases.hashicorp.com
-	${HELM_K3D} upgrade --install vault hashicorp/vault \
+test-vault: $(KUBECTL) $(HELM)
+	$(HELM) repo add hashicorp https://helm.releases.hashicorp.com
+	$(HELM_K3D) upgrade --install vault hashicorp/vault \
 		--set server.dev.enabled=true \
 		--wait
-	${KUBECTL_K3D} wait --for=condition=ready pod vault-0 --timeout=300s
-	${KUBECTL_K3D} exec -it vault-0 -- vault auth enable kubernetes
-	${KUBECTL_K3D} \
+	$(KUBECTL_K3D) wait --for=condition=ready pod vault-0 --timeout=300s
+	$(KUBECTL_K3D) exec -it vault-0 -- vault auth enable kubernetes
+	$(KUBECTL_K3D) \
 		config view --raw --minify --flatten -o jsonpath='{.clusters[].cluster.certificate-authority-data}' | \
 		base64 --decode > ca.crt
-	${KUBECTL_K3D} exec -it vault-0 -- vault write auth/kubernetes/config \
-		token_reviewer_jwt=`${KUBECTL_K3D} exec -it vault-0 -- cat /var/run/secrets/kubernetes.io/serviceaccount/token` \
+	$(KUBECTL_K3D) exec -it vault-0 -- vault write auth/kubernetes/config \
+		token_reviewer_jwt=`$(KUBECTL_K3D) exec -it vault-0 -- cat /var/run/secrets/kubernetes.io/serviceaccount/token` \
 		kubernetes_host=https://kubernetes.default:443 \
 		kubernetes_ca_cert="`cat ca.crt`"
 	rm ca.crt
-	echo "$$VAULT_POLICY" | ${KUBECTL_K3D} exec -it vault-0 -- vault policy write kvdi -
-	${KUBECTL_K3D} exec -it vault-0 -- vault secrets enable --path=kvdi/ kv
-	${KUBECTL_K3D} exec -it vault-0 -- vault write auth/kubernetes/role/kvdi \
+	echo "$$VAULT_POLICY" | $(KUBECTL_K3D) exec -it vault-0 -- vault policy write kvdi -
+	$(KUBECTL_K3D) exec -it vault-0 -- vault secrets enable --path=kvdi/ kv
+	$(KUBECTL_K3D) exec -it vault-0 -- vault write auth/kubernetes/role/kvdi \
 		bound_service_account_names=kvdi-app,kvdi-manager \
 		bound_service_account_namespaces=default \
 		policies=kvdi \
 		ttl=1h
 
 ## make get-vault-token  # Returns a token that can be used to login to vault from the CLI or UI.
 get-vault-token:
-	${KUBECTL_K3D} exec -it vault-0 -- vault token create | grep token | head -n1
+	$(KUBECTL_K3D) exec -it vault-0 -- vault token create | grep token | head -n1
 
 ## make test-ldap  # Deploys a test LDAP server into the k3d cluster.
 test-ldap:
-	${KUBECTL_K3D} apply -f hack/glauth.yaml
+	$(KUBECTL_K3D) apply -f hack/glauth.yaml
 
 ## make test-oidc  # Deploys a test OIDC provider using dex
 test-oidc:
-	${KUBECTL_K3D} apply -f hack/oidc.yaml
+	$(KUBECTL_K3D) apply -f hack/oidc.yaml
 
 ## make test-image-populator  # Deploys the imagepopulator CSI plugin
 test-image-populator:
-	${KUBECTL_K3D} apply -f https://raw.githubusercontent.com/kubernetes-csi/csi-driver-image-populator/master/deploy/kubernetes-1.16/csi-image-daemonset.yaml
-	${KUBECTL_K3D} apply -f https://raw.githubusercontent.com/kubernetes-csi/csi-driver-image-populator/master/deploy/kubernetes-1.16/csi-image-csidriverinfo.yaml
+	$(KUBECTL_K3D) apply -f https://raw.githubusercontent.com/kubernetes-csi/csi-driver-image-populator/master/deploy/kubernetes-1.16/csi-image-daemonset.yaml
+	$(KUBECTL_K3D) apply -f https://raw.githubusercontent.com/kubernetes-csi/csi-driver-image-populator/master/deploy/kubernetes-1.16/csi-image-csidriverinfo.yaml
 
 ##
 ## make deploy  # Deploys kVDI into the local k3d cluster.
 .PHONY: deploy
 HELM_ARGS ?=
-deploy: ${HELM} chart-yaml
-	${HELM_K3D} upgrade --install kvdi deploy/charts/kvdi --wait ${HELM_ARGS}
+REPO_URL ?= https://kvdi.github.io/helm-charts/charts
+deploy: $(HELM)
+	$(HELM_K3D) repo add kvdi $(REPO_URL)
+	$(HELM_K3D) upgrade --install kvdi kvdi/kvdi \
+		--set manager.image.tag=$(VERSION) \
+		--wait ${HELM_ARGS}
 
 ## make deploy-crds  # Deploys just the CRDs into the k3d cluster.
 deploy-crds: manifests kustomize
-	$(KUSTOMIZE) build config/crd | ${KUBECTL_K3D} apply -f -
+	$(KUSTOMIZE) build config/crd | $(KUBECTL_K3D) apply -f -
 
 ## make deploy-with-vault  # Deploys kVDI into the k3d cluster with a vault configuration for the product of `test-vault`.
 deploy-with-vault:
@@ -341,36 +322,36 @@ deploy-with-vault:
 ## make deploy-with-ldap  # Deploys kVDI into the k3d cluster with an LDAP configuration for the product of `test-ldap`.
 deploy-with-ldap:
 	$(MAKE) deploy HELM_ARGS="-f deploy/examples/example-ldap-helm-values.yaml"
-	${KUBECTL_K3D} apply -f hack/glauth-role.yaml
+	$(KUBECTL_K3D) apply -f hack/glauth-role.yaml
 
 ## make deploy-with-oidc  # Deploys kVDI into the k3d cluster with an OIDC configuration for the product of `test-oidc`.
 ##                        # Requires you set kvdi.local to the localhost in /etc/hosts.
 deploy-with-oidc:
 	$(MAKE) deploy HELM_ARGS="-f deploy/examples/example-oidc-helm-values.yaml"
-	${KUBECTL_K3D} apply -f hack/oidc-role.yaml
+	$(KUBECTL_K3D) apply -f hack/oidc-role.yaml
 
 ##
 ## make example-vdi-templates  # Deploys the example VDITemplates into the k3d cluster.
-example-vdi-templates: ${KUBECTL}
-	${KUBECTL_K3D} apply \
+example-vdi-templates: $(KUBECTL)
+	$(KUBECTL_K3D) apply \
 		-f deploy/examples/example-desktop-templates.yaml
 
 ##
 ## make restart-manager  # Restart the manager pod.
-restart-manager: ${KUBECTL}
-	${KUBECTL_K3D} delete pod -l app.kubernetes.io/name=kvdi
+restart-manager: $(KUBECTL)
+	$(KUBECTL_K3D) delete pod -l app.kubernetes.io/name=kvdi
 
 ## make restart-app  # Restart the app pod.
-restart-app: ${KUBECTL}
-	${KUBECTL_K3D} delete pod -l vdiComponent=app
+restart-app: $(KUBECTL)
+	$(KUBECTL_K3D) delete pod -l vdiComponent=app
 
 ## make restart  # Restart the manager and app pod.
 restart: restart-manager restart-app
 
 ## make clean-cluster  # Remove all kVDI components from the cluster for a fresh start.
-clean-cluster: ${KUBECTL} ${HELM}
-	${KUBECTL_K3D} delete --ignore-not-found certificate --all
-	${HELM_K3D} del kvdi
+clean-cluster: $(KUBECTL) $(HELM)
+	$(KUBECTL_K3D) delete --ignore-not-found certificate --all
+	$(HELM_K3D) del kvdi
 
 ## make remove-cluster  # Deletes the k3d cluster.
 remove-cluster: $(K3D)
@@ -382,18 +363,18 @@ remove-cluster: $(K3D)
 ##
 
 ## make forward-app  # Run a kubectl port-forward to the app pod.
-forward-app: ${KUBECTL}
-	${KUBECTL_K3D} port-forward --address 0.0.0.0 svc/kvdi-app 8443:443
+forward-app: $(KUBECTL)
+	$(KUBECTL_K3D) port-forward --address 0.0.0.0 svc/kvdi-app 8443:443
 
 ## make get-app-secret  # Get the app client TLS certificate for debugging.
-get-app-secret: ${KUBECTL}
-	${KUBECTL_K3D} get secret kvdi-app-client -o json | jq -r '.data["ca.crt"]' | base64 -d > _bin/ca.crt
-	${KUBECTL_K3D} get secret kvdi-app-client -o json | jq -r '.data["tls.crt"]' | base64 -d > _bin/tls.crt
-	${KUBECTL_K3D} get secret kvdi-app-client -o json | jq -r '.data["tls.key"]' | base64 -d > _bin/tls.key
+get-app-secret: $(KUBECTL)
+	$(KUBECTL_K3D) get secret kvdi-app-client -o json | jq -r '.data["ca.crt"]' | base64 -d > _bin/ca.crt
+	$(KUBECTL_K3D) get secret kvdi-app-client -o json | jq -r '.data["tls.crt"]' | base64 -d > _bin/tls.crt
+	$(KUBECTL_K3D) get secret kvdi-app-client -o json | jq -r '.data["tls.key"]' | base64 -d > _bin/tls.key
 
 ## make get-admin-password  # Get the generated admin password for kVDI.
-get-admin-password: ${KUBECTL}
-	${KUBECTL_K3D} get secret kvdi-admin-secret -o json | jq -r .data.password | base64 -d && echo
+get-admin-password: $(KUBECTL)
+	$(KUBECTL_K3D) get secret kvdi-admin-secret -o json | jq -r .data.password | base64 -d && echo
 
 ##
 ## # Doc generation
@@ -426,4 +407,4 @@ check-release:
 		echo "You must specify a VERSION for release" ; exit 1 ; \
 	fi
 
-prep-release: check-release generate manifests helm-chart api-docs helm-docs kvdictl-docs package-chart package-index
+prep-release: check-release generate manifests api-docs kvdictl-docs

apis/app/v1/vdicluster_common_util.go

Lines changed: 10 additions & 2 deletions

@@ -107,8 +107,8 @@ func (c *VDICluster) GetUserdataSelector() *UserdataSelector {
 
 // GetUserdataVolumeSpec returns the spec for creating PVCs for user persistence.
 func (c *VDICluster) GetUserdataVolumeSpec() *corev1.PersistentVolumeClaimSpec {
-	if c.Spec.UserdataSpec != nil && !reflect.DeepEqual(*c.Spec.UserdataSpec, corev1.PersistentVolumeClaimSpec{}) {
-		return c.Spec.UserdataSpec
+	if c.Spec.UserdataSpec != nil && !reflect.DeepEqual(*c.Spec.UserdataSpec.PersistentVolumeClaimSpec, corev1.PersistentVolumeClaimSpec{}) {
+		return c.Spec.UserdataSpec.PersistentVolumeClaimSpec
 	}
 	return nil
 }
@@ -125,3 +125,11 @@ func (c *VDICluster) GetUserdataVolumeMapName() types.NamespacedName {
 		Namespace: c.GetCoreNamespace(),
 	}
 }
+
+// RetainPVCs returns true if userdata PVCs should be retained across sessions.
+func (c *VDICluster) RetainPVCs() bool {
+	if c.Spec.UserdataSpec != nil {
+		return c.Spec.UserdataSpec.RetainPVCs
+	}
+	return false
+}
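
The helper above only reads the configuration; the behavior described in the commit message lives in the desktop controller, which is not shown in this commit view. A minimal sketch of how a reconciler could consume `RetainPVCs()`, assuming a hypothetical `newPVCForUser` helper, a hypothetical naming scheme, and the standard controller-runtime client:

```go
package controllers

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"

	appv1 "github.com/kvdi/kvdi/apis/app/v1"
	desktopsv1 "github.com/kvdi/kvdi/apis/desktops/v1"
)

// reconcileUserdataPVC is illustrative only; the real controller code is not
// part of the rendered diff.
func reconcileUserdataPVC(ctx context.Context, c client.Client, scheme *runtime.Scheme, cluster *appv1.VDICluster, session *desktopsv1.Session) error {
	pvc := newPVCForUser(cluster, session)
	if !cluster.RetainPVCs() {
		// Default behavior: tie the PVC to the session so deleting the
		// session garbage-collects the claim and frees the volume.
		if err := controllerutil.SetControllerReference(session, pvc, scheme); err != nil {
			return err
		}
	}
	// With retainPVCs: true, no owner reference (and no session finalizer)
	// is attached, so the claim outlives the session.
	return c.Create(ctx, pvc)
}

// newPVCForUser is a hypothetical helper; the name format is an assumption.
func newPVCForUser(cluster *appv1.VDICluster, session *desktopsv1.Session) *corev1.PersistentVolumeClaim {
	return &corev1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{
			Name:      session.GetUser() + "-userdata",
			Namespace: session.GetNamespace(),
		},
		Spec: *cluster.GetUserdataVolumeSpec(),
	}
}
```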

apis/app/v1/vdicluster_types.go

Lines changed: 14 additions & 1 deletion

@@ -39,7 +39,7 @@ type VDIClusterSpec struct {
 	// **NOTE:** Even though the controller will try to force the reclaim policy on
 	// created volumes to `Retain`, you may want to set it explicitly on your storage-class
 	// controller as an extra safeguard.
-	UserdataSpec *corev1.PersistentVolumeClaimSpec `json:"userdataSpec,omitempty"`
+	UserdataSpec *UserdataSpec `json:"userdataSpec,omitempty"`
 	// A configuration for selecting pre-existing PVCs to use as the $HOME directory for
 	// sessions. This configuration takes precedence over `userdataSpec`.
 	UserdataSelector *UserdataSelector `json:"userdataSelector,omitempty"`
@@ -55,6 +55,19 @@ type VDIClusterSpec struct {
 	Metrics *MetricsConfig `json:"metrics,omitempty"`
 }
 
+// UserdataSpec inlines the corev1 PersistentVolumeClaimSpec and adds
+// fields for controlling how kvdi works with volumes.
+type UserdataSpec struct {
+	*corev1.PersistentVolumeClaimSpec `json:",inline"`
+
+	// RetainPVCs tells the desktop controller to leave PVCs intact after they
+	// are allocated for a user. The default behavior is to free the volume from
+	// the PVC after each desktop session so it can be used across other namespaces.
+	// Note that if you set this value to `true`, users will only be able to launch
+	// sessions in a single namespace (unless the PVC is manually removed).
+	RetainPVCs bool `json:"retainPVCs,omitempty"`
+}
+
 // UserdataSelector represents a means for selecting pre-existing userdata PVCs based off
 // a label or name match. Note that you will need to restrict templates to launching in
 // namespaces that contain the PVCs yourself.
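
Because the PVC spec is embedded with `json:",inline"`, its fields sit at the same level as `retainPVCs` when the type is serialized in a manifest. A minimal sketch of populating the new type from Go; the storage class and size values below are illustrative, not defaults from the project:

```go
package main

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"

	appv1 "github.com/kvdi/kvdi/apis/app/v1"
)

func exampleUserdataSpec() *appv1.UserdataSpec {
	return &appv1.UserdataSpec{
		// Embedded PVC spec: these fields render inline in YAML,
		// alongside retainPVCs.
		PersistentVolumeClaimSpec: &corev1.PersistentVolumeClaimSpec{
			AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},
			Resources: corev1.ResourceRequirements{
				Requests: corev1.ResourceList{
					corev1.ResourceStorage: resource.MustParse("5Gi"),
				},
			},
		},
		// Keep the claim (no owner reference or session finalizer) so the
		// user's $HOME survives session teardown.
		RetainPVCs: true,
	}
}
```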

apis/app/v1/zz_generated.deepcopy.go

Lines changed: 22 additions & 1 deletion (generated file; diff not rendered)

apis/desktops/v1/zz_generated.deepcopy.go

Lines changed: 1 addition & 0 deletions (generated file; diff not rendered)

apis/rbac/v1/zz_generated.deepcopy.go

Lines changed: 1 addition & 0 deletions (generated file; diff not rendered)

build/Dockerfile.base

Lines changed: 1 addition & 1 deletion

@@ -1,7 +1,7 @@
 ######################
 # Dependencies image #
 ######################
-FROM golang:1.16-alpine as builder
+FROM golang:1.17-alpine as builder
 
 RUN apk --update-cache add upx curl
 
