diff --git a/api/v1alpha5/virtualmachine_storage_types.go b/api/v1alpha5/virtualmachine_storage_types.go index 16614b424..a38ed4beb 100644 --- a/api/v1alpha5/virtualmachine_storage_types.go +++ b/api/v1alpha5/virtualmachine_storage_types.go @@ -222,8 +222,8 @@ type PersistentVolumeClaimVolumeSource struct { type UnmanagedVolumeClaimVolumeType string const ( - UnmanagedVolumeClaimVolumeTypeFromImage = "FromImage" - UnmanagedVolumeClaimVolumeTypeFromVM = "FromVM" + UnmanagedVolumeClaimVolumeTypeFromImage UnmanagedVolumeClaimVolumeType = "FromImage" + UnmanagedVolumeClaimVolumeTypeFromVM UnmanagedVolumeClaimVolumeType = "FromVM" ) type UnmanagedVolumeClaimVolumeSource struct { @@ -237,12 +237,12 @@ type UnmanagedVolumeClaimVolumeSource struct { // +required - // Name describes the name of the unmanaged volume. + // Name describes the information used to identify the unmanaged volume. // - // For volumes from an image, the name is from the image's + // For volumes from an image, the value is from the image's // status.disks[].name field. // - // For volumes from the VM, the name is from the VM's + // For volumes from the VM, the value is from the VM's // status.volumes[].name field. // // Please note, specifying the name of an existing, managed volume is not diff --git a/config/crd/bases/vmoperator.vmware.com_virtualmachinereplicasets.yaml b/config/crd/bases/vmoperator.vmware.com_virtualmachinereplicasets.yaml index 1e1428034..259ab9672 100644 --- a/config/crd/bases/vmoperator.vmware.com_virtualmachinereplicasets.yaml +++ b/config/crd/bases/vmoperator.vmware.com_virtualmachinereplicasets.yaml @@ -7556,12 +7556,12 @@ spec: properties: name: description: |- - Name describes the name of the unmanaged volume. + Name describes the information used to identify the unmanaged volume. - For volumes from an image, the name is from the image's + For volumes from an image, the value is from the image's status.disks[].name field. - For volumes from the VM, the name is from the VM's + For volumes from the VM, the value is from the VM's status.volumes[].name field. Please note, specifying the name of an existing, managed volume is not diff --git a/config/crd/bases/vmoperator.vmware.com_virtualmachines.yaml b/config/crd/bases/vmoperator.vmware.com_virtualmachines.yaml index 47523cef7..45de8e155 100644 --- a/config/crd/bases/vmoperator.vmware.com_virtualmachines.yaml +++ b/config/crd/bases/vmoperator.vmware.com_virtualmachines.yaml @@ -12184,12 +12184,12 @@ spec: properties: name: description: |- - Name describes the name of the unmanaged volume. + Name describes the information used to identify the unmanaged volume. - For volumes from an image, the name is from the image's + For volumes from an image, the value is from the image's status.disks[].name field. - For volumes from the VM, the name is from the VM's + For volumes from the VM, the value is from the VM's status.volumes[].name field. 
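Reviewer note: for orientation only, a minimal sketch (not part of this change) of how a VM spec could reference an image-backed disk through the unmanaged volume claim types added above. The vmopv1 field names are taken from this diff; the claim and disk names are hypothetical placeholders.

// Sketch only: assumes the vmopv1 v1alpha5 types introduced in this change.
package main

import (
	corev1 "k8s.io/api/core/v1"

	vmopv1 "github.com/vmware-tanzu/vm-operator/api/v1alpha5"
)

func exampleVolume() vmopv1.VirtualMachineVolume {
	return vmopv1.VirtualMachineVolume{
		// Matches the image's status.disks[].name value for this disk.
		Name: "my-image-disk-0",
		VirtualMachineVolumeSource: vmopv1.VirtualMachineVolumeSource{
			PersistentVolumeClaim: &vmopv1.PersistentVolumeClaimVolumeSource{
				PersistentVolumeClaimVolumeSource: corev1.PersistentVolumeClaimVolumeSource{
					// Pre-created PVC; hypothetical name.
					ClaimName: "my-vm-boot-disk",
				},
				UnmanagedVolumeClaim: &vmopv1.UnmanagedVolumeClaimVolumeSource{
					Type: vmopv1.UnmanagedVolumeClaimVolumeTypeFromImage,
					Name: "my-image-disk-0",
				},
			},
		},
	}
}

func main() { _ = exampleVolume() }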
Please note, specifying the name of an existing, managed volume is not diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 461987f4f..baa88c15b 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -104,6 +104,14 @@ rules: verbs: - get - list +- apiGroups: + - cns.vmware.com + resources: + - cnsregistervolumes + verbs: + - get + - list + - watch - apiGroups: - cns.vmware.com resources: diff --git a/pkg/constants/constants.go b/pkg/constants/constants.go index 28f3ae1c8..979fb33fb 100644 --- a/pkg/constants/constants.go +++ b/pkg/constants/constants.go @@ -240,4 +240,8 @@ const ( // ReconcilePriorityAnnotationKey is the annotation key that specifies the // reconcile priority for an object. ReconcilePriorityAnnotationKey = "vmoperator.vmware.com.protected/reconcile-priority" + + // CreatedByLabel is the label key that indicates an object was + // created on behalf of a VM Operator VM. The value is the name of a VM. + CreatedByLabel = "vmoperator.vmware.com/created-by" ) diff --git a/pkg/providers/vsphere/contentlibrary/content_library_utils.go b/pkg/providers/vsphere/contentlibrary/content_library_utils.go index 536d40ed7..e114139cf 100644 --- a/pkg/providers/vsphere/contentlibrary/content_library_utils.go +++ b/pkg/providers/vsphere/contentlibrary/content_library_utils.go @@ -14,7 +14,6 @@ import ( "github.com/vmware/govmomi/vim25/mo" vimtypes "github.com/vmware/govmomi/vim25/types" "github.com/vmware/govmomi/vmdk" - "k8s.io/apimachinery/pkg/api/resource" "sigs.k8s.io/controller-runtime/pkg/client" vmopv1 "github.com/vmware-tanzu/vm-operator/api/v1alpha5" @@ -22,6 +21,7 @@ import ( "github.com/vmware-tanzu/vm-operator/pkg/conditions" pkgconst "github.com/vmware-tanzu/vm-operator/pkg/constants" "github.com/vmware-tanzu/vm-operator/pkg/providers/vsphere/constants" + pkgutil "github.com/vmware-tanzu/vm-operator/pkg/util" kubeutil "github.com/vmware-tanzu/vm-operator/pkg/util/kube" "github.com/vmware-tanzu/vm-operator/pkg/util/ptr" ) @@ -158,46 +158,21 @@ func UpdateVmiWithVirtualMachine( for _, bd := range vm.Config.Hardware.Device { if disk, ok := bd.(*vimtypes.VirtualDisk); ok { - var ( - capacity *resource.Quantity - size *resource.Quantity - ) - - var uuid string - switch tb := disk.Backing.(type) { - case *vimtypes.VirtualDiskSeSparseBackingInfo: - uuid = tb.Uuid - case *vimtypes.VirtualDiskSparseVer2BackingInfo: - uuid = tb.Uuid - case *vimtypes.VirtualDiskFlatVer2BackingInfo: - uuid = tb.Uuid - case *vimtypes.VirtualDiskLocalPMemBackingInfo: - uuid = tb.Uuid - case *vimtypes.VirtualDiskRawDiskMappingVer1BackingInfo: - uuid = tb.Uuid - case *vimtypes.VirtualDiskRawDiskVer2BackingInfo: - uuid = tb.Uuid - case *vimtypes.VirtualDiskPartitionedRawDiskVer2BackingInfo: - uuid = tb.Uuid - } - - if uuid != "" { + if info := pkgutil.GetVirtualDiskInfo(disk); info.UUID != "" { di, _ := vmdk.GetVirtualDiskInfoByUUID( ctx, nil, /* client is nil since props are not re-fetched */ vm, /* use props from this object */ false, /* do not refetch props */ false, /* include disks related to snapshots */ - uuid) - capacity = kubeutil.BytesToResource(di.CapacityInBytes) - size = kubeutil.BytesToResource(di.Size) + info.UUID) status.Disks = append( status.Disks, vmopv1.VirtualMachineImageDiskInfo{ - Name: uuid, - Limit: capacity, - Requested: size, + Name: info.UUID, + Limit: kubeutil.BytesToResource(di.CapacityInBytes), + Requested: kubeutil.BytesToResource(di.Size), }) } } diff --git a/pkg/providers/vsphere/vmprovider_vm.go b/pkg/providers/vsphere/vmprovider_vm.go index 
f04a553a5..058a3b287 100644 --- a/pkg/providers/vsphere/vmprovider_vm.go +++ b/pkg/providers/vsphere/vmprovider_vm.go @@ -29,6 +29,7 @@ import ( corev1 "k8s.io/api/core/v1" storagev1 "k8s.io/api/storage/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" apierrorsutil "k8s.io/apimachinery/pkg/util/errors" ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" @@ -67,6 +68,7 @@ import ( "github.com/vmware-tanzu/vm-operator/pkg/util/kube/cource" vmopv1util "github.com/vmware-tanzu/vm-operator/pkg/util/vmopv1" vmutil "github.com/vmware-tanzu/vm-operator/pkg/util/vsphere/vm" + vmconfalldisksarepvcs "github.com/vmware-tanzu/vm-operator/pkg/vmconfig/alldisksarepvcs" vmconfcrypto "github.com/vmware-tanzu/vm-operator/pkg/vmconfig/crypto" vmconfdiskpromo "github.com/vmware-tanzu/vm-operator/pkg/vmconfig/diskpromo" vmconfpolicy "github.com/vmware-tanzu/vm-operator/pkg/vmconfig/policy" @@ -88,6 +90,7 @@ var ( ErrUpdate = pkgerr.NoRequeueNoErr("updated vm") ErrSnapshotRevert = pkgerr.RequeueError{Message: "reverted snapshot"} ErrPolicyNotReady = vmconfpolicy.ErrPolicyNotReady + ErrUnmanagedVols = vmconfalldisksarepvcs.ErrUnmanagedVols ) // VMCreateArgs contains the arguments needed to create a VM on VC. @@ -1087,10 +1090,63 @@ func (vs *vSphereVMProvider) getTags( func (vs *vSphereVMProvider) reconcileUnmanagedToManagedDisks( vmCtx pkgctx.VirtualMachineContext) error { - _ = vmCtx // Remove once vmCtx is used. + return vmconfalldisksarepvcs.Reconcile( + vmCtx, + vs.k8sClient, + nil, + vmCtx.VM, + vmCtx.MoVM, + nil) +} + +// isUnmanagedVolumeFromImage checks if a volume is an unmanaged volume of type +// "FromImage". +func isUnmanagedVolumeFromImage( + vol *vmopv1.VirtualMachineVolume) bool { + + return vol != nil && + vol.PersistentVolumeClaim != nil && + vol.PersistentVolumeClaim.UnmanagedVolumeClaim != nil && + vol.PersistentVolumeClaim.UnmanagedVolumeClaim.Type == vmopv1.UnmanagedVolumeClaimVolumeTypeFromImage +} + +// updateDiskDeviceFromPVC updates a disk device configuration from PVC +// information. +func (vs *vSphereVMProvider) updateDiskDeviceFromPVC( + disk *vimtypes.VirtualDisk, + pvc *corev1.PersistentVolumeClaim, + deviceChange *vimtypes.BaseVirtualDeviceConfigSpec) error { + + _, ok := (*deviceChange).(*vimtypes.VirtualDeviceConfigSpec) + if !ok { + return fmt.Errorf("device change is not a VirtualDeviceConfigSpec") + } + + // Get PVC storage class + storageClass := pvc.Spec.StorageClassName + + // Get PVC requested capacity + var pvcCapacity *resource.Quantity + if requestedStorage, ok := pvc.Spec.Resources.Requests[corev1.ResourceStorage]; ok { + pvcCapacity = &requestedStorage + } + + // Update disk capacity if PVC capacity is larger than current disk capacity + if pvcCapacity != nil { + pvcCapacityBytes, ok := pvcCapacity.AsInt64() + if ok { + currentCapacityBytes := disk.CapacityInBytes + if pvcCapacityBytes > currentCapacityBytes { + disk.CapacityInBytes = pvcCapacityBytes + // Also update CapacityInKB for consistency + disk.CapacityInKB = pvcCapacityBytes / 1024 + } + } + } // TODO(AllDisksArePVCs) // + // // Implement this function. For example, for each of the non-FCD disks in // vmCtx.MoVM.Config.Hardware.Devices, do the following: // @@ -1171,6 +1227,12 @@ func (vs *vSphereVMProvider) reconcileUnmanagedToManagedDisks( // loop, uses the K8s client to list all CnsRegisterVolume objects // with a label vmoperator.vmware.com/created-by whose value is the // name of the current VM. 
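Reviewer note: updateDiskDeviceFromPVC above only ever grows the disk to the PVC's requested storage, never shrinks it. A standalone sketch of that quantity comparison (helper name and values are hypothetical, not part of this change):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

// growTo returns the larger of the disk's current capacity in bytes and the
// PVC's requested storage, i.e. the disk may grow but is never shrunk.
func growTo(currentBytes int64, requested resource.Quantity) int64 {
	if v, ok := requested.AsInt64(); ok && v > currentBytes {
		return v
	}
	return currentBytes
}

func main() {
	req := resource.MustParse("10Gi")
	fmt.Println(growTo(1<<30, req)) // grows a 1GiB disk to the 10GiB request, in bytes
}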
If there are any, then delete them. + // + // Update storage class information on the disk device + // This would typically involve setting storage policy or other + // storage-related configurations based on the PVC's storage class. For now, + // we'll just note that we have the storage class. + _ = storageClass // Acknowledge we have the storage class for future use return nil } @@ -2473,47 +2535,80 @@ func (vs *vSphereVMProvider) vmCreateGenConfigSpecImagePVCDataSourceRefs( vmCtx pkgctx.VirtualMachineContext, createArgs *VMCreateArgs) error { - _, _ = vmCtx, createArgs // Remove once args are used. + // Check if the feature is enabled + if !pkgcfg.FromContext(vmCtx).Features.AllDisksArePVCs { + return nil + } - // TODO(AllDisksArePVCs) - // - // Implement this function. For example, for each of the disks in - // createArgs.ConfigSpec, do the following: - // - // 1. Iterate over spec.volumes and look for an entry with an - // unmanagedVolumeSource with a type of "FromImage" and name set to - // the value that identifies the current disk (the logic for - // deriving this name is located in the UpdateVmiWithOVF and - // UpdateVmiWithVirtualMachine functions in the file - // pkg/providers/vsphere/contentlibrary/content_library_utils.go). - // - // 2. If no such entry exists, then continue to the next iteration of the - // loop. - // - // 3. Use the k8s client's Get function (do not use CreateOrPatch) - // to see if a PersistentVolumeClaim object already exists for - // this disk. The name of the PersistentVolumeClaim object should - // be what was specified in the field - // spec.volumes[].persistentVolumeClaim.claimName from the prior - // step. - // - // If no such entry exists, then return an error indicating the PVC for - // the specified claim is missing and VM creation may not proceed. - // - // 4. Get the PVC's specified storage class and requested capacity. - // - // The PVC *may* specify a different encryption class from the VM, but it - // is probably too much work to handle multiple encryption classes in our - // BYOK reconciler at this point. We will allow the PVC to be recrypted - // by CSI *after* the VM is created and the disk is turned into a PVC. - // - // Update the disk device's specified storage class using the value from - // the PVC. Update the disk device's specified capacity using the value - // from the PVC *if* it is larger than the disk's current requested size. - // - // That should be it! We are not actually registering these disks as PVCs - // at this point. That happens in the VM's update workflow later. For now - // we just need the information about these disks *from* the PVCs. + if createArgs.ConfigSpec.DeviceChange == nil { + return nil + } + + // Build a map of volumes by name for quick lookup + volumeByName := make(map[string]*vmopv1.VirtualMachineVolume) + for i := range vmCtx.VM.Spec.Volumes { + vol := &vmCtx.VM.Spec.Volumes[i] + volumeByName[vol.Name] = vol + } + + var errs []error + + // Iterate through device changes to find disks + for i, deviceChange := range createArgs.ConfigSpec.DeviceChange { + deviceSpec, ok := deviceChange.(*vimtypes.VirtualDeviceConfigSpec) + if !ok || deviceSpec.Device == nil { + continue + } + + disk, ok := deviceSpec.Device.(*vimtypes.VirtualDisk) + if !ok { + continue + } + + // The device label, prior to the actual create, will match the value + // from UVC.Name. 
+ // + // disk.DeviceInfo.GetDescription().Label + + // Generate disk name using the same logic as content library utils + diskInfo := pkgutil.GetVirtualDiskInfo(disk) + diskName := diskInfo.Name() + if diskName == "" { + continue + } + + // Step 1: Look for volume entry with unmanagedVolumeSource type "FromImage" + volume, exists := volumeByName[diskName] + if !exists || !isUnmanagedVolumeFromImage(volume) { + continue // Step 2: Skip if no such entry exists + } + + // Step 3: Get the PVC + pvcName := volume.PersistentVolumeClaim.ClaimName + pvc := &corev1.PersistentVolumeClaim{} + pvcKey := ctrlclient.ObjectKey{Namespace: vmCtx.VM.Namespace, Name: pvcName} + + if err := vs.k8sClient.Get(vmCtx, pvcKey, pvc); err != nil { + if apierrors.IsNotFound(err) { + errs = append(errs, fmt.Errorf("PVC %s for disk %s is missing and VM creation cannot proceed", pvcName, diskName)) + } else { + errs = append(errs, fmt.Errorf("failed to get PVC %s for disk %s: %w", pvcName, diskName, err)) + } + continue + } + + // TODO Clean this up. + disk.Backing.(*vimtypes.VirtualDiskFlatVer2BackingInfo).Uuid = volume.PersistentVolumeClaim.UnmanagedVolumeClaim.UUID + + // Step 4: Update disk device with PVC information + if err := vs.updateDiskDeviceFromPVC(disk, pvc, &createArgs.ConfigSpec.DeviceChange[i]); err != nil { + errs = append(errs, fmt.Errorf("failed to update disk device %s from PVC %s: %w", diskName, pvcName, err)) + } + } + + if len(errs) > 0 { + return apierrorsutil.NewAggregate(errs) + } return nil } diff --git a/pkg/util/devices.go b/pkg/util/devices.go index 19a749e19..dc1281798 100644 --- a/pkg/util/devices.go +++ b/pkg/util/devices.go @@ -5,9 +5,7 @@ package util import ( - "path" "reflect" - "strings" vimtypes "github.com/vmware/govmomi/vim25/types" ) @@ -254,12 +252,9 @@ type VirtualDiskInfo struct { Device *vimtypes.VirtualDisk } -// Name returns the name of the disk to use in the volume status. -// TODO(akutz) This cannot be used in the status as it may not be unique. +// Name returns the name of the disk to use in the volume status as well as +// the UnmanagedVolumeSource.ID field. func (vdi VirtualDiskInfo) Name() string { - if vdi.FileName != "" { - return strings.TrimSuffix(path.Base(vdi.FileName), path.Ext(vdi.FileName)) - } return vdi.Label } diff --git a/pkg/util/pvc.go b/pkg/util/pvc.go new file mode 100644 index 000000000..06dbc44e8 --- /dev/null +++ b/pkg/util/pvc.go @@ -0,0 +1,29 @@ +// © Broadcom. All Rights Reserved. +// The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries. +// SPDX-License-Identifier: Apache-2.0 + +package util + +import ( + "fmt" + + "github.com/cespare/xxhash/v2" +) + +// GeneratePVCName generates a DNS-safe PVC name by concatenating the name of +// the VM, a hyphen, and the first eight characters of an xxhash of the +// diskUUID. +// +// Please see https://xxhash.com/ for more information on xxhash. +func GeneratePVCName(vmName, diskUUID string) string { + // Get an xxHash of the disk's UUID. + hash := xxhash.New() + _, _ = hash.Write([]byte(diskUUID)) + h := fmt.Sprintf("%x", hash.Sum(nil)) + + if len(vmName) > 54 { // DNS name is 63, less 8char for h, less 1char for -. + vmName = vmName[:54] + } + + return fmt.Sprintf("%s-%s", vmName, h[:8]) +} diff --git a/pkg/util/pvc_test.go b/pkg/util/pvc_test.go new file mode 100644 index 000000000..89618f0aa --- /dev/null +++ b/pkg/util/pvc_test.go @@ -0,0 +1,43 @@ +// © Broadcom. All Rights Reserved. +// The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries. 
+// SPDX-License-Identifier: Apache-2.0 + +package util_test + +import ( + "strings" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + pkgutil "github.com/vmware-tanzu/vm-operator/pkg/util" +) + +var _ = DescribeTable("GeneratePVCName", + func(vmName, diskUUID, expectedName string) { + Expect(pkgutil.GeneratePVCName(vmName, diskUUID)).To(Equal(expectedName)) + }, + Entry("normal VM name and disk UUID", "test-vm", "disk-uuid-123", "test-vm-54cddb33"), + Entry("VM name exactly 54 characters", strings.Repeat("a", 54), "disk-uuid-123", strings.Repeat("a", 54)+"-54cddb33"), + Entry("VM name longer than 54 characters gets truncated", strings.Repeat("a", 60), "disk-uuid-123", strings.Repeat("a", 54)+"-54cddb33"), + Entry("VM name exactly 55 characters gets truncated", strings.Repeat("a", 55), "disk-uuid-123", strings.Repeat("a", 54)+"-54cddb33"), + Entry("empty VM name", "", "disk-uuid-123", "-54cddb33"), + Entry("empty disk UUID", "test-vm", "", "test-vm-ef46db37"), + Entry("both empty strings", "", "", "-ef46db37"), + Entry("VM name with special characters", "test-vm_123", "disk-uuid-456", "test-vm_123-c6a0a3e7"), + Entry("VM name with hyphens", "test-vm-name", "disk-uuid-789", "test-vm-name-134e95b6"), + Entry("VM name with dots", "test.vm.name", "disk-uuid-abc", "test.vm.name-6efba0c7"), + Entry("very long VM name", strings.Repeat("a", 100), "disk-uuid-xyz", strings.Repeat("a", 54)+"-fe45de1a"), + Entry("VM name with unicode characters", "测试虚拟机", "disk-uuid-123", "测试虚拟机-54cddb33"), //nolint:gosmopolitan + Entry("disk UUID with special characters", "test-vm", "disk-uuid_123!@#", "test-vm-918f3a0c"), + Entry("very long disk UUID", "test-vm", strings.Repeat("a", 1000), "test-vm-56e43b71"), + Entry("disk UUID with newlines", "test-vm", "disk-uuid\nwith\nnewlines", "test-vm-6d58741c"), + Entry("disk UUID with tabs", "test-vm", "disk-uuid\twith\ttabs", "test-vm-76fa9f93"), + Entry("single character VM name", "a", "disk-uuid-123", "a-54cddb33"), + Entry("single character disk UUID", "test-vm", "x", "test-vm-5c80c096"), + Entry("VM name with numbers only", "123456789", "disk-uuid-123", "123456789-54cddb33"), + Entry("disk UUID with numbers only", "test-vm", "123456789", "test-vm-8cb841db"), + Entry("VM name at boundary 53 characters", strings.Repeat("a", 53), "disk-uuid-123", strings.Repeat("a", 53)+"-54cddb33"), + Entry("VM name at boundary 54 characters", strings.Repeat("a", 54), "disk-uuid-123", strings.Repeat("a", 54)+"-54cddb33"), + Entry("VM name at boundary 55 characters", strings.Repeat("a", 55), "disk-uuid-123", strings.Repeat("a", 54)+"-54cddb33"), +) diff --git a/pkg/util/vmopv1/vm.go b/pkg/util/vmopv1/vm.go index 856936e23..666a944a7 100644 --- a/pkg/util/vmopv1/vm.go +++ b/pkg/util/vmopv1/vm.go @@ -24,7 +24,7 @@ import ( byokv1 "github.com/vmware-tanzu/vm-operator/external/byok/api/v1alpha1" pkgcond "github.com/vmware-tanzu/vm-operator/pkg/conditions" pkgcfg "github.com/vmware-tanzu/vm-operator/pkg/config" - "github.com/vmware-tanzu/vm-operator/pkg/constants" + pkgconst "github.com/vmware-tanzu/vm-operator/pkg/constants" pkglog "github.com/vmware-tanzu/vm-operator/pkg/log" pkgutil "github.com/vmware-tanzu/vm-operator/pkg/util" spqutil "github.com/vmware-tanzu/vm-operator/pkg/util/kube/spq" @@ -193,12 +193,12 @@ func DetermineHardwareVersion( var minVerFromDevs vimtypes.HardwareVersion switch { case pkgutil.HasVirtualPCIPassthroughDeviceChange(configSpec.DeviceChange): - minVerFromDevs = max(imageVersion, constants.MinSupportedHWVersionForPCIPassthruDevices) + 
minVerFromDevs = max(imageVersion, pkgconst.MinSupportedHWVersionForPCIPassthruDevices) case HasPVC(vm): // This only catches volumes set at VM create time. - minVerFromDevs = max(imageVersion, constants.MinSupportedHWVersionForPVC) + minVerFromDevs = max(imageVersion, pkgconst.MinSupportedHWVersionForPVC) case hasvTPM(configSpec.DeviceChange): - minVerFromDevs = max(imageVersion, constants.MinSupportedHWVersionForVTPM) + minVerFromDevs = max(imageVersion, pkgconst.MinSupportedHWVersionForVTPM) } // Return the larger of the two versions. If both versions are zero, then @@ -397,13 +397,49 @@ func CnsRegisterVolumeToVirtualMachineMapper( ctx context.Context, k8sClient client.Client) handler.MapFunc { - // TODO(AllDisksArePVCs) - // - // Implement this function. Please see the function - // reconcileUnmanagedToManagedDisks from vmprovider_vm.go for more - // information on how to implement this function. + if ctx == nil { + panic("context is nil") + } + if k8sClient == nil { + panic("k8sClient is nil") + } + + // For a given CnsRegisterVolume, return reconcile requests for the VM + // that owns it. + return func(ctx context.Context, o client.Object) []reconcile.Request { + if ctx == nil { + panic("context is nil") + } + if o == nil { + panic("object is nil") + } + + logger := pkglog.FromContextOrDefault(ctx). + WithValues("name", o.GetName(), "namespace", o.GetNamespace()) + logger.V(4).Info("Reconciling VMs due to CnsRegisterVolume event") + + var requests []reconcile.Request + + for _, ownerRef := range o.GetOwnerReferences() { + if ownerRef.Kind == "VirtualMachine" { + requests = append(requests, reconcile.Request{ + NamespacedName: client.ObjectKey{ + Namespace: o.GetNamespace(), + Name: ownerRef.Name, + }, + }) + logger.V(4).Info("Found VM owner reference", "vm", ownerRef.Name) + } + } - return nil + if len(requests) > 0 { + logger.V(4).Info( + "Reconciling VMs due to CnsRegisterVolume watch", + "requests", requests) + } + + return requests + } } // KubernetesNodeLabelKey is the name of the label key used to identify a diff --git a/pkg/util/vmopv1/vm_test.go b/pkg/util/vmopv1/vm_test.go index 810b1e8e7..c65df6936 100644 --- a/pkg/util/vmopv1/vm_test.go +++ b/pkg/util/vmopv1/vm_test.go @@ -26,6 +26,7 @@ import ( vmopv1 "github.com/vmware-tanzu/vm-operator/api/v1alpha5" byokv1 "github.com/vmware-tanzu/vm-operator/external/byok/api/v1alpha1" + cnsv1alpha1 "github.com/vmware-tanzu/vm-operator/external/vsphere-csi-driver/api/v1alpha1" pkgcfg "github.com/vmware-tanzu/vm-operator/pkg/config" pkgconst "github.com/vmware-tanzu/vm-operator/pkg/constants" "github.com/vmware-tanzu/vm-operator/pkg/util/kube/cource" @@ -1010,3 +1011,108 @@ var _ = DescribeTable("ConvertPowerState", vmopv1.VirtualMachinePowerStateOff, ), ) + +var _ = Describe("CnsRegisterVolumeToVirtualMachineMapper", func() { + var ( + ctx context.Context + k8sClient ctrlclient.Client + vm *vmopv1.VirtualMachine + mapperFunc func(context.Context, ctrlclient.Object) []reconcile.Request + ) + + BeforeEach(func() { + ctx = context.Background() + k8sClient = builder.NewFakeClient() + vm = &vmopv1.VirtualMachine{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-vm", + Namespace: "test-namespace", + UID: types.UID("test-uid"), + }, + Spec: vmopv1.VirtualMachineSpec{}, + } + Expect(k8sClient.Create(ctx, vm)).To(Succeed()) + mapperFunc = vmopv1util.CnsRegisterVolumeToVirtualMachineMapper(ctx, k8sClient) + }) + + It("should panic with nil context", func() { + Expect(func() { + ctx = nil + vmopv1util.CnsRegisterVolumeToVirtualMachineMapper(ctx, 
k8sClient) + }).To(Panic()) + }) + + It("should panic with nil client", func() { + Expect(func() { + vmopv1util.CnsRegisterVolumeToVirtualMachineMapper(ctx, nil) + }).To(Panic()) + }) + + Context("when CnsRegisterVolume has owner reference to VM", func() { + var crv *cnsv1alpha1.CnsRegisterVolume + + BeforeEach(func() { + crv = &cnsv1alpha1.CnsRegisterVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-crv", + Namespace: vm.Namespace, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: vmopv1.GroupVersion.String(), + Kind: "VirtualMachine", + Name: vm.Name, + UID: vm.UID, + }, + }, + }, + Spec: cnsv1alpha1.CnsRegisterVolumeSpec{ + PvcName: "test-pvc", + }, + } + }) + + It("should return reconcile request for the owner VM", func() { + requests := mapperFunc(ctx, crv) + Expect(requests).To(HaveLen(1)) + Expect(requests[0].NamespacedName.Name).To(Equal(vm.Name)) + Expect(requests[0].NamespacedName.Namespace).To(Equal(vm.Namespace)) + }) + }) + + Context("when CnsRegisterVolume has no VM association", func() { + var crv *cnsv1alpha1.CnsRegisterVolume + + BeforeEach(func() { + crv = &cnsv1alpha1.CnsRegisterVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-crv", + Namespace: vm.Namespace, + }, + Spec: cnsv1alpha1.CnsRegisterVolumeSpec{ + PvcName: "test-pvc", + }, + } + }) + + It("should return no reconcile requests", func() { + requests := mapperFunc(ctx, crv) + Expect(requests).To(HaveLen(0)) + }) + }) + + Context("when mapper function is called with nil context", func() { + It("should panic", func() { + Expect(func() { + mapperFunc(nil, &cnsv1alpha1.CnsRegisterVolume{}) + }).To(Panic()) + }) + }) + + Context("when mapper function is called with nil object", func() { + It("should panic", func() { + Expect(func() { + mapperFunc(ctx, nil) + }).To(Panic()) + }) + }) +}) diff --git a/pkg/util/vsphere/datastore/datastore.go b/pkg/util/vsphere/datastore/datastore.go new file mode 100644 index 000000000..7def1837b --- /dev/null +++ b/pkg/util/vsphere/datastore/datastore.go @@ -0,0 +1,44 @@ +// © Broadcom. All Rights Reserved. +// The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries. +// SPDX-License-Identifier: Apache-2.0 + +package datastore + +import ( + "context" + "fmt" + + "github.com/vmware/govmomi/find" + "github.com/vmware/govmomi/object" + "github.com/vmware/govmomi/vim25" +) + +// GetDatastoreURLFromDatastorePath returns the datastore URL for a given +// datastore path. +func GetDatastoreURLFromDatastorePath( + ctx context.Context, + vimClient *vim25.Client, + path string) (string, error) { + + if ctx == nil { + panic("ctx is nil") + } + if vimClient == nil { + panic("vimClient is nil") + } + + var dsPath object.DatastorePath + if !dsPath.FromString(path) { + return "", fmt.Errorf("failed to parse datastore path %q", path) + } + + finder := find.NewFinder(vimClient) + + datastore, err := finder.Datastore(ctx, dsPath.Datastore) + if err != nil { + return "", fmt.Errorf( + "failed to get datastore for %q: %w", dsPath.Datastore, err) + } + + return datastore.NewURL(dsPath.Path).String(), nil +} diff --git a/pkg/util/vsphere/datastore/datastore_suite_test.go b/pkg/util/vsphere/datastore/datastore_suite_test.go new file mode 100644 index 000000000..6bdbbea58 --- /dev/null +++ b/pkg/util/vsphere/datastore/datastore_suite_test.go @@ -0,0 +1,25 @@ +// © Broadcom. All Rights Reserved. +// The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. +// SPDX-License-Identifier: Apache-2.0 + +package datastore_test + +import ( + "testing" + + . 
"github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + "k8s.io/klog/v2" + logf "sigs.k8s.io/controller-runtime/pkg/log" +) + +func init() { + klog.SetOutput(GinkgoWriter) + logf.SetLogger(klog.Background()) +} + +func TestSuite(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "vSphere Datastore Test Suite") +} diff --git a/pkg/util/vsphere/datastore/datastore_test.go b/pkg/util/vsphere/datastore/datastore_test.go new file mode 100644 index 000000000..73192d02a --- /dev/null +++ b/pkg/util/vsphere/datastore/datastore_test.go @@ -0,0 +1,133 @@ +// © Broadcom. All Rights Reserved. +// The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries. +// SPDX-License-Identifier: Apache-2.0 + +package datastore_test + +import ( + "context" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + "github.com/vmware/govmomi/vim25" + + pkgcfg "github.com/vmware-tanzu/vm-operator/pkg/config" + ctxop "github.com/vmware-tanzu/vm-operator/pkg/context/operation" + "github.com/vmware-tanzu/vm-operator/pkg/util/vsphere/datastore" + "github.com/vmware-tanzu/vm-operator/pkg/vmconfig" + "github.com/vmware-tanzu/vm-operator/test/builder" +) + +const localDS0 = "LocalDS_0" + +var _ = Describe("GetDatastoreURLFromDatastorePath", func() { + var ( + ctx context.Context + vimClient *vim25.Client + ) + + BeforeEach(func() { + vcsimCtx := builder.NewTestContextForVCSim( + ctxop.WithContext( + pkgcfg.NewContextWithDefaultConfig()), + builder.VCSimTestConfig{}) + ctx = vcsimCtx + ctx = vmconfig.WithContext(ctx) + + vimClient = vcsimCtx.VCClient.Client + }) + + Context("GetDatastoreURLForStorageURI", func() { + It("should panic when context is nil", func() { + datastoreName := localDS0 + + Expect(func() { + ctx = nil + _, _ = datastore.GetDatastoreURLFromDatastorePath(ctx, vimClient, datastoreName) + }).To(PanicWith("ctx is nil")) + }) + + It("should panic when vimClient is nil", func() { + datastoreName := localDS0 + + Expect(func() { + _, _ = datastore.GetDatastoreURLFromDatastorePath(ctx, nil, datastoreName) + }).To(PanicWith("vimClient is nil")) + }) + + It("should return datastore URL for valid storage URI", func() { + datastorePath := "[" + localDS0 + "] vm-name/vm-name.vmx" + + url, err := datastore.GetDatastoreURLFromDatastorePath( + ctx, vimClient, datastorePath) + + Expect(err).ToNot(HaveOccurred()) + Expect(url).ToNot(BeEmpty()) + Expect(url).To(ContainSubstring(localDS0)) + }) + + It("should return datastore URL for storage URI with just datastore name", func() { + datastorePath := "[" + localDS0 + "]" + + url, err := datastore.GetDatastoreURLFromDatastorePath( + ctx, vimClient, datastorePath) + + Expect(err).ToNot(HaveOccurred()) + Expect(url).ToNot(BeEmpty()) + Expect(url).To(ContainSubstring(localDS0)) + }) + + It("should return datastore URL for storage URI with complex path", func() { + datastorePath := "[" + localDS0 + "] complex/path/to/vm/vm-name.vmx" + + url, err := datastore.GetDatastoreURLFromDatastorePath( + ctx, vimClient, datastorePath) + + Expect(err).ToNot(HaveOccurred()) + Expect(url).ToNot(BeEmpty()) + Expect(url).To(ContainSubstring(localDS0)) + }) + + It("should return error for empty storage URI", func() { + datastorePath := "" + + url, err := datastore.GetDatastoreURLFromDatastorePath( + ctx, vimClient, datastorePath) + + Expect(err).To(MatchError(`failed to parse datastore path ""`)) + Expect(url).To(BeEmpty()) + }) + + It("should return error for invalid storage URI format", func() { + datastorePath := "invalid-uri-format" + + url, err := 
datastore.GetDatastoreURLFromDatastorePath( + ctx, vimClient, datastorePath) + + Expect(err).To(MatchError(`failed to parse datastore path "invalid-uri-format"`)) + Expect(url).To(BeEmpty()) + }) + + It("should return error for storage URI with empty datastore name", func() { + datastorePath := "[] vm-name/vm-name.vmx" + + url, err := datastore.GetDatastoreURLFromDatastorePath( + ctx, vimClient, datastorePath) + + Expect(err).To(MatchError(`failed to get datastore for "": datastore '' not found`)) + Expect(url).To(BeEmpty()) + }) + + It("should return error for non-existent datastore", func() { + datastorePath := "[NonExistentDS] vm-name/vm-name.vmx" + + url, err := datastore.GetDatastoreURLFromDatastorePath( + ctx, vimClient, datastorePath) + + Expect(err).To(MatchError(`failed to get datastore for "NonExistentDS": datastore 'NonExistentDS' not found`)) + Expect(url).To(BeEmpty()) + }) + }) + +}) diff --git a/pkg/vmconfig/alldisksarepvcs/alldisksarepvcs.go b/pkg/vmconfig/alldisksarepvcs/alldisksarepvcs.go new file mode 100644 index 000000000..952b01201 --- /dev/null +++ b/pkg/vmconfig/alldisksarepvcs/alldisksarepvcs.go @@ -0,0 +1,613 @@ +// © Broadcom. All Rights Reserved. +// The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries. +// SPDX-License-Identifier: Apache-2.0 + +package alldisksarepvcs + +import ( + "context" + "fmt" + + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/mo" + vimtypes "github.com/vmware/govmomi/vim25/types" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" + + vmopv1 "github.com/vmware-tanzu/vm-operator/api/v1alpha5" + cnsv1alpha1 "github.com/vmware-tanzu/vm-operator/external/vsphere-csi-driver/api/v1alpha1" + pkgconst "github.com/vmware-tanzu/vm-operator/pkg/constants" + pkgerr "github.com/vmware-tanzu/vm-operator/pkg/errors" + pkgutil "github.com/vmware-tanzu/vm-operator/pkg/util" + kubeutil "github.com/vmware-tanzu/vm-operator/pkg/util/kube" + "github.com/vmware-tanzu/vm-operator/pkg/util/ptr" + pkgdatastore "github.com/vmware-tanzu/vm-operator/pkg/util/vsphere/datastore" + "github.com/vmware-tanzu/vm-operator/pkg/vmconfig" +) + +var ErrUnmanagedVols = pkgerr.NoRequeueNoErr("has unmanaged volumes") + +type reconciler struct{} + +var _ vmconfig.Reconciler = reconciler{} + +// New returns a new Reconciler for a VM's disk promotion state. +func New() vmconfig.Reconciler { + return reconciler{} +} + +// Name returns the unique name used to identify the reconciler. +func (r reconciler) Name() string { + return "alldisksarepvcs" +} + +func (r reconciler) OnResult( + _ context.Context, + _ *vmopv1.VirtualMachine, + _ mo.VirtualMachine, + _ error) error { + + return nil +} + +// +kubebuilder:rbac:groups=cns.vmware.com,resources=cnsregistervolumes,verbs=get;list;watch + +// Reconcile ensures all non-PVC disks become PVCs. +func Reconcile( + ctx context.Context, + k8sClient ctrlclient.Client, + vimClient *vim25.Client, + vm *vmopv1.VirtualMachine, + moVM mo.VirtualMachine, + _ *vimtypes.VirtualMachineConfigSpec) error { + + return New().Reconcile(ctx, k8sClient, vimClient, vm, moVM, nil) +} + +type controllerSpec struct { + key int32 + bus int32 + ctrlType vmopv1.VirtualControllerType + scsiType vmopv1.SCSIControllerType + sharingMode vmopv1.VirtualControllerSharingMode +} + +// Reconcile ensures all non-PVC disks become PVCs. 
+func (r reconciler) Reconcile( + ctx context.Context, + k8sClient ctrlclient.Client, + vimClient *vim25.Client, + vm *vmopv1.VirtualMachine, + moVM mo.VirtualMachine, + _ *vimtypes.VirtualMachineConfigSpec) error { + + if ctx == nil { + panic("context is nil") + } + if k8sClient == nil { + panic("k8sClient is nil") + } + if vm == nil { + panic("vm is nil") + } + + if moVM.Config == nil || len(moVM.Config.Hardware.Device) == 0 { + return nil + } + + var ( + volumeByUUID = map[string]vmopv1.VirtualMachineVolume{} + controllerKeyToSpec = map[int32]controllerSpec{} + snapshotDiskKeys = map[int32]struct{}{} + ) + + // Find all of the disks that are participating in snapshots. + if layoutEx := moVM.LayoutEx; layoutEx != nil { + for _, snap := range layoutEx.Snapshot { + for _, disk := range snap.Disk { + snapshotDiskKeys[disk.Key] = struct{}{} + } + } + } + + // Build a map of existing volumes by name for quick lookup. + for _, vol := range vm.Spec.Volumes { + if pvc := vol.PersistentVolumeClaim; pvc != nil { + if uvc := pvc.UnmanagedVolumeClaim; uvc != nil { + volumeByUUID[uvc.UUID] = vol + } + } + } + + // Iterate through hardware devices to find non-FCD disks and disk + // controllers. + unmanagedDisks := getUnmanagedDisksAndControllers( + moVM, + snapshotDiskKeys, + controllerKeyToSpec) + + // Process each unmanaged disk. + addedVolsToSpec := updateSpecWithUnmanagedDisks( + vm, + unmanagedDisks, + volumeByUUID, + controllerKeyToSpec) + + // If any unmanaged volumes were added to spec, then return a NoRequeueError + // to ensure the spec is patched before we create any PVCs/CnsRegisterVolume + // objects. + if addedVolsToSpec { + return ErrUnmanagedVols + } + + hasPendingVolumes, err := registerUnmanagedDisks( + ctx, + k8sClient, + vimClient, + vm, + unmanagedDisks, + volumeByUUID) + if err != nil { + return err + } + + // If there were PVCs that were not yet bound, return a NoRequeue error to + // wait for CnsRegisterVolume completion. + if hasPendingVolumes { + return ErrUnmanagedVols + } + + // Clean up any remaining CnsRegisterVolume objects for this VM + if err := cleanupAllCnsRegisterVolumesForVM( + ctx, + k8sClient, + vm); err != nil { + + return err + } + + return nil +} + +func registerUnmanagedDisks( + ctx context.Context, + k8sClient ctrlclient.Client, + vimClient *vim25.Client, + vm *vmopv1.VirtualMachine, + unmanagedDisks []pkgutil.VirtualDiskInfo, + volumeByUUID map[string]vmopv1.VirtualMachineVolume) (bool, error) { + + var hasPendingVolumes bool + + for _, di := range unmanagedDisks { + // Step 1: Look for the volume entry from spec.volumes. + volume := volumeByUUID[di.UUID] + + // Step 2: Ensure PVC exists. + pvcName := volume.PersistentVolumeClaim.ClaimName + pvc, err := ensurePVCForUnmanagedDisk( + ctx, + k8sClient, + vm, + pvcName, + di) + if err != nil { + return false, fmt.Errorf( + "failed to ensure pvc %s for disk %s: %w", + pvcName, di.UUID, err) + } + + switch pvc.Status.Phase { + case "", corev1.ClaimPending: + // Step 3: Check if PVC is bound and create CnsRegisterVolume if + // needed. + if _, err := ensureCnsRegisterVolumeForDisk( + ctx, + k8sClient, + vimClient, + vm, + pvcName, + di); err != nil { + + return false, fmt.Errorf( + "failed to ensure CnsRegisterVolume %s: %w", + pvcName, err) + } + hasPendingVolumes = true + + case corev1.ClaimBound: + // Step 4: Clean up completed CnsRegisterVolume objects. 
+ if err := cleanupCnsRegisterVolumeForVM( + ctx, + k8sClient, + vm, + pvcName); err != nil { + + return false, err + } + } + } + + return hasPendingVolumes, nil +} + +func updateSpecWithUnmanagedDisks( + vm *vmopv1.VirtualMachine, + unmanagedDisks []pkgutil.VirtualDiskInfo, + volumeByUUID map[string]vmopv1.VirtualMachineVolume, + controllerKeyToSpec map[int32]controllerSpec) bool { + + var addedToSpec bool + + for _, di := range unmanagedDisks { + // Step 1: Look for existing volume entry with the provided UUID. + if _, exists := volumeByUUID[di.UUID]; !exists { + // Step 2: If no such entry exists, add one. + vm.Spec.Volumes = append( + vm.Spec.Volumes, + vmopv1.VirtualMachineVolume{ + Name: di.UUID, + VirtualMachineVolumeSource: vmopv1.VirtualMachineVolumeSource{ + PersistentVolumeClaim: &vmopv1.PersistentVolumeClaimVolumeSource{ + PersistentVolumeClaimVolumeSource: corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: pkgutil.GeneratePVCName( + vm.Name, + di.UUID, + ), + }, + UnmanagedVolumeClaim: &vmopv1.UnmanagedVolumeClaimVolumeSource{ + Type: vmopv1.UnmanagedVolumeClaimVolumeTypeFromVM, + Name: di.UUID, + UUID: di.UUID, + }, + ControllerBusNumber: ptr.To(controllerKeyToSpec[di.ControllerKey].bus), + ControllerType: controllerKeyToSpec[di.ControllerKey].ctrlType, + UnitNumber: di.UnitNumber, + }, + }, + }, + ) + addedToSpec = true + } + } + return addedToSpec +} + +func getUnmanagedDisksAndControllers( + moVM mo.VirtualMachine, + snapshotDiskKeys map[int32]struct{}, + controllerKeyToSpec map[int32]controllerSpec) []pkgutil.VirtualDiskInfo { + + var unmanagedDisks []pkgutil.VirtualDiskInfo + + for _, device := range moVM.Config.Hardware.Device { + + var spec controllerSpec + + switch d := device.(type) { + case *vimtypes.VirtualDisk: + if d.VDiskId == nil || d.VDiskId.Id == "" { // Skip FCDs. + di := pkgutil.GetVirtualDiskInfo(d) + if di.UUID != "" && di.FileName != "" { // Skip if no UUID/file. + _, inSnap := snapshotDiskKeys[d.Key] + if !di.HasParent || (di.HasParent && inSnap) { + // Include disks that are not child disks, or if they + // are, are the running point for a snapshot. 
+ unmanagedDisks = append(unmanagedDisks, di) + } + } + } + + case vimtypes.BaseVirtualController: + bd := d.GetVirtualController() + + spec.key = bd.Key + spec.bus = bd.BusNumber + + switch d := d.(type) { + + case *vimtypes.VirtualIDEController: + spec.ctrlType = vmopv1.VirtualControllerTypeIDE + + case vimtypes.BaseVirtualSCSIController: + spec.ctrlType = vmopv1.VirtualControllerTypeSCSI + + switch d.(type) { + case *vimtypes.ParaVirtualSCSIController: + spec.scsiType = vmopv1.SCSIControllerTypeParaVirtualSCSI + case *vimtypes.VirtualLsiLogicController: + spec.scsiType = vmopv1.SCSIControllerTypeLsiLogic + case *vimtypes.VirtualLsiLogicSASController: + spec.scsiType = vmopv1.SCSIControllerTypeLsiLogicSAS + case *vimtypes.VirtualBusLogicController: + spec.scsiType = vmopv1.SCSIControllerTypeBusLogic + } + + switch d.GetVirtualSCSIController().SharedBus { + case vimtypes.VirtualSCSISharingNoSharing: + spec.sharingMode = vmopv1.VirtualControllerSharingModeNone + case vimtypes.VirtualSCSISharingPhysicalSharing: + spec.sharingMode = vmopv1.VirtualControllerSharingModePhysical + case vimtypes.VirtualSCSISharingVirtualSharing: + spec.sharingMode = vmopv1.VirtualControllerSharingModeVirtual + } + + case *vimtypes.VirtualNVMEController: + spec.ctrlType = vmopv1.VirtualControllerTypeNVME + + switch d.SharedBus { + case string(vimtypes.VirtualNVMEControllerSharingNoSharing): + spec.sharingMode = vmopv1.VirtualControllerSharingModeNone + case string(vimtypes.VirtualNVMEControllerSharingPhysicalSharing): + spec.sharingMode = vmopv1.VirtualControllerSharingModePhysical + } + + case vimtypes.BaseVirtualSATAController: + spec.ctrlType = vmopv1.VirtualControllerTypeSATA + } + + if spec.ctrlType != "" { + controllerKeyToSpec[bd.Key] = spec + } + } + } + return unmanagedDisks +} + +// ensurePVCForUnmanagedDisk ensures a PVC exists for an unmanaged disk. +func ensurePVCForUnmanagedDisk( + ctx context.Context, + k8sClient ctrlclient.Client, + vm *vmopv1.VirtualMachine, + pvcName string, + diskInfo pkgutil.VirtualDiskInfo) (*corev1.PersistentVolumeClaim, error) { + + const virtualMachine = "VirtualMachine" + + var ( + obj = &corev1.PersistentVolumeClaim{} + key = ctrlclient.ObjectKey{ + Namespace: vm.Namespace, + Name: pvcName, + } + + expDSRef = corev1.TypedObjectReference{ + APIGroup: &vmopv1.GroupVersion.Group, + Kind: virtualMachine, + Name: vm.Name, + } + + expOwnerRef = metav1.OwnerReference{ + APIVersion: vmopv1.GroupVersion.String(), + Kind: virtualMachine, + Name: vm.Name, + UID: vm.UID, + } + ) + + if err := k8sClient.Get(ctx, key, obj); err != nil { + + if !apierrors.IsNotFound(err) { + return nil, fmt.Errorf("failed to get pvc %s: %w", key, err) + } + + // + // The PVC is not found, create a new one. + // + + // Set the object metadata. + obj.Name = pvcName + obj.Namespace = vm.Namespace + + // Set an OwnerRef on the PVC pointing back to the VM. + obj.OwnerReferences = []metav1.OwnerReference{expOwnerRef} + + // Set dataSourceRef to point to this VM. + obj.Spec.DataSourceRef = &expDSRef + + // Set the PVC's AccessModes to ReadWriteMany if the existing, + // underlying disk has a sharing mode set to MultiWriter. Otherwise init + // the PVC's AccessModes to ReadWriteOnce. 
+ if diskInfo.Sharing == vimtypes.VirtualDiskSharingSharingMultiWriter { + obj.Spec.AccessModes = []corev1.PersistentVolumeAccessMode{ + corev1.ReadWriteMany, + } + } else { + obj.Spec.AccessModes = []corev1.PersistentVolumeAccessMode{ + corev1.ReadWriteOnce, + } + } + + // Initialize the requested size of the PVC to the current capacity of + // the existing disk. + obj.Spec.Resources.Requests = corev1.ResourceList{ + corev1.ResourceStorage: ptr.Deref( + kubeutil.BytesToResource(diskInfo.CapacityInBytes)), + } + + // Create the PVC. + if err := k8sClient.Create(ctx, obj); err != nil { + return nil, fmt.Errorf("failed to create pvc %s: %w", key, err) + } + + return obj, nil + } + + var ( + hasDSRef bool + hasOwnerRef bool + ) + + // Check to see if the PVC already has the expected OwnerRef. + for _, r := range obj.OwnerReferences { + if r.APIVersion == expOwnerRef.APIVersion && + r.Kind == expOwnerRef.Kind && + r.Name == expOwnerRef.Name && + r.UID == expOwnerRef.UID { + + hasOwnerRef = true + break + } + } + + // Check to see if the PVC already has the expected DataSourceRef. + if dsRef := obj.Spec.DataSourceRef; dsRef != nil { + if dsRef.APIGroup != nil && + *dsRef.APIGroup == *expDSRef.APIGroup && + dsRef.Kind == expDSRef.Kind && + dsRef.Name == expDSRef.Name { + + hasDSRef = true + } + } + + if hasOwnerRef && hasDSRef { + return obj, nil + } + + objPatch := ctrlclient.MergeFrom(obj.DeepCopy()) + + // Ensure the PVC's dataSourceRef to point to this VM. + obj.Spec.DataSourceRef = &expDSRef + + // And the OwnerReference to the PVC that points back to the VM. + if !hasOwnerRef { + obj.OwnerReferences = append(obj.OwnerReferences, expOwnerRef) + } + + if err := k8sClient.Patch(ctx, obj, objPatch); err != nil { + return nil, fmt.Errorf("failed to patch pvc %s: %w", key, err) + } + + return obj, nil +} + +// ensureCnsRegisterVolumeForDisk ensures a CnsRegisterVolume exists for an +// unmanaged disk. +func ensureCnsRegisterVolumeForDisk( + ctx context.Context, + k8sClient ctrlclient.Client, + vimClient *vim25.Client, + vm *vmopv1.VirtualMachine, + pvcName string, + diskInfo pkgutil.VirtualDiskInfo) (*cnsv1alpha1.CnsRegisterVolume, error) { + + var ( + obj = &cnsv1alpha1.CnsRegisterVolume{} + key = ctrlclient.ObjectKey{ + Namespace: vm.Namespace, + Name: pvcName, + } + ) + + if err := k8sClient.Get(ctx, key, obj); err != nil { + + if !apierrors.IsNotFound(err) { + return nil, fmt.Errorf( + "failed to get CnsRegisterVolume %s: %w", key, err) + } + + // + // The CRV is not found, create a new one. + // + // Set the object metadata. + obj.Name = pvcName + obj.Namespace = vm.Namespace + + obj.Labels = map[string]string{ + pkgconst.CreatedByLabel: vm.Name, + } + + // Set a ControllerOwnerRef on the CRV pointing back to the VM. + obj.OwnerReferences = []metav1.OwnerReference{ + { + APIVersion: vmopv1.GroupVersion.String(), + BlockOwnerDeletion: ptr.To(true), + Controller: ptr.To(true), + Kind: "VirtualMachine", + Name: vm.Name, + UID: vm.UID, + }, + } + + // Get the datastore URL for the provided file name. + datastoreURL, err := pkgdatastore.GetDatastoreURLFromDatastorePath( + ctx, + vimClient, + diskInfo.FileName) + if err != nil { + return nil, fmt.Errorf( + "failed to get datastore url for %q: %w", + diskInfo.FileName, err) + } + + obj.Spec = cnsv1alpha1.CnsRegisterVolumeSpec{ + PvcName: pvcName, + DiskURLPath: datastoreURL, + } + + // Set the AccessMode to ReadWriteMany if the existing, + // underlying disk has a sharing mode set to MultiWriter. 
Otherwise init + // the AccessMode to ReadWriteOnce. + if diskInfo.Sharing == vimtypes.VirtualDiskSharingSharingMultiWriter { + obj.Spec.AccessMode = corev1.ReadWriteMany + } else { + obj.Spec.AccessMode = corev1.ReadWriteOnce + } + + // Create the CRV. + if err := k8sClient.Create(ctx, obj); err != nil { + return nil, fmt.Errorf("failed to create crv %s: %w", key, err) + } + } + + return obj, nil +} + +// cleanupCnsRegisterVolumeForVM cleans up a CnsRegisterVolume object. +func cleanupCnsRegisterVolumeForVM( + ctx context.Context, + k8sClient ctrlclient.Client, + vm *vmopv1.VirtualMachine, + pvcName string) error { + + if err := ctrlclient.IgnoreNotFound(k8sClient.Delete( + ctx, + &cnsv1alpha1.CnsRegisterVolume{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: vm.Namespace, + Name: pvcName, + }, + })); err != nil { + + return fmt.Errorf("failed to delete CnsRegisterVolume %s: %w", + pvcName, err) + } + + return nil +} + +// cleanupAllCnsRegisterVolumesForVM cleans up all CnsRegisterVolume objects for +// a VM. +func cleanupAllCnsRegisterVolumesForVM( + ctx context.Context, + k8sClient ctrlclient.Client, + vm *vmopv1.VirtualMachine) error { + + if err := k8sClient.DeleteAllOf( + ctx, + &cnsv1alpha1.CnsRegisterVolume{}, + ctrlclient.InNamespace(vm.Namespace), + ctrlclient.MatchingLabels{ + pkgconst.CreatedByLabel: vm.Name, + }); err != nil { + + return fmt.Errorf( + "failed to delete CnsRegisterVolume objects for VM %s: %w", + vm.NamespacedName(), err) + } + + return nil +} diff --git a/pkg/vmconfig/alldisksarepvcs/alldisksarepvcs_suite_test.go b/pkg/vmconfig/alldisksarepvcs/alldisksarepvcs_suite_test.go new file mode 100644 index 000000000..361a232c8 --- /dev/null +++ b/pkg/vmconfig/alldisksarepvcs/alldisksarepvcs_suite_test.go @@ -0,0 +1,25 @@ +// © Broadcom. All Rights Reserved. +// The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries. +// SPDX-License-Identifier: Apache-2.0 + +package alldisksarepvcs_test + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + "k8s.io/klog/v2" + logf "sigs.k8s.io/controller-runtime/pkg/log" +) + +func init() { + klog.SetOutput(GinkgoWriter) + logf.SetLogger(klog.Background()) +} + +func TestSuite(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "AllDisksArePVCs Reconciler Test Suite") +} diff --git a/pkg/vmconfig/alldisksarepvcs/alldisksarepvcs_test.go b/pkg/vmconfig/alldisksarepvcs/alldisksarepvcs_test.go new file mode 100644 index 000000000..806c0f297 --- /dev/null +++ b/pkg/vmconfig/alldisksarepvcs/alldisksarepvcs_test.go @@ -0,0 +1,2162 @@ +// © Broadcom. All Rights Reserved. +// The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries. +// SPDX-License-Identifier: Apache-2.0 + +package alldisksarepvcs_test + +import ( + "context" + "fmt" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/mo" + vimtypes "github.com/vmware/govmomi/vim25/types" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/interceptor" + + vmopv1 "github.com/vmware-tanzu/vm-operator/api/v1alpha5" + cnsv1alpha1 "github.com/vmware-tanzu/vm-operator/external/vsphere-csi-driver/api/v1alpha1" + pkgcfg "github.com/vmware-tanzu/vm-operator/pkg/config" + ctxop "github.com/vmware-tanzu/vm-operator/pkg/context/operation" + kubeutil "github.com/vmware-tanzu/vm-operator/pkg/util/kube" + "github.com/vmware-tanzu/vm-operator/pkg/util/ptr" + "github.com/vmware-tanzu/vm-operator/pkg/vmconfig" + "github.com/vmware-tanzu/vm-operator/pkg/vmconfig/alldisksarepvcs" + "github.com/vmware-tanzu/vm-operator/test/builder" +) + +var _ = Describe("New", func() { + It("should return a reconciler", func() { + Expect(alldisksarepvcs.New()).ToNot(BeNil()) + }) +}) + +var _ = Describe("Name", func() { + It("should return 'alldisksarepvcs'", func() { + Expect(alldisksarepvcs.New().Name()).To(Equal("alldisksarepvcs")) + }) +}) + +var _ = Describe("OnResult", func() { + It("should return nil", func() { + var ctx context.Context + Expect(alldisksarepvcs.New().OnResult(ctx, nil, mo.VirtualMachine{}, nil)).To(Succeed()) + }) +}) + +var _ = Describe("Reconcile", func() { + + var ( + r vmconfig.Reconciler + ctx context.Context + k8sClient ctrlclient.Client + vimClient *vim25.Client + moVM mo.VirtualMachine + vm *vmopv1.VirtualMachine + withObjs []ctrlclient.Object + withFuncs interceptor.Funcs + configSpec *vimtypes.VirtualMachineConfigSpec + httpPrefix string + ) + + BeforeEach(func() { + r = alldisksarepvcs.New() + + vcsimCtx := builder.NewTestContextForVCSim( + ctxop.WithContext(pkgcfg.NewContextWithDefaultConfig()), builder.VCSimTestConfig{}) + ctx = vcsimCtx + ctx = vmconfig.WithContext(ctx) + + vimClient = vcsimCtx.VCClient.Client + + httpPrefix = fmt.Sprintf("https://%s", vimClient.URL().Host) + + moVM = mo.VirtualMachine{ + Config: &vimtypes.VirtualMachineConfigInfo{}, + } + + configSpec = &vimtypes.VirtualMachineConfigSpec{} + + vm = &vmopv1.VirtualMachine{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "my-namespace", + Name: "my-vm", + Annotations: map[string]string{}, + }, + Spec: vmopv1.VirtualMachineSpec{}, + } + + withFuncs = interceptor.Funcs{} + withObjs = []ctrlclient.Object{} + }) + + JustBeforeEach(func() { + k8sClient = builder.NewFakeClientWithInterceptors(withFuncs, withObjs...) 
+ }) + + Context("a panic is expected", func() { + When("ctx is nil", func() { + JustBeforeEach(func() { + ctx = nil + }) + It("should panic", func() { + fn := func() { + _ = r.Reconcile(ctx, k8sClient, vimClient, vm, moVM, configSpec) + } + Expect(fn).To(PanicWith("context is nil")) + }) + }) + When("k8sClient is nil", func() { + JustBeforeEach(func() { + k8sClient = nil + }) + It("should panic", func() { + fn := func() { + _ = r.Reconcile(ctx, k8sClient, vimClient, vm, moVM, configSpec) + } + Expect(fn).To(PanicWith("k8sClient is nil")) + }) + }) + When("vm is nil", func() { + JustBeforeEach(func() { + vm = nil + }) + It("should panic", func() { + fn := func() { + _ = r.Reconcile(ctx, k8sClient, vimClient, vm, moVM, configSpec) + } + Expect(fn).To(PanicWith("vm is nil")) + }) + }) + }) + + When("no panic is expected", func() { + Context("early return conditions", func() { + When("moVM.Config is nil", func() { + JustBeforeEach(func() { + moVM.Config = nil + }) + It("should return nil without error", func() { + err := r.Reconcile(ctx, k8sClient, vimClient, vm, moVM, configSpec) + Expect(err).ToNot(HaveOccurred()) + }) + }) + + When("moVM.Config.Hardware.Device is empty", func() { + JustBeforeEach(func() { + moVM.Config = &vimtypes.VirtualMachineConfigInfo{ + Hardware: vimtypes.VirtualHardware{ + Device: []vimtypes.BaseVirtualDevice{}, + }, + } + }) + It("should return nil without error", func() { + err := r.Reconcile(ctx, k8sClient, vimClient, vm, moVM, configSpec) + Expect(err).ToNot(HaveOccurred()) + }) + }) + }) + + Context("disk processing", func() { + When("VM has unmanaged disks", func() { + var ( + disk1 *vimtypes.VirtualDisk + disk2 *vimtypes.VirtualDisk + ideController *vimtypes.VirtualIDEController + scsiController *vimtypes.VirtualSCSIController + ) + + BeforeEach(func() { + // Create IDE controller + ideController = &vimtypes.VirtualIDEController{ + VirtualController: vimtypes.VirtualController{ + VirtualDevice: vimtypes.VirtualDevice{ + Key: 100, + }, + BusNumber: 0, + }, + } + + // Create SCSI controller + scsiController = &vimtypes.VirtualSCSIController{ + VirtualController: vimtypes.VirtualController{ + VirtualDevice: vimtypes.VirtualDevice{ + Key: 200, + }, + BusNumber: 1, + }, + } + + // Create disk 1 with SeSparse backing (has UUID) + disk1 = &vimtypes.VirtualDisk{ + VirtualDevice: vimtypes.VirtualDevice{ + Key: 300, + ControllerKey: 100, + UnitNumber: ptr.To(int32(0)), + Backing: &vimtypes.VirtualDiskSeSparseBackingInfo{ + VirtualDeviceFileBackingInfo: vimtypes.VirtualDeviceFileBackingInfo{ + FileName: "[LocalDS_0] vm1/disk1.vmdk", + }, + Uuid: "disk1-uuid-123", + }, + DeviceInfo: &vimtypes.Description{ + Label: "Hard disk 1", + }, + }, + CapacityInBytes: 1024 * 1024 * 1024, // 1GB + } + + // Create disk 2 with FlatVer2 backing (has UUID and sharing) + disk2 = &vimtypes.VirtualDisk{ + VirtualDevice: vimtypes.VirtualDevice{ + Key: 400, + ControllerKey: 200, + UnitNumber: ptr.To(int32(0)), + Backing: &vimtypes.VirtualDiskFlatVer2BackingInfo{ + VirtualDeviceFileBackingInfo: vimtypes.VirtualDeviceFileBackingInfo{ + FileName: "[LocalDS_0] vm1/disk2.vmdk", + }, + Uuid: "disk2-uuid-456", + Sharing: string(vimtypes.VirtualDiskSharingSharingMultiWriter), + }, + DeviceInfo: &vimtypes.Description{ + Label: "Hard disk 2", + }, + }, + CapacityInBytes: 2 * 1024 * 1024 * 1024, // 2GB + } + + moVM.Config = &vimtypes.VirtualMachineConfigInfo{ + Hardware: vimtypes.VirtualHardware{ + Device: []vimtypes.BaseVirtualDevice{ + ideController, + scsiController, + disk1, + disk2, + }, + }, + } + 
}) + + It("should add volumes to VM spec and return ErrUnmanagedVols", func() { + err := alldisksarepvcs.Reconcile(ctx, k8sClient, vimClient, vm, moVM, configSpec) + Expect(err).To(Equal(alldisksarepvcs.ErrUnmanagedVols)) + + // Verify volumes were added to VM spec + Expect(vm.Spec.Volumes).To(HaveLen(2)) + + // Find the volumes by name (UUID) + vol1 := findVolumeByName(vm.Spec.Volumes, "disk1-uuid-123") + vol2 := findVolumeByName(vm.Spec.Volumes, "disk2-uuid-456") + + Expect(vol1).ToNot(BeNil()) + Expect(vol2).ToNot(BeNil()) + + // Verify volume 1 properties + Expect(vol1.PersistentVolumeClaim).ToNot(BeNil()) + Expect(vol1.PersistentVolumeClaim.ClaimName).To(Equal("my-vm-a515c82a")) // Generated from xxHash + Expect(vol1.PersistentVolumeClaim.UnmanagedVolumeClaim).ToNot(BeNil()) + Expect(vol1.PersistentVolumeClaim.UnmanagedVolumeClaim.UUID).To(Equal("disk1-uuid-123")) + Expect(vol1.PersistentVolumeClaim.UnmanagedVolumeClaim.Type).To(Equal(vmopv1.UnmanagedVolumeClaimVolumeTypeFromVM)) + Expect(vol1.PersistentVolumeClaim.ControllerType).To(Equal(vmopv1.VirtualControllerTypeIDE)) + Expect(*vol1.PersistentVolumeClaim.ControllerBusNumber).To(Equal(int32(0))) + Expect(*vol1.PersistentVolumeClaim.UnitNumber).To(Equal(int32(0))) + + // Verify volume 2 properties + Expect(vol2.PersistentVolumeClaim).ToNot(BeNil()) + Expect(vol2.PersistentVolumeClaim.ClaimName).To(Equal("my-vm-0627fe5c")) // Generated from xxHash + Expect(vol2.PersistentVolumeClaim.UnmanagedVolumeClaim).ToNot(BeNil()) + Expect(vol2.PersistentVolumeClaim.UnmanagedVolumeClaim.UUID).To(Equal("disk2-uuid-456")) + Expect(vol2.PersistentVolumeClaim.UnmanagedVolumeClaim.Type).To(Equal(vmopv1.UnmanagedVolumeClaimVolumeTypeFromVM)) + Expect(vol2.PersistentVolumeClaim.ControllerType).To(Equal(vmopv1.VirtualControllerTypeSCSI)) + Expect(*vol2.PersistentVolumeClaim.ControllerBusNumber).To(Equal(int32(1))) + Expect(*vol2.PersistentVolumeClaim.UnitNumber).To(Equal(int32(0))) + }) + }) + + When("VM has existing volumes with matching UUIDs", func() { + BeforeEach(func() { + // Start with empty volumes - let Reconcile add them + vm.Spec.Volumes = []vmopv1.VirtualMachineVolume{} + + // Create a disk with matching UUID + disk := &vimtypes.VirtualDisk{ + VirtualDevice: vimtypes.VirtualDevice{ + Key: 300, + ControllerKey: 100, + UnitNumber: ptr.To(int32(0)), + Backing: &vimtypes.VirtualDiskSeSparseBackingInfo{ + VirtualDeviceFileBackingInfo: vimtypes.VirtualDeviceFileBackingInfo{ + FileName: "[LocalDS_0] vm1/existing-disk.vmdk", + }, + Uuid: "existing-disk-uuid", + }, + }, + CapacityInBytes: 1024 * 1024 * 1024, + } + + ideController := &vimtypes.VirtualIDEController{ + VirtualController: vimtypes.VirtualController{ + VirtualDevice: vimtypes.VirtualDevice{ + Key: 100, + }, + BusNumber: 0, + }, + } + + moVM.Config = &vimtypes.VirtualMachineConfigInfo{ + Hardware: vimtypes.VirtualHardware{ + Device: []vimtypes.BaseVirtualDevice{ + ideController, + disk, + }, + }, + } + + }) + + It("should add volumes to VM spec and return ErrUnmanagedVols", func() { + err := r.Reconcile(ctx, k8sClient, vimClient, vm, moVM, configSpec) + Expect(err).To(Equal(alldisksarepvcs.ErrUnmanagedVols)) + Expect(vm.Spec.Volumes).To(HaveLen(1)) + }) + }) + }) + + Context("disk filtering", func() { + When("VM has FCD disks", func() { + BeforeEach(func() { + // Create FCD disk (has VDiskId) + fcdDisk := &vimtypes.VirtualDisk{ + VirtualDevice: vimtypes.VirtualDevice{ + Key: 300, + ControllerKey: 100, + Backing: &vimtypes.VirtualDiskSeSparseBackingInfo{ + VirtualDeviceFileBackingInfo: 
vimtypes.VirtualDeviceFileBackingInfo{ + FileName: "[LocalDS_0] vm1/fcd-disk.vmdk", + }, + Uuid: "fcd-uuid-123", + }, + }, + CapacityInBytes: 1024 * 1024 * 1024, + VDiskId: &vimtypes.ID{ + Id: "fcd-id-123", + }, + } + + ideController := &vimtypes.VirtualIDEController{ + VirtualController: vimtypes.VirtualController{ + VirtualDevice: vimtypes.VirtualDevice{ + Key: 100, + }, + BusNumber: 0, + }, + } + + moVM.Config = &vimtypes.VirtualMachineConfigInfo{ + Hardware: vimtypes.VirtualHardware{ + Device: []vimtypes.BaseVirtualDevice{ + ideController, + fcdDisk, + }, + }, + } + }) + + It("should skip FCD disks", func() { + err := r.Reconcile(ctx, k8sClient, vimClient, vm, moVM, configSpec) + Expect(err).ToNot(HaveOccurred()) + Expect(vm.Spec.Volumes).To(BeEmpty()) + }) + }) + + When("VM has disks without UUID", func() { + BeforeEach(func() { + // Create disk without UUID (SparseVer1 backing) + disk := &vimtypes.VirtualDisk{ + VirtualDevice: vimtypes.VirtualDevice{ + Key: 300, + ControllerKey: 100, + Backing: &vimtypes.VirtualDiskSparseVer1BackingInfo{ + VirtualDeviceFileBackingInfo: vimtypes.VirtualDeviceFileBackingInfo{ + FileName: "[LocalDS_0] vm1/no-uuid-disk.vmdk", + }, + }, + }, + CapacityInBytes: 1024 * 1024 * 1024, + } + + ideController := &vimtypes.VirtualIDEController{ + VirtualController: vimtypes.VirtualController{ + VirtualDevice: vimtypes.VirtualDevice{ + Key: 100, + }, + BusNumber: 0, + }, + } + + moVM.Config = &vimtypes.VirtualMachineConfigInfo{ + Hardware: vimtypes.VirtualHardware{ + Device: []vimtypes.BaseVirtualDevice{ + ideController, + disk, + }, + }, + } + }) + + It("should skip disks without UUID", func() { + err := r.Reconcile(ctx, k8sClient, vimClient, vm, moVM, configSpec) + Expect(err).ToNot(HaveOccurred()) + Expect(vm.Spec.Volumes).To(BeEmpty()) + }) + }) + + When("VM has disks without filename", func() { + BeforeEach(func() { + // Create disk without filename + disk := &vimtypes.VirtualDisk{ + VirtualDevice: vimtypes.VirtualDevice{ + Key: 300, + ControllerKey: 100, + Backing: &vimtypes.VirtualDiskSeSparseBackingInfo{ + VirtualDeviceFileBackingInfo: vimtypes.VirtualDeviceFileBackingInfo{}, + Uuid: "no-filename-uuid-123", + }, + }, + CapacityInBytes: 1024 * 1024 * 1024, + } + + ideController := &vimtypes.VirtualIDEController{ + VirtualController: vimtypes.VirtualController{ + VirtualDevice: vimtypes.VirtualDevice{ + Key: 100, + }, + BusNumber: 0, + }, + } + + moVM.Config = &vimtypes.VirtualMachineConfigInfo{ + Hardware: vimtypes.VirtualHardware{ + Device: []vimtypes.BaseVirtualDevice{ + ideController, + disk, + }, + }, + } + }) + + It("should skip disks without filename", func() { + err := r.Reconcile(ctx, k8sClient, vimClient, vm, moVM, configSpec) + Expect(err).ToNot(HaveOccurred()) + Expect(vm.Spec.Volumes).To(BeEmpty()) + }) + }) + + When("VM has child disks not in snapshots", func() { + BeforeEach(func() { + // Create child disk (has parent) + childDisk := &vimtypes.VirtualDisk{ + VirtualDevice: vimtypes.VirtualDevice{ + Key: 300, + ControllerKey: 100, + Backing: &vimtypes.VirtualDiskSeSparseBackingInfo{ + VirtualDeviceFileBackingInfo: vimtypes.VirtualDeviceFileBackingInfo{ + FileName: "[LocalDS_0] vm1/child-disk.vmdk", + }, + Uuid: "child-disk-uuid-123", + Parent: &vimtypes.VirtualDiskSeSparseBackingInfo{ + VirtualDeviceFileBackingInfo: vimtypes.VirtualDeviceFileBackingInfo{}, + Uuid: "parent-disk-uuid", + }, + }, + }, + CapacityInBytes: 1024 * 1024 * 1024, + } + + ideController := &vimtypes.VirtualIDEController{ + VirtualController: vimtypes.VirtualController{ + 
VirtualDevice: vimtypes.VirtualDevice{ + Key: 100, + }, + BusNumber: 0, + }, + } + + moVM.Config = &vimtypes.VirtualMachineConfigInfo{ + Hardware: vimtypes.VirtualHardware{ + Device: []vimtypes.BaseVirtualDevice{ + ideController, + childDisk, + }, + }, + } + }) + + It("should skip child disks not in snapshots", func() { + err := r.Reconcile(ctx, k8sClient, vimClient, vm, moVM, configSpec) + Expect(err).ToNot(HaveOccurred()) + Expect(vm.Spec.Volumes).To(BeEmpty()) + }) + }) + + When("VM has child disks in snapshots", func() { + BeforeEach(func() { + // Create child disk (has parent) that is in snapshot + childDisk := &vimtypes.VirtualDisk{ + VirtualDevice: vimtypes.VirtualDevice{ + Key: 300, + ControllerKey: 100, + Backing: &vimtypes.VirtualDiskSeSparseBackingInfo{ + VirtualDeviceFileBackingInfo: vimtypes.VirtualDeviceFileBackingInfo{ + FileName: "[LocalDS_0] vm1/child-disk.vmdk", + }, + Uuid: "child-disk-uuid-123", + Parent: &vimtypes.VirtualDiskSeSparseBackingInfo{ + VirtualDeviceFileBackingInfo: vimtypes.VirtualDeviceFileBackingInfo{}, + Uuid: "parent-disk-uuid", + }, + }, + }, + CapacityInBytes: 1024 * 1024 * 1024, + } + + ideController := &vimtypes.VirtualIDEController{ + VirtualController: vimtypes.VirtualController{ + VirtualDevice: vimtypes.VirtualDevice{ + Key: 100, + }, + BusNumber: 0, + }, + } + + // Set up snapshot layout with this disk + moVM.LayoutEx = &vimtypes.VirtualMachineFileLayoutEx{ + Snapshot: []vimtypes.VirtualMachineFileLayoutExSnapshotLayout{ + { + Disk: []vimtypes.VirtualMachineFileLayoutExDiskLayout{ + { + Key: 300, // Same key as child disk + }, + }, + }, + }, + } + + moVM.Config = &vimtypes.VirtualMachineConfigInfo{ + Hardware: vimtypes.VirtualHardware{ + Device: []vimtypes.BaseVirtualDevice{ + ideController, + childDisk, + }, + }, + } + }) + + It("should include child disks in snapshots", func() { + err := r.Reconcile(ctx, k8sClient, vimClient, vm, moVM, configSpec) + Expect(err).To(Equal(alldisksarepvcs.ErrUnmanagedVols)) + Expect(vm.Spec.Volumes).To(HaveLen(1)) + + vol := findVolumeByName(vm.Spec.Volumes, "child-disk-uuid-123") + Expect(vol).ToNot(BeNil()) + Expect(vol.PersistentVolumeClaim.UnmanagedVolumeClaim.UUID).To(Equal("child-disk-uuid-123")) + }) + }) + }) + + Context("controller types", func() { + When("VM has NVME controller", func() { + BeforeEach(func() { + nvmeController := &vimtypes.VirtualNVMEController{ + VirtualController: vimtypes.VirtualController{ + VirtualDevice: vimtypes.VirtualDevice{ + Key: 100, + }, + BusNumber: 0, + }, + } + + disk := &vimtypes.VirtualDisk{ + VirtualDevice: vimtypes.VirtualDevice{ + Key: 300, + ControllerKey: 100, + UnitNumber: ptr.To(int32(0)), + Backing: &vimtypes.VirtualDiskSeSparseBackingInfo{ + VirtualDeviceFileBackingInfo: vimtypes.VirtualDeviceFileBackingInfo{ + FileName: "[LocalDS_0] vm1/nvme-disk.vmdk", + }, + Uuid: "nvme-disk-uuid-123", + }, + }, + CapacityInBytes: 1024 * 1024 * 1024, + } + + moVM.Config = &vimtypes.VirtualMachineConfigInfo{ + Hardware: vimtypes.VirtualHardware{ + Device: []vimtypes.BaseVirtualDevice{ + nvmeController, + disk, + }, + }, + } + }) + + It("should detect NVME controller type", func() { + err := r.Reconcile(ctx, k8sClient, vimClient, vm, moVM, configSpec) + Expect(err).To(Equal(alldisksarepvcs.ErrUnmanagedVols)) + + vol := findVolumeByName(vm.Spec.Volumes, "nvme-disk-uuid-123") + Expect(vol).ToNot(BeNil()) + Expect(vol.PersistentVolumeClaim.ControllerType).To(Equal(vmopv1.VirtualControllerTypeNVME)) + Expect(*vol.PersistentVolumeClaim.ControllerBusNumber).To(Equal(int32(0))) + 
}) + }) + + When("VM has SATA controller", func() { + BeforeEach(func() { + sataController := &vimtypes.VirtualSATAController{ + VirtualController: vimtypes.VirtualController{ + VirtualDevice: vimtypes.VirtualDevice{ + Key: 100, + }, + BusNumber: 0, + }, + } + + disk := &vimtypes.VirtualDisk{ + VirtualDevice: vimtypes.VirtualDevice{ + Key: 300, + ControllerKey: 100, + UnitNumber: ptr.To(int32(0)), + Backing: &vimtypes.VirtualDiskSeSparseBackingInfo{ + VirtualDeviceFileBackingInfo: vimtypes.VirtualDeviceFileBackingInfo{ + FileName: "[LocalDS_0] vm1/sata-disk.vmdk", + }, + Uuid: "sata-disk-uuid-123", + }, + }, + CapacityInBytes: 1024 * 1024 * 1024, + } + + moVM.Config = &vimtypes.VirtualMachineConfigInfo{ + Hardware: vimtypes.VirtualHardware{ + Device: []vimtypes.BaseVirtualDevice{ + sataController, + disk, + }, + }, + } + }) + + It("should detect SATA controller type", func() { + err := r.Reconcile(ctx, k8sClient, vimClient, vm, moVM, configSpec) + Expect(err).To(Equal(alldisksarepvcs.ErrUnmanagedVols)) + + vol := findVolumeByName(vm.Spec.Volumes, "sata-disk-uuid-123") + Expect(vol).ToNot(BeNil()) + Expect(vol.PersistentVolumeClaim.ControllerType).To(Equal(vmopv1.VirtualControllerTypeSATA)) + Expect(*vol.PersistentVolumeClaim.ControllerBusNumber).To(Equal(int32(0))) + }) + }) + }) + + Context("PVC creation and management", func() { + When("VM has unmanaged disks and no existing PVCs", func() { + BeforeEach(func() { + // Start with empty volumes - let Reconcile add them + vm.Spec.Volumes = []vmopv1.VirtualMachineVolume{} + + disk := &vimtypes.VirtualDisk{ + VirtualDevice: vimtypes.VirtualDevice{ + Key: 300, + ControllerKey: 100, + UnitNumber: ptr.To(int32(0)), + Backing: &vimtypes.VirtualDiskSeSparseBackingInfo{ + VirtualDeviceFileBackingInfo: vimtypes.VirtualDeviceFileBackingInfo{ + FileName: "[LocalDS_0] vm1/disk.vmdk", + }, + Uuid: "disk-uuid-123", + }, + }, + CapacityInBytes: 1024 * 1024 * 1024, + } + + ideController := &vimtypes.VirtualIDEController{ + VirtualController: vimtypes.VirtualController{ + VirtualDevice: vimtypes.VirtualDevice{ + Key: 100, + }, + BusNumber: 0, + }, + } + + moVM.Config = &vimtypes.VirtualMachineConfigInfo{ + Hardware: vimtypes.VirtualHardware{ + Device: []vimtypes.BaseVirtualDevice{ + ideController, + disk, + }, + }, + } + }) + + It("should add volumes to VM spec and return ErrUnmanagedVols", func() { + err := r.Reconcile(ctx, k8sClient, vimClient, vm, moVM, configSpec) + Expect(err).To(Equal(alldisksarepvcs.ErrUnmanagedVols)) + + // Verify volumes were added to VM spec + Expect(vm.Spec.Volumes).To(HaveLen(1)) + + vol := findVolumeByName(vm.Spec.Volumes, "disk-uuid-123") + Expect(vol).ToNot(BeNil()) + Expect(vol.PersistentVolumeClaim.ClaimName).To(Equal("my-vm-54cddb33")) + }) + }) + + When("VM has unmanaged disk with MultiWriter sharing", func() { + BeforeEach(func() { + // Start with empty volumes - let Reconcile add them + vm.Spec.Volumes = []vmopv1.VirtualMachineVolume{} + + disk := &vimtypes.VirtualDisk{ + VirtualDevice: vimtypes.VirtualDevice{ + Key: 300, + ControllerKey: 200, + UnitNumber: ptr.To(int32(0)), + Backing: &vimtypes.VirtualDiskFlatVer2BackingInfo{ + VirtualDeviceFileBackingInfo: vimtypes.VirtualDeviceFileBackingInfo{ + FileName: "[LocalDS_0] vm1/multiwriter-disk.vmdk", + }, + Uuid: "disk-uuid-456", + Sharing: string(vimtypes.VirtualDiskSharingSharingMultiWriter), + }, + }, + CapacityInBytes: 2 * 1024 * 1024 * 1024, + } + + scsiController := &vimtypes.VirtualSCSIController{ + VirtualController: vimtypes.VirtualController{ + VirtualDevice: 
vimtypes.VirtualDevice{ + Key: 200, + }, + BusNumber: 1, + }, + } + + moVM.Config = &vimtypes.VirtualMachineConfigInfo{ + Hardware: vimtypes.VirtualHardware{ + Device: []vimtypes.BaseVirtualDevice{ + scsiController, + disk, + }, + }, + } + }) + + It("should add volumes to VM spec and return ErrUnmanagedVols", func() { + err := r.Reconcile(ctx, k8sClient, vimClient, vm, moVM, configSpec) + Expect(err).To(Equal(alldisksarepvcs.ErrUnmanagedVols)) + + // Verify volumes were added to VM spec + Expect(vm.Spec.Volumes).To(HaveLen(1)) + + vol := findVolumeByName(vm.Spec.Volumes, "disk-uuid-456") + Expect(vol).ToNot(BeNil()) + Expect(vol.PersistentVolumeClaim.ClaimName).To(Equal("my-vm-c6a0a3e7")) + }) + }) + + When("VM has volumes in spec and needs PVC creation", func() { + var datastorePath string + + BeforeEach(func() { + datastorePath = "[LocalDS_0] vm1/disk.vmdk" + }) + + JustBeforeEach(func() { + // Set up VM with volumes already in spec + vm.Spec.Volumes = []vmopv1.VirtualMachineVolume{ + { + Name: "disk-uuid-123", + VirtualMachineVolumeSource: vmopv1.VirtualMachineVolumeSource{ + PersistentVolumeClaim: &vmopv1.PersistentVolumeClaimVolumeSource{ + PersistentVolumeClaimVolumeSource: corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: "my-vm-54cddb33", + }, + UnmanagedVolumeClaim: &vmopv1.UnmanagedVolumeClaimVolumeSource{ + Type: vmopv1.UnmanagedVolumeClaimVolumeTypeFromVM, + Name: "disk-uuid-123", + UUID: "disk-uuid-123", + }, + ControllerType: vmopv1.VirtualControllerTypeIDE, + ControllerBusNumber: ptr.To(int32(0)), + UnitNumber: ptr.To(int32(0)), + }, + }, + }, + } + + disk := &vimtypes.VirtualDisk{ + VirtualDevice: vimtypes.VirtualDevice{ + Key: 300, + ControllerKey: 100, + UnitNumber: ptr.To(int32(0)), + Backing: &vimtypes.VirtualDiskSeSparseBackingInfo{ + VirtualDeviceFileBackingInfo: vimtypes.VirtualDeviceFileBackingInfo{ + FileName: datastorePath, + }, + Uuid: "disk-uuid-123", + }, + }, + CapacityInBytes: 1024 * 1024 * 1024, + } + + ideController := &vimtypes.VirtualIDEController{ + VirtualController: vimtypes.VirtualController{ + VirtualDevice: vimtypes.VirtualDevice{ + Key: 100, + }, + BusNumber: 0, + }, + } + + moVM.Config = &vimtypes.VirtualMachineConfigInfo{ + Hardware: vimtypes.VirtualHardware{ + Device: []vimtypes.BaseVirtualDevice{ + ideController, + disk, + }, + }, + } + }) + + It("should create PVC and CnsRegisterVolume", func() { + err := r.Reconcile(ctx, k8sClient, vimClient, vm, moVM, configSpec) + Expect(err).To(Equal(alldisksarepvcs.ErrUnmanagedVols)) + + // Verify PVC was created + pvc := &corev1.PersistentVolumeClaim{} + err = k8sClient.Get(ctx, ctrlclient.ObjectKey{ + Namespace: vm.Namespace, + Name: "my-vm-54cddb33", + }, pvc) + Expect(err).ToNot(HaveOccurred()) + + // Verify PVC properties + Expect(pvc.Spec.DataSourceRef).ToNot(BeNil()) + Expect(pvc.Spec.DataSourceRef.Kind).To(Equal("VirtualMachine")) + Expect(pvc.Spec.DataSourceRef.Name).To(Equal(vm.Name)) + Expect(pvc.Spec.AccessModes).To(ContainElement(corev1.ReadWriteOnce)) + expectedStorage := *kubeutil.BytesToResource(1024 * 1024 * 1024) + Expect(pvc.Spec.Resources.Requests[corev1.ResourceStorage].Equal(expectedStorage)).To(BeTrue()) + + // Verify CnsRegisterVolume was created + crv := &cnsv1alpha1.CnsRegisterVolume{} + err = k8sClient.Get(ctx, ctrlclient.ObjectKey{ + Namespace: vm.Namespace, + Name: "my-vm-54cddb33", + }, crv) + Expect(err).ToNot(HaveOccurred()) + + // Verify CnsRegisterVolume properties + Expect(crv.Spec.PvcName).To(Equal("my-vm-54cddb33")) + + 
Expect(crv.Spec.DiskURLPath).To(Equal(httpPrefix + `/folder/vm1/disk.vmdk?dcPath=%2FDC0&dsName=LocalDS_0`)) + Expect(crv.Spec.AccessMode).To(Equal(corev1.ReadWriteOnce)) + Expect(crv.Labels["vmoperator.vmware.com/created-by"]).To(Equal(vm.Name)) + Expect(crv.OwnerReferences).To(HaveLen(1)) + Expect(crv.OwnerReferences[0].Kind).To(Equal("VirtualMachine")) + Expect(crv.OwnerReferences[0].Name).To(Equal(vm.Name)) + Expect(*crv.OwnerReferences[0].Controller).To(BeTrue()) + }) + + When("VM has disks with invalid filename", func() { + BeforeEach(func() { + datastorePath = "[invalid] vm1/disk.vmdk" + }) + + It("should return an error", func() { + err := r.Reconcile(ctx, k8sClient, vimClient, vm, moVM, configSpec) + Expect(err).To(MatchError(`failed to ensure CnsRegisterVolume my-vm-54cddb33: ` + + `failed to get datastore url for "[invalid] vm1/disk.vmdk": ` + + `failed to get datastore for "invalid": datastore 'invalid' not found`)) + }) + }) + }) + + When("VM has MultiWriter disk and needs PVC creation", func() { + BeforeEach(func() { + // Set up VM with volumes already in spec + vm.Spec.Volumes = []vmopv1.VirtualMachineVolume{ + { + Name: "disk-uuid-456", + VirtualMachineVolumeSource: vmopv1.VirtualMachineVolumeSource{ + PersistentVolumeClaim: &vmopv1.PersistentVolumeClaimVolumeSource{ + PersistentVolumeClaimVolumeSource: corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: "my-vm-c6a0a3e7", + }, + UnmanagedVolumeClaim: &vmopv1.UnmanagedVolumeClaimVolumeSource{ + Type: vmopv1.UnmanagedVolumeClaimVolumeTypeFromVM, + Name: "disk-uuid-456", + UUID: "disk-uuid-456", + }, + ControllerType: vmopv1.VirtualControllerTypeSCSI, + ControllerBusNumber: ptr.To(int32(1)), + UnitNumber: ptr.To(int32(0)), + }, + }, + }, + } + + disk := &vimtypes.VirtualDisk{ + VirtualDevice: vimtypes.VirtualDevice{ + Key: 300, + ControllerKey: 200, + UnitNumber: ptr.To(int32(0)), + Backing: &vimtypes.VirtualDiskFlatVer2BackingInfo{ + VirtualDeviceFileBackingInfo: vimtypes.VirtualDeviceFileBackingInfo{ + FileName: "[LocalDS_0] vm1/multiwriter-disk.vmdk", + }, + Uuid: "disk-uuid-456", + Sharing: string(vimtypes.VirtualDiskSharingSharingMultiWriter), + }, + }, + CapacityInBytes: 2 * 1024 * 1024 * 1024, + } + + scsiController := &vimtypes.VirtualSCSIController{ + VirtualController: vimtypes.VirtualController{ + VirtualDevice: vimtypes.VirtualDevice{ + Key: 200, + }, + BusNumber: 1, + }, + } + + moVM.Config = &vimtypes.VirtualMachineConfigInfo{ + Hardware: vimtypes.VirtualHardware{ + Device: []vimtypes.BaseVirtualDevice{ + scsiController, + disk, + }, + }, + } + }) + + It("should create PVC and CnsRegisterVolume with ReadWriteMany access mode", func() { + err := r.Reconcile(ctx, k8sClient, vimClient, vm, moVM, configSpec) + Expect(err).To(Equal(alldisksarepvcs.ErrUnmanagedVols)) + + // Verify PVC was created with ReadWriteMany + pvc := &corev1.PersistentVolumeClaim{} + err = k8sClient.Get(ctx, ctrlclient.ObjectKey{ + Namespace: vm.Namespace, + Name: "my-vm-c6a0a3e7", + }, pvc) + Expect(err).ToNot(HaveOccurred()) + Expect(pvc.Spec.AccessModes).To(ContainElement(corev1.ReadWriteMany)) + + // Verify CnsRegisterVolume was created with ReadWriteMany + crv := &cnsv1alpha1.CnsRegisterVolume{} + err = k8sClient.Get(ctx, ctrlclient.ObjectKey{ + Namespace: vm.Namespace, + Name: "my-vm-c6a0a3e7", + }, crv) + Expect(err).ToNot(HaveOccurred()) + Expect(crv.Spec.AccessMode).To(Equal(corev1.ReadWriteMany)) + }) + }) + + When("PVC exists and is bound", func() { + BeforeEach(func() { + // Set up VM with volumes already in spec + vm.Spec.Volumes 
= []vmopv1.VirtualMachineVolume{ + { + Name: "disk-uuid-789", + VirtualMachineVolumeSource: vmopv1.VirtualMachineVolumeSource{ + PersistentVolumeClaim: &vmopv1.PersistentVolumeClaimVolumeSource{ + PersistentVolumeClaimVolumeSource: corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: "my-vm-134e95b6", + }, + UnmanagedVolumeClaim: &vmopv1.UnmanagedVolumeClaimVolumeSource{ + Type: vmopv1.UnmanagedVolumeClaimVolumeTypeFromVM, + Name: "disk-uuid-789", + UUID: "disk-uuid-789", + }, + ControllerType: vmopv1.VirtualControllerTypeIDE, + ControllerBusNumber: ptr.To(int32(0)), + UnitNumber: ptr.To(int32(0)), + }, + }, + }, + } + + // Create bound PVC + boundPVC := &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-vm-134e95b6", + Namespace: vm.Namespace, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: vmopv1.GroupVersion.String(), + Kind: "VirtualMachine", + Name: vm.Name, + UID: vm.UID, + Controller: ptr.To(true), + }, + }, + }, + Spec: corev1.PersistentVolumeClaimSpec{ + DataSourceRef: &corev1.TypedObjectReference{ + APIGroup: &vmopv1.GroupVersion.Group, + Kind: "VirtualMachine", + Name: vm.Name, + }, + AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}, + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: *kubeutil.BytesToResource(1024 * 1024 * 1024), + }, + }, + }, + Status: corev1.PersistentVolumeClaimStatus{ + Phase: corev1.ClaimBound, + }, + } + + // Create existing CnsRegisterVolume to be cleaned up + existingCRV := &cnsv1alpha1.CnsRegisterVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-vm-134e95b6", + Namespace: vm.Namespace, + Labels: map[string]string{ + "vmoperator.vmware.com/created-by": vm.Name, + }, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: vmopv1.GroupVersion.String(), + Kind: "VirtualMachine", + Name: vm.Name, + UID: vm.UID, + Controller: ptr.To(true), + }, + }, + }, + Spec: cnsv1alpha1.CnsRegisterVolumeSpec{ + PvcName: "my-vm-134e95b6", + DiskURLPath: "[LocalDS_0] vm1/disk.vmdk", + AccessMode: corev1.ReadWriteOnce, + }, + } + + withObjs = []ctrlclient.Object{boundPVC, existingCRV} + + disk := &vimtypes.VirtualDisk{ + VirtualDevice: vimtypes.VirtualDevice{ + Key: 300, + ControllerKey: 100, + UnitNumber: ptr.To(int32(0)), + Backing: &vimtypes.VirtualDiskSeSparseBackingInfo{ + VirtualDeviceFileBackingInfo: vimtypes.VirtualDeviceFileBackingInfo{ + FileName: "[LocalDS_0] vm1/disk.vmdk", + }, + Uuid: "disk-uuid-789", + }, + }, + CapacityInBytes: 1024 * 1024 * 1024, + } + + ideController := &vimtypes.VirtualIDEController{ + VirtualController: vimtypes.VirtualController{ + VirtualDevice: vimtypes.VirtualDevice{ + Key: 100, + }, + BusNumber: 0, + }, + } + + moVM.Config = &vimtypes.VirtualMachineConfigInfo{ + Hardware: vimtypes.VirtualHardware{ + Device: []vimtypes.BaseVirtualDevice{ + ideController, + disk, + }, + }, + } + }) + + It("should clean up CnsRegisterVolume and return nil", func() { + err := r.Reconcile(ctx, k8sClient, vimClient, vm, moVM, configSpec) + Expect(err).ToNot(HaveOccurred()) + + // Verify CnsRegisterVolume was deleted + crv := &cnsv1alpha1.CnsRegisterVolume{} + err = k8sClient.Get(ctx, ctrlclient.ObjectKey{ + Namespace: vm.Namespace, + Name: "my-vm-134e95b6", + }, crv) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("not found")) + }) + }) + + When("PVC exists but needs patching", func() { + BeforeEach(func() { + // Set up VM with volumes already in spec + vm.Spec.Volumes = []vmopv1.VirtualMachineVolume{ 
+ { + Name: "disk-uuid-patch", + VirtualMachineVolumeSource: vmopv1.VirtualMachineVolumeSource{ + PersistentVolumeClaim: &vmopv1.PersistentVolumeClaimVolumeSource{ + PersistentVolumeClaimVolumeSource: corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: "my-vm-f3069b5c", + }, + UnmanagedVolumeClaim: &vmopv1.UnmanagedVolumeClaimVolumeSource{ + Type: vmopv1.UnmanagedVolumeClaimVolumeTypeFromVM, + Name: "disk-uuid-patch", + UUID: "disk-uuid-patch", + }, + ControllerType: vmopv1.VirtualControllerTypeIDE, + ControllerBusNumber: ptr.To(int32(0)), + UnitNumber: ptr.To(int32(0)), + }, + }, + }, + } + + // Create PVC without OwnerRef and DataSourceRef + existingPVC := &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-vm-f3069b5c", + Namespace: vm.Namespace, + // No OwnerReferences + }, + Spec: corev1.PersistentVolumeClaimSpec{ + // No DataSourceRef + AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}, + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: *kubeutil.BytesToResource(1024 * 1024 * 1024), + }, + }, + }, + Status: corev1.PersistentVolumeClaimStatus{ + Phase: corev1.ClaimPending, + }, + } + + withObjs = []ctrlclient.Object{existingPVC} + + disk := &vimtypes.VirtualDisk{ + VirtualDevice: vimtypes.VirtualDevice{ + Key: 300, + ControllerKey: 100, + UnitNumber: ptr.To(int32(0)), + Backing: &vimtypes.VirtualDiskSeSparseBackingInfo{ + VirtualDeviceFileBackingInfo: vimtypes.VirtualDeviceFileBackingInfo{ + FileName: "[LocalDS_0] vm1/disk.vmdk", + }, + Uuid: "disk-uuid-patch", + }, + }, + CapacityInBytes: 1024 * 1024 * 1024, + } + + ideController := &vimtypes.VirtualIDEController{ + VirtualController: vimtypes.VirtualController{ + VirtualDevice: vimtypes.VirtualDevice{ + Key: 100, + }, + BusNumber: 0, + }, + } + + moVM.Config = &vimtypes.VirtualMachineConfigInfo{ + Hardware: vimtypes.VirtualHardware{ + Device: []vimtypes.BaseVirtualDevice{ + ideController, + disk, + }, + }, + } + }) + + It("should patch PVC with OwnerRef and DataSourceRef", func() { + err := r.Reconcile(ctx, k8sClient, vimClient, vm, moVM, configSpec) + Expect(err).To(Equal(alldisksarepvcs.ErrUnmanagedVols)) + + // Verify PVC was patched + pvc := &corev1.PersistentVolumeClaim{} + err = k8sClient.Get(ctx, ctrlclient.ObjectKey{ + Namespace: vm.Namespace, + Name: "my-vm-f3069b5c", + }, pvc) + Expect(err).ToNot(HaveOccurred()) + + // Verify OwnerRef was added + Expect(pvc.OwnerReferences).To(HaveLen(1)) + Expect(pvc.OwnerReferences[0].Kind).To(Equal("VirtualMachine")) + Expect(pvc.OwnerReferences[0].Name).To(Equal(vm.Name)) + + // Verify DataSourceRef was added + Expect(pvc.Spec.DataSourceRef).ToNot(BeNil()) + Expect(pvc.Spec.DataSourceRef.Kind).To(Equal("VirtualMachine")) + Expect(pvc.Spec.DataSourceRef.Name).To(Equal(vm.Name)) + + // Verify CnsRegisterVolume was created + crv := &cnsv1alpha1.CnsRegisterVolume{} + err = k8sClient.Get(ctx, ctrlclient.ObjectKey{ + Namespace: vm.Namespace, + Name: "my-vm-f3069b5c", + }, crv) + Expect(err).ToNot(HaveOccurred()) + }) + }) + + When("PVC is pending and CnsRegisterVolume exists", func() { + BeforeEach(func() { + // Set up VM with volumes already in spec + vm.Spec.Volumes = []vmopv1.VirtualMachineVolume{ + { + Name: "disk-uuid-pending", + VirtualMachineVolumeSource: vmopv1.VirtualMachineVolumeSource{ + PersistentVolumeClaim: &vmopv1.PersistentVolumeClaimVolumeSource{ + PersistentVolumeClaimVolumeSource: corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: "my-vm-0c2d85ea", + }, + 
UnmanagedVolumeClaim: &vmopv1.UnmanagedVolumeClaimVolumeSource{ + Type: vmopv1.UnmanagedVolumeClaimVolumeTypeFromVM, + Name: "disk-uuid-pending", + UUID: "disk-uuid-pending", + }, + ControllerType: vmopv1.VirtualControllerTypeIDE, + ControllerBusNumber: ptr.To(int32(0)), + UnitNumber: ptr.To(int32(0)), + }, + }, + }, + } + + // Create pending PVC + pendingPVC := &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-vm-0c2d85ea", + Namespace: vm.Namespace, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: vmopv1.GroupVersion.String(), + Kind: "VirtualMachine", + Name: vm.Name, + UID: vm.UID, + Controller: ptr.To(true), + }, + }, + }, + Spec: corev1.PersistentVolumeClaimSpec{ + DataSourceRef: &corev1.TypedObjectReference{ + APIGroup: &vmopv1.GroupVersion.Group, + Kind: "VirtualMachine", + Name: vm.Name, + }, + AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}, + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: *kubeutil.BytesToResource(1024 * 1024 * 1024), + }, + }, + }, + Status: corev1.PersistentVolumeClaimStatus{ + Phase: corev1.ClaimPending, + }, + } + + withObjs = []ctrlclient.Object{pendingPVC} + + disk := &vimtypes.VirtualDisk{ + VirtualDevice: vimtypes.VirtualDevice{ + Key: 300, + ControllerKey: 100, + UnitNumber: ptr.To(int32(0)), + Backing: &vimtypes.VirtualDiskSeSparseBackingInfo{ + VirtualDeviceFileBackingInfo: vimtypes.VirtualDeviceFileBackingInfo{ + FileName: "[LocalDS_0] vm1/disk.vmdk", + }, + Uuid: "disk-uuid-pending", + }, + }, + CapacityInBytes: 1024 * 1024 * 1024, + } + + ideController := &vimtypes.VirtualIDEController{ + VirtualController: vimtypes.VirtualController{ + VirtualDevice: vimtypes.VirtualDevice{ + Key: 100, + }, + BusNumber: 0, + }, + } + + moVM.Config = &vimtypes.VirtualMachineConfigInfo{ + Hardware: vimtypes.VirtualHardware{ + Device: []vimtypes.BaseVirtualDevice{ + ideController, + disk, + }, + }, + } + }) + + It("should return ErrUnmanagedVols for pending PVC", func() { + err := r.Reconcile(ctx, k8sClient, vimClient, vm, moVM, configSpec) + Expect(err).To(Equal(alldisksarepvcs.ErrUnmanagedVols)) + }) + }) + + When("multiple reconciliation cycles with state changes", func() { + BeforeEach(func() { + // Set up VM with volumes already in spec + vm.Spec.Volumes = []vmopv1.VirtualMachineVolume{ + { + Name: "disk-uuid-cycle", + VirtualMachineVolumeSource: vmopv1.VirtualMachineVolumeSource{ + PersistentVolumeClaim: &vmopv1.PersistentVolumeClaimVolumeSource{ + PersistentVolumeClaimVolumeSource: corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: "my-vm-e926b968", + }, + UnmanagedVolumeClaim: &vmopv1.UnmanagedVolumeClaimVolumeSource{ + Type: vmopv1.UnmanagedVolumeClaimVolumeTypeFromVM, + Name: "disk-uuid-cycle", + UUID: "disk-uuid-cycle", + }, + ControllerType: vmopv1.VirtualControllerTypeIDE, + ControllerBusNumber: ptr.To(int32(0)), + UnitNumber: ptr.To(int32(0)), + }, + }, + }, + } + + disk := &vimtypes.VirtualDisk{ + VirtualDevice: vimtypes.VirtualDevice{ + Key: 300, + ControllerKey: 100, + UnitNumber: ptr.To(int32(0)), + Backing: &vimtypes.VirtualDiskSeSparseBackingInfo{ + VirtualDeviceFileBackingInfo: vimtypes.VirtualDeviceFileBackingInfo{ + FileName: "[LocalDS_0] vm1/disk.vmdk", + }, + Uuid: "disk-uuid-cycle", + }, + }, + CapacityInBytes: 1024 * 1024 * 1024, + } + + ideController := &vimtypes.VirtualIDEController{ + VirtualController: vimtypes.VirtualController{ + VirtualDevice: vimtypes.VirtualDevice{ + Key: 100, + }, + BusNumber: 0, + }, + } + + 
moVM.Config = &vimtypes.VirtualMachineConfigInfo{ + Hardware: vimtypes.VirtualHardware{ + Device: []vimtypes.BaseVirtualDevice{ + ideController, + disk, + }, + }, + } + }) + + It("should handle complete reconciliation cycle", func() { + // First reconciliation: should create PVC and CnsRegisterVolume + err := r.Reconcile(ctx, k8sClient, vimClient, vm, moVM, configSpec) + Expect(err).To(Equal(alldisksarepvcs.ErrUnmanagedVols)) + + // Verify PVC was created + pvc := &corev1.PersistentVolumeClaim{} + err = k8sClient.Get(ctx, ctrlclient.ObjectKey{ + Namespace: vm.Namespace, + Name: "my-vm-e926b968", + }, pvc) + Expect(err).ToNot(HaveOccurred()) + + // Set the PVC status to Pending for the test + pvc.Status.Phase = corev1.ClaimPending + err = k8sClient.Status().Update(ctx, pvc) + Expect(err).ToNot(HaveOccurred()) + + // Verify CnsRegisterVolume was created + crv := &cnsv1alpha1.CnsRegisterVolume{} + err = k8sClient.Get(ctx, ctrlclient.ObjectKey{ + Namespace: vm.Namespace, + Name: "my-vm-e926b968", + }, crv) + Expect(err).ToNot(HaveOccurred()) + + // Simulate PVC becoming bound + pvc.Status.Phase = corev1.ClaimBound + err = k8sClient.Status().Update(ctx, pvc) + Expect(err).ToNot(HaveOccurred()) + + // Second reconciliation: should clean up CnsRegisterVolume + err = r.Reconcile(ctx, k8sClient, vimClient, vm, moVM, configSpec) + Expect(err).ToNot(HaveOccurred()) + + // Verify CnsRegisterVolume was deleted + crv = &cnsv1alpha1.CnsRegisterVolume{} + err = k8sClient.Get(ctx, ctrlclient.ObjectKey{ + Namespace: vm.Namespace, + Name: "my-vm-e926b968", + }, crv) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("not found")) + }) + }) + }) + + Context("error handling", func() { + When("k8sClient.Get fails for PVC", func() { + BeforeEach(func() { + // Set up VM with unmanaged disk + vm.Spec.Volumes = []vmopv1.VirtualMachineVolume{ + { + Name: "disk-uuid-error", + VirtualMachineVolumeSource: vmopv1.VirtualMachineVolumeSource{ + PersistentVolumeClaim: &vmopv1.PersistentVolumeClaimVolumeSource{ + PersistentVolumeClaimVolumeSource: corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: "my-vm-87ce75a6", + }, + UnmanagedVolumeClaim: &vmopv1.UnmanagedVolumeClaimVolumeSource{ + Type: vmopv1.UnmanagedVolumeClaimVolumeTypeFromVM, + Name: "disk-uuid-error", + UUID: "disk-uuid-error", + }, + ControllerType: vmopv1.VirtualControllerTypeIDE, + ControllerBusNumber: ptr.To(int32(0)), + UnitNumber: ptr.To(int32(0)), + }, + }, + }, + } + + disk := &vimtypes.VirtualDisk{ + VirtualDevice: vimtypes.VirtualDevice{ + Key: 300, + ControllerKey: 100, + UnitNumber: ptr.To(int32(0)), + Backing: &vimtypes.VirtualDiskSeSparseBackingInfo{ + VirtualDeviceFileBackingInfo: vimtypes.VirtualDeviceFileBackingInfo{ + FileName: "[LocalDS_0] vm1/disk.vmdk", + }, + Uuid: "disk-uuid-error", + }, + }, + CapacityInBytes: 1024 * 1024 * 1024, + } + + ideController := &vimtypes.VirtualIDEController{ + VirtualController: vimtypes.VirtualController{ + VirtualDevice: vimtypes.VirtualDevice{ + Key: 100, + }, + BusNumber: 0, + }, + } + + moVM.Config = &vimtypes.VirtualMachineConfigInfo{ + Hardware: vimtypes.VirtualHardware{ + Device: []vimtypes.BaseVirtualDevice{ + ideController, + disk, + }, + }, + } + + // Set up interceptor to return error on Get + withFuncs = interceptor.Funcs{ + Get: func(ctx context.Context, client ctrlclient.WithWatch, key ctrlclient.ObjectKey, obj ctrlclient.Object, opts ...ctrlclient.GetOption) error { + if _, ok := obj.(*corev1.PersistentVolumeClaim); ok { + return fmt.Errorf("simulated get error") + } 
+ return client.Get(ctx, key, obj, opts...) + }, + } + }) + + It("should return error", func() { + err := r.Reconcile(ctx, k8sClient, vimClient, vm, moVM, configSpec) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("failed to get pvc")) + }) + }) + + When("k8sClient.Create fails for PVC", func() { + BeforeEach(func() { + // Set up VM with unmanaged disk + vm.Spec.Volumes = []vmopv1.VirtualMachineVolume{ + { + Name: "disk-uuid-create-error", + VirtualMachineVolumeSource: vmopv1.VirtualMachineVolumeSource{ + PersistentVolumeClaim: &vmopv1.PersistentVolumeClaimVolumeSource{ + PersistentVolumeClaimVolumeSource: corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: "my-vm-f60a60d0", + }, + UnmanagedVolumeClaim: &vmopv1.UnmanagedVolumeClaimVolumeSource{ + Type: vmopv1.UnmanagedVolumeClaimVolumeTypeFromVM, + Name: "disk-uuid-create-error", + UUID: "disk-uuid-create-error", + }, + ControllerType: vmopv1.VirtualControllerTypeIDE, + ControllerBusNumber: ptr.To(int32(0)), + UnitNumber: ptr.To(int32(0)), + }, + }, + }, + } + + disk := &vimtypes.VirtualDisk{ + VirtualDevice: vimtypes.VirtualDevice{ + Key: 300, + ControllerKey: 100, + UnitNumber: ptr.To(int32(0)), + Backing: &vimtypes.VirtualDiskSeSparseBackingInfo{ + VirtualDeviceFileBackingInfo: vimtypes.VirtualDeviceFileBackingInfo{ + FileName: "[LocalDS_0] vm1/disk.vmdk", + }, + Uuid: "disk-uuid-create-error", + }, + }, + CapacityInBytes: 1024 * 1024 * 1024, + } + + ideController := &vimtypes.VirtualIDEController{ + VirtualController: vimtypes.VirtualController{ + VirtualDevice: vimtypes.VirtualDevice{ + Key: 100, + }, + BusNumber: 0, + }, + } + + moVM.Config = &vimtypes.VirtualMachineConfigInfo{ + Hardware: vimtypes.VirtualHardware{ + Device: []vimtypes.BaseVirtualDevice{ + ideController, + disk, + }, + }, + } + + // Set up interceptor to return error on Create + withFuncs = interceptor.Funcs{ + Create: func(ctx context.Context, client ctrlclient.WithWatch, obj ctrlclient.Object, opts ...ctrlclient.CreateOption) error { + if _, ok := obj.(*corev1.PersistentVolumeClaim); ok { + return fmt.Errorf("simulated create error") + } + return client.Create(ctx, obj, opts...) 
+ }, + } + }) + + It("should return error", func() { + err := r.Reconcile(ctx, k8sClient, vimClient, vm, moVM, configSpec) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("failed to create pvc")) + }) + }) + + When("k8sClient.Create fails for CnsRegisterVolume", func() { + BeforeEach(func() { + // Set up VM with unmanaged disk + vm.Spec.Volumes = []vmopv1.VirtualMachineVolume{ + { + Name: "disk-uuid-crv-error", + VirtualMachineVolumeSource: vmopv1.VirtualMachineVolumeSource{ + PersistentVolumeClaim: &vmopv1.PersistentVolumeClaimVolumeSource{ + PersistentVolumeClaimVolumeSource: corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: "my-vm-3144bc43", + }, + UnmanagedVolumeClaim: &vmopv1.UnmanagedVolumeClaimVolumeSource{ + Type: vmopv1.UnmanagedVolumeClaimVolumeTypeFromVM, + Name: "disk-uuid-crv-error", + UUID: "disk-uuid-crv-error", + }, + ControllerType: vmopv1.VirtualControllerTypeIDE, + ControllerBusNumber: ptr.To(int32(0)), + UnitNumber: ptr.To(int32(0)), + }, + }, + }, + } + + disk := &vimtypes.VirtualDisk{ + VirtualDevice: vimtypes.VirtualDevice{ + Key: 300, + ControllerKey: 100, + UnitNumber: ptr.To(int32(0)), + Backing: &vimtypes.VirtualDiskSeSparseBackingInfo{ + VirtualDeviceFileBackingInfo: vimtypes.VirtualDeviceFileBackingInfo{ + FileName: "[LocalDS_0] vm1/disk.vmdk", + }, + Uuid: "disk-uuid-crv-error", + }, + }, + CapacityInBytes: 1024 * 1024 * 1024, + } + + ideController := &vimtypes.VirtualIDEController{ + VirtualController: vimtypes.VirtualController{ + VirtualDevice: vimtypes.VirtualDevice{ + Key: 100, + }, + BusNumber: 0, + }, + } + + moVM.Config = &vimtypes.VirtualMachineConfigInfo{ + Hardware: vimtypes.VirtualHardware{ + Device: []vimtypes.BaseVirtualDevice{ + ideController, + disk, + }, + }, + } + + // Set up interceptor to return error on Create for CnsRegisterVolume + withFuncs = interceptor.Funcs{ + Create: func(ctx context.Context, client ctrlclient.WithWatch, obj ctrlclient.Object, opts ...ctrlclient.CreateOption) error { + if _, ok := obj.(*cnsv1alpha1.CnsRegisterVolume); ok { + return fmt.Errorf("simulated crv create error") + } + return client.Create(ctx, obj, opts...) 
+ }, + } + }) + + It("should return error", func() { + err := r.Reconcile(ctx, k8sClient, vimClient, vm, moVM, configSpec) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("failed to ensure CnsRegisterVolume")) + }) + }) + + When("k8sClient.Get fails for CnsRegisterVolume", func() { + BeforeEach(func() { + // Set up VM with unmanaged disk + vm.Spec.Volumes = []vmopv1.VirtualMachineVolume{ + { + Name: "disk-uuid-crv-get-error", + VirtualMachineVolumeSource: vmopv1.VirtualMachineVolumeSource{ + PersistentVolumeClaim: &vmopv1.PersistentVolumeClaimVolumeSource{ + PersistentVolumeClaimVolumeSource: corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: "my-vm-a89248a4", + }, + UnmanagedVolumeClaim: &vmopv1.UnmanagedVolumeClaimVolumeSource{ + Type: vmopv1.UnmanagedVolumeClaimVolumeTypeFromVM, + Name: "disk-uuid-crv-get-error", + UUID: "disk-uuid-crv-get-error", + }, + ControllerType: vmopv1.VirtualControllerTypeIDE, + ControllerBusNumber: ptr.To(int32(0)), + UnitNumber: ptr.To(int32(0)), + }, + }, + }, + } + + disk := &vimtypes.VirtualDisk{ + VirtualDevice: vimtypes.VirtualDevice{ + Key: 300, + ControllerKey: 100, + UnitNumber: ptr.To(int32(0)), + Backing: &vimtypes.VirtualDiskSeSparseBackingInfo{ + VirtualDeviceFileBackingInfo: vimtypes.VirtualDeviceFileBackingInfo{ + FileName: "[LocalDS_0] vm1/disk.vmdk", + }, + Uuid: "disk-uuid-crv-get-error", + }, + }, + CapacityInBytes: 1024 * 1024 * 1024, + } + + ideController := &vimtypes.VirtualIDEController{ + VirtualController: vimtypes.VirtualController{ + VirtualDevice: vimtypes.VirtualDevice{ + Key: 100, + }, + BusNumber: 0, + }, + } + + moVM.Config = &vimtypes.VirtualMachineConfigInfo{ + Hardware: vimtypes.VirtualHardware{ + Device: []vimtypes.BaseVirtualDevice{ + ideController, + disk, + }, + }, + } + + // Set up interceptor to return error on Get for CnsRegisterVolume + withFuncs = interceptor.Funcs{ + Get: func(ctx context.Context, client ctrlclient.WithWatch, key ctrlclient.ObjectKey, obj ctrlclient.Object, opts ...ctrlclient.GetOption) error { + if _, ok := obj.(*cnsv1alpha1.CnsRegisterVolume); ok { + return fmt.Errorf("simulated crv get error") + } + return client.Get(ctx, key, obj, opts...) 
+ }, + } + }) + + It("should return error", func() { + err := r.Reconcile(ctx, k8sClient, vimClient, vm, moVM, configSpec) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("failed to get CnsRegisterVolume")) + }) + }) + + When("k8sClient.Patch fails for PVC", func() { + BeforeEach(func() { + // Set up VM with unmanaged disk + vm.Spec.Volumes = []vmopv1.VirtualMachineVolume{ + { + Name: "disk-uuid-patch-error", + VirtualMachineVolumeSource: vmopv1.VirtualMachineVolumeSource{ + PersistentVolumeClaim: &vmopv1.PersistentVolumeClaimVolumeSource{ + PersistentVolumeClaimVolumeSource: corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: "my-vm-cd73132d", + }, + UnmanagedVolumeClaim: &vmopv1.UnmanagedVolumeClaimVolumeSource{ + Type: vmopv1.UnmanagedVolumeClaimVolumeTypeFromVM, + Name: "disk-uuid-patch-error", + UUID: "disk-uuid-patch-error", + }, + ControllerType: vmopv1.VirtualControllerTypeIDE, + ControllerBusNumber: ptr.To(int32(0)), + UnitNumber: ptr.To(int32(0)), + }, + }, + }, + } + + // Create existing PVC without OwnerRef and DataSourceRef + existingPVC := &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-vm-cd73132d", + Namespace: vm.Namespace, + // No OwnerReferences + }, + Spec: corev1.PersistentVolumeClaimSpec{ + // No DataSourceRef + AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}, + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: *kubeutil.BytesToResource(1024 * 1024 * 1024), + }, + }, + }, + Status: corev1.PersistentVolumeClaimStatus{ + Phase: corev1.ClaimPending, + }, + } + + withObjs = []ctrlclient.Object{existingPVC} + + disk := &vimtypes.VirtualDisk{ + VirtualDevice: vimtypes.VirtualDevice{ + Key: 300, + ControllerKey: 100, + UnitNumber: ptr.To(int32(0)), + Backing: &vimtypes.VirtualDiskSeSparseBackingInfo{ + VirtualDeviceFileBackingInfo: vimtypes.VirtualDeviceFileBackingInfo{ + FileName: "[LocalDS_0] vm1/disk.vmdk", + }, + Uuid: "disk-uuid-patch-error", + }, + }, + CapacityInBytes: 1024 * 1024 * 1024, + } + + ideController := &vimtypes.VirtualIDEController{ + VirtualController: vimtypes.VirtualController{ + VirtualDevice: vimtypes.VirtualDevice{ + Key: 100, + }, + BusNumber: 0, + }, + } + + moVM.Config = &vimtypes.VirtualMachineConfigInfo{ + Hardware: vimtypes.VirtualHardware{ + Device: []vimtypes.BaseVirtualDevice{ + ideController, + disk, + }, + }, + } + + // Set up interceptor to return error on Patch + withFuncs = interceptor.Funcs{ + Patch: func(ctx context.Context, client ctrlclient.WithWatch, obj ctrlclient.Object, patch ctrlclient.Patch, opts ...ctrlclient.PatchOption) error { + if _, ok := obj.(*corev1.PersistentVolumeClaim); ok { + return fmt.Errorf("simulated patch error") + } + return client.Patch(ctx, obj, patch, opts...) 
+ }, + } + }) + + It("should return error", func() { + err := r.Reconcile(ctx, k8sClient, vimClient, vm, moVM, configSpec) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("failed to patch pvc")) + }) + }) + + When("k8sClient.Delete fails for CnsRegisterVolume cleanup", func() { + BeforeEach(func() { + // Set up VM with bound PVC + vm.Spec.Volumes = []vmopv1.VirtualMachineVolume{ + { + Name: "disk-uuid-delete-error", + VirtualMachineVolumeSource: vmopv1.VirtualMachineVolumeSource{ + PersistentVolumeClaim: &vmopv1.PersistentVolumeClaimVolumeSource{ + PersistentVolumeClaimVolumeSource: corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: "my-vm-9eb41331", + }, + UnmanagedVolumeClaim: &vmopv1.UnmanagedVolumeClaimVolumeSource{ + Type: vmopv1.UnmanagedVolumeClaimVolumeTypeFromVM, + Name: "disk-uuid-delete-error", + UUID: "disk-uuid-delete-error", + }, + ControllerType: vmopv1.VirtualControllerTypeIDE, + ControllerBusNumber: ptr.To(int32(0)), + UnitNumber: ptr.To(int32(0)), + }, + }, + }, + } + + // Create bound PVC + boundPVC := &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-vm-9eb41331", + Namespace: vm.Namespace, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: vmopv1.GroupVersion.String(), + Kind: "VirtualMachine", + Name: vm.Name, + UID: vm.UID, + Controller: ptr.To(true), + }, + }, + }, + Spec: corev1.PersistentVolumeClaimSpec{ + DataSourceRef: &corev1.TypedObjectReference{ + APIGroup: &vmopv1.GroupVersion.Group, + Kind: "VirtualMachine", + Name: vm.Name, + }, + AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}, + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: *kubeutil.BytesToResource(1024 * 1024 * 1024), + }, + }, + }, + Status: corev1.PersistentVolumeClaimStatus{ + Phase: corev1.ClaimBound, + }, + } + + // Create existing CnsRegisterVolume to be cleaned up + existingCRV := &cnsv1alpha1.CnsRegisterVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-vm-9eb41331", + Namespace: vm.Namespace, + Labels: map[string]string{ + "vmoperator.vmware.com/created-by": vm.Name, + }, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: vmopv1.GroupVersion.String(), + Kind: "VirtualMachine", + Name: vm.Name, + UID: vm.UID, + Controller: ptr.To(true), + }, + }, + }, + Spec: cnsv1alpha1.CnsRegisterVolumeSpec{ + PvcName: "my-vm-9eb41331", + DiskURLPath: "[LocalDS_0] vm1/disk.vmdk", + AccessMode: corev1.ReadWriteOnce, + }, + } + + withObjs = []ctrlclient.Object{boundPVC, existingCRV} + + disk := &vimtypes.VirtualDisk{ + VirtualDevice: vimtypes.VirtualDevice{ + Key: 300, + ControllerKey: 100, + UnitNumber: ptr.To(int32(0)), + Backing: &vimtypes.VirtualDiskSeSparseBackingInfo{ + VirtualDeviceFileBackingInfo: vimtypes.VirtualDeviceFileBackingInfo{ + FileName: "[LocalDS_0] vm1/disk.vmdk", + }, + Uuid: "disk-uuid-delete-error", + }, + }, + CapacityInBytes: 1024 * 1024 * 1024, + } + + ideController := &vimtypes.VirtualIDEController{ + VirtualController: vimtypes.VirtualController{ + VirtualDevice: vimtypes.VirtualDevice{ + Key: 100, + }, + BusNumber: 0, + }, + } + + moVM.Config = &vimtypes.VirtualMachineConfigInfo{ + Hardware: vimtypes.VirtualHardware{ + Device: []vimtypes.BaseVirtualDevice{ + ideController, + disk, + }, + }, + } + + // Set up interceptor to return error on Delete + withFuncs = interceptor.Funcs{ + Delete: func(ctx context.Context, client ctrlclient.WithWatch, obj ctrlclient.Object, opts ...ctrlclient.DeleteOption) error { + if _, ok := 
obj.(*cnsv1alpha1.CnsRegisterVolume); ok { + return fmt.Errorf("simulated delete error") + } + return client.Delete(ctx, obj, opts...) + }, + } + }) + + It("should return error", func() { + err := r.Reconcile(ctx, k8sClient, vimClient, vm, moVM, configSpec) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("failed to delete CnsRegisterVolume")) + }) + }) + + When("k8sClient.DeleteAllOf fails for CnsRegisterVolume cleanup", func() { + BeforeEach(func() { + // Set up VM with bound PVC + vm.Spec.Volumes = []vmopv1.VirtualMachineVolume{ + { + Name: "disk-uuid-delete-all-error", + VirtualMachineVolumeSource: vmopv1.VirtualMachineVolumeSource{ + PersistentVolumeClaim: &vmopv1.PersistentVolumeClaimVolumeSource{ + PersistentVolumeClaimVolumeSource: corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: "my-vm-1aeeec80", + }, + UnmanagedVolumeClaim: &vmopv1.UnmanagedVolumeClaimVolumeSource{ + Type: vmopv1.UnmanagedVolumeClaimVolumeTypeFromVM, + Name: "disk-uuid-delete-all-error", + UUID: "disk-uuid-delete-all-error", + }, + ControllerType: vmopv1.VirtualControllerTypeIDE, + ControllerBusNumber: ptr.To(int32(0)), + UnitNumber: ptr.To(int32(0)), + }, + }, + }, + } + + // Create bound PVC + boundPVC := &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-vm-1aeeec80", + Namespace: vm.Namespace, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: vmopv1.GroupVersion.String(), + Kind: "VirtualMachine", + Name: vm.Name, + UID: vm.UID, + Controller: ptr.To(true), + }, + }, + }, + Spec: corev1.PersistentVolumeClaimSpec{ + DataSourceRef: &corev1.TypedObjectReference{ + APIGroup: &vmopv1.GroupVersion.Group, + Kind: "VirtualMachine", + Name: vm.Name, + }, + AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}, + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: *kubeutil.BytesToResource(1024 * 1024 * 1024), + }, + }, + }, + Status: corev1.PersistentVolumeClaimStatus{ + Phase: corev1.ClaimBound, + }, + } + + withObjs = []ctrlclient.Object{boundPVC} + + disk := &vimtypes.VirtualDisk{ + VirtualDevice: vimtypes.VirtualDevice{ + Key: 300, + ControllerKey: 100, + UnitNumber: ptr.To(int32(0)), + Backing: &vimtypes.VirtualDiskSeSparseBackingInfo{ + VirtualDeviceFileBackingInfo: vimtypes.VirtualDeviceFileBackingInfo{ + FileName: "[LocalDS_0] vm1/disk.vmdk", + }, + Uuid: "disk-uuid-delete-all-error", + }, + }, + CapacityInBytes: 1024 * 1024 * 1024, + } + + ideController := &vimtypes.VirtualIDEController{ + VirtualController: vimtypes.VirtualController{ + VirtualDevice: vimtypes.VirtualDevice{ + Key: 100, + }, + BusNumber: 0, + }, + } + + moVM.Config = &vimtypes.VirtualMachineConfigInfo{ + Hardware: vimtypes.VirtualHardware{ + Device: []vimtypes.BaseVirtualDevice{ + ideController, + disk, + }, + }, + } + + // Set up interceptor to return error on DeleteAllOf + withFuncs = interceptor.Funcs{ + DeleteAllOf: func(ctx context.Context, client ctrlclient.WithWatch, obj ctrlclient.Object, opts ...ctrlclient.DeleteAllOfOption) error { + if _, ok := obj.(*cnsv1alpha1.CnsRegisterVolume); ok { + return fmt.Errorf("simulated delete all error") + } + return client.DeleteAllOf(ctx, obj, opts...) 
+ }, + } + }) + + It("should return error", func() { + err := r.Reconcile(ctx, k8sClient, vimClient, vm, moVM, configSpec) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("failed to delete CnsRegisterVolume")) + }) + }) + + When("k8sClient.Status().Patch fails for PVC status update", func() { + BeforeEach(func() { + // Set up VM with unmanaged disk + vm.Spec.Volumes = []vmopv1.VirtualMachineVolume{ + { + Name: "disk-uuid-status-update-error", + VirtualMachineVolumeSource: vmopv1.VirtualMachineVolumeSource{ + PersistentVolumeClaim: &vmopv1.PersistentVolumeClaimVolumeSource{ + PersistentVolumeClaimVolumeSource: corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: "my-vm-ca8e6f4d", + }, + UnmanagedVolumeClaim: &vmopv1.UnmanagedVolumeClaimVolumeSource{ + Type: vmopv1.UnmanagedVolumeClaimVolumeTypeFromVM, + Name: "disk-uuid-status-update-error", + UUID: "disk-uuid-status-update-error", + }, + ControllerType: vmopv1.VirtualControllerTypeIDE, + ControllerBusNumber: ptr.To(int32(0)), + UnitNumber: ptr.To(int32(0)), + }, + }, + }, + } + + disk := &vimtypes.VirtualDisk{ + VirtualDevice: vimtypes.VirtualDevice{ + Key: 300, + ControllerKey: 100, + UnitNumber: ptr.To(int32(0)), + Backing: &vimtypes.VirtualDiskSeSparseBackingInfo{ + VirtualDeviceFileBackingInfo: vimtypes.VirtualDeviceFileBackingInfo{ + FileName: "[LocalDS_0] vm1/disk.vmdk", + }, + Uuid: "disk-uuid-status-update-error", + }, + }, + CapacityInBytes: 1024 * 1024 * 1024, + } + + ideController := &vimtypes.VirtualIDEController{ + VirtualController: vimtypes.VirtualController{ + VirtualDevice: vimtypes.VirtualDevice{ + Key: 100, + }, + BusNumber: 0, + }, + } + + moVM.Config = &vimtypes.VirtualMachineConfigInfo{ + Hardware: vimtypes.VirtualHardware{ + Device: []vimtypes.BaseVirtualDevice{ + ideController, + disk, + }, + }, + } + + // Set up interceptor to return error on Status().Patch for PVCs + withFuncs = interceptor.Funcs{ + SubResourcePatch: func(ctx context.Context, client ctrlclient.Client, subResourceName string, obj ctrlclient.Object, patch ctrlclient.Patch, opts ...ctrlclient.SubResourcePatchOption) error { + if _, ok := obj.(*corev1.PersistentVolumeClaim); ok { + return fmt.Errorf("simulated status update error") + } + return client.Status().Patch(ctx, obj, patch, opts...) + }, + } + }) + + It("should handle status update error gracefully", func() { + // This test verifies that PVC status patch errors don't break the reconciliation. + // The Reconcile function doesn't directly patch the PVC status subresource, so + // the interceptor should not fire and the usual result is still expected + err := r.Reconcile(ctx, k8sClient, vimClient, vm, moVM, configSpec) + // The function should still work even if status patches fail + Expect(err).To(Equal(alldisksarepvcs.ErrUnmanagedVols)) + }) + }) + }) + }) +}) + +// findVolumeByName finds a volume by name.
+func findVolumeByName( + volumes []vmopv1.VirtualMachineVolume, + name string) *vmopv1.VirtualMachineVolume { + + for i := range volumes { + if volumes[i].Name == name { + return &volumes[i] + } + } + return nil +} diff --git a/webhooks/virtualmachine/validation/virtualmachine_validator.go b/webhooks/virtualmachine/validation/virtualmachine_validator.go index 52a1cc61c..1ea48090b 100644 --- a/webhooks/virtualmachine/validation/virtualmachine_validator.go +++ b/webhooks/virtualmachine/validation/virtualmachine_validator.go @@ -2707,29 +2707,22 @@ func (v validator) validatePVCUnmanagedVolumeClaimInfo( pp = pp.Child("unmanagedVolumeClaim") - switch uvc.Type { - case vmopv1.UnmanagedVolumeClaimVolumeTypeFromImage: - if uvc.UUID == "" { - allErrs = append(allErrs, field.Required( - pp.Child("uuid"), - "uuid is required when type=FromImage")) - } else if _, err := uuid.Parse(uvc.UUID); err != nil { - allErrs = append(allErrs, field.Invalid( - pp.Child("uuid"), - uvc.UUID, - err.Error())) - } - case vmopv1.UnmanagedVolumeClaimVolumeTypeFromVM: + if uvc.UUID == "" { + allErrs = append(allErrs, field.Required( + pp.Child("uuid"), + "uuid is required")) + } else if _, err := uuid.Parse(uvc.UUID); err != nil { + allErrs = append(allErrs, field.Invalid( + pp.Child("uuid"), + uvc.UUID, + err.Error())) + } + + if uvc.Type == vmopv1.UnmanagedVolumeClaimVolumeTypeFromVM { if _, err := uuid.Parse(uvc.Name); err != nil { allErrs = append(allErrs, field.Invalid( pp.Child("name"), - uvc.UUID, - err.Error())) - } - if _, err := uuid.Parse(uvc.UUID); err != nil { - allErrs = append(allErrs, field.Invalid( - pp.Child("uuid"), - uvc.UUID, + uvc.Name, err.Error())) } } diff --git a/webhooks/virtualmachine/validation/virtualmachine_validator_unit_test.go b/webhooks/virtualmachine/validation/virtualmachine_validator_unit_test.go index 7d624b4fa..ed864b317 100644 --- a/webhooks/virtualmachine/validation/virtualmachine_validator_unit_test.go +++ b/webhooks/virtualmachine/validation/virtualmachine_validator_unit_test.go @@ -3668,20 +3668,35 @@ func unitTestsValidateCreate() { expectAllowed: true, }, ), - Entry("should deny VM with FromImage UnmanagedVolumeClaim missing UUID", + Entry("should deny VM with UnmanagedVolumeClaim missing UUID", testParams{ setup: func(ctx *unitValidatingWebhookContext) { ctx.vm.Spec.Volumes = []vmopv1.VirtualMachineVolume{ { - Name: "test-volume", + Name: "test-volume-1", VirtualMachineVolumeSource: vmopv1.VirtualMachineVolumeSource{ PersistentVolumeClaim: &vmopv1.PersistentVolumeClaimVolumeSource{ PersistentVolumeClaimVolumeSource: corev1.PersistentVolumeClaimVolumeSource{ - ClaimName: "test-pvc", + ClaimName: "test-pvc-1", }, UnmanagedVolumeClaim: &vmopv1.UnmanagedVolumeClaimVolumeSource{ Type: vmopv1.UnmanagedVolumeClaimVolumeTypeFromImage, - Name: "test-uvc", + Name: "test-uvc-1", + UUID: "", + }, + }, + }, + }, + { + Name: "test-volume-2", + VirtualMachineVolumeSource: vmopv1.VirtualMachineVolumeSource{ + PersistentVolumeClaim: &vmopv1.PersistentVolumeClaimVolumeSource{ + PersistentVolumeClaimVolumeSource: corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: "test-pvc-2", + }, + UnmanagedVolumeClaim: &vmopv1.UnmanagedVolumeClaimVolumeSource{ + Type: vmopv1.UnmanagedVolumeClaimVolumeTypeFromVM, + Name: "test-uvc-2", UUID: "", }, }, @@ -3691,25 +3706,41 @@ func unitTestsValidateCreate() { }, expectAllowed: false, validate: doValidateWithMsg( - field.Required(field.NewPath("spec", "volumes").Index(0).Child("persistentVolumeClaim").Child("unmanagedVolumeClaim").Child("uuid"), "uuid is 
required when type=FromImage").Error(), + field.Required(field.NewPath("spec", "volumes").Index(0).Child("persistentVolumeClaim").Child("unmanagedVolumeClaim").Child("uuid"), "uuid is required").Error(), + field.Required(field.NewPath("spec", "volumes").Index(1).Child("persistentVolumeClaim").Child("unmanagedVolumeClaim").Child("uuid"), "uuid is required").Error(), ), }, ), - Entry("should deny VM with FromImage UnmanagedVolumeClaim invalid UUID", + Entry("should deny VM with UnmanagedVolumeClaim invalid UUID", testParams{ setup: func(ctx *unitValidatingWebhookContext) { ctx.vm.Spec.Volumes = []vmopv1.VirtualMachineVolume{ { - Name: "test-volume", + Name: "test-volume-1", VirtualMachineVolumeSource: vmopv1.VirtualMachineVolumeSource{ PersistentVolumeClaim: &vmopv1.PersistentVolumeClaimVolumeSource{ PersistentVolumeClaimVolumeSource: corev1.PersistentVolumeClaimVolumeSource{ - ClaimName: "test-pvc", + ClaimName: "test-pvc-1", }, UnmanagedVolumeClaim: &vmopv1.UnmanagedVolumeClaimVolumeSource{ Type: vmopv1.UnmanagedVolumeClaimVolumeTypeFromImage, - Name: "test-uvc", - UUID: "invalid-uuid", + Name: "test-uvc-1", + UUID: "invalid-uuid-1", + }, + }, + }, + }, + { + Name: "test-volume-2", + VirtualMachineVolumeSource: vmopv1.VirtualMachineVolumeSource{ + PersistentVolumeClaim: &vmopv1.PersistentVolumeClaimVolumeSource{ + PersistentVolumeClaimVolumeSource: corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: "test-pvc-2", + }, + UnmanagedVolumeClaim: &vmopv1.UnmanagedVolumeClaimVolumeSource{ + Type: vmopv1.UnmanagedVolumeClaimVolumeTypeFromVM, + Name: "test-uvc-2", + UUID: "invalid-uuid-2", }, }, }, @@ -3718,7 +3749,8 @@ func unitTestsValidateCreate() { }, expectAllowed: false, validate: doValidateWithMsg( - field.Invalid(field.NewPath("spec", "volumes").Index(0).Child("persistentVolumeClaim").Child("unmanagedVolumeClaim").Child("uuid"), "invalid-uuid", "invalid UUID length: 12").Error(), + field.Invalid(field.NewPath("spec", "volumes").Index(0).Child("persistentVolumeClaim").Child("unmanagedVolumeClaim").Child("uuid"), "invalid-uuid-1", "invalid UUID length: 14").Error(), + field.Invalid(field.NewPath("spec", "volumes").Index(1).Child("persistentVolumeClaim").Child("unmanagedVolumeClaim").Child("uuid"), "invalid-uuid-2", "invalid UUID length: 14").Error(), ), }, ),