diff --git a/Makefile b/Makefile index ba8834fb02..b7abca9470 100644 --- a/Makefile +++ b/Makefile @@ -30,6 +30,7 @@ VELERO_INSTANCE_NAME ?= velero-test ARTIFACT_DIR ?= /tmp OC_CLI = $(shell which oc) TEST_VIRT ?= false +TEST_HCP ?= false TEST_UPGRADE ?= false # TOOL VERSIONS @@ -601,6 +602,11 @@ ifeq ($(TEST_UPGRADE),true) else TEST_FILTER += && (! upgrade) endif +ifeq ($(TEST_HCP),true) + TEST_FILTER += && (hcp) +else + TEST_FILTER += && (! hcp) +endif SETTINGS_TMP=/tmp/test-settings .PHONY: test-e2e-setup diff --git a/go.mod b/go.mod index 0fbc1ae2e0..a55df062ed 100644 --- a/go.mod +++ b/go.mod @@ -9,24 +9,25 @@ require ( github.com/kubernetes-csi/external-snapshotter/client/v4 v4.2.0 github.com/onsi/ginkgo/v2 v2.19.0 github.com/onsi/gomega v1.34.1 - github.com/openshift/api v0.0.0-20230213134911-7ba313770556 // release-4.12 + github.com/openshift/api v0.0.0-20240214165302-89248c87b7fc // release-4.12 github.com/operator-framework/api v0.10.7 github.com/operator-framework/operator-lib v0.9.0 github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.51.2 github.com/sirupsen/logrus v1.9.3 - k8s.io/api v0.29.0 + k8s.io/api v0.29.2 k8s.io/apiextensions-apiserver v0.29.0 - k8s.io/apimachinery v0.29.0 + k8s.io/apimachinery v0.29.2 k8s.io/client-go v0.29.0 - k8s.io/utils v0.0.0-20230726121419-3b25d923346b + k8s.io/utils v0.0.0-20240102154912-e7106e64919e sigs.k8s.io/controller-runtime v0.17.2 ) require ( github.com/deckarep/golang-set/v2 v2.3.0 github.com/google/go-cmp v0.7.0 + github.com/openshift/hypershift/api v0.0.0-20240522104800-604a957be25e github.com/vmware-tanzu/velero v1.14.0 - k8s.io/klog/v2 v2.110.1 + k8s.io/klog/v2 v2.120.1 ) require ( @@ -64,7 +65,7 @@ require ( github.com/bombsimon/logrusr/v3 v3.0.0 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/chmduquesne/rollinghash v4.0.0+incompatible // indirect - github.com/davecgh/go-spew v1.1.1 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/dustin/go-humanize v1.0.1 // indirect github.com/edsrzf/mmap-go v1.1.0 // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect @@ -138,7 +139,7 @@ require ( github.com/rogpeppe/go-internal v1.14.1 // indirect github.com/rs/xid v1.5.0 // indirect github.com/spf13/cobra v1.7.0 // indirect - github.com/spf13/pflag v1.0.5 // indirect + github.com/spf13/pflag v1.0.6-0.20210604193023-d5e0c0615ace // indirect github.com/stretchr/testify v1.10.0 // indirect github.com/zeebo/blake3 v0.2.3 // indirect go.opencensus.io v0.24.0 // indirect @@ -181,3 +182,5 @@ require ( replace github.com/vmware-tanzu/velero => github.com/openshift/velero v0.10.2-0.20240822153644-9ac863aaa452 replace github.com/kopia/kopia => github.com/project-velero/kopia v0.0.0-20240417031915-e07d5b7de567 + +replace github.com/openshift/hypershift => github.com/openshift/hypershift v0.1.52-0.20250828102706-84d7581f683c diff --git a/go.sum b/go.sum index 7c9d235112..03ae950082 100644 --- a/go.sum +++ b/go.sum @@ -216,8 +216,9 @@ github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ github.com/danieljoos/wincred v1.2.0 h1:ozqKHaLK0W/ii4KVbbvluM91W2H3Sh0BncbUNPS7jLE= github.com/danieljoos/wincred v1.2.0/go.mod h1:FzQLLMKBFdvu+osBrnFODiv32YGwCfx0SkRa/eYHgec= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 
+github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/deckarep/golang-set/v2 v2.3.0 h1:qs18EKUfHm2X9fA50Mr/M5hccg2tNnVqsiBImnyDs0g= github.com/deckarep/golang-set/v2 v2.3.0/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= @@ -292,7 +293,6 @@ github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTg github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= @@ -706,8 +706,10 @@ github.com/onsi/gomega v1.14.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+t github.com/onsi/gomega v1.15.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0= github.com/onsi/gomega v1.34.1 h1:EUMJIKUjM8sKjYbtxQI9A4z2o+rruxnzNvpknOXie6k= github.com/onsi/gomega v1.34.1/go.mod h1:kU1QgUvBDLXBJq618Xvm2LUX6rSAfRaFRTcdOeDLwwY= -github.com/openshift/api v0.0.0-20230213134911-7ba313770556 h1:7W2fOhJicyEff24VaF7ASNzPtYvr+iSCVft4SIBAzaE= -github.com/openshift/api v0.0.0-20230213134911-7ba313770556/go.mod h1:aQ6LDasvHMvHZXqLHnX2GRmnfTWCF/iIwz8EMTTIE9A= +github.com/openshift/api v0.0.0-20240214165302-89248c87b7fc h1:/QQKBisQey7+qtKJS4fReHmXx/GyGRS8Tb+IU2WOMh0= +github.com/openshift/api v0.0.0-20240214165302-89248c87b7fc/go.mod h1:CxgbWAlvu2iQB0UmKTtRu1YfepRg1/vJ64n2DlIEVz4= +github.com/openshift/hypershift/api v0.0.0-20240522104800-604a957be25e h1:8MuhfnkUWjKdJ35pjPLXkPELRTj89LgJfKwcdKLBII4= +github.com/openshift/hypershift/api v0.0.0-20240522104800-604a957be25e/go.mod h1:NUkcQ8wwJw0/U7VVhTKSKptjHmIvptSMjL1c0FnTqqs= github.com/openshift/velero v0.10.2-0.20240822153644-9ac863aaa452 h1:rIGzeje6KnnudkYkiua/LigiNloNZfE6Kr0hCKqhtZI= github.com/openshift/velero v0.10.2-0.20240822153644-9ac863aaa452/go.mod h1:T+tSiinatCuVO7K3zGVxbCcMNYwfIdnlfc8SmIVDI4U= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= @@ -817,8 +819,9 @@ github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0 github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.6-0.20210604193023-d5e0c0615ace h1:9PNP1jnUjRhfmGMlkXHjYPishpcw4jpSt/V/xYY3FMA= +github.com/spf13/pflag v1.0.6-0.20210604193023-d5e0c0615ace/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= 
github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= @@ -1453,8 +1456,8 @@ k8s.io/api v0.21.1/go.mod h1:FstGROTmsSHBarKc8bylzXih8BLNYTiS3TZcsoEDg2s= k8s.io/api v0.21.3/go.mod h1:hUgeYHUbBp23Ue4qdX9tR8/ANi/g3ehylAqDn9NWVOg= k8s.io/api v0.22.1/go.mod h1:bh13rkTp3F1XEaLGykbyRD2QaTTzPm0e/BMd8ptFONY= k8s.io/api v0.24.0/go.mod h1:5Jl90IUrJHUJYEMANRURMiVvJ0g7Ax7r3R1bqO8zx8I= -k8s.io/api v0.29.0 h1:NiCdQMY1QOp1H8lfRyeEf8eOwV6+0xA6XEE44ohDX2A= -k8s.io/api v0.29.0/go.mod h1:sdVmXoz2Bo/cb77Pxi71IPTSErEW32xa4aXwKH7gfBA= +k8s.io/api v0.29.2 h1:hBC7B9+MU+ptchxEqTNW2DkUosJpp1P+Wn6YncZ474A= +k8s.io/api v0.29.2/go.mod h1:sdIaaKuU7P44aoyyLlikSLayT6Vb7bvJNCX105xZXY0= k8s.io/apiextensions-apiserver v0.18.3/go.mod h1:TMsNGs7DYpMXd+8MOCX8KzPOCx8fnZMoIGB24m03+JE= k8s.io/apiextensions-apiserver v0.21.1/go.mod h1:KESQFCGjqVcVsZ9g0xX5bacMjyX5emuWcS2arzdEouA= k8s.io/apiextensions-apiserver v0.21.3/go.mod h1:kl6dap3Gd45+21Jnh6utCx8Z2xxLm8LGDkprcd+KbsE= @@ -1467,8 +1470,8 @@ k8s.io/apimachinery v0.21.1/go.mod h1:jbreFvJo3ov9rj7eWT7+sYiRx+qZuCYXwWT1bcDswP k8s.io/apimachinery v0.21.3/go.mod h1:H/IM+5vH9kZRNJ4l3x/fXP/5bOPJaVP/guptnZPeCFI= k8s.io/apimachinery v0.22.1/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0= k8s.io/apimachinery v0.24.0/go.mod h1:82Bi4sCzVBdpYjyI4jY6aHX+YCUchUIrZrXKedjd2UM= -k8s.io/apimachinery v0.29.0 h1:+ACVktwyicPz0oc6MTMLwa2Pw3ouLAfAon1wPLtG48o= -k8s.io/apimachinery v0.29.0/go.mod h1:eVBxQ/cwiJxH58eK/jd/vAk4mrxmVlnpBH5J2GbMeis= +k8s.io/apimachinery v0.29.2 h1:EWGpfJ856oj11C52NRCHuU7rFDwxev48z+6DSlGNsV8= +k8s.io/apimachinery v0.29.2/go.mod h1:6HVkd1FwxIagpYrHSwJlQqZI3G9LfYWRPAkUvLnXTKU= k8s.io/apiserver v0.18.3/go.mod h1:tHQRmthRPLUtwqsOnJJMoI8SW3lnoReZeE861lH8vUw= k8s.io/apiserver v0.21.1/go.mod h1:nLLYZvMWn35glJ4/FZRhzLG/3MPxAaZTgV4FJZdr+tY= k8s.io/apiserver v0.21.3/go.mod h1:eDPWlZG6/cCCMj/JBcEpDoK+I+6i3r9GsChYBHSbAzU= @@ -1508,8 +1511,8 @@ k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.8.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= k8s.io/klog/v2 v2.60.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0= -k8s.io/klog/v2 v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo= +k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw= +k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7/go.mod h1:wXW5VT87nVfh/iLV8FpR2uDvrFyomxbtb1KivDbvPTE= @@ -1524,8 +1527,8 @@ k8s.io/utils v0.0.0-20210527160623-6fdb442a123b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/ k8s.io/utils v0.0.0-20210707171843-4b05e18ac7d9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= -k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/utils v0.0.0-20240102154912-e7106e64919e 
h1:eQ/4ljkx21sObifjzXwlPKpdGLrCfRziVtos3ofG/sQ= +k8s.io/utils v0.0.0-20240102154912-e7106e64919e/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/tests/e2e/backup_restore_suite_test.go b/tests/e2e/backup_restore_suite_test.go index d79aec0465..7f7b89657c 100755 --- a/tests/e2e/backup_restore_suite_test.go +++ b/tests/e2e/backup_restore_suite_test.go @@ -8,10 +8,11 @@ import ( "time" "github.com/google/uuid" - . "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" - . "github.com/openshift/oadp-operator/tests/e2e/lib" + "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/openshift/oadp-operator/tests/e2e/lib" ) type VerificationFunction func(client.Client, string) error @@ -19,7 +20,7 @@ type VerificationFunction func(client.Client, string) error type BackupRestoreCase struct { Namespace string Name string - BackupRestoreType BackupRestoreType + BackupRestoreType lib.BackupRestoreType PreBackupVerify VerificationFunction PostRestoreVerify VerificationFunction SkipVerifyLogs bool // TODO remove @@ -35,36 +36,38 @@ type ApplicationBackupRestoreCase struct { func todoListReady(preBackupState bool, twoVol bool, database string) VerificationFunction { return VerificationFunction(func(ocClient client.Client, namespace string) error { log.Printf("checking for the NAMESPACE: %s", namespace) - Eventually(IsDeploymentReady(ocClient, namespace, database), time.Minute*10, time.Second*10).Should(BeTrue()) - Eventually(IsDCReady(ocClient, namespace, "todolist"), time.Minute*10, time.Second*10).Should(BeTrue()) - Eventually(AreApplicationPodsRunning(kubernetesClientForSuiteRun, namespace), time.Minute*9, time.Second*5).Should(BeTrue()) + gomega.Eventually(lib.IsDeploymentReady(ocClient, namespace, database), time.Minute*10, time.Second*10).Should(gomega.BeTrue()) + gomega.Eventually(lib.IsDCReady(ocClient, namespace, "todolist"), time.Minute*10, time.Second*10).Should(gomega.BeTrue()) + gomega.Eventually(lib.AreApplicationPodsRunning(kubernetesClientForSuiteRun, namespace), time.Minute*9, time.Second*5).Should(gomega.BeTrue()) // This test confirms that SCC restore logic in our plugin is working - err := DoesSCCExist(ocClient, database+"-persistent-scc") + err := lib.DoesSCCExist(ocClient, database+"-persistent-scc") if err != nil { return err } - err = VerifyBackupRestoreData(runTimeClientForSuiteRun, artifact_dir, namespace, "todolist-route", "todolist", preBackupState, twoVol) + err = lib.VerifyBackupRestoreData(runTimeClientForSuiteRun, kubernetesClientForSuiteRun, kubeConfig, artifact_dir, namespace, "todolist-route", "todolist", "todolist", preBackupState, twoVol) return err }) } -func waitOADPReadiness(backupRestoreType BackupRestoreType) { - err := dpaCR.CreateOrUpdate(runTimeClientForSuiteRun, dpaCR.Build(backupRestoreType)) - Expect(err).NotTo(HaveOccurred()) +func waitOADPReadiness(backupRestoreType lib.BackupRestoreType) { + err := dpaCR.CreateOrUpdate(dpaCR.Build(backupRestoreType)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) log.Print("Checking if DPA is reconciled") - Eventually(dpaCR.IsReconciledTrue(), time.Minute*3, time.Second*5).Should(BeTrue()) + gomega.Eventually(dpaCR.IsReconciledTrue(), time.Minute*3, time.Second*5).Should(gomega.BeTrue()) - 
log.Print("Checking if velero Pod is running") - Eventually(VeleroPodIsRunning(kubernetesClientForSuiteRun, namespace), time.Minute*3, time.Second*5).Should(BeTrue()) + log.Printf("Waiting for Velero Pod to be running") + gomega.Eventually(lib.VeleroPodIsRunning(kubernetesClientForSuiteRun, namespace), time.Minute*3, time.Second*5).Should(gomega.BeTrue()) - if backupRestoreType == RESTIC || backupRestoreType == KOPIA || backupRestoreType == CSIDataMover { + if backupRestoreType == lib.RESTIC || backupRestoreType == lib.KOPIA || backupRestoreType == lib.CSIDataMover { log.Printf("Waiting for Node Agent pods to be running") - Eventually(AreNodeAgentPodsRunning(kubernetesClientForSuiteRun, namespace), time.Minute*3, time.Second*5).Should(BeTrue()) + gomega.Eventually(lib.AreNodeAgentPodsRunning(kubernetesClientForSuiteRun, namespace), time.Minute*3, time.Second*5).Should(gomega.BeTrue()) } + // Velero does not change status of VSL objects. Users can only confirm if VSLs are correct configured when running a native snapshot backup/restore + log.Print("Checking if BSL is available") - Eventually(dpaCR.BSLsAreAvailable(), time.Minute*3, time.Second*5).Should(BeTrue()) + gomega.Eventually(dpaCR.BSLsAreAvailable(), time.Minute*3, time.Second*5).Should(gomega.BeTrue()) } func prepareBackupAndRestore(brCase BackupRestoreCase, updateLastInstallTime func()) (string, string) { @@ -72,12 +75,12 @@ func prepareBackupAndRestore(brCase BackupRestoreCase, updateLastInstallTime fun waitOADPReadiness(brCase.BackupRestoreType) - if brCase.BackupRestoreType == CSI || brCase.BackupRestoreType == CSIDataMover { - if provider == "aws" || provider == "ibmcloud" || provider == "gcp" || provider == "azure" { + if brCase.BackupRestoreType == lib.CSI || brCase.BackupRestoreType == lib.CSIDataMover { + if provider == "aws" || provider == "ibmcloud" || provider == "gcp" || provider == "azure" || provider == "openstack" { log.Printf("Creating VolumeSnapshotClass for CSI backuprestore of %s", brCase.Name) snapshotClassPath := fmt.Sprintf("./sample-applications/snapclass-csi/%s.yaml", provider) - err := InstallApplication(dpaCR.Client, snapshotClassPath) - Expect(err).ToNot(HaveOccurred()) + err := lib.InstallApplication(dpaCR.Client, snapshotClassPath) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) } } @@ -92,18 +95,22 @@ func prepareBackupAndRestore(brCase BackupRestoreCase, updateLastInstallTime fun return backupName, restoreName } -func runApplicationBackupAndRestore(brCase ApplicationBackupRestoreCase, expectedErr error, updateLastBRcase func(brCase ApplicationBackupRestoreCase), updateLastInstallTime func()) { +func runApplicationBackupAndRestore(brCase ApplicationBackupRestoreCase, updateLastBRcase func(brCase ApplicationBackupRestoreCase), updateLastInstallTime func()) { updateLastBRcase(brCase) // create DPA backupName, restoreName := prepareBackupAndRestore(brCase.BackupRestoreCase, updateLastInstallTime) + // Ensure that an existing backup repository is deleted + brerr := lib.DeleteBackupRepositories(runTimeClientForSuiteRun, namespace) + gomega.Expect(brerr).ToNot(gomega.HaveOccurred()) + // install app updateLastInstallTime() log.Printf("Installing application for case %s", brCase.Name) - err := InstallApplication(dpaCR.Client, brCase.ApplicationTemplate) - Expect(err).ToNot(HaveOccurred()) - if brCase.BackupRestoreType == CSI || brCase.BackupRestoreType == CSIDataMover { + err := lib.InstallApplication(dpaCR.Client, brCase.ApplicationTemplate) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + if 
brCase.BackupRestoreType == lib.CSI || brCase.BackupRestoreType == lib.CSIDataMover { log.Printf("Creating pvc for case %s", brCase.Name) var pvcName string var pvcPath string @@ -120,15 +127,15 @@ func runApplicationBackupAndRestore(brCase ApplicationBackupRestoreCase, expecte pvcPath = fmt.Sprintf(pvcPathFormat, brCase.Namespace, pvcName) - err = InstallApplication(dpaCR.Client, pvcPath) - Expect(err).ToNot(HaveOccurred()) + err = lib.InstallApplication(dpaCR.Client, pvcPath) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) } // Run optional custom verification if brCase.PreBackupVerify != nil { log.Printf("Running pre-backup custom function for case %s", brCase.Name) err := brCase.PreBackupVerify(dpaCR.Client, brCase.Namespace) - Expect(err).ToNot(HaveOccurred()) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) } // do the backup for real @@ -136,11 +143,11 @@ func runApplicationBackupAndRestore(brCase ApplicationBackupRestoreCase, expecte // uninstall app log.Printf("Uninstalling app for case %s", brCase.Name) - err = UninstallApplication(dpaCR.Client, brCase.ApplicationTemplate) - Expect(err).ToNot(HaveOccurred()) + err = lib.UninstallApplication(dpaCR.Client, brCase.ApplicationTemplate) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) // Wait for namespace to be deleted - Eventually(IsNamespaceDeleted(kubernetesClientForSuiteRun, brCase.Namespace), time.Minute*4, time.Second*5).Should(BeTrue()) + gomega.Eventually(lib.IsNamespaceDeleted(kubernetesClientForSuiteRun, brCase.Namespace), time.Minute*4, time.Second*5).Should(gomega.BeTrue()) updateLastInstallTime() @@ -151,13 +158,13 @@ func runApplicationBackupAndRestore(brCase ApplicationBackupRestoreCase, expecte if brCase.PostRestoreVerify != nil { log.Printf("Running post-restore custom function for case %s", brCase.Name) err = brCase.PostRestoreVerify(dpaCR.Client, brCase.Namespace) - Expect(err).ToNot(HaveOccurred()) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) } } func runBackup(brCase BackupRestoreCase, backupName string) bool { - nsRequiresResticDCWorkaround, err := NamespaceRequiresResticDCWorkaround(dpaCR.Client, brCase.Namespace) - Expect(err).ToNot(HaveOccurred()) + nsRequiresResticDCWorkaround, err := lib.NamespaceRequiresResticDCWorkaround(dpaCR.Client, brCase.Namespace) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) if strings.Contains(brCase.Name, "twovol") { volumeSyncDelay := 30 * time.Second @@ -168,58 +175,53 @@ func runBackup(brCase BackupRestoreCase, backupName string) bool { // create backup log.Printf("Creating backup %s for case %s", backupName, brCase.Name) - err = CreateBackupForNamespaces(dpaCR.Client, namespace, backupName, []string{brCase.Namespace}, brCase.BackupRestoreType == RESTIC || brCase.BackupRestoreType == KOPIA, brCase.BackupRestoreType == CSIDataMover) - Expect(err).ToNot(HaveOccurred()) + err = lib.CreateBackupForNamespaces(dpaCR.Client, namespace, backupName, []string{brCase.Namespace}, brCase.BackupRestoreType == lib.RESTIC || brCase.BackupRestoreType == lib.KOPIA, brCase.BackupRestoreType == lib.CSIDataMover) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) // wait for backup to not be running - Eventually(IsBackupDone(dpaCR.Client, namespace, backupName), brCase.BackupTimeout, time.Second*10).Should(BeTrue()) + gomega.Eventually(lib.IsBackupDone(dpaCR.Client, namespace, backupName), brCase.BackupTimeout, time.Second*10).Should(gomega.BeTrue()) // TODO only log on fail? 
- describeBackup := DescribeBackup(veleroClientForSuiteRun, dpaCR.Client, namespace, backupName) - GinkgoWriter.Println(describeBackup) + describeBackup := lib.DescribeBackup(dpaCR.Client, namespace, backupName) + ginkgo.GinkgoWriter.Println(describeBackup) - backupLogs := BackupLogs(kubernetesClientForSuiteRun, dpaCR.Client, namespace, backupName) - backupErrorLogs := BackupErrorLogs(kubernetesClientForSuiteRun, dpaCR.Client, namespace, backupName) + backupLogs := lib.BackupLogs(kubernetesClientForSuiteRun, dpaCR.Client, namespace, backupName) + backupErrorLogs := lib.BackupErrorLogs(kubernetesClientForSuiteRun, dpaCR.Client, namespace, backupName) accumulatedTestLogs = append(accumulatedTestLogs, describeBackup, backupLogs) if !brCase.SkipVerifyLogs { - Expect(backupErrorLogs).Should(Equal([]string{})) + gomega.Expect(backupErrorLogs).Should(gomega.Equal([]string{})) } // check if backup succeeded - succeeded, err := IsBackupCompletedSuccessfully(kubernetesClientForSuiteRun, dpaCR.Client, namespace, backupName) - Expect(err).ToNot(HaveOccurred()) - Expect(succeeded).To(Equal(true)) + succeeded, err := lib.IsBackupCompletedSuccessfully(kubernetesClientForSuiteRun, dpaCR.Client, namespace, backupName) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + gomega.Expect(succeeded).To(gomega.Equal(true)) log.Printf("Backup for case %s succeeded", brCase.Name) - if brCase.BackupRestoreType == CSI { - // wait for volume snapshot to be Ready - Eventually(AreVolumeSnapshotsReady(dpaCR.Client, backupName), time.Minute*4, time.Second*10).Should(BeTrue()) - } - return nsRequiresResticDCWorkaround } func runRestore(brCase BackupRestoreCase, backupName, restoreName string, nsRequiresResticDCWorkaround bool) { log.Printf("Creating restore %s for case %s", restoreName, brCase.Name) - err := CreateRestoreFromBackup(dpaCR.Client, namespace, backupName, restoreName) - Expect(err).ToNot(HaveOccurred()) - Eventually(IsRestoreDone(dpaCR.Client, namespace, restoreName), time.Minute*60, time.Second*10).Should(BeTrue()) + err := lib.CreateRestoreFromBackup(dpaCR.Client, namespace, backupName, restoreName) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + gomega.Eventually(lib.IsRestoreDone(dpaCR.Client, namespace, restoreName), time.Minute*60, time.Second*10).Should(gomega.BeTrue()) // TODO only log on fail? 
- describeRestore := DescribeRestore(veleroClientForSuiteRun, dpaCR.Client, namespace, restoreName) - GinkgoWriter.Println(describeRestore) + describeRestore := lib.DescribeRestore(dpaCR.Client, namespace, restoreName) + ginkgo.GinkgoWriter.Println(describeRestore) - restoreLogs := RestoreLogs(kubernetesClientForSuiteRun, dpaCR.Client, namespace, restoreName) - restoreErrorLogs := RestoreErrorLogs(kubernetesClientForSuiteRun, dpaCR.Client, namespace, restoreName) + restoreLogs := lib.RestoreLogs(kubernetesClientForSuiteRun, dpaCR.Client, namespace, restoreName) + restoreErrorLogs := lib.RestoreErrorLogs(kubernetesClientForSuiteRun, dpaCR.Client, namespace, restoreName) accumulatedTestLogs = append(accumulatedTestLogs, describeRestore, restoreLogs) if !brCase.SkipVerifyLogs { - Expect(restoreErrorLogs).Should(Equal([]string{})) + gomega.Expect(restoreErrorLogs).Should(gomega.Equal([]string{})) } // Check if restore succeeded - succeeded, err := IsRestoreCompletedSuccessfully(kubernetesClientForSuiteRun, dpaCR.Client, namespace, restoreName) - Expect(err).ToNot(HaveOccurred()) - Expect(succeeded).To(Equal(true)) + succeeded, err := lib.IsRestoreCompletedSuccessfully(kubernetesClientForSuiteRun, dpaCR.Client, namespace, restoreName) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + gomega.Expect(succeeded).To(gomega.Equal(true)) if nsRequiresResticDCWorkaround { // We run the dc-post-restore.sh script for both restic and @@ -228,53 +230,53 @@ func runRestore(brCase BackupRestoreCase, backupName, restoreName string, nsRequ // The script is designed to work with labels set by the // openshift-velero-plugin and can be run without pre-conditions. log.Printf("Running dc-post-restore.sh script.") - err = RunDcPostRestoreScript(restoreName) - Expect(err).ToNot(HaveOccurred()) + err = lib.RunDcPostRestoreScript(restoreName) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) } } -func getFailedTestLogs(oadpNamespace string, appNamespace string, installTime time.Time, report SpecReport) { +func getFailedTestLogs(oadpNamespace string, appNamespace string, installTime time.Time, report ginkgo.SpecReport) { baseReportDir := artifact_dir + "/" + report.LeafNodeText err := os.MkdirAll(baseReportDir, 0755) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) log.Println("Printing OADP namespace events") - PrintNamespaceEventsAfterTime(kubernetesClientForSuiteRun, oadpNamespace, installTime) - err = SavePodLogs(kubernetesClientForSuiteRun, oadpNamespace, baseReportDir) - Expect(err).NotTo(HaveOccurred()) + lib.PrintNamespaceEventsAfterTime(kubernetesClientForSuiteRun, oadpNamespace, installTime) + err = lib.SavePodLogs(kubernetesClientForSuiteRun, oadpNamespace, baseReportDir) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) if appNamespace != "" { log.Println("Printing app namespace events") - PrintNamespaceEventsAfterTime(kubernetesClientForSuiteRun, appNamespace, installTime) - err = SavePodLogs(kubernetesClientForSuiteRun, appNamespace, baseReportDir) - Expect(err).NotTo(HaveOccurred()) + lib.PrintNamespaceEventsAfterTime(kubernetesClientForSuiteRun, appNamespace, installTime) + err = lib.SavePodLogs(kubernetesClientForSuiteRun, appNamespace, baseReportDir) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) } } -func tearDownBackupAndRestore(brCase BackupRestoreCase, installTime time.Time, report SpecReport) { +func tearDownBackupAndRestore(brCase BackupRestoreCase, installTime time.Time, report ginkgo.SpecReport) { log.Println("Post backup and restore state: ", 
report.State.String()) if report.Failed() { - knownFlake = CheckIfFlakeOccurred(accumulatedTestLogs) + knownFlake = lib.CheckIfFlakeOccurred(accumulatedTestLogs) accumulatedTestLogs = nil getFailedTestLogs(namespace, brCase.Namespace, installTime, report) } - if brCase.BackupRestoreType == CSI || brCase.BackupRestoreType == CSIDataMover { + if brCase.BackupRestoreType == lib.CSI || brCase.BackupRestoreType == lib.CSIDataMover { log.Printf("Deleting VolumeSnapshot for CSI backuprestore of %s", brCase.Name) snapshotClassPath := fmt.Sprintf("./sample-applications/snapclass-csi/%s.yaml", provider) - err := UninstallApplication(dpaCR.Client, snapshotClassPath) - Expect(err).ToNot(HaveOccurred()) + err := lib.UninstallApplication(dpaCR.Client, snapshotClassPath) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) } err := dpaCR.Delete() - Expect(err).ToNot(HaveOccurred()) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) - err = DeleteNamespace(kubernetesClientForSuiteRun, brCase.Namespace) - Expect(err).ToNot(HaveOccurred()) - Eventually(IsNamespaceDeleted(kubernetesClientForSuiteRun, brCase.Namespace), time.Minute*5, time.Second*5).Should(BeTrue()) + err = lib.DeleteNamespace(kubernetesClientForSuiteRun, brCase.Namespace) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + gomega.Eventually(lib.IsNamespaceDeleted(kubernetesClientForSuiteRun, brCase.Namespace), time.Minute*5, time.Second*5).Should(gomega.BeTrue()) } -var _ = Describe("Backup and restore tests", Ordered, func() { +var _ = ginkgo.Describe("Backup and restore tests", ginkgo.Ordered, func() { var lastBRCase ApplicationBackupRestoreCase var lastInstallTime time.Time updateLastBRcase := func(brCase ApplicationBackupRestoreCase) { @@ -284,136 +286,152 @@ var _ = Describe("Backup and restore tests", Ordered, func() { lastInstallTime = time.Now() } - var _ = AfterEach(func(ctx SpecContext) { + var _ = ginkgo.AfterEach(func(ctx ginkgo.SpecContext) { tearDownBackupAndRestore(lastBRCase.BackupRestoreCase, lastInstallTime, ctx.SpecReport()) }) - var _ = AfterAll(func() { + var _ = ginkgo.AfterAll(func() { // DPA just needs to have BSL so gathering of backups/restores logs/describe work // using kopia to collect more info (DaemonSet) - waitOADPReadiness(KOPIA) - - log.Printf("Running OADP must-gather") - err := RunMustGather(artifact_dir, dpaCR.Client) - Expect(err).ToNot(HaveOccurred()) + waitOADPReadiness(lib.KOPIA) - err = dpaCR.Delete() - Expect(err).ToNot(HaveOccurred()) }) - DescribeTable("Backup and restore applications", + ginkgo.DescribeTable("Backup and restore applications", func(brCase ApplicationBackupRestoreCase, expectedErr error) { - if CurrentSpecReport().NumAttempts > 1 && !knownFlake { - Fail("No known FLAKE found in a previous run, marking test as failed.") + if ginkgo.CurrentSpecReport().NumAttempts > 1 && !knownFlake { + ginkgo.Fail("No known FLAKE found in a previous run, marking test as failed.") } - runApplicationBackupAndRestore(brCase, expectedErr, updateLastBRcase, updateLastInstallTime) + runApplicationBackupAndRestore(brCase, updateLastBRcase, updateLastInstallTime) }, - Entry("MySQL application CSI", FlakeAttempts(flakeAttempts), ApplicationBackupRestoreCase{ + ginkgo.Entry("MySQL application CSI", ginkgo.FlakeAttempts(flakeAttempts), ApplicationBackupRestoreCase{ ApplicationTemplate: "./sample-applications/mysql-persistent/mysql-persistent-csi.yaml", BackupRestoreCase: BackupRestoreCase{ Namespace: "mysql-persistent", Name: "mysql-csi-e2e", - BackupRestoreType: CSI, + BackupRestoreType: lib.CSI, PreBackupVerify: 
todoListReady(true, false, "mysql"), PostRestoreVerify: todoListReady(false, false, "mysql"), BackupTimeout: 20 * time.Minute, }, }, nil), - Entry("Mongo application CSI", FlakeAttempts(flakeAttempts), ApplicationBackupRestoreCase{ + ginkgo.Entry("Mongo application CSI", ginkgo.FlakeAttempts(flakeAttempts), ApplicationBackupRestoreCase{ ApplicationTemplate: "./sample-applications/mongo-persistent/mongo-persistent-csi.yaml", BackupRestoreCase: BackupRestoreCase{ Namespace: "mongo-persistent", Name: "mongo-csi-e2e", - BackupRestoreType: CSI, + BackupRestoreType: lib.CSI, PreBackupVerify: todoListReady(true, false, "mongo"), PostRestoreVerify: todoListReady(false, false, "mongo"), BackupTimeout: 20 * time.Minute, }, }, nil), - Entry("MySQL application two Vol CSI", FlakeAttempts(flakeAttempts), ApplicationBackupRestoreCase{ + ginkgo.Entry("MySQL application two Vol CSI", ginkgo.FlakeAttempts(flakeAttempts), ApplicationBackupRestoreCase{ ApplicationTemplate: "./sample-applications/mysql-persistent/mysql-persistent-twovol-csi.yaml", BackupRestoreCase: BackupRestoreCase{ Namespace: "mysql-persistent", Name: "mysql-twovol-csi-e2e", - BackupRestoreType: CSI, + BackupRestoreType: lib.CSI, PreBackupVerify: todoListReady(true, true, "mysql"), PostRestoreVerify: todoListReady(false, true, "mysql"), BackupTimeout: 20 * time.Minute, }, }, nil), - Entry("Mongo application RESTIC", FlakeAttempts(flakeAttempts), ApplicationBackupRestoreCase{ + ginkgo.Entry("Mongo application RESTIC", ginkgo.FlakeAttempts(flakeAttempts), ApplicationBackupRestoreCase{ ApplicationTemplate: "./sample-applications/mongo-persistent/mongo-persistent.yaml", BackupRestoreCase: BackupRestoreCase{ Namespace: "mongo-persistent", Name: "mongo-restic-e2e", - BackupRestoreType: RESTIC, + BackupRestoreType: lib.RESTIC, PreBackupVerify: todoListReady(true, false, "mongo"), PostRestoreVerify: todoListReady(false, false, "mongo"), BackupTimeout: 20 * time.Minute, }, }, nil), - Entry("MySQL application RESTIC", FlakeAttempts(flakeAttempts), ApplicationBackupRestoreCase{ + ginkgo.Entry("MySQL application RESTIC", ginkgo.FlakeAttempts(flakeAttempts), ApplicationBackupRestoreCase{ ApplicationTemplate: "./sample-applications/mysql-persistent/mysql-persistent.yaml", BackupRestoreCase: BackupRestoreCase{ Namespace: "mysql-persistent", Name: "mysql-restic-e2e", - BackupRestoreType: RESTIC, + BackupRestoreType: lib.RESTIC, PreBackupVerify: todoListReady(true, false, "mysql"), PostRestoreVerify: todoListReady(false, false, "mysql"), BackupTimeout: 20 * time.Minute, }, }, nil), - Entry("Mongo application KOPIA", FlakeAttempts(flakeAttempts), ApplicationBackupRestoreCase{ + ginkgo.Entry("Mongo application KOPIA", ginkgo.FlakeAttempts(flakeAttempts), ApplicationBackupRestoreCase{ ApplicationTemplate: "./sample-applications/mongo-persistent/mongo-persistent.yaml", BackupRestoreCase: BackupRestoreCase{ Namespace: "mongo-persistent", Name: "mongo-kopia-e2e", - BackupRestoreType: KOPIA, + BackupRestoreType: lib.KOPIA, PreBackupVerify: todoListReady(true, false, "mongo"), PostRestoreVerify: todoListReady(false, false, "mongo"), BackupTimeout: 20 * time.Minute, }, }, nil), - Entry("MySQL application KOPIA", FlakeAttempts(flakeAttempts), ApplicationBackupRestoreCase{ + ginkgo.Entry("MySQL application KOPIA", ginkgo.FlakeAttempts(flakeAttempts), ApplicationBackupRestoreCase{ ApplicationTemplate: "./sample-applications/mysql-persistent/mysql-persistent.yaml", BackupRestoreCase: BackupRestoreCase{ Namespace: "mysql-persistent", Name: "mysql-kopia-e2e", - 
BackupRestoreType: KOPIA, + BackupRestoreType: lib.KOPIA, PreBackupVerify: todoListReady(true, false, "mysql"), PostRestoreVerify: todoListReady(false, false, "mysql"), BackupTimeout: 20 * time.Minute, }, }, nil), - Entry("Mongo application DATAMOVER", FlakeAttempts(flakeAttempts), ApplicationBackupRestoreCase{ + ginkgo.Entry("Mongo application DATAMOVER", ginkgo.FlakeAttempts(flakeAttempts), ApplicationBackupRestoreCase{ ApplicationTemplate: "./sample-applications/mongo-persistent/mongo-persistent-csi.yaml", BackupRestoreCase: BackupRestoreCase{ Namespace: "mongo-persistent", Name: "mongo-datamover-e2e", - BackupRestoreType: CSIDataMover, + BackupRestoreType: lib.CSIDataMover, PreBackupVerify: todoListReady(true, false, "mongo"), PostRestoreVerify: todoListReady(false, false, "mongo"), BackupTimeout: 20 * time.Minute, }, }, nil), - Entry("MySQL application DATAMOVER", FlakeAttempts(flakeAttempts), ApplicationBackupRestoreCase{ + ginkgo.Entry("MySQL application DATAMOVER", ginkgo.FlakeAttempts(flakeAttempts), ApplicationBackupRestoreCase{ ApplicationTemplate: "./sample-applications/mysql-persistent/mysql-persistent-csi.yaml", BackupRestoreCase: BackupRestoreCase{ Namespace: "mysql-persistent", Name: "mysql-datamover-e2e", - BackupRestoreType: CSIDataMover, + BackupRestoreType: lib.CSIDataMover, PreBackupVerify: todoListReady(true, false, "mysql"), PostRestoreVerify: todoListReady(false, false, "mysql"), BackupTimeout: 20 * time.Minute, }, }, nil), - Entry("Mongo application BlockDevice DATAMOVER", FlakeAttempts(flakeAttempts), ApplicationBackupRestoreCase{ + ginkgo.Entry("Mongo application BlockDevice DATAMOVER", ginkgo.FlakeAttempts(flakeAttempts), ApplicationBackupRestoreCase{ ApplicationTemplate: "./sample-applications/mongo-persistent/mongo-persistent-block.yaml", PvcSuffixName: "-block-mode", BackupRestoreCase: BackupRestoreCase{ Namespace: "mongo-persistent", Name: "mongo-blockdevice-e2e", - BackupRestoreType: CSIDataMover, + BackupRestoreType: lib.CSIDataMover, + PreBackupVerify: todoListReady(true, false, "mongo"), + PostRestoreVerify: todoListReady(false, false, "mongo"), + BackupTimeout: 20 * time.Minute, + }, + }, nil), + ginkgo.Entry("MySQL application Native-Snapshots", ginkgo.FlakeAttempts(flakeAttempts), ginkgo.Label("aws", "azure", "gcp"), ApplicationBackupRestoreCase{ + ApplicationTemplate: "./sample-applications/mysql-persistent/mysql-persistent.yaml", + BackupRestoreCase: BackupRestoreCase{ + Namespace: "mysql-persistent", + Name: "mysql-native-snapshots-e2e", + BackupRestoreType: lib.NativeSnapshots, + PreBackupVerify: todoListReady(true, false, "mysql"), + PostRestoreVerify: todoListReady(false, false, "mysql"), + BackupTimeout: 20 * time.Minute, + }, + }, nil), + ginkgo.Entry("Mongo application Native-Snapshots", ginkgo.FlakeAttempts(flakeAttempts), ginkgo.Label("aws", "azure", "gcp"), ApplicationBackupRestoreCase{ + ApplicationTemplate: "./sample-applications/mongo-persistent/mongo-persistent.yaml", + BackupRestoreCase: BackupRestoreCase{ + Namespace: "mongo-persistent", + Name: "mongo-native-snapshots-e2e", + BackupRestoreType: lib.NativeSnapshots, PreBackupVerify: todoListReady(true, false, "mongo"), PostRestoreVerify: todoListReady(false, false, "mongo"), BackupTimeout: 20 * time.Minute, diff --git a/tests/e2e/dpa_deployment_suite_test.go b/tests/e2e/dpa_deployment_suite_test.go index 94ac09d8bc..8afcde680b 100644 --- a/tests/e2e/dpa_deployment_suite_test.go +++ b/tests/e2e/dpa_deployment_suite_test.go @@ -7,14 +7,15 @@ import ( "time" 
"github.com/google/go-cmp/cmp" - . "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" - oadpv1alpha1 "github.com/openshift/oadp-operator/api/v1alpha1" - . "github.com/openshift/oadp-operator/tests/e2e/lib" + "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" velero "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" "k8s.io/utils/ptr" + + oadpv1alpha1 "github.com/openshift/oadp-operator/api/v1alpha1" + "github.com/openshift/oadp-operator/tests/e2e/lib" ) type TestDPASpec struct { @@ -23,16 +24,15 @@ type TestDPASpec struct { CustomPlugins []oadpv1alpha1.CustomPlugin SnapshotLocations []oadpv1alpha1.SnapshotLocation VeleroPodConfig oadpv1alpha1.PodConfig - ResticPodConfig oadpv1alpha1.PodConfig NodeAgentPodConfig oadpv1alpha1.PodConfig EnableRestic bool EnableNodeAgent bool + UploaderType string NoDefaultBackupLocation bool s3ForcePathStyle bool NoS3ForcePathStyle bool NoRegion bool DoNotBackupImages bool - UnsupportedOverrides map[oadpv1alpha1.UnsupportedImageKey]string } func createTestDPASpec(testSpec TestDPASpec) *oadpv1alpha1.DataProtectionApplicationSpec { @@ -66,7 +66,7 @@ func createTestDPASpec(testSpec TestDPASpec) *oadpv1alpha1.DataProtectionApplica }, }, SnapshotLocations: testSpec.SnapshotLocations, - UnsupportedOverrides: testSpec.UnsupportedOverrides, + UnsupportedOverrides: dpaCR.UnsupportedOverrides, } if len(testSpec.DefaultPlugins) > 0 { dpaSpec.Configuration.Velero.DefaultPlugins = testSpec.DefaultPlugins @@ -74,16 +74,16 @@ func createTestDPASpec(testSpec TestDPASpec) *oadpv1alpha1.DataProtectionApplica if testSpec.EnableNodeAgent { dpaSpec.Configuration.NodeAgent = &oadpv1alpha1.NodeAgentConfig{ NodeAgentCommonFields: oadpv1alpha1.NodeAgentCommonFields{ - Enable: ptr.To(testSpec.EnableNodeAgent), + Enable: ptr.To(true), PodConfig: &testSpec.NodeAgentPodConfig, }, - UploaderType: "kopia", + UploaderType: testSpec.UploaderType, } - } else { + } + if testSpec.EnableRestic { dpaSpec.Configuration.Restic = &oadpv1alpha1.ResticConfig{ NodeAgentCommonFields: oadpv1alpha1.NodeAgentCommonFields{ - Enable: ptr.To(testSpec.EnableRestic), - PodConfig: &testSpec.ResticPodConfig, + Enable: ptr.To(true), }, } } @@ -131,110 +131,99 @@ func createTestDPASpec(testSpec TestDPASpec) *oadpv1alpha1.DataProtectionApplica return dpaSpec } -var _ = Describe("Configuration testing for DPA Custom Resource", func() { +var _ = ginkgo.Describe("Configuration testing for DPA Custom Resource", func() { type InstallCase struct { DpaSpec *oadpv1alpha1.DataProtectionApplicationSpec } - var lastInstallTime time.Time - var _ = AfterEach(func(ctx SpecContext) { + var _ = ginkgo.AfterEach(func(ctx ginkgo.SpecContext) { report := ctx.SpecReport() if report.Failed() { getFailedTestLogs(namespace, "", lastInstallTime, report) } }) - DescribeTable("DPA reconciled to true", + ginkgo.DescribeTable("DPA reconciled to true", func(installCase InstallCase) { lastInstallTime = time.Now() - err := dpaCR.CreateOrUpdate(runTimeClientForSuiteRun, installCase.DpaSpec) - Expect(err).ToNot(HaveOccurred()) + err := dpaCR.CreateOrUpdate(installCase.DpaSpec) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) - Eventually(dpaCR.IsReconciledTrue(), time.Minute*2, time.Second*5).Should(BeTrue()) + gomega.Eventually(dpaCR.IsReconciledTrue(), time.Minute*2, time.Second*5).Should(gomega.BeTrue()) // TODO do not use Consistently, using because no field in DPA is updated telling when it was last reconciled - Consistently(dpaCR.IsReconciledTrue(), time.Minute*1, 
time.Second*15).Should(BeTrue()) + gomega.Consistently(dpaCR.IsReconciledTrue(), time.Minute*1, time.Second*15).Should(gomega.BeTrue()) timeReconciled := time.Now() - adpLogsAtReconciled, err := GetManagerPodLogs(kubernetesClientForSuiteRun, dpaCR.Namespace) - Expect(err).NotTo(HaveOccurred()) + adpLogsAtReconciled, err := lib.GetManagerPodLogs(kubernetesClientForSuiteRun, dpaCR.Namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) log.Printf("Waiting for velero Pod to be running") // TODO do not use Consistently - Consistently(VeleroPodIsRunning(kubernetesClientForSuiteRun, namespace), time.Minute*1, time.Second*15).Should(BeTrue()) + gomega.Consistently(lib.VeleroPodIsRunning(kubernetesClientForSuiteRun, namespace), time.Minute*1, time.Second*15).Should(gomega.BeTrue()) timeAfterVeleroIsRunning := time.Now() - if installCase.DpaSpec.Configuration.Restic != nil && *installCase.DpaSpec.Configuration.Restic.Enable { - log.Printf("Waiting for restic Pods to be running") - Eventually(AreNodeAgentPodsRunning(kubernetesClientForSuiteRun, namespace), time.Minute*3, time.Second*5).Should(BeTrue()) - if installCase.DpaSpec.Configuration.Restic.PodConfig != nil { - log.Printf("Waiting for restic DaemonSet to have nodeSelector") - for key, value := range installCase.DpaSpec.Configuration.Restic.PodConfig.NodeSelector { - log.Printf("Waiting for restic DaemonSet to get node selector") - Eventually(NodeAgentDaemonSetHasNodeSelector(kubernetesClientForSuiteRun, namespace, key, value), time.Minute*6, time.Second*5).Should(BeTrue()) - } - } - } else if installCase.DpaSpec.Configuration.NodeAgent != nil && *installCase.DpaSpec.Configuration.NodeAgent.Enable { + if installCase.DpaSpec.Configuration.NodeAgent != nil && *installCase.DpaSpec.Configuration.NodeAgent.Enable { log.Printf("Waiting for NodeAgent Pods to be running") - Eventually(AreNodeAgentPodsRunning(kubernetesClientForSuiteRun, namespace), time.Minute*3, time.Second*5).Should(BeTrue()) + gomega.Eventually(lib.AreNodeAgentPodsRunning(kubernetesClientForSuiteRun, namespace), time.Minute*3, time.Second*5).Should(gomega.BeTrue()) if installCase.DpaSpec.Configuration.NodeAgent.PodConfig != nil { log.Printf("Waiting for NodeAgent DaemonSet to have nodeSelector") for key, value := range installCase.DpaSpec.Configuration.NodeAgent.PodConfig.NodeSelector { log.Printf("Waiting for NodeAgent DaemonSet to get node selector") - Eventually(NodeAgentDaemonSetHasNodeSelector(kubernetesClientForSuiteRun, namespace, key, value), time.Minute*6, time.Second*5).Should(BeTrue()) + gomega.Eventually(lib.NodeAgentDaemonSetHasNodeSelector(kubernetesClientForSuiteRun, namespace, key, value), time.Minute*6, time.Second*5).Should(gomega.BeTrue()) } } } else { log.Printf("Waiting for NodeAgent DaemonSet to be deleted") - Eventually(IsNodeAgentDaemonSetDeleted(kubernetesClientForSuiteRun, namespace), time.Minute*3, time.Second*5).Should(BeTrue()) + gomega.Eventually(lib.IsNodeAgentDaemonSetDeleted(kubernetesClientForSuiteRun, namespace), time.Minute*3, time.Second*5).Should(gomega.BeTrue()) } if len(installCase.DpaSpec.BackupLocations) > 0 { log.Print("Checking if BSLs are available") - Eventually(dpaCR.BSLsAreUpdated(timeAfterVeleroIsRunning), time.Minute*3, time.Second*5).Should(BeTrue()) - Eventually(dpaCR.BSLsAreAvailable(), time.Minute*3, time.Second*5).Should(BeTrue()) + gomega.Eventually(dpaCR.BSLsAreUpdated(timeAfterVeleroIsRunning), time.Minute*3, time.Second*5).Should(gomega.BeTrue()) + gomega.Eventually(dpaCR.BSLsAreAvailable(), time.Minute*3, 
time.Second*5).Should(gomega.BeTrue()) for _, bsl := range installCase.DpaSpec.BackupLocations { log.Printf("Checking for BSL spec") - Expect(dpaCR.DoesBSLSpecMatchesDpa(namespace, *bsl.Velero)).To(BeTrue()) + gomega.Expect(dpaCR.DoesBSLSpecMatchesDpa(*bsl.Velero)).To(gomega.BeTrue()) } } else { log.Println("Checking no BSLs are deployed") _, err = dpaCR.ListBSLs() - Expect(err).To(HaveOccurred()) - Expect(err.Error()).To(Equal(fmt.Sprintf("no BSL in %s namespace", namespace))) + gomega.Expect(err).To(gomega.HaveOccurred()) + gomega.Expect(err.Error()).To(gomega.Equal(fmt.Sprintf("no BSL in %s namespace", namespace))) } if len(installCase.DpaSpec.SnapshotLocations) > 0 { - // TODO Check if VSLs are available creating new backup/restore test with VSL + // Velero does not change status of VSL objects. Users can only confirm if VSLs are correctly configured when running a native snapshot backup/restore for _, vsl := range installCase.DpaSpec.SnapshotLocations { log.Printf("Checking for VSL spec") - Expect(dpaCR.DoesVSLSpecMatchesDpa(namespace, *vsl.Velero)).To(BeTrue()) + gomega.Expect(dpaCR.DoesVSLSpecMatchesDpa(*vsl.Velero)).To(gomega.BeTrue()) } } else { log.Println("Checking no VSLs are deployed") _, err = dpaCR.ListVSLs() - Expect(err).To(HaveOccurred()) - Expect(err.Error()).To(Equal(fmt.Sprintf("no VSL in %s namespace", namespace))) + gomega.Expect(err).To(gomega.HaveOccurred()) + gomega.Expect(err.Error()).To(gomega.Equal(fmt.Sprintf("no VSL in %s namespace", namespace))) } if len(installCase.DpaSpec.Configuration.Velero.PodConfig.Tolerations) > 0 { log.Printf("Checking for velero tolerances") - Eventually(VerifyVeleroTolerations(kubernetesClientForSuiteRun, namespace, installCase.DpaSpec.Configuration.Velero.PodConfig.Tolerations), time.Minute*3, time.Second*5).Should(BeTrue()) + gomega.Eventually(lib.VerifyVeleroTolerations(kubernetesClientForSuiteRun, namespace, installCase.DpaSpec.Configuration.Velero.PodConfig.Tolerations), time.Minute*3, time.Second*5).Should(gomega.BeTrue()) } if installCase.DpaSpec.Configuration.Velero.PodConfig.ResourceAllocations.Requests != nil { log.Printf("Checking for velero resource allocation requests") - Eventually(VerifyVeleroResourceRequests(kubernetesClientForSuiteRun, namespace, installCase.DpaSpec.Configuration.Velero.PodConfig.ResourceAllocations.Requests), time.Minute*3, time.Second*5).Should(BeTrue()) + gomega.Eventually(lib.VerifyVeleroResourceRequests(kubernetesClientForSuiteRun, namespace, installCase.DpaSpec.Configuration.Velero.PodConfig.ResourceAllocations.Requests), time.Minute*3, time.Second*5).Should(gomega.BeTrue()) } if installCase.DpaSpec.Configuration.Velero.PodConfig.ResourceAllocations.Limits != nil { log.Printf("Checking for velero resource allocation limits") - Eventually(VerifyVeleroResourceLimits(kubernetesClientForSuiteRun, namespace, installCase.DpaSpec.Configuration.Velero.PodConfig.ResourceAllocations.Limits), time.Minute*3, time.Second*5).Should(BeTrue()) + gomega.Eventually(lib.VerifyVeleroResourceLimits(kubernetesClientForSuiteRun, namespace, installCase.DpaSpec.Configuration.Velero.PodConfig.ResourceAllocations.Limits), time.Minute*3, time.Second*5).Should(gomega.BeTrue()) } if len(installCase.DpaSpec.Configuration.Velero.DefaultPlugins) > 0 { log.Printf("Waiting for velero Deployment to have expected default plugins") for _, plugin := range installCase.DpaSpec.Configuration.Velero.DefaultPlugins { log.Printf("Checking for %s default plugin", plugin) - Eventually(DoesPluginExist(kubernetesClientForSuiteRun, namespace, 
plugin), time.Minute*6, time.Second*5).Should(BeTrue()) + gomega.Eventually(lib.DoesPluginExist(kubernetesClientForSuiteRun, namespace, plugin), time.Minute*6, time.Second*5).Should(gomega.BeTrue()) } } @@ -242,36 +231,35 @@ var _ = Describe("Configuration testing for DPA Custom Resource", func() { log.Printf("Waiting for velero Deployment to have expected custom plugins") for _, plugin := range installCase.DpaSpec.Configuration.Velero.CustomPlugins { log.Printf("Checking for %s custom plugin", plugin.Name) - Eventually(DoesCustomPluginExist(kubernetesClientForSuiteRun, namespace, plugin), time.Minute*6, time.Second*5).Should(BeTrue()) + gomega.Eventually(lib.DoesCustomPluginExist(kubernetesClientForSuiteRun, namespace, plugin), time.Minute*6, time.Second*5).Should(gomega.BeTrue()) } } // wait at least 1 minute after reconciled - Eventually( + gomega.Eventually( func() bool { //has it been at least 1 minute since reconciled? log.Printf("Waiting for 1 minute after reconciled: %v elapsed", time.Since(timeReconciled).String()) return time.Now().After(timeReconciled.Add(time.Minute)) }, time.Minute*5, time.Second*5, - ).Should(BeTrue()) - adpLogsAfterOneMinute, err := GetManagerPodLogs(kubernetesClientForSuiteRun, dpaCR.Namespace) - Expect(err).NotTo(HaveOccurred()) + ).Should(gomega.BeTrue()) + adpLogsAfterOneMinute, err := lib.GetManagerPodLogs(kubernetesClientForSuiteRun, dpaCR.Namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) // We expect OADP logs to be the same after 1 minute adpLogsDiff := cmp.Diff(adpLogsAtReconciled, adpLogsAfterOneMinute) // If registry deployment were deleted after CR update, we expect to see a new log entry, ignore that. - // We also ignore case where deprecated restic entry was used - if !strings.Contains(adpLogsDiff, "Registry Deployment deleted") && !strings.Contains(adpLogsDiff, "(Deprecation Warning) Use nodeAgent instead of restic, which is deprecated and will be removed in the future") { - Expect(adpLogsDiff).To(Equal("")) + if !strings.Contains(adpLogsDiff, "Registry Deployment deleted") { + gomega.Expect(adpLogsDiff).To(gomega.Equal("")) } }, - Entry("Default DPA CR", InstallCase{ + ginkgo.Entry("Default DPA CR", InstallCase{ DpaSpec: createTestDPASpec(TestDPASpec{BSLSecretName: bslSecretName}), }), - Entry("DPA CR with BSL secret with carriage return", InstallCase{ + ginkgo.Entry("DPA CR with BSL secret with carriage return", InstallCase{ DpaSpec: createTestDPASpec(TestDPASpec{BSLSecretName: bslSecretNameWithCarriageReturn}), }), - Entry("DPA CR with Velero custom plugin", InstallCase{ + ginkgo.Entry("DPA CR with Velero custom plugin", InstallCase{ DpaSpec: createTestDPASpec(TestDPASpec{ BSLSecretName: bslSecretName, CustomPlugins: []oadpv1alpha1.CustomPlugin{ @@ -282,7 +270,7 @@ var _ = Describe("Configuration testing for DPA Custom Resource", func() { }, }), }), - Entry("DPA CR with Velero resource allocations", InstallCase{ + ginkgo.Entry("DPA CR with Velero resource allocations", InstallCase{ DpaSpec: createTestDPASpec(TestDPASpec{ BSLSecretName: bslSecretName, VeleroPodConfig: oadpv1alpha1.PodConfig{ @@ -299,7 +287,7 @@ var _ = Describe("Configuration testing for DPA Custom Resource", func() { }, }), }), - Entry("DPA CR with Velero toleration", InstallCase{ + ginkgo.Entry("DPA CR with Velero toleration", InstallCase{ DpaSpec: createTestDPASpec(TestDPASpec{ BSLSecretName: bslSecretName, VeleroPodConfig: oadpv1alpha1.PodConfig{ @@ -314,27 +302,29 @@ var _ = Describe("Configuration testing for DPA Custom Resource", func() { }, }), }), - 
Entry("DPA CR with VSL", Label("aws", "azure", "gcp"), InstallCase{ + ginkgo.Entry("DPA CR with VSL", ginkgo.Label("aws", "azure", "gcp"), InstallCase{ DpaSpec: createTestDPASpec(TestDPASpec{ BSLSecretName: bslSecretName, SnapshotLocations: dpaCR.SnapshotLocations, }), }), - Entry("DPA CR with restic enabled with node selector", InstallCase{ + ginkgo.Entry("DPA CR with NodeAgent enabled with restic and node selector", InstallCase{ DpaSpec: createTestDPASpec(TestDPASpec{ - BSLSecretName: bslSecretName, - EnableRestic: true, - ResticPodConfig: oadpv1alpha1.PodConfig{ + BSLSecretName: bslSecretName, + EnableNodeAgent: true, + UploaderType: "restic", + NodeAgentPodConfig: oadpv1alpha1.PodConfig{ NodeSelector: map[string]string{ "foo": "bar", }, }, }), }), - Entry("DPA CR with kopia enabled with node selector", InstallCase{ + ginkgo.Entry("DPA CR with NodeAgent enabled with kopia and node selector", InstallCase{ DpaSpec: createTestDPASpec(TestDPASpec{ BSLSecretName: bslSecretName, EnableNodeAgent: true, + UploaderType: "kopia", NodeAgentPodConfig: oadpv1alpha1.PodConfig{ NodeSelector: map[string]string{ "foo": "bar", @@ -342,54 +332,45 @@ var _ = Describe("Configuration testing for DPA Custom Resource", func() { }, }), }), - Entry("DPA CR with NoDefaultBackupLocation and with BackupImages false", InstallCase{ + ginkgo.Entry("DPA CR with NoDefaultBackupLocation and with BackupImages false", InstallCase{ DpaSpec: createTestDPASpec(TestDPASpec{ BSLSecretName: bslSecretName, NoDefaultBackupLocation: true, DoNotBackupImages: true, }), }), - Entry("DPA CR with legacy-aws plugin", Label("aws", "ibmcloud"), InstallCase{ + ginkgo.Entry("DPA CR with legacy-aws plugin", ginkgo.Label("aws", "ibmcloud"), InstallCase{ DpaSpec: createTestDPASpec(TestDPASpec{ BSLSecretName: bslSecretName, DefaultPlugins: []oadpv1alpha1.DefaultPlugin{oadpv1alpha1.DefaultPluginOpenShift, oadpv1alpha1.DefaultPluginLegacyAWS}, }), }), - Entry("DPA CR with S3ForcePathStyle true", Label("aws"), InstallCase{ + ginkgo.Entry("DPA CR with S3ForcePathStyle true", ginkgo.Label("aws"), InstallCase{ DpaSpec: createTestDPASpec(TestDPASpec{ BSLSecretName: bslSecretName, s3ForcePathStyle: true, }), }), - // TODO bug https://github.com/vmware-tanzu/velero/issues/8022 - // Entry("DPA CR without Region, without S3ForcePathStyle and with BackupImages false", Label("aws"), InstallCase{ - // DpaSpec: createTestDPASpec(TestDPASpec{ - // BSLSecretName: bslSecretName, - // NoRegion: true, - // NoS3ForcePathStyle: true, - // DoNotBackupImages: true, - // }), - // }), - Entry("DPA CR with unsupportedOverrides", Label("aws", "ibmcloud"), InstallCase{ + ginkgo.Entry("DPA CR without Region, without S3ForcePathStyle and with BackupImages false", ginkgo.Label("aws"), InstallCase{ DpaSpec: createTestDPASpec(TestDPASpec{ - BSLSecretName: bslSecretName, - UnsupportedOverrides: map[oadpv1alpha1.UnsupportedImageKey]string{ - "awsPluginImageFqin": "quay.io/konveyor/velero-plugin-for-aws:oadp-1.4", - }, + BSLSecretName: bslSecretName, + NoRegion: true, + NoS3ForcePathStyle: true, + DoNotBackupImages: true, }), }), ) - DescribeTable("DPA reconciled to false", + ginkgo.DescribeTable("DPA reconciled to false", func(installCase InstallCase, message string) { lastInstallTime = time.Now() - err := dpaCR.CreateOrUpdate(runTimeClientForSuiteRun, installCase.DpaSpec) - Expect(err).ToNot(HaveOccurred()) + err := dpaCR.CreateOrUpdate(installCase.DpaSpec) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) log.Printf("Test case expected to error. 
Waiting for the error to show in DPA Status") - Eventually(dpaCR.IsReconciledFalse(message), time.Minute*3, time.Second*5).Should(BeTrue()) + gomega.Eventually(dpaCR.IsReconciledFalse(message), time.Minute*3, time.Second*5).Should(gomega.BeTrue()) }, - Entry("DPA CR without Region and with S3ForcePathStyle true", Label("aws", "ibmcloud"), InstallCase{ + ginkgo.Entry("DPA CR without Region and with S3ForcePathStyle true", ginkgo.Label("aws", "ibmcloud"), InstallCase{ DpaSpec: createTestDPASpec(TestDPASpec{ BSLSecretName: bslSecretName, NoRegion: true, @@ -398,19 +379,19 @@ var _ = Describe("Configuration testing for DPA Custom Resource", func() { }, "region for AWS backupstoragelocation not automatically discoverable. Please set the region in the backupstoragelocation config"), ) - DescribeTable("DPA Deletion test", + ginkgo.DescribeTable("DPA Deletion test", func() { log.Printf("Creating DPA") - err := dpaCR.CreateOrUpdate(runTimeClientForSuiteRun, dpaCR.Build(KOPIA)) - Expect(err).NotTo(HaveOccurred()) + err := dpaCR.CreateOrUpdate(dpaCR.Build(lib.KOPIA)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) log.Printf("Waiting for velero Pod to be running") - Eventually(VeleroPodIsRunning(kubernetesClientForSuiteRun, namespace), time.Minute*3, time.Second*5).Should(BeTrue()) + gomega.Eventually(lib.VeleroPodIsRunning(kubernetesClientForSuiteRun, namespace), time.Minute*3, time.Second*5).Should(gomega.BeTrue()) log.Printf("Deleting DPA") err = dpaCR.Delete() - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) log.Printf("Waiting for velero to be deleted") - Eventually(VeleroIsDeleted(kubernetesClientForSuiteRun, namespace), time.Minute*3, time.Second*5).Should(BeTrue()) + gomega.Eventually(lib.VeleroIsDeleted(kubernetesClientForSuiteRun, namespace), time.Minute*3, time.Second*5).Should(gomega.BeTrue()) }, - Entry("Should succeed"), + ginkgo.Entry("Should succeed"), ) }) diff --git a/tests/e2e/e2e_suite_test.go b/tests/e2e/e2e_suite_test.go index 413a5987d0..41305bbaed 100755 --- a/tests/e2e/e2e_suite_test.go +++ b/tests/e2e/e2e_suite_test.go @@ -8,29 +8,18 @@ import ( "testing" "time" - volumesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1" - . "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" - openshiftappsv1 "github.com/openshift/api/apps/v1" - openshiftbuildv1 "github.com/openshift/api/build/v1" - openshiftconfigv1 "github.com/openshift/api/config/v1" - openshiftsecurityv1 "github.com/openshift/api/security/v1" - openshifttemplatev1 "github.com/openshift/api/template/v1" - . 
"github.com/openshift/oadp-operator/tests/e2e/lib" - "github.com/openshift/oadp-operator/tests/e2e/utils" - operatorsv1 "github.com/operator-framework/api/pkg/operators/v1" - operatorsv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1" - velerov1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" - veleroClientset "github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned" - corev1 "k8s.io/api/core/v1" + "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" + ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/config" + "sigs.k8s.io/controller-runtime/pkg/log/zap" - oadpv1alpha1 "github.com/openshift/oadp-operator/api/v1alpha1" + "github.com/openshift/oadp-operator/tests/e2e/lib" ) var ( @@ -40,10 +29,9 @@ var ( kubernetesClientForSuiteRun *kubernetes.Clientset runTimeClientForSuiteRun client.Client - veleroClientForSuiteRun veleroClientset.Interface dynamicClientForSuiteRun dynamic.Interface - dpaCR *DpaCustomResource + dpaCR *lib.DpaCustomResource bslSecretName string bslSecretNameWithCarriageReturn string vslSecretName string @@ -51,6 +39,10 @@ var ( kubeConfig *rest.Config knownFlake bool accumulatedTestLogs []string + + kvmEmulation bool + useUpstreamHco bool + skipMustGather bool ) func init() { @@ -64,6 +56,9 @@ func init() { flag.StringVar(&provider, "provider", "aws", "Cloud provider") flag.StringVar(&artifact_dir, "artifact_dir", "/tmp", "Directory for storing must gather") flag.Int64Var(&flakeAttempts, "flakeAttempts", 3, "Customize the number of flake retries (3)") + flag.BoolVar(&kvmEmulation, "kvm_emulation", true, "Enable or disable KVM emulation for virtualization testing") + flag.BoolVar(&useUpstreamHco, "hco_upstream", false, "Force use of upstream virtualization operator") + flag.BoolVar(&skipMustGather, "skipMustGather", false, "avoid errors with local execution and cluster architecture") // helps with launching debug sessions from IDE if os.Getenv("E2E_USE_ENV_FLAGS") == "true" { @@ -99,7 +94,29 @@ func init() { flakeAttempts = parsedValue } } + if envValue := os.Getenv("KVM_EMULATION"); envValue != "" { + if parsedValue, err := strconv.ParseBool(envValue); err == nil { + kvmEmulation = parsedValue + } else { + log.Println("Error parsing KVM_EMULATION, it will be enabled by default: ", err) + } + } + if envValue := os.Getenv("HCO_UPSTREAM"); envValue != "" { + if parsedValue, err := strconv.ParseBool(envValue); err == nil { + useUpstreamHco = parsedValue + } else { + log.Println("Error parsing HCO_UPSTREAM, it will be disabled by default: ", err) + } + } + if envValue := os.Getenv("SKIP_MUST_GATHER"); envValue != "" { + if parsedValue, err := strconv.ParseBool(envValue); err == nil { + skipMustGather = parsedValue + } else { + log.Println("Error parsing SKIP_MUST_GATHER, must-gather will be enabled by default: ", err) + } + } } + } func TestOADPE2E(t *testing.T) { @@ -110,38 +127,23 @@ func TestOADPE2E(t *testing.T) { kubeConfig.QPS = 50 kubeConfig.Burst = 100 - RegisterFailHandler(Fail) + gomega.RegisterFailHandler(ginkgo.Fail) kubernetesClientForSuiteRun, err = kubernetes.NewForConfig(kubeConfig) - Expect(err).NotTo(HaveOccurred()) - - runTimeClientForSuiteRun, err = client.New(kubeConfig, client.Options{}) - Expect(err).NotTo(HaveOccurred()) - - oadpv1alpha1.AddToScheme(runTimeClientForSuiteRun.Scheme()) - 
velerov1.AddToScheme(runTimeClientForSuiteRun.Scheme()) - openshiftappsv1.AddToScheme(runTimeClientForSuiteRun.Scheme()) - openshiftbuildv1.AddToScheme(runTimeClientForSuiteRun.Scheme()) - openshiftsecurityv1.AddToScheme(runTimeClientForSuiteRun.Scheme()) - openshifttemplatev1.AddToScheme(runTimeClientForSuiteRun.Scheme()) - corev1.AddToScheme(runTimeClientForSuiteRun.Scheme()) - volumesnapshotv1.AddToScheme(runTimeClientForSuiteRun.Scheme()) - operatorsv1alpha1.AddToScheme(runTimeClientForSuiteRun.Scheme()) - operatorsv1.AddToScheme(runTimeClientForSuiteRun.Scheme()) - openshiftconfigv1.AddToScheme(runTimeClientForSuiteRun.Scheme()) - - veleroClientForSuiteRun, err = veleroClientset.NewForConfig(kubeConfig) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + runTimeClientForSuiteRun, err = client.New(kubeConfig, client.Options{Scheme: lib.Scheme}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) dynamicClientForSuiteRun, err = dynamic.NewForConfig(kubeConfig) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) - err = CreateNamespace(kubernetesClientForSuiteRun, namespace) - Expect(err).To(BeNil()) - Expect(DoesNamespaceExist(kubernetesClientForSuiteRun, namespace)).Should(BeTrue()) + err = lib.CreateNamespace(kubernetesClientForSuiteRun, namespace) + gomega.Expect(err).To(gomega.BeNil()) + gomega.Expect(lib.DoesNamespaceExist(kubernetesClientForSuiteRun, namespace)).Should(gomega.BeTrue()) - dpa, err := LoadDpaSettingsFromJson(settings) - Expect(err).NotTo(HaveOccurred()) + dpa, err := lib.LoadDpaSettingsFromJson(settings) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) bslSecretName = "bsl-cloud-credentials-" + provider bslSecretNameWithCarriageReturn = "bsl-cloud-credentials-" + provider + "-with-carriage-return" @@ -149,10 +151,11 @@ func TestOADPE2E(t *testing.T) { veleroPrefix := "velero-e2e-" + string(uuid.NewUUID()) - dpaCR = &DpaCustomResource{ + dpaCR = &lib.DpaCustomResource{ Name: "ts-" + instanceName, Namespace: namespace, Client: runTimeClientForSuiteRun, + VSLSecretName: vslSecretName, BSLSecretName: bslSecretName, BSLConfig: dpa.DeepCopy().Spec.BackupLocations[0].Velero.Config, BSLProvider: dpa.DeepCopy().Spec.BackupLocations[0].Velero.Provider, @@ -160,42 +163,46 @@ func TestOADPE2E(t *testing.T) { BSLBucketPrefix: veleroPrefix, VeleroDefaultPlugins: dpa.DeepCopy().Spec.Configuration.Velero.DefaultPlugins, SnapshotLocations: dpa.DeepCopy().Spec.SnapshotLocations, + UnsupportedOverrides: dpa.DeepCopy().Spec.UnsupportedOverrides, } - RunSpecs(t, "OADP E2E using velero prefix: "+veleroPrefix) + ginkgo.RunSpecs(t, "OADP E2E using velero prefix: "+veleroPrefix) } -var _ = BeforeSuite(func() { +var _ = ginkgo.BeforeSuite(func() { + // Initialize controller-runtime logger + ctrl.SetLogger(zap.New(zap.UseDevMode(true))) + // TODO create logger (hh:mm:ss message) to be used by all functions log.Printf("Creating Secrets") - bslCredFileData, err := utils.ReadFile(bslCredFile) - Expect(err).NotTo(HaveOccurred()) - err = CreateCredentialsSecret(kubernetesClientForSuiteRun, bslCredFileData, namespace, bslSecretName) - Expect(err).NotTo(HaveOccurred()) - err = CreateCredentialsSecret( + bslCredFileData, err := lib.ReadFile(bslCredFile) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = lib.CreateCredentialsSecret(kubernetesClientForSuiteRun, bslCredFileData, namespace, bslSecretName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = lib.CreateCredentialsSecret( kubernetesClientForSuiteRun, - 
utils.ReplaceSecretDataNewLineWithCarriageReturn(bslCredFileData), + lib.ReplaceSecretDataNewLineWithCarriageReturn(bslCredFileData), namespace, bslSecretNameWithCarriageReturn, ) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) - vslCredFileData, err := utils.ReadFile(vslCredFile) - Expect(err).NotTo(HaveOccurred()) - err = CreateCredentialsSecret(kubernetesClientForSuiteRun, vslCredFileData, namespace, vslSecretName) - Expect(err).NotTo(HaveOccurred()) + vslCredFileData, err := lib.ReadFile(vslCredFile) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = lib.CreateCredentialsSecret(kubernetesClientForSuiteRun, vslCredFileData, namespace, vslSecretName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) -var _ = AfterSuite(func() { +var _ = ginkgo.AfterSuite(func() { log.Printf("Deleting Secrets") - err := DeleteSecret(kubernetesClientForSuiteRun, namespace, vslSecretName) - Expect(err).ToNot(HaveOccurred()) - err = DeleteSecret(kubernetesClientForSuiteRun, namespace, bslSecretName) - Expect(err).ToNot(HaveOccurred()) - err = DeleteSecret(kubernetesClientForSuiteRun, namespace, bslSecretNameWithCarriageReturn) - Expect(err).ToNot(HaveOccurred()) + err := lib.DeleteSecret(kubernetesClientForSuiteRun, namespace, vslSecretName) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + err = lib.DeleteSecret(kubernetesClientForSuiteRun, namespace, bslSecretName) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + err = lib.DeleteSecret(kubernetesClientForSuiteRun, namespace, bslSecretNameWithCarriageReturn) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) log.Printf("Deleting DPA") err = dpaCR.Delete() - Expect(err).ToNot(HaveOccurred()) - Eventually(dpaCR.IsDeleted(), time.Minute*2, time.Second*5).Should(BeTrue()) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + gomega.Eventually(dpaCR.IsDeleted(), time.Minute*2, time.Second*5).Should(gomega.BeTrue()) }) diff --git a/tests/e2e/hcp_backup_restore_suite_test.go b/tests/e2e/hcp_backup_restore_suite_test.go new file mode 100644 index 0000000000..7580534101 --- /dev/null +++ b/tests/e2e/hcp_backup_restore_suite_test.go @@ -0,0 +1,236 @@ +package e2e_test + +import ( + "context" + "fmt" + "log" + "time" + + "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" + + "github.com/openshift/oadp-operator/tests/e2e/lib" + libhcp "github.com/openshift/oadp-operator/tests/e2e/lib/hcp" +) + +type HCPBackupRestoreCase struct { + BackupRestoreCase + Template string + Provider string +} + +func runHCPBackupAndRestore(brCase HCPBackupRestoreCase, updateLastBRcase func(brCase HCPBackupRestoreCase), h *libhcp.HCHandler) { + updateLastBRcase(brCase) + + log.Printf("Preparing backup and restore") + backupName, restoreName := prepareBackupAndRestore(brCase.BackupRestoreCase, func() {}) + + err := h.AddHCPPluginToDPA(dpaCR.Namespace, dpaCR.Name, false) + gomega.Expect(err).ToNot(gomega.HaveOccurred(), "failed to add HCP plugin to DPA: %v", err) + // TODO: move the wait for HC just after the DPA modification to allow reconciliation to go ahead without waiting for the HC to be created + + //Wait for HCP plugin to be added + gomega.Eventually(libhcp.IsHCPPluginAdded(h.Client, dpaCR.Namespace, dpaCR.Name), 3*time.Minute, 1*time.Second).Should(gomega.BeTrue()) + + // Create the HostedCluster for the test + h.HCPNamespace = libhcp.GetHCPNamespace(brCase.BackupRestoreCase.Name, libhcp.ClustersNamespace) + h.HostedCluster, err = h.DeployHCManifest(brCase.Template, brCase.Provider, brCase.BackupRestoreCase.Name) + 
gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + if brCase.PreBackupVerify != nil { + err := brCase.PreBackupVerify(runTimeClientForSuiteRun, brCase.Namespace) + gomega.Expect(err).ToNot(gomega.HaveOccurred(), "failed to run HCP pre-backup verification: %v", err) + } + + // Backup HCP & HC + log.Printf("Backing up HC") + includedResources := libhcp.HCPIncludedResources + excludedResources := libhcp.HCPExcludedResources + includedNamespaces := append(libhcp.HCPIncludedNamespaces, libhcp.GetHCPNamespace(h.HostedCluster.Name, libhcp.ClustersNamespace)) + + nsRequiresResticDCWorkaround := runHCPBackup(brCase.BackupRestoreCase, backupName, h, includedNamespaces, includedResources, excludedResources) + + // Delete everything in HCP namespace + log.Printf("Deleting HCP & HC") + err = h.RemoveHCP(libhcp.Wait10Min) + gomega.Expect(err).ToNot(gomega.HaveOccurred(), "failed to remove HCP: %v", err) + + // Restore HC + log.Printf("Restoring HC") + runHCPRestore(brCase.BackupRestoreCase, backupName, restoreName, nsRequiresResticDCWorkaround) + + // Wait for HCP to be restored + log.Printf("Validating HC") + err = libhcp.ValidateHCP(libhcp.ValidateHCPTimeout, libhcp.Wait10Min, []string{}, h.HCPNamespace)(h.Client, libhcp.ClustersNamespace) + gomega.Expect(err).ToNot(gomega.HaveOccurred(), "failed to run HCP post-restore verification: %v", err) +} + +var _ = ginkgo.Describe("HCP Backup and Restore tests", ginkgo.Ordered, func() { + var ( + lastInstallTime time.Time + lastBRCase HCPBackupRestoreCase + h *libhcp.HCHandler + err error + ctx = context.Background() + ) + + updateLastBRcase := func(brCase HCPBackupRestoreCase) { + lastBRCase = brCase + } + + // Before All + var _ = ginkgo.BeforeAll(func() { + reqOperators := []libhcp.RequiredOperator{ + { + Name: libhcp.MCEName, + Namespace: libhcp.MCENamespace, + OperatorGroup: libhcp.MCEOperatorGroup, + }, + } + + // Install MCE and Hypershift operators + h, err = libhcp.InstallRequiredOperators(ctx, runTimeClientForSuiteRun, reqOperators) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + gomega.Expect(h).ToNot(gomega.BeNil()) + gomega.Eventually(lib.IsDeploymentReady(h.Client, libhcp.MCENamespace, libhcp.MCEOperatorName), libhcp.Wait10Min, time.Second*5).Should(gomega.BeTrue()) + + // Deploy the MCE manifest + err = h.DeployMCEManifest() + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + // Deploy the MCE and wait for it to be ready + gomega.Eventually(lib.IsDeploymentReady(h.Client, libhcp.MCENamespace, libhcp.MCEOperatorName), libhcp.Wait10Min, time.Second*5).Should(gomega.BeTrue()) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + // Validate the Hypershift operator + gomega.Eventually(lib.IsDeploymentReady(h.Client, libhcp.HONamespace, libhcp.HypershiftOperatorName), libhcp.Wait10Min, time.Second*5).Should(gomega.BeTrue()) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + }) + + // After All + var _ = ginkgo.AfterAll(func() { + err := h.RemoveHCP(libhcp.Wait10Min) + gomega.Expect(err).ToNot(gomega.HaveOccurred(), "failed to remove HCP: %v", err) + }) + + // After Each + var _ = ginkgo.AfterEach(func(ctx ginkgo.SpecContext) { + h.RemoveHCP(libhcp.Wait10Min) + tearDownBackupAndRestore(lastBRCase.BackupRestoreCase, lastInstallTime, ctx.SpecReport()) + }) + + ginkgo.DescribeTable("Basic HCP backup and restore test", + func(brCase HCPBackupRestoreCase, expectedErr error) { + if ginkgo.CurrentSpecReport().NumAttempts > 1 && !knownFlake { + ginkgo.Fail("No known FLAKE found in a previous run, marking test as failed.") + } + 
runHCPBackupAndRestore(brCase, updateLastBRcase, h) + }, + + // Test Cases + ginkgo.Entry("None HostedCluster backup and restore", ginkgo.Label("hcp"), HCPBackupRestoreCase{ + Template: libhcp.HCPNoneManifest, + Provider: "None", + BackupRestoreCase: BackupRestoreCase{ + Namespace: libhcp.GetHCPNamespace(fmt.Sprintf("%s-none", libhcp.HostedClusterPrefix), libhcp.ClustersNamespace), + Name: fmt.Sprintf("%s-none", libhcp.HostedClusterPrefix), + BackupRestoreType: lib.CSIDataMover, + PreBackupVerify: libhcp.ValidateHCP(libhcp.ValidateHCPTimeout, libhcp.Wait10Min, []string{}, libhcp.GetHCPNamespace(fmt.Sprintf("%s-none", libhcp.HostedClusterPrefix), libhcp.ClustersNamespace)), + PostRestoreVerify: libhcp.ValidateHCP(libhcp.ValidateHCPTimeout, libhcp.Wait10Min, []string{}, libhcp.GetHCPNamespace(fmt.Sprintf("%s-none", libhcp.HostedClusterPrefix), libhcp.ClustersNamespace)), + BackupTimeout: libhcp.HCPBackupTimeout, + }, + }, nil), + + ginkgo.Entry("Agent HostedCluster backup and restore", ginkgo.Label("hcp"), HCPBackupRestoreCase{ + Template: libhcp.HCPAgentManifest, + Provider: "Agent", + BackupRestoreCase: BackupRestoreCase{ + Namespace: libhcp.GetHCPNamespace(fmt.Sprintf("%s-agent", libhcp.HostedClusterPrefix), libhcp.ClustersNamespace), + Name: fmt.Sprintf("%s-agent", libhcp.HostedClusterPrefix), + BackupRestoreType: lib.CSIDataMover, + PreBackupVerify: libhcp.ValidateHCP(libhcp.ValidateHCPTimeout, libhcp.Wait10Min, []string{}, libhcp.GetHCPNamespace(fmt.Sprintf("%s-agent", libhcp.HostedClusterPrefix), libhcp.ClustersNamespace)), + PostRestoreVerify: libhcp.ValidateHCP(libhcp.ValidateHCPTimeout, libhcp.Wait10Min, []string{}, libhcp.GetHCPNamespace(fmt.Sprintf("%s-agent", libhcp.HostedClusterPrefix), libhcp.ClustersNamespace)), + BackupTimeout: libhcp.HCPBackupTimeout, + }, + }, nil), + ) +}) + +// TODO: Modify the runBackup function to inject the filtered error logs to avoid repeating code with this +func runHCPBackup(brCase BackupRestoreCase, backupName string, h *libhcp.HCHandler, namespaces []string, includedResources, excludedResources []string) bool { + nsRequiresResticDCWorkaround, err := lib.NamespaceRequiresResticDCWorkaround(h.Client, brCase.Namespace) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + // create backup + log.Printf("Creating backup %s for case %s", backupName, brCase.Name) + err = lib.CreateCustomBackupForNamespaces(h.Client, namespace, backupName, namespaces, includedResources, excludedResources, brCase.BackupRestoreType == lib.RESTIC || brCase.BackupRestoreType == lib.KOPIA, brCase.BackupRestoreType == lib.CSIDataMover) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + // wait for backup to not be running + gomega.Eventually(lib.IsBackupDone(h.Client, namespace, backupName), brCase.BackupTimeout, time.Second*10).Should(gomega.BeTrue()) + // TODO only log on fail? 
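+	// Collect the backup description and logs for the test output and the
+	// accumulated artifacts; the assertion below only checks the error logs,
+	// after libhcp.FilterErrorLogs drops entries not relevant to this test.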
+ describeBackup := lib.DescribeBackup(h.Client, namespace, backupName) + ginkgo.GinkgoWriter.Println(describeBackup) + + backupLogs := lib.BackupLogs(kubernetesClientForSuiteRun, h.Client, namespace, backupName) + backupErrorLogs := lib.BackupErrorLogs(kubernetesClientForSuiteRun, h.Client, namespace, backupName) + accumulatedTestLogs = append(accumulatedTestLogs, describeBackup, backupLogs) + + // Check error logs for non-relevant errors + filteredBackupErrorLogs := libhcp.FilterErrorLogs(backupErrorLogs) + + if !brCase.SkipVerifyLogs { + gomega.Expect(filteredBackupErrorLogs).Should(gomega.Equal([]string{})) + } + + // check if backup succeeded + succeeded, err := lib.IsBackupCompletedSuccessfully(kubernetesClientForSuiteRun, h.Client, namespace, backupName) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + gomega.Expect(succeeded).To(gomega.Equal(true)) + log.Printf("Backup for case %s succeeded", brCase.Name) + + if brCase.BackupRestoreType == lib.CSI { + // wait for volume snapshot to be Ready + gomega.Eventually(lib.AreVolumeSnapshotsReady(h.Client, backupName), time.Minute*4, time.Second*10).Should(gomega.BeTrue()) + } + + return nsRequiresResticDCWorkaround +} + +// TODO: Modify the runRestore function to inject the filtered error logs to avoid repeating code with this +func runHCPRestore(brCase BackupRestoreCase, backupName string, restoreName string, nsRequiresResticDCWorkaround bool) { + log.Printf("Creating restore %s for case %s", restoreName, brCase.Name) + err := lib.CreateRestoreFromBackup(dpaCR.Client, namespace, backupName, restoreName) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + gomega.Eventually(lib.IsRestoreDone(dpaCR.Client, namespace, restoreName), time.Minute*60, time.Second*10).Should(gomega.BeTrue()) + // TODO only log on fail? + describeRestore := lib.DescribeRestore(dpaCR.Client, namespace, restoreName) + ginkgo.GinkgoWriter.Println(describeRestore) + + restoreLogs := lib.RestoreLogs(kubernetesClientForSuiteRun, dpaCR.Client, namespace, restoreName) + restoreErrorLogs := lib.RestoreErrorLogs(kubernetesClientForSuiteRun, dpaCR.Client, namespace, restoreName) + accumulatedTestLogs = append(accumulatedTestLogs, describeRestore, restoreLogs) + + // Check error logs for non-relevant errors + filteredRestoreErrorLogs := libhcp.FilterErrorLogs(restoreErrorLogs) + + if !brCase.SkipVerifyLogs { + gomega.Expect(filteredRestoreErrorLogs).Should(gomega.Equal([]string{})) + } + + // Check if restore succeeded + succeeded, err := lib.IsRestoreCompletedSuccessfully(kubernetesClientForSuiteRun, dpaCR.Client, namespace, restoreName) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + gomega.Expect(succeeded).To(gomega.Equal(true)) + + if nsRequiresResticDCWorkaround { + // We run the dc-post-restore.sh script for both restic and + // kopia backups and for any DCs with attached volumes, + // regardless of whether it was restic or kopia backup. + // The script is designed to work with labels set by the + // openshift-velero-plugin and can be run without pre-conditions. 
+ log.Printf("Running dc-post-restore.sh script.") + err = lib.RunDcPostRestoreScript(restoreName) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + } +} diff --git a/tests/e2e/lib/apps.go b/tests/e2e/lib/apps.go index bb929fcd03..d0afbaa8d1 100755 --- a/tests/e2e/lib/apps.go +++ b/tests/e2e/lib/apps.go @@ -5,10 +5,7 @@ import ( "context" "errors" "fmt" - "io" "log" - "net/http" - "net/url" "os" "os/exec" "path/filepath" @@ -23,7 +20,6 @@ import ( "github.com/onsi/gomega" ocpappsv1 "github.com/openshift/api/apps/v1" openshiftconfigv1 "github.com/openshift/api/config/v1" - routev1 "github.com/openshift/api/route/v1" security "github.com/openshift/api/security/v1" templatev1 "github.com/openshift/api/template/v1" "github.com/vmware-tanzu/velero/pkg/label" @@ -36,6 +32,7 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -63,12 +60,12 @@ func InstallApplicationWithRetries(ocClient client.Client, file string, retries return err } for _, resource := range obj.Items { - labels := resource.GetLabels() - if labels == nil { - labels = make(map[string]string) + resourceLabels := resource.GetLabels() + if resourceLabels == nil { + resourceLabels = make(map[string]string) } - labels[e2eAppLabelKey] = e2eAppLabelValue - resource.SetLabels(labels) + resourceLabels[e2eAppLabelKey] = e2eAppLabelValue + resource.SetLabels(resourceLabels) resourceCreate := resource.DeepCopy() err = nil // reset error for each resource for i := 0; i < retries; i++ { @@ -93,13 +90,13 @@ func InstallApplicationWithRetries(ocClient client.Client, file string, retries resource.SetDeletionTimestamp(clusterResource.GetDeletionTimestamp()) resource.SetFinalizers(clusterResource.GetFinalizers()) // append cluster labels to existing labels if they don't already exist - labels := resource.GetLabels() - if labels == nil { - labels = make(map[string]string) + resourceLabels := resource.GetLabels() + if resourceLabels == nil { + resourceLabels = make(map[string]string) } for k, v := range clusterResource.GetLabels() { - if _, exists := labels[k]; !exists { - labels[k] = v + if _, exists := resourceLabels[k]; !exists { + resourceLabels[k] = v } } } @@ -111,14 +108,14 @@ func InstallApplicationWithRetries(ocClient client.Client, file string, retries continue } if !reflect.DeepEqual(clusterResource.Object[key], resource.Object[key]) { - fmt.Println("diff found for key:", key) + log.Println("diff found for key:", key) ginkgo.GinkgoWriter.Println(cmp.Diff(clusterResource.Object[key], resource.Object[key])) needsUpdate = true clusterResource.Object[key] = resource.Object[key] } } if needsUpdate { - fmt.Printf("updating resource: %s; name: %s\n", resource.GroupVersionKind(), resource.GetName()) + log.Printf("updating resource: %s; name: %s\n", resource.GroupVersionKind(), resource.GetName()) err = ocClient.Update(context.Background(), &clusterResource) } } @@ -127,7 +124,7 @@ func InstallApplicationWithRetries(ocClient client.Client, file string, retries break } // if error, retry - fmt.Printf("error creating or updating resource: %s; name: %s; error: %s; retrying for %d more times\n", resource.GroupVersionKind(), resource.GetName(), err, retries-i) + log.Printf("error creating or updating resource: %s; name: %s; error: %s; retrying for %d more times\n", resource.GroupVersionKind(), resource.GetName(), err, retries-i) } // if still error on this resource, return error if err != nil { @@ -291,6 +288,7 @@ func 
IsDeploymentReady(ocClient client.Client, namespace, dName string) wait.Con if err != nil { return false, err } + log.Printf("Deployment %s status: %v", dName, deployment.Status) if deployment.Status.AvailableReplicas != deployment.Status.Replicas || deployment.Status.Replicas == 0 { for _, condition := range deployment.Status.Conditions { if len(condition.Message) > 0 { @@ -303,6 +301,30 @@ func IsDeploymentReady(ocClient client.Client, namespace, dName string) wait.Con } } +// IsStatefulSetReady checks if a StatefulSet is ready +func IsStatefulSetReady(ocClient client.Client, namespace, name string) wait.ConditionFunc { + return func() (bool, error) { + sts := &appsv1.StatefulSet{} + err := ocClient.Get(context.Background(), client.ObjectKey{ + Namespace: namespace, + Name: name, + }, sts) + if err != nil { + return false, err + } + log.Printf("StatefulSet %s status: %v", name, sts.Status) + if sts.Status.ReadyReplicas != sts.Status.Replicas || sts.Status.Replicas == 0 { + for _, condition := range sts.Status.Conditions { + if len(condition.Message) > 0 { + ginkgo.GinkgoWriter.Write([]byte(fmt.Sprintf("statefulset not available with condition: %s\n", condition.Message))) + } + } + return false, errors.New("statefulset is not in a ready state") + } + return true, nil + } +} + func AreApplicationPodsRunning(c *kubernetes.Clientset, namespace string) wait.ConditionFunc { return func() (bool, error) { podList, err := GetAllPodsWithLabel(c, namespace, e2eAppLabel) @@ -397,105 +419,148 @@ func RunMustGather(artifact_dir string, clusterClient client.Client) error { return nil } -func makeRequest(request string, api string, todo string) { - params := url.Values{} - params.Add("description", todo) - body := strings.NewReader(params.Encode()) - req, err := http.NewRequest(request, api, body) - if err != nil { - log.Printf("Error making post request to todo app before Prebackup %s", err) - } - req.Header.Set("Content-Type", "application/x-www-form-urlencoded") - resp, err := http.DefaultClient.Do(req) - if err != nil { - log.Printf("Response of todo POST REQUEST %s", resp.Status) - } - defer resp.Body.Close() -} - // VerifyBackupRestoreData verifies if app ready before backup and after restore to compare data. -func VerifyBackupRestoreData(clientv1 client.Client, artifact_dir string, namespace string, routeName string, app string, prebackupState bool, twoVol bool) error { +func VerifyBackupRestoreData(ocClient client.Client, kubeClient *kubernetes.Clientset, kubeConfig *rest.Config, artifactDir string, namespace string, routeName string, serviceName string, app string, prebackupState bool, twoVol bool) error { log.Printf("Verifying backup/restore data of %s", app) - appRoute := &routev1.Route{} - backupFile := artifact_dir + "/backup-data.txt" - routev1.AddToScheme(clientv1.Scheme()) - err := clientv1.Get(context.Background(), client.ObjectKey{ - Namespace: namespace, - Name: routeName, - }, appRoute) + appEndpointURL, proxyPodParams, err := getAppEndpointURLAndProxyParams(ocClient, kubeClient, kubeConfig, namespace, serviceName, routeName) if err != nil { return err } - appApi := "http://" + appRoute.Spec.Host - appEndpoint := appApi + "/todo-incomplete" - volumeEndpoint := appApi + "/log" - //if this is prebackstate = true, add items via makeRequest function. We only want to make request before backup - //and ignore post restore checks. 
- log.Printf("PrebackState: %t\n", prebackupState) + // Construct request parameters for the "todo-incomplete" endpoint + requestParamsTodoIncomplete := getRequestParameters(appEndpointURL+"/todo-incomplete", proxyPodParams, GET, nil) + if prebackupState { - // delete backupFile if it exists - if _, err := os.Stat(backupFile); err == nil { - os.Remove(backupFile) - } - //data before curl request - dataBeforeCurl, err := getResponseData(appEndpoint) + // Clean up existing backup file + RemoveFileIfExists(artifactDir + "/backup-data.txt") + + // Make requests and update data before backup + dataBeforeCurl, errResp, err := MakeRequest(*requestParamsTodoIncomplete) if err != nil { + if errResp != "" { + log.Printf("Request response error msg: %s\n", errResp) + } return err } log.Printf("Data before the curl request: \n %s\n", dataBeforeCurl) - //make post request to given api - makeRequest("POST", appApi+"/todo", time.Now().String()) - makeRequest("POST", appApi+"/todo", time.Now().Weekday().String()) + + // Make two post requests to the "todo" endpoint + postPayload := `{"description": "` + time.Now().String() + `"}` + requestParams := getRequestParameters(appEndpointURL+"/todo", proxyPodParams, POST, &postPayload) + MakeRequest(*requestParams) + + postPayload = `{"description": "` + time.Now().Weekday().String() + `"}` + requestParams = getRequestParameters(appEndpointURL+"/todo", proxyPodParams, POST, &postPayload) + MakeRequest(*requestParams) } - //get response Data if response status is 200 - respData, err := getResponseData(appEndpoint) + + // Make request to the "todo-incomplete" endpoint + respData, errResp, err := MakeRequest(*requestParamsTodoIncomplete) if err != nil { + if errResp != "" { + log.Printf("Request response error msg: %s\n", errResp) + } return err } if prebackupState { + // Write data to backup file log.Printf("Writing data to backupFile (backup-data.txt): \n %s\n", respData) - err := os.WriteFile(backupFile, respData, 0644) - if err != nil { + if err := os.WriteFile(artifactDir+"/backup-data.txt", []byte(respData), 0644); err != nil { return err } } else { - backupData, err := os.ReadFile(backupFile) + // Compare data with backup file after restore + backupData, err := os.ReadFile(artifactDir + "/backup-data.txt") if err != nil { return err } log.Printf("Data came from backup-file\n %s\n", backupData) - backDataIsEqual := false - log.Printf("Data from the response after restore\n %s\n", respData) - backDataIsEqual = bytes.Equal(backupData, respData) - if backDataIsEqual != true { + log.Printf("Data came from response\n %s\n", respData) + trimmedBackup := bytes.TrimSpace(backupData) + trimmedResp := bytes.TrimSpace([]byte(respData)) + if !bytes.Equal(trimmedBackup, trimmedResp) { return errors.New("Backup and Restore Data are not the same") } } if twoVol { - volumeFile := artifact_dir + "/volume-data.txt" - return verifyVolume(volumeFile, volumeEndpoint, prebackupState) + // Verify volume data if needed + requestParamsVolume := getRequestParameters(appEndpointURL+"/log", proxyPodParams, GET, nil) + volumeFile := artifactDir + "/volume-data.txt" + return verifyVolume(requestParamsVolume, volumeFile, prebackupState) } return nil } -// VerifyVolumeData for application with two volumes -func verifyVolume(volumeFile string, volumeApi string, prebackupState bool) error { - //get response Data if response status is 200 - volData, err := getResponseData(volumeApi) +func getRequestParameters(url string, proxyPodParams *ProxyPodParameters, method HTTPMethod, payload *string) 
*RequestParameters { + return &RequestParameters{ + ProxyPodParams: proxyPodParams, + RequestMethod: &method, + URL: url, + Payload: payload, + } +} + +func getAppEndpointURLAndProxyParams(ocClient client.Client, kubeClient *kubernetes.Clientset, kubeConfig *rest.Config, namespace, serviceName, routeName string) (string, *ProxyPodParameters, error) { + appEndpointURL, err := GetRouteEndpointURL(ocClient, namespace, routeName) + // Something wrong with standard endpoint, try with proxy pod. if err != nil { - return err + log.Println("Can not connect to the application endpoint with route:", err) + log.Println("Trying to get to the service via proxy POD") + + pod, podErr := GetFirstPodByLabel(kubeClient, namespace, "curl-tool=true") + if podErr != nil { + return "", nil, fmt.Errorf("Error getting pod for the proxy command: %v", podErr) + } + + proxyPodParams := &ProxyPodParameters{ + KubeClient: kubeClient, + KubeConfig: kubeConfig, + Namespace: namespace, + PodName: pod.ObjectMeta.Name, + ContainerName: "curl-tool", + } + + appEndpointURL = GetInternalServiceEndpointURL(namespace, serviceName) + + return appEndpointURL, proxyPodParams, nil + } + + return appEndpointURL, nil, nil +} + +// VerifyVolumeData for application with two volumes +func verifyVolume(requestParams *RequestParameters, volumeFile string, prebackupState bool) error { + var volData string + var err error + + // Function to get response + getResponseFromVolumeCall := func() bool { + // Attempt to make the request + var errResp string + volData, errResp, err = MakeRequest(*requestParams) + if err != nil { + if errResp != "" { + log.Printf("Request response error msg: %s\n", errResp) + } + log.Printf("Request errored out: %v\n", err) + return false + } + return true } + + if success := gomega.Eventually(getResponseFromVolumeCall, time.Minute*4, time.Second*15).Should(gomega.BeTrue()); !success { + return fmt.Errorf("Failed to get response for volume: %v", err) + } + if prebackupState { // delete volumeFile if it exists - if _, err := os.Stat(volumeFile); err == nil { - os.Remove(volumeFile) - } + RemoveFileIfExists(volumeFile) + log.Printf("Writing data to volumeFile (volume-data.txt): \n %s", volData) - err := os.WriteFile(volumeFile, volData, 0644) + err := os.WriteFile(volumeFile, []byte(volData), 0644) if err != nil { return err } @@ -507,32 +572,10 @@ func verifyVolume(volumeFile string, volumeApi string, prebackupState bool) erro } log.Printf("Data came from volume-file\n %s", volumeBackupData) log.Printf("Volume Data after restore\n %s", volData) - dataIsIn := bytes.Contains(volData, volumeBackupData) + dataIsIn := bytes.Contains([]byte(volData), volumeBackupData) if dataIsIn != true { return errors.New("Backup data is not in Restore Data") } } return nil } - -func getResponseData(appApi string) ([]byte, error) { - var body []byte - var readError error - - checkStatusCode := func() (bool, error) { - resp, err := http.Get(appApi) - if err != nil { - return false, err - } - defer resp.Body.Close() - if resp.StatusCode != 200 { - log.Printf("Request errored out with Status Code %v\n", resp.StatusCode) - return false, fmt.Errorf("Request errored out with Status Code %v", resp.StatusCode) - } - body, readError = io.ReadAll(resp.Body) - return true, nil - } - - gomega.Eventually(checkStatusCode, time.Minute*4, time.Second*15).Should(gomega.BeTrue()) - return body, readError -} diff --git a/tests/e2e/lib/backup.go b/tests/e2e/lib/backup.go index 458696eee7..2836b5e98b 100755 --- a/tests/e2e/lib/backup.go +++ 
b/tests/e2e/lib/backup.go @@ -11,7 +11,6 @@ import ( pkgbackup "github.com/vmware-tanzu/velero/pkg/backup" "github.com/vmware-tanzu/velero/pkg/cmd/util/downloadrequest" "github.com/vmware-tanzu/velero/pkg/cmd/util/output" - veleroClientset "github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned" "github.com/vmware-tanzu/velero/pkg/label" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" @@ -34,6 +33,23 @@ func CreateBackupForNamespaces(ocClient client.Client, veleroNamespace, backupNa return ocClient.Create(context.Background(), &backup) } +func CreateCustomBackupForNamespaces(ocClient client.Client, veleroNamespace, backupName string, namespaces []string, includedResources, excludedResources []string, defaultVolumesToFsBackup bool, snapshotMoveData bool) error { + backup := velero.Backup{ + ObjectMeta: metav1.ObjectMeta{ + Name: backupName, + Namespace: veleroNamespace, + }, + Spec: velero.BackupSpec{ + IncludedNamespaces: namespaces, + IncludedResources: includedResources, + ExcludedResources: excludedResources, + DefaultVolumesToFsBackup: &defaultVolumesToFsBackup, + SnapshotMoveData: &snapshotMoveData, + }, + } + return ocClient.Create(context.Background(), &backup) +} + func GetBackup(c client.Client, namespace string, name string) (*velero.Backup, error) { backup := velero.Backup{} err := c.Get(context.Background(), client.ObjectKey{ @@ -90,7 +106,7 @@ func IsBackupCompletedSuccessfully(c *kubernetes.Clientset, ocClient client.Clie } // https://github.com/vmware-tanzu/velero/blob/11bfe82342c9f54c63f40d3e97313ce763b446f2/pkg/cmd/cli/backup/describe.go#L77-L111 -func DescribeBackup(veleroClient veleroClientset.Interface, ocClient client.Client, namespace string, name string) (backupDescription string) { +func DescribeBackup(ocClient client.Client, namespace string, name string) (backupDescription string) { backup, err := GetBackup(ocClient, namespace, name) if err != nil { return "could not get provided backup: " + err.Error() @@ -100,13 +116,15 @@ func DescribeBackup(veleroClient veleroClientset.Interface, ocClient client.Clie caCertFile := "" deleteRequestListOptions := pkgbackup.NewDeleteBackupRequestListOptions(backup.Name, string(backup.UID)) - deleteRequestList, err := veleroClient.VeleroV1().DeleteBackupRequests(backup.Namespace).List(context.Background(), deleteRequestListOptions) + deleteRequestList := &velero.DeleteBackupRequestList{} + err = ocClient.List(context.Background(), deleteRequestList, client.InNamespace(backup.Namespace), &client.ListOptions{Raw: &deleteRequestListOptions}) if err != nil { log.Printf("error getting DeleteBackupRequests for backup %s: %v\n", backup.Name, err) } opts := label.NewListOptionsForBackup(backup.Name) - podVolumeBackupList, err := veleroClient.VeleroV1().PodVolumeBackups(backup.Namespace).List(context.Background(), opts) + podVolumeBackupList := &velero.PodVolumeBackupList{} + err = ocClient.List(context.Background(), podVolumeBackupList, client.InNamespace(backup.Namespace), &client.ListOptions{Raw: &opts}) if err != nil { log.Printf("error getting PodVolumeBackups for backup %s: %v\n", backup.Name, err) } @@ -144,3 +162,58 @@ func BackupErrorLogs(c *kubernetes.Clientset, ocClient client.Client, namespace bl := BackupLogs(c, ocClient, namespace, name) return errorLogsExcludingIgnored(bl) } + +func GetBackupRepositoryList(c client.Client, namespace string) (*velero.BackupRepositoryList, error) { + // initialize an empty list of BackupRepositories + backupRepositoryList := 
&velero.BackupRepositoryList{ + Items: []velero.BackupRepository{}, + } + // get the list of BackupRepositories in the given namespace + err := c.List(context.Background(), backupRepositoryList, client.InNamespace(namespace)) + if err != nil { + log.Printf("error getting BackupRepository list: %v", err) + return nil, err + } + return backupRepositoryList, nil +} + +func DeleteBackupRepository(c client.Client, namespace string, name string) error { + backupRepository := &velero.BackupRepository{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: name, + }, + } + err := c.Delete(context.Background(), backupRepository) + if err != nil { + return err + } + return nil +} + +// DeleteBackupRepositories deletes all BackupRepositories in the given namespace. +func DeleteBackupRepositories(c client.Client, namespace string) error { + log.Printf("Checking if backuprepository's exist in %s", namespace) + + backupRepos, err := GetBackupRepositoryList(c, namespace) + if err != nil { + return fmt.Errorf("failed to get BackupRepository list: %v", err) + } + if len(backupRepos.Items) == 0 { + log.Printf("No BackupRepositories found in namespace %s", namespace) + return nil + } + + // Get a list of the BackupRepositories and delete all of them. + for _, repo := range backupRepos.Items { + log.Printf("backuprepository name is %s", repo.Name) + err := DeleteBackupRepository(c, namespace, repo.Name) + if err != nil { + log.Printf("failed to delete BackupRepository %s: ", repo.Name) + return err + } + log.Printf("Successfully deleted BackupRepository: %s", repo.Name) + } + + return nil +} diff --git a/tests/e2e/lib/common_helpers.go b/tests/e2e/lib/common_helpers.go new file mode 100644 index 0000000000..b1f7b5869b --- /dev/null +++ b/tests/e2e/lib/common_helpers.go @@ -0,0 +1,285 @@ +package lib + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "log" + "net/http" + neturl "net/url" + "os" + "os/exec" + "strings" +) + +type RequestParameters struct { + ProxyPodParams *ProxyPodParameters // Required, when using K8s proxy container + RequestMethod *HTTPMethod + URL string + Payload *string // Required for POST method +} + +type HTTPMethod string + +const ( + GET HTTPMethod = "GET" + POST HTTPMethod = "POST" +) + +func ReadFile(path string) ([]byte, error) { + file, err := ioutil.ReadFile(path) + return file, err +} + +func RemoveFileIfExists(filePath string) { + if _, err := os.Stat(filePath); err == nil { + os.Remove(filePath) + } +} + +// Replace new line with carriage return +func ReplaceSecretDataNewLineWithCarriageReturn(data []byte) []byte { + data = []byte(strings.ReplaceAll(string(data), "\n", "\r\n")) + return data +} + +// Extract tar.gz file to a directory of the same name in the same directory +func ExtractTarGz(pathToDir, tarGzFileName string) error { + return exec.Command("tar", "-xzf", pathToDir+"/"+tarGzFileName, "-C", pathToDir).Run() +} + +// Checks if the payload is actually valid json +func isPayloadValidJSON(payLoad string) bool { + var js map[string]interface{} + return json.Unmarshal([]byte(payLoad), &js) == nil +} + +// MakeRequest performs an HTTP request with the given parameters and returns the response, +// error message, and any encountered errors. +// +// It can make such request directly or via proxy pod container, so the URL can be also +// reached using internal to k8s service endpoint. +// +// Parameters: +// - params: RequestParameters struct containing the details of the HTTP request. 
+// The struct includes fields like RequestMethod, URL, Payload, and ProxyPodParams. +// +// Returns: +// - response: The response body as a string in case of a successful HTTP request. +// - errorResponse: The error response message if the HTTP request encounters an error. +// - err: An error object indicating any errors that occurred during the HTTP request. +func MakeRequest(params RequestParameters) (string, string, error) { + + var requestMethod HTTPMethod + + // Allowed is only GET and POST, however + // Request method defaults to GET when not provided + if params.RequestMethod == nil { + requestMethod = GET + } else if *params.RequestMethod == GET || *params.RequestMethod == POST { + requestMethod = *params.RequestMethod + } else { + log.Printf("Invalid Request Method: %s", *params.RequestMethod) + return "", "", fmt.Errorf("Invalid Request Method: %s", *params.RequestMethod) + } + + if params.URL == "" { + errMsg := "URL in a request can not be empty" + log.Printf(errMsg) + return "", "", fmt.Errorf(errMsg) + } + + // Check if the Payload is provided when using POST + if requestMethod == POST && (params.Payload == nil || *params.Payload == "") { + errMsg := "Payload is required while performing POST Request" + log.Printf(errMsg) + return "", "", fmt.Errorf(errMsg) + } else if requestMethod == POST { + if !isPayloadValidJSON(*params.Payload) { + errMsg := fmt.Sprintf("Invalid JSON payload: %s", *params.Payload) + fmt.Println(errMsg) + return "", "", fmt.Errorf(errMsg) + } + } + + if params.ProxyPodParams != nil && params.ProxyPodParams.PodName != "" && params.ProxyPodParams.KubeConfig != nil && params.ProxyPodParams.KubeClient != nil && params.ProxyPodParams.Namespace != "" { + // Make request via Proxy POD + var curlInProxyCmd string + if requestMethod == GET { + log.Printf("Using Proxy pod container: %s", params.ProxyPodParams.PodName) + curlInProxyCmd = "curl -X GET --silent --show-error " + params.URL + } else if requestMethod == POST { + body, err := convertJsonStringToURLParams(*params.Payload) + if err != nil { + return "", "", fmt.Errorf("Error while converting parameters: %v", err) + } + curlInProxyCmd = fmt.Sprintf("curl -X POST -d %s --silent --show-error %s", body, params.URL) + } + return ExecuteCommandInPodsSh(*params.ProxyPodParams, curlInProxyCmd) + } else { + var response string + var errorResponse string + var err error + if requestMethod == POST { + response, errorResponse, err = MakeHTTPRequest(params.URL, requestMethod, *params.Payload) + } else { + response, errorResponse, err = MakeHTTPRequest(params.URL, requestMethod, "") + } + if err != nil { + return "", errorResponse, err + } + return response, errorResponse, nil + } + +} + +// GetInternalServiceEndpointURL constructs the internal service endpoint URI +// for a service in a Kubernetes cluster. +// +// Parameters: +// - namespace: The namespace of the service. +// - serviceName: The name of the service. +// - servicePort: (Optional) The port number of the service. If not provided, +// the default port 8000 is used. +// +// Returns: +// - string: The constructed internal service endpoint URI. +func GetInternalServiceEndpointURL(namespace, serviceName string, servicePort ...int) string { + port := 8000 + if len(servicePort) > 0 { + port = servicePort[0] + } + + return fmt.Sprintf("http://%s.%s.svc.cluster.local:%d", serviceName, namespace, port) +} + +// ConvertJsonStringToURLParams takes a JSON string as input and converts it to URL-encoded parameters. 
+// It returns a string containing the URL-encoded parameters.
+//
+// Parameters:
+// - payload (string): The JSON string to be converted.
+//
+// Returns:
+// - string: The URL-encoded parameters.
+// - error: An error, if any, during the conversion process.
+//
+// Example:
+//
+//	input: `{"name": "John", "age": 30, "city": "New York"}`
+//	output: `age=30&city=New+York&name=John` (Encode sorts keys alphabetically)
+func convertJsonStringToURLParams(payload string) (string, error) {
+	var data map[string]interface{}
+	err := json.Unmarshal([]byte(payload), &data)
+	if err != nil {
+		log.Printf("Can not convert JSON string to URL Param: %s", payload)
+		return "", err
+	}
+
+	params := neturl.Values{}
+	for key, value := range data {
+		params.Add(key, fmt.Sprintf("%v", value))
+	}
+	encodedParams := params.Encode()
+	log.Printf("Payload encoded parameters: %s", encodedParams)
+	return encodedParams, nil
+}
+
+// IsURLReachable checks the reachability of an HTTP or HTTPS URL.
+//
+// Parameters:
+// - url: The URL to check for reachability.
+//
+// Returns:
+// - bool: True if the URL is reachable, false otherwise.
+// - error: An error, if any, encountered during the HTTP request.
+//
+// It performs a GET request to the specified URL and returns true if
+// the request is successful (status code in the 2xx range), indicating
+// that the site is reachable. If there is an error during the request
+// or if the status code indicates an error, it returns false.
+func IsURLReachable(url string) (bool, error) {
+	// Attempt to perform a GET request to the specified URL
+	resp, err := http.Get(url)
+	if err != nil {
+		// An error occurred during the HTTP request
+		return false, err
+	}
+	defer resp.Body.Close()
+
+	// Check if the response status code indicates success (2xx range)
+	if resp.StatusCode >= 200 && resp.StatusCode < 300 {
+		return true, nil
+	}
+
+	// The response status code indicates an error
+	return false, fmt.Errorf("HTTP request failed with status code: %d", resp.StatusCode)
+}
+
+// MakeHTTPRequest performs an HTTP request with the specified URL, request method, and payload.
+// The function supports both GET and POST methods. If the request method is invalid or an error occurs
+// during the HTTP request, it returns an error along with the error response body.
+//
+// Parameters:
+// - url: The URL for the HTTP request.
+// - requestMethod: The HTTP request method (e.g., "GET" or "POST").
+// - payload: The payload for the POST request. It is ignored for GET requests and may be empty.
+//
+// Returns:
+// - string: The successful response body for a 2xx status code.
+// - string: The error response body for non-2xx status codes.
+// - error: An error indicating any issues during the HTTP request or response handling.
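+//
+// Illustrative usage, as a sketch (the endpoint below is a hypothetical in-cluster
+// service URL; any reachable HTTP endpoint accepting form-encoded POSTs works):
+//
+//	payload := `{"description": "example item"}`
+//	body, errBody, err := MakeHTTPRequest("http://my-app.my-ns.svc.cluster.local:8000/todo", POST, payload)
+//	if err != nil {
+//		log.Printf("request failed: %v (error response: %s)", err, errBody)
+//	} else {
+//		log.Printf("response body: %s", body)
+//	}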
+func MakeHTTPRequest(url string, requestMethod HTTPMethod, payload string) (string, string, error) { + var resp *http.Response + var req *http.Request + var err error + var body string + + if requestMethod == GET { + resp, err = http.Get(url) + } else if requestMethod == POST { + body, err = convertJsonStringToURLParams(payload) + if err != nil { + return "", "", err + } + req, err = http.NewRequest(string(requestMethod), url, strings.NewReader(body)) + if err != nil { + log.Printf("Error making post request %s", err) + return "", "", err + } + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + resp, err = http.DefaultClient.Do(req) + if err != nil { + if resp != nil { + log.Printf("Response of POST REQUEST %s", resp.Status) + } + return "", "", err + } + + } else { + errMsg := fmt.Sprintf("Invalid request method: %s", requestMethod) + log.Printf(errMsg) + return "", "", fmt.Errorf(errMsg) + } + + if err != nil { + return "", "", err + } + defer resp.Body.Close() + + if resp.StatusCode >= 200 && resp.StatusCode < 300 { + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return "", string(body), fmt.Errorf("Error reading response body: %v", err) + } + return string(body), "", nil + } + + // The response status code indicates an error + // Read the error response body + responseBody, responseErr := ioutil.ReadAll(resp.Body) + if responseErr != nil { + return "", string(responseBody), fmt.Errorf("HTTP request failed with status code %d: %v", resp.StatusCode, responseErr) + } + + return "", string(responseBody), fmt.Errorf("HTTP request failed with status code: %d", resp.StatusCode) +} diff --git a/tests/e2e/lib/dpa_helpers.go b/tests/e2e/lib/dpa_helpers.go index 2fd0ca6ad0..71c4c8c008 100755 --- a/tests/e2e/lib/dpa_helpers.go +++ b/tests/e2e/lib/dpa_helpers.go @@ -10,8 +10,6 @@ import ( "time" "github.com/google/go-cmp/cmp" - oadpv1alpha1 "github.com/openshift/oadp-operator/api/v1alpha1" - utils "github.com/openshift/oadp-operator/tests/e2e/utils" velero "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -19,21 +17,25 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" + + oadpv1alpha1 "github.com/openshift/oadp-operator/api/v1alpha1" ) type BackupRestoreType string const ( - CSI BackupRestoreType = "csi" - CSIDataMover BackupRestoreType = "csi-datamover" - RESTIC BackupRestoreType = "restic" - KOPIA BackupRestoreType = "kopia" + CSI BackupRestoreType = "csi" + CSIDataMover BackupRestoreType = "csi-datamover" + RESTIC BackupRestoreType = "restic" + KOPIA BackupRestoreType = "kopia" + NativeSnapshots BackupRestoreType = "native-snapshots" ) type DpaCustomResource struct { Name string Namespace string Client client.Client + VSLSecretName string BSLSecretName string BSLConfig map[string]string BSLProvider string @@ -41,14 +43,14 @@ type DpaCustomResource struct { BSLBucketPrefix string VeleroDefaultPlugins []oadpv1alpha1.DefaultPlugin SnapshotLocations []oadpv1alpha1.SnapshotLocation + UnsupportedOverrides map[oadpv1alpha1.UnsupportedImageKey]string } func LoadDpaSettingsFromJson(settings string) (*oadpv1alpha1.DataProtectionApplication, error) { - file, err := utils.ReadFile(settings) + file, err := ReadFile(settings) if err != nil { return nil, fmt.Errorf("Error getting settings json file: %v", err) } - dpa := &oadpv1alpha1.DataProtectionApplication{} err = json.Unmarshal(file, &dpa) if err != nil { @@ -93,6 +95,7 @@ func (v 
*DpaCustomResource) Build(backupRestoreType BackupRestoreType) *oadpv1al }, }, }, + UnsupportedOverrides: v.UnsupportedOverrides, } switch backupRestoreType { case RESTIC, KOPIA: @@ -111,11 +114,15 @@ func (v *DpaCustomResource) Build(backupRestoreType BackupRestoreType) *oadpv1al dpaSpec.Configuration.Velero.DefaultPlugins = append(dpaSpec.Configuration.Velero.DefaultPlugins, oadpv1alpha1.DefaultPluginCSI) dpaSpec.Configuration.Velero.FeatureFlags = append(dpaSpec.Configuration.Velero.FeatureFlags, velero.CSIFeatureFlag) dpaSpec.SnapshotLocations = nil + case NativeSnapshots: + dpaSpec.SnapshotLocations[0].Velero.Credential = &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: v.VSLSecretName, + }, + Key: "cloud", + } } - // Uncomment to override plugin images to use - dpaSpec.UnsupportedOverrides = map[oadpv1alpha1.UnsupportedImageKey]string{ - // oadpv1alpha1.VeleroImageKey: "quay.io/konveyor/velero:oadp-1.1", - } + return &dpaSpec } @@ -141,11 +148,10 @@ func (v *DpaCustomResource) Get() (*oadpv1alpha1.DataProtectionApplication, erro return &dpa, nil } -func (v *DpaCustomResource) CreateOrUpdate(c client.Client, spec *oadpv1alpha1.DataProtectionApplicationSpec) error { +func (v *DpaCustomResource) CreateOrUpdate(spec *oadpv1alpha1.DataProtectionApplicationSpec) error { // for debugging // prettyPrint, _ := json.MarshalIndent(spec, "", " ") // log.Printf("DPA with spec\n%s\n", prettyPrint) - dpa, err := v.Get() if err != nil { if apierrors.IsNotFound(err) { @@ -156,6 +162,7 @@ func (v *DpaCustomResource) CreateOrUpdate(c client.Client, spec *oadpv1alpha1.D }, Spec: *spec.DeepCopy(), } + dpa.Spec.UnsupportedOverrides = v.UnsupportedOverrides return v.Create(dpa) } return err @@ -163,6 +170,7 @@ func (v *DpaCustomResource) CreateOrUpdate(c client.Client, spec *oadpv1alpha1.D dpaPatch := dpa.DeepCopy() spec.DeepCopyInto(&dpaPatch.Spec) dpaPatch.ObjectMeta.ManagedFields = nil + dpaPatch.Spec.UnsupportedOverrides = v.UnsupportedOverrides err = v.Client.Patch(context.Background(), dpaPatch, client.MergeFrom(dpa), &client.PatchOptions{}) if err != nil { log.Printf("error patching DPA: %s", err) @@ -171,6 +179,7 @@ func (v *DpaCustomResource) CreateOrUpdate(c client.Client, spec *oadpv1alpha1.D } return err } + return nil } @@ -183,7 +192,7 @@ func (v *DpaCustomResource) Delete() error { return err } err = v.Client.Delete(context.Background(), dpa) - if apierrors.IsNotFound(err) { + if err != nil && apierrors.IsNotFound(err) { return nil } return err @@ -299,7 +308,7 @@ func (v *DpaCustomResource) BSLsAreUpdated(updateTime time.Time) wait.ConditionF } // check if bsl matches the spec -func (v *DpaCustomResource) DoesBSLSpecMatchesDpa(namespace string, dpaBSLSpec velero.BackupStorageLocationSpec) (bool, error) { +func (v *DpaCustomResource) DoesBSLSpecMatchesDpa(dpaBSLSpec velero.BackupStorageLocationSpec) (bool, error) { bsls, err := v.ListBSLs() if err != nil { return false, err @@ -336,7 +345,7 @@ func (v *DpaCustomResource) ListVSLs() (*velero.VolumeSnapshotLocationList, erro } // check if vsl matches the spec -func (v *DpaCustomResource) DoesVSLSpecMatchesDpa(namespace string, dpaVSLSpec velero.VolumeSnapshotLocationSpec) (bool, error) { +func (v *DpaCustomResource) DoesVSLSpecMatchesDpa(dpaVSLSpec velero.VolumeSnapshotLocationSpec) (bool, error) { vsls, err := v.ListVSLs() if err != nil { return false, err diff --git a/tests/e2e/lib/hcp/dpa.go b/tests/e2e/lib/hcp/dpa.go new file mode 100644 index 0000000000..668747e199 --- /dev/null +++ 
b/tests/e2e/lib/hcp/dpa.go @@ -0,0 +1,99 @@ +package hcp + +import ( + "context" + "fmt" + "log" + + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + + oadpv1alpha1 "github.com/openshift/oadp-operator/api/v1alpha1" +) + +// AddHCPPluginToDPA adds the HCP plugin to a DPA +func (h *HCHandler) AddHCPPluginToDPA(namespace, name string, overrides bool) error { + addHCPlugin := true + + log.Printf("Adding HCP default plugin to DPA") + dpa := &oadpv1alpha1.DataProtectionApplication{} + err := h.Client.Get(h.Ctx, types.NamespacedName{Namespace: namespace, Name: name}, dpa) + if err != nil { + return err + } + + // Check if the hypershift plugin is already in the default plugins + for _, plugin := range dpa.Spec.Configuration.Velero.DefaultPlugins { + if plugin == oadpv1alpha1.DefaultPluginHypershift { + log.Printf("HCP plugin already in DPA") + if overrides { + log.Printf("Override set to true, removing HCP plugin from DPA") + addHCPlugin = false + break + } + return nil + } + } + + if addHCPlugin { + dpa.Spec.Configuration.Velero.DefaultPlugins = append(dpa.Spec.Configuration.Velero.DefaultPlugins, oadpv1alpha1.DefaultPluginHypershift) + } + + if overrides { + dpa.Spec.UnsupportedOverrides = map[oadpv1alpha1.UnsupportedImageKey]string{ + oadpv1alpha1.HypershiftPluginImageKey: "quay.io/redhat-user-workloads/ocp-art-tenant/oadp-hypershift-oadp-plugin-oadp-1-5:oadp-1.5", + } + } + + err = h.Client.Update(h.Ctx, dpa) + if err != nil { + return fmt.Errorf("failed to update DPA: %v", err) + } + log.Printf("HCP plugin added to DPA") + return nil +} + +// RemoveHCPPluginFromDPA removes the HCP plugin from a DPA +func (h *HCHandler) RemoveHCPPluginFromDPA(namespace, name string) error { + log.Printf("Removing HCP plugin from DPA") + dpa := &oadpv1alpha1.DataProtectionApplication{} + err := h.Client.Get(h.Ctx, types.NamespacedName{Namespace: namespace, Name: name}, dpa) + if err != nil { + return err + } + delete(dpa.Spec.UnsupportedOverrides, oadpv1alpha1.HypershiftPluginImageKey) + // remove hypershift plugin from default plugins + for i, plugin := range dpa.Spec.Configuration.Velero.DefaultPlugins { + if plugin == oadpv1alpha1.DefaultPluginHypershift { + dpa.Spec.Configuration.Velero.DefaultPlugins = append(dpa.Spec.Configuration.Velero.DefaultPlugins[:i], dpa.Spec.Configuration.Velero.DefaultPlugins[i+1:]...) 
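+			// append(s[:i], s[i+1:]...) drops element i from DefaultPlugins in place;
+			// only the first match is removed, so the loop can stop here.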
+ break + } + } + err = h.Client.Update(h.Ctx, dpa) + if err != nil { + return fmt.Errorf("failed to update DPA: %v", err) + } + log.Printf("HCP plugin removed from DPA") + return nil +} + +// IsHCPPluginAdded checks if the HCP plugin is added to a DPA +func IsHCPPluginAdded(c client.Client, namespace, name string) bool { + dpa := &oadpv1alpha1.DataProtectionApplication{} + err := c.Get(context.Background(), types.NamespacedName{Namespace: namespace, Name: name}, dpa) + if err != nil { + return false + } + + if dpa.Spec.Configuration == nil || dpa.Spec.Configuration.Velero == nil { + return false + } + + for _, plugin := range dpa.Spec.Configuration.Velero.DefaultPlugins { + if plugin == oadpv1alpha1.DefaultPluginHypershift { + return true + } + } + + return false +} diff --git a/tests/e2e/lib/hcp/dpa_test.go b/tests/e2e/lib/hcp/dpa_test.go new file mode 100644 index 0000000000..a7c3fcd307 --- /dev/null +++ b/tests/e2e/lib/hcp/dpa_test.go @@ -0,0 +1,295 @@ +package hcp + +import ( + "context" + "testing" + + "github.com/onsi/gomega" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + oadpv1alpha1 "github.com/openshift/oadp-operator/api/v1alpha1" +) + +func TestAddHCPPluginToDPA(t *testing.T) { + g := gomega.NewGomegaWithT(t) + + tests := []struct { + name string + dpa *oadpv1alpha1.DataProtectionApplication + overrides bool + }{ + { + name: "Add plugin without overrides", + dpa: &oadpv1alpha1.DataProtectionApplication{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-dpa", + Namespace: "test-ns", + }, + Spec: oadpv1alpha1.DataProtectionApplicationSpec{ + Configuration: &oadpv1alpha1.ApplicationConfig{ + Velero: &oadpv1alpha1.VeleroConfig{ + DefaultPlugins: []oadpv1alpha1.DefaultPlugin{}, + }, + }, + }, + }, + overrides: false, + }, + { + name: "Add plugin with overrides", + dpa: &oadpv1alpha1.DataProtectionApplication{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-dpa", + Namespace: "test-ns", + }, + Spec: oadpv1alpha1.DataProtectionApplicationSpec{ + Configuration: &oadpv1alpha1.ApplicationConfig{ + Velero: &oadpv1alpha1.VeleroConfig{ + DefaultPlugins: []oadpv1alpha1.DefaultPlugin{}, + }, + }, + }, + }, + overrides: true, + }, + { + name: "Plugin already exists", + dpa: &oadpv1alpha1.DataProtectionApplication{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-dpa", + Namespace: "test-ns", + }, + Spec: oadpv1alpha1.DataProtectionApplicationSpec{ + Configuration: &oadpv1alpha1.ApplicationConfig{ + Velero: &oadpv1alpha1.VeleroConfig{ + DefaultPlugins: []oadpv1alpha1.DefaultPlugin{ + oadpv1alpha1.DefaultPluginHypershift, + }, + }, + }, + }, + }, + overrides: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Create a new scheme with DataProtectionApplication registered + scheme := runtime.NewScheme() + err := oadpv1alpha1.AddToScheme(scheme) + g.Expect(err).NotTo(gomega.HaveOccurred()) + + // Create a new client with the scheme + client := fake.NewClientBuilder().WithScheme(scheme).Build() + + // Create the handler + h := &HCHandler{ + Ctx: context.Background(), + Client: client, + } + + // Create DPA + err = client.Create(context.Background(), tt.dpa) + g.Expect(err).NotTo(gomega.HaveOccurred()) + + // Call AddHCPPluginToDPA + err = h.AddHCPPluginToDPA(tt.dpa.Namespace, tt.dpa.Name, tt.overrides) + g.Expect(err).NotTo(gomega.HaveOccurred()) + + // Verify DPA was updated + updatedDPA := &oadpv1alpha1.DataProtectionApplication{} + err = 
client.Get(context.Background(), types.NamespacedName{Name: tt.dpa.Name, Namespace: tt.dpa.Namespace}, updatedDPA) + g.Expect(err).NotTo(gomega.HaveOccurred()) + + // Check if plugin was added + pluginFound := false + for _, plugin := range updatedDPA.Spec.Configuration.Velero.DefaultPlugins { + if plugin == oadpv1alpha1.DefaultPluginHypershift { + pluginFound = true + break + } + } + g.Expect(pluginFound).To(gomega.BeTrue()) + + // Check if overrides were added + if tt.overrides { + g.Expect(updatedDPA.Spec.UnsupportedOverrides).To(gomega.HaveKey(oadpv1alpha1.HypershiftPluginImageKey)) + } + }) + } +} + +func TestRemoveHCPPluginFromDPA(t *testing.T) { + g := gomega.NewGomegaWithT(t) + + tests := []struct { + name string + dpa *oadpv1alpha1.DataProtectionApplication + }{ + { + name: "Remove plugin", + dpa: &oadpv1alpha1.DataProtectionApplication{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-dpa", + Namespace: "test-ns", + }, + Spec: oadpv1alpha1.DataProtectionApplicationSpec{ + Configuration: &oadpv1alpha1.ApplicationConfig{ + Velero: &oadpv1alpha1.VeleroConfig{ + DefaultPlugins: []oadpv1alpha1.DefaultPlugin{ + oadpv1alpha1.DefaultPluginHypershift, + }, + }, + }, + UnsupportedOverrides: map[oadpv1alpha1.UnsupportedImageKey]string{ + oadpv1alpha1.HypershiftPluginImageKey: "quay.io/redhat-user-workloads/ocp-art-tenant/oadp-hypershift-oadp-plugin-oadp-1-5:oadp-1.5", + }, + }, + }, + }, + { + name: "Plugin not present", + dpa: &oadpv1alpha1.DataProtectionApplication{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-dpa", + Namespace: "test-ns", + }, + Spec: oadpv1alpha1.DataProtectionApplicationSpec{ + Configuration: &oadpv1alpha1.ApplicationConfig{ + Velero: &oadpv1alpha1.VeleroConfig{ + DefaultPlugins: []oadpv1alpha1.DefaultPlugin{}, + }, + }, + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Create a new scheme with DataProtectionApplication registered + scheme := runtime.NewScheme() + err := oadpv1alpha1.AddToScheme(scheme) + g.Expect(err).NotTo(gomega.HaveOccurred()) + + // Create a new client with the scheme + client := fake.NewClientBuilder().WithScheme(scheme).Build() + + // Create the handler + h := &HCHandler{ + Ctx: context.Background(), + Client: client, + } + + // Create DPA + err = client.Create(context.Background(), tt.dpa) + g.Expect(err).NotTo(gomega.HaveOccurred()) + + // Call RemoveHCPPluginFromDPA + err = h.RemoveHCPPluginFromDPA(tt.dpa.Namespace, tt.dpa.Name) + g.Expect(err).NotTo(gomega.HaveOccurred()) + + // Verify DPA was updated + updatedDPA := &oadpv1alpha1.DataProtectionApplication{} + err = client.Get(context.Background(), types.NamespacedName{Name: tt.dpa.Name, Namespace: tt.dpa.Namespace}, updatedDPA) + g.Expect(err).NotTo(gomega.HaveOccurred()) + + // Check if plugin was removed + pluginFound := false + for _, plugin := range updatedDPA.Spec.Configuration.Velero.DefaultPlugins { + if plugin == oadpv1alpha1.DefaultPluginHypershift { + pluginFound = true + break + } + } + g.Expect(pluginFound).To(gomega.BeFalse()) + + // Check if overrides were removed + g.Expect(updatedDPA.Spec.UnsupportedOverrides).NotTo(gomega.HaveKey(oadpv1alpha1.HypershiftPluginImageKey)) + }) + } +} + +func TestIsHCPPluginAdded(t *testing.T) { + g := gomega.NewGomegaWithT(t) + + tests := []struct { + name string + dpa *oadpv1alpha1.DataProtectionApplication + expectedResult bool + }{ + { + name: "HCP plugin exists", + dpa: &oadpv1alpha1.DataProtectionApplication{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-dpa", + Namespace: "test-ns", + }, + 
Spec: oadpv1alpha1.DataProtectionApplicationSpec{ + Configuration: &oadpv1alpha1.ApplicationConfig{ + Velero: &oadpv1alpha1.VeleroConfig{ + DefaultPlugins: []oadpv1alpha1.DefaultPlugin{ + oadpv1alpha1.DefaultPluginHypershift, + }, + }, + }, + }, + }, + expectedResult: true, + }, + { + name: "HCP plugin does not exist", + dpa: &oadpv1alpha1.DataProtectionApplication{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-dpa", + Namespace: "test-ns", + }, + Spec: oadpv1alpha1.DataProtectionApplicationSpec{ + Configuration: &oadpv1alpha1.ApplicationConfig{ + Velero: &oadpv1alpha1.VeleroConfig{ + DefaultPlugins: []oadpv1alpha1.DefaultPlugin{ + oadpv1alpha1.DefaultPluginAWS, + }, + }, + }, + }, + }, + expectedResult: false, + }, + { + name: "DPA is nil", + expectedResult: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Create a new scheme with DataProtectionApplication registered + scheme := runtime.NewScheme() + err := oadpv1alpha1.AddToScheme(scheme) + g.Expect(err).NotTo(gomega.HaveOccurred()) + + // Create a new client with the scheme + client := fake.NewClientBuilder().WithScheme(scheme).Build() + + // Create DPA if it exists in the test case + if tt.dpa != nil { + err := client.Create(context.Background(), tt.dpa) + g.Expect(err).NotTo(gomega.HaveOccurred()) + } + + // Call IsHCPPluginAdded + var result bool + if tt.dpa != nil { + result = IsHCPPluginAdded(client, tt.dpa.Namespace, tt.dpa.Name) + } else { + result = IsHCPPluginAdded(client, "non-existent", "non-existent") + } + g.Expect(result).To(gomega.Equal(tt.expectedResult)) + }) + } +} diff --git a/tests/e2e/lib/hcp/hcp.go b/tests/e2e/lib/hcp/hcp.go new file mode 100644 index 0000000000..199fb332d4 --- /dev/null +++ b/tests/e2e/lib/hcp/hcp.go @@ -0,0 +1,579 @@ +package hcp + +import ( + "context" + "encoding/base64" + "fmt" + "log" + "time" + + hypershiftv1 "github.com/openshift/hypershift/api/hypershift/v1beta1" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/openshift/oadp-operator/tests/e2e/lib" +) + +func (h *HCHandler) RemoveHCP(timeout time.Duration) error { + // Delete the hostedCluster + if err := h.DeleteHostedCluster(); err != nil { + return err + } + + // Delete HCP Namespace + if err := h.DeleteHCPNamespace(false); err != nil { + return err + } + + // Delete HCP + if err := h.DeleteHostedControlPlane(); err != nil { + return err + } + + // Wait for HCP deletion with timeout + var hcpName string + if h.HostedCluster != nil { + hcpName = h.HostedCluster.Name + } else { + // If HostedCluster is nil, try to get the HCP name from the namespace + hcpName = "test-hc" // Default name if we can't determine it + } + + hcp := hypershiftv1.HostedControlPlane{ + ObjectMeta: metav1.ObjectMeta{ + Name: hcpName, + Namespace: h.HCPNamespace, + }, + } + if err := h.WaitForHCPDeletion(&hcp); err != nil { + return fmt.Errorf("failed to delete HCP: %v", err) + } + log.Printf("\tHCP deleted") + + // Delete HC Secrets + if err := h.DeleteHCSecrets(); err != nil { + return err + } + + // Wait for the HC to be deleted + log.Printf("\tWaiting for the HC to be deleted") + err := wait.PollUntilContextTimeout(h.Ctx, time.Second*5, timeout, true, func(ctx context.Context) (bool, 
error) { + log.Printf("\tAttempting to verify HC deletion...") + result := IsHCDeleted(h) + log.Printf("\tHC deletion check result: %v", result) + return result, nil + }) + + if err != nil { + return fmt.Errorf("failed to wait for HC deletion: %v", err) + } + + return nil +} + +// DeleteHostedCluster deletes a HostedCluster and waits for its deletion +func (h *HCHandler) DeleteHostedCluster() error { + if h.HostedCluster == nil { + log.Printf("No HostedCluster to delete") + return nil + } + + log.Printf("Deleting HostedCluster %s in namespace %s", h.HostedCluster.Name, h.HostedCluster.Namespace) + if err := h.deleteResource(h.HostedCluster); err != nil && !apierrors.IsNotFound(err) { + return fmt.Errorf("failed to delete HostedCluster: %v", err) + } + + // Wait for HC deletion + if err := h.WaitForHCDeletion(); err != nil { + return fmt.Errorf("failed waiting for HostedCluster deletion: %v", err) + } + + return nil +} + +// DeleteHCPNamespace deletes the HCP namespace and waits for its deletion if needed +func (h *HCHandler) DeleteHCPNamespace(shouldWait bool) error { + if h.HCPNamespace == "" { + log.Printf("No HCP namespace to delete") + return nil + } + + log.Printf("Deleting HCP namespace %s", h.HCPNamespace) + ns := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: h.HCPNamespace, + }, + } + + if err := h.deleteResource(ns); err != nil { + if apierrors.IsNotFound(err) { + log.Printf("Namespace %s already deleted", h.HCPNamespace) + return nil + } + return fmt.Errorf("failed to delete HCP namespace %s: %v", h.HCPNamespace, err) + } + + if !shouldWait { + return nil + } + + log.Printf("Waiting for namespace %s to be deleted", h.HCPNamespace) + err := wait.PollUntilContextTimeout(h.Ctx, WaitForNextCheckTimeout, Wait10Min, true, func(ctx context.Context) (bool, error) { + err := h.Client.Get(ctx, types.NamespacedName{Name: h.HCPNamespace}, ns) + if err == nil { + log.Printf("Namespace %s still exists, waiting...", h.HCPNamespace) + return false, nil + } + + if apierrors.IsNotFound(err) { + log.Printf("Namespace %s successfully deleted", h.HCPNamespace) + return true, nil + } + + // Handle retryable errors + if apierrors.IsTooManyRequests(err) || apierrors.IsServerTimeout(err) || apierrors.IsTimeout(err) { + log.Printf("Retryable error while checking namespace %s deletion: %v", h.HCPNamespace, err) + return false, nil + } + + return false, fmt.Errorf("unexpected error while checking namespace %s deletion: %v", h.HCPNamespace, err) + }) + + if err != nil { + return fmt.Errorf("timeout waiting for namespace %s to be deleted: %v", h.HCPNamespace, err) + } + + return nil +} + +// DeleteHostedControlPlane deletes a HostedControlPlane and waits for its deletion +func (h *HCHandler) DeleteHostedControlPlane() error { + if h.HCPNamespace == "" { + log.Printf("No HCP namespace specified") + return nil + } + + // Get the HCP name from HostedCluster if available, otherwise use default + var hcpName string + if h.HostedCluster != nil { + hcpName = h.HostedCluster.Name + } else { + hcpName = "test-hc" // Default name if HostedCluster is nil + } + + hcp := &hypershiftv1.HostedControlPlane{} + err := h.Client.Get(h.Ctx, types.NamespacedName{ + Namespace: h.HCPNamespace, + Name: hcpName, + }, hcp) + + if err != nil { + if apierrors.IsNotFound(err) { + log.Printf("No HostedControlPlane found in namespace %s", h.HCPNamespace) + return nil + } + return fmt.Errorf("failed to get HostedControlPlane: %v", err) + } + + log.Printf("Deleting HostedControlPlane %s in namespace %s", hcp.Name, 
hcp.Namespace) + if err := h.deleteResource(hcp); err != nil && !apierrors.IsNotFound(err) { + return fmt.Errorf("failed to delete HostedControlPlane: %v", err) + } + + // Wait for HCP deletion + if err := h.WaitForHCPDeletion(hcp); err != nil { + return fmt.Errorf("failed waiting for HostedControlPlane deletion: %v", err) + } + + return nil +} + +// DeleteHCSecrets deletes secrets in the HCP namespace +func (h *HCHandler) DeleteHCSecrets() error { + if h.HCPNamespace == "" { + log.Printf("No HCP namespace specified") + return nil + } + + log.Printf("Deleting secrets in namespace %s", h.HCPNamespace) + secretList := &corev1.SecretList{} + if err := h.Client.List(h.Ctx, secretList, &client.ListOptions{ + Namespace: h.HCPNamespace, + }); err != nil { + return fmt.Errorf("failed to list secrets: %v", err) + } + + for _, secret := range secretList.Items { + if err := h.deleteResource(&secret); err != nil && !apierrors.IsNotFound(err) { + return fmt.Errorf("failed to delete secret %s: %v", secret.Name, err) + } + } + + return nil +} + +// WaitForHCDeletion waits for the HostedCluster to be deleted +func (h *HCHandler) WaitForHCDeletion() error { + return wait.PollUntilContextTimeout(h.Ctx, WaitForNextCheckTimeout, Wait10Min, true, func(ctx context.Context) (bool, error) { + return IsHCDeleted(h), nil + }) +} + +// WaitForHCPDeletion waits for the HostedControlPlane to be deleted +func (h *HCHandler) WaitForHCPDeletion(hcp *hypershiftv1.HostedControlPlane) error { + return wait.PollUntilContextTimeout(h.Ctx, WaitForNextCheckTimeout, Wait10Min, true, func(ctx context.Context) (bool, error) { + return IsHCPDeleted(h, hcp), nil + }) +} + +// NukeHostedCluster removes all resources associated with a HostedCluster +func (h *HCHandler) NukeHostedCluster() error { + // List of resource types to check + log.Printf("\tNuking HostedCluster") + resourceTypes := []struct { + kind string + gvk schema.GroupVersionKind + }{ + {"HostedControlPlane", hypershiftv1.GroupVersion.WithKind("HostedControlPlane")}, + {"Cluster", clusterGVK}, + {"AWSCluster", awsClusterGVK}, + {"AgentCluster", capiAgentGVK}, + } + + for _, rt := range resourceTypes { + obj := &unstructured.UnstructuredList{} + obj.SetGroupVersionKind(rt.gvk) + + if err := h.Client.List(h.Ctx, obj, &client.ListOptions{Namespace: h.HCPNamespace}); err != nil { + log.Printf("Error listing %s: %v", rt.kind, err) + continue + } + + for _, item := range obj.Items { + if len(item.GetFinalizers()) > 0 { + log.Printf("\tNUKE: Removing finalizers from %s %s", rt.kind, item.GetName()) + item.SetFinalizers([]string{}) + if err := h.Client.Update(h.Ctx, &item); err != nil { + return fmt.Errorf("\tNUKE: Error removing finalizers from %s %s: %v", rt.kind, item.GetName(), err) + } + } + } + } + + return nil +} + +// DeployHCManifest deploys a HostedCluster manifest +func (h *HCHandler) DeployHCManifest(tmpl, provider string, hcName string) (*hypershiftv1.HostedCluster, error) { + log.Printf("Deploying HostedCluster manifest - %s", provider) + // Create the clusters ns + clustersNS := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: ClustersNamespace, + }, + } + + log.Printf("Creating clusters namespace") + err := h.Client.Create(h.Ctx, clustersNS) + if err != nil { + if !apierrors.IsAlreadyExists(err) { + return nil, fmt.Errorf("failed to create clusters namespace: %v", err) + } + } + + log.Printf("Getting pull secret") + pullSecret, err := getPullSecret(h.Ctx, h.Client) + if err != nil { + return nil, fmt.Errorf("failed to get pull secret: %v", err) + } + + 
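// ApplyYAMLTemplate is expected to render each manifest template below with the given values and apply the result; note the pull secret payload is base64-encoded before it is embedded in the Secret manifest. +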
log.Printf("Applying pull secret manifest") + err = ApplyYAMLTemplate(h.Ctx, h.Client, PullSecretManifest, true, map[string]interface{}{ + "HostedClusterName": hcName, + "ClustersNamespace": ClustersNamespace, + "PullSecret": base64.StdEncoding.EncodeToString([]byte(pullSecret)), + }) + if err != nil { + return nil, fmt.Errorf("failed to apply pull secret manifest: %v", err) + } + + log.Printf("Applying encryption key manifest") + err = ApplyYAMLTemplate(h.Ctx, h.Client, EtcdEncryptionKeyManifest, true, map[string]interface{}{ + "HostedClusterName": hcName, + "ClustersNamespace": ClustersNamespace, + "EtcdEncryptionKey": SampleETCDEncryptionKey, + }) + if err != nil { + return nil, fmt.Errorf("failed to apply encryption key manifest: %v", err) + } + + if provider == "Agent" { + log.Printf("Applying capi-provider-role manifest") + err = ApplyYAMLTemplate(h.Ctx, h.Client, CapiProviderRoleManifest, true, map[string]interface{}{ + "ClustersNamespace": ClustersNamespace, + }) + if err != nil { + return nil, fmt.Errorf("failed to apply capi-provider-role manifest from %s: %v", CapiProviderRoleManifest, err) + } + } + + log.Printf("Applying HostedCluster manifest") + err = ApplyYAMLTemplate(h.Ctx, h.Client, tmpl, false, map[string]interface{}{ + "HostedClusterName": hcName, + "ClustersNamespace": ClustersNamespace, + "HCOCPTestImage": h.HCOCPTestImage, + "InfraIDSeed": "test", + }) + if err != nil { + return nil, fmt.Errorf("failed to apply HostedCluster manifest: %v", err) + } + + // Wait for HC to be present + var hc hypershiftv1.HostedCluster + err = wait.PollUntilContextTimeout(h.Ctx, WaitForNextCheckTimeout, Wait10Min, true, func(ctx context.Context) (bool, error) { + err := h.Client.Get(ctx, types.NamespacedName{ + Name: hcName, + Namespace: ClustersNamespace, + }, &hc) + if err != nil { + if !apierrors.IsNotFound(err) && !apierrors.IsTooManyRequests(err) && !apierrors.IsServerTimeout(err) && !apierrors.IsTimeout(err) { + return false, fmt.Errorf("failed to get HostedCluster %s: %v", hcName, err) + } + log.Printf("Error getting HostedCluster %s, retrying...: %v", hcName, err) + return false, nil + } + return true, nil + }) + if err != nil { + return nil, fmt.Errorf("failed waiting for HostedCluster to be present: %v", err) + } + + return &hc, nil +} + +// ValidateETCD validates that the ETCD StatefulSet is ready +func ValidateETCD(ctx context.Context, ocClient client.Client, hcpNamespace string, timeout time.Duration) error { + log.Printf("Validating ETCD StatefulSet with timeout: %v", timeout) + + // Create a separate context for ETCD validation with a longer timeout + etcdCtx, etcdCancel := context.WithTimeout(ctx, timeout) + defer etcdCancel() + + err := wait.PollUntilContextTimeout(etcdCtx, time.Second*10, timeout, true, func(ctx context.Context) (bool, error) { + etcdSts := &appsv1.StatefulSet{} + err := ocClient.Get(ctx, types.NamespacedName{Name: "etcd", Namespace: hcpNamespace}, etcdSts) + if err != nil { + if !apierrors.IsNotFound(err) && !apierrors.IsTooManyRequests(err) && !apierrors.IsServerTimeout(err) && !apierrors.IsTimeout(err) { + log.Printf("ETCD StatefulSet not found yet, waiting...") + return false, fmt.Errorf("failed to get etcd statefulset: %v", err) + } + log.Printf("Error getting etcd statefulset, retrying...: %v", err) + return false, nil + } + if etcdSts.Status.Replicas != etcdSts.Status.ReadyReplicas { + log.Printf("ETCD STS is not ready (Available: %d, Replicas: %d)", etcdSts.Status.ReadyReplicas, etcdSts.Status.Replicas) + return false, nil + } + 
log.Printf("ETCD STS is ready") + return true, nil + }) + if err != nil { + return fmt.Errorf("failed to wait for ETCD StatefulSet: %v", err) + } + return nil +} + +// ValidateDeployments validates that all required deployments are ready +func ValidateDeployments(ctx context.Context, ocClient client.Client, hcpNamespace string, deployments []string, contingencyTimeout time.Duration) error { + for _, depName := range deployments { + log.Printf("Checking deployment: %s", depName) + ready := false + err := wait.PollUntilContextTimeout(ctx, time.Second*10, contingencyTimeout, true, func(ctx context.Context) (bool, error) { + deployment := &appsv1.Deployment{} + err := ocClient.Get(ctx, types.NamespacedName{Name: depName, Namespace: hcpNamespace}, deployment) + if err != nil { + if !apierrors.IsNotFound(err) && !apierrors.IsTooManyRequests(err) && !apierrors.IsServerTimeout(err) && !apierrors.IsTimeout(err) { + return false, fmt.Errorf("failed to get deployment %s: %v", depName, err) + } + log.Printf("Error getting deployment %s: %v", depName, err) + return false, nil + } + if deployment.Status.AvailableReplicas != deployment.Status.Replicas { + log.Printf("Deployment %s is not ready (Available: %d, Replicas: %d)", depName, deployment.Status.AvailableReplicas, deployment.Status.Replicas) + return false, nil + } + ready = true + return true, nil + }) + + if err != nil || !ready { + log.Printf("Deployment %s validation failed", depName) + err := handleDeploymentValidationFailure(ctx, ocClient, hcpNamespace, deployments, contingencyTimeout) + if err != nil { + return fmt.Errorf("deployment %s failed after contingency applied: %v", depName, err) + } + } + } + log.Printf("All deployments validated successfully") + return nil +} + +// ValidateHCP returns a VerificationFunction that checks if the HostedCluster pods are running +func ValidateHCP(timeout time.Duration, contingencyTimeout time.Duration, deployments []string, hcpNamespace string) func(client.Client, string) error { + log.Printf("Starting HCP validation with timeout: %v, contingency timeout: %v", timeout, contingencyTimeout) + + if len(deployments) == 0 { + deployments = RequiredWorkingOperators + } + + if timeout == 0 { + timeout = ValidateHCPTimeout + } + + if contingencyTimeout == 0 { + contingencyTimeout = Wait10Min + } + + return func(ocClient client.Client, _ string) error { + log.Printf("Checking deployments in namespace: %s", hcpNamespace) + + // Create a new context for validation that won't be canceled by the parent context + valCtx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + + // Validate ETCD StatefulSet + if err := ValidateETCD(valCtx, ocClient, hcpNamespace, timeout); err != nil { + return err + } + + // Validate deployments + if err := ValidateDeployments(valCtx, ocClient, hcpNamespace, deployments, contingencyTimeout); err != nil { + return err + } + + return nil + } +} + +// handleDeploymentValidationFailure handles the case when a deployment validation fails +// The function should list all the pods in the HCP namespace and restart them if they are not running. 
+// This is because after the restore of an HCP, the pods got stuck and +func handleDeploymentValidationFailure(ctx context.Context, ocClient client.Client, namespace string, deployments []string, timeout time.Duration) error { + log.Printf("Handling validation failure for deployments in namespace %s", namespace) + // List all pods in the HCP namespace + pods := &corev1.PodList{} + err := ocClient.List(ctx, pods, &client.ListOptions{Namespace: namespace}) + if err != nil { + log.Printf("Error listing pods in namespace %s: %v", namespace, err) + return err + } + + // Delete all non-running pods + for _, pod := range pods.Items { + if pod.Status.Phase != corev1.PodRunning { + log.Printf("Deleting non-running pod %s", pod.Name) + err := ocClient.Delete(ctx, &pod) + if err != nil { + log.Printf("Error deleting pod %s: %v", pod.Name, err) + return err + } + } + } + + // Check if all deployments are ready with timeout + for _, deployment := range deployments { + err := wait.PollUntilContextTimeout(ctx, time.Second*10, timeout, true, func(ctx context.Context) (bool, error) { + dep := &appsv1.Deployment{} + err := ocClient.Get(ctx, types.NamespacedName{Name: deployment, Namespace: namespace}, dep) + if err != nil { + log.Printf("Error getting deployment %s, retrying...: %v", deployment, err) + return false, nil + } + done, err := lib.IsDeploymentReady(ocClient, dep.Namespace, dep.Name)() + if !done || err != nil { + return false, nil + } + + return true, nil + }) + + if err != nil { + return fmt.Errorf("deployment %s is not ready after timeout: %v", deployment, err) + } + } + + return nil +} + +// IsHCPDeleted checks if a HostedControlPlane has been deleted +func IsHCPDeleted(h *HCHandler, hcp *hypershiftv1.HostedControlPlane) bool { + if hcp == nil { + log.Printf("\tNo HCP provided, assuming deleted") + return true + } + log.Printf("\tChecking if HCP %s is deleted...", hcp.Name) + newHCP := &hypershiftv1.HostedControlPlane{} + err := h.Client.Get(h.Ctx, types.NamespacedName{Namespace: hcp.Namespace, Name: hcp.Name}, newHCP, &client.GetOptions{ + Raw: &metav1.GetOptions{}, + }) + if err != nil { + if apierrors.IsNotFound(err) { + log.Printf("\tHCP %s is confirmed deleted", hcp.Name) + return true + } + log.Printf("\tHCP %s deletion check failed with error: %v", hcp.Name, err) + return false + } + log.Printf("\tHCP %s still exists", hcp.Name) + return false +} + +// IsHCDeleted checks if a HostedCluster has been deleted +func IsHCDeleted(h *HCHandler) bool { + if h.HostedCluster == nil { + log.Printf("\tNo HostedCluster provided, assuming deleted") + return true + } + log.Printf("\tChecking if HC %s is deleted...", h.HostedCluster.Name) + newHC := &hypershiftv1.HostedCluster{} + err := h.Client.Get(h.Ctx, types.NamespacedName{Namespace: h.HostedCluster.Namespace, Name: h.HostedCluster.Name}, newHC, &client.GetOptions{ + Raw: &metav1.GetOptions{}, + }) + if err != nil { + if apierrors.IsNotFound(err) { + log.Printf("\tHC %s is confirmed deleted", h.HostedCluster.Name) + return true + } + log.Printf("\tHC %s deletion check failed with error: %v", h.HostedCluster.Name, err) + return false + } + log.Printf("\tHC %s still exists", h.HostedCluster.Name) + return false +} + +// GetHCPNamespace returns the namespace for a HostedControlPlane +func GetHCPNamespace(name, namespace string) string { + return fmt.Sprintf("%s-%s", namespace, name) +} + +// RestartHCPPods restarts the pods for a HostedControlPlane namespace which stays in Init state +func RestartHCPPods(HCPNamespace string, c client.Client) error 
{ + pl := &corev1.PodList{} + err := c.List(context.Background(), pl, &client.ListOptions{Namespace: HCPNamespace}) + if err != nil { + return fmt.Errorf("failed to list pods: %v", err) + } + for _, pod := range pl.Items { + if pod.Status.Phase != corev1.PodRunning { + return fmt.Errorf("pod %s is not running", pod.Name) + } + } + return nil +} diff --git a/tests/e2e/lib/hcp/hcp_test.go b/tests/e2e/lib/hcp/hcp_test.go new file mode 100644 index 0000000000..f48db9f2fb --- /dev/null +++ b/tests/e2e/lib/hcp/hcp_test.go @@ -0,0 +1,863 @@ +package hcp + +import ( + "context" + "testing" + "time" + + "github.com/onsi/gomega" + hypershiftv1 "github.com/openshift/hypershift/api/hypershift/v1beta1" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + "github.com/openshift/oadp-operator/tests/e2e/lib" +) + +func TestRemoveHCP(t *testing.T) { + g := gomega.NewGomegaWithT(t) + + tests := []struct { + name string + hc *hypershiftv1.HostedCluster + hcp *hypershiftv1.HostedControlPlane + namespace *corev1.Namespace + secrets []*corev1.Secret + expectedResult bool + }{ + { + name: "All resources exist", + hc: &hypershiftv1.HostedCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-hc", + Namespace: "clusters", + }, + }, + hcp: &hypershiftv1.HostedControlPlane{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-hc", + Namespace: "clusters-test-hc", + }, + }, + namespace: &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "clusters-test-hc", + }, + }, + secrets: []*corev1.Secret{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "test-hc-pull-secret", + Namespace: "clusters-test-hc", + }, + }, + }, + expectedResult: true, + }, + { + name: "Only HC exists", + hc: &hypershiftv1.HostedCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-hc", + Namespace: "clusters", + }, + }, + expectedResult: true, + }, + { + name: "Only HCP exists", + hcp: &hypershiftv1.HostedControlPlane{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-hc", + Namespace: "clusters-test-hc", + }, + }, + expectedResult: true, + }, + { + name: "Only namespace exists", + namespace: &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "clusters-test-hc", + }, + }, + expectedResult: true, + }, + { + name: "No resources exist", + expectedResult: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Create a new client with the correct scheme + client := fake.NewClientBuilder().WithScheme(lib.Scheme).Build() + + // Create the handler + h := &HCHandler{ + Ctx: context.Background(), + Client: client, + HostedCluster: tt.hc, + } + + // Set HCPNamespace based on either namespace or HCP + if tt.namespace != nil { + h.HCPNamespace = tt.namespace.Name + } else if tt.hcp != nil { + h.HCPNamespace = tt.hcp.Namespace + } + + // Create resources if they exist in the test case + if tt.hc != nil { + err := client.Create(context.Background(), tt.hc) + g.Expect(err).NotTo(gomega.HaveOccurred()) + } + + if tt.hcp != nil { + err := client.Create(context.Background(), tt.hcp) + g.Expect(err).NotTo(gomega.HaveOccurred()) + } + + if tt.namespace != nil { + err := client.Create(context.Background(), tt.namespace) + g.Expect(err).NotTo(gomega.HaveOccurred()) + } + + for _, secret := range tt.secrets { + err := client.Create(context.Background(), secret) + 
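// secrets created here give DeleteHCSecrets something to remove during RemoveHCP +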
g.Expect(err).NotTo(gomega.HaveOccurred()) + } + + // Call RemoveHCP + err := h.RemoveHCP(Wait10Min) + g.Expect(err).NotTo(gomega.HaveOccurred()) + + // Verify resources are deleted + if tt.hc != nil { + hc := &hypershiftv1.HostedCluster{} + err = client.Get(context.Background(), types.NamespacedName{Name: tt.hc.Name, Namespace: tt.hc.Namespace}, hc) + g.Expect(apierrors.IsNotFound(err)).To(gomega.BeTrue()) + } + + if tt.hcp != nil { + hcp := &hypershiftv1.HostedControlPlane{} + err = client.Get(context.Background(), types.NamespacedName{Name: tt.hcp.Name, Namespace: tt.hcp.Namespace}, hcp) + g.Expect(apierrors.IsNotFound(err)).To(gomega.BeTrue()) + } + + if tt.namespace != nil { + ns := &corev1.Namespace{} + err = client.Get(context.Background(), types.NamespacedName{Name: tt.namespace.Name}, ns) + g.Expect(apierrors.IsNotFound(err)).To(gomega.BeTrue()) + } + + for _, secret := range tt.secrets { + s := &corev1.Secret{} + err = client.Get(context.Background(), types.NamespacedName{Name: secret.Name, Namespace: secret.Namespace}, s) + g.Expect(apierrors.IsNotFound(err)).To(gomega.BeTrue()) + } + }) + } +} + +func TestIsHCPDeleted(t *testing.T) { + g := gomega.NewGomegaWithT(t) + + tests := []struct { + name string + hcp *hypershiftv1.HostedControlPlane + expectedResult bool + }{ + { + name: "HCP exists", + hcp: &hypershiftv1.HostedControlPlane{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-hc", + Namespace: "clusters-test-hc", + }, + }, + expectedResult: false, + }, + { + name: "HCP is nil", + expectedResult: true, + }, + { + name: "HCP does not exist", + hcp: &hypershiftv1.HostedControlPlane{ + ObjectMeta: metav1.ObjectMeta{ + Name: "non-existent", + Namespace: "non-existent", + }, + }, + expectedResult: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Create a new client with the correct scheme + client := fake.NewClientBuilder().WithScheme(lib.Scheme).Build() + + // Create the handler + h := &HCHandler{ + Ctx: context.Background(), + Client: client, + } + + // Create HCP if it exists in the test case + if tt.hcp != nil && tt.name != "HCP does not exist" { + err := client.Create(context.Background(), tt.hcp) + g.Expect(err).NotTo(gomega.HaveOccurred()) + } + + // Call IsHCPDeleted + result := IsHCPDeleted(h, tt.hcp) + g.Expect(result).To(gomega.Equal(tt.expectedResult)) + }) + } +} + +func TestIsHCDeleted(t *testing.T) { + g := gomega.NewGomegaWithT(t) + + tests := []struct { + name string + hc *hypershiftv1.HostedCluster + expectedResult bool + }{ + { + name: "HC exists", + hc: &hypershiftv1.HostedCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-hc", + Namespace: "clusters", + }, + }, + expectedResult: false, + }, + { + name: "HC is nil", + expectedResult: true, + }, + { + name: "HC does not exist", + hc: &hypershiftv1.HostedCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "non-existent", + Namespace: "non-existent", + }, + }, + expectedResult: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Create a new client with the correct scheme + client := fake.NewClientBuilder().WithScheme(lib.Scheme).Build() + + // Create the handler + h := &HCHandler{ + Ctx: context.Background(), + Client: client, + } + + // Create HC if it exists in the test case + if tt.hc != nil && tt.name != "HC does not exist" { + h.HostedCluster = tt.hc + err := client.Create(context.Background(), tt.hc) + g.Expect(err).NotTo(gomega.HaveOccurred()) + } + + // Call IsHCDeleted + result := IsHCDeleted(h) + 
g.Expect(result).To(gomega.Equal(tt.expectedResult)) + }) + } +} + +func TestValidateHCP(t *testing.T) { + g := gomega.NewGomegaWithT(t) + hostedClusterName := "test-hc" + hcpNamespace := GetHCPNamespace(hostedClusterName, ClustersNamespace) + + // Define test cases + tests := []struct { + name string + deployments []*appsv1.Deployment + statefulsets []*appsv1.StatefulSet + expectedError bool + }{ + { + name: "All required deployments ready", + deployments: []*appsv1.Deployment{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "kube-apiserver", + Namespace: hcpNamespace, + }, + Status: appsv1.DeploymentStatus{ + AvailableReplicas: 1, + Replicas: 1, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "kube-controller-manager", + Namespace: hcpNamespace, + }, + Status: appsv1.DeploymentStatus{ + AvailableReplicas: 1, + Replicas: 1, + }, + }, + }, + statefulsets: []*appsv1.StatefulSet{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "etcd", + Namespace: hcpNamespace, + }, + Status: appsv1.StatefulSetStatus{ + ReadyReplicas: 1, + Replicas: 1, + }, + }, + }, + expectedError: false, + }, + { + name: "Required deployment not ready", + deployments: []*appsv1.Deployment{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "kube-apiserver", + Namespace: hcpNamespace, + }, + Status: appsv1.DeploymentStatus{ + AvailableReplicas: 0, + Replicas: 1, + }, + }, + }, + statefulsets: []*appsv1.StatefulSet{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "etcd", + Namespace: hcpNamespace, + }, + Status: appsv1.StatefulSetStatus{ + ReadyReplicas: 1, + Replicas: 1, + }, + }, + }, + expectedError: true, + }, + { + name: "ETCD not ready", + deployments: []*appsv1.Deployment{}, + statefulsets: []*appsv1.StatefulSet{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "etcd", + Namespace: hcpNamespace, + }, + Status: appsv1.StatefulSetStatus{ + ReadyReplicas: 0, + Replicas: 1, + }, + }, + }, + expectedError: true, + }, + { + name: "Deployment not found, accessing the handleDeploymentValidationFailure function", + deployments: []*appsv1.Deployment{}, + statefulsets: []*appsv1.StatefulSet{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "etcd", + Namespace: hcpNamespace, + }, + Status: appsv1.StatefulSetStatus{ + ReadyReplicas: 1, + Replicas: 1, + }, + }, + }, + expectedError: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Create a new scheme and client for each test case + scheme := runtime.NewScheme() + err := appsv1.AddToScheme(scheme) + g.Expect(err).NotTo(gomega.HaveOccurred()) + err = corev1.AddToScheme(scheme) + g.Expect(err).NotTo(gomega.HaveOccurred()) + + // Combine deployments and statefulsets into a single slice of objects + objects := make([]client.Object, 0) + for _, deployment := range tt.deployments { + objects = append(objects, deployment) + } + for _, statefulset := range tt.statefulsets { + objects = append(objects, statefulset) + } + + client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(objects...).Build() + + // Run the validation function with both timeouts set to 5 seconds for testing + validateFunc := ValidateHCP(5*time.Second, 5*time.Second, []string{"kube-apiserver", "kube-controller-manager"}, hcpNamespace) + err = validateFunc(client, "") + + // Check results + if tt.expectedError { + g.Expect(err).To(gomega.HaveOccurred()) + } else { + g.Expect(err).NotTo(gomega.HaveOccurred()) + } + }) + } +} + +func TestWaitForHCPDeletion(t *testing.T) { + // Register Gomega fail handler + gomega.RegisterTestingT(t) + g := gomega.NewGomegaWithT(t) + + tests := []struct { + 
name string + hcp *hypershiftv1.HostedControlPlane + createObj bool + deleteObj bool + timeout time.Duration + deleteDelay time.Duration + expectedError bool + errorContains string + }{ + { + name: "HCP already deleted", + hcp: &hypershiftv1.HostedControlPlane{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-hc", + Namespace: "clusters-test-hc", + }, + }, + createObj: false, + deleteObj: false, + timeout: Wait10Min, + deleteDelay: 0, + expectedError: false, + }, + { + name: "HCP deleted during wait", + hcp: &hypershiftv1.HostedControlPlane{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-hc", + Namespace: "clusters-test-hc", + }, + }, + createObj: true, + deleteObj: true, + timeout: Wait10Min, + deleteDelay: WaitForNextCheckTimeout, + expectedError: false, + }, + { + name: "HCP not deleted within timeout", + hcp: &hypershiftv1.HostedControlPlane{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-hc", + Namespace: "clusters-test-hc", + }, + }, + createObj: true, + deleteObj: false, + timeout: time.Second * 2, + deleteDelay: 0, + expectedError: true, + }, + { + name: "HCP with finalizers not deleted", + hcp: &hypershiftv1.HostedControlPlane{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-hc", + Namespace: "clusters-test-hc", + Finalizers: []string{"test-finalizer"}, + }, + }, + createObj: true, + deleteObj: true, + timeout: time.Second * 2, + deleteDelay: WaitForNextCheckTimeout, + expectedError: true, + }, + { + name: "HCP is nil", + hcp: nil, + createObj: false, + deleteObj: false, + timeout: Wait10Min, + deleteDelay: 0, + expectedError: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Create a new scheme with HostedControlPlane registered + scheme := runtime.NewScheme() + err := hypershiftv1.AddToScheme(scheme) + g.Expect(err).NotTo(gomega.HaveOccurred()) + + // Create a new client with the scheme + client := fake.NewClientBuilder().WithScheme(scheme).Build() + + // Create a context with timeout + ctx, cancel := context.WithTimeout(context.Background(), tt.timeout) + defer cancel() + + // Create the handler + h := &HCHandler{ + Ctx: ctx, + Client: client, + } + + // Create HCP if needed + if tt.createObj { + err := client.Create(context.Background(), tt.hcp) + g.Expect(err).NotTo(gomega.HaveOccurred()) + } + + // Start deletion in background if needed + if tt.deleteObj { + go func() { + time.Sleep(tt.deleteDelay) + err := client.Delete(context.Background(), tt.hcp) + g.Expect(err).NotTo(gomega.HaveOccurred()) + }() + } + + // Call WaitForHCPDeletion + err = h.WaitForHCPDeletion(tt.hcp) + + // Check results + if tt.expectedError { + g.Expect(err).To(gomega.HaveOccurred()) + } else { + g.Expect(err).NotTo(gomega.HaveOccurred()) + } + }) + } +} + +func TestHandleDeploymentValidationFailure(t *testing.T) { + g := gomega.NewGomegaWithT(t) + namespace := "test-namespace" + deployments := []string{"test-deployment"} + timeout := 5 * time.Second + + tests := []struct { + name string + pods []*corev1.Pod + deployments []*appsv1.Deployment + expectedError bool + }{ + { + name: "Non-running pods are deleted and deployments become ready", + pods: []*corev1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "stuck-pod", + Namespace: namespace, + }, + Status: corev1.PodStatus{ + Phase: corev1.PodPending, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "running-pod", + Namespace: namespace, + }, + Status: corev1.PodStatus{ + Phase: corev1.PodRunning, + }, + }, + }, + deployments: []*appsv1.Deployment{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: 
"test-deployment", + Namespace: namespace, + }, + Status: appsv1.DeploymentStatus{ + Replicas: 1, + AvailableReplicas: 1, + }, + }, + }, + expectedError: false, + }, + { + name: "Deployment not ready after timeout", + pods: []*corev1.Pod{}, + deployments: []*appsv1.Deployment{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "test-deployment", + Namespace: namespace, + }, + Status: appsv1.DeploymentStatus{ + Replicas: 1, + AvailableReplicas: 0, + }, + }, + }, + expectedError: true, + }, + { + name: "No pods or deployments found", + pods: []*corev1.Pod{}, + deployments: []*appsv1.Deployment{}, + expectedError: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Create a new scheme and client for each test case + scheme := runtime.NewScheme() + err := corev1.AddToScheme(scheme) + g.Expect(err).NotTo(gomega.HaveOccurred()) + err = appsv1.AddToScheme(scheme) + g.Expect(err).NotTo(gomega.HaveOccurred()) + + // Create objects for the fake client + objects := []client.Object{} + for _, pod := range tt.pods { + objects = append(objects, pod) + } + for _, deployment := range tt.deployments { + objects = append(objects, deployment) + } + + // Create fake client with objects + fakeClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(objects...). + Build() + + // Create context with timeout + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + + // Call the function + err = handleDeploymentValidationFailure(ctx, fakeClient, namespace, deployments, timeout) + + // Check results + if tt.expectedError { + g.Expect(err).To(gomega.HaveOccurred()) + } else { + g.Expect(err).NotTo(gomega.HaveOccurred()) + } + + // Verify pods were deleted if necessary + for _, pod := range tt.pods { + if pod.Status.Phase != corev1.PodRunning { + err := fakeClient.Get(ctx, types.NamespacedName{ + Name: pod.Name, + Namespace: pod.Namespace, + }, &corev1.Pod{}) + g.Expect(apierrors.IsNotFound(err)).To(gomega.BeTrue()) + } + } + }) + } +} + +func TestValidateETCD(t *testing.T) { + g := gomega.NewGomegaWithT(t) + hostedClusterName := "test-hc" + hcpNamespace := GetHCPNamespace(hostedClusterName, ClustersNamespace) + timeout := 5 * time.Second + + // Define test cases + tests := []struct { + name string + statefulsets []*appsv1.StatefulSet + expectedError bool + }{ + { + name: "ETCD ready", + statefulsets: []*appsv1.StatefulSet{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "etcd", + Namespace: hcpNamespace, + }, + Status: appsv1.StatefulSetStatus{ + ReadyReplicas: 1, + Replicas: 1, + }, + }, + }, + expectedError: false, + }, + { + name: "ETCD not ready", + statefulsets: []*appsv1.StatefulSet{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "etcd", + Namespace: hcpNamespace, + }, + Status: appsv1.StatefulSetStatus{ + ReadyReplicas: 0, + Replicas: 1, + }, + }, + }, + expectedError: true, + }, + { + name: "ETCD not found", + statefulsets: []*appsv1.StatefulSet{}, + expectedError: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Create a new scheme and client for each test case + scheme := runtime.NewScheme() + err := appsv1.AddToScheme(scheme) + g.Expect(err).NotTo(gomega.HaveOccurred()) + + // Create objects for the fake client + objects := []client.Object{} + for _, statefulset := range tt.statefulsets { + objects = append(objects, statefulset) + } + + // Create fake client with objects + fakeClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(objects...). 
+ Build() + + // Create context with timeout + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + + // Call the function + err = ValidateETCD(ctx, fakeClient, hcpNamespace, timeout) + + // Check results + if tt.expectedError { + g.Expect(err).To(gomega.HaveOccurred()) + } else { + g.Expect(err).NotTo(gomega.HaveOccurred()) + } + }) + } +} + +func TestValidateDeployments(t *testing.T) { + g := gomega.NewGomegaWithT(t) + namespace := "test-namespace" + deployments := []string{"test-deployment"} + timeout := 5 * time.Second + + tests := []struct { + name string + deployments []*appsv1.Deployment + expectedError bool + }{ + { + name: "All deployments ready", + deployments: []*appsv1.Deployment{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "test-deployment", + Namespace: namespace, + }, + Status: appsv1.DeploymentStatus{ + AvailableReplicas: 1, + Replicas: 1, + }, + }, + }, + expectedError: false, + }, + { + name: "Deployment not ready", + deployments: []*appsv1.Deployment{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "test-deployment", + Namespace: namespace, + }, + Status: appsv1.DeploymentStatus{ + AvailableReplicas: 0, + Replicas: 1, + }, + }, + }, + expectedError: true, + }, + { + name: "Deployment not found", + deployments: []*appsv1.Deployment{}, + expectedError: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Create a new scheme and client for each test case + scheme := runtime.NewScheme() + err := appsv1.AddToScheme(scheme) + g.Expect(err).NotTo(gomega.HaveOccurred()) + err = corev1.AddToScheme(scheme) + g.Expect(err).NotTo(gomega.HaveOccurred()) + + // Create objects for the fake client + objects := []client.Object{} + for _, deployment := range tt.deployments { + objects = append(objects, deployment) + } + + // Create fake client with objects + fakeClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(objects...). 
+ Build() + + // Create context with timeout + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + + // Call the function + err = ValidateDeployments(ctx, fakeClient, namespace, deployments, timeout) + + // Check results + if tt.expectedError { + g.Expect(err).To(gomega.HaveOccurred()) + } else { + g.Expect(err).NotTo(gomega.HaveOccurred()) + } + }) + } +} diff --git a/tests/e2e/lib/hcp/mce.go b/tests/e2e/lib/hcp/mce.go new file mode 100644 index 0000000000..8df1b39a90 --- /dev/null +++ b/tests/e2e/lib/hcp/mce.go @@ -0,0 +1,152 @@ +package hcp + +import ( + "context" + "fmt" + "log" + + operatorsv1 "github.com/operator-framework/api/pkg/operators/v1" + operatorsv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +const ( + // MCE related constants + MCEOperatorNamespace = "multicluster-engine" + MCEOperatorGroupName = "multicluster-engine" + MCESubscriptionName = "multicluster-engine" +) + +// DeleteMCEOperand deletes the MCE operand +func (h *HCHandler) DeleteMCEOperand() error { + log.Printf("Deleting MCE operand %s", MCEOperandName) + mce := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "kind": "MultiClusterEngine", + "apiVersion": mceGVR.GroupVersion().String(), + "metadata": map[string]interface{}{ + "name": MCEOperandName, + "namespace": MCENamespace, + }, + }, + } + return h.deleteResource(mce) +} + +// DeleteMCEOperatorGroup deletes the MCE operator group +func (h *HCHandler) DeleteMCEOperatorGroup() error { + log.Printf("Deleting MCE operator group %s", MCEOperatorGroup) + og := &operatorsv1.OperatorGroup{ + ObjectMeta: metav1.ObjectMeta{ + Name: MCEOperatorGroup, + Namespace: MCENamespace, + }, + } + return h.deleteResource(og) +} + +// DeleteMCESubscription deletes the MCE subscription +func (h *HCHandler) DeleteMCESubscription() error { + log.Printf("Deleting MCE subscription %s", MCEOperatorName) + sub := &operatorsv1alpha1.Subscription{ + ObjectMeta: metav1.ObjectMeta{ + Name: MCEOperatorName, + Namespace: MCENamespace, + }, + } + return h.deleteResource(sub) +} + +// RemoveMCE removes the MCE operand, operator group, and subscription +func (h *HCHandler) RemoveMCE() error { + log.Printf("Removing MCE resources") + + // Delete MCE operand + if err := h.DeleteMCEOperand(); err != nil && !apierrors.IsNotFound(err) { + return fmt.Errorf("failed to delete MCE operand: %v", err) + } + + // Delete MCE operator group + if err := h.DeleteMCEOperatorGroup(); err != nil && !apierrors.IsNotFound(err) { + return fmt.Errorf("failed to delete MCE operator group: %v", err) + } + + // Delete MCE subscription + if err := h.DeleteMCESubscription(); err != nil && !apierrors.IsNotFound(err) { + return fmt.Errorf("failed to delete MCE subscription: %v", err) + } + + // Wait for MCE operand to be deleted + mce := &unstructured.Unstructured{} + mce.SetGroupVersionKind(mceGVR.GroupVersion().WithKind("MultiClusterEngine")) + mce.SetName(MCEOperandName) + mce.SetNamespace(MCENamespace) + + err := wait.PollUntilContextTimeout(h.Ctx, WaitForNextCheckTimeout, Wait10Min, true, func(ctx context.Context) (bool, error) { + if err := h.Client.Get(ctx, types.NamespacedName{Name: MCEOperandName, Namespace: MCENamespace}, mce); err != nil { + if 
!apierrors.IsNotFound(err) && !apierrors.IsTooManyRequests(err) && !apierrors.IsServerTimeout(err) && !apierrors.IsTimeout(err) { + return false, fmt.Errorf("failed to get MCE operand: %v", err) + } + log.Printf("Error getting MCE operand, retrying...: %v", err) + return false, nil + } + return true, nil + }) + if err != nil { + return fmt.Errorf("failed waiting for MCE operand deletion: %v", err) + } + + return nil +} + +func (op *HCHandler) DeployMCEManifest() error { + log.Printf("Checking MCE manifest") + + // Create an unstructured object to check if the MCE operand exists + mce := &unstructured.Unstructured{} + mce.SetGroupVersionKind(mceGVR.GroupVersion().WithKind("MultiClusterEngine")) + mce.SetName(MCEOperandName) + mce.SetNamespace(MCENamespace) + + if err := op.Client.Get(op.Ctx, types.NamespacedName{Name: MCEOperandName, Namespace: MCENamespace}, mce); err != nil { + if apierrors.IsNotFound(err) { + log.Printf("Creating MCE manifest") + err = ApplyYAMLTemplate(op.Ctx, op.Client, MCEOperandManifest, true, map[string]interface{}{ + "MCEOperandName": MCEOperandName, + "MCEOperandNamespace": MCENamespace, + }) + if err != nil { + return fmt.Errorf("failed to apply mce-operand from %s: %v", MCEOperandManifest, err) + } + } + } + + return nil +} + +func (h *HCHandler) IsMCEDeployed() bool { + log.Printf("Checking if MCE deployment is finished...") + mcePods := &corev1.PodList{} + err := h.Client.List(h.Ctx, mcePods, client.InNamespace(MCENamespace)) + if err != nil { + return false + } + + if len(mcePods.Items) == 0 { + return false + } + + for _, pod := range mcePods.Items { + if pod.Status.Phase != corev1.PodRunning { + return false + } + } + + return true +} diff --git a/tests/e2e/lib/hcp/mce_test.go b/tests/e2e/lib/hcp/mce_test.go new file mode 100644 index 0000000000..358674a23e --- /dev/null +++ b/tests/e2e/lib/hcp/mce_test.go @@ -0,0 +1,167 @@ +package hcp + +import ( + "context" + "testing" + + "github.com/onsi/gomega" + operatorsv1 "github.com/operator-framework/api/pkg/operators/v1" + operatorsv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + oadpv1alpha1 "github.com/openshift/oadp-operator/api/v1alpha1" +) + +// createTestScheme creates a new scheme with all required types registered +func createTestScheme() *runtime.Scheme { + scheme := runtime.NewScheme() + corev1.AddToScheme(scheme) + oadpv1alpha1.AddToScheme(scheme) + operatorsv1.AddToScheme(scheme) + operatorsv1alpha1.AddToScheme(scheme) + return scheme +} + +func TestRemoveMCE(t *testing.T) { + // Register Gomega fail handler + gomega.RegisterTestingT(t) + g := gomega.NewGomegaWithT(t) + + tests := []struct { + Name string + MCE *unstructured.Unstructured + OperatorGroup *operatorsv1.OperatorGroup + Subscription *operatorsv1alpha1.Subscription + ExpectedResult bool + }{ + { + Name: "All resources exist", + MCE: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "kind": "MultiClusterEngine", + "apiVersion": mceGVR.GroupVersion().String(), + "metadata": map[string]interface{}{ + "name": MCEOperandName, + "namespace": MCENamespace, + }, + }, + }, + OperatorGroup: &operatorsv1.OperatorGroup{ + ObjectMeta: metav1.ObjectMeta{ + Name: MCEOperatorGroup, + Namespace: MCENamespace, + }, + }, + 
Subscription: &operatorsv1alpha1.Subscription{ + ObjectMeta: metav1.ObjectMeta{ + Name: MCEOperatorName, + Namespace: MCENamespace, + }, + }, + ExpectedResult: true, + }, + { + Name: "Only MCE exists", + MCE: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "kind": "MultiClusterEngine", + "apiVersion": mceGVR.GroupVersion().String(), + "metadata": map[string]interface{}{ + "name": MCEOperandName, + "namespace": MCENamespace, + }, + }, + }, + OperatorGroup: nil, + Subscription: nil, + ExpectedResult: true, + }, + { + Name: "Only OperatorGroup exists", + MCE: nil, + OperatorGroup: &operatorsv1.OperatorGroup{ + ObjectMeta: metav1.ObjectMeta{ + Name: MCEOperatorGroup, + Namespace: MCENamespace, + }, + }, + Subscription: nil, + ExpectedResult: true, + }, + { + Name: "Only Subscription exists", + MCE: nil, + OperatorGroup: nil, + Subscription: &operatorsv1alpha1.Subscription{ + ObjectMeta: metav1.ObjectMeta{ + Name: MCEOperatorName, + Namespace: MCENamespace, + }, + }, + ExpectedResult: true, + }, + { + Name: "No resources exist", + MCE: nil, + OperatorGroup: nil, + Subscription: nil, + ExpectedResult: true, + }, + } + + for _, tt := range tests { + t.Run(tt.Name, func(t *testing.T) { + // Create a new scheme and client for each test case + scheme := createTestScheme() + client := fake.NewClientBuilder().WithScheme(scheme).Build() + + // Create the handler + h := &HCHandler{ + Ctx: context.Background(), + Client: client, + } + + // Create resources if they exist in the test case + if tt.MCE != nil { + // Set the GVK for the MCE using the constant + tt.MCE.SetGroupVersionKind(mceGVR.GroupVersion().WithKind("MultiClusterEngine")) + err := client.Create(context.Background(), tt.MCE) + g.Expect(err).NotTo(gomega.HaveOccurred()) + } + + if tt.OperatorGroup != nil { + err := client.Create(context.Background(), tt.OperatorGroup) + g.Expect(err).NotTo(gomega.HaveOccurred()) + } + + if tt.Subscription != nil { + err := client.Create(context.Background(), tt.Subscription) + g.Expect(err).NotTo(gomega.HaveOccurred()) + } + + // Call RemoveMCE + err := h.RemoveMCE() + g.Expect(err).NotTo(gomega.HaveOccurred()) + + // Verify resources are deleted + mce := &unstructured.Unstructured{} + mce.SetGroupVersionKind(mceGVR.GroupVersion().WithKind("MultiClusterEngine")) + err = client.Get(context.Background(), types.NamespacedName{Name: MCEOperandName, Namespace: MCENamespace}, mce) + g.Expect(apierrors.IsNotFound(err)).To(gomega.BeTrue()) + + og := &operatorsv1.OperatorGroup{} + err = client.Get(context.Background(), types.NamespacedName{Name: MCEOperatorGroup, Namespace: MCENamespace}, og) + g.Expect(apierrors.IsNotFound(err)).To(gomega.BeTrue()) + + sub := &operatorsv1alpha1.Subscription{} + err = client.Get(context.Background(), types.NamespacedName{Name: MCEOperatorName, Namespace: MCENamespace}, sub) + g.Expect(apierrors.IsNotFound(err)).To(gomega.BeTrue()) + }) + } +} diff --git a/tests/e2e/lib/hcp/types.go b/tests/e2e/lib/hcp/types.go new file mode 100644 index 0000000000..2d7caab4f1 --- /dev/null +++ b/tests/e2e/lib/hcp/types.go @@ -0,0 +1,159 @@ +package hcp + +import ( + "context" + "path/filepath" + "time" + + hypershiftv1 "github.com/openshift/hypershift/api/hypershift/v1beta1" + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// Constants +const ( + MCEName = "multicluster-engine" + MCENamespace = "multicluster-engine" + MCEOperatorName = "multicluster-engine-operator" + MCEOperatorGroup = "multicluster-engine-operatorgroup" + HONamespace = 
"hypershift" + HypershiftOperatorName = "operator" + OCPMarketplaceNamespace = "openshift-marketplace" + RHOperatorsNamespace = "redhat-operators" + MCEOperandName = "mce-operand" + + ClustersNamespace = "clusters" + HostedClusterPrefix = "test-hc" + SampleETCDEncryptionKey = "7o9RQL/BlcNrBWfNBVrJg55oKrDDaDu2kfoULl9MNIE=" + HCOCPTestImage = "quay.io/openshift-release-dev/ocp-release:4.18.6-multi" +) + +// Template paths +var ( + MCEOperandManifest = filepath.Join(getProjectRoot(), "tests/e2e/sample-applications/hostedcontrolplanes/mce/mce-operand.yaml") + HCPNoneManifest = filepath.Join(getProjectRoot(), "tests/e2e/sample-applications/hostedcontrolplanes/hypershift/hostedcluster-none.yaml") + HCPAgentManifest = filepath.Join(getProjectRoot(), "tests/e2e/sample-applications/hostedcontrolplanes/hypershift/hostedcluster-agent.yaml") + PullSecretManifest = filepath.Join(getProjectRoot(), "tests/e2e/sample-applications/hostedcontrolplanes/hypershift/hostedcluster-pull-secret.yaml") + EtcdEncryptionKeyManifest = filepath.Join(getProjectRoot(), "tests/e2e/sample-applications/hostedcontrolplanes/hypershift/hostedcluster-etcd-enc-key.yaml") + CapiProviderRoleManifest = filepath.Join(getProjectRoot(), "tests/e2e/sample-applications/hostedcontrolplanes/hypershift/hostedcluster-agent-capi-role.yaml") +) + +// Global variables +var ( + packageManifestGVR = schema.GroupVersionResource{ + Group: "packages.operators.coreos.com", + Version: "v1", + Resource: "packagemanifests", + } + + mceGVR = schema.GroupVersionResource{ + Group: "multicluster.openshift.io", + Version: "v1", + Resource: "multiclusterengines", + } + + capiAgentGVK = schema.GroupVersionKind{ + Group: "capi-provider.agent-install.openshift.io", + Version: "v1beta1", + Kind: "AgentCluster", + } + + awsClusterGVK = schema.GroupVersionKind{ + Group: "infrastructure.cluster.x-k8s.io", + Version: "v1beta2", + Kind: "AWSCluster", + } + + clusterGVK = schema.GroupVersionKind{ + Group: "cluster.x-k8s.io", + Version: "v1beta1", + Kind: "Cluster", + } + + RequiredWorkingOperators = []string{ + "cluster-api", + "control-plane-operator", + "kube-apiserver", + "kube-controller-manager", + "kube-scheduler", + "ignition-server", + "cluster-image-registry-operator", + "cluster-network-operator", + "cluster-node-tuning-operator", + "cluster-policy-controller", + "cluster-storage-operator", + "cluster-version-operator", + "control-plane-pki-operator", + "dns-operator", + "hosted-cluster-config-operator", + "ignition-server-proxy", + "konnectivity-agent", + "machine-approver", + "oauth-openshift", + "openshift-apiserver", + "openshift-controller-manager", + "openshift-oauth-apiserver", + "openshift-route-controller-manager", + } + + HCPIncludedNamespaces = []string{ + ClustersNamespace, + } + + HCPIncludedResources = []string{ + "sa", + "role", + "rolebinding", + "pod", + "pvc", + "pv", + "configmap", + "priorityclasses", + "pdb", + "hostedcluster", + "nodepool", + "secrets", + "services", + "deployments", + "statefulsets", + "hostedcontrolplane", + "cluster", + "awscluster", + "awsmachinetemplate", + "awsmachine", + "machinedeployment", + "machineset", + "machine", + "route", + "clusterdeployment", + } + + HCPExcludedResources = []string{} + + HCPErrorIgnorePatterns = []string{ + "-error-template", + } + + // Timeout constants + Wait10Min = 10 * time.Minute + WaitForNextCheckTimeout = 10 * time.Second + ValidateHCPTimeout = 25 * time.Minute + HCPBackupTimeout = 30 * time.Minute +) + +// HCHandler handles operations related to HostedClusters +type HCHandler 
struct { + Ctx context.Context + Client client.Client + HCOCPTestImage string + HCPNamespace string + HostedCluster *hypershiftv1.HostedCluster +} + +type RequiredOperator struct { + Name string + Namespace string + Channel string + Csv string + OperatorGroup string +} diff --git a/tests/e2e/lib/hcp/utils.go b/tests/e2e/lib/hcp/utils.go new file mode 100644 index 0000000000..342e13186f --- /dev/null +++ b/tests/e2e/lib/hcp/utils.go @@ -0,0 +1,281 @@ +package hcp + +import ( + "bytes" + "context" + "fmt" + "log" + "os" + "path/filepath" + "runtime" + "strings" + "text/template" + "time" + + operatorsv1 "github.com/operator-framework/api/pkg/operators/v1" + operatorsv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/serializer/yaml" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// InstallRequiredOperators installs the required operators and returns a new HCHandler +func InstallRequiredOperators(ctx context.Context, c client.Client, reqOperators []RequiredOperator) (*HCHandler, error) { + for _, op := range reqOperators { + log.Printf("Installing operator %s", op.Name) + err := op.InstallOperator(ctx, c) + if err != nil { + return nil, fmt.Errorf("failed to install operator %s: %v", op.Name, err) + } + } + + return &HCHandler{ + Ctx: ctx, + Client: c, + HCOCPTestImage: HCOCPTestImage, + }, nil +} + +// InstallOperator installs a specific operator +func (op *RequiredOperator) InstallOperator(ctx context.Context, c client.Client) error { + log.Printf("Getting PackageManifest for operator %s", op.Name) + + // Create an unstructured object for the PackageManifest + pkg := &unstructured.Unstructured{} + pkg.SetGroupVersionKind(packageManifestGVR.GroupVersion().WithKind("PackageManifest")) + pkg.SetName(op.Name) + pkg.SetNamespace(op.Namespace) + + err := c.Get(ctx, types.NamespacedName{Name: op.Name, Namespace: op.Namespace}, pkg) + if err != nil { + return fmt.Errorf("failed to get PackageManifest for operator %s: %v", op.Name, err) + } + + log.Printf("Checking namespace for operator %s", op.Name) + ns := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: op.Namespace, + }, + } + + tempNS := &corev1.Namespace{} + if err := c.Get(ctx, types.NamespacedName{Name: op.Namespace}, tempNS); err != nil { + if apierrors.IsNotFound(err) { + log.Printf("Creating namespace for operator %s", op.Name) + // Create the namespace if it doesn't exist + err = c.Create(ctx, ns) + if err != nil { + return fmt.Errorf("failed to create namespace %s: %v", op.Namespace, err) + } + } else { + return fmt.Errorf("failed to get namespace %s: %v", op.Namespace, err) + } + } + + opGroup := &operatorsv1.OperatorGroup{ + ObjectMeta: metav1.ObjectMeta{ + Name: op.OperatorGroup, + Namespace: op.Namespace, + }, + Spec: operatorsv1.OperatorGroupSpec{ + TargetNamespaces: []string{op.Namespace}, + }, + } + + log.Printf("Checking operator group for operator %s", op.Name) + tempOpGroup := &operatorsv1.OperatorGroup{} + if err := c.Get(ctx, types.NamespacedName{Name: op.OperatorGroup, Namespace: op.Namespace}, tempOpGroup); err != nil { + if apierrors.IsNotFound(err) { + log.Printf("Creating operator group for operator %s", op.Name) + // Create the operator group + err = c.Create(ctx, opGroup) + if err 
!= nil { + return fmt.Errorf("failed to create operator group %s: %v", op.OperatorGroup, err) + } + } else { + return fmt.Errorf("failed to get operator group %s: %v", op.OperatorGroup, err) + } + } + + // Create the subscription + subscription := &operatorsv1alpha1.Subscription{ + ObjectMeta: metav1.ObjectMeta{ + Name: op.Name, + Namespace: op.Namespace, + }, + Spec: &operatorsv1alpha1.SubscriptionSpec{ + CatalogSource: RHOperatorsNamespace, + CatalogSourceNamespace: OCPMarketplaceNamespace, + Package: op.Name, + InstallPlanApproval: operatorsv1alpha1.ApprovalAutomatic, + }, + } + + // If a channel is specified, use it + if op.Channel != "" { + subscription.Spec.Channel = op.Channel + } else { + // Get the default channel from the PackageManifest + defaultChannel, ok, err := unstructured.NestedString(pkg.UnstructuredContent(), "status", "defaultChannel") + if err != nil { + return fmt.Errorf("failed to get default channel from PackageManifest: %v", err) + } + if !ok || defaultChannel == "" { + return fmt.Errorf("no default channel found in PackageManifest for operator %s", op.Name) + } + subscription.Spec.Channel = defaultChannel + } + + // If a CSV is specified, use it + if op.Csv != "" { + subscription.Spec.StartingCSV = op.Csv + } + + log.Printf("Checking subscription for operator %s", op.Name) + tempSub := &operatorsv1alpha1.Subscription{} + if err := c.Get(ctx, types.NamespacedName{Name: op.Name, Namespace: op.Namespace}, tempSub); err != nil { + if apierrors.IsNotFound(err) { + log.Printf("Creating subscription for operator %s", op.Name) + err = c.Create(ctx, subscription) + if err != nil { + return fmt.Errorf("failed to create subscription for operator %s: %v", op.Name, err) + } + } else { + return fmt.Errorf("failed to get subscription for operator %s: %v", op.Name, err) + } + } + + return nil +} + +// WaitForUnstructuredObject waits for an unstructured object to be deleted +func WaitForUnstructuredObject(ctx context.Context, c client.Client, obj *unstructured.Unstructured, timeout time.Duration) error { + return wait.PollUntilContextTimeout(ctx, WaitForNextCheckTimeout, timeout, true, func(ctx context.Context) (bool, error) { + log.Printf("\tWaiting for object %s in namespace %s to be deleted...", obj.GetName(), obj.GetNamespace()) + newObj := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "kind": obj.GetKind(), + "apiVersion": obj.GetAPIVersion(), + "metadata": map[string]interface{}{ + "name": obj.GetName(), + "namespace": obj.GetNamespace(), + }, + }, + } + err := c.Get(ctx, types.NamespacedName{Name: obj.GetName(), Namespace: obj.GetNamespace()}, newObj) + log.Printf("\tObject %s exists in namespace %s: %v", obj.GetName(), obj.GetNamespace(), err) + return apierrors.IsNotFound(err), nil + }) +} + +// ApplyYAMLTemplate reads a YAML template file, renders it with the given data, and applies it using the client +func ApplyYAMLTemplate(ctx context.Context, c client.Client, manifestPath string, override bool, data interface{}) error { + // Read the manifest + log.Printf("\tReading YAML template %s", filepath.Base(manifestPath)) + manifest, err := os.ReadFile(manifestPath) + if err != nil { + return fmt.Errorf("failed to read manifest from %s: %v", manifestPath, err) + } + + // Parse the manifest + log.Printf("\tParsing manifest %s", filepath.Base(manifestPath)) + tmpl, err := template.New("manifest").Parse(string(manifest)) + if err != nil { + return fmt.Errorf("failed to parse manifest from %s: %v", manifestPath, err) + } + + // Execute the manifest + 
log.Printf("\tExecuting manifest %s", filepath.Base(manifestPath)) + var buf bytes.Buffer + if err := tmpl.Execute(&buf, data); err != nil { + return fmt.Errorf("failed to execute manifest from %s: %v", manifestPath, err) + } + + // Create a decoder for YAML + decoder := yaml.NewDecodingSerializer(unstructured.UnstructuredJSONScheme) + + // Decode the YAML into an unstructured object + log.Printf("\tDecoding YAML %s", filepath.Base(manifestPath)) + obj := &unstructured.Unstructured{} + _, _, err = decoder.Decode(buf.Bytes(), nil, obj) + if err != nil { + return fmt.Errorf("failed to decode YAML from %s: %v", manifestPath, err) + } + + // Apply the object using the client + log.Printf("\tApplying object %s", filepath.Base(manifestPath)) + err = c.Create(ctx, obj) + if err != nil { + if override && apierrors.IsAlreadyExists(err) { + log.Printf("\tObject already exists, overriding...") + err = c.Update(ctx, obj) + if err != nil { + return fmt.Errorf("failed to update object from %s: %v", manifestPath, err) + } + } else { + return fmt.Errorf("failed to create object from %s: %v", manifestPath, err) + } + } + + log.Printf("\tObject applied successfully") + + return nil +} + +// getPullSecret gets the pull secret from the openshift-config namespace +func getPullSecret(ctx context.Context, c client.Client) (string, error) { + secret := &corev1.Secret{} + err := c.Get(ctx, types.NamespacedName{Name: "pull-secret", Namespace: "openshift-config"}, secret) + if err != nil { + return "", fmt.Errorf("failed to get pull secret: %v", err) + } + if secret.Data == nil || len(secret.Data) == 0 { + return "", fmt.Errorf("pull secret data is empty") + } + dockerConfig, ok := secret.Data[".dockerconfigjson"] + if !ok { + return "", fmt.Errorf("pull secret does not contain .dockerconfigjson key") + } + return string(dockerConfig), nil +} + +// FilterErrorLogs filters out error logs based on predefined patterns +func FilterErrorLogs(logs []string) []string { + filteredLogs := []string{} + for _, logEntry := range logs { + shouldInclude := true + for _, pattern := range HCPErrorIgnorePatterns { + if strings.Contains(logEntry, pattern) { + shouldInclude = false + break + } + } + if shouldInclude { + filteredLogs = append(filteredLogs, logEntry) + } + } + return filteredLogs +} + +// deleteResource deletes a Kubernetes resource with grace period 0 +func (h *HCHandler) deleteResource(obj client.Object) error { + deleteOptions := &client.DeleteOptions{ + GracePeriodSeconds: ptr.To(int64(0)), + } + return h.Client.Delete(h.Ctx, obj, deleteOptions) +} + +// getProjectRoot returns the absolute path to the project root +func getProjectRoot() string { + _, filename, _, ok := runtime.Caller(0) + if !ok { + return "" + } + return filepath.Dir(filepath.Dir(filepath.Dir(filepath.Dir(filepath.Dir(filename))))) +} diff --git a/tests/e2e/lib/hcp/utils_test.go b/tests/e2e/lib/hcp/utils_test.go new file mode 100644 index 0000000000..0df821329c --- /dev/null +++ b/tests/e2e/lib/hcp/utils_test.go @@ -0,0 +1,578 @@ +package hcp + +import ( + "context" + "fmt" + "os" + "path/filepath" + "testing" + "time" + + "github.com/onsi/gomega" + operatorsv1 "github.com/operator-framework/api/pkg/operators/v1" + operatorsv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" 
+) + +func TestFilterErrorLogs(t *testing.T) { + g := gomega.NewGomegaWithT(t) + + tests := []struct { + name string + logs []string + expectedResult []string + }{ + { + name: "No error logs to filter", + logs: []string{ + "Normal log 1", + "Normal log 2", + "Normal log 3", + }, + expectedResult: []string{ + "Normal log 1", + "Normal log 2", + "Normal log 3", + }, + }, + { + name: "Filter error logs", + logs: []string{ + "Normal log 1", + "Error log with -error-template", + "Normal log 2", + "Another error with -error-template", + "Normal log 3", + }, + expectedResult: []string{ + "Normal log 1", + "Normal log 2", + "Normal log 3", + }, + }, + { + name: "Empty logs", + logs: []string{}, + expectedResult: []string{}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := FilterErrorLogs(tt.logs) + g.Expect(result).To(gomega.Equal(tt.expectedResult)) + }) + } +} + +func TestApplyYAMLTemplate(t *testing.T) { + // Register Gomega fail handler + gomega.RegisterTestingT(t) + g := gomega.NewGomegaWithT(t) + manifestPath := "../../sample-applications/hostedcontrolplanes/hypershift/hostedcluster-etcd-enc-key.yaml" + hostedClusterName := "test-hc" + + tests := []struct { + name string + manifestPath string + data map[string]interface{} + override bool + expectedError bool + errorContains string + verifyResource func(*testing.T, client.Client) + }{ + { + name: "Valid etcd encryption key manifest", + manifestPath: manifestPath, + data: map[string]interface{}{ + "HostedClusterName": hostedClusterName, + "ClustersNamespace": ClustersNamespace, + "EtcdEncryptionKey": SampleETCDEncryptionKey, + }, + override: false, + expectedError: false, + verifyResource: func(t *testing.T, client client.Client) { + secret := &corev1.Secret{} + err := client.Get(context.Background(), types.NamespacedName{ + Name: fmt.Sprintf("%s-etcd-encryption-key", hostedClusterName), + Namespace: ClustersNamespace, + }, secret) + g.Expect(err).ToNot(gomega.HaveOccurred()) + g.Expect(secret.Data).To(gomega.HaveKey("key")) + }, + }, + { + name: "Missing template variable", + manifestPath: manifestPath, + data: map[string]interface{}{ + "HostedClusterName": hostedClusterName, + // Missing ClustersNamespace and EtcdEncryptionKey + }, + override: false, + expectedError: true, + }, + { + name: "Override existing resource", + manifestPath: manifestPath, + data: map[string]interface{}{ + "HostedClusterName": hostedClusterName, + "ClustersNamespace": ClustersNamespace, + "EtcdEncryptionKey": SampleETCDEncryptionKey, + }, + override: true, + expectedError: false, + verifyResource: func(t *testing.T, client client.Client) { + secret := &corev1.Secret{} + err := client.Get(context.Background(), types.NamespacedName{ + Name: fmt.Sprintf("%s-etcd-encryption-key", hostedClusterName), + Namespace: ClustersNamespace, + }, secret) + g.Expect(err).ToNot(gomega.HaveOccurred()) + g.Expect(secret.Data).To(gomega.HaveKey("key")) + }, + }, + { + name: "Non-existent manifest file", + manifestPath: "non-existent-file.yaml", + data: map[string]interface{}{ + "HostedClusterName": hostedClusterName, + "ClustersNamespace": ClustersNamespace, + "EtcdEncryptionKey": SampleETCDEncryptionKey, + }, + override: false, + expectedError: true, + errorContains: "failed to read manifest", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Create a new scheme and client for each test case + scheme := createTestScheme() + client := fake.NewClientBuilder().WithScheme(scheme).Build() + + // Test 
ApplyYAMLTemplate + err := ApplyYAMLTemplate(context.Background(), client, tt.manifestPath, tt.override, tt.data) + + // Check results + if tt.expectedError { + g.Expect(err).To(gomega.HaveOccurred()) + } else { + g.Expect(err).ToNot(gomega.HaveOccurred()) + if tt.verifyResource != nil { + tt.verifyResource(t, client) + } + } + }) + } +} + +func TestGetPullSecret(t *testing.T) { + // Register Gomega fail handler + gomega.RegisterTestingT(t) + g := gomega.NewGomegaWithT(t) + + tests := []struct { + name string + pullSecret *corev1.Secret + expectedSecret string + expectError bool + }{ + { + name: "Valid pull secret", + pullSecret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pull-secret", + Namespace: "openshift-config", + }, + Data: map[string][]byte{ + ".dockerconfigjson": []byte("test-secret-data"), + }, + }, + expectedSecret: "test-secret-data", + expectError: false, + }, + { + name: "Pull secret without dockerconfigjson", + pullSecret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pull-secret", + Namespace: "openshift-config", + }, + Data: map[string][]byte{ + "other-key": []byte("other-data"), + }, + }, + expectedSecret: "", + expectError: true, + }, + { + name: "No pull secret exists", + pullSecret: nil, + expectError: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Create a new scheme and client for each test case + scheme := createTestScheme() + client := fake.NewClientBuilder().WithScheme(scheme).Build() + + // Create pull secret if provided + if tt.pullSecret != nil { + err := client.Create(context.Background(), tt.pullSecret) + g.Expect(err).ToNot(gomega.HaveOccurred()) + } + + // Test getPullSecret + secret, err := getPullSecret(context.Background(), client) + + // Check results + if tt.expectError { + g.Expect(err).To(gomega.HaveOccurred()) + } else { + g.Expect(err).ToNot(gomega.HaveOccurred()) + g.Expect(secret).To(gomega.Equal(tt.expectedSecret)) + } + }) + } +} + +func TestInstallRequiredOperators(t *testing.T) { + // Register Gomega fail handler + gomega.RegisterTestingT(t) + g := gomega.NewGomegaWithT(t) + + tests := []struct { + name string + packageManifest *unstructured.Unstructured + reqOperators []RequiredOperator + expectedError bool + errorContains string + verifyHandler func(*testing.T, *HCHandler, client.Client) + }{ + { + name: "Successfully install MCE operator", + packageManifest: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "status": map[string]interface{}{ + "defaultChannel": "stable-2.0", + }, + }, + }, + reqOperators: []RequiredOperator{ + { + Name: MCEName, + Namespace: MCENamespace, + OperatorGroup: MCEOperatorGroup, + }, + }, + expectedError: false, + verifyHandler: func(t *testing.T, h *HCHandler, c client.Client) { + g.Expect(h).ToNot(gomega.BeNil()) + g.Expect(h.HCOCPTestImage).To(gomega.Equal(HCOCPTestImage)) + + // Verify namespace was created + ns := &corev1.Namespace{} + err := c.Get(context.Background(), types.NamespacedName{Name: MCENamespace}, ns) + g.Expect(err).ToNot(gomega.HaveOccurred()) + + // Verify operator group was created + og := &operatorsv1.OperatorGroup{} + err = c.Get(context.Background(), types.NamespacedName{Name: MCEOperatorGroup, Namespace: MCENamespace}, og) + g.Expect(err).ToNot(gomega.HaveOccurred()) + g.Expect(og.Spec.TargetNamespaces).To(gomega.Equal([]string{MCENamespace})) + + // Verify subscription was created + sub := &operatorsv1alpha1.Subscription{} + err = c.Get(context.Background(), types.NamespacedName{Name: MCEName, Namespace: 
MCENamespace}, sub) + g.Expect(err).ToNot(gomega.HaveOccurred()) + g.Expect(sub.Spec.Channel).To(gomega.Equal("stable-2.0")) + g.Expect(sub.Spec.CatalogSource).To(gomega.Equal(RHOperatorsNamespace)) + g.Expect(sub.Spec.CatalogSourceNamespace).To(gomega.Equal(OCPMarketplaceNamespace)) + g.Expect(sub.Spec.Package).To(gomega.Equal(MCEName)) + g.Expect(sub.Spec.InstallPlanApproval).To(gomega.Equal(operatorsv1alpha1.ApprovalAutomatic)) + }, + }, + { + name: "Successfully install operator with specific channel", + packageManifest: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "status": map[string]interface{}{ + "defaultChannel": "stable-2.0", + }, + }, + }, + reqOperators: []RequiredOperator{ + { + Name: MCEName, + Namespace: MCENamespace, + OperatorGroup: MCEOperatorGroup, + Channel: "stable-2.1", + }, + }, + expectedError: false, + verifyHandler: func(t *testing.T, h *HCHandler, c client.Client) { + g.Expect(h).ToNot(gomega.BeNil()) + + // Verify subscription was created with correct channel + sub := &operatorsv1alpha1.Subscription{} + err := c.Get(context.Background(), types.NamespacedName{Name: MCEName, Namespace: MCENamespace}, sub) + g.Expect(err).ToNot(gomega.HaveOccurred()) + g.Expect(sub.Spec.Channel).To(gomega.Equal("stable-2.1")) + }, + }, + { + name: "Successfully install operator with specific CSV", + packageManifest: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "status": map[string]interface{}{ + "defaultChannel": "stable-2.0", + }, + }, + }, + reqOperators: []RequiredOperator{ + { + Name: MCEName, + Namespace: MCENamespace, + OperatorGroup: MCEOperatorGroup, + Csv: "multicluster-engine.v2.1.0", + }, + }, + expectedError: false, + verifyHandler: func(t *testing.T, h *HCHandler, c client.Client) { + g.Expect(h).ToNot(gomega.BeNil()) + + // Verify subscription was created with correct CSV + sub := &operatorsv1alpha1.Subscription{} + err := c.Get(context.Background(), types.NamespacedName{Name: MCEName, Namespace: MCENamespace}, sub) + g.Expect(err).ToNot(gomega.HaveOccurred()) + g.Expect(sub.Spec.StartingCSV).To(gomega.Equal("multicluster-engine.v2.1.0")) + }, + }, + { + name: "Fail to install operator with missing package manifest", + packageManifest: nil, + reqOperators: []RequiredOperator{ + { + Name: MCEName, + Namespace: MCENamespace, + OperatorGroup: MCEOperatorGroup, + }, + }, + expectedError: true, + errorContains: "failed to get PackageManifest", + }, + { + name: "Fail to install operator with missing default channel", + packageManifest: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "status": map[string]interface{}{}, + }, + }, + reqOperators: []RequiredOperator{ + { + Name: MCEName, + Namespace: MCENamespace, + OperatorGroup: MCEOperatorGroup, + }, + }, + expectedError: true, + errorContains: "no default channel found", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Create a new scheme and client for each test case + scheme := createTestScheme() + client := fake.NewClientBuilder().WithScheme(scheme).Build() + + // Create package manifest if provided + if tt.packageManifest != nil { + tt.packageManifest.SetGroupVersionKind(packageManifestGVR.GroupVersion().WithKind("PackageManifest")) + tt.packageManifest.SetName(MCEName) + tt.packageManifest.SetNamespace(MCENamespace) + err := client.Create(context.Background(), tt.packageManifest) + g.Expect(err).ToNot(gomega.HaveOccurred()) + } + + // Test InstallRequiredOperators + h, err := InstallRequiredOperators(context.Background(), client, 
tt.reqOperators) + + if tt.expectedError { + g.Expect(err).To(gomega.HaveOccurred()) + if tt.errorContains != "" { + g.Expect(err.Error()).To(gomega.ContainSubstring(tt.errorContains)) + } + } else { + g.Expect(err).ToNot(gomega.HaveOccurred()) + if tt.verifyHandler != nil { + tt.verifyHandler(t, h, client) + } + } + }) + } +} + +func TestWaitForUnstructuredObject(t *testing.T) { + // Register Gomega fail handler + gomega.RegisterTestingT(t) + g := gomega.NewGomegaWithT(t) + + tests := []struct { + name string + obj *unstructured.Unstructured + createObj bool + deleteObj bool + timeout time.Duration + expectedError bool + errorContains string + }{ + { + name: "Object already deleted", + obj: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "kind": "Test", + "apiVersion": "test/v1", + "metadata": map[string]interface{}{ + "name": "test", + "namespace": "test", + }, + }, + }, + createObj: false, + deleteObj: false, + timeout: time.Minute, + expectedError: false, + }, + { + name: "Object deleted during wait", + obj: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "kind": "Test", + "apiVersion": "test/v1", + "metadata": map[string]interface{}{ + "name": "test", + "namespace": "test", + }, + }, + }, + createObj: true, + deleteObj: true, + timeout: time.Minute, + expectedError: false, + }, + { + name: "Object not deleted within timeout", + obj: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "kind": "Test", + "apiVersion": "test/v1", + "metadata": map[string]interface{}{ + "name": "test", + "namespace": "test", + }, + }, + }, + createObj: true, + deleteObj: false, + timeout: time.Second * 2, + expectedError: true, + errorContains: "context deadline exceeded", + }, + { + name: "Object with finalizers", + obj: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "kind": "Test", + "apiVersion": "test/v1", + "metadata": map[string]interface{}{ + "name": "test", + "namespace": "test", + "finalizers": []interface{}{"test-finalizer"}, + }, + }, + }, + createObj: true, + deleteObj: true, + timeout: time.Second * 2, + expectedError: true, + errorContains: "context deadline exceeded", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Create a new scheme and client for each test case + scheme := createTestScheme() + client := fake.NewClientBuilder().WithScheme(scheme).Build() + + // Create the object if needed + if tt.createObj { + err := client.Create(context.Background(), tt.obj) + g.Expect(err).ToNot(gomega.HaveOccurred()) + } + + // Start deletion in background if needed + if tt.deleteObj { + go func() { + time.Sleep(100 * time.Millisecond) // Give time for the test to start + err := client.Delete(context.Background(), tt.obj) + g.Expect(err).ToNot(gomega.HaveOccurred()) + }() + } + + // Test WaitForUnstructuredObject + err := WaitForUnstructuredObject(context.Background(), client, tt.obj, tt.timeout) + + // Check results + if tt.expectedError { + g.Expect(err).To(gomega.HaveOccurred()) + if tt.errorContains != "" { + g.Expect(err.Error()).To(gomega.ContainSubstring(tt.errorContains)) + } + } else { + g.Expect(err).ToNot(gomega.HaveOccurred()) + } + }) + } +} + +func TestGetProjectRoot(t *testing.T) { + g := gomega.NewGomegaWithT(t) + + // Get the project root using the function + root := getProjectRoot() + + // Verify that the path exists + _, err := os.Stat(root) + g.Expect(err).ToNot(gomega.HaveOccurred(), "project root path should exist") + + // Verify that the path contains expected directories + 
expectedDirs := []string{ + "tests", + "go.mod", + "go.sum", + } + + for _, dir := range expectedDirs { + path := filepath.Join(root, dir) + _, err := os.Stat(path) + g.Expect(err).ToNot(gomega.HaveOccurred(), "expected directory/file %s should exist in project root", dir) + } + + // Verify that the path is absolute + g.Expect(filepath.IsAbs(root)).To(gomega.BeTrue(), "project root path should be absolute") + + // Verify that the path points to the correct directory by checking for a known file + knownFile := filepath.Join(root, "tests", "e2e", "lib", "hcp", "utils.go") + _, err = os.Stat(knownFile) + g.Expect(err).ToNot(gomega.HaveOccurred(), "should be able to find utils.go in the expected location") +} diff --git a/tests/e2e/lib/kube_helpers.go b/tests/e2e/lib/k8s_common_helpers.go similarity index 50% rename from tests/e2e/lib/kube_helpers.go rename to tests/e2e/lib/k8s_common_helpers.go index 51cbe9fdce..4614f581bb 100755 --- a/tests/e2e/lib/kube_helpers.go +++ b/tests/e2e/lib/k8s_common_helpers.go @@ -7,14 +7,26 @@ import ( "io" "log" "os" + "strings" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/remotecommand" ) +type ProxyPodParameters struct { + KubeClient *kubernetes.Clientset + KubeConfig *rest.Config + Namespace string + PodName string + ContainerName string +} + func CreateNamespace(clientset *kubernetes.Clientset, namespace string) error { ns := corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} _, err := clientset.CoreV1().Namespaces().Create(context.Background(), &ns, metav1.CreateOptions{}) @@ -80,25 +92,141 @@ func DeleteSecret(clientset *kubernetes.Clientset, namespace string, credSecretR return err } +// ExecuteCommandInPodsSh executes a command in a Kubernetes pod using the provided parameters. +// +// Parameters: +// - params: ProxyPodParameters - Parameters specifying Connection to the Kubernetes, the pod, namespace, and container details. +// - command: string - The command to be executed in the specified pod. +// +// Returns: +// - string: Standard output of the executed command. +// - string: Standard error output of the executed command. +// - error: An error, if any, that occurred during the execution of the command. +// +// The function logs relevant information, such as the provided command, the pod name, container name, +// and the full command URL before initiating the command execution. It streams the command's standard +// output and error output, logging them if available. In case of errors, it returns an error message +// with details about the issue. 
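+//
+// Illustrative usage sketch (the namespace, pod name, and command below are assumptions for
+// documentation only; clientset and restConfig stand for an existing *kubernetes.Clientset
+// and *rest.Config):
+//
+//	params := ProxyPodParameters{
+//		KubeClient:    clientset,
+//		KubeConfig:    restConfig,
+//		Namespace:     "openshift-adp",
+//		PodName:       "curl-tool-pod",
+//		ContainerName: "curl-tool",
+//	}
+//	stdout, stderr, err := ExecuteCommandInPodsSh(params, "curl -s http://example.internal/healthz")
+//	if err != nil {
+//		log.Printf("exec failed: %v (stderr: %s)", err, stderr)
+//	}
+//	log.Printf("exec output: %s", stdout)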
+func ExecuteCommandInPodsSh(params ProxyPodParameters, command string) (string, string, error) {
+
+    var containerName string
+
+    kubeClient := params.KubeClient
+    kubeConfig := params.KubeConfig
+
+    if command == "" {
+        return "", "", fmt.Errorf("No command specified")
+    }
+
+    if kubeClient == nil {
+        return "", "", fmt.Errorf("No valid kubernetes.Clientset provided")
+    }
+
+    if kubeConfig == nil {
+        return "", "", fmt.Errorf("No valid rest.Config provided")
+    }
+
+    if params.PodName == "" {
+        return "", "", fmt.Errorf("No proxy pod specified for the command: %s", command)
+    }
+
+    if params.Namespace == "" {
+        return "", "", fmt.Errorf("No proxy pod namespace specified for the command: %s", command)
+    }
+
+    if params.ContainerName != "" {
+        containerName = params.ContainerName
+    } else {
+        containerName = "curl-tool"
+    }
+
+    log.Printf("Provided command: %s\n", command)
+    log.Printf("Command will run in the pod: %s, container: %s in the namespace: %s\n", params.PodName, containerName, params.Namespace)
+
+    option := &corev1.PodExecOptions{
+        Command:   strings.Split(command, " "),
+        Stdin:     false,
+        Stdout:    true,
+        Stderr:    true,
+        TTY:       true,
+        Container: containerName,
+    }
+
+    postRequest := kubeClient.CoreV1().RESTClient().Post().
+        Resource("pods").
+        Name(params.PodName).
+        Namespace(params.Namespace).
+        SubResource("exec").
+        VersionedParams(option, scheme.ParameterCodec)
+
+    log.Printf("Full command URL: %s\n", postRequest.URL())
+
+    executor, err := remotecommand.NewSPDYExecutor(kubeConfig, "POST", postRequest.URL())
+    if err != nil {
+        return "", "", err
+    }
+
+    stdOutput := &bytes.Buffer{}
+    stdErrOutput := &bytes.Buffer{}
+
+    err = executor.Stream(remotecommand.StreamOptions{Stdout: stdOutput, Stderr: stdErrOutput})
+
+    if stdOutput.Len() > 0 {
+        log.Printf("stdOutput: %s\n", stdOutput.String())
+    }
+
+    if stdErrOutput.Len() > 0 {
+        log.Printf("stdErrOutput: %s\n", stdErrOutput.String())
+    }
+
+    if err != nil {
+        log.Printf("Error while streaming command output: %v\n", err)
+        return stdOutput.String(), stdErrOutput.String(), fmt.Errorf("Error while streaming command output: %v", err)
+    }
+
+    return stdOutput.String(), stdErrOutput.String(), nil
+}
+
+// GetFirstPodByLabel retrieves the first pod found in the specified namespace that matches the given label selector.
+// It uses the provided Kubernetes client to interact with the Kubernetes API.
+//
+// Parameters:
+// - clientset: A pointer to the Kubernetes client (*kubernetes.Clientset).
+// - namespace: The namespace in which to search for the pod.
+// - labelSelector: The label selector to filter pods.
+//
+// Returns:
+// - (*corev1.Pod, error): A pointer to the first pod matching the label selector, or an error if the lookup fails or no pods match.
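+//
+// Illustrative usage sketch (the namespace and label selector below are assumptions, not values
+// used elsewhere in this suite):
+//
+//	pod, err := GetFirstPodByLabel(clientset, "openshift-adp", "app.kubernetes.io/name=velero")
+//	if err != nil {
+//		return err // an error also covers the case where no pod matches the selector
+//	}
+//	log.Printf("using pod %s as the proxy pod", pod.Name)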
+func GetFirstPodByLabel(clientset *kubernetes.Clientset, namespace string, labelSelector string) (*corev1.Pod, error) {
+    podList, err := GetAllPodsWithLabel(clientset, namespace, labelSelector)
+    if err != nil {
+        return nil, err
+    }
+    // Guard against an empty result so the index below cannot panic.
+    if len(podList.Items) == 0 {
+        return nil, fmt.Errorf("no pods found in namespace %s matching label selector %s", namespace, labelSelector)
+    }
+
+    return &podList.Items[0], nil
+}
+
 func SavePodLogs(clientset *kubernetes.Clientset, namespace, dir string) error {
     podList, err := clientset.CoreV1().Pods(namespace).List(context.Background(), metav1.ListOptions{})
     if err != nil {
-        return nil
+        return err
     }
     for _, pod := range podList.Items {
         podDir := fmt.Sprintf("%s/%s/%s", dir, namespace, pod.Name)
         err = os.MkdirAll(podDir, 0755)
         if err != nil {
             log.Printf("Error creating pod directory: %v", err)
+            return err
         }
         for _, container := range pod.Spec.Containers {
             logs, err := GetPodContainerLogs(clientset, namespace, pod.Name, container.Name)
             if err != nil {
-                return nil
+                return err
             }
             err = os.WriteFile(podDir+"/"+container.Name+".log", []byte(logs), 0644)
             if err != nil {
                 log.Printf("Error writing pod logs: %v", err)
+                return err
             }
         }
     }
diff --git a/tests/e2e/lib/nodeagent_helpers.go b/tests/e2e/lib/nodeagent_helpers.go
index e79b19f46f..e232727ff8 100755
--- a/tests/e2e/lib/nodeagent_helpers.go
+++ b/tests/e2e/lib/nodeagent_helpers.go
@@ -5,13 +5,14 @@ import (
     "fmt"
     "log"
 
-    "github.com/openshift/oadp-operator/pkg/common"
     appsv1 "k8s.io/api/apps/v1"
     corev1 "k8s.io/api/core/v1"
     apierrors "k8s.io/apimachinery/pkg/api/errors"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/util/wait"
     "k8s.io/client-go/kubernetes"
+
+    "github.com/openshift/oadp-operator/pkg/common"
 )
 
 func GetNodeAgentDaemonSet(c *kubernetes.Clientset, namespace string) (*appsv1.DaemonSet, error) {
@@ -54,6 +55,7 @@ func AreNodeAgentPodsRunning(c *kubernetes.Clientset, namespace string) wait.Con
     }
 }
 
+// keep for now
 func IsNodeAgentDaemonSetDeleted(c *kubernetes.Clientset, namespace string) wait.ConditionFunc {
     log.Printf("Checking if NodeAgent DaemonSet has been deleted...")
     return func() (bool, error) {
diff --git a/tests/e2e/lib/ocp_common_helpers.go b/tests/e2e/lib/ocp_common_helpers.go
new file mode 100644
index 0000000000..b9883cd0d9
--- /dev/null
+++ b/tests/e2e/lib/ocp_common_helpers.go
@@ -0,0 +1,44 @@
+package lib
+
+import (
+    "context"
+    "fmt"
+    "log"
+
+    routev1 "github.com/openshift/api/route/v1"
+    apierrors "k8s.io/apimachinery/pkg/api/errors"
+    "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+// GetRouteEndpointURL retrieves and verifies the accessibility of a Kubernetes route HOST endpoint.
+//
+// Parameters:
+// - ocClient: An instance of the OpenShift client.
+// - namespace: The Kubernetes namespace in which the service route is located.
+// - routeName: The name of the Kubernetes route.
+//
+// Returns:
+// - string: The full route endpoint URL if the service route is accessible.
+// - error: An error message if the service route is not accessible, if the route is not found, or if there is an issue with the HTTP request.
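+//
+// Illustrative usage sketch (the namespace and route name below are assumptions):
+//
+//	endpoint, err := GetRouteEndpointURL(ocClient, "test-app-namespace", "my-app")
+//	if err != nil {
+//		return err
+//	}
+//	log.Printf("application is reachable at %s", endpoint)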
+func GetRouteEndpointURL(ocClient client.Client, namespace, routeName string) (string, error) { + log.Println("Verifying if the service is accessible via route") + route := &routev1.Route{} + err := ocClient.Get(context.Background(), client.ObjectKey{Namespace: namespace, Name: routeName}, route) + if err != nil { + if apierrors.IsNotFound(err) { + return "", fmt.Errorf("Service route not found: %v", err) + } + return "", err + } + // Construct the route endpoint + routeEndpoint := "http://" + route.Spec.Host + + // Check if the route is accessible + log.Printf("Verifying if the service is accessible via: %s", routeEndpoint) + resp, err := IsURLReachable(routeEndpoint) + if err != nil || resp == false { + return "", fmt.Errorf("Route endpoint not accessible: %v", err) + } + + return routeEndpoint, nil +} diff --git a/tests/e2e/lib/plugins_helpers.go b/tests/e2e/lib/plugins_helpers.go index 49835ed5df..5481f6f845 100755 --- a/tests/e2e/lib/plugins_helpers.go +++ b/tests/e2e/lib/plugins_helpers.go @@ -3,10 +3,11 @@ package lib import ( "fmt" - oadpv1alpha1 "github.com/openshift/oadp-operator/api/v1alpha1" - "github.com/openshift/oadp-operator/pkg/credentials" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/kubernetes" + + oadpv1alpha1 "github.com/openshift/oadp-operator/api/v1alpha1" + "github.com/openshift/oadp-operator/pkg/credentials" ) func DoesPluginExist(c *kubernetes.Clientset, namespace string, plugin oadpv1alpha1.DefaultPlugin) wait.ConditionFunc { diff --git a/tests/e2e/lib/restore.go b/tests/e2e/lib/restore.go index d4f4298d0b..b3a80bcaf9 100755 --- a/tests/e2e/lib/restore.go +++ b/tests/e2e/lib/restore.go @@ -10,7 +10,6 @@ import ( velero "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" "github.com/vmware-tanzu/velero/pkg/cmd/util/downloadrequest" "github.com/vmware-tanzu/velero/pkg/cmd/util/output" - veleroClientset "github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned" "github.com/vmware-tanzu/velero/pkg/label" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" @@ -87,7 +86,7 @@ func IsRestoreCompletedSuccessfully(c *kubernetes.Clientset, ocClient client.Cli } // https://github.com/vmware-tanzu/velero/blob/11bfe82342c9f54c63f40d3e97313ce763b446f2/pkg/cmd/cli/restore/describe.go#L72-L78 -func DescribeRestore(veleroClient veleroClientset.Interface, ocClient client.Client, namespace string, name string) string { +func DescribeRestore(ocClient client.Client, namespace string, name string) string { restore, err := GetRestore(ocClient, namespace, name) if err != nil { return "could not get provided backup: " + err.Error() @@ -96,7 +95,8 @@ func DescribeRestore(veleroClient veleroClientset.Interface, ocClient client.Cli insecureSkipTLSVerify := true caCertFile := "" opts := metav1.ListOptions{LabelSelector: fmt.Sprintf("%s=%s", velero.RestoreNameLabel, label.GetValidName(restore.Name))} - podvolumeRestoreList, err := veleroClient.VeleroV1().PodVolumeRestores(restore.Namespace).List(context.Background(), opts) + podvolumeRestoreList := &velero.PodVolumeRestoreList{} + err = ocClient.List(context.Background(), podvolumeRestoreList, client.InNamespace(restore.Namespace), &client.ListOptions{Raw: &opts}) if err != nil { log.Printf("error getting PodVolumeRestores for restore %s: %v\n", restore.Name, err) } diff --git a/tests/e2e/lib/scheme.go b/tests/e2e/lib/scheme.go new file mode 100644 index 0000000000..ede8e8de6f --- /dev/null +++ b/tests/e2e/lib/scheme.go @@ -0,0 +1,41 @@ +package lib + +import ( + volumesnapshotv1 
"github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1" + openshiftappsv1 "github.com/openshift/api/apps/v1" + openshiftbuildv1 "github.com/openshift/api/build/v1" + openshiftconfigv1 "github.com/openshift/api/config/v1" + openshiftroutev1 "github.com/openshift/api/route/v1" + openshiftsecurityv1 "github.com/openshift/api/security/v1" + openshifttemplatev1 "github.com/openshift/api/template/v1" + hypershiftv1 "github.com/openshift/hypershift/api/hypershift/v1beta1" + operatorsv1 "github.com/operator-framework/api/pkg/operators/v1" + operatorsv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1" + velerov1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + apiruntime "k8s.io/apimachinery/pkg/runtime" + + oadpv1alpha1 "github.com/openshift/oadp-operator/api/v1alpha1" +) + +var ( + Scheme = apiruntime.NewScheme() +) + +func init() { + _ = oadpv1alpha1.AddToScheme(Scheme) + _ = velerov1.AddToScheme(Scheme) + _ = openshiftappsv1.AddToScheme(Scheme) + _ = openshiftbuildv1.AddToScheme(Scheme) + _ = openshiftsecurityv1.AddToScheme(Scheme) + _ = openshifttemplatev1.AddToScheme(Scheme) + _ = openshiftroutev1.AddToScheme(Scheme) + _ = corev1.AddToScheme(Scheme) + _ = volumesnapshotv1.AddToScheme(Scheme) + _ = operatorsv1alpha1.AddToScheme(Scheme) + _ = operatorsv1.AddToScheme(Scheme) + _ = hypershiftv1.AddToScheme(Scheme) + _ = appsv1.AddToScheme(Scheme) + _ = openshiftconfigv1.AddToScheme(Scheme) +} diff --git a/tests/e2e/lib/subscription_helpers.go b/tests/e2e/lib/subscription_helpers.go index 0e059385c9..593cd838a9 100644 --- a/tests/e2e/lib/subscription_helpers.go +++ b/tests/e2e/lib/subscription_helpers.go @@ -35,6 +35,9 @@ func getOperatorSubscription(c client.Client, namespace, label string) (*Subscri func (v *VirtOperator) getOperatorSubscription() (*Subscription, error) { label := "operators.coreos.com/kubevirt-hyperconverged.openshift-cnv" + if v.Upstream { + label = "operators.coreos.com/community-kubevirt-hyperconverged.kubevirt-hyperconverged" + } return getOperatorSubscription(v.Client, v.Namespace, label) } diff --git a/tests/e2e/lib/virt_helpers.go b/tests/e2e/lib/virt_helpers.go index d2e1a1844c..f21d6ff10e 100644 --- a/tests/e2e/lib/virt_helpers.go +++ b/tests/e2e/lib/virt_helpers.go @@ -10,11 +10,9 @@ import ( operatorsv1 "github.com/operator-framework/api/pkg/operators/v1" operatorsv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1" - corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/version" @@ -28,7 +26,6 @@ const ( emulationAnnotation = "kubevirt.kubevirt.io/jsonpatch" useEmulation = `[{"op": "add", "path": "/spec/configuration/developerConfiguration", "value": {"useEmulation": true}}]` stopVmPath = "/apis/subresources.kubevirt.io/v1/namespaces/%s/virtualmachines/%s/stop" - isDefaultClass = "storageclass.kubernetes.io/is-default-class" ) var packageManifestsGvr = schema.GroupVersionResource{ @@ -62,25 +59,32 @@ type VirtOperator struct { Namespace string Csv string Version *version.Version + Upstream bool } // GetVirtOperator fills out a new VirtOperator -func GetVirtOperator(client client.Client, clientset *kubernetes.Clientset, dynamicClient dynamic.Interface) (*VirtOperator, error) { +func GetVirtOperator(c client.Client, 
clientset *kubernetes.Clientset, dynamicClient dynamic.Interface, upstream bool) (*VirtOperator, error) { namespace := "openshift-cnv" + manifest := "kubevirt-hyperconverged" + if upstream { + namespace = "kubevirt-hyperconverged" + manifest = "community-kubevirt-hyperconverged" + } - csv, version, err := getCsvFromPackageManifest(dynamicClient, "kubevirt-hyperconverged") + csv, operatorVersion, err := getCsvFromPackageManifest(dynamicClient, manifest) if err != nil { log.Printf("Failed to get CSV from package manifest") return nil, err } v := &VirtOperator{ - Client: client, + Client: c, Clientset: clientset, Dynamic: dynamicClient, Namespace: namespace, Csv: csv, - Version: version, + Version: operatorVersion, + Upstream: upstream, } return v, nil @@ -89,16 +93,23 @@ func GetVirtOperator(client client.Client, clientset *kubernetes.Clientset, dyna // Helper to create an operator group object, common to installOperatorGroup // and removeOperatorGroup. func (v *VirtOperator) makeOperatorGroup() *operatorsv1.OperatorGroup { + // Community operator fails with "cannot configure to watch own namespace", + // need to remove target namespaces. + spec := operatorsv1.OperatorGroupSpec{} + if !v.Upstream { + spec = operatorsv1.OperatorGroupSpec{ + TargetNamespaces: []string{ + v.Namespace, + }, + } + } + return &operatorsv1.OperatorGroup{ ObjectMeta: metav1.ObjectMeta{ Name: "kubevirt-hyperconverged-group", Namespace: v.Namespace, }, - Spec: operatorsv1.OperatorGroupSpec{ - TargetNamespaces: []string{ - v.Namespace, - }, - }, + Spec: spec, } } @@ -111,7 +122,7 @@ func (v *VirtOperator) makeOperatorGroup() *operatorsv1.OperatorGroup { // Version type, so it is easy to check against the current cluster version. func getCsvFromPackageManifest(dynamicClient dynamic.Interface, name string) (string, *version.Version, error) { log.Println("Getting packagemanifest...") - unstructuredManifest, err := dynamicClient.Resource(packageManifestsGvr).Namespace("default").Get(context.Background(), name, v1.GetOptions{}) + unstructuredManifest, err := dynamicClient.Resource(packageManifestsGvr).Namespace("default").Get(context.Background(), name, metav1.GetOptions{}) if err != nil { log.Printf("Error getting packagemanifest %s: %v", name, err) return "", nil, err @@ -168,18 +179,18 @@ func getCsvFromPackageManifest(dynamicClient dynamic.Interface, name string) (st } log.Printf("Current operator version is: %s", versionString) - version, err := version.ParseGeneric(versionString) + operatorVersion, err := version.ParseGeneric(versionString) if err != nil { return "", nil, err } - return csv, version, nil + return csv, operatorVersion, nil } // Checks the existence of the operator's target namespace -func (v *VirtOperator) checkNamespace() bool { +func (v *VirtOperator) checkNamespace(ns string) bool { // First check that the namespace exists - exists, _ := DoesNamespaceExist(v.Clientset, v.Namespace) + exists, _ := DoesNamespaceExist(v.Clientset, ns) return exists } @@ -217,7 +228,7 @@ func (v *VirtOperator) checkCsv() bool { // health status field is "healthy". Uses dynamic client to avoid uprooting lots // of package dependencies, which should probably be fixed later. 
func (v *VirtOperator) checkHco() bool { - unstructuredHco, err := v.Dynamic.Resource(hyperConvergedGvr).Namespace(v.Namespace).Get(context.Background(), "kubevirt-hyperconverged", v1.GetOptions{}) + unstructuredHco, err := v.Dynamic.Resource(hyperConvergedGvr).Namespace(v.Namespace).Get(context.Background(), "kubevirt-hyperconverged", metav1.GetOptions{}) if err != nil { log.Printf("Error getting HCO: %v", err) return false @@ -239,7 +250,7 @@ func (v *VirtOperator) checkHco() bool { // Check if KVM emulation is enabled. func (v *VirtOperator) checkEmulation() bool { - hco, err := v.Dynamic.Resource(hyperConvergedGvr).Namespace("openshift-cnv").Get(context.Background(), "kubevirt-hyperconverged", v1.GetOptions{}) + hco, err := v.Dynamic.Resource(hyperConvergedGvr).Namespace(v.Namespace).Get(context.Background(), "kubevirt-hyperconverged", metav1.GetOptions{}) if err != nil { return false } @@ -263,11 +274,12 @@ func (v *VirtOperator) checkEmulation() bool { return false } -// Creates the target virtualization namespace, likely openshift-cnv or kubevirt-hyperconverged -func (v *VirtOperator) installNamespace() error { - err := v.Client.Create(context.Background(), &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: v.Namespace}}) +// Creates the target namespace, likely openshift-cnv or kubevirt-hyperconverged, +// but also used for openshift-virtualization-os-images if not already present. +func (v *VirtOperator) installNamespace(ns string) error { + err := v.Client.Create(context.Background(), &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ns}}) if err != nil { - log.Printf("Failed to create namespace %s: %v", v.Namespace, err) + log.Printf("Failed to create namespace %s: %v", ns, err) return err } return nil @@ -288,19 +300,30 @@ func (v *VirtOperator) installOperatorGroup() error { // Creates the subscription, which triggers creation of the ClusterServiceVersion. 
func (v *VirtOperator) installSubscription() error { - subscription := &operatorsv1alpha1.Subscription{ - ObjectMeta: metav1.ObjectMeta{ - Name: "hco-operatorhub", - Namespace: v.Namespace, - }, - Spec: &operatorsv1alpha1.SubscriptionSpec{ - CatalogSource: "redhat-operators", + spec := &operatorsv1alpha1.SubscriptionSpec{ + CatalogSource: "redhat-operators", + CatalogSourceNamespace: "openshift-marketplace", + Package: "kubevirt-hyperconverged", + Channel: "stable", + StartingCSV: v.Csv, + InstallPlanApproval: operatorsv1alpha1.ApprovalAutomatic, + } + if v.Upstream { + spec = &operatorsv1alpha1.SubscriptionSpec{ + CatalogSource: "community-operators", CatalogSourceNamespace: "openshift-marketplace", - Package: "kubevirt-hyperconverged", + Package: "community-kubevirt-hyperconverged", Channel: "stable", StartingCSV: v.Csv, InstallPlanApproval: operatorsv1alpha1.ApprovalAutomatic, + } + } + subscription := &operatorsv1alpha1.Subscription{ + ObjectMeta: metav1.ObjectMeta{ + Name: "hco-operatorhub", + Namespace: v.Namespace, }, + Spec: spec, } err := v.Client.Create(context.Background(), subscription) if err != nil { @@ -325,7 +348,7 @@ func (v *VirtOperator) installHco() error { "spec": map[string]interface{}{}, }, } - _, err := v.Dynamic.Resource(hyperConvergedGvr).Namespace(v.Namespace).Create(context.Background(), &unstructuredHco, v1.CreateOptions{}) + _, err := v.Dynamic.Resource(hyperConvergedGvr).Namespace(v.Namespace).Create(context.Background(), &unstructuredHco, metav1.CreateOptions{}) if err != nil { log.Printf("Error creating HCO: %v", err) return err @@ -335,7 +358,7 @@ func (v *VirtOperator) installHco() error { } func (v *VirtOperator) configureEmulation() error { - hco, err := v.Dynamic.Resource(hyperConvergedGvr).Namespace("openshift-cnv").Get(context.Background(), "kubevirt-hyperconverged", v1.GetOptions{}) + hco, err := v.Dynamic.Resource(hyperConvergedGvr).Namespace(v.Namespace).Get(context.Background(), "kubevirt-hyperconverged", metav1.GetOptions{}) if err != nil { return err } @@ -356,7 +379,7 @@ func (v *VirtOperator) configureEmulation() error { return err } - _, err = v.Dynamic.Resource(hyperConvergedGvr).Namespace("openshift-cnv").Update(context.Background(), hco, v1.UpdateOptions{}) + _, err = v.Dynamic.Resource(hyperConvergedGvr).Namespace(v.Namespace).Update(context.Background(), hco, metav1.UpdateOptions{}) if err != nil { return err } @@ -365,19 +388,19 @@ func (v *VirtOperator) configureEmulation() error { } // Creates target namespace if needed, and waits for it to exist -func (v *VirtOperator) ensureNamespace(timeout time.Duration) error { - if !v.checkNamespace() { - if err := v.installNamespace(); err != nil { +func (v *VirtOperator) EnsureNamespace(ns string, timeout time.Duration) error { + if !v.checkNamespace(ns) { + if err := v.installNamespace(ns); err != nil { return err } err := wait.PollImmediate(time.Second, timeout, func() (bool, error) { - return v.checkNamespace(), nil + return v.checkNamespace(ns), nil }) if err != nil { - return fmt.Errorf("timed out waiting to create namespace %s: %w", v.Namespace, err) + return fmt.Errorf("timed out waiting to create namespace %s: %w", ns, err) } } else { - log.Printf("Namespace %s already present, no action required", v.Namespace) + log.Printf("Namespace %s already present, no action required", ns) } return nil @@ -452,10 +475,10 @@ func (v *VirtOperator) ensureHco(timeout time.Duration) error { } // Deletes the virtualization operator namespace (likely openshift-cnv). 
-func (v *VirtOperator) removeNamespace() error { - err := v.Client.Delete(context.Background(), &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: v.Namespace}}) +func (v *VirtOperator) removeNamespace(ns string) error { + err := v.Client.Delete(context.Background(), &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ns}}) if err != nil { - log.Printf("Failed to delete namespace %s: %v", v.Namespace, err) + log.Printf("Failed to delete namespace %s: %v", ns, err) return err } return nil @@ -482,12 +505,12 @@ func (v *VirtOperator) removeSubscription() error { // Deletes the virt ClusterServiceVersion func (v *VirtOperator) removeCsv() error { - return v.Dynamic.Resource(csvGvr).Namespace(v.Namespace).Delete(context.Background(), v.Csv, v1.DeleteOptions{}) + return v.Dynamic.Resource(csvGvr).Namespace(v.Namespace).Delete(context.Background(), v.Csv, metav1.DeleteOptions{}) } // Deletes a HyperConverged Operator instance. func (v *VirtOperator) removeHco() error { - err := v.Dynamic.Resource(hyperConvergedGvr).Namespace(v.Namespace).Delete(context.Background(), "kubevirt-hyperconverged", v1.DeleteOptions{}) + err := v.Dynamic.Resource(hyperConvergedGvr).Namespace(v.Namespace).Delete(context.Background(), "kubevirt-hyperconverged", metav1.DeleteOptions{}) if err != nil { log.Printf("Error deleting HCO: %v", err) return err @@ -497,21 +520,21 @@ func (v *VirtOperator) removeHco() error { } // Makes sure the virtualization operator's namespace is removed. -func (v *VirtOperator) ensureNamespaceRemoved(timeout time.Duration) error { - if !v.checkNamespace() { - log.Printf("Namespace %s already removed, no action required", v.Namespace) +func (v *VirtOperator) ensureNamespaceRemoved(ns string, timeout time.Duration) error { + if !v.checkNamespace(ns) { + log.Printf("Namespace %s already removed, no action required", ns) return nil } - if err := v.removeNamespace(); err != nil { + if err := v.removeNamespace(ns); err != nil { return err } err := wait.PollImmediate(5*time.Second, timeout, func() (bool, error) { - return !v.checkNamespace(), nil + return !v.checkNamespace(ns), nil }) if err != nil { - return fmt.Errorf("timed out waiting to delete namespace %s: %w", v.Namespace, err) + return fmt.Errorf("timed out waiting to delete namespace %s: %w", ns, err) } return nil @@ -699,7 +722,7 @@ func (v *VirtOperator) EnsureEmulation(timeout time.Duration) error { // IsVirtInstalled returns whether or not the OpenShift Virtualization operator // is installed and ready, by checking for a HyperConverged operator resource. 
func (v *VirtOperator) IsVirtInstalled() bool { - if !v.checkNamespace() { + if !v.checkNamespace(v.Namespace) { return false } @@ -715,7 +738,7 @@ func (v *VirtOperator) EnsureVirtInstallation() error { } log.Printf("Creating virtualization namespace %s", v.Namespace) - if err := v.ensureNamespace(10 * time.Second); err != nil { + if err := v.EnsureNamespace(v.Namespace, 10*time.Second); err != nil { return err } log.Printf("Created namespace %s", v.Namespace) @@ -774,7 +797,7 @@ func (v *VirtOperator) EnsureVirtRemoval() error { log.Println("Deleted operator group") log.Printf("Deleting virtualization namespace %s", v.Namespace) - if err := v.ensureNamespaceRemoved(3 * time.Minute); err != nil { + if err := v.ensureNamespaceRemoved(v.Namespace, 3*time.Minute); err != nil { return err } log.Printf("Deleting namespace %s", v.Namespace) diff --git a/tests/e2e/lib/virt_storage_helpers.go b/tests/e2e/lib/virt_storage_helpers.go index c6aa4f9ba4..58ade8f758 100644 --- a/tests/e2e/lib/virt_storage_helpers.go +++ b/tests/e2e/lib/virt_storage_helpers.go @@ -38,7 +38,7 @@ func (v *VirtOperator) deleteDataVolume(namespace, name string) error { return v.Dynamic.Resource(dataVolumeGVR).Namespace(namespace).Delete(context.Background(), name, metav1.DeleteOptions{}) } -func (v *VirtOperator) checkDataVolumeExists(namespace, name string) bool { +func (v *VirtOperator) CheckDataVolumeExists(namespace, name string) bool { unstructuredDataVolume, err := v.getDataVolume(namespace, name) if err != nil { return false @@ -122,7 +122,7 @@ func (v *VirtOperator) createDataVolumeFromUrl(namespace, name, url, size string // Create a DataVolume and wait for it to be ready. func (v *VirtOperator) EnsureDataVolumeFromUrl(namespace, name, url, size string, timeout time.Duration) error { - if !v.checkDataVolumeExists(namespace, name) { + if !v.CheckDataVolumeExists(namespace, name) { if err := v.createDataVolumeFromUrl(namespace, name, url, size); err != nil { return err } @@ -155,7 +155,7 @@ func (v *VirtOperator) RemoveDataVolume(namespace, name string, timeout time.Dur } err = wait.PollImmediate(5*time.Second, timeout, func() (bool, error) { - return !v.checkDataVolumeExists(namespace, name), nil + return !v.CheckDataVolumeExists(namespace, name), nil }) if err != nil { return fmt.Errorf("timed out waiting for DataVolume %s/%s to be deleted: %w", namespace, name, err) @@ -173,26 +173,30 @@ func (v *VirtOperator) RemoveDataSource(namespace, name string) error { // Create a DataSource from an existing PVC, with the same name and namespace. // This way, the PVC can be specified as a sourceRef in the VM spec. 
func (v *VirtOperator) CreateDataSourceFromPvc(namespace, name string) error { + return v.CreateTargetDataSourceFromPvc(namespace, namespace, name, name) +} + +func (v *VirtOperator) CreateTargetDataSourceFromPvc(sourceNamespace, destinationNamespace, sourcePvcName, destinationDataSourceName string) error { unstructuredDataSource := unstructured.Unstructured{ Object: map[string]interface{}{ "apiVersion": "cdi.kubevirt.io/v1beta1", "kind": "DataSource", "metadata": map[string]interface{}{ - "name": name, - "namespace": namespace, + "name": destinationDataSourceName, + "namespace": destinationNamespace, }, "spec": map[string]interface{}{ "source": map[string]interface{}{ "pvc": map[string]interface{}{ - "name": name, - "namespace": namespace, + "name": sourcePvcName, + "namespace": sourceNamespace, }, }, }, }, } - _, err := v.Dynamic.Resource(dataSourceGVR).Namespace(namespace).Create(context.Background(), &unstructuredDataSource, metav1.CreateOptions{}) + _, err := v.Dynamic.Resource(dataSourceGVR).Namespace(destinationNamespace).Create(context.Background(), &unstructuredDataSource, metav1.CreateOptions{}) if err != nil { if apierrors.IsAlreadyExists(err) { return nil @@ -207,40 +211,101 @@ func (v *VirtOperator) CreateDataSourceFromPvc(namespace, name string) error { return nil } -// Check the VolumeBindingMode of the default storage class, and make an -// Immediate-mode copy if it is set to WaitForFirstConsumer. -func (v *VirtOperator) CreateImmediateModeStorageClass(name string) error { - // Find the default storage class +// Find the given DataSource, and return the PVC it points to +func (v *VirtOperator) GetDataSourcePvc(ns, name string) (string, string, error) { + unstructuredDataSource, err := v.Dynamic.Resource(dataSourceGVR).Namespace(ns).Get(context.Background(), name, metav1.GetOptions{}) + if err != nil { + log.Printf("Error getting DataSource %s: %v", name, err) + return "", "", err + } + + pvcName, ok, err := unstructured.NestedString(unstructuredDataSource.UnstructuredContent(), "status", "source", "pvc", "name") + if err != nil { + log.Printf("Error getting PVC from DataSource: %v", err) + return "", "", err + } + if !ok { + return "", "", errors.New("failed to get PVC from " + name + " DataSource") + } + + pvcNamespace, ok, err := unstructured.NestedString(unstructuredDataSource.UnstructuredContent(), "status", "source", "pvc", "namespace") + if err != nil { + log.Printf("Error getting PVC namespace from DataSource: %v", err) + return "", "", err + } + if !ok { + return "", "", errors.New("failed to get PVC namespace from " + name + " DataSource") + } + + return pvcNamespace, pvcName, nil + +} + +// Find the default storage class +func (v *VirtOperator) GetDefaultStorageClass() (*storagev1.StorageClass, error) { storageClasses, err := v.Clientset.StorageV1().StorageClasses().List(context.Background(), metav1.ListOptions{}) if err != nil { - return err + return nil, err } + var defaultStorageClass *storagev1.StorageClass for _, storageClass := range storageClasses.Items { - if storageClass.Annotations[isDefaultClass] == "true" { + if storageClass.Annotations["storageclass.kubernetes.io/is-default-class"] == "true" { log.Printf("Found default storage class: %s", storageClass.Name) defaultStorageClass = storageClass.DeepCopy() - if storageClass.VolumeBindingMode != nil && *storageClass.VolumeBindingMode == storagev1.VolumeBindingImmediate { - log.Println("Default storage class already set to Immediate") - return nil - } - break + return defaultStorageClass, nil } } - if 
defaultStorageClass == nil { - return errors.New("no default storage class found") + + return nil, errors.New("no default storage class found") +} + +// Make an Immediate-mode copy of the default storage class, regardless of +// its current VolumeBindingMode. +func (v *VirtOperator) CreateImmediateModeStorageClass(name string) error { + defaultStorageClass, err := v.GetDefaultStorageClass() + if err != nil { + return err } immediateStorageClass := defaultStorageClass immediateStorageClass.VolumeBindingMode = ptr.To[storagev1.VolumeBindingMode](storagev1.VolumeBindingImmediate) immediateStorageClass.Name = name immediateStorageClass.ResourceVersion = "" - immediateStorageClass.Annotations[isDefaultClass] = "false" + immediateStorageClass.Annotations["storageclass.kubernetes.io/is-default-class"] = "false" _, err = v.Clientset.StorageV1().StorageClasses().Create(context.Background(), immediateStorageClass, metav1.CreateOptions{}) + if apierrors.IsAlreadyExists(err) { + return nil + } + return err +} + +// Make a WaitForFirstConsumer-mode copy of the default storage class, +// regardless of its current VolumeBindingMode. +func (v *VirtOperator) CreateWaitForFirstConsumerStorageClass(name string) error { + defaultStorageClass, err := v.GetDefaultStorageClass() + if err != nil { + return err + } + + wffcStorageClass := defaultStorageClass + wffcStorageClass.VolumeBindingMode = ptr.To[storagev1.VolumeBindingMode](storagev1.VolumeBindingWaitForFirstConsumer) + wffcStorageClass.Name = name + wffcStorageClass.ResourceVersion = "" + wffcStorageClass.Annotations["storageclass.kubernetes.io/is-default-class"] = "false" + + _, err = v.Clientset.StorageV1().StorageClasses().Create(context.Background(), wffcStorageClass, metav1.CreateOptions{}) return err } func (v *VirtOperator) RemoveStorageClass(name string) error { - return v.Clientset.StorageV1().StorageClasses().Delete(context.Background(), name, metav1.DeleteOptions{}) + err := v.Clientset.StorageV1().StorageClasses().Delete(context.Background(), name, metav1.DeleteOptions{}) + if err != nil { + if apierrors.IsNotFound(err) { + return nil + } + return err + } + return nil } diff --git a/tests/e2e/sample-applications/hostedcontrolplanes/hypershift/hostedcluster-agent-capi-role.yaml b/tests/e2e/sample-applications/hostedcontrolplanes/hypershift/hostedcluster-agent-capi-role.yaml new file mode 100644 index 0000000000..068717ecb4 --- /dev/null +++ b/tests/e2e/sample-applications/hostedcontrolplanes/hypershift/hostedcluster-agent-capi-role.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: capi-provider-role + namespace: {{ .ClustersNamespace }} +rules: +- apiGroups: + - agent-install.openshift.io + resources: + - agents + verbs: + - '*' \ No newline at end of file diff --git a/tests/e2e/sample-applications/hostedcontrolplanes/hypershift/hostedcluster-agent.yaml b/tests/e2e/sample-applications/hostedcontrolplanes/hypershift/hostedcluster-agent.yaml new file mode 100644 index 0000000000..36faf960f3 --- /dev/null +++ b/tests/e2e/sample-applications/hostedcontrolplanes/hypershift/hostedcluster-agent.yaml @@ -0,0 +1,63 @@ +apiVersion: hypershift.openshift.io/v1beta1 +kind: HostedCluster +metadata: + name: {{ .HostedClusterName }} + namespace: {{ .ClustersNamespace }} + annotations: + hypershift.openshift.io/cleanup-cloud-resources: "true" + hypershift.openshift.io/skip-release-image-validation: "true" +spec: + configuration: + operatorhub: + disableAllDefaultSources: true +
controllerAvailabilityPolicy: SingleReplica + dns: + baseDomain: example.com + etcd: + managed: + storage: + persistentVolume: + size: 8Gi + type: PersistentVolume + managementType: Managed + fips: false + infraID: {{ .HostedClusterName }}-{{ .InfraIDSeed }} + networking: + clusterNetwork: + - cidr: 10.132.0.0/14 + networkType: OVNKubernetes + serviceNetwork: + - cidr: 172.31.0.0/16 + olmCatalogPlacement: management + platform: + type: Agent + agent: + agentNamespace: {{ .ClustersNamespace }} + pullSecret: + name: {{ .HostedClusterName }}-pull-secret + release: + image: {{ .HCOCPTestImage }} + secretEncryption: + aescbc: + activeKey: + name: {{ .HostedClusterName }}-etcd-encryption-key + type: aescbc + services: + - service: APIServer + servicePublishingStrategy: + type: NodePort + nodePort: + address: 10.0.133.132 + - service: Ignition + servicePublishingStrategy: + type: Route + - service: Konnectivity + servicePublishingStrategy: + type: Route + - service: OAuthServer + servicePublishingStrategy: + type: Route + - service: OIDC + servicePublishingStrategy: + type: Route + sshKey: {} diff --git a/tests/e2e/sample-applications/hostedcontrolplanes/hypershift/hostedcluster-etcd-enc-key.yaml b/tests/e2e/sample-applications/hostedcontrolplanes/hypershift/hostedcluster-etcd-enc-key.yaml new file mode 100644 index 0000000000..372e00e027 --- /dev/null +++ b/tests/e2e/sample-applications/hostedcontrolplanes/hypershift/hostedcluster-etcd-enc-key.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +data: + key: {{ .EtcdEncryptionKey }} +kind: Secret +metadata: + labels: + hypershift.openshift.io/safe-to-delete-with-cluster: "true" + name: {{ .HostedClusterName }}-etcd-encryption-key + namespace: {{ .ClustersNamespace }} +type: Opaque diff --git a/tests/e2e/sample-applications/hostedcontrolplanes/hypershift/hostedcluster-none.yaml b/tests/e2e/sample-applications/hostedcontrolplanes/hypershift/hostedcluster-none.yaml new file mode 100644 index 0000000000..8fc612b62f --- /dev/null +++ b/tests/e2e/sample-applications/hostedcontrolplanes/hypershift/hostedcluster-none.yaml @@ -0,0 +1,61 @@ +apiVersion: hypershift.openshift.io/v1beta1 +kind: HostedCluster +metadata: + name: {{ .HostedClusterName }} + namespace: {{ .ClustersNamespace }} + annotations: + hypershift.openshift.io/cleanup-cloud-resources: "true" + hypershift.openshift.io/skip-release-image-validation: "true" +spec: + configuration: + operatorhub: + disableAllDefaultSources: true + controllerAvailabilityPolicy: SingleReplica + dns: + baseDomain: example.com + etcd: + managed: + storage: + persistentVolume: + size: 8Gi + type: PersistentVolume + managementType: Managed + fips: false + infraID: {{ .HostedClusterName }}-{{ .InfraIDSeed }} + networking: + clusterNetwork: + - cidr: 10.132.0.0/14 + networkType: OVNKubernetes + serviceNetwork: + - cidr: 172.31.0.0/16 + olmCatalogPlacement: management + platform: + type: None + pullSecret: + name: {{ .HostedClusterName }}-pull-secret + release: + image: {{ .HCOCPTestImage }} + secretEncryption: + aescbc: + activeKey: + name: {{ .HostedClusterName }}-etcd-encryption-key + type: aescbc + services: + - service: APIServer + servicePublishingStrategy: + type: NodePort + nodePort: + address: 10.0.133.132 + - service: Ignition + servicePublishingStrategy: + type: Route + - service: Konnectivity + servicePublishingStrategy: + type: Route + - service: OAuthServer + servicePublishingStrategy: + type: Route + - service: OIDC + servicePublishingStrategy: + type: Route + sshKey: {} diff --git 
a/tests/e2e/sample-applications/hostedcontrolplanes/hypershift/hostedcluster-pull-secret.yaml b/tests/e2e/sample-applications/hostedcontrolplanes/hypershift/hostedcluster-pull-secret.yaml new file mode 100644 index 0000000000..288008582e --- /dev/null +++ b/tests/e2e/sample-applications/hostedcontrolplanes/hypershift/hostedcluster-pull-secret.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +data: + .dockerconfigjson: {{ .PullSecret }} +kind: Secret +metadata: + labels: + hypershift.openshift.io/safe-to-delete-with-cluster: "true" + name: {{ .HostedClusterName }}-pull-secret + namespace: {{ .ClustersNamespace }} diff --git a/tests/e2e/sample-applications/hostedcontrolplanes/mce/mce-operand.yaml b/tests/e2e/sample-applications/hostedcontrolplanes/mce/mce-operand.yaml new file mode 100644 index 0000000000..784a8aea8d --- /dev/null +++ b/tests/e2e/sample-applications/hostedcontrolplanes/mce/mce-operand.yaml @@ -0,0 +1,48 @@ +apiVersion: multicluster.openshift.io/v1 +kind: MultiClusterEngine +metadata: + name: {{ .MCEOperandName }} + namespace: {{ .MCEOperandNamespace }} +spec: + availabilityConfig: Basic + overrides: + components: + - configOverrides: {} + enabled: true + name: assisted-service + - configOverrides: {} + enabled: true + name: cluster-lifecycle + - configOverrides: {} + enabled: true + name: cluster-manager + - configOverrides: {} + enabled: true + name: discovery + - configOverrides: {} + enabled: true + name: hive + - configOverrides: {} + enabled: true + name: server-foundation + - configOverrides: {} + enabled: true + name: local-cluster + - configOverrides: {} + enabled: true + name: hypershift-local-hosting + - configOverrides: {} + enabled: true + name: console-mce + - configOverrides: {} + enabled: true + name: cluster-proxy-addon + - configOverrides: {} + enabled: true + name: hypershift + - configOverrides: {} + enabled: true + name: managedserviceaccount + - configOverrides: {} + enabled: false + name: image-based-install-operator \ No newline at end of file diff --git a/tests/e2e/sample-applications/minimal-8csivol/build/minimal-block-3csivol/Dockerfile b/tests/e2e/sample-applications/minimal-8csivol/build/minimal-block-3csivol/Dockerfile new file mode 100644 index 0000000000..64c6bcf0c7 --- /dev/null +++ b/tests/e2e/sample-applications/minimal-8csivol/build/minimal-block-3csivol/Dockerfile @@ -0,0 +1,7 @@ +FROM fedora +USER root +RUN dnf install -y e2fsprogs +RUN mkdir -p /mnt/volume1 +RUN mkdir -p /mnt/volume2 +RUN mkdir -p /mnt/volume3 +USER 1001 diff --git a/tests/e2e/sample-applications/minimal-8csivol/minimal-block-3csivol.yaml b/tests/e2e/sample-applications/minimal-8csivol/minimal-block-3csivol.yaml new file mode 100644 index 0000000000..47f5b627bc --- /dev/null +++ b/tests/e2e/sample-applications/minimal-8csivol/minimal-block-3csivol.yaml @@ -0,0 +1,143 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: minimal-block-3csivol +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: minimal-block-3csivol-sa + namespace: minimal-block-3csivol +--- +# the command `oc adm policy add-scc-to-user privileged -z default -n minimal-block-3csivol +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: default-privileged-binding + namespace: minimal-block-3csivol +subjects: +- kind: ServiceAccount + name: default +roleRef: + kind: ClusterRole + name: system:openshift:scc:privileged + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: minimal-block-3csivol + namespace: minimal-block-3csivol +spec: 
+ replicas: 1 + selector: + matchLabels: + app: minimal-block-3csivol + template: + metadata: + labels: + app: minimal-block-3csivol + spec: + serviceAccountName: default + containers: + - image: quay.io/migtools/3csivol-block:test + resources: + requests: + cpu: 100m + memory: 100Mi + limits: + cpu: 100m + memory: 100Mi + name: setup-block-device + securityContext: + privileged: true + runAsUser: 0 + command: + - "sh" + - "-c" + - | + ls -la /dev/xvdx* + for i in {1..3}; do + DEVICE="/dev/xvdx$i" + MOUNT_POINT="/mnt/volume$i" + if [ ! -e $DEVICE ]; then + echo "$DEVICE does not exist." + exit 1 + fi + if dumpe2fs -h $DEVICE 2>/dev/null; then + echo "Filesystem already exists on $DEVICE" + else + echo "Formatting $DEVICE" + mkfs.ext4 $DEVICE + echo $? + fi + echo "Mounting $DEVICE in the $MOUNT_POINT" + mount $DEVICE $MOUNT_POINT + echo $? + echo $(date +%s) > $MOUNT_POINT/format_timestamp + dd if=/dev/zero of=$MOUNT_POINT/binary_$(date +%s).file bs=1024 count=1024 + done + while true; do + ls -l /mnt/volume*/* | grep -v lost+found | grep -v total + sleep 10 + done + volumeDevices: + - name: block-volume-pv-1 + devicePath: /dev/xvdx1 + - name: block-volume-pv-2 + devicePath: /dev/xvdx2 + - name: block-volume-pv-3 + devicePath: /dev/xvdx3 + volumes: + - name: block-volume-pv-1 + persistentVolumeClaim: + claimName: block-volume-pv-1 + volumeMode: Block + - name: block-volume-pv-2 + persistentVolumeClaim: + claimName: block-volume-pv-2 + volumeMode: Block + - name: block-volume-pv-3 + persistentVolumeClaim: + claimName: block-volume-pv-3 + volumeMode: Block +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: block-volume-pv-1 + namespace: minimal-block-3csivol +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 2Gi + volumeMode: Block +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: block-volume-pv-2 + namespace: minimal-block-3csivol +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 2Gi + volumeMode: Block +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: block-volume-pv-3 + namespace: minimal-block-3csivol +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 2Gi + volumeMode: Block diff --git a/tests/e2e/sample-applications/mongo-persistent/mongo-persistent-block.yaml b/tests/e2e/sample-applications/mongo-persistent/mongo-persistent-block.yaml index 470d382083..63074af371 100644 --- a/tests/e2e/sample-applications/mongo-persistent/mongo-persistent-block.yaml +++ b/tests/e2e/sample-applications/mongo-persistent/mongo-persistent-block.yaml @@ -63,6 +63,7 @@ items: labels: e2e-app: "true" app: mongo + curl-tool: "true" spec: serviceAccountName: mongo-persistent-sa securityContext: @@ -147,6 +148,9 @@ items: timeoutSeconds: 2 successThreshold: 1 failureThreshold: 40 # 40x30sec before restart pod + - image: docker.io/curlimages/curl:8.5.0 + name: curl-tool + command: ["/bin/sleep", "infinity"] volumes: - name: block-volume-pv persistentVolumeClaim: @@ -193,7 +197,7 @@ items: spec: containers: - name: todolist - image: quay.io/migtools/oadp-ci-todolist-mongo-go:latest + image: quay.io/migtools/oadp-ci-todolist-mongo-go-1 env: - name: foo value: bar diff --git a/tests/e2e/sample-applications/mongo-persistent/mongo-persistent-csi.yaml b/tests/e2e/sample-applications/mongo-persistent/mongo-persistent-csi.yaml index 25c2c87a5b..614c1a1af4 100644 --- a/tests/e2e/sample-applications/mongo-persistent/mongo-persistent-csi.yaml +++ 
b/tests/e2e/sample-applications/mongo-persistent/mongo-persistent-csi.yaml @@ -63,6 +63,7 @@ items: labels: e2e-app: "true" app: mongo + curl-tool: "true" spec: serviceAccountName: mongo-persistent-sa containers: @@ -104,6 +105,9 @@ items: timeoutSeconds: 2 successThreshold: 1 failureThreshold: 40 # 40x30sec before restart pod + - image: docker.io/curlimages/curl:8.5.0 + name: curl-tool + command: ["/bin/sleep", "infinity"] volumes: - name: mongo-data persistentVolumeClaim: @@ -150,7 +154,7 @@ items: spec: containers: - name: todolist - image: quay.io/migtools/oadp-ci-todolist-mongo-go:latest + image: quay.io/migtools/oadp-ci-todolist-mongo-go-1 env: - name: foo value: bar diff --git a/tests/e2e/sample-applications/mongo-persistent/mongo-persistent.yaml b/tests/e2e/sample-applications/mongo-persistent/mongo-persistent.yaml index bae860a354..3ea05e2a6d 100644 --- a/tests/e2e/sample-applications/mongo-persistent/mongo-persistent.yaml +++ b/tests/e2e/sample-applications/mongo-persistent/mongo-persistent.yaml @@ -76,6 +76,7 @@ items: labels: e2e-app: "true" app: mongo + curl-tool: "true" spec: serviceAccountName: mongo-persistent-sa containers: @@ -117,6 +118,9 @@ items: timeoutSeconds: 2 successThreshold: 1 failureThreshold: 40 # 40x30sec before restart pod + - image: docker.io/curlimages/curl:8.5.0 + name: curl-tool + command: ["/bin/sleep", "infinity"] volumes: - name: mongo-data persistentVolumeClaim: @@ -163,7 +167,7 @@ items: spec: containers: - name: todolist - image: quay.io/migtools/oadp-ci-todolist-mongo-go:latest + image: quay.io/migtools/oadp-ci-todolist-mongo-go-1 env: - name: foo value: bar diff --git a/tests/e2e/sample-applications/mongo-persistent/pvc/openstack.yaml b/tests/e2e/sample-applications/mongo-persistent/pvc/openstack.yaml new file mode 100644 index 0000000000..347d46cd66 --- /dev/null +++ b/tests/e2e/sample-applications/mongo-persistent/pvc/openstack.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: List +items: + - apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: mongo + namespace: mongo-persistent + labels: + app: mongo + spec: + accessModes: + - ReadWriteOnce + storageClassName: ocs-storagecluster-ceph-rbd + resources: + requests: + storage: 1Gi diff --git a/tests/e2e/sample-applications/mysql-persistent/mysql-persistent-csi.yaml b/tests/e2e/sample-applications/mysql-persistent/mysql-persistent-csi.yaml index 1e1b43072d..08cbf922f1 100644 --- a/tests/e2e/sample-applications/mysql-persistent/mysql-persistent-csi.yaml +++ b/tests/e2e/sample-applications/mysql-persistent/mysql-persistent-csi.yaml @@ -70,6 +70,7 @@ items: labels: e2e-app: "true" app: mysql + curl-tool: "true" spec: securityContext: runAsGroup: 27 @@ -126,6 +127,9 @@ items: timeoutSeconds: 2 successThreshold: 1 failureThreshold: 40 # 40x30sec before restart pod + - image: docker.io/curlimages/curl:8.5.0 + name: curl-tool + command: ["/bin/sleep", "infinity"] volumes: - name: mysql-data persistentVolumeClaim: @@ -172,7 +176,7 @@ items: spec: containers: - name: todolist - image: quay.io/konveyor/todolist-mariadb-go:v2_4 + image: quay.io/migtools/oadp-ci-todolist-mariadb-go:latest env: - name: foo value: bar diff --git a/tests/e2e/sample-applications/mysql-persistent/mysql-persistent-twovol-csi.yaml b/tests/e2e/sample-applications/mysql-persistent/mysql-persistent-twovol-csi.yaml index 3bde6f2203..e7b758e271 100644 --- a/tests/e2e/sample-applications/mysql-persistent/mysql-persistent-twovol-csi.yaml +++ b/tests/e2e/sample-applications/mysql-persistent/mysql-persistent-twovol-csi.yaml @@ -70,6 
+70,7 @@ items: labels: e2e-app: "true" app: mysql + curl-tool: "true" spec: serviceAccountName: mysql-persistent-sa containers: @@ -118,6 +119,9 @@ items: timeoutSeconds: 2 successThreshold: 1 failureThreshold: 40 # 40x30sec before restart pod + - image: docker.io/curlimages/curl:8.5.0 + name: curl-tool + command: ["/bin/sleep", "infinity"] volumes: - name: mysql-data persistentVolumeClaim: @@ -163,7 +167,7 @@ items: spec: containers: - name: todolist - image: quay.io/konveyor/todolist-mariadb-go:v2_4 + image: quay.io/migtools/oadp-ci-todolist-mariadb-go:latest env: - name: foo value: bar diff --git a/tests/e2e/sample-applications/mysql-persistent/mysql-persistent.yaml b/tests/e2e/sample-applications/mysql-persistent/mysql-persistent.yaml index fb05f40241..d658c9e082 100644 --- a/tests/e2e/sample-applications/mysql-persistent/mysql-persistent.yaml +++ b/tests/e2e/sample-applications/mysql-persistent/mysql-persistent.yaml @@ -83,6 +83,7 @@ items: labels: e2e-app: "true" app: mysql + curl-tool: "true" spec: securityContext: runAsNonRoot: true @@ -139,6 +140,9 @@ items: timeoutSeconds: 2 successThreshold: 1 failureThreshold: 40 # 40x30sec before restart pod + - image: docker.io/curlimages/curl:8.5.0 + name: curl-tool + command: ["/bin/sleep", "infinity"] volumes: - name: mysql-data persistentVolumeClaim: @@ -185,7 +189,7 @@ items: spec: containers: - name: todolist - image: quay.io/konveyor/todolist-mariadb-go:v2_4 + image: quay.io/migtools/oadp-ci-todolist-mariadb-go:latest env: - name: foo value: bar diff --git a/tests/e2e/sample-applications/mysql-persistent/pvc-twoVol/openstack.yaml b/tests/e2e/sample-applications/mysql-persistent/pvc-twoVol/openstack.yaml new file mode 100644 index 0000000000..867dcfeff0 --- /dev/null +++ b/tests/e2e/sample-applications/mysql-persistent/pvc-twoVol/openstack.yaml @@ -0,0 +1,31 @@ +apiVersion: v1 +kind: List +items: + - apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: mysql + namespace: mysql-persistent + labels: + app: mysql + spec: + accessModes: + - ReadWriteOnce + storageClassName: ocs-storagecluster-ceph-rbd + resources: + requests: + storage: 1Gi + - apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: applog + namespace: mysql-persistent + labels: + app: todolist + spec: + accessModes: + - ReadWriteOnce + storageClassName: ocs-storagecluster-ceph-rbd + resources: + requests: + storage: 1Gi diff --git a/tests/e2e/sample-applications/mysql-persistent/pvc/openstack.yaml b/tests/e2e/sample-applications/mysql-persistent/pvc/openstack.yaml new file mode 100644 index 0000000000..df94805420 --- /dev/null +++ b/tests/e2e/sample-applications/mysql-persistent/pvc/openstack.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: List +items: + - apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: mysql + namespace: mysql-persistent + labels: + app: mysql + spec: + accessModes: + - ReadWriteOnce + storageClassName: ocs-storagecluster-ceph-rbd + resources: + requests: + storage: 1Gi diff --git a/tests/e2e/sample-applications/snapclass-csi/openstack.yaml b/tests/e2e/sample-applications/snapclass-csi/openstack.yaml new file mode 100644 index 0000000000..2694b11c8c --- /dev/null +++ b/tests/e2e/sample-applications/snapclass-csi/openstack.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: List +items: + - apiVersion: snapshot.storage.k8s.io/v1 + kind: VolumeSnapshotClass + metadata: + name: oadp-example-snapclass + labels: + velero.io/csi-volumesnapshot-class: 'true' + driver: openshift-storage.rbd.csi.ceph.com + deletionPolicy: Retain + 
parameters: + clusterID: openshift-storage + csi.storage.k8s.io/snapshotter-secret-name: rook-csi-rbd-provisioner + csi.storage.k8s.io/snapshotter-secret-namespace: openshift-storage diff --git a/tests/e2e/sample-applications/virtual-machines/cirros-test/cirros-rbac.yaml b/tests/e2e/sample-applications/virtual-machines/cirros-test/cirros-rbac.yaml new file mode 100644 index 0000000000..383601b219 --- /dev/null +++ b/tests/e2e/sample-applications/virtual-machines/cirros-test/cirros-rbac.yaml @@ -0,0 +1,27 @@ +apiVersion: v1 +kind: List +items: + - apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: dv-cloner-role + rules: + - apiGroups: ["cdi.kubevirt.io"] + resources: ["datavolumes/source"] + verbs: ["*"] + - apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + metadata: + name: cirros-test-cloner + namespace: openshift-virtualization-os-images + subjects: + - kind: ServiceAccount + name: default + namespace: cirros-test + - kind: ServiceAccount + name: default + namespace: mysql-persistent + roleRef: + kind: ClusterRole + name: dv-cloner-role + apiGroup: rbac.authorization.k8s.io \ No newline at end of file diff --git a/tests/e2e/sample-applications/virtual-machines/cirros-test/cirros-test-immediate.yaml b/tests/e2e/sample-applications/virtual-machines/cirros-test/cirros-test-immediate.yaml index 72658c55a0..6c61975954 100644 --- a/tests/e2e/sample-applications/virtual-machines/cirros-test/cirros-test-immediate.yaml +++ b/tests/e2e/sample-applications/virtual-machines/cirros-test/cirros-test-immediate.yaml @@ -45,4 +45,4 @@ items: volumes: - name: rootdisk persistentVolumeClaim: - claimName: cirros-test-disk + claimName: cirros-test-disk \ No newline at end of file diff --git a/tests/e2e/sample-applications/virtual-machines/cirros-test/cirros-test.yaml b/tests/e2e/sample-applications/virtual-machines/cirros-test/cirros-test.yaml index 172561af0b..4226f0e394 100644 --- a/tests/e2e/sample-applications/virtual-machines/cirros-test/cirros-test.yaml +++ b/tests/e2e/sample-applications/virtual-machines/cirros-test/cirros-test.yaml @@ -21,6 +21,7 @@ items: resources: requests: storage: 150Mi + storageClassName: test-sc-wffc running: true template: metadata: @@ -44,4 +45,4 @@ items: volumes: - name: rootdisk persistentVolumeClaim: - claimName: cirros-test-disk + claimName: cirros-test-disk \ No newline at end of file diff --git a/tests/e2e/sample-applications/virtual-machines/fedora-todolist/fedora-todolist.yaml b/tests/e2e/sample-applications/virtual-machines/fedora-todolist/fedora-todolist.yaml index 7ba8bf4289..b09dedb181 100644 --- a/tests/e2e/sample-applications/virtual-machines/fedora-todolist/fedora-todolist.yaml +++ b/tests/e2e/sample-applications/virtual-machines/fedora-todolist/fedora-todolist.yaml @@ -164,8 +164,8 @@ items: selector: app: todolist service: todolist - - apiVersion: apps.openshift.io/v1 - kind: DeploymentConfig + - apiVersion: apps/v1 + kind: Deployment metadata: name: todolist namespace: mysql-persistent @@ -176,8 +176,9 @@ items: spec: replicas: 1 selector: - app: todolist - service: todolist + matchLabels: + app: todolist + service: todolist strategy: type: Recreate template: diff --git a/tests/e2e/scripts/aws_settings.sh b/tests/e2e/scripts/aws_settings.sh index f8101f767b..2064ef07e5 100644 --- a/tests/e2e/scripts/aws_settings.sh +++ b/tests/e2e/scripts/aws_settings.sh @@ -3,10 +3,21 @@ cat > $TMP_DIR/oadpcreds < $TMP_DIR/oadpcreds < $TMP_DIR/oadpcreds < $TMP_DIR/oadpcreds < $TMP_DIR/oadpcreds < $TMP_DIR/oadpcreds < 
$TMP_DIR/oadpcreds < $TMP_DIR/oadpcreds < $TMP_DIR/oadpcreds < $TMP_DIR/oadpcreds diff --git a/tests/e2e/templates/default_settings.json b/tests/e2e/templates/default_settings.json index acbdb6917e..a3596b2634 100644 --- a/tests/e2e/templates/default_settings.json +++ b/tests/e2e/templates/default_settings.json @@ -5,7 +5,8 @@ "defaultPlugins": [ "openshift", "aws", - "kubevirt" + "kubevirt", + "hypershift" ] } }, @@ -19,10 +20,6 @@ }, "objectStorage":{ "bucket": "myBucket" - }, - "credential":{ - "name": "cloud-credentials", - "key": "cloud" } } } @@ -31,7 +28,7 @@ { "velero": { "provider": "aws", - "config": { + "config": { "profile": "default", "region": "us-east-1" } diff --git a/tests/e2e/upgrade_suite_test.go b/tests/e2e/upgrade_suite_test.go index e1b774257f..4f5feb9e01 100644 --- a/tests/e2e/upgrade_suite_test.go +++ b/tests/e2e/upgrade_suite_test.go @@ -2,16 +2,18 @@ package e2e_test import ( "context" + "fmt" "log" "time" "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" - "github.com/operator-framework/api/pkg/operators/v1" + operatorv1 "github.com/operator-framework/api/pkg/operators/v1" "github.com/operator-framework/api/pkg/operators/v1alpha1" velerov1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" @@ -28,20 +30,20 @@ type channelUpgradeCase struct { var _ = ginkgo.Describe("OADP upgrade scenarios", ginkgo.Ordered, func() { ginkgo.DescribeTable("Upgrade OADP channel tests", func(scenario channelUpgradeCase) { - // Create operatorGroup and subscription with previous channel stable-1.3 + // Create OperatorGroup and Subscription with previous channel stable-1.4 log.Print("Checking if OperatorGroup needs to be created") - operatorGroupList := v1.OperatorGroupList{} + operatorGroupList := operatorv1.OperatorGroupList{} err := runTimeClientForSuiteRun.List(context.Background(), &operatorGroupList, client.InNamespace(namespace)) gomega.Expect(err).To(gomega.BeNil()) gomega.Expect(len(operatorGroupList.Items) > 1).To(gomega.BeFalse()) if len(operatorGroupList.Items) == 0 { log.Print("Creating OperatorGroup oadp-operator-group") - operatorGroup := v1.OperatorGroup{ + operatorGroup := operatorv1.OperatorGroup{ ObjectMeta: metav1.ObjectMeta{ Name: "oadp-operator-group", Namespace: namespace, }, - Spec: v1.OperatorGroupSpec{ + Spec: operatorv1.OperatorGroupSpec{ TargetNamespaces: []string{namespace}, }, } @@ -73,7 +75,7 @@ var _ = ginkgo.Describe("OADP upgrade scenarios", ginkgo.Ordered, func() { err = runTimeClientForSuiteRun.Create(context.Background(), &subscription) gomega.Expect(err).To(gomega.BeNil()) - // Check that after 5 minutes csv oadp-operator.v1.3.0 has status.phase Succeeded + // Check that after 5 minutes ClusterServiceVersion oadp-operator.v1.4.0 has status.phase Succeeded log.Print("Checking if previous channel CSV has status.phase Succeeded") subscriptionHelper := lib.Subscription{Subscription: &subscription} gomega.Eventually(subscriptionHelper.CsvIsReady(runTimeClientForSuiteRun), time.Minute*5, time.Second*5).Should(gomega.BeTrue()) @@ -85,14 +87,11 @@ var _ = ginkgo.Describe("OADP upgrade scenarios", ginkgo.Ordered, func() { Configuration: &oadpv1alpha1.ApplicationConfig{ Velero: &oadpv1alpha1.VeleroConfig{ LogLevel: "debug", - DefaultPlugins: append(dpaCR.VeleroDefaultPlugins, oadpv1alpha1.DefaultPluginCSI), - FeatureFlags: []string{velerov1.CSIFeatureFlag}, + DefaultPlugins: 
dpaCR.VeleroDefaultPlugins, }, - NodeAgent: &oadpv1alpha1.NodeAgentConfig{ - UploaderType: "kopia", + Restic: &oadpv1alpha1.ResticConfig{ NodeAgentCommonFields: oadpv1alpha1.NodeAgentCommonFields{ - PodConfig: &oadpv1alpha1.PodConfig{}, - Enable: ptr.To(false), + Enable: ptr.To(true), }, }, }, @@ -118,41 +117,40 @@ var _ = ginkgo.Describe("OADP upgrade scenarios", ginkgo.Ordered, func() { }, }, } - err = dpaCR.CreateOrUpdate(runTimeClientForSuiteRun, dpaSpec) + err = dpaCR.CreateOrUpdate(dpaSpec) gomega.Expect(err).To(gomega.BeNil()) // check that DPA is reconciled true - log.Print("Checking if DPA is reconciled") + log.Print("Checking if DPA is reconciled true") gomega.Eventually(dpaCR.IsReconciledTrue(), time.Minute*3, time.Second*5).Should(gomega.BeTrue()) - // check that velero pod is running - log.Print("Checking if velero pod is running") + // check that Velero Pod is running + log.Print("Checking if Velero Pod is running") gomega.Eventually(lib.VeleroPodIsRunning(kubernetesClientForSuiteRun, namespace), time.Minute*3, time.Second*5).Should(gomega.BeTrue()) + // check that NodeAgent Pods are running + log.Printf("Checking if Node Agent Pods are running") + gomega.Eventually(lib.AreNodeAgentPodsRunning(kubernetesClientForSuiteRun, namespace), time.Minute*3, time.Second*5).Should(gomega.BeTrue()) + // check if BSL is available log.Print("Checking if BSL is available") gomega.Eventually(dpaCR.BSLsAreAvailable(), time.Minute*3, time.Second*5).Should(gomega.BeTrue()) - // Velero api changes: - // check that BSL had checksumAlgorithm not set - bsls, err := dpaCR.ListBSLs() - gomega.Expect(err).To(gomega.BeNil()) - _, ok := bsls.Items[0].Spec.Config["checksumAlgorithm"] - gomega.Expect(ok).To(gomega.BeFalse()) - // check that velero Pod had 3 init containers (aws, openshift, csi) - velero, err := lib.GetVeleroPod(kubernetesClientForSuiteRun, namespace) - gomega.Expect(err).To(gomega.BeNil()) - gomega.Expect(len(velero.Spec.InitContainers)).To(gomega.Equal(3)) + // TODO Velero api changes + + // TODO OADP api changes // TODO backup/restore - // Update spec.channel in subscription to stable-1.4 + // Update spec.channel in Subscription to stable log.Print("Updating Subscription oadp-operator spec.channel") + err = runTimeClientForSuiteRun.Get(context.Background(), types.NamespacedName{Namespace: subscription.Namespace, Name: subscription.Name}, &subscription) + gomega.Expect(err).To(gomega.BeNil()) subscription.Spec.Channel = scenario.next err = runTimeClientForSuiteRun.Update(context.Background(), &subscription) gomega.Expect(err).To(gomega.BeNil()) - // Check that after 8 minutes csv oadp-operator.v1.4.0 has status.phase Installing -> Succeeded + // Check that after 8 minutes ClusterServiceVersion oadp-operator.v99.0.0 has status.phase Installing -> Succeeded log.Print("Waiting for next channel CSV to be created") gomega.Eventually(subscriptionHelper.CsvIsInstalling(runTimeClientForSuiteRun), time.Minute*3, time.Second*5).Should(gomega.BeTrue()) log.Print("Checking if next channel CSV has status.phase Succeeded") @@ -163,43 +161,81 @@ var _ = ginkgo.Describe("OADP upgrade scenarios", ginkgo.Ordered, func() { // check DPA after controller-manager Pod is running gomega.Eventually(lib.ManagerPodIsUp(kubernetesClientForSuiteRun, namespace), time.Minute*8, time.Second*15).Should(gomega.BeTrue()) + // check if updated DPA is reconciled + log.Print("Checking if DPA was reconciled after update") + gomega.Eventually(dpaCR.IsReconciledFalse("Delete restic object from spec.configuration, use 
spec.configuration.nodeAgent instead"), time.Minute*3, time.Second*5).Should(gomega.BeTrue()) + + log.Print("Updating DPA") + dpaSpec.Configuration.Restic = nil + dpaSpec.Configuration.NodeAgent = &oadpv1alpha1.NodeAgentConfig{ + UploaderType: "restic", + NodeAgentCommonFields: oadpv1alpha1.NodeAgentCommonFields{ + Enable: ptr.To(true), + }, + } + err = dpaCR.CreateOrUpdate(dpaSpec) + gomega.Expect(err).To(gomega.BeNil()) + // check if updated DPA is reconciled log.Print("Checking if DPA was reconciled after update") // TODO do not use Consistently, using because no field in DPA is updated telling when it was last reconciled gomega.Consistently(dpaCR.IsReconciledTrue(), time.Minute*3, time.Second*15).Should(gomega.BeTrue()) - // check if updated velero pod is running - log.Print("Checking if velero pod was recreated after update") + // check if updated Velero Pod is running + log.Print("Checking if Velero Pod was recreated after update") gomega.Eventually(lib.VeleroPodIsUpdated(kubernetesClientForSuiteRun, namespace, timeAfterUpgrade), time.Minute*3, time.Second*5).Should(gomega.BeTrue()) - log.Print("Checking if velero pod is running") + log.Print("Checking if Velero Pod is running") gomega.Eventually(lib.VeleroPodIsRunning(kubernetesClientForSuiteRun, namespace), time.Minute*3, time.Second*5).Should(gomega.BeTrue()) timeAfterVeleroIsRunning := time.Now() + // check if updated NodeAgent Pods are running + log.Print("Checking if Node Agent Pods were recreated after update") + gomega.Eventually(func() (bool, error) { + nodeAgentDaemonSet, err := lib.GetNodeAgentDaemonSet(kubernetesClientForSuiteRun, namespace) + if err != nil { + return false, err + } + + numScheduled := nodeAgentDaemonSet.Status.CurrentNumberScheduled + numDesired := nodeAgentDaemonSet.Status.DesiredNumberScheduled + // check correct number of NodeAgent Pods are initialized + if numScheduled != numDesired { + return false, fmt.Errorf("wrong number of Node Agent Pods") + } + + podList, err := lib.GetAllPodsWithLabel(kubernetesClientForSuiteRun, namespace, "name=node-agent") + if err != nil { + return false, err + } + if err != nil { + return false, err + } + for _, pod := range podList.Items { + if !pod.CreationTimestamp.After(timeAfterUpgrade) { + return false, fmt.Errorf("not all Node Agent Pods were updated") + } + } + return true, nil + }, time.Minute*3, time.Second*5).Should(gomega.BeTrue()) + log.Printf("Checking if Node Agent Pods are running") + gomega.Eventually(lib.AreNodeAgentPodsRunning(kubernetesClientForSuiteRun, namespace), time.Minute*3, time.Second*5).Should(gomega.BeTrue()) + // check if updated BSL is available log.Print("Checking if BSL was reconciled after update") gomega.Eventually(dpaCR.BSLsAreUpdated(timeAfterVeleroIsRunning), time.Minute*3, time.Second*5).Should(gomega.BeTrue()) log.Print("Checking if BSL is available") gomega.Eventually(dpaCR.BSLsAreAvailable(), time.Minute*3, time.Second*5).Should(gomega.BeTrue()) - // No OADP api changes + // TODO Velero api changes - // Velero api changes: - // check that BSL has checksumAlgorithm set to empty - bsls, err = dpaCR.ListBSLs() - gomega.Expect(err).To(gomega.BeNil()) - value, ok := bsls.Items[0].Spec.Config["checksumAlgorithm"] - gomega.Expect(ok).To(gomega.BeTrue()) - gomega.Expect(value).To(gomega.Equal("")) - // check that velero Pod has 2 init containers (aws, openshift) - velero, err = lib.GetVeleroPod(kubernetesClientForSuiteRun, namespace) - gomega.Expect(err).To(gomega.BeNil()) - 
gomega.Expect(len(velero.Spec.InitContainers)).To(gomega.Equal(2)) - // TODO check that CSI works after code integration + // TODO OADP api changes + + // TODO backup/restore }, - ginkgo.Entry("Upgrade from stable-1.3 (oadp-1.3 branch) to stable-1.4 (oadp-1.4 branch) channel", ginkgo.Label("upgrade", "aws", "ibmcloud"), channelUpgradeCase{ - previous: "stable-1.3", - next: "stable-1.4", + ginkgo.Entry("Upgrade from stable-1.4 (oadp-1.4 branch) to stable (oadp-1.5 branch) channel", ginkgo.Label("upgrade"), channelUpgradeCase{ + previous: "stable-1.4", + next: "stable", // to test production // production: true, }), diff --git a/tests/e2e/virt_backup_restore_suite_test.go b/tests/e2e/virt_backup_restore_suite_test.go index 59a1f55673..8a3698694c 100644 --- a/tests/e2e/virt_backup_restore_suite_test.go +++ b/tests/e2e/virt_backup_restore_suite_test.go @@ -8,8 +8,8 @@ import ( "strings" "time" - . "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" + "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" "k8s.io/apimachinery/pkg/util/wait" "sigs.k8s.io/controller-runtime/pkg/client" @@ -17,6 +17,25 @@ import ( "github.com/openshift/oadp-operator/tests/e2e/lib" ) +// TODO duplication of todoListReady in tests/e2e/backup_restore_suite_test.go +func vmTodoListReady(preBackupState bool, twoVol bool, database string) VerificationFunction { + return VerificationFunction(func(ocClient client.Client, namespace string) error { + log.Printf("checking for the NAMESPACE: %s", namespace) + gomega.Eventually(lib.IsDeploymentReady(ocClient, namespace, database), time.Minute*10, time.Second*10).Should(gomega.BeTrue()) + // in VM tests, DeploymentConfig was refactored to Deployment (to avoid deprecation warnings) + // gomega.Eventually(lib.IsDCReady(ocClient, namespace, "todolist"), time.Minute*10, time.Second*10).Should(gomega.BeTrue()) + gomega.Eventually(lib.IsDeploymentReady(ocClient, namespace, "todolist"), time.Minute*10, time.Second*10).Should(gomega.BeTrue()) + gomega.Eventually(lib.AreApplicationPodsRunning(kubernetesClientForSuiteRun, namespace), time.Minute*9, time.Second*5).Should(gomega.BeTrue()) + // This test confirms that SCC restore logic in our plugin is working + err := lib.DoesSCCExist(ocClient, database+"-persistent-scc") + if err != nil { + return err + } + err = lib.VerifyBackupRestoreData(runTimeClientForSuiteRun, kubernetesClientForSuiteRun, kubeConfig, artifact_dir, namespace, "todolist-route", "todolist", "todolist", preBackupState, twoVol) + return err + }) +} + func getLatestCirrosImageURL() (string, error) { cirrosVersionURL := "https://download.cirros-cloud.net/version/released" @@ -48,7 +67,7 @@ func vmPoweredOff(vmnamespace, vmname string) VerificationFunction { log.Printf("VM status is: %s\n", status) return status == "Stopped" } - Eventually(isOff, time.Minute*10, time.Second*10).Should(BeTrue()) + gomega.Eventually(isOff, time.Minute*10, time.Second*10).Should(gomega.BeTrue()) return nil }) } @@ -60,20 +79,20 @@ type VmBackupRestoreCase struct { PowerState string } -func runVmBackupAndRestore(brCase VmBackupRestoreCase, expectedErr error, updateLastBRcase func(brCase VmBackupRestoreCase), updateLastInstallTime func(), v *lib.VirtOperator) { +func runVmBackupAndRestore(brCase VmBackupRestoreCase, updateLastBRcase func(brCase VmBackupRestoreCase), v *lib.VirtOperator) { updateLastBRcase(brCase) // Create DPA backupName, restoreName := prepareBackupAndRestore(brCase.BackupRestoreCase, func() {}) err := lib.CreateNamespace(v.Clientset, brCase.Namespace) - 
Expect(err).To(BeNil()) + gomega.Expect(err).To(gomega.BeNil()) err = lib.InstallApplication(v.Client, brCase.Template) if err != nil { fmt.Printf("Failed to install VM template %s: %v", brCase.Template, err) } - Expect(err).To(BeNil()) + gomega.Expect(err).To(gomega.BeNil()) // Wait for VM to start, then give some time for cloud-init to run. // Afterward, run through the standard application verification to make sure @@ -82,7 +101,7 @@ func runVmBackupAndRestore(brCase VmBackupRestoreCase, expectedErr error, update status, err := v.GetVmStatus(brCase.Namespace, brCase.Name) return status == "Running", err }) - Expect(err).ToNot(HaveOccurred()) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) // TODO: find a better way to check for clout-init completion if brCase.InitDelay > 0*time.Second { @@ -95,14 +114,14 @@ func runVmBackupAndRestore(brCase VmBackupRestoreCase, expectedErr error, update if brCase.PowerState == "Stopped" { log.Print("Stopping VM before backup as specified in test case.") err = v.StopVm(brCase.Namespace, brCase.Name) - Expect(err).To(BeNil()) + gomega.Expect(err).To(gomega.BeNil()) } // Run optional custom verification if brCase.PreBackupVerify != nil { log.Printf("Running pre-backup custom function for case %s", brCase.Name) err := brCase.PreBackupVerify(dpaCR.Client, brCase.Namespace) - Expect(err).ToNot(HaveOccurred()) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) } // Back up VM @@ -110,10 +129,10 @@ func runVmBackupAndRestore(brCase VmBackupRestoreCase, expectedErr error, update // Delete everything in test namespace err = v.RemoveVm(brCase.Namespace, brCase.Name, 5*time.Minute) - Expect(err).To(BeNil()) + gomega.Expect(err).To(gomega.BeNil()) err = lib.DeleteNamespace(v.Clientset, brCase.Namespace) - Expect(err).To(BeNil()) - Eventually(lib.IsNamespaceDeleted(kubernetesClientForSuiteRun, brCase.Namespace), time.Minute*5, time.Second*5).Should(BeTrue()) + gomega.Expect(err).To(gomega.BeNil()) + gomega.Eventually(lib.IsNamespaceDeleted(kubernetesClientForSuiteRun, brCase.Namespace), time.Minute*5, time.Second*5).Should(gomega.BeTrue()) // Do restore runRestore(brCase.BackupRestoreCase, backupName, restoreName, nsRequiresResticDCWorkaround) @@ -122,87 +141,111 @@ func runVmBackupAndRestore(brCase VmBackupRestoreCase, expectedErr error, update if brCase.PostRestoreVerify != nil { log.Printf("Running post-restore custom function for VM case %s", brCase.Name) err = brCase.PostRestoreVerify(dpaCR.Client, brCase.Namespace) - Expect(err).ToNot(HaveOccurred()) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) } // avoid finalizers in namespace deletion err = v.RemoveVm(brCase.Namespace, brCase.Name, 5*time.Minute) - Expect(err).To(BeNil()) + gomega.Expect(err).To(gomega.BeNil()) } -var _ = Describe("VM backup and restore tests", Ordered, func() { +var _ = ginkgo.Describe("VM backup and restore tests", ginkgo.Ordered, func() { var v *lib.VirtOperator var err error wasInstalledFromTest := false + + cirrosDownloadedFromTest := false + bootImageNamespace := "openshift-virtualization-os-images" + var lastBRCase VmBackupRestoreCase var lastInstallTime time.Time updateLastBRcase := func(brCase VmBackupRestoreCase) { lastBRCase = brCase } - updateLastInstallTime := func() { - lastInstallTime = time.Now() - } - var _ = BeforeAll(func() { - v, err = lib.GetVirtOperator(runTimeClientForSuiteRun, kubernetesClientForSuiteRun, dynamicClientForSuiteRun) - Expect(err).To(BeNil()) - Expect(v).ToNot(BeNil()) + var _ = ginkgo.BeforeAll(func() { + v, err = 
lib.GetVirtOperator(runTimeClientForSuiteRun, kubernetesClientForSuiteRun, dynamicClientForSuiteRun, useUpstreamHco) + gomega.Expect(err).To(gomega.BeNil()) + gomega.Expect(v).ToNot(gomega.BeNil()) if !v.IsVirtInstalled() { err = v.EnsureVirtInstallation() - Expect(err).To(BeNil()) + gomega.Expect(err).To(gomega.BeNil()) wasInstalledFromTest = true } - err = v.EnsureEmulation(20 * time.Second) - Expect(err).To(BeNil()) + if kvmEmulation { + err = v.EnsureEmulation(20 * time.Second) + gomega.Expect(err).To(gomega.BeNil()) + } else { + log.Println("Avoiding setting KVM emulation, by command line request") + } url, err := getLatestCirrosImageURL() - Expect(err).To(BeNil()) - err = v.EnsureDataVolumeFromUrl("openshift-virtualization-os-images", "cirros", url, "150Mi", 5*time.Minute) - Expect(err).To(BeNil()) - err = v.CreateDataSourceFromPvc("openshift-virtualization-os-images", "cirros") - Expect(err).To(BeNil()) - + gomega.Expect(err).To(gomega.BeNil()) + err = v.EnsureNamespace(bootImageNamespace, 1*time.Minute) + gomega.Expect(err).To(gomega.BeNil()) + if !v.CheckDataVolumeExists(bootImageNamespace, "cirros") { + err = v.EnsureDataVolumeFromUrl(bootImageNamespace, "cirros", url, "150Mi", 5*time.Minute) + gomega.Expect(err).To(gomega.BeNil()) + err = v.CreateDataSourceFromPvc(bootImageNamespace, "cirros") + gomega.Expect(err).To(gomega.BeNil()) + cirrosDownloadedFromTest = true + } dpaCR.VeleroDefaultPlugins = append(dpaCR.VeleroDefaultPlugins, v1alpha1.DefaultPluginKubeVirt) err = v.CreateImmediateModeStorageClass("test-sc-immediate") - Expect(err).To(BeNil()) + gomega.Expect(err).To(gomega.BeNil()) + err = v.CreateWaitForFirstConsumerStorageClass("test-sc-wffc") + gomega.Expect(err).To(gomega.BeNil()) + err = lib.DeleteBackupRepositories(runTimeClientForSuiteRun, namespace) + gomega.Expect(err).To(gomega.BeNil()) + err = lib.InstallApplication(v.Client, "./sample-applications/virtual-machines/cirros-test/cirros-rbac.yaml") + gomega.Expect(err).To(gomega.BeNil()) + + if v.Upstream { + log.Printf("Creating fedora DataSource in openshift-virtualization-os-images namespace") + pvcNamespace, pvcName, err := v.GetDataSourcePvc("kubevirt-os-images", "fedora") + gomega.Expect(err).To(gomega.BeNil()) + err = v.CreateTargetDataSourceFromPvc(pvcNamespace, "openshift-virtualization-os-images", pvcName, "fedora") + gomega.Expect(err).To(gomega.BeNil()) + } + }) - var _ = AfterAll(func() { + var _ = ginkgo.AfterAll(func() { // DPA just needs to have BSL so gathering of backups/restores logs/describe work // using kopia to collect more info (DaemonSet) waitOADPReadiness(lib.KOPIA) - log.Printf("Running OADP must-gather") - err := lib.RunMustGather(artifact_dir, dpaCR.Client) - Expect(err).ToNot(HaveOccurred()) - err = dpaCR.Delete() - Expect(err).ToNot(HaveOccurred()) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) - v.RemoveDataSource("openshift-virtualization-os-images", "cirros") - v.RemoveDataVolume("openshift-virtualization-os-images", "cirros", 2*time.Minute) + if v != nil && cirrosDownloadedFromTest { + v.RemoveDataSource(bootImageNamespace, "cirros") + v.RemoveDataVolume(bootImageNamespace, "cirros", 2*time.Minute) + } if v != nil && wasInstalledFromTest { v.EnsureVirtRemoval() } err = v.RemoveStorageClass("test-sc-immediate") - Expect(err).To(BeNil()) + gomega.Expect(err).To(gomega.BeNil()) + err = v.RemoveStorageClass("test-sc-wffc") + gomega.Expect(err).To(gomega.BeNil()) }) - var _ = AfterEach(func(ctx SpecContext) { + var _ = ginkgo.AfterEach(func(ctx ginkgo.SpecContext) { 
tearDownBackupAndRestore(lastBRCase.BackupRestoreCase, lastInstallTime, ctx.SpecReport()) }) - DescribeTable("Backup and restore virtual machines", + ginkgo.DescribeTable("Backup and restore virtual machines", func(brCase VmBackupRestoreCase, expectedError error) { - runVmBackupAndRestore(brCase, expectedError, updateLastBRcase, updateLastInstallTime, v) + runVmBackupAndRestore(brCase, updateLastBRcase, v) }, - Entry("no-application CSI datamover backup and restore, CirrOS VM", Label("virt"), VmBackupRestoreCase{ + ginkgo.Entry("no-application CSI datamover backup and restore, CirrOS VM", ginkgo.Label("virt"), VmBackupRestoreCase{ Template: "./sample-applications/virtual-machines/cirros-test/cirros-test.yaml", InitDelay: 2 * time.Minute, // Just long enough to get to login prompt, VM is marked running while kernel messages are still scrolling by BackupRestoreCase: BackupRestoreCase{ @@ -214,7 +257,7 @@ var _ = Describe("VM backup and restore tests", Ordered, func() { }, }, nil), - Entry("no-application CSI backup and restore, CirrOS VM", Label("virt"), VmBackupRestoreCase{ + ginkgo.Entry("no-application CSI backup and restore, CirrOS VM", ginkgo.Label("virt"), VmBackupRestoreCase{ Template: "./sample-applications/virtual-machines/cirros-test/cirros-test.yaml", InitDelay: 2 * time.Minute, // Just long enough to get to login prompt, VM is marked running while kernel messages are still scrolling by BackupRestoreCase: BackupRestoreCase{ @@ -226,7 +269,7 @@ var _ = Describe("VM backup and restore tests", Ordered, func() { }, }, nil), - Entry("no-application CSI backup and restore, powered-off CirrOS VM", Label("virt"), VmBackupRestoreCase{ + ginkgo.Entry("no-application CSI backup and restore, powered-off CirrOS VM", ginkgo.Label("virt"), VmBackupRestoreCase{ Template: "./sample-applications/virtual-machines/cirros-test/cirros-test.yaml", InitDelay: 2 * time.Minute, PowerState: "Stopped", @@ -240,7 +283,7 @@ var _ = Describe("VM backup and restore tests", Ordered, func() { }, }, nil), - Entry("immediate binding no-application CSI datamover backup and restore, CirrOS VM", Label("virt"), VmBackupRestoreCase{ + ginkgo.Entry("immediate binding no-application CSI datamover backup and restore, CirrOS VM", ginkgo.Label("virt"), VmBackupRestoreCase{ Template: "./sample-applications/virtual-machines/cirros-test/cirros-test-immediate.yaml", InitDelay: 2 * time.Minute, // Just long enough to get to login prompt, VM is marked running while kernel messages are still scrolling by BackupRestoreCase: BackupRestoreCase{ @@ -252,7 +295,7 @@ var _ = Describe("VM backup and restore tests", Ordered, func() { }, }, nil), - Entry("immediate binding no-application CSI backup and restore, CirrOS VM", Label("virt"), VmBackupRestoreCase{ + ginkgo.Entry("immediate binding no-application CSI backup and restore, CirrOS VM", ginkgo.Label("virt"), VmBackupRestoreCase{ Template: "./sample-applications/virtual-machines/cirros-test/cirros-test-immediate.yaml", InitDelay: 2 * time.Minute, // Just long enough to get to login prompt, VM is marked running while kernel messages are still scrolling by BackupRestoreCase: BackupRestoreCase{ @@ -264,7 +307,7 @@ var _ = Describe("VM backup and restore tests", Ordered, func() { }, }, nil), - Entry("immediate binding no-application CSI+datamover backup and restore, powered-off CirrOS VM", Label("virt"), VmBackupRestoreCase{ + ginkgo.Entry("immediate binding no-application CSI+datamover backup and restore, powered-off CirrOS VM", ginkgo.Label("virt"), VmBackupRestoreCase{ Template: 
"./sample-applications/virtual-machines/cirros-test/cirros-test-immediate.yaml", InitDelay: 2 * time.Minute, PowerState: "Stopped", @@ -278,7 +321,7 @@ var _ = Describe("VM backup and restore tests", Ordered, func() { }, }, nil), - Entry("immediate binding no-application CSI backup and restore, powered-off CirrOS VM", Label("virt"), VmBackupRestoreCase{ + ginkgo.Entry("immediate binding no-application CSI backup and restore, powered-off CirrOS VM", ginkgo.Label("virt"), VmBackupRestoreCase{ Template: "./sample-applications/virtual-machines/cirros-test/cirros-test-immediate.yaml", InitDelay: 2 * time.Minute, PowerState: "Stopped", @@ -292,7 +335,7 @@ var _ = Describe("VM backup and restore tests", Ordered, func() { }, }, nil), - Entry("todolist CSI backup and restore, in a Fedora VM", Label("virt"), VmBackupRestoreCase{ + ginkgo.Entry("todolist CSI backup and restore, in a Fedora VM", ginkgo.Label("virt"), VmBackupRestoreCase{ Template: "./sample-applications/virtual-machines/fedora-todolist/fedora-todolist.yaml", InitDelay: 3 * time.Minute, // For cloud-init BackupRestoreCase: BackupRestoreCase{ @@ -300,8 +343,8 @@ var _ = Describe("VM backup and restore tests", Ordered, func() { Name: "fedora-todolist", SkipVerifyLogs: true, BackupRestoreType: lib.CSI, - PreBackupVerify: todoListReady(true, false, "mysql"), - PostRestoreVerify: todoListReady(false, false, "mysql"), + PreBackupVerify: vmTodoListReady(true, false, "mysql"), + PostRestoreVerify: vmTodoListReady(false, false, "mysql"), BackupTimeout: 45 * time.Minute, }, }, nil),