diff --git a/test/integration_test/common_test.go b/test/integration_test/common_test.go
index f0a816d134..f8dac4a6cd 100644
--- a/test/integration_test/common_test.go
+++ b/test/integration_test/common_test.go
@@ -4,8 +4,10 @@
 package integrationtest
 
 import (
+	"crypto/tls"
 	"flag"
 	"fmt"
+	"net/http"
 	"os"
 	"path"
 	"sort"
@@ -40,6 +42,7 @@ import (
 	_ "github.com/portworx/torpedo/drivers/volume/generic_csi"
 	_ "github.com/portworx/torpedo/drivers/volume/linstor"
 	_ "github.com/portworx/torpedo/drivers/volume/portworx"
+	"github.com/portworx/torpedo/pkg/aetosutil"
 	"github.com/portworx/torpedo/pkg/log"
 	testrailutils "github.com/portworx/torpedo/pkg/testrailuttils"
 	"github.com/sirupsen/logrus"
@@ -135,6 +138,12 @@ const (
 	testrailUserNameVar  = "TESTRAIL_USERNAME"
 	testrailPasswordVar  = "TESTRAIL_PASSWORD"
 	testrailMilestoneVar = "TESTRAIL_MILESTONE"
+
+	testUser        = "nouser"
+	testProduct     = "Stork"
+	testDescription = ""
+	testBranch      = ""
+	testType        = "stork integration test"
 )
 
 var nodeDriver node.Driver
@@ -166,6 +175,8 @@ var testrailSetupSuccessful bool
 var bidirectionalClusterpair bool
 var unidirectionalClusterpair bool
 
+var dash *aetosutil.Dashboard
+
 func TestSnapshot(t *testing.T) {
 	t.Run("testSnapshot", testSnapshot)
 	t.Run("testSnapshotRestore", testSnapshotRestore)
@@ -321,6 +332,22 @@ func setup() error {
 		return fmt.Errorf("TEST_MODE environment variable not set for stork: %v", err)
 	}
 	SetupTestRail()
+	dash = aetosutil.Get()
+	dash.IsEnabled = true
+	if !isDashboardReachable() {
+		log.Infof("Aetos Dashboard is not reachable. Disabling dashboard reporting.")
+		dash.IsEnabled = false
+	}
+	testSet := aetosutil.TestSet{
+		User:        testUser,
+		Product:     testProduct,
+		Description: testDescription,
+		Branch:      testBranch,
+		TestType:    testType,
+		Tags:        make(map[string]string),
+		Status:      aetosutil.NOTSTARTED,
+	}
+	dash.TestSet = &testSet
 	return nil
 }
 
@@ -387,6 +414,7 @@ func verifyScheduledNode(t *testing.T, appNode node.Node, volumes []string) {
 		}
 	}
 	require.Equal(t, true, found, "Scheduled node not found in driver node list. DriverNodes: %v ScheduledNode: %v", driverNodes, appNode)
+	log.InfoD("Scheduled node for app found: %s", appNode.Name)
 
 	scores := getScoringBasedOnHyperconvergence(t, driverNodes, volumes)
 
@@ -399,6 +427,7 @@ func verifyScheduledNode(t *testing.T, appNode node.Node, volumes []string) {
 
 	logrus.Infof("Scores: %v", scores)
 	require.Equal(t, highScore, scores[appNode.Name], "Scheduled node does not have the highest score")
+	log.InfoD("Verified scheduled node for app has highest score: %s", appNode.Name)
 }
 
 // Helper function to get scoring of driverNodes based on hyper-convergence
@@ -1891,3 +1920,28 @@ func getSupportedOperatorCRMapping() map[string][]meta_v1.APIResource {
 
 	return operatorAppToCRMap
 }
+
+func isDashboardReachable() bool {
+	timeout := 15 * time.Second
+	client := &http.Client{
+		Timeout: timeout,
+		Transport: &http.Transport{
+			TLSClientConfig: &tls.Config{
+				InsecureSkipVerify: true,
+			},
+		},
+	}
+	aboutURL := strings.Replace(aetosutil.DashBoardBaseURL, "dashboard", "datamodel/about", -1)
+	log.Infof("Checking URL: %s", aboutURL)
+	response, err := client.Get(aboutURL)
+
+	if err != nil {
+		log.Warn(err.Error())
+		return false
+	}
+	defer response.Body.Close()
+	if response.StatusCode == 200 {
+		return true
+	}
+	return false
+}
diff --git a/test/integration_test/extender_test.go b/test/integration_test/extender_test.go
index 256eae5ffe..8f56cf1590 100644
--- a/test/integration_test/extender_test.go
+++ b/test/integration_test/extender_test.go
@@ -13,6 +13,7 @@ import (
 	"github.com/portworx/sched-ops/k8s/storage"
 	"github.com/portworx/torpedo/drivers/node"
 	"github.com/portworx/torpedo/drivers/scheduler"
+	"github.com/portworx/torpedo/pkg/log"
 	"github.com/sirupsen/logrus"
 	"github.com/stretchr/testify/require"
 	apps_api "k8s.io/api/apps/v1"
@@ -27,6 +28,7 @@ const (
 )
 
 func TestExtender(t *testing.T) {
+	dash.TestSetBegin(dash.TestSet)
 	err := setSourceKubeConfig()
 	require.NoError(t, err, "failed to set kubeconfig to source cluster: %v", err)
 
@@ -47,10 +49,12 @@ func TestExtender(t *testing.T) {
 }
 
 func noPVCTest(t *testing.T) {
+	dash.TestCaseBegin("Stork scheduler No PVC test", "Stork scheduler test for app with no PVC", "", nil)
 	var testrailID, testResult = 50785, testResultFail
 	runID := testrailSetupForTest(testrailID, &testResult)
 	defer updateTestRail(&testResult, testrailID, runID)
 
+	log.InfoD("Deploy app with no PVC")
 	ctxs, err := schedulerDriver.Schedule(generateInstanceID(t, "nopvctest"),
 		scheduler.ScheduleOptions{AppKeys: []string{"mysql-nopvc"}})
 	require.NoError(t, err, "Error scheduling task")
@@ -60,6 +64,7 @@ func noPVCTest(t *testing.T) {
 	require.NoError(t, err, "Error waiting for pod to get to running state")
 
 	destroyAndWait(t, ctxs)
+	log.InfoD("Deleted app with no PVC")
 
 	// If we are here then the test has passed
 	testResult = testResultPass
@@ -67,6 +72,8 @@ func noPVCTest(t *testing.T) {
 }
 
 func singlePVCTest(t *testing.T) {
+	dash.TestCaseBegin("Stork scheduler single PVC test", "Stork scheduler test for app with single PVC", "", nil)
+	log.InfoD("Deploy app with single PVC")
 	var testrailID, testResult = 50786, testResultFail
 	runID := testrailSetupForTest(testrailID, &testResult)
 	defer updateTestRail(&testResult, testrailID, runID)
@@ -82,6 +89,7 @@ func singlePVCTest(t *testing.T) {
 	scheduledNodes, err := schedulerDriver.GetNodesForApp(ctxs[0])
 	require.NoError(t, err, "Error getting node for app")
 	require.Equal(t, 1, len(scheduledNodes), "App should be scheduled on one node")
+	log.InfoD("App with single PVC scheduled on one node")
 
 	volumeNames := getVolumeNames(t, ctxs[0])
 	require.Equal(t, 1, len(volumeNames), "Should only have one volume")
@@ -93,9 +101,11 @@ func singlePVCTest(t *testing.T) {
 	// If we are here then the test has passed
 	testResult = testResultPass
 	logrus.Infof("Test status at end of %s test: %s", t.Name(), testResult)
+	log.InfoD("Deleted app with single PVC")
 }
 
 func statefulsetTest(t *testing.T) {
+	dash.TestCaseBegin("Stateful set extender test", "Stork scheduler test with stateful set application", "", nil)
 	var testrailID, testResult = 50787, testResultFail
 	runID := testrailSetupForTest(testrailID, &testResult)
 	defer updateTestRail(&testResult, testrailID, runID)
@@ -133,6 +143,7 @@ func statefulsetTest(t *testing.T) {
 }
 
 func multiplePVCTest(t *testing.T) {
+	dash.TestCaseBegin("Multiple PVC test", "Stork scheduler test with app using multiple PVCs", "", nil)
 	var testrailID, testResult = 50788, testResultFail
 	runID := testrailSetupForTest(testrailID, &testResult)
 	defer updateTestRail(&testResult, testrailID, runID)
@@ -161,6 +172,7 @@ func multiplePVCTest(t *testing.T) {
 }
 
 func driverNodeErrorTest(t *testing.T) {
+	dash.TestCaseBegin("Driver node error", "Induce error on driver node by stopping PX on the node", "", nil)
 	var testrailID, testResult = 50789, testResultFail
 	runID := testrailSetupForTest(testrailID, &testResult)
 	defer updateTestRail(&testResult, testrailID, runID)
@@ -184,6 +196,7 @@ func driverNodeErrorTest(t *testing.T) {
 
 	time.Sleep(1 * time.Minute)
 
+	log.InfoD("Stopping volume driver on node: %s", scheduledNodes[0].Name)
 	err = volumeDriver.StopDriver(scheduledNodes, false, nil)
 	require.NoError(t, err, "Error stopping driver on scheduled Node %+v", scheduledNodes[0])
 	stoppedNode := scheduledNodes[0]
@@ -204,11 +217,13 @@ func driverNodeErrorTest(t *testing.T) {
 
 	verifyScheduledNode(t, scheduledNodes[0], volumeNames)
 
+	log.InfoD("Starting volume driver on node: %s", stoppedNode.Name)
 	err = volumeDriver.StartDriver(stoppedNode)
 	require.NoError(t, err, "Error starting driver on Node %+v", scheduledNodes[0])
 
 	err = volumeDriver.WaitDriverUpOnNode(stoppedNode, defaultWaitTimeout)
 	require.NoError(t, err, "Error waiting for Node to start %+v", scheduledNodes[0])
+	log.InfoD("Verified volume driver is up on node: %s", stoppedNode.Name)
 
 	destroyAndWait(t, ctxs)
 
@@ -218,10 +233,12 @@ func driverNodeErrorTest(t *testing.T) {
 }
 
 func poolMaintenanceTest(t *testing.T) {
+	dash.TestCaseBegin("Pool Maintenance", "Stork scheduling test with pool in maintenance mode", "", nil)
 	var testrailID, testResult = 86080, testResultFail
 	runID := testrailSetupForTest(testrailID, &testResult)
 	defer updateTestRail(&testResult, testrailID, runID)
 
+	log.InfoD("Deploy App")
 	ctxs, err := schedulerDriver.Schedule(generateInstanceID(t, "pool-test"),
 		scheduler.ScheduleOptions{AppKeys: []string{"mysql-1-pvc"}})
 	require.NoError(t, err, "Error scheduling task")
@@ -239,6 +256,7 @@ func poolMaintenanceTest(t *testing.T) {
 
 	verifyScheduledNode(t, scheduledNodes[0], volumeNames)
 
+	log.InfoD("Enter pool in maintenance mode on node: %s", scheduledNodes[0].Name)
 	err = volumeDriver.EnterPoolMaintenance(scheduledNodes[0])
 	require.NoError(t, err, "Error entering pool maintenance mode on scheduled node %+v", scheduledNodes[0])
 	poolMaintenanceNode := scheduledNodes[0]
@@ -274,10 +292,13 @@ func poolMaintenanceTest(t *testing.T) {
 }
 
 func pvcOwnershipTest(t *testing.T) {
+	dash.TestCaseBegin("PVC ownership", "Validating PVC ownership", "", nil)
+	defer dash.TestCaseEnd()
 	var testrailID, testResult = 50781, testResultFail
 	runID := testrailSetupForTest(testrailID, &testResult)
 	defer updateTestRail(&testResult, testrailID, runID)
 
+	log.InfoD("Schedule mysql app")
log.InfoD("Schedule mysql app") ctxs, err := schedulerDriver.Schedule(generateInstanceID(t, "ownershiptest"), scheduler.ScheduleOptions{AppKeys: []string{"mysql-repl-1"}}) require.NoError(t, err, "Error scheduling task") @@ -296,6 +317,7 @@ func pvcOwnershipTest(t *testing.T) { verifyScheduledNode(t, scheduledNodes[0], volumeNames) for _, spec := range ctxs[0].App.SpecList { + log.InfoD("Delete storage class.") if obj, ok := spec.(*storage_api.StorageClass); ok { err := storage.Instance().DeleteStorageClass(obj.Name) require.NoError(t, err, "Error deleting storage class for mysql.") @@ -303,12 +325,14 @@ func pvcOwnershipTest(t *testing.T) { if obj, ok := spec.(*v1.PersistentVolumeClaim); ok { updatePVC, err := core.Instance().GetPersistentVolumeClaim(obj.Name, obj.Namespace) require.NoError(t, err, "Error getting persistent volume claim.") + log.InfoD("Delete storage class annotation on PVC: %s", updatePVC.Name) delete(updatePVC.Annotations, annotationStorageProvisioner) _, err = core.Instance().UpdatePersistentVolumeClaim(updatePVC) require.NoError(t, err, "Error updating annotations in PVC.") } } + log.InfoD("Stop volume driver on scheduled node: %s", scheduledNodes[0].Name) err = volumeDriver.StopDriver(scheduledNodes, false, nil) require.NoError(t, err, "Error stopping driver on scheduled Node %+v", scheduledNodes[0]) // make sure to start driver if test failed @@ -332,6 +356,7 @@ func pvcOwnershipTest(t *testing.T) { for _, pod := range depPods { for _, cond := range pod.Status.Conditions { if cond.Type == v1.PodScheduled && cond.Status == v1.ConditionFalse { + log.InfoD("Unscheduled pod found: %s", pod.Name) errUnscheduledPod = true } } @@ -355,10 +380,12 @@ func pvcOwnershipTest(t *testing.T) { } func antihyperconvergenceTest(t *testing.T) { + dash.TestCaseBegin("Stork scheduler antihyperconvergence test", "validate antihyperconvergence for app with shared V4 SVC volume", "", nil) var testrailID, testResult = 85859, testResultFail runID := testrailSetupForTest(testrailID, &testResult) defer updateTestRail(&testResult, testrailID, runID) + log.InfoD("Schedule app") ctxs, err := schedulerDriver.Schedule("antihyperconvergencetest", scheduler.ScheduleOptions{ AppKeys: []string{"test-sv4-svc-repl1"}, @@ -388,6 +415,7 @@ func antihyperconvergenceTest(t *testing.T) { } func antihyperconvergenceTestPreferRemoteOnlyTest(t *testing.T) { + log.InfoD("Verify anti-hyperconvergence with prefer remote node only option") var testrailID, testResult = 85860, testResultFail runID := testrailSetupForTest(testrailID, &testResult) defer updateTestRail(&testResult, testrailID, runID) @@ -398,6 +426,7 @@ func antihyperconvergenceTestPreferRemoteOnlyTest(t *testing.T) { }) require.NoError(t, err, "Error scheduling task") require.Equal(t, 1, len(ctxs), "Only one task should have started") + log.InfoD("App deployed") logrus.Infof("Waiting for all Pods to come online") err = schedulerDriver.WaitForRunning(ctxs[0], defaultWaitTimeout, defaultWaitInterval) @@ -451,6 +480,7 @@ func antihyperconvergenceTestPreferRemoteOnlyTest(t *testing.T) { } func preferRemoteNodeFalseHyperconvergenceTest(t *testing.T) { + dash.TestCaseBegin("Stork scheduler prefer remote node antihyperconvergence test", "validate antihyperconvergence with preferRemoteNodeOnly flag", "", nil) var testrailID, testResult = 92964, testResultFail runID := testrailSetupForTest(testrailID, &testResult) defer updateTestRail(&testResult, testrailID, runID) @@ -509,9 +539,11 @@ func verifyAntihyperconvergence(t *testing.T, appNodes []node.Node, volumes 
 	for _, appNode := range appNodes {
 		require.Equal(t, highScore, scores[appNode.Name], "Scheduled node does not have the highest score")
 	}
+	log.InfoD("Verified scheduled node has the highest score")
 }
 
 func equalPodSpreadTest(t *testing.T) {
+	dash.TestCaseBegin("Stork scheduler equal pod spread test", "Verify equal pod spread is achieved using stork for an app", "", nil)
 	var testrailID, testResult = 84664, testResultFail
 	runID := testrailSetupForTest(testrailID, &testResult)
 	defer updateTestRail(&testResult, testrailID, runID)
@@ -544,6 +576,7 @@ func equalPodSpreadTest(t *testing.T) {
 	require.Equal(t, 3, len(scheduledNodesMap), "App should be scheduled on 3 nodes, pod spread not achieved.")
 
 	logrus.Infof("Verifying that volume replicase are spread equally across worker nodes")
+	log.InfoD("Pod spread verified")
 
 	logrus.Info("Deleting apps created by the test")
 	destroyAndWait(t, ctxs)
diff --git a/test/integration_test/migration_test.go b/test/integration_test/migration_test.go
index e6ab2ef41e..7fb96ade08 100644
--- a/test/integration_test/migration_test.go
+++ b/test/integration_test/migration_test.go
@@ -16,6 +16,7 @@ import (
 	"github.com/portworx/sched-ops/task"
 	"github.com/portworx/torpedo/drivers/scheduler"
 	"github.com/portworx/torpedo/drivers/scheduler/spec"
+	"github.com/portworx/torpedo/pkg/log"
 	"github.com/sirupsen/logrus"
 	"github.com/stretchr/testify/require"
 	apps_api "k8s.io/api/apps/v1"
@@ -134,6 +135,7 @@ func triggerMigration(
 	projectIDMappings string,
 	namespaceLabels map[string]string,
 ) ([]*scheduler.Context, *scheduler.Context) {
+	log.InfoD("Schedule mysql app")
 	ctxs, err := schedulerDriver.Schedule(instanceID,
 		scheduler.ScheduleOptions{
 			AppKeys: []string{appKey},
@@ -144,6 +146,7 @@ func triggerMigration(
 
 	err = schedulerDriver.WaitForRunning(ctxs[0], defaultWaitTimeout, defaultWaitInterval)
 	require.NoError(t, err, "Error waiting for app to get to running state")
+	log.InfoD("Mysql app validated")
 
 	preMigrationCtx := ctxs[0].DeepCopy()
 
@@ -159,9 +162,11 @@ func triggerMigration(
 		preMigrationCtx = ctxs[0].DeepCopy()
 	}
 
+	log.InfoD("Schedule cluster pair")
 	// Schedule bidirectional or regular cluster pair based on the flag
 	scheduleClusterPairGeneric(t, ctxs, appKey, instanceID, defaultClusterPairDir, projectIDMappings, skipStoragePair, true, pairReverse)
 
+	log.InfoD("Start migration")
 	// apply migration specs
 	err = schedulerDriver.AddTasks(ctxs[0],
 		scheduler.ScheduleOptions{AppKeys: migrationAppKeys})
@@ -257,6 +262,7 @@ func validateAndDestroyMigration(
 	} else {
 		require.Error(t, err, "Expected migration to fail")
 	}
+	log.InfoD("Migration validated")
 
 	// destroy app on cluster 1
 	if !skipAppDeletion {
@@ -269,6 +275,7 @@ func validateAndDestroyMigration(
 }
 
 func deploymentMigrationTest(t *testing.T) {
+	dash.TestCaseBegin("Deployment migration test", "Validating Async DR with Deployment App", "", nil)
 	var testrailID, testResult = 50803, testResultFail
 	runID := testrailSetupForTest(testrailID, &testResult)
 	defer updateTestRail(&testResult, testrailID, runID)
@@ -293,6 +300,7 @@ func deploymentMigrationTest(t *testing.T) {
 }
 
 func deploymentMigrationReverseTest(t *testing.T) {
+	dash.TestCaseBegin("Deployment migration reverse test", "Validating Async DR and reverse Async DR from destination cluster", "", nil)
 	var testrailID, testResult = 54210, testResultFail
 	runID := testrailSetupForTest(testrailID, &testResult)
 	defer updateTestRail(&testResult, testrailID, runID)
@@ -2189,6 +2197,7 @@ func scheduleClusterPairGeneric(t *testing.T, ctxs []*scheduler.Context,
 	skipStoragePair, resetConfig, pairReverse bool) {
 	var err error
 	if bidirectionalClusterpair {
+		log.InfoD("Scheduling bidirectional cluster pair")
 		clusterPairNamespace := fmt.Sprintf("%s-%s", appKey, instanceID)
 		logrus.Info("Bidirectional flag is set, will create bidirectional cluster pair:")
 		logrus.Infof("Name: %s", remotePairName)
@@ -2201,6 +2210,7 @@ func scheduleClusterPairGeneric(t *testing.T, ctxs []*scheduler.Context,
 
 		require.NoError(t, err, "failed to set kubeconfig to source cluster: %v", err)
 	} else if unidirectionalClusterpair {
+		log.InfoD("Scheduling unidirectional cluster pair")
 		clusterPairNamespace := fmt.Sprintf("%s-%s", appKey, instanceID)
 		logrus.Info("Unidirectional flag is set, will create unidirectional cluster pair:")
 		logrus.Infof("Name: %s", remotePairName)
@@ -2210,6 +2220,7 @@ func scheduleClusterPairGeneric(t *testing.T, ctxs []*scheduler.Context,
 		err = scheduleUnidirectionalClusterPair(remotePairName, clusterPairNamespace, projectIDMappings, defaultBackupLocation, defaultSecretName, true, pairReverse)
 		require.NoError(t, err, "failed to set unidirectional cluster pair: %v", err)
 	} else {
+		log.InfoD("Scheduling cluster pair using storkctl generate")
 		// create, apply and validate cluster pair specs
 		err = scheduleClusterPair(ctxs[0], skipStoragePair, true, defaultClusterPairDir, projectIDMappings, pairReverse)
 		require.NoError(t, err, "Error scheduling cluster pair")
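
Review note on the dashboard hooks added above: TestExtender opens a test set with dash.TestSetBegin but nothing closes it, and of the new dash.TestCaseBegin calls only pvcOwnershipTest pairs the call with a deferred dash.TestCaseEnd. The sketch below is illustrative only and not part of the diff; it lives alongside the tests in the integrationtest package, assumes the aetosutil calls already used in this diff plus a TestSetEnd counterpart to TestSetBegin, and TestExample/exampleCase are hypothetical names.

// Minimal sketch of the Begin/End pairing the remaining tests would likely want,
// so that results are closed out on the Aetos dashboard even when a test fails.
func TestExample(t *testing.T) {
	dash.TestSetBegin(dash.TestSet)
	// Assumption: aetosutil provides TestSetEnd as the counterpart of TestSetBegin.
	defer dash.TestSetEnd()

	t.Run("exampleCase", exampleCase)
}

func exampleCase(t *testing.T) {
	dash.TestCaseBegin("Example case", "Hypothetical case showing the Begin/End pairing", "", nil)
	// Mirrors pvcOwnershipTest: the deferred call closes the test case on every exit path.
	defer dash.TestCaseEnd()
	// ... test body ...
}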