diff --git a/tests/client.go b/tests/client.go
index 52724d803ac3..2db6c36f0499 100644
--- a/tests/client.go
+++ b/tests/client.go
@@ -3,6 +3,8 @@ package tests
 import (
 	"context"
 	"fmt"
+	"os"
+	"os/exec"
 	"strings"
 
 	corev1 "k8s.io/api/core/v1"
@@ -15,6 +17,23 @@ import (
 // This file consolidates functions that are used across multiple testing frameworks.
 // Most of it relates to interacting with the Kubernetes API and checking the status of resources.
 
+// RunCommand runs the given command on the host in a bash shell. If E2E_KUBECONFIG or
+// DOCKER_KUBECONFIG is set, it is passed to the command as KUBECONFIG; DOCKER_KUBECONFIG wins when both are set.
+func RunCommand(cmd string) (string, error) {
+	c := exec.Command("bash", "-c", cmd)
+	if kc, ok := os.LookupEnv("E2E_KUBECONFIG"); ok {
+		c.Env = append(os.Environ(), "KUBECONFIG="+kc)
+	}
+	if kc, ok := os.LookupEnv("DOCKER_KUBECONFIG"); ok {
+		c.Env = append(os.Environ(), "KUBECONFIG="+kc)
+	}
+	out, err := c.CombinedOutput()
+	if err != nil {
+		return string(out), fmt.Errorf("failed to run command: %s, %v", cmd, err)
+	}
+	return string(out), err
+}
+
 // CheckDefaultDeployments checks if the standard array of K3s deployments are ready, otherwise returns an error
 func CheckDefaultDeployments(kubeconfigFile string) error {
 	return CheckDeployments(kubeconfigFile, "kube-system", "coredns", "local-path-provisioner", "metrics-server", "traefik")
diff --git a/tests/docker/basics/basics_test.go b/tests/docker/basics/basics_test.go
index 4f7f29b7401e..ce86492fc4d6 100644
--- a/tests/docker/basics/basics_test.go
+++ b/tests/docker/basics/basics_test.go
@@ -9,14 +9,13 @@ import (
 
 	"github.com/k3s-io/k3s/tests"
 	"github.com/k3s-io/k3s/tests/docker"
-	tester "github.com/k3s-io/k3s/tests/docker"
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
 )
 
 var k3sImage = flag.String("k3sImage", "", "The image used to provision containers")
 var ci = flag.Bool("ci", false, "running on CI, forced cleanup")
-var config *tester.TestConfig
+var config *docker.TestConfig
 
 func Test_DockerBasic(t *testing.T) {
 	flag.Parse()
@@ -29,7 +28,7 @@ var _ = Describe("Basic Tests", Ordered, func() {
 	Context("Setup Cluster", func() {
 		It("should provision servers and agents", func() {
 			var err error
-			config, err = tester.NewTestConfig(*k3sImage)
+			config, err = docker.NewTestConfig(*k3sImage)
 			Expect(err).NotTo(HaveOccurred())
 			Expect(config.ProvisionServers(1)).To(Succeed())
 			Expect(config.ProvisionAgents(1)).To(Succeed())
@@ -57,9 +56,9 @@ var _ = Describe("Basic Tests", Ordered, func() {
 	Context("Verify Binaries and Images", func() {
 		It("has valid bundled binaries", func() {
 			for _, server := range config.Servers {
-				Expect(tester.VerifyValidVersion(server, "kubectl")).To(Succeed())
-				Expect(tester.VerifyValidVersion(server, "ctr")).To(Succeed())
-				Expect(tester.VerifyValidVersion(server, "crictl")).To(Succeed())
+				Expect(docker.VerifyValidVersion(server, "kubectl")).To(Succeed())
+				Expect(docker.VerifyValidVersion(server, "ctr")).To(Succeed())
+				Expect(docker.VerifyValidVersion(server, "crictl")).To(Succeed())
 			}
 		})
 		It("has valid airgap images", func() {
@@ -87,7 +86,7 @@ var _ = AfterSuite(func() {
 })
 
 // VerifyAirgapImages checks for changes in the airgap image list
-func VerifyAirgapImages(config *tester.TestConfig) error {
+func VerifyAirgapImages(config *docker.TestConfig) error {
 	// This file is generated during the build packaging step
 	const airgapImageList = "../../../scripts/airgap/image-list.txt"
 
@@ -97,7 +96,7 @@ func VerifyAirgapImages(config *tester.TestConfig) error {
 	// Collect all images from nodes
 	for _, node := range config.GetNodeNames() {
 		cmd := fmt.Sprintf("docker exec %s crictl images -o json | jq -r '.images[].repoTags[0] | select(. != null)'", node)
-		output, err := tester.RunCommand(cmd)
+		output, err := tests.RunCommand(cmd)
 		Expect(err).NotTo(HaveOccurred(), "failed to execute crictl and jq: %v", err)
 
 		for _, line := range strings.Split(strings.TrimSpace(string(output)), "\n") {
diff --git a/tests/docker/bootstraptoken/bootstraptoken_test.go b/tests/docker/bootstraptoken/bootstraptoken_test.go
index 1ce0f00c0503..9d226e6357f3 100644
--- a/tests/docker/bootstraptoken/bootstraptoken_test.go
+++ b/tests/docker/bootstraptoken/bootstraptoken_test.go
@@ -7,14 +7,13 @@ import (
 
 	"github.com/k3s-io/k3s/tests"
 	"github.com/k3s-io/k3s/tests/docker"
-	tester "github.com/k3s-io/k3s/tests/docker"
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
 )
 
 var k3sImage = flag.String("k3sImage", "", "The k3s image used to provision containers")
 var ci = flag.Bool("ci", false, "running on CI, forced cleanup")
-var config *tester.TestConfig
+var config *docker.TestConfig
 
 func Test_DockerBootstrapToken(t *testing.T) {
 	flag.Parse()
@@ -27,7 +26,7 @@ var _ = Describe("Boostrap Token Tests", Ordered, func() {
 	Context("Setup Cluster", func() {
 		It("should provision servers", func() {
 			var err error
-			config, err = tester.NewTestConfig(*k3sImage)
+			config, err = docker.NewTestConfig(*k3sImage)
 			Expect(err).NotTo(HaveOccurred())
 			Expect(config.ProvisionServers(1)).To(Succeed())
 			Eventually(func() error {
diff --git a/tests/docker/cacerts/cacerts_test.go b/tests/docker/cacerts/cacerts_test.go
index 0186b7a91d45..3fc76d1f6fff 100644
--- a/tests/docker/cacerts/cacerts_test.go
+++ b/tests/docker/cacerts/cacerts_test.go
@@ -10,14 +10,13 @@ import (
 
 	"github.com/k3s-io/k3s/tests"
 	"github.com/k3s-io/k3s/tests/docker"
-	tester "github.com/k3s-io/k3s/tests/docker"
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
 )
 
 var k3sImage = flag.String("k3sImage", "", "The k3s image used to provision containers")
 var ci = flag.Bool("ci", false, "running on CI, forced cleanup")
-var config *tester.TestConfig
+var config *docker.TestConfig
 var testID string
 
 func Test_DockerCACerts(t *testing.T) {
@@ -36,7 +35,7 @@ var _ = Describe("CA Certs Tests", Ordered, func() {
 	// share it with the other containers that need the file.
It("should configure CA certs", func() { var err error - config, err = tester.NewTestConfig(*k3sImage) + config, err = docker.NewTestConfig(*k3sImage) Expect(err).NotTo(HaveOccurred()) Expect(os.MkdirAll(filepath.Join(config.TestDir, "pause"), 0755)).To(Succeed()) @@ -45,16 +44,16 @@ var _ = Describe("CA Certs Tests", Ordered, func() { tlsMount := fmt.Sprintf("--mount type=volume,src=%s,dst=/var/lib/rancher/k3s/server/tls", pauseName) cmd := fmt.Sprintf("docker run -d --name %s --hostname %s %s rancher/mirrored-pause:3.6", pauseName, pauseName, tlsMount) - _, err = tester.RunCommand(cmd) + _, err = tests.RunCommand(cmd) Expect(err).NotTo(HaveOccurred()) dataDir := filepath.Join(config.TestDir, "pause/k3s") cmd = fmt.Sprintf("DATA_DIR=%s ../../../contrib/util/generate-custom-ca-certs.sh", dataDir) - _, err = tester.RunCommand(cmd) + _, err = tests.RunCommand(cmd) Expect(err).NotTo(HaveOccurred()) cmd = fmt.Sprintf("docker cp %s %s:/var/lib/rancher", dataDir, pauseName) - _, err = tester.RunCommand(cmd) + _, err = tests.RunCommand(cmd) Expect(err).NotTo(HaveOccurred()) // Set SERVER_ARGS to include the custom CA certs @@ -76,7 +75,7 @@ var _ = Describe("CA Certs Tests", Ordered, func() { // Example: Check if the custom CA certs are present in the server container for _, server := range config.Servers { cmd := fmt.Sprintf("docker exec %s ls /var/lib/rancher/k3s/server/tls", server.Name) - output, err := tester.RunCommand(cmd) + output, err := tests.RunCommand(cmd) Expect(err).NotTo(HaveOccurred(), "failed to list custom CA certs: %v", err) Expect(output).To(ContainSubstring("ca.crt")) } @@ -98,13 +97,13 @@ var _ = AfterSuite(func() { if config != nil && !failed { config.Cleanup() cmd := fmt.Sprintf("docker stop k3s-pause-%s", testID) - _, err := tester.RunCommand(cmd) + _, err := tests.RunCommand(cmd) Expect(err).NotTo(HaveOccurred()) cmd = fmt.Sprintf("docker rm -v k3s-pause-%s", testID) - _, err = tester.RunCommand(cmd) + _, err = tests.RunCommand(cmd) Expect(err).NotTo(HaveOccurred()) cmd = fmt.Sprintf("docker volume rm k3s-pause-%s", testID) - _, err = tester.RunCommand(cmd) + _, err = tests.RunCommand(cmd) Expect(err).NotTo(HaveOccurred()) } diff --git a/tests/docker/conformance/conformance_test.go b/tests/docker/conformance/conformance_test.go index 8f9d7e0b5b6c..689feaefb4b8 100644 --- a/tests/docker/conformance/conformance_test.go +++ b/tests/docker/conformance/conformance_test.go @@ -13,7 +13,6 @@ import ( "github.com/k3s-io/k3s/tests" "github.com/k3s-io/k3s/tests/docker" - tester "github.com/k3s-io/k3s/tests/docker" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" ) @@ -22,7 +21,7 @@ var k3sImage = flag.String("k3sImage", "", "The k3s image used to provision cont var db = flag.String("db", "", "The database to use for the tests (sqlite, etcd, mysql, postgres)") var serial = flag.Bool("serial", false, "Run the Serial Conformance Tests") var ci = flag.Bool("ci", false, "running on CI, forced cleanup") -var config *tester.TestConfig +var config *docker.TestConfig func Test_DockerConformance(t *testing.T) { flag.Parse() @@ -35,7 +34,7 @@ var _ = Describe("Conformance Tests", Ordered, func() { Context("Setup Cluster", func() { It("should provision servers and agents", func() { var err error - config, err = tester.NewTestConfig(*k3sImage) + config, err = docker.NewTestConfig(*k3sImage) Expect(err).NotTo(HaveOccurred()) config.DBType = *db Expect(config.ProvisionServers(1)).To(Succeed()) @@ -58,7 +57,7 @@ var _ = Describe("Conformance Tests", Ordered, func() { hydrophoneURL := fmt.Sprintf("https://github.com/kubernetes-sigs/hydrophone/releases/download/%s/hydrophone_Linux_%s.tar.gz", hydrophoneVersion, hydrophoneArch) cmd := fmt.Sprintf("curl -L %s | tar -xzf - -C %s", hydrophoneURL, config.TestDir) - _, err := tester.RunCommand(cmd) + _, err := tests.RunCommand(cmd) Expect(err).NotTo(HaveOccurred()) Expect(os.Chmod(filepath.Join(config.TestDir, "hydrophone"), 0755)).To(Succeed()) }) @@ -84,7 +83,7 @@ var _ = Describe("Conformance Tests", Ordered, func() { if hc.ProcessState != nil { break } - res, _ := tester.RunCommand(cmd) + res, _ := tests.RunCommand(cmd) res = strings.TrimSpace(res) fmt.Printf("Status Report %d: %s tests complete\n", i, res) } @@ -109,7 +108,7 @@ var _ = Describe("Conformance Tests", Ordered, func() { break } time.Sleep(120 * time.Second) - res, _ := tester.RunCommand(cmd) + res, _ := tests.RunCommand(cmd) res = strings.TrimSpace(res) fmt.Printf("Status Report %d: %s tests complete\n", i, res) } diff --git a/tests/docker/dualstack/dualstack_test.go b/tests/docker/dualstack/dualstack_test.go index 35de6d7a3267..afdd98c4be90 100644 --- a/tests/docker/dualstack/dualstack_test.go +++ b/tests/docker/dualstack/dualstack_test.go @@ -80,7 +80,7 @@ var _ = DescribeTableSubtree("DualStack Tests", Ordered, func(ipConfig string) { Expect(err).NotTo(HaveOccurred()) Eventually(func() (string, error) { cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-clusterip --field-selector=status.phase=Running --kubeconfig=" + tc.KubeconfigFile - return docker.RunCommand(cmd) + return tests.RunCommand(cmd) }, "120s", "5s").Should(ContainSubstring("ds-clusterip-pod")) // Checks both IPv4 and IPv6 @@ -100,7 +100,7 @@ var _ = DescribeTableSubtree("DualStack Tests", Ordered, func(ipConfig string) { _, err := tc.DeployWorkload("dualstack_ingress.yaml") Expect(err).NotTo(HaveOccurred(), "Ingress manifest not deployed") cmd := "kubectl get ingress ds-ingress --kubeconfig=" + tc.KubeconfigFile + " -o jsonpath=\"{.spec.rules[*].host}\"" - hostName, err := docker.RunCommand(cmd) + hostName, err := tests.RunCommand(cmd) Expect(err).NotTo(HaveOccurred(), "failed cmd: "+cmd) for _, node := range append(tc.Servers, tc.Agents...) 
{ ips, err := tests.GetNodeIPs(node.Name, tc.KubeconfigFile) @@ -112,7 +112,7 @@ var _ = DescribeTableSubtree("DualStack Tests", Ordered, func(ipConfig string) { } cmd := fmt.Sprintf("curl --header host:%s http://%s/name.html", hostName, ip) Eventually(func() (string, error) { - return docker.RunCommand(cmd) + return tests.RunCommand(cmd) }, "10s", "2s").Should(ContainSubstring("ds-clusterip-pod"), "failed cmd: "+cmd) } } @@ -122,7 +122,7 @@ var _ = DescribeTableSubtree("DualStack Tests", Ordered, func(ipConfig string) { _, err := tc.DeployWorkload("dualstack_nodeport.yaml") Expect(err).NotTo(HaveOccurred()) cmd := "kubectl get service ds-nodeport-svc --kubeconfig=" + tc.KubeconfigFile + " --output jsonpath=\"{.spec.ports[0].nodePort}\"" - nodeport, err := docker.RunCommand(cmd) + nodeport, err := tests.RunCommand(cmd) Expect(err).NotTo(HaveOccurred(), "failed cmd: "+cmd) for _, node := range append(tc.Servers, tc.Agents...) { ips, err := tests.GetNodeIPs(node.Name, tc.KubeconfigFile) @@ -134,7 +134,7 @@ var _ = DescribeTableSubtree("DualStack Tests", Ordered, func(ipConfig string) { } cmd = "curl -L --insecure http://" + ip + ":" + nodeport + "/name.html" Eventually(func() (string, error) { - return docker.RunCommand(cmd) + return tests.RunCommand(cmd) }, "10s", "1s").Should(ContainSubstring("ds-nodeport-pod"), "failed cmd: "+cmd) } } diff --git a/tests/docker/etcd/etcd_test.go b/tests/docker/etcd/etcd_test.go index e92e2e3d0dec..15be9970c307 100644 --- a/tests/docker/etcd/etcd_test.go +++ b/tests/docker/etcd/etcd_test.go @@ -7,14 +7,13 @@ import ( "github.com/k3s-io/k3s/tests" "github.com/k3s-io/k3s/tests/docker" - tester "github.com/k3s-io/k3s/tests/docker" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" ) var k3sImage = flag.String("k3sImage", "", "The k3s image used to provision containers") var ci = flag.Bool("ci", false, "running on CI, forced cleanup") -var config *tester.TestConfig +var config *docker.TestConfig func Test_DockerEtcd(t *testing.T) { flag.Parse() @@ -27,7 +26,7 @@ var _ = Describe("Etcd Tests", Ordered, func() { Context("Test a 3 server cluster", func() { It("should setup the cluster configuration", func() { var err error - config, err = tester.NewTestConfig(*k3sImage) + config, err = docker.NewTestConfig(*k3sImage) Expect(err).NotTo(HaveOccurred()) }) It("should provision servers", func() { @@ -47,7 +46,7 @@ var _ = Describe("Etcd Tests", Ordered, func() { Context("Test a Split Role cluster with 3 etcd, 2 control-plane, 1 agents", func() { It("should setup the cluster configuration", func() { var err error - config, err = tester.NewTestConfig(*k3sImage) + config, err = docker.NewTestConfig(*k3sImage) Expect(err).NotTo(HaveOccurred()) Expect(os.Setenv("SERVER_0_ARGS", "--disable-apiserver --disable-controller-manager --disable-scheduler --cluster-init")).To(Succeed()) Expect(os.Setenv("SERVER_1_ARGS", "--disable-apiserver --disable-controller-manager --disable-scheduler")).To(Succeed()) diff --git a/tests/docker/hardened/hardened_test.go b/tests/docker/hardened/hardened_test.go index 57bd51524325..bab412c692ed 100644 --- a/tests/docker/hardened/hardened_test.go +++ b/tests/docker/hardened/hardened_test.go @@ -55,7 +55,7 @@ kubelet-arg: for _, server := range config.Servers { cmd := "docker cp ./cluster-level-pss.yaml " + server.Name + ":/tmp/cluster-level-pss.yaml" - Expect(docker.RunCommand(cmd)).Error().NotTo(HaveOccurred()) + Expect(tests.RunCommand(cmd)).Error().NotTo(HaveOccurred()) cmd = "mkdir -p /var/lib/rancher/k3s/server/logs" 
 			Expect(server.RunCmdOnNode(cmd)).Error().NotTo(HaveOccurred())
@@ -81,8 +81,8 @@ kubelet-arg:
 			_, err := config.DeployWorkload("hardened-ingress.yaml")
 			Expect(err).NotTo(HaveOccurred())
 			Eventually(func() (string, error) {
-				cmd := "kubectl get daemonset -n default example -o jsonpath='{.status.numberReady}' --kubeconfig=" + config.KubeconfigFile
-				return docker.RunCommand(cmd)
+				cmd := "kubectl get daemonset -n default example -o jsonpath='{.status.numberReady}'"
+				return tests.RunCommand(cmd)
 			}, "60s", "5s").Should(Equal("2"))
 			_, err = config.DeployWorkload("hardened-netpool.yaml")
 			Expect(err).NotTo(HaveOccurred())
@@ -92,23 +92,23 @@ kubelet-arg:
 				Eventually(func(g Gomega) {
 					for _, server := range config.Servers {
 						cmd := fmt.Sprintf("curl -vksf -H 'Host: example.com' %s://%s/", scheme, server.IP)
-						g.Expect(docker.RunCommand(cmd)).Error().NotTo(HaveOccurred())
+						g.Expect(tests.RunCommand(cmd)).Error().NotTo(HaveOccurred())
 					}
 					for _, agent := range config.Agents {
 						cmd := fmt.Sprintf("curl -vksf -H 'Host: example.com' %s://%s/", scheme, agent.IP)
-						g.Expect(docker.RunCommand(cmd)).Error().NotTo(HaveOccurred())
+						g.Expect(tests.RunCommand(cmd)).Error().NotTo(HaveOccurred())
 					}
 				}, "30s", "10s").Should(Succeed())
 			}
 		})
 		It("confirms we can make a request through the nodeport service", func() {
 			for _, server := range config.Servers {
-				cmd := "kubectl get service/example -o 'jsonpath={.spec.ports[*].nodePort}' --kubeconfig=" + config.KubeconfigFile
-				ports, err := docker.RunCommand(cmd)
+				cmd := "kubectl get service/example -o 'jsonpath={.spec.ports[*].nodePort}'"
+				ports, err := tests.RunCommand(cmd)
 				Expect(err).NotTo(HaveOccurred())
 				for _, port := range strings.Split(ports, " ") {
 					cmd := fmt.Sprintf("curl -vksf -H 'Host: example.com' http://%s:%s", server.IP, port)
-					Expect(docker.RunCommand(cmd)).Error().NotTo(HaveOccurred())
+					Expect(tests.RunCommand(cmd)).Error().NotTo(HaveOccurred())
 				}
 			}
 		})
diff --git a/tests/docker/lazypull/lazypull_test.go b/tests/docker/lazypull/lazypull_test.go
index f11c29371a66..01c450c86aec 100644
--- a/tests/docker/lazypull/lazypull_test.go
+++ b/tests/docker/lazypull/lazypull_test.go
@@ -8,14 +8,13 @@ import (
 
 	"github.com/k3s-io/k3s/tests"
 	"github.com/k3s-io/k3s/tests/docker"
-	tester "github.com/k3s-io/k3s/tests/docker"
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
 )
 
 var k3sImage = flag.String("k3sImage", "", "The k3s image used to provision containers")
 var ci = flag.Bool("ci", false, "running on CI, forced cleanup")
-var config *tester.TestConfig
+var config *docker.TestConfig
 
 func Test_DockerLazyPull(t *testing.T) {
 	flag.Parse()
@@ -28,7 +27,7 @@ var _ = Describe("LazyPull Tests", Ordered, func() {
 	Context("Setup Cluster", func() {
 		It("should provision servers", func() {
 			var err error
-			config, err = tester.NewTestConfig(*k3sImage)
+			config, err = docker.NewTestConfig(*k3sImage)
 			Expect(err).NotTo(HaveOccurred())
 			config.ServerYaml = "snapshotter: stargz"
 			Expect(config.ProvisionServers(1)).To(Succeed())
@@ -90,7 +89,7 @@ func lookLayers(node, layer string) error {
 	for layersNum = 0; layersNum < 100; layersNum++ {
 		// We use RunCommand instead of RunCmdOnNode because we pipe the output to jq
 		cmd := fmt.Sprintf("docker exec -i %s ctr --namespace=k8s.io snapshot --snapshotter=stargz info %s | jq -r '.Parent'", node, layer)
-		layer, err = tester.RunCommand(cmd)
+		layer, err = tests.RunCommand(cmd)
 		if err != nil {
 			return fmt.Errorf("failed to get parent layer: %v", err)
 		}
@@ -100,7 +99,7 @@ func lookLayers(node, layer string) error {
 			break
 		}
 		cmd = fmt.Sprintf("docker exec -i %s ctr --namespace=k8s.io snapshots --snapshotter=stargz info %s | jq -r '.Labels.\"%s\"'", node, layer, remoteSnapshotLabel)
-		label, err := tester.RunCommand(cmd)
+		label, err := tests.RunCommand(cmd)
 		if err != nil {
 			return fmt.Errorf("failed to get layer label: %v", err)
 		}
@@ -123,7 +122,7 @@ func lookLayers(node, layer string) error {
 func getTopmostLayer(node, container string) (string, error) {
 	var targetContainer string
 	cmd := fmt.Sprintf("docker exec -i %s ctr --namespace=k8s.io c ls -q labels.\"io.kubernetes.container.name\"==\"%s\" | sed -n 1p", node, container)
-	targetContainer, err := tester.RunCommand(cmd)
+	targetContainer, err := tests.RunCommand(cmd)
 	if err != nil {
 		return "", fmt.Errorf("failed to get target container: %v", err)
 	}
@@ -133,7 +132,7 @@ func getTopmostLayer(node, container string) (string, error) {
 		return "", fmt.Errorf("failed to get target container")
 	}
 	cmd = fmt.Sprintf("docker exec -i %s ctr --namespace=k8s.io c info %s | jq -r '.SnapshotKey'", node, targetContainer)
-	layer, err := tester.RunCommand(cmd)
+	layer, err := tests.RunCommand(cmd)
 	if err != nil {
 		return "", fmt.Errorf("failed to get topmost layer: %v", err)
 	}
diff --git a/tests/docker/resources/clusterip.yaml b/tests/docker/resources/clusterip.yaml
index e972f32d19f1..90b5b4af314a 100644
--- a/tests/docker/resources/clusterip.yaml
+++ b/tests/docker/resources/clusterip.yaml
@@ -14,7 +14,8 @@ spec:
     spec:
       containers:
      - name: nginx
-        image: ranchertest/mytestcontainer
+        image: rancher/mirrored-library-busybox:1.37.0
+        args: ['sh', '-c', 'echo Welcome to nginx! > index.html; hostname > name.html; httpd -vvf']
        ports:
        - containerPort: 80
 ---
diff --git a/tests/docker/resources/dualstack_clusterip.yaml b/tests/docker/resources/dualstack_clusterip.yaml
index c9d44764037d..3a6187bd1c2a 100644
--- a/tests/docker/resources/dualstack_clusterip.yaml
+++ b/tests/docker/resources/dualstack_clusterip.yaml
@@ -14,7 +14,8 @@ spec:
     spec:
      containers:
      - name: nginx
-        image: ranchertest/mytestcontainer
+        image: rancher/mirrored-library-busybox:1.37.0
+        args: ['sh', '-c', 'echo Welcome to nginx! > index.html; hostname > name.html; httpd -vvf']
        ports:
        - containerPort: 80
 ---
diff --git a/tests/docker/resources/dualstack_nodeport.yaml b/tests/docker/resources/dualstack_nodeport.yaml
index 2a5b38f1c42b..91557a54b729 100644
--- a/tests/docker/resources/dualstack_nodeport.yaml
+++ b/tests/docker/resources/dualstack_nodeport.yaml
@@ -14,7 +14,8 @@ spec:
    spec:
      containers:
      - name: nginx
-        image: ranchertest/mytestcontainer
+        image: rancher/mirrored-library-busybox:1.37.0
+        args: ['sh', '-c', 'echo Welcome to nginx! > index.html; hostname > name.html; httpd -vvf']
        ports:
        - containerPort: 80
 ---
diff --git a/tests/docker/resources/loadbalancer-allTraffic.yaml b/tests/docker/resources/loadbalancer-allTraffic.yaml
index 3a5dfac418fb..45909d7c357f 100644
--- a/tests/docker/resources/loadbalancer-allTraffic.yaml
+++ b/tests/docker/resources/loadbalancer-allTraffic.yaml
@@ -33,7 +33,7 @@ spec:
    spec:
      containers:
      - name: nginx
-        image: ranchertest/mytestcontainer
+        image: rancher/mirrored-library-nginx:1.29.1-alpine
        ports:
        - containerPort: 80
        volumeMounts:
diff --git a/tests/docker/resources/loadbalancer-extTrafficPol.yaml b/tests/docker/resources/loadbalancer-extTrafficPol.yaml
index 50b4b0012b29..efaf8c4cd2ec 100644
--- a/tests/docker/resources/loadbalancer-extTrafficPol.yaml
+++ b/tests/docker/resources/loadbalancer-extTrafficPol.yaml
@@ -33,7 +33,7 @@ spec:
    spec:
      containers:
      - name: nginx
-        image: ranchertest/mytestcontainer
+        image: rancher/mirrored-library-nginx:1.29.1-alpine
        ports:
        - containerPort: 80
        volumeMounts:
diff --git a/tests/docker/resources/loadbalancer-intTrafficPol.yaml b/tests/docker/resources/loadbalancer-intTrafficPol.yaml
index 5cc9e96f5e35..35efce556e6d 100644
--- a/tests/docker/resources/loadbalancer-intTrafficPol.yaml
+++ b/tests/docker/resources/loadbalancer-intTrafficPol.yaml
@@ -34,7 +34,7 @@ spec:
    spec:
      containers:
      - name: nginx
-        image: ranchertest/mytestcontainer
+        image: rancher/mirrored-library-nginx:1.29.1-alpine
        ports:
        - containerPort: 80
        volumeMounts:
diff --git a/tests/docker/resources/nodeport.yaml b/tests/docker/resources/nodeport.yaml
index 2187b732db89..9773d92f7163 100644
--- a/tests/docker/resources/nodeport.yaml
+++ b/tests/docker/resources/nodeport.yaml
@@ -14,7 +14,8 @@ spec:
    spec:
      containers:
      - name: nginx
-        image: ranchertest/mytestcontainer
+        image: rancher/mirrored-library-busybox:1.37.0
+        args: ['sh', '-c', 'echo Welcome to nginx! > index.html; hostname > name.html; httpd -vvf']
        ports:
        - containerPort: 80
 ---
diff --git a/tests/docker/resources/pod_client.yaml b/tests/docker/resources/pod_client.yaml
index 45b1a17e2f34..c8969704796a 100644
--- a/tests/docker/resources/pod_client.yaml
+++ b/tests/docker/resources/pod_client.yaml
@@ -15,9 +15,10 @@ spec:
         app: client
    spec:
      containers:
-      - image: ranchertest/mytestcontainer
+      - image: rancher/mirrored-library-busybox:1.37.0
+        args: ['sh', '-c', 'echo Welcome to nginx! > index.html; hostname > name.html; httpd -vvf']
        imagePullPolicy: Always
-        name: client-curl
+        name: client-wget
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
@@ -32,10 +33,10 @@ spec:
 apiVersion: v1
 kind: Service
 metadata:
-  name: client-curl
+  name: client-wget
  labels:
    app: client
-    service: client-curl
+    service: client-wget
 spec:
  type: ClusterIP
  selector:
diff --git a/tests/docker/skew/skew_test.go b/tests/docker/skew/skew_test.go
index 371552834032..26d96c4e326d 100644
--- a/tests/docker/skew/skew_test.go
+++ b/tests/docker/skew/skew_test.go
@@ -9,7 +9,6 @@ import (
 	"github.com/blang/semver/v4"
 	"github.com/k3s-io/k3s/tests"
 	"github.com/k3s-io/k3s/tests/docker"
-	tester "github.com/k3s-io/k3s/tests/docker"
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
 )
@@ -19,7 +18,7 @@ import (
 var k3sImage = flag.String("k3sImage", "", "The current commit build of K3s")
 var channel = flag.String("channel", "latest", "The release channel to test")
 var ci = flag.Bool("ci", false, "running on CI, forced cleanup")
-var config *tester.TestConfig
+var config *docker.TestConfig
 
 func Test_DockerSkew(t *testing.T) {
 	flag.Parse()
@@ -39,7 +38,7 @@ var _ = BeforeSuite(func() {
 		sV.Minor--
 		upgradeChannel = fmt.Sprintf("v%d.%d", sV.Major, sV.Minor)
 
-		lastMinorVersion, err = tester.GetVersionFromChannel(upgradeChannel)
+		lastMinorVersion, err = docker.GetVersionFromChannel(upgradeChannel)
 		Expect(err).NotTo(HaveOccurred())
 		Expect(lastMinorVersion).To(ContainSubstring("v1."))
 
@@ -50,7 +49,7 @@ var _ = Describe("Skew Tests", Ordered, func() {
 	Context("Setup Cluster with Server newer than Agent", func() {
 		It("should provision new server and old agent", func() {
 			var err error
-			config, err = tester.NewTestConfig(*k3sImage)
+			config, err = docker.NewTestConfig(*k3sImage)
 			Expect(err).NotTo(HaveOccurred())
 			Expect(config.ProvisionServers(1)).To(Succeed())
 			config.K3sImage = "rancher/k3s:" + lastMinorVersion
@@ -91,7 +90,7 @@ var _ = Describe("Skew Tests", Ordered, func() {
 	Context("Test cluster with 1 Server older and 2 Servers newer", func() {
 		It("should setup the cluster configuration", func() {
 			var err error
-			config, err = tester.NewTestConfig("rancher/k3s:" + lastMinorVersion)
+			config, err = docker.NewTestConfig("rancher/k3s:" + lastMinorVersion)
 			Expect(err).NotTo(HaveOccurred())
 		})
 		It("should provision servers", func() {
diff --git a/tests/docker/snapshotrestore/snapshotrestore_test.go b/tests/docker/snapshotrestore/snapshotrestore_test.go
index c4106aa5ea13..7ba56e255610 100644
--- a/tests/docker/snapshotrestore/snapshotrestore_test.go
+++ b/tests/docker/snapshotrestore/snapshotrestore_test.go
@@ -8,7 +8,6 @@ import (
 
 	"github.com/k3s-io/k3s/tests"
 	"github.com/k3s-io/k3s/tests/docker"
-	tester "github.com/k3s-io/k3s/tests/docker"
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
 	"k8s.io/utils/set"
 )
@@ -17,7 +16,7 @@ import (
 var serverCount = flag.Int("serverCount", 3, "number of server nodes")
 var agentCount = flag.Int("agentCount", 1, "number of agent nodes")
 var ci = flag.Bool("ci", false, "running on CI")
-var config *tester.TestConfig
+var config *docker.TestConfig
 var snapshotname string
 
 func Test_DockerSnapshotRestore(t *testing.T) {
@@ -31,7 +30,7 @@ var _ = Describe("Verify snapshots and cluster restores work", Ordered, func() {
 	Context("Setup Cluster", func() {
 		It("should provision servers and agents", func() {
 			var err error
-			config, err = tester.NewTestConfig("rancher/systemd-node")
+			config, err = docker.NewTestConfig("rancher/systemd-node")
 			Expect(err).NotTo(HaveOccurred())
 			Expect(config.ProvisionServers(*serverCount)).To(Succeed())
 			Expect(config.ProvisionAgents(*agentCount)).To(Succeed())
@@ -49,8 +48,8 @@ var _ = Describe("Verify snapshots and cluster restores work", Ordered, func() {
 			Expect(err).NotTo(HaveOccurred(), "Cluster IP manifest not deployed: "+res)
 
 			Eventually(func(g Gomega) {
-				cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-clusterip --field-selector=status.phase=Running --kubeconfig=" + config.KubeconfigFile
-				res, err := tester.RunCommand(cmd)
+				cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-clusterip --field-selector=status.phase=Running"
+				res, err := tests.RunCommand(cmd)
 				g.Expect(err).NotTo(HaveOccurred())
 				g.Expect(res).Should((ContainSubstring("test-clusterip")), "failed cmd: %q result: %s", cmd, res)
 			}, "240s", "5s").Should(Succeed())
@@ -72,8 +71,8 @@ var _ = Describe("Verify snapshots and cluster restores work", Ordered, func() {
 			res, err := config.DeployWorkload("nodeport.yaml")
 			Expect(err).NotTo(HaveOccurred(), "NodePort manifest not deployed: "+res)
 			Eventually(func(g Gomega) {
-				cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-nodeport --field-selector=status.phase=Running --kubeconfig=" + config.KubeconfigFile
-				res, err := tester.RunCommand(cmd)
+				cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-nodeport --field-selector=status.phase=Running"
+				res, err := tests.RunCommand(cmd)
 				g.Expect(err).NotTo(HaveOccurred())
 				g.Expect(res).Should(ContainSubstring("test-nodeport"), "nodeport pod was not created")
 			}, "240s", "5s").Should(Succeed())
@@ -146,8 +145,8 @@ var _ = Describe("Verify snapshots and cluster restores work", Ordered, func() {
 		})
 
 		It("Verifies that workload1 exists and workload2 does not", func() {
-			cmd := "kubectl get pods --kubeconfig=" + config.KubeconfigFile
-			res, err := tester.RunCommand(cmd)
+			cmd := "kubectl get pods"
+			res, err := tests.RunCommand(cmd)
 			Expect(err).NotTo(HaveOccurred())
 			Expect(res).Should(ContainSubstring("test-clusterip"))
 			Expect(res).ShouldNot(ContainSubstring("test-nodeport"))
@@ -178,7 +177,7 @@ func CheckNodeStatus(kubeconfigFile string, readyNodes, notReadyNodes []string) 
 	foundNotReadyNodes := make(set.Set[string], 0)
 
 	cmd := "kubectl get nodes --no-headers --kubeconfig=" + kubeconfigFile
-	res, err := tester.RunCommand(cmd)
+	res, err := tests.RunCommand(cmd)
 	if err != nil {
 		return err
 	}
diff --git a/tests/docker/svcpoliciesandfirewall/svcpoliciesandfirewall_test.go b/tests/docker/svcpoliciesandfirewall/svcpoliciesandfirewall_test.go
index d0ed0bde29ff..e6335228ccc8 100644
--- a/tests/docker/svcpoliciesandfirewall/svcpoliciesandfirewall_test.go
+++ b/tests/docker/svcpoliciesandfirewall/svcpoliciesandfirewall_test.go
@@ -107,14 +107,14 @@ var _ = Describe("Verify Services Traffic policies and firewall config", Ordered
 			for _, externalIP := range lbSvcExternalIPs {
 				Eventually(func() (string, error) {
 					cmd := "curl -m 5 -s -f http://" + externalIP + ":81/ip"
-					return docker.RunCommand(cmd)
+					return tests.RunCommand(cmd)
 				}, "25s", "5s").Should(ContainSubstring("10.42"))
 			}
 
 			// Verify connectivity to the external IP of the lbsvcExt service and the IP should not be the flannel interface IP
 			Eventually(func() (string, error) {
 				cmd := "curl -m 5 -s -f http://" + lbSvcExtExternalIPs[0] + ":82/ip"
-				return docker.RunCommand(cmd)
+				return tests.RunCommand(cmd)
 			}, "25s", "5s").ShouldNot(ContainSubstring("10.42"))
 		})
@@ -128,7 +128,7 @@ var _ = Describe("Verify Services Traffic policies and firewall config", Ordered
 			// Check that service exists
 			Eventually(func() (string, error) {
 				cmd := "kubectl get svc nginx-loadbalancer-svc-int -o jsonpath='{.spec.clusterIP}' --kubeconfig=" + tc.KubeconfigFile
-				clusterIP, _ := docker.RunCommand(cmd)
+				clusterIP, _ := tests.RunCommand(cmd)
 				return clusterIP, nil
 			}, "25s", "5s").Should(ContainSubstring("10.43"))
@@ -176,16 +176,16 @@ var _ = Describe("Verify Services Traffic policies and firewall config", Ordered
 			var workingCmd, nonWorkingCmd string
 			if serverNodeName == clientPod1Node {
-				workingCmd = fmt.Sprintf("kubectl exec --kubeconfig=%s %s -- curl -m 5 -s -f http://nginx-loadbalancer-svc-int:83/ip", tc.KubeconfigFile, clientPod1)
-				nonWorkingCmd = fmt.Sprintf("kubectl exec --kubeconfig=%s %s -- curl -m 5 -s -f http://nginx-loadbalancer-svc-int:83/ip", tc.KubeconfigFile, clientPod2)
+				workingCmd = fmt.Sprintf("kubectl exec --kubeconfig=%s %s -- wget -T 5 -q -O - http://nginx-loadbalancer-svc-int:83/ip", tc.KubeconfigFile, clientPod1)
+				nonWorkingCmd = fmt.Sprintf("kubectl exec --kubeconfig=%s %s -- wget -T 5 -q -O - http://nginx-loadbalancer-svc-int:83/ip", tc.KubeconfigFile, clientPod2)
 			}
 			if serverNodeName == clientPod2Node {
-				workingCmd = fmt.Sprintf("kubectl exec --kubeconfig=%s %s -- curl -m 5 -s -f http://nginx-loadbalancer-svc-int:83/ip", tc.KubeconfigFile, clientPod2)
-				nonWorkingCmd = fmt.Sprintf("kubectl exec --kubeconfig=%s %s -- curl -m 5 -s -f http://nginx-loadbalancer-svc-int:83/ip", tc.KubeconfigFile, clientPod1)
+				workingCmd = fmt.Sprintf("kubectl exec --kubeconfig=%s %s -- wget -T 5 -q -O - http://nginx-loadbalancer-svc-int:83/ip", tc.KubeconfigFile, clientPod2)
+				nonWorkingCmd = fmt.Sprintf("kubectl exec --kubeconfig=%s %s -- wget -T 5 -q -O - http://nginx-loadbalancer-svc-int:83/ip", tc.KubeconfigFile, clientPod1)
 			}
 
 			Eventually(func() (string, error) {
-				out, err := docker.RunCommand(workingCmd)
+				out, err := tests.RunCommand(workingCmd)
 				return out, err
 			}, "25s", "5s").Should(SatisfyAny(
 				ContainSubstring(clientPod1IP),
@@ -194,7 +194,7 @@ var _ = Describe("Verify Services Traffic policies and firewall config", Ordered
 
 			// Check the non working command fails because of internal traffic policy=local
 			Eventually(func() bool {
-				_, err := docker.RunCommand(nonWorkingCmd)
+				_, err := tests.RunCommand(nonWorkingCmd)
 				if err != nil && strings.Contains(err.Error(), "exit status") {
 					// Treat exit status as a successful condition
 					return true
@@ -204,9 +204,9 @@ var _ = Describe("Verify Services Traffic policies and firewall config", Ordered
 			// curling a service with internal traffic policy=cluster. It should work on both pods
 			for _, pod := range []string{clientPod1, clientPod2} {
-				cmd := "kubectl exec " + "--kubeconfig=" + tc.KubeconfigFile + " " + pod + " -- curl -m 5 -s -f http://nginx-loadbalancer-svc:81/ip"
+				cmd := "kubectl exec " + "--kubeconfig=" + tc.KubeconfigFile + " " + pod + " -- wget -T 5 -q -O - http://nginx-loadbalancer-svc:81/ip"
 				Eventually(func() (string, error) {
-					return docker.RunCommand(cmd)
+					return tests.RunCommand(cmd)
 				}, "20s", "5s").Should(SatisfyAny(
 					ContainSubstring(clientPod1IP),
 					ContainSubstring(clientPod2IP),
@@ -235,7 +235,7 @@ spec:
     k8s-app: nginx-app-loadbalancer-ext
 `
 			By("Removing the service nginx-loadbalancer-svc-ext")
-			_, err := docker.RunCommand("kubectl --kubeconfig=" + tc.KubeconfigFile + " delete svc nginx-loadbalancer-svc-ext")
+			_, err := tests.RunCommand("kubectl --kubeconfig=" + tc.KubeconfigFile + " delete svc nginx-loadbalancer-svc-ext")
 			Expect(err).NotTo(HaveOccurred(), "failed to remove service nginx-loadbalancer-svc-ext")
 
 			// Parse and execute the template with the node IP
@@ -259,12 +259,12 @@ spec:
 
 			By("Applying the new manifest")
 			applyCmd := fmt.Sprintf("kubectl apply --kubeconfig=%s -f %s", tc.KubeconfigFile, tmpFile.Name())
-			out, err := docker.RunCommand(applyCmd)
+			out, err := tests.RunCommand(applyCmd)
 			Expect(err).NotTo(HaveOccurred(), out)
 
 			Eventually(func() (string, error) {
 				cmd := "kubectl get svc nginx-loadbalancer-svc-ext-firewall -o jsonpath='{.spec.clusterIP}' --kubeconfig=" + tc.KubeconfigFile
-				clusterIP, _ := docker.RunCommand(cmd)
+				clusterIP, _ := tests.RunCommand(cmd)
 				return clusterIP, nil
 			}, "25s", "5s").Should(ContainSubstring("10.43"))
 		})
diff --git a/tests/docker/test-helpers.go b/tests/docker/test-helpers.go
index f30a0a1abd6c..2d0d614846ab 100644
--- a/tests/docker/test-helpers.go
+++ b/tests/docker/test-helpers.go
@@ -7,12 +7,12 @@ import (
 	"net"
 	"net/http"
 	"os"
-	"os/exec"
 	"path/filepath"
 	"regexp"
 	"strings"
 	"time"
 
+	"github.com/k3s-io/k3s/tests"
 	"golang.org/x/mod/semver"
 	"golang.org/x/sync/errgroup"
 )
@@ -38,6 +38,16 @@ type DockerNode struct {
 	URL  string // Not filled by agent nodes
 }
 
+// RunCmdOnNode runs a command on a docker container
+func (node DockerNode) RunCmdOnNode(cmd string) (string, error) {
+	dCmd := fmt.Sprintf("docker exec %s /bin/sh -c \"%s\"", node.Name, cmd)
+	out, err := tests.RunCommand(dCmd)
+	if err != nil {
+		return out, fmt.Errorf("%v: on node %s: %s", err, node.Name, out)
+	}
+	return out, nil
+}
+
 // NewTestConfig initializes the test environment and returns the configuration
 // If k3sImage == "rancher/systemd-node", then the systemd-node container and the local k3s binary
 // will be used to start the server. This is useful for scenarios where the server needs to be restarted.
@@ -142,9 +152,9 @@ func (config *TestConfig) ProvisionServers(numOfServers int) error {
 	if config.DualStack {
 		// Check if the docker network exists, if not create it
 		networkName := "k3s-test-dualstack"
-		if _, err := RunCommand(fmt.Sprintf("docker network inspect %s", networkName)); err != nil {
+		if _, err := tests.RunCommand(fmt.Sprintf("docker network inspect %s", networkName)); err != nil {
 			cmd := fmt.Sprintf("docker network create --ipv6 --subnet=fd11:decf:c0ff:ee::/64 %s", networkName)
-			if _, err := RunCommand(cmd); err != nil {
+			if _, err := tests.RunCommand(cmd); err != nil {
 				return fmt.Errorf("failed to create dual-stack network: %v", err)
 			}
 		}
@@ -171,7 +181,7 @@ func (config *TestConfig) ProvisionServers(numOfServers int) error {
 			"--mount", "type=bind,source=$(pwd)/../../../dist/artifacts/k3s,target=/usr/local/bin/k3s",
 			fmt.Sprintf("%s:v0.0.5", config.K3sImage),
 			"/usr/lib/systemd/systemd --unit=noop.target --show-status=true"}, " ")
-		if out, err := RunCommand(dRun); err != nil {
+		if out, err := tests.RunCommand(dRun); err != nil {
 			return fmt.Errorf("failed to start systemd container: %s: %v", out, err)
 		}
 		time.Sleep(5 * time.Second)
@@ -226,7 +236,7 @@ func (config *TestConfig) ProvisionServers(numOfServers int) error {
 			yamlMount,
 			config.K3sImage,
 			"server", dbConnect, joinServer, os.Getenv(fmt.Sprintf("SERVER_%d_ARGS", i))}, " ")
-		if out, err := RunCommand(dRun); err != nil {
+		if out, err := tests.RunCommand(dRun); err != nil {
 			return fmt.Errorf("failed to run server container: %s: %v", out, err)
 		}
 	}
@@ -238,7 +248,7 @@ func (config *TestConfig) ProvisionServers(numOfServers int) error {
 		} else {
 			cmd = "docker inspect --format '{{ .NetworkSettings.IPAddress }}' " + name
 		}
-		ipOutput, err := RunCommand(cmd)
+		ipOutput, err := tests.RunCommand(cmd)
 		if err != nil {
 			return fmt.Errorf("failed to get container IP address: %s: %v", ipOutput, err)
 		}
@@ -289,7 +299,7 @@ func (config *TestConfig) setupDatabase(startDB bool) (string, error) {
 	}
 
 	if startDB && startCmd != "" {
-		if out, err := RunCommand(startCmd); err != nil {
+		if out, err := tests.RunCommand(startCmd); err != nil {
 			return "", fmt.Errorf("failed to start %s container: %s: %v", config.DBType, out, err)
 		}
 		// Wait for DB to start
@@ -341,7 +351,7 @@ func (config *TestConfig) ProvisionAgents(numOfAgents int) error {
 			"--mount", "type=bind,source=$(pwd)/../../../dist/artifacts/k3s,target=/usr/local/bin/k3s",
 			fmt.Sprintf("%s:v0.0.5", config.K3sImage),
 			"/usr/lib/systemd/systemd --unit=noop.target --show-status=true"}, " ")
-		if out, err := RunCommand(dRun); err != nil {
+		if out, err := tests.RunCommand(dRun); err != nil {
 			return fmt.Errorf("failed to start systemd container: %s: %v", out, err)
 		}
 		time.Sleep(5 * time.Second)
@@ -381,7 +391,7 @@ func (config *TestConfig) ProvisionAgents(numOfAgents int) error {
 				config.K3sImage,
 				"agent", os.Getenv("ARGS"), os.Getenv(agentInstanceArgs)}, " ")
-			if out, err := RunCommand(dRun); err != nil {
+			if out, err := tests.RunCommand(dRun); err != nil {
 				return fmt.Errorf("failed to run agent container: %s: %v", out, err)
 			}
 		}
@@ -393,7 +403,7 @@ func (config *TestConfig) ProvisionAgents(numOfAgents int) error {
 		} else {
 			cmd = "docker inspect --format '{{ .NetworkSettings.IPAddress }}' " + name
 		}
-		ipOutput, err := RunCommand(cmd)
+		ipOutput, err := tests.RunCommand(cmd)
 		if err != nil {
 			return err
 		}
@@ -415,11 +425,11 @@ func (config *TestConfig) ProvisionAgents(numOfAgents int) error {
 
 func (config *TestConfig) RemoveNode(nodeName string) error {
 	cmd := fmt.Sprintf("docker stop %s", nodeName)
-	if _, err := RunCommand(cmd); err != nil {
+	if _, err := tests.RunCommand(cmd); err != nil {
 		return fmt.Errorf("failed to stop node %s: %v", nodeName, err)
 	}
 	cmd = fmt.Sprintf("docker rm -v %s", nodeName)
-	if _, err := RunCommand(cmd); err != nil {
+	if _, err := tests.RunCommand(cmd); err != nil {
 		return fmt.Errorf("failed to remove node %s: %v", nodeName, err)
 	}
 	fmt.Println("Stopped and removed ", nodeName)
@@ -473,24 +483,24 @@ func (config *TestConfig) Cleanup() error {
 
 	// Remove volumes created by the agent/server containers
 	cmd := fmt.Sprintf("docker volume ls -q | grep -F %s | xargs -r docker volume rm", strings.ToLower(filepath.Base(config.TestDir)))
-	if _, err := RunCommand(cmd); err != nil {
+	if _, err := tests.RunCommand(cmd); err != nil {
 		errs = append(errs, fmt.Errorf("failed to remove volumes: %v", err))
 	}
 
 	// Stop DB if it was started
 	if config.DBType == "mysql" || config.DBType == "postgres" {
 		cmd := fmt.Sprintf("docker stop %s", config.DBType)
-		if _, err := RunCommand(cmd); err != nil {
+		if _, err := tests.RunCommand(cmd); err != nil {
 			errs = append(errs, fmt.Errorf("failed to stop %s: %v", config.DBType, err))
 		}
 		cmd = fmt.Sprintf("docker rm -v %s", config.DBType)
-		if _, err := RunCommand(cmd); err != nil {
+		if _, err := tests.RunCommand(cmd); err != nil {
 			errs = append(errs, fmt.Errorf("failed to remove %s: %v", config.DBType, err))
 		}
 	}
 
 	// Remove dual-stack network if it exists
 	if config.DualStack {
-		if _, err := RunCommand("docker network rm k3s-test-dualstack"); err != nil {
+		if _, err := tests.RunCommand("docker network rm k3s-test-dualstack"); err != nil {
 			errs = append(errs, fmt.Errorf("failed to remove dual-stack network: %v", err))
 		}
 	}
@@ -527,7 +537,7 @@ func (config *TestConfig) CopyAndModifyKubeconfig() error {
 	var cmd string
 	for i := 1; i <= 2; i++ {
 		cmd = fmt.Sprintf("docker cp %s:/etc/rancher/k3s/k3s.yaml %s/kubeconfig.yaml", config.Servers[serverID].Name, config.TestDir)
-		_, err = RunCommand(cmd)
+		_, err = tests.RunCommand(cmd)
 		if err != nil {
 			fmt.Printf("Failed to copy kubeconfig, attempt %d: %v\n", i, err)
 			time.Sleep(10 * time.Second)
@@ -545,34 +555,17 @@ func (config *TestConfig) CopyAndModifyKubeconfig() error {
 	} else {
 		cmd = fmt.Sprintf("sed -i -e 's~:6443~:%d~g' %s/kubeconfig.yaml", config.Servers[serverID].Port, config.TestDir)
 	}
-	if _, err := RunCommand(cmd); err != nil {
+	if _, err := tests.RunCommand(cmd); err != nil {
 		return fmt.Errorf("failed to update kubeconfig: %v", err)
 	}
 
 	config.KubeconfigFile = filepath.Join(config.TestDir, "kubeconfig.yaml")
+	if err := os.Setenv("DOCKER_KUBECONFIG", config.KubeconfigFile); err != nil {
+		return err
+	}
 	fmt.Println("Kubeconfig file: ", config.KubeconfigFile)
 	return nil
 }
 
-// RunCmdOnNode runs a command on a docker container
-func (node DockerNode) RunCmdOnNode(cmd string) (string, error) {
-	dCmd := fmt.Sprintf("docker exec %s /bin/sh -c \"%s\"", node.Name, cmd)
-	out, err := RunCommand(dCmd)
-	if err != nil {
-		return out, fmt.Errorf("%v: on node %s: %s", err, node.Name, out)
-	}
-	return out, nil
-}
-
-// RunCommand Runs command on the host.
-func RunCommand(cmd string) (string, error) {
-	c := exec.Command("bash", "-c", cmd)
-	out, err := c.CombinedOutput()
-	if err != nil {
-		return string(out), fmt.Errorf("failed to run command: %s, %v", cmd, err)
-	}
-	return string(out), err
-}
-
 func checkVersionSkew(config *TestConfig) error {
 	if len(config.Agents) > 0 {
 		serverImage := getEnvOrDefault("K3S_IMAGE_SERVER", config.K3sImage)
@@ -665,7 +658,7 @@ func (config TestConfig) DeployWorkload(workload string) (string, error) {
 		filename := filepath.Join(resourceDir, f.Name())
 		if strings.TrimSpace(f.Name()) == workload {
 			cmd := "kubectl apply -f " + filename + " --kubeconfig=" + config.KubeconfigFile
-			return RunCommand(cmd)
+			return tests.RunCommand(cmd)
 		}
 	}
 	return "", nil
@@ -674,7 +667,7 @@ func (config TestConfig) DeployWorkload(workload string) (string, error) {
 func (config TestConfig) FetchClusterIP(servicename string) (string, error) {
 	if config.DualStack {
 		cmd := "kubectl get svc " + servicename + " -o jsonpath='{.spec.clusterIPs}' --kubeconfig=" + config.KubeconfigFile
-		res, err := RunCommand(cmd)
+		res, err := tests.RunCommand(cmd)
 		if err != nil {
 			return res, err
 		}
@@ -682,7 +675,7 @@ func (config TestConfig) FetchClusterIP(servicename string) (string, error) {
 		return strings.Trim(res, "[]"), nil
 	}
 	cmd := "kubectl get svc " + servicename + " -o jsonpath='{.spec.clusterIP}' --kubeconfig=" + config.KubeconfigFile
-	return RunCommand(cmd)
+	return tests.RunCommand(cmd)
 }
 
 type svcExternalIP struct {
@@ -694,7 +687,7 @@ type svcExternalIP struct {
 func FetchExternalIPs(kubeconfig string, servicename string) ([]string, error) {
 	var externalIPs []string
 	cmd := "kubectl get svc " + servicename + " -o jsonpath='{.status.loadBalancer.ingress}' --kubeconfig=" + kubeconfig
-	output, err := RunCommand(cmd)
+	output, err := tests.RunCommand(cmd)
 	if err != nil {
 		return externalIPs, err
 	}
@@ -728,7 +721,7 @@ func RestartCluster(nodes []DockerNode) error {
 
 func DescribeNodesAndPods(config *TestConfig) string {
 	cmd := "kubectl describe node,pod -A --kubeconfig=" + config.KubeconfigFile
-	out, err := RunCommand(cmd)
+	out, err := tests.RunCommand(cmd)
 	if err != nil {
 		return fmt.Sprintf("** %v **\n%s", err, out)
 	}
@@ -736,7 +729,7 @@ func DescribeNodesAndPods(config *TestConfig) string {
 }
 
 func ListContainers() string {
-	o, err := RunCommand("docker container list --all --no-trunc")
+	o, err := tests.RunCommand("docker container list --all --no-trunc")
 	if err != nil {
 		return fmt.Sprintf("** failed to list docker containers: %v **\n%s\n", err, o)
 	}
@@ -750,7 +743,7 @@ func TailDockerLogs(lines int, nodes []DockerNode) string {
 	logs := &strings.Builder{}
 	for _, node := range nodes {
 		cmd := fmt.Sprintf("docker logs %s --tail=%d", node.Name, lines)
-		if l, err := RunCommand(cmd); err != nil {
+		if l, err := tests.RunCommand(cmd); err != nil {
 			fmt.Fprintf(logs, "** failed to read docker logs for node %s ***\n%v\n", node.Name, err)
 		} else {
 			fmt.Fprintf(logs, "** docker logs for node %s ***\n%s\n", node.Name, l)
diff --git a/tests/docker/upgrade/upgrade_test.go b/tests/docker/upgrade/upgrade_test.go
index 61f9988685af..0920ec561371 100644
--- a/tests/docker/upgrade/upgrade_test.go
+++ b/tests/docker/upgrade/upgrade_test.go
@@ -11,7 +11,6 @@ import (
 
 	"github.com/k3s-io/k3s/tests"
 	"github.com/k3s-io/k3s/tests/docker"
-	tester "github.com/k3s-io/k3s/tests/docker"
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
 )
@@ -21,7 +20,7 @@ import (
 var k3sImage = flag.String("k3sImage", "", "The current commit build of K3s")
 var channel = flag.String("channel", "latest", "The release channel to test")
 var ci = flag.Bool("ci", false, "running on CI, forced cleanup")
-var config *tester.TestConfig
+var config *docker.TestConfig
 var numServers = 1
 var numAgents = 1
@@ -45,14 +44,14 @@ var _ = Describe("Upgrade Tests", Ordered, func() {
 			*channel = "latest"
 		}
 
-		latestVersion, err = tester.GetVersionFromChannel(*channel)
+		latestVersion, err = docker.GetVersionFromChannel(*channel)
 		Expect(err).NotTo(HaveOccurred())
 		Expect(latestVersion).To(ContainSubstring("v1."))
 		fmt.Println("Using latest version: ", latestVersion)
 	})
 	It("should setup environment", func() {
 		var err error
-		config, err = tester.NewTestConfig("rancher/k3s:" + latestVersion)
+		config, err = docker.NewTestConfig("rancher/k3s:" + latestVersion)
 		testID := filepath.Base(config.TestDir)
 		Expect(err).NotTo(HaveOccurred())
 		for i := 0; i < numServers; i++ {
@@ -94,18 +93,18 @@ var _ = Describe("Upgrade Tests", Ordered, func() {
 			By("Remove old servers and agents")
 			for _, server := range config.Servers {
 				cmd := fmt.Sprintf("docker stop %s", server.Name)
-				Expect(tester.RunCommand(cmd)).Error().NotTo(HaveOccurred())
+				Expect(tests.RunCommand(cmd)).Error().NotTo(HaveOccurred())
 				cmd = fmt.Sprintf("docker rm %s", server.Name)
-				Expect(tester.RunCommand(cmd)).Error().NotTo(HaveOccurred())
+				Expect(tests.RunCommand(cmd)).Error().NotTo(HaveOccurred())
 				fmt.Printf("Stopped %s\n", server.Name)
 			}
 			config.Servers = nil
 
 			for _, agent := range config.Agents {
 				cmd := fmt.Sprintf("docker stop %s", agent.Name)
-				Expect(tester.RunCommand(cmd)).Error().NotTo(HaveOccurred())
+				Expect(tests.RunCommand(cmd)).Error().NotTo(HaveOccurred())
 				cmd = fmt.Sprintf("docker rm %s", agent.Name)
-				Expect(tester.RunCommand(cmd)).Error().NotTo(HaveOccurred())
+				Expect(tests.RunCommand(cmd)).Error().NotTo(HaveOccurred())
 			}
 			config.Agents = nil
@@ -119,9 +118,9 @@ var _ = Describe("Upgrade Tests", Ordered, func() {
 		})
 		It("should confirm commit version", func() {
 			for _, server := range config.Servers {
-				Expect(tester.VerifyValidVersion(server, "kubectl")).To(Succeed())
-				Expect(tester.VerifyValidVersion(server, "ctr")).To(Succeed())
-				Expect(tester.VerifyValidVersion(server, "crictl")).To(Succeed())
+				Expect(docker.VerifyValidVersion(server, "kubectl")).To(Succeed())
+				Expect(docker.VerifyValidVersion(server, "ctr")).To(Succeed())
+				Expect(docker.VerifyValidVersion(server, "crictl")).To(Succeed())
 
 				out, err := server.RunCmdOnNode("k3s --version")
 				Expect(err).NotTo(HaveOccurred())
diff --git a/tests/e2e/amd64_resource_files/clusterip.yaml b/tests/e2e/amd64_resource_files/clusterip.yaml
index e972f32d19f1..90b5b4af314a 100644
--- a/tests/e2e/amd64_resource_files/clusterip.yaml
+++ b/tests/e2e/amd64_resource_files/clusterip.yaml
@@ -14,7 +14,8 @@ spec:
    spec:
      containers:
      - name: nginx
-        image: ranchertest/mytestcontainer
+        image: rancher/mirrored-library-busybox:1.37.0
+        args: ['sh', '-c', 'echo Welcome to nginx! > index.html; hostname > name.html; httpd -vvf']
        ports:
        - containerPort: 80
 ---
diff --git a/tests/e2e/amd64_resource_files/daemonset.yaml b/tests/e2e/amd64_resource_files/daemonset.yaml
index 3360f354214c..720c6ad11447 100644
--- a/tests/e2e/amd64_resource_files/daemonset.yaml
+++ b/tests/e2e/amd64_resource_files/daemonset.yaml
@@ -13,6 +13,6 @@ spec:
    spec:
      containers:
      - name: webserver
-        image: nginx
+        image: rancher/mirrored-library-nginx:1.29.1-alpine
        ports:
        - containerPort: 80
diff --git a/tests/e2e/amd64_resource_files/dualstack_clusterip.yaml b/tests/e2e/amd64_resource_files/dualstack_clusterip.yaml
index c9d44764037d..3a6187bd1c2a 100644
--- a/tests/e2e/amd64_resource_files/dualstack_clusterip.yaml
+++ b/tests/e2e/amd64_resource_files/dualstack_clusterip.yaml
@@ -14,7 +14,8 @@ spec:
    spec:
      containers:
      - name: nginx
-        image: ranchertest/mytestcontainer
+        image: rancher/mirrored-library-busybox:1.37.0
+        args: ['sh', '-c', 'echo Welcome to nginx! > index.html; hostname > name.html; httpd -vvf']
        ports:
        - containerPort: 80
 ---
diff --git a/tests/e2e/amd64_resource_files/dualstack_nodeport.yaml b/tests/e2e/amd64_resource_files/dualstack_nodeport.yaml
index 2a5b38f1c42b..91557a54b729 100644
--- a/tests/e2e/amd64_resource_files/dualstack_nodeport.yaml
+++ b/tests/e2e/amd64_resource_files/dualstack_nodeport.yaml
@@ -14,7 +14,8 @@ spec:
    spec:
      containers:
      - name: nginx
-        image: ranchertest/mytestcontainer
+        image: rancher/mirrored-library-busybox:1.37.0
+        args: ['sh', '-c', 'echo Welcome to nginx! > index.html; hostname > name.html; httpd -vvf']
        ports:
        - containerPort: 80
 ---
diff --git a/tests/e2e/amd64_resource_files/ingress.yaml b/tests/e2e/amd64_resource_files/ingress.yaml
index cf49a5064099..9b1bb99df5e2 100644
--- a/tests/e2e/amd64_resource_files/ingress.yaml
+++ b/tests/e2e/amd64_resource_files/ingress.yaml
@@ -18,34 +18,35 @@ spec:
 apiVersion: v1
 kind: Service
 metadata:
-    name: nginx-ingress-svc
-    labels:
-      k8s-app: nginx-app-ingress
+  name: nginx-ingress-svc
+  labels:
+    k8s-app: nginx-app-ingress
 spec:
-    ports:
-      - port: 80
-        targetPort: 80
-        protocol: TCP
-        name: http
-    selector:
-      k8s-app: nginx-app-ingress
+  ports:
+  - port: 80
+    targetPort: 80
+    protocol: TCP
+    name: http
+  selector:
+    k8s-app: nginx-app-ingress
 ---
 apiVersion: v1
 kind: ReplicationController
 metadata:
-    name: test-ingress
+  name: test-ingress
 spec:
-    replicas: 2
-    selector:
-      k8s-app: nginx-app-ingress
-    template:
-      metadata:
-        labels:
-          k8s-app: nginx-app-ingress
-      spec:
-        terminationGracePeriodSeconds: 60
-        containers:
-        - name: testcontainer
-          image: ranchertest/mytestcontainer
-          ports:
-          - containerPort: 80
+  replicas: 2
+  selector:
+    k8s-app: nginx-app-ingress
+  template:
+    metadata:
+      labels:
+        k8s-app: nginx-app-ingress
+    spec:
+      terminationGracePeriodSeconds: 60
+      containers:
+      - name: testcontainer
+        image: rancher/mirrored-library-busybox:1.37.0
+        args: ['sh', '-c', 'echo Welcome to nginx! > index.html; hostname > name.html; httpd -vvf']
+        ports:
+        - containerPort: 80
diff --git a/tests/e2e/amd64_resource_files/loadbalancer.yaml b/tests/e2e/amd64_resource_files/loadbalancer.yaml
index 3a5dfac418fb..405031b1aba0 100644
--- a/tests/e2e/amd64_resource_files/loadbalancer.yaml
+++ b/tests/e2e/amd64_resource_files/loadbalancer.yaml
@@ -33,7 +33,8 @@ spec:
    spec:
      containers:
      - name: nginx
-        image: ranchertest/mytestcontainer
+        image: rancher/mirrored-library-busybox:1.37.0
+        args: ['sh', '-c', 'echo Welcome to nginx! > index.html; hostname > name.html; httpd -vvf']
        ports:
        - containerPort: 80
        volumeMounts:
diff --git a/tests/e2e/amd64_resource_files/multus_test.yaml b/tests/e2e/amd64_resource_files/multus_test.yaml
index 2312d121700b..807e9ae624a0 100644
--- a/tests/e2e/amd64_resource_files/multus_test.yaml
+++ b/tests/e2e/amd64_resource_files/multus_test.yaml
@@ -43,7 +43,7 @@ metadata:
     }]'
 spec:
   containers:
-  - image: rancher/mirrored-library-busybox:1.36.1
+  - image: rancher/mirrored-library-busybox:1.37.0
     command:
     - sleep
    - infinity
@@ -69,7 +69,7 @@ metadata:
     }]'
 spec:
   containers:
-  - image: rancher/mirrored-library-busybox:1.36.1
+  - image: rancher/mirrored-library-busybox:1.37.0
     command:
    - sleep
    - infinity
diff --git a/tests/e2e/amd64_resource_files/nodeport.yaml b/tests/e2e/amd64_resource_files/nodeport.yaml
index 2187b732db89..53997813cfaa 100644
--- a/tests/e2e/amd64_resource_files/nodeport.yaml
+++ b/tests/e2e/amd64_resource_files/nodeport.yaml
@@ -14,7 +14,7 @@ spec:
    spec:
      containers:
      - name: nginx
-        image: ranchertest/mytestcontainer
+        image: rancher/mirrored-library-nginx:1.29.1-alpine
        ports:
        - containerPort: 80
 ---
diff --git a/tests/e2e/amd64_resource_files/pod_client.yaml b/tests/e2e/amd64_resource_files/pod_client.yaml
index 45b1a17e2f34..c8969704796a 100644
--- a/tests/e2e/amd64_resource_files/pod_client.yaml
+++ b/tests/e2e/amd64_resource_files/pod_client.yaml
@@ -15,9 +15,10 @@ spec:
        app: client
    spec:
      containers:
-      - image: ranchertest/mytestcontainer
+      - image: rancher/mirrored-library-busybox:1.37.0
+        args: ['sh', '-c', 'echo Welcome to nginx! > index.html; hostname > name.html; httpd -vvf']
        imagePullPolicy: Always
-        name: client-curl
+        name: client-wget
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
@@ -32,10 +33,10 @@ spec:
 apiVersion: v1
 kind: Service
 metadata:
-  name: client-curl
+  name: client-wget
  labels:
    app: client
-    service: client-curl
+    service: client-wget
 spec:
  type: ClusterIP
  selector:
diff --git a/tests/e2e/dualstack/dualstack_test.go b/tests/e2e/dualstack/dualstack_test.go
index ace452477960..e09d008a77a5 100644
--- a/tests/e2e/dualstack/dualstack_test.go
+++ b/tests/e2e/dualstack/dualstack_test.go
@@ -88,7 +88,7 @@ var _ = Describe("Verify DualStack Configuration", Ordered, func() {
 			Expect(err).NotTo(HaveOccurred())
 			Eventually(func() (string, error) {
 				cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-clusterip --field-selector=status.phase=Running --kubeconfig=" + tc.KubeconfigFile
-				return e2e.RunCommand(cmd)
+				return tests.RunCommand(cmd)
 			}, "120s", "5s").Should(ContainSubstring("ds-clusterip-pod"))
 
 			// Checks both IPv4 and IPv6
@@ -115,18 +115,18 @@ var _ = Describe("Verify DualStack Configuration", Ordered, func() {
 			_, err := tc.DeployWorkload("dualstack_ingress.yaml")
 			Expect(err).NotTo(HaveOccurred(), "Ingress manifest not deployed")
 			cmd := "kubectl get ingress ds-ingress -o jsonpath=\"{.spec.rules[*].host}\""
-			hostName, err := e2e.RunCommand(cmd)
+			hostName, err := tests.RunCommand(cmd)
 			Expect(err).NotTo(HaveOccurred(), "failed cmd: "+cmd)
 			nodeIPs, err := e2e.GetNodeIPs(tc.KubeconfigFile)
 			Expect(err).NotTo(HaveOccurred(), "failed cmd: "+cmd)
 			for _, node := range nodeIPs {
 				cmd := fmt.Sprintf("curl --header host:%s -m 5 -s -f http://%s/name.html", hostName, node.IPv4)
 				Eventually(func() (string, error) {
-					return e2e.RunCommand(cmd)
+					return tests.RunCommand(cmd)
 				}, "10s", "2s").Should(ContainSubstring("ds-clusterip-pod"), "failed cmd: "+cmd)
 				cmd = fmt.Sprintf("curl --header host:%s -m 5 -s -f http://[%s]/name.html", hostName, node.IPv6)
 				Eventually(func() (string, error) {
-					return e2e.RunCommand(cmd)
+					return tests.RunCommand(cmd)
 				}, "5s", "1s").Should(ContainSubstring("ds-clusterip-pod"), "failed cmd: "+cmd)
 			}
 		})
@@ -135,41 +135,40 @@ var _ = Describe("Verify DualStack Configuration", Ordered, func() {
 			_, err := tc.DeployWorkload("dualstack_nodeport.yaml")
 			Expect(err).NotTo(HaveOccurred())
 			cmd := "kubectl get service ds-nodeport-svc --output jsonpath=\"{.spec.ports[0].nodePort}\""
-			nodeport, err := e2e.RunCommand(cmd)
+			nodeport, err := tests.RunCommand(cmd)
 			Expect(err).NotTo(HaveOccurred(), "failed cmd: "+cmd)
 			nodeIPs, err := e2e.GetNodeIPs(tc.KubeconfigFile)
 			Expect(err).NotTo(HaveOccurred())
 			for _, node := range nodeIPs {
 				cmd = "curl -m 5 -s -f http://" + node.IPv4 + ":" + nodeport + "/name.html"
 				Eventually(func() (string, error) {
-					return e2e.RunCommand(cmd)
+					return tests.RunCommand(cmd)
 				}, "10s", "1s").Should(ContainSubstring("ds-nodeport-pod"), "failed cmd: "+cmd)
 				cmd = "curl -m 5 -s -f http://[" + node.IPv6 + "]:" + nodeport + "/name.html"
 				Eventually(func() (string, error) {
-					return e2e.RunCommand(cmd)
+					return tests.RunCommand(cmd)
 				}, "10s", "1s").Should(ContainSubstring("ds-nodeport-pod"), "failed cmd: "+cmd)
 			}
 		})
 		It("Verifies podSelector Network Policy", func() {
 			_, err := tc.DeployWorkload("pod_client.yaml")
 			Expect(err).NotTo(HaveOccurred())
-			cmd := "kubectl exec svc/client-curl -- curl -m 5 -s -f http://ds-clusterip-svc/name.html"
+			cmd := "kubectl exec svc/client-wget -- wget -T 5 -O - -q http://ds-clusterip-svc/name.html"
 			Eventually(func() (string, error) {
-				return e2e.RunCommand(cmd)
+				return tests.RunCommand(cmd)
 			}, "20s", "3s").Should(ContainSubstring("ds-clusterip-pod"), "failed cmd: "+cmd)
 			_, err = tc.DeployWorkload("netpol-fail.yaml")
 			Expect(err).NotTo(HaveOccurred())
-			cmd = "kubectl exec svc/client-curl -- curl -m 5 -s -f http://ds-clusterip-svc/name.html"
-			Eventually(func() error {
-				_, err = e2e.RunCommand(cmd)
-				Expect(err).To(HaveOccurred())
+			cmd = "kubectl exec svc/client-wget -- wget -T 5 -O - -q http://ds-clusterip-svc/name.html"
+			Consistently(func() error {
+				_, err = tests.RunCommand(cmd)
 				return err
-			}, "20s", "3s")
+			}, "20s", "3s").ShouldNot(Succeed())
 			_, err = tc.DeployWorkload("netpol-work.yaml")
 			Expect(err).NotTo(HaveOccurred())
-			cmd = "kubectl exec svc/client-curl -- curl -m 5 -s -f http://ds-clusterip-svc/name.html"
+			cmd = "kubectl exec svc/client-wget -- wget -T 5 -O - -q http://ds-clusterip-svc/name.html"
 			Eventually(func() (string, error) {
-				return e2e.RunCommand(cmd)
+				return tests.RunCommand(cmd)
 			}, "20s", "3s").Should(ContainSubstring("ds-clusterip-pod"), "failed cmd: "+cmd)
 		})
 	})
diff --git a/tests/e2e/embeddedmirror/embeddedmirror_test.go b/tests/e2e/embeddedmirror/embeddedmirror_test.go
index 92289a9006ea..3ea81251f893 100644
--- a/tests/e2e/embeddedmirror/embeddedmirror_test.go
+++ b/tests/e2e/embeddedmirror/embeddedmirror_test.go
@@ -51,13 +51,13 @@ var _ = Describe("Verify Create", Ordered, func() {
 			By(tc.Status())
 		})
 		It("Saves image into server images dir", func() {
-			res, err := e2e.RunCommand("docker image pull docker.io/rancher/mirrored-library-busybox:1.34.1")
+			res, err := tests.RunCommand("docker image pull docker.io/rancher/mirrored-library-busybox:1.34.1")
 			Expect(err).NotTo(HaveOccurred(), "failed to pull image: "+res)
-			res, err = e2e.RunCommand("docker image tag docker.io/rancher/mirrored-library-busybox:1.34.1 registry.example.com/rancher/mirrored-library-busybox:1.34.1")
+			res, err = tests.RunCommand("docker image tag docker.io/rancher/mirrored-library-busybox:1.34.1 registry.example.com/rancher/mirrored-library-busybox:1.34.1")
 			Expect(err).NotTo(HaveOccurred(), "failed to tag image: "+res)
-			res, err = e2e.RunCommand("docker image save registry.example.com/rancher/mirrored-library-busybox:1.34.1 -o mirrored-library-busybox.tar")
+			res, err = tests.RunCommand("docker image save registry.example.com/rancher/mirrored-library-busybox:1.34.1 -o mirrored-library-busybox.tar")
 			Expect(err).NotTo(HaveOccurred(), "failed to save image: "+res)
-			res, err = e2e.RunCommand("vagrant scp mirrored-library-busybox.tar " + tc.Servers[0].String() + ":/tmp/mirrored-library-busybox.tar")
+			res, err = tests.RunCommand("vagrant scp mirrored-library-busybox.tar " + tc.Servers[0].Name + ":/tmp/mirrored-library-busybox.tar")
 			Expect(err).NotTo(HaveOccurred(), "failed to 'vagrant scp' image tarball: "+res)
 			res, err = tc.Servers[0].RunCmdOnNode("mv /tmp/mirrored-library-busybox.tar /var/lib/rancher/k3s/agent/images/mirrored-library-busybox.tar")
 			Expect(err).NotTo(HaveOccurred(), "failed to move image tarball: "+res)
@@ -75,80 +75,80 @@ var _ = Describe("Verify Create", Ordered, func() {
 			}, "620s", "10s").Should(Succeed())
 		})
 		It("Should create and validate deployment with embedded registry mirror using image tag", func() {
-			res, err := e2e.RunCommand("kubectl create deployment my-deployment-1 --image=docker.io/rancher/mirrored-library-busybox:1.37.0 -- sleep 86400")
+			res, err := tests.RunCommand("kubectl create deployment my-deployment-1 --image=docker.io/rancher/mirrored-library-busybox:1.37.0 -- sleep 86400")
 			fmt.Println(res)
 			Expect(err).NotTo(HaveOccurred())
 
 			patchCmd := fmt.Sprintf(`kubectl patch deployment my-deployment-1 --patch '{"spec":{"replicas":%d,"revisionHistoryLimit":0,"strategy":{"type":"Recreate", "rollingUpdate": null},"template":{"spec":{"affinity":{"podAntiAffinity":{"requiredDuringSchedulingIgnoredDuringExecution":[{"labelSelector":{"matchExpressions":[{"key":"app","operator":"In","values":["my-deployment-1"]}]},"topologyKey":"kubernetes.io/hostname"}]}}}}}}'`, *serverCount+*agentCount)
-			res, err = e2e.RunCommand(patchCmd)
+			res, err = tests.RunCommand(patchCmd)
 			fmt.Println(res)
 			Expect(err).NotTo(HaveOccurred())
-			res, err = e2e.RunCommand("kubectl rollout status deployment my-deployment-1 --watch=true --timeout=360s")
+			res, err = tests.RunCommand("kubectl rollout status deployment my-deployment-1 --watch=true --timeout=360s")
 			fmt.Println(res)
 			Expect(err).NotTo(HaveOccurred())
-			res, err = e2e.RunCommand("kubectl delete deployment my-deployment-1")
+			res, err = tests.RunCommand("kubectl delete deployment my-deployment-1")
 			fmt.Println(res)
 			Expect(err).NotTo(HaveOccurred())
 		})
 		// @sha256:101b4afd76732482eff9b95cae5f94bcf295e521fbec4e01b69c5421f3f3f3e5 is :1.37.0 which has already been pulled and should be reused
 		It("Should create and validate deployment with embedded registry mirror using image digest for existing tag", func() {
-			res, err := e2e.RunCommand("kubectl create deployment my-deployment-2 --image=docker.io/rancher/mirrored-library-busybox@sha256:101b4afd76732482eff9b95cae5f94bcf295e521fbec4e01b69c5421f3f3f3e5 -- sleep 86400")
+			res, err := tests.RunCommand("kubectl create deployment my-deployment-2 --image=docker.io/rancher/mirrored-library-busybox@sha256:101b4afd76732482eff9b95cae5f94bcf295e521fbec4e01b69c5421f3f3f3e5 -- sleep 86400")
 			fmt.Println(res)
 			Expect(err).NotTo(HaveOccurred())
 
 			patchCmd := fmt.Sprintf(`kubectl patch deployment my-deployment-2 --patch '{"spec":{"replicas":%d,"revisionHistoryLimit":0,"strategy":{"type":"Recreate", "rollingUpdate": null},"template":{"spec":{"affinity":{"podAntiAffinity":{"requiredDuringSchedulingIgnoredDuringExecution":[{"labelSelector":{"matchExpressions":[{"key":"app","operator":"In","values":["my-deployment-2"]}]},"topologyKey":"kubernetes.io/hostname"}]}}}}}}'`, *serverCount+*agentCount)
-			res, err = e2e.RunCommand(patchCmd)
+			res, err = tests.RunCommand(patchCmd)
 			fmt.Println(res)
 			Expect(err).NotTo(HaveOccurred())
-			res, err = e2e.RunCommand("kubectl rollout status deployment my-deployment-2 --watch=true --timeout=360s")
+			res, err = tests.RunCommand("kubectl rollout status deployment my-deployment-2 --watch=true --timeout=360s")
 			fmt.Println(res)
 			Expect(err).NotTo(HaveOccurred())
-			res, err = e2e.RunCommand("kubectl delete deployment my-deployment-2")
+			res, err = tests.RunCommand("kubectl delete deployment my-deployment-2")
 			fmt.Println(res)
 			Expect(err).NotTo(HaveOccurred())
 		})
 		// @sha256:8a45424ddf949bbe9bb3231b05f9032a45da5cd036eb4867b511b00734756d6f is :1.36.1 which should not have been pulled yet
 		It("Should create and validate deployment with embedded registry mirror using image digest without existing tag", func() {
-			res, err := e2e.RunCommand("kubectl create deployment my-deployment-3 --image=docker.io/rancher/mirrored-library-busybox@sha256:8a45424ddf949bbe9bb3231b05f9032a45da5cd036eb4867b511b00734756d6f -- sleep 86400")
+			res, err := tests.RunCommand("kubectl create deployment my-deployment-3 --image=docker.io/rancher/mirrored-library-busybox@sha256:8a45424ddf949bbe9bb3231b05f9032a45da5cd036eb4867b511b00734756d6f -- sleep 86400")
 			fmt.Println(res)
 			Expect(err).NotTo(HaveOccurred())
 
 			patchCmd := fmt.Sprintf(`kubectl patch deployment my-deployment-3 --patch '{"spec":{"replicas":%d,"revisionHistoryLimit":0,"strategy":{"type":"Recreate", "rollingUpdate": null},"template":{"spec":{"affinity":{"podAntiAffinity":{"requiredDuringSchedulingIgnoredDuringExecution":[{"labelSelector":{"matchExpressions":[{"key":"app","operator":"In","values":["my-deployment-3"]}]},"topologyKey":"kubernetes.io/hostname"}]}}}}}}'`, *serverCount+*agentCount)
-			res, err = e2e.RunCommand(patchCmd)
+			res, err = tests.RunCommand(patchCmd)
 			fmt.Println(res)
 			Expect(err).NotTo(HaveOccurred())
-			res, err = e2e.RunCommand("kubectl rollout status deployment my-deployment-3 --watch=true --timeout=360s")
+			res, err = tests.RunCommand("kubectl rollout status deployment my-deployment-3 --watch=true --timeout=360s")
 			fmt.Println(res)
 			Expect(err).NotTo(HaveOccurred())
-			res, err = e2e.RunCommand("kubectl delete deployment my-deployment-3")
+			res, err = tests.RunCommand("kubectl delete deployment my-deployment-3")
 			fmt.Println(res)
 			Expect(err).NotTo(HaveOccurred())
 		})
 		// create deployment from imported image
 		It("Should create and validate deployment with embedded registry mirror using image tag from import", func() {
-			res, err := e2e.RunCommand("kubectl create deployment my-deployment-4 --image=registry.example.com/rancher/mirrored-library-busybox:1.34.1 -- sleep 86400")
+			res, err := tests.RunCommand("kubectl create deployment my-deployment-4 --image=registry.example.com/rancher/mirrored-library-busybox:1.34.1 -- sleep 86400")
 			fmt.Println(res)
 			Expect(err).NotTo(HaveOccurred())
 
 			patchCmd := fmt.Sprintf(`kubectl patch deployment my-deployment-4 --patch '{"spec":{"replicas":%d,"revisionHistoryLimit":0,"strategy":{"type":"Recreate", "rollingUpdate": 
null},"template":{"spec":{"affinity":{"podAntiAffinity":{"requiredDuringSchedulingIgnoredDuringExecution":[{"labelSelector":{"matchExpressions":[{"key":"app","operator":"In","values":["my-deployment-4"]}]},"topologyKey":"kubernetes.io/hostname"}]}}}}}}'`, *serverCount+*agentCount) - res, err = e2e.RunCommand(patchCmd) + res, err = tests.RunCommand(patchCmd) fmt.Println(res) Expect(err).NotTo(HaveOccurred()) - res, err = e2e.RunCommand("kubectl rollout status deployment my-deployment-4 --watch=true --timeout=360s") + res, err = tests.RunCommand("kubectl rollout status deployment my-deployment-4 --watch=true --timeout=360s") fmt.Println(res) Expect(err).NotTo(HaveOccurred()) - res, err = e2e.RunCommand("kubectl delete deployment my-deployment-4") + res, err = tests.RunCommand("kubectl delete deployment my-deployment-4") fmt.Println(res) Expect(err).NotTo(HaveOccurred()) }) @@ -159,20 +159,20 @@ var _ = Describe("Verify Create", Ordered, func() { // snapshotter does not and will flatten the manifest list to a single-platform image with a different digest. // If this test fails, make sure the `docker image save` command above is run on a host that is using containerd-snapshotter. It("Should create and validate deployment with embedded registry mirror using image digest from import", func() { - res, err := e2e.RunCommand("kubectl create deployment my-deployment-5 --image=registry.example.com/rancher/mirrored-library-busybox@sha256:125dfcbe72a0158c16781d3ad254c0d226a6534b59cc7c2bf549cdd50c6e8989 -- sleep 86400") + res, err := tests.RunCommand("kubectl create deployment my-deployment-5 --image=registry.example.com/rancher/mirrored-library-busybox@sha256:125dfcbe72a0158c16781d3ad254c0d226a6534b59cc7c2bf549cdd50c6e8989 -- sleep 86400") fmt.Println(res) Expect(err).NotTo(HaveOccurred()) patchCmd := fmt.Sprintf(`kubectl patch deployment my-deployment-5 --patch '{"spec":{"replicas":%d,"revisionHistoryLimit":0,"strategy":{"type":"Recreate", "rollingUpdate": null},"template":{"spec":{"affinity":{"podAntiAffinity":{"requiredDuringSchedulingIgnoredDuringExecution":[{"labelSelector":{"matchExpressions":[{"key":"app","operator":"In","values":["my-deployment-5"]}]},"topologyKey":"kubernetes.io/hostname"}]}}}}}}'`, *serverCount+*agentCount) - res, err = e2e.RunCommand(patchCmd) + res, err = tests.RunCommand(patchCmd) fmt.Println(res) Expect(err).NotTo(HaveOccurred()) - res, err = e2e.RunCommand("kubectl rollout status deployment my-deployment-5 --watch=true --timeout=360s") + res, err = tests.RunCommand("kubectl rollout status deployment my-deployment-5 --watch=true --timeout=360s") fmt.Println(res) Expect(err).NotTo(HaveOccurred()) - res, err = e2e.RunCommand("kubectl delete deployment my-deployment-5") + res, err = tests.RunCommand("kubectl delete deployment my-deployment-5") fmt.Println(res) Expect(err).NotTo(HaveOccurred()) }) @@ -180,7 +180,7 @@ var _ = Describe("Verify Create", Ordered, func() { /* Disabled, ref: https://github.com/spegel-org/spegel/issues/1023 It("Should expose embedded registry metrics", func() { grepCmd := fmt.Sprintf("kubectl get --raw /api/v1/nodes/%s/proxy/metrics | grep -F 'spegel_advertised_images{registry=\"docker.io\"}'", tc.Servers[0]) - res, err := e2e.RunCommand(grepCmd) + res, err := tests.RunCommand(grepCmd) fmt.Println(res) Expect(err).NotTo(HaveOccurred()) }) diff --git a/tests/e2e/externalip/externalip_test.go b/tests/e2e/externalip/externalip_test.go index 2eb4680a1aab..52e02e458ff2 100644 --- a/tests/e2e/externalip/externalip_test.go +++ 
b/tests/e2e/externalip/externalip_test.go @@ -27,7 +27,7 @@ var local = flag.Bool("local", false, "deploy a locally built K3s binary") // getExternalIPs returns the externalIP configured for flannel func getExternalIPs(kubeConfigFile string) ([]string, error) { cmd := `kubectl get nodes -o jsonpath='{range .items[*]}{.metadata.annotations.flannel\.alpha\.coreos\.com/public-ip-overwrite}' --kubeconfig=` + kubeConfigFile - res, err := e2e.RunCommand(cmd) + res, err := tests.RunCommand(cmd) if err != nil { return nil, err } @@ -127,9 +127,9 @@ var _ = Describe("Verify External-IP config", Ordered, func() { }, "40s", "5s").Should(Succeed(), "failed getClientIPs") for _, ip := range clientIPs { - cmd := "kubectl exec svc/client-curl -- curl -m 5 -s -f http://" + ip.IPv4 + "/name.html" + cmd := "kubectl exec svc/client-wget -- wget -T 5 -q -O - http://" + ip.IPv4 + "/name.html" Eventually(func() (string, error) { - return e2e.RunCommand(cmd) + return tests.RunCommand(cmd) }, "30s", "10s").Should(ContainSubstring("client-deployment"), "failed cmd: "+cmd) } }) @@ -138,7 +138,7 @@ var _ = Describe("Verify External-IP config", Ordered, func() { Expect(err).NotTo(HaveOccurred()) cmd := "kubectl get svc -l k8s-app=nginx-app-loadbalancer -o=jsonpath='{range .items[*]}{.metadata.name}{.status.loadBalancer.ingress[*].ip}{end}'" Eventually(func() (string, error) { - return e2e.RunCommand(cmd) + return tests.RunCommand(cmd) }, "20s", "3s").Should(ContainSubstring("10.100.100"), "failed cmd: "+cmd) }) }) diff --git a/tests/e2e/multus/multus_test.go b/tests/e2e/multus/multus_test.go index f0ac7b08ecc5..c0f6c2dafed7 100644 --- a/tests/e2e/multus/multus_test.go +++ b/tests/e2e/multus/multus_test.go @@ -85,7 +85,7 @@ var _ = Describe("Verify Multus config", Ordered, func() { It("Verifies multus daemonset comes up", func() { Eventually(func() (string, error) { cmd := "kubectl get ds multus -n kube-system -o jsonpath='{.status.numberReady}' --kubeconfig=" + tc.KubeconfigFile - return e2e.RunCommand(cmd) + return tests.RunCommand(cmd) }, "120s", "5s").Should(ContainSubstring("2")) }) It("Deploys Multus NetworkAttachmentDefinition and test pods", func() { @@ -96,7 +96,7 @@ var _ = Describe("Verify Multus config", Ordered, func() { It("Verifies internode connectivity over multus network", func() { cmd := "kubectl exec pod-macvlan --kubeconfig=" + tc.KubeconfigFile + " -- ping -c 1 -w 2 10.1.1.102" Eventually(func() (string, error) { - return e2e.RunCommand(cmd) + return tests.RunCommand(cmd) }, "20s", "3s").Should(ContainSubstring("0% packet loss"), "failed cmd: "+cmd) }) }) diff --git a/tests/e2e/rootless/rootless_test.go b/tests/e2e/rootless/rootless_test.go index 72961d9745a9..3e2c44e29dee 100644 --- a/tests/e2e/rootless/rootless_test.go +++ b/tests/e2e/rootless/rootless_test.go @@ -45,10 +45,10 @@ func StartK3sCluster(nodes []e2e.VagrantNode, serverYAML string) error { if _, err := node.RunCmdOnNode(yamlCmd); err != nil { return err } - if _, err := RunCmdOnRootlessNode("systemctl --user daemon-reload", node.String()); err != nil { + if _, err := RunCmdOnRootlessNode("systemctl --user daemon-reload", node.Name); err != nil { return err } - if _, err := RunCmdOnRootlessNode(startCmd, node.String()); err != nil { + if _, err := RunCmdOnRootlessNode(startCmd, node.Name); err != nil { return err } } @@ -57,13 +57,13 @@ func KillK3sCluster(nodes []e2e.VagrantNode) error { for _, node := range nodes { - if _, err :=
RunCmdOnRootlessNode(`systemctl --user stop k3s-rootless`, node.String()); err != nil { + if _, err := RunCmdOnRootlessNode(`systemctl --user stop k3s-rootless`, node.Name); err != nil { return err } - if _, err := RunCmdOnRootlessNode("k3s-killall.sh", node.String()); err != nil { + if _, err := RunCmdOnRootlessNode("k3s-killall.sh", node.Name); err != nil { return err } - if _, err := RunCmdOnRootlessNode("rm -rf /home/vagrant/.rancher/k3s/server/db", node.String()); err != nil { + if _, err := RunCmdOnRootlessNode("rm -rf /home/vagrant/.rancher/k3s/server/db", node.Name); err != nil { return err } } @@ -97,7 +97,7 @@ var _ = Describe("Various Startup Configurations", Ordered, func() { By(tc.Status()) Eventually(func() error { - kubeConfigFile, err := GenRootlessKubeconfigFile(tc.Servers[0].String()) + kubeConfigFile, err := GenRootlessKubeconfigFile(tc.Servers[0].Name) tc.KubeconfigFile = kubeConfigFile return err }, "360s", "5s").Should(Succeed()) @@ -121,10 +121,10 @@ var _ = Describe("Various Startup Configurations", Ordered, func() { var res, logs string var err error Eventually(func() error { - res, err = e2e.RunCommand(cmd) + res, err = tests.RunCommand(cmd) // Common error: metrics not available yet, pull more logs if err != nil && strings.Contains(res, "metrics not available yet") { - logs, _ = e2e.RunCommand("kubectl logs -n kube-system -l k8s-app=metrics-server") + logs, _ = tests.RunCommand("kubectl logs -n kube-system -l k8s-app=metrics-server") } return err }, "300s", "10s").Should(Succeed(), "failed to get pod metrics: %s: %s", res, logs) @@ -135,10 +135,10 @@ var _ = Describe("Various Startup Configurations", Ordered, func() { var err error cmd := "kubectl top node" Eventually(func() error { - res, err = e2e.RunCommand(cmd) + res, err = tests.RunCommand(cmd) // Common error: metrics not available yet, pull more logs if err != nil && strings.Contains(res, "metrics not available yet") { - logs, _ = e2e.RunCommand("kubectl logs -n kube-system -l k8s-app=metrics-server") + logs, _ = tests.RunCommand("kubectl logs -n kube-system -l k8s-app=metrics-server") } return err }, "30s", "5s").Should(Succeed(), "failed to get node metrics: %s: %s", res, logs) @@ -146,13 +146,13 @@ It("Runs an interactive command in a pod", func() { cmd := "kubectl run busybox --rm -it --restart=Never --image=rancher/mirrored-library-busybox:1.34.1 -- uname -a" - _, err := e2e.RunCommand(cmd) + _, err := tests.RunCommand(cmd) Expect(err).NotTo(HaveOccurred()) }) It("Collects logs from a pod", func() { cmd := "kubectl logs -n kube-system -l app.kubernetes.io/name=traefik -c traefik" - _, err := e2e.RunCommand(cmd) + _, err := tests.RunCommand(cmd) Expect(err).NotTo(HaveOccurred()) }) @@ -188,7 +188,7 @@ func RunCmdOnRootlessNode(cmd string, nodename string) (string, error) { injectEnv = "GOCOVERDIR=/tmp/k3scov " } runcmd := "vagrant ssh " + nodename + " -c \"" + injectEnv + cmd + "\"" - out, err := e2e.RunCommand(runcmd) + out, err := tests.RunCommand(runcmd) // On GHA CI we see warnings about "[fog][WARNING] Unrecognized arguments: libvirt_ip_command" // these are added to the command output and need to be removed out = strings.ReplaceAll(out, "[fog][WARNING] Unrecognized arguments: libvirt_ip_command\n", "") @@ -203,7 +203,7 @@ func GenRootlessKubeconfigFile(serverName string) (string, error) { if err != nil { return "", err } - vNode := e2e.VagrantNode(serverName) + vNode := e2e.VagrantNode{Name: serverName} nodeIP, err :=
vNode.FetchNodeExternalIP() if err != nil { return "", err @@ -223,13 +223,13 @@ func GenRootlessKubeconfigFile(serverName string) (string, error) { // When used in GHA CI, the logs are uploaded as an artifact on failure. func SaveRootlessJournalLogs(nodes []e2e.VagrantNode) error { for _, node := range nodes { - lf, err := os.Create(node.String() + "-jlog.txt") + lf, err := os.Create(node.Name + "-jlog.txt") if err != nil { return err } defer lf.Close() - cmd := "vagrant ssh --no-tty " + node.String() + " -c \"journalctl -u --user k3s-rootless --no-pager\"" - logs, err := e2e.RunCommand(cmd) + cmd := "vagrant ssh --no-tty " + node.Name + " -c \"journalctl --user -u k3s-rootless --no-pager\"" + logs, err := tests.RunCommand(cmd) if err != nil { return err } diff --git a/tests/e2e/splitserver/splitserver_test.go b/tests/e2e/splitserver/splitserver_test.go index 63113febbb5b..bb39b5498550 100644 --- a/tests/e2e/splitserver/splitserver_test.go +++ b/tests/e2e/splitserver/splitserver_test.go @@ -37,13 +37,13 @@ func createSplitCluster(nodeOS string, etcdCount, controlPlaneCount, agentCount agentNodes := make([]e2e.VagrantNode, agentCount) for i := 0; i < etcdCount; i++ { - etcdNodes[i] = e2e.VagrantNode("server-etcd-" + strconv.Itoa(i)) + etcdNodes[i] = e2e.VagrantNode{Name: "server-etcd-" + strconv.Itoa(i)} } for i := 0; i < controlPlaneCount; i++ { - cpNodes[i] = e2e.VagrantNode("server-cp-" + strconv.Itoa(i)) + cpNodes[i] = e2e.VagrantNode{Name: "server-cp-" + strconv.Itoa(i)} } for i := 0; i < agentCount; i++ { - agentNodes[i] = e2e.VagrantNode("agent-" + strconv.Itoa(i)) + agentNodes[i] = e2e.VagrantNode{Name: "agent-" + strconv.Itoa(i)} } nodeRoles := strings.Join(e2e.VagrantSlice(etcdNodes), " ") + " " + strings.Join(e2e.VagrantSlice(cpNodes), " ") + " " + strings.Join(e2e.VagrantSlice(agentNodes), " ") @@ -63,9 +63,9 @@ func createSplitCluster(nodeOS string, etcdCount, controlPlaneCount, agentCount // Provision the first etcd node. In GitHub Actions, this also imports the VM image into libvirt, which // takes time and can cause the next vagrant up to fail if it is not given enough time to complete.
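// A minimal sketch, not part of this patch, of the staggered-provisioning pattern the hunks
// below follow: the first node is brought up synchronously so the box import can finish,
// then the remaining nodes are started in parallel on an errgroup, with a sleep between
// launches because libvirt/VirtualBox needs spacing between boots. The name remainingNodes
// and the fixed 10s delay are placeholders for illustration only.
//
//	errg, _ := errgroup.WithContext(context.Background())
//	for _, node := range remainingNodes {
//		cmd := fmt.Sprintf(`vagrant up --no-tty --no-provision %s &>> vagrant.log`, node.Name)
//		errg.Go(func() error {
//			_, err := tests.RunCommand(cmd)
//			return err
//		})
//		time.Sleep(10 * time.Second)
//	}
//	if err := errg.Wait(); err != nil {
//		return err
//	}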
- cmd := fmt.Sprintf(`E2E_NODE_ROLES="%s" E2E_NODE_BOXES="%s" vagrant up --no-tty --no-provision %s &> vagrant.log`, nodeRoles, nodeBoxes, etcdNodes[0].String()) + cmd := fmt.Sprintf(`E2E_NODE_ROLES="%s" E2E_NODE_BOXES="%s" vagrant up --no-tty --no-provision %s &> vagrant.log`, nodeRoles, nodeBoxes, etcdNodes[0].Name) fmt.Println(cmd) - if _, err := e2e.RunCommand(cmd); err != nil { + if _, err := tests.RunCommand(cmd); err != nil { return etcdNodes, cpNodes, agentNodes, err } @@ -74,7 +74,7 @@ func createSplitCluster(nodeOS string, etcdCount, controlPlaneCount, agentCount for _, node := range allNodeNames[1:] { cmd := fmt.Sprintf(`E2E_NODE_ROLES="%s" E2E_NODE_BOXES="%s" vagrant up --no-tty --no-provision %s &>> vagrant.log`, nodeRoles, nodeBoxes, node) errg.Go(func() error { - _, err := e2e.RunCommand(cmd) + _, err := tests.RunCommand(cmd) return err }) // libVirt/Virtualbox needs some time between provisioning nodes @@ -88,11 +88,11 @@ func createSplitCluster(nodeOS string, etcdCount, controlPlaneCount, agentCount testOptions += " E2E_RELEASE_VERSION=skip" for _, node := range allNodeNames { cmd := fmt.Sprintf(`E2E_NODE_ROLES=%s vagrant scp ../../../dist/artifacts/k3s %s:/tmp/`, node, node) - if _, err := e2e.RunCommand(cmd); err != nil { + if _, err := tests.RunCommand(cmd); err != nil { return etcdNodes, cpNodes, agentNodes, fmt.Errorf("failed to scp k3s binary to %s: %v", node, err) } cmd = fmt.Sprintf(`E2E_NODE_ROLES=%s vagrant ssh %s -c "sudo mv /tmp/k3s /usr/local/bin/"`, node, node) - if _, err := e2e.RunCommand(cmd); err != nil { + if _, err := tests.RunCommand(cmd); err != nil { return etcdNodes, cpNodes, agentNodes, err } } @@ -102,7 +102,7 @@ func createSplitCluster(nodeOS string, etcdCount, controlPlaneCount, agentCount for _, node := range allNodeNames { cmd = fmt.Sprintf(`E2E_NODE_ROLES="%s" E2E_NODE_BOXES="%s" %s vagrant provision %s &>> vagrant.log`, nodeRoles, nodeBoxes, testOptions, node) errg.Go(func() error { - _, err := e2e.RunCommand(cmd) + _, err := tests.RunCommand(cmd) return err }) // libVirt/Virtualbox needs some time between provisioning nodes @@ -154,7 +154,7 @@ var _ = DescribeTableSubtree("Verify Create", Ordered, func(startFlags string) { Expect(err).NotTo(HaveOccurred(), "failed to start k3s-agent") } Eventually(func() error { - kubeConfigFile, err := e2e.GenKubeconfigFile(cpNodes[0].String()) + kubeConfigFile, err := e2e.GenKubeconfigFile(cpNodes[0].Name) tc = &e2e.TestConfig{ KubeconfigFile: kubeConfigFile, Hardened: *hardened, @@ -185,7 +185,7 @@ var _ = DescribeTableSubtree("Verify Create", Ordered, func(startFlags string) { cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-clusterip --field-selector=status.phase=Running --kubeconfig=" + tc.KubeconfigFile Eventually(func() (string, error) { - return e2e.RunCommand(cmd) + return tests.RunCommand(cmd) }, "240s", "5s").Should(ContainSubstring("test-clusterip"), "failed cmd: "+cmd) clusterip, _ := e2e.FetchClusterIP(tc.KubeconfigFile, "nginx-clusterip-svc", false) @@ -204,17 +204,17 @@ var _ = DescribeTableSubtree("Verify Create", Ordered, func(startFlags string) { for _, node := range cpNodes { nodeExternalIP, _ := node.FetchNodeExternalIP() cmd := "kubectl get service nginx-nodeport-svc --kubeconfig=" + tc.KubeconfigFile + " --output jsonpath=\"{.spec.ports[0].nodePort}\"" - nodeport, err := e2e.RunCommand(cmd) + nodeport, err := tests.RunCommand(cmd) Expect(err).NotTo(HaveOccurred()) cmd = "kubectl get pods -o=name -l k8s-app=nginx-app-nodeport --field-selector=status.phase=Running --kubeconfig=" + 
tc.KubeconfigFile Eventually(func() (string, error) { - return e2e.RunCommand(cmd) + return tests.RunCommand(cmd) }, "240s", "5s").Should(ContainSubstring("test-nodeport"), "nodeport pod was not created") cmd = "curl -m 5 -s -f http://" + nodeExternalIP + ":" + nodeport + "/name.html" Eventually(func() (string, error) { - return e2e.RunCommand(cmd) + return tests.RunCommand(cmd) }, "240s", "5s").Should(ContainSubstring("test-nodeport"), "failed cmd: "+cmd) } }) @@ -227,17 +227,17 @@ var _ = DescribeTableSubtree("Verify Create", Ordered, func(startFlags string) { ip, _ := node.FetchNodeExternalIP() cmd := "kubectl get service nginx-loadbalancer-svc --kubeconfig=" + tc.KubeconfigFile + " --output jsonpath=\"{.spec.ports[0].port}\"" - port, err := e2e.RunCommand(cmd) + port, err := tests.RunCommand(cmd) Expect(err).NotTo(HaveOccurred()) cmd = "kubectl get pods -o=name -l k8s-app=nginx-app-loadbalancer --field-selector=status.phase=Running --kubeconfig=" + tc.KubeconfigFile Eventually(func() (string, error) { - return e2e.RunCommand(cmd) + return tests.RunCommand(cmd) }, "240s", "5s").Should(ContainSubstring("test-loadbalancer"), "failed cmd: "+cmd) cmd = "curl -m 5 -s -f http://" + ip + ":" + port + "/name.html" Eventually(func() (string, error) { - return e2e.RunCommand(cmd) + return tests.RunCommand(cmd) }, "240s", "5s").Should(ContainSubstring("test-loadbalancer"), "failed cmd: "+cmd) } }) @@ -250,7 +250,7 @@ var _ = DescribeTableSubtree("Verify Create", Ordered, func(startFlags string) { ip, _ := node.FetchNodeExternalIP() cmd := "curl --header host:foo1.bar.com -m 5 -s -f http://" + ip + "/name.html" Eventually(func() (string, error) { - return e2e.RunCommand(cmd) + return tests.RunCommand(cmd) }, "240s", "5s").Should(ContainSubstring("test-ingress"), "failed cmd: "+cmd) } }) @@ -272,12 +272,12 @@ var _ = DescribeTableSubtree("Verify Create", Ordered, func(startFlags string) { cmd := "kubectl get pods dnsutils --kubeconfig=" + tc.KubeconfigFile Eventually(func() (string, error) { - return e2e.RunCommand(cmd) + return tests.RunCommand(cmd) }, "420s", "2s").Should(ContainSubstring("dnsutils"), "failed cmd: "+cmd) cmd = "kubectl --kubeconfig=" + tc.KubeconfigFile + " exec -i -t dnsutils -- nslookup kubernetes.default" Eventually(func() (string, error) { - return e2e.RunCommand(cmd) + return tests.RunCommand(cmd) }, "420s", "2s").Should(ContainSubstring("kubernetes.default.svc.cluster.local"), "failed cmd: "+cmd) }) }) diff --git a/tests/e2e/startup/startup_test.go b/tests/e2e/startup/startup_test.go index 6a8f78a7306c..7835dbdfe11d 100644 --- a/tests/e2e/startup/startup_test.go +++ b/tests/e2e/startup/startup_test.go @@ -41,7 +41,7 @@ func StartK3sCluster(nodes []e2e.VagrantNode, serverYAML string, agentYAML strin var yamlCmd string var resetCmd string var startCmd string - if strings.Contains(node.String(), "server") { + if strings.Contains(node.Name, "server") { resetCmd = "head -n 4 /etc/rancher/k3s/config.yaml > /tmp/config.yaml && sudo mv /tmp/config.yaml /etc/rancher/k3s/config.yaml" yamlCmd = fmt.Sprintf("echo '%s' >> /etc/rancher/k3s/config.yaml", serverYAML) startCmd = "systemctl start k3s" @@ -103,7 +103,7 @@ var _ = Describe("Various Startup Configurations", Ordered, func() { By("CLUSTER CONFIG") By("OS:" + *nodeOS) By(tc.Status()) - tc.KubeconfigFile, err = e2e.GenKubeconfigFile(tc.Servers[0].String()) + tc.KubeconfigFile, err = e2e.GenKubeconfigFile(tc.Servers[0].Name) Expect(err).NotTo(HaveOccurred()) }) @@ -124,26 +124,26 @@ var _ = Describe("Various Startup Configurations", 
Ordered, func() { It("Returns pod metrics", func() { cmd := "kubectl top pod -A" Eventually(func() error { - _, err := e2e.RunCommand(cmd) + _, err := tests.RunCommand(cmd) return err }, "600s", "5s").Should(Succeed()) }) It("Returns node metrics", func() { cmd := "kubectl top node" - res, err := e2e.RunCommand(cmd) + res, err := tests.RunCommand(cmd) Expect(err).NotTo(HaveOccurred(), "failed to get node metrics: %s", res) }) It("Runs an interactive command in a pod", func() { - cmd := "kubectl run busybox --rm -it --restart=Never --image=rancher/mirrored-library-busybox:1.36.1 -- uname -a" + cmd := "kubectl run busybox --rm -it --restart=Never --image=rancher/mirrored-library-busybox:1.37.0 -- uname -a" _, err := tc.Servers[0].RunCmdOnNode(cmd) Expect(err).NotTo(HaveOccurred()) }) It("Collects logs from a pod", func() { cmd := "kubectl logs -n kube-system -l k8s-app=metrics-server -c metrics-server" - _, err := e2e.RunCommand(cmd) + _, err := tests.RunCommand(cmd) Expect(err).NotTo(HaveOccurred()) }) @@ -161,7 +161,7 @@ var _ = Describe("Various Startup Configurations", Ordered, func() { By("CLUSTER CONFIG") By("OS:" + *nodeOS) By(tc.Status()) - tc.KubeconfigFile, err = e2e.GenKubeconfigFile(tc.Servers[0].String()) + tc.KubeconfigFile, err = e2e.GenKubeconfigFile(tc.Servers[0].Name) Expect(err).NotTo(HaveOccurred()) By("Fetching node status") @@ -199,7 +199,7 @@ var _ = Describe("Various Startup Configurations", Ordered, func() { Eventually(func() (string, error) { cmd := "kubectl get nodes -l node-role.kubernetes.io/etcd=true" return tc.Servers[0].RunCmdOnNode(cmd) - }, "120s", "5s").Should(ContainSubstring(tc.Servers[0].String())) + }, "120s", "5s").Should(ContainSubstring(tc.Servers[0].Name)) }) It("Checks node and pod status after migration", func() { @@ -245,7 +245,7 @@ var _ = Describe("Various Startup Configurations", Ordered, func() { By("CLUSTER CONFIG") By("OS:" + *nodeOS) By(tc.Status()) - tc.KubeconfigFile, err = e2e.GenKubeconfigFile(tc.Servers[0].String()) + tc.KubeconfigFile, err = e2e.GenKubeconfigFile(tc.Servers[0].Name) Expect(err).NotTo(HaveOccurred()) }) @@ -258,8 +258,8 @@ var _ = Describe("Various Startup Configurations", Ordered, func() { It("Returns kubelet configuration", func() { for _, node := range tc.AllNodes() { - cmd := "kubectl get --raw /api/v1/nodes/" + node.String() + "/proxy/configz" - Expect(e2e.RunCommand(cmd)).To(ContainSubstring(`"shutdownGracePeriod":"19s","shutdownGracePeriodCriticalPods":"13s"`)) + cmd := "kubectl get --raw /api/v1/nodes/" + node.Name + "/proxy/configz" + Expect(tests.RunCommand(cmd)).To(ContainSubstring(`"shutdownGracePeriod":"19s","shutdownGracePeriodCriticalPods":"13s"`)) } }) @@ -277,7 +277,7 @@ var _ = Describe("Various Startup Configurations", Ordered, func() { By("CLUSTER CONFIG") By("OS:" + *nodeOS) By(tc.Status()) - tc.KubeconfigFile, err = e2e.GenKubeconfigFile(tc.Servers[0].String()) + tc.KubeconfigFile, err = e2e.GenKubeconfigFile(tc.Servers[0].Name) Expect(err).NotTo(HaveOccurred()) }) @@ -301,10 +301,10 @@ var _ = Describe("Various Startup Configurations", Ordered, func() { var res, logs string var err error Eventually(func() error { - res, err = e2e.RunCommand(cmd) + res, err = tests.RunCommand(cmd) // Common error: metrics not available yet, pull more logs if err != nil && strings.Contains(res, "metrics not available yet") { - logs, _ = e2e.RunCommand("kubectl logs -n kube-system -l k8s-app=metrics-server") + logs, _ = tests.RunCommand("kubectl logs -n kube-system -l k8s-app=metrics-server") } return err }, "300s",
"10s").Should(Succeed(), "failed to get pod metrics: %s: %s", res, logs) @@ -315,24 +315,24 @@ var _ = Describe("Various Startup Configurations", Ordered, func() { var err error cmd := "kubectl top node" Eventually(func() error { - res, err = e2e.RunCommand(cmd) + res, err = tests.RunCommand(cmd) // Common error: metrics not available yet, pull more logs if err != nil && strings.Contains(res, "metrics not available yet") { - logs, _ = e2e.RunCommand("kubectl logs -n kube-system -l k8s-app=metrics-server") + logs, _ = tests.RunCommand("kubectl logs -n kube-system -l k8s-app=metrics-server") } return err }, "30s", "5s").Should(Succeed(), "failed to get node metrics: %s: %s", res, logs) }) It("Runs an interactive command a pod", func() { - cmd := "kubectl run busybox --rm -it --restart=Never --image=rancher/mirrored-library-busybox:1.36.1 -- uname -a" + cmd := "kubectl run busybox --rm -it --restart=Never --image=rancher/mirrored-library-busybox:1.37.0 -- uname -a" _, err := tc.Servers[0].RunCmdOnNode(cmd) Expect(err).NotTo(HaveOccurred()) }) It("Collects logs from a pod", func() { cmd := "kubectl logs -n kube-system -l app.kubernetes.io/name=traefik -c traefik" - _, err := e2e.RunCommand(cmd) + _, err := tests.RunCommand(cmd) Expect(err).NotTo(HaveOccurred()) }) @@ -359,7 +359,7 @@ var _ = Describe("Various Startup Configurations", Ordered, func() { By("CLUSTER CONFIG") By("OS:" + *nodeOS) By(tc.Status()) - tc.KubeconfigFile, err = e2e.GenKubeconfigFile(tc.Servers[0].String()) + tc.KubeconfigFile, err = e2e.GenKubeconfigFile(tc.Servers[0].Name) Expect(err).NotTo(HaveOccurred()) }) It("has loaded the test container image", func() { @@ -382,7 +382,7 @@ var _ = Describe("Various Startup Configurations", Ordered, func() { By("CLUSTER CONFIG") By("OS:" + *nodeOS) By(tc.Status()) - tc.KubeconfigFile, err = e2e.GenKubeconfigFile(tc.Servers[0].String()) + tc.KubeconfigFile, err = e2e.GenKubeconfigFile(tc.Servers[0].Name) Expect(err).NotTo(HaveOccurred()) }) diff --git a/tests/e2e/testutils.go b/tests/e2e/testutils.go index c39191092925..30f9d8dd83b2 100644 --- a/tests/e2e/testutils.go +++ b/tests/e2e/testutils.go @@ -6,7 +6,6 @@ import ( "fmt" "io" "os" - "os/exec" "path/filepath" "regexp" "strconv" @@ -14,22 +13,38 @@ import ( "time" json "github.com/json-iterator/go" + "github.com/k3s-io/k3s/tests" ginkgo "github.com/onsi/ginkgo/v2" "golang.org/x/sync/errgroup" ) // defining the VagrantNode type allows methods like RunCmdOnNode to be defined on it. // This makes test code more consistent, as similar functions can exists in Docker and E2E tests. 
-type VagrantNode string +type VagrantNode struct { + Name string +} -func (v VagrantNode) String() string { - return string(v) +// RunCmdOnNode executes a command from within the given node as sudo +func (v VagrantNode) RunCmdOnNode(cmd string) (string, error) { + injectEnv := "" + if _, ok := os.LookupEnv("E2E_GOCOVER"); ok && strings.HasPrefix(cmd, "k3s") { + injectEnv = "GOCOVERDIR=/tmp/k3scov " + } + runcmd := "vagrant ssh --no-tty " + v.Name + " -c \"sudo " + injectEnv + cmd + "\"" + out, err := tests.RunCommand(runcmd) + // On GHA CI we see warnings about "[fog][WARNING] Unrecognized arguments: libvirt_ip_command" + // these are added to the command output and need to be removed + out = strings.ReplaceAll(out, "[fog][WARNING] Unrecognized arguments: libvirt_ip_command\n", "") + if err != nil { + return out, fmt.Errorf("failed to run command: %s on node %s: %s, %v", cmd, v.Name, out, err) + } + return out, nil } func VagrantSlice(v []VagrantNode) []string { nodes := make([]string, 0, len(v)) for _, node := range v { - nodes = append(nodes, node.String()) + nodes = append(nodes, node.Name) } return nodes } @@ -104,11 +119,11 @@ func newNodeError(cmd string, node VagrantNode, err error) *NodeError { func genNodeEnvs(nodeOS string, serverCount, agentCount int) ([]VagrantNode, []VagrantNode, string) { serverNodes := make([]VagrantNode, serverCount) for i := 0; i < serverCount; i++ { - serverNodes[i] = VagrantNode("server-" + strconv.Itoa(i)) + serverNodes[i] = VagrantNode{Name: "server-" + strconv.Itoa(i)} } agentNodes := make([]VagrantNode, agentCount) for i := 0; i < agentCount; i++ { - agentNodes[i] = VagrantNode("agent-" + strconv.Itoa(i)) + agentNodes[i] = VagrantNode{Name: "agent-" + strconv.Itoa(i)} } nodeRoles := strings.Join(VagrantSlice(serverNodes), " ") + " " + strings.Join(VagrantSlice(agentNodes), " ") @@ -133,25 +148,25 @@ func CreateCluster(nodeOS string, serverCount, agentCount int) (*TestConfig, err } } // Bring up the first server node - cmd := fmt.Sprintf(`%s %s vagrant up --no-tty %s &> vagrant.log`, nodeEnvs, testOptions, serverNodes[0]) + cmd := fmt.Sprintf(`%s %s vagrant up --no-tty %s &> vagrant.log`, nodeEnvs, testOptions, serverNodes[0].Name) fmt.Println(cmd) - if _, err := RunCommand(cmd); err != nil { + if _, err := tests.RunCommand(cmd); err != nil { return nil, newNodeError(cmd, serverNodes[0], err) } // Bring up the rest of the nodes in parallel errg, _ := errgroup.WithContext(context.Background()) for _, node := range append(serverNodes[1:], agentNodes...) 
{ - cmd := fmt.Sprintf(`%s %s vagrant up --no-tty %s &>> vagrant.log`, nodeEnvs, testOptions, node.String()) + cmd := fmt.Sprintf(`%s %s vagrant up --no-tty %s &>> vagrant.log`, nodeEnvs, testOptions, node.Name) fmt.Println(cmd) errg.Go(func() error { - if _, err := RunCommand(cmd); err != nil { + if _, err := tests.RunCommand(cmd); err != nil { return newNodeError(cmd, node, err) } return nil }) // We must wait a bit between provisioning nodes to avoid too many learners attempting to join the cluster - if strings.Contains(node.String(), "agent") { + if strings.Contains(node.Name, "agent") { time.Sleep(5 * time.Second) } else { time.Sleep(30 * time.Second) @@ -168,7 +183,7 @@ func CreateCluster(nodeOS string, serverCount, agentCount int) (*TestConfig, err var err error res, _ := serverNodes[0].RunCmdOnNode("systemctl is-active k3s") if !strings.Contains(res, "inactive") && strings.Contains(res, "active") { - kubeConfigFile, err = GenKubeconfigFile(serverNodes[0].String()) + kubeConfigFile, err = GenKubeconfigFile(serverNodes[0].Name) if err != nil { return nil, err } @@ -185,12 +200,12 @@ func CreateCluster(nodeOS string, serverCount, agentCount int) (*TestConfig, err func scpK3sBinary(nodeNames []VagrantNode) error { for _, node := range nodeNames { - cmd := fmt.Sprintf(`vagrant scp ../../../dist/artifacts/k3s %s:/tmp/`, node.String()) - if _, err := RunCommand(cmd); err != nil { + cmd := fmt.Sprintf(`vagrant scp ../../../dist/artifacts/k3s %s:/tmp/`, node.Name) + if _, err := tests.RunCommand(cmd); err != nil { return fmt.Errorf("failed to scp k3s binary to %s: %v", node, err) } - cmd = "vagrant ssh " + node.String() + " -c \"sudo mv /tmp/k3s /usr/local/bin/\"" - if _, err := RunCommand(cmd); err != nil { + cmd = "vagrant ssh " + node.Name + " -c \"sudo mv /tmp/k3s /usr/local/bin/\"" + if _, err := tests.RunCommand(cmd); err != nil { return err } } @@ -215,18 +230,18 @@ func CreateLocalCluster(nodeOS string, serverCount, agentCount int) (*TestConfig // Provision the first server node. In GitHub Actions, this also imports the VM image into libvirt, which // takes time and can cause the next vagrant up to fail if it is not given enough time to complete. - cmd = fmt.Sprintf(`%s %s vagrant up --no-tty --no-provision %s &> vagrant.log`, nodeEnvs, testOptions, serverNodes[0]) + cmd = fmt.Sprintf(`%s %s vagrant up --no-tty --no-provision %s &> vagrant.log`, nodeEnvs, testOptions, serverNodes[0].Name) fmt.Println(cmd) - if _, err := RunCommand(cmd); err != nil { + if _, err := tests.RunCommand(cmd); err != nil { return nil, newNodeError(cmd, serverNodes[0], err) } // Bring up the rest of the nodes in parallel errg, _ := errgroup.WithContext(context.Background()) for _, node := range append(serverNodes[1:], agentNodes...) { - cmd := fmt.Sprintf(`%s %s vagrant up --no-tty --no-provision %s &>> vagrant.log`, nodeEnvs, testOptions, node) + cmd := fmt.Sprintf(`%s %s vagrant up --no-tty --no-provision %s &>> vagrant.log`, nodeEnvs, testOptions, node.Name) errg.Go(func() error { - if _, err := RunCommand(cmd); err != nil { + if _, err := tests.RunCommand(cmd); err != nil { return newNodeError(cmd, node, err) } return nil @@ -244,9 +259,9 @@ func CreateLocalCluster(nodeOS string, serverCount, agentCount int) (*TestConfig // Install K3s on all nodes in parallel errg, _ = errgroup.WithContext(context.Background()) for _, node := range append(serverNodes, agentNodes...) 
{ - cmd = fmt.Sprintf(`%s %s vagrant provision %s &>> vagrant.log`, nodeEnvs, testOptions, node) + cmd = fmt.Sprintf(`%s %s vagrant provision %s &>> vagrant.log`, nodeEnvs, testOptions, node.Name) errg.Go(func() error { - if _, err := RunCommand(cmd); err != nil { + if _, err := tests.RunCommand(cmd); err != nil { return newNodeError(cmd, node, err) } return nil @@ -265,7 +280,7 @@ func CreateLocalCluster(nodeOS string, serverCount, agentCount int) (*TestConfig var err error res, _ := serverNodes[0].RunCmdOnNode("systemctl is-active k3s") if !strings.Contains(res, "inactive") && strings.Contains(res, "active") { - kubeConfigFile, err = GenKubeconfigFile(serverNodes[0].String()) + kubeConfigFile, err = GenKubeconfigFile(serverNodes[0].Name) if err != nil { return nil, err } @@ -295,7 +310,7 @@ func (tc TestConfig) DeployWorkload(workload string) (string, error) { filename := filepath.Join(resourceDir, f.Name()) if strings.TrimSpace(f.Name()) == workload { cmd := "kubectl apply -f " + filename + " --kubeconfig=" + tc.KubeconfigFile - return RunCommand(cmd) + return tests.RunCommand(cmd) } } return "", nil @@ -317,7 +332,7 @@ func KillK3sCluster(nodes []VagrantNode) error { } func DestroyCluster() error { - if out, err := RunCommand("vagrant destroy -f"); err != nil { + if out, err := tests.RunCommand("vagrant destroy -f"); err != nil { return fmt.Errorf("%v - command output:\n%s", err, out) } return os.Remove("vagrant.log") @@ -326,7 +341,7 @@ func DestroyCluster() error { func FetchClusterIP(kubeconfig string, servicename string, dualStack bool) (string, error) { if dualStack { cmd := "kubectl get svc " + servicename + " -o jsonpath='{.spec.clusterIPs}' --kubeconfig=" + kubeconfig - res, err := RunCommand(cmd) + res, err := tests.RunCommand(cmd) if err != nil { return res, err } @@ -334,14 +349,14 @@ func FetchClusterIP(kubeconfig string, servicename string, dualStack bool) (stri return strings.Trim(res, "[]"), nil } cmd := "kubectl get svc " + servicename + " -o jsonpath='{.spec.clusterIP}' --kubeconfig=" + kubeconfig - return RunCommand(cmd) + return tests.RunCommand(cmd) } // FetchExternalIPs fetches the external IPs of a service func FetchExternalIPs(kubeconfig string, servicename string) ([]string, error) { var externalIPs []string cmd := "kubectl get svc " + servicename + " -o jsonpath='{.status.loadBalancer.ingress}' --kubeconfig=" + kubeconfig - output, err := RunCommand(cmd) + output, err := tests.RunCommand(cmd) if err != nil { return externalIPs, err } @@ -362,7 +377,7 @@ func FetchExternalIPs(kubeconfig string, servicename string) ([]string, error) { func FetchIngressIP(kubeconfig string) ([]string, error) { cmd := "kubectl get ing ingress -o jsonpath='{.status.loadBalancer.ingress[*].ip}' --kubeconfig=" + kubeconfig - res, err := RunCommand(cmd) + res, err := tests.RunCommand(cmd) if err != nil { return nil, err } @@ -389,7 +404,7 @@ func (v VagrantNode) FetchNodeExternalIP() (string, error) { func GenKubeconfigFile(nodeName string) (string, error) { kubeconfigFile := fmt.Sprintf("kubeconfig-%s", nodeName) cmd := fmt.Sprintf("vagrant scp %s:/etc/rancher/k3s/k3s.yaml ./%s", nodeName, kubeconfigFile) - _, err := RunCommand(cmd) + _, err := tests.RunCommand(cmd) if err != nil { return "", err } @@ -401,7 +416,7 @@ func GenKubeconfigFile(nodeName string) (string, error) { re := regexp.MustCompile(`(?m)==> vagrant:.*\n`) modifiedKubeConfig := re.ReplaceAllString(string(kubeConfig), "") - vNode := VagrantNode(nodeName) + vNode := VagrantNode{Name: nodeName} nodeIP, err :=
vNode.FetchNodeExternalIP() if err != nil { return "", err @@ -463,7 +478,7 @@ func SaveDocker(nodes []VagrantNode) error { if err != nil { logs = fmt.Sprintf("** failed to list docker containers and logs for node %s: %v **", node, err) } - lf, err := os.Create(node.String() + "-dockerlog.txt") + lf, err := os.Create(node.Name + "-dockerlog.txt") if err != nil { return err } @@ -482,7 +497,7 @@ func SaveKernel(nodes []VagrantNode) error { if err != nil { logs = fmt.Sprintf("** failed to read kernel message log for node %s: %v **", node, err) } - lf, err := os.Create(node.String() + "-kernlog.txt") + lf, err := os.Create(node.Name + "-kernlog.txt") if err != nil { return err } @@ -501,7 +516,7 @@ func SaveNetwork(nodes []VagrantNode) error { if err != nil { logs = fmt.Sprintf("** failed to read network config for node %s: %v **", node, err) } - lf, err := os.Create(node.String() + "-netlog.txt") + lf, err := os.Create(node.Name + "-netlog.txt") if err != nil { return err } @@ -521,7 +536,7 @@ func TailPodLogs(lines int, nodes []VagrantNode) error { if err != nil { logs = fmt.Sprintf("** failed to read pod logs for node %s: %v **", node, err) } - lf, err := os.Create(node.String() + "-podlog.txt") + lf, err := os.Create(node.Name + "-podlog.txt") if err != nil { return err } @@ -537,7 +552,7 @@ func TailPodLogs(lines int, nodes []VagrantNode) error { // When used in GHA CI, the logs are uploaded as an artifact on failure. func SaveJournalLogs(nodes []VagrantNode) error { for _, node := range nodes { - lf, err := os.Create(node.String() + "-jlog.txt") + lf, err := os.Create(node.Name + "-jlog.txt") if err != nil { return err } @@ -589,19 +604,19 @@ func GetVagrantLog(cErr error) string { func DumpNodes(kubeConfig string) { cmd := "kubectl get nodes --no-headers -o wide -A --kubeconfig=" + kubeConfig - res, _ := RunCommand(cmd) + res, _ := tests.RunCommand(cmd) fmt.Println(strings.TrimSpace(res)) } func DumpPods(kubeConfig string) { cmd := "kubectl get pods -o wide --no-headers -A --kubeconfig=" + kubeConfig - res, _ := RunCommand(cmd) + res, _ := tests.RunCommand(cmd) fmt.Println(strings.TrimSpace(res)) } func DescribePods(kubeConfig string) string { cmd := "kubectl describe pod -A --kubeconfig=" + kubeConfig - res, err := RunCommand(cmd) + res, err := tests.RunCommand(cmd) if err != nil { return fmt.Sprintf("Failed to describe pods: %v", err) } @@ -623,7 +638,7 @@ func RestartCluster(nodes []VagrantNode) error { func StartCluster(nodes []VagrantNode) error { for _, node := range nodes { cmd := "systemctl start k3s" - if strings.Contains(node.String(), "agent") { + if strings.Contains(node.Name, "agent") { cmd += "-agent" } if _, err := node.RunCmdOnNode(cmd); err != nil { @@ -644,35 +659,6 @@ func StopCluster(nodes []VagrantNode) error { return nil } -// RunCmdOnNode executes a command from within the given node as sudo -func (v VagrantNode) RunCmdOnNode(cmd string) (string, error) { - injectEnv := "" - if _, ok := os.LookupEnv("E2E_GOCOVER"); ok && strings.HasPrefix(cmd, "k3s") { - injectEnv = "GOCOVERDIR=/tmp/k3scov " - } - runcmd := "vagrant ssh --no-tty " + v.String() + " -c \"sudo " + injectEnv + cmd + "\"" - out, err := RunCommand(runcmd) - // On GHA CI we see warnings about "[fog][WARNING] Unrecognized arguments: libvirt_ip_command" - // these are added to the command output and need to be removed - out = strings.ReplaceAll(out, "[fog][WARNING] Unrecognized arguments: libvirt_ip_command\n", "") - if err != nil { - return out, fmt.Errorf("failed to run command: %s on node %s: %s, %v", 
cmd, v.String(), out, err) - } - return out, nil -} - -func RunCommand(cmd string) (string, error) { - c := exec.Command("bash", "-c", cmd) - if kc, ok := os.LookupEnv("E2E_KUBECONFIG"); ok { - c.Env = append(os.Environ(), "KUBECONFIG="+kc) - } - out, err := c.CombinedOutput() - if err != nil { - return string(out), fmt.Errorf("failed to run command: %s, %v", cmd, err) - } - return string(out), err -} - func UpgradeCluster(nodes []VagrantNode, local bool) error { upgradeVersion := "E2E_RELEASE_CHANNEL=commit" if local { @@ -682,8 +668,8 @@ func UpgradeCluster(nodes []VagrantNode, local bool) error { upgradeVersion = "E2E_RELEASE_VERSION=skip" } for _, node := range nodes { - cmd := upgradeVersion + " vagrant provision " + node.String() - if out, err := RunCommand(cmd); err != nil { + cmd := upgradeVersion + " vagrant provision " + node.Name + if out, err := tests.RunCommand(cmd); err != nil { fmt.Println("Error Upgrading Cluster", out) return err } @@ -697,17 +683,17 @@ func GetCoverageReport(nodes []VagrantNode) error { } covDirs := []string{} for _, node := range nodes { - covDir := node.String() + "-cov" + covDir := node.Name + "-cov" covDirs = append(covDirs, covDir) os.MkdirAll(covDir, 0755) - cmd := "vagrant scp " + node.String() + ":/tmp/k3scov/* " + covDir - if _, err := RunCommand(cmd); err != nil { + cmd := "vagrant scp " + node.Name + ":/tmp/k3scov/* " + covDir + if _, err := tests.RunCommand(cmd); err != nil { return err } } coverageFile := "coverage.out" cmd := "go tool covdata textfmt -i=" + strings.Join(covDirs, ",") + " -o=" + coverageFile - if out, err := RunCommand(cmd); err != nil { + if out, err := tests.RunCommand(cmd); err != nil { return fmt.Errorf("failed to generate coverage report: %s, %v", out, err) } @@ -734,7 +720,7 @@ func GetCoverageReport(nodes []VagrantNode) error { // GetDaemonsetReady returns the number of ready pods for the given daemonset func GetDaemonsetReady(daemonset string, kubeConfigFile string) (int, error) { cmd := "kubectl get ds " + daemonset + " -o jsonpath='{range .items[*]}{.status.numberReady}' --kubeconfig=" + kubeConfigFile - out, err := RunCommand(cmd) + out, err := tests.RunCommand(cmd) if err != nil { return 0, err } @@ -756,7 +742,7 @@ func GetNodeIPs(kubeConfigFile string) ([]ObjIP, error) { // GetObjIPs executes a command to collect IPs func GetObjIPs(cmd string) ([]ObjIP, error) { var objIPs []ObjIP - res, err := RunCommand(cmd) + res, err := tests.RunCommand(cmd) if err != nil { return nil, err } diff --git a/tests/e2e/upgradecluster/upgradecluster_test.go b/tests/e2e/upgradecluster/upgradecluster_test.go index bef5034925ba..e090e180a7f6 100644 --- a/tests/e2e/upgradecluster/upgradecluster_test.go +++ b/tests/e2e/upgradecluster/upgradecluster_test.go @@ -72,7 +72,7 @@ var _ = Describe("Verify Upgrade", Ordered, func() { cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-clusterip --field-selector=status.phase=Running --kubeconfig=" + tc.KubeconfigFile Eventually(func() (string, error) { - return e2e.RunCommand(cmd) + return tests.RunCommand(cmd) }, "240s", "5s").Should(ContainSubstring("test-clusterip"), "failed cmd: "+cmd) clusterip, _ := e2e.FetchClusterIP(tc.KubeconfigFile, "nginx-clusterip-svc", false) @@ -91,18 +91,18 @@ var _ = Describe("Verify Upgrade", Ordered, func() { for _, node := range tc.Servers { nodeExternalIP, _ := node.FetchNodeExternalIP() cmd := "kubectl get service nginx-nodeport-svc --kubeconfig=" + tc.KubeconfigFile + " --output jsonpath=\"{.spec.ports[0].nodePort}\"" - nodeport, err := 
e2e.RunCommand(cmd) + nodeport, err := tests.RunCommand(cmd) Expect(err).NotTo(HaveOccurred(), "failed cmd: "+cmd) cmd = "kubectl get pods -o=name -l k8s-app=nginx-app-nodeport --field-selector=status.phase=Running --kubeconfig=" + tc.KubeconfigFile Eventually(func() (string, error) { - return e2e.RunCommand(cmd) + return tests.RunCommand(cmd) }, "240s", "5s").Should(ContainSubstring("test-nodeport"), "nodeport pod was not created") cmd = "curl -m 5 -s -f http://" + nodeExternalIP + ":" + nodeport + "/name.html" fmt.Println(cmd) Eventually(func() (string, error) { - return e2e.RunCommand(cmd) + return tests.RunCommand(cmd) }, "240s", "5s").Should(ContainSubstring("test-nodeport"), "failed cmd: "+cmd) } }) @@ -113,17 +113,17 @@ var _ = Describe("Verify Upgrade", Ordered, func() { for _, node := range tc.Servers { ip, _ := node.FetchNodeExternalIP() cmd := "kubectl get service nginx-loadbalancer-svc --kubeconfig=" + tc.KubeconfigFile + " --output jsonpath=\"{.spec.ports[0].port}\"" - port, err := e2e.RunCommand(cmd) + port, err := tests.RunCommand(cmd) Expect(err).NotTo(HaveOccurred()) cmd = "kubectl get pods -o=name -l k8s-app=nginx-app-loadbalancer --field-selector=status.phase=Running --kubeconfig=" + tc.KubeconfigFile Eventually(func() (string, error) { - return e2e.RunCommand(cmd) + return tests.RunCommand(cmd) }, "240s", "5s").Should(ContainSubstring("test-loadbalancer")) cmd = "curl -m 5 -s -f http://" + ip + ":" + port + "/name.html" Eventually(func() (string, error) { - return e2e.RunCommand(cmd) + return tests.RunCommand(cmd) }, "240s", "5s").Should(ContainSubstring("test-loadbalancer"), "failed cmd: "+cmd) } }) @@ -136,7 +136,7 @@ var _ = Describe("Verify Upgrade", Ordered, func() { ip, _ := node.FetchNodeExternalIP() cmd := "curl --header host:foo1.bar.com -m 5 -s -f http://" + ip + "/name.html" Eventually(func() (string, error) { - return e2e.RunCommand(cmd) + return tests.RunCommand(cmd) }, "240s", "5s").Should(ContainSubstring("test-ingress"), "failed cmd: "+cmd) } }) @@ -159,12 +159,12 @@ var _ = Describe("Verify Upgrade", Ordered, func() { Eventually(func() (string, error) { cmd := "kubectl get pods dnsutils --kubeconfig=" + tc.KubeconfigFile - return e2e.RunCommand(cmd) + return tests.RunCommand(cmd) }, "420s", "2s").Should(ContainSubstring("dnsutils")) cmd := "kubectl --kubeconfig=" + tc.KubeconfigFile + " exec -i -t dnsutils -- nslookup kubernetes.default" Eventually(func() (string, error) { - return e2e.RunCommand(cmd) + return tests.RunCommand(cmd) }, "420s", "2s").Should(ContainSubstring("kubernetes.default.svc.cluster.local")) }) @@ -173,7 +173,7 @@ var _ = Describe("Verify Upgrade", Ordered, func() { Expect(err).NotTo(HaveOccurred(), "local-path-provisioner manifest not deployed") Eventually(func(g Gomega) { cmd := "kubectl get pvc local-path-pvc --kubeconfig=" + tc.KubeconfigFile - res, err := e2e.RunCommand(cmd) + res, err := tests.RunCommand(cmd) g.Expect(err).NotTo(HaveOccurred()) fmt.Println(res) g.Expect(res).Should(ContainSubstring("local-path-pvc")) @@ -182,7 +182,7 @@ var _ = Describe("Verify Upgrade", Ordered, func() { Eventually(func(g Gomega) { cmd := "kubectl get pod volume-test --kubeconfig=" + tc.KubeconfigFile - res, err := e2e.RunCommand(cmd) + res, err := tests.RunCommand(cmd) g.Expect(err).NotTo(HaveOccurred()) fmt.Println(res) @@ -191,12 +191,12 @@ var _ = Describe("Verify Upgrade", Ordered, func() { }, "420s", "2s").Should(Succeed()) cmd := "kubectl --kubeconfig=" + tc.KubeconfigFile + " exec volume-test -- sh -c 'echo local-path-test > 
/data/test'" - res, err := e2e.RunCommand(cmd) + res, err := tests.RunCommand(cmd) Expect(err).NotTo(HaveOccurred(), "failed cmd: %q result: %s", cmd, res) fmt.Println("Data stored in pvc: local-path-test") cmd = "kubectl delete pod volume-test --kubeconfig=" + tc.KubeconfigFile - res, err = e2e.RunCommand(cmd) + res, err = tests.RunCommand(cmd) Expect(err).NotTo(HaveOccurred(), "failed cmd: %q result: %s", cmd, res) _, err = tc.DeployWorkload("local-path-provisioner.yaml") @@ -204,12 +204,12 @@ var _ = Describe("Verify Upgrade", Ordered, func() { Eventually(func() (string, error) { cmd := "kubectl get pods -o=name -l app=local-path-provisioner --field-selector=status.phase=Running -n kube-system --kubeconfig=" + tc.KubeconfigFile - return e2e.RunCommand(cmd) + return tests.RunCommand(cmd) }, "420s", "2s").Should(ContainSubstring("local-path-provisioner")) Eventually(func(g Gomega) { cmd := "kubectl get pod volume-test --kubeconfig=" + tc.KubeconfigFile - res, err := e2e.RunCommand(cmd) + res, err := tests.RunCommand(cmd) g.Expect(err).NotTo(HaveOccurred()) fmt.Println(res) g.Expect(res).Should(ContainSubstring("volume-test")) @@ -219,7 +219,7 @@ var _ = Describe("Verify Upgrade", Ordered, func() { // Check data after re-creation Eventually(func() (string, error) { cmd := "kubectl exec volume-test --kubeconfig=" + tc.KubeconfigFile + " -- cat /data/test" - return e2e.RunCommand(cmd) + return tests.RunCommand(cmd) }, "180s", "2s").Should(ContainSubstring("local-path-test"), "Failed to retrieve data from pvc") }) @@ -228,7 +228,7 @@ var _ = Describe("Verify Upgrade", Ordered, func() { Expect(e2e.UpgradeCluster(tc.AllNodes(), *local)).To(Succeed()) Expect(e2e.RestartCluster(tc.AllNodes())).To(Succeed()) fmt.Println("CLUSTER UPGRADED") - tc.KubeconfigFile, err = e2e.GenKubeconfigFile(tc.Servers[0].String()) + tc.KubeconfigFile, err = e2e.GenKubeconfigFile(tc.Servers[0].Name) Expect(err).NotTo(HaveOccurred()) }) @@ -247,7 +247,7 @@ var _ = Describe("Verify Upgrade", Ordered, func() { It("After upgrade verifies ClusterIP Service", func() { Eventually(func() (string, error) { cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-clusterip --field-selector=status.phase=Running --kubeconfig=" + tc.KubeconfigFile - return e2e.RunCommand(cmd) + return tests.RunCommand(cmd) }, "420s", "5s").Should(ContainSubstring("test-clusterip")) clusterip, _ := e2e.FetchClusterIP(tc.KubeconfigFile, "nginx-clusterip-svc", false) @@ -265,18 +265,18 @@ var _ = Describe("Verify Upgrade", Ordered, func() { for _, node := range tc.Servers { nodeExternalIP, _ := node.FetchNodeExternalIP() cmd := "kubectl get service nginx-nodeport-svc --kubeconfig=" + tc.KubeconfigFile + " --output jsonpath=\"{.spec.ports[0].nodePort}\"" - nodeport, err := e2e.RunCommand(cmd) + nodeport, err := tests.RunCommand(cmd) Expect(err).NotTo(HaveOccurred()) Eventually(func() (string, error) { cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-nodeport --field-selector=status.phase=Running --kubeconfig=" + tc.KubeconfigFile - return e2e.RunCommand(cmd) + return tests.RunCommand(cmd) }, "240s", "5s").Should(ContainSubstring("test-nodeport"), "nodeport pod was not created") cmd = "curl -m 5 -s -f http://" + nodeExternalIP + ":" + nodeport + "/name.html" fmt.Println(cmd) Eventually(func() (string, error) { - return e2e.RunCommand(cmd) + return tests.RunCommand(cmd) }, "240s", "5s").Should(ContainSubstring("test-nodeport")) } }) @@ -285,16 +285,16 @@ var _ = Describe("Verify Upgrade", Ordered, func() { for _, node := range tc.Servers { ip, _ := 
node.FetchNodeExternalIP() cmd := "kubectl get service nginx-loadbalancer-svc --kubeconfig=" + tc.KubeconfigFile + " --output jsonpath=\"{.spec.ports[0].port}\"" - port, err := e2e.RunCommand(cmd) + port, err := tests.RunCommand(cmd) Expect(err).NotTo(HaveOccurred()) Eventually(func() (string, error) { cmd := "curl -m 5 -s -f http://" + ip + ":" + port + "/name.html" - return e2e.RunCommand(cmd) + return tests.RunCommand(cmd) }, "240s", "5s").Should(ContainSubstring("test-loadbalancer")) Eventually(func() (string, error) { cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-loadbalancer --field-selector=status.phase=Running --kubeconfig=" + tc.KubeconfigFile - return e2e.RunCommand(cmd) + return tests.RunCommand(cmd) }, "240s", "5s").Should(ContainSubstring("test-loadbalancer")) } }) @@ -306,7 +306,7 @@ var _ = Describe("Verify Upgrade", Ordered, func() { fmt.Println(cmd) Eventually(func() (string, error) { - return e2e.RunCommand(cmd) + return tests.RunCommand(cmd) }, "420s", "5s").Should(ContainSubstring("test-ingress")) } }) @@ -322,14 +322,14 @@ var _ = Describe("Verify Upgrade", Ordered, func() { It("After upgrade verifies dns access", func() { Eventually(func() (string, error) { cmd := "kubectl --kubeconfig=" + tc.KubeconfigFile + " exec -i -t dnsutils -- nslookup kubernetes.default" - return e2e.RunCommand(cmd) + return tests.RunCommand(cmd) }, "180s", "2s").Should((ContainSubstring("kubernetes.default.svc.cluster.local"))) }) It("After upgrade verifies Local Path Provisioner storage", func() { Eventually(func() (string, error) { cmd := "kubectl exec volume-test --kubeconfig=" + tc.KubeconfigFile + " -- cat /data/test" - return e2e.RunCommand(cmd) + return tests.RunCommand(cmd) }, "180s", "2s").Should(ContainSubstring("local-path-test")) }) }) diff --git a/tests/e2e/validatecluster/validatecluster_test.go b/tests/e2e/validatecluster/validatecluster_test.go index 89a98fbad04d..ae75ba1caa53 100644 --- a/tests/e2e/validatecluster/validatecluster_test.go +++ b/tests/e2e/validatecluster/validatecluster_test.go @@ -77,7 +77,7 @@ var _ = Describe("Verify Create", Ordered, func() { Eventually(func(g Gomega) { cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-clusterip --field-selector=status.phase=Running --kubeconfig=" + tc.KubeconfigFile - res, err := e2e.RunCommand(cmd) + res, err := tests.RunCommand(cmd) Expect(err).NotTo(HaveOccurred()) g.Expect(res).Should((ContainSubstring("test-clusterip")), "failed cmd: %q result: %s", cmd, res) }, "240s", "5s").Should(Succeed()) @@ -100,12 +100,12 @@ var _ = Describe("Verify Create", Ordered, func() { for _, node := range tc.Servers { nodeExternalIP, _ := node.FetchNodeExternalIP() cmd := "kubectl get service nginx-nodeport-svc --kubeconfig=" + tc.KubeconfigFile + " --output jsonpath=\"{.spec.ports[0].nodePort}\"" - nodeport, err := e2e.RunCommand(cmd) + nodeport, err := tests.RunCommand(cmd) Expect(err).NotTo(HaveOccurred()) Eventually(func(g Gomega) { cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-nodeport --field-selector=status.phase=Running --kubeconfig=" + tc.KubeconfigFile - res, err := e2e.RunCommand(cmd) + res, err := tests.RunCommand(cmd) Expect(err).NotTo(HaveOccurred()) g.Expect(res).Should(ContainSubstring("test-nodeport"), "nodeport pod was not created") }, "240s", "5s").Should(Succeed()) @@ -113,7 +113,7 @@ var _ = Describe("Verify Create", Ordered, func() { cmd = "curl -m 5 -s -f http://" + nodeExternalIP + ":" + nodeport + "/name.html" Eventually(func(g Gomega) { - res, err := e2e.RunCommand(cmd) + res, err :=
tests.RunCommand(cmd) g.Expect(err).NotTo(HaveOccurred(), "failed cmd: %q result: %s", cmd, res) g.Expect(res).Should(ContainSubstring("test-nodeport")) }, "240s", "5s").Should(Succeed()) @@ -128,19 +128,19 @@ var _ = Describe("Verify Create", Ordered, func() { ip, _ := node.FetchNodeExternalIP() cmd := "kubectl get service nginx-loadbalancer-svc --kubeconfig=" + tc.KubeconfigFile + " --output jsonpath=\"{.spec.ports[0].port}\"" - port, err := e2e.RunCommand(cmd) + port, err := tests.RunCommand(cmd) Expect(err).NotTo(HaveOccurred()) Eventually(func(g Gomega) { cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-loadbalancer --field-selector=status.phase=Running --kubeconfig=" + tc.KubeconfigFile - res, err := e2e.RunCommand(cmd) + res, err := tests.RunCommand(cmd) g.Expect(err).NotTo(HaveOccurred(), "failed cmd: %q result: %s", cmd, res) g.Expect(res).Should(ContainSubstring("test-loadbalancer")) }, "240s", "5s").Should(Succeed()) Eventually(func(g Gomega) { cmd = "curl -m 5 -s -f http://" + ip + ":" + port + "/name.html" - res, err := e2e.RunCommand(cmd) + res, err := tests.RunCommand(cmd) g.Expect(err).NotTo(HaveOccurred(), "failed cmd: %q result: %s", cmd, res) g.Expect(res).Should(ContainSubstring("test-loadbalancer")) }, "240s", "5s").Should(Succeed()) @@ -157,7 +157,7 @@ var _ = Describe("Verify Create", Ordered, func() { fmt.Println(cmd) Eventually(func(g Gomega) { - res, err := e2e.RunCommand(cmd) + res, err := tests.RunCommand(cmd) g.Expect(err).NotTo(HaveOccurred(), "failed cmd: %q result: %s", cmd, res) g.Expect(res).Should(ContainSubstring("test-ingress")) }, "240s", "5s").Should(Succeed()) @@ -182,7 +182,7 @@ var _ = Describe("Verify Create", Ordered, func() { Eventually(func(g Gomega) { cmd := "kubectl get pods dnsutils --kubeconfig=" + tc.KubeconfigFile - res, err := e2e.RunCommand(cmd) + res, err := tests.RunCommand(cmd) g.Expect(err).NotTo(HaveOccurred(), "failed cmd: %q result: %s", cmd, res) g.Expect(res).Should(ContainSubstring("dnsutils")) }, "420s", "2s").Should(Succeed()) @@ -190,7 +190,7 @@ var _ = Describe("Verify Create", Ordered, func() { Eventually(func(g Gomega) { cmd := "kubectl --kubeconfig=" + tc.KubeconfigFile + " exec -i -t dnsutils -- nslookup kubernetes.default" - res, err := e2e.RunCommand(cmd) + res, err := tests.RunCommand(cmd) g.Expect(err).NotTo(HaveOccurred(), "failed cmd: %q result: %s", cmd, res) g.Expect(res).Should(ContainSubstring("kubernetes.default.svc.cluster.local")) }, "420s", "2s").Should(Succeed()) @@ -202,7 +202,7 @@ var _ = Describe("Verify Create", Ordered, func() { Eventually(func(g Gomega) { cmd := "kubectl get pvc local-path-pvc --kubeconfig=" + tc.KubeconfigFile - res, err := e2e.RunCommand(cmd) + res, err := tests.RunCommand(cmd) g.Expect(err).NotTo(HaveOccurred(), "failed cmd: %q result: %s", cmd, res) g.Expect(res).Should(ContainSubstring("local-path-pvc")) g.Expect(res).Should(ContainSubstring("Bound")) @@ -210,18 +210,18 @@ var _ = Describe("Verify Create", Ordered, func() { Eventually(func(g Gomega) { cmd := "kubectl get pod volume-test --kubeconfig=" + tc.KubeconfigFile - res, err := e2e.RunCommand(cmd) + res, err := tests.RunCommand(cmd) g.Expect(err).NotTo(HaveOccurred(), "failed cmd: %q result: %s", cmd, res) g.Expect(res).Should(ContainSubstring("volume-test")) g.Expect(res).Should(ContainSubstring("Running")) }, "420s", "2s").Should(Succeed()) cmd := "kubectl --kubeconfig=" + tc.KubeconfigFile + " exec volume-test -- sh -c 'echo local-path-test > /data/test'" - res, err = e2e.RunCommand(cmd) + res, err = 
tests.RunCommand(cmd) Expect(err).NotTo(HaveOccurred(), "failed cmd: %q result: %s", cmd, res) cmd = "kubectl delete pod volume-test --kubeconfig=" + tc.KubeconfigFile - res, err = e2e.RunCommand(cmd) + res, err = tests.RunCommand(cmd) Expect(err).NotTo(HaveOccurred(), "failed cmd: %q result: %s", cmd, res) _, err = tc.DeployWorkload("local-path-provisioner.yaml") @@ -229,13 +229,13 @@ var _ = Describe("Verify Create", Ordered, func() { Eventually(func(g Gomega) { cmd := "kubectl get pods -o=name -l app=local-path-provisioner --field-selector=status.phase=Running -n kube-system --kubeconfig=" + tc.KubeconfigFile - res, _ := e2e.RunCommand(cmd) + res, _ := tests.RunCommand(cmd) g.Expect(res).Should(ContainSubstring("local-path-provisioner")) }, "420s", "2s").Should(Succeed()) Eventually(func(g Gomega) { cmd := "kubectl get pod volume-test --kubeconfig=" + tc.KubeconfigFile - res, err := e2e.RunCommand(cmd) + res, err := tests.RunCommand(cmd) g.Expect(err).NotTo(HaveOccurred(), "failed cmd: %q result: %s", cmd, res) g.Expect(res).Should(ContainSubstring("volume-test")) @@ -244,7 +244,7 @@ var _ = Describe("Verify Create", Ordered, func() { Eventually(func(g Gomega) { cmd := "kubectl exec volume-test --kubeconfig=" + tc.KubeconfigFile + " -- cat /data/test" - res, err = e2e.RunCommand(cmd) + res, err = tests.RunCommand(cmd) g.Expect(err).NotTo(HaveOccurred(), "failed cmd: %q result: %s", cmd, res) fmt.Println("Data after re-creation", res) g.Expect(res).Should(ContainSubstring("local-path-test")) @@ -282,7 +282,7 @@ var _ = Describe("Verify Create", Ordered, func() { for _, node := range tc.Servers { cmd := "k3s certificate rotate" _, err := node.RunCmdOnNode(cmd) - Expect(err).NotTo(HaveOccurred(), "Certificate could not be rotated successfully on "+node.String()) + Expect(err).NotTo(HaveOccurred(), "Certificate could not be rotated successfully on "+node.Name) } }) @@ -298,7 +298,7 @@ var _ = Describe("Verify Create", Ordered, func() { for _, node := range tc.Servers { cmd := "test ! 
-e /var/lib/rancher/k3s/server/tls/dynamic-cert-regenerate" _, err := node.RunCmdOnNode(cmd) - Expect(err).NotTo(HaveOccurred(), "Dynamic cert regenerate file not removed on "+node.String()) + Expect(err).NotTo(HaveOccurred(), "Dynamic cert regenerate file not removed on "+node.Name) } }, "620s", "5s").Should(Succeed()) @@ -324,16 +324,16 @@ var _ = Describe("Verify Create", Ordered, func() { for _, node := range tc.Servers { grCert, errGrep := node.RunCmdOnNode(grepCert) - Expect(errGrep).NotTo(HaveOccurred(), "TLS dirs could not be listed on "+node.String()) + Expect(errGrep).NotTo(HaveOccurred(), "TLS dirs could not be listed on "+node.Name) re := regexp.MustCompile("tls-[0-9]+") tls := re.FindAllString(grCert, -1)[0] diff := fmt.Sprintf("diff -sr /var/lib/rancher/k3s/server/tls/ /var/lib/rancher/k3s/server/%s/"+ "| grep -i identical | cut -f4 -d ' ' | xargs basename -a \n", tls) result, err := node.RunCmdOnNode(diff) - Expect(err).NotTo(HaveOccurred(), "Certificate diff not created successfully on "+node.String()) + Expect(err).NotTo(HaveOccurred(), "Certificate diff not created successfully on "+node.Name) certArray := strings.Split(result, "\n") - Expect((certArray)).Should((Equal(expectResult)), "Certificate diff does not match the expected results on "+node.String()) + Expect((certArray)).Should((Equal(expectResult)), "Certificate diff does not match the expected results on "+node.Name) } errRestartAgent := e2e.RestartCluster(tc.Agents) diff --git a/tests/e2e/wasm/wasm_test.go b/tests/e2e/wasm/wasm_test.go index 59d3c456fa90..e8672fb8ad25 100644 --- a/tests/e2e/wasm/wasm_test.go +++ b/tests/e2e/wasm/wasm_test.go @@ -81,14 +81,14 @@ var _ = Describe("Verify K3s can run Wasm workloads", Ordered, func() { It("Wait for slight Pod to be up and running", func() { Eventually(func() (string, error) { cmd := "kubectl get pods -o=name -l app=wasm-slight --field-selector=status.phase=Running --kubeconfig=" + tc.KubeconfigFile - return e2e.RunCommand(cmd) + return tests.RunCommand(cmd) }, "240s", "5s").Should(ContainSubstring("pod/wasm-slight")) }) It("Wait for spin Pod to be up and running", func() { Eventually(func() (string, error) { cmd := "kubectl get pods -o=name -l app=wasm-spin --field-selector=status.phase=Running --kubeconfig=" + tc.KubeconfigFile - return e2e.RunCommand(cmd) + return tests.RunCommand(cmd) }, "120s", "5s").Should(ContainSubstring("pod/wasm-spin")) }) @@ -108,7 +108,7 @@ var _ = Describe("Verify K3s can run Wasm workloads", Ordered, func() { cmd := "curl -m 5 -s -f -v " + url Eventually(func() (string, error) { - return e2e.RunCommand(cmd) + return tests.RunCommand(cmd) }, "120s", "5s").Should(ContainSubstring("200 OK")) } }) diff --git a/tests/integration/startup/testdata/dummy.yaml b/tests/integration/startup/testdata/dummy.yaml index 7ef64b0d29ff..dc1f85cd6d7a 100644 --- a/tests/integration/startup/testdata/dummy.yaml +++ b/tests/integration/startup/testdata/dummy.yaml @@ -7,5 +7,5 @@ metadata: spec: containers: - name: dummy - image: ranchertest/mytestcontainer + image: rancher/mirrored-library-nginx:1.29.1-alpine imagePullPolicy: IfNotPresent
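Note: every converted call site above follows the same shape — build a kubectl/curl command string, run it through the shared tests.RunCommand helper, and poll with Gomega's Eventually until the expected substring appears. A minimal self-contained sketch of that pattern follows; the app label, pod name, and kubeconfig path are illustrative placeholders, not values taken from this diff.

package example

import (
	"fmt"
	"testing"
	"time"

	"github.com/k3s-io/k3s/tests"
	. "github.com/onsi/gomega"
)

// TestPodRunning mirrors the converted call sites: it shells out via the
// consolidated tests.RunCommand helper and retries until the pod appears.
// "app=example" and the kubeconfig path are hypothetical placeholders.
func TestPodRunning(t *testing.T) {
	g := NewWithT(t)
	kubeconfig := "/tmp/example-kubeconfig" // placeholder path
	cmd := fmt.Sprintf("kubectl get pods -o=name -l app=example --field-selector=status.phase=Running --kubeconfig=%s", kubeconfig)
	g.Eventually(func() (string, error) {
		// RunCommand returns combined stdout/stderr, so a failing command
		// surfaces its output directly in the Eventually failure message.
		return tests.RunCommand(cmd)
	}, 120*time.Second, 5*time.Second).Should(ContainSubstring("pod/example"))
}

Because the helper lives in the shared tests package, the same pattern serves the docker suites and the e2e suites alike, which is what allows the per-framework e2e.RunCommand call sites above to be swapped out wholesale.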