From ba6491d504163fe4044c3ea59e4bb5102c917b24 Mon Sep 17 00:00:00 2001 From: Andrew Bayer Date: Thu, 30 Jun 2022 15:10:50 -0400 Subject: [PATCH] Fix eventlistener e2e tests broken due to upgrading Triggers to v0.20.1 After upgrading to Triggers v0.20.1, `TestEventListenerE2E` started failing consistently due to a `CrashLoopBackOff`. After a bunch of debugging, I determined this is due to https://github.com/tektoncd/triggers/pull/1378. When an eventlistener comes up now, it looks for `caBundle` in the appropriate `ClusterInterceptor`, and with how things were set up, we weren't actually creating the core interceptors etc, and the eventlistener SA didn't have permission to even look for `ClusterInterceptor`s at creation time. To fix this, we need to change how we install Triggers a bit in `test/e2e-common.sh`, and then in `eventListener_test.go`, we need to create the necessary secret and SA _before_ we create the `ClusterRole` and `ClusterRoleBinding` to give that SA the right permissions. Also, while I was here, I added `t.Helper()` to a bunch of e2e helper functions, and changed cleanup logic to not delete test-created resources/namespaces if an env var is set or the test failed, matching behavior in Pipeline's e2e tests. 
Signed-off-by: Andrew Bayer --- test/cli/cli.go | 26 +++++++--- test/e2e-common.sh | 19 +++++++ test/e2e/clustertask/start_test.go | 10 ++-- test/e2e/eventListener/eventListener_test.go | 49 ++++++++++++++----- test/e2e/pipeline/pipeline_test.go | 38 +++++++------- test/e2e/pipelinerun/pipelinerun_test.go | 4 +- test/e2e/plugin/plugin_test.go | 6 +-- test/e2e/task/start_test.go | 8 +-- test/framework/helper.go | 9 ++-- .../eventlistener/eventlistener.yaml | 15 ------ .../eventlistener/eventlistener_v1beta1.yaml | 15 ------ 11 files changed, 113 insertions(+), 86 deletions(-) diff --git a/test/cli/cli.go b/test/cli/cli.go index 285560c47..bf6494be0 100644 --- a/test/cli/cli.go +++ b/test/cli/cli.go @@ -72,7 +72,8 @@ func NewTknRunner(namespace string) (TknRunner, error) { } // Run will help you execute tkn command on a specific namespace, with a timeout -func (e TknRunner) Run(args ...string) *icmd.Result { +func (e TknRunner) Run(t *testing.T, args ...string) *icmd.Result { + t.Helper() if e.namespace != "" { args = append(args, "--namespace", e.namespace) } @@ -82,24 +83,28 @@ func (e TknRunner) Run(args ...string) *icmd.Result { // MustSucceed asserts that the command ran with 0 exit code func (e TknRunner) MustSucceed(t *testing.T, args ...string) *icmd.Result { + t.Helper() return e.Assert(t, icmd.Success, args...) } // Assert runs a command and verifies exit code (0) func (e TknRunner) Assert(t *testing.T, exp icmd.Expected, args ...string) *icmd.Result { - res := e.Run(args...) + t.Helper() + res := e.Run(t, args...) res.Assert(t, exp) return res } // RunNoNamespace will help you execute tkn command without namespace, with a timeout -func (e TknRunner) RunNoNamespace(args ...string) *icmd.Result { +func (e TknRunner) RunNoNamespace(t *testing.T, args ...string) *icmd.Result { + t.Helper() cmd := append([]string{e.path}, args...) 
return icmd.RunCmd(icmd.Cmd{Command: cmd, Timeout: timeout}) } // RunWithOption will help you execute tkn command with namespace, cmd option -func (e TknRunner) RunWithOption(option icmd.CmdOp, args ...string) *icmd.Result { +func (e TknRunner) RunWithOption(t *testing.T, option icmd.CmdOp, args ...string) *icmd.Result { + t.Helper() if e.namespace != "" { args = append(args, "--namespace", e.namespace) } @@ -161,7 +166,8 @@ func NewKubectl(namespace string) Kubectl { } // Run will help you execute kubectl command on a specific namespace, with a timeout -func (k Kubectl) Run(args ...string) *icmd.Result { +func (k Kubectl) Run(t *testing.T, args ...string) *icmd.Result { + t.Helper() if k.namespace != "" { args = append(args, "--namespace", k.namespace) } @@ -170,19 +176,22 @@ func (k Kubectl) Run(args ...string) *icmd.Result { } // RunNoNamespace will help you execute kubectl command without namespace, with a timeout -func (k Kubectl) RunNoNamespace(args ...string) *icmd.Result { +func (k Kubectl) RunNoNamespace(t *testing.T, args ...string) *icmd.Result { + t.Helper() cmd := append([]string{"kubectl"}, args...) return icmd.RunCmd(icmd.Cmd{Command: cmd, Timeout: timeout}) } // MustSucceed asserts that the command ran with 0 exit code func (k Kubectl) MustSucceed(t *testing.T, args ...string) *icmd.Result { + t.Helper() return k.Assert(t, icmd.Success, args...) } // Assert runs a command and verifies against expected func (k Kubectl) Assert(t *testing.T, exp icmd.Expected, args ...string) *icmd.Result { - res := k.Run(args...) + t.Helper() + res := k.Run(t, args...) 
res.Assert(t, exp) time.Sleep(1 * time.Second) return res @@ -190,7 +199,8 @@ func (k Kubectl) Assert(t *testing.T, exp icmd.Expected, args ...string) *icmd.R // TODO: Re-write this to just get the version of Tekton components through tkn version // as described in https://github.com/tektoncd/cli/issues/1067 -func (e TknRunner) CheckVersion(component string, version string) bool { +func (e TknRunner) CheckVersion(t *testing.T, component string, version string) bool { + t.Helper() cmd := append([]string{e.path}, "version") result := icmd.RunCmd(icmd.Cmd{Command: cmd, Timeout: timeout}) diff --git a/test/e2e-common.sh b/test/e2e-common.sh index 6cbd19ff7..c27b06239 100755 --- a/test/e2e-common.sh +++ b/test/e2e-common.sh @@ -20,6 +20,7 @@ # instead of detecting the latest released one from tektoncd/pipeline releases RELEASE_YAML_PIPELINES=${RELEASE_YAML_PIPELINE:-} RELEASE_YAML_TRIGGERS=${RELEASE_YAML_TRIGGERS:-} +RELEASE_YAML_TRIGGERS_INTERCEPTORS=${RELEASE_YAML_TRIGGERS_INTERCEPTORS:-} source $(dirname $0)/../vendor/github.com/tektoncd/plumbing/scripts/e2e-tests.sh @@ -157,6 +158,7 @@ function install_pipeline_crd() { function install_triggers_crd() { local latestreleaseyaml + local latestinterceptorsyaml echo ">> Deploying Tekton Triggers" if [[ -n ${RELEASE_YAML_TRIGGERS} ]];then latestreleaseyaml=${RELEASE_YAML_TRIGGERS} @@ -168,10 +170,27 @@ function install_triggers_crd() { # If for whatever reason the nightly release wasnt there (nightly ci failure?), try the released version [[ -z ${latestreleaseyaml} ]] && latestreleaseyaml="https://storage.googleapis.com/tekton-releases/triggers/latest/release.yaml" fi + if [[ -n ${RELEASE_YAML_TRIGGERS_INTERCEPTORS} ]];then + latestinterceptorsyaml=${RELEASE_YAML_TRIGGERS_INTERCEPTORS} + else + # First try to install latest interceptors from nightly + curl -o/dev/null -s -LI -f https://storage.googleapis.com/tekton-releases-nightly/triggers/latest/interceptors.yaml && + 
latestinterceptorsyaml=https://storage.googleapis.com/tekton-releases-nightly/triggers/latest/interceptors.yaml + + # If for whatever reason the nightly release wasnt there (nightly ci failure?), try the released version + [[ -z ${latestinterceptorsyaml} ]] && latestinterceptorsyaml="https://storage.googleapis.com/tekton-releases/triggers/latest/interceptors.yaml" + fi [[ -z ${latestreleaseyaml} ]] && fail_test "Could not get latest released release.yaml" + [[ -z ${latestinterceptorsyaml} ]] && fail_test "Could not get latest released interceptors.yaml" kubectl apply -f ${latestreleaseyaml} || fail_test "Build triggers installation failed" + # Wait for pods to be running in the namespaces we are deploying to + wait_until_pods_running tekton-pipelines || fail_test "Tekton Triggers did not come up" + + kubectl wait --for=condition=Established --timeout=30s crds/clusterinterceptors.triggers.tekton.dev || fail_test "cluster interceptors never established" + kubectl apply -f ${latestinterceptorsyaml} || fail_test "Interceptors installation failed" + # Make sure that eveything is cleaned up in the current namespace. 
for res in eventlistener triggertemplate triggerbinding clustertriggerbinding; do kubectl delete --ignore-not-found=true ${res}.triggers.tekton.dev --all diff --git a/test/e2e/clustertask/start_test.go b/test/e2e/clustertask/start_test.go index cc879fac3..c4cc351fd 100644 --- a/test/e2e/clustertask/start_test.go +++ b/test/e2e/clustertask/start_test.go @@ -56,7 +56,7 @@ func TestClusterTaskInteractiveStartE2E(t *testing.T) { // Set environment variable TEST_CLUSTERTASK_LIST_EMPTY to any value to skip "No ClusterTasks found" test if os.Getenv("TEST_CLUSTERTASK_LIST_EMPTY") == "" { t.Run("Get list of ClusterTasks when none present", func(t *testing.T) { - res := tkn.Run("clustertask", "list") + res := tkn.Run(t, "clustertask", "list") expected := "No ClusterTasks found\n" res.Assert(t, icmd.Expected{ ExitCode: 0, @@ -74,7 +74,7 @@ func TestClusterTaskInteractiveStartE2E(t *testing.T) { kubectl.MustSucceed(t, "create", "-f", helper.GetResourcePath("git-resource.yaml")) t.Run("Get list of ClusterTasks", func(t *testing.T) { - res := tkn.Run("clustertask", "list") + res := tkn.Run(t, "clustertask", "list") if os.Getenv("TEST_CLUSTERTASK_LIST_EMPTY") == "" { expected := builder.ListAllClusterTasksOutput(t, c, map[int]interface{}{ 0: &builder.TaskData{ @@ -241,7 +241,7 @@ Waiting for logs to be available... }) t.Run("Start ClusterTask with --pod-template", func(t *testing.T) { - if tkn.CheckVersion("Pipeline", "v0.10.2") { + if tkn.CheckVersion(t, "Pipeline", "v0.10.2") { t.Skip("Skip test as pipeline v0.10 doesn't support certain PodTemplate properties") } @@ -389,7 +389,7 @@ Waiting for logs to be available... }) // Check if clustertask %s got deleted - res = tkn.Run("clustertask", "list") + res = tkn.Run(t, "clustertask", "list") assert.Assert(t, !strings.Contains(res.Stdout(), clusterTaskName)) }) @@ -403,7 +403,7 @@ Waiting for logs to be available... 
}) // Check if clustertask %s got deleted - res = tkn.Run("clustertask", "list") + res = tkn.Run(t, "clustertask", "list") assert.Assert(t, !strings.Contains(res.Stdout(), clusterTaskName2)) }) } diff --git a/test/e2e/eventListener/eventListener_test.go b/test/e2e/eventListener/eventListener_test.go index beaedec45..b78ecea25 100644 --- a/test/e2e/eventListener/eventListener_test.go +++ b/test/e2e/eventListener/eventListener_test.go @@ -19,19 +19,20 @@ package pipeline import ( "context" + "os" "testing" "github.com/tektoncd/cli/test/cli" "github.com/tektoncd/cli/test/framework" "github.com/tektoncd/cli/test/helper" "gotest.tools/assert" + corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" knativetest "knative.dev/pkg/test" ) func TestEventListenerE2E(t *testing.T) { - t.Parallel() c, namespace := framework.Setup(t) knativetest.CleanupOnInterrupt(func() { framework.TearDown(t, c, namespace) }, t.Logf) defer cleanupResources(t, c, namespace) @@ -42,8 +43,8 @@ func TestEventListenerE2E(t *testing.T) { elName := "github-listener-interceptor" t.Logf("Creating EventListener %s in namespace %s", elName, namespace) - kubectl.MustSucceed(t, "create", "-f", helper.GetResourcePath("eventlistener/eventlistener.yaml")) createResources(t, c, namespace) + kubectl.MustSucceed(t, "create", "-f", helper.GetResourcePath("eventlistener/eventlistener.yaml")) // Wait for pods to become available for next test kubectl.MustSucceed(t, "wait", "--for=condition=Ready", "pod", "-n", namespace, "--timeout=2m", "--all") @@ -104,8 +105,8 @@ func TestEventListener_v1beta1E2E(t *testing.T) { elName := "github-listener-interceptor" t.Logf("Creating EventListener %s in namespace %s", elName, namespace) - kubectl.MustSucceed(t, "create", "-f", helper.GetResourcePath("eventlistener/eventlistener_v1beta1.yaml")) createResources(t, c, namespace) + kubectl.MustSucceed(t, "create", "-f", helper.GetResourcePath("eventlistener/eventlistener_v1beta1.yaml")) 
// Wait for pods to become available for next test kubectl.MustSucceed(t, "wait", "--for=condition=Ready", "pod", "-n", namespace, "--timeout=2m", "--all") @@ -157,8 +158,32 @@ func TestEventListener_v1beta1E2E(t *testing.T) { func createResources(t *testing.T, c *framework.Clients, namespace string) { t.Helper() + + // Create SA and secret + _, err := c.KubeClient.CoreV1().Secrets(namespace).Create(context.Background(), + &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{Name: "github-secret"}, + Type: corev1.SecretTypeOpaque, + StringData: map[string]string{"secretToken": "1234567"}, + }, metav1.CreateOptions{}) + if err != nil { + t.Fatalf("Error creating secret: %s", err) + } + + _, err = c.KubeClient.CoreV1().ServiceAccounts(namespace).Create(context.Background(), + &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{Name: "tekton-triggers-github-sa"}, + Secrets: []corev1.ObjectReference{{ + Namespace: namespace, + Name: "github-secret", + }}, + }, metav1.CreateOptions{}) + if err != nil { + t.Fatalf("Error creating SA: %s", err) + } + // Create ClusterRole required by triggers - _, err := c.KubeClient.RbacV1().ClusterRoles().Create(context.Background(), + _, err = c.KubeClient.RbacV1().ClusterRoles().Create(context.Background(), &rbacv1.ClusterRole{ ObjectMeta: metav1.ObjectMeta{Name: "sa-clusterrole"}, Rules: []rbacv1.PolicyRule{{ @@ -195,12 +220,14 @@ func cleanupResources(t *testing.T, c *framework.Clients, namespace string) { t.Helper() framework.TearDown(t, c, namespace) - // Cleanup cluster-scoped resources - t.Logf("Deleting cluster-scoped resources") - if err := c.KubeClient.RbacV1().ClusterRoles().Delete(context.Background(), "sa-clusterrole", metav1.DeleteOptions{}); err != nil { - t.Errorf("Failed to delete clusterrole sa-clusterrole: %s", err) - } - if err := c.KubeClient.RbacV1().ClusterRoleBindings().Delete(context.Background(), "sa-clusterrolebinding", metav1.DeleteOptions{}); err != nil { - t.Errorf("Failed to delete clusterrolebinding 
sa-clusterrolebinding: %s", err) + if os.Getenv("TEST_KEEP_NAMESPACES") == "" && !t.Failed() { + // Cleanup cluster-scoped resources + t.Logf("Deleting cluster-scoped resources") + if err := c.KubeClient.RbacV1().ClusterRoles().Delete(context.Background(), "sa-clusterrole", metav1.DeleteOptions{}); err != nil { + t.Errorf("Failed to delete clusterrole sa-clusterrole: %s", err) + } + if err := c.KubeClient.RbacV1().ClusterRoleBindings().Delete(context.Background(), "sa-clusterrolebinding", metav1.DeleteOptions{}); err != nil { + t.Errorf("Failed to delete clusterrolebinding sa-clusterrolebinding: %s", err) + } } } diff --git a/test/e2e/pipeline/pipeline_test.go b/test/e2e/pipeline/pipeline_test.go index 0c4cd65be..7836e0878 100644 --- a/test/e2e/pipeline/pipeline_test.go +++ b/test/e2e/pipeline/pipeline_test.go @@ -68,7 +68,7 @@ func TestPipelinesE2E(t *testing.T) { kubectl.MustSucceed(t, "create", "-f", helper.GetResourcePath("git-resource.yaml")) t.Run("Get list of Tasks from namespace "+namespace, func(t *testing.T) { - res := tkn.Run("task", "list") + res := tkn.Run(t, "task", "list") expected := builder.ListAllTasksOutput(t, c, map[int]interface{}{ 0: &builder.TaskData{ Name: TaskName2, @@ -85,7 +85,7 @@ func TestPipelinesE2E(t *testing.T) { }) t.Run("Get list of Pipelines from namespace "+namespace, func(t *testing.T) { - res := tkn.Run("pipelines", "list") + res := tkn.Run(t, "pipelines", "list") expected := builder.ListAllPipelinesOutput(t, c, map[int]interface{}{ 0: &builder.PipelinesListData{ Name: tePipelineName, @@ -100,7 +100,7 @@ func TestPipelinesE2E(t *testing.T) { }) t.Run("Get list of pipelines from other namespace [default] should throw Error", func(t *testing.T) { - res := tkn.RunNoNamespace("pipelines", "list", "-n", "default") + res := tkn.RunNoNamespace(t, "pipelines", "list", "-n", "default") res.Assert(t, icmd.Expected{ ExitCode: 0, Out: "No Pipelines found\n", @@ -109,7 +109,7 @@ func TestPipelinesE2E(t *testing.T) { }) t.Run("Validate 
pipelines format for -o (output) flag, as Json Path", func(t *testing.T) { - res := tkn.Run("pipelines", "list", `-o=jsonpath={range.items[*]}{.metadata.name}{"\n"}{end}`) + res := tkn.Run(t, "pipelines", "list", `-o=jsonpath={range.items[*]}{.metadata.name}{"\n"}{end}`) expected := builder.ListResourceNamesForJSONPath( builder.GetPipelineListWithTestData(t, c, map[int]interface{}{ @@ -131,7 +131,7 @@ func TestPipelinesE2E(t *testing.T) { }) t.Run("Validate Pipeline describe command in namespace "+namespace, func(t *testing.T) { - res := tkn.Run("pipeline", "describe", tePipelineName) + res := tkn.Run(t, "pipeline", "describe", tePipelineName) expected := builder.GetPipelineDescribeOutput(t, c, tePipelineName, map[int]interface{}{ 0: &builder.PipelineDescribeData{ @@ -182,7 +182,7 @@ Waiting for logs to be available... time.Sleep(1 * time.Second) t.Run("Get list of Taskruns from namespace "+namespace, func(t *testing.T) { - res := tkn.Run("taskrun", "list") + res := tkn.Run(t, "taskrun", "list") expected := builder.ListAllTaskRunsOutput(t, c, false, map[int]interface{}{ 0: &builder.TaskRunData{ Name: "output-pipeline-run-", @@ -201,7 +201,7 @@ Waiting for logs to be available... }) t.Run("Validate Pipeline describe command in namespace "+namespace+" after PipelineRun completed successfully", func(t *testing.T) { - res := tkn.Run("pipeline", "describe", tePipelineName) + res := tkn.Run(t, "pipeline", "describe", tePipelineName) expected := builder.GetPipelineDescribeOutput(t, c, tePipelineName, map[int]interface{}{ 0: &builder.PipelineDescribeData{ @@ -290,7 +290,7 @@ Waiting for logs to be available... 
kubectl.MustSucceed(t, "create", "-f", helper.GetResourcePath("pipeline-with-workspace.yaml")) t.Run("Start PipelineRun with --workspace and volumeClaimTemplate", func(t *testing.T) { - if tkn.CheckVersion("Pipeline", "v0.10.2") { + if tkn.CheckVersion(t, "Pipeline", "v0.10.2") { t.Skip("Skip test as pipeline v0.10.2 doesn't support volumeClaimTemplates") } @@ -339,7 +339,7 @@ Waiting for logs to be available... pipelineRunLast := builder.GetPipelineRunListWithName(c, tePipelineName, true).Items[0] // Cancel PipelineRun - res := tkn.Run("pipelinerun", "cancel", pipelineRunLast.Name) + res := tkn.Run(t, "pipelinerun", "cancel", pipelineRunLast.Name) // Expect error from PipelineRun cancel for already completed PipelineRun expected := "Error: failed to cancel PipelineRun " + pipelineRunLast.Name + ": PipelineRun has already finished execution\n" @@ -396,7 +396,7 @@ func TestPipelinesNegativeE2E(t *testing.T) { } t.Run("Get list of Pipelines from namespace "+namespace, func(t *testing.T) { - res := tkn.Run("pipelines", "list") + res := tkn.Run(t, "pipelines", "list") expected := builder.ListAllPipelinesOutput(t, c, map[int]interface{}{ 0: &builder.PipelinesListData{ Name: tePipelineName, @@ -411,7 +411,7 @@ func TestPipelinesNegativeE2E(t *testing.T) { }) t.Run("Get list of pipelines from other namespace [default] should throw Error", func(t *testing.T) { - res := tkn.RunNoNamespace("pipelines", "list", "-n", "default") + res := tkn.RunNoNamespace(t, "pipelines", "list", "-n", "default") res.Assert(t, icmd.Expected{ ExitCode: 0, Out: "No Pipelines found\n", @@ -420,7 +420,7 @@ func TestPipelinesNegativeE2E(t *testing.T) { }) t.Run("Validate pipelines format for -o (output) flag, as Json Path", func(t *testing.T) { - res := tkn.Run("pipelines", "list", `-o=jsonpath={range.items[*]}{.metadata.name}{"\n"}{end}`) + res := tkn.Run(t, "pipelines", "list", `-o=jsonpath={range.items[*]}{.metadata.name}{"\n"}{end}`) expected := builder.ListResourceNamesForJSONPath( 
builder.GetPipelineListWithTestData(t, c, map[int]interface{}{ @@ -442,7 +442,7 @@ func TestPipelinesNegativeE2E(t *testing.T) { }) t.Run("Validate Pipeline describe command in namespace "+namespace, func(t *testing.T) { - res := tkn.Run("pipeline", "describe", tePipelineName) + res := tkn.Run(t, "pipeline", "describe", tePipelineName) expected := builder.GetPipelineDescribeOutput(t, c, tePipelineName, map[int]interface{}{ 0: &builder.PipelineDescribeData{ @@ -494,7 +494,7 @@ Waiting for logs to be available... time.Sleep(1 * time.Second) t.Run("Validate Pipeline describe command in namespace "+namespace+" after PipelineRun completed successfully", func(t *testing.T) { - res := tkn.Run("pipeline", "describe", tePipelineName) + res := tkn.Run(t, "pipeline", "describe", tePipelineName) expected := builder.GetPipelineDescribeOutput(t, c, tePipelineName, map[int]interface{}{ 0: &builder.PipelineDescribeData{ @@ -565,7 +565,7 @@ func TestDeletePipelinesE2E(t *testing.T) { time.Sleep(1 * time.Second) t.Run("Delete pipeline "+tePipelineName+"-1"+" from namespace "+namespace+" With force delete flag (shorthand)", func(t *testing.T) { - res := tkn.Run("pipeline", "rm", tePipelineName+"-1", "-f") + res := tkn.Run(t, "pipeline", "rm", tePipelineName+"-1", "-f") res.Assert(t, icmd.Expected{ ExitCode: 0, Out: "Pipelines deleted: \"" + tePipelineName + "-1" + "\"\n", @@ -573,7 +573,7 @@ func TestDeletePipelinesE2E(t *testing.T) { }) t.Run("Delete pipeline "+tePipelineName+"-2"+" from namespace "+namespace+" With force delete flag", func(t *testing.T) { - res := tkn.Run("pipeline", "rm", tePipelineName+"-2", "--force") + res := tkn.Run(t, "pipeline", "rm", tePipelineName+"-2", "--force") res.Assert(t, icmd.Expected{ ExitCode: 0, Out: "Pipelines deleted: \"" + tePipelineName + "-2" + "\"\n", @@ -581,7 +581,7 @@ func TestDeletePipelinesE2E(t *testing.T) { }) t.Run("Delete pipeline "+tePipelineName+"-3"+" from namespace "+namespace+" without force flag, reply no", func(t *testing.T) 
{ - res := tkn.RunWithOption(icmd.WithStdin(strings.NewReader("n")), + res := tkn.RunWithOption(t, icmd.WithStdin(strings.NewReader("n")), "pipeline", "rm", tePipelineName+"-3") res.Assert(t, icmd.Expected{ ExitCode: 1, @@ -590,7 +590,7 @@ func TestDeletePipelinesE2E(t *testing.T) { }) t.Run("Delete pipeline "+tePipelineName+"-3"+" from namespace "+namespace+" without force flag, reply yes", func(t *testing.T) { - res := tkn.RunWithOption(icmd.WithStdin(strings.NewReader("y")), + res := tkn.RunWithOption(t, icmd.WithStdin(strings.NewReader("y")), "pipeline", "rm", tePipelineName+"-3") res.Assert(t, icmd.Expected{ ExitCode: 0, @@ -600,7 +600,7 @@ func TestDeletePipelinesE2E(t *testing.T) { }) t.Run("Check for list of pipelines, After Successful Deletion of pipeline in namespace "+namespace+" should throw an error", func(t *testing.T) { - res := tkn.Run("pipelines", "list") + res := tkn.Run(t, "pipelines", "list") res.Assert(t, icmd.Expected{ ExitCode: 0, Out: "No Pipelines found\n", diff --git a/test/e2e/pipelinerun/pipelinerun_test.go b/test/e2e/pipelinerun/pipelinerun_test.go index 9b61e8c26..ed7a780a8 100644 --- a/test/e2e/pipelinerun/pipelinerun_test.go +++ b/test/e2e/pipelinerun/pipelinerun_test.go @@ -38,7 +38,7 @@ func TestPipelineRunLogE2E(t *testing.T) { tkn, err := cli.NewTknRunner(namespace) assert.NilError(t, err) - if tkn.CheckVersion("Pipeline", "v0.10.2") { + if tkn.CheckVersion(t, "Pipeline", "v0.10.2") { t.Skip("Skip test as pipeline v0.10.2 doesn't support finally") } @@ -46,7 +46,7 @@ func TestPipelineRunLogE2E(t *testing.T) { kubectl.MustSucceed(t, "create", "-f", helper.GetResourcePath("pipelinerun-with-finally.yaml")) t.Run("Pipelinerun logs with finally "+namespace, func(t *testing.T) { - res := tkn.Run("pipelinerun", "logs", "exit-handler", "-f") + res := tkn.Run(t, "pipelinerun", "logs", "exit-handler", "-f") s := []string{ "[print-msg : main] printing a message\n", "[echo-on-exit : main] finally\n", diff --git 
a/test/e2e/plugin/plugin_test.go b/test/e2e/plugin/plugin_test.go index 6f2469658..d80d666db 100644 --- a/test/e2e/plugin/plugin_test.go +++ b/test/e2e/plugin/plugin_test.go @@ -50,13 +50,13 @@ func TestTknPlugin(t *testing.T) { tkn.MustSucceed(t, "success", "with", "args") }) t.Run("Failure", func(t *testing.T) { - tkn.Run("failure").Assert(t, icmd.Expected{ + tkn.Run(t, "failure").Assert(t, icmd.Expected{ ExitCode: 12, }) - tkn.Run("failure", "with", "args").Assert(t, icmd.Expected{ + tkn.Run(t, "failure", "with", "args").Assert(t, icmd.Expected{ ExitCode: 12, }) - tkn.Run("failure", "exit20").Assert(t, icmd.Expected{ + tkn.Run(t, "failure", "exit20").Assert(t, icmd.Expected{ ExitCode: 20, }) }) diff --git a/test/e2e/task/start_test.go b/test/e2e/task/start_test.go index 4c3457380..b0dd7c1e5 100644 --- a/test/e2e/task/start_test.go +++ b/test/e2e/task/start_test.go @@ -58,7 +58,7 @@ func TestTaskStartE2E(t *testing.T) { kubectl.MustSucceed(t, "create", "-f", helper.GetResourcePath("git-resource.yaml")) t.Run("Get list of Tasks from namespace "+namespace, func(t *testing.T) { - res := tkn.Run("task", "list") + res := tkn.Run(t, "task", "list") expected := builder.ListAllTasksOutput(t, c, map[int]interface{}{ 0: &builder.TaskData{ Name: "read-task", @@ -149,7 +149,7 @@ Waiting for logs to be available... t.Errorf("Error waiting for TaskRun to Succeed: %s", err) } - res := tkn.Run("taskrun", "list") + res := tkn.Run(t, "taskrun", "list") expected := builder.ListAllTaskRunsOutput(t, c, false, map[int]interface{}{ 0: &builder.TaskRunData{ Name: "read-task-run-", @@ -228,7 +228,7 @@ Waiting for logs to be available... 
kubectl.MustSucceed(t, "create", "-f", helper.GetResourcePath("task-with-workspace.yaml")) t.Run("Start TaskRun with --workspace and volumeClaimTemplate", func(t *testing.T) { - if tkn.CheckVersion("Pipeline", "v0.10.2") { + if tkn.CheckVersion(t, "Pipeline", "v0.10.2") { t.Skip("Skip test as pipeline v0.10 doesn't support volumeClaimTemplates") } @@ -272,7 +272,7 @@ Waiting for logs to be available... taskRunLast := builder.GetTaskRunListWithTaskName(c, "read-task", true).Items[0] // Cancel TaskRun - res := tkn.Run("taskrun", "cancel", taskRunLast.Name) + res := tkn.Run(t, "taskrun", "cancel", taskRunLast.Name) // Expect error from TaskRun cancel for already completed TaskRun expected := "Error: failed to cancel TaskRun " + taskRunLast.Name + ": TaskRun has already finished execution\n" diff --git a/test/framework/helper.go b/test/framework/helper.go index 0821d0c93..3806e72ed 100644 --- a/test/framework/helper.go +++ b/test/framework/helper.go @@ -125,11 +125,12 @@ func TearDown(t *testing.T, cs *Clients, namespace string) { } } - t.Logf("Deleting namespace %s", namespace) - if err := cs.KubeClient.CoreV1().Namespaces().Delete(context.Background(), namespace, metav1.DeleteOptions{}); err != nil { - t.Errorf("Failed to delete namespace %s: %s", namespace, err) + if os.Getenv("TEST_KEEP_NAMESPACES") == "" && !t.Failed() { + t.Logf("Deleting namespace %s", namespace) + if err := cs.KubeClient.CoreV1().Namespaces().Delete(context.Background(), namespace, metav1.DeleteOptions{}); err != nil { + t.Errorf("Failed to delete namespace %s: %s", namespace, err) + } } - } func initializeLogsAndMetrics(t *testing.T) { diff --git a/test/resources/eventlistener/eventlistener.yaml b/test/resources/eventlistener/eventlistener.yaml index 6a8085ee5..44dbd4447 100644 --- a/test/resources/eventlistener/eventlistener.yaml +++ b/test/resources/eventlistener/eventlistener.yaml @@ -82,13 +82,6 @@ spec: - name: url value: $(tt.params.gitrepositoryurl) --- -apiVersion: v1 -kind: 
ServiceAccount -metadata: - name: tekton-triggers-github-sa -secrets: - - name: github-secret ---- apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: @@ -118,11 +111,3 @@ rules: - apiGroups: ["tekton.dev"] resources: ["pipelineruns", "pipelineresources", "taskruns"] verbs: ["create"] ---- -apiVersion: v1 -kind: Secret -metadata: - name: github-secret -type: Opaque -stringData: - secretToken: "1234567" diff --git a/test/resources/eventlistener/eventlistener_v1beta1.yaml b/test/resources/eventlistener/eventlistener_v1beta1.yaml index 6ca9160d9..b24351d26 100644 --- a/test/resources/eventlistener/eventlistener_v1beta1.yaml +++ b/test/resources/eventlistener/eventlistener_v1beta1.yaml @@ -89,13 +89,6 @@ spec: - name: url value: $(tt.params.gitrepositoryurl) --- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: tekton-triggers-github-sa -secrets: - - name: github-secret ---- apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: @@ -125,11 +118,3 @@ rules: - apiGroups: ["tekton.dev"] resources: ["pipelineruns", "pipelineresources", "taskruns"] verbs: ["create"] ---- -apiVersion: v1 -kind: Secret -metadata: - name: github-secret -type: Opaque -stringData: - secretToken: "1234567"