Fix eventlistener e2e tests broken due to upgrading Triggers to v0.20.1
After upgrading to Triggers v0.20.1, `TestEventListenerE2E` started failing consistently with a `CrashLoopBackOff`. After a bunch of debugging, I determined this is due to tektoncd/triggers#1378: when an eventlistener comes up now, it looks for `caBundle` in the appropriate `ClusterInterceptor`, and with how things were set up, we weren't actually installing the core interceptors, and the eventlistener SA didn't have permission to even look up `ClusterInterceptor`s at creation time.
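
A quick way to confirm this failure mode on a live cluster (a diagnostic sketch; the `<namespace>` placeholder and the `eventlistener=` pod label are assumptions about the deployment, not part of this change):

    # The EventListener pod crash-loops after the upgrade
    kubectl get pods -n <namespace> -l eventlistener=github-listener-interceptor

    # An empty caBundle column (or no ClusterInterceptors at all) means the
    # core interceptors were never installed
    kubectl get clusterinterceptors.triggers.tekton.dev \
      -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.spec.clientConfig.caBundle}{"\n"}{end}'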

To fix this, we need to change how we install Triggers a bit in `test/e2e-common.sh`, and then in `eventListener_test.go`, we need to create the necessary secret and SA _before_ we create the `ClusterRole` and `ClusterRoleBinding` to give that SA the right permissions.
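
The new override follows the existing `RELEASE_YAML_*` pattern, so a pinned local run looks roughly like this (a usage sketch; the `previous/v0.20.1` URLs and the `test/e2e-tests.sh` entry point are assumptions):

    RELEASE_YAML_TRIGGERS=https://storage.googleapis.com/tekton-releases/triggers/previous/v0.20.1/release.yaml \
    RELEASE_YAML_TRIGGERS_INTERCEPTORS=https://storage.googleapis.com/tekton-releases/triggers/previous/v0.20.1/interceptors.yaml \
      test/e2e-tests.sh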

Also, while I was here, I added `t.Helper()` to a bunch of e2e helper functions, and changed the cleanup logic to skip deleting test-created resources and namespaces when the `TEST_KEEP_NAMESPACES` env var is set or the test failed, matching the behavior of Pipeline's e2e tests.
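
That makes it possible to keep a failed run's resources around for inspection (a sketch; `TEST_KEEP_NAMESPACES` comes from the diff below, while the exact `go test` invocation and the `e2e` build tag are assumptions):

    TEST_KEEP_NAMESPACES=1 go test -v -count=1 -tags=e2e ./test/e2e/eventListener/...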

Signed-off-by: Andrew Bayer <[email protected]>
abayer authored and tekton-robot committed Jul 1, 2022
1 parent a7a818a commit ba6491d
Showing 11 changed files with 113 additions and 86 deletions.
26 changes: 18 additions & 8 deletions test/cli/cli.go
@@ -72,7 +72,8 @@ func NewTknRunner(namespace string) (TknRunner, error) {
}

// Run will help you execute tkn command on a specific namespace, with a timeout
-func (e TknRunner) Run(args ...string) *icmd.Result {
+func (e TknRunner) Run(t *testing.T, args ...string) *icmd.Result {
+    t.Helper()
    if e.namespace != "" {
        args = append(args, "--namespace", e.namespace)
    }
@@ -82,24 +83,28 @@ func (e TknRunner) Run(args ...string) *icmd.Result {

// MustSucceed asserts that the command ran with 0 exit code
func (e TknRunner) MustSucceed(t *testing.T, args ...string) *icmd.Result {
+    t.Helper()
    return e.Assert(t, icmd.Success, args...)
}

// Assert runs a command and verifies exit code (0)
func (e TknRunner) Assert(t *testing.T, exp icmd.Expected, args ...string) *icmd.Result {
-    res := e.Run(args...)
+    t.Helper()
+    res := e.Run(t, args...)
    res.Assert(t, exp)
    return res
}

// RunNoNamespace will help you execute tkn command without namespace, with a timeout
-func (e TknRunner) RunNoNamespace(args ...string) *icmd.Result {
+func (e TknRunner) RunNoNamespace(t *testing.T, args ...string) *icmd.Result {
+    t.Helper()
    cmd := append([]string{e.path}, args...)
    return icmd.RunCmd(icmd.Cmd{Command: cmd, Timeout: timeout})
}

// RunWithOption will help you execute tkn command with namespace, cmd option
-func (e TknRunner) RunWithOption(option icmd.CmdOp, args ...string) *icmd.Result {
+func (e TknRunner) RunWithOption(t *testing.T, option icmd.CmdOp, args ...string) *icmd.Result {
+    t.Helper()
    if e.namespace != "" {
        args = append(args, "--namespace", e.namespace)
    }
@@ -161,7 +166,8 @@ func NewKubectl(namespace string) Kubectl {
}

// Run will help you execute kubectl command on a specific namespace, with a timeout
-func (k Kubectl) Run(args ...string) *icmd.Result {
+func (k Kubectl) Run(t *testing.T, args ...string) *icmd.Result {
+    t.Helper()
    if k.namespace != "" {
        args = append(args, "--namespace", k.namespace)
    }
@@ -170,27 +176,31 @@ func (k Kubectl) Run(args ...string) *icmd.Result {
}

// RunNoNamespace will help you execute kubectl command without namespace, with a timeout
-func (k Kubectl) RunNoNamespace(args ...string) *icmd.Result {
+func (k Kubectl) RunNoNamespace(t *testing.T, args ...string) *icmd.Result {
+    t.Helper()
    cmd := append([]string{"kubectl"}, args...)
    return icmd.RunCmd(icmd.Cmd{Command: cmd, Timeout: timeout})
}

// MustSucceed asserts that the command ran with 0 exit code
func (k Kubectl) MustSucceed(t *testing.T, args ...string) *icmd.Result {
+    t.Helper()
    return k.Assert(t, icmd.Success, args...)
}

// Assert runs a command and verifies against expected
func (k Kubectl) Assert(t *testing.T, exp icmd.Expected, args ...string) *icmd.Result {
-    res := k.Run(args...)
+    t.Helper()
+    res := k.Run(t, args...)
    res.Assert(t, exp)
    time.Sleep(1 * time.Second)
    return res
}

// TODO: Re-write this to just get the version of Tekton components through tkn version
// as described in https://github.com/tektoncd/cli/issues/1067
-func (e TknRunner) CheckVersion(component string, version string) bool {
+func (e TknRunner) CheckVersion(t *testing.T, component string, version string) bool {
+    t.Helper()
    cmd := append([]string{e.path}, "version")
    result := icmd.RunCmd(icmd.Cmd{Command: cmd, Timeout: timeout})
19 changes: 19 additions & 0 deletions test/e2e-common.sh
@@ -20,6 +20,7 @@
# instead of detecting the latest released one from tektoncd/pipeline releases
RELEASE_YAML_PIPELINES=${RELEASE_YAML_PIPELINE:-}
RELEASE_YAML_TRIGGERS=${RELEASE_YAML_TRIGGERS:-}
+RELEASE_YAML_TRIGGERS_INTERCEPTORS=${RELEASE_YAML_TRIGGERS_INTERCEPTORS:-}

source $(dirname $0)/../vendor/github.com/tektoncd/plumbing/scripts/e2e-tests.sh

@@ -157,6 +158,7 @@ function install_pipeline_crd() {

function install_triggers_crd() {
  local latestreleaseyaml
+  local latestinterceptorsyaml
  echo ">> Deploying Tekton Triggers"
  if [[ -n ${RELEASE_YAML_TRIGGERS} ]];then
    latestreleaseyaml=${RELEASE_YAML_TRIGGERS}
@@ -168,10 +170,27 @@ function install_triggers_crd() {
    # If for whatever reason the nightly release wasnt there (nightly ci failure?), try the released version
    [[ -z ${latestreleaseyaml} ]] && latestreleaseyaml="https://storage.googleapis.com/tekton-releases/triggers/latest/release.yaml"
  fi
+  if [[ -n ${RELEASE_YAML_TRIGGERS_INTERCEPTORS} ]];then
+    latestinterceptorsyaml=${RELEASE_YAML_TRIGGERS_INTERCEPTORS}
+  else
+    # First try to install latest interceptors from nightly
+    curl -o/dev/null -s -LI -f https://storage.googleapis.com/tekton-releases-nightly/triggers/latest/interceptors.yaml &&
+      latestinterceptorsyaml=https://storage.googleapis.com/tekton-releases-nightly/triggers/latest/interceptors.yaml
+
+    # If for whatever reason the nightly release wasnt there (nightly ci failure?), try the released version
+    [[ -z ${latestinterceptorsyaml} ]] && latestinterceptorsyaml="https://storage.googleapis.com/tekton-releases/triggers/latest/interceptors.yaml"
+  fi
  [[ -z ${latestreleaseyaml} ]] && fail_test "Could not get latest released release.yaml"
+  [[ -z ${latestinterceptorsyaml} ]] && fail_test "Could not get latest released interceptors.yaml"
  kubectl apply -f ${latestreleaseyaml} ||
    fail_test "Build triggers installation failed"

  # Wait for pods to be running in the namespaces we are deploying to
  wait_until_pods_running tekton-pipelines || fail_test "Tekton Triggers did not come up"

+  kubectl wait --for=condition=Established --timeout=30s crds/clusterinterceptors.triggers.tekton.dev || fail_test "cluster interceptors never established"
+  kubectl apply -f ${latestinterceptorsyaml} || fail_test "Interceptors installation failed"
+
  # Make sure that eveything is cleaned up in the current namespace.
  for res in eventlistener triggertemplate triggerbinding clustertriggerbinding; do
    kubectl delete --ignore-not-found=true ${res}.triggers.tekton.dev --all
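
After `install_triggers_crd` runs, the interceptors install can be sanity-checked by hand (a verification sketch; the four interceptor names assume the stock core-interceptors release):

    kubectl get clusterinterceptors.triggers.tekton.dev
    # expected: bitbucket, cel, github, gitlab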
10 changes: 5 additions & 5 deletions test/e2e/clustertask/start_test.go
@@ -56,7 +56,7 @@ func TestClusterTaskInteractiveStartE2E(t *testing.T) {
    // Set environment variable TEST_CLUSTERTASK_LIST_EMPTY to any value to skip "No ClusterTasks found" test
    if os.Getenv("TEST_CLUSTERTASK_LIST_EMPTY") == "" {
        t.Run("Get list of ClusterTasks when none present", func(t *testing.T) {
-            res := tkn.Run("clustertask", "list")
+            res := tkn.Run(t, "clustertask", "list")
            expected := "No ClusterTasks found\n"
            res.Assert(t, icmd.Expected{
                ExitCode: 0,
@@ -74,7 +74,7 @@ func TestClusterTaskInteractiveStartE2E(t *testing.T) {
    kubectl.MustSucceed(t, "create", "-f", helper.GetResourcePath("git-resource.yaml"))

    t.Run("Get list of ClusterTasks", func(t *testing.T) {
-        res := tkn.Run("clustertask", "list")
+        res := tkn.Run(t, "clustertask", "list")
        if os.Getenv("TEST_CLUSTERTASK_LIST_EMPTY") == "" {
            expected := builder.ListAllClusterTasksOutput(t, c, map[int]interface{}{
                0: &builder.TaskData{
@@ -241,7 +241,7 @@ Waiting for logs to be available...
    })

    t.Run("Start ClusterTask with --pod-template", func(t *testing.T) {
-        if tkn.CheckVersion("Pipeline", "v0.10.2") {
+        if tkn.CheckVersion(t, "Pipeline", "v0.10.2") {
            t.Skip("Skip test as pipeline v0.10 doesn't support certain PodTemplate properties")
        }

@@ -389,7 +389,7 @@ Waiting for logs to be available...
        })

        // Check if clustertask %s got deleted
-        res = tkn.Run("clustertask", "list")
+        res = tkn.Run(t, "clustertask", "list")
        assert.Assert(t, !strings.Contains(res.Stdout(), clusterTaskName))
    })

@@ -403,7 +403,7 @@ Waiting for logs to be available...
        })

        // Check if clustertask %s got deleted
-        res = tkn.Run("clustertask", "list")
+        res = tkn.Run(t, "clustertask", "list")
        assert.Assert(t, !strings.Contains(res.Stdout(), clusterTaskName2))
    })
}
49 changes: 38 additions & 11 deletions test/e2e/eventListener/eventListener_test.go
@@ -19,19 +19,20 @@ package pipeline

import (
    "context"
+    "os"
    "testing"

    "github.com/tektoncd/cli/test/cli"
    "github.com/tektoncd/cli/test/framework"
    "github.com/tektoncd/cli/test/helper"
    "gotest.tools/assert"
+    corev1 "k8s.io/api/core/v1"
    rbacv1 "k8s.io/api/rbac/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    knativetest "knative.dev/pkg/test"
)

func TestEventListenerE2E(t *testing.T) {
-    t.Parallel()
    c, namespace := framework.Setup(t)
    knativetest.CleanupOnInterrupt(func() { framework.TearDown(t, c, namespace) }, t.Logf)
    defer cleanupResources(t, c, namespace)
@@ -42,8 +43,8 @@ func TestEventListenerE2E(t *testing.T) {
    elName := "github-listener-interceptor"

    t.Logf("Creating EventListener %s in namespace %s", elName, namespace)
-    kubectl.MustSucceed(t, "create", "-f", helper.GetResourcePath("eventlistener/eventlistener.yaml"))
    createResources(t, c, namespace)
+    kubectl.MustSucceed(t, "create", "-f", helper.GetResourcePath("eventlistener/eventlistener.yaml"))
    // Wait for pods to become available for next test
    kubectl.MustSucceed(t, "wait", "--for=condition=Ready", "pod", "-n", namespace, "--timeout=2m", "--all")

@@ -104,8 +105,8 @@ func TestEventListener_v1beta1E2E(t *testing.T) {
    elName := "github-listener-interceptor"

    t.Logf("Creating EventListener %s in namespace %s", elName, namespace)
-    kubectl.MustSucceed(t, "create", "-f", helper.GetResourcePath("eventlistener/eventlistener_v1beta1.yaml"))
    createResources(t, c, namespace)
+    kubectl.MustSucceed(t, "create", "-f", helper.GetResourcePath("eventlistener/eventlistener_v1beta1.yaml"))
    // Wait for pods to become available for next test
    kubectl.MustSucceed(t, "wait", "--for=condition=Ready", "pod", "-n", namespace, "--timeout=2m", "--all")

@@ -157,8 +158,32 @@ func TestEventListener_v1beta1E2E(t *testing.T) {

func createResources(t *testing.T, c *framework.Clients, namespace string) {
    t.Helper()
+
+    // Create SA and secret
+    _, err := c.KubeClient.CoreV1().Secrets(namespace).Create(context.Background(),
+        &corev1.Secret{
+            ObjectMeta: metav1.ObjectMeta{Name: "github-secret"},
+            Type:       corev1.SecretTypeOpaque,
+            StringData: map[string]string{"secretToken": "1234567"},
+        }, metav1.CreateOptions{})
+    if err != nil {
+        t.Fatalf("Error creating secret: %s", err)
+    }
+
+    _, err = c.KubeClient.CoreV1().ServiceAccounts(namespace).Create(context.Background(),
+        &corev1.ServiceAccount{
+            ObjectMeta: metav1.ObjectMeta{Name: "tekton-triggers-github-sa"},
+            Secrets: []corev1.ObjectReference{{
+                Namespace: namespace,
+                Name:      "github-secret",
+            }},
+        }, metav1.CreateOptions{})
+    if err != nil {
+        t.Fatalf("Error creating SA: %s", err)
+    }
+
    // Create ClusterRole required by triggers
-    _, err := c.KubeClient.RbacV1().ClusterRoles().Create(context.Background(),
+    _, err = c.KubeClient.RbacV1().ClusterRoles().Create(context.Background(),
        &rbacv1.ClusterRole{
            ObjectMeta: metav1.ObjectMeta{Name: "sa-clusterrole"},
            Rules: []rbacv1.PolicyRule{{
@@ -195,12 +220,14 @@ func cleanupResources(t *testing.T, c *framework.Clients, namespace string) {
    t.Helper()
    framework.TearDown(t, c, namespace)

-    // Cleanup cluster-scoped resources
-    t.Logf("Deleting cluster-scoped resources")
-    if err := c.KubeClient.RbacV1().ClusterRoles().Delete(context.Background(), "sa-clusterrole", metav1.DeleteOptions{}); err != nil {
-        t.Errorf("Failed to delete clusterrole sa-clusterrole: %s", err)
-    }
-    if err := c.KubeClient.RbacV1().ClusterRoleBindings().Delete(context.Background(), "sa-clusterrolebinding", metav1.DeleteOptions{}); err != nil {
-        t.Errorf("Failed to delete clusterrolebinding sa-clusterrolebinding: %s", err)
+    if os.Getenv("TEST_KEEP_NAMESPACES") == "" && !t.Failed() {
+        // Cleanup cluster-scoped resources
+        t.Logf("Deleting cluster-scoped resources")
+        if err := c.KubeClient.RbacV1().ClusterRoles().Delete(context.Background(), "sa-clusterrole", metav1.DeleteOptions{}); err != nil {
+            t.Errorf("Failed to delete clusterrole sa-clusterrole: %s", err)
+        }
+        if err := c.KubeClient.RbacV1().ClusterRoleBindings().Delete(context.Background(), "sa-clusterrolebinding", metav1.DeleteOptions{}); err != nil {
+            t.Errorf("Failed to delete clusterrolebinding sa-clusterrolebinding: %s", err)
+        }
    }
}
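
With the secret and SA now created before the `ClusterRole`/`ClusterRoleBinding` and the EventListener itself, the binding refers to a ServiceAccount that actually exists. That can be spot-checked once `createResources` has run (a sketch; it assumes the collapsed `Rules` section grants read access to `ClusterInterceptor`s, and `<namespace>` is the test namespace):

    kubectl auth can-i get clusterinterceptors.triggers.tekton.dev \
      --as=system:serviceaccount:<namespace>:tekton-triggers-github-sa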