#!/usr/bin/env bash
# Copyright 2018 The Tekton Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Helper functions for E2E tests.

# Check if specific RELEASE_YAML global environment variables were set to use
# instead of detecting the latest released ones from the tektoncd/pipeline and
# tektoncd/triggers releases.
RELEASE_YAML_PIPELINE=${RELEASE_YAML_PIPELINE:-}
RELEASE_YAML_TRIGGERS=${RELEASE_YAML_TRIGGERS:-}
RELEASE_YAML_TRIGGERS_INTERCEPTORS=${RELEASE_YAML_TRIGGERS_INTERCEPTORS:-}
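
# Example (values shown are illustrative only): pin the Pipelines release used
# by the tests instead of letting the script detect one:
#   RELEASE_YAML_PIPELINE=https://github.com/tektoncd/pipeline/releases/download/v0.42.0/release.yaml ./e2e-tests.sh ...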

source "$(dirname "$0")/../vendor/github.com/tektoncd/plumbing/scripts/e2e-tests.sh"

function teardown() {
  subheader "Tearing down Tekton Pipelines"
  ko delete --ignore-not-found=true -f config/
  # teardown will be called when run against an existing cluster to clean up before
  # continuing, so we must wait for the cleanup to complete or the subsequent attempt
  # to deploy to the same namespace will fail
  wait_until_object_does_not_exist namespace tekton-pipelines
}
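
# Dumps the given Tekton run kind using the custom columns defined in
# test/columns.txt, e.g.: output_yaml_test_results taskrun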
function output_yaml_test_results() {
  # If formatting fails for any reason, fall back to plain yaml output.
  kubectl get $1.tekton.dev -o=custom-columns-file=${REPO_ROOT_DIR}/test/columns.txt || \
    kubectl get $1.tekton.dev -oyaml
}

function output_pods_logs() {
  echo ">>> $1"
  kubectl get $1.tekton.dev -o yaml
  local runs=$(kubectl get $1.tekton.dev --output=jsonpath="{.items[*].metadata.name}")
  set +e
  for run in ${runs}; do
    echo ">>>> $1 ${run}"
    case "$1" in
      "taskrun")
        go run ./test/logs/main.go -tr ${run}
        ;;
      "pipelinerun")
        go run ./test/logs/main.go -pr ${run}
        ;;
    esac
  done
  set -e
  echo ">>>> Pods"
  kubectl get pods -o yaml
}

# Called by `fail_test` (provided by `e2e-tests.sh`) to dump info on test failure
function dump_extra_cluster_state() {
  echo ">>> Pipeline controller log:"
  kubectl -n tekton-pipelines logs $(get_app_pod tekton-pipelines-controller tekton-pipelines)
  echo ">>> Pipeline webhook log:"
  kubectl -n tekton-pipelines logs $(get_app_pod tekton-pipelines-webhook tekton-pipelines)
}
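
# Waits (up to 60 * 10s) for every $1.tekton.dev run to leave the Unknown state.
# Note the inverted return convention: it returns non-zero once all runs have
# finished and zero if it timed out, which is what the caller in run_yaml_tests
# relies on to report a timeout.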
function validate_run() {
  local tests_finished=0
  for i in {1..60}; do
    local finished="$(kubectl get $1.tekton.dev --output=jsonpath='{.items[*].status.conditions[*].status}')"
    if [[ ! "$finished" == *"Unknown"* ]]; then
      tests_finished=1
      break
    fi
    sleep 10
  done
  return ${tests_finished}
}

# Checks that every $1.tekton.dev run reached Succeeded=True; returns non-zero otherwise.
function check_results() {
  local failed=0
  results="$(kubectl get $1.tekton.dev --output=jsonpath='{range .items[*]}{.metadata.name}={.status.conditions[*].type}{.status.conditions[*].status}{" "}{end}')"
  for result in ${results}; do
    if [[ ! "${result,,}" == *"=succeededtrue" ]]; then
      echo "ERROR: test ${result} failed, expected it to end with '=succeededtrue'"
      failed=1
    fi
  done
  return ${failed}
}
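
# Applies every example under examples/taskruns and examples/pipelineruns
# (rewriting the hard-coded gcr.io/christiewilson-catfactory registry to
# $KO_DOCKER_REPO), waits for the runs to finish and checks their results.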
function run_yaml_tests() {
  echo ">> Starting tests"
  # Applying *taskruns
  for file in $(find ${REPO_ROOT_DIR}/examples/taskruns/ -name '*.yaml' | sort); do
    perl -p -e 's/gcr.io\/christiewilson-catfactory/$ENV{KO_DOCKER_REPO}/g' ${file} | ko apply -f - || return 1
  done
  # Applying *pipelineruns
  for file in $(find ${REPO_ROOT_DIR}/examples/pipelineruns/ -name '*.yaml' | sort); do
    perl -p -e 's/gcr.io\/christiewilson-catfactory/$ENV{KO_DOCKER_REPO}/g' ${file} | ko apply -f - || return 1
  done
  # Wait for tests to finish.
  echo ">> Waiting for tests to finish"
  for test in taskrun pipelinerun; do
    if validate_run ${test}; then
      echo "ERROR: tests timed out"
    fi
  done
  # Check that every kind of run passed.
  echo ">> Checking test results"
  for test in taskrun pipelinerun; do
    check_results ${test} || return 1
  done
  echo ">> All YAML tests passed"
  return 0
}

function install_pipeline_crd() {
  local latestreleaseyaml
  echo ">> Deploying Tekton Pipelines"
  if [[ -n ${RELEASE_YAML_PIPELINE} ]]; then
    latestreleaseyaml=${RELEASE_YAML_PIPELINE}
  else
    # First try to install latestreleaseyaml from the nightly release
    # TODO: re-enable this curl command once we are confident about nightly releases
    # curl -o/dev/null -s -LI -f https://storage.googleapis.com/tekton-releases-nightly/pipeline/latest/release.yaml &&
    #   latestreleaseyaml=https://storage.googleapis.com/tekton-releases-nightly/pipeline/latest/release.yaml
    # If for whatever reason the nightly release wasn't there (nightly CI failure?), try the released version
    [[ -n ${latestreleaseyaml} ]] || latestreleaseyaml=https://storage.googleapis.com/tekton-releases/pipeline/latest/release.yaml
  fi
  # TODO: remove this pinning of the pipeline version once the cli supports v1 Pipeline CRDs; it is only here to keep CI green until then
  latestreleaseyaml="https://github.com/tektoncd/pipeline/releases/download/v0.42.0/release.yaml"
  [[ -z ${latestreleaseyaml} ]] && fail_test "Could not get latest released release.yaml"
  kubectl apply -f ${latestreleaseyaml} ||
    fail_test "Build pipeline installation failed"
  # Make sure that everything is cleaned up in the current namespace.
  for res in pipelineresources tasks pipelines taskruns pipelineruns; do
    kubectl delete --ignore-not-found=true ${res}.tekton.dev --all
  done
  # Wait for pods to be running in the namespaces we are deploying to
  wait_until_pods_running tekton-pipelines || fail_test "Tekton Pipeline did not come up"
}

function install_triggers_crd() {
  local latestreleaseyaml
  local latestinterceptorsyaml
  echo ">> Deploying Tekton Triggers"
  if [[ -n ${RELEASE_YAML_TRIGGERS} ]]; then
    latestreleaseyaml=${RELEASE_YAML_TRIGGERS}
  else
    # First try to install latestreleaseyaml from the nightly release
    curl -o/dev/null -s -LI -f https://storage.googleapis.com/tekton-releases-nightly/triggers/latest/release.yaml &&
      latestreleaseyaml=https://storage.googleapis.com/tekton-releases-nightly/triggers/latest/release.yaml
    # If for whatever reason the nightly release wasn't there (nightly CI failure?), try the released version
    [[ -z ${latestreleaseyaml} ]] && latestreleaseyaml="https://storage.googleapis.com/tekton-releases/triggers/latest/release.yaml"
  fi
  if [[ -n ${RELEASE_YAML_TRIGGERS_INTERCEPTORS} ]]; then
    latestinterceptorsyaml=${RELEASE_YAML_TRIGGERS_INTERCEPTORS}
  else
    # First try to install the latest interceptors from the nightly release
    curl -o/dev/null -s -LI -f https://storage.googleapis.com/tekton-releases-nightly/triggers/latest/interceptors.yaml &&
      latestinterceptorsyaml=https://storage.googleapis.com/tekton-releases-nightly/triggers/latest/interceptors.yaml
    # If for whatever reason the nightly release wasn't there (nightly CI failure?), try the released version
    [[ -z ${latestinterceptorsyaml} ]] && latestinterceptorsyaml="https://storage.googleapis.com/tekton-releases/triggers/latest/interceptors.yaml"
  fi
  [[ -z ${latestreleaseyaml} ]] && fail_test "Could not get latest released release.yaml"
  [[ -z ${latestinterceptorsyaml} ]] && fail_test "Could not get latest released interceptors.yaml"
  kubectl apply -f ${latestreleaseyaml} ||
    fail_test "Build triggers installation failed"
  # Wait for pods to be running in the namespaces we are deploying to
  wait_until_pods_running tekton-pipelines || fail_test "Tekton Triggers did not come up"
  kubectl wait --for=condition=Established --timeout=30s crds/clusterinterceptors.triggers.tekton.dev || fail_test "cluster interceptors never established"
  kubectl apply -f ${latestinterceptorsyaml} || fail_test "Interceptors installation failed"
  # Make sure that everything is cleaned up in the current namespace.
  for res in eventlistener triggertemplate triggerbinding clustertriggerbinding; do
    kubectl delete --ignore-not-found=true ${res}.triggers.tekton.dev --all
  done
  # Wait for pods to be running in the namespaces we are deploying to
  wait_until_pods_running tekton-pipelines || fail_test "Tekton Triggers did not come up"
}
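
# Waits up to $timeout seconds for the first status condition of a resource to
# become True (returns 1 if it becomes False or the timeout expires).
# usage (resource name is only an example):
#   wait_until_ready 60 eventlistener/foo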
wait_until_ready(){
  local timeout="$1"; shift
  local obj="$1"; shift
  echo "Waiting for $obj to be ready; timeout: $timeout"
  local waited=0
  while [[ $waited -lt $timeout ]]; do
    local status=$(kubectl get $obj -o json | jq -r .status.conditions[0].status)
    case "$status" in
      True) return 0 ;;
      False) return 1 ;;
      *)
        waited=$(( waited + 2 ))
        echo " ... [$waited] status is $status"
        sleep 2
        ;;
    esac
  done
  # timeout is an error
  return 1
}

# ci_run allows certain steps to be skipped when running locally
# usage:
#   ci_run && {
#     commands to be run in CI
#   }
#
ci_run() {
  ${LOCAL_CI_RUN:-false} || return 0
  return 1
}

# skips the test if it fails, unless NO_SKIP is set to true
# to skip a test:
#   run_test "list pipelines" SKIP ./tkn pipeline list -n default
# to turn off skipping:
#   NO_SKIP=true ./e2e-tests.sh ...
SKIP() {
  ${NO_SKIP:-false} && {
    "$@"
    return $?
  }
  "$@" || {
    echo "SKIPPING: $@ returned $?"
    return 0
  }
}
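
# runs a test command, failing the whole suite when it does not succeed
# usage:
#   run_test "list pipelines" ./tkn pipeline list -n default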
run_test() {
  local desc="$1"; shift
  echo "Running $@"
  "$@" || fail_test "failed to $desc"
  echo
}

# runs a test that is expected to fail
# usage:
#   must_fail "describe pipeline" ./tkn pipeline describe foo -n default
must_fail() {
  local desc="$1"; shift
  echo "Running fail $@"
  local failed=0
  "$@" || failed=1
  (( failed )) || fail_test "failed to $desc"
  echo
}