Commit f200e8c

chore(ci): update for e2e on new GKE and ephemeral AKS cluster (#2242)
* Delete tekton pipelines
* Better k8s cleanups; update gke.sh
* Unify `helm upgrade -i` multiline
* Helm chart version
* Delete unused file
* Update values for spot AKS cluster
* Update values for GKE cluster
* Use separate trap
  * Revert "Use separate trap" (reverts commit 69e93ce4bd501b8e4854faee6dc1033a1999ecce)
  * Reapply "Use separate trap" (reverts commit 6cb3fb86e3fbc52fb5c19aed8425a8b55e9bc48a)
  * Reapply trap
* Tekton pipelines install with kubectl
* Better detection of ephemeral cluster
* Longer wait time for backstage up for ephemeral cluster
* Note the chart version for releases
* Remove GKE cluster url (already in openshift/release)
* Service token and better exporting
* GKE cluster url
* AKS: use IP instead of name
* Install tekton pipelines earlier
* Tekton pipelines wait for endpoints instead
* Conditional AKS cleanup
* Fix auth keys
* Tolerations for AKS
* Old chart with new probes
1 parent a3d8796 commit f200e8c

14 files changed (+245 -160 lines)
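Several hunks below move a call to `install_tekton_pipelines` to the top of each deployment function; per the commit message, the helper now installs Tekton Pipelines with kubectl and waits for endpoints rather than for a rollout. The helper itself is not part of this diff; a minimal sketch of that behavior (the release manifest URL, namespace, service name, and timeout are assumptions, not taken from the commit) could look like:

# Hypothetical sketch of install_tekton_pipelines; not taken from this commit.
install_tekton_pipelines() {
  # Apply the upstream release manifest with kubectl instead of an operator.
  kubectl apply -f "https://storage.googleapis.com/tekton-releases/pipeline/latest/release.yaml"
  # Wait until the webhook service has at least one ready endpoint before continuing.
  timeout 300 bash -c 'until kubectl get endpoints tekton-pipelines-webhook -n tekton-pipelines -o jsonpath="{.subsets[*].addresses[*].ip}" | grep -q .; do echo "Waiting for Tekton webhook endpoints..."; sleep 5; done'
}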

.ibm/pipelines/cluster/aks/deployment.sh

+6 -4

@@ -1,10 +1,10 @@
 #!/bin/bash

 initiate_aks_deployment() {
+  install_tekton_pipelines
   add_helm_repos
   delete_namespace "${NAME_SPACE_RBAC_K8S}"
   configure_namespace "${NAME_SPACE_K8S}"
-  install_tekton_pipelines
   uninstall_helmchart "${NAME_SPACE_K8S}" "${RELEASE_NAME}"
   cd "${DIR}" || exit
   local rhdh_base_url="https://${K8S_CLUSTER_ROUTER_BASE}"
@@ -13,18 +13,19 @@ initiate_aks_deployment() {
   mkdir -p "${ARTIFACT_DIR}/${NAME_SPACE_K8S}"
   cp -a "/tmp/${HELM_CHART_K8S_MERGED_VALUE_FILE_NAME}" "${ARTIFACT_DIR}/${NAME_SPACE_K8S}/" # Save the final value-file into the artifacts directory.
   echo "Deploying image from repository: ${QUAY_REPO}, TAG_NAME: ${TAG_NAME}, in NAME_SPACE: ${NAME_SPACE_K8S}"
-  helm upgrade -i "${RELEASE_NAME}" -n "${NAME_SPACE_K8S}" "${HELM_REPO_NAME}/${HELM_IMAGE_NAME}" --version "${CHART_VERSION}" \
+  helm upgrade -i "${RELEASE_NAME}" -n "${NAME_SPACE_K8S}" \
+    "${HELM_REPO_NAME}/${HELM_IMAGE_NAME}" --version "${CHART_VERSION}" \
     -f "/tmp/${HELM_CHART_K8S_MERGED_VALUE_FILE_NAME}" \
     --set global.host="${K8S_CLUSTER_ROUTER_BASE}" \
     --set upstream.backstage.image.repository="${QUAY_REPO}" \
     --set upstream.backstage.image.tag="${TAG_NAME}"
 }

 initiate_rbac_aks_deployment() {
+  install_tekton_pipelines
   add_helm_repos
   delete_namespace "${NAME_SPACE_K8S}"
   configure_namespace "${NAME_SPACE_RBAC_K8S}"
-  install_tekton_pipelines
   uninstall_helmchart "${NAME_SPACE_RBAC_K8S}" "${RELEASE_NAME_RBAC}"
   cd "${DIR}" || exit
   local rbac_rhdh_base_url="https://${K8S_CLUSTER_ROUTER_BASE}"
@@ -33,7 +34,8 @@ initiate_rbac_aks_deployment() {
   mkdir -p "${ARTIFACT_DIR}/${NAME_SPACE_RBAC_K8S}"
   cp -a "/tmp/${HELM_CHART_RBAC_K8S_MERGED_VALUE_FILE_NAME}" "${ARTIFACT_DIR}/${NAME_SPACE_RBAC_K8S}/" # Save the final value-file into the artifacts directory.
   echo "Deploying image from repository: ${QUAY_REPO}, TAG_NAME: ${TAG_NAME}, in NAME_SPACE: ${NAME_SPACE_RBAC_K8S}"
-  helm upgrade -i "${RELEASE_NAME_RBAC}" -n "${NAME_SPACE_RBAC_K8S}" "${HELM_REPO_NAME}/${HELM_IMAGE_NAME}" --version "${CHART_VERSION}" \
+  helm upgrade -i "${RELEASE_NAME_RBAC}" -n "${NAME_SPACE_RBAC_K8S}" \
+    "${HELM_REPO_NAME}/${HELM_IMAGE_NAME}" --version "${CHART_VERSION}" \
     -f "/tmp/${HELM_CHART_RBAC_K8S_MERGED_VALUE_FILE_NAME}" \
     --set global.host="${K8S_CLUSTER_ROUTER_BASE}" \
     --set upstream.backstage.image.repository="${QUAY_REPO}" \

.ibm/pipelines/cluster/gke/deployment.sh

+6 -4

@@ -2,10 +2,10 @@

 initiate_gke_deployment() {
   gcloud_ssl_cert_create $GKE_CERT_NAME $GKE_INSTANCE_DOMAIN_NAME $GOOGLE_CLOUD_PROJECT
+  install_tekton_pipelines
   add_helm_repos
   delete_namespace "${NAME_SPACE_RBAC_K8S}"
   configure_namespace "${NAME_SPACE_K8S}"
-  install_tekton_pipelines
   uninstall_helmchart "${NAME_SPACE_K8S}" "${RELEASE_NAME}"
   cd "${DIR}" || exit
   local rhdh_base_url="https://${K8S_CLUSTER_ROUTER_BASE}"
@@ -15,7 +15,8 @@ initiate_gke_deployment() {
   mkdir -p "${ARTIFACT_DIR}/${NAME_SPACE_K8S}"
   cp -a "/tmp/${HELM_CHART_K8S_MERGED_VALUE_FILE_NAME}" "${ARTIFACT_DIR}/${NAME_SPACE_K8S}/" # Save the final value-file into the artifacts directory.
   echo "Deploying image from repository: ${QUAY_REPO}, TAG_NAME: ${TAG_NAME}, in NAME_SPACE: ${NAME_SPACE_K8S}"
-  helm upgrade -i "${RELEASE_NAME}" -n "${NAME_SPACE_K8S}" "${HELM_REPO_NAME}/${HELM_IMAGE_NAME}" --version "${CHART_VERSION}" \
+  helm upgrade -i "${RELEASE_NAME}" -n "${NAME_SPACE_K8S}" \
+    "${HELM_REPO_NAME}/${HELM_IMAGE_NAME}" --version "${CHART_VERSION}" \
     -f "/tmp/${HELM_CHART_K8S_MERGED_VALUE_FILE_NAME}" \
     --set global.host="${K8S_CLUSTER_ROUTER_BASE}" \
     --set upstream.backstage.image.repository="${QUAY_REPO}" \
@@ -25,10 +26,10 @@ initiate_gke_deployment() {

 initiate_rbac_gke_deployment() {
   gcloud_ssl_cert_create $GKE_CERT_NAME $GKE_INSTANCE_DOMAIN_NAME $GOOGLE_CLOUD_PROJECT
+  install_tekton_pipelines
   add_helm_repos
   delete_namespace "${NAME_SPACE_K8S}"
   configure_namespace "${NAME_SPACE_RBAC_K8S}"
-  install_tekton_pipelines
   uninstall_helmchart "${NAME_SPACE_RBAC_K8S}" "${RELEASE_NAME_RBAC}"
   cd "${DIR}" || exit
   local rbac_rhdh_base_url="https://${K8S_CLUSTER_ROUTER_BASE}"
@@ -37,7 +38,8 @@ initiate_rbac_gke_deployment() {
   mkdir -p "${ARTIFACT_DIR}/${NAME_SPACE_RBAC_K8S}"
   cp -a "/tmp/${HELM_CHART_RBAC_K8S_MERGED_VALUE_FILE_NAME}" "${ARTIFACT_DIR}/${NAME_SPACE_RBAC_K8S}/" # Save the final value-file into the artifacts directory.
   echo "Deploying image from repository: ${QUAY_REPO}, TAG_NAME: ${TAG_NAME}, in NAME_SPACE: ${NAME_SPACE_RBAC_K8S}"
-  helm upgrade -i "${RELEASE_NAME_RBAC}" -n "${NAME_SPACE_RBAC_K8S}" "${HELM_REPO_NAME}/${HELM_IMAGE_NAME}" --version "${CHART_VERSION}" \
+  helm upgrade -i "${RELEASE_NAME_RBAC}" -n "${NAME_SPACE_RBAC_K8S}" \
+    "${HELM_REPO_NAME}/${HELM_IMAGE_NAME}" --version "${CHART_VERSION}" \
     -f "/tmp/${HELM_CHART_RBAC_K8S_MERGED_VALUE_FILE_NAME}" \
     --set global.host="${K8S_CLUSTER_ROUTER_BASE}" \
     --set upstream.backstage.image.repository="${QUAY_REPO}" \
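Both GKE deployment functions call `gcloud_ssl_cert_create` before installing Tekton Pipelines. That helper is defined elsewhere in the repository; a plausible, idempotent wrapper around a Google-managed certificate (the skip-if-exists check is an assumption) might be:

# Hypothetical wrapper; the real helper lives in the repository's GKE scripts.
gcloud_ssl_cert_create() {
  local cert_name=$1 domain_name=$2 project=$3
  # Only create the managed certificate if it does not already exist.
  if ! gcloud compute ssl-certificates describe "${cert_name}" --global --project "${project}" > /dev/null 2>&1; then
    gcloud compute ssl-certificates create "${cert_name}" \
      --domains "${domain_name}" --global --project "${project}"
  fi
}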

.ibm/pipelines/env_variables.sh

+1 -2

@@ -24,7 +24,7 @@ NAME_SPACE_RUNTIME="${NAME_SPACE_RUNTIME:-showcase-runtime}"
 NAME_SPACE_POSTGRES_DB="${NAME_SPACE_POSTGRES_DB:-postgress-external-db}"
 NAME_SPACE_RDS="showcase-rds-nightly"
 OPERATOR_MANAGER='rhdh-operator'
-CHART_VERSION="2.15.2"
+CHART_VERSION="2.15.2" # Fixed version should be used for release branches.
 GITHUB_APP_APP_ID=$(cat /tmp/secrets/GITHUB_APP_3_APP_ID)
 GITHUB_APP_CLIENT_ID=$(cat /tmp/secrets/GITHUB_APP_3_CLIENT_ID)
 GITHUB_APP_PRIVATE_KEY=$(cat /tmp/secrets/GITHUB_APP_3_PRIVATE_KEY)
@@ -93,7 +93,6 @@ ARM_CLIENT_ID=$(cat /tmp/secrets/ARM_CLIENT_ID)
 ARM_CLIENT_SECRET=$(cat /tmp/secrets/ARM_CLIENT_SECRET)
 AKS_NIGHTLY_CLUSTER_NAME=$(cat /tmp/secrets/AKS_NIGHTLY_CLUSTER_NAME)
 AKS_NIGHTLY_CLUSTER_RESOURCEGROUP=$(cat /tmp/secrets/AKS_NIGHTLY_CLUSTER_RESOURCEGROUP)
-AKS_INSTANCE_DOMAIN_NAME=$(cat /tmp/secrets/AKS_INSTANCE_DOMAIN_NAME)

 GKE_CLUSTER_NAME=$(cat /tmp/secrets/GKE_CLUSTER_NAME)
 GKE_CLUSTER_REGION=$(cat /tmp/secrets/GKE_CLUSTER_REGION)

.ibm/pipelines/jobs/aks.sh

+10 -3

@@ -4,14 +4,15 @@ handle_aks() {
   echo "Starting AKS deployment"
   for file in ${DIR}/cluster/aks/*.sh; do source $file; done

-  export K8S_CLUSTER_ROUTER_BASE=$AKS_INSTANCE_DOMAIN_NAME
+  export K8S_CLUSTER_ROUTER_BASE=$(kubectl get svc nginx --namespace app-routing-system -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
   export NAME_SPACE_K8S="showcase-k8s-ci-nightly"
   export NAME_SPACE_RBAC_K8S="showcase-rbac-k8s-ci-nightly"

   url="https://${K8S_CLUSTER_ROUTER_BASE}"

-  if kubectl auth whoami > /dev/null 2>&1; then
+  if oc whoami --show-server > /dev/null 2>&1; then
     echo "Using an ephemeral AKS cluster."
+    export EPHEMERAL="true"
   else
     echo "Falling back to a long-running AKS cluster."
     export K8S_CLUSTER_TOKEN=$(cat /tmp/secrets/AKS_CLUSTER_TOKEN)
@@ -30,10 +31,16 @@ handle_aks() {
   fi

   initiate_aks_deployment
-  check_and_test "${RELEASE_NAME}" "${NAME_SPACE_K8S}" "${url}"
+  check_and_test "${RELEASE_NAME}" "${NAME_SPACE_K8S}" "${url}" 50 30
   delete_namespace "${NAME_SPACE_K8S}"
   initiate_rbac_aks_deployment
   local rbac_rhdh_base_url="https://${K8S_CLUSTER_ROUTER_BASE}"
   check_and_test "${RELEASE_NAME_RBAC}" "${NAME_SPACE_RBAC_K8S}" "${rbac_rhdh_base_url}"
   delete_namespace "${NAME_SPACE_RBAC_K8S}"
 }
+
+cleanup_aks() {
+  if [ "$EPHEMERAL" != "true" ]; then
+    az_aks_stop "${AKS_NIGHTLY_CLUSTER_NAME}" "${AKS_NIGHTLY_CLUSTER_RESOURCEGROUP}"
+  fi
+}
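The new `cleanup_aks` stops the nightly AKS cluster only when the job did not detect an ephemeral cluster (i.e. `EPHEMERAL` is not `true`). `az_aks_stop` is defined elsewhere in the pipeline scripts; a plausible minimal wrapper around the Azure CLI (the `--no-wait` flag is an assumption) would be:

# Hypothetical wrapper; the actual helper is defined in the repository's AKS scripts.
az_aks_stop() {
  local cluster_name=$1 resource_group=$2
  # Deallocate the long-running nightly cluster without blocking the CI job.
  az aks stop --name "${cluster_name}" --resource-group "${resource_group}" --no-wait
}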

.ibm/pipelines/jobs/gke.sh

+33 -12

@@ -4,30 +4,51 @@ handle_gke() {
   echo "Starting GKE deployment"
   for file in ${DIR}/cluster/gke/*.sh; do source $file; done

-  export K8S_CLUSTER_TOKEN=$(cat /tmp/secrets/GKE_CLUSTER_TOKEN)
-  export K8S_CLUSTER_TOKEN_ENCODED=$(printf "%s" $K8S_CLUSTER_TOKEN | base64 | tr -d '\n')
-  export K8S_SERVICE_ACCOUNT_TOKEN=$K8S_CLUSTER_TOKEN_ENCODED
-  export OCM_CLUSTER_TOKEN=$K8S_CLUSTER_TOKEN_ENCODED
-
-  export K8S_CLUSTER_ROUTER_BASE=$GKE_INSTANCE_DOMAIN_NAME
-  export NAME_SPACE_K8S="showcase-k8s-ci-nightly"
-  export NAME_SPACE_RBAC_K8S="showcase-rbac-k8s-ci-nightly"
+  K8S_CLUSTER_ROUTER_BASE=$GKE_INSTANCE_DOMAIN_NAME
+  NAME_SPACE_K8S="showcase-k8s-ci-nightly"
+  NAME_SPACE_RBAC_K8S="showcase-rbac-k8s-ci-nightly"
+  export K8S_CLUSTER_ROUTER_BASE NAME_SPACE_K8S NAME_SPACE_RBAC_K8S

   url="https://${K8S_CLUSTER_ROUTER_BASE}"

   gcloud_auth "${GKE_SERVICE_ACCOUNT_NAME}" "/tmp/secrets/GKE_SERVICE_ACCOUNT_KEY"
   gcloud_gke_get_credentials "${GKE_CLUSTER_NAME}" "${GKE_CLUSTER_REGION}" "${GOOGLE_CLOUD_PROJECT}"

-  export K8S_CLUSTER_URL=$(oc whoami --show-server)
-  export K8S_CLUSTER_API_SERVER_URL=$(printf "%s" "$K8S_CLUSTER_URL" | base64 | tr -d '\n')
-  export OCM_CLUSTER_URL=$(printf "%s" "$K8S_CLUSTER_URL" | base64 | tr -d '\n')
+  K8S_CLUSTER_URL=$(kubectl config view --minify -o jsonpath='{.clusters[0].cluster.server}')
+  K8S_CLUSTER_API_SERVER_URL=$(printf "%s" "$K8S_CLUSTER_URL" | base64 | tr -d '\n')
+  OCM_CLUSTER_URL=$(printf "%s" "$K8S_CLUSTER_URL" | base64 | tr -d '\n')
+  export K8S_CLUSTER_URL K8S_CLUSTER_API_SERVER_URL OCM_CLUSTER_URL
+
+  # Create a service account and assign token
+  SA_NAME="tester-sa-2"
+  SA_NAMESPACE="default"
+  SA_BINDING_NAME="${SA_NAME}-binding"
+  if ! kubectl get serviceaccount ${SA_NAME} -n ${SA_NAMESPACE} &> /dev/null; then
+    echo "Creating service account ${SA_NAME}..."
+    kubectl create serviceaccount ${SA_NAME} -n ${SA_NAMESPACE}
+    echo "Creating cluster role binding..."
+    kubectl create clusterrolebinding ${SA_BINDING_NAME} \
+      --clusterrole=cluster-admin \
+      --serviceaccount=${SA_NAMESPACE}:${SA_NAME}
+    echo "Service account and binding created successfully"
+  else
+    echo "Service account ${SA_NAME} already exists in namespace ${SA_NAMESPACE}"
+  fi
+  K8S_CLUSTER_TOKEN=$(kubectl create token tester-sa-2 -n default)
+  K8S_CLUSTER_TOKEN_ENCODED=$(printf "%s" $K8S_CLUSTER_TOKEN | base64 | tr -d '\n')
+  K8S_SERVICE_ACCOUNT_TOKEN=$K8S_CLUSTER_TOKEN_ENCODED
+  OCM_CLUSTER_TOKEN=$K8S_CLUSTER_TOKEN_ENCODED
+  export K8S_CLUSTER_TOKEN K8S_CLUSTER_TOKEN_ENCODED K8S_SERVICE_ACCOUNT_TOKEN OCM_CLUSTER_TOKEN

   initiate_gke_deployment
-  check_and_test "${RELEASE_NAME}" "${NAME_SPACE_K8S}" "${url}"
+  check_and_test "${RELEASE_NAME}" "${NAME_SPACE_K8S}" "${url}" 50 30
   delete_namespace "${NAME_SPACE_K8S}"
   initiate_rbac_gke_deployment
   local rbac_rhdh_base_url="https://${K8S_CLUSTER_ROUTER_BASE}"
   check_and_test "${RELEASE_NAME_RBAC}" "${NAME_SPACE_RBAC_K8S}" "${rbac_rhdh_base_url}"
   delete_namespace "${NAME_SPACE_RBAC_K8S}"
+}

+cleanup_gke() {
+  delete_tekton_pipelines
 }
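The GKE job now mints a short-lived token for the `tester-sa-2` service account with `kubectl create token` instead of reading a long-lived token from the mounted CI secrets. A quick, illustrative way to confirm such a token is accepted by the API server (not part of this commit; variable names are only for the example) is:

# Illustrative sanity check only; follows the script above.
API_SERVER=$(kubectl config view --minify -o jsonpath='{.clusters[0].cluster.server}')
TOKEN=$(kubectl create token tester-sa-2 -n default)
kubectl --server="${API_SERVER}" --token="${TOKEN}" --insecure-skip-tls-verify=true get namespaces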

.ibm/pipelines/jobs/periodic.sh

+13 -9

@@ -45,17 +45,21 @@ run_runtime_config_change_tests() {
   local runtime_url="https://${RELEASE_NAME}-backstage-${NAME_SPACE_RUNTIME}.${K8S_CLUSTER_ROUTER_BASE}"

   apply_yaml_files "${DIR}" "${NAME_SPACE_RUNTIME}" "${runtime_url}"
-  helm upgrade -i "${RELEASE_NAME}" -n "${NAME_SPACE_RUNTIME}" "${HELM_REPO_NAME}/${HELM_IMAGE_NAME}" --version "${CHART_VERSION}" -f "${DIR}/value_files/${HELM_CHART_VALUE_FILE_NAME}" --set global.clusterRouterBase="${K8S_CLUSTER_ROUTER_BASE}" --set upstream.backstage.image.repository="${QUAY_REPO}" --set upstream.backstage.image.tag="${TAG_NAME}"
+  helm upgrade -i "${RELEASE_NAME}" -n "${NAME_SPACE_RUNTIME}" \
+    "${HELM_REPO_NAME}/${HELM_IMAGE_NAME}" --version "${CHART_VERSION}" \
+    -f "${DIR}/value_files/${HELM_CHART_VALUE_FILE_NAME}" \
+    --set global.clusterRouterBase="${K8S_CLUSTER_ROUTER_BASE}" \
+    --set upstream.backstage.image.repository="${QUAY_REPO}" \
+    --set upstream.backstage.image.tag="${TAG_NAME}" \
   check_and_test "${RELEASE_NAME}" "${NAME_SPACE_RUNTIME}" "${runtime_url}"
 }

 add_sanity_plugins_check() {
-  helm upgrade -i "${RELEASE_NAME}" \
-    -n "${NAME_SPACE}" "${HELM_REPO_NAME}/${HELM_IMAGE_NAME}" \
-    --version "${CHART_VERSION}" \
-    -f "${DIR}/value_files/${HELM_CHART_VALUE_FILE_NAME}" \
-    -f "${DIR}/value_files/sanity-check-plugins.yaml" \
-    --set global.clusterRouterBase="${K8S_CLUSTER_ROUTER_BASE}" \
-    --set upstream.backstage.image.repository="${QUAY_REPO}" \
-    --set upstream.backstage.image.tag="${TAG_NAME}"
+  helm upgrade -i "${RELEASE_NAME}" -n "${NAME_SPACE}" \
+    "${HELM_REPO_NAME}/${HELM_IMAGE_NAME}" --version "${CHART_VERSION}" \
+    -f "${DIR}/value_files/${HELM_CHART_VALUE_FILE_NAME}" \
+    -f "${DIR}/value_files/sanity-check-plugins.yaml" \
+    --set global.clusterRouterBase="${K8S_CLUSTER_ROUTER_BASE}" \
+    --set upstream.backstage.image.repository="${QUAY_REPO}" \
+    --set upstream.backstage.image.tag="${TAG_NAME}"
 }

.ibm/pipelines/kubernetes-tests.sh

-108
This file was deleted.

.ibm/pipelines/openshift-ci-tests.sh

+11 -3

@@ -12,9 +12,17 @@ OVERALL_RESULT=0
 # shellcheck disable=SC2317
 cleanup() {
   echo "Cleaning up before exiting"
-  if [[ "$JOB_NAME" == *aks* && "${OPENSHIFT_CI}" == "true" ]]; then
-    # If the job is for Azure Kubernetes Service (AKS), stop the AKS cluster.
-    az_aks_stop "${AKS_NIGHTLY_CLUSTER_NAME}" "${AKS_NIGHTLY_CLUSTER_RESOURCEGROUP}"
+  if [[ "${OPENSHIFT_CI}" == "true" ]]; then
+    case "$JOB_NAME" in
+      *aks*)
+        echo "Calling handle_aks"
+        cleanup_aks
+        ;;
+      *gke*)
+        echo "Calling cleanup_gke"
+        cleanup_gke
+        ;;
+    esac
   fi
   rm -rf ~/tmpbin
 }
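The commit message notes that the cleanup logic was moved behind a separate trap ("Use separate trap", later reapplied). The trap registration itself is outside this hunk; a minimal sketch consistent with the `cleanup` function above (the exact placement and signal list in openshift-ci-tests.sh are assumptions) would be:

# Sketch of the trap wiring described in the commit message; not shown in this diff.
trap cleanup EXIT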
