@@ -50,7 +50,7 @@ deploy_pmm_server() {
             --set platform="${platform}" \
             "https://percona-charts.storage.googleapis.com/pmm-server-${PMM_SERVER_VERSION}.tgz"
     fi
-    SERVICE="postgres"
+    local SERVICE="postgres"
     until kubectl -n "${NAMESPACE}" exec monitoring-0 -- bash -c "pgrep -x $SERVICE >/dev/null"; do
         echo "Retry $retry"
         sleep 5
@@ -63,13 +63,13 @@ deploy_pmm_server() {
 }
 
 get_pmm_api_key() {
-    ADMIN_PASSWORD=$(kubectl -n "${NAMESPACE}" exec monitoring-0 -- bash -c "printenv | grep ADMIN_PASSWORD | cut -d '=' -f2")
+    local ADMIN_PASSWORD=$(kubectl -n "${NAMESPACE}" exec monitoring-0 -- bash -c "printenv | grep ADMIN_PASSWORD | cut -d '=' -f2")
     echo $(curl --insecure -X POST -H "Content-Type: application/json" -d '{"name":"operator", "role": "Admin"}' "https://admin:$ADMIN_PASSWORD@"$(get_service_ip monitoring-service)"/graph/api/auth/keys" | jq .key)
 }
 
 deploy_minio() {
-    accessKey="$(kubectl -n "${NAMESPACE}" get secret minio-secret -o jsonpath='{.data.AWS_ACCESS_KEY_ID}' | base64 -d)"
-    secretKey="$(kubectl -n "${NAMESPACE}" get secret minio-secret -o jsonpath='{.data.AWS_SECRET_ACCESS_KEY}' | base64 -d)"
+    local accessKey="$(kubectl -n "${NAMESPACE}" get secret minio-secret -o jsonpath='{.data.AWS_ACCESS_KEY_ID}' | base64 -d)"
+    local secretKey="$(kubectl -n "${NAMESPACE}" get secret minio-secret -o jsonpath='{.data.AWS_SECRET_ACCESS_KEY}' | base64 -d)"
 
     helm uninstall -n "${NAMESPACE}" minio-service || :
     helm repo remove minio || :
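For context, `get_pmm_api_key` is meant to be captured by the caller when registering clients against PMM. A minimal usage sketch, not part of this patch (the `API_KEY` variable name is illustrative):

    # illustrative usage: grab an Admin-role API key from the monitoring pod
    API_KEY=$(get_pmm_api_key)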
@@ -312,6 +312,7 @@ get_mysql_users() {
 
 get_service_ip() {
     local service=$1
+
     while (kubectl get service/$service -n "${NAMESPACE}" -o 'jsonpath={.spec.type}' 2>&1 || :) | grep -q NotFound; do
         sleep 1
     done
@@ -392,16 +393,43 @@ wait_pod() {
     set -o xtrace
 }
 
+wait_deployment() {
+    local name=$1
+    local target_namespace=${2:-"$namespace"}
+
+    sleep 10
+    set +o xtrace
+    retry=0
+    echo -n $name
+    until [ -n "$(kubectl -n ${target_namespace} get deployment $name -o jsonpath='{.status.replicas}')" \
+        -a "$(kubectl -n ${target_namespace} get deployment $name -o jsonpath='{.status.replicas}')" \
+        == "$(kubectl -n ${target_namespace} get deployment $name -o jsonpath='{.status.readyReplicas}')" ]; do
+        sleep 1
+        echo -n .
+        let retry+=1
+        if [ $retry -ge 360 ]; then
+            kubectl logs $(get_operator_pod) -c operator \
+                | grep -v 'level=info' \
+                | grep -v 'level=debug' \
+                | tail -100
+            echo max retry count $retry reached. something went wrong with operator or kubernetes cluster
+            exit 1
+        fi
+    done
+    echo
+    set -o xtrace
+}
+
 check_auto_tuning() {
-    RAM_SIZE=$1
-    RDS_MEM_INSTANCE=12582880
-    CUSTOM_INNODB_SIZE=$2
-    CUSTOM_CONNECTIONS=$3
+    local RAM_SIZE=$1
+    local RDS_MEM_INSTANCE=12582880
+    local CUSTOM_INNODB_SIZE=$2
+    local CUSTOM_CONNECTIONS=$3
 
-    INNODB_SIZE=$(run_mysql \
+    local INNODB_SIZE=$(run_mysql \
         'SELECT @@innodb_buffer_pool_size;' \
         "-h $(get_haproxy_svc "$(get_cluster_name)") -uroot -proot_password")
-    CONNECTIONS=$(run_mysql \
+    local CONNECTIONS=$(run_mysql \
         'SELECT @@max_connections;' \
         "-h $(get_haproxy_svc "$(get_cluster_name)") -uroot -proot_password")
 
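The new `wait_deployment` mirrors the existing `wait_pod`: it polls until `.status.replicas` matches `.status.readyReplicas`, printing a dot per second, and after 360 retries dumps the operator log tail and exits. A usage sketch, not part of this patch (the deployment name is a placeholder):

    # illustrative usage: block until the operator Deployment is fully ready
    wait_deployment percona-server-mysql-operator "${OPERATOR_NS}"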
@@ -461,12 +489,15 @@ get_primary_from_haproxy() {
     run_mysql "SHOW VARIABLES LIKE '%hostname%';" "-h ${haproxy_pod_ip} -P3306 -uroot -proot_password" | awk '{print $2}'
 }
 
+get_primary_from_group_replication() {
+    run_mysql "SELECT MEMBER_HOST FROM performance_schema.replication_group_members where MEMBER_ROLE='PRIMARY';" "-h $(get_mysql_router_service $(get_cluster_name)) -P 6446 -uroot -proot_password" | cut -d'.' -f1
+}
+
 verify_certificate_sans() {
     local certificate=$1
     local expected_sans=$2
-
-    have=$(mktemp)
-    want=$(mktemp)
+    local have=$(mktemp)
+    local want=$(mktemp)
 
     kubectl -n "${NAMESPACE}" get certificate "${certificate}" -o jsonpath='{.spec.dnsNames}' | jq '.' > "${have}"
     echo "${expected_sans}" | jq '.' > "${want}"
@@ -475,21 +506,19 @@ verify_certificate_sans() {
 }
 
 check_passwords_leak() {
-
-    secrets=$(kubectl get secrets -o json | jq -r '.items[].data | to_entries | .[] | select(.key | (endswith(".crt") or endswith(".key") or endswith(".pub") or endswith(".pem") or endswith(".p12")) | not) | .value')
-
-    passwords="$(for i in $secrets; do base64 -d <<< $i; echo; done) $secrets"
-    pods=$(kubectl -n "${NAMESPACE}" get pods -o name | awk -F "/" '{print $2}')
+    local secrets=$(kubectl get secrets -o json | jq -r '.items[].data | to_entries | .[] | select(.key | (endswith(".crt") or endswith(".key") or endswith(".pub") or endswith(".pem") or endswith(".p12")) | not) | .value')
+    local passwords="$(for i in $secrets; do base64 -d <<< $i; echo; done) $secrets"
+    local pods=$(kubectl -n "${NAMESPACE}" get pods -o name | awk -F "/" '{print $2}')
 
     collect_logs() {
         NS=$1
         for p in $pods; do
-            containers=$(kubectl -n "$NS" get pod $p -o jsonpath='{.spec.containers[*].name}')
+            local containers=$(kubectl -n "$NS" get pod $p -o jsonpath='{.spec.containers[*].name}')
             for c in $containers; do
                 kubectl -n "$NS" logs $p -c $c > ${TEMP_DIR}/logs_output-$p-$c.txt
                 echo logs saved in: ${TEMP_DIR}/logs_output-$p-$c.txt
                 for pass in $passwords; do
-                    count=$(grep -c --fixed-strings -- "$pass" ${TEMP_DIR}/logs_output-$p-$c.txt || :)
+                    local count=$(grep -c --fixed-strings -- "$pass" ${TEMP_DIR}/logs_output-$p-$c.txt || :)
                     if [[ $count != 0 ]]; then
                         echo leaked passwords are found in log ${TEMP_DIR}/logs_output-$p-$c.txt
                         false
@@ -502,7 +531,80 @@ check_passwords_leak() {
 
     collect_logs $NAMESPACE
     if [ -n "$OPERATOR_NS" ]; then
-        pods=$(kubectl -n "${OPERATOR_NS}" get pods -o name | awk -F "/" '{print $2}')
+        local pods=$(kubectl -n "${OPERATOR_NS}" get pods -o name | awk -F "/" '{print $2}')
         collect_logs $OPERATOR_NS
     fi
 }
+
+deploy_chaos_mesh() {
+    destroy_chaos_mesh
+
+    helm repo add chaos-mesh https://charts.chaos-mesh.org
+    helm install chaos-mesh chaos-mesh/chaos-mesh --namespace=${NAMESPACE} --set chaosDaemon.runtime=containerd --set chaosDaemon.socketPath=/run/containerd/containerd.sock --set dashboard.create=false --version 2.5.1
+    sleep 10
+}
+
+destroy_chaos_mesh() {
+    local chaos_mesh_ns=$(helm list --all-namespaces --filter chaos-mesh | tail -n1 | awk -F' ' '{print $2}' | sed 's/NAMESPACE//')
+
+    for i in $(kubectl api-resources | grep chaos-mesh | awk '{print $1}'); do timeout 30 kubectl delete ${i} --all --all-namespaces || :; done
+    if [ -n "${chaos_mesh_ns}" ]; then
+        helm uninstall chaos-mesh --namespace ${chaos_mesh_ns} || :
+    fi
+    timeout 30 kubectl delete crd $(kubectl get crd | grep 'chaos-mesh.org' | awk '{print $1}') || :
+    timeout 30 kubectl delete clusterrolebinding $(kubectl get clusterrolebinding | grep 'chaos-mesh' | awk '{print $1}') || :
+    timeout 30 kubectl delete clusterrole $(kubectl get clusterrole | grep 'chaos-mesh' | awk '{print $1}') || :
+    timeout 30 kubectl delete MutatingWebhookConfiguration $(kubectl get MutatingWebhookConfiguration | grep 'chaos-mesh' | awk '{print $1}') || :
+    timeout 30 kubectl delete ValidatingWebhookConfiguration $(kubectl get ValidatingWebhookConfiguration | grep 'chaos-mesh' | awk '{print $1}') || :
+    timeout 30 kubectl delete ValidatingWebhookConfiguration $(kubectl get ValidatingWebhookConfiguration | grep 'validate-auth' | awk '{print $1}') || :
+}
+
+kill_pods() {
+    local ns=$1
+    local selector=$2
+    local pod_label=$3
+    local label_value=$4
+    local chaos_suffix=$5
+
+    if [ "${selector}" == "pod" ]; then
+        yq eval '
+            .metadata.name = "chaos-pod-kill-'${chaos_suffix}'" |
+            del(.spec.selector.pods.test-namespace) |
+            .spec.selector.pods.'${ns}'[0] = "'${pod_label}'"' ${TESTS_CONFIG_DIR}/chaos-pod-kill.yml \
+            | kubectl apply --namespace ${ns} -f -
+    elif [ "${selector}" == "label" ]; then
+        yq eval '
+            .metadata.name = "chaos-kill-label-'${chaos_suffix}'" |
+            .spec.mode = "all" |
+            del(.spec.selector.pods) |
+            .spec.selector.labelSelectors."'${pod_label}'" = "'${label_value}'"' ${TESTS_CONFIG_DIR}/chaos-pod-kill.yml \
+            | kubectl apply --namespace ${ns} -f -
+    fi
+    sleep 5
+}
+
+failure_pod() {
+    local ns=$1
+    local pod=$2
+    local chaos_suffix=$3
+
+    yq eval '
+        .metadata.name = "chaos-pod-failure-'${chaos_suffix}'" |
+        del(.spec.selector.pods.test-namespace) |
+        .spec.selector.pods.'${ns}'[0] = "'${pod}'"' ${TESTS_CONFIG_DIR}/chaos-pod-failure.yml \
+        | kubectl apply --namespace ${ns} -f -
+    sleep 5
+}
+
+network_loss() {
+    local ns=$1
+    local pod=$2
+    local chaos_suffix=$3
+
+    yq eval '
+        .metadata.name = "chaos-pod-network-loss-'${chaos_suffix}'" |
+        del(.spec.selector.pods.test-namespace) |
+        .spec.selector.pods.'${ns}'[0] = "'${pod}'"' ${TESTS_CONFIG_DIR}/chaos-network-loss.yml \
+        | kubectl apply --namespace ${ns} -f -
+    sleep 5
+}
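The chaos helpers form a deploy/inject/cleanup cycle: `deploy_chaos_mesh` starts from a clean slate by calling `destroy_chaos_mesh` first, and each injector applies a patched Chaos Mesh manifest scoped to one namespace. A sketch of how a test might chain them, not part of this patch (the pod name `cluster1-mysql-0` is a placeholder):

    # illustrative usage: kill one mysql pod and let the operator restore it
    deploy_chaos_mesh
    kill_pods "${NAMESPACE}" "pod" "cluster1-mysql-0" "" "kill-mysql"
    wait_pod cluster1-mysql-0
    destroy_chaos_mesh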