From a838611bde89a3a1b872e4dbaf90e530bfcf7df1 Mon Sep 17 00:00:00 2001 From: "Padmanabhan, Badhrinath" Date: Wed, 21 May 2025 22:57:58 -0700 Subject: [PATCH 1/2] Scale scripts for Apporch components --- scripts/README.md | 307 +++++++++ scripts/app-scale-tester/.add-N-apps.sh.swp | Bin 0 -> 16384 bytes scripts/app-scale-tester/add-N-apps-nginx.sh | 524 ++++++++++++++ scripts/app-scale-tester/add-N-apps.sh | 525 +++++++++++++++ .../app-scale-tester/add-N-apps_dummyapp.sh | 525 +++++++++++++++ scripts/app-scale-tester/bck_add-N-apps.sh | 637 ++++++++++++++++++ scripts/app-scale-tester/run_test.sh | 16 + scripts/deployment-packages/README.md | 22 + .../dummy-app-package/app-dummy-0.0.1.yaml | 20 + .../dummy-app-package/dp-dummy-0.0.1.yaml | 22 + .../dummy-app-package/registry-sandbox.yaml | 118 ++++ .../values-dummy-app-0.0.1.yaml | 0 .../app-nginx-local-0.1.0.yaml | 23 + .../nginx-app-local/dp-nginx-local-0.1.0.yaml | 26 + .../values-nginx-local-1-0.1.0.yaml | 18 + .../nginx-app/app-nginx-0.1.0.yaml | 23 + .../nginx-app/dp-nginx-0.1.0.yaml | 26 + .../nginx-app/values-nginx-1-0.1.0.yaml | 14 + .../ten-dummy-apps/app-dummy-01.yaml | 20 + .../ten-dummy-apps/app-dummy-02.yaml | 20 + .../ten-dummy-apps/app-dummy-03.yaml | 20 + .../ten-dummy-apps/app-dummy-04.yaml | 20 + .../ten-dummy-apps/app-dummy-05.yaml | 20 + .../ten-dummy-apps/app-dummy-06.yaml | 20 + .../ten-dummy-apps/app-dummy-07.yaml | 20 + .../ten-dummy-apps/app-dummy-08.yaml | 20 + .../ten-dummy-apps/app-dummy-09.yaml | 20 + .../ten-dummy-apps/app-dummy-10.yaml | 20 + .../ten-dummy-apps/dp-dummy-0.0.1.yaml | 57 ++ .../ten-dummy-apps/registry-sandbox.yaml | 118 ++++ .../ten-dummy-apps/values-01.yaml | 4 + .../ten-dummy-apps/values-02.yaml | 4 + .../ten-dummy-apps/values-03.yaml | 4 + .../ten-dummy-apps/values-04.yaml | 4 + .../ten-dummy-apps/values-05.yaml | 4 + .../ten-dummy-apps/values-06.yaml | 4 + .../ten-dummy-apps/values-07.yaml | 4 + .../ten-dummy-apps/values-08.yaml | 4 + 
.../ten-dummy-apps/values-09.yaml | 4 + .../ten-dummy-apps/values-10.yaml | 4 + scripts/k6-scripts/adm-api-latency.js | 85 +++ scripts/k6-scripts/arm-asp-api-latency.js | 154 +++++ .../arm-asp-api-performance-test.sh | 242 +++++++ scripts/k6-scripts/arm-vnc-api-latency.js | 162 +++++ .../arm-vnc-api-performance-test.sh | 36 + scripts/k6-scripts/tmp.json | 122 ++++ 46 files changed, 4062 insertions(+) create mode 100644 scripts/README.md create mode 100644 scripts/app-scale-tester/.add-N-apps.sh.swp create mode 100755 scripts/app-scale-tester/add-N-apps-nginx.sh create mode 100755 scripts/app-scale-tester/add-N-apps.sh create mode 100755 scripts/app-scale-tester/add-N-apps_dummyapp.sh create mode 100755 scripts/app-scale-tester/bck_add-N-apps.sh create mode 100755 scripts/app-scale-tester/run_test.sh create mode 100644 scripts/deployment-packages/README.md create mode 100644 scripts/deployment-packages/dummy-app-package/app-dummy-0.0.1.yaml create mode 100644 scripts/deployment-packages/dummy-app-package/dp-dummy-0.0.1.yaml create mode 100644 scripts/deployment-packages/dummy-app-package/registry-sandbox.yaml create mode 100644 scripts/deployment-packages/dummy-app-package/values-dummy-app-0.0.1.yaml create mode 100644 scripts/deployment-packages/nginx-app-local/app-nginx-local-0.1.0.yaml create mode 100644 scripts/deployment-packages/nginx-app-local/dp-nginx-local-0.1.0.yaml create mode 100644 scripts/deployment-packages/nginx-app-local/values-nginx-local-1-0.1.0.yaml create mode 100644 scripts/deployment-packages/nginx-app/app-nginx-0.1.0.yaml create mode 100644 scripts/deployment-packages/nginx-app/dp-nginx-0.1.0.yaml create mode 100644 scripts/deployment-packages/nginx-app/values-nginx-1-0.1.0.yaml create mode 100644 scripts/deployment-packages/ten-dummy-apps/app-dummy-01.yaml create mode 100644 scripts/deployment-packages/ten-dummy-apps/app-dummy-02.yaml create mode 100644 scripts/deployment-packages/ten-dummy-apps/app-dummy-03.yaml create mode 100644 
scripts/deployment-packages/ten-dummy-apps/app-dummy-04.yaml create mode 100644 scripts/deployment-packages/ten-dummy-apps/app-dummy-05.yaml create mode 100644 scripts/deployment-packages/ten-dummy-apps/app-dummy-06.yaml create mode 100644 scripts/deployment-packages/ten-dummy-apps/app-dummy-07.yaml create mode 100644 scripts/deployment-packages/ten-dummy-apps/app-dummy-08.yaml create mode 100644 scripts/deployment-packages/ten-dummy-apps/app-dummy-09.yaml create mode 100644 scripts/deployment-packages/ten-dummy-apps/app-dummy-10.yaml create mode 100644 scripts/deployment-packages/ten-dummy-apps/dp-dummy-0.0.1.yaml create mode 100644 scripts/deployment-packages/ten-dummy-apps/registry-sandbox.yaml create mode 100644 scripts/deployment-packages/ten-dummy-apps/values-01.yaml create mode 100644 scripts/deployment-packages/ten-dummy-apps/values-02.yaml create mode 100644 scripts/deployment-packages/ten-dummy-apps/values-03.yaml create mode 100644 scripts/deployment-packages/ten-dummy-apps/values-04.yaml create mode 100644 scripts/deployment-packages/ten-dummy-apps/values-05.yaml create mode 100644 scripts/deployment-packages/ten-dummy-apps/values-06.yaml create mode 100644 scripts/deployment-packages/ten-dummy-apps/values-07.yaml create mode 100644 scripts/deployment-packages/ten-dummy-apps/values-08.yaml create mode 100644 scripts/deployment-packages/ten-dummy-apps/values-09.yaml create mode 100644 scripts/deployment-packages/ten-dummy-apps/values-10.yaml create mode 100644 scripts/k6-scripts/adm-api-latency.js create mode 100644 scripts/k6-scripts/arm-asp-api-latency.js create mode 100755 scripts/k6-scripts/arm-asp-api-performance-test.sh create mode 100644 scripts/k6-scripts/arm-vnc-api-latency.js create mode 100755 scripts/k6-scripts/arm-vnc-api-performance-test.sh create mode 100644 scripts/k6-scripts/tmp.json diff --git a/scripts/README.md b/scripts/README.md new file mode 100644 index 00000000..f600b8d9 --- /dev/null +++ b/scripts/README.md @@ -0,0 +1,307 @@ + + 
+# App-Orch Scale testing +This README documents details on the following topics +- App-Orch scale testing +- Tools used +- Different scale strategies (ENiC vs vCluster) +- Script used and how to run them +- Results collected +- Debugging +- Cleanup + +## Pre-requisite + +### Gnu Plot +```shell +sudo apt-get update +sudo apt-get install gnuplot +``` +Verified gnuplot version **5.4 patchlevel 2**. + +### K6 API load tester tool +Use instructions at https://k6.io/docs/get-started/installation/ to install k6. Recommended version **v0.47.0**. + +### vCluster +Refer https://www.vcluster.com/. Please install vcluster version **0.19.6**. + +### kubectl +Refer https://kubernetes.io/docs/tasks/tools/ and install kubectl for your platform. Recommended version **v1.28.9**. + +### JQ and YQ +- [JQ](https://jqlang.github.io/jq/download/) - **jq-1.6** or above +- [YQ](https://mikefarah.gitbook.io/yq) - **v4.33.3** or above + +### Bash shell +Bash shell version **5.1.16** or above. + +### Catalog CLI tool +Refer to https://github.com/intel-innersource/frameworks.edge.one-intel-edge.maestro-app.cli. + +## Running the cluster scale tester scripts + +### Using ENIC based edge node emulator for scale tests +The following steps detail scale tests based on ENIC based edge node emulator. + +The scale tester scripts can be used to bring up multiple clusters and install multiple apps on +them. The clusters / apps are installed in batches of 10 by default. After each batch is ready, +the test scripts will run API latency tests and collect relevant metrics from the Observability service. + +To create a number of clusters using the Configured hosts and wait for all clusters to be Ready: +```shell +cd ./enic-scale-tester +./add-N-clusters.sh -c -o +``` + +#### Other options to the scripts + +- `-b` : Batch Size of clusters to install. **Default 10** +- `-f` : Cluster FQDN. **Default integration12.maestro.intel.com** +- `-u` : Keycloak username. 
**Default all-groups-example-user** +- `-p` : Keycloak password. **Default ChangeMeOn1stLogin!** +- `-a` : Apps per enic. **Default 3** + + +### Using vCluster based edge node emulator for scale tests +The following steps detail using [vCluster](https://www.vcluster.com/) based scale tests. + +#### Case1: A host cluster already exists to host the vClusters +**Pre-requisite:** +- Set KUBECONFIG env variable to kubeconfig of the target cluster +- The script expects that `apps` namespace is pre-created on the target clusters. If vclusters are to be installed in to a different NS, the script needs to be modified. +- The target namespace has sufficient resource limits to host the required number of vclusters + + +To create a number of clusters using the Configured hosts and wait for all clusters to be Ready: +```shell +cd vcluster-scale-tester/ +./add-N-virtual-clusters.sh -c -o +``` + +Use help option like below to learn more options that is exposed by the script + +```shell +./add-N-virtual-clusters.sh -h +``` + +#### Case2: No host cluster but only bare-metal servers with Ubuntu 22.04 + +Do the following on all the servers that are used to host vClusters + +##### Install Tools +```shell +# Install KinD +[ $(uname -m) = x86_64 ] && curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.23.0/kind-linux-amd64 +# For ARM64 +[ $(uname -m) = aarch64 ] && curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.23.0/kind-linux-arm64 +chmod +x ./kind +sudo mv ./kind /usr/local/bin/kind + +# Install Helm +curl https://baltocdn.com/helm/signing.asc | gpg --dearmor | sudo tee /usr/share/keyrings/helm.gpg > /dev/null +sudo apt-get install apt-transport-https --yes +echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/helm.gpg] https://baltocdn.com/helm/stable/debian/ all main" | sudo tee /etc/apt/sources.list.d/helm-stable-debian.list +sudo apt-get update +sudo apt-get install helm + +# Install vCluster +wget 
https://github.com/loft-sh/vcluster/releases/download/v0.19.6/vcluster-linux-amd64 +chmod +x vcluster-linux-amd64 +sudo mv vcluster-linux-amd64 /usr/local/bin/vcluster + +# Install kubectl +curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" +chmod +x kubectl +sudo mv kubectl /usr/local/bin/kubectl +``` +##### Copy intel-harbor-ca.crt +Do this in the home folder of the server +```shell +openssl s_client -showcerts -connect amr-registry.caas.intel.com:443 < /dev/null | sed -ne '/-BEGIN CERTIFICATE-/,/-END CERTIFICATE-/p' > ~/intel-harbor-ca.crt +``` + +##### Create KinD cluster config file +Create file named `kind-config.yaml` in the home folder of the server with below content. +**Note**: Change the `apiServerAddress` to the actual IP address of the server. + +```shell +kind: Cluster +apiVersion: kind.x-k8s.io/v1alpha4 +networking: + apiServerAddress: 10.3.162.65 +nodes: +- role: control-plane + extraMounts: + - hostPath: /tmp/var/log/ + containerPath: /var/log/ + - hostPath: /tmp/var/log/containers/ + containerPath: /var/log/containers/ + - hostPath: /tmp/var/lib/rancher/rke2/agent/logs/ + containerPath: /var/lib/rancher/rke2/agent/logs/ + - hostPath: /tmp/var/lib/rancher/rke2/server/logs/ + containerPath: /var/lib/rancher/rke2/server/logs/ + - hostPath: /tmp/dev/lvmvg/ + containerPath: /dev/lvmvg/ +containerdConfigPatches: + - |- + [plugins."io.containerd.grpc.v1.cri".registry] + [plugins."io.containerd.grpc.v1.cri".registry.mirrors] + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."*"] + endpoint = ["https://dockerhubcache.caas.intel.com"] + [plugins."io.containerd.grpc.v1.cri".registry.configs] + [plugins."io.containerd.grpc.v1.cri".registry.configs."amr-registry.caas.intel.com".tls] + ca_file = "/usr/local/share/ca-certificates/intel-harbor-ca.crt" + [plugins."io.containerd.grpc.v1.cri".registry.configs."dockerhubcache.caas.intel.com".tls] + ca_file = 
"/usr/local/share/ca-certificates/intel-harbor-ca.crt" +kubeadmConfigPatches: + - | + apiVersion: kubelet.config.k8s.io/v1beta1 + kind: KubeletConfiguration + maxPods: 500 +``` + +##### Enable public key access +Enable public key access to these servers from the server which is used to run the automation scripts. + +*We are finally ready to run the automation script* + +```shell +cd vcluster-scale-tester/ +./add-virtual-clusters-on-kind.sh -s "@/,@/" + +## Example +./add-virtual-clusters-on-kind.sh -s "labrat@10.123.232.168/10,labrat@10.123.232.172/10,root@10.237.213.34/10,root@10.237.213.151/10,testbeduser@10.228.254.158/5,labuser@10.3.162.217/10,labuser@10.3.162.88/10,labuser@10.3.162.105/10,labuser@10.3.162.65/10" +``` + +Other options to the script -> +```shell +❯ ./add-virtual-clusters-on-kind.sh -h +Usage: ./add-virtual-clusters-on-kind.sh [options] [--] [arguments] + +Options: + -s VALUE List of servers (comma separated) and vclusters to install on those servers. Ex: "labuser@10.3.162.217/30,labuser@10.3.162.105/30" + -u VALUE Keycloak username, default all-groups-example-user + -p VALUE Keycloak password, default ChangeMeOn1stLogin! + -f VALUE Orch FQDN, default integration12.maestro.intel.com + -b VALUE Cluster install batch size, default 10 + -o VALUE Observability API credentials base64 encoded + -a VALUE Apps per ENIC, default 1 + -k VALUE vClusters per kind host, default 30 + -r VALUE Path of amr-registry.caas.intel.com registry public certificate. + -h Print this help menu + + In the below example, we ask to install 30 vclusters on labuser@10.3.162.217 and 10 vclusters on labuser@10.3.162.105 (using -s). + We also specify the path of Intel AMR CaaS public certificate (using -r). 
We then specify that only 10 vclusters should be hosted per kind cluster + + ./add-virtual-clusters-on-kind.sh -s "labuser@10.3.162.217/30,labuser@10.3.162.105/10" -r ./intel_harbor_ca.crt -k 10 +``` + +### Running the app scale test script and collecting metrics + +#### ADM API Scale tester +To deploy multiple copies of the "dummy app" to place load on the App Orch control plane without exhausting scarce edge resources: +```shell +cd app-scale-tester/ +./add-N-apps.sh -a -o +``` +The script generates various plots at the end of the test on API latency and Resource usage. Look for the results in the `./test-results/` folder. +The exact folder where the results are generated is logged at the end of the script execution. + +*NOTE:* Refer section [Getting observability api credentials](#getting-observability-api-credentials) further in this README on how to get Observability API Token to get the observability metrics. + +#### ARM API and ASP Scale tester + +To run the ARM and API scale tester do the following. This script scales the number of concurrent users linearly, +and then measures the ARM API and ASP latency, and also Maestro-App-System Namespace resource usage during the process +```shell +cd k6-scripts +./arm-asp-api-performance-test.sh -a -o +``` +For more details on the options to the script use below command +```shell +./arm-asp-api-performance-test.sh -h +``` + +At the end of the test result, all the metrics are collected and graphs are generated in the `./test-results/` folder. +The exact folder where the results are generated is logged at the end of the script execution. 
+ +## Getting observability api credentials + +```shell +export KUBECONFIG= + +kubectl get secret mp-observability-grafana -n maestro-platform-system -o go-template='{{range $k,$v := .data}}{{printf "%s: " $k}}{{if not $v}}{{$v}}{{else}}{{$v | base64decode}}{{end}}{{"\n"}}{{end}}' + +admin-password: +admin-user: +ldap-toml: +``` + +The output credentials are decoded so now base64 encode `:` with the colon char inbetween and provide that as `` + +**Note:** when orchestrator or system restarts, the password will change so you will need to get new credentials and encode again. + +## Test Results +All results will be stored in `./test-results/` folder. It will be a collection of PNG files generated by GNU Plot in PNG file format and the source CSV files that were used to generate the GNU plots. +The shell script also dumps a lot of logs that could be used to trace the test progress. + +## Debugging +### vClusters setup on Scale Load cluster +```shell +# 1: Set the KUBECONFIG to the right kubeconfig file pointing to scale load cluster +export KUBECONFIG= +# 2: Connect to problematic cluster and execute the kubectl command of interest to debug +vcluster connect -n apps -- kubectl get pods -A +``` +**NOTE:** All vclusters are hosted in `apps` namespace by default for app-orch scale testing. +### vClusters setup on bare-metal servers +The vCluster name on bare-metal servers have the format - `vcluster-edge-node--- +# Example: +kubectl config use-context kind-kind-node-10-3-162-88-0 +# 3: list vClusters on that KinD cluster +vcluster list -n apps +# 4: connect to vcluster and execute the kubectl command of interest to debug further +# Example: +vcluster connect -n apps vcluster-edge-node-10-3-162-88-17 -- kubectl get pods -A +``` +**NOTE:** All vclusters are hosted in `apps` namespace by default for app-orch scale testing. + +## Cleanup +### vClusters cleanup on Scale Load cluster +Use the following instructions +1. Delete those clusters from Rancher UI. 
Bulk deletes are possible on Rancher UI by selecting several clusters. +2. Delete specific vclusters with `vcluster delete -n apps `. Make sure you have set the right kubeconfig to be able to access the vcluster. To delete batch of vclusters, use `utils/delete-vclusters.sh -c ` script. +3. If the associated pods are still in Terminating state after vcluster tool has deleted the vclusters, then force delete those pods in Scale Load cluster with below command +``` +kubectl delete pod -n apps --force `kubectl get pods -n apps | grep Terminating | awk '{print $1}'` +``` +4. Then re-setup the clusters that are necessary to fill the gap on Scale Load cluster with below command + +``` + ./add-N-virtual-clusters.sh -c -a -i +``` + +**NOTE**: + +- All vclusters are hosted in `apps` namespace by default for app-orch scale testing. +- Choose `start-index-to-use-for-vcluster` such that it is the first available index with no index in use after that. It defaults to 0 if none specified. + +### vClusters cleanup on bare-metal servers +Use below instructions + +- Delete those clusters from Rancher UI. Bulk deletes are possible on Rancher UI by selecting several clusters. +- Login to baremetal hosting the vCluster. Setup the kubeconfig to the KinD cluster hosting the vCluster. Delete the vcluster now + +At this time it is not possible to add vClusters on a bare-metal in an additive manner. So please delete all the clusters associated with a server on Rancher UI using bulk delete, then delete all the KinD clusters (which also deletes the vClusters automatically) created for hosting vClusters. Once this cleanup is done, setup the required number of vClusters on this baremetal using the instructions shared in previous sections. 
diff --git a/scripts/app-scale-tester/.add-N-apps.sh.swp b/scripts/app-scale-tester/.add-N-apps.sh.swp new file mode 100644 index 0000000000000000000000000000000000000000..35c3731a2c1c7fe1f00b5fa404d502ce58f45ee1 GIT binary patch literal 16384 zcmeHO&5s;M6)!#wgpkOQ!X<}t&u+Yy-7~wkgvdr6*i2&Ci9c4}U0cW)RMTCv({_Kk zt9sTG%OE&~OD=>YQX~W+Cld!DB#=NDAwER7aERa#!9PHda=<}IhQRMtch7Xs&Srf< z9NZ)Qyu0<@t9tdTde!ybw0nzZkDug^w3iq@_cPXdtvz^Y;XQWat!1WUMy-qP(~W!)K9cBvWcopJHqqqQ#MRgTkSs7uD&!1 z4Fh){1NX8chnE&<-Ysh%e_-#%-A6$)Zw)pvhsh|HTK55Z@Zc6@MAurqCn3DgykUv?p^J0KF?J64>%ENq1-J?P9C#Tx4?G0i5B%bj zjQtq+K5!j)4)`MQY2ZQN);)~<3V0QG7Wfjd3_Jkr1%CYr#x4UJz{nmvz&m>w`w5_dE^rUi$gF!m!L0b<~3zyfZ6n6Wp3SAdJa ze&8P{GTa2N0#5;Zfj^*d@@=3Gv;Yfu7hK)~-UQwNUIv~Ah~uvT^$+LL9mL!^ooGLf z)Uw6$mf{DuHk8MhDni@zZo5Xu?vs+Oj8;2|HmzD0yIF6yJ1OwCNE)vs z$&R+6$OjU`%^iJ(>`r{Gc2OB`rghcV+-|0?b=%TcaMw+;oq2VW><)MxcTw?`R%M%Y z)z^IJnZDL-OJ6aA@j%mifM&OCwC#JK+C|0JnbuWbbGw| zo1z7;4*7PFw-tNE+otBtQcIHKk^kayS48o`di)K$Yf2_{z!X9d&F)&-FjyJ;e4*n&u70D5Nyld&M`-#_rTb&)Un z&em05^Py+@S|7Reb%$Uwqt&rCsauese!U+cD|ub9ECU>Y;?{=sB? 
zt#(oImAKhvUG+7$o9S!aw)C|gOu36w!IV2x4W?XeDwuM$S}^5$)4^mEb(lQ0cKXy& zM3ZC9rBY>qR!{h;bakALIuN%nsyMRPw8*jA9eTUaE(D!9H%n7IBq@5=vWl0_=t8qo}L)L z$G9#-Jax5n_FuK|v^^z4X)Rmx$~r`Uo|UP>wQCm+S;Y;Z=yS@8$*Y;` zqlD%lG9BAw?c>3I$cl$DO?^+E6+tFx^b6PagEBZ71nxk(m*@f_dRWD^?%~DHJ_6bL zKyut43_l#xO=IB>aAIo3-5AaC3f~&Yh;Kh%U z#NWvnw#|zb4@NwSHSR(-OKTffleThxt$~LZxF%J82k1NHFg-or=wBXq} zQd{`d>cTjdC;I{}VxyIQp_kRkdX6>hVv*~b#rANl#r9Hru`Z)ka;#|6kFMB3jAf{Z zCp4`J_Y$syRb^0B8njv*{_x@n##JI+zwf(zC{mx6O5rNpQ!*Wjo*(#ngokN2l^$Zq z7lCpti{nw3#zQPS7H5d9;2|U%hiT#gI%GYe-9gcNt@C5?77s-G)bE z^hYSMIGG|AH_n`xT>ERQZNt=$rqTe)3_3U?g^IHjk$6Px!w++ktqP^K;?#4x0}*Y?lk#-5 zr1Xgxp|lT2NG0iUF8PtyHaj80Lg9EZ7ToU_dx%w)%}LspDj}18D(pdw&5x68mSKw@ zk9-Ov>{+g`Abc~3dl+OKdt5{w&Oy3eBC$ha=`-YmSA5Nv=ogbJ<`IjtEDL*>VB8;X zCPYPs2OK;-&*xg)arh#?@F>M(#PB}9<~H8xZ|&>!{HW6tc#)p+|8F3N_K{Ol{$GB7 z{zv5XH-Vo4KLuU{UI3m2JirAW1s(+ciQNA!;D^BLzzyJgz$?HvfdOy=cpCUS>Ht3g zUI4xYBtQ(D0uBLGC%6xI2lap=0QLj7;rk8XHQ+fQ1}*|L zrvcD>8U`8$8U`8$8U`8$8U`8${vQl@aU^krb>oauERSo|HQEUB#_Aler^j>PFh)a|;Vp2f~*eRQcgH>H$il0F;_a(1Cog zZr&e9vSRu$)!JG*l&6K`t9PEeQ3l1&WDz%AQ%3nX<%;EG@MX;|gDd)$r{Z2&fjT&^ z$T6h4NVv2Lt4n2 z$B7KAK|(|}LUSZ@C>5Y&wp^ol;@vLO<4Pi>@`%K3HZ|&@uqAwbG)`B7US4?6*y)n2XuMsRRk*`qHFmGmZt)^2cc+l}gGg3Ir{xixu?-R5OxP z4*fV&+!QM)6)bX7#la|oEAWs#CMcYuCKsnV7@0mQSt#{So4lyJh)q;8i}h2YFWW_X z-jylHC9zU|54E(sV-Mvz6e`K<}KBrB=%xE=j3n67Cr@cEo&Dhc8I zs18}z>(umEp0sZu2m}?NDkaJ!uhZc)z_$`2h&OX3R+ymbhFy;k7{|3T0n%=_@x81e b!z{b0QB8F6^nY?3g4vwyv9h7s6b1eZ9Wt+s literal 0 HcmV?d00001 diff --git a/scripts/app-scale-tester/add-N-apps-nginx.sh b/scripts/app-scale-tester/add-N-apps-nginx.sh new file mode 100755 index 00000000..aa325ad9 --- /dev/null +++ b/scripts/app-scale-tester/add-N-apps-nginx.sh @@ -0,0 +1,524 @@ +#!/bin/bash +set -eu + +if [ $# == 0 ]; then + echo "specify number of apps to setup" + exit 1 +fi + +# Initialize some global variables and constants +clusterFqdn=integration14.espd.infra-host.com # Change if needed +# clusterFqdn=kind.internal # Change if needed +enspUser=intel-itep-user && 
enspPassword=ChangeMeOn1stLogin! +prometheusURL="https://observability-admin.${clusterFqdn}/api/datasources/uid/orchestrator-mimir/resources/api/v1" +prometheusTestURL="https://observability-admin.${clusterFqdn}/api/datasources/uid/orchestrator-mimir/health" +project=itep +targetClusterLabel="default-extension=baseline" + +## App install specific variables +totalApps=$1 # Total apps to install - input argument +batchSize=10 # How many apps to install in parallel? +interval=10 +pageSize=100 # Adjust pageSize as needed for querying elements on APIs +offset=0 +runTests=1 + +## Token specific variables +orchAPIToken="" # Orchestrator API Token +observabilityApiCredentials="" # Provide a valid token for the test. + +## App install specific variables +containerDeploymentPackageName="nginx-app" +containerAppName="nginx" +containerAppVersion="0.1.0" +containerAppProfileName="default-profile" +containerAppPublisherName="default" +totalContainerAppInstancesPerCluster=3 + +# Container deployment template -- currently not used but kept for reference. +# The template approach gets complex when we have a Deployment with multiple apps. 
+# shellcheck disable=SC2089 +containerAppDeploymentTemplate='{"appName":"%s","appVersion":"%s","name":"dummy-app-%d","profileName":"%s","publisherName":"%s","targetClusters":[{"appName":"%s", "labels":{"target":"scale"}}],"displayName":"dummy-app-%d","deploymentType":"auto-scaling","overrideValues":[]}' + +# CSV Files to capture result +currentTimeStamp=$(date -u +"%Y%m%d-%H%M%S") +resultsDirectory="./test-results/$currentTimeStamp" +tmpJSONFile="$resultsDirectory/tmp.json" +appInstallTimeCsv="app-install-time.csv" +admApiLatencyCsv="adm-api-latency.csv" +#armApiLatencyCsv="arm-api-latency.csv" +avgCpuMaestroAppSystemCsv="avg-cpu-usage-ma.csv" +avgCpuCattleSystemCsv="avg-cpu-usage-cattle-system.csv" +avgCpuCattleFleetSystemCsv="avg-cpu-usage-cattle-fleet-system.csv" +maxCpuMaestroAppSystemCsv="max-cpu-usage-ma.csv" +maxCpuCattleSystemCsv="max-cpu-usage-cattle-system.csv" +maxCpuCattleFleetSystemCsv="max-cpu-usage-cattle-fleet-system.csv" +avgRamMaestroAppSystemCsv="avg-ram-usage-ma.csv" +avgRamCattleSystemCsv="avg-ram-usage-cattle-system.csv" +avgRamCattleFleetSystemCsv="avg-ram-usage-cattle-fleet-system.csv" +maxRamMaestroAppSystemCsv="max-ram-usage-ma.csv" +maxRamCattleSystemCsv="max-ram-usage-cattle-system.csv" +maxRamCattleFleetSystemCsv="max-ram-usage-cattle-fleet-system.csv" + +# Png files generated from CSV file +appInstallTimePng="app-install-time.png" +admApiLatencyPng="adm-api-latency.png" +#armApiLatencyPng="arm-api-latency.png" +avgCpuMaestroAppSystemPng="avg-cpu-usage-ma.png" +avgCpuCattleSystemPng="avg-cpu-usage-cattle-system.png" +avgCpuCattleFleetSystemPng="avg-cpu-usage-cattle-fleet-system.png" +maxCpuMaestroAppSystemPng="max-cpu-usage-ma.png" +maxCpuCattleSystemPng="max-cpu-usage-cattle-system.png" +maxCpuCattleFleetSystemPng="max-cpu-usage-cattle-fleet-system.png" +avgRamMaestroAppSystemPng="avg-ram-usage-ma.png" +avgRamCattleSystemPng="avg-ram-usage-cattle-system.png" +avgRamCattleFleetSystemPng="avg-ram-usage-cattle-fleet-system.png" 
+maxRamMaestroAppSystemPng="max-ram-usage-ma.png" +maxRamCattleSystemPng="max-ram-usage-cattle-system.png" +maxRamCattleFleetSystemPng="max-ram-usage-cattle-fleet-system.png" + +appOrchEndpoint="https://api.${clusterFqdn}" + +# Function to display usage help +usage() { + echo "Usage: $0 [options] [--] [arguments]" + echo + echo "Options:" + echo " -u VALUE Keycloak username, default all-groups-example-user" + echo " -p VALUE Keycloak password, default ChangeMeOn1stLogin!" + echo " -f VALUE Orch FQDN, default integration12.espd.infra-host.com" + echo " -b VALUE Cluster install batch size, default 10" + echo " -o VALUE Observability API credentials base64 encoded" + echo " -a VALUE Apps per cluster, default 3" + echo " -d VALUE Deployment package name to deploy, default dummy-app-package" + echo " -v VALUE Deployment package version to deploy, default 0.0.1" + echo " -g VALUE Don't run tests, only regenerate graphs using the specified results directory" + echo " -m VALUE project, default itep" + echo " -h Print this help menu" + + echo + echo "Example:" + echo " $0 -a 10 -b 2" + exit 1 +} + +readInputArgs() { + while [[ $# -gt 0 ]]; do + case "$1" in + -u | --user) + echo "Username: $2" + enspUser=$2 + shift # Shift past the option + shift # Shift past the value + ;; + -p | --password) + echo "Password: $2" + enspPassword=$2 + shift # Shift past the option + shift # Shift past the value + ;; + -h | --help) + usage + shift + ;; + -f | --fqdn) + echo "FQDN: $2" + clusterFqdn=$2 + shift # Shift past the option + shift # Shift past the value + ;; + -b | --batch-size) + echo "Cluster Install batch size: $2" + batchSize=$2 + shift # Shift past the option + shift # Shift past the value + ;; + -o | --observability-api-cred) + echo "Observability API Credentials base64 encoded: $2" + observabilityApiCredentials=$2 + shift # Shift past the option + shift # Shift past the value + ;; + -a | --total-apps-per-cluster) + echo "Apps per enic: $2" + totalApps=$2 + shift # Shift past 
the option + shift # Shift past the value + ;; + -d | --deployment-package-name) + echo "Deployment package name: $2" + containerDeploymentPackageName=$2 + shift # Shift past the option + shift # Shift past the value + ;; + -v | --deployment-package-version) + echo "Deployment-package-version: $2" + containerAppVersion=$2 + shift # Shift past the option + shift # Shift past the value + ;; + -m | --project) + echo "Project: $2" + project=$2 + shift # Shift past the option + shift # Shift past the value + ;; + -g) + echo "Results directory: $2" + resultsDirectory=$2 + exit 0 + ;; + *) + echo "Unknown option: $1" + shift # Shift past the unknown option + ;; + esac + done +} + +# Function to initialize Orchestrator API Token +initializeOrchAPIToken() { + orchAPIToken=$(curl -s --location --request POST https://keycloak.${clusterFqdn}/realms/master/protocol/openid-connect/token \ + -H 'Content-Type: application/x-www-form-urlencoded' \ + -d "username=${enspUser}" \ + -d "password=${enspPassword}" \ + -d "grant_type=password" \ + -d "client_id=system-client" \ + -d "scope=openid" | + jq -r '.access_token' 2>/dev/null) +} + +catalogLogin() { + CATALOG_ARGS="--deployment-endpoint ${appOrchEndpoint} --catalog-endpoint ${appOrchEndpoint}" + catalog ${CATALOG_ARGS} logout + + catalog ${CATALOG_ARGS} login --client-id=system-client --trust-cert=true --keycloak https://keycloak.${clusterFqdn}/realms/master ${enspUser} ${enspPassword} +} + +# Provide customized deployment for different packages (particularly target labels) +# Arguments are deploymentPackageName, deploymentPackageVersion +catalogDeploy() { + NAME=$1 + VERSION=$2 + + CATALOG_ARGS="--deployment-endpoint ${appOrchEndpoint} --catalog-endpoint ${appOrchEndpoint} --project ${project}" + case ${NAME} in + dummy-app-package) + catalog ${CATALOG_ARGS} create deployment \ + ${NAME} ${VERSION} --application-label dummy-app.$targetClusterLabel + ;; + ten-dummy-apps) + catalog ${CATALOG_ARGS} create deployment \ + ${NAME} 
${VERSION} --application-label dummy-app-1.$targetClusterLabel \ + --application-label dummy-app-2.$targetClusterLabel \ + --application-label dummy-app-3.$targetClusterLabel \ + --application-label dummy-app-4.$targetClusterLabel \ + --application-label dummy-app-5.$targetClusterLabel \ + --application-label dummy-app-6.$targetClusterLabel \ + --application-label dummy-app-7.$targetClusterLabel \ + --application-label dummy-app-8.$targetClusterLabel \ + --application-label dummy-app-9.$targetClusterLabel \ + --application-label dummy-app-10.$targetClusterLabel + ;; + nginx-app) + catalog ${CATALOG_ARGS} create deployment \ + ${NAME} ${VERSION} --application-label nginx.$targetClusterLabel + ;; + *) + echo "ERROR: No deploy template for deployment package ${NAME}" + exit 1 + ;; + esac +} + +catalogUpload() { + catalogLogin + + CATALOG_ARGS="--deployment-endpoint ${appOrchEndpoint} --catalog-endpoint ${appOrchEndpoint} --project ${project}" + pushd ../../deployment-packages/ + for DPDIR in $(ls -d $containerDeploymentPackageName) + do + cd ${DPDIR}; catalog ${CATALOG_ARGS} upload .; cd .. 
+ done + popd +} + +createContainerDeployment() { + echo Creating container deployment + batch=$1 + appDeploymentsCounter=$(( batch * batchSize)) + + containerAppInstanceCnt=$appDeploymentsCounter + while [ "$containerAppInstanceCnt" -lt "$(( (batch + 1) * batchSize ))" ]; do + # shellcheck disable=SC2090 + # containerAppInstallSpec=$(printf "$containerAppDeploymentTemplate" $containerDeploymentPackageName $containerAppVersion $containerAppInstanceCnt $containerAppProfileName $containerAppPublisherName $containerAppName $containerAppInstanceCnt) + # echo "App Deployment Spec is $containerAppInstallSpec" + # curl -s -X POST "https://app-orch.$clusterFqdn/deployment.orchestrator.apis/v1/deployments" -H 'Content-Type: application/json' -H 'accept: application/json' -H "Authorization: Bearer $orchAPIToken" -d "$containerAppInstallSpec" + catalogLogin + catalogDeploy $containerDeploymentPackageName $containerAppVersion + echo "" + # shellcheck disable=SC2003 + containerAppInstanceCnt=$(expr $containerAppInstanceCnt + 1) + done +} + +deleteDeployments() { + arr=() + + totalAppCnt=$(curl -s "$appOrchEndpoint/v1/projects/$project/appdeployment/deployments" -H 'Content-Type: application/json' -H 'accept: application/json' -H "Authorization: Bearer $orchAPIToken" | jq '.totalElements') + for ((i = 0 ; i < totalAppCnt ; i++ )); + do + currDeployment=$(curl -s "$appOrchEndpoint/v1/projects/$project/appdeployment/deployments?pageSize=${pageSize}&offset=${offset}" -H 'Content-Type: application/json' -H 'accept: application/json' -H "Authorization: Bearer $orchAPIToken" | jq -r '.deployments['$i']') + appName=$(echo $currDeployment | jq -r '.appName') + deployId=$(echo $currDeployment | jq -r '.deployId') + + if [ "$appName" == "$containerDeploymentPackageName" ]; then + arr+=("$deployId") + fi + done + + for d in "${arr[@]}" + do + deployId=$(curl -X DELETE "$appOrchEndpoint/v1/projects/$project/appdeployment/deployments/$d" -H "Authorization: Bearer $orchAPIToken") + echo 
"Deleted deployment $d..." + done + sleep 2 +} + +waitForAllAppsToBeRunning() { + initializeOrchAPIToken + echo Waiting for all apps to be running + +#echo "pagesize print: $pageSize" + while true; do + totalAppCnt=$(curl -s "$appOrchEndpoint/v1/projects/$project/appdeployment/deployments" -H 'Content-Type: application/json' -H 'accept: application/json' -H "Authorization: Bearer $orchAPIToken" | jq '.totalElements') + set +e + totalCnt=0 + for ((i = 0 ; i < totalAppCnt ; i++ )); + do + currDeployment=$(curl -s "$appOrchEndpoint/v1/projects/$project/appdeployment/deployments?pageSize=${pageSize}&offset=${offset}" -H 'Content-Type: application/json' -H 'accept: application/json' -H "Authorization: Bearer $orchAPIToken" | jq -r '.deployments['$i']') + appName=$(echo $currDeployment | jq -r '.appName') +#echo "appname print : $appName" + + if [ "$appName" == "$containerDeploymentPackageName" ]; then + totalCnt=$((totalCnt + 1)) +#echo "totalCnt print : $totalCnt" + fi + done +#echo "totalCnt print out: $totalCnt" +#runningAppCnt=$(curl -s "$appOrchEndpoint/v1/projects/$project/appdeployment/deployments?pageSize=${pageSize}&offset=${offset}" -H 'Content-Type: application/json' -H 'accept: application/json' -H "Authorization: Bearer $orchAPIToken" | jq -r '.deployments[].status.state' | grep -c RUNNING) + runningCnt=0 + for ((i = 0 ; i < totalAppCnt ; i++ )); + do + currDeployment=$(curl -s "$appOrchEndpoint/v1/projects/$project/appdeployment/deployments?pageSize=${pageSize}&offset=${offset}" -H 'Content-Type: application/json' -H 'accept: application/json' -H "Authorization: Bearer $orchAPIToken" | jq -r '.deployments['$i']') + appName=$(echo $currDeployment | jq -r '.appName') + statusVal=$(echo $currDeployment | jq -r '.status.state') +#echo "appname print : $appName" +#echo "statusval print : $statusVal" + + if [[ "$appName" == "$containerDeploymentPackageName" && "$statusVal" == "RUNNING" ]]; then + runningCnt=$((runningCnt + 1)) +#echo "runningCnt print : 
$runningCnt" + fi + done +#echo "runningCnt print out: $runningCnt" + set -e + # curl -s "https://app-orch.$clusterFqdn/deployment.orchestrator.apis/v1/summary/deployments_status" -H 'Content-Type: application/json' -H 'accept: application/json' -H "Authorization: Bearer $orchAPIToken" >"$tmpJSONFile" 2>/dev/null + # Method below does not seem to provide an accurate total app count (see NEX-2988) + # totalAppCnt=$(jq '.total' "$tmpJSONFile") + # runningCnt=$(jq '.running' "$tmpJSONFile") + if [ "$totalCnt" == "$runningCnt" ]; then + echo "All $totalAppCnt apps are running!!" + break + fi + + echo "$runningCnt / $totalCnt apps running. Waiting $interval seconds." + sleep $interval + done +} + +runApiLatencyChecks() { + echo "Running API latency tests... it will take a few mins to capture the results" + initializeOrchAPIToken + # Run API Latency Tests + k6 run --env MY_HOSTNAME=$clusterFqdn --env API_TOKEN="$orchAPIToken" --env PROJECT="$project" ../k6-scripts/adm-api-latency.js -q --no-thresholds --summary-export="$tmpJSONFile" + totalChecksPass=$(jq '.root_group.checks."status is OK".passes' "$tmpJSONFile") + totalChecksFail=$(jq '.root_group.checks."status is OK".fails' "$tmpJSONFile") + avgApiDuration=$(jq '.metrics.http_req_duration.avg' "$tmpJSONFile") + # Capture the result to a csv file to be used for plotting later + echo "$totalAppCnt,$avgApiDuration" >>"$resultsDirectory/$admApiLatencyCsv" + echo "API Latency Check Results: Passed Checks: $totalChecksPass Failed Checks: $totalChecksFail, Avg API Duration: $avgApiDuration" +} + +plotGraph() { + # Define the input CSV file and the output image file + inputCsv="$resultsDirectory/$1" + outputPng="$resultsDirectory/$2" + xLabel=$3 + yLabel=$4 + yRangeMin=$5 + yRangeMax=$6 + yAxisScaleFactor=$7 + graphTitle=$8 + + # If csv files exists, then generate graphs before exiting the script + if [ ! 
-f "$inputCsv" ]; then + return + fi + echo "Plotting graph for $inputCsv" + + # Generate the graph using gnuplot + gnuplot -persist <<-EOFMarker + set datafile separator "," + set terminal png size 800,600 + set output "$outputPng" + set title "$graphTitle" + set xlabel "$xLabel" + set ylabel "$yLabel" + set ytics scale $yAxisScaleFactor + set yrange [$yRangeMin:$yRangeMax] + plot "$inputCsv" using 1:(\$2*$yAxisScaleFactor) with linespoints title "$graphTitle" +EOFMarker + echo "Graph generated for $graphTitle, output file: $outputPng" +} + +collectMetric() { + input=$1 + value="$2" + query=$3 + resultFile=$4 + + value=$(curl -s "$prometheusURL/query" -H "Authorization: Basic ${observabilityApiCredentials}" --data-urlencode "query=$query" | jq -r '.data.result[0].value[1]' 2>/dev/null) + if [ "$value" != "" ]; then + echo "$input,$value" >>"$resultsDirectory/$resultFile" + fi +} + +collectObservabilityMetrics() { + if [ "$observabilityApiCredentials" == "" ]; then + echo "observabilityApiCredentials is nil, cannot collect observability metrics" + return + fi + + # Check that Prometheus is accessible with the provided credentials + status=$(curl -s "$prometheusTestURL" -H "Authorization: Basic ${observabilityApiCredentials}" | jq -r '.status') + if [ "$status" != "OK" ]; then + echo "Unable to query Prometheus with credentials provided, cannot collect observability metrics" + return + fi + + totalApps=$1 + secondSuffix="s" + timeDurationInSec=$2$secondSuffix + + # Remove "_ratio" suffix due observability name changes (e.g., https://github.com/intel-innersource/frameworks.edge.one-intel-edge.observability.platform-dashboard/pull/15/files) + collectMetric "$totalApps" "avgCpu" "sum by(k8s_namespace_name)(avg_over_time(k8s_pod_cpu_utilization{k8s_namespace_name=\"maestro-app-system\"}[$timeDurationInSec]))" $avgCpuMaestroAppSystemCsv + collectMetric "$totalApps" "maxCpu" "sum 
by(k8s_namespace_name)(max_over_time(k8s_pod_cpu_utilization{k8s_namespace_name=\"maestro-app-system\"}[$timeDurationInSec]))" $maxCpuMaestroAppSystemCsv + collectMetric "$totalApps" "avgCpu" "sum by(k8s_namespace_name)(avg_over_time(k8s_pod_cpu_utilization{k8s_namespace_name=\"cattle-system\"}[$timeDurationInSec]))" $avgCpuCattleSystemCsv + collectMetric "$totalApps" "maxCpu" "sum by(k8s_namespace_name)(max_over_time(k8s_pod_cpu_utilization{k8s_namespace_name=\"cattle-system\"}[$timeDurationInSec]))" $maxCpuCattleSystemCsv + collectMetric "$totalApps" "avgCpu" "sum by(k8s_namespace_name)(avg_over_time(k8s_pod_cpu_utilization{k8s_namespace_name=\"cattle-fleet-system\"}[$timeDurationInSec]))" $avgCpuCattleFleetSystemCsv + collectMetric "$totalApps" "maxCpu" "sum by(k8s_namespace_name)(max_over_time(k8s_pod_cpu_utilization{k8s_namespace_name=\"cattle-fleet-system\"}[$timeDurationInSec]))" $maxCpuCattleFleetSystemCsv + + # Remove "_bytes" suffix due observability name changes (e.g., https://github.com/intel-innersource/frameworks.edge.one-intel-edge.observability.platform-dashboard/pull/15/files) + collectMetric "$totalApps" "avgMem" "sum by(k8s_namespace_name)(avg_over_time(k8s_pod_memory_usage{k8s_namespace_name=\"maestro-app-system\"}[$timeDurationInSec]))" $avgRamMaestroAppSystemCsv + collectMetric "$totalApps" "maxMem" "sum by(k8s_namespace_name)(max_over_time(k8s_pod_memory_usage{k8s_namespace_name=\"maestro-app-system\"}[$timeDurationInSec]))" $maxRamMaestroAppSystemCsv + collectMetric "$totalApps" "avgMem" "sum by(k8s_namespace_name)(avg_over_time(k8s_pod_memory_usage{k8s_namespace_name=\"cattle-system\"}[$timeDurationInSec]))" $avgRamCattleSystemCsv + collectMetric "$totalApps" "maxMem" "sum by(k8s_namespace_name)(max_over_time(k8s_pod_memory_usage{k8s_namespace_name=\"cattle-system\"}[$timeDurationInSec]))" $maxRamCattleSystemCsv + collectMetric "$totalApps" "avgMem" "sum 
by(k8s_namespace_name)(avg_over_time(k8s_pod_memory_usage{k8s_namespace_name=\"cattle-fleet-system\"}[$timeDurationInSec]))" $avgRamCattleFleetSystemCsv + collectMetric "$totalApps" "maxMem" "sum by(k8s_namespace_name)(max_over_time(k8s_pod_memory_usage{k8s_namespace_name=\"cattle-fleet-system\"}[$timeDurationInSec]))" $maxRamCattleFleetSystemCsv +} + +cleanup() { + # Remove temp file used to capture transient results + rm -f "$tmpJSONFile" + + plotGraph "$appInstallTimeCsv" "$appInstallTimePng" "Deployment #" "Seconds until Running on all edges" 0 500 1 "Time to Running on 1K edges, per Deployment" + plotGraph "$admApiLatencyCsv" "$admApiLatencyPng" "Total Deployments" "Avg API latency in ms" 0 1000 1 "Avg ADM API Latency by # of Deployments (1K edges)" + plotGraph "$avgCpuMaestroAppSystemCsv" "$avgCpuMaestroAppSystemPng" "Total Deployments" "Avg CPU usage" 0 10 1 "Avg App Orch CPU usage by # of Deployments (1K edges)" + plotGraph "$avgCpuCattleSystemCsv" "$avgCpuCattleSystemPng" "Total Deployments" "Avg CPU usage" 0 10 1 "Avg Rancher CPU usage by # of Deployments (1K edges)" + plotGraph "$avgCpuCattleFleetSystemCsv" "$avgCpuCattleFleetSystemPng" "Total Deployments" "Avg CPU usage" 0 40 1 "Avg Fleet CPU usage by # of Deployments (1K edges)" + plotGraph "$maxCpuMaestroAppSystemCsv" "$maxCpuMaestroAppSystemPng" "Total Deployments" "Max CPU usage" 0 10 1 "Max App Orch CPU usages by # of Deployments (1K edges)" + plotGraph "$maxCpuCattleSystemCsv" "$maxCpuCattleSystemPng" "Total Deployments" "Max CPU usage" 0 10 1 "Max Rancher CPU usage by # of Deployments (1K edges)" + plotGraph "$maxCpuCattleFleetSystemCsv" "$maxCpuCattleFleetSystemPng" "Total Deployments" "Max CPU usage" 0 40 1 "Max Fleet CPU usage by # of Deployments (1K edges)" + plotGraph "$avgRamMaestroAppSystemCsv" "$avgRamMaestroAppSystemPng" "Total Deployments" "Avg RAM usage in MB" 500 10000 0.000001 "Avg App Orch RAM usage by # of Deployments (1K edges)" + plotGraph "$avgRamCattleSystemCsv" 
"$avgRamCattleSystemPng" "Total Deployments" "Avg RAM usage in MB" 500 40000 0.000001 "Avg Rancher RAM usage by # of Deployments (1K edges)" + plotGraph "$avgRamCattleFleetSystemCsv" "$avgRamCattleFleetSystemPng" "Total Deployments" "Avg RAM usage in MB" 500 10000 0.000001 "Avg Fleet RAM usage by # of Deployments (1K edges)" + plotGraph "$maxRamMaestroAppSystemCsv" "$maxRamMaestroAppSystemPng" "Total Deployments" "Max RAM usage in MB" 500 10000 0.000001 "Max App Orch RAM usage by # of Deployments (1K edges)" + plotGraph "$maxRamCattleSystemCsv" "$maxRamCattleSystemPng" "Total Deployments" "Max RAM usage in MB" 500 40000 0.000001 "Max Rancher RAM usage by # of Deployments (1K edges)" + plotGraph "$maxRamCattleFleetSystemCsv" "$maxRamCattleFleetSystemPng" "Total Deployments" "Max RAM usage in MB" 500 10000 0.000001 "Max Fleet RAM usage by # of Deployments (1K edges)" +} + +########################### Main Script Starts here ########################### + +# Trap multiple signals +trap cleanup EXIT SIGINT SIGTERM + +# Read input arguments +readInputArgs "$@" + +if [ "$totalApps" == 0 ]; then + echo "No apps to be setup, exit.." + exit 0 +fi + +# Initialize Orchestrator Keycloak Token to be used for API access +initializeOrchAPIToken +echo "Initialized Orchestrator Keycloak Token" + +deleteDeployments +echo "Deleted previous deployments" + +# Create directory to store results +mkdir -p "$resultsDirectory" +echo "Created directory $resultsDirectory" + +if [ "$totalApps" -lt "$batchSize" ]; then + batchSize=$totalApps +fi + +echo "Creating $totalApps apps..." + +catalogUpload +echo "Uploaded deployment packages" + +totalBatches=$((totalApps / batchSize)) +remainder=$((totalApps % batchSize)) +if [ $remainder -ne 0 ]; then + # shellcheck disable=SC2003 + totalBatches=$(expr $totalBatches + 1) +fi + +counter=0 + +# Install apps +while [ $counter -lt "$totalBatches" ]; do + testStartTime=$(date +%s) + + # Initialize Orch API Token again to be safe. 
Token could have possibly expired while + # we installed previous batch of clusters or could expire shortly + initializeOrchAPIToken + + createContainerDeployment $counter + + # Wait for all apps to running + appInstallStartTime=$(date +%s) + waitForAllAppsToBeRunning + + appInstallEndTime=$(date +%s) + echo Total time for all apps to be running at iteration:$counter: $((appInstallEndTime - appInstallStartTime)) seconds + + # Capture the result to a csv file to be used for plotting later + echo "$totalAppCnt,$((appInstallEndTime - appInstallStartTime))" >>"$resultsDirectory/$appInstallTimeCsv" + + # Run API Latency Checks + runApiLatencyChecks + testEndTime=$(date +%s) + echo "Total Test Run Time for Batch$counter: $((testEndTime - testStartTime))s" + + # Collect metrics from observability APIs + collectObservabilityMetrics "$totalAppCnt" $((testEndTime - testStartTime)) + + # shellcheck disable=SC2003 + counter=$(expr $counter + 1) +done + diff --git a/scripts/app-scale-tester/add-N-apps.sh b/scripts/app-scale-tester/add-N-apps.sh new file mode 100755 index 00000000..6095a02f --- /dev/null +++ b/scripts/app-scale-tester/add-N-apps.sh @@ -0,0 +1,525 @@ +#!/bin/bash +set -eu + +if [ $# == 0 ]; then + echo "specify number of apps to setup" + exit 1 +fi + +# Initialize some global variables and constants +clusterFqdn=scale.espd.infra-host.com # Change if needed +# clusterFqdn=kind.internal # Change if needed +enspUser=intel-itep-user && enspPassword=ChangeMeOn1stLogin! +prometheusURL="https://observability-admin.${clusterFqdn}/api/datasources/uid/orchestrator-mimir/resources/api/v1" +prometheusTestURL="https://observability-admin.${clusterFqdn}/api/datasources/uid/orchestrator-mimir/health" +project=itep +targetClusterLabel="scale=adm" + +## App install specific variables +totalApps=$1 # Total apps to install - input argument +batchSize=10 # How many apps to install in parallel? 
+interval=10 +pageSize=100 # Adjust pageSize as needed for querying elements on APIs +offset=0 +runTests=1 + +## Token specific variables +orchAPIToken="" # Orchestrator API Token +observabilityApiCredentials="" # Provide a valid token for the test. + +## App install specific variables +containerDeploymentPackageName="nginx-app-local" +containerAppName="nginx-local" +containerAppVersion="0.1.0" +containerAppProfileName="default-profile" +containerAppPublisherName="default" +totalContainerAppInstancesPerCluster=3 + +# Container deployment template -- currently not used but kept for reference. +# The template approach gets complex when we have a Deployment with multiple apps. +# shellcheck disable=SC2089 +containerAppDeploymentTemplate='{"appName":"%s","appVersion":"%s","name":"dummy-app-%d","profileName":"%s","publisherName":"%s","targetClusters":[{"appName":"%s", "labels":{"target":"scale"}}],"displayName":"dummy-app-%d","deploymentType":"auto-scaling","overrideValues":[]}' + +# CSV Files to capture result +currentTimeStamp=$(date -u +"%Y%m%d-%H%M%S") +resultsDirectory="./test-results/$currentTimeStamp" +tmpJSONFile="$resultsDirectory/tmp.json" +appInstallTimeCsv="app-install-time.csv" +admApiLatencyCsv="adm-api-latency.csv" +#armApiLatencyCsv="arm-api-latency.csv" +avgCpuMaestroAppSystemCsv="avg-cpu-usage-ma.csv" +avgCpuCattleSystemCsv="avg-cpu-usage-cattle-system.csv" +avgCpuCattleFleetSystemCsv="avg-cpu-usage-cattle-fleet-system.csv" +maxCpuMaestroAppSystemCsv="max-cpu-usage-ma.csv" +maxCpuCattleSystemCsv="max-cpu-usage-cattle-system.csv" +maxCpuCattleFleetSystemCsv="max-cpu-usage-cattle-fleet-system.csv" +avgRamMaestroAppSystemCsv="avg-ram-usage-ma.csv" +avgRamCattleSystemCsv="avg-ram-usage-cattle-system.csv" +avgRamCattleFleetSystemCsv="avg-ram-usage-cattle-fleet-system.csv" +maxRamMaestroAppSystemCsv="max-ram-usage-ma.csv" +maxRamCattleSystemCsv="max-ram-usage-cattle-system.csv" +maxRamCattleFleetSystemCsv="max-ram-usage-cattle-fleet-system.csv" + +# Png 
files generated from CSV file +appInstallTimePng="app-install-time.png" +admApiLatencyPng="adm-api-latency.png" +#armApiLatencyPng="arm-api-latency.png" +avgCpuMaestroAppSystemPng="avg-cpu-usage-ma.png" +avgCpuCattleSystemPng="avg-cpu-usage-cattle-system.png" +avgCpuCattleFleetSystemPng="avg-cpu-usage-cattle-fleet-system.png" +maxCpuMaestroAppSystemPng="max-cpu-usage-ma.png" +maxCpuCattleSystemPng="max-cpu-usage-cattle-system.png" +maxCpuCattleFleetSystemPng="max-cpu-usage-cattle-fleet-system.png" +avgRamMaestroAppSystemPng="avg-ram-usage-ma.png" +avgRamCattleSystemPng="avg-ram-usage-cattle-system.png" +avgRamCattleFleetSystemPng="avg-ram-usage-cattle-fleet-system.png" +maxRamMaestroAppSystemPng="max-ram-usage-ma.png" +maxRamCattleSystemPng="max-ram-usage-cattle-system.png" +maxRamCattleFleetSystemPng="max-ram-usage-cattle-fleet-system.png" + +appOrchEndpoint="https://api.${clusterFqdn}" + +# Function to display usage help +usage() { + echo "Usage: $0 [options] [--] [arguments]" + echo + echo "Options:" + echo " -u VALUE Keycloak username, default all-groups-example-user" + echo " -p VALUE Keycloak password, default ChangeMeOn1stLogin!" 
+ echo " -f VALUE Orch FQDN, default integration12.espd.infra-host.com" + echo " -b VALUE Cluster install batch size, default 10" + echo " -o VALUE Observability API credentials base64 encoded" + echo " -a VALUE Apps per cluster, default 3" + echo " -d VALUE Deployment package name to deploy, default dummy-app-package" + echo " -v VALUE Deployment package version to deploy, default 0.0.1" + echo " -g VALUE Don't run tests, only regenerate graphs using the specified results directory" + echo " -m VALUE project, default itep" + echo " -h Print this help menu" + + echo + echo "Example:" + echo " $0 -a 10 -b 2" + exit 1 +} + +readInputArgs() { + while [[ $# -gt 0 ]]; do + case "$1" in + -u | --user) + echo "Username: $2" + enspUser=$2 + shift # Shift past the option + shift # Shift past the value + ;; + -p | --password) + echo "Password: $2" + enspPassword=$2 + shift # Shift past the option + shift # Shift past the value + ;; + -h | --help) + usage + shift + ;; + -f | --fqdn) + echo "FQDN: $2" + clusterFqdn=$2 + shift # Shift past the option + shift # Shift past the value + ;; + -b | --batch-size) + echo "Cluster Install batch size: $2" + batchSize=$2 + shift # Shift past the option + shift # Shift past the value + ;; + -o | --observability-api-cred) + echo "Observability API Credentials base64 encoded: $2" + observabilityApiCredentials=$2 + shift # Shift past the option + shift # Shift past the value + ;; + -a | --total-apps-per-cluster) + echo "Apps per enic: $2" + totalApps=$2 + shift # Shift past the option + shift # Shift past the value + ;; + -d | --deployment-package-name) + echo "Deployment package name: $2" + containerDeploymentPackageName=$2 + shift # Shift past the option + shift # Shift past the value + ;; + -v | --deployment-package-version) + echo "Deployment-package-version: $2" + containerAppVersion=$2 + shift # Shift past the option + shift # Shift past the value + ;; + -m | --project) + echo "Project: $2" + project=$2 + shift # Shift past the option + 
shift # Shift past the value + ;; + -g) + echo "Results directory: $2" + resultsDirectory=$2 + exit 0 + ;; + *) + echo "Unknown option: $1" + shift # Shift past the unknown option + ;; + esac + done +} + +# Function to initialize Orchestrator API Token +initializeOrchAPIToken() { + orchAPIToken=$(curl -s --location --request POST https://keycloak.${clusterFqdn}/realms/master/protocol/openid-connect/token \ + -H 'Content-Type: application/x-www-form-urlencoded' \ + -d "username=${enspUser}" \ + -d "password=${enspPassword}" \ + -d "grant_type=password" \ + -d "client_id=system-client" \ + -d "scope=openid" | + jq -r '.access_token' 2>/dev/null) +} + +catalogLogin() { + CATALOG_ARGS="--deployment-endpoint ${appOrchEndpoint} --catalog-endpoint ${appOrchEndpoint}" + catalog ${CATALOG_ARGS} logout + + catalog ${CATALOG_ARGS} login --client-id=system-client --trust-cert=true --keycloak https://keycloak.${clusterFqdn}/realms/master ${enspUser} ${enspPassword} +} + +# Provide customized deployment for different packages (particularly target labels) +# Arguments are deploymentPackageName, deploymentPackageVersion +catalogDeploy() { + NAME=$1 + VERSION=$2 + + CATALOG_ARGS="--deployment-endpoint ${appOrchEndpoint} --catalog-endpoint ${appOrchEndpoint} --project ${project}" + case ${NAME} in + dummy-app-package) + catalog ${CATALOG_ARGS} create deployment \ + ${NAME} ${VERSION} --application-label dummy-app.$targetClusterLabel + ;; + ten-dummy-apps) + catalog ${CATALOG_ARGS} create deployment \ + ${NAME} ${VERSION} --application-label dummy-app-1.$targetClusterLabel \ + --application-label dummy-app-2.$targetClusterLabel \ + --application-label dummy-app-3.$targetClusterLabel \ + --application-label dummy-app-4.$targetClusterLabel \ + --application-label dummy-app-5.$targetClusterLabel \ + --application-label dummy-app-6.$targetClusterLabel \ + --application-label dummy-app-7.$targetClusterLabel \ + --application-label dummy-app-8.$targetClusterLabel \ + --application-label 
dummy-app-9.$targetClusterLabel \ + --application-label dummy-app-10.$targetClusterLabel + ;; + nginx-app-local) + catalog ${CATALOG_ARGS} create deployment \ + ${NAME} ${VERSION} --application-label nginx-local.$targetClusterLabel + ;; + *) + echo "ERROR: No deploy template for deployment package ${NAME}" + exit 1 + ;; + esac +} + +catalogUpload() { + catalogLogin + + CATALOG_ARGS="--deployment-endpoint ${appOrchEndpoint} --catalog-endpoint ${appOrchEndpoint} --project ${project}" + pushd ../../deployment-packages/ + for DPDIR in $(ls -d $containerDeploymentPackageName) + do + cd ${DPDIR}; catalog ${CATALOG_ARGS} upload .; cd .. + done + popd +} + +createContainerDeployment() { + echo Creating container deployment + batch=$1 + appDeploymentsCounter=$(( batch * batchSize)) + + containerAppInstanceCnt=$appDeploymentsCounter + while [ "$containerAppInstanceCnt" -lt "$(( (batch + 1) * batchSize ))" ]; do + # shellcheck disable=SC2090 + # containerAppInstallSpec=$(printf "$containerAppDeploymentTemplate" $containerDeploymentPackageName $containerAppVersion $containerAppInstanceCnt $containerAppProfileName $containerAppPublisherName $containerAppName $containerAppInstanceCnt) + # echo "App Deployment Spec is $containerAppInstallSpec" + # curl -s -X POST "https://app-orch.$clusterFqdn/deployment.orchestrator.apis/v1/deployments" -H 'Content-Type: application/json' -H 'accept: application/json' -H "Authorization: Bearer $orchAPIToken" -d "$containerAppInstallSpec" + catalogLogin + catalogDeploy $containerDeploymentPackageName $containerAppVersion + echo "" + # shellcheck disable=SC2003 + containerAppInstanceCnt=$(expr $containerAppInstanceCnt + 1) + done +} + +deleteDeployments() { + arr=() + + totalAppCnt=$(curl -s "$appOrchEndpoint/v1/projects/$project/appdeployment/deployments" -H 'Content-Type: application/json' -H 'accept: application/json' -H "Authorization: Bearer $orchAPIToken" | jq '.totalElements') + for ((i = 0 ; i < totalAppCnt ; i++ )); + do + 
currDeployment=$(curl -s "$appOrchEndpoint/v1/projects/$project/appdeployment/deployments?pageSize=${pageSize}&offset=${offset}" -H 'Content-Type: application/json' -H 'accept: application/json' -H "Authorization: Bearer $orchAPIToken" | jq -r '.deployments['$i']') + appName=$(echo $currDeployment | jq -r '.appName') + deployId=$(echo $currDeployment | jq -r '.deployId') + + if [ "$appName" == "$containerDeploymentPackageName" ]; then + arr+=("$deployId") + fi + done + + for d in "${arr[@]}" + do + deployId=$(curl -X DELETE "$appOrchEndpoint/v1/projects/$project/appdeployment/deployments/$d" -H "Authorization: Bearer $orchAPIToken") + echo "Deleted deployment $d..." + done + sleep 2 +} + +waitForAllAppsToBeRunning() { + initializeOrchAPIToken + echo Waiting for all apps to be running + +#echo "pagesize print: $pageSize" + while true; do + totalAppCnt=$(curl -s "$appOrchEndpoint/v1/projects/$project/appdeployment/deployments" -H 'Content-Type: application/json' -H 'accept: application/json' -H "Authorization: Bearer $orchAPIToken" | jq '.totalElements') + set +e + totalCnt=0 + for ((i = 0 ; i < totalAppCnt ; i++ )); + do + currDeployment=$(curl -s "$appOrchEndpoint/v1/projects/$project/appdeployment/deployments?pageSize=${pageSize}&offset=${offset}" -H 'Content-Type: application/json' -H 'accept: application/json' -H "Authorization: Bearer $orchAPIToken" | jq -r '.deployments['$i']') + appName=$(echo $currDeployment | jq -r '.appName') +#echo "appname print : $appName" + + if [ "$appName" == "$containerDeploymentPackageName" ]; then + totalCnt=$((totalCnt + 1)) +#echo "totalCnt print : $totalCnt" + fi + done +#echo "totalCnt print out: $totalCnt" +#runningAppCnt=$(curl -s "$appOrchEndpoint/v1/projects/$project/appdeployment/deployments?pageSize=${pageSize}&offset=${offset}" -H 'Content-Type: application/json' -H 'accept: application/json' -H "Authorization: Bearer $orchAPIToken" | jq -r '.deployments[].status.state' | grep -c RUNNING) + runningCnt=0 + for ((i = 0 ; 
i < totalAppCnt ; i++ )); + do + currDeployment=$(curl -s "$appOrchEndpoint/v1/projects/$project/appdeployment/deployments?pageSize=${pageSize}&offset=${offset}" -H 'Content-Type: application/json' -H 'accept: application/json' -H "Authorization: Bearer $orchAPIToken" | jq -r '.deployments['$i']') + appName=$(echo $currDeployment | jq -r '.appName') + statusVal=$(echo $currDeployment | jq -r '.status.state') + statusInstance=$(echo $currDeployment | jq -r '.status.summary.running') +#echo "appname print : $appName" +#echo "statusval print : $statusVal" + + if [[ "$appName" == "$containerDeploymentPackageName" && "$statusVal" == "RUNNING" && "$statusInstance" == "101" ]]; then + runningCnt=$((runningCnt + 1)) +#echo "runningCnt print : $runningCnt" + fi + done +#echo "runningCnt print out: $runningCnt" + set -e + # curl -s "https://app-orch.$clusterFqdn/deployment.orchestrator.apis/v1/summary/deployments_status" -H 'Content-Type: application/json' -H 'accept: application/json' -H "Authorization: Bearer $orchAPIToken" >"$tmpJSONFile" 2>/dev/null + # Method below does not seem to provide an accurate total app count (see NEX-2988) + # totalAppCnt=$(jq '.total' "$tmpJSONFile") + # runningCnt=$(jq '.running' "$tmpJSONFile") + if [ "$totalCnt" == "$runningCnt" ]; then + echo "All $totalAppCnt apps are running!!" + break + fi + + echo "$runningCnt / $totalCnt apps running. Waiting $interval seconds." + sleep $interval + done +} + +runApiLatencyChecks() { + echo "Running API latency tests... 
it will take a few mins to capture the results" + initializeOrchAPIToken + # Run API Latency Tests + k6 run --env MY_HOSTNAME=$clusterFqdn --env API_TOKEN="$orchAPIToken" --env PROJECT="$project" ../k6-scripts/adm-api-latency.js -q --no-thresholds --summary-export="$tmpJSONFile" + totalChecksPass=$(jq '.root_group.checks."status is OK".passes' "$tmpJSONFile") + totalChecksFail=$(jq '.root_group.checks."status is OK".fails' "$tmpJSONFile") + avgApiDuration=$(jq '.metrics.http_req_duration.avg' "$tmpJSONFile") + # Capture the result to a csv file to be used for plotting later + echo "$totalAppCnt,$avgApiDuration" >>"$resultsDirectory/$admApiLatencyCsv" + echo "API Latency Check Results: Passed Checks: $totalChecksPass Failed Checks: $totalChecksFail, Avg API Duration: $avgApiDuration" +} + +plotGraph() { + # Define the input CSV file and the output image file + inputCsv="$resultsDirectory/$1" + outputPng="$resultsDirectory/$2" + xLabel=$3 + yLabel=$4 + yRangeMin=$5 + yRangeMax=$6 + yAxisScaleFactor=$7 + graphTitle=$8 + + # If csv files exists, then generate graphs before exiting the script + if [ ! 
-f "$inputCsv" ]; then + return + fi + echo "Plotting graph for $inputCsv" + + # Generate the graph using gnuplot + gnuplot -persist <<-EOFMarker + set datafile separator "," + set terminal png size 800,600 + set output "$outputPng" + set title "$graphTitle" + set xlabel "$xLabel" + set ylabel "$yLabel" + set ytics scale $yAxisScaleFactor + set yrange [$yRangeMin:$yRangeMax] + plot "$inputCsv" using 1:(\$2*$yAxisScaleFactor) with linespoints title "$graphTitle" +EOFMarker + echo "Graph generated for $graphTitle, output file: $outputPng" +} + +collectMetric() { + input=$1 + value="$2" + query=$3 + resultFile=$4 + + value=$(curl -s "$prometheusURL/query" -H "Authorization: Basic ${observabilityApiCredentials}" --data-urlencode "query=$query" | jq -r '.data.result[0].value[1]' 2>/dev/null) + if [ "$value" != "" ]; then + echo "$input,$value" >>"$resultsDirectory/$resultFile" + fi +} + +collectObservabilityMetrics() { + if [ "$observabilityApiCredentials" == "" ]; then + echo "observabilityApiCredentials is nil, cannot collect observability metrics" + return + fi + + # Check that Prometheus is accessible with the provided credentials + status=$(curl -s "$prometheusTestURL" -H "Authorization: Basic ${observabilityApiCredentials}" | jq -r '.status') + if [ "$status" != "OK" ]; then + echo "Unable to query Prometheus with credentials provided, cannot collect observability metrics" + return + fi + + totalApps=$1 + secondSuffix="s" + timeDurationInSec=$2$secondSuffix + + # Remove "_ratio" suffix due observability name changes (e.g., https://github.com/intel-innersource/frameworks.edge.one-intel-edge.observability.platform-dashboard/pull/15/files) + collectMetric "$totalApps" "avgCpu" "sum by(k8s_namespace_name)(avg_over_time(k8s_pod_cpu_utilization{k8s_namespace_name=\"maestro-app-system\"}[$timeDurationInSec]))" $avgCpuMaestroAppSystemCsv + collectMetric "$totalApps" "maxCpu" "sum 
by(k8s_namespace_name)(max_over_time(k8s_pod_cpu_utilization{k8s_namespace_name=\"maestro-app-system\"}[$timeDurationInSec]))" $maxCpuMaestroAppSystemCsv + collectMetric "$totalApps" "avgCpu" "sum by(k8s_namespace_name)(avg_over_time(k8s_pod_cpu_utilization{k8s_namespace_name=\"cattle-system\"}[$timeDurationInSec]))" $avgCpuCattleSystemCsv + collectMetric "$totalApps" "maxCpu" "sum by(k8s_namespace_name)(max_over_time(k8s_pod_cpu_utilization{k8s_namespace_name=\"cattle-system\"}[$timeDurationInSec]))" $maxCpuCattleSystemCsv + collectMetric "$totalApps" "avgCpu" "sum by(k8s_namespace_name)(avg_over_time(k8s_pod_cpu_utilization{k8s_namespace_name=\"cattle-fleet-system\"}[$timeDurationInSec]))" $avgCpuCattleFleetSystemCsv + collectMetric "$totalApps" "maxCpu" "sum by(k8s_namespace_name)(max_over_time(k8s_pod_cpu_utilization{k8s_namespace_name=\"cattle-fleet-system\"}[$timeDurationInSec]))" $maxCpuCattleFleetSystemCsv + + # Remove "_bytes" suffix due observability name changes (e.g., https://github.com/intel-innersource/frameworks.edge.one-intel-edge.observability.platform-dashboard/pull/15/files) + collectMetric "$totalApps" "avgMem" "sum by(k8s_namespace_name)(avg_over_time(k8s_pod_memory_usage{k8s_namespace_name=\"maestro-app-system\"}[$timeDurationInSec]))" $avgRamMaestroAppSystemCsv + collectMetric "$totalApps" "maxMem" "sum by(k8s_namespace_name)(max_over_time(k8s_pod_memory_usage{k8s_namespace_name=\"maestro-app-system\"}[$timeDurationInSec]))" $maxRamMaestroAppSystemCsv + collectMetric "$totalApps" "avgMem" "sum by(k8s_namespace_name)(avg_over_time(k8s_pod_memory_usage{k8s_namespace_name=\"cattle-system\"}[$timeDurationInSec]))" $avgRamCattleSystemCsv + collectMetric "$totalApps" "maxMem" "sum by(k8s_namespace_name)(max_over_time(k8s_pod_memory_usage{k8s_namespace_name=\"cattle-system\"}[$timeDurationInSec]))" $maxRamCattleSystemCsv + collectMetric "$totalApps" "avgMem" "sum 
by(k8s_namespace_name)(avg_over_time(k8s_pod_memory_usage{k8s_namespace_name=\"cattle-fleet-system\"}[$timeDurationInSec]))" $avgRamCattleFleetSystemCsv + collectMetric "$totalApps" "maxMem" "sum by(k8s_namespace_name)(max_over_time(k8s_pod_memory_usage{k8s_namespace_name=\"cattle-fleet-system\"}[$timeDurationInSec]))" $maxRamCattleFleetSystemCsv +} + +cleanup() { + # Remove temp file used to capture transient results + rm -f "$tmpJSONFile" + + plotGraph "$appInstallTimeCsv" "$appInstallTimePng" "Deployment #" "Seconds until Running on all edges" 0 500 1 "Time to Running on 1K edges, per Deployment" + plotGraph "$admApiLatencyCsv" "$admApiLatencyPng" "Total Deployments" "Avg API latency in ms" 0 1000 1 "Avg ADM API Latency by # of Deployments (1K edges)" + plotGraph "$avgCpuMaestroAppSystemCsv" "$avgCpuMaestroAppSystemPng" "Total Deployments" "Avg CPU usage" 0 10 1 "Avg App Orch CPU usage by # of Deployments (1K edges)" + plotGraph "$avgCpuCattleSystemCsv" "$avgCpuCattleSystemPng" "Total Deployments" "Avg CPU usage" 0 10 1 "Avg Rancher CPU usage by # of Deployments (1K edges)" + plotGraph "$avgCpuCattleFleetSystemCsv" "$avgCpuCattleFleetSystemPng" "Total Deployments" "Avg CPU usage" 0 40 1 "Avg Fleet CPU usage by # of Deployments (1K edges)" + plotGraph "$maxCpuMaestroAppSystemCsv" "$maxCpuMaestroAppSystemPng" "Total Deployments" "Max CPU usage" 0 10 1 "Max App Orch CPU usages by # of Deployments (1K edges)" + plotGraph "$maxCpuCattleSystemCsv" "$maxCpuCattleSystemPng" "Total Deployments" "Max CPU usage" 0 10 1 "Max Rancher CPU usage by # of Deployments (1K edges)" + plotGraph "$maxCpuCattleFleetSystemCsv" "$maxCpuCattleFleetSystemPng" "Total Deployments" "Max CPU usage" 0 40 1 "Max Fleet CPU usage by # of Deployments (1K edges)" + plotGraph "$avgRamMaestroAppSystemCsv" "$avgRamMaestroAppSystemPng" "Total Deployments" "Avg RAM usage in MB" 500 10000 0.000001 "Avg App Orch RAM usage by # of Deployments (1K edges)" + plotGraph "$avgRamCattleSystemCsv" 
"$avgRamCattleSystemPng" "Total Deployments" "Avg RAM usage in MB" 500 40000 0.000001 "Avg Rancher RAM usage by # of Deployments (1K edges)" + plotGraph "$avgRamCattleFleetSystemCsv" "$avgRamCattleFleetSystemPng" "Total Deployments" "Avg RAM usage in MB" 500 10000 0.000001 "Avg Fleet RAM usage by # of Deployments (1K edges)" + plotGraph "$maxRamMaestroAppSystemCsv" "$maxRamMaestroAppSystemPng" "Total Deployments" "Max RAM usage in MB" 500 10000 0.000001 "Max App Orch RAM usage by # of Deployments (1K edges)" + plotGraph "$maxRamCattleSystemCsv" "$maxRamCattleSystemPng" "Total Deployments" "Max RAM usage in MB" 500 40000 0.000001 "Max Rancher RAM usage by # of Deployments (1K edges)" + plotGraph "$maxRamCattleFleetSystemCsv" "$maxRamCattleFleetSystemPng" "Total Deployments" "Max RAM usage in MB" 500 10000 0.000001 "Max Fleet RAM usage by # of Deployments (1K edges)" +} + +########################### Main Script Starts here ########################### + +# Trap multiple signals +trap cleanup EXIT SIGINT SIGTERM + +# Read input arguments +readInputArgs "$@" + +if [ "$totalApps" == 0 ]; then + echo "No apps to be setup, exit.." + exit 0 +fi + +# Initialize Orchestrator Keycloak Token to be used for API access +initializeOrchAPIToken +echo "Initialized Orchestrator Keycloak Token" + +deleteDeployments +echo "Deleted previous deployments" + +# Create directory to store results +mkdir -p "$resultsDirectory" +echo "Created directory $resultsDirectory" + +if [ "$totalApps" -lt "$batchSize" ]; then + batchSize=$totalApps +fi + +echo "Creating $totalApps apps..." + +catalogUpload +echo "Uploaded deployment packages" + +totalBatches=$((totalApps / batchSize)) +remainder=$((totalApps % batchSize)) +if [ $remainder -ne 0 ]; then + # shellcheck disable=SC2003 + totalBatches=$(expr $totalBatches + 1) +fi + +counter=0 + +# Install apps +while [ $counter -lt "$totalBatches" ]; do + testStartTime=$(date +%s) + + # Initialize Orch API Token again to be safe. 
Token could have possibly expired while + # we installed previous batch of clusters or could expire shortly + initializeOrchAPIToken + + createContainerDeployment $counter + + # Wait for all apps to running + appInstallStartTime=$(date +%s) + waitForAllAppsToBeRunning + + appInstallEndTime=$(date +%s) + echo Total time for all apps to be running at iteration:$counter: $((appInstallEndTime - appInstallStartTime)) seconds + + # Capture the result to a csv file to be used for plotting later + echo "$totalAppCnt,$((appInstallEndTime - appInstallStartTime))" >>"$resultsDirectory/$appInstallTimeCsv" + + # Run API Latency Checks + runApiLatencyChecks + testEndTime=$(date +%s) + echo "Total Test Run Time for Batch$counter: $((testEndTime - testStartTime))s" + + # Collect metrics from observability APIs + collectObservabilityMetrics "$totalAppCnt" $((testEndTime - testStartTime)) + + # shellcheck disable=SC2003 + counter=$(expr $counter + 1) +done + diff --git a/scripts/app-scale-tester/add-N-apps_dummyapp.sh b/scripts/app-scale-tester/add-N-apps_dummyapp.sh new file mode 100755 index 00000000..f1f4ef5c --- /dev/null +++ b/scripts/app-scale-tester/add-N-apps_dummyapp.sh @@ -0,0 +1,525 @@ +#!/bin/bash +set -eu + +if [ $# == 0 ]; then + echo "specify number of apps to setup" + exit 1 +fi + +# Initialize some global variables and constants +clusterFqdn=onpremv3rc1.espdqa.infra-host.com # Change if needed +# clusterFqdn=kind.internal # Change if needed +enspUser=sample-project-api-user && enspPassword=ChangeMeOn1stLogin! +prometheusURL="https://observability-admin.${clusterFqdn}/api/datasources/uid/orchestrator-mimir/resources/api/v1" +prometheusTestURL="https://observability-admin.${clusterFqdn}/api/datasources/uid/orchestrator-mimir/health" +project=sample-project +targetClusterLabel="default-extension=baseline" + +## App install specific variables +totalApps=$1 # Total apps to install - input argument +batchSize=10 # How many apps to install in parallel? 
+interval=10 +pageSize=100 # Adjust pageSize as needed for querying elements on APIs +offset=0 +runTests=1 + +## Token specific variables +orchAPIToken="" # Orchestrator API Token +observabilityApiCredentials="" # Provide a valid token for the test. + +## App install specific variables +containerDeploymentPackageName="dummy-app-package" +containerAppName="dummy-app" +containerAppVersion="0.0.1" +containerAppProfileName="default-profile" +containerAppPublisherName="default" +totalContainerAppInstancesPerCluster=3 + +# Container deployment template -- currently not used but kept for reference. +# The template approach gets complex when we have a Deployment with multiple apps. +# shellcheck disable=SC2089 +containerAppDeploymentTemplate='{"appName":"%s","appVersion":"%s","name":"dummy-app-%d","profileName":"%s","publisherName":"%s","targetClusters":[{"appName":"%s", "labels":{"target":"scale"}}],"displayName":"dummy-app-%d","deploymentType":"auto-scaling","overrideValues":[]}' + +# CSV Files to capture result +currentTimeStamp=$(date -u +"%Y%m%d-%H%M%S") +resultsDirectory="./test-results/$currentTimeStamp" +tmpJSONFile="$resultsDirectory/tmp.json" +appInstallTimeCsv="app-install-time.csv" +admApiLatencyCsv="adm-api-latency.csv" +#armApiLatencyCsv="arm-api-latency.csv" +avgCpuMaestroAppSystemCsv="avg-cpu-usage-ma.csv" +avgCpuCattleSystemCsv="avg-cpu-usage-cattle-system.csv" +avgCpuCattleFleetSystemCsv="avg-cpu-usage-cattle-fleet-system.csv" +maxCpuMaestroAppSystemCsv="max-cpu-usage-ma.csv" +maxCpuCattleSystemCsv="max-cpu-usage-cattle-system.csv" +maxCpuCattleFleetSystemCsv="max-cpu-usage-cattle-fleet-system.csv" +avgRamMaestroAppSystemCsv="avg-ram-usage-ma.csv" +avgRamCattleSystemCsv="avg-ram-usage-cattle-system.csv" +avgRamCattleFleetSystemCsv="avg-ram-usage-cattle-fleet-system.csv" +maxRamMaestroAppSystemCsv="max-ram-usage-ma.csv" +maxRamCattleSystemCsv="max-ram-usage-cattle-system.csv" +maxRamCattleFleetSystemCsv="max-ram-usage-cattle-fleet-system.csv" + +# Png 
files generated from CSV file +appInstallTimePng="app-install-time.png" +admApiLatencyPng="adm-api-latency.png" +#armApiLatencyPng="arm-api-latency.png" +avgCpuMaestroAppSystemPng="avg-cpu-usage-ma.png" +avgCpuCattleSystemPng="avg-cpu-usage-cattle-system.png" +avgCpuCattleFleetSystemPng="avg-cpu-usage-cattle-fleet-system.png" +maxCpuMaestroAppSystemPng="max-cpu-usage-ma.png" +maxCpuCattleSystemPng="max-cpu-usage-cattle-system.png" +maxCpuCattleFleetSystemPng="max-cpu-usage-cattle-fleet-system.png" +avgRamMaestroAppSystemPng="avg-ram-usage-ma.png" +avgRamCattleSystemPng="avg-ram-usage-cattle-system.png" +avgRamCattleFleetSystemPng="avg-ram-usage-cattle-fleet-system.png" +maxRamMaestroAppSystemPng="max-ram-usage-ma.png" +maxRamCattleSystemPng="max-ram-usage-cattle-system.png" +maxRamCattleFleetSystemPng="max-ram-usage-cattle-fleet-system.png" + +appOrchEndpoint="https://api.${clusterFqdn}" + +# Function to display usage help +usage() { + echo "Usage: $0 [options] [--] [arguments]" + echo + echo "Options:" + echo " -u VALUE Keycloak username, default all-groups-example-user" + echo " -p VALUE Keycloak password, default ChangeMeOn1stLogin!" 
+ echo " -f VALUE Orch FQDN, default integration12.espd.infra-host.com" + echo " -b VALUE Cluster install batch size, default 10" + echo " -o VALUE Observability API credentials base64 encoded" + echo " -a VALUE Apps per cluster, default 3" + echo " -d VALUE Deployment package name to deploy, default dummy-app-package" + echo " -v VALUE Deployment package version to deploy, default 0.0.1" + echo " -g VALUE Don't run tests, only regenerate graphs using the specified results directory" + echo " -m VALUE project, default itep" + echo " -h Print this help menu" + + echo + echo "Example:" + echo " $0 -a 10 -b 2" + exit 1 +} + +readInputArgs() { + while [[ $# -gt 0 ]]; do + case "$1" in + -u | --user) + echo "Username: $2" + enspUser=$2 + shift # Shift past the option + shift # Shift past the value + ;; + -p | --password) + echo "Password: $2" + enspPassword=$2 + shift # Shift past the option + shift # Shift past the value + ;; + -h | --help) + usage + shift + ;; + -f | --fqdn) + echo "FQDN: $2" + clusterFqdn=$2 + shift # Shift past the option + shift # Shift past the value + ;; + -b | --batch-size) + echo "Cluster Install batch size: $2" + batchSize=$2 + shift # Shift past the option + shift # Shift past the value + ;; + -o | --observability-api-cred) + echo "Observability API Credentials base64 encoded: $2" + observabilityApiCredentials=$2 + shift # Shift past the option + shift # Shift past the value + ;; + -a | --total-apps-per-cluster) + echo "Apps per enic: $2" + totalApps=$2 + shift # Shift past the option + shift # Shift past the value + ;; + -d | --deployment-package-name) + echo "Deployment package name: $2" + containerDeploymentPackageName=$2 + shift # Shift past the option + shift # Shift past the value + ;; + -v | --deployment-package-version) + echo "Deployment-package-version: $2" + containerAppVersion=$2 + shift # Shift past the option + shift # Shift past the value + ;; + -m | --project) + echo "Project: $2" + project=$2 + shift # Shift past the option + 
shift # Shift past the value + ;; + -g) + echo "Results directory: $2" + resultsDirectory=$2 + exit 0 + ;; + *) + echo "Unknown option: $1" + shift # Shift past the unknown option + ;; + esac + done +} + +# Function to initialize Orchestrator API Token +initializeOrchAPIToken() { + orchAPIToken=$(curl -s --location --request POST https://keycloak.${clusterFqdn}/realms/master/protocol/openid-connect/token \ + -H 'Content-Type: application/x-www-form-urlencoded' \ + -d "username=admin" \ + -d "password=${enspPassword}" \ + -d "grant_type=password" \ + -d "client_id=system-client" \ + -d "scope=openid" | + jq -r '.access_token' 2>/dev/null) +} + +catalogLogin() { + CATALOG_ARGS="--deployment-endpoint ${appOrchEndpoint} --catalog-endpoint ${appOrchEndpoint}" + catalog ${CATALOG_ARGS} logout + + catalog ${CATALOG_ARGS} login --client-id=system-client --trust-cert=true --keycloak https://keycloak.${clusterFqdn}/realms/master ${enspUser} ${enspPassword} +} + +# Provide customized deployment for different packages (particularly target labels) +# Arguments are deploymentPackageName, deploymentPackageVersion +catalogDeploy() { + NAME=$1 + VERSION=$2 + + CATALOG_ARGS="--deployment-endpoint ${appOrchEndpoint} --catalog-endpoint ${appOrchEndpoint} --project ${project}" + case ${NAME} in + dummy-app-package) + catalog ${CATALOG_ARGS} create deployment \ + ${NAME} ${VERSION} --application-label dummy-app.$targetClusterLabel + ;; + ten-dummy-apps) + catalog ${CATALOG_ARGS} create deployment \ + ${NAME} ${VERSION} --application-label dummy-app-1.$targetClusterLabel \ + --application-label dummy-app-2.$targetClusterLabel \ + --application-label dummy-app-3.$targetClusterLabel \ + --application-label dummy-app-4.$targetClusterLabel \ + --application-label dummy-app-5.$targetClusterLabel \ + --application-label dummy-app-6.$targetClusterLabel \ + --application-label dummy-app-7.$targetClusterLabel \ + --application-label dummy-app-8.$targetClusterLabel \ + --application-label 
dummy-app-9.$targetClusterLabel \ + --application-label dummy-app-10.$targetClusterLabel + ;; + nginx-app) + catalog ${CATALOG_ARGS} create deployment \ + ${NAME} ${VERSION} --application-label nginx.$targetClusterLabel + ;; + *) + echo "ERROR: No deploy template for deployment package ${NAME}" + exit 1 + ;; + esac +} + +catalogUpload() { + catalogLogin + + CATALOG_ARGS="--deployment-endpoint ${appOrchEndpoint} --catalog-endpoint ${appOrchEndpoint} --project ${project}" + pushd ../../deployment-packages/ + for DPDIR in $(ls -d $containerDeploymentPackageName) + do + cd ${DPDIR}; catalog ${CATALOG_ARGS} upload .; cd .. + done + popd +} + +createContainerDeployment() { + echo Creating container deployment + batch=$1 + appDeploymentsCounter=$(( batch * batchSize)) + + containerAppInstanceCnt=$appDeploymentsCounter + while [ "$containerAppInstanceCnt" -lt "$(( (batch + 1) * batchSize ))" ]; do + # shellcheck disable=SC2090 + # containerAppInstallSpec=$(printf "$containerAppDeploymentTemplate" $containerDeploymentPackageName $containerAppVersion $containerAppInstanceCnt $containerAppProfileName $containerAppPublisherName $containerAppName $containerAppInstanceCnt) + # echo "App Deployment Spec is $containerAppInstallSpec" + # curl -s -X POST "https://app-orch.$clusterFqdn/deployment.orchestrator.apis/v1/deployments" -H 'Content-Type: application/json' -H 'accept: application/json' -H "Authorization: Bearer $orchAPIToken" -d "$containerAppInstallSpec" + catalogLogin + catalogDeploy $containerDeploymentPackageName $containerAppVersion + echo "" + # shellcheck disable=SC2003 + containerAppInstanceCnt=$(expr $containerAppInstanceCnt + 1) + done +} + +deleteDeployments() { + arr=() + + totalAppCnt=$(curl -s "$appOrchEndpoint/v1/projects/$project/appdeployment/deployments" -H 'Content-Type: application/json' -H 'accept: application/json' -H "Authorization: Bearer $orchAPIToken" | jq '.totalElements') + for ((i = 0 ; i < totalAppCnt ; i++ )); + do + currDeployment=$(curl -s 
"$appOrchEndpoint/v1/projects/$project/appdeployment/deployments?pageSize=${pageSize}&offset=${offset}" -H 'Content-Type: application/json' -H 'accept: application/json' -H "Authorization: Bearer $orchAPIToken" | jq -r '.deployments['$i']') + appName=$(echo $currDeployment | jq -r '.appName') + deployId=$(echo $currDeployment | jq -r '.deployId') + + if [ "$appName" == "$containerDeploymentPackageName" ]; then + arr+=("$deployId") + fi + done + + for d in "${arr[@]}" + do + deployId=$(curl -X DELETE "$appOrchEndpoint/v1/projects/$project/appdeployment/deployments/$d" -H "Authorization: Bearer $orchAPIToken") + echo "Deleted deployment $d..." + done + sleep 2 +} + +waitForAllAppsToBeRunning() { + initializeOrchAPIToken + echo Waiting for all apps to be running + +#echo "pagesize print: $pageSize" + while true; do + totalAppCnt=$(curl -s "$appOrchEndpoint/v1/projects/$project/appdeployment/deployments" -H 'Content-Type: application/json' -H 'accept: application/json' -H "Authorization: Bearer $orchAPIToken" | jq '.totalElements') + set +e + totalCnt=0 + for ((i = 0 ; i < totalAppCnt ; i++ )); + do + currDeployment=$(curl -s "$appOrchEndpoint/v1/projects/$project/appdeployment/deployments?pageSize=${pageSize}&offset=${offset}" -H 'Content-Type: application/json' -H 'accept: application/json' -H "Authorization: Bearer $orchAPIToken" | jq -r '.deployments['$i']') + appName=$(echo $currDeployment | jq -r '.appName') +#echo "appname print : $appName" + + if [ "$appName" == "$containerDeploymentPackageName" ]; then + totalCnt=$((totalCnt + 1)) +#echo "totalCnt print : $totalCnt" + fi + done +#echo "totalCnt print out: $totalCnt" +#runningAppCnt=$(curl -s "$appOrchEndpoint/v1/projects/$project/appdeployment/deployments?pageSize=${pageSize}&offset=${offset}" -H 'Content-Type: application/json' -H 'accept: application/json' -H "Authorization: Bearer $orchAPIToken" | jq -r '.deployments[].status.state' | grep -c RUNNING) + runningCnt=0 + for ((i = 0 ; i < totalAppCnt ; i++ )); 
+ do + currDeployment=$(curl -s "$appOrchEndpoint/v1/projects/$project/appdeployment/deployments?pageSize=${pageSize}&offset=${offset}" -H 'Content-Type: application/json' -H 'accept: application/json' -H "Authorization: Bearer $orchAPIToken" | jq -r '.deployments['$i']') + appName=$(echo $currDeployment | jq -r '.appName') + statusVal=$(echo $currDeployment | jq -r '.status.state') + statusInstance=$(echo $currDeployment | jq -r '.status.summary.running') +#echo "appname print : $appName" +#echo "statusval print : $statusVal" + + if [[ "$appName" == "$containerDeploymentPackageName" && "$statusVal" == "RUNNING" && "$statusInstance" == "100" ]]; then + runningCnt=$((runningCnt + 1)) +#echo "runningCnt print : $runningCnt" + fi + done +#echo "runningCnt print out: $runningCnt" + set -e + # curl -s "https://app-orch.$clusterFqdn/deployment.orchestrator.apis/v1/summary/deployments_status" -H 'Content-Type: application/json' -H 'accept: application/json' -H "Authorization: Bearer $orchAPIToken" >"$tmpJSONFile" 2>/dev/null + # Method below does not seem to provide an accurate total app count (see NEX-2988) + # totalAppCnt=$(jq '.total' "$tmpJSONFile") + # runningCnt=$(jq '.running' "$tmpJSONFile") + if [ "$totalCnt" == "$runningCnt" ]; then + echo "All $totalAppCnt apps are running!!" + break + fi + + echo "$runningCnt / $totalCnt apps running. Waiting $interval seconds." + sleep $interval + done +} + +runApiLatencyChecks() { + echo "Running API latency tests... 
it will take a few mins to capture the results" + initializeOrchAPIToken + # Run API Latency Tests + k6 run --env MY_HOSTNAME=$clusterFqdn --env API_TOKEN="$orchAPIToken" --env PROJECT="$project" ../k6-scripts/adm-api-latency.js -q --no-thresholds --summary-export="$tmpJSONFile" + totalChecksPass=$(jq '.root_group.checks."status is OK".passes' "$tmpJSONFile") + totalChecksFail=$(jq '.root_group.checks."status is OK".fails' "$tmpJSONFile") + avgApiDuration=$(jq '.metrics.http_req_duration.avg' "$tmpJSONFile") + # Capture the result to a csv file to be used for plotting later + echo "$totalAppCnt,$avgApiDuration" >>"$resultsDirectory/$admApiLatencyCsv" + echo "API Latency Check Results: Passed Checks: $totalChecksPass Failed Checks: $totalChecksFail, Avg API Duration: $avgApiDuration" +} + +plotGraph() { + # Define the input CSV file and the output image file + inputCsv="$resultsDirectory/$1" + outputPng="$resultsDirectory/$2" + xLabel=$3 + yLabel=$4 + yRangeMin=$5 + yRangeMax=$6 + yAxisScaleFactor=$7 + graphTitle=$8 + + # If csv files exists, then generate graphs before exiting the script + if [ ! 
-f "$inputCsv" ]; then + return + fi + echo "Plotting graph for $inputCsv" + + # Generate the graph using gnuplot + gnuplot -persist <<-EOFMarker + set datafile separator "," + set terminal png size 800,600 + set output "$outputPng" + set title "$graphTitle" + set xlabel "$xLabel" + set ylabel "$yLabel" + set ytics scale $yAxisScaleFactor + set yrange [$yRangeMin:$yRangeMax] + plot "$inputCsv" using 1:(\$2*$yAxisScaleFactor) with linespoints title "$graphTitle" +EOFMarker + echo "Graph generated for $graphTitle, output file: $outputPng" +} + +collectMetric() { + input=$1 + value="$2" + query=$3 + resultFile=$4 + + value=$(curl -s "$prometheusURL/query" -H "Authorization: Basic ${observabilityApiCredentials}" --data-urlencode "query=$query" | jq -r '.data.result[0].value[1]' 2>/dev/null) + if [ "$value" != "" ]; then + echo "$input,$value" >>"$resultsDirectory/$resultFile" + fi +} + +collectObservabilityMetrics() { + if [ "$observabilityApiCredentials" == "" ]; then + echo "observabilityApiCredentials is nil, cannot collect observability metrics" + return + fi + + # Check that Prometheus is accessible with the provided credentials + status=$(curl -s "$prometheusTestURL" -H "Authorization: Basic ${observabilityApiCredentials}" | jq -r '.status') + if [ "$status" != "OK" ]; then + echo "Unable to query Prometheus with credentials provided, cannot collect observability metrics" + return + fi + + totalApps=$1 + secondSuffix="s" + timeDurationInSec=$2$secondSuffix + + # Remove "_ratio" suffix due observability name changes (e.g., https://github.com/intel-innersource/frameworks.edge.one-intel-edge.observability.platform-dashboard/pull/15/files) + collectMetric "$totalApps" "avgCpu" "sum by(k8s_namespace_name)(avg_over_time(k8s_pod_cpu_utilization{k8s_namespace_name=\"maestro-app-system\"}[$timeDurationInSec]))" $avgCpuMaestroAppSystemCsv + collectMetric "$totalApps" "maxCpu" "sum 
by(k8s_namespace_name)(max_over_time(k8s_pod_cpu_utilization{k8s_namespace_name=\"maestro-app-system\"}[$timeDurationInSec]))" $maxCpuMaestroAppSystemCsv + collectMetric "$totalApps" "avgCpu" "sum by(k8s_namespace_name)(avg_over_time(k8s_pod_cpu_utilization{k8s_namespace_name=\"cattle-system\"}[$timeDurationInSec]))" $avgCpuCattleSystemCsv + collectMetric "$totalApps" "maxCpu" "sum by(k8s_namespace_name)(max_over_time(k8s_pod_cpu_utilization{k8s_namespace_name=\"cattle-system\"}[$timeDurationInSec]))" $maxCpuCattleSystemCsv + collectMetric "$totalApps" "avgCpu" "sum by(k8s_namespace_name)(avg_over_time(k8s_pod_cpu_utilization{k8s_namespace_name=\"cattle-fleet-system\"}[$timeDurationInSec]))" $avgCpuCattleFleetSystemCsv + collectMetric "$totalApps" "maxCpu" "sum by(k8s_namespace_name)(max_over_time(k8s_pod_cpu_utilization{k8s_namespace_name=\"cattle-fleet-system\"}[$timeDurationInSec]))" $maxCpuCattleFleetSystemCsv + + # Remove "_bytes" suffix due observability name changes (e.g., https://github.com/intel-innersource/frameworks.edge.one-intel-edge.observability.platform-dashboard/pull/15/files) + collectMetric "$totalApps" "avgMem" "sum by(k8s_namespace_name)(avg_over_time(k8s_pod_memory_usage{k8s_namespace_name=\"maestro-app-system\"}[$timeDurationInSec]))" $avgRamMaestroAppSystemCsv + collectMetric "$totalApps" "maxMem" "sum by(k8s_namespace_name)(max_over_time(k8s_pod_memory_usage{k8s_namespace_name=\"maestro-app-system\"}[$timeDurationInSec]))" $maxRamMaestroAppSystemCsv + collectMetric "$totalApps" "avgMem" "sum by(k8s_namespace_name)(avg_over_time(k8s_pod_memory_usage{k8s_namespace_name=\"cattle-system\"}[$timeDurationInSec]))" $avgRamCattleSystemCsv + collectMetric "$totalApps" "maxMem" "sum by(k8s_namespace_name)(max_over_time(k8s_pod_memory_usage{k8s_namespace_name=\"cattle-system\"}[$timeDurationInSec]))" $maxRamCattleSystemCsv + collectMetric "$totalApps" "avgMem" "sum 
by(k8s_namespace_name)(avg_over_time(k8s_pod_memory_usage{k8s_namespace_name=\"cattle-fleet-system\"}[$timeDurationInSec]))" $avgRamCattleFleetSystemCsv + collectMetric "$totalApps" "maxMem" "sum by(k8s_namespace_name)(max_over_time(k8s_pod_memory_usage{k8s_namespace_name=\"cattle-fleet-system\"}[$timeDurationInSec]))" $maxRamCattleFleetSystemCsv +} + +cleanup() { + # Remove temp file used to capture transient results + rm -f "$tmpJSONFile" + + plotGraph "$appInstallTimeCsv" "$appInstallTimePng" "Deployment #" "Seconds until Running on all edges" 0 500 1 "Time to Running on 1K edges, per Deployment" + plotGraph "$admApiLatencyCsv" "$admApiLatencyPng" "Total Deployments" "Avg API latency in ms" 0 1000 1 "Avg ADM API Latency by # of Deployments (1K edges)" + plotGraph "$avgCpuMaestroAppSystemCsv" "$avgCpuMaestroAppSystemPng" "Total Deployments" "Avg CPU usage" 0 10 1 "Avg App Orch CPU usage by # of Deployments (1K edges)" + plotGraph "$avgCpuCattleSystemCsv" "$avgCpuCattleSystemPng" "Total Deployments" "Avg CPU usage" 0 10 1 "Avg Rancher CPU usage by # of Deployments (1K edges)" + plotGraph "$avgCpuCattleFleetSystemCsv" "$avgCpuCattleFleetSystemPng" "Total Deployments" "Avg CPU usage" 0 40 1 "Avg Fleet CPU usage by # of Deployments (1K edges)" + plotGraph "$maxCpuMaestroAppSystemCsv" "$maxCpuMaestroAppSystemPng" "Total Deployments" "Max CPU usage" 0 10 1 "Max App Orch CPU usages by # of Deployments (1K edges)" + plotGraph "$maxCpuCattleSystemCsv" "$maxCpuCattleSystemPng" "Total Deployments" "Max CPU usage" 0 10 1 "Max Rancher CPU usage by # of Deployments (1K edges)" + plotGraph "$maxCpuCattleFleetSystemCsv" "$maxCpuCattleFleetSystemPng" "Total Deployments" "Max CPU usage" 0 40 1 "Max Fleet CPU usage by # of Deployments (1K edges)" + plotGraph "$avgRamMaestroAppSystemCsv" "$avgRamMaestroAppSystemPng" "Total Deployments" "Avg RAM usage in MB" 500 10000 0.000001 "Avg App Orch RAM usage by # of Deployments (1K edges)" + plotGraph "$avgRamCattleSystemCsv" 
"$avgRamCattleSystemPng" "Total Deployments" "Avg RAM usage in MB" 500 40000 0.000001 "Avg Rancher RAM usage by # of Deployments (1K edges)" + plotGraph "$avgRamCattleFleetSystemCsv" "$avgRamCattleFleetSystemPng" "Total Deployments" "Avg RAM usage in MB" 500 10000 0.000001 "Avg Fleet RAM usage by # of Deployments (1K edges)" + plotGraph "$maxRamMaestroAppSystemCsv" "$maxRamMaestroAppSystemPng" "Total Deployments" "Max RAM usage in MB" 500 10000 0.000001 "Max App Orch RAM usage by # of Deployments (1K edges)" + plotGraph "$maxRamCattleSystemCsv" "$maxRamCattleSystemPng" "Total Deployments" "Max RAM usage in MB" 500 40000 0.000001 "Max Rancher RAM usage by # of Deployments (1K edges)" + plotGraph "$maxRamCattleFleetSystemCsv" "$maxRamCattleFleetSystemPng" "Total Deployments" "Max RAM usage in MB" 500 10000 0.000001 "Max Fleet RAM usage by # of Deployments (1K edges)" +} + +########################### Main Script Starts here ########################### + +# Trap multiple signals +trap cleanup EXIT SIGINT SIGTERM + +# Read input arguments +readInputArgs "$@" + +if [ "$totalApps" == 0 ]; then + echo "No apps to be setup, exit.." + exit 0 +fi + +# Initialize Orchestrator Keycloak Token to be used for API access +initializeOrchAPIToken +echo "Initialized Orchestrator Keycloak Token" + +deleteDeployments +echo "Deleted previous deployments" + +# Create directory to store results +mkdir -p "$resultsDirectory" +echo "Created directory $resultsDirectory" + +if [ "$totalApps" -lt "$batchSize" ]; then + batchSize=$totalApps +fi + +echo "Creating $totalApps apps..." + +catalogUpload +echo "Uploaded deployment packages" + +totalBatches=$((totalApps / batchSize)) +remainder=$((totalApps % batchSize)) +if [ $remainder -ne 0 ]; then + # shellcheck disable=SC2003 + totalBatches=$(expr $totalBatches + 1) +fi + +counter=0 + +# Install apps +while [ $counter -lt "$totalBatches" ]; do + testStartTime=$(date +%s) + + # Initialize Orch API Token again to be safe. 
Token could have possibly expired while + # we installed previous batch of clusters or could expire shortly + initializeOrchAPIToken + + createContainerDeployment $counter + + # Wait for all apps to running + appInstallStartTime=$(date +%s) + waitForAllAppsToBeRunning + + appInstallEndTime=$(date +%s) + echo Total time for all apps to be running at iteration:$counter: $((appInstallEndTime - appInstallStartTime)) seconds + + # Capture the result to a csv file to be used for plotting later + echo "$totalAppCnt,$((appInstallEndTime - appInstallStartTime))" >>"$resultsDirectory/$appInstallTimeCsv" + + # Run API Latency Checks + runApiLatencyChecks + testEndTime=$(date +%s) + echo "Total Test Run Time for Batch$counter: $((testEndTime - testStartTime))s" + + # Collect metrics from observability APIs + collectObservabilityMetrics "$totalAppCnt" $((testEndTime - testStartTime)) + + # shellcheck disable=SC2003 + counter=$(expr $counter + 1) +done + diff --git a/scripts/app-scale-tester/bck_add-N-apps.sh b/scripts/app-scale-tester/bck_add-N-apps.sh new file mode 100755 index 00000000..63c65bef --- /dev/null +++ b/scripts/app-scale-tester/bck_add-N-apps.sh @@ -0,0 +1,637 @@ +#!/bin/bash +set -eu + +if [ $# == 0 ]; then + echo "specify number of apps to setup" + exit 1 +fi + +# Initialize some global variables and constants +clusterFqdn=integration14.espd.infra-host.com # Change if needed +# clusterFqdn=kind.internal # Change if needed +enspUser=intel-itep-user && enspPassword=ChangeMeOn1stLogin! +prometheusURL="https://observability-admin.${clusterFqdn}/api/datasources/uid/orchestrator-mimir/resources/api/v1" +prometheusTestURL="https://observability-admin.${clusterFqdn}/api/datasources/uid/orchestrator-mimir/health" +project=itep +targetClusterLabel="default-extension=baseline" + +## App install specific variables +totalApps=$1 # Total apps to install - input argument +batchSize=10 # How many apps to install in parallel? 
+interval=10 +pageSize=10 # Adjust pageSize as needed for querying elements on APIs +offset=0 +runTests=1 + +## Token specific variables +orchAPIToken="" # Orchestrator API Token +observabilityApiCredentials="" # Provide a valid token for the test. + +## App install specific variables +containerDeploymentPackageName="dummy-app-package" +containerAppName="dummy-app" +containerAppVersion="0.0.1" +containerAppProfileName="default-profile" +containerAppPublisherName="default" +totalContainerAppInstancesPerCluster=3 + +# Container deployment template -- currently not used but kept for reference. +# The template approach gets complex when we have a Deployment with multiple apps. +# shellcheck disable=SC2089 +containerAppDeploymentTemplate='{"appName":"%s","appVersion":"%s","name":"dummy-app-%d","profileName":"%s","publisherName":"%s","targetClusters":[{"appName":"%s", "labels":{"target":"scale"}}],"displayName":"dummy-app-%d","deploymentType":"auto-scaling","overrideValues":[]}' + +# CSV Files to capture result +currentTimeStamp=$(date -u +"%Y%m%d-%H%M%S") +resultsDirectory="./test-results/$currentTimeStamp" +tmpJSONFile="$resultsDirectory/tmp.json" +appInstallTimeCsv="app-install-time.csv" +admApiLatencyCsv="adm-api-latency.csv" +#armApiLatencyCsv="arm-api-latency.csv" +avgCpuMaestroAppSystemCsv="avg-cpu-usage-ma.csv" +avgCpuCattleSystemCsv="avg-cpu-usage-cattle-system.csv" +avgCpuCattleFleetSystemCsv="avg-cpu-usage-cattle-fleet-system.csv" +maxCpuMaestroAppSystemCsv="max-cpu-usage-ma.csv" +maxCpuCattleSystemCsv="max-cpu-usage-cattle-system.csv" +maxCpuCattleFleetSystemCsv="max-cpu-usage-cattle-fleet-system.csv" +avgRamMaestroAppSystemCsv="avg-ram-usage-ma.csv" +avgRamCattleSystemCsv="avg-ram-usage-cattle-system.csv" +avgRamCattleFleetSystemCsv="avg-ram-usage-cattle-fleet-system.csv" +maxRamMaestroAppSystemCsv="max-ram-usage-ma.csv" +maxRamCattleSystemCsv="max-ram-usage-cattle-system.csv" +maxRamCattleFleetSystemCsv="max-ram-usage-cattle-fleet-system.csv" + +# Png files 
generated from CSV file +appInstallTimePng="app-install-time.png" +admApiLatencyPng="adm-api-latency.png" +#armApiLatencyPng="arm-api-latency.png" +avgCpuMaestroAppSystemPng="avg-cpu-usage-ma.png" +avgCpuCattleSystemPng="avg-cpu-usage-cattle-system.png" +avgCpuCattleFleetSystemPng="avg-cpu-usage-cattle-fleet-system.png" +maxCpuMaestroAppSystemPng="max-cpu-usage-ma.png" +maxCpuCattleSystemPng="max-cpu-usage-cattle-system.png" +maxCpuCattleFleetSystemPng="max-cpu-usage-cattle-fleet-system.png" +avgRamMaestroAppSystemPng="avg-ram-usage-ma.png" +avgRamCattleSystemPng="avg-ram-usage-cattle-system.png" +avgRamCattleFleetSystemPng="avg-ram-usage-cattle-fleet-system.png" +maxRamMaestroAppSystemPng="max-ram-usage-ma.png" +maxRamCattleSystemPng="max-ram-usage-cattle-system.png" +maxRamCattleFleetSystemPng="max-ram-usage-cattle-fleet-system.png" + +appOrchEndpoint="https://api.${clusterFqdn}" + +# Function to display usage help +usage() { + echo "Usage: $0 [options] [--] [arguments]" + echo + echo "Options:" + echo " -u VALUE Keycloak username, default all-groups-example-user" + echo " -p VALUE Keycloak password, default ChangeMeOn1stLogin!" 
+ echo " -f VALUE Orch FQDN, default integration12.espd.infra-host.com" + echo " -b VALUE Cluster install batch size, default 10" + echo " -o VALUE Observability API credentials base64 encoded" + echo " -a VALUE Apps per cluster, default 3" + echo " -d VALUE Deployment package name to deploy, default dummy-app-package" + echo " -v VALUE Deployment package version to deploy, default 0.0.1" + echo " -g VALUE Don't run tests, only regenerate graphs using the specified results directory" + echo " -m VALUE project, default itep" + echo " -h Print this help menu" + + echo + echo "Example:" + echo " $0 -a 10 -b 2" + exit 1 +} + +readInputArgs() { + while [[ $# -gt 0 ]]; do + case "$1" in + -u | --user) + echo "Username: $2" + enspUser=$2 + shift # Shift past the option + shift # Shift past the value + ;; + -p | --password) + echo "Password: $2" + enspPassword=$2 + shift # Shift past the option + shift # Shift past the value + ;; + -h | --help) + usage + shift + ;; + -f | --fqdn) + echo "FQDN: $2" + clusterFqdn=$2 + shift # Shift past the option + shift # Shift past the value + ;; + -b | --batch-size) + echo "Cluster Install batch size: $2" + batchSize=$2 + shift # Shift past the option + shift # Shift past the value + ;; + -o | --observability-api-cred) + echo "Observability API Credentials base64 encoded: $2" + observabilityApiCredentials=$2 + shift # Shift past the option + shift # Shift past the value + ;; + -a | --total-apps-per-cluster) + echo "Apps per enic: $2" + totalApps=$2 + shift # Shift past the option + shift # Shift past the value + ;; + -d | --deployment-package-name) + echo "Deployment package name: $2" + containerDeploymentPackageName=$2 + shift # Shift past the option + shift # Shift past the value + ;; + -v | --deployment-package-version) + echo "Deployment-package-version: $2" + containerAppVersion=$2 + shift # Shift past the option + shift # Shift past the value + ;; + -m | --project) + echo "Project: $2" + project=$2 + shift # Shift past the option + 
shift # Shift past the value + ;; + -g) + echo "Results directory: $2" + resultsDirectory=$2 + exit 0 + ;; + *) + echo "Unknown option: $1" + shift # Shift past the unknown option + ;; + esac + done +} + +# Function to initialize Orchestrator API Token +initializeOrchAPIToken() { + orchAPIToken=$(curl -s --location --request POST https://keycloak.${clusterFqdn}/realms/master/protocol/openid-connect/token \ + -H 'Content-Type: application/x-www-form-urlencoded' \ + -d "username=${enspUser}" \ + -d "password=${enspPassword}" \ + -d "grant_type=password" \ + -d "client_id=system-client" \ + -d "scope=openid" | + jq -r '.access_token' 2>/dev/null) +} + +catalogLogin() { + CATALOG_ARGS="--deployment-endpoint ${appOrchEndpoint} --catalog-endpoint ${appOrchEndpoint}" + catalog ${CATALOG_ARGS} logout + + catalog ${CATALOG_ARGS} login --client-id=system-client --trust-cert=true --keycloak https://keycloak.${clusterFqdn}/realms/master ${enspUser} ${enspPassword} +} + +# Provide customized deployment for different packages (particularly target labels) +# Arguments are deploymentPackageName, deploymentPackageVersion +catalogDeploy() { + NAME=$1 + VERSION=$2 + + CATALOG_ARGS="--deployment-endpoint ${appOrchEndpoint} --catalog-endpoint ${appOrchEndpoint} --project ${project}" + case ${NAME} in + dummy-app-package) + catalog ${CATALOG_ARGS} create deployment \ + ${NAME} ${VERSION} --application-label dummy-app.$targetClusterLabel + ;; + ten-dummy-apps) + catalog ${CATALOG_ARGS} create deployment \ + ${NAME} ${VERSION} --application-label dummy-app-1.$targetClusterLabel \ + --application-label dummy-app-2.$targetClusterLabel \ + --application-label dummy-app-3.$targetClusterLabel \ + --application-label dummy-app-4.$targetClusterLabel \ + --application-label dummy-app-5.$targetClusterLabel \ + --application-label dummy-app-6.$targetClusterLabel \ + --application-label dummy-app-7.$targetClusterLabel \ + --application-label dummy-app-8.$targetClusterLabel \ + --application-label 
dummy-app-9.$targetClusterLabel \ + --application-label dummy-app-10.$targetClusterLabel + ;; + nginx-app) + catalog ${CATALOG_ARGS} create deployment \ + ${NAME} ${VERSION} --application-label nginx.$targetClusterLabel + ;; + *) + echo "ERROR: No deploy template for deployment package ${NAME}" + exit 1 + ;; + esac +} + +catalogUpload() { + catalogLogin + + CATALOG_ARGS="--deployment-endpoint ${appOrchEndpoint} --catalog-endpoint ${appOrchEndpoint} --project ${project}" + pushd ../../deployment-packages/ + for DPDIR in $(ls -d $containerDeploymentPackageName) + do + cd ${DPDIR}; catalog ${CATALOG_ARGS} upload .; cd .. + done + popd +} + +createContainerDeployment() { + echo Creating container deployment + batch=$1 + appDeploymentsCounter=$(( batch * batchSize)) + + containerAppInstanceCnt=$appDeploymentsCounter + while [ "$containerAppInstanceCnt" -lt "$(( (batch + 1) * batchSize ))" ]; do + # shellcheck disable=SC2090 + # containerAppInstallSpec=$(printf "$containerAppDeploymentTemplate" $containerDeploymentPackageName $containerAppVersion $containerAppInstanceCnt $containerAppProfileName $containerAppPublisherName $containerAppName $containerAppInstanceCnt) + # echo "App Deployment Spec is $containerAppInstallSpec" + # curl -s -X POST "https://app-orch.$clusterFqdn/deployment.orchestrator.apis/v1/deployments" -H 'Content-Type: application/json' -H 'accept: application/json' -H "Authorization: Bearer $orchAPIToken" -d "$containerAppInstallSpec" + catalogLogin + catalogDeploy $containerDeploymentPackageName $containerAppVersion + echo "" + # shellcheck disable=SC2003 + containerAppInstanceCnt=$(expr $containerAppInstanceCnt + 1) + done +} + +deleteDeployments() { + arr=() + + totalAppCnt=$(curl -s "$appOrchEndpoint/v1/projects/$project/appdeployment/deployments" -H 'Content-Type: application/json' -H 'accept: application/json' -H "Authorization: Bearer $orchAPIToken" | jq '.totalElements') + for ((i = 0 ; i < totalAppCnt ; i++ )); + do + currDeployment=$(curl -s 
"$appOrchEndpoint/v1/projects/$project/appdeployment/deployments?pageSize=${pageSize}&offset=${offset}" -H 'Content-Type: application/json' -H 'accept: application/json' -H "Authorization: Bearer $orchAPIToken" | jq -r '.deployments['$i']') + appName=$(echo $currDeployment | jq -r '.appName') + deployId=$(echo $currDeployment | jq -r '.deployId') + + if [ "$appName" == "$containerDeploymentPackageName" ]; then + arr+=("$deployId") + fi + done + + for d in "${arr[@]}" + do + deployId=$(curl -X DELETE "$appOrchEndpoint/v1/projects/$project/appdeployment/deployments/$d" -H "Authorization: Bearer $orchAPIToken") + echo "Deleted deployment $d..." + done + sleep 2 +} + +waitForAllAppsToBeRunning() { + initializeOrchAPIToken + echo Waiting for all apps to be running + +#echo "pagesize print: $pageSize" + while true; do + totalAppCnt=$(curl -s "$appOrchEndpoint/v1/projects/$project/appdeployment/deployments" -H 'Content-Type: application/json' -H 'accept: application/json' -H "Authorization: Bearer $orchAPIToken" | jq '.totalElements') + set +e + totalCnt=0 + for ((i = 0 ; i < totalAppCnt ; i++ )); + do + currDeployment=$(curl -s "$appOrchEndpoint/v1/projects/$project/appdeployment/deployments?pageSize=${pageSize}&offset=${offset}" -H 'Content-Type: application/json' -H 'accept: application/json' -H "Authorization: Bearer $orchAPIToken" | jq -r '.deployments['$i']') + appName=$(echo $currDeployment | jq -r '.appName') +#echo "appname print : $appName" + + if [ "$appName" == "$containerDeploymentPackageName" ]; then + totalCnt=$((totalCnt + 1)) +#echo "totalCnt print : $totalCnt" + fi + done +#echo "totalCnt print out: $totalCnt" +#runningAppCnt=$(curl -s "$appOrchEndpoint/v1/projects/$project/appdeployment/deployments?pageSize=${pageSize}&offset=${offset}" -H 'Content-Type: application/json' -H 'accept: application/json' -H "Authorization: Bearer $orchAPIToken" | jq -r '.deployments[].status.state' | grep -c RUNNING) + runningCnt=0 + for ((i = 0 ; i < totalAppCnt ; i++ )); 
+ do + currDeployment=$(curl -s "$appOrchEndpoint/v1/projects/$project/appdeployment/deployments?pageSize=${pageSize}&offset=${offset}" -H 'Content-Type: application/json' -H 'accept: application/json' -H "Authorization: Bearer $orchAPIToken" | jq -r '.deployments['$i']') + appName=$(echo $currDeployment | jq -r '.appName') + statusVal=$(echo $currDeployment | jq -r '.status.state') +#echo "appname print : $appName" +#echo "statusval print : $statusVal" + + if [[ "$appName" == "$containerDeploymentPackageName" && "$statusVal" == "RUNNING" ]]; then + runningCnt=$((runningCnt + 1)) +#echo "runningCnt print : $runningCnt" + fi + done +#echo "runningCnt print out: $runningCnt" + set -e + # curl -s "https://app-orch.$clusterFqdn/deployment.orchestrator.apis/v1/summary/deployments_status" -H 'Content-Type: application/json' -H 'accept: application/json' -H "Authorization: Bearer $orchAPIToken" >"$tmpJSONFile" 2>/dev/null + # Method below does not seem to provide an accurate total app count (see NEX-2988) + # totalAppCnt=$(jq '.total' "$tmpJSONFile") + # runningCnt=$(jq '.running' "$tmpJSONFile") + if [ "$totalCnt" == "$runningCnt" ]; then + echo "All $totalAppCnt apps are running!!" + break + fi + + echo "$runningCnt / $totalCnt apps running. Waiting $interval seconds." 
+ sleep $interval + done +} + +: ' +deleteDeployments() { + arr=() + pageSize=10 # Define the page size (number of deployments per page) + offset=0 # Start with the first page + + # Fetch total number of deployments + delTotalAppCnt=$(curl -s "$appOrchEndpoint/v1/projects/$project/appdeployment/deployments" -H 'Content-Type: application/json' -H 'accept: application/json' -H "Authorization: Bearer $orchAPIToken" | jq '.totalElements') + + echo "delTotalAppCnt : $delTotalAppCnt" + # Calculate the total number of pages needed + totalPages=$(( (delTotalAppCnt + pageSize - 1) / pageSize )) + + # Loop through each page + for ((page = 0; page < totalPages; page++)); do + offset=$((page * pageSize)) + + # Fetch deployments for the current page + deployments=$(curl -s "$appOrchEndpoint/v1/projects/$project/appdeployment/deployments?pageSize=${pageSize}&offset=${offset}" -H 'Content-Type: application/json' -H 'accept: application/json' -H "Authorization: Bearer $orchAPIToken") + deploymentCnt=$(curl -s "$appOrchEndpoint/v1/projects/$project/appdeployment/deployments?pageSize=${pageSize}&offset=${offset}" -H 'Content-Type: application/json' -H 'accept: application/json' -H "Authorization: Bearer $orchAPIToken" | jq '.totalElements') + + if [ $deploymentCnt -lt $pageSize ]; then + pageSize=$deploymentCnt + fi + echo "pageSize : $pageSize" + + # Loop through each deployment in the current page using jq + echo "$deployments" | jq -c '.deployments[]' | while read -r currDeployment; do + appName=$(echo "$currDeployment" | jq -r '.appName') + deployId=$(echo "$currDeployment" | jq -r '.deployId') + echo "delete appname: $appName" + echo "delete deployId: $deployId" + + # Check if the appName matches the target package name + if [ "$appName" == "$containerDeploymentPackageName" ]; then + arr+=("$deployId") + echo "deleting deployment: $deployId" + fi + done + done + + # Delete each deployment in the array + for d in "${arr[@]}"; do + echo "dep in array: $deployId" + response=$(curl 
-s -o /dev/null -w "%{http_code}" -X DELETE "$appOrchEndpoint/v1/projects/$project/appdeployment/deployments/$d" -H "Authorization: Bearer $orchAPIToken") + echo "response: $response" + if [ "$response" -eq 200 ]; then + echo "Deleted deployment $d..." + else + echo "Failed to delete deployment $d. HTTP response code: $response" + fi + done + sleep 2 +} + +waitForAllAppsToBeRunning() { + initializeOrchAPIToken + echo "Waiting for all apps to be running" + + pageSize=10 # Define the page size (number of deployments per page) + interval=5 # Define the interval between checks (in seconds) + + while true; do + set +e + totalCnt=0 + runningCnt=0 + + # Fetch total number of deployments + totalAppCnt=$(curl -s "$appOrchEndpoint/v1/projects/$project/appdeployment/deployments" -H 'Content-Type: application/json' -H 'accept: application/json' -H "Authorization: Bearer $orchAPIToken" | jq '.totalElements') + + echo "totalAppCnt : $totalAppCnt" + # Calculate the total number of pages needed + totalPages=$(( (totalAppCnt + pageSize - 1) / pageSize )) + + # Loop through each page + for ((page = 0; page < totalPages; page++)); do + offset=$((page * pageSize)) + + # Fetch deployments for the current page + deployments=$(curl -s "$appOrchEndpoint/v1/projects/$project/appdeployment/deployments?pageSize=${pageSize}&offset=${offset}" -H 'Content-Type: application/json' -H 'accept: application/json' -H "Authorization: Bearer $orchAPIToken") + + deploymentCnt=$(curl -s "$appOrchEndpoint/v1/projects/$project/appdeployment/deployments?pageSize=${pageSize}&offset=${offset}" -H 'Content-Type: application/json' -H 'accept: application/json' -H "Authorization: Bearer $orchAPIToken" | jq '.totalElements') + + if [ $deploymentCnt -lt $pageSize ]; then + pageSize=$deploymentCnt + fi + echo "pageSize : $pageSize" + # Loop through each deployment in the current page using jq + echo "$deployments" | jq -c '.deployments[]' | while read -r currDeployment; do + appName=$(echo "$currDeployment" | jq -r 
'.appName') + statusVal=$(echo "$currDeployment" | jq -r '.status.state') + + if [ "$appName" == "$containerDeploymentPackageName" ]; then + totalCnt=$((totalCnt + 1)) + if [ "$statusVal" == "RUNNING" ]; then + runningCnt=$((runningCnt + 1)) + fi + fi + done + done + + set -e + + if [ "$totalCnt" -eq "$runningCnt" ]; then + echo "All $totalCnt apps are running!!" + break + fi + + echo "$runningCnt / $totalCnt apps running. Waiting $interval seconds." + sleep $interval + done +} +' + +runApiLatencyChecks() { + echo "Running API latency tests... it will take a few mins to capture the results" + initializeOrchAPIToken + # Run API Latency Tests + k6 run --env MY_HOSTNAME=$clusterFqdn --env API_TOKEN="$orchAPIToken" --env PROJECT="$project" ../k6-scripts/adm-api-latency.js -q --no-thresholds --summary-export="$tmpJSONFile" + totalChecksPass=$(jq '.root_group.checks."status is OK".passes' "$tmpJSONFile") + totalChecksFail=$(jq '.root_group.checks."status is OK".fails' "$tmpJSONFile") + avgApiDuration=$(jq '.metrics.http_req_duration.avg' "$tmpJSONFile") + # Capture the result to a csv file to be used for plotting later + echo "$totalAppCnt,$avgApiDuration" >>"$resultsDirectory/$admApiLatencyCsv" + echo "API Latency Check Results: Passed Checks: $totalChecksPass Failed Checks: $totalChecksFail, Avg API Duration: $avgApiDuration" +} + +plotGraph() { + # Define the input CSV file and the output image file + inputCsv="$resultsDirectory/$1" + outputPng="$resultsDirectory/$2" + xLabel=$3 + yLabel=$4 + yRangeMin=$5 + yRangeMax=$6 + yAxisScaleFactor=$7 + graphTitle=$8 + + # If csv files exists, then generate graphs before exiting the script + if [ ! 
-f "$inputCsv" ]; then + return + fi + echo "Plotting graph for $inputCsv" + + # Generate the graph using gnuplot + gnuplot -persist <<-EOFMarker + set datafile separator "," + set terminal png size 800,600 + set output "$outputPng" + set title "$graphTitle" + set xlabel "$xLabel" + set ylabel "$yLabel" + set ytics scale $yAxisScaleFactor + set yrange [$yRangeMin:$yRangeMax] + plot "$inputCsv" using 1:(\$2*$yAxisScaleFactor) with linespoints title "$graphTitle" +EOFMarker + echo "Graph generated for $graphTitle, output file: $outputPng" +} + +collectMetric() { + input=$1 + value="$2" + query=$3 + resultFile=$4 + + value=$(curl -s "$prometheusURL/query" -H "Authorization: Basic ${observabilityApiCredentials}" --data-urlencode "query=$query" | jq -r '.data.result[0].value[1]' 2>/dev/null) + if [ "$value" != "" ]; then + echo "$input,$value" >>"$resultsDirectory/$resultFile" + fi +} + +collectObservabilityMetrics() { + if [ "$observabilityApiCredentials" == "" ]; then + echo "observabilityApiCredentials is nil, cannot collect observability metrics" + return + fi + + # Check that Prometheus is accessible with the provided credentials + status=$(curl -s "$prometheusTestURL" -H "Authorization: Basic ${observabilityApiCredentials}" | jq -r '.status') + if [ "$status" != "OK" ]; then + echo "Unable to query Prometheus with credentials provided, cannot collect observability metrics" + return + fi + + totalApps=$1 + secondSuffix="s" + timeDurationInSec=$2$secondSuffix + + # Remove "_ratio" suffix due observability name changes (e.g., https://github.com/intel-innersource/frameworks.edge.one-intel-edge.observability.platform-dashboard/pull/15/files) + collectMetric "$totalApps" "avgCpu" "sum by(k8s_namespace_name)(avg_over_time(k8s_pod_cpu_utilization{k8s_namespace_name=\"maestro-app-system\"}[$timeDurationInSec]))" $avgCpuMaestroAppSystemCsv + collectMetric "$totalApps" "maxCpu" "sum 
by(k8s_namespace_name)(max_over_time(k8s_pod_cpu_utilization{k8s_namespace_name=\"maestro-app-system\"}[$timeDurationInSec]))" $maxCpuMaestroAppSystemCsv + collectMetric "$totalApps" "avgCpu" "sum by(k8s_namespace_name)(avg_over_time(k8s_pod_cpu_utilization{k8s_namespace_name=\"cattle-system\"}[$timeDurationInSec]))" $avgCpuCattleSystemCsv + collectMetric "$totalApps" "maxCpu" "sum by(k8s_namespace_name)(max_over_time(k8s_pod_cpu_utilization{k8s_namespace_name=\"cattle-system\"}[$timeDurationInSec]))" $maxCpuCattleSystemCsv + collectMetric "$totalApps" "avgCpu" "sum by(k8s_namespace_name)(avg_over_time(k8s_pod_cpu_utilization{k8s_namespace_name=\"cattle-fleet-system\"}[$timeDurationInSec]))" $avgCpuCattleFleetSystemCsv + collectMetric "$totalApps" "maxCpu" "sum by(k8s_namespace_name)(max_over_time(k8s_pod_cpu_utilization{k8s_namespace_name=\"cattle-fleet-system\"}[$timeDurationInSec]))" $maxCpuCattleFleetSystemCsv + + # Remove "_bytes" suffix due observability name changes (e.g., https://github.com/intel-innersource/frameworks.edge.one-intel-edge.observability.platform-dashboard/pull/15/files) + collectMetric "$totalApps" "avgMem" "sum by(k8s_namespace_name)(avg_over_time(k8s_pod_memory_usage{k8s_namespace_name=\"maestro-app-system\"}[$timeDurationInSec]))" $avgRamMaestroAppSystemCsv + collectMetric "$totalApps" "maxMem" "sum by(k8s_namespace_name)(max_over_time(k8s_pod_memory_usage{k8s_namespace_name=\"maestro-app-system\"}[$timeDurationInSec]))" $maxRamMaestroAppSystemCsv + collectMetric "$totalApps" "avgMem" "sum by(k8s_namespace_name)(avg_over_time(k8s_pod_memory_usage{k8s_namespace_name=\"cattle-system\"}[$timeDurationInSec]))" $avgRamCattleSystemCsv + collectMetric "$totalApps" "maxMem" "sum by(k8s_namespace_name)(max_over_time(k8s_pod_memory_usage{k8s_namespace_name=\"cattle-system\"}[$timeDurationInSec]))" $maxRamCattleSystemCsv + collectMetric "$totalApps" "avgMem" "sum 
by(k8s_namespace_name)(avg_over_time(k8s_pod_memory_usage{k8s_namespace_name=\"cattle-fleet-system\"}[$timeDurationInSec]))" $avgRamCattleFleetSystemCsv + collectMetric "$totalApps" "maxMem" "sum by(k8s_namespace_name)(max_over_time(k8s_pod_memory_usage{k8s_namespace_name=\"cattle-fleet-system\"}[$timeDurationInSec]))" $maxRamCattleFleetSystemCsv +} + +cleanup() { + # Remove temp file used to capture transient results + rm -f "$tmpJSONFile" + + plotGraph "$appInstallTimeCsv" "$appInstallTimePng" "Deployment #" "Seconds until Running on all edges" 0 500 1 "Time to Running on 1K edges, per Deployment" + plotGraph "$admApiLatencyCsv" "$admApiLatencyPng" "Total Deployments" "Avg API latency in ms" 0 1000 1 "Avg ADM API Latency by # of Deployments (1K edges)" + plotGraph "$avgCpuMaestroAppSystemCsv" "$avgCpuMaestroAppSystemPng" "Total Deployments" "Avg CPU usage" 0 10 1 "Avg App Orch CPU usage by # of Deployments (1K edges)" + plotGraph "$avgCpuCattleSystemCsv" "$avgCpuCattleSystemPng" "Total Deployments" "Avg CPU usage" 0 10 1 "Avg Rancher CPU usage by # of Deployments (1K edges)" + plotGraph "$avgCpuCattleFleetSystemCsv" "$avgCpuCattleFleetSystemPng" "Total Deployments" "Avg CPU usage" 0 40 1 "Avg Fleet CPU usage by # of Deployments (1K edges)" + plotGraph "$maxCpuMaestroAppSystemCsv" "$maxCpuMaestroAppSystemPng" "Total Deployments" "Max CPU usage" 0 10 1 "Max App Orch CPU usages by # of Deployments (1K edges)" + plotGraph "$maxCpuCattleSystemCsv" "$maxCpuCattleSystemPng" "Total Deployments" "Max CPU usage" 0 10 1 "Max Rancher CPU usage by # of Deployments (1K edges)" + plotGraph "$maxCpuCattleFleetSystemCsv" "$maxCpuCattleFleetSystemPng" "Total Deployments" "Max CPU usage" 0 40 1 "Max Fleet CPU usage by # of Deployments (1K edges)" + plotGraph "$avgRamMaestroAppSystemCsv" "$avgRamMaestroAppSystemPng" "Total Deployments" "Avg RAM usage in MB" 500 10000 0.000001 "Avg App Orch RAM usage by # of Deployments (1K edges)" + plotGraph "$avgRamCattleSystemCsv" 
"$avgRamCattleSystemPng" "Total Deployments" "Avg RAM usage in MB" 500 40000 0.000001 "Avg Rancher RAM usage by # of Deployments (1K edges)" + plotGraph "$avgRamCattleFleetSystemCsv" "$avgRamCattleFleetSystemPng" "Total Deployments" "Avg RAM usage in MB" 500 10000 0.000001 "Avg Fleet RAM usage by # of Deployments (1K edges)" + plotGraph "$maxRamMaestroAppSystemCsv" "$maxRamMaestroAppSystemPng" "Total Deployments" "Max RAM usage in MB" 500 10000 0.000001 "Max App Orch RAM usage by # of Deployments (1K edges)" + plotGraph "$maxRamCattleSystemCsv" "$maxRamCattleSystemPng" "Total Deployments" "Max RAM usage in MB" 500 40000 0.000001 "Max Rancher RAM usage by # of Deployments (1K edges)" + plotGraph "$maxRamCattleFleetSystemCsv" "$maxRamCattleFleetSystemPng" "Total Deployments" "Max RAM usage in MB" 500 10000 0.000001 "Max Fleet RAM usage by # of Deployments (1K edges)" +} + +########################### Main Script Starts here ########################### + +# Trap multiple signals +trap cleanup EXIT SIGINT SIGTERM + +# Read input arguments +readInputArgs "$@" + +if [ "$totalApps" == 0 ]; then + echo "No apps to be setup, exit.." + exit 0 +fi + +# Initialize Orchestrator Keycloak Token to be used for API access +initializeOrchAPIToken +echo "Initialized Orchestrator Keycloak Token" + +deleteDeployments +echo "Deleted previous deployments" + +# Create directory to store results +mkdir -p "$resultsDirectory" +echo "Created directory $resultsDirectory" + +if [ "$totalApps" -lt "$batchSize" ]; then + batchSize=$totalApps +fi + +echo "Creating $totalApps apps..." + +catalogUpload +echo "Uploaded deployment packages" + +totalBatches=$((totalApps / batchSize)) +remainder=$((totalApps % batchSize)) +if [ $remainder -ne 0 ]; then + # shellcheck disable=SC2003 + totalBatches=$(expr $totalBatches + 1) +fi + +counter=0 + +# Install apps +while [ $counter -lt "$totalBatches" ]; do + testStartTime=$(date +%s) + + # Initialize Orch API Token again to be safe. 
Token could have possibly expired while + # we installed previous batch of clusters or could expire shortly + initializeOrchAPIToken + + createContainerDeployment $counter + + # Wait for all apps to be running + appInstallStartTime=$(date +%s) + waitForAllAppsToBeRunning + + appInstallEndTime=$(date +%s) + echo Total time for all apps to be running at iteration:$counter: $((appInstallEndTime - appInstallStartTime)) seconds + + # Capture the result to a csv file to be used for plotting later + echo "$totalAppCnt,$((appInstallEndTime - appInstallStartTime))" >>"$resultsDirectory/$appInstallTimeCsv" + + # Run API Latency Checks + runApiLatencyChecks + testEndTime=$(date +%s) + echo "Total Test Run Time for Batch$counter: $((testEndTime - testStartTime))s" + + # Collect metrics from observability APIs + collectObservabilityMetrics "$totalAppCnt" $((testEndTime - testStartTime)) + + # shellcheck disable=SC2003 + counter=$(expr $counter + 1) +done diff --git a/scripts/app-scale-tester/run_test.sh b/scripts/app-scale-tester/run_test.sh new file mode 100755 index 00000000..669a584e --- /dev/null +++ b/scripts/app-scale-tester/run_test.sh @@ -0,0 +1,16 @@ +#!/bin/bash + +clusterFqdn=integration14.espd.infra-host.com +appOrchEndpoint="https://api.${clusterFqdn}" +enspUser=intel-itep-user +# NOTE(review): avoid committing credentials; allow override via ENSP_PASSWORD env var +enspPassword=${ENSP_PASSWORD:-ChangeMeOn1stLogin!} 
+project=itep +orchAPIToken=$(curl -s --location --request POST https://keycloak.${clusterFqdn}/realms/master/protocol/openid-connect/token \ + -H 'Content-Type: application/x-www-form-urlencoded' \ + -d "username=${enspUser}" \ + -d "password=${enspPassword}" \ + -d "grant_type=password" \ + -d "client_id=system-client" \ + -d "scope=openid" | + jq -r '.access_token' 2>/dev/null) + diff --git a/scripts/deployment-packages/README.md b/scripts/deployment-packages/README.md new file mode 100644 index 00000000..1f94fdb1 --- /dev/null +++ b/scripts/deployment-packages/README.md @@ -0,0 +1,22 @@ +# Dummy App Packages for Scale Testing + +The Deployment Packages in this directory can be used to deploy a "dummy app" +to edge clusters. The "dummy app" Helm chart installs only a Service (no Pods) +so it consumes almost no edge resources. The "dummy app" is useful for App Orch +scale testing. + +To load the Deployment Packages, the ["catalog" CLI tool](https://github.com/intel-innersource/frameworks.edge.one-intel-edge.maestro-app.cli.git) is recommended. 
+The following example shows how to load the Deployment Packages into the orchestrator's +Catalog service using the catalog CLI: + +```bash +$ export FQDN=integration12.maestro.intel.com +$ catalog login -v --trust-cert=true --keycloak https://keycloak.${FQDN}/realms/master ${USER} ${PASS} +$ catalog --catalog-endpoint https://app-orch.${FQDN} load dummy-app-package/ +$ catalog --catalog-endpoint https://app-orch.${FQDN} load ten-dummy-apps/ +$ catalog --catalog-endpoint https://app-orch.${FQDN} list deployment-packages +Publisher Name Name Display Name Version Default Profile Is Deployed Is Visible Application Count +default dummy-app-package Dummy app 0.0.1 default-profile false false 1 +default dummy-app-package Dummy app 0.0.2 default-profile false false 1 +default ten-dummy-apps 10 dummy apps 0.0.1 default-profile false false 10 +``` diff --git a/scripts/deployment-packages/dummy-app-package/app-dummy-0.0.1.yaml b/scripts/deployment-packages/dummy-app-package/app-dummy-0.0.1.yaml new file mode 100644 index 00000000..762f79f4 --- /dev/null +++ b/scripts/deployment-packages/dummy-app-package/app-dummy-0.0.1.yaml @@ -0,0 +1,20 @@ +# SPDX-FileCopyrightText: (C) 2023 Intel Corporation +# SPDX-License-Identifier: LicenseRef-Intel +--- +specSchema: "Application" +schemaVersion: "0.1" +$schema: "https://schema.intel.com/catalog.orchestrator/0.1/schema" + +name: dummy-app +version: 0.0.1 +description: "Dummy app" + +# CLI tool will look up registry by name and convert to an ID. 
+ +helmRegistry: "harbor-helm-oci" +chartName: "scale-test-dummy-app" +chartVersion: "0.0.1" + +profiles: + - name: "default" + valuesFileName: "values-dummy-app-0.0.1.yaml" diff --git a/scripts/deployment-packages/dummy-app-package/dp-dummy-0.0.1.yaml b/scripts/deployment-packages/dummy-app-package/dp-dummy-0.0.1.yaml new file mode 100644 index 00000000..5cecd94f --- /dev/null +++ b/scripts/deployment-packages/dummy-app-package/dp-dummy-0.0.1.yaml @@ -0,0 +1,22 @@ +# SPDX-FileCopyrightText: (C) 2023 Intel Corporation +# SPDX-License-Identifier: LicenseRef-Intel +--- +specSchema: "DeploymentPackage" +schemaVersion: "0.1" +$schema: "https://schema.intel.com/catalog.orchestrator/0.1/schema" + +name: "dummy-app-package" +displayName: "Dummy app" +description: "Dummy app for scale testing" +version: "0.0.1" + +applications: + - name: dummy-app + version: 0.0.1 + +deploymentProfiles: + - name: "default-profile" + applicationProfiles: + - application: "dummy-app" + profile: "default" + diff --git a/scripts/deployment-packages/dummy-app-package/registry-sandbox.yaml b/scripts/deployment-packages/dummy-app-package/registry-sandbox.yaml new file mode 100644 index 00000000..5ee62513 --- /dev/null +++ b/scripts/deployment-packages/dummy-app-package/registry-sandbox.yaml @@ -0,0 +1,118 @@ +# SPDX-FileCopyrightText: (C) 2023 Intel Corporation +# SPDX-License-Identifier: LicenseRef-Intel +--- +specSchema: "Registry" +schemaVersion: "0.1" +$schema: "https://schema.intel.com/catalog.orchestrator/0.1/schema" + +name: "intel-sandbox" +description: "The Intel amr sandbox registry" + +type: "HELM" + +rootUrl: "oci://amr-registry-pre.caas.intel.com/one-intel-edge-sandbox" +caCerts: |- + -----BEGIN CERTIFICATE----- + MIIF1DCCA7ygAwIBAgITEwDfYhzj3aRqyZZkEQACAN9iHDANBgkqhkiG9w0BAQsF + ADBQMQswCQYDVQQGEwJVUzEaMBgGA1UEChMRSW50ZWwgQ29ycG9yYXRpb24xJTAj + BgNVBAMTHEludGVsIEludGVybmFsIElzc3VpbmcgQ0EgNUEwHhcNMjMwNzEwMTYy + ODE5WhcNMjQwNzA0MTYyODE5WjBtMQswCQYDVQQGEwJVUzETMBEGA1UECBMKQ2Fs + 
aWZvcm5pYTEUMBIGA1UEBxMLU2FudGEgQ2xhcmExGjAYBgNVBAoTEUludGVsIENv + cnBvcmF0aW9uMRcwFQYDVQQDEw5jYWFzLmludGVsLmNvbTCCASIwDQYJKoZIhvcN + AQEBBQADggEPADCCAQoCggEBAOq0YEn3C7eHngujk2sMjuK9GgogEzo90TKqhxuU + hA1FvVdgDTdoBwuuCYw3dmdYvXbnNij5nRuuRV7Pv4swsXEjp3p7Mtu0RhCkECOq + Jl/T5IAaB4AQtVJ9jr2AItsysESRTb9xefvZgDerQOYZgj/l7V82TbTvPW5hBPtO + v8RHH4V4vTu6IJgju8occ8ZXyHlH4T20GLgUytEKUP9hpGcyJfo/76UDUT8ddz7e + jq7JENQakZfAwAMIgID4tPfFrib5lrph1XwZ3O19AYjcPu2M1nmFZm10clG7w8he + a911wmFuGN2DjrVO8tlkRsheFiJwGkT47+kAXXxgRk8hw4kCAwEAAaOCAYgwggGE + MCsGA1UdEQQkMCKCECouY2Fhcy5pbnRlbC5jb22CDmNhYXMuaW50ZWwuY29tMB0G + A1UdDgQWBBS582Aw1fECTfvAlHd7zvFnx9EmtjAfBgNVHSMEGDAWgBRpkJZpxwBp + ltt7XZqP9jrqKJStJjA6BgNVHR8EMzAxMC+gLaArhilodHRwOi8vcGtpLmludGVs + LmNvbS9jcmwvSW50ZWxDQTVBKDIpLmNybDBFBggrBgEFBQcBAQQ5MDcwNQYIKwYB + BQUHMAKGKWh0dHA6Ly9wa2kuaW50ZWwuY29tL2NydC9JbnRlbENBNUEoMikuY3J0 + MAsGA1UdDwQEAwIFoDA9BgkrBgEEAYI3FQcEMDAuBiYrBgEEAYI3FQiGw4x1hJnl + UYP9gSiFjp9TgpHACWeC7d1OgbvMdwIBZAIBITAdBgNVHSUEFjAUBggrBgEFBQcD + AQYIKwYBBQUHAwIwJwYJKwYBBAGCNxUKBBowGDAKBggrBgEFBQcDATAKBggrBgEF + BQcDAjANBgkqhkiG9w0BAQsFAAOCAgEAE+qNZecRfBP/Plso6qU2hcaX4VF/QsLH + /CYNQS8aG8VpcKAfW5/6M8J13aF5PGSNIlTGe3nYRz5mXvatPsdeEZ1SMoX7Hnn7 + 3M7/UjnPqTy7d6uIAUPLd44H/Bm1x3NeDsowGKPYV0aYMBQbgA8brCjOWQCaD8ZY + 2BnZmuDvtHVMULlTAt62RJyOzXDHOuQgF9gx48dJPeyatH5yaV1kNCAtvho1x/49 + bCyDBCob3T/ARyuuqWNgZ1fpaS/IWR3LLh7jwT38B20AZVn3d7+Hv2BcdHHWR/2n + BrVtoroZej5ST/HDDxqJWsMQioR9fc8uoyT5oDZ/ZZ6YcVV3YrCk3QUkyywkl+AA + JIlemsVjyGtu5PprtW2R7iHYXi5N/M5SS0k4qpWfrB2O8sdMEixTpPh42RofIUSB + fmG7qfbnqOsF3fjASiBMTovLN4lNewTlLRK0eudfMwsJ/GXHGGlavaTOIOUpOzQu + Fc/hSq1USMODME+foqWNMdWXnTltH47sEtshgeLQKggFILzzbD5tuRllapWOlS7O + WOLjk5Gw230WtvS6qbGjG9zp358FB9nX43yTMuxUyz1g4EYLpZc6ojBa5vjeYICS + gQj8AV0jDDTjRD6DYFwZSYJEUnj3/Eug9trlypiEM+QblmQbLBoSeJmzhIckzAaD + UJPcnUL35xk= + -----END CERTIFICATE----- + -----BEGIN CERTIFICATE----- + MIIG1DCCBLygAwIBAgITFAAAAAfPx1upej2GNwAAAAAABzANBgkqhkiG9w0BAQsF + 
ADBIMQswCQYDVQQGEwJVUzEaMBgGA1UEChMRSW50ZWwgQ29ycG9yYXRpb24xHTAb + BgNVBAMTFEludGVsIFNIQTI1NiBSb290IENBMB4XDTIyMDUxODE3MTgzMloXDTI3 + MDUxODE3MjgzMlowUDELMAkGA1UEBhMCVVMxGjAYBgNVBAoTEUludGVsIENvcnBv + cmF0aW9uMSUwIwYDVQQDExxJbnRlbCBJbnRlcm5hbCBJc3N1aW5nIENBIDVBMIIC + IjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAySt2dR9sybkzvglF+csDg4lb + uGF75wT19yrOh2Pkim39PxRl6PEL3tEzbGRgPEcEqPAFtp5F8g2anidv7MMmoReP + n2ffpwFgM/R6AcfX5FuugsfcLmjMEq6xlVsXKO2sbOjPiOyN1lWlkgpCMCB2fFFb + BJjLvCThQ4orQb66LqzzwIGVt1GF/3+dS+C8PNDWXYaIVY4u2HMY1YBsICCIpk1k + pj7rBF0rc5dJ8lY75pRNVwZDGh7ArvNfT/P4NLTaLwtyccTl52606Oaa31IEOeON + 5SOqRTxKi9zt4fAc0qYraFiEpfc8Yn4p3mKFnqwF+JatZA4I1wFd+uhDvapY4q9S + 4dkFPS3Ag4PeR0lldnlhOjIiso0Qe7CUCqVrci/vjIuVw8i32W0iHo7YKWAnAHxI + uS9SP0WBj3p6SqTgDuD7iLYRpfYzWWCtrLjWaM/GrbEZEKWt0Y+f03nOU828+MQV + cvtfIhF5zRcLmpz5OxOTiit2wH7oKdzglWQsqev2Bf/AR3+DkUfYMx04N57Rv3sX + Smfa+HuXN0vuKle7fcVj3VelZaBA7FtB46A3Zh1IAG5hLGpLnwaj96CvoqE3iz7V + 0+3QOWuuIqbDvCQ93yE9lLhFkje5huv/yuzbppkIMyW71j3Zi4CDR2f3OuKB1Ib7 + ZAICUWOe4KaIPktYKMsCAwEAAaOCAa0wggGpMBIGCSsGAQQBgjcVAQQFAgMCAAIw + IwYJKwYBBAGCNxUCBBYEFCkdV505xBCictvRW45uqPMFQtiUMB0GA1UdDgQWBBRp + kJZpxwBpltt7XZqP9jrqKJStJjBEBgNVHSAEPTA7MDkGCyqGSIb4TQEFAWUBMCow + KAYIKwYBBQUHAgEWHGh0dHA6Ly9wa2kuaW50ZWwuY29tL2Nwcy5wZGYwGQYJKwYB + BAGCNxQCBAweCgBTAHUAYgBDAEEwCwYDVR0PBAQDAgGGMA8GA1UdEwEB/wQFMAMB + Af8wHwYDVR0jBBgwFoAUiZ/Qsu0gqaY+Smaq8h6CsNYsCTYwPwYDVR0fBDgwNjA0 + oDKgMIYuaHR0cDovL3BraS5pbnRlbC5jb20vY3JsL0ludGVsU0hBMjU2Um9vdENB + LmNybDBuBggrBgEFBQcBAQRiMGAwOgYIKwYBBQUHMAKGLmh0dHA6Ly9wa2kuaW50 + ZWwuY29tL2NydC9JbnRlbFNIQTI1NlJvb3RDQS5jcnQwIgYIKwYBBQUHMAGGFmh0 + dHA6Ly9PQ1NQLmludGVsLmNvbS8wDQYJKoZIhvcNAQELBQADggIBAMEr4lTXCi6h + CQfmlCP1wr3z6BfUpzfFfEMqB1YAaubW0d6oF0f8i5LSJUxPzb165DcPRVj1vxR1 + vYmcYuIEtoG5Y0OLoVI57Qra8l019lb/ell52CHIW9bsy2QblGpuR38jy+rJZv1r + 520YQROMHRKyNvramzOzWxIIVcTunK8xIPjBXuhVrihZRKAQau3t5HOXKUiX7cJ0 + epIaUEYk2jbMgKBWRwOgJQ424wTB/Ek/y4U3Kk59h6DW2jQD6e7U8Tdnm9FnfVTo + 
6zEJaJiZqQi7kVMYqLvnlM6kcV6cqfHL79VWXHa4PKgFFphF/ie0iQR/fwVpKfZf + jr9xgap+moUsMqYjyV8n5/Eqx3a3k4zSZeYulxqU00K1zEJDE1501zNxFmgaVd/K + MZVXsKQ23QZDm72Rb0UYM0C/m2w3LCZnMNcgNZ5pZv+kUanRaoOTEBu9UqDNP4Ge + gdqJmOCXHkGXvamBsfsa3UkXjcJnPo0b83XVXiF2vYNeo1Ikz+Atmm4HV/usBcLG + UQhIrf0sB7WfJ8US+lLkcAeVYpxZihRI9vVq/SwyAJiNSKC7N9siiXEVtxzERTSc + 5ZpHOi0E3an4Gjpa6mzzL1qgmc+bozsV40SUGPKhqrIQtFVQY9DbzDNPZ3aGQdbN + QHsQpcp2KxoBAdDT7L9zSOjUvDWJCLlV + -----END CERTIFICATE----- + -----BEGIN CERTIFICATE----- + MIIFazCCA1OgAwIBAgIQVjEnZqqj1qVKUoaZgBc18DANBgkqhkiG9w0BAQsFADBI + MQswCQYDVQQGEwJVUzEaMBgGA1UEChMRSW50ZWwgQ29ycG9yYXRpb24xHTAbBgNV + BAMTFEludGVsIFNIQTI1NiBSb290IENBMB4XDTE1MDkxNjIwMDAwNVoXDTM1MDkx + NjIwMDAwNVowSDELMAkGA1UEBhMCVVMxGjAYBgNVBAoTEUludGVsIENvcnBvcmF0 + aW9uMR0wGwYDVQQDExRJbnRlbCBTSEEyNTYgUm9vdCBDQTCCAiIwDQYJKoZIhvcN + AQEBBQADggIPADCCAgoCggIBANEHLD4x7o4xKCDYTwXGMis1qob+uBlMR17Pr11i + w/NERAx4nahGgzTZmX9i3viTtfn5uDX6IJMPdq7N4m+Flr8v4VuAznvnCBGqkUDe + HAPbR6rXU4lMj7lNK7sAWbMuJ8+0a0Sc4wEyNOXYbNzS4pLcq0eL8AjFwSpFNYv1 + NoppB2tpnmTUu+KjymaXcXEubaWuonmHvjqEvzRjicqU1/QMYcoFOw3GP7gNwz2W + nOKhnMSbs8CguBK9eIBdAYOsetdCOEm2s8yYrqeqtBWMVWq95qiRAdmz/rpWBlFl + NwlXpEGSlGII374+E2/gPeOX3x6gUfpAiVeXni7puN4ixQWyCxEK0W+51xDltO1p + 0oFsl9Pt9UWpQqZoA8sFXsTf+IwoAzb9JcGoPlrzfMfuL+qgssF86J4i01drzRK8 + d+NhinCsjLpg+Qw1we/RRyv6CXvFYCVKqtECCYocY+NtJ3jDDtukyWLUfXMdFgTi + Jx6IvEdinfV0RQz+8BZY3sNQQNC66/dPiZWER0TXxonjfrS1ze/6VngUla6L95LC + UwlxpOd5cu3mpbtYvidOwufEQWw1IF+xgVjFx0i5VGg8jRsM3TSR/v3oPqY/Qv3K + 6QFZpspWAJcGsIuqyKfhcGLDNbdU/tiVNZfd2jt0rp40k12Kyig3S4heGMBvdM08 + UOYNAgMBAAGjUTBPMAsGA1UdDwQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1Ud + DgQWBBSJn9Cy7SCppj5KZqryHoKw1iwJNjAQBgkrBgEEAYI3FQEEAwIBADANBgkq + hkiG9w0BAQsFAAOCAgEAIUnVqslRWvl+TARbZHgPe+9XPUDK28BctvNBbCD+Aqxn + 6PwXkxyoCd/piLscVpAzkijTuyeIbH+k2JEkKuKCVabsf0OoRp5Y4jEwpXuPp4bj + WpGnZTm3hwmxNZk3smFzsWgS7wiJRSpSKXiO3p9LEVsukR5RFpuy7uKOdS7ErL3A + sSOqRTuNRGE7d6jk5mcA6LDO8g+YBI/PZQeR0B8iQGi3jrJdMf2GifRValzS0OWP + 
TpEklh1uQVP9kG5uFg6HGqD+crc0Dp4Y6LapanZHWq7qjiL6oRw+/4TLUzONYwcm + o8v4MtgYmlE7Io2S4WKlVwyyHEGc4ki3lgEPxFm1aVyRP5W+DkspX/+xlgLmMxje + bGuIOgcieuTmixWaQJQgDZxjKBYMouI7knnHiCgT184Ed3qyNRwoOxRC8dVy/r4U + 6LmNf73o2yTPlhGhj9b/zEKrKSCrkWK5CMO8B1ihOh+f0nhF+UtoXkceUDmIVsYV + k7xYdoqXXDbr5TmMX6lcsC8XHjOMFOzFczTwVr9nJw8ssOK8s3+t+K14qf33SibJ + I26hieAkPXicvnGc+EVRLRCn5GZrdfrTBZ8KVak4E9lXmq+3OhZ326schF0ZNVTY + geY4cbiNt4uaLKVzYlQu327psq5ZEbjjR/aurhbaJWA2YeMY2SB8I2chNHNhJp8= + -----END CERTIFICATE----- diff --git a/scripts/deployment-packages/dummy-app-package/values-dummy-app-0.0.1.yaml b/scripts/deployment-packages/dummy-app-package/values-dummy-app-0.0.1.yaml new file mode 100644 index 00000000..e69de29b diff --git a/scripts/deployment-packages/nginx-app-local/app-nginx-local-0.1.0.yaml b/scripts/deployment-packages/nginx-app-local/app-nginx-local-0.1.0.yaml new file mode 100644 index 00000000..0e2068b2 --- /dev/null +++ b/scripts/deployment-packages/nginx-app-local/app-nginx-local-0.1.0.yaml @@ -0,0 +1,23 @@ +# SPDX-FileCopyrightText: 2025 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 + +--- +specSchema: "Application" +schemaVersion: "0.1" +$schema: "https://schema.intel.com/catalog.orchestrator/0.1/schema" + +name: nginx-local +version: 0.1.0 +description: "NGINX local" + +# CLI tool will look up registry by name and convert to an ID. 
+ +helmRegistry: "bitnami-helm" +imageRegistry: "harbor-docker-oci" +chartName: "nginx" +chartVersion: "15.9.3" + +profiles: + - name: "default" + valuesFileName: "values-nginx-local-1-0.1.0.yaml" diff --git a/scripts/deployment-packages/nginx-app-local/dp-nginx-local-0.1.0.yaml b/scripts/deployment-packages/nginx-app-local/dp-nginx-local-0.1.0.yaml new file mode 100644 index 00000000..0427d603 --- /dev/null +++ b/scripts/deployment-packages/nginx-app-local/dp-nginx-local-0.1.0.yaml @@ -0,0 +1,26 @@ +# SPDX-FileCopyrightText: 2025 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 + +--- +specSchema: "DeploymentPackage" +schemaVersion: "0.1" +$schema: "https://schema.intel.com/catalog.orchestrator/0.1/schema" + +name: "nginx-app-local" +displayName: "NGINX-local" +description: "NGINX-local" +version: "0.1.0" + +# CLI tool will also have to contend with converting application (name, version, publisher) +# tuple into an application ID. + +applications: + - name: nginx-local + version: 0.1.0 + +deploymentProfiles: + - name: "testing-local-default" + applicationProfiles: + - application: "nginx-local" + profile: "default" diff --git a/scripts/deployment-packages/nginx-app-local/values-nginx-local-1-0.1.0.yaml b/scripts/deployment-packages/nginx-app-local/values-nginx-local-1-0.1.0.yaml new file mode 100644 index 00000000..5d42cd63 --- /dev/null +++ b/scripts/deployment-packages/nginx-app-local/values-nginx-local-1-0.1.0.yaml @@ -0,0 +1,18 @@ +# SPDX-FileCopyrightText: 2025 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +--- +global: + initial_admin_user_password: test-required + optional_user_password: test-optional +image: + pullSecret: + - name: "%GeneratedDockerCredential%" +service: + type: ClusterIP + annotations: + service-proxy.app.orchestrator.io/ports: "80" +replicaCount: 1 +machine: + admin: + ip_address: 1.2.3.4 diff --git a/scripts/deployment-packages/nginx-app/app-nginx-0.1.0.yaml 
b/scripts/deployment-packages/nginx-app/app-nginx-0.1.0.yaml new file mode 100644 index 00000000..95ff7152 --- /dev/null +++ b/scripts/deployment-packages/nginx-app/app-nginx-0.1.0.yaml @@ -0,0 +1,23 @@ +# SPDX-FileCopyrightText: 2025 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 + +--- +specSchema: "Application" +schemaVersion: "0.1" +$schema: "https://schema.intel.com/catalog.orchestrator/0.1/schema" + +name: nginx +version: 0.1.0 +description: "NGINX" + +# CLI tool will look up registry by name and convert to an ID. + +helmRegistry: "bitnami-helm" +imageRegistry: "harbor-oci-docker" +chartName: "nginx" +chartVersion: "15.9.3" + +profiles: + - name: "default" + valuesFileName: "values-nginx-1-0.1.0.yaml" diff --git a/scripts/deployment-packages/nginx-app/dp-nginx-0.1.0.yaml b/scripts/deployment-packages/nginx-app/dp-nginx-0.1.0.yaml new file mode 100644 index 00000000..1f0482e2 --- /dev/null +++ b/scripts/deployment-packages/nginx-app/dp-nginx-0.1.0.yaml @@ -0,0 +1,26 @@ +# SPDX-FileCopyrightText: 2025 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 + +--- +specSchema: "DeploymentPackage" +schemaVersion: "0.1" +$schema: "https://schema.intel.com/catalog.orchestrator/0.1/schema" + +name: "nginx-app" +displayName: "NGINX" +description: "NGINX" +version: "0.1.0" + +# CLI tool will also have to contend with converting application (name, version, publisher) +# tuple into an application ID. 
+ +applications: + - name: nginx + version: 0.1.0 + +deploymentProfiles: + - name: "testing-default" + applicationProfiles: + - application: "nginx" + profile: "default" diff --git a/scripts/deployment-packages/nginx-app/values-nginx-1-0.1.0.yaml b/scripts/deployment-packages/nginx-app/values-nginx-1-0.1.0.yaml new file mode 100644 index 00000000..41b48c8c --- /dev/null +++ b/scripts/deployment-packages/nginx-app/values-nginx-1-0.1.0.yaml @@ -0,0 +1,14 @@ +# SPDX-FileCopyrightText: 2025 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 + +--- +global: + initial_admin_user_password: test-required + optional_user_password: test-optional +service: + type: ClusterIP +replicaCount: 1 +machine: + admin: + ip_address: 1.2.3.4 diff --git a/scripts/deployment-packages/ten-dummy-apps/app-dummy-01.yaml b/scripts/deployment-packages/ten-dummy-apps/app-dummy-01.yaml new file mode 100644 index 00000000..36977455 --- /dev/null +++ b/scripts/deployment-packages/ten-dummy-apps/app-dummy-01.yaml @@ -0,0 +1,20 @@ +# SPDX-FileCopyrightText: (C) 2023 Intel Corporation +# SPDX-License-Identifier: LicenseRef-Intel +--- +specSchema: "Application" +schemaVersion: "0.1" +$schema: "https://schema.intel.com/catalog.orchestrator/0.1/schema" + +name: dummy-app-1 +version: 0.0.2 +description: "Dummy app 1" + +# CLI tool will look up registry by name and convert to an ID. 
+ +helmRegistry: "intel-sandbox" +chartName: "scale-test-dummy-app" +chartVersion: "0.0.2" + +profiles: + - name: "default" + valuesFileName: "values-01.yaml" diff --git a/scripts/deployment-packages/ten-dummy-apps/app-dummy-02.yaml b/scripts/deployment-packages/ten-dummy-apps/app-dummy-02.yaml new file mode 100644 index 00000000..afd79555 --- /dev/null +++ b/scripts/deployment-packages/ten-dummy-apps/app-dummy-02.yaml @@ -0,0 +1,20 @@ +# SPDX-FileCopyrightText: (C) 2023 Intel Corporation +# SPDX-License-Identifier: LicenseRef-Intel +--- +specSchema: "Application" +schemaVersion: "0.1" +$schema: "https://schema.intel.com/catalog.orchestrator/0.1/schema" + +name: dummy-app-2 +version: 0.0.2 +description: "Dummy app 2" + +# CLI tool will look up registry by name and convert to an ID. + +helmRegistry: "intel-sandbox" +chartName: "scale-test-dummy-app" +chartVersion: "0.0.2" + +profiles: + - name: "default" + valuesFileName: "values-02.yaml" diff --git a/scripts/deployment-packages/ten-dummy-apps/app-dummy-03.yaml b/scripts/deployment-packages/ten-dummy-apps/app-dummy-03.yaml new file mode 100644 index 00000000..9b6d1c36 --- /dev/null +++ b/scripts/deployment-packages/ten-dummy-apps/app-dummy-03.yaml @@ -0,0 +1,20 @@ +# SPDX-FileCopyrightText: (C) 2023 Intel Corporation +# SPDX-License-Identifier: LicenseRef-Intel +--- +specSchema: "Application" +schemaVersion: "0.1" +$schema: "https://schema.intel.com/catalog.orchestrator/0.1/schema" + +name: dummy-app-3 +version: 0.0.2 +description: "Dummy app 3" + +# CLI tool will look up registry by name and convert to an ID. 
+ +helmRegistry: "intel-sandbox" +chartName: "scale-test-dummy-app" +chartVersion: "0.0.2" + +profiles: + - name: "default" + valuesFileName: "values-03.yaml" diff --git a/scripts/deployment-packages/ten-dummy-apps/app-dummy-04.yaml b/scripts/deployment-packages/ten-dummy-apps/app-dummy-04.yaml new file mode 100644 index 00000000..70206690 --- /dev/null +++ b/scripts/deployment-packages/ten-dummy-apps/app-dummy-04.yaml @@ -0,0 +1,20 @@ +# SPDX-FileCopyrightText: (C) 2023 Intel Corporation +# SPDX-License-Identifier: LicenseRef-Intel +--- +specSchema: "Application" +schemaVersion: "0.1" +$schema: "https://schema.intel.com/catalog.orchestrator/0.1/schema" + +name: dummy-app-4 +version: 0.0.2 +description: "Dummy app 4" + +# CLI tool will look up registry by name and convert to an ID. + +helmRegistry: "intel-sandbox" +chartName: "scale-test-dummy-app" +chartVersion: "0.0.2" + +profiles: + - name: "default" + valuesFileName: "values-04.yaml" diff --git a/scripts/deployment-packages/ten-dummy-apps/app-dummy-05.yaml b/scripts/deployment-packages/ten-dummy-apps/app-dummy-05.yaml new file mode 100644 index 00000000..c101a87c --- /dev/null +++ b/scripts/deployment-packages/ten-dummy-apps/app-dummy-05.yaml @@ -0,0 +1,20 @@ +# SPDX-FileCopyrightText: (C) 2023 Intel Corporation +# SPDX-License-Identifier: LicenseRef-Intel +--- +specSchema: "Application" +schemaVersion: "0.1" +$schema: "https://schema.intel.com/catalog.orchestrator/0.1/schema" + +name: dummy-app-5 +version: 0.0.2 +description: "Dummy app 5" + +# CLI tool will look up registry by name and convert to an ID. 
+ +helmRegistry: "intel-sandbox" +chartName: "scale-test-dummy-app" +chartVersion: "0.0.2" + +profiles: + - name: "default" + valuesFileName: "values-05.yaml" diff --git a/scripts/deployment-packages/ten-dummy-apps/app-dummy-06.yaml b/scripts/deployment-packages/ten-dummy-apps/app-dummy-06.yaml new file mode 100644 index 00000000..d33581f7 --- /dev/null +++ b/scripts/deployment-packages/ten-dummy-apps/app-dummy-06.yaml @@ -0,0 +1,20 @@ +# SPDX-FileCopyrightText: (C) 2023 Intel Corporation +# SPDX-License-Identifier: LicenseRef-Intel +--- +specSchema: "Application" +schemaVersion: "0.1" +$schema: "https://schema.intel.com/catalog.orchestrator/0.1/schema" + +name: dummy-app-6 +version: 0.0.2 +description: "Dummy app 6" + +# CLI tool will look up registry by name and convert to an ID. + +helmRegistry: "intel-sandbox" +chartName: "scale-test-dummy-app" +chartVersion: "0.0.2" + +profiles: + - name: "default" + valuesFileName: "values-06.yaml" diff --git a/scripts/deployment-packages/ten-dummy-apps/app-dummy-07.yaml b/scripts/deployment-packages/ten-dummy-apps/app-dummy-07.yaml new file mode 100644 index 00000000..53cf8992 --- /dev/null +++ b/scripts/deployment-packages/ten-dummy-apps/app-dummy-07.yaml @@ -0,0 +1,20 @@ +# SPDX-FileCopyrightText: (C) 2023 Intel Corporation +# SPDX-License-Identifier: LicenseRef-Intel +--- +specSchema: "Application" +schemaVersion: "0.1" +$schema: "https://schema.intel.com/catalog.orchestrator/0.1/schema" + +name: dummy-app-7 +version: 0.0.2 +description: "Dummy app 7" + +# CLI tool will look up registry by name and convert to an ID. 
+ +helmRegistry: "intel-sandbox" +chartName: "scale-test-dummy-app" +chartVersion: "0.0.2" + +profiles: + - name: "default" + valuesFileName: "values-07.yaml" diff --git a/scripts/deployment-packages/ten-dummy-apps/app-dummy-08.yaml b/scripts/deployment-packages/ten-dummy-apps/app-dummy-08.yaml new file mode 100644 index 00000000..c170e8ac --- /dev/null +++ b/scripts/deployment-packages/ten-dummy-apps/app-dummy-08.yaml @@ -0,0 +1,20 @@ +# SPDX-FileCopyrightText: (C) 2023 Intel Corporation +# SPDX-License-Identifier: LicenseRef-Intel +--- +specSchema: "Application" +schemaVersion: "0.1" +$schema: "https://schema.intel.com/catalog.orchestrator/0.1/schema" + +name: dummy-app-8 +version: 0.0.2 +description: "Dummy app 8" + +# CLI tool will look up registry by name and convert to an ID. + +helmRegistry: "intel-sandbox" +chartName: "scale-test-dummy-app" +chartVersion: "0.0.2" + +profiles: + - name: "default" + valuesFileName: "values-08.yaml" diff --git a/scripts/deployment-packages/ten-dummy-apps/app-dummy-09.yaml b/scripts/deployment-packages/ten-dummy-apps/app-dummy-09.yaml new file mode 100644 index 00000000..d2915c2e --- /dev/null +++ b/scripts/deployment-packages/ten-dummy-apps/app-dummy-09.yaml @@ -0,0 +1,20 @@ +# SPDX-FileCopyrightText: (C) 2023 Intel Corporation +# SPDX-License-Identifier: LicenseRef-Intel +--- +specSchema: "Application" +schemaVersion: "0.1" +$schema: "https://schema.intel.com/catalog.orchestrator/0.1/schema" + +name: dummy-app-9 +version: 0.0.2 +description: "Dummy app 9" + +# CLI tool will look up registry by name and convert to an ID. 
+ +helmRegistry: "intel-sandbox" +chartName: "scale-test-dummy-app" +chartVersion: "0.0.2" + +profiles: + - name: "default" + valuesFileName: "values-09.yaml" diff --git a/scripts/deployment-packages/ten-dummy-apps/app-dummy-10.yaml b/scripts/deployment-packages/ten-dummy-apps/app-dummy-10.yaml new file mode 100644 index 00000000..3050c298 --- /dev/null +++ b/scripts/deployment-packages/ten-dummy-apps/app-dummy-10.yaml @@ -0,0 +1,20 @@ +# SPDX-FileCopyrightText: (C) 2023 Intel Corporation +# SPDX-License-Identifier: LicenseRef-Intel +--- +specSchema: "Application" +schemaVersion: "0.1" +$schema: "https://schema.intel.com/catalog.orchestrator/0.1/schema" + +name: dummy-app-10 +version: 0.0.2 +description: "Dummy app 10" + +# CLI tool will look up registry by name and convert to an ID. + +helmRegistry: "intel-sandbox" +chartName: "scale-test-dummy-app" +chartVersion: "0.0.2" + +profiles: + - name: "default" + valuesFileName: "values-10.yaml" diff --git a/scripts/deployment-packages/ten-dummy-apps/dp-dummy-0.0.1.yaml b/scripts/deployment-packages/ten-dummy-apps/dp-dummy-0.0.1.yaml new file mode 100644 index 00000000..7de10f4a --- /dev/null +++ b/scripts/deployment-packages/ten-dummy-apps/dp-dummy-0.0.1.yaml @@ -0,0 +1,57 @@ +# SPDX-FileCopyrightText: (C) 2023 Intel Corporation +# SPDX-License-Identifier: LicenseRef-Intel +--- +specSchema: "DeploymentPackage" +schemaVersion: "0.1" +$schema: "https://schema.intel.com/catalog.orchestrator/0.1/schema" + +name: "ten-dummy-apps" +displayName: "10 dummy apps" +description: "10 Dummy apps for scale testing" +version: "0.0.1" + +applications: + - name: dummy-app-1 + version: 0.0.2 + - name: dummy-app-2 + version: 0.0.2 + - name: dummy-app-3 + version: 0.0.2 + - name: dummy-app-4 + version: 0.0.2 + - name: dummy-app-5 + version: 0.0.2 + - name: dummy-app-6 + version: 0.0.2 + - name: dummy-app-7 + version: 0.0.2 + - name: dummy-app-8 + version: 0.0.2 + - name: dummy-app-9 + version: 0.0.2 + - name: dummy-app-10 + version: 0.0.2 
+ +deploymentProfiles: + - name: "default-profile" + applicationProfiles: + - application: "dummy-app-1" + profile: "default" + - application: "dummy-app-2" + profile: "default" + - application: "dummy-app-3" + profile: "default" + - application: "dummy-app-4" + profile: "default" + - application: "dummy-app-5" + profile: "default" + - application: "dummy-app-6" + profile: "default" + - application: "dummy-app-7" + profile: "default" + - application: "dummy-app-8" + profile: "default" + - application: "dummy-app-9" + profile: "default" + - application: "dummy-app-10" + profile: "default" diff --git a/scripts/deployment-packages/ten-dummy-apps/registry-sandbox.yaml b/scripts/deployment-packages/ten-dummy-apps/registry-sandbox.yaml new file mode 100644 index 00000000..5ee62513 --- /dev/null +++ b/scripts/deployment-packages/ten-dummy-apps/registry-sandbox.yaml @@ -0,0 +1,118 @@ +# SPDX-FileCopyrightText: (C) 2023 Intel Corporation +# SPDX-License-Identifier: LicenseRef-Intel +--- +specSchema: "Registry" +schemaVersion: "0.1" +$schema: "https://schema.intel.com/catalog.orchestrator/0.1/schema" + +name: "intel-sandbox" +description: "The Intel amr sandbox registry" + +type: "HELM" + +rootUrl: "oci://amr-registry-pre.caas.intel.com/one-intel-edge-sandbox" +caCerts: |- + -----BEGIN CERTIFICATE----- + MIIF1DCCA7ygAwIBAgITEwDfYhzj3aRqyZZkEQACAN9iHDANBgkqhkiG9w0BAQsF + ADBQMQswCQYDVQQGEwJVUzEaMBgGA1UEChMRSW50ZWwgQ29ycG9yYXRpb24xJTAj + BgNVBAMTHEludGVsIEludGVybmFsIElzc3VpbmcgQ0EgNUEwHhcNMjMwNzEwMTYy + ODE5WhcNMjQwNzA0MTYyODE5WjBtMQswCQYDVQQGEwJVUzETMBEGA1UECBMKQ2Fs + aWZvcm5pYTEUMBIGA1UEBxMLU2FudGEgQ2xhcmExGjAYBgNVBAoTEUludGVsIENv + cnBvcmF0aW9uMRcwFQYDVQQDEw5jYWFzLmludGVsLmNvbTCCASIwDQYJKoZIhvcN + AQEBBQADggEPADCCAQoCggEBAOq0YEn3C7eHngujk2sMjuK9GgogEzo90TKqhxuU + hA1FvVdgDTdoBwuuCYw3dmdYvXbnNij5nRuuRV7Pv4swsXEjp3p7Mtu0RhCkECOq + Jl/T5IAaB4AQtVJ9jr2AItsysESRTb9xefvZgDerQOYZgj/l7V82TbTvPW5hBPtO + v8RHH4V4vTu6IJgju8occ8ZXyHlH4T20GLgUytEKUP9hpGcyJfo/76UDUT8ddz7e + 
jq7JENQakZfAwAMIgID4tPfFrib5lrph1XwZ3O19AYjcPu2M1nmFZm10clG7w8he + a911wmFuGN2DjrVO8tlkRsheFiJwGkT47+kAXXxgRk8hw4kCAwEAAaOCAYgwggGE + MCsGA1UdEQQkMCKCECouY2Fhcy5pbnRlbC5jb22CDmNhYXMuaW50ZWwuY29tMB0G + A1UdDgQWBBS582Aw1fECTfvAlHd7zvFnx9EmtjAfBgNVHSMEGDAWgBRpkJZpxwBp + ltt7XZqP9jrqKJStJjA6BgNVHR8EMzAxMC+gLaArhilodHRwOi8vcGtpLmludGVs + LmNvbS9jcmwvSW50ZWxDQTVBKDIpLmNybDBFBggrBgEFBQcBAQQ5MDcwNQYIKwYB + BQUHMAKGKWh0dHA6Ly9wa2kuaW50ZWwuY29tL2NydC9JbnRlbENBNUEoMikuY3J0 + MAsGA1UdDwQEAwIFoDA9BgkrBgEEAYI3FQcEMDAuBiYrBgEEAYI3FQiGw4x1hJnl + UYP9gSiFjp9TgpHACWeC7d1OgbvMdwIBZAIBITAdBgNVHSUEFjAUBggrBgEFBQcD + AQYIKwYBBQUHAwIwJwYJKwYBBAGCNxUKBBowGDAKBggrBgEFBQcDATAKBggrBgEF + BQcDAjANBgkqhkiG9w0BAQsFAAOCAgEAE+qNZecRfBP/Plso6qU2hcaX4VF/QsLH + /CYNQS8aG8VpcKAfW5/6M8J13aF5PGSNIlTGe3nYRz5mXvatPsdeEZ1SMoX7Hnn7 + 3M7/UjnPqTy7d6uIAUPLd44H/Bm1x3NeDsowGKPYV0aYMBQbgA8brCjOWQCaD8ZY + 2BnZmuDvtHVMULlTAt62RJyOzXDHOuQgF9gx48dJPeyatH5yaV1kNCAtvho1x/49 + bCyDBCob3T/ARyuuqWNgZ1fpaS/IWR3LLh7jwT38B20AZVn3d7+Hv2BcdHHWR/2n + BrVtoroZej5ST/HDDxqJWsMQioR9fc8uoyT5oDZ/ZZ6YcVV3YrCk3QUkyywkl+AA + JIlemsVjyGtu5PprtW2R7iHYXi5N/M5SS0k4qpWfrB2O8sdMEixTpPh42RofIUSB + fmG7qfbnqOsF3fjASiBMTovLN4lNewTlLRK0eudfMwsJ/GXHGGlavaTOIOUpOzQu + Fc/hSq1USMODME+foqWNMdWXnTltH47sEtshgeLQKggFILzzbD5tuRllapWOlS7O + WOLjk5Gw230WtvS6qbGjG9zp358FB9nX43yTMuxUyz1g4EYLpZc6ojBa5vjeYICS + gQj8AV0jDDTjRD6DYFwZSYJEUnj3/Eug9trlypiEM+QblmQbLBoSeJmzhIckzAaD + UJPcnUL35xk= + -----END CERTIFICATE----- + -----BEGIN CERTIFICATE----- + MIIG1DCCBLygAwIBAgITFAAAAAfPx1upej2GNwAAAAAABzANBgkqhkiG9w0BAQsF + ADBIMQswCQYDVQQGEwJVUzEaMBgGA1UEChMRSW50ZWwgQ29ycG9yYXRpb24xHTAb + BgNVBAMTFEludGVsIFNIQTI1NiBSb290IENBMB4XDTIyMDUxODE3MTgzMloXDTI3 + MDUxODE3MjgzMlowUDELMAkGA1UEBhMCVVMxGjAYBgNVBAoTEUludGVsIENvcnBv + cmF0aW9uMSUwIwYDVQQDExxJbnRlbCBJbnRlcm5hbCBJc3N1aW5nIENBIDVBMIIC + IjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAySt2dR9sybkzvglF+csDg4lb + uGF75wT19yrOh2Pkim39PxRl6PEL3tEzbGRgPEcEqPAFtp5F8g2anidv7MMmoReP + 
n2ffpwFgM/R6AcfX5FuugsfcLmjMEq6xlVsXKO2sbOjPiOyN1lWlkgpCMCB2fFFb + BJjLvCThQ4orQb66LqzzwIGVt1GF/3+dS+C8PNDWXYaIVY4u2HMY1YBsICCIpk1k + pj7rBF0rc5dJ8lY75pRNVwZDGh7ArvNfT/P4NLTaLwtyccTl52606Oaa31IEOeON + 5SOqRTxKi9zt4fAc0qYraFiEpfc8Yn4p3mKFnqwF+JatZA4I1wFd+uhDvapY4q9S + 4dkFPS3Ag4PeR0lldnlhOjIiso0Qe7CUCqVrci/vjIuVw8i32W0iHo7YKWAnAHxI + uS9SP0WBj3p6SqTgDuD7iLYRpfYzWWCtrLjWaM/GrbEZEKWt0Y+f03nOU828+MQV + cvtfIhF5zRcLmpz5OxOTiit2wH7oKdzglWQsqev2Bf/AR3+DkUfYMx04N57Rv3sX + Smfa+HuXN0vuKle7fcVj3VelZaBA7FtB46A3Zh1IAG5hLGpLnwaj96CvoqE3iz7V + 0+3QOWuuIqbDvCQ93yE9lLhFkje5huv/yuzbppkIMyW71j3Zi4CDR2f3OuKB1Ib7 + ZAICUWOe4KaIPktYKMsCAwEAAaOCAa0wggGpMBIGCSsGAQQBgjcVAQQFAgMCAAIw + IwYJKwYBBAGCNxUCBBYEFCkdV505xBCictvRW45uqPMFQtiUMB0GA1UdDgQWBBRp + kJZpxwBpltt7XZqP9jrqKJStJjBEBgNVHSAEPTA7MDkGCyqGSIb4TQEFAWUBMCow + KAYIKwYBBQUHAgEWHGh0dHA6Ly9wa2kuaW50ZWwuY29tL2Nwcy5wZGYwGQYJKwYB + BAGCNxQCBAweCgBTAHUAYgBDAEEwCwYDVR0PBAQDAgGGMA8GA1UdEwEB/wQFMAMB + Af8wHwYDVR0jBBgwFoAUiZ/Qsu0gqaY+Smaq8h6CsNYsCTYwPwYDVR0fBDgwNjA0 + oDKgMIYuaHR0cDovL3BraS5pbnRlbC5jb20vY3JsL0ludGVsU0hBMjU2Um9vdENB + LmNybDBuBggrBgEFBQcBAQRiMGAwOgYIKwYBBQUHMAKGLmh0dHA6Ly9wa2kuaW50 + ZWwuY29tL2NydC9JbnRlbFNIQTI1NlJvb3RDQS5jcnQwIgYIKwYBBQUHMAGGFmh0 + dHA6Ly9PQ1NQLmludGVsLmNvbS8wDQYJKoZIhvcNAQELBQADggIBAMEr4lTXCi6h + CQfmlCP1wr3z6BfUpzfFfEMqB1YAaubW0d6oF0f8i5LSJUxPzb165DcPRVj1vxR1 + vYmcYuIEtoG5Y0OLoVI57Qra8l019lb/ell52CHIW9bsy2QblGpuR38jy+rJZv1r + 520YQROMHRKyNvramzOzWxIIVcTunK8xIPjBXuhVrihZRKAQau3t5HOXKUiX7cJ0 + epIaUEYk2jbMgKBWRwOgJQ424wTB/Ek/y4U3Kk59h6DW2jQD6e7U8Tdnm9FnfVTo + 6zEJaJiZqQi7kVMYqLvnlM6kcV6cqfHL79VWXHa4PKgFFphF/ie0iQR/fwVpKfZf + jr9xgap+moUsMqYjyV8n5/Eqx3a3k4zSZeYulxqU00K1zEJDE1501zNxFmgaVd/K + MZVXsKQ23QZDm72Rb0UYM0C/m2w3LCZnMNcgNZ5pZv+kUanRaoOTEBu9UqDNP4Ge + gdqJmOCXHkGXvamBsfsa3UkXjcJnPo0b83XVXiF2vYNeo1Ikz+Atmm4HV/usBcLG + UQhIrf0sB7WfJ8US+lLkcAeVYpxZihRI9vVq/SwyAJiNSKC7N9siiXEVtxzERTSc + 5ZpHOi0E3an4Gjpa6mzzL1qgmc+bozsV40SUGPKhqrIQtFVQY9DbzDNPZ3aGQdbN + QHsQpcp2KxoBAdDT7L9zSOjUvDWJCLlV + -----END 
CERTIFICATE----- + -----BEGIN CERTIFICATE----- + MIIFazCCA1OgAwIBAgIQVjEnZqqj1qVKUoaZgBc18DANBgkqhkiG9w0BAQsFADBI + MQswCQYDVQQGEwJVUzEaMBgGA1UEChMRSW50ZWwgQ29ycG9yYXRpb24xHTAbBgNV + BAMTFEludGVsIFNIQTI1NiBSb290IENBMB4XDTE1MDkxNjIwMDAwNVoXDTM1MDkx + NjIwMDAwNVowSDELMAkGA1UEBhMCVVMxGjAYBgNVBAoTEUludGVsIENvcnBvcmF0 + aW9uMR0wGwYDVQQDExRJbnRlbCBTSEEyNTYgUm9vdCBDQTCCAiIwDQYJKoZIhvcN + AQEBBQADggIPADCCAgoCggIBANEHLD4x7o4xKCDYTwXGMis1qob+uBlMR17Pr11i + w/NERAx4nahGgzTZmX9i3viTtfn5uDX6IJMPdq7N4m+Flr8v4VuAznvnCBGqkUDe + HAPbR6rXU4lMj7lNK7sAWbMuJ8+0a0Sc4wEyNOXYbNzS4pLcq0eL8AjFwSpFNYv1 + NoppB2tpnmTUu+KjymaXcXEubaWuonmHvjqEvzRjicqU1/QMYcoFOw3GP7gNwz2W + nOKhnMSbs8CguBK9eIBdAYOsetdCOEm2s8yYrqeqtBWMVWq95qiRAdmz/rpWBlFl + NwlXpEGSlGII374+E2/gPeOX3x6gUfpAiVeXni7puN4ixQWyCxEK0W+51xDltO1p + 0oFsl9Pt9UWpQqZoA8sFXsTf+IwoAzb9JcGoPlrzfMfuL+qgssF86J4i01drzRK8 + d+NhinCsjLpg+Qw1we/RRyv6CXvFYCVKqtECCYocY+NtJ3jDDtukyWLUfXMdFgTi + Jx6IvEdinfV0RQz+8BZY3sNQQNC66/dPiZWER0TXxonjfrS1ze/6VngUla6L95LC + UwlxpOd5cu3mpbtYvidOwufEQWw1IF+xgVjFx0i5VGg8jRsM3TSR/v3oPqY/Qv3K + 6QFZpspWAJcGsIuqyKfhcGLDNbdU/tiVNZfd2jt0rp40k12Kyig3S4heGMBvdM08 + UOYNAgMBAAGjUTBPMAsGA1UdDwQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1Ud + DgQWBBSJn9Cy7SCppj5KZqryHoKw1iwJNjAQBgkrBgEEAYI3FQEEAwIBADANBgkq + hkiG9w0BAQsFAAOCAgEAIUnVqslRWvl+TARbZHgPe+9XPUDK28BctvNBbCD+Aqxn + 6PwXkxyoCd/piLscVpAzkijTuyeIbH+k2JEkKuKCVabsf0OoRp5Y4jEwpXuPp4bj + WpGnZTm3hwmxNZk3smFzsWgS7wiJRSpSKXiO3p9LEVsukR5RFpuy7uKOdS7ErL3A + sSOqRTuNRGE7d6jk5mcA6LDO8g+YBI/PZQeR0B8iQGi3jrJdMf2GifRValzS0OWP + TpEklh1uQVP9kG5uFg6HGqD+crc0Dp4Y6LapanZHWq7qjiL6oRw+/4TLUzONYwcm + o8v4MtgYmlE7Io2S4WKlVwyyHEGc4ki3lgEPxFm1aVyRP5W+DkspX/+xlgLmMxje + bGuIOgcieuTmixWaQJQgDZxjKBYMouI7knnHiCgT184Ed3qyNRwoOxRC8dVy/r4U + 6LmNf73o2yTPlhGhj9b/zEKrKSCrkWK5CMO8B1ihOh+f0nhF+UtoXkceUDmIVsYV + k7xYdoqXXDbr5TmMX6lcsC8XHjOMFOzFczTwVr9nJw8ssOK8s3+t+K14qf33SibJ + I26hieAkPXicvnGc+EVRLRCn5GZrdfrTBZ8KVak4E9lXmq+3OhZ326schF0ZNVTY + geY4cbiNt4uaLKVzYlQu327psq5ZEbjjR/aurhbaJWA2YeMY2SB8I2chNHNhJp8= + 
-----END CERTIFICATE----- diff --git a/scripts/deployment-packages/ten-dummy-apps/values-01.yaml b/scripts/deployment-packages/ten-dummy-apps/values-01.yaml new file mode 100644 index 00000000..e4299a06 --- /dev/null +++ b/scripts/deployment-packages/ten-dummy-apps/values-01.yaml @@ -0,0 +1,4 @@ +service: + name: dummy-svc-1 + type: ClusterIP + port: 8081 \ No newline at end of file diff --git a/scripts/deployment-packages/ten-dummy-apps/values-02.yaml b/scripts/deployment-packages/ten-dummy-apps/values-02.yaml new file mode 100644 index 00000000..292e2fd9 --- /dev/null +++ b/scripts/deployment-packages/ten-dummy-apps/values-02.yaml @@ -0,0 +1,4 @@ +service: + name: dummy-svc-2 + type: ClusterIP + port: 8082 \ No newline at end of file diff --git a/scripts/deployment-packages/ten-dummy-apps/values-03.yaml b/scripts/deployment-packages/ten-dummy-apps/values-03.yaml new file mode 100644 index 00000000..256215f1 --- /dev/null +++ b/scripts/deployment-packages/ten-dummy-apps/values-03.yaml @@ -0,0 +1,4 @@ +service: + name: dummy-svc-3 + type: ClusterIP + port: 8083 \ No newline at end of file diff --git a/scripts/deployment-packages/ten-dummy-apps/values-04.yaml b/scripts/deployment-packages/ten-dummy-apps/values-04.yaml new file mode 100644 index 00000000..72b3168e --- /dev/null +++ b/scripts/deployment-packages/ten-dummy-apps/values-04.yaml @@ -0,0 +1,4 @@ +service: + name: dummy-svc-4 + type: ClusterIP + port: 8084 \ No newline at end of file diff --git a/scripts/deployment-packages/ten-dummy-apps/values-05.yaml b/scripts/deployment-packages/ten-dummy-apps/values-05.yaml new file mode 100644 index 00000000..c1a706ba --- /dev/null +++ b/scripts/deployment-packages/ten-dummy-apps/values-05.yaml @@ -0,0 +1,4 @@ +service: + name: dummy-svc-5 + type: ClusterIP + port: 8085 \ No newline at end of file diff --git a/scripts/deployment-packages/ten-dummy-apps/values-06.yaml b/scripts/deployment-packages/ten-dummy-apps/values-06.yaml new file mode 100644 index 
00000000..e609fdd4 --- /dev/null +++ b/scripts/deployment-packages/ten-dummy-apps/values-06.yaml @@ -0,0 +1,4 @@ +service: + name: dummy-svc-6 + type: ClusterIP + port: 8086 \ No newline at end of file diff --git a/scripts/deployment-packages/ten-dummy-apps/values-07.yaml b/scripts/deployment-packages/ten-dummy-apps/values-07.yaml new file mode 100644 index 00000000..0c28c92b --- /dev/null +++ b/scripts/deployment-packages/ten-dummy-apps/values-07.yaml @@ -0,0 +1,4 @@ +service: + name: dummy-svc-7 + type: ClusterIP + port: 8087 \ No newline at end of file diff --git a/scripts/deployment-packages/ten-dummy-apps/values-08.yaml b/scripts/deployment-packages/ten-dummy-apps/values-08.yaml new file mode 100644 index 00000000..5d29f466 --- /dev/null +++ b/scripts/deployment-packages/ten-dummy-apps/values-08.yaml @@ -0,0 +1,4 @@ +service: + name: dummy-svc-8 + type: ClusterIP + port: 8088 \ No newline at end of file diff --git a/scripts/deployment-packages/ten-dummy-apps/values-09.yaml b/scripts/deployment-packages/ten-dummy-apps/values-09.yaml new file mode 100644 index 00000000..3beade44 --- /dev/null +++ b/scripts/deployment-packages/ten-dummy-apps/values-09.yaml @@ -0,0 +1,4 @@ +service: + name: dummy-svc-9 + type: ClusterIP + port: 8089 \ No newline at end of file diff --git a/scripts/deployment-packages/ten-dummy-apps/values-10.yaml b/scripts/deployment-packages/ten-dummy-apps/values-10.yaml new file mode 100644 index 00000000..d22b487d --- /dev/null +++ b/scripts/deployment-packages/ten-dummy-apps/values-10.yaml @@ -0,0 +1,4 @@ +service: + name: dummy-svc-10 + type: ClusterIP + port: 8090 \ No newline at end of file diff --git a/scripts/k6-scripts/adm-api-latency.js b/scripts/k6-scripts/adm-api-latency.js new file mode 100644 index 00000000..cb4e5a0d --- /dev/null +++ b/scripts/k6-scripts/adm-api-latency.js @@ -0,0 +1,85 @@ +import { check } from 'k6'; +import http from 'k6/http'; + +// Define some constants and global variables. 
+let timeDurationBetweenScenarioInMin = 1; +let deploymentListFromADMStartTime = 0; +let deploymentSummaryFromAdmStartTime = timeDurationBetweenScenarioInMin + deploymentListFromADMStartTime +let clusterListFromAdmStartTime = timeDurationBetweenScenarioInMin + deploymentSummaryFromAdmStartTime + +// Dynamically create the scenario configuration for K6 runner +let scenariosDynamic = {}; +let numOfScenarios = 0; + +// Scenario for querying deployment list from ADM +scenariosDynamic[numOfScenarios++] = { + executor: 'constant-vus', + exec: 'deploymentListFromADM', + vus: 10, + startTime: deploymentSummaryFromAdmStartTime.toString() + 'm', + duration: timeDurationBetweenScenarioInMin.toString() + 'm', +} + +// Scenario for querying deployment summary from ADM +scenariosDynamic[numOfScenarios++] = { + executor: 'constant-vus', + exec: 'deploymentSummaryFromADM', + vus: 10, + startTime: deploymentListFromADMStartTime.toString() + 'm', + duration: timeDurationBetweenScenarioInMin.toString() + 'm', +} + +// Scenario for querying cluster list from ADM +scenariosDynamic[numOfScenarios++] = { + executor: 'constant-vus', + exec: 'clusterListFromADM', + vus: 10, + startTime: clusterListFromAdmStartTime.toString() + 'm', + duration: timeDurationBetweenScenarioInMin.toString() + 'm', +} + + + +// Set k6 test options +export const options = { + scenarios: scenariosDynamic, + // thresholds: Defines the checks to perform at the end of the tests + thresholds: { + http_req_failed: ['rate<0.01'], // http errors should be less than 1% + 'http_req_duration{type:admApiStats}': ['p(95)<1000', 'avg<500'], // 95% of Cluster Query requests from ECM should be take below 1000ms, avg is less 500ms + 'http_req_failed{type:admApiStats}': ['rate<0.01'], // http errors should be less than 1% + }, +}; + +// Define Http header for App List query from ADM +const httpOptionsAdmQuery = { + headers: { + Authorization: `Bearer ${__ENV.API_TOKEN}`, + 'Content-Type': 'application/json', + }, + tags: {type: 
'admApiStats'}, +}; + +// deploymentListFromADM queries the deployment List from ADM NB API +export function deploymentListFromADM() { + let response = http.get(`https://api.${__ENV.MY_HOSTNAME}/v1/projects/${__ENV.PROJECT}/appdeployment/deployments`, httpOptionsAdmQuery); + check(response, { + "status is OK": (r) => r && r.status === 200, + }); +} + +// deploymentSummaryFromADM queries the deployment Summary of deployments ADM NB API +export function deploymentSummaryFromADM() { + let response = http.get(`https://api.${__ENV.MY_HOSTNAME}/v1/projects/${__ENV.PROJECT}/summary/deployments_status`, httpOptionsAdmQuery); + check(response, { + "status is OK": (r) => r && r.status === 200, + }); +} + +// clusterListFromADM queries the clusters from ADM NB API +export function clusterListFromADM() { + let response = http.get(`https://api.${__ENV.MY_HOSTNAME}/v1/projects/${__ENV.PROJECT}/appdeployment/clusters`, httpOptionsAdmQuery); + check(response, { + "status is OK": (r) => r && r.status === 200, + }); +} diff --git a/scripts/k6-scripts/arm-asp-api-latency.js b/scripts/k6-scripts/arm-asp-api-latency.js new file mode 100644 index 00000000..1f892874 --- /dev/null +++ b/scripts/k6-scripts/arm-asp-api-latency.js @@ -0,0 +1,154 @@ +import {check, fail} from 'k6'; +import http from 'k6/http'; +import exec from "k6/execution"; + +/* +HOW TO RUN +---------- + +export CO_CLUSTER_FQDN=integration12.maestro.intel.com # Change per your requirement + +export API_TOKEN=$(curl -k -s -X POST "https://keycloak.${CO_CLUSTER_FQDN}/realms/master/protocol/openid-connect/token" -d "username=all-groups-example-user" -d 'password=ChangeMeOn1stLogin!' 
-d "grant_type=password" -d "client_id=system-client" -d "scope=openid" | jq -r ".access_token") + + +k6 run --env MY_HOSTNAME=integration12.maestro.intel.com --env API_TOKEN=$API_TOKEN --env APP_ID="b-8f3c9900-6d02-5e84-9e8d-edf2606d1810" --env USERS=10 --env APPS_PER_USER=100 asp-container-api-latency.js + +NOTE on Options to the script +- API_TOKEN: Keycloak JWT token +- APP_ID: Fleet Bundle ID Of the App +- USERS: Number of concurrent users +- APPS_PER_USER: No of app endpoints each user will access during the test + */ + + + +// Define some constants and global variables. +let timeDurationBetweenScenarioInMin = 10; +let containerAppEndpointAccessLatencyTestStartTime = 0; +let users = parseInt(__ENV.USERS) || 10 +let appsPerUser = parseInt(__ENV.APPS_PER_USER) || 100 +// Dynamically create the scenario configuration for K6 runner +let scenariosDynamic = {}; +let numOfScenarios = 0; + +// Scenario for testing API Latency of Container Application access via App Proxys +scenariosDynamic[numOfScenarios++] = { + executor: 'per-vu-iterations', + exec: 'containerAppProxyLatencyTest', + vus: users, + iterations: 1, + startTime: containerAppEndpointAccessLatencyTestStartTime.toString() + 'm', + maxDuration: timeDurationBetweenScenarioInMin.toString() + 'm', +} + + +// Set k6 test options +export const options = { + scenarios: scenariosDynamic, + // thresholds: Defines the checks to perform at the end of the tests + thresholds: { + http_req_failed: ['rate<0.01'], // http errors should be less than 1% + 'http_req_duration{type:armAPI}': ['p(95)<1000', 'avg<750'], // 95% of API from ARM should be take below 1000ms, avg is less 500ms + 'http_req_failed{type:armAPI}': ['rate<0.01'], // http errors should be less than 1% + 'http_req_duration{type:containerAppProxy}': ['p(95)<3000', 'avg<2500'], // 95% of requests from AppProxy should be take below 3000ms, avg is less 2500ms + 'http_req_failed{type:containerAppProxy}': ['rate<0.01'], // http errors should be less than 1% + }, 
+}; + +// Define Http header for ARM API Access +const httpOptionsARM = { + headers: { + Authorization: `Bearer ${__ENV.API_TOKEN}`, + 'Content-Type': 'application/json', + }, + tags: {type: 'armAPI'}, +}; + +// Define Http header for ContainerAppProxy tests +const httpOptionsContainerAppProxy = { + headers: { + cookie: `keycloak-token=${__ENV.API_TOKEN}`, + 'Content-Type': 'application/json', + }, + tags: {type: 'containerAppProxy'}, +}; + +// containerAppProxyLatencyTest tests API latency of Container Applications via the App Proxy +export function containerAppProxyLatencyTest() { + // First get the list of clusters names + let clusterNameList = Array(); + let offSet = 0; + let defaultPageSize = 100 + let response = http.get(`https://app-orch.${__ENV.MY_HOSTNAME}/deployment.orchestrator.apis/v1/clusters?pageSize=${defaultPageSize}&offset=${offSet}`, httpOptionsARM); + if ( + !check(response, { + "status is OK": (r) => r && r.status === 200, + }) + ) { + fail('failed to get clusters, status not 200'); + return; + } + let totalClusterPages = parseInt(response.json().totalElements / defaultPageSize) + if ((parseInt(response.json().totalElements) % defaultPageSize) !== 0) { + totalClusterPages++; + } + while (totalClusterPages >= 0) { + // TODO: Use totalElements to calculate the pageSize + let clusters = response.json().clusters; + + // Populate all cluster names + for (let _cluster of clusters) { + if (_cluster.name !== "cluster1") { + clusterNameList.push(_cluster.name); + } + } + totalClusterPages--; + if (totalClusterPages === 0) { + break; + } else { + offSet = offSet + defaultPageSize; + response = http.get(`https://app-orch.${__ENV.MY_HOSTNAME}/deployment.orchestrator.apis/v1/clusters?pageSize=${defaultPageSize}&offset=${offSet}`, httpOptionsARM); + } + } + console.log(`Cluster length: `, clusterNameList.length) + + // Now for the given AppId get the endpoints from each cluster + // Distribute the clusters across all concurrent users + let appEndpoints = 
Array(); + + let startIdx = parseInt((clusterNameList.length / users) * (exec.vu.idInInstance-1)) + + for (let i = startIdx; i < (startIdx + appsPerUser) && i < clusterNameList.length; i++) { + let _name = clusterNameList[i] + let response = http.get(`https://app-orch.${__ENV.MY_HOSTNAME}/resource.orchestrator.apis/v2/endpoints/${__ENV.APP_ID}/${_name}`, httpOptionsARM); + if ( + !check(response, { + "status is OK": (r) => r && r.status === 200, + }) + ) { + fail(`failed to get app endpoints, status not 200 for cluster: ${_name}`); + return; + } + // Populate the App Endpoints here + let _appEndpoints = response.json().appEndpoints + _appEndpoints.forEach(function (obj) { + if (obj.endpointStatus.state !== "STATE_READY") { + let appId = obj.id + fail(`App Endpoint not in ready state for cluster: ${_name}, appId: ${appId}`) + } + appEndpoints.push(obj.ports[0].serviceProxyUrl) + }) + } + console.log(`Total App Endpoints to verify: ${appEndpoints.length}`) + + // Finally access the App Endpoints via the App Proxy + for (let _endpoint of appEndpoints) { + let response = http.get(`${_endpoint}`, httpOptionsContainerAppProxy); + if ( + !check(response, { + "status is OK": (r) => r && r.status === 200, + })) { + console.log(`failed ${_endpoint}`) + } + } +} diff --git a/scripts/k6-scripts/arm-asp-api-performance-test.sh b/scripts/k6-scripts/arm-asp-api-performance-test.sh new file mode 100755 index 00000000..a21e063a --- /dev/null +++ b/scripts/k6-scripts/arm-asp-api-performance-test.sh @@ -0,0 +1,242 @@ +#!/bin/bash + +set -eu + +# Initialize some global variables and constants +clusterFqdn=scale.espd.infra-host.com # Change if needed +enspUser=intel-itep-user && enspPassword=ChangeMeOn1stLogin! 
+prometheusURL="https://observability-admin.${clusterFqdn}/api/datasources/uid/orchestrator-mimir/resources/api/v1" +appID="" +observabilityApiCredentials="" +appEndpointsPerUser=100 + +currentTimeStamp=$(date -u +"%Y%m%d-%H%M%S") +resultsDirectory="./test-results/$currentTimeStamp" +tmpJSONFile="$resultsDirectory/tmp.json" + +# CSV Files to capture the latency and Resource metrics +armAvgAPILatencyCsv="arm-api-latency.csv" +aspAvgAPILatencyCsv="asp-api-latency.csv" +avgCpuOrchAppCsv="avg-cpu-usage-ma.csv" +maxCpuOrchAppCsv="max-cpu-usage-ma.csv" +avgRamOrchAppCsv="avg-ram-usage-ma.csv" +maxRamOrchAppCsv="max-ram-usage-ma.csv" + +# Png files generated from CSV file +armAvgAPILatencyPng="arm-api-latency.png" +aspAvgAPILatencyPng="asp-api-latency.png" +avgCpuOrchAppPng="avg-cpu-usage-ma.png" +maxCpuOrchAppPng="max-cpu-usage-ma.png" +avgRamOrchAppPng="avg-ram-usage-ma.png" +maxRamOrchAppPng="max-ram-usage-ma.png" + +# Function to display usage help +usage() { + echo "This script scales the number of concurrent users linearly, \ +and then measures the ARM API and ASP latency, and also Maestro-App-System Namespace resource usage during the process" + echo "Usage: $0 [options] [--] [arguments]" + echo + echo "Options:" + echo " -u VALUE Keycloak username, default all-groups-example-user" + echo " -p VALUE Keycloak password, default ChangeMeOn1stLogin!" + echo " -f VALUE Orch FQDN, default integration12.maestro.intel.com" + echo " -o VALUE Observability API credentials base64 encoded" + echo " -a VALUE App ID to be used for running API Latency Checks. 
This is exactly same ID as the Rancher Fleet Bundle ID of the APP" + echo " -r VALUE App endpoints accessed by each user" + echo " -h Print this help menu" + + echo + echo "Example:" + echo " $0 -a b-8f3c9900-6d02-5e84-9e8d-edf2606d1810 -o Z2djOmdnYzEyMw==" + exit 1 +} + +readInputArgs() { + while [[ $# -gt 0 ]]; do + case "$1" in + -u | --user) + echo "Username: $2" + enspUser=$2 + shift # Shift past the option + shift # Shift past the value + ;; + -p | --password) + echo "Password: $2" + enspPassword=$2 + shift # Shift past the option + shift # Shift past the value + ;; + -f | --fqdn) + echo "FQDN: $2" + clusterFqdn=$2 + shift # Shift past the option + shift # Shift past the value + ;; + -o | --observability-api-cred) + echo "Observability API Credentials base64 encoded: $2" + observabilityApiCredentials=$2 + shift # Shift past the option + shift # Shift past the value + ;; + -a | --app-id) + echo "APP ID: $2" + appID=$2 + shift # Shift past the option + shift # Shift past the value + ;; + -r | --asp-endpoints-per-user) + echo "Asp Endpoints per User: $2" + appEndpointsPerUser=$2 + shift # Shift past the option + shift # Shift past the value + ;; + -h | --help) + usage + shift + ;; + *) + echo "Unknown option: $1" + shift # Shift past the unknown option + ;; + esac + done +} + +# Function to initialize Orchestrator API Token +initializeOrchAPIToken() { + orchAPIToken=$(curl -kX POST https://keycloak.${clusterFqdn}/realms/master/protocol/openid-connect/token \ + -d "username=${enspUser}" \ + -d "password=${enspPassword}" \ + -d "grant_type=password" \ + -d "client_id=system-client" \ + -d "scope=openid" | + jq -r '.access_token' 2>/dev/null) +} + +runApiLatencyChecks() { + users=$1 + echo "Running API latency tests... 
it will take a few mins to capture the results" + initializeOrchAPIToken + # Run API Latency Tests + k6 run --env MY_HOSTNAME="$clusterFqdn" --env API_TOKEN="$orchAPIToken" --env APP_ID="$appID" --env USERS="$users" --env APPS_PER_USER="$appEndpointsPerUser" ./arm-asp-api-latency.js -q --no-thresholds --summary-export="$tmpJSONFile" + totalChecksPass=$(jq '.root_group.checks."status is OK".passes' "$tmpJSONFile") + totalChecksFail=$(jq '.root_group.checks."status is OK".fails' "$tmpJSONFile") + avgArmApiDuration=$(jq '.metrics."http_req_duration{type:armAPI}".avg' "$tmpJSONFile") + avgArmAspProxyAccessDuration=$(jq '.metrics."http_req_duration{type:containerAppProxy}".avg' "$tmpJSONFile") + # Capture the result to a csv file to be used for plotting later + echo "$users,$avgArmApiDuration" >>"$resultsDirectory/$armAvgAPILatencyCsv" + echo "$users,$avgArmAspProxyAccessDuration" >>"$resultsDirectory/$aspAvgAPILatencyCsv" + echo "API Latency Check Results: Passed Checks: $totalChecksPass Failed Checks: $totalChecksFail, users: $users, appEndpointsPerUser: $appEndpointsPerUser, Avg ARM API Duration: $avgArmApiDuration, Avg App Proxy Access Duration: $avgArmAspProxyAccessDuration" +} + +collectMetric() { + input=$1 + value="$2" + query=$3 + resultFile=$4 + + value=$(curl -s "$prometheusURL/query" --header "Authorization: Basic ${observabilityApiCredentials}" --data-urlencode "query=$query" | jq -r '.data.result[0].value[1]' 2>/dev/null) + if [ "$value" != "" ]; then + echo "$input,$value" >>"$resultsDirectory/$resultFile" + fi +} + +collectObservabilityMetrics() { + if [ "$observabilityApiCredentials" == "" ]; then + echo "observabilityApiCredentials is nil, cannot collect observability metrics" + return + fi + users=$1 + secondSuffix="s" + timeDurationInSec=$2$secondSuffix + + collectMetric "$users" "avgCpu" "sum by(k8s_namespace_name)(avg_over_time(k8s_pod_cpu_utilization_ratio{k8s_namespace_name=\"orch-app\"}[$timeDurationInSec]))" $avgCpuOrchAppCsv + collectMetric 
"$users" "maxCpu" "sum by(k8s_namespace_name)(max_over_time(k8s_pod_cpu_utilization_ratio{k8s_namespace_name=\"orch-app\"}[$timeDurationInSec]))" $maxCpuOrchAppCsv + + collectMetric "$users" "avgMem" "sum by(k8s_namespace_name)(avg_over_time(k8s_pod_memory_usage_bytes{k8s_namespace_name=\"orch-app\"}[$timeDurationInSec]))" $avgRamOrchAppCsv + collectMetric "$users" "maxMem" "sum by(k8s_namespace_name)(max_over_time(k8s_pod_memory_usage_bytes{k8s_namespace_name=\"orch-app\"}[$timeDurationInSec]))" $maxRamOrchAppCsv +} + +plotGraph() { + # Define the input CSV file and the output image file + inputCsv="$resultsDirectory/$1" + outputPng="$resultsDirectory/$2" + xLabel=$3 + yLabel=$4 + yRangeMin=$5 + yRangeMax=$6 + yAxisScaleFactor=$7 + graphTitle=$8 + + # If csv files exists, then generate graphs before exiting the script + if [ ! -f "$inputCsv" ]; then + return + fi + echo "Plotting graph for $inputCsv" + + # Generate the graph using gnuplot + gnuplot -persist <<-EOFMarker + set datafile separator "," + set terminal png size 800,600 + set output "$outputPng" + set title "$graphTitle" + set xlabel "$xLabel" + set ylabel "$yLabel" + set ytics scale $yAxisScaleFactor + set yrange [$yRangeMin:$yRangeMax] + plot "$inputCsv" using 1:(\$2*$yAxisScaleFactor) with linespoints title "$graphTitle" +EOFMarker + + echo "Graph generated for $graphTitle, output file: $outputPng" +} + +cleanup() { + # Remove temp file used to capture transient results + rm -f "$tmpJSONFile" + + plotGraph "$armAvgAPILatencyCsv" "$armAvgAPILatencyPng" "total-concurrent-users-with-$appEndpointsPerUser-app-endpoints-per-user" "avg-api-latency-in-ms" 0 2000 1 "arm-api-latency" + plotGraph "$aspAvgAPILatencyCsv" "$aspAvgAPILatencyPng" "total-concurrent-users-with-$appEndpointsPerUser-app-endpoints-per-user" "avg-api-latency-in-ms" 0 3000 1 "asp-api-latency" + plotGraph "$avgCpuOrchAppCsv" "$avgCpuOrchAppPng" "total-concurrent-users-with-$appEndpointsPerUser-app-endpoints-per-user" "avg-cpu-usage" 0 5 1 
"avg-cpu-usage--ma" + plotGraph "$maxCpuOrchAppCsv" "$maxCpuOrchAppPng" "total-concurrent-users-with-$appEndpointsPerUser-app-endpoints-per-user" "max-cpu-usage" 0 5 1 "max-cpu-usage--ma" + plotGraph "$avgRamOrchAppCsv" "$avgRamOrchAppPng" "total-concurrent-users-with-$appEndpointsPerUser-app-endpoints-per-user" "avg-ram-usage-in-MB" 500 10000 0.000001 "avg-ram-usage--ma" + plotGraph "$maxRamOrchAppCsv" "$maxRamOrchAppPng" "total-concurrent-users-with-$appEndpointsPerUser-app-endpoints-per-user" "max-ram-usage-in-MB" 500 10000 0.000001 "max-ram-usage--ma" +} + +# Trap multiple signals +trap cleanup EXIT SIGINT SIGTERM + +# Read input arguments +readInputArgs "$@" + +if [ -z "$appID" ] || [ -z "$observabilityApiCredentials" ]; then + echo "appID: $appID or ObservabilityApiCredentials: $observabilityApiCredentials is nil, can't proceed with the test..." + exit 1 +fi + +# Create directory to store results +mkdir -p "$resultsDirectory" + +testStartTime=$(date +%s) +runApiLatencyChecks 10 +testEndTime=$(date +%s) +collectObservabilityMetrics 10 $((testEndTime - testStartTime)) + +testStartTime=$(date +%s) +runApiLatencyChecks 15 +testEndTime=$(date +%s) +collectObservabilityMetrics 15 $((testEndTime - testStartTime)) + +testStartTime=$(date +%s) +runApiLatencyChecks 20 +testEndTime=$(date +%s) +collectObservabilityMetrics 20 $((testEndTime - testStartTime)) + +testStartTime=$(date +%s) +runApiLatencyChecks 25 +testEndTime=$(date +%s) +collectObservabilityMetrics 25 $((testEndTime - testStartTime)) + +testStartTime=$(date +%s) +runApiLatencyChecks 30 +testEndTime=$(date +%s) +collectObservabilityMetrics 30 $((testEndTime - testStartTime)) diff --git a/scripts/k6-scripts/arm-vnc-api-latency.js b/scripts/k6-scripts/arm-vnc-api-latency.js new file mode 100644 index 00000000..6b9e2620 --- /dev/null +++ b/scripts/k6-scripts/arm-vnc-api-latency.js @@ -0,0 +1,162 @@ +import {check, fail} from 'k6'; +import http from 'k6/http'; +import exec from "k6/execution"; +import ws from 
'k6/ws'; + +let timeDurationBetweenScenarioInMin = 10; +let containerAppEndpointAccessLatencyTestStartTime = 0; +let users = parseInt(__ENV.USERS) || 10 +let appsPerUser = parseInt(__ENV.APPS_PER_USER) || 100 +// Dynamically create the scenario configuration for K6 runner +let scenariosDynamic = {}; +let numOfScenarios = 0; +let sessionDuration = 1000; + +// Scenario for testing API Latency of Container Application access via App Proxys +scenariosDynamic[numOfScenarios++] = { + executor: 'per-vu-iterations', + exec: 'vmVNCAccessLatencyTest', + vus: users, + iterations: 1, + startTime: containerAppEndpointAccessLatencyTestStartTime.toString() + 'm', + maxDuration: timeDurationBetweenScenarioInMin.toString() + 'm', +} + +// Set k6 test options +export const options = { + scenarios: scenariosDynamic, + // thresholds: Defines the checks to perform at the end of the tests + thresholds: { + http_req_failed: ['rate<0.01'], // http errors should be less than 1% + 'http_req_duration{type:armAPI}': ['p(95)<1000', 'avg<750'], // 95% of API from ARM should be take below 1000ms, avg is less 500ms + 'http_req_failed{type:armAPI}': ['rate<0.01'], // http errors should be less than 1% + 'ws_connecting{type:vncProxy}': ['p(95)<3000', 'avg<2500'], // 95% of requests from AppProxy should be take below 3000ms, avg is less 2500ms + 'ws_session_duration{type:vncProxy}': ['p(95)<3000', 'avg<2500'], // http errors should be less than 1% + }, +}; + +// Define Http header for ARM API Access +const httpOptionsARM = { + headers: { + Authorization: `Bearer ${__ENV.API_TOKEN}`, + 'Content-Type': 'application/json', + }, + tags: {type: 'armAPI'}, +}; + +// Define Http header for VNC proxy tests +const httpOptionsContainerAppProxy = { + headers: { + cookie: `keycloak-token=${__ENV.API_TOKEN}`, + 'Content-Type': 'application/json', + }, + tags: {type: 'vncProxy'}, +}; +export function vmVNCAccessLatencyTest() { + // cluster - app - VM ID list + var numOfTargetVMs = {} + + // get Cluster List + let 
clusterNameList = Array(); + let offSet = 0; + let defaultPageSize = 100 + let response = http.get(`https://app-orch.${__ENV.MY_HOSTNAME}/deployment.orchestrator.apis/v1/clusters?pageSize=${defaultPageSize}&offset=${offSet}`, httpOptionsARM); + if ( + !check(response, { + "status is OK": (r) => r && r.status === 200, + }) + ) { + fail('failed to get clusters, status not 200'); + return; + } + let totalClusterPages = parseInt(response.json().totalElements / defaultPageSize) + if ((parseInt(response.json().totalElements) % defaultPageSize) !== 0) { + totalClusterPages++; + } + while (totalClusterPages >= 0) { + // TODO: Use totalElements to calculate the pageSize + let clusters = response.json().clusters; + + // Populate all cluster names + for (let _cluster of clusters) { + if (_cluster.id !== "cluster1") { + clusterNameList.push(_cluster.id); + } + } + totalClusterPages--; + if (totalClusterPages === 0) { + break; + } else { + offSet = offSet + defaultPageSize; + response = http.get(`https://app-orch.${__ENV.MY_HOSTNAME}/deployment.orchestrator.apis/v1/clusters?pageSize=${defaultPageSize}&offset=${offSet}`, httpOptionsARM); + } + } + + // get URL from ARM + let vncEndpoint = Array(); + let startIdx = parseInt((clusterNameList.length / users) * (exec.vu.idInInstance-1)) + for (let i = startIdx; i < (startIdx + appsPerUser) && i < clusterNameList.length; i++) { + let _name = clusterNameList[i] + let response = http.get(`https://app-orch.${__ENV.MY_HOSTNAME}/resource.orchestrator.apis/v2/workloads/${__ENV.APP_ID}/${_name}`, httpOptionsARM); + if ( + !check(response, { + "status is OK": (r) => r && r.status === 200, + }) + ) { + fail(`failed to get app endpoints, status not 200 for cluster: ${_name}`); + return; + } + + let _appWorkloads = response.json().appWorkloads + _appWorkloads.forEach(function (obj) { + if (obj.type === "TYPE_VIRTUAL_MACHINE" ) { + let _vmID = obj.id + let vncAPIResponse = 
http.get(`https://app-orch.${__ENV.MY_HOSTNAME}/resource.orchestrator.apis/v2/workloads/virtual-machines/${__ENV.APP_ID}/${_name}/${_vmID}/vnc`, httpOptionsARM); + if ( + !check(vncAPIResponse, { + "status is OK": (r) => r && r.status === 200, + }) + ) { + fail(`failed to get app vnc address, status not 200 for cluster: ${_name}`); + return; + } + + vncEndpoint.push(vncAPIResponse.json().address) + } + }) + } + + vncEndpoint.forEach(function (obj) { + const params = { headers: { + "Origin": `https://web-ui.${__ENV.MY_HOSTNAME}`, + cookie: `keycloak-token=${__ENV.API_TOKEN}`, + },tags: {type: 'vncProxy'}}; + + const res = ws.connect(obj, params, function (socket) { + + socket.on('open', function open() { + console.log(`VU ${__VU}: connected / ${obj.toString()}`); + }); + + socket.on('binaryMessage', (message) => { + var rMsg = new Uint8Array(message) + var msg = String.fromCharCode.apply(null, rMsg) + if (msg.toString() !== "RFB 003.008\n") { + fail(`unexpected message arrived - should be RFB 003.008 but received ${msg.toString()}`) + } + socket.close() + }); + + socket.setTimeout(function () { + fail("session over sessionDuration", sessionDuration) + }, sessionDuration); + + socket.setTimeout(function () { + socket.close(); + fail(`Closing the socket forcefully 3s after graceful LEAVE`) + }, sessionDuration + 30000); + }); + + check(res, { 'Connected successfully': (r) => r && r.status === 101 }); + }); +} \ No newline at end of file diff --git a/scripts/k6-scripts/arm-vnc-api-performance-test.sh b/scripts/k6-scripts/arm-vnc-api-performance-test.sh new file mode 100755 index 00000000..47c8115d --- /dev/null +++ b/scripts/k6-scripts/arm-vnc-api-performance-test.sh @@ -0,0 +1,36 @@ +#!/bin/bash + +set -eu + +# Initialize some global variables and constants +clusterFqdn=kind.internal # Change if needed +enspUser=all-groups-example-user && enspPassword=ChangeMeOn1stLogin! 
+appID=$2 +appEndpointsPerUser=1 +users=$1 + +currentTimeStamp=$(date -u +"%Y%m%d-%H%M%S") +resultsDirectory="./" +tmpJSONFile="$resultsDirectory/$currentTimeStamp.json" + +# Function to initialize Orchestrator API Token +initializeOrchAPIToken() { + orchAPIToken=$(curl -kX POST https://keycloak.${clusterFqdn}/realms/master/protocol/openid-connect/token \ + -d "username=${enspUser}" \ + -d "password=${enspPassword}" \ + -d "grant_type=password" \ + -d "client_id=system-client" \ + -d "scope=openid" | + jq -r '.access_token' 2>/dev/null) +} + +runApiLatencyChecks() { + + echo "Running API latency tests... it will take a few mins to capture the results" + initializeOrchAPIToken + k6 run --env MY_HOSTNAME="$clusterFqdn" --env API_TOKEN="$orchAPIToken" --env APP_ID="$appID" --env USERS="$users" --env APPS_PER_USER="$appEndpointsPerUser" ./arm-vnc-api-latency.js -q --no-thresholds --summary-export="$tmpJSONFile" +} + +runApiLatencyChecks + +#wscat --no-check --header "origin: https://web-ui.kind.internal" --header "cookie: keycloak-token=$orchAPIToken" -c wss://vnc.kind.internal/vnc/b-867bcd29-f8ba-5b12-b114-e6b21553cb2d/cluster-01234567/c7d7a6b3-dccb-4402-b2f7-4e0cd7a85c41 \ No newline at end of file diff --git a/scripts/k6-scripts/tmp.json b/scripts/k6-scripts/tmp.json new file mode 100644 index 00000000..bb0acf77 --- /dev/null +++ b/scripts/k6-scripts/tmp.json @@ -0,0 +1,122 @@ +{ + "root_group": { + "name": "", + "path": "", + "id": "d41d8cd98f00b204e9800998ecf8427e", + "groups": {}, + "checks": { + "status is OK": { + "fails": 10, + "name": "status is OK", + "path": "::status is OK", + "id": "d83a99fb6db0de09e9f0d32dd8d2e9d5", + "passes": 0 + } + } + }, + "metrics": { + "http_reqs": { + "count": 10, + "rate": 201.7171128891264 + }, + "iterations": { + "count": 10, + "rate": 201.7171128891264 + }, + "http_req_waiting": { + "avg": 20.032341800000005, + "min": 12.155688, + "med": 16.2444715, + "max": 30.272082, + "p(90)": 29.7442824, + "p(95)": 30.0081822 + }, + 
"http_req_failed{type:armAPI}": { + "fails": 0, + "passes": 10, + "value": 1 + }, + "http_req_duration{type:armAPI}": { + "med": 16.45135, + "max": 30.509328, + "p(90)": 29.9402058, + "p(95)": 30.2247669, + "avg": 20.2809143, + "min": 12.663708 + }, + "data_sent": { + "count": 32510, + "rate": 655782.33400255 + }, + "http_req_duration": { + "p(90)": 29.9402058, + "p(95)": 30.2247669, + "avg": 20.2809143, + "min": 12.663708, + "med": 16.45135, + "max": 30.509328 + }, + "checks": { + "passes": 0, + "fails": 10, + "value": 0 + }, + "http_req_blocked": { + "med": 18.5761945, + "max": 19.212538, + "p(90)": 19.1433595, + "p(95)": 19.17794875, + "avg": 16.9724738, + "min": 11.519697 + }, + "http_req_tls_handshaking": { + "avg": 12.080767, + "min": 7.150055, + "med": 13.48726, + "max": 13.993239, + "p(90)": 13.9586025, + "p(95)": 13.97592075 + }, + "http_req_failed": { + "passes": 10, + "fails": 0, + "value": 1 + }, + "http_req_receiving": { + "max": 0.06775, + "p(90)": 0.0676681, + "p(95)": 0.06770905, + "avg": 0.0440527, + "min": 0.020636, + "med": 0.0396965 + }, + "iteration_duration": { + "min": 24.521351, + "med": 35.3450585, + "max": 49.300302, + "p(90)": 48.4094262, + "p(95)": 48.8548641, + "avg": 37.493412899999996 + }, + "data_received": { + "rate": 931368.2536316745, + "count": 46172 + }, + "http_req_connecting": { + "med": 1.512642, + "max": 1.773667, + "p(90)": 1.7035885, + "p(95)": 1.73862775, + "avg": 1.3882615, + "min": 0.807526 + }, + "http_req_sending": { + "p(90)": 0.41971319999999995, + "p(95)": 0.43003709999999995, + "avg": 0.2045198, + "min": 0.105191, + "med": 0.149099, + "max": 0.440361 + } + } +} \ No newline at end of file From 0c67cdb422fc36eb99818701b3b85d9254b48b09 Mon Sep 17 00:00:00 2001 From: "Padmanabhan, Badhrinath" Date: Wed, 21 May 2025 23:07:28 -0700 Subject: [PATCH 2/2] Update readne and .swp file removal --- scripts/README.md | 318 +------------------- scripts/app-scale-tester/.add-N-apps.sh.swp | Bin 16384 -> 0 bytes 2 files changed, 
13 insertions(+), 305 deletions(-) delete mode 100644 scripts/app-scale-tester/.add-N-apps.sh.swp diff --git a/scripts/README.md b/scripts/README.md index f600b8d9..dc1db403 100644 --- a/scripts/README.md +++ b/scripts/README.md @@ -1,307 +1,15 @@ +1. https://github.com/intel-innersource/frameworks.edge.one-intel-edge.scale.test-tools +2. Use scale-badhri branch in above repo. +3. Use this command in scripts/app-scale-tester folder to run nginx test: ./add-N-apps.sh -a 1 -o aW50ZWwtaXRlcC11c2VyOkNoYW5nZU1lT24xc3RMb2dpbiE= +4. How to construct the string after -o flag is explained in README of repo. +5. The observability part doesnt work yet. +6. Use this command in scripts/app-scale-tester folder to run dummy app test: ./add-N-apps_dummyapp.sh -a 1 -o aW50ZWwtaXRlcC11c2VyOkNoYW5nZU1lT24xc3RMb2dpbiE= +7. scale test was run on orchestrator : scale.espd.infra-host.com +8. helpful notes: + - label all clusters with label scale=adm : kubectl label -n 84d60f25-685c-4992-b60f-ea5a87549456 clusters.cluster.x-k8s.io --all scale=adm + - remove label from certain cluster : kubectl label -n 84d60f25-685c-4992-b60f-ea5a87549456 clusters.cluster.x-k8s.io/cl-50-concurrent1 scale- + - list all clusters with label scle=adm: kubectl get clusters.cluster.x-k8s.io -n 84d60f25-685c-4992-b60f-ea5a87549456 -l scale=adm +9. ARM and ASP tests were not run in this release + - Need to understand what the scripts are doing. -# App-Orch Scale testing -This README documents details on the following topics -- App-Orch scale testing -- Tools used -- Different scale strategies (ENiC vs vCluster) -- Script used and how to run them -- Results collected -- Debugging -- Cleanup - -## Pre-requisite - -### Gnu Plot -```shell -sudo apt-get update -sudo apt-get install gnuplot -``` -Verified gnuplot version **5.4 patchlevel 2**. - -### K6 API load tester tool -Use instructions at https://k6.io/docs/get-started/installation/ to install k6. Recommended version **v0.47.0**. 
- -### vCluster -Refer https://www.vcluster.com/. Please install vcluster version **0.19.6**. - -### kubectl -Refer https://kubernetes.io/docs/tasks/tools/ and install kubectl for your platform. Recommended version **v1.28.9**. - -### JQ and YQ -- [JQ](https://jqlang.github.io/jq/download/) - **jq-1.6** or above -- [YQ](https://mikefarah.gitbook.io/yq) - **v4.33.3** or above - -### Bash shell -Bash shell version **5.1.16** or above. - -### Catalog CLI tool -Refer to https://github.com/intel-innersource/frameworks.edge.one-intel-edge.maestro-app.cli. - -## Running the cluster scale tester scripts - -### Using ENIC based edge node emulator for scale tests -The following steps detail scale tests based on ENIC based edge node emulator. - -The scale tester scripts can be used to bring up multiple clusters and install multiple apps on -them. The clusters / apps are installed in batches of 10 by default. After each batch is ready, -the test scripts will run API latency tests and collect relevant metrics from the Observability service. - -To create a number of clusters using the Configured hosts and wait for all clusters to be Ready: -```shell -cd ./enic-scale-tester -./add-N-clusters.sh -c -o -``` - -#### Other options to the scripts - -- `-b` : Batch Size of clusters to install. **Default 10** -- `-f` : Cluster FQDN. **Default integration12.maestro.intel.com** -- `-u` : Keycloak username. **Default all-groups-example-user** -- `-p` : Keycloak password. **Default ChangeMeOn1stLogin!** -- `-a` : Apps per enic. **Default 3** - - -### Using vCluster based edge node emulator for scale tests -The following steps detail using [vCluster](https://www.vcluster.com/) based scale tests. - -#### Case1: A host cluster already exists to host the vClusters -**Pre-requisite:** -- Set KUBECONFIG env variable to kubeconfig of the target cluster -- The script expects that `apps` namespace is pre-created on the target clusters. 
If vclusters are to be installed in to a different NS, the script needs to be modified. -- The target namespace has sufficient resource limits to host the required number of vclusters - - -To create a number of clusters using the Configured hosts and wait for all clusters to be Ready: -```shell -cd vcluster-scale-tester/ -./add-N-virtual-clusters.sh -c -o -``` - -Use help option like below to learn more options that is exposed by the script - -```shell -./add-N-virtual-clusters.sh -h -``` - -#### Case2: No host cluster but only bare-metal servers with Ubuntu 22.04 - -Do the following on all the servers that are used to host vClusters - -##### Install Tools -```shell -# Install KinD -[ $(uname -m) = x86_64 ] && curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.23.0/kind-linux-amd64 -# For ARM64 -[ $(uname -m) = aarch64 ] && curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.23.0/kind-linux-arm64 -chmod +x ./kind -sudo mv ./kind /usr/local/bin/kind - -# Install Helm -curl https://baltocdn.com/helm/signing.asc | gpg --dearmor | sudo tee /usr/share/keyrings/helm.gpg > /dev/null -sudo apt-get install apt-transport-https --yes -echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/helm.gpg] https://baltocdn.com/helm/stable/debian/ all main" | sudo tee /etc/apt/sources.list.d/helm-stable-debian.list -sudo apt-get update -sudo apt-get install helm - -# Install vCluster -wget https://github.com/loft-sh/vcluster/releases/download/v0.19.6/vcluster-linux-amd64 -chmod +x vcluster-linux-amd64 -sudo mv vcluster-linux-amd64 /usr/local/bin/vcluster - -# Install kubectl -curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" -chmod +x kubectl -sudo mv kubectl /usr/local/bin/kubectl -``` -##### Copy intel-harbor-ca.crt -Do this in the home folder of the server -```shell -openssl s_client -showcerts -connect amr-registry.caas.intel.com:443 < /dev/null | sed -ne '/-BEGIN CERTIFICATE-/,/-END CERTIFICATE-/p' > 
~/intel-harbor-ca.crt -``` - -##### Create KinD cluster config file -Create file named `kind-config.yaml` in the home folder of the server with below content. -**Note**: Change the `apiServerAddress` to the actual IP address of the server. - -```shell -kind: Cluster -apiVersion: kind.x-k8s.io/v1alpha4 -networking: - apiServerAddress: 10.3.162.65 -nodes: -- role: control-plane - extraMounts: - - hostPath: /tmp/var/log/ - containerPath: /var/log/ - - hostPath: /tmp/var/log/containers/ - containerPath: /var/log/containers/ - - hostPath: /tmp/var/lib/rancher/rke2/agent/logs/ - containerPath: /var/lib/rancher/rke2/agent/logs/ - - hostPath: /tmp/var/lib/rancher/rke2/server/logs/ - containerPath: /var/lib/rancher/rke2/server/logs/ - - hostPath: /tmp/dev/lvmvg/ - containerPath: /dev/lvmvg/ -containerdConfigPatches: - - |- - [plugins."io.containerd.grpc.v1.cri".registry] - [plugins."io.containerd.grpc.v1.cri".registry.mirrors] - [plugins."io.containerd.grpc.v1.cri".registry.mirrors."*"] - endpoint = ["https://dockerhubcache.caas.intel.com"] - [plugins."io.containerd.grpc.v1.cri".registry.configs] - [plugins."io.containerd.grpc.v1.cri".registry.configs."amr-registry.caas.intel.com".tls] - ca_file = "/usr/local/share/ca-certificates/intel-harbor-ca.crt" - [plugins."io.containerd.grpc.v1.cri".registry.configs."dockerhubcache.caas.intel.com".tls] - ca_file = "/usr/local/share/ca-certificates/intel-harbor-ca.crt" -kubeadmConfigPatches: - - | - apiVersion: kubelet.config.k8s.io/v1beta1 - kind: KubeletConfiguration - maxPods: 500 -``` - -##### Enable public key access -Enable public key access to these servers from the server which is used to run the automation scripts. 
- -*We are finally ready to run the automation script* - -```shell -cd vcluster-scale-tester/ -./add-virtual-clusters-on-kind.sh -s "@/,@/" - -## Example -./add-virtual-clusters-on-kind.sh -s "labrat@10.123.232.168/10,labrat@10.123.232.172/10,root@10.237.213.34/10,root@10.237.213.151/10,testbeduser@10.228.254.158/5,labuser@10.3.162.217/10,labuser@10.3.162.88/10,labuser@10.3.162.105/10,labuser@10.3.162.65/10" -``` - -Other options to the script -> -```shell -❯ ./add-virtual-clusters-on-kind.sh -h -Usage: ./add-virtual-clusters-on-kind.sh [options] [--] [arguments] - -Options: - -s VALUE List of servers (comma separated) and vclusters to install on those servers. Ex: "labuser@10.3.162.217/30,labuser@10.3.162.105/30" - -u VALUE Keycloak username, default all-groups-example-user - -p VALUE Keycloak password, default ChangeMeOn1stLogin! - -f VALUE Orch FQDN, default integration12.maestro.intel.com - -b VALUE Cluster install batch size, default 10 - -o VALUE Observability API credentials base64 encoded - -a VALUE Apps per ENIC, default 1 - -k VALUE vClusters per kind host, default 30 - -r VALUE Path of amr-registry.caas.intel.com registry public certificate. - -h Print this help menu - - In the below example, we ask to install 30 vclusters on labuser@10.3.162.217 and 10 vclusters on labuser@10.3.162.105 (using -s). - We also specify the path of Intel AMR CaaS public certificate (using -r). We then specify that only 10 vclusters should be hosted per kind cluster - - ./add-virtual-clusters-on-kind.sh -s "labuser@10.3.162.217/30,labuser@10.3.162.105/10" -r ./intel_harbor_ca.crt -k 10 -``` - -### Running the app scale test script and collecting metrics - -#### ADM API Scale tester -To deploy multiple copies of the "dummy app" to place load on the App Orch control plane without exhausting scarce edge resources: -```shell -cd app-scale-tester/ -./add-N-apps.sh -a -o -``` -The script generates various plots at the end of the test on API latency and Resource usage. 
Look for the results in the `./test-results/` folder. -The exact folder where the results are generated is logged at the end of the script execution. - -*NOTE:* Refer section [Getting observability api credentials](#getting-observability-api-credentials) further in this README on how to get Observability API Token to get the observability metrics. - -#### ARM API and ASP Scale tester - -To run the ARM and API scale tester do the following. This script scales the number of concurrent users linearly, -and then measures the ARM API and ASP latency, and also Maestro-App-System Namespace resource usage during the process -```shell -cd k6-scripts -./arm-asp-api-performance-test.sh -a -o -``` -For more details on the options to the script use below command -```shell -./arm-asp-api-performance-test.sh -h -``` - -At the end of the test result, all the metrics are collected and graphs are generated in the `./test-results/` folder. -The exact folder where the results are generated is logged at the end of the script execution. - -## Getting observability api credentials - -```shell -export KUBECONFIG= - -kubectl get secret mp-observability-grafana -n maestro-platform-system -o go-template='{{range $k,$v := .data}}{{printf "%s: " $k}}{{if not $v}}{{$v}}{{else}}{{$v | base64decode}}{{end}}{{"\n"}}{{end}}' - -admin-password: -admin-user: -ldap-toml: -``` - -The output credentials are decoded so now base64 encode `:` with the colon char inbetween and provide that as `` - -**Note:** when orchestrator or system restarts, the password will change so you will need to get new credentials and encode again. - -## Test Results -All results will be stored in `./test-results/` folder. It will be a collection of PNG files generated by GNU Plot in PNG file format and the source CSV files that were used to generate the GNU plots. -The shell script also dumps a lot of logs that could be used to trace the test progress. 
- -## Debugging -### vClusters setup on Scale Load cluster -```shell -# 1: Set the KUBECONFIG to the right kubeconfig file pointing to scale load cluster -export KUBECONFIG= -# 2: Connect to problematic cluster and execute the kubectl command of interest to debug -vcluster connect -n apps -- kubectl get pods -A -``` -**NOTE:** All vclusters are hosted in `apps` namespace by default for app-orch scale testing. -### vClusters setup on bare-metal servers -The vCluster name on bare-metal servers have the format - `vcluster-edge-node--- -# Example: -kubectl config use-context kind-kind-node-10-3-162-88-0 -# 3: list vClusters on that KinD cluster -vcluster list -n apps -# 4: connect to vcluster and execute the kubectl command of interest to debug further -# Example: -vcluster connect -n apps vcluster-edge-node-10-3-162-88-17 -- kubectl get pods -A -``` -**NOTE:** All vclusters are hosted in `apps` namespace by default for app-orch scale testing. - -## Cleanup -### vClusters cleanup on Scale Load cluster -Use the following instructions -1. Delete those clusters from Rancher UI. Bulk deletes are possible on Rancher UI by selecting several clusters. -2. Delete specific vclusters with `vcluster delete -n apps `. Make sure you have set the right kubeconfig to be able to access the vcluster. To delete batch of vclusters, use `utils/delete-vclusters.sh -c ` script. -3. If the associated pods are still in Terminating state after vcluster tool has deleted the vclusters, then force delete those pods in Scale Load cluster with below command -``` -kubectl delete pod -n apps --force `kubectl get pods -n apps | grep Terminating | awk '{print $1}'` -``` -4. Then re-setup the clusters that are necessary to fill the gap on Scale Load cluster with below command - -``` - ./add-N-virtual-clusters.sh -c -a -i -``` - -**NOTE**: - -- All vclusters are hosted in `apps` namespace by default for app-orch scale testing. 
-- Choose `start-index-to-use-for-vcluster` such that it is the first available index with no index in use after that. It defaults to 0 if none specified. - -### vClusters cleanup on bare-metal servers -Use below instructions - -- Delete those clusters from Rancher UI. Bulk deletes are possible on Rancher UI by selecting several clusters. -- Login to baremetal hosting the vCluster. Setup the kubeconfig to the KinD cluster hosting the vCluster. Delete the vcluster now - -At this time it is not possible to add vClusters on a bare-metal in an additive manner. So please delete all the clusters associated with a server on Rancher UI using bulk delete, then delete all the KinD clusters (which also deletes the vClusters automatically) created for hosting vClusters. Once this cleanup is done, setup the required number of vClusters on this baremetal using the instructions shared in previous sections. diff --git a/scripts/app-scale-tester/.add-N-apps.sh.swp b/scripts/app-scale-tester/.add-N-apps.sh.swp deleted file mode 100644 index 35c3731a2c1c7fe1f00b5fa404d502ce58f45ee1..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 16384 zcmeHO&5s;M6)!#wgpkOQ!X<}t&u+Yy-7~wkgvdr6*i2&Ci9c4}U0cW)RMTCv({_Kk zt9sTG%OE&~OD=>YQX~W+Cld!DB#=NDAwER7aERa#!9PHda=<}IhQRMtch7Xs&Srf< z9NZ)Qyu0<@t9tdTde!ybw0nzZkDug^w3iq@_cPXdtvz^Y;XQWat!1WUMy-qP(~W!)K9cBvWcopJHqqqQ#MRgTkSs7uD&!1 z4Fh){1NX8chnE&<-Ysh%e_-#%-A6$)Zw)pvhsh|HTK55Z@Zc6@MAurqCn3DgykUv?p^J0KF?J64>%ENq1-J?P9C#Tx4?G0i5B%bj zjQtq+K5!j)4)`MQY2ZQN);)~<3V0QG7Wfjd3_Jkr1%CYr#x4UJz{nmvz&m>w`w5_dE^rUi$gF!m!L0b<~3zyfZ6n6Wp3SAdJa ze&8P{GTa2N0#5;Zfj^*d@@=3Gv;Yfu7hK)~-UQwNUIv~Ah~uvT^$+LL9mL!^ooGLf z)Uw6$mf{DuHk8MhDni@zZo5Xu?vs+Oj8;2|HmzD0yIF6yJ1OwCNE)vs z$&R+6$OjU`%^iJ(>`r{Gc2OB`rghcV+-|0?b=%TcaMw+;oq2VW><)MxcTw?`R%M%Y z)z^IJnZDL-OJ6aA@j%mifM&OCwC#JK+C|0JnbuWbbGw| zo1z7;4*7PFw-tNE+otBtQcIHKk^kayS48o`di)K$Yf2_{z!X9d&F)&-FjyJ;e4*n&u70D5Nyld&M`-#_rTb&)Un z&em05^Py+@S|7Reb%$Uwqt&rCsauese!U+cD|ub9ECU>Y;?{=sB? 
zt#(oImAKhvUG+7$o9S!aw)C|gOu36w!IV2x4W?XeDwuM$S}^5$)4^mEb(lQ0cKXy& zM3ZC9rBY>qR!{h;bakALIuN%nsyMRPw8*jA9eTUaE(D!9H%n7IBq@5=vWl0_=t8qo}L)L z$G9#-Jax5n_FuK|v^^z4X)Rmx$~r`Uo|UP>wQCm+S;Y;Z=yS@8$*Y;` zqlD%lG9BAw?c>3I$cl$DO?^+E6+tFx^b6PagEBZ71nxk(m*@f_dRWD^?%~DHJ_6bL zKyut43_l#xO=IB>aAIo3-5AaC3f~&Yh;Kh%U z#NWvnw#|zb4@NwSHSR(-OKTffleThxt$~LZxF%J82k1NHFg-or=wBXq} zQd{`d>cTjdC;I{}VxyIQp_kRkdX6>hVv*~b#rANl#r9Hru`Z)ka;#|6kFMB3jAf{Z zCp4`J_Y$syRb^0B8njv*{_x@n##JI+zwf(zC{mx6O5rNpQ!*Wjo*(#ngokN2l^$Zq z7lCpti{nw3#zQPS7H5d9;2|U%hiT#gI%GYe-9gcNt@C5?77s-G)bE z^hYSMIGG|AH_n`xT>ERQZNt=$rqTe)3_3U?g^IHjk$6Px!w++ktqP^K;?#4x0}*Y?lk#-5 zr1Xgxp|lT2NG0iUF8PtyHaj80Lg9EZ7ToU_dx%w)%}LspDj}18D(pdw&5x68mSKw@ zk9-Ov>{+g`Abc~3dl+OKdt5{w&Oy3eBC$ha=`-YmSA5Nv=ogbJ<`IjtEDL*>VB8;X zCPYPs2OK;-&*xg)arh#?@F>M(#PB}9<~H8xZ|&>!{HW6tc#)p+|8F3N_K{Ol{$GB7 z{zv5XH-Vo4KLuU{UI3m2JirAW1s(+ciQNA!;D^BLzzyJgz$?HvfdOy=cpCUS>Ht3g zUI4xYBtQ(D0uBLGC%6xI2lap=0QLj7;rk8XHQ+fQ1}*|L zrvcD>8U`8$8U`8$8U`8$8U`8${vQl@aU^krb>oauERSo|HQEUB#_Aler^j>PFh)a|;Vp2f~*eRQcgH>H$il0F;_a(1Cog zZr&e9vSRu$)!JG*l&6K`t9PEeQ3l1&WDz%AQ%3nX<%;EG@MX;|gDd)$r{Z2&fjT&^ z$T6h4NVv2Lt4n2 z$B7KAK|(|}LUSZ@C>5Y&wp^ol;@vLO<4Pi>@`%K3HZ|&@uqAwbG)`B7US4?6*y)n2XuMsRRk*`qHFmGmZt)^2cc+l}gGg3Ir{xixu?-R5OxP z4*fV&+!QM)6)bX7#la|oEAWs#CMcYuCKsnV7@0mQSt#{So4lyJh)q;8i}h2YFWW_X z-jylHC9zU|54E(sV-Mvz6e`K<}KBrB=%xE=j3n67Cr@cEo&Dhc8I zs18}z>(umEp0sZu2m}?NDkaJ!uhZc)z_$`2h&OX3R+ymbhFy;k7{|3T0n%=_@x81e b!z{b0QB8F6^nY?3g4vwyv9h7s6b1eZ9Wt+s