From a175fa55aaa38069d07006aa945f960dbf1564c7 Mon Sep 17 00:00:00 2001 From: Kamesh Akella Date: Thu, 8 Aug 2024 12:16:12 -0400 Subject: [PATCH] update scaling benchmark to get metrics from both clusters Signed-off-by: Kamesh Akella --- .../prometheus-metrics-calc/action.yml | 12 ++- .github/workflows/rosa-scaling-benchmark.yml | 81 +++++++++++++++++-- benchmark/src/main/python/perfInsights.py | 2 +- 3 files changed, 86 insertions(+), 9 deletions(-) diff --git a/.github/actions/prometheus-metrics-calc/action.yml b/.github/actions/prometheus-metrics-calc/action.yml index 866b8fff2..c227b2005 100644 --- a/.github/actions/prometheus-metrics-calc/action.yml +++ b/.github/actions/prometheus-metrics-calc/action.yml @@ -44,11 +44,17 @@ runs: run: | readarray -t lines < ${{ inputs.input }} num1=${lines[0]} - num2=${lines[1]} + num2=${lines[2]} + num3=${lines[1]} + num4=${lines[3]} #calculating the difference of cumulative metric (changed during the benchmark execution) - difference=$(awk "BEGIN {print ($num2-$num1); exit}") + difference_cluster_1=$(awk "BEGIN {print ($num2 - $num1); exit}") + difference_cluster_2=$(awk "BEGIN {print ($num4 - $num3); exit}") + + # averaging the differences between the two clusters + average_difference=$(awk "BEGIN {print ($difference_cluster_1 + $difference_cluster_2) / 2; exit}") #the script calculates vCPU need to calculate the vcpu number from CPU seconds metrics for the interval during which the test was running - metric_count_in_interval=$(awk "BEGIN {print $difference/$TIME_INTERVAL; exit}") + metric_count_in_interval=$(awk "BEGIN {print $average_difference/$TIME_INTERVAL; exit}") #calculating the average metric per pod metric_per_pod=$(awk "BEGIN {print $metric_count_in_interval/$POD_NUM; exit}") #Calculating the final number, i.e. how many of specified criteria (e.g. 
user logins/sec, client credential grants, etc) diff --git a/.github/workflows/rosa-scaling-benchmark.yml b/.github/workflows/rosa-scaling-benchmark.yml index 8c50e5787..33c1eb43e 100644 --- a/.github/workflows/rosa-scaling-benchmark.yml +++ b/.github/workflows/rosa-scaling-benchmark.yml @@ -7,6 +7,10 @@ on: description: 'Name of the cluster' type: string default: 'gh-keycloak-a' + clusterPrefix: + description: 'Cluster prefix' + type: string + default: 'gh-keycloak' region: description: 'Name of the region where EC2 instances should be installed' type: string @@ -45,6 +49,10 @@ on: clusterName: description: 'Name of the cluster' type: string + default: 'gh-keycloak-a' + clusterPrefix: + description: 'Cluster prefix' + type: string default: 'gh-keycloak' region: description: 'Name of the region where EC2 instances should be installed' @@ -105,7 +113,7 @@ jobs: - name: Login to OpenShift cluster uses: ./.github/actions/oc-keycloak-login with: - clusterName: ${{ inputs.clusterName || format('gh-{0}', github.repository_owner) }} + clusterName: ${{ inputs.clusterPrefix }}-a - name: Setup Go Task uses: ./.github/actions/task-setup @@ -227,7 +235,19 @@ jobs: isvCPU: false isMemory: true - - name: Run CPU sec Util Query Before Benchmark + - name: Run CPU sec Util Query Before Benchmark on Cluster 1 + uses: ./.github/actions/prometheus-run-queries + with: + project: ${{ env.PROJECT }} + runCpuSecsUtil: true + output: user_logins_vCpu + + - name: Login to OpenShift cluster 2 + uses: ./.github/actions/oc-keycloak-login + with: + clusterName: ${{ inputs.clusterPrefix }}-b + + - name: Run CPU sec Util Query Before Benchmark on Cluster 2 uses: ./.github/actions/prometheus-run-queries with: project: ${{ env.PROJECT }} @@ -251,7 +271,24 @@ jobs: continue-on-error: true working-directory: ansible - - name: Run CPU sec Util Query After Benchmark + - name: Login to OpenShift cluster 1 + uses: ./.github/actions/oc-keycloak-login + with: + clusterName: ${{ inputs.clusterPrefix }}-a + 
+ - name: Run CPU sec Util Query After Benchmark on Cluster 1 + uses: ./.github/actions/prometheus-run-queries + with: + project: ${{ env.PROJECT }} + runCpuSecsUtil: true + output: user_logins_vCpu + + - name: Login to OpenShift cluster 2 + uses: ./.github/actions/oc-keycloak-login + with: + clusterName: ${{ inputs.clusterPrefix }}-b + + - name: Run CPU sec Util Query After Benchmark on Cluster 2 + uses: ./.github/actions/prometheus-run-queries + with: + project: ${{ env.PROJECT }} + runCpuSecsUtil: true + output: user_logins_vCpu @@ -269,7 +306,24 @@ isvCPU: true isMemory: false - - name: Run CPU sec Util Query Before Benchmark + - name: Login to OpenShift cluster 1 + uses: ./.github/actions/oc-keycloak-login + with: + clusterName: ${{ inputs.clusterPrefix }}-a + + - name: Run CPU sec Util Query Before Benchmark on Cluster 1 + uses: ./.github/actions/prometheus-run-queries + with: + project: ${{ env.PROJECT }} + runCpuSecsUtil: true + output: client_credential_grants_vCpu + + - name: Login to OpenShift cluster 2 + uses: ./.github/actions/oc-keycloak-login + with: + clusterName: ${{ inputs.clusterPrefix }}-b + + - name: Run CPU sec Util Query Before Benchmark on Cluster 2 + uses: ./.github/actions/prometheus-run-queries + with: + project: ${{ env.PROJECT }} + runCpuSecsUtil: true + output: client_credential_grants_vCpu @@ -292,7 +346,24 @@ continue-on-error: true working-directory: ansible - - name: Run CPU sec Util Query After Benchmark + - name: Login to OpenShift cluster 1 + uses: ./.github/actions/oc-keycloak-login + with: + clusterName: ${{ inputs.clusterPrefix }}-a + + - name: Run CPU sec Util Query After Benchmark on Cluster 1 + uses: ./.github/actions/prometheus-run-queries + with: + project: ${{ env.PROJECT }} + runCpuSecsUtil: true + output: client_credential_grants_vCpu + + - name: Login to OpenShift cluster 2 + uses: ./.github/actions/oc-keycloak-login + with: + clusterName: ${{ inputs.clusterPrefix }}-b + + - name: Run CPU sec Util Query After Benchmark on Cluster 2 + uses: ./.github/actions/prometheus-run-queries + with: + project: ${{ env.PROJECT }} diff --git 
a/benchmark/src/main/python/perfInsights.py b/benchmark/src/main/python/perfInsights.py index 4d454c654..5a4638aa3 100644 --- a/benchmark/src/main/python/perfInsights.py +++ b/benchmark/src/main/python/perfInsights.py @@ -61,7 +61,7 @@ def fetch_and_process_json(github_user, github_repo, branch_name, json_directory data_frames[test].append(df) combined_df = pd.concat(basic_df, ignore_index=True) - perf_across_deployments_df = combined_df[['start', 'context.externalInfinispanFeatureEnabled', 'context.persistentSessionsEnabled', 'cpuUsageForLoginsTest.userLoginsPerSecPer1vCpuPerPod', 'credentialGrantsPerSecPer1vCpu', 'memoryUsageTest.activeSessionsPer500MbPerPod']] + perf_across_deployments_df = combined_df[['start', 'context.externalInfinispanFeatureEnabled', 'context.persistentSessionsEnabled', 'cpuUsageForLoginsTest.userLoginsPerSecPer1vCpuPerPod', 'cpuUsageForCredentialGrantsTest.credentialGrantsPerSecPer1vCpu', 'memoryUsageTest.activeSessionsPer500MbPerPod']] print(perf_across_deployments_df.to_csv(index=False)) # Concatenate all DataFrames for each test into a single DataFrame