Commit d440192

Added test for specific oss cluster setup name
1 parent 6a06046 commit d440192

15 files changed, +1132 −133 lines changed

redisbench_admin/compare/compare.py

Lines changed: 12 additions & 12 deletions
@@ -1244,12 +1244,12 @@ def from_rts_to_regression_table(
             server_regression_details or client_regression_details
         )
         if combined_regression_details:
-            combined_regression_details["server_side"] = (
-                server_confirms_regression
-            )
-            combined_regression_details["client_side"] = (
-                client_confirms_regression
-            )
+            combined_regression_details[
+                "server_side"
+            ] = server_confirms_regression
+            combined_regression_details[
+                "client_side"
+            ] = client_confirms_regression

             # 2nd level confirmation is sufficient - always add to confirmed regressions
             logging.info(
@@ -1291,17 +1291,17 @@ def from_rts_to_regression_table(
                     f"Confidence analysis for '{test_name}': {confidence_note}"
                 )
                 # Use 3rd level confidence if available
-                combined_regression_details["high_confidence"] = (
-                    high_confidence
-                )
+                combined_regression_details[
+                    "high_confidence"
+                ] = high_confidence
             else:
                 # No 3rd level data available - default to moderate confidence since 2nd level confirmed
                 logging.info(
                     f"No 3rd level data available for '{test_name}' - using 2nd level confirmation"
                 )
-                combined_regression_details["high_confidence"] = (
-                    True  # 2nd level confirmation is reliable
-                )
+                combined_regression_details[
+                    "high_confidence"
+                ] = True  # 2nd level confirmation is reliable

             # Always add to confirmed regressions when 2nd level confirms
             latency_confirmed_regression_details.append(

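Note on the compare.py hunks above: both spellings are behaviorally identical Python. The commit only moves the line break from the assigned value into the subscript key, presumably to match the output of the code formatter version pinned by the project (an assumption; the diff itself does not state the motivation). A minimal standalone sketch of the two forms, reusing the names from the hunk with a placeholder value:

# Placeholder value for illustration; the real flag comes from the regression analysis above.
server_confirms_regression = True
combined_regression_details = {}

# Form removed by the commit: the assigned value is wrapped in parentheses.
combined_regression_details["server_side"] = (
    server_confirms_regression
)

# Form added by the commit: the subscript key is wrapped instead.
combined_regression_details[
    "server_side"
] = server_confirms_regression

assert combined_regression_details == {"server_side": True}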
redisbench_admin/deploy/deploy.py

Lines changed: 1 addition & 9 deletions
@@ -73,15 +73,7 @@ def deploy_command_logic(args, project_name, project_version):
     tf_triggering_env = "redisbench-admin-deploy"
     logging.info("Setting an infra timeout of {} secs".format(infra_timeout_secs))
     if args.destroy is False:
-        (
-            tf_return_code,
-            _,
-            _,
-            _,
-            _,
-            _,
-            _,
-        ) = setup_remote_environment(
+        (tf_return_code, _, _, _, _, _, _,) = setup_remote_environment(
             tf,
             tf_github_sha,
             tf_github_actor,

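The deploy.py hunk above, and the matching collapses in export.py, run/common.py, run_async/async_terraform.py, and run_remote/remote_helpers.py below, only fold a vertically split unpacking target back onto one line; the call and the unpacking semantics are unchanged. A small self-contained sketch with a stub standing in for setup_remote_environment() (the stub and its return values are illustrative, not the real API):

def setup_remote_environment_stub():
    # Illustrative stand-in that returns a 7-tuple, as the real call site expects.
    return (0, None, None, None, None, None, None)

# The multi-line target (removed form) and the single-line target (added form)
# unpack identically; the trailing comma inside the parentheses is legal and
# does not create an extra element.
(
    tf_return_code,
    _,
    _,
    _,
    _,
    _,
    _,
) = setup_remote_environment_stub()

(tf_return_code, _, _, _, _, _, _,) = setup_remote_environment_stub()

assert tf_return_code == 0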
redisbench_admin/export/export.py

Lines changed: 1 addition & 7 deletions
@@ -42,13 +42,7 @@ def export_command_logic(args, project_name, project_version):
     deployment_name = args.deployment_name
     deployment_type = args.deployment_type
     results_format = args.results_format
-    (
-        _,
-        github_branch,
-        github_org,
-        github_repo,
-        _,
-    ) = git_vars_crosscheck(
+    (_, github_branch, github_org, github_repo, _,) = git_vars_crosscheck(
         None, args.github_branch, args.github_org, args.github_repo, None
     )
     exporter_timemetric_path = None

redisbench_admin/profilers/perf.py

Lines changed: 24 additions & 24 deletions
@@ -400,9 +400,9 @@ def generate_outputs(self, use_case, **kwargs):
                "Main THREAD Flame Graph: " + use_case, details
            )
            if artifact_result is True:
-                outputs["Main THREAD Flame Graph {}".format(identifier)] = (
-                    flame_graph_output
-                )
+                outputs[
+                    "Main THREAD Flame Graph {}".format(identifier)
+                ] = flame_graph_output
            result &= artifact_result

            tid = self.pid
@@ -440,9 +440,9 @@ def generate_outputs(self, use_case, **kwargs):
            )

            if artifact_result is True:
-                outputs["perf report per dso,sym {}".format(identifier)] = (
-                    perf_report_artifact
-                )
+                outputs[
+                    "perf report per dso,sym {}".format(identifier)
+                ] = perf_report_artifact
            result &= artifact_result

            # generate perf report per dso,sym
@@ -460,9 +460,9 @@ def generate_outputs(self, use_case, **kwargs):
            )

            if artifact_result is True:
-                outputs["perf report per dso,sym with callgraph {}".format(identifier)] = (
-                    perf_report_artifact
-                )
+                outputs[
+                    "perf report per dso,sym with callgraph {}".format(identifier)
+                ] = perf_report_artifact
            result &= artifact_result

            # generate perf report per dso,sym,srcline
@@ -487,9 +487,9 @@ def generate_outputs(self, use_case, **kwargs):
            )

            if artifact_result is True:
-                outputs["perf report per dso,sym,srcline {}".format(identifier)] = (
-                    perf_report_artifact
-                )
+                outputs[
+                    "perf report per dso,sym,srcline {}".format(identifier)
+                ] = perf_report_artifact
            result &= artifact_result

            self.logger.info(
@@ -527,9 +527,9 @@ def generate_outputs(self, use_case, **kwargs):
            )

            if artifact_result is True:
-                outputs["perf report top self-cpu {}".format(identifier)] = (
-                    perf_report_artifact
-                )
+                outputs[
+                    "perf report top self-cpu {}".format(identifier)
+                ] = perf_report_artifact
            result &= artifact_result

            # generate perf report --stdio report
@@ -546,9 +546,9 @@ def generate_outputs(self, use_case, **kwargs):
            )

            if artifact_result is True:
-                outputs["perf report top self-cpu (dso={})".format(binary)] = (
-                    perf_report_artifact
-                )
+                outputs[
+                    "perf report top self-cpu (dso={})".format(binary)
+                ] = perf_report_artifact
            result &= artifact_result

            if self.callgraph_mode == "dwarf":
@@ -590,9 +590,9 @@ def generate_outputs(self, use_case, **kwargs):
                )
                result &= artifact_result
                if artifact_result is True:
-                    outputs["Top entries in text form by LOC"] = (
-                        pprof_artifact_text_output
-                    )
+                    outputs[
+                        "Top entries in text form by LOC"
+                    ] = pprof_artifact_text_output
                tabular_data_map["text-lines"] = tabular_data
                self.logger.info("Generating pprof png output")
                pprof_png_output = self.output + ".pprof.png"
@@ -604,9 +604,9 @@ def generate_outputs(self, use_case, **kwargs):
                    self.output,
                )
                if artifact_result is True:
-                    outputs["Output graph image in PNG format"] = (
-                        pprof_artifact_png_output
-                    )
+                    outputs[
+                        "Output graph image in PNG format"
+                    ] = pprof_artifact_png_output
                result &= artifact_result

                # save stack collapsed

redisbench_admin/run/common.py

Lines changed: 6 additions & 24 deletions
@@ -206,10 +206,7 @@ def prepare_benchmark_parameters_specif_tooling(
        if isremote is True:
            benchmark_tool = "/tmp/{}".format(benchmark_tool)
            input_data_file = "/tmp/input.data"
-        (
-            command_arr,
-            command_str,
-        ) = prepare_tsbs_benchmark_command(
+        (command_arr, command_str,) = prepare_tsbs_benchmark_command(
            benchmark_tool,
            server_private_ip,
            server_plaintext_port,
@@ -221,10 +218,7 @@ def prepare_benchmark_parameters_specif_tooling(
            cluster_api_enabled,
        )
    if "memtier_benchmark" in benchmark_tool:
-        (
-            command_arr,
-            command_str,
-        ) = prepare_memtier_benchmark_command(
+        (command_arr, command_str,) = prepare_memtier_benchmark_command(
            benchmark_tool,
            server_private_ip,
            server_plaintext_port,
@@ -242,10 +236,7 @@ def prepare_benchmark_parameters_specif_tooling(
            ann_path = stdout[0].strip() + "/run/ann/pkg/multirun.py"
            logging.info("Remote ann-benchmark path: {}".format(ann_path))

-        (
-            command_arr,
-            command_str,
-        ) = prepare_ann_benchmark_command(
+        (command_arr, command_str,) = prepare_ann_benchmark_command(
            server_private_ip,
            server_plaintext_port,
            cluster_api_enabled,
@@ -259,10 +250,7 @@ def prepare_benchmark_parameters_specif_tooling(
        if isremote is True:
            benchmark_tool = "/tmp/{}".format(benchmark_tool)
            input_data_file = "/tmp/input.data"
-        (
-            command_arr,
-            command_str,
-        ) = prepare_ftsb_benchmark_command(
+        (command_arr, command_str,) = prepare_ftsb_benchmark_command(
            benchmark_tool,
            server_private_ip,
            server_plaintext_port,
@@ -279,10 +267,7 @@ def prepare_benchmark_parameters_specif_tooling(
        if isremote is True:
            benchmark_tool = "/tmp/{}".format(benchmark_tool)
            input_data_file = "/tmp/input.data"
-        (
-            command_arr,
-            command_str,
-        ) = prepare_aibench_benchmark_command(
+        (command_arr, command_str,) = prepare_aibench_benchmark_command(
            benchmark_tool,
            server_private_ip,
            server_plaintext_port,
@@ -787,10 +772,7 @@ def print_results_table_stdout(
    metric_names=[],
):
    # check which metrics to extract
-    (
-        _,
-        metrics,
-    ) = merge_default_and_config_metrics(
+    (_, metrics,) = merge_default_and_config_metrics(
        benchmark_config,
        default_metrics,
        None,

redisbench_admin/run_async/async_terraform.py

Lines changed: 2 additions & 10 deletions
@@ -114,11 +114,7 @@ def common_properties_log(self, private_key):
    def async_runner_setup(
        self,
    ):
-        (
-            remote_setup,
-            deployment_type,
-            remote_id,
-        ) = fetch_remote_setup_from_config(
+        (remote_setup, deployment_type, remote_id,) = fetch_remote_setup_from_config(
            [{"type": "async", "setup": "runner"}],
            "https://github.com/RedisLabsModules/testing-infrastructure.git",
            "master",
@@ -233,11 +229,7 @@ def terraform_spin_or_reuse_env(
        tf_override_name=None,
        tf_folder_path=None,
    ):
-        (
-            remote_setup,
-            deployment_type,
-            remote_id,
-        ) = fetch_remote_setup_from_config(
+        (remote_setup, deployment_type, remote_id,) = fetch_remote_setup_from_config(
            benchmark_config["remote"],
            "https://github.com/RedisLabsModules/testing-infrastructure.git",
            "master",

redisbench_admin/run_async/render_files.py

Lines changed: 3 additions & 3 deletions
@@ -28,9 +28,9 @@ def renderServiceFile(access_key, region, secret_key, gh_token, job_name, args,
        argv.append("--private_key")
        argv.append("/home/ubuntu/work_dir/tests/benchmarks/benchmarks.redislabs.pem")
    else:
-        argv[argv.index(args.private_key)] = (
-            "/home/ubuntu/work_dir/tests/benchmarks/benchmarks.redislabs.pem"
-        )
+        argv[
+            argv.index(args.private_key)
+        ] = "/home/ubuntu/work_dir/tests/benchmarks/benchmarks.redislabs.pem"
    if len(args.module_path) != 0:
        argv[argv.index(args.module_path[0])] = (
            "/home/ubuntu/work_dir/tests/benchmarks/"

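For context on the render_files.py hunk above: argv[argv.index(args.private_key)] = ... swaps the locally supplied key path for the path expected on the remote host, inside the argv list being rebuilt for the service file. list.index() returns the position of the first match and raises ValueError if the value is absent. A quick sketch with made-up argument values:

# Made-up values for illustration; the real list is rebuilt from the parsed CLI args.
private_key = "/local/path/key.pem"
argv = ["--private_key", private_key, "--setup_name", "oss-cluster"]

# Replace the first occurrence of the local key path in place.
argv[argv.index(private_key)] = (
    "/home/ubuntu/work_dir/tests/benchmarks/benchmarks.redislabs.pem"
)

assert argv[1] == "/home/ubuntu/work_dir/tests/benchmarks/benchmarks.redislabs.pem"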
redisbench_admin/run_local/run_local.py

Lines changed: 12 additions & 12 deletions
@@ -737,17 +737,17 @@ def commandstats_latencystats_process_name(
    branch = variant_labels_dict["branch"]

    if version is not None:
-        variant_labels_dict["command_and_metric_and_version"] = (
-            "{} - {} - {}".format(command, metric, version)
-        )
-        variant_labels_dict["command_and_metric_and_setup_and_version"] = (
-            "{} - {} - {} - {}".format(command, metric, setup_name, version)
-        )
+        variant_labels_dict[
+            "command_and_metric_and_version"
+        ] = "{} - {} - {}".format(command, metric, version)
+        variant_labels_dict[
+            "command_and_metric_and_setup_and_version"
+        ] = "{} - {} - {} - {}".format(command, metric, setup_name, version)

    if branch is not None:
-        variant_labels_dict["command_and_metric_and_branch"] = (
-            "{} - {} - {}".format(command, metric, branch)
-        )
-        variant_labels_dict["command_and_metric_and_setup_and_branch"] = (
-            "{} - {} - {} - {}".format(command, metric, setup_name, branch)
-        )
+        variant_labels_dict[
+            "command_and_metric_and_branch"
+        ] = "{} - {} - {}".format(command, metric, branch)
+        variant_labels_dict[
+            "command_and_metric_and_setup_and_branch"
+        ] = "{} - {} - {} - {}".format(command, metric, setup_name, branch)

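The run_local.py hunk above, and its twin in run_remote.py further down, keep building exactly the same composite label strings after the reformatting; only the wrapping of the dictionary key changes. A quick standalone check with placeholder values (the command, metric, setup, and version shown here are invented for illustration):

# Placeholder values for illustration only.
command, metric, setup_name, version = "GET", "p50", "oss-cluster-03-primaries", "7.2.0"

variant_labels_dict = {}
variant_labels_dict[
    "command_and_metric_and_version"
] = "{} - {} - {}".format(command, metric, version)
variant_labels_dict[
    "command_and_metric_and_setup_and_version"
] = "{} - {} - {} - {}".format(command, metric, setup_name, version)

assert variant_labels_dict["command_and_metric_and_setup_and_version"] == (
    "GET - p50 - oss-cluster-03-primaries - 7.2.0"
)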
redisbench_admin/run_remote/remote_helpers.py

Lines changed: 1 addition & 5 deletions
@@ -106,11 +106,7 @@ def remote_tool_pre_bench_step(
    )

    if "ftsb_" in benchmark_tool:
-        (
-            queries_file_link,
-            remote_tool_link,
-            tool_link,
-        ) = extract_ftsb_extra_links(
+        (queries_file_link, remote_tool_link, tool_link,) = extract_ftsb_extra_links(
            benchmark_config, benchmark_tool, config_key, architecture
        )
        logging.info(

redisbench_admin/run_remote/run_remote.py

Lines changed: 13 additions & 15 deletions
@@ -741,9 +741,7 @@ def run_remote_command_logic(args, project_name, project_version):
                        logging.info(
                            "✅ Data preload: Completed during setup"
                        )
-                        logging.info(
-                            "🏁 Dry-run completed successfully"
-                        )
+                        logging.info("🏁 Dry-run completed successfully")
                        logging.info(
                            "⏭️ Benchmark execution skipped (dry-run mode)"
                        )
@@ -1486,20 +1484,20 @@ def commandstats_latencystats_process_name(
    branch = variant_labels_dict["branch"]

    if version is not None:
-        variant_labels_dict["command_and_metric_and_version"] = (
-            "{} - {} - {}".format(command, metric, version)
-        )
-        variant_labels_dict["command_and_metric_and_setup_and_version"] = (
-            "{} - {} - {} - {}".format(command, metric, setup_name, version)
-        )
+        variant_labels_dict[
+            "command_and_metric_and_version"
+        ] = "{} - {} - {}".format(command, metric, version)
+        variant_labels_dict[
+            "command_and_metric_and_setup_and_version"
+        ] = "{} - {} - {} - {}".format(command, metric, setup_name, version)

    if branch is not None:
-        variant_labels_dict["command_and_metric_and_branch"] = (
-            "{} - {} - {}".format(command, metric, branch)
-        )
-        variant_labels_dict["command_and_metric_and_setup_and_branch"] = (
-            "{} - {} - {} - {}".format(command, metric, setup_name, branch)
-        )
+        variant_labels_dict[
+            "command_and_metric_and_branch"
+        ] = "{} - {} - {}".format(command, metric, branch)
+        variant_labels_dict[
+            "command_and_metric_and_setup_and_branch"
+        ] = "{} - {} - {} - {}".format(command, metric, setup_name, branch)


def shutdown_remote_redis(redis_conns, ssh_tunnel):
