Skip to content

Commit 6a06046

Browse files
Fixed black/flake8 issues
1 parent c3766c6 commit 6a06046

File tree

19 files changed

+552
-94
lines changed

19 files changed

+552
-94
lines changed

Makefile

Lines changed: 58 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,58 @@
1+
# Makefile for redisbench-admin
2+
3+
.PHONY: compliance compliance-fix test integration-tests help
4+
5+
# Code quality and compliance checks
6+
compliance:
7+
@echo "🔍 Running compliance checks..."
8+
@if poetry run black --check redisbench_admin; then \
9+
echo "📝 Black formatting: ✅ PASSED"; \
10+
else \
11+
echo "📝 Black formatting: ❌ FAILED"; \
12+
echo "💡 Run 'make format' to fix formatting issues"; \
13+
exit 1; \
14+
fi
15+
@if poetry run flake8 redisbench_admin; then \
16+
echo "🔍 Flake8 linting: ✅ PASSED"; \
17+
else \
18+
echo "🔍 Flake8 linting: ❌ FAILED"; \
19+
exit 1; \
20+
fi
21+
@echo "✅ All compliance checks passed!"
22+
23+
# Fix code formatting issues
24+
format:
25+
@echo "🔧 Fixing code formatting..."
26+
poetry run black redisbench_admin
27+
@echo "✅ Code formatting fixed!"
28+
29+
# Alias for format
30+
compliance-fix: format
31+
32+
# Run tests with coverage
33+
test:
34+
@echo "🧪 Running tests..."
35+
poetry run coverage erase
36+
poetry run pytest --cov=redisbench_admin --cov-report=term-missing -ra
37+
poetry run coverage xml
38+
@echo "✅ Tests completed!"
39+
40+
# Run integration tests (alias for test)
41+
integration-tests: test
42+
43+
# Run both compliance and tests
44+
all: compliance test
45+
46+
# Show help
47+
help:
48+
@echo "Available targets:"
49+
@echo " compliance - Run code quality checks (black, flake8)"
50+
@echo " format - Fix code formatting with black"
51+
@echo " compliance-fix - Alias for format"
52+
@echo " test - Run tests with coverage"
53+
@echo " integration-tests - Alias for test"
54+
@echo " all - Run compliance checks and tests"
55+
@echo " help - Show this help message"
56+
57+
# Default target
58+
.DEFAULT_GOAL := help

redisbench_admin/compare/compare.py

Lines changed: 14 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -1216,9 +1216,9 @@ def from_rts_to_regression_table(
12161216
f"Blocking regression confirmation for '{test_name}' due to unstable latency data"
12171217
)
12181218
if server_has_unstable:
1219-
logging.info(f" Server-side latency data is unstable")
1219+
logging.info(" Server-side latency data is unstable")
12201220
if client_has_unstable:
1221-
logging.info(f" Client-side latency data is unstable")
1221+
logging.info(" Client-side latency data is unstable")
12221222
else:
12231223
both_confirm_regression = (
12241224
server_confirms_regression and client_confirms_regression
@@ -1244,12 +1244,12 @@ def from_rts_to_regression_table(
12441244
server_regression_details or client_regression_details
12451245
)
12461246
if combined_regression_details:
1247-
combined_regression_details[
1248-
"server_side"
1249-
] = server_confirms_regression
1250-
combined_regression_details[
1251-
"client_side"
1252-
] = client_confirms_regression
1247+
combined_regression_details["server_side"] = (
1248+
server_confirms_regression
1249+
)
1250+
combined_regression_details["client_side"] = (
1251+
client_confirms_regression
1252+
)
12531253

12541254
# 2nd level confirmation is sufficient - always add to confirmed regressions
12551255
logging.info(
@@ -1291,17 +1291,17 @@ def from_rts_to_regression_table(
12911291
f"Confidence analysis for '{test_name}': {confidence_note}"
12921292
)
12931293
# Use 3rd level confidence if available
1294-
combined_regression_details[
1295-
"high_confidence"
1296-
] = high_confidence
1294+
combined_regression_details["high_confidence"] = (
1295+
high_confidence
1296+
)
12971297
else:
12981298
# No 3rd level data available - default to moderate confidence since 2nd level confirmed
12991299
logging.info(
13001300
f"No 3rd level data available for '{test_name}' - using 2nd level confirmation"
13011301
)
1302-
combined_regression_details[
1303-
"high_confidence"
1304-
] = True # 2nd level confirmation is reliable
1302+
combined_regression_details["high_confidence"] = (
1303+
True # 2nd level confirmation is reliable
1304+
)
13051305

13061306
# Always add to confirmed regressions when 2nd level confirms
13071307
latency_confirmed_regression_details.append(

redisbench_admin/deploy/deploy.py

Lines changed: 9 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -73,7 +73,15 @@ def deploy_command_logic(args, project_name, project_version):
7373
tf_triggering_env = "redisbench-admin-deploy"
7474
logging.info("Setting an infra timeout of {} secs".format(infra_timeout_secs))
7575
if args.destroy is False:
76-
(tf_return_code, _, _, _, _, _, _,) = setup_remote_environment(
76+
(
77+
tf_return_code,
78+
_,
79+
_,
80+
_,
81+
_,
82+
_,
83+
_,
84+
) = setup_remote_environment(
7785
tf,
7886
tf_github_sha,
7987
tf_github_actor,

redisbench_admin/export/export.py

Lines changed: 7 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -42,7 +42,13 @@ def export_command_logic(args, project_name, project_version):
4242
deployment_name = args.deployment_name
4343
deployment_type = args.deployment_type
4444
results_format = args.results_format
45-
(_, github_branch, github_org, github_repo, _,) = git_vars_crosscheck(
45+
(
46+
_,
47+
github_branch,
48+
github_org,
49+
github_repo,
50+
_,
51+
) = git_vars_crosscheck(
4652
None, args.github_branch, args.github_org, args.github_repo, None
4753
)
4854
exporter_timemetric_path = None

redisbench_admin/profilers/perf.py

Lines changed: 24 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -400,9 +400,9 @@ def generate_outputs(self, use_case, **kwargs):
400400
"Main THREAD Flame Graph: " + use_case, details
401401
)
402402
if artifact_result is True:
403-
outputs[
404-
"Main THREAD Flame Graph {}".format(identifier)
405-
] = flame_graph_output
403+
outputs["Main THREAD Flame Graph {}".format(identifier)] = (
404+
flame_graph_output
405+
)
406406
result &= artifact_result
407407

408408
tid = self.pid
@@ -440,9 +440,9 @@ def generate_outputs(self, use_case, **kwargs):
440440
)
441441

442442
if artifact_result is True:
443-
outputs[
444-
"perf report per dso,sym {}".format(identifier)
445-
] = perf_report_artifact
443+
outputs["perf report per dso,sym {}".format(identifier)] = (
444+
perf_report_artifact
445+
)
446446
result &= artifact_result
447447

448448
# generate perf report per dso,sym
@@ -460,9 +460,9 @@ def generate_outputs(self, use_case, **kwargs):
460460
)
461461

462462
if artifact_result is True:
463-
outputs[
464-
"perf report per dso,sym with callgraph {}".format(identifier)
465-
] = perf_report_artifact
463+
outputs["perf report per dso,sym with callgraph {}".format(identifier)] = (
464+
perf_report_artifact
465+
)
466466
result &= artifact_result
467467

468468
# generate perf report per dso,sym,srcline
@@ -487,9 +487,9 @@ def generate_outputs(self, use_case, **kwargs):
487487
)
488488

489489
if artifact_result is True:
490-
outputs[
491-
"perf report per dso,sym,srcline {}".format(identifier)
492-
] = perf_report_artifact
490+
outputs["perf report per dso,sym,srcline {}".format(identifier)] = (
491+
perf_report_artifact
492+
)
493493
result &= artifact_result
494494

495495
self.logger.info(
@@ -527,9 +527,9 @@ def generate_outputs(self, use_case, **kwargs):
527527
)
528528

529529
if artifact_result is True:
530-
outputs[
531-
"perf report top self-cpu {}".format(identifier)
532-
] = perf_report_artifact
530+
outputs["perf report top self-cpu {}".format(identifier)] = (
531+
perf_report_artifact
532+
)
533533
result &= artifact_result
534534

535535
# generate perf report --stdio report
@@ -546,9 +546,9 @@ def generate_outputs(self, use_case, **kwargs):
546546
)
547547

548548
if artifact_result is True:
549-
outputs[
550-
"perf report top self-cpu (dso={})".format(binary)
551-
] = perf_report_artifact
549+
outputs["perf report top self-cpu (dso={})".format(binary)] = (
550+
perf_report_artifact
551+
)
552552
result &= artifact_result
553553

554554
if self.callgraph_mode == "dwarf":
@@ -590,9 +590,9 @@ def generate_outputs(self, use_case, **kwargs):
590590
)
591591
result &= artifact_result
592592
if artifact_result is True:
593-
outputs[
594-
"Top entries in text form by LOC"
595-
] = pprof_artifact_text_output
593+
outputs["Top entries in text form by LOC"] = (
594+
pprof_artifact_text_output
595+
)
596596
tabular_data_map["text-lines"] = tabular_data
597597
self.logger.info("Generating pprof png output")
598598
pprof_png_output = self.output + ".pprof.png"
@@ -604,9 +604,9 @@ def generate_outputs(self, use_case, **kwargs):
604604
self.output,
605605
)
606606
if artifact_result is True:
607-
outputs[
608-
"Output graph image in PNG format"
609-
] = pprof_artifact_png_output
607+
outputs["Output graph image in PNG format"] = (
608+
pprof_artifact_png_output
609+
)
610610
result &= artifact_result
611611

612612
# save stack collapsed

redisbench_admin/run/common.py

Lines changed: 24 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -206,7 +206,10 @@ def prepare_benchmark_parameters_specif_tooling(
206206
if isremote is True:
207207
benchmark_tool = "/tmp/{}".format(benchmark_tool)
208208
input_data_file = "/tmp/input.data"
209-
(command_arr, command_str,) = prepare_tsbs_benchmark_command(
209+
(
210+
command_arr,
211+
command_str,
212+
) = prepare_tsbs_benchmark_command(
210213
benchmark_tool,
211214
server_private_ip,
212215
server_plaintext_port,
@@ -218,7 +221,10 @@ def prepare_benchmark_parameters_specif_tooling(
218221
cluster_api_enabled,
219222
)
220223
if "memtier_benchmark" in benchmark_tool:
221-
(command_arr, command_str,) = prepare_memtier_benchmark_command(
224+
(
225+
command_arr,
226+
command_str,
227+
) = prepare_memtier_benchmark_command(
222228
benchmark_tool,
223229
server_private_ip,
224230
server_plaintext_port,
@@ -236,7 +242,10 @@ def prepare_benchmark_parameters_specif_tooling(
236242
ann_path = stdout[0].strip() + "/run/ann/pkg/multirun.py"
237243
logging.info("Remote ann-benchmark path: {}".format(ann_path))
238244

239-
(command_arr, command_str,) = prepare_ann_benchmark_command(
245+
(
246+
command_arr,
247+
command_str,
248+
) = prepare_ann_benchmark_command(
240249
server_private_ip,
241250
server_plaintext_port,
242251
cluster_api_enabled,
@@ -250,7 +259,10 @@ def prepare_benchmark_parameters_specif_tooling(
250259
if isremote is True:
251260
benchmark_tool = "/tmp/{}".format(benchmark_tool)
252261
input_data_file = "/tmp/input.data"
253-
(command_arr, command_str,) = prepare_ftsb_benchmark_command(
262+
(
263+
command_arr,
264+
command_str,
265+
) = prepare_ftsb_benchmark_command(
254266
benchmark_tool,
255267
server_private_ip,
256268
server_plaintext_port,
@@ -267,7 +279,10 @@ def prepare_benchmark_parameters_specif_tooling(
267279
if isremote is True:
268280
benchmark_tool = "/tmp/{}".format(benchmark_tool)
269281
input_data_file = "/tmp/input.data"
270-
(command_arr, command_str,) = prepare_aibench_benchmark_command(
282+
(
283+
command_arr,
284+
command_str,
285+
) = prepare_aibench_benchmark_command(
271286
benchmark_tool,
272287
server_private_ip,
273288
server_plaintext_port,
@@ -772,7 +787,10 @@ def print_results_table_stdout(
772787
metric_names=[],
773788
):
774789
# check which metrics to extract
775-
(_, metrics,) = merge_default_and_config_metrics(
790+
(
791+
_,
792+
metrics,
793+
) = merge_default_and_config_metrics(
776794
benchmark_config,
777795
default_metrics,
778796
None,

redisbench_admin/run_async/async_terraform.py

Lines changed: 10 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -114,7 +114,11 @@ def common_properties_log(self, private_key):
114114
def async_runner_setup(
115115
self,
116116
):
117-
(remote_setup, deployment_type, remote_id,) = fetch_remote_setup_from_config(
117+
(
118+
remote_setup,
119+
deployment_type,
120+
remote_id,
121+
) = fetch_remote_setup_from_config(
118122
[{"type": "async", "setup": "runner"}],
119123
"https://github.com/RedisLabsModules/testing-infrastructure.git",
120124
"master",
@@ -229,7 +233,11 @@ def terraform_spin_or_reuse_env(
229233
tf_override_name=None,
230234
tf_folder_path=None,
231235
):
232-
(remote_setup, deployment_type, remote_id,) = fetch_remote_setup_from_config(
236+
(
237+
remote_setup,
238+
deployment_type,
239+
remote_id,
240+
) = fetch_remote_setup_from_config(
233241
benchmark_config["remote"],
234242
"https://github.com/RedisLabsModules/testing-infrastructure.git",
235243
"master",

redisbench_admin/run_async/render_files.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -28,9 +28,9 @@ def renderServiceFile(access_key, region, secret_key, gh_token, job_name, args,
2828
argv.append("--private_key")
2929
argv.append("/home/ubuntu/work_dir/tests/benchmarks/benchmarks.redislabs.pem")
3030
else:
31-
argv[
32-
argv.index(args.private_key)
33-
] = "/home/ubuntu/work_dir/tests/benchmarks/benchmarks.redislabs.pem"
31+
argv[argv.index(args.private_key)] = (
32+
"/home/ubuntu/work_dir/tests/benchmarks/benchmarks.redislabs.pem"
33+
)
3434
if len(args.module_path) != 0:
3535
argv[argv.index(args.module_path[0])] = (
3636
"/home/ubuntu/work_dir/tests/benchmarks/"

redisbench_admin/run_local/args.py

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -40,4 +40,16 @@ def create_run_local_arguments(parser):
4040
default=IGNORE_KEYSPACE_ERRORS,
4141
help="Ignore keyspace check errors. Will still log them as errors",
4242
)
43+
parser.add_argument(
44+
"--dry-run",
45+
default=False,
46+
action="store_true",
47+
help="Setup environment and test connectivity without running benchmarks",
48+
)
49+
parser.add_argument(
50+
"--dry-run-with-preload",
51+
default=False,
52+
action="store_true",
53+
help="Setup environment, preload data, and test connectivity without running benchmarks",
54+
)
4355
return parser

0 commit comments

Comments (0)