Skip to content

Commit ab07c36

Browse files
Added isProcessAlive and getLocalRunFullFilename to redisgraph-benchmark-go helpers (#35)
* [add] added isProcessAlive and getLocalRunFullFilename
* Bumping version from 0.1.34 to 0.1.35
1 parent d80baa8 commit ab07c36

26 files changed: +1039 −399 lines

pyproject.toml

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -1,6 +1,6 @@
11
[tool.poetry]
22
name = "redisbench-admin"
3-
version = "0.1.34"
3+
version = "0.1.35"
44
description = "Redis benchmark run helper. A wrapper around Redis and Redis Modules benchmark tools ( ftsb_redisearch, memtier_benchmark, redis-benchmark, aibench, etc... )."
55
authors = ["filipecosta90 <[email protected]>"]
66
readme = "README.md"

redisbench_admin/__init__.py

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -1 +1 @@
1-
__version__ = '0.1.30'
1+
__version__ = "0.1.30"

redisbench_admin/cli.py

Lines changed: 37 additions & 13 deletions
Original file line number | Diff line number | Diff line change
@@ -16,11 +16,11 @@
1616

1717

1818
def populate_with_poetry_data():
19-
project_name = 'redisbench-admin'
19+
project_name = "redisbench-admin"
2020
project_version = __version__
2121
project_description = None
2222
try:
23-
poetry_data = toml.load("pyproject.toml")['tool']['poetry']
23+
poetry_data = toml.load("pyproject.toml")["tool"]["poetry"]
2424
project_name = poetry_data["name"]
2525
project_version = poetry_data["version"]
2626
project_description = poetry_data["description"]
@@ -34,16 +34,22 @@ def main():
3434
tool = None
3535
if len(sys.argv) < 2:
3636
print(
37-
"A minimum of 2 arguments is required: redisbench-admin <tool> <arguments>. Use redisbench-admin --help if you need further assistance.")
37+
"A minimum of 2 arguments is required: redisbench-admin <tool> <arguments>. Use redisbench-admin --help if you need further assistance."
38+
)
3839
sys.exit(1)
3940
requested_tool = sys.argv[1]
4041
project_name, project_description, project_version = populate_with_poetry_data()
4142
parser = argparse.ArgumentParser(
4243
description=project_description,
43-
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
44+
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
45+
)
4446
# common arguments to all tools
45-
parser.add_argument('--version', default=False, action='store_true', help='print version and exit')
46-
parser.add_argument('--local-dir', type=str, default="./", help='local dir to use as storage')
47+
parser.add_argument(
48+
"--version", default=False, action="store_true", help="print version and exit"
49+
)
50+
parser.add_argument(
51+
"--local-dir", type=str, default="./", help="local dir to use as storage"
52+
)
4753

4854
if requested_tool == "run":
4955
parser = create_run_arguments(parser)
@@ -78,22 +84,40 @@ def main():
7884

7985

8086
def print_invalid_tool_option(requested_tool, valid_tool_options):
81-
print("Invalid redisbench-admin <tool>. Requested tool: {}. Available tools: {}".format(
82-
requested_tool, ",".join(valid_tool_options)))
87+
print(
88+
"Invalid redisbench-admin <tool>. Requested tool: {}. Available tools: {}".format(
89+
requested_tool, ",".join(valid_tool_options)
90+
)
91+
)
8392

8493

8594
def print_version(project_name, project_version):
86-
print("{project_name} {project_version}".format(project_name=project_name, project_version=project_version))
95+
print(
96+
"{project_name} {project_version}".format(
97+
project_name=project_name, project_version=project_version
98+
)
99+
)
87100

88101

89102
def print_help(project_name, project_version):
90-
print("{project_name} {project_version}".format(project_name=project_name, project_version=project_version))
103+
print(
104+
"{project_name} {project_version}".format(
105+
project_name=project_name, project_version=project_version
106+
)
107+
)
91108
print("usage: {project_name} <tool> <args>...".format(project_name=project_name))
92109
print(
93-
"\t-) To know more on how to run benchmarks: {project_name} run --help".format(project_name=project_name))
110+
"\t-) To know more on how to run benchmarks: {project_name} run --help".format(
111+
project_name=project_name
112+
)
113+
)
94114
print(
95115
"\t-) To know more on how to compare benchmark results: {project_name} compare --help".format(
96-
project_name=project_name))
116+
project_name=project_name
117+
)
118+
)
97119
print(
98120
"\t-) To know more on how to export benchmark results: {project_name} export --help".format(
99-
project_name=project_name))
121+
project_name=project_name
122+
)
123+
)

redisbench_admin/compare/args.py

Lines changed: 36 additions & 12 deletions
Original file line number | Diff line number | Diff line change
@@ -1,14 +1,38 @@
11
def create_compare_arguments(parser):
2-
parser.add_argument('--baseline-file', type=str, required=True,
3-
help="baseline benchmark output file to read results from. can be a local file or a remote link.")
4-
parser.add_argument('--comparison-file', type=str, required=True,
5-
help="comparison benchmark output file to read results from. can be a local file or a remote link.")
6-
parser.add_argument('--use-result', type=str, default="median-result",
7-
help="for each key-metric, use either worst-result, best-result, or median-result")
8-
parser.add_argument('--steps', type=str, default="setup,benchmark",
9-
help="comma separated list of steps to be analyzed given the benchmark result files")
10-
parser.add_argument('--enable-fail-above', default=False, action='store_true',
11-
help="enables failing test if percentage of change is above threshold on any of the benchmark steps being analysed")
12-
parser.add_argument('--fail-above-pct-change', type=float, default=10.0,
13-
help='Fail above if any of the key-metrics presents an regression in percentage of change (from 0.0-100.0)')
2+
parser.add_argument(
3+
"--baseline-file",
4+
type=str,
5+
required=True,
6+
help="baseline benchmark output file to read results from. can be a local file or a remote link.",
7+
)
8+
parser.add_argument(
9+
"--comparison-file",
10+
type=str,
11+
required=True,
12+
help="comparison benchmark output file to read results from. can be a local file or a remote link.",
13+
)
14+
parser.add_argument(
15+
"--use-result",
16+
type=str,
17+
default="median-result",
18+
help="for each key-metric, use either worst-result, best-result, or median-result",
19+
)
20+
parser.add_argument(
21+
"--steps",
22+
type=str,
23+
default="setup,benchmark",
24+
help="comma separated list of steps to be analyzed given the benchmark result files",
25+
)
26+
parser.add_argument(
27+
"--enable-fail-above",
28+
default=False,
29+
action="store_true",
30+
help="enables failing test if percentage of change is above threshold on any of the benchmark steps being analysed",
31+
)
32+
parser.add_argument(
33+
"--fail-above-pct-change",
34+
type=float,
35+
default=10.0,
36+
help="Fail above if any of the key-metrics presents an regression in percentage of change (from 0.0-100.0)",
37+
)
1438
return parser

redisbench_admin/compare/compare.py

Lines changed: 56 additions & 27 deletions
Original file line number | Diff line number | Diff line change
@@ -17,23 +17,29 @@ def compare_command_logic(args):
1717
max_negative_pct_change = max_pct_change * -1.0
1818
enabled_fail = args.enable_fail_above
1919

20-
baseline_json = retrieve_local_or_remote_input_json(baseline_file, local_path, "--baseline-file")
20+
baseline_json = retrieve_local_or_remote_input_json(
21+
baseline_file, local_path, "--baseline-file"
22+
)
2123
if baseline_json is None:
22-
print('Error while retrieving {}! Exiting..'.format(baseline_file))
24+
print("Error while retrieving {}! Exiting..".format(baseline_file))
2325
sys.exit(1)
2426

25-
comparison_json = retrieve_local_or_remote_input_json(comparison_file, local_path, "--comparison-file")
27+
comparison_json = retrieve_local_or_remote_input_json(
28+
comparison_file, local_path, "--comparison-file"
29+
)
2630
if comparison_json is None:
27-
print('Error while retrieving {}! Exiting..'.format(comparison_file))
31+
print("Error while retrieving {}! Exiting..".format(comparison_file))
2832
sys.exit(1)
2933

3034
##### Comparison starts here #####
3135
baseline_key_results_steps = baseline_json["key-results"].keys()
3236
comparison_key_results_steps = comparison_json["key-results"].keys()
33-
baseline_df_config = generate_comparison_dataframe_configs(baseline_json["benchmark-config"],
34-
baseline_key_results_steps)
35-
comparison_df_config = generate_comparison_dataframe_configs(comparison_json["benchmark-config"],
36-
comparison_key_results_steps)
37+
baseline_df_config = generate_comparison_dataframe_configs(
38+
baseline_json["benchmark-config"], baseline_key_results_steps
39+
)
40+
comparison_df_config = generate_comparison_dataframe_configs(
41+
comparison_json["benchmark-config"], comparison_key_results_steps
42+
)
3743

3844
percentange_change_map = {}
3945
for step in baseline_key_results_steps:
@@ -42,49 +48,70 @@ def compare_command_logic(args):
4248
percentange_change_map[step] = {}
4349
print("##############################")
4450
print("Comparing {} step".format(step))
45-
key_result_run_name, baseline_metrics = get_key_results_and_values(baseline_json, step, use_result)
46-
key_result_run_name, comparison_metrics = get_key_results_and_values(comparison_json, step, use_result)
51+
key_result_run_name, baseline_metrics = get_key_results_and_values(
52+
baseline_json, step, use_result
53+
)
54+
key_result_run_name, comparison_metrics = get_key_results_and_values(
55+
comparison_json, step, use_result
56+
)
4757
for baseline_metric_name, baseline_metric_value in baseline_metrics.items():
4858
comparison_metric_value = None
4959
if baseline_metric_name in comparison_metrics:
5060
comparison_metric_value = comparison_metrics[baseline_metric_name]
51-
df_dict[baseline_metric_name] = [baseline_metric_value, comparison_metric_value]
61+
df_dict[baseline_metric_name] = [
62+
baseline_metric_value,
63+
comparison_metric_value,
64+
]
5265
df = pd.DataFrame(df_dict, index=["baseline", "comparison"])
5366
print("Percentage of change for comparison on {}".format(step))
54-
df = df.append(df.pct_change().rename(index={'comparison': 'pct_change'}).loc['pct_change'] * 100.0)
67+
df = df.append(
68+
df.pct_change()
69+
.rename(index={"comparison": "pct_change"})
70+
.loc["pct_change"]
71+
* 100.0
72+
)
5573

5674
for metric_name, items in df.iteritems():
5775

58-
lower_is_better = baseline_df_config[step]["sorting_metric_sorting_direction_map"][metric_name]
76+
lower_is_better = baseline_df_config[step][
77+
"sorting_metric_sorting_direction_map"
78+
][metric_name]
5979

6080
multiplier = 1.0
6181
# if lower is better than negative changes are and performance improvement
6282
if lower_is_better:
6383
multiplier = -1.0
6484

6585
pct_change = items.get("pct_change") * multiplier
66-
df.at['pct_change', metric_name] = pct_change
86+
df.at["pct_change", metric_name] = pct_change
6787
percentange_change_map[step][metric_name] = pct_change
6888

6989
print(df)
7090
if enabled_fail:
71-
failing_metrics_serie = df.loc['pct_change'] <= max_negative_pct_change
72-
failing_metrics = df.loc['pct_change'][failing_metrics_serie]
91+
failing_metrics_serie = df.loc["pct_change"] <= max_negative_pct_change
92+
failing_metrics = df.loc["pct_change"][failing_metrics_serie]
7393
ammount_of_failing_metrics = len(failing_metrics)
7494
if ammount_of_failing_metrics > 0:
7595
df_keys = df.keys()
76-
print("There was a total of {} metrics that presented a regression above {} %".format(
77-
ammount_of_failing_metrics, max_pct_change))
96+
print(
97+
"There was a total of {} metrics that presented a regression above {} %".format(
98+
ammount_of_failing_metrics, max_pct_change
99+
)
100+
)
78101
for pos, failed in enumerate(failing_metrics_serie):
79102
if failed:
80-
print("\tMetric '{}' failed. with an percentage of change of {:.2f} %".format(df_keys[pos],
81-
df.loc[
82-
'pct_change'][
83-
pos]))
103+
print(
104+
"\tMetric '{}' failed. with an percentage of change of {:.2f} %".format(
105+
df_keys[pos], df.loc["pct_change"][pos]
106+
)
107+
)
84108
sys.exit(1)
85109
else:
86-
print("Skipping step: {} due to command line argument --steps not containing it ({})".format(step, ",".join(
87-
included_steps)))
110+
print(
111+
"Skipping step: {} due to command line argument --steps not containing it ({})".format(
112+
step, ",".join(included_steps)
113+
)
114+
)
88115

89116

90117
def generate_comparison_dataframe_configs(benchmark_config, steps):
@@ -104,7 +131,9 @@ def generate_comparison_dataframe_configs(benchmark_config, steps):
104131
step_df_dict[step]["metric_json_path"].append(metric_json_path)
105132
step_df_dict[step]["df_dict"][metric_name] = []
106133
step_df_dict[step]["sorting_metric_sorting_direction"].append(
107-
False if metric["comparison"] == "higher-better" else True)
108-
step_df_dict[step]["sorting_metric_sorting_direction_map"][metric_name] = False if metric[
109-
"comparison"] == "higher-better" else True
134+
False if metric["comparison"] == "higher-better" else True
135+
)
136+
step_df_dict[step]["sorting_metric_sorting_direction_map"][metric_name] = (
137+
False if metric["comparison"] == "higher-better" else True
138+
)
110139
return step_df_dict

redisbench_admin/export/args.py

Lines changed: 49 additions & 20 deletions
Original file line number | Diff line number | Diff line change
@@ -1,22 +1,51 @@
11
def create_export_arguments(parser):
2-
parser.add_argument('--benchmark-result-files', type=str, required=True,
3-
help="benchmark results files to read results from. can be a local file, a remote link, or an s3 bucket.")
4-
parser.add_argument('--steps', type=str, default="setup,benchmark",
5-
help="comma separated list of steps to be analyzed given the benchmark result files")
6-
parser.add_argument('--exporter', type=str, default="redistimeseries",
7-
help="exporter to be used ( either csv or redistimeseries )")
8-
parser.add_argument('--results-format', type=str, default="redis-benchmark",
9-
help="results format of the the benchmark results files to read results from ( either memtier_benchmark, redis-benchmark, or ftsb_redisearch )")
10-
parser.add_argument('--use-result', type=str, default="median-result",
11-
help="for each key-metric, use either worst-result, best-result, or median-result")
12-
parser.add_argument('--extra-tags', type=str, default="",
13-
help='comma separated extra tags in the format of key1=value,key2=value,...')
14-
parser.add_argument('--host', type=str, default="localhost",
15-
help="redistimeseries host")
16-
parser.add_argument('--port', type=int, default=6379,
17-
help="redistimeseries port")
18-
parser.add_argument('--password', type=str, default=None,
19-
help="redistimeseries password")
20-
parser.add_argument('--input-tags-json', type=str, default="",
21-
help='input filename containing the extracted tags from redis.')
2+
parser.add_argument(
3+
"--benchmark-result-files",
4+
type=str,
5+
required=True,
6+
help="benchmark results files to read results from. can be a local file, a remote link, or an s3 bucket.",
7+
)
8+
parser.add_argument(
9+
"--steps",
10+
type=str,
11+
default="setup,benchmark",
12+
help="comma separated list of steps to be analyzed given the benchmark result files",
13+
)
14+
parser.add_argument(
15+
"--exporter",
16+
type=str,
17+
default="redistimeseries",
18+
help="exporter to be used ( either csv or redistimeseries )",
19+
)
20+
parser.add_argument(
21+
"--results-format",
22+
type=str,
23+
default="redis-benchmark",
24+
help="results format of the the benchmark results files to read results from ( either memtier_benchmark, redis-benchmark, or ftsb_redisearch )",
25+
)
26+
parser.add_argument(
27+
"--use-result",
28+
type=str,
29+
default="median-result",
30+
help="for each key-metric, use either worst-result, best-result, or median-result",
31+
)
32+
parser.add_argument(
33+
"--extra-tags",
34+
type=str,
35+
default="",
36+
help="comma separated extra tags in the format of key1=value,key2=value,...",
37+
)
38+
parser.add_argument(
39+
"--host", type=str, default="localhost", help="redistimeseries host"
40+
)
41+
parser.add_argument("--port", type=int, default=6379, help="redistimeseries port")
42+
parser.add_argument(
43+
"--password", type=str, default=None, help="redistimeseries password"
44+
)
45+
parser.add_argument(
46+
"--input-tags-json",
47+
type=str,
48+
default="",
49+
help="input filename containing the extracted tags from redis.",
50+
)
2251
return parser

0 commit comments

Comments
 (0)