 from redisbench_admin.export.common.common import split_tags_string
+from redisbench_admin.export.google_benchmark.google_benchmark_json_format import (
+    generate_summary_json_google_benchmark,
+)
 from redisbench_admin.export.pyperf.pyperf_json_format import (
     generate_summary_json_pyperf,
 )
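The new import wires in a Google Benchmark summary parser from google_benchmark_json_format.py, which is not part of this diff. For context, Google Benchmark's --benchmark_format=json output is roughly shaped like the sketch below; the values are made up and the field set shown (name, iterations, real_time, cpu_time, time_unit) is only the common subset, not an exhaustive schema.

import json

# Minimal sketch of a Google Benchmark JSON results file
# (as produced with --benchmark_format=json); values are invented.
sample = {
    "context": {"date": "2022-01-01T00:00:00", "host_name": "ci-runner"},
    "benchmarks": [
        {
            "name": "BM_Set/100",
            "iterations": 100000,
            "real_time": 1234.5,
            "cpu_time": 1230.1,
            "time_unit": "ns",
        }
    ],
}

# Iterate the per-benchmark entries the same way a summary parser would.
for bench in sample["benchmarks"]:
    print(bench["name"], bench["real_time"], bench["time_unit"])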
@@ -50,10 +53,13 @@ def export_command_logic(args, project_name, project_version):
             "You need to specify at least one (or more) of --deployment-version --github_branch arguments"
         )
         exit(1)
-    if results_format != "csv" and results_format != "pyperf-json":
+    non_required_spec = ["csv", "pyperf-json", "google.benchmark"]
+    if results_format not in non_required_spec:
         if exporter_spec_file is None:
             logging.error(
-                "--exporter-spec-file is required for all formats with exception of csv and pyperf-json"
+                "--exporter-spec-file is required for all formats with exception of {}".format(
+                    ",".join(non_required_spec)
+                )
             )
             exit(1)
         else:
@@ -76,6 +82,22 @@ def export_command_logic(args, project_name, project_version):
         with open(benchmark_file, "r") as json_file:
             start_dict = json.load(json_file)
             results_dict = generate_summary_json_pyperf(start_dict)
+    if results_format == "google.benchmark":
+        with open(benchmark_file, "r") as json_file:
+            # override test names with the ones found in the results file
+            print_warning = False
+            old_test_name = test_name
+            if test_name is not None:
+                print_warning = True
+            start_dict = json.load(json_file)
+            results_dict, test_name = generate_summary_json_google_benchmark(start_dict)
+            if print_warning is True:
+                logging.warning(
+                    "You've specified a test name {} but for google.benchmark we override it based on the test names retrieved from the output file {}".format(
+                        old_test_name, test_name
+                    )
+                )
+
     if args.override_test_time:
         datapoints_timestamp = int(args.override_test_time.timestamp() * 1000.0)
         logging.info(
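Given only the tuple unpacking visible above (results_dict, test_name), here is a rough sketch of what generate_summary_json_google_benchmark could do. This is an illustration under assumptions, not the implementation in google_benchmark_json_format.py; the metric naming and the rule for deriving the test name are invented.

def generate_summary_json_google_benchmark_sketch(benchmark_dict):
    # Illustrative only: flatten each benchmark entry into a metrics dict
    # and derive a single test name from the benchmark names.
    results = {}
    names = []
    for bench in benchmark_dict.get("benchmarks", []):
        name = bench["name"]
        names.append(name)
        for metric in ("real_time", "cpu_time", "iterations"):
            if metric in bench:
                results["{}.{}".format(name, metric)] = bench[metric]
    test_name = "/".join(names) if names else None
    return results, test_name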
@@ -120,9 +142,9 @@ def export_command_logic(args, project_name, project_version):
         triggering_env,
     )
     logging.info("Parsed a total of {} metrics".format(len(timeseries_dict.keys())))
-    if results_format == "pyperf-json":
-        logging.info("Parsing pyperf format into timeseries format")
-        timeseries_dict = export_pyperf_json_to_timeseries_dict(
+    if results_format == "pyperf-json" or results_format == "google.benchmark":
+        logging.info("Parsing {} format into timeseries format".format(results_format))
+        timeseries_dict = export_json_to_timeseries_dict(
         results_dict,
         break_by_dict,
         datapoints_timestamp,
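Both pyperf-json and google.benchmark summaries now flow through the renamed export_json_to_timeseries_dict. Purely to illustrate the general idea (and not the function's actual body, which is not shown in this diff), a flat summary dict plus break-by labels can be folded into timeseries entries along these lines; the key layout and the helper name below are assumptions.

def summary_to_timeseries_sketch(results_dict, break_by_dict, datapoints_timestamp):
    # Illustrative only: one timeseries per metric, one datapoint per series,
    # with break-by labels folded into the series name.
    timeseries = {}
    labels = ":".join("{}={}".format(k, v) for k, v in sorted(break_by_dict.items()))
    for metric_name, value in results_dict.items():
        ts_name = "{}:{}".format(labels, metric_name) if labels else metric_name
        timeseries[ts_name] = {"data": [(datapoints_timestamp, value)]}
    return timeseries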
@@ -181,7 +203,7 @@ def export_command_logic(args, project_name, project_version):
     )


-def export_pyperf_json_to_timeseries_dict(
+def export_json_to_timeseries_dict(
     benchmark_file,
     break_by_dict,
     datapoints_timestamp,