Pass --timeout flag to pyperf #354

Merged: 3 commits, Oct 2, 2024
6 changes: 4 additions & 2 deletions doc/usage.rst
@@ -102,8 +102,8 @@ Usage::
     pyperformance run [-h] [-r] [-f] [--debug-single-value] [-v] [-m]
                       [--affinity CPU_LIST] [-o FILENAME]
                       [--append FILENAME] [--manifest MANIFEST]
-                      [-b BM_LIST] [--inherit-environ VAR_LIST]
-                      [-p PYTHON]
+                      [--timeout TIMEOUT] [-b BM_LIST]
+                      [--inherit-environ VAR_LIST] [-p PYTHON]
 
 options::
 
@@ -124,6 +124,8 @@ options::
                         baseline_python, not changed_python.
   --append FILENAME     Add runs to an existing file, or create it if
                         it doesn't exist
+  --timeout TIMEOUT     Specify a timeout in seconds for a single
+                        benchmark run (default: disabled)
   --manifest MANIFEST   benchmark manifest file to use
   -b BM_LIST, --benchmarks BM_LIST
                         Comma-separated list of benchmarks to run. Can
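
For context, a minimal sketch of driving the new flag from Python; the benchmark name, timeout value, and output file below are illustrative, and pyperformance is assumed to be installed on PATH:

import subprocess

# Run one benchmark and abort any single benchmark run that exceeds 300 seconds.
subprocess.run(
    ["pyperformance", "run", "--timeout", "300", "-b", "nbody", "-o", "timeout_demo.json"],
    check=True,
)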
6 changes: 5 additions & 1 deletion pyperformance/_benchmark.py
@@ -233,7 +233,11 @@ def _run_perf_script(python, runscript, runid, *,
         sys.stderr.flush()
         sys.stderr.write(stderr)
         sys.stderr.flush()
-        raise RuntimeError("Benchmark died")
+        # pyperf returns exit code 124 if the benchmark execution times out
+        if ec == 124:
+            raise TimeoutError("Benchmark timed out")
+        else:
+            raise RuntimeError("Benchmark died")
     return pyperf.BenchmarkSuite.load(tmp)
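
The exit code 124 tested above is the same convention used by the timeout(1) utility. A standalone sketch of that convention, assuming a plain subprocess call rather than the real pyperf runner:

import subprocess
import sys

def run_with_timeout(argv, timeout):
    """Run argv and return 124 if it runs longer than timeout seconds."""
    try:
        proc = subprocess.run(argv, timeout=timeout)
    except subprocess.TimeoutExpired:
        return 124  # the code the diff above maps to TimeoutError
    return proc.returncode

# A sleep that outlives its one-second budget reports 124.
print(run_with_timeout([sys.executable, "-c", "import time; time.sleep(5)"], timeout=1))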


11 changes: 11 additions & 0 deletions pyperformance/cli.py
@@ -25,6 +25,13 @@ def comma_separated(values):
     return list(filter(None, values))
 
 
+def check_positive(value):
+    value = int(value)
+    if value <= 0:
+        raise argparse.ArgumentTypeError("Argument must be a positive integer.")
+    return value
+
+
 def filter_opts(cmd, *, allow_no_benchmarks=False):
     cmd.add_argument("--manifest", help="benchmark manifest file to use")
 
@@ -82,6 +89,10 @@ def parse_args():
                      help="Use the same number of loops as a previous run "
                           "(i.e., don't recalibrate). Should be a path to a "
                           ".json file from a previous run.")
+    cmd.add_argument("--timeout",
+                     help="Specify a timeout in seconds for a single "
+                          "benchmark run (default: disabled)",
+                     type=check_positive)
     filter_opts(cmd)
 
     # show
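
A small standalone sketch of how the check_positive type plugs into argparse; the parser here is a throwaway example, not the real pyperformance CLI:

import argparse

def check_positive(value):
    value = int(value)
    if value <= 0:
        raise argparse.ArgumentTypeError("Argument must be a positive integer.")
    return value

parser = argparse.ArgumentParser()
parser.add_argument("--timeout", type=check_positive,
                    help="timeout in seconds for a single benchmark run")
print(parser.parse_args(["--timeout", "300"]).timeout)  # -> 300
# parser.parse_args(["--timeout", "0"]) exits with the "positive integer" error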
4 changes: 2 additions & 2 deletions pyperformance/commands.py
@@ -191,8 +191,8 @@ def cmd_run(options, benchmarks):
 
     if errors:
         print("%s benchmarks failed:" % len(errors))
-        for name in errors:
-            print("- %s" % name)
+        for name, reason in errors:
+            print("- %s (%s)" % (name, reason))
         print()
         sys.exit(1)
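
A quick illustration of the new summary format with a made-up error list; the benchmark name is hypothetical:

# errors now holds (name, reason) pairs instead of bare names.
errors = [("hypothetical_bench", TimeoutError("Benchmark timed out"))]
print("%s benchmarks failed:" % len(errors))
for name, reason in errors:
    print("- %s (%s)" % (name, reason))
# Output:
# 1 benchmarks failed:
# - hypothetical_bench (Benchmark timed out)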

2 changes: 1 addition & 1 deletion pyperformance/requirements/requirements.txt
@@ -10,5 +10,5 @@ psutil==5.9.5
     # via
     #   -r requirements.in
     #   pyperf
-pyperf==2.7.0
+pyperf==2.8.0
     # via -r requirements.in
13 changes: 11 additions & 2 deletions pyperformance/run.py
@@ -164,7 +164,7 @@ def add_bench(dest_suite, obj):
         bench_venv, bench_runid = benchmarks.get(bench)
         if bench_venv is None:
             print("ERROR: Benchmark %s failed: could not install requirements" % name)
-            errors.append(name)
+            errors.append((name, "Install requirements error"))
             continue
         try:
             result = bench.run(
@@ -174,10 +174,17 @@ def add_bench(dest_suite, obj):
                 venv=bench_venv,
                 verbose=options.verbose,
             )
+        except TimeoutError as exc:
+            print("ERROR: Benchmark %s timed out" % name)
+            errors.append((name, exc))
+        except RuntimeError as exc:
+            print("ERROR: Benchmark %s failed: %s" % (name, exc))
+            traceback.print_exc()
+            errors.append((name, exc))
         except Exception as exc:
             print("ERROR: Benchmark %s failed: %s" % (name, exc))
             traceback.print_exc()
-            errors.append(name)
+            errors.append((name, exc))
         else:
             suite = add_bench(suite, result)
 
@@ -233,5 +240,7 @@ def get_pyperf_opts(options):
         opts.append('--inherit-environ=%s' % ','.join(options.inherit_environ))
     if options.min_time:
         opts.append('--min-time=%s' % options.min_time)
+    if options.timeout:
+        opts.append('--timeout=%s' % options.timeout)
 
     return opts
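
A hedged sketch of how the validated value reaches the pyperf command line built by get_pyperf_opts(); the Options class below is a stand-in for the argparse namespace, not real pyperformance code:

class Options:
    # Stand-in for the parsed CLI namespace; only the field used here.
    timeout = 300  # seconds, as validated by check_positive()

opts = []
options = Options()
if options.timeout:
    opts.append('--timeout=%s' % options.timeout)
print(opts)  # ['--timeout=300'], later passed through to each pyperf run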
12 changes: 6 additions & 6 deletions pyperformance/tests/test_commands.py
@@ -399,7 +399,7 @@ def test_compare_single_value(self):
             Performance version: 0.2
 
             ### call_simple ###
-            7896.0 kB -> 7900.0 kB: 1.00x larger
+            7896.0 KiB -> 7900.0 KiB: 1.00x larger
             ''').lstrip())
 
     def test_compare_csv(self):
@@ -458,11 +458,11 @@ def test_compare_table_single_value(self):
 
             Performance version: 0.2
 
-            +-------------+-----------+-----------+--------------+------------------------------------------+
-            | Benchmark   | mem1.json | mem2.json | Change       | Significance                             |
-            +=============+===========+===========+==============+==========================================+
-            | call_simple | 7896.0 kB | 7900.0 kB | 1.00x larger | (benchmark only contains a single value) |
-            +-------------+-----------+-----------+--------------+------------------------------------------+
+            +-------------+------------+------------+--------------+------------------------------------------+
+            | Benchmark   | mem1.json  | mem2.json  | Change       | Significance                             |
+            +=============+============+============+==============+==========================================+
+            | call_simple | 7896.0 KiB | 7900.0 KiB | 1.00x larger | (benchmark only contains a single value) |
+            +-------------+------------+------------+--------------+------------------------------------------+
             ''').lstrip())