Commit
Added updated expensive benchmarks
sternj committed Dec 12, 2022
1 parent 4f28848 commit 5a8d2be
Showing 8 changed files with 143 additions and 31 deletions.
106 changes: 106 additions & 0 deletions benchmarks/new_benchmark.py
@@ -0,0 +1,106 @@
import json
import subprocess
import re
import statistics
from glob import glob
from collections import defaultdict
import sys

cmds = {
    # "baseline": ["python3"],
    # "scalene": ["python3", "-m", "scalene", "--json", "--outfile", "/dev/null"],
    # "scalene-cpu": ["python3", "-m", "scalene", "--json", "--cpu", "--outfile", "/dev/null"],
    # "scalene-cpu-gpu": ["python3", "-m", "scalene", "--json", "--cpu", "--gpu", "--outfile", "/dev/null"],
    # "scalene-5M": ["python3", "-m", "scalene", "--json", "--outfile", "/dev/null", "--allocation-sampling-window", "5242883"],
    # "scalene-10M": ["python3", "-m", "scalene", "--json", "--outfile", "/dev/null", "--allocation-sampling-window", "10485767"],
    # "scalene-20M": ["python3", "-m", "scalene", "--json", "--outfile", "/dev/null", "--allocation-sampling-window", "20971529"],
    # "memray": [
    #     "python3",
    #     "-m",
    #     "memray",
    #     "run",
    #     "--trace-python-allocators",
    #     "-f",
    #     "-o",
    #     "/tmp/memray.out",
    # ],
    # "fil": ["fil-profile", "-o", "/tmp/abc", '--no-browser', "run"],
    # "austin_full": ["austin", "-o", "/dev/null", "-f"],
    # "austin_cpu": ["austin", "-o", "/dev/null"],
    # 'py-spy': ['py-spy', 'record', '-o', '/tmp/profile.svg', '--', 'python3'],
    # 'cProfile': ['python3', '-m', 'cProfile', '-o', '/dev/null'],
    'yappi_wall': ['python3', '-m', 'yappi', '-o', '/dev/null', '-c', 'wall'],
    'yappi_cpu': ['python3', '-m', 'yappi', '-o', '/dev/null', '-c', 'cpu'],
    # 'pprofile_det': ['pprofile', '-o', '/dev/null'],
    # 'pprofile_stat': ['pprofile', '-o', '/dev/null', '-s', '0.001'],
    # 'line_profiler': ['kernprof', '-l', '-o', '/dev/null', '-v'],
    # 'profile': ['python3', '-m', 'profile', '-o', '/dev/null']
}
result_regexp = re.compile(r"Time elapsed:\s+([0-9]*\.[0-9]+)")


def main():
    out = defaultdict(lambda : {})

    for progname in [
        # "./test/expensive_benchmarks/bm_mdp.py",
        # "./test/expensive_benchmarks/bm_async_tree_io.py none",
        # "./test/expensive_benchmarks/bm_async_tree_io.py io",
        # "./test/expensive_benchmarks/bm_async_tree_io.py cpu_io_mixed",
        # "./test/expensive_benchmarks/bm_async_tree_io.py memoization",
        # "./test/expensive_benchmarks/bm_fannukh.py",
        # "./test/expensive_benchmarks/bm_pprint.py",
        # "./test/expensive_benchmarks/bm_raytrace.py",
        # "./test/expensive_benchmarks/bm_sympy.py",
        "./test/expensive_benchmarks/bm_docutils.py"
    ]:
        for profile_name, profile_cmd in cmds.items():
            times = []
            for i in range(5):
                print(
                    f"Running {profile_name} on {progname} using \"{' '.join(profile_cmd + progname.split(' '))}\"...",
                    end="",
                    flush=True,
                )
                result = subprocess.run(
                    profile_cmd + progname.split(' '),
                    stderr=subprocess.STDOUT,
                    stdout=subprocess.PIPE,
                )

                output = result.stdout.decode("utf-8")
                # print(output)
                match = result_regexp.search(output)
                if match is not None:
                    print(f"... {match.group(1)}", end=('\n' if profile_name != 'memray' else ''))
                    times.append(round(100 * float(match.group(1))) / 100.0)
                    if profile_name == 'memray':
                        res2 = subprocess.run(
                            ['time',
                             sys.executable,
                             '-m',
                             'memray',
                             'flamegraph',
                             '-f',
                             '/tmp/memray.out'],
                            capture_output=True,
                            env={'TIME': 'Time elapsed: %e'}
                        )
                        output2 = res2.stderr.decode("utf-8")
                        match2 = result_regexp.search(output2)
                        if match2 is not None:
                            print(f"... {match2.group(1)}")
                            times[-1] += round(100 * float(match2.group(1))) / 100.0
                        else:
                            print("... RUN FAILED")
                            # exit(1)
                else:
                    print("RUN FAILED")
                    # exit(1)
            out[profile_name][progname] = times
            with open('yappi.json', 'w+') as f:
                json.dump(dict(out), f)


if __name__ == "__main__":
    main()
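For context, here is a minimal post-processing sketch (not part of this commit) showing how the timing data that new_benchmark.py writes to yappi.json could be summarized afterwards. It assumes the {profiler: {benchmark: [elapsed_seconds, ...]}} layout that the script's json.dump(dict(out), f) call produces; the mean/stdev reporting is purely illustrative.

# Illustrative only; assumes yappi.json was produced by benchmarks/new_benchmark.py above.
import json
import statistics

with open("yappi.json") as f:
    results = json.load(f)

for profiler, benchmarks in results.items():
    for benchmark, times in benchmarks.items():
        if not times:
            print(f"{profiler} {benchmark}: no successful runs")
            continue
        mean = statistics.mean(times)
        stdev = statistics.stdev(times) if len(times) > 1 else 0.0
        print(f"{profiler} {benchmark}: mean {mean:.2f}s, stdev {stdev:.2f}s over {len(times)} runs")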
18 changes: 12 additions & 6 deletions test/expensive_benchmarks/bm_async_tree_io.py
@@ -55,13 +55,19 @@ async def recurse(self, recurse_level):
         await asyncio.gather(
             *[self.recurse(recurse_level - 1) for _ in range(NUM_RECURSE_BRANCHES)]
         )
 
     async def run(self):
-        if self.__class__ == IOAsyncTree:
+        if isinstance(self, IOAsyncTree):
             num_iters = 9
+        elif isinstance(self, MemoizationAsyncTree):
+            num_iters = 16
+        elif isinstance(self, NoneAsyncTree):
+            num_iters = 22
         else:
-            num_iters = 20
+            num_iters = 14
         for i in range(num_iters):
+            if isinstance(self, MemoizationAsyncTree):
+                self.cache = {}
             await self.recurse(NUM_RECURSE_LEVELS)
 
 
@@ -165,9 +171,9 @@ def add_parser_args(parser):
 
     async_tree_class = BENCHMARKS[benchmark]
     async_tree = async_tree_class()
-    start = time.perf_counter()
+    start_p = time.perf_counter()
     asyncio.run(async_tree.run())
-    stop = time.perf_counter()
-    print("Time elapsed: ", stop - start)
+    stop_p = time.perf_counter()
+    print("Time elapsed: ", stop_p - start_p)
     # runner.bench_async_func(f"async_tree_{benchmark}", async_tree.run)
 
8 changes: 4 additions & 4 deletions test/expensive_benchmarks/bm_docutils.py
@@ -18,7 +18,7 @@
 else:
     Trace.show = lambda message, channel: ...  # don't print to console
 
-DOC_ROOT = (Path(__file__).parent / "docutils_data" / "docs").resolve()
+DOC_ROOT = (Path('/home/sam/scalene/test/expensive_benchmarks/bm_docutils.py').parent / "docutils_data" / "docs").resolve()
 
 
 def build_html(doc_root):
@@ -48,8 +48,8 @@ def bench_docutils(loops, doc_root):
 
 if __name__ == "__main__":
     # runner = pyperf.Runner()
-    start = time.perf_counter()
+    start_p = time.perf_counter()
     bench_docutils(5, DOC_ROOT)
-    stop = time.perf_counter()
-    print("Time elapsed: ", stop - start)
+    stop_p = time.perf_counter()
+    print("Time elapsed: ", stop_p - start_p)
 
8 changes: 4 additions & 4 deletions test/expensive_benchmarks/bm_fannukh.py
@@ -52,10 +52,10 @@ def fannkuch(n):
 if __name__ == "__main__":
     # runner = pyperf.Runner()
     arg = DEFAULT_ARG
-    start = time.perf_counter()
+    start_p = time.perf_counter()
     # runner.bench_func('fannkuch', fannkuch, arg)
-    for i in range(2):
+    for i in range(3):
         fannkuch(arg)
-    stop = time.perf_counter()
+    stop_p = time.perf_counter()
 
-    print("Time elapsed: ", stop - start)
+    print("Time elapsed: ", stop_p - start_p)
6 changes: 3 additions & 3 deletions test/expensive_benchmarks/bm_mdp.py
@@ -266,8 +266,8 @@ def bench_mdp(loops):
 if __name__ == "__main__":
     runner = pyperf.Runner()
     runner.metadata['description'] = "MDP benchmark"
-    start = time.perf_counter()
+    start_p = time.perf_counter()
     # runner.bench_time_func('mdp', bench_mdp)
     bench_mdp(5)
-    stop = time.perf_counter()
-    print("Time elapsed: ", stop - start)
+    stop_p = time.perf_counter()
+    print("Time elapsed: ", stop_p - start_p)
12 changes: 6 additions & 6 deletions test/expensive_benchmarks/bm_pprint.py
@@ -10,14 +10,14 @@
 from pprint import PrettyPrinter
 
 
-printable = [('string', (1, 2), [3, 4], {5: 6, 7: 8})] * 100_000 * 8
+printable = [('string', (1, 2), [3, 4], {5: 6, 7: 8})] * 100_000
 p = PrettyPrinter()
 
 
 if __name__ == '__main__':
 
-    start = perf_counter()
-    # for i in range(7):
-    p.pformat(printable)
-    stop = perf_counter()
-    print("Time elapsed: ", stop - start)
+    start_p = perf_counter()
+    for i in range(7):
+        p.pformat(printable)
+    stop_p = perf_counter()
+    print("Time elapsed: ", stop_p - start_p)
8 changes: 4 additions & 4 deletions test/expensive_benchmarks/bm_raytrace.py
@@ -402,7 +402,7 @@ def add_cmdline_args(cmd, args):
     cmd.add_argument("--filename", metavar="FILENAME.PPM",
                      help="Output filename of the PPM picture")
     args = cmd.parse_args()
-    start = perf_counter()
-    bench_raytrace(20, args.width, args.height, args.filename)
-    stop = perf_counter()
-    print("Time elapsed: ", stop - start)
+    start_p = perf_counter()
+    bench_raytrace(25, args.width, args.height, args.filename)
+    stop_p = perf_counter()
+    print("Time elapsed: ", stop_p - start_p)
8 changes: 4 additions & 4 deletions test/expensive_benchmarks/bm_sympy.py
@@ -56,11 +56,11 @@ def add_cmdline_args(cmd, args):
     import gc
     gc.disable()
 
-    start = time.perf_counter()
-    for _ in range(20):
+    start_p = time.perf_counter()
+    for _ in range(25):
         # Don't benchmark clear_cache(), exclude it of the benchmark
         clear_cache()
         bench_expand()
 
-    stop = time.perf_counter()
-    print("Time elapsed: ", stop - start)
+    stop_p = time.perf_counter()
+    print("Time elapsed: ", stop_p - start_p)
