52 changes: 50 additions & 2 deletions wa/instruments/perf.py
@@ -1,3 +1,4 @@

# Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -22,6 +23,7 @@

from wa import Instrument, Parameter
from wa.utils.types import list_or_string, list_of_strs
from wa.utils.types import numeric

PERF_COUNT_REGEX = re.compile(r'^(CPU\d+)?\s*(\d+)\s*(.*?)\s*(\[\s*\d+\.\d+%\s*\])?\s*$')

@@ -77,6 +79,19 @@ class PerfInstrument(Instrument):
                  description="""Provides labels for perf output. If specified, the number of
                  labels must match the number of ``optionstring``\ s.
                  """),
        Parameter('mode', kind=str, default='stat', allowed_values=['stat', 'record'],
                  global_alias='mode',
                  description="""
                  Choose between 'perf stat' and 'perf record'. If 'perf record' is selected,
                  'perf report' will also be run. 'report_optionstring' can be used to
                  generate a custom report from the trace.
                  """),
        Parameter('report_optionstring', kind=list_or_string, default='',
                  global_alias='report_perf_options',
                  description="""
                  Specifies options to be passed to the 'perf report' command when
                  'mode=record'.
                  """),
        Parameter('force_install', kind=bool, default=False,
                  description="""
                  Always install the perf binary even if perf is already present on the device.
@@ -92,6 +107,8 @@ def initialize(self, context):
                                       self.events,
                                       self.optionstring,
                                       self.labels,
                                       self.mode,
                                       self.report_optionstring,
                                       self.force_install)
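
For orientation only (the collector internals are not part of this diff), below is a hedged sketch of how the new mode and report_optionstring parameters could map onto perf invocations; build_perf_commands and its file-name defaults are hypothetical names invented for the illustration, not the actual PerfCollector API:

def build_perf_commands(mode, optionstring, report_optionstring,
                        data_file='perf.data', report_file='perf.rpt'):
    """Hypothetical helper: assemble the perf commands implied by 'mode'."""
    if mode == 'stat':
        # 'perf stat' emits counter output directly; the workload command that
        # would follow the options is omitted here for brevity.
        return ['perf stat {} -o {}'.format(optionstring, report_file)]
    elif mode == 'record':
        # 'perf record' writes a binary trace, so 'perf report' is run afterwards
        # to produce the text .rpt file that _update_output_record parses.
        return ['perf record {} -o {}'.format(optionstring, data_file),
                'perf report {} -i {} > {}'.format(report_optionstring, data_file, report_file)]
    raise ValueError('unsupported mode: {}'.format(mode))

# Example: record mode pairs 'perf record' with a follow-up 'perf report'.
print(build_perf_commands('record', '-a -e cycles', '--sort comm,dso'))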

    def setup(self, context):
@@ -104,12 +121,21 @@ def stop(self, context):
        self.collector.stop()

    def update_output(self, context):
        self.logger.info('Extracting reports from target...')
        outdir = os.path.join(context.output_directory, 'perf')
        self.collector.get_trace(outdir)

        # Extract data for 'perf stat'
        if self.mode == 'stat':
            self._update_output_stat(context, outdir)
        # Extract data for 'perf record'
        elif self.mode == 'record':
            self._update_output_record(context, outdir)

    def _update_output_stat(self, context, outdir):
        self.logger.info('Processing perf stat reports.')

        for host_file in os.listdir(outdir):
            label = host_file.split('.out')[0]
            label = os.path.splitext(host_file)[0]
            host_file_path = os.path.join(outdir, host_file)
            context.add_artifact(label, host_file_path, 'raw')
            with open(host_file_path) as fh:
@@ -134,5 +160,27 @@ def update_output(self, context):
                                metric = '{}_{}'.format(label, match.group(3))
                                context.add_metric(metric, count, classifiers=classifiers)
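
As an aside (not part of the diff), a minimal sketch of how PERF_COUNT_REGEX dissects the counter lines processed above; the sample lines only illustrate the shape the regex expects and are not captured from a real device:

import re

# Same pattern as defined at the top of this file.
PERF_COUNT_REGEX = re.compile(r'^(CPU\d+)?\s*(\d+)\s*(.*?)\s*(\[\s*\d+\.\d+%\s*\])?\s*$')

# Illustrative lines: an aggregate count and a per-CPU count with a coverage percentage.
sample_lines = [
    '        1234567      instructions',
    'CPU0      98765      cache-misses          [ 12.34% ]',
]

for line in sample_lines:
    match = PERF_COUNT_REGEX.search(line)
    if match:
        # group(1) is None on aggregate lines; group(2) is the raw count used as the
        # metric value; group(3) is the event name joined with the label above.
        print(match.group(1), int(match.group(2)), match.group(3))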

    def _update_output_record(self, context, outdir):
        self.logger.info('Processing "perf report" reports.')

        for host_file in os.listdir(outdir):
Contributor: This is currently attempting to process every file in the perf directory; since we are now pulling the raw trace data into this directory as well, it fails when it tries to parse the binary data. Should this be updated to only process .rpt files?

Author: Thanks, changed in the latest version.
            label, ext = os.path.splitext(host_file)
            host_file_path = os.path.join(outdir, host_file)
            context.add_artifact(label, host_file_path, 'raw')
            if ext == '.rpt':
                with open(host_file_path) as fh:
                    for line in fh:
                        words = line.split()
                        if len(words) >= 1:
                            if words[0] == '#' and len(words) == 6:
                                if words[4] == 'event':
                                    event_type = words[5]
                                    event_type = event_type.strip("'")
                            if words[0] != '#' and '%' in words[0] and len(words) == 5:
                                metric = 'perf/{}/{}/{}'.format(event_type, words[1], words[2].strip("[]"))
                                count = numeric(words[0].strip('%'))
                                context.add_metric(metric, count, '%',
                                                   classifiers={'Perf Event': event_type, 'Command': words[1], 'Shared Object': words[2]})
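
To make the parsing above easier to follow, here is a self-contained sketch (not from the PR) of the report shapes it assumes: a '# Samples: ... of event ...' header followed by overhead/command/shared-object rows. The sample report text is made up for illustration, and float() stands in for wa's numeric() to keep the snippet self-contained:

# Hypothetical .rpt content in the shape the parser above expects.
sample_report = """\
# Samples: 5K of event 'cycles'
    41.20%  myapp  libc-2.27.so       [.] memcpy
    10.05%  myapp  [kernel.kallsyms]  [k] finish_task_switch
"""

event_type = None
for line in sample_report.splitlines():
    words = line.split()
    if not words:
        continue
    if words[0] == '#' and len(words) == 6 and words[4] == 'event':
        event_type = words[5].strip("'")           # -> 'cycles'
    elif '%' in words[0] and len(words) == 5:
        overhead = float(words[0].strip('%'))      # -> 41.2, 10.05
        command, shared_object = words[1], words[2].strip('[]')
        print('perf/{}/{}/{}'.format(event_type, command, shared_object), overhead)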

Contributor:

Suggested change:
# else: fail silently

You could use a dictionary of functions (a method-local dictionary of instance methods, a class-attribute dictionary of class methods, ... your call), accessed with self.perf_mode as the key: if the key is not present, a KeyError is raised. Furthermore, update_output becomes subcommand-agnostic. Depending on the implementation, this would even allow assert-based unit testing by comparing the allowed_values of perf_mode with the keys() of this dictionary, so the link between the supported perf subcommands and the need for an implemented _update_output_<...> method, currently implicit, is expressed programmatically. Finally, it is more pythonic than this switch-like use of if-elif-elif.

(I know that self.perf_mode has been sanitized, but I still believe there's value in this change.)
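
A minimal sketch of the dispatch-table idea described in this comment; the class and attribute names are illustrative, not taken from the PR:

class PerfInstrumentSketch(object):
    """Hypothetical illustration of replacing the if/elif on mode with a dict of handlers."""

    allowed_modes = ('stat', 'record')

    def __init__(self, mode):
        self.mode = mode
        # Map each supported perf subcommand to the method that processes its output.
        self._output_handlers = {
            'stat': self._update_output_stat,
            'record': self._update_output_record,
        }

    def update_output(self, context, outdir):
        # Subcommand-agnostic: an unsupported mode raises KeyError here, and a unit
        # test can assert that allowed_modes matches self._output_handlers.keys().
        self._output_handlers[self.mode](context, outdir)

    def _update_output_stat(self, context, outdir):
        print('processing perf stat output in', outdir)

    def _update_output_record(self, context, outdir):
        print('processing perf record/report output in', outdir)

# Example usage: PerfInstrumentSketch('record').update_output(None, 'wa_output/perf')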

    def teardown(self, context):
        self.collector.reset()