from .. import __version__
from . import progressbar
from ..linuxautomaton import common
+from .. import _version
from babeltrace import TraceCollection
import argparse
import sys
import subprocess
+import json
+import re
+from . import mi
class Command:
- def __init__(self):
+ _MI_BASE_TAGS = ['linux-kernel', 'lttng-analyses']
+ _MI_AUTHORS = [
+ 'Julien Desfossez',
+ 'Antoine Busque',
+ 'Philippe Proulx',
+ ]
+ _MI_URL = 'https://github.com/lttng/lttng-analyses'
+
+ def __init__(self, mi_mode=False):
self._analysis = None
self._analysis_conf = None
self._args = None
self._handles = None
self._traces = None
-
+ self._ticks = 0
+ self._mi_mode = mi_mode
self._create_automaton()
+ self._mi_setup()
+
+ @property
+ def mi_mode(self):
+ return self._mi_mode
def run(self):
self._parse_args()
def _cmdline_error(self, msg, exit_code=1):
self._error('Command line error: {}'.format(msg), exit_code)
+ def _print(self, msg):
+ if not self._mi_mode:
+ print(msg)
+
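+    # Create an empty result table for a registered table class.
+    # `begin` and `end` bound the analysis period the table covers;
+    # `subtitle` disambiguates tables sharing the same class.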
+ def _mi_create_result_table(self, table_class_name, begin, end,
+ subtitle=None):
+ return mi.ResultTable(self._mi_table_classes[table_class_name],
+ begin, end, subtitle)
+
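+    # Build the table class registry from the subclass's
+    # _MI_TABLE_CLASSES list of (name, title, column description)
+    # tuples.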
+ def _mi_setup(self):
+ self._mi_table_classes = {}
+
+ for tc_tuple in self._MI_TABLE_CLASSES:
+ table_class = mi.TableClass(tc_tuple[0], tc_tuple[1], tc_tuple[2])
+ self._mi_table_classes[table_class.name] = table_class
+
+ self._mi_clear_result_tables()
+
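+    # Dump the analysis's metadata (version, title, description,
+    # authors, tags, and table classes) as a single JSON object;
+    # this implements the --metadata option of MI mode.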
+ def _mi_print_metadata(self):
+ tags = self._MI_BASE_TAGS + self._MI_TAGS
+ infos = mi.get_metadata(version=self._MI_VERSION, title=self._MI_TITLE,
+ description=self._MI_DESCRIPTION,
+ authors=self._MI_AUTHORS, url=self._MI_URL,
+ tags=tags,
+ table_classes=self._mi_table_classes.values())
+ print(json.dumps(infos))
+
+ def _mi_append_result_table(self, result_table):
+ if not result_table or not result_table.rows:
+ return
+
+ tc_name = result_table.table_class.name
+ self._mi_get_result_tables(tc_name).append(result_table)
+
+ def _mi_append_result_tables(self, result_tables):
+ if not result_tables:
+ return
+
+ for result_table in result_tables:
+ self._mi_append_result_table(result_table)
+
+ def _mi_clear_result_tables(self):
+ self._result_tables = {}
+
+ def _mi_get_result_tables(self, table_class_name):
+ if table_class_name not in self._result_tables:
+ self._result_tables[table_class_name] = []
+
+ return self._result_tables[table_class_name]
+
+ def _mi_print(self):
+ results = []
+
+ for result_tables in self._result_tables.values():
+ for result_table in result_tables:
+ results.append(result_table.to_native_object())
+
+ obj = {
+ 'results': results,
+ }
+
+ print(json.dumps(obj))
+
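+    # Subclasses may override this to condense the per-tick result
+    # tables into summary tables at the end of a multi-tick MI run.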
+ def _create_summary_result_tables(self):
+ pass
+
def _open_trace(self):
traces = TraceCollection()
handles = traces.add_traces_recursive(self._args.path, 'ctf')
self._traces.remove_trace(handle)
def _check_lost_events(self):
- print('Checking the trace for lost events...')
+ self._print('Checking the trace for lost events...')
try:
subprocess.check_output('babeltrace %s' % self._args.path,
shell=True)
except subprocess.CalledProcessError:
- print('Error running babeltrace on the trace, cannot verify if '
- 'events were lost during the trace recording')
+            self._gen_error('Cannot run babeltrace on the trace, cannot '
+                            'verify if events were lost during the trace '
+                            'recording')
+
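+    # Hooks around event processing: _pre_analysis() runs before the
+    # first event, and _post_analysis() emits the accumulated MI
+    # results (summarized when there was more than one tick).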
+ def _pre_analysis(self):
+ pass
+
+ def _post_analysis(self):
+ if not self._mi_mode:
+ return
+
+ if self._ticks > 1:
+ self._create_summary_result_tables()
+
+ self._mi_print()
def _run_analysis(self):
+ self._pre_analysis()
progressbar.progressbar_setup(self)
for event in self._traces.events:
progressbar.progressbar_finish(self)
self._analysis.end()
+ self._post_analysis()
def _print_date(self, begin_ns, end_ns):
        date = 'Timerange: [%s, %s]' % (
            common.ns_to_hour_nsec(begin_ns, gmt=self._args.gmt,
                                   multi_day=True),
common.ns_to_hour_nsec(end_ns, gmt=self._args.gmt,
multi_day=True))
- print(date)
+ self._print(date)
def _validate_transform_common_args(self, args):
refresh_period_ns = None
args.pid_list = args.pid.split(',')
args.pid_list = [int(pid) for pid in args.pid_list]
+ if self._mi_mode:
+ # force no progress in MI mode
+ args.no_progress = True
+
+ # print MI metadata if required
+ if args.metadata:
+ self._mi_print_metadata()
+ sys.exit(0)
+
+ # validate path argument (required at this point)
+ if not args.path:
+ self._cmdline_error('Please specify a trace path')
+
+ if type(args.path) is list:
+ args.path = args.path[0]
+
def _validate_transform_args(self, args):
pass
ap = argparse.ArgumentParser(description=self._DESC)
# common arguments
- ap.add_argument('path', metavar='<path/to/trace>', help='trace path')
ap.add_argument('-r', '--refresh', type=str,
help='Refresh period, with optional units suffix '
'(default units: s)')
+ ap.add_argument('--gmt', action='store_true',
+ help='Manipulate timestamps based on GMT instead '
+ 'of local time')
ap.add_argument('--limit', type=int, default=10,
help='Limit to top X (default = 10)')
- ap.add_argument('--no-progress', action='store_true',
- help='Don\'t display the progress bar')
ap.add_argument('--skip-validation', action='store_true',
help='Skip the trace validation')
- ap.add_argument('--gmt', action='store_true',
- help='Manipulate timestamps based on GMT instead '
- 'of local time')
ap.add_argument('--begin', type=str, help='start time: '
'hh:mm:ss[.nnnnnnnnn]')
ap.add_argument('--end', type=str, help='end time: '
'hh:mm:ss[.nnnnnnnnn]')
- ap.add_argument('--timerange', type=str, help='time range: '
- '[begin,end]')
ap.add_argument('--period-begin', type=str,
help='Analysis period start marker event name')
ap.add_argument('--period-end', type=str,
ap.add_argument('--period-key', type=str, default='cpu_id',
help='Optional, list of event field names used to match '
'period markers (default: cpu_id)')
+ ap.add_argument('--timerange', type=str, help='time range: '
+ '[begin,end]')
ap.add_argument('-V', '--version', action='version',
version='LTTng Analyses v' + __version__)
+ # MI mode-dependent arguments
+ if self._mi_mode:
+ ap.add_argument('--metadata', action='store_true',
+ help="Show analysis's metadata")
+ ap.add_argument('path', metavar='<path/to/trace>', help='trace path',
+ nargs='*')
+ else:
+ ap.add_argument('--no-progress', action='store_true',
+ help='Don\'t display the progress bar')
+ ap.add_argument('path', metavar='<path/to/trace>', help='trace path')
+
# Used to add command-specific args
self._add_arguments(ap)
self._args.multi_day = common.is_multi_day_trace_collection(
self._handles)
- if self._args.timerange:
+ if hasattr(self._args, 'timerange') and self._args.timerange:
(self._analysis_conf.begin_ts, self._analysis_conf.end_ts) = \
common.extract_timerange(self._handles, self._args.timerange,
self._args.gmt)
if self._args.begin is None or self._args.end is None:
- print('Invalid timeformat')
- sys.exit(1)
+                self._cmdline_error(
+                    'Invalid time format: "{}"'.format(self._args.timerange))
else:
if self._args.begin:
self._args.begin = date_to_epoch_nsec(
def _create_analysis(self):
notification_cbs = {
- 'output_results': self._output_results
+ analysis.Analysis.TICK_CB: self._analysis_tick_cb
}
self._analysis = self._ANALYSIS_CLASS(self.state, self._analysis_conf)
self._automaton = automaton.Automaton()
self.state = self._automaton.state
- def _output_results(self, **kwargs):
+ def _analysis_tick_cb(self, **kwargs):
begin_ns = kwargs['begin_ns']
end_ns = kwargs['end_ns']
# TODO allow output of results to some other place/in other
# format than plain text-cli
- self._print_results(begin_ns, end_ns)
+ self._analysis_tick(begin_ns, end_ns)
+ self._ticks += 1
- def _print_results(self, begin_ns, end_ns):
+ def _analysis_tick(self, begin_ns, end_ns):
raise NotImplementedError()
def _filter_process(self, proc):
if self._args.pid_list and proc.pid not in self._args.pid_list:
return False
return True
+
+
+# create MI version
+_cmd_version = _version.get_versions()['version']
+_version_match = re.match(r'(\d+)\.(\d+)\.(\d+)(.*)', _cmd_version)
+Command._MI_VERSION = [
+ int(_version_match.group(1)),
+ int(_version_match.group(2)),
+ int(_version_match.group(3)),
+ _version_match.group(4),
+]
from ..core import cputop
from ..ascii_graph import Pyasciigraph
import operator
+from . import mi
class Cputop(Command):
_DESC = """The cputop command."""
_ANALYSIS_CLASS = cputop.Cputop
+ _MI_TITLE = 'Top CPU usage'
+ _MI_DESCRIPTION = 'Per-TID, per-CPU, and total top CPU usage'
+ _MI_TAGS = [mi.Tags.CPU, mi.Tags.TOP]
+ _MI_TABLE_CLASS_PER_PROC = 'per-process'
+ _MI_TABLE_CLASS_PER_CPU = 'per-cpu'
+ _MI_TABLE_CLASS_TOTAL = 'total'
+ _MI_TABLE_CLASS_SUMMARY = 'summary'
+ _MI_TABLE_CLASSES = [
+ (
+ _MI_TABLE_CLASS_PER_PROC,
+ 'Per-TID top CPU usage', [
+ ('process', 'Process', mi.Process),
+ ('migrations', 'Migration count', mi.Integer, 'migrations'),
+ ('usage', 'CPU usage', mi.Ratio),
+ ]
+ ),
+ (
+ _MI_TABLE_CLASS_PER_CPU,
+ 'Per-CPU top CPU usage', [
+ ('cpu', 'CPU', mi.Cpu),
+ ('usage', 'CPU usage', mi.Ratio),
+ ]),
+ (
+ _MI_TABLE_CLASS_TOTAL,
+ 'Total CPU usage', [
+ ('usage', 'CPU usage', mi.Ratio),
+ ]
+ ),
+ (
+ _MI_TABLE_CLASS_SUMMARY,
+ 'CPU usage - summary', [
+ ('time_range', 'Time range', mi.TimeRange),
+ ('usage', 'Total CPU usage', mi.Ratio),
+ ]
+ ),
+ ]
def _filter_process(self, proc):
# Exclude swapper
return True
- def _print_results(self, begin_ns, end_ns):
- self._print_date(begin_ns, end_ns)
- self._print_per_tid_usage()
- self._print_per_cpu_usage()
- self._print_total_cpu_usage()
-
- def _print_per_tid_usage(self):
+ def _analysis_tick(self, begin_ns, end_ns):
+ per_tid_table = self._get_per_tid_usage_result_table(begin_ns, end_ns)
+ per_cpu_table = self._get_per_cpu_usage_result_table(begin_ns, end_ns)
+ total_table = self._get_total_usage_result_table(begin_ns, end_ns)
+
+ if self._mi_mode:
+ self._mi_append_result_table(per_tid_table)
+ self._mi_append_result_table(per_cpu_table)
+ self._mi_append_result_table(total_table)
+ else:
+ self._print_date(begin_ns, end_ns)
+ self._print_per_tid_usage(per_tid_table)
+ self._print_per_cpu_usage(per_cpu_table)
+
+ if total_table:
+ self._print_total_cpu_usage(total_table)
+
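+    # Collapse the per-tick total CPU usage tables into a single
+    # summary table of (time range, total usage) rows.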
+ def _create_summary_result_tables(self):
+ total_tables = self._mi_get_result_tables(self._MI_TABLE_CLASS_TOTAL)
+ begin = total_tables[0].timerange.begin
+ end = total_tables[-1].timerange.end
+ summary_table = \
+ self._mi_create_result_table(self._MI_TABLE_CLASS_SUMMARY,
+ begin, end)
+
+ for total_table in total_tables:
+ usage = total_table.rows[0].usage
+ summary_table.append_row(
+ time_range=total_table.timerange,
+ usage=usage,
+ )
+
+ self._mi_clear_result_tables()
+ self._mi_append_result_table(summary_table)
+
+ def _get_per_tid_usage_result_table(self, begin_ns, end_ns):
+ result_table = \
+ self._mi_create_result_table(self._MI_TABLE_CLASS_PER_PROC,
+ begin_ns, end_ns)
count = 0
- limit = self._args.limit
- graph = Pyasciigraph()
- values = []
for tid in sorted(self._analysis.tids.values(),
                          key=operator.attrgetter('usage_percent'),
                          reverse=True):
            if not self._filter_process(tid):
continue
- output_str = '%s (%d)' % (tid.comm, tid.tid)
- if tid.migrate_count > 0:
- output_str += ', %d migrations' % (tid.migrate_count)
-
- values.append((output_str, tid.usage_percent))
-
+ result_table.append_row(
+ process=mi.Process(tid.comm, tid=tid.tid),
+ migrations=mi.Integer(tid.migrate_count),
+ usage=mi.Ratio.from_percentage(tid.usage_percent)
+ )
count += 1
- if limit > 0 and count >= limit:
+
+ if self._args.limit > 0 and count >= self._args.limit:
break
- for line in graph.graph('Per-TID CPU Usage', values, unit=' %'):
- print(line)
+ return result_table
- def _print_per_cpu_usage(self):
- graph = Pyasciigraph()
- values = []
+ def _get_per_cpu_usage_result_table(self, begin_ns, end_ns):
+ result_table = \
+ self._mi_create_result_table(self._MI_TABLE_CLASS_PER_CPU,
+ begin_ns, end_ns)
for cpu in sorted(self._analysis.cpus.values(),
key=operator.attrgetter('usage_percent'),
reverse=True):
- values.append(('CPU %d' % cpu.cpu_id, cpu.usage_percent))
+ result_table.append_row(
+ cpu=mi.Cpu(cpu.cpu_id),
+ usage=mi.Ratio.from_percentage(cpu.usage_percent)
+ )
- for line in graph.graph('Per-CPU Usage', values, unit=' %'):
- print(line)
+ return result_table
+
+ def _get_total_usage_result_table(self, begin_ns, end_ns):
+ result_table = \
+ self._mi_create_result_table(self._MI_TABLE_CLASS_TOTAL,
+ begin_ns, end_ns)
- def _print_total_cpu_usage(self):
cpu_count = len(self.state.cpus)
usage_percent = 0
# average per CPU
usage_percent /= cpu_count
+ result_table.append_row(
+ usage=mi.Ratio.from_percentage(usage_percent),
+ )
+
+ return result_table
+
+ def _print_per_tid_usage(self, result_table):
+ graph = Pyasciigraph()
+ values = []
+
+ for row in result_table.rows:
+            proc = row.process
+            migration_count = row.migrations.value
+            output_str = '%s (%d)' % (proc.name, proc.tid)
+
+ if migration_count > 0:
+ output_str += ', %d migrations' % (migration_count)
+
+ values.append((output_str, row.usage.to_percentage()))
+
+ for line in graph.graph('Per-TID CPU Usage', values, unit=' %'):
+ print(line)
+
+ def _print_per_cpu_usage(self, result_table):
+ graph = Pyasciigraph()
+ values = []
+
+ for row in result_table.rows:
+ cpu = row.cpu
+ values.append(('CPU %d' % cpu.id, row.usage.to_percentage()))
+
+ for line in graph.graph('Per-CPU Usage', values, unit=' %'):
+ print(line)
+
+ def _print_total_cpu_usage(self, result_table):
+ usage_percent = result_table.rows[0].usage.to_percentage()
print('\nTotal CPU Usage: %0.02f%%\n' % usage_percent)
def _add_arguments(self, ap):
Command._add_proc_filter_args(ap)
-def run():
- cputopcmd = Cputop()
+def _run(mi_mode):
+ cputopcmd = Cputop(mi_mode=mi_mode)
cputopcmd.run()
+
+
+# entry point (human)
+def run():
+ _run(mi_mode=False)
+
+
+# entry point (MI)
+def run_mi():
+ _run(mi_mode=True)
from ..core import io
from ..linuxautomaton import common
from ..ascii_graph import Pyasciigraph
+from . import mi
import operator
import statistics
+import collections
import sys
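+
+
+# Named grouping of the usage result tables produced for one analysis
+# tick, in the order they are printed in human-readable mode.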
+_UsageTables = collections.namedtuple('_UsageTables', [
+ 'per_proc_read',
+ 'per_proc_write',
+ 'per_file_read',
+ 'per_file_write',
+ 'per_proc_block_read',
+ 'per_proc_block_write',
+ 'per_disk_sector',
+ 'per_disk_request',
+ 'per_disk_rtps',
+ 'per_netif_recv',
+ 'per_netif_send',
+])
+
+
class IoAnalysisCommand(Command):
_DESC = """The I/O command."""
_ANALYSIS_CLASS = io.IoAnalysis
-
+ _MI_TITLE = 'I/O analysis'
+ _MI_DESCRIPTION = 'System call/disk latency statistics, system call ' + \
+ 'latency distribution, system call top latencies, ' + \
+ 'I/O usage top, and I/O operations log'
+ _MI_TAGS = [
+ mi.Tags.IO,
+ mi.Tags.SYSCALL,
+ mi.Tags.STATS,
+ mi.Tags.FREQ,
+ mi.Tags.LOG,
+ mi.Tags.TOP,
+ ]
+ _MI_TABLE_CLASS_SYSCALL_LATENCY_STATS = 'syscall-latency-stats'
+ _MI_TABLE_CLASS_PART_LATENCY_STATS = 'disk-latency-stats'
+ _MI_TABLE_CLASS_FREQ = 'freq'
+ _MI_TABLE_CLASS_TOP_SYSCALL = 'top-syscall'
+ _MI_TABLE_CLASS_LOG = 'log'
+ _MI_TABLE_CLASS_PER_PROCESS_TOP = 'per-process-top'
+ _MI_TABLE_CLASS_PER_FILE_TOP = 'per-file-top'
+ _MI_TABLE_CLASS_PER_PROCESS_TOP_BLOCK = 'per-process-top-block'
+ _MI_TABLE_CLASS_PER_DISK_TOP_SECTOR = 'per-disk-top-sector'
+ _MI_TABLE_CLASS_PER_DISK_TOP_REQUEST = 'per-disk-top-request'
+    _MI_TABLE_CLASS_PER_DISK_TOP_RTPS = 'per-disk-top-rtps'
+ _MI_TABLE_CLASS_PER_NETIF_TOP = 'per-netif-top'
+ _MI_TABLE_CLASSES = [
+ (
+ _MI_TABLE_CLASS_SYSCALL_LATENCY_STATS,
+ 'System call latency statistics', [
+ ('obj', 'System call category', mi.String),
+ ('count', 'Call count', mi.Integer, 'calls'),
+ ('min_latency', 'Minimum call latency', mi.Duration),
+ ('avg_latency', 'Average call latency', mi.Duration),
+ ('max_latency', 'Maximum call latency', mi.Duration),
+                ('stdev_latency', 'System call latency standard deviation',
+                 mi.Duration),
+ ]
+ ),
+ (
+ _MI_TABLE_CLASS_PART_LATENCY_STATS,
+ 'Partition latency statistics', [
+ ('obj', 'Partition', mi.Disk),
+ ('count', 'Access count', mi.Integer, 'accesses'),
+ ('min_latency', 'Minimum access latency', mi.Duration),
+ ('avg_latency', 'Average access latency', mi.Duration),
+ ('max_latency', 'Maximum access latency', mi.Duration),
+                ('stdev_latency', 'Access latency standard deviation',
+                 mi.Duration),
+ ]
+ ),
+ (
+ _MI_TABLE_CLASS_FREQ,
+ 'I/O request latency distribution', [
+ ('latency_lower', 'Latency (lower bound)', mi.Duration),
+ ('latency_upper', 'Latency (upper bound)', mi.Duration),
+ ('count', 'Request count', mi.Integer, 'requests'),
+ ]
+ ),
+ (
+ _MI_TABLE_CLASS_TOP_SYSCALL,
+ 'Top system call latencies', [
+ ('time_range', 'Call time range', mi.TimeRange),
+ ('out_of_range', 'System call out of range?', mi.Boolean),
+ ('duration', 'Call duration', mi.Duration),
+ ('syscall', 'System call', mi.Syscall),
+ ('size', 'Read/write size', mi.Size),
+ ('process', 'Process', mi.Process),
+ ('path', 'File path', mi.Path),
+ ('fd', 'File descriptor', mi.Fd),
+ ]
+ ),
+ (
+ _MI_TABLE_CLASS_LOG,
+ 'I/O operations log', [
+ ('time_range', 'Call time range', mi.TimeRange),
+ ('out_of_range', 'System call out of range?', mi.Boolean),
+ ('duration', 'Call duration', mi.Duration),
+ ('syscall', 'System call', mi.Syscall),
+ ('size', 'Read/write size', mi.Size),
+ ('process', 'Process', mi.Process),
+ ('path', 'File path', mi.Path),
+ ('fd', 'File descriptor', mi.Fd),
+ ]
+ ),
+ (
+ _MI_TABLE_CLASS_PER_PROCESS_TOP,
+ 'Per-process top I/O operations', [
+ ('process', 'Process', mi.Process),
+ ('size', 'Total operations size', mi.Size),
+ ('disk_size', 'Disk operations size', mi.Size),
+ ('net_size', 'Network operations size', mi.Size),
+ ('unknown_size', 'Unknown operations size', mi.Size),
+ ]
+ ),
+ (
+ _MI_TABLE_CLASS_PER_FILE_TOP,
+ 'Per-file top I/O operations', [
+ ('path', 'File path/info', mi.Path),
+ ('size', 'Operations size', mi.Size),
+ ('fd_owners', 'File descriptor owners', mi.String),
+ ]
+ ),
+ (
+ _MI_TABLE_CLASS_PER_PROCESS_TOP_BLOCK,
+ 'Per-process top block I/O operations', [
+ ('process', 'Process', mi.Process),
+ ('size', 'Operations size', mi.Size),
+ ]
+ ),
+ (
+ _MI_TABLE_CLASS_PER_DISK_TOP_SECTOR,
+ 'Per-disk top sector I/O operations', [
+ ('disk', 'Disk', mi.Disk),
+ ('count', 'Sector count', mi.Integer, 'sectors'),
+ ]
+ ),
+ (
+ _MI_TABLE_CLASS_PER_DISK_TOP_REQUEST,
+ 'Per-disk top I/O requests', [
+ ('disk', 'Disk', mi.Disk),
+ ('count', 'Request count', mi.Integer, 'I/O requests'),
+ ]
+ ),
+ (
+ _MI_TABLE_CLASS_PER_DISK_TOP_RTPS,
+ 'Per-disk top I/O request time/sector', [
+ ('disk', 'Disk', mi.Disk),
+ ('rtps', 'Request time/sector', mi.Duration),
+ ]
+ ),
+ (
+ _MI_TABLE_CLASS_PER_NETIF_TOP,
+ 'Per-network interface top I/O operations', [
+ ('netif', 'Network interface', mi.NetIf),
+ ('size', 'Operations size', mi.Size),
+ ]
+ ),
+ ]
_LATENCY_STATS_FORMAT = '{:<14} {:>14} {:>14} {:>14} {:>14} {:>14}'
_SECTION_SEPARATOR_STRING = '-' * 89
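+
+    # Build only the result tables requested on the command line, then
+    # either accumulate them for MI output or print them as text.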
+ def _analysis_tick(self, begin_ns, end_ns):
+ syscall_latency_stats_table = None
+ disk_latency_stats_table = None
+ freq_tables = None
+ top_tables = None
+ log_table = None
+ usage_tables = None
+
+ if self._args.stats:
+ syscall_latency_stats_table, disk_latency_stats_table = \
+ self._get_latency_stats_result_tables(begin_ns, end_ns)
+
+ if self._args.freq:
+ freq_tables = self._get_freq_result_tables(begin_ns, end_ns)
+
+ if self._args.usage:
+ usage_tables = self._get_usage_result_tables(begin_ns, end_ns)
+
+ if self._args.top:
+ top_tables = self._get_top_result_tables(begin_ns, end_ns)
+
+ if self._args.log:
+ log_table = self._get_log_result_table(begin_ns, end_ns)
+
+ if self._mi_mode:
+ self._mi_append_result_tables([
+ log_table,
+ syscall_latency_stats_table,
+ disk_latency_stats_table,
+ ])
+ self._mi_append_result_tables(top_tables)
+ self._mi_append_result_tables(usage_tables)
+ self._mi_append_result_tables(freq_tables)
+ else:
+ self._print_date(begin_ns, end_ns)
+
+ if self._args.usage:
+ self._print_usage(usage_tables)
+
+ if self._args.stats:
+ self._print_latency_stats(syscall_latency_stats_table,
+ disk_latency_stats_table)
+
+ if self._args.top:
+ self._print_top(top_tables)
+
+ if self._args.freq:
+ self._print_freq(freq_tables)
+
+ if self._args.log:
+ self._print_log(log_table)
+
+ def _create_summary_result_tables(self):
+ # TODO: create a summary table here
+ self._mi_clear_result_tables()
+
# Filter predicates
def _filter_size(self, size):
if size is None:
return self._args.begin and io_rq.begin_ts < self._args.begin or \
self._args.end and io_rq.end_ts > self._args.end
- def _print_ascii_graph(self, input_list, get_datum_cb, graph_label,
- graph_args=None):
- """Print an ascii graph for given data
-
- This method wraps the ascii_graph module and facilitates the
- printing of a graph with a limited number of lines.
-
- Args:
- input_list (list): A list of objects from which the data
- for the graph will be generated.
-
- get_datum_cb (function): function that takes a single
- object from the input list as an argument, and returns a
- datum tuple for the graph, of the form (string, int). The
- string element is printed as is in the graph, and the int
- is the numeric value corresponding to this graph entry.
-
- graph_label (string): Label used to identify the printed
- graph.
-
- graph_args (dict, optional): Dict of keyword args to be
- passed to the graph() function as is.
- """
- count = 0
- limit = self._args.limit
- graph = Pyasciigraph()
- data = []
- if graph_args is None:
- graph_args = {}
+ def _append_per_proc_read_usage_row(self, proc_stats, result_table):
+ if not self._filter_process(proc_stats):
+ return False
- for elem in input_list:
- datum = get_datum_cb(elem)
- if datum is not None:
- data.append(datum)
- count += 1
- if limit is not None and count >= limit:
- break
+ result_table.append_row(
+ process=mi.Process(proc_stats.comm, pid=proc_stats.pid,
+ tid=proc_stats.tid),
+ size=mi.Size(proc_stats.total_read),
+ disk_size=mi.Size(proc_stats.disk_read),
+ net_size=mi.Size(proc_stats.net_read),
+ unknown_size=mi.Size(proc_stats.unk_read),
+ )
- for line in graph.graph(graph_label, data, **graph_args):
- print(line)
+ return True
- # I/O Top output methods
- def _get_read_datum(self, proc_stats):
+ def _append_per_proc_write_usage_row(self, proc_stats, result_table):
if not self._filter_process(proc_stats):
- return None
-
- if proc_stats.pid is None:
- pid_str = 'unknown (tid=%d)' % (proc_stats.tid)
- else:
- pid_str = str(proc_stats.pid)
+ return False
- format_str = '{:>10} {:<25} {:>9} file {:>9} net {:>9} unknown'
- output_str = format_str.format(
- common.convert_size(proc_stats.total_read, padding_after=True),
- '%s (%s)' % (proc_stats.comm, pid_str),
- common.convert_size(proc_stats.disk_read, padding_after=True),
- common.convert_size(proc_stats.net_read, padding_after=True),
- common.convert_size(proc_stats.unk_read, padding_after=True))
+ result_table.append_row(
+ process=mi.Process(proc_stats.comm, pid=proc_stats.pid,
+ tid=proc_stats.tid),
+ size=mi.Size(proc_stats.total_write),
+ disk_size=mi.Size(proc_stats.disk_write),
+ net_size=mi.Size(proc_stats.net_write),
+ unknown_size=mi.Size(proc_stats.unk_write),
+ )
- return (output_str, proc_stats.total_read)
+ return True
- def _get_write_datum(self, proc_stats):
- if not self._filter_process(proc_stats):
- return None
+ def _append_per_proc_block_read_usage_row(self, proc_stats, result_table):
+ if not self._filter_process(proc_stats) or proc_stats.block_read == 0:
+ return False
- if proc_stats.pid is None:
- pid_str = 'unknown (tid=%d)' % (proc_stats.tid)
+ if proc_stats.comm:
+ proc_name = proc_stats.comm
else:
- pid_str = str(proc_stats.pid)
-
- format_str = '{:>10} {:<25} {:>9} file {:>9} net {:>9} unknown'
- output_str = format_str.format(
- common.convert_size(proc_stats.total_write, padding_after=True),
- '%s (%s)' % (proc_stats.comm, pid_str),
- common.convert_size(proc_stats.disk_write, padding_after=True),
- common.convert_size(proc_stats.net_write, padding_after=True),
- common.convert_size(proc_stats.unk_write, padding_after=True))
+ proc_name = None
- return (output_str, proc_stats.total_write)
+ result_table.append_row(
+ process=mi.Process(proc_name, pid=proc_stats.pid,
+ tid=proc_stats.tid),
+ size=mi.Size(proc_stats.block_read),
+ )
- def _get_block_read_datum(self, proc_stats):
- if not self._filter_process(proc_stats) or proc_stats.block_read == 0:
- return None
+ return True
- comm = proc_stats.comm
- if not comm:
- comm = 'unknown'
+ def _append_per_proc_block_write_usage_row(self, proc_stats, result_table):
+ if not self._filter_process(proc_stats) or proc_stats.block_write == 0:
+ return False
- if proc_stats.pid is None:
- pid_str = 'unknown (tid=%d)' % (proc_stats.tid)
+ if proc_stats.comm:
+ proc_name = proc_stats.comm
else:
- pid_str = str(proc_stats.pid)
+ proc_name = None
- format_str = '{:>10} {:<22}'
- output_str = format_str.format(
- common.convert_size(proc_stats.block_read, padding_after=True),
- '%s (pid=%s)' % (comm, pid_str))
+ result_table.append_row(
+ process=mi.Process(proc_name, pid=proc_stats.pid,
+ tid=proc_stats.tid),
+ size=mi.Size(proc_stats.block_write),
+ )
- return (output_str, proc_stats.block_read)
+ return True
- def _get_block_write_datum(self, proc_stats):
- if not self._filter_process(proc_stats) or \
- proc_stats.block_write == 0:
+ def _append_disk_sector_usage_row(self, disk_stats, result_table):
+ if disk_stats.total_rq_sectors == 0:
-            return None
+            return False
- comm = proc_stats.comm
- if not comm:
- comm = 'unknown'
-
- if proc_stats.pid is None:
- pid_str = 'unknown (tid=%d)' % (proc_stats.tid)
- else:
- pid_str = str(proc_stats.pid)
-
- format_str = '{:>10} {:<22}'
- output_str = format_str.format(
- common.convert_size(proc_stats.block_write, padding_after=True),
- '%s (pid=%s)' % (comm, pid_str))
+ result_table.append_row(
+ disk=mi.Disk(disk_stats.disk_name),
+ count=mi.Integer(disk_stats.total_rq_sectors),
+ )
- return (output_str, proc_stats.block_write)
+ return True
- def _get_total_rq_sectors_datum(self, disk):
- if disk.total_rq_sectors == 0:
- return None
+ def _append_disk_request_usage_row(self, disk_stats, result_table):
+ if disk_stats.rq_count == 0:
+ return False
- return (disk.disk_name, disk.total_rq_sectors)
+ result_table.append_row(
+ disk=mi.Disk(disk_stats.disk_name),
+ count=mi.Integer(disk_stats.rq_count),
+ )
- def _get_rq_count_datum(self, disk):
- if disk.rq_count == 0:
- return None
+ return True
- return (disk.disk_name, disk.rq_count)
+ def _append_disk_rtps_usage_row(self, disk_stats, result_table):
+ if disk_stats.rq_count == 0:
+ return False
- def _get_avg_disk_latency_datum(self, disk):
- if disk.rq_count == 0:
- return None
+ avg_latency = (disk_stats.total_rq_duration / disk_stats.rq_count)
+ result_table.append_row(
+ disk=mi.Disk(disk_stats.disk_name),
+ rtps=mi.Duration(avg_latency),
+ )
- avg_latency = ((disk.total_rq_duration / disk.rq_count) /
- common.NSEC_PER_MSEC)
- avg_latency = round(avg_latency, 3)
+ return True
- return (disk.disk_name, avg_latency)
+ def _append_netif_recv_usage_row(self, netif_stats, result_table):
+ result_table.append_row(
+ netif=mi.NetIf(netif_stats.name),
+ size=mi.Size(netif_stats.recv_bytes)
+ )
- def _get_net_recv_bytes_datum(self, iface):
- return ('%s %s' % (common.convert_size(iface.recv_bytes), iface.name),
- iface.recv_bytes)
+ return True
- def _get_net_sent_bytes_datum(self, iface):
- return ('%s %s' % (common.convert_size(iface.sent_bytes), iface.name),
- iface.sent_bytes)
+ def _append_netif_send_usage_row(self, netif_stats, result_table):
+ result_table.append_row(
+ netif=mi.NetIf(netif_stats.name),
+ size=mi.Size(netif_stats.sent_bytes)
+ )
- def _get_file_read_datum(self, file_stats):
- if file_stats.read == 0:
- return None
+ return True
+ def _get_file_stats_fd_owners_str(self, file_stats):
fd_by_pid_str = ''
+
for pid, fd in file_stats.fd_by_pid.items():
comm = self._analysis.tids[pid].comm
fd_by_pid_str += 'fd %d in %s (%s) ' % (fd, comm, pid)
- format_str = '{:>10} {} {}'
- output_str = format_str.format(
- common.convert_size(file_stats.read, padding_after=True),
- file_stats.filename,
- fd_by_pid_str)
+ return fd_by_pid_str
+
+ def _append_file_read_usage_row(self, file_stats, result_table):
+ if file_stats.read == 0:
+ return False
- return (output_str, file_stats.read)
+ fd_owners = self._get_file_stats_fd_owners_str(file_stats)
+ result_table.append_row(
+ path=mi.Path(file_stats.filename),
+ size=mi.Size(file_stats.read),
+ fd_owners=mi.String(fd_owners),
+ )
+
+ return True
- def _get_file_write_datum(self, file_stats):
+ def _append_file_write_usage_row(self, file_stats, result_table):
if file_stats.write == 0:
- return None
+ return False
- fd_by_pid_str = ''
- for pid, fd in file_stats.fd_by_pid.items():
- comm = self._analysis.tids[pid].comm
- fd_by_pid_str += 'fd %d in %s (%s) ' % (fd, comm, pid)
+ fd_owners = self._get_file_stats_fd_owners_str(file_stats)
+ result_table.append_row(
+ path=mi.Path(file_stats.filename),
+ size=mi.Size(file_stats.write),
+ fd_owners=mi.String(fd_owners),
+ )
- format_str = '{:>10} {} {}'
- output_str = format_str.format(
- common.convert_size(file_stats.write, padding_after=True),
- file_stats.filename,
- fd_by_pid_str)
+ return True
- return (output_str, file_stats.write)
+    def _fill_usage_result_table(self, input_list, append_row_cb,
+                                 result_table):
+ count = 0
+ limit = self._args.limit
- def _output_read(self):
+ for elem in input_list:
+ if append_row_cb(elem, result_table):
+ count += 1
+
+ if limit is not None and count >= limit:
+ break
+
+ def _fill_per_process_read_usage_result_table(self, result_table):
input_list = sorted(self._analysis.tids.values(),
key=operator.attrgetter('total_read'),
reverse=True)
- label = 'Per-process I/O Read'
- graph_args = {'with_value': False}
- self._print_ascii_graph(input_list, self._get_read_datum, label,
- graph_args)
+ self._fill_usage_result_table(input_list,
+ self._append_per_proc_read_usage_row,
+ result_table)
- def _output_write(self):
+ def _fill_per_process_write_usage_result_table(self, result_table):
input_list = sorted(self._analysis.tids.values(),
key=operator.attrgetter('total_write'),
reverse=True)
- label = 'Per-process I/O Write'
- graph_args = {'with_value': False}
- self._print_ascii_graph(input_list, self._get_write_datum, label,
- graph_args)
+ self._fill_usage_result_table(input_list,
+ self._append_per_proc_write_usage_row,
+ result_table)
- def _output_block_read(self):
+ def _fill_per_process_block_read_usage_result_table(self, result_table):
input_list = sorted(self._analysis.tids.values(),
key=operator.attrgetter('block_read'),
reverse=True)
- label = 'Block I/O Read'
- graph_args = {'with_value': False}
- self._print_ascii_graph(input_list, self._get_block_read_datum,
- label, graph_args)
+ self._fill_usage_result_table(input_list,
+ self._append_per_proc_block_read_usage_row,
+ result_table)
- def _output_block_write(self):
+ def _fill_per_process_block_write_usage_result_table(self, result_table):
input_list = sorted(self._analysis.tids.values(),
key=operator.attrgetter('block_write'),
reverse=True)
- label = 'Block I/O Write'
- graph_args = {'with_value': False}
- self._print_ascii_graph(input_list, self._get_block_write_datum,
- label, graph_args)
+ self._fill_usage_result_table(input_list,
+ self._append_per_proc_block_write_usage_row,
+ result_table)
- def _output_total_rq_sectors(self):
+ def _fill_disk_sector_usage_result_table(self, result_table):
input_list = sorted(self._analysis.disks.values(),
key=operator.attrgetter('total_rq_sectors'),
reverse=True)
- label = 'Disk requests sector count'
- graph_args = {'unit': ' sectors'}
- self._print_ascii_graph(input_list, self._get_total_rq_sectors_datum,
- label, graph_args)
+ self._fill_usage_result_table(input_list,
+ self._append_disk_sector_usage_row,
+ result_table)
- def _output_rq_count(self):
+ def _fill_disk_request_usage_result_table(self, result_table):
input_list = sorted(self._analysis.disks.values(),
key=operator.attrgetter('rq_count'),
reverse=True)
- label = 'Disk request count'
- graph_args = {'unit': ' requests'}
- self._print_ascii_graph(input_list, self._get_rq_count_datum,
- label, graph_args)
+ self._fill_usage_result_table(input_list,
+ self._append_disk_request_usage_row,
+ result_table)
- def _output_avg_disk_latency(self):
+ def _fill_disk_rtps_usage_result_table(self, result_table):
input_list = self._analysis.disks.values()
- label = 'Disk request average latency'
- graph_args = {'unit': ' ms', 'sort': 2}
- self._print_ascii_graph(input_list, self._get_avg_disk_latency_datum,
- label, graph_args)
+ self._fill_usage_result_table(input_list,
+ self._append_disk_rtps_usage_row,
+ result_table)
- def _output_net_recv_bytes(self):
+ def _fill_netif_recv_usage_result_table(self, result_table):
input_list = sorted(self._analysis.ifaces.values(),
key=operator.attrgetter('recv_bytes'),
reverse=True)
- label = 'Network received bytes'
- graph_args = {'with_value': False}
- self._print_ascii_graph(input_list, self._get_net_recv_bytes_datum,
- label, graph_args)
+ self._fill_usage_result_table(input_list,
+ self._append_netif_recv_usage_row,
+ result_table)
- def _output_net_sent_bytes(self):
+ def _fill_netif_send_usage_result_table(self, result_table):
input_list = sorted(self._analysis.ifaces.values(),
key=operator.attrgetter('sent_bytes'),
reverse=True)
- label = 'Network sent bytes'
- graph_args = {'with_value': False}
- self._print_ascii_graph(input_list, self._get_net_sent_bytes_datum,
- label, graph_args)
+ self._fill_usage_result_table(input_list,
+ self._append_netif_send_usage_row,
+ result_table)
- def _output_file_read(self, files):
+ def _fill_file_read_usage_result_table(self, files, result_table):
input_list = sorted(files.values(),
key=lambda file_stats: file_stats.read,
reverse=True)
- label = 'Files read'
- graph_args = {'with_value': False, 'sort': 2}
- self._print_ascii_graph(input_list, self._get_file_read_datum,
- label, graph_args)
+ self._fill_usage_result_table(input_list,
+ self._append_file_read_usage_row,
+ result_table)
- def _output_file_write(self, files):
+ def _fill_file_write_usage_result_table(self, files, result_table):
input_list = sorted(files.values(),
key=lambda file_stats: file_stats.write,
reverse=True)
- label = 'Files write'
- graph_args = {'with_value': False, 'sort': 2}
- self._print_ascii_graph(input_list, self._get_file_write_datum,
- label, graph_args)
+ self._fill_usage_result_table(input_list,
+ self._append_file_write_usage_row,
+ result_table)
- def _output_file_read_write(self):
+ def _fill_file_usage_result_tables(self, read_table, write_table):
files = self._analysis.get_files_stats(self._args.pid_list,
self._args.proc_list)
- self._output_file_read(files)
- self._output_file_write(files)
-
- def iotop_output(self):
- self._output_read()
- self._output_write()
- self._output_file_read_write()
- self._output_block_read()
- self._output_block_write()
- self._output_total_rq_sectors()
- self._output_rq_count()
- self._output_avg_disk_latency()
- self._output_net_recv_bytes()
- self._output_net_sent_bytes()
-
- # I/O Latency frequency output methods
- def _print_frequency_distribution(self, duration_list, title):
+ self._fill_file_read_usage_result_table(files, read_table)
+ self._fill_file_write_usage_result_table(files, write_table)
+
+ def _get_usage_result_tables(self, begin, end):
+ # create result tables
+        per_proc_read_table = self._mi_create_result_table(
+            self._MI_TABLE_CLASS_PER_PROCESS_TOP, begin, end, 'read')
+        per_proc_write_table = self._mi_create_result_table(
+            self._MI_TABLE_CLASS_PER_PROCESS_TOP, begin, end, 'written')
+        per_file_read_table = self._mi_create_result_table(
+            self._MI_TABLE_CLASS_PER_FILE_TOP, begin, end, 'read')
+        per_file_write_table = self._mi_create_result_table(
+            self._MI_TABLE_CLASS_PER_FILE_TOP, begin, end, 'written')
+        per_proc_block_read_table = self._mi_create_result_table(
+            self._MI_TABLE_CLASS_PER_PROCESS_TOP_BLOCK, begin, end, 'read')
+        per_proc_block_write_table = self._mi_create_result_table(
+            self._MI_TABLE_CLASS_PER_PROCESS_TOP_BLOCK, begin, end,
+            'written')
+        per_disk_sector_table = self._mi_create_result_table(
+            self._MI_TABLE_CLASS_PER_DISK_TOP_SECTOR, begin, end)
+        per_disk_request_table = self._mi_create_result_table(
+            self._MI_TABLE_CLASS_PER_DISK_TOP_REQUEST, begin, end)
+        per_disk_rtps_table = self._mi_create_result_table(
+            self._MI_TABLE_CLASS_PER_DISK_TOP_RTPS, begin, end)
+        per_netif_recv_table = self._mi_create_result_table(
+            self._MI_TABLE_CLASS_PER_NETIF_TOP, begin, end, 'received')
+        per_netif_send_table = self._mi_create_result_table(
+            self._MI_TABLE_CLASS_PER_NETIF_TOP, begin, end, 'sent')
+
+ # fill result tables
+ self._fill_per_process_read_usage_result_table(per_proc_read_table)
+ self._fill_per_process_write_usage_result_table(per_proc_write_table)
+ self._fill_file_usage_result_tables(per_file_read_table,
+ per_file_write_table)
+        self._fill_per_process_block_read_usage_result_table(
+            per_proc_block_read_table)
+        self._fill_per_process_block_write_usage_result_table(
+            per_proc_block_write_table)
+ self._fill_disk_sector_usage_result_table(per_disk_sector_table)
+ self._fill_disk_request_usage_result_table(per_disk_request_table)
+ self._fill_disk_rtps_usage_result_table(per_disk_rtps_table)
+ self._fill_netif_recv_usage_result_table(per_netif_recv_table)
+ self._fill_netif_send_usage_result_table(per_netif_send_table)
+
+ return _UsageTables(
+ per_proc_read=per_proc_read_table,
+ per_proc_write=per_proc_write_table,
+ per_file_read=per_file_read_table,
+ per_file_write=per_file_write_table,
+ per_proc_block_read=per_proc_block_read_table,
+ per_proc_block_write=per_proc_block_write_table,
+ per_disk_sector=per_disk_sector_table,
+ per_disk_request=per_disk_request_table,
+ per_disk_rtps=per_disk_rtps_table,
+ per_netif_recv=per_netif_recv_table,
+ per_netif_send=per_netif_send_table,
+ )
+
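+    # The _get_*_usage_datum() methods below convert result table rows
+    # back into the (label, value) tuples expected by Pyasciigraph.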
+ def _get_per_process_read_write_usage_datum(self, row):
+ if row.process.pid is None:
+ pid_str = 'unknown (tid=%d)' % (row.process.tid)
+ else:
+ pid_str = str(row.process.pid)
+
+ format_str = '{:>10} {:<25} {:>9} file {:>9} net {:>9} unknown'
+ output_str = format_str.format(
+ common.convert_size(row.size.value, padding_after=True),
+ '%s (%s)' % (row.process.name, pid_str),
+ common.convert_size(row.disk_size.value, padding_after=True),
+ common.convert_size(row.net_size.value, padding_after=True),
+ common.convert_size(row.unknown_size.value, padding_after=True))
+
+ return (output_str, row.size.value)
+
+ def _get_per_process_block_read_write_usage_datum(self, row):
+ proc_name = row.process.name
+
+ if not proc_name:
+ proc_name = 'unknown'
+
+ if row.process.pid is None:
+ pid_str = 'unknown (tid=%d)' % (row.process.tid)
+ else:
+ pid_str = str(row.process.pid)
+
+ format_str = '{:>10} {:<22}'
+ output_str = format_str.format(
+ common.convert_size(row.size.value, padding_after=True),
+ '%s (pid=%s)' % (proc_name, pid_str))
+
+ return (output_str, row.size.value)
+
+ def _get_per_disk_count_usage_datum(self, row):
+ return (row.disk.name, row.count.value)
+
+ def _get_per_disk_rtps_usage_datum(self, row):
+ avg_latency = row.rtps.value / common.NSEC_PER_MSEC
+ avg_latency = round(avg_latency, 3)
+
+ return (row.disk.name, avg_latency)
+
+ def _get_per_netif_recv_send_usage_datum(self, row):
+        return ('%s %s' % (common.convert_size(row.size.value),
+                           row.netif.name),
+                row.size.value)
+
+ def _get_per_file_read_write_usage_datum(self, row):
+ format_str = '{:>10} {} {}'
+ output_str = format_str.format(
+ common.convert_size(row.size.value, padding_after=True),
+ row.path.path, row.fd_owners.value)
+
+ return (output_str, row.size.value)
+
+    def _print_usage_ascii_graph(self, result_table, get_datum_cb,
+                                 graph_label, graph_args=None):
+ graph = Pyasciigraph()
+ data = []
+
+ if graph_args is None:
+ graph_args = {}
+
+ for row in result_table.rows:
+ datum = get_datum_cb(row)
+ data.append(datum)
+
+ for line in graph.graph(graph_label, data, **graph_args):
+ print(line)
+
+ def _print_per_process_read(self, result_table):
+ label = 'Per-process I/O Read'
+ graph_args = {'with_value': False}
+        self._print_usage_ascii_graph(
+            result_table, self._get_per_process_read_write_usage_datum,
+            label, graph_args)
+
+ def _print_per_process_write(self, result_table):
+ label = 'Per-process I/O Write'
+ graph_args = {'with_value': False}
+        self._print_usage_ascii_graph(
+            result_table, self._get_per_process_read_write_usage_datum,
+            label, graph_args)
+
+ def _print_per_process_block_read(self, result_table):
+ label = 'Block I/O Read'
+ graph_args = {'with_value': False}
+        self._print_usage_ascii_graph(
+            result_table, self._get_per_process_block_read_write_usage_datum,
+            label, graph_args)
+
+ def _print_per_process_block_write(self, result_table):
+ label = 'Block I/O Write'
+ graph_args = {'with_value': False}
+        self._print_usage_ascii_graph(
+            result_table, self._get_per_process_block_read_write_usage_datum,
+            label, graph_args)
+
+ def _print_per_disk_sector(self, result_table):
+ label = 'Disk requests sector count'
+ graph_args = {'unit': ' sectors'}
+ self._print_usage_ascii_graph(result_table,
+ self._get_per_disk_count_usage_datum,
+ label, graph_args)
+
+ def _print_per_disk_request(self, result_table):
+ label = 'Disk request count'
+ graph_args = {'unit': ' requests'}
+ self._print_usage_ascii_graph(result_table,
+ self._get_per_disk_count_usage_datum,
+ label, graph_args)
+
+ def _print_per_disk_rtps(self, result_table):
+ label = 'Disk request average latency'
+ graph_args = {'unit': ' ms', 'sort': 2}
+ self._print_usage_ascii_graph(result_table,
+ self._get_per_disk_rtps_usage_datum,
+ label, graph_args)
+
+ def _print_per_netif_recv(self, result_table):
+ label = 'Network received bytes'
+ graph_args = {'with_value': False}
+        self._print_usage_ascii_graph(
+            result_table, self._get_per_netif_recv_send_usage_datum,
+            label, graph_args)
+
+ def _print_per_netif_send(self, result_table):
+ label = 'Network sent bytes'
+ graph_args = {'with_value': False}
+        self._print_usage_ascii_graph(
+            result_table, self._get_per_netif_recv_send_usage_datum,
+            label, graph_args)
+
+ def _print_per_file_read(self, result_table):
+ label = 'Files read'
+ graph_args = {'with_value': False, 'sort': 2}
+        self._print_usage_ascii_graph(
+            result_table, self._get_per_file_read_write_usage_datum,
+            label, graph_args)
+
+ def _print_per_file_write(self, result_table):
+ label = 'Files write'
+ graph_args = {'with_value': False, 'sort': 2}
+        self._print_usage_ascii_graph(
+            result_table, self._get_per_file_read_write_usage_datum,
+            label, graph_args)
+
+ def _print_usage(self, usage_tables):
+ self._print_per_process_read(usage_tables.per_proc_read)
+ self._print_per_process_write(usage_tables.per_proc_write)
+ self._print_per_file_read(usage_tables.per_file_read)
+ self._print_per_file_write(usage_tables.per_file_write)
+ self._print_per_process_block_read(usage_tables.per_proc_block_read)
+ self._print_per_process_block_write(usage_tables.per_proc_block_write)
+ self._print_per_disk_sector(usage_tables.per_disk_sector)
+ self._print_per_disk_request(usage_tables.per_disk_request)
+ self._print_per_disk_rtps(usage_tables.per_disk_rtps)
+ self._print_per_netif_recv(usage_tables.per_netif_recv)
+ self._print_per_netif_send(usage_tables.per_netif_send)
+
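+    # Bucket the request durations (nanoseconds, converted to usec)
+    # into equal-width latency bins and append one row per bin with
+    # the number of requests that fell into it.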
+ def _fill_freq_result_table(self, duration_list, result_table):
if not duration_list:
return
max_duration /= 1000
step = (max_duration - min_duration) / resolution
+
if step == 0:
return
buckets = []
values = []
- graph = Pyasciigraph()
+
for i in range(resolution):
buckets.append(i * step)
values.append(0)
+
for duration in duration_list:
duration /= 1000
index = min(int((duration - min_duration) / step), resolution - 1)
values[index] += 1
- graph_data = []
for index, value in enumerate(values):
- # The graph data format is a tuple (info, value). Here info
- # is the lower bound of the bucket, value the bucket's count
- graph_data.append(('%0.03f' % (index * step + min_duration),
- value))
+ result_table.append_row(
+ latency_lower=mi.Duration.from_us(index * step + min_duration),
+            latency_upper=mi.Duration.from_us(
+                (index + 1) * step + min_duration),
+ count=mi.Integer(value),
+ )
+
+ def _get_disk_freq_result_tables(self, begin, end):
+ result_tables = []
+
+ for disk in self._analysis.disks.values():
+ rq_durations = [rq.duration for rq in disk.rq_list if
+ self._filter_io_request(rq)]
+ subtitle = 'disk: {}'.format(disk.disk_name)
+ result_table = \
+ self._mi_create_result_table(self._MI_TABLE_CLASS_FREQ,
+ begin, end, subtitle)
+ self._fill_freq_result_table(rq_durations, result_table)
+ result_tables.append(result_table)
+
+ return result_tables
+
+ def _get_syscall_freq_result_tables(self, begin, end):
+ open_table = \
+ self._mi_create_result_table(self._MI_TABLE_CLASS_FREQ,
+ begin, end, 'open')
+ read_table = \
+ self._mi_create_result_table(self._MI_TABLE_CLASS_FREQ,
+ begin, end, 'read')
+ write_table = \
+ self._mi_create_result_table(self._MI_TABLE_CLASS_FREQ,
+ begin, end, 'write')
+ sync_table = \
+ self._mi_create_result_table(self._MI_TABLE_CLASS_FREQ,
+ begin, end, 'sync')
+ self._fill_freq_result_table([io_rq.duration for io_rq in
+ self._analysis.open_io_requests if
+ self._filter_io_request(io_rq)],
+ open_table)
+ self._fill_freq_result_table([io_rq.duration for io_rq in
+ self._analysis.read_io_requests if
+ self._filter_io_request(io_rq)],
+ read_table)
+ self._fill_freq_result_table([io_rq.duration for io_rq in
+ self._analysis.write_io_requests if
+ self._filter_io_request(io_rq)],
+ write_table)
+ self._fill_freq_result_table([io_rq.duration for io_rq in
+ self._analysis.sync_io_requests if
+ self._filter_io_request(io_rq)],
+ sync_table)
+
+ return [open_table, read_table, write_table, sync_table]
+
+ def _get_freq_result_tables(self, begin, end):
+ syscall_tables = self._get_syscall_freq_result_tables(begin, end)
+ disk_tables = self._get_disk_freq_result_tables(begin, end)
+
+ return syscall_tables + disk_tables
+
+ def _print_one_freq(self, result_table):
+ if not result_table.rows:
+ return
+
+        graph = Pyasciigraph()
+        graph_data = []
+
+ for row in result_table.rows:
+ graph_data.append(('%0.03f' % row.latency_lower.to_us(),
+ row.count.value))
+
+        title = '{} {} (usec)'.format(result_table.title,
+                                      result_table.subtitle)
graph_lines = graph.graph(
title,
graph_data,
print()
- def _output_disk_latency_freq(self):
- for disk in self._analysis.disks.values():
- rq_durations = [rq.duration for rq in disk.rq_list if
- self._filter_io_request(rq)]
- self._print_frequency_distribution(
- rq_durations,
- 'Frequency distribution for disk %s (usec)' % (disk.disk_name))
-
- def iolatency_output(self):
- self._output_disk_latency_freq()
-
- def iolatency_syscalls_output(self):
- print()
- self._print_frequency_distribution([io_rq.duration for io_rq in
- self._analysis.open_io_requests if
- self._filter_io_request(io_rq)],
- 'Open latency distribution (usec)')
- self._print_frequency_distribution([io_rq.duration for io_rq in
- self._analysis.read_io_requests if
- self._filter_io_request(io_rq)],
- 'Read latency distribution (usec)')
- self._print_frequency_distribution([io_rq.duration for io_rq in
- self._analysis.write_io_requests if
- self._filter_io_request(io_rq)],
- 'Write latency distribution (usec)')
- self._print_frequency_distribution([io_rq.duration for io_rq in
- self._analysis.sync_io_requests if
- self._filter_io_request(io_rq)],
- 'Sync latency distribution (usec)')
-
- # I/O latency top and log output methods
- def _output_io_request(self, io_rq):
- fmt = '{:<40} {:<16} {:>16} {:>11} {:<24} {:<8} {:<14}'
-
- begin_time = common.ns_to_hour_nsec(io_rq.begin_ts,
- self._args.multi_day,
- self._args.gmt)
- end_time = common.ns_to_hour_nsec(io_rq.end_ts,
- self._args.multi_day,
- self._args.gmt)
- time_range_str = '[' + begin_time + ',' + end_time + ']'
- duration_str = '%0.03f' % (io_rq.duration / 1000)
+ def _print_freq(self, freq_tables):
+ for freq_table in freq_tables:
+ self._print_one_freq(freq_table)
+ def _append_log_row(self, io_rq, result_table):
if io_rq.size is None:
- size = 'N/A'
+ size = mi.Empty()
else:
- size = common.convert_size(io_rq.size)
+ size = mi.Size(io_rq.size)
tid = io_rq.tid
proc_stats = self._analysis.tids[tid]
- comm = proc_stats.comm
+ proc_name = proc_stats.comm
# TODO: handle fd_in/fd_out for RW type operations
if io_rq.fd is None:
- file_str = 'N/A'
+ path = mi.Empty()
+ fd = mi.Empty()
else:
- fd = io_rq.fd
-
+ fd = mi.Fd(io_rq.fd)
parent_proc = proc_stats
+
if parent_proc.pid is not None:
parent_proc = self._analysis.tids[parent_proc.pid]
- fd_stats = parent_proc.get_fd(fd, io_rq.end_ts)
+ fd_stats = parent_proc.get_fd(io_rq.fd, io_rq.end_ts)
+
if fd_stats is not None:
- filename = fd_stats.filename
+ path = mi.Path(fd_stats.filename)
else:
- filename = 'unknown'
+ path = mi.Unknown()
+
+ result_table.append_row(
+ time_range=mi.TimeRange(io_rq.begin_ts, io_rq.end_ts),
+ out_of_range=mi.Boolean(self._is_io_rq_out_of_range(io_rq)),
+ duration=mi.Duration(io_rq.duration),
+ syscall=mi.Syscall(io_rq.syscall_name),
+ size=size,
+ process=mi.Process(proc_name, tid=tid),
+ path=path,
+ fd=fd,
+ )
+
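+    # Append one row per I/O request, sorted by `sort_key`; top tables
+    # are sorted in descending order and cut off at --limit rows.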
+ def _fill_log_result_table(self, rq_list, sort_key, is_top, result_table):
+ if not rq_list:
+ return
+
+ count = 0
- file_str = '%s (fd=%s)' % (filename, fd)
+ for io_rq in sorted(rq_list, key=operator.attrgetter(sort_key),
+ reverse=is_top):
+            if is_top and count >= self._args.limit:
+ break
+
+ self._append_log_row(io_rq, result_table)
+ count += 1
+
+ def _fill_log_result_table_from_io_requests(self, io_requests, sort_key,
+ is_top, result_table):
+ io_requests = [io_rq for io_rq in io_requests if
+ self._filter_io_request(io_rq)]
+        self._fill_log_result_table(io_requests, sort_key, is_top,
+                                    result_table)
+
+ def _get_top_result_tables(self, begin, end):
+ open_table = \
+ self._mi_create_result_table(self._MI_TABLE_CLASS_TOP_SYSCALL,
+ begin, end, 'open')
+ read_table = \
+ self._mi_create_result_table(self._MI_TABLE_CLASS_TOP_SYSCALL,
+ begin, end, 'read')
+ write_table = \
+ self._mi_create_result_table(self._MI_TABLE_CLASS_TOP_SYSCALL,
+ begin, end, 'write')
+ sync_table = \
+ self._mi_create_result_table(self._MI_TABLE_CLASS_TOP_SYSCALL,
+ begin, end, 'sync')
+ self._fill_log_result_table_from_io_requests(
+ self._analysis.open_io_requests, 'duration', True, open_table)
+ self._fill_log_result_table_from_io_requests(
+ self._analysis.read_io_requests, 'duration', True, read_table)
+ self._fill_log_result_table_from_io_requests(
+ self._analysis.write_io_requests, 'duration', True, write_table)
+ self._fill_log_result_table_from_io_requests(
+ self._analysis.sync_io_requests, 'duration', True, sync_table)
+
+ return [open_table, read_table, write_table, sync_table]
+
+ def _print_log_row(self, row):
+ fmt = '{:<40} {:<16} {:>16} {:>11} {:<24} {:<8} {:<14}'
+ begin_time = common.ns_to_hour_nsec(row.time_range.begin,
+ self._args.multi_day,
+ self._args.gmt)
+ end_time = common.ns_to_hour_nsec(row.time_range.end,
+ self._args.multi_day,
+ self._args.gmt)
+ time_range_str = '[' + begin_time + ',' + end_time + ']'
+ duration_str = '%0.03f' % row.duration.to_us()
- if self._is_io_rq_out_of_range(io_rq):
+ if type(row.size) is mi.Empty:
+ size = 'N/A'
+ else:
+ size = common.convert_size(row.size.value)
+
+ tid = row.process.tid
+ proc_name = row.process.name
+
+ if type(row.fd) is mi.Empty:
+ file_str = 'N/A'
+ else:
+ if type(row.path) is mi.Unknown:
+ path = 'unknown'
+ else:
+ path = row.path.path
+
+ file_str = '%s (fd=%s)' % (path, row.fd.fd)
+
+ if row.out_of_range.value:
time_range_str += '*'
duration_str += '*'
else:
time_range_str += ' '
duration_str += ' '
- print(fmt.format(time_range_str, io_rq.syscall_name, duration_str,
- size, comm, tid, file_str))
+ print(fmt.format(time_range_str, row.syscall.name, duration_str,
+ size, proc_name, tid, file_str))
- def _output_io_requests_list(self, rq_list, title, sort_key, is_top=False):
- if not rq_list:
+ def _print_log(self, result_table):
+ if not result_table.rows:
return
- count = 0
has_out_of_range_rq = False
print()
- print(title)
-
+ fmt = '{} {} (usec)'
+ print(fmt.format(result_table.title, result_table.subtitle))
header_fmt = '{:<19} {:<20} {:<16} {:<23} {:<5} {:<24} {:<8} {:<14}'
print(header_fmt.format(
'Begin', 'End', 'Name', 'Duration (usec)', 'Size', 'Proc', 'PID',
'Filename'))
- for io_rq in sorted(rq_list, key=operator.attrgetter(sort_key),
- reverse=is_top):
- if is_top and count > self._args.limit:
- break
+ for row in result_table.rows:
+ self._print_log_row(row)
- self._output_io_request(io_rq)
- if not has_out_of_range_rq and self._is_io_rq_out_of_range(io_rq):
+ if not has_out_of_range_rq and row.out_of_range.value:
has_out_of_range_rq = True
- count += 1
-
if has_out_of_range_rq:
print('*: Syscalls started and/or completed outside of the '
'range specified')
- def _output_latency_log_from_requests(self, io_requests, title, sort_key,
- is_top=False):
- io_requests = [io_rq for io_rq in io_requests if
- self._filter_io_request(io_rq)]
- self._output_io_requests_list(io_requests, title, sort_key, is_top)
-
- def iolatency_syscalls_top_output(self):
- self._output_latency_log_from_requests(
- [io_rq for io_rq in self._analysis.open_io_requests if
- self._filter_io_request(io_rq)],
- 'Top open syscall latencies (usec)',
- 'duration', is_top=True)
- self._output_io_requests_list(
- [io_rq for io_rq in self._analysis.read_io_requests if
- self._filter_io_request(io_rq)],
- 'Top read syscall latencies (usec)',
- 'duration', is_top=True)
- self._output_io_requests_list(
- [io_rq for io_rq in self._analysis.write_io_requests if
- self._filter_io_request(io_rq)],
- 'Top write syscall latencies (usec)',
- 'duration', is_top=True)
- self._output_io_requests_list(
- [io_rq for io_rq in self._analysis.sync_io_requests if
- self._filter_io_request(io_rq)],
- 'Top sync syscall latencies (usec)',
- 'duration', is_top=True)
-
- def iolatency_syscalls_log_output(self):
- self._output_io_requests_list(
- self._analysis.io_requests,
- 'Log of all I/O system calls',
- 'begin_ts')
-
- # I/O Stats output methods
- def _output_latency_stats(self, name, rq_durations):
+ def _print_top(self, top_tables):
+ for table in top_tables:
+ self._print_log(table)
+
+ def _get_log_result_table(self, begin, end):
+ log_table = self._mi_create_result_table(self._MI_TABLE_CLASS_LOG,
+ begin, end)
+ self._fill_log_result_table_from_io_requests(
+ self._analysis.io_requests, 'begin_ts', False, log_table)
+
+ return log_table
+
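+    # Compute the count, min, average, max, and standard deviation of
+    # the given request durations and append them as one row; stdev is
+    # unknown when there are fewer than two requests.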
+ def _append_latency_stats_row(self, obj, rq_durations, result_table):
rq_count = len(rq_durations)
total_duration = sum(rq_durations)
+
        if len(rq_durations) > 0:
            min_duration = min(rq_durations)
            max_duration = max(rq_durations)
        else:
            min_duration = 0
            max_duration = 0
if rq_count < 2:
- stdev = '?'
+ stdev = mi.Unknown()
else:
- stdev = '%0.03f' % (statistics.stdev(rq_durations) / 1000)
+ stdev = mi.Duration(statistics.stdev(rq_durations))
if rq_count > 0:
- avg = '%0.03f' % (total_duration / (rq_count) / 1000)
+ avg = total_duration / rq_count
else:
- avg = "0.000"
-
- min_duration = '%0.03f' % (min_duration / 1000)
- max_duration = '%0.03f' % (max_duration / 1000)
-
- print(IoAnalysisCommand._LATENCY_STATS_FORMAT.format(
- name, rq_count, min_duration, avg, max_duration, stdev))
+ avg = 0
+
+ result_table.append_row(
+ obj=obj,
+ count=mi.Integer(rq_count),
+ min_latency=mi.Duration(min_duration),
+ avg_latency=mi.Duration(avg),
+ max_latency=mi.Duration(max_duration),
+ stdev_latency=stdev,
+ )
- def _output_latency_stats_from_requests(self, io_requests, name):
+ def _append_latency_stats_row_from_requests(self, obj, io_requests,
+ result_table):
rq_durations = [io_rq.duration for io_rq in io_requests if
self._filter_io_request(io_rq)]
- self._output_latency_stats(name, rq_durations)
+ self._append_latency_stats_row(obj, rq_durations, result_table)
+
+ def _get_syscall_latency_stats_result_table(self, begin, end):
+        result_table = self._mi_create_result_table(
+            self._MI_TABLE_CLASS_SYSCALL_LATENCY_STATS, begin, end)
+ append_fn = self._append_latency_stats_row_from_requests
+ append_fn(mi.String('Open'), self._analysis.open_io_requests,
+ result_table)
+ append_fn(mi.String('Read'), self._analysis.read_io_requests,
+ result_table)
+ append_fn(mi.String('Write'), self._analysis.write_io_requests,
+ result_table)
+ append_fn(mi.String('Sync'), self._analysis.sync_io_requests,
+ result_table)
+
+ return result_table
+
+ def _get_disk_latency_stats_result_table(self, begin, end):
+ if not self._analysis.disks:
+ return
+
+        result_table = self._mi_create_result_table(
+            self._MI_TABLE_CLASS_PART_LATENCY_STATS, begin, end)
+ append_fn = self._append_latency_stats_row_from_requests
+
+ for disk in self._analysis.disks.values():
+ if disk.rq_count:
+ rq_durations = [rq.duration for rq in disk.rq_list if
+ self._filter_io_request(rq)]
+                disk_do = mi.Disk(disk.disk_name)
+                self._append_latency_stats_row(disk_do, rq_durations,
+                                               result_table)
+
+ return result_table
+
+ def _get_latency_stats_result_tables(self, begin, end):
+ syscall_tbl = self._get_syscall_latency_stats_result_table(begin, end)
+ disk_tbl = self._get_disk_latency_stats_result_table(begin, end)
+
+ return syscall_tbl, disk_tbl
- def _output_syscalls_latency_stats(self):
+ def _print_latency_stats_row(self, row):
+ if type(row.stdev_latency) is mi.Unknown:
+ stdev = '?'
+ else:
+ stdev = '%0.03f' % row.stdev_latency.to_us()
+
+ avg = '%0.03f' % row.avg_latency.to_us()
+ min_duration = '%0.03f' % row.min_latency.to_us()
+ max_duration = '%0.03f' % row.max_latency.to_us()
+
+ print(IoAnalysisCommand._LATENCY_STATS_FORMAT.format(
+ str(row.obj), row.count.value, min_duration,
+ avg, max_duration, stdev))
+
+ def _print_syscall_latency_stats(self, stats_table):
print('\nSyscalls latency statistics (usec):')
print(IoAnalysisCommand._LATENCY_STATS_FORMAT.format(
'Type', 'Count', 'Min', 'Average', 'Max', 'Stdev'))
print(IoAnalysisCommand._SECTION_SEPARATOR_STRING)
- self._output_latency_stats_from_requests(
- self._analysis.open_io_requests, 'Open')
- self._output_latency_stats_from_requests(
- self._analysis.read_io_requests, 'Read')
- self._output_latency_stats_from_requests(
- self._analysis.write_io_requests, 'Write')
- self._output_latency_stats_from_requests(
- self._analysis.sync_io_requests, 'Sync')
+ for row in stats_table.rows:
+ self._print_latency_stats_row(row)
- def _output_disk_latency_stats(self):
- if not self._analysis.disks:
+ def _print_disk_latency_stats(self, stats_table):
+        if not stats_table or not stats_table.rows:
return
        print('\nDisk latency statistics (usec):')
        print(IoAnalysisCommand._LATENCY_STATS_FORMAT.format(
            'Name', 'Count', 'Min', 'Average', 'Max', 'Stdev'))
print(IoAnalysisCommand._SECTION_SEPARATOR_STRING)
- for disk in self._analysis.disks.values():
- if disk.rq_count:
- rq_durations = [rq.duration for rq in disk.rq_list if
- self._filter_io_request(rq)]
- self._output_latency_stats(disk.disk_name, rq_durations)
-
- def iostats_output(self):
- self._output_syscalls_latency_stats()
- self._output_disk_latency_stats()
+ for row in stats_table.rows:
+ self._print_latency_stats_row(row)
- def _print_results(self, begin_ns, end_ns):
- self._print_date(begin_ns, end_ns)
- if self._args.usage:
- self.iotop_output()
- if self._args.stats:
- self.iostats_output()
- if self._args.top:
- self.iolatency_syscalls_top_output()
- if self._args.freq:
- self.iolatency_syscalls_output()
- self.iolatency_output()
- if self._args.log:
- self.iolatency_syscalls_log_output()
+ def _print_latency_stats(self, syscall_latency_stats_table,
+ disk_latency_stats_table):
+ self._print_syscall_latency_stats(syscall_latency_stats_table)
+ self._print_disk_latency_stats(disk_latency_stats_table)
def _add_arguments(self, ap):
Command._add_proc_filter_args(ap)
help='Output the top I/O latencies by category')
-def runstats():
- sys.argv.insert(1, '--stats')
- iocmd = IoAnalysisCommand()
+def _run(mi_mode):
+ iocmd = IoAnalysisCommand(mi_mode=mi_mode)
iocmd.run()
-def runlatencytop():
+def _runstats(mi_mode):
+ sys.argv.insert(1, '--stats')
+ _run(mi_mode)
+
+
+def _runlog(mi_mode):
+ sys.argv.insert(1, '--log')
+ _run(mi_mode)
+
+
+def _runfreq(mi_mode):
+ sys.argv.insert(1, '--freq')
+ _run(mi_mode)
+
+
+def _runlatencytop(mi_mode):
sys.argv.insert(1, '--top')
- iocmd = IoAnalysisCommand()
- iocmd.run()
+ _run(mi_mode)
+
+
+def _runusage(mi_mode):
+ sys.argv.insert(1, '--usage')
+ _run(mi_mode)
+
+
+def runstats():
+ _runstats(mi_mode=False)
def runlog():
- sys.argv.insert(1, '--log')
- iocmd = IoAnalysisCommand()
- iocmd.run()
+ _runlog(mi_mode=False)
def runfreq():
- sys.argv.insert(1, '--freq')
- iocmd = IoAnalysisCommand()
- iocmd.run()
+ _runfreq(mi_mode=False)
+
+
+def runlatencytop():
+ _runlatencytop(mi_mode=False)
def runusage():
- sys.argv.insert(1, '--usage')
- iocmd = IoAnalysisCommand()
- iocmd.run()
+ _runusage(mi_mode=False)
+
+
+def runstats_mi():
+ _runstats(mi_mode=True)
+
+
+def runlog_mi():
+ _runlog(mi_mode=True)
+
+
+def runfreq_mi():
+ _runfreq(mi_mode=True)
+
+
+def runlatencytop_mi():
+ _runlatencytop(mi_mode=True)
+
+
+def runusage_mi():
+ _runusage(mi_mode=True)
from ..core import irq as core_irq
from ..linuxautomaton import common, sv
from ..ascii_graph import Pyasciigraph
-
+from . import mi
import math
+import itertools
import statistics
import sys
class IrqAnalysisCommand(Command):
_DESC = """The irq command."""
_ANALYSIS_CLASS = core_irq.IrqAnalysis
+ _MI_TITLE = 'System interrupt analysis'
+ _MI_DESCRIPTION = 'Interrupt frequency distribution, statistics, and log'
+ _MI_TAGS = [mi.Tags.INTERRUPT, mi.Tags.STATS, mi.Tags.FREQ, mi.Tags.LOG]
+ _MI_TABLE_CLASS_LOG = 'log'
+ _MI_TABLE_CLASS_HARD_STATS = 'hard-stats'
+ _MI_TABLE_CLASS_SOFT_STATS = 'soft-stats'
+ _MI_TABLE_CLASS_FREQ = 'freq'
+ _MI_TABLE_CLASS_SUMMARY = 'summary'
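+    # each entry: (table class name, title,
+    #              [(column key, column title, data object class[, unit])])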
+ _MI_TABLE_CLASSES = [
+ (
+ _MI_TABLE_CLASS_LOG,
+ 'Interrupt log', [
+ ('time_range', 'Time range', mi.TimeRange),
+ ('raised_ts', 'Raised timestamp', mi.Timestamp),
+ ('cpu', 'CPU', mi.Cpu),
+ ('irq', 'Interrupt', mi.Irq),
+ ]
+ ),
+ (
+ _MI_TABLE_CLASS_HARD_STATS,
+ 'Hardware interrupt statistics', [
+ ('irq', 'Interrupt', mi.Irq),
+ ('count', 'Interrupt count', mi.Integer, 'interrupts'),
+ ('min_duration', 'Minimum duration', mi.Duration),
+ ('avg_duration', 'Average duration', mi.Duration),
+ ('max_duration', 'Maximum duration', mi.Duration),
+                ('stdev_duration', 'Interrupt duration standard deviation',
+                 mi.Duration),
+ ]
+ ),
+ (
+ _MI_TABLE_CLASS_SOFT_STATS,
+            'Software interrupt statistics', [
+                ('irq', 'Interrupt', mi.Irq),
+                ('count', 'Interrupt count', mi.Integer, 'interrupts'),
+                ('min_duration', 'Minimum duration', mi.Duration),
+                ('avg_duration', 'Average duration', mi.Duration),
+                ('max_duration', 'Maximum duration', mi.Duration),
+                ('stdev_duration', 'Interrupt duration standard deviation',
+                 mi.Duration),
+                ('raise_count', 'Interrupt raise count', mi.Integer,
+                 'interrupt raises'),
+                ('min_latency', 'Minimum raise latency', mi.Duration),
+                ('avg_latency', 'Average raise latency', mi.Duration),
+                ('max_latency', 'Maximum raise latency', mi.Duration),
+                ('stdev_latency',
+                 'Interrupt raise latency standard deviation', mi.Duration),
+ ]
+ ),
+ (
+ _MI_TABLE_CLASS_FREQ,
+ 'Interrupt handler duration frequency distribution', [
+ ('duration_lower', 'Duration (lower bound)', mi.Duration),
+ ('duration_upper', 'Duration (upper bound)', mi.Duration),
+ ('count', 'Interrupt count', mi.Integer, 'interrupts'),
+ ]
+ ),
+ (
+ _MI_TABLE_CLASS_SUMMARY,
+ 'Interrupt statistics - summary', [
+ ('time_range', 'Time range', mi.TimeRange),
+ ('count', 'Total interrupt count', mi.Integer, 'interrupts'),
+ ]
+ ),
+ ]
+
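+    # Called at each period boundary: build the requested result tables,
+    # then buffer them in MI mode or print them in human-readable mode.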
+ def _analysis_tick(self, begin_ns, end_ns):
+ log_table = None
+ hard_stats_table = None
+ soft_stats_table = None
+ freq_tables = None
+
+ if self._args.log:
+ log_table = self._get_log_result_table(begin_ns, end_ns)
+
+ if self._args.stats or self._args.freq:
+ hard_stats_table, soft_stats_table, freq_tables = \
+ self._get_stats_freq_result_tables(begin_ns, end_ns)
+
+ if self._mi_mode:
+ self._mi_append_result_table(log_table)
+ self._mi_append_result_table(hard_stats_table)
+ self._mi_append_result_table(soft_stats_table)
+ self._mi_append_result_tables(freq_tables)
+ else:
+ self._print_date(begin_ns, end_ns)
+
+ if hard_stats_table or soft_stats_table or freq_tables:
+ self._print_stats_freq(hard_stats_table, soft_stats_table,
+ freq_tables)
+ if log_table:
+ print()
+
+ if log_table:
+ self._print_log(log_table)
+
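+    # In MI mode with more than one period, the per-period stats tables
+    # are collapsed into a single summary of interrupt counts per range.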
+ def _create_summary_result_tables(self):
+ if not self._args.stats:
+ self._mi_clear_result_tables()
+ return
+
+ hard_stats_tables = \
+ self._mi_get_result_tables(self._MI_TABLE_CLASS_HARD_STATS)
+ soft_stats_tables = \
+ self._mi_get_result_tables(self._MI_TABLE_CLASS_SOFT_STATS)
+        assert(len(hard_stats_tables) == len(soft_stats_tables))
+ begin = hard_stats_tables[0].timerange.begin
+ end = hard_stats_tables[-1].timerange.end
+ summary_table = \
+ self._mi_create_result_table(self._MI_TABLE_CLASS_SUMMARY,
+ begin, end)
+
+ for hs_table, ss_table in zip(hard_stats_tables, soft_stats_tables):
+ assert(hs_table.timerange == ss_table.timerange)
+
+ for row in itertools.chain(hs_table.rows, ss_table.rows):
+ summary_table.append_row(
+ time_range=hs_table.timerange,
+ count=row.count,
+ )
+
+ self._mi_clear_result_tables()
+ self._mi_append_result_table(summary_table)
+
+ def _get_log_result_table(self, begin_ns, end_ns):
+ result_table = self._mi_create_result_table(self._MI_TABLE_CLASS_LOG,
+ begin_ns, end_ns)
+
+ for irq in self._analysis.irq_list:
+ if not self._filter_irq(irq):
+ continue
+
+ if type(irq) is sv.HardIRQ:
+ is_hard = True
+ raised_ts_do = mi.Empty()
+ name = self._analysis.hard_irq_stats[irq.id].name
+ else:
+ is_hard = False
+
+ if irq.raise_ts is None:
+ raised_ts_do = mi.Unknown()
+ else:
+ raised_ts_do = mi.Timestamp(irq.raise_ts)
+
+ name = self._analysis.softirq_stats[irq.id].name
+
+ result_table.append_row(
+ time_range=mi.TimeRange(irq.begin_ts, irq.end_ts),
+ raised_ts=raised_ts_do,
+ cpu=mi.Cpu(irq.cpu_id),
+ irq=mi.Irq(is_hard, irq.id, name),
+ )
+
+ return result_table
+
+ def _get_common_stats_result_table_row(self, is_hard, irq_nr, irq_stats):
+ stdev = self._compute_duration_stdev(irq_stats)
+
+ if math.isnan(stdev):
+ stdev = mi.Unknown()
+ else:
+ stdev = mi.Duration(stdev)
+
+ return (
+ mi.Irq(is_hard, irq_nr, irq_stats.name),
+ mi.Integer(irq_stats.count),
+ mi.Duration(irq_stats.min_duration),
+ mi.Duration(irq_stats.total_duration / irq_stats.count),
+ mi.Duration(irq_stats.max_duration),
+ stdev,
+ )
+
+ def _append_hard_stats_result_table_row(self, irq_nr, irq_stats,
+ hard_stats_table):
+ common_row = self._get_common_stats_result_table_row(True, irq_nr,
+ irq_stats)
+ hard_stats_table.append_row(
+ irq=common_row[0],
+ count=common_row[1],
+ min_duration=common_row[2],
+ avg_duration=common_row[3],
+ max_duration=common_row[4],
+ stdev_duration=common_row[5],
+ )
+
+ def _append_soft_stats_result_table_row(self, irq_nr, irq_stats,
+ soft_stats_table):
+ common_row = self._get_common_stats_result_table_row(False, irq_nr,
+ irq_stats)
+
+ if irq_stats.raise_count == 0:
+ min_latency = mi.Unknown()
+ avg_latency = mi.Unknown()
+ max_latency = mi.Unknown()
+ stdev_latency = mi.Unknown()
+ else:
+ min_latency = mi.Duration(irq_stats.min_raise_latency)
+ avg_latency = irq_stats.total_raise_latency / irq_stats.raise_count
+ avg_latency = mi.Duration(avg_latency)
+ max_latency = mi.Duration(irq_stats.max_raise_latency)
+ stdev = self._compute_raise_latency_stdev(irq_stats)
+
+ if math.isnan(stdev):
+ stdev_latency = mi.Unknown()
+ else:
+ stdev_latency = mi.Duration(stdev)
+
+ soft_stats_table.append_row(
+ irq=common_row[0],
+ count=common_row[1],
+ min_duration=common_row[2],
+ avg_duration=common_row[3],
+ max_duration=common_row[4],
+ stdev_duration=common_row[5],
+ raise_count=mi.Integer(irq_stats.raise_count),
+ min_latency=min_latency,
+ avg_latency=avg_latency,
+ max_latency=max_latency,
+ stdev_latency=stdev_latency,
+ )
+
+ def _fill_freq_result_table(self, irq_stats, freq_table):
+ # The number of bins for the histogram
+ resolution = self._args.freq_resolution
+ min_duration_us = irq_stats.min_duration
+ max_duration_us = irq_stats.max_duration
+
+ # ns to µs
+ min_duration_us /= 1000
+ max_duration_us /= 1000
+
+ # histogram's step
+ step = (max_duration_us - min_duration_us) / resolution
+
+ if step == 0:
+ return
+
+ buckets = []
+ counts = []
+
+ for i in range(resolution):
+ buckets.append(i * step)
+ counts.append(0)
+
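+        # count each interrupt into its duration bucket, clamping the
+        # index so the maximum duration lands in the last bucket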
+ for irq in irq_stats.irq_list:
+ duration_us = (irq.end_ts - irq.begin_ts) / 1000
+            index = min(int((duration_us - min_duration_us) / step),
+                        resolution - 1)
+ counts[index] += 1
+
+ for index, count in enumerate(counts):
+ lower_bound_us = index * step + min_duration_us
+ upper_bound_us = (index + 1) * step + min_duration_us
+ freq_table.append_row(
+ duration_lower=mi.Duration.from_us(lower_bound_us),
+ duration_upper=mi.Duration.from_us(upper_bound_us),
+ count=mi.Integer(count),
+ )
+
+ def _fill_stats_freq_result_tables(self, begin_ns, end_ns, is_hard,
+ analysis_stats, filter_list,
+ hard_stats_table, soft_stats_table,
+ freq_tables):
+ for id in sorted(analysis_stats):
+ if filter_list and str(id) not in filter_list:
+ continue
+
+ irq_stats = analysis_stats[id]
+
+ if irq_stats.count == 0:
+ continue
+
+ if self._args.stats:
+ if is_hard:
+ append_row_fn = self._append_hard_stats_result_table_row
+ table = hard_stats_table
+ else:
+ append_row_fn = self._append_soft_stats_result_table_row
+ table = soft_stats_table
+
+ append_row_fn(id, irq_stats, table)
+
+ if self._args.freq:
+ subtitle = '{} ({})'.format(irq_stats.name, id)
+ freq_table = \
+ self._mi_create_result_table(self._MI_TABLE_CLASS_FREQ,
+ begin_ns, end_ns, subtitle)
+ self._fill_freq_result_table(irq_stats, freq_table)
+
+                # the frequency distribution result table can be empty;
+                # keep it anyway, because there is a 1-to-1 association
+                # between the statistics row indexes (when available)
+                # and the frequency table indexes
+ freq_tables.append(freq_table)
+
+ def _get_stats_freq_result_tables(self, begin_ns, end_ns):
+ def fill_stats_freq_result_tables(is_hard, stats, filter_list):
+ self._fill_stats_freq_result_tables(begin_ns, end_ns, is_hard,
+ stats, filter_list,
+ hard_stats_table,
+ soft_stats_table, freq_tables)
+
+ hard_stats_table = \
+ self._mi_create_result_table(self._MI_TABLE_CLASS_HARD_STATS,
+ begin_ns, end_ns)
+ soft_stats_table = \
+ self._mi_create_result_table(self._MI_TABLE_CLASS_SOFT_STATS,
+ begin_ns, end_ns)
+ freq_tables = []
+
+ if self._args.irq_filter_list is not None or \
+ self._args.softirq_filter_list is None:
+ fill_stats_freq_result_tables(True, self._analysis.hard_irq_stats,
+ self._args.irq_filter_list)
+
+ if self._args.softirq_filter_list is not None or \
+ self._args.irq_filter_list is None:
+ fill_stats_freq_result_tables(False, self._analysis.softirq_stats,
+ self._args.softirq_filter_list)
+
+ return hard_stats_table, soft_stats_table, freq_tables
+
+ def _ns_to_hour_nsec(self, ts):
+ return common.ns_to_hour_nsec(ts, self._args.multi_day, self._args.gmt)
+
+ def _print_log(self, result_table):
+ fmt = '[{:<18}, {:<18}] {:>15} {:>4} {:<9} {:>4} {:<22}'
+ title_fmt = '{:<20} {:<19} {:>15} {:>4} {:<9} {:>4} {:<22}'
+ print(title_fmt.format('Begin', 'End', 'Duration (us)', 'CPU',
+ 'Type', '#', 'Name'))
+ for row in result_table.rows:
+ timerange = row.time_range
+ begin_ts = timerange.begin
+ end_ts = timerange.end
+
+ if type(row.raised_ts) is mi.Timestamp:
+ raised_fmt = ' (raised at %s)'
+ raised_ts = \
+ raised_fmt % self._ns_to_hour_nsec(row.raised_ts.value)
+ else:
+ raised_ts = ''
+
+ cpu_id = row.cpu.id
+ irq_do = row.irq
+
+ if irq_do.is_hard:
+ irqtype = 'IRQ'
+ else:
+ irqtype = 'SoftIRQ'
+
+ print(fmt.format(self._ns_to_hour_nsec(begin_ts),
+ self._ns_to_hour_nsec(end_ts),
+ '%0.03f' % ((end_ts - begin_ts) / 1000),
+ '%d' % cpu_id, irqtype, irq_do.nr,
+ irq_do.name + raised_ts))
def _validate_transform_args(self, args):
args.irq_filter_list = None
return statistics.stdev(raise_latencies)
- def _print_frequency_distribution(self, irq_stats_item, id):
- # The number of bins for the histogram
- resolution = self._args.freq_resolution
-
- min_duration = irq_stats_item.min_duration
- max_duration = irq_stats_item.max_duration
- # ns to µs
- min_duration /= 1000
- max_duration /= 1000
-
- step = (max_duration - min_duration) / resolution
- if step == 0:
- return
-
- buckets = []
- values = []
+ def _print_frequency_distribution(self, freq_table):
graph = Pyasciigraph()
- for i in range(resolution):
- buckets.append(i * step)
- values.append(0)
- for irq in irq_stats_item.irq_list:
- duration = (irq.end_ts - irq.begin_ts) / 1000
- index = min(int((duration - min_duration) / step), resolution - 1)
- values[index] += 1
-
graph_data = []
- for index, value in enumerate(values):
+
+ for row in freq_table.rows:
# The graph data format is a tuple (info, value). Here info
# is the lower bound of the bucket, value the bucket's count
- graph_data.append(('%0.03f' % (index * step + min_duration),
- value))
+ lower_bound_us = row.duration_lower.to_us()
+ count = row.count.value
+
+ graph_data.append(('%0.03f' % lower_bound_us, count))
+
+ title_fmt = 'Handler duration frequency distribution {} (usec)'
graph_lines = graph.graph(
- 'Handler duration frequency distribution %s (%s) (usec)' %
- (irq_stats_item.name, id),
+ title_fmt.format(freq_table.subtitle),
graph_data,
info_before=True,
count=True
if self._args.softirq_filter_list:
return str(irq.id) in self._args.softirq_filter_list
if self._args.irq_filter_list:
            return False
return True
- def _print_irq_log(self):
- fmt = '[{:<18}, {:<18}] {:>15} {:>4} {:<9} {:>4} {:<22}'
- title_fmt = '{:<20} {:<19} {:>15} {:>4} {:<9} {:>4} {:<22}'
- print(title_fmt.format('Begin', 'End', 'Duration (us)', 'CPU',
- 'Type', '#', 'Name'))
- for irq in self._analysis.irq_list:
- if not self._filter_irq(irq):
- continue
-
- raise_ts = ''
- if type(irq) is sv.HardIRQ:
- name = self._analysis.hard_irq_stats[irq.id].name
- irqtype = 'IRQ'
- else:
- name = self._analysis.softirq_stats[irq.id].name
- irqtype = 'SoftIRQ'
- if irq.raise_ts is not None:
- raise_ts = ' (raised at %s)' % \
- (common.ns_to_hour_nsec(irq.raise_ts,
- self._args.multi_day,
- self._args.gmt))
-
- print(fmt.format(common.ns_to_hour_nsec(irq.begin_ts,
- self._args.multi_day,
- self._args.gmt),
- common.ns_to_hour_nsec(irq.end_ts,
- self._args.multi_day,
- self._args.gmt),
- '%0.03f' % ((irq.end_ts - irq.begin_ts) / 1000),
- '%d' % irq.cpu_id, irqtype, irq.id,
- name + raise_ts))
-
- def _print_irq_stats(self, irq_stats, filter_list, header):
- header_printed = False
- for id in sorted(irq_stats):
- if filter_list and str(id) not in filter_list:
- continue
-
- irq_stats_item = irq_stats[id]
- if irq_stats_item.count == 0:
- continue
-
- if self._args.stats:
- if self._args.freq or not header_printed:
- print(header)
- header_printed = True
-
- if type(irq_stats_item) is core_irq.HardIrqStats:
- self._print_hard_irq_stats_item(irq_stats_item, id)
- else:
- self._print_soft_irq_stats_item(irq_stats_item, id)
-
- if self._args.freq:
- self._print_frequency_distribution(irq_stats_item, id)
-
- print()
-
- def _print_hard_irq_stats_item(self, irq_stats_item, id):
- output_str = self._get_duration_stats_str(irq_stats_item, id)
+ def _print_hard_irq_stats_row(self, row):
+ output_str = self._get_duration_stats_str(row)
print(output_str)
- def _print_soft_irq_stats_item(self, irq_stats_item, id):
- output_str = self._get_duration_stats_str(irq_stats_item, id)
- if irq_stats_item.raise_count != 0:
- output_str += self._get_raise_latency_str(irq_stats_item, id)
+ def _print_soft_irq_stats_row(self, row):
+ output_str = self._get_duration_stats_str(row)
+
+ if row.raise_count.value != 0:
+ output_str += self._get_raise_latency_str(row)
print(output_str)
- def _get_duration_stats_str(self, irq_stats_item, id):
+ def _get_duration_stats_str(self, row):
format_str = '{:<3} {:<18} {:>5} {:>12} {:>12} {:>12} {:>12} {:<2}'
+ irq_do = row.irq
+ count = row.count.value
+ min_duration = row.min_duration.to_us()
+ avg_duration = row.avg_duration.to_us()
+ max_duration = row.max_duration.to_us()
- avg_duration = irq_stats_item.total_duration / irq_stats_item.count
- duration_stdev = self._compute_duration_stdev(irq_stats_item)
- min_duration = irq_stats_item.min_duration
- max_duration = irq_stats_item.max_duration
- # ns to µs
- avg_duration /= 1000
- duration_stdev /= 1000
- min_duration /= 1000
- max_duration /= 1000
-
- if math.isnan(duration_stdev):
+ if type(row.stdev_duration) is mi.Unknown:
duration_stdev_str = '?'
else:
- duration_stdev_str = '%0.03f' % duration_stdev
+ duration_stdev_str = '%0.03f' % row.stdev_duration.to_us()
- output_str = format_str.format('%d:' % id,
- '<%s>' % irq_stats_item.name,
- '%d' % irq_stats_item.count,
+ output_str = format_str.format('%d:' % irq_do.nr,
+ '<%s>' % irq_do.name,
+ '%d' % count,
'%0.03f' % min_duration,
'%0.03f' % avg_duration,
'%0.03f' % max_duration,
                                       '%s' % duration_stdev_str,
                                       ' |')
return output_str
- def _get_raise_latency_str(self, irq_stats_item, id):
+ def _get_raise_latency_str(self, row):
format_str = ' {:>6} {:>12} {:>12} {:>12} {:>12}'
+ raise_count = row.raise_count.value
+ min_raise_latency = row.min_latency.to_us()
+ avg_raise_latency = row.avg_latency.to_us()
+ max_raise_latency = row.max_latency.to_us()
- avg_raise_latency = (irq_stats_item.total_raise_latency /
- irq_stats_item.raise_count)
- raise_latency_stdev = self._compute_raise_latency_stdev(irq_stats_item)
- min_raise_latency = irq_stats_item.min_raise_latency
- max_raise_latency = irq_stats_item.max_raise_latency
- # ns to µs
- avg_raise_latency /= 1000
- raise_latency_stdev /= 1000
- min_raise_latency /= 1000
- max_raise_latency /= 1000
-
- if math.isnan(raise_latency_stdev):
+ if type(row.stdev_latency) is mi.Unknown:
raise_latency_stdev_str = '?'
else:
- raise_latency_stdev_str = '%0.03f' % raise_latency_stdev
+ raise_latency_stdev_str = '%0.03f' % row.stdev_latency.to_us()
- output_str = format_str.format(irq_stats_item.raise_count,
+ output_str = format_str.format(raise_count,
'%0.03f' % min_raise_latency,
'%0.03f' % avg_raise_latency,
'%0.03f' % max_raise_latency,
'%s' % raise_latency_stdev_str)
+
return output_str
- def _print_results(self, begin_ns, end_ns):
- if self._args.stats or self._args.freq:
- self._print_stats(begin_ns, end_ns)
- if self._args.log:
- self._print_irq_log()
+ def _print_stats_freq(self, hard_stats_table, soft_stats_table,
+ freq_tables):
+ hard_header_format = '{:<52} {:<12}\n' \
+ '{:<22} {:<14} {:<12} {:<12} {:<10} {:<12}\n'
+ hard_header = hard_header_format.format(
+ 'Hard IRQ', 'Duration (us)',
+ '', 'count', 'min', 'avg', 'max', 'stdev'
+ )
+ hard_header += ('-' * 82 + '|')
+ soft_header_format = '{:<52} {:<52} {:<12}\n' \
+ '{:<22} {:<14} {:<12} {:<12} {:<10} {:<4} ' \
+ '{:<3} {:<14} {:<12} {:<12} {:<10} {:<12}\n'
+ soft_header = soft_header_format.format(
+ 'Soft IRQ', 'Duration (us)',
+ 'Raise latency (us)', '',
+ 'count', 'min', 'avg', 'max', 'stdev', ' |',
+ 'count', 'min', 'avg', 'max', 'stdev'
+ )
+ soft_header += '-' * 82 + '|' + '-' * 60
- def _print_stats(self, begin_ns, end_ns):
- self._print_date(begin_ns, end_ns)
+ if hard_stats_table.rows or soft_stats_table.rows:
+ stats_rows = itertools.chain(hard_stats_table.rows,
+ soft_stats_table.rows)
- if self._args.irq_filter_list is not None or \
- self._args.softirq_filter_list is None:
- header_format = '{:<52} {:<12}\n' \
- '{:<22} {:<14} {:<12} {:<12} {:<10} {:<12}\n'
- header = header_format.format(
- 'Hard IRQ', 'Duration (us)',
- '', 'count', 'min', 'avg', 'max', 'stdev'
- )
- header += ('-' * 82 + '|')
- self._print_irq_stats(self._analysis.hard_irq_stats,
- self._args.irq_filter_list,
- header)
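+            # statistics rows and frequency tables were appended in the
+            # same order, so matching entries can be zipped together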
+ if freq_tables:
+ for stats_row, freq_table in zip(stats_rows, freq_tables):
+ irq = stats_row.irq
- if self._args.softirq_filter_list is not None or \
- self._args.irq_filter_list is None:
- header_format = '{:<52} {:<52} {:<12}\n' \
- '{:<22} {:<14} {:<12} {:<12} {:<10} {:<4} ' \
- '{:<3} {:<14} {:<12} {:<12} {:<10} {:<12}\n'
- header = header_format.format(
- 'Soft IRQ', 'Duration (us)',
- 'Raise latency (us)', '',
- 'count', 'min', 'avg', 'max', 'stdev', ' |',
- 'count', 'min', 'avg', 'max', 'stdev'
- )
- header += '-' * 82 + '|' + '-' * 60
- self._print_irq_stats(self._analysis.softirq_stats,
- self._args.softirq_filter_list,
- header)
+ if irq.is_hard:
+ print(hard_header)
+ self._print_hard_irq_stats_row(stats_row)
+ else:
+ print(soft_header)
+ self._print_soft_irq_stats_row(stats_row)
+
+ # frequency table might be empty: do not print
+ if freq_table.rows:
+ print()
+ self._print_frequency_distribution(freq_table)
+
+ print()
+
+ else:
+ hard_header_printed = False
+ soft_header_printed = False
+
+ for stats_row in stats_rows:
+ irq = stats_row.irq
+
+ if irq.is_hard:
+ if not hard_header_printed:
+ print(hard_header)
+ hard_header_printed = True
+
+ self._print_hard_irq_stats_row(stats_row)
+ else:
+ if not soft_header_printed:
+ if hard_header_printed:
+ print()
+
+ print(soft_header)
+ soft_header_printed = True
+
+ self._print_soft_irq_stats_row(stats_row)
+
+ return
+
+ for freq_table in freq_tables:
+ # frequency table might be empty: do not print
+ if freq_table.rows:
+ print()
+ self._print_frequency_distribution(freq_table)
def _add_arguments(self, ap):
Command._add_min_max_args(ap)
help='Output results only for the list of SoftIRQ')
-# entry point
-def runstats():
- sys.argv.insert(1, '--stats')
- irqcmd = IrqAnalysisCommand()
+def _run(mi_mode):
+ irqcmd = IrqAnalysisCommand(mi_mode=mi_mode)
irqcmd.run()
-def runlog():
+def _runstats(mi_mode):
+ sys.argv.insert(1, '--stats')
+ _run(mi_mode)
+
+
+def _runlog(mi_mode):
sys.argv.insert(1, '--log')
- irqcmd = IrqAnalysisCommand()
- irqcmd.run()
+ _run(mi_mode)
-def runfreq():
+def _runfreq(mi_mode):
sys.argv.insert(1, '--freq')
- irqcmd = IrqAnalysisCommand()
- irqcmd.run()
+ _run(mi_mode)
+
+
+def runstats():
+ _runstats(mi_mode=False)
+
+
+def runlog():
+ _runlog(mi_mode=False)
+
+
+def runfreq():
+ _runfreq(mi_mode=False)
+
+
+def runstats_mi():
+ _runstats(mi_mode=True)
+
+
+def runlog_mi():
+ _runlog(mi_mode=True)
+
+
+def runfreq_mi():
+ _runfreq(mi_mode=True)
from .command import Command
from ..core import memtop
from ..ascii_graph import Pyasciigraph
+from . import mi
import operator
class Memtop(Command):
_DESC = """The memtop command."""
_ANALYSIS_CLASS = memtop.Memtop
-
- def _print_results(self, begin_ns, end_ns):
- self._print_date(begin_ns, end_ns)
- self._print_per_tid_alloc()
- self._print_per_tid_freed()
- self._print_total_alloc_freed()
-
- def _print_per_tid_alloc(self):
- graph = Pyasciigraph()
- values = []
+ _MI_TITLE = 'Top memory usage'
+ _MI_DESCRIPTION = 'Per-TID top allocated/freed memory'
+ _MI_TAGS = [mi.Tags.MEMORY, mi.Tags.TOP]
+ _MI_TABLE_CLASS_ALLOCD = 'allocd'
+ _MI_TABLE_CLASS_FREED = 'freed'
+ _MI_TABLE_CLASS_TOTAL = 'total'
+ _MI_TABLE_CLASS_SUMMARY = 'summary'
+ _MI_TABLE_CLASSES = [
+ (
+ _MI_TABLE_CLASS_ALLOCD,
+ 'Per-TID top allocated memory', [
+ ('process', 'Process', mi.Process),
+ ('pages', 'Allocated pages', mi.Integer, 'pages'),
+ ]
+ ),
+ (
+ _MI_TABLE_CLASS_FREED,
+ 'Per-TID top freed memory', [
+ ('process', 'Process', mi.Process),
+ ('pages', 'Freed pages', mi.Integer, 'pages'),
+ ]
+ ),
+ (
+ _MI_TABLE_CLASS_TOTAL,
+ 'Total allocated/freed memory', [
+ ('allocd', 'Total allocated pages', mi.Integer, 'pages'),
+ ('freed', 'Total freed pages', mi.Integer, 'pages'),
+ ]
+ ),
+ (
+ _MI_TABLE_CLASS_SUMMARY,
+ 'Memory usage - summary', [
+ ('time_range', 'Time range', mi.TimeRange),
+ ('allocd', 'Total allocated pages', mi.Integer, 'pages'),
+ ('freed', 'Total freed pages', mi.Integer, 'pages'),
+ ]
+ ),
+ ]
+
+ def _analysis_tick(self, begin_ns, end_ns):
+ allocd_table = self._get_per_tid_allocd_result_table(begin_ns, end_ns)
+ freed_table = self._get_per_tid_freed_result_table(begin_ns, end_ns)
+ total_table = self._get_total_result_table(begin_ns, end_ns)
+
+ if self._mi_mode:
+ self._mi_append_result_table(allocd_table)
+ self._mi_append_result_table(freed_table)
+ self._mi_append_result_table(total_table)
+ else:
+ self._print_date(begin_ns, end_ns)
+ self._print_per_tid_allocd(allocd_table)
+ self._print_per_tid_freed(freed_table)
+ self._print_total(total_table)
+
+ def _create_summary_result_tables(self):
+ total_tables = self._mi_get_result_tables(self._MI_TABLE_CLASS_TOTAL)
+ begin = total_tables[0].timerange.begin
+ end = total_tables[-1].timerange.end
+ summary_table = \
+ self._mi_create_result_table(self._MI_TABLE_CLASS_SUMMARY,
+ begin, end)
+
+ for total_table in total_tables:
+ total_allocd = total_table.rows[0].allocd
+ total_freed = total_table.rows[0].freed
+ summary_table.append_row(
+ time_range=total_table.timerange,
+ allocd=total_allocd,
+ freed=total_freed,
+ )
+
+ self._mi_clear_result_tables()
+ self._mi_append_result_table(summary_table)
+
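+    # The allocated and freed tables differ only by the TID attribute
+    # they sort and report on, so a single parametrized builder is used.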
+ def _get_per_tid_attr_result_table(self, table_class, attr,
+ begin_ns, end_ns):
+ result_table = self._mi_create_result_table(table_class,
+ begin_ns, end_ns)
count = 0
for tid in sorted(self._analysis.tids.values(),
- key=operator.attrgetter('allocated_pages'),
+ key=operator.attrgetter(attr),
reverse=True):
if not self._filter_process(tid):
continue
- values.append(('%s (%d)' % (tid.comm, tid.tid),
- tid.allocated_pages))
-
+ result_table.append_row(
+ process=mi.Process(tid.comm, tid=tid.tid),
+ pages=mi.Integer(getattr(tid, attr)),
+ )
count += 1
+
if self._args.limit > 0 and count >= self._args.limit:
break
- for line in graph.graph('Per-TID Memory Allocations', values,
- unit=' pages'):
- print(line)
-
- def _print_per_tid_freed(self):
- graph = Pyasciigraph()
- values = []
- count = 0
-
- for tid in sorted(self._analysis.tids.values(),
- key=operator.attrgetter('freed_pages'),
- reverse=True):
- if not self._filter_process(tid):
- continue
+ return result_table
- values.append(('%s (%d)' % (tid.comm, tid.tid), tid.freed_pages))
- count += 1
- if self._args.limit > 0 and count >= self._args.limit:
- break
+ def _get_per_tid_allocd_result_table(self, begin_ns, end_ns):
+ return self._get_per_tid_attr_result_table(self._MI_TABLE_CLASS_ALLOCD,
+ 'allocated_pages',
+ begin_ns, end_ns)
- for line in graph.graph('Per-TID Memory Deallocation', values,
- unit=' pages'):
- print(line)
+ def _get_per_tid_freed_result_table(self, begin_ns, end_ns):
+ return self._get_per_tid_attr_result_table(self._MI_TABLE_CLASS_FREED,
+ 'freed_pages',
+ begin_ns, end_ns)
- def _print_total_alloc_freed(self):
+ def _get_total_result_table(self, begin_ns, end_ns):
+ result_table = self._mi_create_result_table(self._MI_TABLE_CLASS_TOTAL,
+ begin_ns, end_ns)
        alloc = 0
        freed = 0

        for tid in self._analysis.tids.values():
            alloc += tid.allocated_pages
            freed += tid.freed_pages
+ result_table.append_row(
+ allocd=mi.Integer(alloc),
+ freed=mi.Integer(freed),
+ )
+
+ return result_table
+
+ def _print_per_tid_result(self, result_table, title):
+ graph = Pyasciigraph()
+ values = []
+
+ for row in result_table.rows:
+ process_do = row.process
+ pages = row.pages.value
+ values.append(('%s (%d)' % (process_do.name, process_do.tid),
+ pages))
+
+ for line in graph.graph(title, values, unit=' pages'):
+ print(line)
+
+ def _print_per_tid_allocd(self, result_table):
+ self._print_per_tid_result(result_table, 'Per-TID Memory Allocations')
+
+ def _print_per_tid_freed(self, result_table):
+ self._print_per_tid_result(result_table, 'Per-TID Memory Deallocations')
+
+ def _print_total(self, result_table):
+ alloc = result_table.rows[0].allocd.value
+ freed = result_table.rows[0].freed.value
print('\nTotal memory usage:\n- %d pages allocated\n- %d pages freed' %
(alloc, freed))
Command._add_proc_filter_args(ap)
-def run():
- memtopcmd = Memtop()
+def _run(mi_mode):
+ memtopcmd = Memtop(mi_mode=mi_mode)
memtopcmd.run()
+
+
+# entry point (human)
+def run():
+ _run(mi_mode=False)
+
+
+# entry point (MI)
+def run_mi():
+ _run(mi_mode=True)
--- /dev/null
+#!/usr/bin/env python3
+#
+# The MIT License (MIT)
+#
+# Copyright (C) 2015 - Philippe Proulx <pproulx@efficios.com>
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
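+# Data objects and result tables for the JSON machine interface (MI)
+# output: a TableClass describes a table's columns, a ResultTable holds
+# rows of typed data objects, and get_metadata() assembles the metadata
+# object that a command dumps as JSON.
+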
+from collections import namedtuple
+
+
+class Tags:
+ CPU = 'cpu'
+ MEMORY = 'memory'
+ INTERRUPT = 'interrupt'
+ SYSCALL = 'syscall'
+ IO = 'io'
+ TOP = 'top'
+ STATS = 'stats'
+ FREQ = 'freq'
+ LOG = 'log'
+
+
+class ColumnDescription:
+ def __init__(self, key, title, do_class, unit=None):
+ self._key = key
+ self._title = title
+ self._do_class = do_class
+ self._unit = unit
+
+ @property
+ def key(self):
+ return self._key
+
+ def to_native_object(self):
+ obj = {
+ 'title': self._title,
+ 'class': self._do_class,
+ }
+
+ if self._unit:
+ obj['unit'] = self._unit
+
+ return obj
+
+
+class TableClass:
+    def __init__(self, name, title, column_descriptions_tuples=None,
+                 inherit=None):
+        # avoid a shared mutable default argument
+        if column_descriptions_tuples is None:
+            column_descriptions_tuples = []
+
+ self._inherit = inherit
+ self._name = name
+ self._title = title
+ self._column_descriptions = []
+
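+        # each tuple: (key, title, data object class) with an optional
+        # trailing unit string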
+ for column_descr_tuple in column_descriptions_tuples:
+ key = column_descr_tuple[0]
+ title = column_descr_tuple[1]
+ do_type = column_descr_tuple[2]
+ unit = None
+
+ if len(column_descr_tuple) > 3:
+ unit = column_descr_tuple[3]
+
+ column_descr = ColumnDescription(key, title, do_type.CLASS, unit)
+ self._column_descriptions.append(column_descr)
+
+ @property
+ def name(self):
+ return self._name
+
+ @property
+ def title(self):
+ return self._title
+
+ def to_native_object(self):
+ obj = {}
+ column_descrs = self._column_descriptions
+ native_column_descrs = [c.to_native_object() for c in column_descrs]
+
+ if self._inherit is not None:
+ obj['inherit'] = self._inherit
+
+ if self._title is not None:
+ obj['title'] = self._title
+
+ if native_column_descrs:
+ obj['column-descriptions'] = native_column_descrs
+
+ return obj
+
+ def get_column_named_tuple(self):
+ keys = [cd.key for cd in self._column_descriptions]
+
+ return namedtuple('Column', keys)
+
+
+class ResultTable:
+ def __init__(self, table_class, begin, end, subtitle=None):
+ self._table_class = table_class
+ self._column_named_tuple = table_class.get_column_named_tuple()
+ self._subtitle = subtitle
+ self._timerange = TimeRange(begin, end)
+ self._rows = []
+
+ @property
+ def table_class(self):
+ return self._table_class
+
+ @property
+ def timerange(self):
+ return self._timerange
+
+ @property
+ def title(self):
+ return self._table_class.title
+
+ @property
+ def subtitle(self):
+ return self._subtitle
+
+ def append_row(self, **kwargs):
+ row = self._column_named_tuple(**kwargs)
+ self._rows.append(row)
+
+ def append_row_tuple(self, row_tuple):
+ self._rows.append(row_tuple)
+
+ @property
+ def rows(self):
+ return self._rows
+
+ def to_native_object(self):
+ obj = {
+ 'class': self._table_class.name,
+ 'time-range': self._timerange.to_native_object(),
+ }
+ row_objs = []
+
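+        # a subtitle replaces the named table class with an anonymous
+        # class inheriting it, so that the combined title is emitted
+        # inline rather than as a plain class-name reference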
+ if self._table_class.name:
+ if self._subtitle is not None:
+ full_title = '{} [{}]'.format(self.title, self._subtitle)
+ table_class = TableClass(None, full_title,
+ inherit=self._table_class.name)
+ self._table_class = table_class
+
+ if self._table_class.name is None:
+ obj['class'] = self._table_class.to_native_object()
+
+ for row in self._rows:
+ row_obj = []
+
+ for cell in row:
+ row_obj.append(cell.to_native_object())
+
+ row_objs.append(row_obj)
+
+ obj['data'] = row_objs
+
+ return obj
+
+
+class _DataObject:
+ def to_native_object(self):
+ raise NotImplementedError
+
+ def __eq__(self, other):
+ # ensure we're comparing the same type first
+ if not isinstance(other, self.__class__):
+ return False
+
+ # call specific equality method
+ return self._eq(other)
+
+ def _eq(self, other):
+ raise NotImplementedError
+
+
+class _UnstructuredDataObject(_DataObject):
+ def __init__(self, value):
+ self._value = value
+
+ @property
+ def value(self):
+ return self._value
+
+ def to_native_object(self):
+ return self._value
+
+ def __str__(self):
+ return str(self._value)
+
+ def _eq(self, other):
+ return self._value == other._value
+
+
+class _StructuredDataObject(_DataObject):
+ def to_native_object(self):
+ base = {'class': self.CLASS}
+ base.update(self._to_native_object())
+
+ return base
+
+ def _to_native_object(self):
+ raise NotImplementedError
+
+
+class Boolean(_UnstructuredDataObject):
+ CLASS = 'bool'
+
+
+class Integer(_UnstructuredDataObject):
+ CLASS = 'int'
+
+
+class Float(_UnstructuredDataObject):
+ CLASS = 'float'
+
+
+class String(_UnstructuredDataObject):
+ CLASS = 'string'
+
+
+class Empty(_DataObject):
+ def to_native_object(self):
+ return None
+
+ def _eq(self, other):
+ return True
+
+
+class Unknown(_StructuredDataObject):
+ CLASS = 'unknown'
+
+ def _to_native_object(self):
+ return {}
+
+ def _eq(self, other):
+ return True
+
+ def __str__(self):
+ return '?'
+
+
+class _SimpleValue(_StructuredDataObject):
+ def __init__(self, value):
+ self._value = value
+
+ @property
+ def value(self):
+ return self._value
+
+ def _to_native_object(self):
+ return {'value': self._value}
+
+ def __str__(self):
+ return str(self._value)
+
+ def _eq(self, other):
+ return self._value == other._value
+
+
+class _SimpleName(_StructuredDataObject):
+ def __init__(self, name):
+ self._name = name
+
+ @property
+ def name(self):
+ return self._name
+
+ def _to_native_object(self):
+ return {'name': self._name}
+
+ def __str__(self):
+ return self._name
+
+ def _eq(self, other):
+ return self._name == other._name
+
+
+class Ratio(_SimpleValue):
+ CLASS = 'ratio'
+
+ @classmethod
+ def from_percentage(cls, value):
+ return cls(value / 100)
+
+ def to_percentage(self):
+ return self._value * 100
+
+
+class Timestamp(_SimpleValue):
+ CLASS = 'timestamp'
+
+
+class Duration(_SimpleValue):
+ CLASS = 'duration'
+
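+    # values are in nanoseconds; the helpers below convert to and from
+    # milliseconds and microseconds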
+ @classmethod
+ def from_ms(cls, ms):
+ return cls(ms * 1000000)
+
+ @classmethod
+ def from_us(cls, us):
+ return cls(us * 1000)
+
+ def to_ms(self):
+ return self._value / 1000000
+
+ def to_us(self):
+ return self._value / 1000
+
+
+class Size(_SimpleValue):
+ CLASS = 'size'
+
+
+class Bitrate(_SimpleValue):
+ CLASS = 'bitrate'
+
+ @classmethod
+ def from_size_duration(cls, size, duration):
+ return cls(size * 8 / duration)
+
+
+class TimeRange(_StructuredDataObject):
+ CLASS = 'time-range'
+
+ def __init__(self, begin, end):
+ self._begin = begin
+ self._end = end
+
+ @property
+ def begin(self):
+ return self._begin
+
+ @property
+ def end(self):
+ return self._end
+
+ def _to_native_object(self):
+ return {'begin': self._begin, 'end': self._end}
+
+ def _eq(self, other):
+ return (self._begin, self._end) == (other._begin, other._end)
+
+
+class Syscall(_SimpleName):
+ CLASS = 'syscall'
+
+
+class Process(_StructuredDataObject):
+ CLASS = 'process'
+
+ def __init__(self, name=None, pid=None, tid=None):
+ self._name = name
+ self._pid = pid
+ self._tid = tid
+
+ @property
+ def name(self):
+ return self._name
+
+ @property
+ def pid(self):
+ return self._pid
+
+ @property
+ def tid(self):
+ return self._tid
+
+ def _to_native_object(self):
+ ret_dict = {}
+
+ if self._name is not None:
+ ret_dict['name'] = self._name
+
+ if self._pid is not None:
+ ret_dict['pid'] = self._pid
+
+ if self._tid is not None:
+ ret_dict['tid'] = self._tid
+
+ return ret_dict
+
+ def _eq(self, other):
+ self_tuple = (self._name, self._pid, self._tid)
+ other_tuple = (other._name, other._pid, other._tid)
+
+ return self_tuple == other_tuple
+
+
+class Path(_StructuredDataObject):
+ CLASS = 'path'
+
+ def __init__(self, path):
+ self._path = path
+
+ @property
+ def path(self):
+ return self._path
+
+ def _to_native_object(self):
+ return {'path': self._path}
+
+ def _eq(self, other):
+ return self._path == other._path
+
+
+class Fd(_StructuredDataObject):
+ CLASS = 'fd'
+
+ def __init__(self, fd):
+ self._fd = fd
+
+ @property
+ def fd(self):
+ return self._fd
+
+ def _to_native_object(self):
+ return {'fd': self._fd}
+
+ def _eq(self, other):
+ return self._fd == other._fd
+
+
+class Irq(_StructuredDataObject):
+ CLASS = 'irq'
+
+ def __init__(self, is_hard, nr, name=None):
+ self._is_hard = is_hard
+ self._nr = nr
+ self._name = name
+
+ @property
+ def is_hard(self):
+ return self._is_hard
+
+ @property
+ def nr(self):
+ return self._nr
+
+ @property
+ def name(self):
+ return self._name
+
+ def _to_native_object(self):
+ obj = {'hard': self._is_hard, 'nr': self._nr}
+
+ if self._name is not None:
+ obj['name'] = self._name
+
+ return obj
+
+ def _eq(self, other):
+ self_tuple = (self._is_hard, self._nr, self._name)
+ other_tuple = (other._is_hard, other._nr, other._name)
+
+ return self_tuple == other_tuple
+
+
+class Cpu(_StructuredDataObject):
+ CLASS = 'cpu'
+
+ def __init__(self, cpu_id):
+ self._id = cpu_id
+
+ @property
+ def id(self):
+ return self._id
+
+ def _to_native_object(self):
+ return {'id': self._id}
+
+ def _eq(self, other):
+ return self._id == other._id
+
+
+class Disk(_SimpleName):
+ CLASS = 'disk'
+
+
+class Partition(_SimpleName):
+ CLASS = 'part'
+
+
+class NetIf(_SimpleName):
+ CLASS = 'netif'
+
+
+def get_metadata(version, title, description, authors, url, tags,
+ table_classes):
+ t_classes = {t.name: t.to_native_object() for t in table_classes}
+
+ return {
+ 'version': {
+ 'major': version[0],
+ 'minor': version[1],
+ 'patch': version[2],
+ 'extra': version[3]
+ },
+ 'title': title,
+ 'description': description,
+ 'authors': authors,
+ 'url': url,
+ 'tags': tags,
+ 'table-classes': t_classes,
+ }
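+
+
+# Minimal usage sketch (hypothetical names and values) of the building
+# blocks above:
+#
+#     tc = TableClass('example', 'Example table',
+#                     [('duration', 'Duration', Duration)])
+#     table = ResultTable(tc, begin=0, end=1000000000)
+#     table.append_row(duration=Duration.from_us(123.456))
+#     print(json.dumps(table.to_native_object()))  # needs `import json`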
from .command import Command
from ..core import syscalls
-
+from . import mi
import operator
import statistics
import errno
class SyscallsAnalysis(Command):
_DESC = """The syscallstats command."""
_ANALYSIS_CLASS = syscalls.SyscallsAnalysis
-
- def _print_results(self, begin_ns, end_ns):
- line_format = '{:<38} {:>14} {:>14} {:>14} {:>12} {:>10} {:<14}'
-
- self._print_date(begin_ns, end_ns)
- print('Per-TID syscalls statistics (usec)')
+ _MI_TITLE = 'System call statistics'
+ _MI_DESCRIPTION = 'Per-TID and global system call statistics'
+ _MI_TAGS = [mi.Tags.SYSCALL, mi.Tags.STATS]
+ _MI_TABLE_CLASS_PER_TID_STATS = 'per-tid'
+ _MI_TABLE_CLASS_TOTAL = 'total'
+ _MI_TABLE_CLASS_SUMMARY = 'summary'
+ _MI_TABLE_CLASSES = [
+ (
+ _MI_TABLE_CLASS_PER_TID_STATS,
+ 'System call statistics', [
+ ('syscall', 'System call', mi.Syscall),
+ ('count', 'Call count', mi.Integer, 'calls'),
+ ('min_duration', 'Minimum call duration', mi.Duration),
+ ('avg_duration', 'Average call duration', mi.Duration),
+ ('max_duration', 'Maximum call duration', mi.Duration),
+ ('stdev_duration', 'Call duration standard deviation', mi.Duration),
+ ('return_values', 'Return values count', mi.String),
+ ]
+ ),
+ (
+ _MI_TABLE_CLASS_TOTAL,
+ 'Per-TID system call statistics', [
+ ('process', 'Process', mi.Process),
+ ('count', 'Total system call count', mi.Integer, 'calls'),
+ ]
+ ),
+ (
+ _MI_TABLE_CLASS_SUMMARY,
+ 'System call statistics - summary', [
+ ('time_range', 'Time range', mi.TimeRange),
+ ('process', 'Process', mi.Process),
+ ('count', 'Total system call count', mi.Integer, 'calls'),
+ ]
+ ),
+ ]
+
+ def _analysis_tick(self, begin_ns, end_ns):
+ total_table, per_tid_tables = self._get_result_tables(begin_ns, end_ns)
+
+ if self._mi_mode:
+ self._mi_append_result_tables(per_tid_tables)
+ self._mi_append_result_table(total_table)
+ else:
+ self._print_date(begin_ns, end_ns)
+ self._print_results(total_table, per_tid_tables)
+
+ def _post_analysis(self):
+ if not self._mi_mode:
+ return
+
+ if len(self._mi_get_result_tables(self._MI_TABLE_CLASS_TOTAL)) > 1:
+ self._create_summary_result_table()
+
+ self._mi_print()
+
+ def _create_summary_result_table(self):
+ total_tables = self._mi_get_result_tables(self._MI_TABLE_CLASS_TOTAL)
+ begin = total_tables[0].timerange.begin
+ end = total_tables[-1].timerange.end
+ summary_table = \
+ self._mi_create_result_table(self._MI_TABLE_CLASS_SUMMARY,
+ begin, end)
+
+ for total_table in total_tables:
+ for row in total_table.rows:
+ process = row.process
+ count = row.count
+ summary_table.append_row(
+ time_range=total_table.timerange,
+ process=process,
+ count=count,
+ )
+
+ self._mi_clear_result_tables()
+ self._mi_append_result_table(summary_table)
+
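+    # Builds one per-TID result table (one row per system call, with the
+    # process as subtitle) and a single total table (one row per process).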
+ def _get_result_tables(self, begin_ns, end_ns):
+ per_tid_tables = []
+ total_table = self._mi_create_result_table(self._MI_TABLE_CLASS_TOTAL,
+ begin_ns, end_ns)
for proc_stats in sorted(self._analysis.tids.values(),
key=operator.attrgetter('total_syscalls'),
continue
pid = proc_stats.pid
+
if proc_stats.pid is None:
pid = '?'
- print(line_format.format(
- '%s (%s, tid = %d)' % (proc_stats.comm, pid, proc_stats.tid),
- 'Count', 'Min', 'Average', 'Max', 'Stdev', 'Return values'))
+ subtitle = '%s (%s, TID: %d)' % (proc_stats.comm, pid,
+ proc_stats.tid)
+ result_table = \
+ self._mi_create_result_table(self._MI_TABLE_CLASS_PER_TID_STATS,
+ begin_ns, end_ns, subtitle)
for syscall in sorted(proc_stats.syscalls.values(),
key=operator.attrgetter('count'),
return_count[return_key] += 1
- min_duration = round(syscall.min_duration / 1000, 3)
- max_duration = round(syscall.max_duration / 1000, 3)
- avg_duration = round(
- syscall.total_duration / syscall.count / 1000, 3)
-
if len(durations) > 2:
- stdev = round(statistics.stdev(durations) / 1000, 3)
+ stdev = mi.Duration(statistics.stdev(durations))
else:
+ stdev = mi.Unknown()
+
+ result_table.append_row(
+ syscall=mi.Syscall(syscall.name),
+ count=mi.Integer(syscall.count),
+ min_duration=mi.Duration(syscall.min_duration),
+ avg_duration=mi.Duration(syscall.total_duration / syscall.count),
+ max_duration=mi.Duration(syscall.max_duration),
+ stdev_duration=stdev,
+ return_values=mi.String(str(return_count)),
+ )
+
+ per_tid_tables.append(result_table)
+ total_table.append_row(
+ process=mi.Process(proc_stats.comm, pid=proc_stats.pid,
+ tid=proc_stats.tid),
+ count=mi.Integer(proc_stats.total_syscalls),
+ )
+
+ return total_table, per_tid_tables
+
+ def _print_results(self, total_table, per_tid_tables):
+ line_format = '{:<38} {:>14} {:>14} {:>14} {:>12} {:>10} {:<14}'
+
+ print('Per-TID syscalls statistics (usec)')
+ total_calls = 0
+
+ for total_row, table in zip(total_table.rows, per_tid_tables):
+ print(line_format.format(table.subtitle,
+ 'Count', 'Min', 'Average', 'Max',
+ 'Stdev', 'Return values'))
+ for row in table.rows:
+ syscall_name = row.syscall.name
+ syscall_count = row.count.value
+ min_duration = round(row.min_duration.to_us(), 3)
+ avg_duration = round(row.avg_duration.to_us(), 3)
+ max_duration = round(row.max_duration.to_us(), 3)
+
+ if type(row.stdev_duration) is mi.Unknown:
stdev = '?'
+ else:
+ stdev = round(row.stdev_duration.to_us(), 3)
- name = syscall.name
+ proc_total_calls = total_row.count.value
print(line_format.format(
- ' - ' + name, syscall.count, min_duration, avg_duration,
- max_duration, stdev, str(return_count)))
+ ' - ' + syscall_name, syscall_count, min_duration,
+ avg_duration, max_duration, stdev,
+ row.return_values.value))
- print(line_format.format('Total:', proc_stats.total_syscalls,
+ print(line_format.format('Total:', proc_total_calls,
'', '', '', '', ''))
print('-' * 113)
+ total_calls += proc_total_calls
- print('\nTotal syscalls: %d' % (self._analysis.total_syscalls))
+ print('\nTotal syscalls: %d' % (total_calls))
def _add_arguments(self, ap):
Command._add_proc_filter_args(ap)
-def run():
- syscallscmd = SyscallsAnalysis()
+def _run(mi_mode):
+ syscallscmd = SyscallsAnalysis(mi_mode=mi_mode)
syscallscmd.run()
+
+
+# entry point (human)
+def run():
+ _run(mi_mode=False)
+
+
+# entry point (MI)
+def run_mi():
+ _run(mi_mode=True)
class Analysis:
+ TICK_CB = 'tick'
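+    # name of the notification sent at the end of each analysis period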
+
def __init__(self, state, conf):
self._state = state
self._conf = conf
def _end_period(self):
self._end_period_cb()
- self._send_notification_cb('output_results',
+ self._send_notification_cb(Analysis.TICK_CB,
begin_ns=self._period_start_ts,
end_ns=self._last_event_ts)
self.reset()