TMP: add MI test script
author Philippe Proulx <eeppeliteloop@gmail.com>
Fri, 6 Nov 2015 20:57:02 +0000 (15:57 -0500)
committer Antoine Busque <abusque@efficios.com>
Mon, 9 Nov 2015 00:28:14 +0000 (19:28 -0500)
lttnganalyses/cli/mitest.py [new file with mode: 0644]
mitest.py [new file with mode: 0644]

diff --git a/lttnganalyses/cli/mitest.py b/lttnganalyses/cli/mitest.py
new file mode 100644 (file)
index 0000000..95e5448
--- /dev/null
+++ b/lttnganalyses/cli/mitest.py
@@ -0,0 +1,228 @@
+# The MIT License (MIT)
+#
+# Copyright (C) 2015 - Julien Desfossez <jdesfossez@efficios.com>
+#               2015 - Antoine Busque <abusque@efficios.com>
+#               2015 - Philippe Proulx <pproulx@efficios.com>
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+from .command import Command
+from ..core import cputop
+from ..ascii_graph import Pyasciigraph
+import operator
+from . import mi
+
+
+class Cputop(Command):
+    _DESC = """The cputop command."""
+    _ANALYSIS_CLASS = cputop.Cputop
+    _MI_TITLE = 'Top CPU usage'
+    _MI_DESCRIPTION = 'Per-TID, per-CPU, and total top CPU usage'
+    _MI_TAGS = [mi.Tags.CPU, mi.Tags.TOP]
+    _MI_TABLE_CLASS_PER_PROC = 'per-process'
+    _MI_TABLE_CLASS_PER_CPU = 'per-cpu'
+    _MI_TABLE_CLASS_TOTAL = 'total'
+    _MI_TABLE_CLASS_SUMMARY = 'summary'
+    _MI_TABLE_CLASSES = [
+        (
+            _MI_TABLE_CLASS_PER_PROC,
+            'Per-TID top CPU usage', [
+                ('process', 'Process', mi.Process),
+                ('migrations', 'Migration count', mi.Integer, 'migrations'),
+                ('usage', 'CPU usage', mi.Ratio),
+            ]
+        ),
+        (
+            _MI_TABLE_CLASS_PER_CPU,
+            'Per-CPU top CPU usage', [
+                ('cpu', 'CPU', mi.Cpu),
+                ('usage', 'CPU usage', mi.Ratio),
+            ]),
+        (
+            _MI_TABLE_CLASS_TOTAL,
+            'Total CPU usage', [
+                ('usage', 'CPU usage', mi.Ratio),
+            ]
+        ),
+        (
+            _MI_TABLE_CLASS_SUMMARY,
+            'CPU usage - summary', [
+                ('time_range', 'Time range', mi.TimeRange),
+                ('usage', 'Total CPU usage', mi.Ratio),
+            ]
+        ),
+    ]
+
+    def _filter_process(self, proc):
+        # Exclude swapper
+        if proc.tid == 0:
+            return False
+
+        if self._args.proc_list and proc.comm not in self._args.proc_list:
+            return False
+
+        return True
+
+    def _analysis_tick(self, begin_ns, end_ns):
+        per_tid_table = self._get_per_tid_usage_result_table(begin_ns, end_ns)
+        per_cpu_table = self._get_per_cpu_usage_result_table(begin_ns, end_ns)
+        total_table = self._get_total_usage_result_table(begin_ns, end_ns)
+
+        if self._mi_mode:
+            self._mi_append_result_table(per_tid_table)
+            self._mi_append_result_table(per_cpu_table)
+            self._mi_append_result_table(total_table)
+        else:
+            self._print_date(begin_ns, end_ns)
+            self._print_per_tid_usage(per_tid_table)
+            self._print_per_cpu_usage(per_cpu_table)
+
+            if total_table:
+                self._print_total_cpu_usage(total_table)
+
+    def _create_summary_result_tables(self):
+        total_tables = self._mi_get_result_tables(self._MI_TABLE_CLASS_TOTAL)
+        begin = total_tables[0].timerange.begin
+        end = total_tables[-1].timerange.end
+        summary_table = \
+            self._mi_create_result_table(self._MI_TABLE_CLASS_SUMMARY,
+                                         begin, end)
+
+        for total_table in total_tables:
+            usage = total_table.rows[0].usage
+            summary_table.append_row(
+                time_range=total_table.timerange,
+                usage=usage,
+            )
+
+        self._mi_clear_result_tables()
+        self._mi_append_result_table(summary_table)
+
+    def _get_per_tid_usage_result_table(self, begin_ns, end_ns):
+        result_table = \
+            self._mi_create_result_table(self._MI_TABLE_CLASS_PER_PROC,
+                                         begin_ns, end_ns)
+        count = 0
+
+        for tid in sorted(self._analysis.tids.values(),
+                          key=operator.attrgetter('usage_percent'),
+                          reverse=True):
+            if not self._filter_process(tid):
+                continue
+
+            result_table.append_row(
+                process=mi.Process(tid.comm, tid=tid.tid),
+                migrations=mi.Integer(tid.migrate_count),
+                usage=mi.Ratio.from_percentage(tid.usage_percent)
+            )
+            count += 1
+
+            if self._args.limit > 0 and count >= self._args.limit:
+                break
+
+        return result_table
+
+    def _get_per_cpu_usage_result_table(self, begin_ns, end_ns):
+        result_table = \
+            self._mi_create_result_table(self._MI_TABLE_CLASS_PER_CPU,
+                                         begin_ns, end_ns)
+
+        for cpu in sorted(self._analysis.cpus.values(),
+                          key=operator.attrgetter('usage_percent'),
+                          reverse=True):
+            result_table.append_row(
+                cpu=mi.Cpu(cpu.cpu_id),
+                usage=mi.Ratio.from_percentage(cpu.usage_percent)
+            )
+
+        return result_table
+
+    def _get_total_usage_result_table(self, begin_ns, end_ns):
+        result_table = \
+            self._mi_create_result_table(self._MI_TABLE_CLASS_TOTAL,
+                                         begin_ns, end_ns)
+
+        cpu_count = len(self.state.cpus)
+        usage_percent = 0
+
+        if not cpu_count:
+            return
+
+        for cpu in sorted(self._analysis.cpus.values(),
+                          key=operator.attrgetter('usage_percent'),
+                          reverse=True):
+            usage_percent += cpu.usage_percent
+
+        # average per CPU
+        usage_percent /= cpu_count
+        result_table.append_row(
+            usage=mi.Ratio.from_percentage(usage_percent),
+        )
+
+        return result_table
+
+    def _print_per_tid_usage(self, result_table):
+        graph = Pyasciigraph()
+        values = []
+
+        for row in result_table.rows:
+            process_do = row.process
+            migration_count = row.migrations.value
+            output_str = '%s (%d)' % (process_do.name, process_do.tid)
+
+            if migration_count > 0:
+                output_str += ', %d migrations' % (migration_count)
+
+            values.append((output_str, row.usage.to_percentage()))
+
+        for line in graph.graph('Per-TID CPU Usage', values, unit=' %'):
+            print(line)
+
+    def _print_per_cpu_usage(self, result_table):
+        graph = Pyasciigraph()
+        values = []
+
+        for row in result_table.rows:
+            cpu = row.cpu
+            values.append(('CPU %d' % cpu.id, row.usage.to_percentage()))
+
+        for line in graph.graph('Per-CPU Usage', values, unit=' %'):
+            print(line)
+
+    def _print_total_cpu_usage(self, result_table):
+        usage_percent = result_table.rows[0].usage.to_percentage()
+        print('\nTotal CPU Usage: %0.02f%%\n' % usage_percent)
+
+    def _add_arguments(self, ap):
+        Command._add_proc_filter_args(ap)
+
+
+def _run(mi_mode):
+    cputopcmd = Cputop(mi_mode=mi_mode)
+    cputopcmd.run()
+
+
+# entry point (human)
+def run():
+    _run(mi_mode=False)
+
+
+# entry point (MI)
+def run_mi():
+    _run(mi_mode=True)
diff --git a/mitest.py b/mitest.py
new file mode 100644 (file)
index 0000000..a9c7eb3
--- /dev/null
+++ b/mitest.py
@@ -0,0 +1,267 @@
+#!/usr/bin/python3
+
+import json
+import string
+import random
+import argparse
+from lttnganalyses.cli import mi
+
+
+_TABLE_CLASS_PER_PROC = 'per-proc'
+_TABLE_CLASS_PER_SYSCALL = 'per-syscall'
+_TABLE_CLASS_PER_IRQ = 'per-irq'
+_TABLE_CLASSES = {
+    _TABLE_CLASS_PER_PROC: mi.TableClass(
+        _TABLE_CLASS_PER_PROC,
+        'Per-process stuff', [
+            ('proc', 'Process', mi.Process),
+            ('count', 'Count', mi.Integer, 'things'),
+            ('flag', 'Flag', mi.Boolean),
+            ('value', 'Value', mi.Float, 'thou'),
+            ('name', 'Name', mi.String),
+            ('ratio', 'Ratio', mi.Ratio),
+            ('ts', 'Timestamp', mi.Timestamp),
+        ]
+    ),
+    _TABLE_CLASS_PER_SYSCALL: mi.TableClass(
+        _TABLE_CLASS_PER_SYSCALL,
+        'Per-syscall stuff', [
+            ('syscall', 'System call', mi.Syscall),
+            ('duration', 'Duration', mi.Duration),
+            ('size', 'Size', mi.Size),
+            ('bitrate', 'Bitrate', mi.Bitrate),
+            ('time_range', 'Time range', mi.TimeRange),
+        ]
+    ),
+    _TABLE_CLASS_PER_IRQ: mi.TableClass(
+        _TABLE_CLASS_PER_IRQ,
+        'Per-interrupt stuff', [
+            ('interrupt', 'Interrupt', mi.Irq),
+            ('fd', 'File descriptor', mi.Fd),
+            ('path', 'File path', mi.Path),
+            ('cpu', 'CPU', mi.Cpu),
+            ('disk', 'Disk', mi.Disk),
+            ('part', 'Partition', mi.Partition),
+            ('netif', 'Network interface', mi.NetIf),
+        ]
+    )
+}
+
+
+def _print_metadata():
+    infos = mi.get_metadata(version=[1, 2, 3, 'dev'], title='LAMI test',
+                            description='LTTng analyses machine interface test',
+                            authors=['Phil Proulx'], url='http://perdu.com',
+                            tags=['lami', 'test'],
+                            table_classes=_TABLE_CLASSES.values())
+    print(json.dumps(infos))
+
+
+def _parse_args():
+    ap = argparse.ArgumentParser()
+    ap.add_argument('--metadata', action='store_true')
+    ap.add_argument('--begin', type=int, default=1000)
+    ap.add_argument('--end', type=int, default=2000)
+    ap.add_argument('-d', '--dynamic', action='store_true')
+    ap.add_argument('-r', '--dynamic-rows', type=int, default=25)
+    ap.add_argument('-c', '--dynamic-columns', type=int, default=10)
+
+    return ap.parse_args()
+
+
+def _print_tables(tables):
+    obj = {
+        'results': [t.to_native_object() for t in tables],
+    }
+
+    print(json.dumps(obj))
+
+
+def _print_dynamic_table(begin, end, rows, columns):
+    def gen_irq_name(size=6, chars=string.ascii_uppercase + string.digits):
+        return ''.join(random.choice(chars) for _ in range(size))
+
+    column_tuples = [
+        ('irq', 'Interrupt', mi.Irq),
+    ]
+
+    for i in range(columns):
+        column_tuples.append((
+            'count{}'.format(i),
+            'Count ({} to {})'.format(i * 5, (i + 1) * 5),
+            mi.Integer,
+            'interrupts'
+        ))
+
+    table_class = mi.TableClass(None, 'What a dynamic table!', column_tuples)
+    result_table = mi.ResultTable(table_class, begin, end)
+
+    for i in range(rows):
+        row_tuple = [
+            mi.Irq(bool(random.getrandbits(1)), i, gen_irq_name())
+        ]
+
+        for j in range(columns):
+            row_tuple.append(mi.Integer(random.randint(0, 5000)))
+
+        result_table.append_row_tuple(tuple(row_tuple))
+
+    _print_tables([result_table])
+
+
+def _print_static_tables(begin, end):
+    per_proc_table = mi.ResultTable(_TABLE_CLASSES[_TABLE_CLASS_PER_PROC], begin, end)
+    per_syscall_table = mi.ResultTable(_TABLE_CLASSES[_TABLE_CLASS_PER_SYSCALL], begin, end)
+    per_irq_table = mi.ResultTable(_TABLE_CLASSES[_TABLE_CLASS_PER_IRQ], begin, end)
+    per_irq_table_sub = mi.ResultTable(_TABLE_CLASSES[_TABLE_CLASS_PER_IRQ], begin, end,
+                                       'with overridden title')
+
+    # per-process
+    per_proc_table.append_row_tuple((
+        mi.Process('zsh', pid=23),
+        mi.Integer(23),
+        mi.Boolean(False),
+        mi.Float(17.2832),
+        mi.String('typical'),
+        mi.Ratio(0.154),
+        mi.Timestamp(817232),
+    ))
+    per_proc_table.append_row_tuple((
+        mi.Process('chromium', tid=4987),
+        mi.Integer(19),
+        mi.Boolean(False),
+        mi.Float(-19457.15),
+        mi.String('beam'),
+        mi.Ratio(0.001),
+        mi.Timestamp(1194875),
+    ))
+    per_proc_table.append_row_tuple((
+        mi.Process('terminator'),
+        mi.Integer(-145),
+        mi.Unknown(),
+        mi.Float(22.22),
+        mi.String('dry'),
+        mi.Ratio(0.94),
+        mi.Timestamp(984987658),
+    ))
+    per_proc_table.append_row_tuple((
+        mi.Process(pid=1945, tid=4497),
+        mi.Integer(31416),
+        mi.Boolean(True),
+        mi.Float(17.34),
+        mi.Empty(),
+        mi.Ratio(1.5),
+        mi.Timestamp(154484512),
+    ))
+
+    # per-syscall
+    per_syscall_table.append_row_tuple((
+        mi.Syscall('read'),
+        mi.Duration(2398123),
+        mi.Size(8123982),
+        mi.Bitrate(223232),
+        mi.TimeRange(98233, 1293828),
+    ))
+    per_syscall_table.append_row_tuple((
+        mi.Syscall('write'),
+        mi.Duration(412434),
+        mi.Size(5645),
+        mi.Bitrate(25235343),
+        mi.TimeRange(5454, 2354523),
+    ))
+    per_syscall_table.append_row_tuple((
+        mi.Syscall('sync'),
+        mi.Duration(2312454),
+        mi.Size(23433),
+        mi.Empty(),
+        mi.TimeRange(12, 645634545454),
+    ))
+    per_syscall_table.append_row_tuple((
+        mi.Syscall('fstat'),
+        mi.Unknown(),
+        mi.Size(2343334),
+        mi.Bitrate(5864684),
+        mi.TimeRange(2134, 645634545),
+    ))
+    per_syscall_table.append_row_tuple((
+        mi.Syscall('sync'),
+        mi.Duration(564533),
+        mi.Size(56875),
+        mi.Bitrate(4494494494),
+        mi.Empty(),
+    ))
+
+    # per-interrupt
+    per_irq_table.append_row_tuple((
+        mi.Irq(True, 15, 'keyboard'),
+        mi.Fd(3),
+        mi.Path('/etc/passwd'),
+        mi.Cpu(2),
+        mi.Disk('sda'),
+        mi.Partition('sdb3'),
+        mi.NetIf('eth0'),
+    ))
+    per_irq_table.append_row_tuple((
+        mi.Irq(False, 7, 'soft-timer'),
+        mi.Fd(1),
+        mi.Path('/dev/null'),
+        mi.Unknown(),
+        mi.Disk('hda'),
+        mi.Partition('mmcblk0p2'),
+        mi.NetIf('enp3s25'),
+    ))
+    per_irq_table.append_row_tuple((
+        mi.Irq(True, 34),
+        mi.Empty(),
+        mi.Empty(),
+        mi.Cpu(1),
+        mi.Disk('sdc'),
+        mi.Partition('sdc3'),
+        mi.NetIf('lo'),
+    ))
+
+    # per-interrupt with subtitle
+    per_irq_table_sub.append_row_tuple((
+        mi.Irq(False, 12, 'soft-like-silk'),
+        mi.Fd(10),
+        mi.Path('/home/bob/meowmix.txt'),
+        mi.Cpu(0),
+        mi.Disk('sdb'),
+        mi.Partition('sdb2'),
+        mi.NetIf('eth1'),
+    ))
+    per_irq_table_sub.append_row_tuple((
+        mi.Irq(True, 1, 'mouse2'),
+        mi.Fd(5),
+        mi.Empty(),
+        mi.Cpu(7),
+        mi.Disk('vda'),
+        mi.Partition('vda3'),
+        mi.NetIf('wlp3s0'),
+    ))
+
+    # print
+    _print_tables([
+        per_proc_table,
+        per_syscall_table,
+        per_irq_table,
+        per_irq_table_sub,
+    ])
+
+
+def _mitest():
+    args = _parse_args()
+
+    if args.metadata:
+        _print_metadata()
+        return
+
+    if args.dynamic:
+        _print_dynamic_table(args.begin, args.end,
+                             args.dynamic_rows, args.dynamic_columns)
+    else:
+        _print_static_tables(args.begin, args.end)
+
+
+if __name__ == '__main__':
+    _mitest()
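
For anyone wanting to exercise the new test script, here is a minimal consumer sketch. It assumes python3 is on the PATH, that mitest.py is run from the repository root with the lttnganalyses package importable, and that the emitted JSON uses a 'title' key for the metadata object; the 'results' key matches the object built by _print_tables() above, and the argparse flags are the ones defined in _parse_args().

    #!/usr/bin/env python3
    # Sketch: run mitest.py in its two modes and parse the JSON it prints.
    import json
    import subprocess

    # --metadata prints the LAMI metadata object built by mi.get_metadata().
    metadata = json.loads(
        subprocess.check_output(['python3', 'mitest.py', '--metadata']))
    print('metadata title:', metadata.get('title'))

    # Without --metadata, the script prints the static result tables for the
    # [--begin, --end] time range as a {'results': [...]} object.
    static = json.loads(
        subprocess.check_output(['python3', 'mitest.py',
                                 '--begin', '1000', '--end', '2000']))
    print('static result tables:', len(static['results']))

    # --dynamic generates one random table with -r rows and -c count columns.
    dynamic = json.loads(
        subprocess.check_output(['python3', 'mitest.py', '--dynamic',
                                 '-r', '5', '-c', '3']))
    print('dynamic table keys:', sorted(dynamic['results'][0].keys()))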