Merge pull request #31 from abusque/staging
author Julien Desfossez <jdesfossez@users.noreply.github.com>
Fri, 26 Feb 2016 16:34:39 +0000 (11:34 -0500)
committer Julien Desfossez <jdesfossez@users.noreply.github.com>
Fri, 26 Feb 2016 16:34:39 +0000 (11:34 -0500)
v0.4 changeset

39 files changed:
LICENSE [new file with mode: 0644]
README.rst
lttnganalyses/ascii_graph/__init__.py [deleted file]
lttnganalyses/cli/command.py
lttnganalyses/cli/cputop.py
lttnganalyses/cli/io.py
lttnganalyses/cli/irq.py
lttnganalyses/cli/memtop.py
lttnganalyses/cli/mitest.py [deleted file]
lttnganalyses/cli/sched.py
lttnganalyses/cli/termgraph.py [new file with mode: 0644]
lttnganalyses/common/format_utils.py [new file with mode: 0644]
lttnganalyses/core/cputop.py
lttnganalyses/core/io.py
lttnganalyses/core/irq.py
lttnganalyses/core/memtop.py
lttnganalyses/core/sched.py
lttnganalyses/core/stats.py [new file with mode: 0644]
lttnganalyses/core/syscalls.py
lttnganalyses/linuxautomaton/common.py
lttnganalyses/linuxautomaton/irq.py
lttnganalyses/linuxautomaton/sched.py
lttnganalyses/linuxautomaton/sv.py
mit-license.txt [new file with mode: 0644]
mitest.py [deleted file]
parser_generator.py
setup.py
tests/__init__.py [new file with mode: 0644]
tests/analysis_test.py [new file with mode: 0644]
tests/expected/cputop.txt [new file with mode: 0644]
tests/expected/iolatencytop.txt [new file with mode: 0644]
tests/expected/iousagetop.txt [new file with mode: 0644]
tests/expected/irqlog.txt [new file with mode: 0644]
tests/expected/irqstats.txt [new file with mode: 0644]
tests/gen_ctfwriter.py [new file with mode: 0755]
tests/test_cputop.py [new file with mode: 0644]
tests/test_io.py [new file with mode: 0644]
tests/test_irq.py [new file with mode: 0644]
tests/trace_writer.py [new file with mode: 0644]

diff --git a/LICENSE b/LICENSE
new file mode 100644 (file)
index 0000000..fababb8
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,5 @@
+LTTng-Analyses - Licensing
+
+These analyses are released under the MIT license. This license is used to
+allow the use of these analyses in both free and proprietary software. See
+mit-license.txt for details.
diff --git a/README.rst b/README.rst
index c20e606e65e0766aa23857caf85572cab6ab0fb6..39ffed2f89cafd9916f4daa170fdcef3e82bf692 100644 (file)
@@ -74,7 +74,7 @@ The **latest development version** can be installed directly from GitHub:
 
 .. code-block:: bash
 
-    pip3 install --upgrade https://github.com/lttng/lttng-analyses/tarball/master
+    pip3 install --upgrade git+git://github.com/lttng/lttng-analyses.git
 
 
 ==============
diff --git a/lttnganalyses/ascii_graph/__init__.py b/lttnganalyses/ascii_graph/__init__.py
deleted file mode 100644 (file)
index 167c716..0000000
+++ /dev/null
@@ -1,229 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright (c) 2012 Pierre-Francois Carpentier <carpentier.pf@gmail.com>
-#
-# https://github.com/kakwa/py-ascii-graph/
-#
-# Permission is hereby granted, free of charge, to any person obtaining
-# a copy of this software and associated documentation files (the
-# "Software"), to deal in the Software without restriction, including
-# without limitation the rights to use, copy, modify, merge, publish,
-# distribute, sublicense, and/or sell copies of the Software, and to
-# permit persons to whom the Software is furnished to do so, subject to
-# the following conditions:
-#
-# The above copyright notice and this permission notice shall be
-# included in all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-from __future__ import unicode_literals
-import sys
-import os
-
-
-class Pyasciigraph:
-    def __init__(self, line_length=79, min_graph_length=50,
-                 separator_length=2):
-        """Constructor of Pyasciigraph
-
-        :param int line_length: the max number of char on a line
-                if any line cannot be shorter,
-                it will go over this limit
-        :param int min_graph_length: the min number of char used by the graph
-        :param int separator_length: the length of field separator
-        """
-        self.line_length = line_length
-        self.separator_length = separator_length
-        self.min_graph_length = min_graph_length
-
-    def _u(self, x):
-        if sys.version < '3':
-            import codecs
-            return codecs.unicode_escape_decode(x)[0]
-        else:
-            return x
-
-    def _get_maximum(self, data):
-        all_max = {}
-        all_max['value_max_length'] = 0
-        all_max['info_max_length'] = 0
-        all_max['max_value'] = 0
-
-        for (info, value) in data:
-            if value > all_max['max_value']:
-                all_max['max_value'] = value
-
-            if len(info) > all_max['info_max_length']:
-                all_max['info_max_length'] = len(info)
-
-            if len(str(value)) > all_max['value_max_length']:
-                all_max['value_max_length'] = len(str(value))
-        return all_max
-
-    def _gen_graph_string(self, value, max_value, graph_length, start_value):
-        if max_value == 0:
-            number_of_square = int(value * graph_length)
-        else:
-            number_of_square = int(value * graph_length / max_value)
-        number_of_space = int(start_value - number_of_square)
-        return '█' * number_of_square + self._u(' ') * number_of_space
-
-    def _console_size(self):
-        TERMSIZE = 80
-        return int(os.environ.get('COLUMNS', TERMSIZE)) - 1
-
-    def _gen_info_string(self, info, start_info, line_length, info_before):
-        number_of_space = (line_length - start_info - len(info))
-        if info_before:
-            return self._u(' ') * number_of_space + info
-        else:
-            return info + self._u(' ') * number_of_space
-
-    def _gen_value_string(self, value, start_value, start_info, unit, count):
-        if not count:
-            v = str("%0.02f" % value)
-        else:
-            # we don't want to add .00 to count values (only integers)
-            v = str(value)
-        number_space = start_info -\
-            start_value -\
-            len(v) -\
-            self.separator_length
-
-        return ' ' * number_space +\
-            v + str(unit) +\
-            ' ' * self.separator_length
-
-    def _sanitize_string(self, string):
-        # get the type of a unicode string
-        unicode_type = type(self._u('t'))
-        input_type = type(string)
-        if input_type is str:
-            if sys.version_info.major < 3:  # pragma: no cover
-                info = string
-            else:
-                info = string
-        elif input_type is unicode_type:
-            info = string
-        elif input_type is int or input_type is float:
-            if sys.version_info.major < 3:  # pragma: no cover
-                info = string
-            else:
-                info = str(string)
-        return info
-
-    def _sanitize_data(self, data):
-        ret = []
-        for item in data:
-            ret.append((self._sanitize_string(item[0]), item[1]))
-        return ret
-
-    def graph(self, label, data, sort=0, with_value=True, unit="",
-              info_before=False, count=False):
-        """function generating the graph
-
-        :param string label: the label of the graph
-        :param iterable data: the data (list of tuple (info, value))
-                info must be "castable" to a unicode string
-                value must be an int or a float
-        :param int sort: flag sorted
-                0: not sorted (same order as given) (default)
-                1: increasing order
-                2: decreasing order
-        :param boolean with_value: flag printing value
-                True: print the numeric value (default)
-                False: don't print the numeric value
-        :rtype: a list of strings (each lines)
-
-        """
-        result = []
-        san_data = self._sanitize_data(data)
-        san_label = self._sanitize_string(label)
-
-        if sort == 1:
-            san_data = sorted(san_data, key=lambda value: value[1],
-                              reverse=False)
-        elif sort == 2:
-            san_data = sorted(san_data, key=lambda value: value[1],
-                              reverse=True)
-
-        all_max = self._get_maximum(san_data)
-
-        real_line_length = max(self.line_length, len(label))
-
-        min_line_length = self.min_graph_length +\
-            2 * self.separator_length +\
-            all_max['value_max_length'] +\
-            all_max['info_max_length']
-
-        if min_line_length < real_line_length:
-            # calcul of where to start info
-            start_info = self.line_length -\
-                all_max['info_max_length']
-            # calcul of where to start value
-            start_value = start_info -\
-                self.separator_length -\
-                all_max['value_max_length']
-            # calcul of where to end graph
-            graph_length = start_value -\
-                self.separator_length
-        else:
-            # calcul of where to start value
-            start_value = self.min_graph_length +\
-                self.separator_length
-            # calcul of where to start info
-            start_info = start_value +\
-                all_max['value_max_length'] +\
-                self.separator_length
-            # calcul of where to end graph
-            graph_length = self.min_graph_length
-            # calcul of the real line length
-            real_line_length = min_line_length
-
-        real_line_length = min(real_line_length, self._console_size())
-        result.append(san_label)
-        result.append(self._u('#') * real_line_length)
-
-        for item in san_data:
-            info = item[0]
-            value = item[1]
-
-            graph_string = self._gen_graph_string(
-                value,
-                all_max['max_value'],
-                graph_length,
-                start_value)
-
-            if with_value:
-                value_string = self._gen_value_string(
-                    value,
-                    start_value,
-                    start_info, unit, count)
-            else:
-                value_string = ""
-
-            info_string = self._gen_info_string(
-                info,
-                start_info,
-                real_line_length, info_before)
-            if info_before:
-                new_line = info_string + " " + graph_string + value_string
-            else:
-                new_line = graph_string + value_string + info_string
-            result.append(new_line)
-
-        return result
-
-if __name__ == '__main__':
-    test = [('long_label', 423), ('sl', 1234), ('line3', 531),
-            ('line4', 200), ('line5', 834)]
-    graph = Pyasciigraph()
-    for line in graph.graph('test print', test):
-        print(line)
diff --git a/lttnganalyses/cli/command.py b/lttnganalyses/cli/command.py
index 93df78657c181e41d43422ad9ac6673c0d12a3e2..c1fb08363e308909010854d910f261b2c4d93ffb 100644 (file)
@@ -174,6 +174,9 @@ class Command:
 
     def _read_tracer_version(self):
         kernel_path = None
+        # remove the trailing /
+        while self._args.path.endswith('/'):
+            self._args.path = self._args.path[:-1]
         for root, _, _ in os.walk(self._args.path):
             if root.endswith('kernel'):
                 kernel_path = root
@@ -183,15 +186,25 @@ class Command:
             self._gen_error('Could not find kernel trace directory')
 
         try:
-            metadata = subprocess.getoutput(
+            ret, metadata = subprocess.getstatusoutput(
                 'babeltrace -o ctf-metadata "%s"' % kernel_path)
         except subprocess.CalledProcessError:
             self._gen_error('Cannot run babeltrace on the trace, cannot read'
                             ' tracer version')
 
-        major_match = re.search(r'tracer_major = (\d+)', metadata)
-        minor_match = re.search(r'tracer_minor = (\d+)', metadata)
-        patch_match = re.search(r'tracer_patchlevel = (\d+)', metadata)
+        # fallback to reading the text metadata if babeltrace failed to
+        # output the CTF metadata
+        if ret != 0:
+            try:
+                metadata = subprocess.getoutput(
+                    'cat "%s"' % os.path.join(kernel_path, 'metadata'))
+            except subprocess.CalledProcessError:
+                self._gen_error('Cannot read the metadata of the trace, cannot'
+                                ' extract tracer version')
+
+        major_match = re.search(r'tracer_major = "*(\d+)"*', metadata)
+        minor_match = re.search(r'tracer_minor = "*(\d+)"*', metadata)
+        patch_match = re.search(r'tracer_patchlevel = "*(\d+)"*', metadata)
 
         if not major_match or not minor_match or not patch_match:
             self._gen_error('Malformed metadata, cannot read tracer version')
@@ -284,18 +297,18 @@ class Command:
         self._analysis_conf.period_begin_ev_name = args.period_begin
         self._analysis_conf.period_end_ev_name = args.period_end
         self._analysis_conf.period_begin_key_fields = \
-                                            args.period_begin_key.split(',')
+            args.period_begin_key.split(',')
 
         if args.period_end_key:
             self._analysis_conf.period_end_key_fields = \
-                                            args.period_end_key.split(',')
+                args.period_end_key.split(',')
         else:
             self._analysis_conf.period_end_key_fields = \
-                                    self._analysis_conf.period_begin_key_fields
+                self._analysis_conf.period_begin_key_fields
 
         if args.period_key_value:
             self._analysis_conf.period_key_value = \
-                                        tuple(args.period_key_value.split(','))
+                tuple(args.period_key_value.split(','))
 
         if args.cpu:
             self._analysis_conf.cpu_list = args.cpu.split(',')
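
For reference, the tracer-version detection above reduces to the following
standalone sketch (simplified error handling; babeltrace must be on the PATH
for the first branch, and the plain-text fallback assumes a readable
metadata file in the kernel trace directory):

    import os
    import re
    import subprocess

    def read_tracer_version(kernel_path):
        # Prefer babeltrace's regenerated CTF metadata.
        ret, metadata = subprocess.getstatusoutput(
            'babeltrace -o ctf-metadata "%s"' % kernel_path)

        if ret != 0:
            # Fall back to the on-disk metadata file; values there may be
            # quoted, hence the optional quotes in the patterns below.
            with open(os.path.join(kernel_path, 'metadata')) as metadata_file:
                metadata = metadata_file.read()

        matches = [re.search(r'tracer_%s = "*(\d+)"*' % field, metadata)
                   for field in ('major', 'minor', 'patchlevel')]

        if not all(matches):
            raise ValueError('Malformed metadata, cannot read tracer version')

        return tuple(int(match.group(1)) for match in matches)
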
diff --git a/lttnganalyses/cli/cputop.py b/lttnganalyses/cli/cputop.py
index 17d6f214e29c126111c1feab490f2954b58db29c..ff24f80ed45123b76b7ce61e30bc972989d2f25f 100644 (file)
@@ -25,8 +25,8 @@
 import operator
 from .command import Command
 from ..core import cputop
-from ..ascii_graph import Pyasciigraph
 from . import mi
+from . import termgraph
 
 
 class Cputop(Command):
@@ -45,7 +45,7 @@ class Cputop(Command):
             'Per-TID top CPU usage', [
                 ('process', 'Process', mi.Process),
                 ('migrations', 'Migration count', mi.Integer, 'migrations'),
-                ('priority', 'Priority', mi.Integer),
+                ('prio_list', 'Chronological priorities', mi.String),
                 ('usage', 'CPU usage', mi.Ratio),
             ]
         ),
@@ -114,10 +114,13 @@ class Cputop(Command):
         for tid in sorted(self._analysis.tids.values(),
                           key=operator.attrgetter('usage_percent'),
                           reverse=True):
+            prio_list = str([prio_evt.prio for
+                             prio_evt in tid.prio_list])
+
             result_table.append_row(
                 process=mi.Process(tid.comm, tid=tid.tid),
                 migrations=mi.Integer(tid.migrate_count),
-                priority=mi.Integer(tid.prio),
+                prio_list=mi.String(prio_list),
                 usage=mi.Ratio.from_percentage(tid.usage_percent)
             )
             count += 1
@@ -166,37 +169,37 @@ class Cputop(Command):
         return result_table
 
     def _print_per_tid_usage(self, result_table):
-        graph = Pyasciigraph()
-        values = []
-
-        for row in result_table.rows:
-            process_do = row.process
-            migration_count = row.migrations.value
-            if row.priority.value is not None:
-                prio_str = 'prio: %d' % row.priority.value
-            else:
-                prio_str = 'prio: ?'
-            output_str = '%s (%d) (%s)' % (process_do.name, process_do.tid,
-                                           prio_str)
-
-            if migration_count > 0:
-                output_str += ', %d migrations' % (migration_count)
+        row_format = '{:<25} {:>10} {}'
+        label_header = row_format.format('Process', 'Migrations', 'Priorities')
+
+        def format_label(row):
+            return row_format.format(
+                '%s (%d)' % (row.process.name, row.process.tid),
+                row.migrations.value,
+                row.prio_list.value,
+            )
 
-            values.append((output_str, row.usage.to_percentage()))
+        graph = termgraph.BarGraph(
+            title='Per-TID Usage',
+            unit='%',
+            get_value=lambda row: row.usage.to_percentage(),
+            get_label=format_label,
+            label_header=label_header,
+            data=result_table.rows
+        )
 
-        for line in graph.graph('Per-TID CPU Usage', values, unit=' %'):
-            print(line)
+        graph.print_graph()
 
     def _print_per_cpu_usage(self, result_table):
-        graph = Pyasciigraph()
-        values = []
-
-        for row in result_table.rows:
-            cpu = row.cpu
-            values.append(('CPU %d' % cpu.id, row.usage.to_percentage()))
+        graph = termgraph.BarGraph(
+            title='Per-CPU Usage',
+            unit='%',
+            get_value=lambda row: row.usage.to_percentage(),
+            get_label=lambda row: 'CPU %d' % row.cpu.id,
+            data=result_table.rows
+        )
 
-        for line in graph.graph('Per-CPU Usage', values, unit=' %'):
-            print(line)
+        graph.print_graph()
 
     def _print_total_cpu_usage(self, result_table):
         usage_percent = result_table.rows[0].usage.to_percentage()
@@ -212,11 +215,9 @@ def _run(mi_mode):
     cputopcmd.run()
 
 
-# entry point (human)
 def run():
     _run(mi_mode=False)
 
 
-# entry point (MI)
 def run_mi():
     _run(mi_mode=True)
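
A quick illustration of the new per-TID label layout (values invented): the
format string left-aligns the process name in 25 columns and right-aligns the
migration count in 10, so each bar's label lines up under the header:

    row_format = '{:<25} {:>10} {}'
    print(row_format.format('Process', 'Migrations', 'Priorities'))
    print(row_format.format('firefox (1234)', 3, '[20, 20]'))
    # Process                   Migrations Priorities
    # firefox (1234)                     3 [20, 20]
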
diff --git a/lttnganalyses/cli/io.py b/lttnganalyses/cli/io.py
index fed102021b22c85c6af9928d43621e51115ce391..e3da95a85d20055e379f7286e4b8ae997c82aa47 100644 (file)
@@ -27,10 +27,11 @@ import operator
 import statistics
 import sys
 from . import mi
+from . import termgraph
 from ..core import io
+from ..common import format_utils
 from .command import Command
 from ..linuxautomaton import common
-from ..ascii_graph import Pyasciigraph
 
 
 _UsageTables = collections.namedtuple('_UsageTables', [
@@ -272,10 +273,6 @@ class IoAnalysisCommand(Command):
                     begin > self._args.end)
 
     def _filter_io_request(self, io_rq):
-        if io_rq.tid in self._analysis.tids:
-            proc = self._analysis.tids[io_rq.tid]
-        else:
-            proc = None
         return self._filter_size(io_rq.size) and \
             self._filter_latency(io_rq.duration) and \
             self._filter_time_range(io_rq.begin_ts, io_rq.end_ts)
@@ -289,9 +286,9 @@ class IoAnalysisCommand(Command):
             process=mi.Process(proc_stats.comm, pid=proc_stats.pid,
                                tid=proc_stats.tid),
             size=mi.Size(proc_stats.total_read),
-            disk_size=mi.Size(proc_stats.disk_read),
-            net_size=mi.Size(proc_stats.net_read),
-            unknown_size=mi.Size(proc_stats.unk_read),
+            disk_size=mi.Size(proc_stats.disk_io.read),
+            net_size=mi.Size(proc_stats.net_io.read),
+            unknown_size=mi.Size(proc_stats.unk_io.read),
         )
 
         return True
@@ -301,15 +298,15 @@ class IoAnalysisCommand(Command):
             process=mi.Process(proc_stats.comm, pid=proc_stats.pid,
                                tid=proc_stats.tid),
             size=mi.Size(proc_stats.total_write),
-            disk_size=mi.Size(proc_stats.disk_write),
-            net_size=mi.Size(proc_stats.net_write),
-            unknown_size=mi.Size(proc_stats.unk_write),
+            disk_size=mi.Size(proc_stats.disk_io.write),
+            net_size=mi.Size(proc_stats.net_io.write),
+            unknown_size=mi.Size(proc_stats.unk_io.write),
         )
 
         return True
 
     def _append_per_proc_block_read_usage_row(self, proc_stats, result_table):
-        if proc_stats.block_read == 0:
+        if proc_stats.block_io.read == 0:
             return False
 
         if proc_stats.comm:
@@ -320,13 +317,13 @@ class IoAnalysisCommand(Command):
         result_table.append_row(
             process=mi.Process(proc_name, pid=proc_stats.pid,
                                tid=proc_stats.tid),
-            size=mi.Size(proc_stats.block_read),
+            size=mi.Size(proc_stats.block_io.read),
         )
 
         return True
 
     def _append_per_proc_block_write_usage_row(self, proc_stats, result_table):
-        if proc_stats.block_write == 0:
+        if proc_stats.block_io.write == 0:
             return False
 
         if proc_stats.comm:
@@ -337,7 +334,7 @@ class IoAnalysisCommand(Command):
         result_table.append_row(
             process=mi.Process(proc_name, pid=proc_stats.pid,
                                tid=proc_stats.tid),
-            size=mi.Size(proc_stats.block_write),
+            size=mi.Size(proc_stats.block_io.write),
         )
 
         return True
@@ -402,26 +399,26 @@ class IoAnalysisCommand(Command):
         return fd_by_pid_str
 
     def _append_file_read_usage_row(self, file_stats, result_table):
-        if file_stats.read == 0:
+        if file_stats.io.read == 0:
             return False
 
         fd_owners = self._get_file_stats_fd_owners_str(file_stats)
         result_table.append_row(
             path=mi.Path(file_stats.filename),
-            size=mi.Size(file_stats.read),
+            size=mi.Size(file_stats.io.read),
             fd_owners=mi.String(fd_owners),
         )
 
         return True
 
     def _append_file_write_usage_row(self, file_stats, result_table):
-        if file_stats.write == 0:
+        if file_stats.io.write == 0:
             return False
 
         fd_owners = self._get_file_stats_fd_owners_str(file_stats)
         result_table.append_row(
             path=mi.Path(file_stats.filename),
-            size=mi.Size(file_stats.write),
+            size=mi.Size(file_stats.io.write),
             fd_owners=mi.String(fd_owners),
         )
 
@@ -457,7 +454,7 @@ class IoAnalysisCommand(Command):
 
     def _fill_per_process_block_read_usage_result_table(self, result_table):
         input_list = sorted(self._analysis.tids.values(),
-                            key=operator.attrgetter('block_read'),
+                            key=operator.attrgetter('block_io.read'),
                             reverse=True)
         self._fill_usage_result_table(
             input_list, self._append_per_proc_block_read_usage_row,
@@ -465,7 +462,7 @@ class IoAnalysisCommand(Command):
 
     def _fill_per_process_block_write_usage_result_table(self, result_table):
         input_list = sorted(self._analysis.tids.values(),
-                            key=operator.attrgetter('block_write'),
+                            key=operator.attrgetter('block_io.write'),
                             reverse=True)
         self._fill_usage_result_table(
             input_list, self._append_per_proc_block_write_usage_row,
@@ -511,7 +508,7 @@ class IoAnalysisCommand(Command):
 
     def _fill_file_read_usage_result_table(self, files, result_table):
         input_list = sorted(files.values(),
-                            key=lambda file_stats: file_stats.read,
+                            key=lambda file_stats: file_stats.io.read,
                             reverse=True)
         self._fill_usage_result_table(input_list,
                                       self._append_file_read_usage_row,
@@ -519,7 +516,7 @@ class IoAnalysisCommand(Command):
 
     def _fill_file_write_usage_result_table(self, files, result_table):
         input_list = sorted(files.values(),
-                            key=lambda file_stats: file_stats.write,
+                            key=lambda file_stats: file_stats.io.write,
                             reverse=True)
         self._fill_usage_result_table(input_list,
                                       self._append_file_write_usage_row,
@@ -562,7 +559,7 @@ class IoAnalysisCommand(Command):
                                             per_file_write_table)
         self._fill_per_process_block_read_usage_result_table(
             per_proc_block_read_table)
-        self._fill_per_process_block_read_usage_result_table(
+        self._fill_per_process_block_write_usage_result_table(
             per_proc_block_write_table)
         self._fill_disk_sector_usage_result_table(per_disk_sector_table)
         self._fill_disk_request_usage_result_table(per_disk_request_table)
@@ -584,166 +581,140 @@ class IoAnalysisCommand(Command):
             per_netif_send=per_netif_send_table,
         )
 
-    def _get_per_process_read_write_usage_datum(self, row):
-        if row.process.pid is None:
-            pid_str = 'unknown (tid=%d)' % (row.process.tid)
-        else:
-            pid_str = str(row.process.pid)
+    def _print_per_proc_io(self, result_table, title):
+        header_format = '{:<25} {:<10} {:<10} {:<10}'
+        label_header = header_format.format(
+            'Process', 'Disk', 'Net', 'Unknown'
+        )
 
-        format_str = '{:>10} {:<25} {:>9} file {:>9} net {:>9} unknown'
-        output_str = format_str.format(
-            common.convert_size(row.size.value, padding_after=True),
-            '%s (%s)' % (row.process.name, pid_str),
-            common.convert_size(row.disk_size.value, padding_after=True),
-            common.convert_size(row.net_size.value, padding_after=True),
-            common.convert_size(row.unknown_size.value, padding_after=True))
+        def get_label(row):
+            label_format = '{:<25} {:>10} {:>10} {:>10}'
+            if row.process.pid is None:
+                pid_str = 'unknown (tid=%d)' % (row.process.tid)
+            else:
+                pid_str = str(row.process.pid)
 
-        return (output_str, row.size.value)
+            label = label_format.format(
+                '%s (%s)' % (row.process.name, pid_str),
+                format_utils.format_size(row.disk_size.value),
+                format_utils.format_size(row.net_size.value),
+                format_utils.format_size(row.unknown_size.value)
+            )
 
-    def _get_per_process_block_read_write_usage_datum(self, row):
-        proc_name = row.process.name
+            return label
 
-        if not proc_name:
-            proc_name = 'unknown'
+        graph = termgraph.BarGraph(
+            title='Per-process I/O ' + title,
+            label_header=label_header,
+            get_value=lambda row: row.size.value,
+            get_value_str=format_utils.format_size,
+            get_label=get_label,
+            data=result_table.rows
+        )
 
-        if row.process.pid is None:
-            pid_str = 'unknown (tid=%d)' % (row.process.tid)
-        else:
-            pid_str = str(row.process.pid)
+        graph.print_graph()
 
-        format_str = '{:>10} {:<22}'
-        output_str = format_str.format(
-            common.convert_size(row.size.value, padding_after=True),
-            '%s (pid=%s)' % (proc_name, pid_str))
+    def _print_per_proc_block_io(self, result_table, title):
+        def get_label(row):
+            proc_name = row.process.name
 
-        return (output_str, row.size.value)
+            if not proc_name:
+                proc_name = 'unknown'
 
-    def _get_per_disk_count_usage_datum(self, row):
-        return (row.disk.name, row.count.value)
+            if row.process.pid is None:
+                pid_str = 'unknown (tid={})'.format(row.process.tid)
+            else:
+                pid_str = str(row.process.pid)
 
-    def _get_per_disk_rtps_usage_datum(self, row):
-        avg_latency = row.rtps.value / common.NSEC_PER_MSEC
-        avg_latency = round(avg_latency, 3)
+            return '{} (pid={})'.format(proc_name, pid_str)
 
-        return (row.disk.name, avg_latency)
+        graph = termgraph.BarGraph(
+            title='Block I/O ' + title,
+            label_header='Process',
+            get_value=lambda row: row.size.value,
+            get_value_str=format_utils.format_size,
+            get_label=get_label,
+            data=result_table.rows
+        )
 
-    def _get_per_netif_recv_send_usage_datum(self, row):
-        return ('%s %s' %
-                (common.convert_size(row.size.value), row.netif.name),
-                row.size.value)
+        graph.print_graph()
 
-    def _get_per_file_read_write_usage_datum(self, row):
-        format_str = '{:>10} {} {}'
-        output_str = format_str.format(
-            common.convert_size(row.size.value, padding_after=True),
-            row.path.path, row.fd_owners.value)
+    def _print_per_disk_sector(self, result_table):
+        graph = termgraph.BarGraph(
+            title='Disk Requests Sector Count',
+            label_header='Disk',
+            unit='sectors',
+            get_value=lambda row: row.count.value,
+            get_label=lambda row: row.disk.name,
+            data=result_table.rows
+        )
 
-        return (output_str, row.size.value)
+        graph.print_graph()
 
-    def _print_usage_ascii_graph(self, result_table, get_datum_cb, graph_label,
-                                 graph_args=None):
-        graph = Pyasciigraph()
-        data = []
+    def _print_per_disk_request(self, result_table):
+        graph = termgraph.BarGraph(
+            title='Disk Request Count',
+            label_header='Disk',
+            unit='requests',
+            get_value=lambda row: row.count.value,
+            get_label=lambda row: row.disk.name,
+            data=result_table.rows
+        )
 
-        if graph_args is None:
-            graph_args = {}
+        graph.print_graph()
 
-        for row in result_table.rows:
-            datum = get_datum_cb(row)
-            data.append(datum)
-
-        for line in graph.graph(graph_label, data, **graph_args):
-            print(line)
-
-    def _print_per_process_read(self, result_table):
-        label = 'Per-process I/O Read'
-        graph_args = {'with_value': False}
-        self._print_usage_ascii_graph(
-            result_table, self._get_per_process_read_write_usage_datum,
-            label, graph_args)
-
-    def _print_per_process_write(self, result_table):
-        label = 'Per-process I/O Write'
-        graph_args = {'with_value': False}
-        self._print_usage_ascii_graph(
-            result_table, self._get_per_process_read_write_usage_datum,
-            label, graph_args)
-
-    def _print_per_process_block_read(self, result_table):
-        label = 'Block I/O Read'
-        graph_args = {'with_value': False}
-        self._print_usage_ascii_graph(
-            result_table, self._get_per_process_block_read_write_usage_datum,
-            label, graph_args)
-
-    def _print_per_process_block_write(self, result_table):
-        label = 'Block I/O Write'
-        graph_args = {'with_value': False}
-        self._print_usage_ascii_graph(
-            result_table, self._get_per_process_block_read_write_usage_datum,
-            label, graph_args)
+    def _print_per_disk_rtps(self, result_table):
+        graph = termgraph.BarGraph(
+            title='Disk Request Average Latency',
+            label_header='Disk',
+            unit='ms',
+            get_value=lambda row: row.rtps.value / common.NSEC_PER_MSEC,
+            get_label=lambda row: row.disk.name,
+            data=result_table.rows
+        )
 
-    def _print_per_disk_sector(self, result_table):
-        label = 'Disk requests sector count'
-        graph_args = {'unit': ' sectors'}
-        self._print_usage_ascii_graph(result_table,
-                                      self._get_per_disk_count_usage_datum,
-                                      label, graph_args)
+        graph.print_graph()
 
-    def _print_per_disk_request(self, result_table):
-        label = 'Disk request count'
-        graph_args = {'unit': ' requests'}
-        self._print_usage_ascii_graph(result_table,
-                                      self._get_per_disk_count_usage_datum,
-                                      label, graph_args)
+    def _print_per_netif_io(self, result_table, title):
+        graph = termgraph.BarGraph(
+            title='Network ' + title + ' Bytes',
+            label_header='Interface',
+            get_value=lambda row: row.size.value,
+            get_value_str=format_utils.format_size,
+            get_label=lambda row: row.netif.name,
+            data=result_table.rows
+        )
 
-    def _print_per_disk_rtps(self, result_table):
-        label = 'Disk request average latency'
-        graph_args = {'unit': ' ms', 'sort': 2}
-        self._print_usage_ascii_graph(result_table,
-                                      self._get_per_disk_rtps_usage_datum,
-                                      label, graph_args)
-
-    def _print_per_netif_recv(self, result_table):
-        label = 'Network received bytes'
-        graph_args = {'with_value': False}
-        self._print_usage_ascii_graph(
-            result_table, self._get_per_netif_recv_send_usage_datum,
-            label, graph_args)
-
-    def _print_per_netif_send(self, result_table):
-        label = 'Network sent bytes'
-        graph_args = {'with_value': False}
-        self._print_usage_ascii_graph(
-            result_table, self._get_per_netif_recv_send_usage_datum,
-            label, graph_args)
-
-    def _print_per_file_read(self, result_table):
-        label = 'Files read'
-        graph_args = {'with_value': False, 'sort': 2}
-        self._print_usage_ascii_graph(
-            result_table, self._get_per_file_read_write_usage_datum,
-            label, graph_args)
-
-    def _print_per_file_write(self, result_table):
-        label = 'Files write'
-        graph_args = {'with_value': False, 'sort': 2}
-        self._print_usage_ascii_graph(
-            result_table, self._get_per_file_read_write_usage_datum,
-            label, graph_args)
+        graph.print_graph()
+
+    def _print_per_file_io(self, result_table, title):
+        # FIXME add option to show FD owners
+        # FIXME why are read and write values the same?
+        graph = termgraph.BarGraph(
+            title='Per-file I/O ' + title,
+            label_header='Path',
+            get_value=lambda row: row.size.value,
+            get_value_str=format_utils.format_size,
+            get_label=lambda row: row.path.path,
+            data=result_table.rows
+        )
+
+        graph.print_graph()
 
     def _print_usage(self, usage_tables):
-        self._print_per_process_read(usage_tables.per_proc_read)
-        self._print_per_process_write(usage_tables.per_proc_write)
-        self._print_per_file_read(usage_tables.per_file_read)
-        self._print_per_file_write(usage_tables.per_file_write)
-        self._print_per_process_block_read(usage_tables.per_proc_block_read)
-        self._print_per_process_block_write(usage_tables.per_proc_block_write)
+        self._print_per_proc_io(usage_tables.per_proc_read, 'Read')
+        self._print_per_proc_io(usage_tables.per_proc_write, 'Write')
+        self._print_per_file_io(usage_tables.per_file_read, 'Read')
+        self._print_per_file_io(usage_tables.per_file_write, 'Write')
+        self._print_per_proc_block_io(usage_tables.per_proc_block_read, 'Read')
+        self._print_per_proc_block_io(
+            usage_tables.per_proc_block_write, 'Write'
+        )
         self._print_per_disk_sector(usage_tables.per_disk_sector)
         self._print_per_disk_request(usage_tables.per_disk_request)
         self._print_per_disk_rtps(usage_tables.per_disk_rtps)
-        self._print_per_netif_recv(usage_tables.per_netif_recv)
-        self._print_per_netif_send(usage_tables.per_netif_send)
+        self._print_per_netif_io(usage_tables.per_netif_recv, 'Received')
+        self._print_per_netif_io(usage_tables.per_netif_send, 'Sent')
 
     def _fill_freq_result_table(self, duration_list, result_table):
         if not duration_list:
@@ -837,29 +808,15 @@ class IoAnalysisCommand(Command):
         return syscall_tables + disk_tables
 
     def _print_one_freq(self, result_table):
-        if not result_table.rows:
-            return
-
-        graph = Pyasciigraph()
-        graph_data = []
-
-        for row in result_table.rows:
-            graph_data.append(('%0.03f' % row.latency_lower.to_us(),
-                               row.count.value))
-
-        title = '{} {} (usec)'.format(result_table.title,
-                                      result_table.subtitle)
-        graph_lines = graph.graph(
-            title,
-            graph_data,
-            info_before=True,
-            count=True
+        graph = termgraph.FreqGraph(
+            data=result_table.rows,
+            get_value=lambda row: row.count.value,
+            get_lower_bound=lambda row: row.latency_lower.to_us(),
+            title='{} {}'.format(result_table.title, result_table.subtitle),
+            unit='µs'
         )
 
-        for line in graph_lines:
-            print(line)
-
-        print()
+        graph.print_graph()
 
     def _print_freq(self, freq_tables):
         for freq_table in freq_tables:
@@ -963,7 +920,7 @@ class IoAnalysisCommand(Command):
         if type(row.size) is mi.Empty:
             size = 'N/A'
         else:
-            size = common.convert_size(row.size.value)
+            size = format_utils.format_size(row.size.value)
 
         tid = row.process.tid
         proc_name = row.process.name
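
One subtlety in the sort keys above: operator.attrgetter() accepts dotted
names, which is what lets the nested per-category I/O counters
(block_io.read, net_io.write, ...) be used directly as sort keys. A minimal
illustration with hypothetical objects:

    import operator
    from types import SimpleNamespace

    procs = [SimpleNamespace(comm='a', block_io=SimpleNamespace(read=10)),
             SimpleNamespace(comm='b', block_io=SimpleNamespace(read=30))]

    # attrgetter resolves the whole attribute chain at call time
    by_block_read = operator.attrgetter('block_io.read')
    top = sorted(procs, key=by_block_read, reverse=True)  # 'b' sorts first
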
diff --git a/lttnganalyses/cli/irq.py b/lttnganalyses/cli/irq.py
index af1dca92a521e8e4d9c8dd39f1fd11a497692d11..facbb2b8ebd5b767175a2b3b3b39e90b24ccda61 100644 (file)
@@ -27,9 +27,9 @@ import math
 import statistics
 import sys
 from . import mi
+from . import termgraph
 from .command import Command
 from ..core import irq as core_irq
-from ..ascii_graph import Pyasciigraph
 from ..linuxautomaton import common, sv
 
 
@@ -487,28 +487,17 @@ class IrqAnalysisCommand(Command):
         return statistics.stdev(raise_latencies)
 
     def _print_frequency_distribution(self, freq_table):
-        graph = Pyasciigraph()
-        graph_data = []
-
-        for row in freq_table.rows:
-            # The graph data format is a tuple (info, value). Here info
-            # is the lower bound of the bucket, value the bucket's count
-            lower_bound_us = row.duration_lower.to_us()
-            count = row.count.value
-
-            graph_data.append(('%0.03f' % lower_bound_us, count))
-
-        title_fmt = 'Handler duration frequency distribution {} (usec)'
-
-        graph_lines = graph.graph(
-            title_fmt.format(freq_table.subtitle),
-            graph_data,
-            info_before=True,
-            count=True
+        title_fmt = 'Handler duration frequency distribution {}'
+
+        graph = termgraph.FreqGraph(
+            data=freq_table.rows,
+            get_value=lambda row: row.count.value,
+            get_lower_bound=lambda row: row.duration_lower.to_us(),
+            title=title_fmt.format(freq_table.subtitle),
+            unit='µs'
         )
 
-        for line in graph_lines:
-            print(line)
+        graph.print_graph()
 
     def _filter_irq(self, irq):
         if type(irq) is sv.HardIRQ:
diff --git a/lttnganalyses/cli/memtop.py b/lttnganalyses/cli/memtop.py
index a00ecfb3e92d18f9c6a093059e85bce3d4461580..2461e2324ff42d71eff50019947aa3f1a5f9d356 100644 (file)
@@ -25,8 +25,8 @@
 import operator
 from .command import Command
 from ..core import memtop
-from ..ascii_graph import Pyasciigraph
 from . import mi
+from . import termgraph
 
 
 class Memtop(Command):
@@ -154,17 +154,17 @@ class Memtop(Command):
         return result_table
 
     def _print_per_tid_result(self, result_table, title):
-        graph = Pyasciigraph()
-        values = []
-
-        for row in result_table.rows:
-            process_do = row.process
-            pages = row.pages.value
-            values.append(('%s (%d)' % (process_do.name, process_do.tid),
-                           pages))
+        graph = termgraph.BarGraph(
+            title=title,
+            unit='pages',
+            get_value=lambda row: row.pages.value,
+            get_label=lambda row: '%s (%d)' % (row.process.name,
+                                               row.process.tid),
+            label_header='Process',
+            data=result_table.rows
+        )
 
-        for line in graph.graph(title, values, unit=' pages'):
-            print(line)
+        graph.print_graph()
 
     def _print_per_tid_allocd(self, result_table):
         self._print_per_tid_result(result_table, 'Per-TID Memory Allocations')
diff --git a/lttnganalyses/cli/mitest.py b/lttnganalyses/cli/mitest.py
deleted file mode 100644 (file)
index 5a09250..0000000
+++ /dev/null
@@ -1,228 +0,0 @@
-# The MIT License (MIT)
-#
-# Copyright (C) 2015 - Julien Desfossez <jdesfossez@efficios.com>
-#               2015 - Antoine Busque <abusque@efficios.com>
-#               2015 - Philippe Proulx <pproulx@efficios.com>
-#
-# Permission is hereby granted, free of charge, to any person obtaining a copy
-# of this software and associated documentation files (the "Software"), to deal
-# in the Software without restriction, including without limitation the rights
-# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-# copies of the Software, and to permit persons to whom the Software is
-# furnished to do so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice shall be included in
-# all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-# SOFTWARE.
-
-import operator
-from . import mi
-from ..core import cputop
-from .command import Command
-from ..ascii_graph import Pyasciigraph
-
-
-class Cputop(Command):
-    _DESC = """The cputop command."""
-    _ANALYSIS_CLASS = cputop.Cputop
-    _MI_TITLE = 'Top CPU usage'
-    _MI_DESCRIPTION = 'Per-TID, per-CPU, and total top CPU usage'
-    _MI_TAGS = [mi.Tags.CPU, mi.Tags.TOP]
-    _MI_TABLE_CLASS_PER_PROC = 'per-process'
-    _MI_TABLE_CLASS_PER_CPU = 'per-cpu'
-    _MI_TABLE_CLASS_TOTAL = 'total'
-    _MI_TABLE_CLASS_SUMMARY = 'summary'
-    _MI_TABLE_CLASSES = [
-        (
-            _MI_TABLE_CLASS_PER_PROC,
-            'Per-TID top CPU usage', [
-                ('process', 'Process', mi.Process),
-                ('migrations', 'Migration count', mi.Integer, 'migrations'),
-                ('usage', 'CPU usage', mi.Ratio),
-            ]
-        ),
-        (
-            _MI_TABLE_CLASS_PER_CPU,
-            'Per-CPU top CPU usage', [
-                ('cpu', 'CPU', mi.Cpu),
-                ('usage', 'CPU usage', mi.Ratio),
-            ]),
-        (
-            _MI_TABLE_CLASS_TOTAL,
-            'Total CPU usage', [
-                ('usage', 'CPU usage', mi.Ratio),
-            ]
-        ),
-        (
-            _MI_TABLE_CLASS_SUMMARY,
-            'CPU usage - summary', [
-                ('time_range', 'Time range', mi.TimeRange),
-                ('usage', 'Total CPU usage', mi.Ratio),
-            ]
-        ),
-    ]
-
-    def _filter_process(self, proc):
-        # Exclude swapper
-        if proc.tid == 0:
-            return False
-
-        if self._args.proc_list and proc.comm not in self._args.proc_list:
-            return False
-
-        return True
-
-    def _analysis_tick(self, begin_ns, end_ns):
-        per_tid_table = self._get_per_tid_usage_result_table(begin_ns, end_ns)
-        per_cpu_table = self._get_per_cpu_usage_result_table(begin_ns, end_ns)
-        total_table = self._get_total_usage_result_table(begin_ns, end_ns)
-
-        if self._mi_mode:
-            self._mi_append_result_table(per_tid_table)
-            self._mi_append_result_table(per_cpu_table)
-            self._mi_append_result_table(total_table)
-        else:
-            self._print_date(begin_ns, end_ns)
-            self._print_per_tid_usage(per_tid_table)
-            self._print_per_cpu_usage(per_cpu_table)
-
-            if total_table:
-                self._print_total_cpu_usage(total_table)
-
-    def _create_summary_result_tables(self):
-        total_tables = self._mi_get_result_tables(self._MI_TABLE_CLASS_TOTAL)
-        begin = total_tables[0].timerange.begin
-        end = total_tables[-1].timerange.end
-        summary_table = \
-            self._mi_create_result_table(self._MI_TABLE_CLASS_SUMMARY,
-                                         begin, end)
-
-        for total_table in total_tables:
-            usage = total_table.rows[0].usage
-            summary_table.append_row(
-                time_range=total_table.timerange,
-                usage=usage,
-            )
-
-        self._mi_clear_result_tables()
-        self._mi_append_result_table(summary_table)
-
-    def _get_per_tid_usage_result_table(self, begin_ns, end_ns):
-        result_table = \
-            self._mi_create_result_table(self._MI_TABLE_CLASS_PER_PROC,
-                                         begin_ns, end_ns)
-        count = 0
-
-        for tid in sorted(self._analysis.tids.values(),
-                          key=operator.attrgetter('usage_percent'),
-                          reverse=True):
-            if not self._filter_process(tid):
-                continue
-
-            result_table.append_row(
-                process=mi.Process(tid.comm, tid=tid.tid),
-                migrations=mi.Integer(tid.migrate_count),
-                usage=mi.Ratio.from_percentage(tid.usage_percent)
-            )
-            count += 1
-
-            if self._args.limit > 0 and count >= self._args.limit:
-                break
-
-        return result_table
-
-    def _get_per_cpu_usage_result_table(self, begin_ns, end_ns):
-        result_table = \
-            self._mi_create_result_table(self._MI_TABLE_CLASS_PER_CPU,
-                                         begin_ns, end_ns)
-
-        for cpu in sorted(self._analysis.cpus.values(),
-                          key=operator.attrgetter('usage_percent'),
-                          reverse=True):
-            result_table.append_row(
-                cpu=mi.Cpu(cpu.cpu_id),
-                usage=mi.Ratio.from_percentage(cpu.usage_percent)
-            )
-
-        return result_table
-
-    def _get_total_usage_result_table(self, begin_ns, end_ns):
-        result_table = \
-            self._mi_create_result_table(self._MI_TABLE_CLASS_TOTAL,
-                                         begin_ns, end_ns)
-
-        cpu_count = len(self.state.cpus)
-        usage_percent = 0
-
-        if not cpu_count:
-            return
-
-        for cpu in sorted(self._analysis.cpus.values(),
-                          key=operator.attrgetter('usage_percent'),
-                          reverse=True):
-            usage_percent += cpu.usage_percent
-
-        # average per CPU
-        usage_percent /= cpu_count
-        result_table.append_row(
-            usage=mi.Ratio.from_percentage(usage_percent),
-        )
-
-        return result_table
-
-    def _print_per_tid_usage(self, result_table):
-        graph = Pyasciigraph()
-        values = []
-
-        for row in result_table.rows:
-            process_do = row.process
-            migration_count = row.migrations.value
-            output_str = '%s (%d)' % (process_do.name, process_do.tid)
-
-            if migration_count > 0:
-                output_str += ', %d migrations' % (migration_count)
-
-            values.append((output_str, row.usage.to_percentage()))
-
-        for line in graph.graph('Per-TID CPU Usage', values, unit=' %'):
-            print(line)
-
-    def _print_per_cpu_usage(self, result_table):
-        graph = Pyasciigraph()
-        values = []
-
-        for row in result_table.rows:
-            cpu = row.cpu
-            values.append(('CPU %d' % cpu.id, row.usage.to_percentage()))
-
-        for line in graph.graph('Per-CPU Usage', values, unit=' %'):
-            print(line)
-
-    def _print_total_cpu_usage(self, result_table):
-        usage_percent = result_table.rows[0].usage.to_percentage()
-        print('\nTotal CPU Usage: %0.02f%%\n' % usage_percent)
-
-    def _add_arguments(self, ap):
-        Command._add_proc_filter_args(ap)
-
-
-def _run(mi_mode):
-    cputopcmd = Cputop(mi_mode=mi_mode)
-    cputopcmd.run()
-
-
-# entry point (human)
-def run():
-    _run(mi_mode=False)
-
-
-# entry point (MI)
-def run_mi():
-    _run(mi_mode=True)
diff --git a/lttnganalyses/cli/sched.py b/lttnganalyses/cli/sched.py
index f4d252e7d2fb1eeb9adacbab3204a6f815a2592e..cdd4dd9bd9f6076701f6b54cb1f8999f835034aa 100644 (file)
@@ -27,10 +27,10 @@ import operator
 import statistics
 import collections
 from . import mi
+from . import termgraph
 from ..core import sched
 from .command import Command
 from ..linuxautomaton import common
-from ..ascii_graph import Pyasciigraph
 
 
 _SchedStats = collections.namedtuple('_SchedStats', [
@@ -797,34 +797,22 @@ class SchedAnalysisCommand(Command):
                 print(row_str)
 
     def _print_frequency_distribution(self, freq_table):
-        graph = Pyasciigraph()
-        graph_data = []
-
-        for row in freq_table.rows:
-            # The graph data format is a tuple (info, value). Here info
-            # is the lower bound of the bucket, value the bucket's count
-            lower_bound_us = row.duration_lower.to_us()
-            count = row.count.value
-
-            graph_data.append(('%0.03f' % lower_bound_us, count))
-
-        title_fmt = 'Scheduling latency (µs) frequency distribution - {}'
-
-        graph_lines = graph.graph(
-            title_fmt.format(freq_table.subtitle),
-            graph_data,
-            info_before=True,
-            count=True
+        title_fmt = 'Scheduling latency frequency distribution - {}'
+
+        graph = termgraph.FreqGraph(
+            data=freq_table.rows,
+            get_value=lambda row: row.count.value,
+            get_lower_bound=lambda row: row.duration_lower.to_us(),
+            title=title_fmt.format(freq_table.subtitle),
+            unit='µs'
         )
 
-        for line in graph_lines:
-            print(line)
+        graph.print_graph()
+
 
     def _print_freq(self, freq_tables):
         for freq_table in freq_tables:
-            if freq_table.rows:
-                print()
-                self._print_frequency_distribution(freq_table)
+            self._print_frequency_distribution(freq_table)
 
     def _validate_transform_args(self, args):
         # If neither --total nor --per-prio are specified, default
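
Note that the emptiness guard removed from _print_freq() above is not lost:
FreqGraph.print_graph() (added below in termgraph.py) returns early when its
data is empty, so callers no longer need to check freq_table.rows themselves.
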
diff --git a/lttnganalyses/cli/termgraph.py b/lttnganalyses/cli/termgraph.py
new file mode 100644 (file)
index 0000000..493c29a
--- /dev/null
@@ -0,0 +1,202 @@
+# The MIT License (MIT)
+#
+# Copyright (C) 2016 - Antoine Busque <abusque@efficios.com>
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+from collections import namedtuple
+
+
+GraphDatum = namedtuple('GraphDatum', ['value', 'value_str'])
+BarGraphDatum = namedtuple('BarGraphDatum', ['value', 'value_str', 'label'])
+FreqGraphDatum = namedtuple(
+    'FreqGraphDatum', ['value', 'value_str', 'lower_bound']
+)
+
+
+class Graph():
+    MAX_GRAPH_WIDTH = 80
+    BAR_CHAR = '█'
+    HR_CHAR = '#'
+
+    def __init__(self, data, get_value, get_value_str, title, unit):
+        self._data = data
+        self._get_value = get_value
+        self._title = title
+        self._unit = unit
+        self._max_value = 0
+        self._max_value_len = 0
+
+        if get_value_str is not None:
+            self._get_value_str_cb = get_value_str
+        else:
+            self._get_value_str_cb = Graph._get_value_str_default
+
+    def _transform_data(self, data):
+        graph_data = []
+
+        for datum in data:
+            graph_datum = self._get_graph_datum(datum)
+
+            if graph_datum.value > self._max_value:
+                self._max_value = graph_datum.value
+            if len(graph_datum.value_str) > self._max_value_len:
+                self._max_value_len = len(graph_datum.value_str)
+
+            graph_data.append(graph_datum)
+
+        return graph_data
+
+    def _get_value_str(self, value):
+        return self._get_value_str_cb(value)
+
+    def _get_graph_datum(self, datum):
+        value = self._get_value(datum)
+        value_str = self._get_value_str(value)
+
+        return GraphDatum(value, value_str)
+
+    def _print_header(self):
+        if self._title:
+            print(self._title)
+
+    def _print_separator(self):
+        print(self.HR_CHAR * self.MAX_GRAPH_WIDTH)
+
+    def _print_body(self):
+        raise NotImplementedError()
+
+    def print_graph(self):
+        if not self._data:
+            return
+
+        self._print_header()
+        self._print_separator()
+        self._print_body()
+        print()
+
+    @staticmethod
+    def _get_value_str_default(value):
+        if isinstance(value, float):
+            value_str = '{:0.02f}'.format(value)
+        else:
+            value_str = str(value)
+
+        return value_str
+
+
+class BarGraph(Graph):
+    def __init__(self, data, get_value, get_label, get_value_str=None,
+                 title=None, label_header=None, unit=None):
+        super().__init__(data, get_value, get_value_str, title, unit)
+
+        self._get_label = get_label
+        self._label_header = label_header
+        self._data = self._transform_data(self._data)
+
+    def _get_graph_datum(self, datum):
+        value = self._get_value(datum)
+        value_str = self._get_value_str(value)
+        label = self._get_label(datum)
+
+        return BarGraphDatum(value, value_str, label)
+
+    def _get_value_str(self, value):
+        value_str = super()._get_value_str(value)
+        if self._unit:
+            value_str += ' ' + self._unit
+
+        return value_str
+
+    def _get_graph_header(self):
+        if not self._label_header:
+            return self._title
+
+        title_len = len(self._title)
+        space_width = (self.MAX_GRAPH_WIDTH - title_len) + \
+                      1 + self._max_value_len + 1
+
+        return self._title + ' ' * space_width + self._label_header
+
+    def _print_header(self):
+        header = self._get_graph_header()
+        print(header)
+
+    def _get_bar_str(self, datum):
+        if self._max_value == 0:
+            bar_width = 0
+        else:
+            bar_width = int(self.MAX_GRAPH_WIDTH * datum.value /
+                            self._max_value)
+        space_width = self.MAX_GRAPH_WIDTH - bar_width
+        bar_str = self.BAR_CHAR * bar_width + ' ' * space_width
+
+        return bar_str
+
+    def _print_body(self):
+        for datum in self._data:
+            bar_str = self._get_bar_str(datum)
+            value_padding = ' ' * (self._max_value_len - len(datum.value_str))
+            print(bar_str, value_padding + datum.value_str, datum.label)
+
+
+class FreqGraph(Graph):
+    LOWER_BOUND_WIDTH = 8
+
+    def __init__(self, data, get_value, get_lower_bound,
+                 get_value_str=None, title=None, unit=None):
+        super().__init__(data, get_value, get_value_str, title, unit)
+
+        self._get_lower_bound = get_lower_bound
+        self._data = self._transform_data(self._data)
+
+    def _get_graph_datum(self, datum):
+        value = self._get_value(datum)
+        value_str = self._get_value_str(value)
+        lower_bound = self._get_lower_bound(datum)
+
+        return FreqGraphDatum(value, value_str, lower_bound)
+
+    def _print_header(self):
+        header = self._title
+        if self._unit:
+            header += ' ({})'.format(self._unit)
+
+        print(header)
+
+    def _get_bar_str(self, datum):
+        max_width = self.MAX_GRAPH_WIDTH - self.LOWER_BOUND_WIDTH
+        if self._max_value == 0:
+            bar_width = 0
+        else:
+            bar_width = int(max_width * datum.value / self._max_value)
+        space_width = max_width - bar_width
+        bar_str = self.BAR_CHAR * bar_width + ' ' * space_width
+
+        return bar_str
+
+    def _print_body(self):
+        for datum in self._data:
+            bound_str = FreqGraph._get_bound_str(datum)
+            bar_str = self._get_bar_str(datum)
+            value_padding = ' ' * (self._max_value_len - len(datum.value_str))
+            print(bound_str, bar_str, value_padding + datum.value_str)
+
+    @staticmethod
+    def _get_bound_str(datum):
+        return '{:>7.03f}'.format(datum.lower_bound)
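
For reviewers new to the termgraph module: instead of taking preformatted
rows, the graph classes take raw data plus accessor callbacks. A minimal
sketch of driving BarGraph, with illustrative data and lambdas (not taken
from the repository):

    from lttnganalyses.cli.termgraph import BarGraph

    data = [('cat', 4096), ('sshd', 512), ('bash', 128)]
    graph = BarGraph(
        data,
        get_value=lambda d: d[1],   # numeric value, drives the bar width
        get_label=lambda d: d[0],   # text printed after the value
        title='Per-process I/O',
        label_header='Process',
        unit='B',
    )
    graph.print_graph()             # header, separator, one bar per datum
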
diff --git a/lttnganalyses/common/format_utils.py b/lttnganalyses/common/format_utils.py
new file mode 100644 (file)
index 0000000..9d8224f
--- /dev/null
+++ b/lttnganalyses/common/format_utils.py
@@ -0,0 +1,68 @@
+# The MIT License (MIT)
+#
+# Copyright (C) 2016 - Antoine Busque <abusque@efficios.com>
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+import math
+
+
+def format_size(size, binary_prefix=True):
+    """Convert an integral number of bytes to a human-readable string
+
+    Args:
+        size (int): a non-negative number of bytes
+        binary_prefix (bool, optional): whether to use binary unit
+            prefixes rather than SI prefixes. default: True
+
+    Returns:
+        The formatted string, comprising the size and unit
+
+    Raises:
+        ValueError: if size < 0
+    """
+    if size < 0:
+        raise ValueError('Cannot format negative size')
+
+    if binary_prefix:
+        base = 1024
+        units = ['  B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB']
+    else:
+        base = 1000
+        units = [' B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB']
+
+    if size == 0:
+        exponent = 0
+    else:
+        exponent = int(math.log(size, base))
+        if exponent >= len(units):
+            # Don't try to use a unit above YiB/YB
+            exponent = len(units) - 1
+
+        size /= math.pow(base, exponent)
+
+    unit = units[exponent]
+
+    if exponent == 0:
+        # Don't display fractions of a byte
+        format_str = '{:0.0f} {}'
+    else:
+        format_str = '{:0.2f} {}'
+
+    return format_str.format(size, unit)
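
A quick interpreter-session sanity check of the new helper (expected
outputs derived by reading the code above):

    >>> from lttnganalyses.common.format_utils import format_size
    >>> format_size(2048)
    '2.00 KiB'
    >>> format_size(1000000, binary_prefix=False)
    '1.00 MB'
    >>> format_size(512)    # exponent 0: no fractional bytes
    '512   B'
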
index e77ef75fa51a9d0ddfeec6de8e4d0eb586226f53..05ce7d2fda530574c17d54fde45d757313ff4142 100644 (file)
@@ -21,6 +21,7 @@
 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 # SOFTWARE.
 
+from . import stats
 from .analysis import Analysis
 
 
@@ -29,7 +30,8 @@ class Cputop(Analysis):
         notification_cbs = {
             'sched_migrate_task': self._process_sched_migrate_task,
             'sched_switch_per_cpu': self._process_sched_switch_per_cpu,
-            'sched_switch_per_tid': self._process_sched_switch_per_tid
+            'sched_switch_per_tid': self._process_sched_switch_per_tid,
+            'prio_changed': self._process_prio_changed,
         }
 
         super().__init__(state, conf)
@@ -44,11 +46,15 @@ class Cputop(Analysis):
         self._ev_count += 1
 
     def reset(self):
-        for cpu_id in self.cpus:
-            self.cpus[cpu_id].reset(self._last_event_ts)
+        for cpu_stats in self.cpus.values():
+            cpu_stats.reset()
+            if cpu_stats.current_task_start_ts is not None:
+                cpu_stats.current_task_start_ts = self._last_event_ts
 
-        for tid in self.tids:
-            self.tids[tid].reset(self._last_event_ts)
+        for proc_stats in self.tids.values():
+            proc_stats.reset()
+            if proc_stats.last_sched_ts is not None:
+                proc_stats.last_sched_ts = self._last_event_ts
 
     def _end_period_cb(self):
         self._compute_stats()
@@ -126,11 +132,12 @@ class Cputop(Analysis):
             return
 
         if next_tid not in self.tids:
-            self.tids[next_tid] = ProcessCpuStats(next_tid, next_comm)
+            self.tids[next_tid] = ProcessCpuStats(None, next_tid, next_comm)
+            self.tids[next_tid].update_prio(timestamp, wakee_proc.prio)
 
         next_proc = self.tids[next_tid]
         next_proc.last_sched_ts = timestamp
-        next_proc.prio = wakee_proc.prio
+
 
     def _process_sched_migrate_task(self, **kwargs):
         cpu_id = kwargs['cpu_id']
@@ -147,6 +154,16 @@ class Cputop(Analysis):
 
         self.tids[tid].migrate_count += 1
 
+    def _process_prio_changed(self, **kwargs):
+        timestamp = kwargs['timestamp']
+        prio = kwargs['prio']
+        tid = kwargs['tid']
+
+        if tid not in self.tids:
+            return
+
+        self.tids[tid].update_prio(timestamp, prio)
+
     def _filter_process(self, proc):
         # Exclude swapper
         if proc.tid == 0:
@@ -173,38 +190,29 @@ class CpuUsageStats():
         else:
             self.usage_percent = 0
 
-    def reset(self, timestamp):
+    def reset(self):
         self.total_usage_time = 0
         self.usage_percent = None
-        if self.current_task_start_ts is not None:
-            self.current_task_start_ts = timestamp
 
 
-class ProcessCpuStats():
-    def __init__(self, tid, comm):
-        self.tid = tid
-        self.comm = comm
-        # Currently only the latest prio is tracked
-        self.prio = None
+class ProcessCpuStats(stats.Process):
+    def __init__(self, pid, tid, comm):
+        super().__init__(pid, tid, comm)
+
         # CPU Time and timestamp in nanoseconds (ns)
         self.total_cpu_time = 0
         self.last_sched_ts = None
         self.migrate_count = 0
         self.usage_percent = None
 
-    @classmethod
-    def new_from_process(cls, proc):
-        return cls(proc.tid, proc.comm)
-
     def compute_stats(self, duration):
         if duration != 0:
             self.usage_percent = self.total_cpu_time * 100 / duration
         else:
             self.usage_percent = 0
 
-    def reset(self, timestamp):
+    def reset(self):
+        super().reset()
         self.total_cpu_time = 0
         self.migrate_count = 0
         self.usage_percent = None
-        if self.last_sched_ts is not None:
-            self.last_sched_ts = timestamp
index 17b90028cb8469582399af433b1c6d5eed28d5fc..6ed7434207346445cd3e732eff1656008ae9fd31 100644 (file)
@@ -20,6 +20,7 @@
 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 # SOFTWARE.
 
+from . import stats
 from .analysis import Analysis
 from ..linuxautomaton import sv
 
@@ -125,16 +126,10 @@ class IoAnalysis(Analysis):
                     # Add process name to generic filenames to
                     # distinguish them
                     if FileStats.is_generic_name(filename):
-                        filename += '(%s)' % proc_stats.comm
+                        filename += ' (%s)' % proc_stats.comm
 
                     if filename not in files_stats:
-                        if proc_stats.pid is not None:
-                            pid = proc_stats.pid
-                        else:
-                            pid = proc_stats.tid
-
-                        files_stats[filename] = FileStats(
-                            filename, fd_stats.fd, pid)
+                        files_stats[filename] = FileStats(filename)
 
                     files_stats[filename].update_stats(fd_stats, proc_stats)
 
@@ -246,6 +241,13 @@ class IoAnalysis(Analysis):
         proc_stats.update_io_stats(io_rq, fd_types)
         parent_stats.update_fd_stats(io_rq)
 
+        # Check if the proc stats comm corresponds to the actual
+        # process comm. It might have been missing until now.
+        if proc_stats.comm != proc.comm:
+            proc_stats.comm = proc.comm
+        if parent_stats.comm != parent_proc.comm:
+            parent_stats.comm = parent_proc.comm
+
     def _process_create_parent_proc(self, **kwargs):
         proc = kwargs['proc']
         parent_proc = kwargs['parent_proc']
@@ -375,22 +377,13 @@ class IfaceStats():
         self.sent_packets = 0
 
 
-class ProcessIOStats():
+class ProcessIOStats(stats.Process):
     def __init__(self, pid, tid, comm):
-        self.pid = pid
-        self.tid = tid
-        self.comm = comm
-        # Number of bytes read or written by the process, by type of I/O
-        self.disk_read = 0
-        self.disk_write = 0
-        self.net_read = 0
-        self.net_write = 0
-        self.unk_read = 0
-        self.unk_write = 0
-        # Actual number of bytes read or written by the process at the
-        # block layer
-        self.block_read = 0
-        self.block_write = 0
+        super().__init__(pid, tid, comm)
+        self.disk_io = stats.IO()
+        self.net_io = stats.IO()
+        self.unk_io = stats.IO()
+        self.block_io = stats.IO()
         # FDStats objects, indexed by fd (fileno)
         self.fds = {}
         self.rq_list = []
@@ -402,11 +395,11 @@ class ProcessIOStats():
     # Total read/write does not account for block layer I/O
     @property
     def total_read(self):
-        return self.disk_read + self.net_read + self.unk_read
+        return self.disk_io.read + self.net_io.read + self.unk_io.read
 
     @property
     def total_write(self):
-        return self.disk_write + self.net_write + self.unk_write
+        return self.disk_io.write + self.net_io.write + self.unk_io.write
 
     def update_fd_stats(self, req):
         if req.errno is not None:
@@ -425,9 +418,9 @@ class ProcessIOStats():
         self.rq_list.append(req)
 
         if req.operation is sv.IORequest.OP_READ:
-            self.block_read += req.size
+            self.block_io.read += req.size
         elif req.operation is sv.IORequest.OP_WRITE:
-            self.block_write += req.size
+            self.block_io.write += req.size
 
     def update_io_stats(self, req, fd_types):
         self.rq_list.append(req)
@@ -443,23 +436,21 @@ class ProcessIOStats():
             self._update_read(req.returned_size, fd_types['fd_in'])
             self._update_write(req.returned_size, fd_types['fd_out'])
 
-        self.rq_list.append(req)
-
     def _update_read(self, size, fd_type):
         if fd_type == sv.FDType.disk:
-            self.disk_read += size
+            self.disk_io.read += size
         elif fd_type == sv.FDType.net or fd_type == sv.FDType.maybe_net:
-            self.net_read += size
+            self.net_io.read += size
         else:
-            self.unk_read += size
+            self.unk_io.read += size
 
     def _update_write(self, size, fd_type):
         if fd_type == sv.FDType.disk:
-            self.disk_write += size
+            self.disk_io.write += size
         elif fd_type == sv.FDType.net or fd_type == sv.FDType.maybe_net:
-            self.net_write += size
+            self.net_io.write += size
         else:
-            self.unk_write += size
+            self.unk_io.write += size
 
     def _get_current_fd(self, fd):
         fd_stats = self.fds[fd][-1]
@@ -523,14 +514,10 @@ class ProcessIOStats():
         return fd_stats
 
     def reset(self):
-        self.disk_read = 0
-        self.disk_write = 0
-        self.net_read = 0
-        self.net_write = 0
-        self.unk_read = 0
-        self.unk_write = 0
-        self.block_read = 0
-        self.block_write = 0
+        self.disk_io.reset()
+        self.net_io.reset()
+        self.unk_io.reset()
+        self.block_io.reset()
         self.rq_list = []
 
         for fd in self.fds:
@@ -548,10 +535,7 @@ class FDStats():
         self.family = family
         self.open_ts = open_ts
         self.close_ts = None
-
-        # Number of bytes read or written
-        self.read = 0
-        self.write = 0
+        self.io = stats.IO()
         # IO Requests that acted upon the FD
         self.rq_list = []
 
@@ -562,38 +546,35 @@ class FDStats():
 
     def update_stats(self, req):
         if req.operation is sv.IORequest.OP_READ:
-            self.read += req.returned_size
+            self.io.read += req.returned_size
         elif req.operation is sv.IORequest.OP_WRITE:
-            self.write += req.returned_size
+            self.io.write += req.returned_size
         elif req.operation is sv.IORequest.OP_READ_WRITE:
             if self.fd == req.fd_in:
-                self.read += req.returned_size
+                self.io.read += req.returned_size
             elif self.fd == req.fd_out:
-                self.write += req.returned_size
+                self.io.write += req.returned_size
 
         self.rq_list.append(req)
 
     def reset(self):
-        self.read = 0
-        self.write = 0
+        self.io.reset()
         self.rq_list = []
 
 
 class FileStats():
     GENERIC_NAMES = ['pipe', 'socket', 'anon_inode', 'unknown']
 
-    def __init__(self, filename, fd, pid):
+    def __init__(self, filename):
         self.filename = filename
-        # Number of bytes read or written
-        self.read = 0
-        self.write = 0
+        self.io = stats.IO()
         # Dict of file descriptors representing this file, indexed by
         # parent pid
-        self.fd_by_pid = {pid: fd}
+        # FIXME this doesn't cover FD reuse cases
+        self.fd_by_pid = {}
 
     def update_stats(self, fd_stats, proc_stats):
-        self.read += fd_stats.read
-        self.write += fd_stats.write
+        self.io += fd_stats.io
 
         if proc_stats.pid is not None:
             pid = proc_stats.pid
@@ -604,8 +585,7 @@ class FileStats():
             self.fd_by_pid[pid] = fd_stats.fd
 
     def reset(self):
-        self.read = 0
-        self.write = 0
+        self.io.reset()
 
     @staticmethod
     def is_generic_name(filename):
index 85c6ed64b928fdc687168c77b599b8f6f27d7620..d50bfc8e212c9b57c448826baa6eee28d74a66a5 100644 (file)
@@ -52,8 +52,8 @@ class IrqAnalysis(Analysis):
         name = kwargs['irq_name']
         if id not in self.hard_irq_stats:
             self.hard_irq_stats[id] = HardIrqStats(name)
-        elif self.hard_irq_stats[id].name != name:
-            self.hard_irq_stats[id].name = name
+        elif name not in self.hard_irq_stats[id].names:
+            self.hard_irq_stats[id].names.append(name)
 
     def _process_irq_handler_exit(self, **kwargs):
         irq = kwargs['hard_irq']
@@ -97,12 +97,16 @@ class IrqAnalysis(Analysis):
 
 class IrqStats():
     def __init__(self, name):
-        self.name = name
+        self._name = name
         self.min_duration = None
         self.max_duration = None
         self.total_duration = 0
         self.irq_list = []
 
+    @property
+    def name(self):
+        return self._name
+
     @property
     def count(self):
         return len(self.irq_list)
@@ -125,8 +129,15 @@ class IrqStats():
 
 
 class HardIrqStats(IrqStats):
+    NAMES_SEPARATOR = ', '
+
     def __init__(self, name='unknown'):
         super().__init__(name)
+        self.names = [name]
+
+    @property
+    def name(self):
+        return self.NAMES_SEPARATOR.join(self.names)
 
 
 class SoftIrqStats(IrqStats):
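
With the change above, a hard IRQ id that is reported with several handler
names over the trace (shared interrupt lines, for instance) accumulates all
of them, and the name property renders the joined list. A small sketch with
illustrative values:

    s = HardIrqStats('ahci')
    s.names.append('eth0')      # same IRQ id later seen with another name
    assert s.name == 'ahci, eth0'
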
index ce6b79bdfbaa64c6a0fc031d8273b24b42d96683..5e1ee6097182f81309393722fbb1941abeeba6f0 100644 (file)
@@ -20,6 +20,7 @@
 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 # SOFTWARE.
 
+from . import stats
 from .analysis import Analysis
 
 
@@ -70,18 +71,13 @@ class Memtop(Analysis):
         self.tids[tid].freed_pages += 1
 
 
-class ProcessMemStats():
+class ProcessMemStats(stats.Process):
     def __init__(self, pid, tid, comm):
-        self.pid = pid
-        self.tid = tid
-        self.comm = comm
+        super().__init__(pid, tid, comm)
+
         self.allocated_pages = 0
         self.freed_pages = 0
 
-    @classmethod
-    def new_from_process(cls, proc):
-        return cls(proc.pid, proc.tid, proc.comm)
-
     def reset(self):
         self.allocated_pages = 0
         self.freed_pages = 0
index dbab32208a311dfb6860c2e1160b425576ea9a8e..df38e94603b213aeef9783f8e96f9795402d6770 100644 (file)
 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 # SOFTWARE.
 
-from collections import namedtuple
+from . import stats
 from .analysis import Analysis
 
 
-PrioEvent = namedtuple('PrioEvent', ['timestamp', 'prio'])
-
-
 class SchedAnalysis(Analysis):
     def __init__(self, state, conf):
         notification_cbs = {
@@ -85,11 +82,12 @@ class SchedAnalysis(Analysis):
 
         if waker_proc is not None and waker_proc.tid not in self.tids:
             self.tids[waker_proc.tid] = \
-                SchedStats.new_from_process(waker_proc)
+                ProcessSchedStats.new_from_process(waker_proc)
             self.tids[waker_proc.tid].update_prio(switch_ts, waker_proc.prio)
 
         if next_tid not in self.tids:
-            self.tids[next_tid] = SchedStats.new_from_process(wakee_proc)
+            self.tids[next_tid] = \
+                ProcessSchedStats.new_from_process(wakee_proc)
             self.tids[next_tid].update_prio(switch_ts, wakee_proc.prio)
 
         sched_event = SchedEvent(
@@ -118,19 +116,14 @@ class SchedAnalysis(Analysis):
         self.sched_list.append(sched_event)
 
 
-class SchedStats():
-    def __init__(self, tid, comm):
-        self.tid = tid
-        self.comm = comm
+class ProcessSchedStats(stats.Process):
+    def __init__(self, pid, tid, comm):
+        super().__init__(pid, tid, comm)
+
         self.min_latency = None
         self.max_latency = None
         self.total_latency = 0
         self.sched_list = []
-        self.prio_list = []
-
-    @classmethod
-    def new_from_process(cls, proc):
-        return cls(proc.tid, proc.comm)
 
     @property
     def count(self):
@@ -146,17 +139,12 @@ class SchedStats():
         self.total_latency += sched_event.latency
         self.sched_list.append(sched_event)
 
-    def update_prio(self, timestamp, prio):
-        self.prio_list.append(PrioEvent(timestamp, prio))
-
     def reset(self):
+        super().reset()
         self.min_latency = None
         self.max_latency = None
         self.total_latency = 0
         self.sched_list = []
-        if self.prio_list:
-            # Keep the last prio as the first for the next period
-            self.prio_list = self.prio_list[-1:]
 
 
 class SchedEvent():
diff --git a/lttnganalyses/core/stats.py b/lttnganalyses/core/stats.py
new file mode 100644 (file)
index 0000000..7f38d7a
--- /dev/null
+++ b/lttnganalyses/core/stats.py
@@ -0,0 +1,67 @@
+# The MIT License (MIT)
+#
+# Copyright (C) 2015 - Antoine Busque <abusque@efficios.com>
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+from collections import namedtuple
+
+
+PrioEvent = namedtuple('PrioEvent', ['timestamp', 'prio'])
+
+
+class Stats():
+    def reset(self):
+        raise NotImplementedError()
+
+
+class Process(Stats):
+    def __init__(self, pid, tid, comm):
+        self.pid = pid
+        self.tid = tid
+        self.comm = comm
+        self.prio_list = []
+
+    @classmethod
+    def new_from_process(cls, proc):
+        return cls(proc.pid, proc.tid, proc.comm)
+
+    def update_prio(self, timestamp, prio):
+        self.prio_list.append(PrioEvent(timestamp, prio))
+
+    def reset(self):
+        if self.prio_list:
+            # Keep the last prio as the first for the next period
+            self.prio_list = self.prio_list[-1:]
+
+
+class IO(Stats):
+    def __init__(self):
+        # Number of bytes read or written
+        self.read = 0
+        self.write = 0
+
+    def reset(self):
+        self.read = 0
+        self.write = 0
+
+    def __iadd__(self, other):
+        self.read += other.read
+        self.write += other.write
+        return self
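
The new stats module centralises what the analysis classes in this changeset
now inherit: pid/tid/comm plus priority history on Process, and mergeable
read/write byte counters on IO. A short sketch of the intended semantics,
based only on the code above:

    from lttnganalyses.core import stats

    total = stats.IO()
    fd_io = stats.IO()
    fd_io.read = 4096
    total += fd_io                  # __iadd__ sums both directions
    assert (total.read, total.write) == (4096, 0)

    proc = stats.Process(pid=None, tid=1234, comm='bash')
    proc.update_prio(1000, 20)
    proc.update_prio(2000, -5)
    proc.reset()                    # keeps only the last prio event
    assert proc.prio_list == [stats.PrioEvent(2000, -5)]
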
index cf2bd1960b74b11dd67bf996f863a094bcb7005e..eceed1ce34a4d3fa51893223ccb8ae7c4a8c2e3d 100644 (file)
@@ -20,6 +20,7 @@
 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 # SOFTWARE.
 
+from . import stats
 from .analysis import Analysis
 
 
@@ -36,6 +37,7 @@ class SyscallsAnalysis(Analysis):
         self.total_syscalls = 0
 
     def reset(self):
+        # FIXME why no reset?
         pass
 
     def _process_syscall_exit(self, **kwargs):
@@ -62,18 +64,16 @@ class SyscallsAnalysis(Analysis):
         self.total_syscalls += 1
 
 
-class ProcessSyscallStats():
+class ProcessSyscallStats(stats.Process):
     def __init__(self, pid, tid, comm):
-        self.pid = pid
-        self.tid = tid
-        self.comm = comm
+        super().__init__(pid, tid, comm)
+
         # indexed by syscall name
         self.syscalls = {}
         self.total_syscalls = 0
 
-    @classmethod
-    def new_from_process(cls, proc):
-        return cls(proc.pid, proc.tid, proc.comm)
+    def reset(self):
+        pass
 
 
 class SyscallStats():
index c4525317c07dce47a6d3c2a4686338aab05cc874..17b4dcab262711f44564cdba2a31e77d96bd653f 100644 (file)
@@ -20,7 +20,6 @@
 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 # SOFTWARE.
 
-import math
 import re
 import time
 import datetime
@@ -50,32 +49,6 @@ def get_syscall_name(event):
     return name[14:]
 
 
-def convert_size(size, padding_after=False, padding_before=False):
-    if padding_after and size < 1024:
-        space_after = ' '
-    else:
-        space_after = ''
-    if padding_before and size < 1024:
-        space_before = ' '
-    else:
-        space_before = ''
-    if size <= 0:
-        return '0 ' + space_before + 'B' + space_after
-    size_name = ('B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB')
-    i = int(math.floor(math.log(size, 1024)))
-    p = math.pow(1024, i)
-    s = round(size/p, 2)
-    if s > 0:
-        try:
-            v = '%0.02f' % s
-            return '%s %s%s%s' % (v, space_before, size_name[i], space_after)
-        except:
-            print(i, size_name)
-            raise Exception('Too big to be true')
-    else:
-        return '0 B'
-
-
 def is_multi_day_trace_collection(handles):
     time_begin = None
 
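The removed convert_size() is superseded by format_utils.format_size() added
earlier in this changeset; besides dropping the odd 'Too big to be true'
failure path, the new helper distinguishes binary from SI prefixes. Expected
before/after behaviour, derived by reading both implementations:

    convert_size(2048)                      # -> '2.00 KB'  (1024-based, SI label)
    format_size(2048)                       # -> '2.00 KiB' (1024-based)
    format_size(2048, binary_prefix=False)  # -> '2.05 KB'  (true SI, 1000-based)
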
index 23ff9d248a686a41a25f97fe7de09d5ad7e3c763..a1def6c704afd2f65b3a12897eeac34293dd5afa 100644 (file)
@@ -100,11 +100,16 @@ class IrqStateProvider(sp.StateProvider):
     def _process_softirq_exit(self, event):
         cpu = self._get_cpu(event['cpu_id'])
         vec = event['vec']
+        # List of enqueued softirqs for the current cpu/vec
+        # combination. None if vec is not found in the dictionary.
+        current_softirqs = cpu.current_softirqs.get(vec)
 
-        if not cpu.current_softirqs[vec]:
+        # Ignore the exit if either vec was not in the cpu's dict or
+        # if its irq list was empty (i.e. no matching raise).
+        if not current_softirqs:
             return
 
-        cpu.current_softirqs[vec][0].end_ts = event.timestamp
+        current_softirqs[0].end_ts = event.timestamp
         self._state.send_notification_cb('softirq_exit',
-                                         softirq=cpu.current_softirqs[vec][0])
-        del cpu.current_softirqs[vec][0]
+                                         softirq=current_softirqs[0])
+        del current_softirqs[0]
index 4e5b076de71130a12047db997dfdac760c5cad83..b5b5cab7a99bc2d030581ffc8f019e25796a1292 100644 (file)
@@ -28,7 +28,7 @@ from ..common import version_utils
 class SchedStateProvider(sp.StateProvider):
     # The priority offset for sched_wak* events was fixed in
     # lttng-modules 2.7.1 upwards
-    PRIO_OFFSET_FIX_VERSION = version_utils.Version(2,7,1)
+    PRIO_OFFSET_FIX_VERSION = version_utils.Version(2, 7, 1)
 
     def __init__(self, state):
         cbs = {
@@ -131,9 +131,9 @@ class SchedStateProvider(sp.StateProvider):
         else:
             proc = self._state.tids[tid]
 
-        self._check_prio_changed(event.timestamp, tid, prio)
         self._state.send_notification_cb(
             'sched_migrate_task', proc=proc, cpu_id=event['cpu_id'])
+        self._check_prio_changed(event.timestamp, tid, prio)
 
     def _process_sched_wakeup(self, event):
         target_cpu = event['target_cpu']
index 4bfdf3f1369ad1afd809268d640ee7c6684b1931..4f3a159d7910d98f0c72b2e26b48bbfdfab69121 100644 (file)
@@ -454,7 +454,7 @@ class SyscallConsts():
     DISK_OPEN_SYSCALLS = ['open', 'openat']
     # list of syscalls that open a FD on the network
     # (in the exit_syscall event)
-    NET_OPEN_SYSCALLS = ['accept', 'accept4', 'socket']
+    NET_OPEN_SYSCALLS = ['socket']
     # list of syscalls that can duplicate a FD
     DUP_OPEN_SYSCALLS = ['fcntl', 'dup', 'dup2', 'dup3']
     SYNC_SYSCALLS = ['sync', 'sync_file_range', 'fsync', 'fdatasync']
diff --git a/mit-license.txt b/mit-license.txt
new file mode 100644 (file)
index 0000000..5f26899
--- /dev/null
+++ b/mit-license.txt
@@ -0,0 +1,19 @@
+Copyright (c) ...
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/mitest.py b/mitest.py
deleted file mode 100644 (file)
index a9c7eb3..0000000
--- a/mitest.py
+++ /dev/null
@@ -1,267 +0,0 @@
-#!/usr/bin/python3
-
-import json
-import string
-import random
-import argparse
-from lttnganalyses.cli import mi
-
-
-_TABLE_CLASS_PER_PROC = 'per-proc'
-_TABLE_CLASS_PER_SYSCALL = 'per-syscall'
-_TABLE_CLASS_PER_IRQ = 'per-irq'
-_TABLE_CLASSES = {
-    _TABLE_CLASS_PER_PROC: mi.TableClass(
-        _TABLE_CLASS_PER_PROC,
-        'Per-process stuff', [
-            ('proc', 'Process', mi.Process),
-            ('count', 'Count', mi.Integer, 'things'),
-            ('flag', 'Flag', mi.Boolean),
-            ('value', 'Value', mi.Float, 'thou'),
-            ('name', 'Name', mi.String),
-            ('ratio', 'Ratio', mi.Ratio),
-            ('ts', 'Timestamp', mi.Timestamp),
-        ]
-    ),
-    _TABLE_CLASS_PER_SYSCALL: mi.TableClass(
-        _TABLE_CLASS_PER_SYSCALL,
-        'Per-syscall stuff', [
-            ('syscall', 'System call', mi.Syscall),
-            ('duration', 'Duration', mi.Duration),
-            ('size', 'Size', mi.Size),
-            ('bitrate', 'Bitrate', mi.Bitrate),
-            ('time_range', 'Time range', mi.TimeRange),
-        ]
-    ),
-    _TABLE_CLASS_PER_IRQ: mi.TableClass(
-        _TABLE_CLASS_PER_IRQ,
-        'Per-interrupt stuff', [
-            ('interrupt', 'Interrupt', mi.Irq),
-            ('fd', 'File descriptor', mi.Fd),
-            ('path', 'File path', mi.Path),
-            ('cpu', 'CPU', mi.Cpu),
-            ('disk', 'Disk', mi.Disk),
-            ('part', 'Partition', mi.Partition),
-            ('netif', 'Network interface', mi.NetIf),
-        ]
-    )
-}
-
-
-def _print_metadata():
-    infos = mi.get_metadata(version=[1, 2, 3, 'dev'], title='LAMI test',
-                            description='LTTng analyses machine interface test',
-                            authors=['Phil Proulx'], url='http://perdu.com',
-                            tags=['lami', 'test'],
-                            table_classes=_TABLE_CLASSES.values())
-    print(json.dumps(infos))
-
-
-def _parse_args():
-    ap = argparse.ArgumentParser()
-    ap.add_argument('--metadata', action='store_true')
-    ap.add_argument('--begin', type=int, default=1000)
-    ap.add_argument('--end', type=int, default=2000)
-    ap.add_argument('-d', '--dynamic', action='store_true')
-    ap.add_argument('-r', '--dynamic-rows', type=int, default=25)
-    ap.add_argument('-c', '--dynamic-columns', type=int, default=10)
-
-    return ap.parse_args()
-
-
-def _print_tables(tables):
-    obj = {
-        'results': [t.to_native_object() for t in tables],
-    }
-
-    print(json.dumps(obj))
-
-
-def _print_dynamic_table(begin, end, rows, columns):
-    def gen_irq_name(size=6, chars=string.ascii_uppercase + string.digits):
-        return ''.join(random.choice(chars) for _ in range(size))
-
-    column_tuples = [
-        ('irq', 'Interrupt', mi.Irq),
-    ]
-
-    for i in range(columns):
-        column_tuples.append((
-            'count{}'.format(i),
-            'Count ({} to {})'.format(i * 5, (i + 1) * 5),
-            mi.Integer,
-            'interrupts'
-        ))
-
-    table_class = mi.TableClass(None, 'What a dynamic table!', column_tuples)
-    result_table = mi.ResultTable(table_class, begin, end)
-
-    for i in range(rows):
-        row_tuple = [
-            mi.Irq(bool(random.getrandbits(1)), i, gen_irq_name())
-        ]
-
-        for j in range(columns):
-            row_tuple.append(mi.Integer(random.randint(0, 5000)))
-
-        result_table.append_row_tuple(tuple(row_tuple))
-
-    _print_tables([result_table])
-
-
-def _print_static_tables(begin, end):
-    per_proc_table = mi.ResultTable(_TABLE_CLASSES[_TABLE_CLASS_PER_PROC], begin, end)
-    per_syscall_table = mi.ResultTable(_TABLE_CLASSES[_TABLE_CLASS_PER_SYSCALL], begin, end)
-    per_irq_table = mi.ResultTable(_TABLE_CLASSES[_TABLE_CLASS_PER_IRQ], begin, end)
-    per_irq_table_sub = mi.ResultTable(_TABLE_CLASSES[_TABLE_CLASS_PER_IRQ], begin, end,
-                                       'with overridden title')
-
-    # per-process
-    per_proc_table.append_row_tuple((
-        mi.Process('zsh', pid=23),
-        mi.Integer(23),
-        mi.Boolean(False),
-        mi.Float(17.2832),
-        mi.String('typical'),
-        mi.Ratio(0.154),
-        mi.Timestamp(817232),
-    ))
-    per_proc_table.append_row_tuple((
-        mi.Process('chromium', tid=4987),
-        mi.Integer(19),
-        mi.Boolean(False),
-        mi.Float(-19457.15),
-        mi.String('beam'),
-        mi.Ratio(0.001),
-        mi.Timestamp(1194875),
-    ))
-    per_proc_table.append_row_tuple((
-        mi.Process('terminator'),
-        mi.Integer(-145),
-        mi.Unknown(),
-        mi.Float(22.22),
-        mi.String('dry'),
-        mi.Ratio(0.94),
-        mi.Timestamp(984987658),
-    ))
-    per_proc_table.append_row_tuple((
-        mi.Process(pid=1945, tid=4497),
-        mi.Integer(31416),
-        mi.Boolean(True),
-        mi.Float(17.34),
-        mi.Empty(),
-        mi.Ratio(1.5),
-        mi.Timestamp(154484512),
-    ))
-
-    # per-syscall
-    per_syscall_table.append_row_tuple((
-        mi.Syscall('read'),
-        mi.Duration(2398123),
-        mi.Size(8123982),
-        mi.Bitrate(223232),
-        mi.TimeRange(98233, 1293828),
-    ))
-    per_syscall_table.append_row_tuple((
-        mi.Syscall('write'),
-        mi.Duration(412434),
-        mi.Size(5645),
-        mi.Bitrate(25235343),
-        mi.TimeRange(5454, 2354523),
-    ))
-    per_syscall_table.append_row_tuple((
-        mi.Syscall('sync'),
-        mi.Duration(2312454),
-        mi.Size(23433),
-        mi.Empty(),
-        mi.TimeRange(12, 645634545454),
-    ))
-    per_syscall_table.append_row_tuple((
-        mi.Syscall('fstat'),
-        mi.Unknown(),
-        mi.Size(2343334),
-        mi.Bitrate(5864684),
-        mi.TimeRange(2134, 645634545),
-    ))
-    per_syscall_table.append_row_tuple((
-        mi.Syscall('sync'),
-        mi.Duration(564533),
-        mi.Size(56875),
-        mi.Bitrate(4494494494),
-        mi.Empty(),
-    ))
-
-    # per-interrupt
-    per_irq_table.append_row_tuple((
-        mi.Irq(True, 15, 'keyboard'),
-        mi.Fd(3),
-        mi.Path('/etc/passwd'),
-        mi.Cpu(2),
-        mi.Disk('sda'),
-        mi.Partition('sdb3'),
-        mi.NetIf('eth0'),
-    ))
-    per_irq_table.append_row_tuple((
-        mi.Irq(False, 7, 'soft-timer'),
-        mi.Fd(1),
-        mi.Path('/dev/null'),
-        mi.Unknown(),
-        mi.Disk('hda'),
-        mi.Partition('mmcblk0p2'),
-        mi.NetIf('enp3s25'),
-    ))
-    per_irq_table.append_row_tuple((
-        mi.Irq(True, 34),
-        mi.Empty(),
-        mi.Empty(),
-        mi.Cpu(1),
-        mi.Disk('sdc'),
-        mi.Partition('sdc3'),
-        mi.NetIf('lo'),
-    ))
-
-    # per-interrupt with subtitle
-    per_irq_table_sub.append_row_tuple((
-        mi.Irq(False, 12, 'soft-like-silk'),
-        mi.Fd(10),
-        mi.Path('/home/bob/meowmix.txt'),
-        mi.Cpu(0),
-        mi.Disk('sdb'),
-        mi.Partition('sdb2'),
-        mi.NetIf('eth1'),
-    ))
-    per_irq_table_sub.append_row_tuple((
-        mi.Irq(True, 1, 'mouse2'),
-        mi.Fd(5),
-        mi.Empty(),
-        mi.Cpu(7),
-        mi.Disk('vda'),
-        mi.Partition('vda3'),
-        mi.NetIf('wlp3s0'),
-    ))
-
-    # print
-    _print_tables([
-        per_proc_table,
-        per_syscall_table,
-        per_irq_table,
-        per_irq_table_sub,
-    ])
-
-
-def _mitest():
-    args = _parse_args()
-
-    if args.metadata:
-        _print_metadata()
-        return
-
-    if args.dynamic:
-        _print_dynamic_table(args.begin, args.end,
-                             args.dynamic_rows, args.dynamic_columns)
-    else:
-        _print_static_tables(args.begin, args.end)
-
-
-if __name__ == '__main__':
-    _mitest()
index 8558ec603b7278befafa113c1e3df3b1daa69d38..7e05dc3702df8ecb4d952e2910343fa497b4924b 100755 (executable)
@@ -117,6 +117,8 @@ def gen_parser(handle, fd, args):
                         fname = "_in"
                     if fname == "event":
                         fname = "_event"
+                    if fname == "from":
+                        fname = "_from"
                     fd.write("        %s = event[\"%s\"]\n" % (fname,
                              field.name))
                     fmt_str = fmt_str + field.name + " = %s, "
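
The new mapping is needed because 'from', like 'in', is a Python keyword: a
generated assignment such as 'from = event["from"]' would be a SyntaxError
(and a field literally named event would clobber the event parameter the
lookups rely on). A quick check:

    import keyword

    assert keyword.iskeyword('from') and keyword.iskeyword('in')
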
index b100a3eefcc9b9dc27cff79c4e8ac1c4eebe8f23..988e3f3211e7844317a6ec9cba514fe1df79a902 100755 (executable)
--- a/setup.py
+++ b/setup.py
@@ -78,8 +78,7 @@ setup(
         'lttnganalyses.common',
         'lttnganalyses.core',
         'lttnganalyses.cli',
-        'lttnganalyses.linuxautomaton',
-        'lttnganalyses.ascii_graph'
+        'lttnganalyses.linuxautomaton'
         ],
 
     entry_points={
@@ -127,5 +126,7 @@ setup(
 
     extras_require={
         'progressbar':  ["progressbar"]
-    }
+    },
+
+    test_suite='tests',
 )
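
With test_suite='tests' declared, the whole suite introduced below should be
runnable through setuptools, e.g. 'python3 setup.py test', assuming the
babeltrace Python bindings are available.
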
diff --git a/tests/__init__.py b/tests/__init__.py
new file mode 100644 (file)
index 0000000..8eca6e0
--- /dev/null
+++ b/tests/__init__.py
@@ -0,0 +1,21 @@
+# The MIT License (MIT)
+#
+# Copyright (C) 2016 - Antoine Busque <abusque@efficios.com>
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
diff --git a/tests/analysis_test.py b/tests/analysis_test.py
new file mode 100644 (file)
index 0000000..a9a7263
--- /dev/null
+++ b/tests/analysis_test.py
@@ -0,0 +1,62 @@
+# The MIT License (MIT)
+#
+# Copyright (C) 2016 - Julien Desfossez <jdesfossez@efficios.com>
+#                      Antoine Busque <abusque@efficios.com>
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+import os
+import subprocess
+import unittest
+from .trace_writer import TraceWriter
+
+
+class AnalysisTest(unittest.TestCase):
+    COMMON_OPTIONS = '--no-progress --skip-validation'
+
+    def set_up_class(self):
+        dirname = os.path.dirname(os.path.realpath(__file__))
+        self.data_path = dirname + '/expected/'
+        self.maxDiff = None
+        self.trace_writer = TraceWriter()
+        self.write_trace()
+
+    def tear_down_class(self):
+        self.trace_writer.rm_trace()
+
+    def write_trace(self):
+        raise NotImplementedError
+
+    def run(self, result=None):
+        self.set_up_class()
+        super().run(result)
+        self.tear_down_class()
+
+        return result
+
+    def get_expected_output(self, filename):
+        with open(self.data_path + filename, 'r') as expected_file:
+            return expected_file.read()
+
+    def get_cmd_output(self, exec_name, options=''):
+        cmd_fmt = './{} {} {} {}'
+        cmd = cmd_fmt.format(exec_name, self.COMMON_OPTIONS,
+                             options, self.trace_writer.trace_root)
+
+        return subprocess.getoutput(cmd)
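
Note the design choice here: rather than unittest's setUpClass/tearDownClass,
run() is overridden so that every test method writes its own fresh trace and
removes it afterwards. Tests stay fully independent, at the cost of
regenerating the trace once per test method.
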
diff --git a/tests/expected/cputop.txt b/tests/expected/cputop.txt
new file mode 100644 (file)
index 0000000..d249086
--- /dev/null
+++ b/tests/expected/cputop.txt
@@ -0,0 +1,15 @@
+Timerange: [1969-12-31 19:00:01.000000000, 1969-12-31 19:00:11.000000000]
+Per-TID Usage                                                                             Process                   Migrations Priorities
+################################################################################
+████████████████████████████████████████████████████████████████████████████████ 100.00 % prog100pc-cpu5 (42)                0 [20]
+████████████████████                                                              25.00 % prog25pc-cpu1 (30665)              0 [20]
+████████████████                                                                  20.00 % prog20pc-cpu0 (30664)              0 [20]
+
+Per-CPU Usage
+################################################################################
+████████████████                                                                  20.00 % CPU 0
+████████████████████                                                              25.00 % CPU 1
+████████████████████████████████████████████████████████████████████████████████ 100.00 % CPU 5
+
+
+Total CPU Usage: 48.33%
diff --git a/tests/expected/iolatencytop.txt b/tests/expected/iolatencytop.txt
new file mode 100644 (file)
index 0000000..3941470
--- /dev/null
+++ b/tests/expected/iolatencytop.txt
@@ -0,0 +1,14 @@
+Timerange: [1969-12-31 19:00:01.000000000, 1969-12-31 19:00:01.024000000]
+
+Top system call latencies open (usec)
+Begin               End                  Name             Duration (usec)         Size  Proc                     PID      Filename      
+[19:00:01.023000000,19:00:01.024000000]  open                    1000.000          N/A  app3                     101      test/open/file (fd=42)
+
+Top system call latencies read (usec)
+Begin               End                  Name             Duration (usec)         Size  Proc                     PID      Filename      
+[19:00:01.008000000,19:00:01.009000000]  read                    1000.000      100   B  app2                     100      testfile (fd=3)
+[19:00:01.012000000,19:00:01.013000000]  read                    1000.000       42   B  app3                     101      unknown (fd=3)
+
+Top system call latencies write (usec)
+Begin               End                  Name             Duration (usec)         Size  Proc                     PID      Filename      
+[19:00:01.004000000,19:00:01.005000000]  write                   1000.000       10   B  app                      99       unknown (fd=4)
\ No newline at end of file
diff --git a/tests/expected/iousagetop.txt b/tests/expected/iousagetop.txt
new file mode 100644 (file)
index 0000000..e62ce85
--- /dev/null
+++ b/tests/expected/iousagetop.txt
@@ -0,0 +1,54 @@
+Timerange: [1969-12-31 19:00:01.000000000, 1969-12-31 19:00:01.024000000]
+Per-process I/O Read                                                                     Process                   Disk       Net        Unknown   
+################################################################################
+████████████████████████████████████████████████████████████████████████████████ 100   B app2 (100)                     0   B      0   B    100   B
+█████████████████████████████████                                                 42   B app3 (unknown (tid=101))       0   B      0   B     42   B
+                                                                                   0   B app (99)                       0   B      0   B      0   B
+
+Per-process I/O Write                                                                   Process                   Disk       Net        Unknown   
+################################################################################
+████████████████████████████████████████████████████████████████████████████████ 10   B app (99)                       0   B      0   B     10   B
+                                                                                  0   B app2 (100)                     0   B      0   B      0   B
+                                                                                  0   B app3 (unknown (tid=101))       0   B      0   B      0   B
+
+Per-file I/O Read                                                                        Path
+################################################################################
+████████████████████████████████████████████████████████████████████████████████ 100   B testfile
+█████████████████████████████████                                                 42   B unknown (app3)
+
+Per-file I/O Write                                                                      Path
+################################################################################
+████████████████████████████████████████████████████████████████████████████████ 10   B unknown (app)
+
+Block I/O Read                                                                            Process
+################################################################################
+████████████████████████████████████████████████████████████████████████████████ 5.00 KiB app (pid=99)
+
+Block I/O Write                                                                            Process
+################################################################################
+████████████████████████████████████████████████████████████████████████████████ 10.00 KiB app3 (pid=unknown (tid=101))
+
+Disk Requests Sector Count                                                                  Disk
+################################################################################
+████████████████████████████████████████████████████████████████████████████████ 20 sectors (8,0)
+████████████████████████████████████████                                         10 sectors (252,0)
+
+Disk Request Count                                                                          Disk
+################################################################################
+████████████████████████████████████████████████████████████████████████████████ 1 requests (252,0)
+████████████████████████████████████████████████████████████████████████████████ 1 requests (8,0)
+
+Disk Request Average Latency                                                             Disk
+################################################################################
+████████████████████████████████████████████████████████████████████████████████ 1.00 ms (252,0)
+████████████████████████████████████████████████████████████████████████████████ 1.00 ms (8,0)
+
+Network Received Bytes                                                                   Interface
+################################################################################
+████████████████████████████████████████████████████████████████████████████████ 200   B wlan0
+████████████████████████████████████████                                         100   B wlan1
+
+Network Sent Bytes                                                                       Interface
+################################################################################
+████████████████████████████████████████████████████████████████████████████████ 100   B wlan0
+                                                                                   0   B wlan1
diff --git a/tests/expected/irqlog.txt b/tests/expected/irqlog.txt
new file mode 100644 (file)
index 0000000..a8b041e
--- /dev/null
+++ b/tests/expected/irqlog.txt
@@ -0,0 +1,19 @@
+Timerange: [1969-12-31 19:00:01.000000000, 1969-12-31 19:00:01.045000000]
+Begin                End                   Duration (us)  CPU  Type         #  Name                  
+[19:00:01.007000000, 19:00:01.008000000]        1000.000    1  SoftIRQ      1  TIMER_SOFTIRQ (raised at 19:00:01.000000000)
+[19:00:01.006000000, 19:00:01.009000000]        3000.000    3  SoftIRQ      1  TIMER_SOFTIRQ (raised at 19:00:01.001000000)
+[19:00:01.010000000, 19:00:01.012000000]        2000.000    1  SoftIRQ      9  RCU_SOFTIRQ (raised at 19:00:01.002000000)
+[19:00:01.011000000, 19:00:01.013000000]        2000.000    3  SoftIRQ      7  SCHED_SOFTIRQ (raised at 19:00:01.005000000)
+[19:00:01.014000000, 19:00:01.015000000]        1000.000    3  SoftIRQ      9  RCU_SOFTIRQ (raised at 19:00:01.004000000)
+[19:00:01.016000000, 19:00:01.018000000]        2000.000    0  IRQ         41  ahci                  
+[19:00:01.019000000, 19:00:01.020000000]        1000.000    0  SoftIRQ      4  BLOCK_SOFTIRQ (raised at 19:00:01.017000000)
+[19:00:01.021000000, 19:00:01.023000000]        2000.000    0  IRQ         41  ahci                  
+[19:00:01.024000000, 19:00:01.025000000]        1000.000    0  SoftIRQ      4  BLOCK_SOFTIRQ (raised at 19:00:01.022000000)
+[19:00:01.026000000, 19:00:01.028000000]        2000.000    0  IRQ         41  ahci                  
+[19:00:01.029000000, 19:00:01.030000000]        1000.000    0  SoftIRQ      4  BLOCK_SOFTIRQ (raised at 19:00:01.027000000)
+[19:00:01.031000000, 19:00:01.033000000]        2000.000    0  IRQ         41  ahci                  
+[19:00:01.034000000, 19:00:01.035000000]        1000.000    0  SoftIRQ      4  BLOCK_SOFTIRQ (raised at 19:00:01.032000000)
+[19:00:01.036000000, 19:00:01.038000000]        2000.000    0  IRQ         41  ahci                  
+[19:00:01.039000000, 19:00:01.040000000]        1000.000    0  SoftIRQ      4  BLOCK_SOFTIRQ (raised at 19:00:01.037000000)
+[19:00:01.041000000, 19:00:01.043000000]        2000.000    0  IRQ         41  ahci                  
+[19:00:01.044000000, 19:00:01.045000000]        1000.000    0  SoftIRQ      4  BLOCK_SOFTIRQ (raised at 19:00:01.042000000)
\ No newline at end of file
diff --git a/tests/expected/irqstats.txt b/tests/expected/irqstats.txt
new file mode 100644 (file)
index 0000000..a5d2807
--- /dev/null
+++ b/tests/expected/irqstats.txt
@@ -0,0 +1,13 @@
+Timerange: [1969-12-31 19:00:01.000000000, 1969-12-31 19:00:01.045000000]
+Hard IRQ                                             Duration (us)
+                       count          min          avg          max        stdev       
+----------------------------------------------------------------------------------|
+41: <ahci>                 6     2000.000     2000.000     2000.000        0.000  |
+
+Soft IRQ                                             Duration (us)                                        Raise latency (us)
+                       count          min          avg          max        stdev  |  count          min          avg          max        stdev       
+----------------------------------------------------------------------------------|------------------------------------------------------------
+1:  <TIMER_SOFTIRQ>        2     1000.000     2000.000     3000.000     1414.214  |      2     5000.000     6000.000     7000.000     1414.214
+4:  <BLOCK_SOFTIRQ>        6     1000.000     1000.000     1000.000        0.000  |      6     2000.000     2000.000     2000.000        0.000
+7:  <SCHED_SOFTIRQ>        1     2000.000     2000.000     2000.000            ?  |      1     6000.000     6000.000     6000.000            ?
+9:  <RCU_SOFTIRQ>          2     1000.000     1500.000     2000.000      707.107  |      2     8000.000     9000.000    10000.000     1414.214
\ No newline at end of file
diff --git a/tests/gen_ctfwriter.py b/tests/gen_ctfwriter.py
new file mode 100755 (executable)
index 0000000..ccb6ba8
--- /dev/null
+++ b/tests/gen_ctfwriter.py
@@ -0,0 +1,124 @@
+#!/usr/bin/env python3
+#
+# The MIT License (MIT)
+#
+# Copyright (C) 2016 - Julien Desfossez <jdesfossez@efficios.com>
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+# Helper tool to generate CTFWriter code from the metadata of an existing
+# trace.
+# It is used to add code in TraceTest.py.
+# Only the basic types are supported; a warning is generated if a field
+# cannot be handled, so it is easy to inspect the metadata and fix it by hand.
+
+import sys
+import argparse
+
+from babeltrace import TraceCollection, CTFScope, CTFTypeId
+
+
+def get_definition_type(field, event):
+    if field.type == CTFTypeId.INTEGER:
+        signed = ''
+        if field.signedness == 0:
+            signed = 'u'
+        length = field.length
+        print('        self.%s.add_field(self.%sint%s_type, "_%s")' %
+              (event.name, signed, length, field.name))
+    elif field.type == CTFTypeId.ARRAY:
+        print('        self.%s.add_field(self.array%s_type, "_%s")' %
+              (event.name, field.length, field.name))
+    elif field.type == CTFTypeId.STRING:
+        print('        self.%s.add_field(self.string_type, "_%s")' %
+              (event.name, field.name))
+    else:
+        print('        # FIXME %s.%s: Unhandled type %d' % (event.name,
+                                                            field.name,
+                                                            field.type))
+
+
+def gen_define(event):
+        fields = []
+        print('    def define_%s(self):' % (event.name))
+        print('        self.%s = CTFWriter.EventClass("%s")' %
+              (event.name, event.name))
+        for field in event.fields:
+            if field.scope == CTFScope.EVENT_FIELDS:
+                fname = field.name
+                fields.append(fname)
+                get_definition_type(field, event)
+        print('        self.add_event(self.%s)' % event.name)
+        print('')
+        return fields
+
+
+def gen_write(event, fields):
+    # Emit a write_<event>() method that sets each payload field
+    f_list = ', '.join(fields)
+    print('    def write_%s(self, time_ms, cpu_id, %s):' %
+          (event.name, f_list))
+    print('        event = CTFWriter.Event(self.%s)' % (event.name))
+    print('        self.clock.time = time_ms * 1000000')
+    print('        self.set_int(event.payload("_cpu_id"), cpu_id)')
+    for field in event.fields:
+        if field.scope == CTFScope.EVENT_FIELDS:
+            fname = field.name
+            if field.type == CTFTypeId.INTEGER:
+                print('        self.set_int(event.payload("_%s"), %s)' %
+                      (fname, fname))
+            elif field.type == CTFTypeId.ARRAY:
+                print('        self.set_char_array(event.payload("_%s"), '
+                      '%s)' % (fname, fname))
+            elif field.type == CTFTypeId.STRING:
+                print('        self.set_string(event.payload("_%s"), %s)' %
+                      (fname, fname))
+            else:
+                print('        # FIXME %s.%s: Unhandled type %d' %
+                      (event.name, field.name, field.type))
+    print('        self.stream.append_event(event)')
+    print('        self.stream.flush()')
+    print('')
+
+
+def gen_parser(handle, args):
+    for h in handle.values():
+        for event in h.events:
+            fields = gen_define(event)
+            gen_write(event, fields)
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser(description='CTFWriter code generator')
+    parser.add_argument('path', metavar="<path/to/trace>", help='Trace path')
+    args = parser.parse_args()
+
+    traces = TraceCollection()
+    handle = traces.add_traces_recursive(args.path, "ctf")
+    if handle is None:
+        sys.exit(1)
+
+    gen_parser(handle, args)
+
+    for h in handle.values():
+        traces.remove_trace(h)
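For reference, here is roughly what the generator above prints for a hypothetical event with one unsigned 32-bit field and one string field (the event and field names are invented for illustration; the shape follows the print statements in gen_define() and gen_write()):

    def define_my_event(self):
        self.my_event = CTFWriter.EventClass("my_event")
        self.my_event.add_field(self.uint32_type, "_fd")
        self.my_event.add_field(self.string_type, "_name")
        self.add_event(self.my_event)

    def write_my_event(self, time_ms, cpu_id, fd, name):
        event = CTFWriter.Event(self.my_event)
        self.clock.time = time_ms * 1000000
        self.set_int(event.payload("_cpu_id"), cpu_id)
        self.set_int(event.payload("_fd"), fd)
        self.set_string(event.payload("_name"), name)
        self.stream.append_event(event)
        self.stream.flush()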
diff --git a/tests/test_cputop.py b/tests/test_cputop.py
new file mode 100644 (file)
index 0000000..77256ce
--- /dev/null
@@ -0,0 +1,47 @@
+# The MIT License (MIT)
+#
+# Copyright (C) 2016 - Julien Desfossez <jdesfossez@efficios.com>
+#                      Antoine Busque <abusque@efficios.com>
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+from .analysis_test import AnalysisTest
+
+
+class CpuTest(AnalysisTest):
+    def write_trace(self):
+        # prog100pc-cpu5 runs on CPU 5 for the whole 10s trace: 100%
+        self.trace_writer.write_sched_switch(1000, 5, 'swapper/5',
+                                             0, 'prog100pc-cpu5', 42)
+        # prog20pc-cpu0 runs for 2s on CPU 0, alternating with swapper
+        # every 100ms: 20% of the trace
+        self.trace_writer.sched_switch_50pc(1100, 5000, 0, 100, 'swapper/0',
+                                            0, 'prog20pc-cpu0', 30664)
+        # prog25pc-cpu1 runs for 2.5s on CPU 1, alternating with swapper
+        # every 100ms: 25% of the trace
+        self.trace_writer.sched_switch_50pc(5100, 10000, 1, 100, 'swapper/1',
+                                            0, 'prog25pc-cpu1', 30665)
+        # switch out prog100pc-cpu5
+        self.trace_writer.write_sched_switch(11000, 5, 'prog100pc-cpu5',
+                                             42, 'swapper/5', 0)
+        self.trace_writer.flush()
+
+    def test_cputop(self):
+        expected = self.get_expected_output('cputop.txt')
+        result = self.get_cmd_output('lttng-cputop')
+
+        self.assertMultiLineEqual(result, expected)
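The percentages in the comments above follow directly from the timestamps: sched_switch_50pc() schedules the program in for `period` ms out of every 2 x `period` ms. A quick sketch of the arithmetic (times in ms, from the test above):

    period = 100                                             # ms in, ms out
    runs_20 = len(range(1100, 5000, 2 * period)) * period    # 2000 ms
    runs_25 = len(range(5100, 10000, 2 * period)) * period   # 2500 ms
    span = 11000 - 1000                                      # 10 s trace
    print(runs_20 / span, runs_25 / span)                    # 0.2, 0.25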
diff --git a/tests/test_io.py b/tests/test_io.py
new file mode 100644 (file)
index 0000000..9eb0e56
--- /dev/null
@@ -0,0 +1,71 @@
+# The MIT License (MIT)
+#
+# Copyright (C) 2016 - Julien Desfossez <jdesfossez@efficios.com>
+#                      Antoine Busque <abusque@efficios.com>
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+from .analysis_test import AnalysisTest
+
+
+class IoTest(AnalysisTest):
+    def write_trace(self):
+        # app (99) is known at statedump
+        self.trace_writer.write_lttng_statedump_process_state(
+            1000, 0, 99, 99, 99, 99, 98, 98, 'app', 0, 5, 0, 5, 0)
+        # app2 (100) is unknown at statedump, but its FD 3 ('testfile')
+        # is defined at statedump
+        self.trace_writer.write_lttng_statedump_file_descriptor(
+            1001, 0, 100, 3, 0, 0, 'testfile')
+        # app writes 10 bytes to FD 4
+        self.trace_writer.write_sched_switch(1002, 0, 'swapper/0', 0,
+                                             'app', 99)
+        self.trace_writer.write_syscall_write(1004, 0, 1, 4, 0xabcd, 10, 10)
+        # app2 reads 100 bytes from FD 3
+        self.trace_writer.write_sched_switch(1006, 0, 'app', 99, 'app2', 100)
+        self.trace_writer.write_syscall_read(1008, 0, 1, 3, 0xcafe, 100, 100)
+        # app3 and its FD 3 are completely unknown at statedump; it tries
+        # to read 100 bytes from FD 3 but only gets 42
+        self.trace_writer.write_sched_switch(1010, 0, 'app2', 100,
+                                             'app3', 101)
+        self.trace_writer.write_syscall_read(1012, 0, 1, 3, 0xcafe, 100, 42)
+        # block write
+        self.trace_writer.write_block_rq_issue(1015, 0, 264241152, 33, 10,
+                                               40, 99, 0, 0, '', 'app')
+        self.trace_writer.write_block_rq_complete(1016, 0, 264241152, 33,
+                                                  10, 0, 0, 0, '')
+        # block read
+        self.trace_writer.write_block_rq_issue(1017, 0, 8388608, 33, 20,
+                                               90, 101, 1, 0, '', 'app3')
+        self.trace_writer.write_block_rq_complete(1018, 0, 8388608, 33,
+                                                  20, 0, 1, 0, '')
+        # net xmit
+        self.trace_writer.write_net_dev_xmit(1020, 2, 0xff, 32, 100, 'wlan0')
+        # net receive
+        self.trace_writer.write_netif_receive_skb(1021, 1, 0xff, 100, 'wlan1')
+        self.trace_writer.write_netif_receive_skb(1022, 1, 0xff, 200, 'wlan0')
+        # syscall open
+        self.trace_writer.write_syscall_open(1023, 0, 1, 'test/open/file',
+                                             0, 0, 42)
+        self.trace_writer.flush()
+
+    def test_iousagetop(self):
+        expected = self.get_expected_output('iousagetop.txt')
+        result = self.get_cmd_output('lttng-iousagetop')
+
+        self.assertMultiLineEqual(result, expected)
+
+    def test_iolatencytop(self):
+        expected = self.get_expected_output('iolatencytop.txt')
+        result = self.get_cmd_output('lttng-iolatencytop')
+
+        self.assertMultiLineEqual(result, expected)
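The dev values passed to the block events above appear to use the kernel's internal dev_t encoding (major << 20 | minor), which is what the block tracepoints record; a small sketch to decode them (decode_dev is an invented helper, not part of the test suite):

    def decode_dev(dev):
        # Kernel-internal dev_t: 12-bit major, 20-bit minor
        return dev >> 20, dev & ((1 << 20) - 1)

    print(decode_dev(264241152))  # (252, 0): a dynamically assigned major
    print(decode_dev(8388608))    # (8, 0): conventionally sda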
diff --git a/tests/test_irq.py b/tests/test_irq.py
new file mode 100644 (file)
index 0000000..d792e68
--- /dev/null
@@ -0,0 +1,87 @@
+# The MIT License (MIT)
+#
+# Copyright (C) 2016 - Julien Desfossez <jdesfossez@efficios.com>
+#                      Antoine Busque <abusque@efficios.com>
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+from .analysis_test import AnalysisTest
+
+
+class IrqTest(AnalysisTest):
+    def write_trace(self):
+        # Interleaved soft IRQ raise/entry/exit on several CPUs, including
+        # an exit with no preceding entry (timestamp 1003)
+        self.trace_writer.write_softirq_raise(1000, 1, 1)
+        self.trace_writer.write_softirq_raise(1001, 3, 1)
+        self.trace_writer.write_softirq_raise(1002, 1, 9)
+        self.trace_writer.write_softirq_exit(1003, 0, 4)
+        self.trace_writer.write_softirq_raise(1004, 3, 9)
+        self.trace_writer.write_softirq_raise(1005, 3, 7)
+        self.trace_writer.write_softirq_entry(1006, 3, 1)
+        self.trace_writer.write_softirq_entry(1007, 1, 1)
+        self.trace_writer.write_softirq_exit(1008, 1, 1)
+        self.trace_writer.write_softirq_exit(1009, 3, 1)
+        self.trace_writer.write_softirq_entry(1010, 1, 9)
+        self.trace_writer.write_softirq_entry(1011, 3, 7)
+        self.trace_writer.write_softirq_exit(1012, 1, 9)
+        self.trace_writer.write_softirq_exit(1013, 3, 7)
+        self.trace_writer.write_softirq_entry(1014, 3, 9)
+        self.trace_writer.write_softirq_exit(1015, 3, 9)
+        # six identical ahci (IRQ 41) bursts, each raising soft IRQ 4
+        self.trace_writer.write_irq_handler_entry(1016, 0, 41, 'ahci')
+        self.trace_writer.write_softirq_raise(1017, 0, 4)
+        self.trace_writer.write_irq_handler_exit(1018, 0, 41, 1)
+        self.trace_writer.write_softirq_entry(1019, 0, 4)
+        self.trace_writer.write_softirq_exit(1020, 0, 4)
+        self.trace_writer.write_irq_handler_entry(1021, 0, 41, 'ahci')
+        self.trace_writer.write_softirq_raise(1022, 0, 4)
+        self.trace_writer.write_irq_handler_exit(1023, 0, 41, 1)
+        self.trace_writer.write_softirq_entry(1024, 0, 4)
+        self.trace_writer.write_softirq_exit(1025, 0, 4)
+        self.trace_writer.write_irq_handler_entry(1026, 0, 41, 'ahci')
+        self.trace_writer.write_softirq_raise(1027, 0, 4)
+        self.trace_writer.write_irq_handler_exit(1028, 0, 41, 1)
+        self.trace_writer.write_softirq_entry(1029, 0, 4)
+        self.trace_writer.write_softirq_exit(1030, 0, 4)
+        self.trace_writer.write_irq_handler_entry(1031, 0, 41, 'ahci')
+        self.trace_writer.write_softirq_raise(1032, 0, 4)
+        self.trace_writer.write_irq_handler_exit(1033, 0, 41, 1)
+        self.trace_writer.write_softirq_entry(1034, 0, 4)
+        self.trace_writer.write_softirq_exit(1035, 0, 4)
+        self.trace_writer.write_irq_handler_entry(1036, 0, 41, 'ahci')
+        self.trace_writer.write_softirq_raise(1037, 0, 4)
+        self.trace_writer.write_irq_handler_exit(1038, 0, 41, 1)
+        self.trace_writer.write_softirq_entry(1039, 0, 4)
+        self.trace_writer.write_softirq_exit(1040, 0, 4)
+        self.trace_writer.write_irq_handler_entry(1041, 0, 41, 'ahci')
+        self.trace_writer.write_softirq_raise(1042, 0, 4)
+        self.trace_writer.write_irq_handler_exit(1043, 0, 41, 1)
+        self.trace_writer.write_softirq_entry(1044, 0, 4)
+        self.trace_writer.write_softirq_exit(1045, 0, 4)
+        self.trace_writer.flush()
+
+    def test_irqstats(self):
+        expected = self.get_expected_output('irqstats.txt')
+        result = self.get_cmd_output('lttng-irqstats')
+
+        self.assertMultiLineEqual(result, expected)
+
+    def test_irqlog(self):
+        expected = self.get_expected_output('irqlog.txt')
+        result = self.get_cmd_output('lttng-irqlog')
+
+        self.assertMultiLineEqual(result, expected)
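Each ahci burst above follows the same fixed timing, so the latencies the IRQ analyses should report can be read straight off the timestamps; a sketch for the first burst (times in ms):

    hard_irq = 1018 - 1016        # irq_handler_entry -> exit: 2 ms
    raise_to_entry = 1019 - 1017  # softirq_raise -> softirq_entry: 2 ms
    soft_irq = 1020 - 1019        # softirq_entry -> exit: 1 ms
    print(hard_irq, raise_to_entry, soft_irq)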
diff --git a/tests/trace_writer.py b/tests/trace_writer.py
new file mode 100644 (file)
index 0000000..dc69051
--- /dev/null
@@ -0,0 +1,534 @@
+# The MIT License (MIT)
+#
+# Copyright (C) 2016 - Julien Desfossez <jdesfossez@efficios.com>
+#                      Antoine Busque <abusque@efficios.com>
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+import sys
+import os
+import shutil
+import tempfile
+from babeltrace import CTFWriter, CTFStringEncoding
+
+
+class TraceWriter:
+    def __init__(self):
+        self._trace_root = tempfile.mkdtemp()
+        self.trace_path = os.path.join(self.trace_root, "kernel")
+        self.create_writer()
+        self.create_stream_class()
+        self.define_base_types()
+        self.define_events()
+        self.create_stream()
+
+    @property
+    def trace_root(self):
+        return self._trace_root
+
+    def rm_trace(self):
+        shutil.rmtree(self.trace_root)
+
+    def flush(self):
+        self.writer.flush_metadata()
+        self.stream.flush()
+
+    def create_writer(self):
+        self.clock = CTFWriter.Clock("A_clock")
+        self.clock.description = "Simple clock"
+        self.writer = CTFWriter.Writer(self.trace_path)
+        self.writer.add_clock(self.clock)
+        self.writer.add_environment_field("Python_version",
+                                          str(sys.version_info))
+        self.writer.add_environment_field("tracer_major", 2)
+        self.writer.add_environment_field("tracer_minor", 8)
+        self.writer.add_environment_field("tracer_patchlevel", 0)
+
+    def create_stream_class(self):
+        self.stream_class = CTFWriter.StreamClass("test_stream")
+        self.stream_class.clock = self.clock
+
+    def define_base_types(self):
+        self.char8_type = CTFWriter.IntegerFieldDeclaration(8)
+        self.char8_type.signed = True
+        self.char8_type.encoding = CTFStringEncoding.UTF8
+        self.char8_type.alignment = 8
+
+        self.int16_type = CTFWriter.IntegerFieldDeclaration(16)
+        self.int16_type.signed = True
+        self.int16_type.alignment = 8
+
+        self.uint16_type = CTFWriter.IntegerFieldDeclaration(16)
+        self.uint16_type.signed = False
+        self.uint16_type.alignment = 8
+
+        self.int32_type = CTFWriter.IntegerFieldDeclaration(32)
+        self.int32_type.signed = True
+        self.int32_type.alignment = 8
+
+        self.uint32_type = CTFWriter.IntegerFieldDeclaration(32)
+        self.uint32_type.signed = False
+        self.uint32_type.alignment = 8
+
+        self.int64_type = CTFWriter.IntegerFieldDeclaration(64)
+        self.int64_type.signed = True
+        self.int64_type.alignment = 8
+
+        self.uint64_type = CTFWriter.IntegerFieldDeclaration(64)
+        self.uint64_type.signed = False
+        self.uint64_type.alignment = 8
+
+        self.array16_type = CTFWriter.ArrayFieldDeclaration(self.char8_type,
+                                                            16)
+
+        self.string_type = CTFWriter.StringFieldDeclaration()
+
+    def add_event(self, event):
+        event.add_field(self.uint32_type, "_cpu_id")
+        self.stream_class.add_event_class(event)
+
+    def define_sched_switch(self):
+        self.sched_switch = CTFWriter.EventClass("sched_switch")
+        self.sched_switch.add_field(self.array16_type, "_prev_comm")
+        self.sched_switch.add_field(self.int32_type, "_prev_tid")
+        self.sched_switch.add_field(self.int32_type, "_prev_prio")
+        self.sched_switch.add_field(self.int64_type, "_prev_state")
+        self.sched_switch.add_field(self.array16_type, "_next_comm")
+        self.sched_switch.add_field(self.int32_type, "_next_tid")
+        self.sched_switch.add_field(self.int32_type, "_next_prio")
+        self.add_event(self.sched_switch)
+
+    def define_softirq_raise(self):
+        self.softirq_raise = CTFWriter.EventClass("softirq_raise")
+        self.softirq_raise.add_field(self.uint32_type, "_vec")
+        self.add_event(self.softirq_raise)
+
+    def define_softirq_entry(self):
+        self.softirq_entry = CTFWriter.EventClass("softirq_entry")
+        self.softirq_entry.add_field(self.uint32_type, "_vec")
+        self.add_event(self.softirq_entry)
+
+    def define_softirq_exit(self):
+        self.softirq_exit = CTFWriter.EventClass("softirq_exit")
+        self.softirq_exit.add_field(self.uint32_type, "_vec")
+        self.add_event(self.softirq_exit)
+
+    def define_irq_handler_entry(self):
+        self.irq_handler_entry = CTFWriter.EventClass("irq_handler_entry")
+        self.irq_handler_entry.add_field(self.int32_type, "_irq")
+        self.irq_handler_entry.add_field(self.string_type, "_name")
+        self.add_event(self.irq_handler_entry)
+
+    def define_irq_handler_exit(self):
+        self.irq_handler_exit = CTFWriter.EventClass("irq_handler_exit")
+        self.irq_handler_exit.add_field(self.int32_type, "_irq")
+        self.irq_handler_exit.add_field(self.int32_type, "_ret")
+        self.add_event(self.irq_handler_exit)
+
+    def define_syscall_entry_write(self):
+        self.syscall_entry_write = CTFWriter.EventClass("syscall_entry_write")
+        self.syscall_entry_write.add_field(self.uint32_type, "_fd")
+        self.syscall_entry_write.add_field(self.uint64_type, "_buf")
+        self.syscall_entry_write.add_field(self.uint64_type, "_count")
+        self.add_event(self.syscall_entry_write)
+
+    def define_syscall_exit_write(self):
+        self.syscall_exit_write = CTFWriter.EventClass("syscall_exit_write")
+        self.syscall_exit_write.add_field(self.int64_type, "_ret")
+        self.add_event(self.syscall_exit_write)
+
+    def define_syscall_entry_read(self):
+        self.syscall_entry_read = CTFWriter.EventClass("syscall_entry_read")
+        self.syscall_entry_read.add_field(self.uint32_type, "_fd")
+        self.syscall_entry_read.add_field(self.uint64_type, "_count")
+        self.add_event(self.syscall_entry_read)
+
+    def define_syscall_exit_read(self):
+        self.syscall_exit_read = CTFWriter.EventClass("syscall_exit_read")
+        self.syscall_exit_read.add_field(self.uint64_type, "_buf")
+        self.syscall_exit_read.add_field(self.int64_type, "_ret")
+        self.add_event(self.syscall_exit_read)
+
+    def define_syscall_entry_open(self):
+        self.syscall_entry_open = CTFWriter.EventClass("syscall_entry_open")
+        self.syscall_entry_open.add_field(self.string_type, "_filename")
+        self.syscall_entry_open.add_field(self.int32_type, "_flags")
+        self.syscall_entry_open.add_field(self.uint16_type, "_mode")
+        self.add_event(self.syscall_entry_open)
+
+    def define_syscall_exit_open(self):
+        self.syscall_exit_open = CTFWriter.EventClass("syscall_exit_open")
+        self.syscall_exit_open.add_field(self.int64_type, "_ret")
+        self.add_event(self.syscall_exit_open)
+
+    def define_lttng_statedump_process_state(self):
+        self.lttng_statedump_process_state = CTFWriter.EventClass(
+            "lttng_statedump_process_state")
+        self.lttng_statedump_process_state.add_field(self.int32_type, "_tid")
+        self.lttng_statedump_process_state.add_field(self.int32_type, "_vtid")
+        self.lttng_statedump_process_state.add_field(self.int32_type, "_pid")
+        self.lttng_statedump_process_state.add_field(self.int32_type, "_vpid")
+        self.lttng_statedump_process_state.add_field(self.int32_type, "_ppid")
+        self.lttng_statedump_process_state.add_field(self.int32_type, "_vppid")
+        self.lttng_statedump_process_state.add_field(self.array16_type,
+                                                     "_name")
+        self.lttng_statedump_process_state.add_field(self.int32_type, "_type")
+        self.lttng_statedump_process_state.add_field(self.int32_type, "_mode")
+        self.lttng_statedump_process_state.add_field(self.int32_type,
+                                                     "_submode")
+        self.lttng_statedump_process_state.add_field(self.int32_type,
+                                                     "_status")
+        self.lttng_statedump_process_state.add_field(self.int32_type,
+                                                     "_ns_level")
+        self.add_event(self.lttng_statedump_process_state)
+
+    def define_lttng_statedump_file_descriptor(self):
+        self.lttng_statedump_file_descriptor = CTFWriter.EventClass(
+            "lttng_statedump_file_descriptor")
+        self.lttng_statedump_file_descriptor.add_field(self.int32_type, "_pid")
+        self.lttng_statedump_file_descriptor.add_field(self.int32_type, "_fd")
+        self.lttng_statedump_file_descriptor.add_field(self.uint32_type,
+                                                       "_flags")
+        self.lttng_statedump_file_descriptor.add_field(self.uint32_type,
+                                                       "_fmode")
+        self.lttng_statedump_file_descriptor.add_field(self.string_type,
+                                                       "_filename")
+        self.add_event(self.lttng_statedump_file_descriptor)
+
+    def define_sched_wakeup(self):
+        self.sched_wakeup = CTFWriter.EventClass("sched_wakeup")
+        self.sched_wakeup.add_field(self.array16_type, "_comm")
+        self.sched_wakeup.add_field(self.int32_type, "_tid")
+        self.sched_wakeup.add_field(self.int32_type, "_prio")
+        self.sched_wakeup.add_field(self.int32_type, "_success")
+        self.sched_wakeup.add_field(self.int32_type, "_target_cpu")
+        self.add_event(self.sched_wakeup)
+
+    def define_sched_waking(self):
+        self.sched_waking = CTFWriter.EventClass("sched_waking")
+        self.sched_waking.add_field(self.array16_type, "_comm")
+        self.sched_waking.add_field(self.int32_type, "_tid")
+        self.sched_waking.add_field(self.int32_type, "_prio")
+        self.sched_waking.add_field(self.int32_type, "_target_cpu")
+        self.add_event(self.sched_waking)
+
+    def define_block_rq_complete(self):
+        self.block_rq_complete = CTFWriter.EventClass("block_rq_complete")
+        self.block_rq_complete.add_field(self.uint32_type, "_dev")
+        self.block_rq_complete.add_field(self.uint64_type, "_sector")
+        self.block_rq_complete.add_field(self.uint32_type, "_nr_sector")
+        self.block_rq_complete.add_field(self.int32_type, "_errors")
+        self.block_rq_complete.add_field(self.uint32_type, "_rwbs")
+        self.block_rq_complete.add_field(self.uint64_type, "__cmd_length")
+        self.block_rq_complete.add_field(self.array16_type, "_cmd")
+        self.add_event(self.block_rq_complete)
+
+    def define_block_rq_issue(self):
+        self.block_rq_issue = CTFWriter.EventClass("block_rq_issue")
+        self.block_rq_issue.add_field(self.uint32_type, "_dev")
+        self.block_rq_issue.add_field(self.uint64_type, "_sector")
+        self.block_rq_issue.add_field(self.uint32_type, "_nr_sector")
+        self.block_rq_issue.add_field(self.uint32_type, "_bytes")
+        self.block_rq_issue.add_field(self.int32_type, "_tid")
+        self.block_rq_issue.add_field(self.uint32_type, "_rwbs")
+        self.block_rq_issue.add_field(self.uint64_type, "__cmd_length")
+        self.block_rq_issue.add_field(self.array16_type, "_cmd")
+        self.block_rq_issue.add_field(self.array16_type, "_comm")
+        self.add_event(self.block_rq_issue)
+
+    def define_net_dev_xmit(self):
+        self.net_dev_xmit = CTFWriter.EventClass("net_dev_xmit")
+        self.net_dev_xmit.add_field(self.uint64_type, "_skbaddr")
+        self.net_dev_xmit.add_field(self.int32_type, "_rc")
+        self.net_dev_xmit.add_field(self.uint32_type, "_len")
+        self.net_dev_xmit.add_field(self.string_type, "_name")
+        self.add_event(self.net_dev_xmit)
+
+    def define_netif_receive_skb(self):
+        self.netif_receive_skb = CTFWriter.EventClass("netif_receive_skb")
+        self.netif_receive_skb.add_field(self.uint64_type, "_skbaddr")
+        self.netif_receive_skb.add_field(self.uint32_type, "_len")
+        self.netif_receive_skb.add_field(self.string_type, "_name")
+        self.add_event(self.netif_receive_skb)
+
+    def define_events(self):
+        self.define_sched_switch()
+        self.define_softirq_raise()
+        self.define_softirq_entry()
+        self.define_softirq_exit()
+        self.define_irq_handler_entry()
+        self.define_irq_handler_exit()
+        self.define_syscall_entry_write()
+        self.define_syscall_exit_write()
+        self.define_syscall_entry_read()
+        self.define_syscall_exit_read()
+        self.define_syscall_entry_open()
+        self.define_syscall_exit_open()
+        self.define_lttng_statedump_process_state()
+        self.define_lttng_statedump_file_descriptor()
+        self.define_sched_wakeup()
+        self.define_sched_waking()
+        self.define_block_rq_complete()
+        self.define_block_rq_issue()
+        self.define_net_dev_xmit()
+        self.define_netif_receive_skb()
+
+    def create_stream(self):
+        self.stream = self.writer.create_stream(self.stream_class)
+
+    def set_char_array(self, event, string):
+        # Truncate or null-pad to the fixed 16-byte char array length
+        string = string[:16].ljust(16, '\0')
+
+        for i, char in enumerate(string):
+            event.field(i).value = ord(char)
+
+    def set_int(self, event, value):
+        event.value = value
+
+    def set_string(self, event, value):
+        event.value = value
+
+    def write_softirq_raise(self, time_ms, cpu_id, vec):
+        event = CTFWriter.Event(self.softirq_raise)
+        self.clock.time = time_ms * 1000000
+        self.set_int(event.payload("_cpu_id"), cpu_id)
+        self.set_int(event.payload("_vec"), vec)
+        self.stream.append_event(event)
+        self.stream.flush()
+
+    def write_softirq_entry(self, time_ms, cpu_id, vec):
+        event = CTFWriter.Event(self.softirq_entry)
+        self.clock.time = time_ms * 1000000
+        self.set_int(event.payload("_cpu_id"), cpu_id)
+        self.set_int(event.payload("_vec"), vec)
+        self.stream.append_event(event)
+        self.stream.flush()
+
+    def write_softirq_exit(self, time_ms, cpu_id, vec):
+        event = CTFWriter.Event(self.softirq_exit)
+        self.clock.time = time_ms * 1000000
+        self.set_int(event.payload("_cpu_id"), cpu_id)
+        self.set_int(event.payload("_vec"), vec)
+        self.stream.append_event(event)
+        self.stream.flush()
+
+    def write_irq_handler_entry(self, time_ms, cpu_id, irq, name):
+        event = CTFWriter.Event(self.irq_handler_entry)
+        self.clock.time = time_ms * 1000000
+        self.set_int(event.payload("_cpu_id"), cpu_id)
+        self.set_int(event.payload("_irq"), irq)
+        self.set_string(event.payload("_name"), name)
+        self.stream.append_event(event)
+        self.stream.flush()
+
+    def write_irq_handler_exit(self, time_ms, cpu_id, irq, ret):
+        event = CTFWriter.Event(self.irq_handler_exit)
+        self.clock.time = time_ms * 1000000
+        self.set_int(event.payload("_cpu_id"), cpu_id)
+        self.set_int(event.payload("_irq"), irq)
+        self.set_int(event.payload("_ret"), ret)
+        self.stream.append_event(event)
+        self.stream.flush()
+
+    def write_syscall_write(self, time_ms, cpu_id, delay, fd, buf, count, ret):
+        event_entry = CTFWriter.Event(self.syscall_entry_write)
+        self.clock.time = time_ms * 1000000
+        self.set_int(event_entry.payload("_cpu_id"), cpu_id)
+        self.set_int(event_entry.payload("_fd"), fd)
+        self.set_int(event_entry.payload("_buf"), buf)
+        self.set_int(event_entry.payload("_count"), count)
+        self.stream.append_event(event_entry)
+
+        event_exit = CTFWriter.Event(self.syscall_exit_write)
+        self.clock.time = (time_ms + delay) * 1000000
+        self.set_int(event_exit.payload("_cpu_id"), cpu_id)
+        self.set_int(event_exit.payload("_ret"), ret)
+        self.stream.append_event(event_exit)
+        self.stream.flush()
+
+    def write_syscall_read(self, time_ms, cpu_id, delay, fd, buf, count, ret):
+        event_entry = CTFWriter.Event(self.syscall_entry_read)
+        self.clock.time = time_ms * 1000000
+        self.set_int(event_entry.payload("_cpu_id"), cpu_id)
+        self.set_int(event_entry.payload("_fd"), fd)
+        self.set_int(event_entry.payload("_count"), count)
+        self.stream.append_event(event_entry)
+
+        event_exit = CTFWriter.Event(self.syscall_exit_read)
+        self.clock.time = (time_ms + delay) * 1000000
+        self.set_int(event_exit.payload("_cpu_id"), cpu_id)
+        self.set_int(event_exit.payload("_buf"), buf)
+        self.set_int(event_exit.payload("_ret"), ret)
+        self.stream.append_event(event_exit)
+        self.stream.flush()
+
+    def write_syscall_open(self, time_ms, cpu_id, delay, filename, flags,
+                           mode, ret):
+        event = CTFWriter.Event(self.syscall_entry_open)
+        self.clock.time = time_ms * 1000000
+        self.set_int(event.payload("_cpu_id"), cpu_id)
+        self.set_string(event.payload("_filename"), filename)
+        self.set_int(event.payload("_flags"), flags)
+        self.set_int(event.payload("_mode"), mode)
+        self.stream.append_event(event)
+        self.stream.flush()
+
+        event = CTFWriter.Event(self.syscall_exit_open)
+        self.clock.time = (time_ms + delay) * 1000000
+        self.set_int(event.payload("_cpu_id"), cpu_id)
+        self.set_int(event.payload("_ret"), ret)
+        self.stream.append_event(event)
+        self.stream.flush()
+
+    def write_lttng_statedump_file_descriptor(self, time_ms, cpu_id, pid, fd,
+                                              flags, fmode, filename):
+        event = CTFWriter.Event(self.lttng_statedump_file_descriptor)
+        self.clock.time = time_ms * 1000000
+        self.set_int(event.payload("_cpu_id"), cpu_id)
+        self.set_int(event.payload("_pid"), pid)
+        self.set_int(event.payload("_fd"), fd)
+        self.set_int(event.payload("_flags"), flags)
+        self.set_int(event.payload("_fmode"), fmode)
+        self.set_string(event.payload("_filename"), filename)
+        self.stream.append_event(event)
+        self.stream.flush()
+
+    def write_lttng_statedump_process_state(self, time_ms, cpu_id, tid, vtid,
+                                            pid, vpid, ppid, vppid, name, type,
+                                            mode, submode, status, ns_level):
+        event = CTFWriter.Event(self.lttng_statedump_process_state)
+        self.clock.time = time_ms * 1000000
+        self.set_int(event.payload("_cpu_id"), cpu_id)
+        self.set_int(event.payload("_tid"), tid)
+        self.set_int(event.payload("_vtid"), vtid)
+        self.set_int(event.payload("_pid"), pid)
+        self.set_int(event.payload("_vpid"), vpid)
+        self.set_int(event.payload("_ppid"), ppid)
+        self.set_int(event.payload("_vppid"), vppid)
+        self.set_char_array(event.payload("_name"), name)
+        self.set_int(event.payload("_type"), type)
+        self.set_int(event.payload("_mode"), mode)
+        self.set_int(event.payload("_submode"), submode)
+        self.set_int(event.payload("_status"), status)
+        self.set_int(event.payload("_ns_level"), ns_level)
+        self.stream.append_event(event)
+        self.stream.flush()
+
+    def write_sched_wakeup(self, time_ms, cpu_id, comm, tid, prio, target_cpu):
+        event = CTFWriter.Event(self.sched_wakeup)
+        self.clock.time = time_ms * 1000000
+        self.set_int(event.payload("_cpu_id"), cpu_id)
+        self.set_char_array(event.payload("_comm"), comm)
+        self.set_int(event.payload("_tid"), tid)
+        self.set_int(event.payload("_prio"), prio)
+        self.set_int(event.payload("_target_cpu"), target_cpu)
+        self.stream.append_event(event)
+        self.stream.flush()
+
+    def write_sched_waking(self, time_ms, cpu_id, comm, tid, prio, target_cpu):
+        event = CTFWriter.Event(self.sched_waking)
+        self.clock.time = time_ms * 1000000
+        self.set_int(event.payload("_cpu_id"), cpu_id)
+        self.set_char_array(event.payload("_comm"), comm)
+        self.set_int(event.payload("_tid"), tid)
+        self.set_int(event.payload("_prio"), prio)
+        self.set_int(event.payload("_target_cpu"), target_cpu)
+        self.stream.append_event(event)
+        self.stream.flush()
+
+    def write_block_rq_complete(self, time_ms, cpu_id, dev, sector, nr_sector,
+                                errors, rwbs, _cmd_length, cmd):
+        event = CTFWriter.Event(self.block_rq_complete)
+        self.clock.time = time_ms * 1000000
+        self.set_int(event.payload("_cpu_id"), cpu_id)
+        self.set_int(event.payload("_dev"), dev)
+        self.set_int(event.payload("_sector"), sector)
+        self.set_int(event.payload("_nr_sector"), nr_sector)
+        self.set_int(event.payload("_errors"), errors)
+        self.set_int(event.payload("_rwbs"), rwbs)
+        self.set_int(event.payload("__cmd_length"), _cmd_length)
+        self.set_char_array(event.payload("_cmd"), cmd)
+        self.stream.append_event(event)
+        self.stream.flush()
+
+    def write_block_rq_issue(self, time_ms, cpu_id, dev, sector, nr_sector,
+                             bytes, tid, rwbs, _cmd_length, cmd, comm):
+        event = CTFWriter.Event(self.block_rq_issue)
+        self.clock.time = time_ms * 1000000
+        self.set_int(event.payload("_cpu_id"), cpu_id)
+        self.set_int(event.payload("_dev"), dev)
+        self.set_int(event.payload("_sector"), sector)
+        self.set_int(event.payload("_nr_sector"), nr_sector)
+        self.set_int(event.payload("_bytes"), bytes)
+        self.set_int(event.payload("_tid"), tid)
+        self.set_int(event.payload("_rwbs"), rwbs)
+        self.set_int(event.payload("__cmd_length"), _cmd_length)
+        self.set_char_array(event.payload("_cmd"), cmd)
+        self.set_char_array(event.payload("_comm"), comm)
+        self.stream.append_event(event)
+        self.stream.flush()
+
+    def write_net_dev_xmit(self, time_ms, cpu_id, skbaddr, rc, len, name):
+        event = CTFWriter.Event(self.net_dev_xmit)
+        self.clock.time = time_ms * 1000000
+        self.set_int(event.payload("_cpu_id"), cpu_id)
+        self.set_int(event.payload("_skbaddr"), skbaddr)
+        self.set_int(event.payload("_rc"), rc)
+        self.set_int(event.payload("_len"), len)
+        self.set_string(event.payload("_name"), name)
+        self.stream.append_event(event)
+        self.stream.flush()
+
+    def write_netif_receive_skb(self, time_ms, cpu_id, skbaddr, len, name):
+        event = CTFWriter.Event(self.netif_receive_skb)
+        self.clock.time = time_ms * 1000000
+        self.set_int(event.payload("_cpu_id"), cpu_id)
+        self.set_int(event.payload("_skbaddr"), skbaddr)
+        self.set_int(event.payload("_len"), len)
+        self.set_string(event.payload("_name"), name)
+        self.stream.append_event(event)
+        self.stream.flush()
+
+    def write_sched_switch(self, time_ms, cpu_id, prev_comm, prev_tid,
+                           next_comm, next_tid, prev_prio=20, prev_state=1,
+                           next_prio=20):
+        event = CTFWriter.Event(self.sched_switch)
+        self.clock.time = time_ms * 1000000
+        self.set_char_array(event.payload("_prev_comm"), prev_comm)
+        self.set_int(event.payload("_prev_tid"), prev_tid)
+        self.set_int(event.payload("_prev_prio"), prev_prio)
+        self.set_int(event.payload("_prev_state"), prev_state)
+        self.set_char_array(event.payload("_next_comm"), next_comm)
+        self.set_int(event.payload("_next_tid"), next_tid)
+        self.set_int(event.payload("_next_prio"), next_prio)
+        self.set_int(event.payload("_cpu_id"), cpu_id)
+        self.stream.append_event(event)
+        self.stream.flush()
+
+    def sched_switch_50pc(self, start_time_ms, end_time_ms, cpu_id, period,
+                          comm1, tid1, comm2, tid2):
+        # Alternate comm1 and comm2 every `period` ms between start and
+        # end, giving each roughly 50% of the CPU over the interval
+        current = start_time_ms
+        while current < end_time_ms:
+            self.write_sched_switch(current, cpu_id, comm1, tid1, comm2, tid2)
+            current += period
+            self.write_sched_switch(current, cpu_id, comm2, tid2, comm1, tid1)
+            current += period
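A minimal usage sketch for this helper outside the test harness (the workflow shown is illustrative; AnalysisTest normally drives TraceWriter and runs the analysis commands itself):

    from tests.trace_writer import TraceWriter

    writer = TraceWriter()
    # 1 s of 'myapp' on CPU 0, bracketed by two sched_switch events
    writer.write_sched_switch(1000, 0, 'swapper/0', 0, 'myapp', 1234)
    writer.write_sched_switch(2000, 0, 'myapp', 1234, 'swapper/0', 0)
    writer.flush()
    print(writer.trace_root)  # e.g. pass this path to lttng-cputop
    writer.rm_trace()         # remove the temporary trace when done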