begin io syscall analysis
authorJulien Desfossez <jdesfossez@efficios.com>
Thu, 29 Jan 2015 22:13:44 +0000 (17:13 -0500)
committerJulien Desfossez <jdesfossez@efficios.com>
Thu, 29 Jan 2015 22:13:44 +0000 (17:13 -0500)
Signed-off-by: Julien Desfossez <jdesfossez@efficios.com>
linuxautomaton/linuxautomaton/sp.py
linuxautomaton/linuxautomaton/syscalls.py [new file with mode: 0644]
lttnganalyses/lttnganalyses/syscalls.py [new file with mode: 0644]
lttnganalysescli/lttnganalysescli/io.py [new file with mode: 0644]

index 68ae8253d1da0557064d06446f1cdb31e40d803f..ff9e8d0b2ca6d42dd8df950c29d45b8085fa60db 100644 (file)
@@ -10,3 +10,11 @@ class StateProvider:
 
         if name in self._cbs:
             self._cbs[name](ev)
+        # for now we process all the syscalls at the same place
+        if "syscall_entry" in self._cbs and \
+                (name.startswith("sys_") or name.startswith("syscall_entry_")):
+            self._cbs["syscall_entry"](ev)
+        if "syscall_exit" in self._cbs and \
+                (name.startswith("exit_syscall") or
+                 name.startswith("syscall_exit_")):
+            self._cbs["syscall_exit"](ev)
diff --git a/linuxautomaton/linuxautomaton/syscalls.py b/linuxautomaton/linuxautomaton/syscalls.py
new file mode 100644 (file)
index 0000000..178f8dc
--- /dev/null
@@ -0,0 +1,563 @@
+import socket
+import operator
+from linuxautomaton import sp, sv, common
+
+
+class IOCategory():
+    """Defines an enumeration mapping IO categories to integer values.
+    Used mainly to export syscall metadata (to JSON)."""
+
+    invalid = 0
+    # Can't use open as a name given that it is a built-in function
+    # TODO: find less stupid name
+    opn = 1
+    close = 2
+    read = 3
+    write = 4
+
+
+class SyscallsStateProvider(sp.StateProvider):
+    def __init__(self, state):
+        """Keep references into the shared automaton state and register
+        the generic syscall entry/exit callbacks."""
+        self.state = state
+        self.cpus = state.cpus
+        self.tids = state.tids
+        self.syscalls = state.syscalls
+        self.pending_syscalls = state.pending_syscalls
+        # "total" accumulates the count of all syscalls, across names
+        self.syscalls["total"] = 0
+        self.dirty_pages = state.dirty_pages
+        cbs = {
+            'syscall_entry': self._process_syscall_entry,
+            'syscall_exit': self._process_syscall_exit,
+        }
+        self._register_cbs(cbs)
+
+    def process_event(self, ev):
+        """Dispatch one trace event to the registered callbacks."""
+        self._process_event_cb(ev)
+
+    def get_syscall_category(self, name):
+        """Receives a syscall name and returns an enum value
+        representing its IO category (open, close, read, or write).
+
+        This is used to produce json data for visualization"""
+
+        if name in sv.SyscallConsts.OPEN_SYSCALLS:
+            return IOCategory.opn
+        if name in sv.SyscallConsts.CLOSE_SYSCALLS:
+            return IOCategory.close
+        if name in sv.SyscallConsts.READ_SYSCALLS:
+            return IOCategory.read
+        if name in sv.SyscallConsts.WRITE_SYSCALLS:
+            return IOCategory.write
+
+        return IOCategory.invalid
+
+    def get_fd_type(self, name, family):
+        """Return the sv.FDType for an FD created by syscall `name` with
+        socket address family `family` (net, disk or unknown)."""
+        if name in sv.SyscallConsts.NET_OPEN_SYSCALLS:
+            if family in sv.SyscallConsts.INET_FAMILIES:
+                return sv.FDType.net
+            if family in sv.SyscallConsts.DISK_FAMILIES:
+                return sv.FDType.disk
+
+        if name in sv.SyscallConsts.DISK_OPEN_SYSCALLS:
+            return sv.FDType.disk
+
+        return sv.FDType.unknown
+
+    def global_syscall_entry(self, name):
+        """Update the global (per-trace) per-syscall and total counters."""
+        if name not in self.syscalls:
+            s = sv.Syscall()
+            s.name = name
+            s.count = 0
+            self.syscalls[name] = s
+        else:
+            s = self.syscalls[name]
+        s.count += 1
+        self.syscalls["total"] += 1
+
+    def per_tid_syscall_entry(self, name, cpu_id):
+        """Update the per-thread syscall counters for the thread currently
+        scheduled on `cpu_id` (no-op when that thread is unknown)."""
+        # we don't know which process is currently on this CPU
+        if cpu_id not in self.cpus:
+            return
+        c = self.cpus[cpu_id]
+        if c.current_tid == -1:
+            return
+        t = self.tids[c.current_tid]
+        t.total_syscalls += 1
+        if name not in t.syscalls:
+            s = sv.Syscall()
+            s.name = name
+            t.syscalls[name] = s
+        else:
+            s = t.syscalls[name]
+        s.count += 1
+
+    def track_open(self, name, proc, event, cpu):
+        """Start tracking an FD-creating syscall: reset the current thread's
+        current_syscall dict and record filename/family/fdtype from the
+        entry event, depending on the specific syscall."""
+        self.tids[cpu.current_tid].current_syscall = {}
+        current_syscall = self.tids[cpu.current_tid].current_syscall
+        if name in sv.SyscallConsts.DISK_OPEN_SYSCALLS:
+            current_syscall["filename"] = event["filename"]
+            if event["flags"] & common.O_CLOEXEC == common.O_CLOEXEC:
+                current_syscall["cloexec"] = 1
+        elif name in ["sys_accept", "syscall_entry_accept"]:
+            # use the peer ip:port as "filename" when the family is known
+            if "family" in event.keys() and event["family"] == socket.AF_INET:
+                ipport = "%s:%d" % (common.get_v4_addr_str(event["v4addr"]),
+                                    event["sport"])
+                current_syscall["filename"] = ipport
+            else:
+                current_syscall["filename"] = "socket"
+        elif name in sv.SyscallConsts.NET_OPEN_SYSCALLS:
+            current_syscall["filename"] = "socket"
+        elif name in ["sys_dup2", "syscall_entry_dup2"]:
+            # dup2 implicitly closes newfd if it was open
+            newfd = event["newfd"]
+            oldfd = event["oldfd"]
+            if newfd in proc.fds.keys():
+                self.close_fd(proc, newfd)
+            if oldfd in proc.fds.keys():
+                current_syscall["filename"] = proc.fds[oldfd].filename
+                current_syscall["fdtype"] = proc.fds[oldfd].fdtype
+            else:
+                current_syscall["filename"] = ""
+        elif name in ["sys_fcntl", "syscall_entry_fcntl"]:
+            # only handle F_DUPFD (cmd == 0); ignore every other fcntl cmd
+            if event["cmd"] != 0:
+                return
+            oldfd = event["fd"]
+            if oldfd in proc.fds.keys():
+                current_syscall["filename"] = proc.fds[oldfd].filename
+                current_syscall["fdtype"] = proc.fds[oldfd].fdtype
+            else:
+                current_syscall["filename"] = ""
+
+        if name in sv.SyscallConsts.NET_OPEN_SYSCALLS and \
+                "family" in event.keys():
+            family = event["family"]
+            current_syscall["family"] = family
+        else:
+            family = socket.AF_UNSPEC
+            current_syscall["family"] = family
+
+        current_syscall["name"] = name
+        current_syscall["start"] = event.timestamp
+        # NOTE(review): this overwrites the fdtype copied from the old FD in
+        # the dup2/fcntl branches above — confirm that is intended
+        current_syscall["fdtype"] = self.get_fd_type(name, family)
+
+    def close_fd(self, proc, fd):
+        """Move `fd` from proc.fds to proc.closed_fds, merging its I/O
+        counters when a non-generic file of the same name was already
+        closed before."""
+        filename = proc.fds[fd].filename
+        if filename not in sv.SyscallConsts.GENERIC_NAMES \
+           and filename in proc.closed_fds.keys():
+            f = proc.closed_fds[filename]
+            f.close += 1
+            f.net_read += proc.fds[fd].net_read
+            f.disk_read += proc.fds[fd].disk_read
+            f.net_write += proc.fds[fd].net_write
+            f.disk_write += proc.fds[fd].disk_write
+        else:
+            proc.closed_fds[filename] = proc.fds[fd]
+            proc.closed_fds[filename].close = 1
+#        print("Close FD %s in %d (%d, %d, %d, %d)" %
+#                (filename, proc.tid, proc.fds[fd].read, proc.fds[fd].write,
+#                    proc.fds[fd].open, proc.fds[fd].close))
+        proc.fds.pop(fd, None)
+
+    def track_close(self, name, proc, event, cpu):
+        """Record the entry of a close syscall and retire the FD; ignores
+        closes of FDs we never saw opened."""
+        fd = event["fd"]
+        if fd not in proc.fds.keys():
+            return
+
+        tid = self.tids[cpu.current_tid]
+        tid.current_syscall = {}
+        current_syscall = tid.current_syscall
+        current_syscall["filename"] = proc.fds[fd].filename
+        current_syscall["name"] = name
+        current_syscall["start"] = event.timestamp
+
+        self.close_fd(proc, fd)
+
+    def track_fds(self, name, event, cpu_id):
+        """Dispatch FD lifecycle tracking (open/close/connect) for the
+        thread currently on `cpu_id`; returns a log string for close
+        events, an empty string otherwise."""
+        # we don't know which process is currently on this CPU
+        ret_string = ""
+        if cpu_id not in self.cpus:
+            return
+        c = self.cpus[cpu_id]
+        if c.current_tid == -1:
+            return
+        t = self.tids[c.current_tid]
+        # check if we can fix the pid from a context
+        if t.pid == -1 and "pid" in event.keys():
+            t.pid = event["pid"]
+            p = sv.Process()
+            p.tid = t.pid
+            p.pid = t.pid
+            p.comm = t.comm
+            self.tids[p.pid] = p
+        # if it's a thread, we want the parent
+        if t.pid != -1 and t.tid != t.pid:
+            t = self.tids[t.pid]
+        if name in sv.SyscallConsts.OPEN_SYSCALLS:
+            self.track_open(name, t, event, c)
+        elif name in sv.SyscallConsts.CLOSE_SYSCALLS:
+            ret_string = "%s %s(%d)" % \
+                (common.ns_to_hour_nsec(event.timestamp),
+                 name, event["fd"])
+            self.track_close(name, t, event, c)
+        # when a connect occurs, no new FD is returned, but we can fix
+        # the "filename" if we have the destination info
+        elif name in ["sys_connect", "syscall_entry_connect"] \
+                and "family" in event.keys():
+            if event["family"] == socket.AF_INET:
+                fd = self.get_fd(t, event["fd"])
+                ipport = "%s:%d" % (common.get_v4_addr_str(event["v4addr"]),
+                                    event["dport"])
+                fd.filename = ipport
+        return ret_string
+
+    def get_fd(self, proc, fd):
+        """Return the sv.FD object for `fd` in `proc`, creating a
+        placeholder one if its open was not seen in the trace."""
+        if fd not in proc.fds.keys():
+            f = sv.FD()
+            f.fd = fd
+            f.filename = "unknown (origin not found)"
+            proc.fds[fd] = f
+        else:
+            f = proc.fds[fd]
+        return f
+
+    def track_sync(self, name, event, cpu_id):
+        """Record the entry of a sync-family syscall; sys_sync has no fd,
+        the other sync syscalls target a specific fd."""
+        # we don't know which process is currently on this CPU
+        if cpu_id not in self.cpus:
+            return
+        c = self.cpus[cpu_id]
+        if c.current_tid == -1:
+            return
+        t = self.tids[c.current_tid]
+        self.pending_syscalls.append(t)
+        # if it's a thread, we want the parent
+        if t.pid != -1 and t.tid != t.pid:
+            t = self.tids[t.pid]
+        current_syscall = self.tids[c.current_tid].current_syscall
+        current_syscall["name"] = name
+        current_syscall["start"] = event.timestamp
+        if name not in ["sys_sync", "syscall_entry_sync"]:
+            fd = event["fd"]
+            f = self.get_fd(t, fd)
+            current_syscall["fd"] = f
+            current_syscall["filename"] = f.filename
+
+    def track_read_write(self, name, event, cpu_id):
+        # we don't know which process is currently on this CPU
+        if cpu_id not in self.cpus:
+            return
+        c = self.cpus[cpu_id]
+        if c.current_tid == -1:
+            return
+        t = self.tids[c.current_tid]
+        self.pending_syscalls.append(t)
+        # if it's a thread, we want the parent
+        if t.pid != -1 and t.tid != t.pid:
+            t = self.tids[t.pid]
+        current_syscall = self.tids[c.current_tid].current_syscall
+        current_syscall["name"] = name
+        current_syscall["start"] = event.timestamp
+        if name in ["sys_splice", "syscall_entry_splice"]:
+            current_syscall["fd_in"] = self.get_fd(t, event["fd_in"])
+            current_syscall["fd_out"] = self.get_fd(t, event["fd_out"])
+            current_syscall["count"] = event["len"]
+            current_syscall["filename"] = current_syscall["fd_in"].filename
+            return
+        elif name in ["sys_sendfile64", "syscall_entry_sendfile64"]:
+            current_syscall["fd_in"] = self.get_fd(t, event["in_fd"])
+            current_syscall["fd_out"] = self.get_fd(t, event["out_fd"])
+            current_syscall["count"] = event["count"]
+            current_syscall["filename"] = current_syscall["fd_in"].filename
+            return
+        fd = event["fd"]
+        f = self.get_fd(t, fd)
+        current_syscall["fd"] = f
+        if name in ["sys_writev", "syscall_entry_writev",
+                    "sys_readv", "syscall_entry_readv"]:
+            current_syscall["count"] = event["vlen"]
+        elif name in ["sys_recvfrom", "syscall_entry_recvfrom"]:
+            current_syscall["count"] = event["size"]
+        elif name in ["sys_recvmsg", "syscall_entry_recvmsg",
+                      "sys_sendmsg", "syscall_entry_sendmsg"]:
+            current_syscall["count"] = ""
+        elif name in ["sys_sendto", "syscall_entry_sendto"]:
+            current_syscall["count"] = event["len"]
+        else:
+            try:
+                current_syscall["count"] = event["count"]
+            except:
+                print("Missing count argument for syscall",
+                      current_syscall["name"])
+                current_syscall["count"] = 0
+
+        current_syscall["filename"] = f.filename
+
+    def add_tid_fd(self, event, cpu):
+        """On open-syscall exit, create (or revive from closed_fds) the FD
+        object for the returned fd number and attach it to the process."""
+        ret = event["ret"]
+        t = self.tids[cpu.current_tid]
+        # if it's a thread, we want the parent
+        if t.pid != -1 and t.tid != t.pid:
+            t = self.tids[t.pid]
+        current_syscall = self.tids[cpu.current_tid].current_syscall
+
+        name = current_syscall["filename"]
+        # reuse the closed FD entry so counters accumulate across reopens
+        if name not in sv.SyscallConsts.GENERIC_NAMES \
+           and name in t.closed_fds.keys():
+            fd = t.closed_fds[name]
+            fd.open += 1
+        else:
+            fd = sv.FD()
+            fd.filename = name
+            if current_syscall["name"] in sv.SyscallConsts.NET_OPEN_SYSCALLS:
+                fd.family = current_syscall["family"]
+                if fd.family in sv.SyscallConsts.INET_FAMILIES:
+                    fd.fdtype = sv.FDType.net
+            fd.open = 1
+        # negative ret means the syscall failed: don't register an FD
+        if ret >= 0:
+            fd.fd = ret
+        else:
+            return
+        if "cloexec" in current_syscall.keys():
+            fd.cloexec = 1
+        t.fds[fd.fd] = fd
+
+    def read_append(self, fd, proc, count, rq):
+        """Account `count` bytes read on `fd` to both the FD and the
+        process, bucketed by FD type (net/disk/unknown)."""
+        rq.operation = sv.IORequest.OP_READ
+        rq.size = count
+        if fd.fdtype in [sv.FDType.net, sv.FDType.maybe_net]:
+            fd.net_read += count
+            proc.net_read += count
+        elif fd.fdtype == sv.FDType.disk:
+            fd.disk_read += count
+            proc.disk_read += count
+        else:
+            fd.unk_read += count
+            proc.unk_read += count
+        fd.read += count
+        proc.read += count
+
+    def write_append(self, fd, proc, count, rq):
+        """Account `count` bytes written on `fd` to both the FD and the
+        process, bucketed by FD type (net/disk/unknown)."""
+        rq.operation = sv.IORequest.OP_WRITE
+        rq.size = count
+        if fd.fdtype in [sv.FDType.net, sv.FDType.maybe_net]:
+            fd.net_write += count
+            proc.net_write += count
+        elif fd.fdtype == sv.FDType.disk:
+            fd.disk_write += count
+            proc.disk_write += count
+        else:
+            fd.unk_write += count
+            proc.unk_write += count
+        fd.write += count
+        proc.write += count
+
+    def track_read_write_return(self, name, ret, cpu):
+        """On read/write syscall exit, credit the actual byte count `ret`
+        to the FD(s) recorded at entry; splice/sendfile count as a read on
+        fd_in and a write on fd_out."""
+        if ret < 0:
+            # TODO: track errors
+            return
+        proc = self.tids[cpu.current_tid]
+        # if it's a thread, we want the parent
+        if proc.pid != -1 and proc.tid != proc.pid:
+            proc = self.tids[proc.pid]
+        current_syscall = self.tids[cpu.current_tid].current_syscall
+        if name in ["sys_splice", "syscall_entry_splice",
+                    "sys_sendfile64", "syscall_entry_sendfile64"]:
+            self.read_append(current_syscall["fd_in"], proc, ret,
+                             current_syscall["iorequest"])
+            self.write_append(current_syscall["fd_out"], proc, ret,
+                              current_syscall["iorequest"])
+        elif name in sv.SyscallConsts.READ_SYSCALLS:
+            if ret > 0:
+                self.read_append(current_syscall["fd"], proc, ret,
+                                 current_syscall["iorequest"])
+        elif name in sv.SyscallConsts.WRITE_SYSCALLS:
+            if ret > 0:
+                self.write_append(current_syscall["fd"], proc, ret,
+                                  current_syscall["iorequest"])
+
+    def get_page_queue_stats(self, page_list):
+        """Aggregate a list of (proc, ?, filename, ...) dirty-page tuples
+        into {tid: {procname, count, files: {filename: count}}}."""
+        processes = {}
+        for i in page_list:
+            procname = i[0].comm
+            tid = i[0].tid
+            filename = i[2]
+            if tid not in processes.keys():
+                processes[tid] = {}
+                processes[tid]["procname"] = procname
+                processes[tid]["count"] = 1
+                processes[tid]["files"] = {}
+                processes[tid]["files"][filename] = 1
+            else:
+                processes[tid]["count"] += 1
+                if filename not in processes[tid]["files"].keys():
+                    processes[tid]["files"][filename] = 1
+                else:
+                    processes[tid]["files"][filename] += 1
+        return processes
+
+    def print_page_table(self, event, pages):
+        """Print per-process dirty-page counts (as built by
+        get_page_queue_stats), files sorted by page count descending.
+        `event` is currently unused."""
+        # indent to line up under the timestamped log lines
+        spaces = (41 + 6) * " "
+        for i in pages.keys():
+            p = pages[i]
+            print("%s %s (%d): %d pages" % (spaces, p["procname"],
+                                            i, p["count"]))
+            files = sorted(p["files"].items(), key=operator.itemgetter(1),
+                           reverse=True)
+            for f in files:
+                print("%s  - %s : %d pages" % (spaces, f[0], f[1]))
+
+    def syscall_clear_pages(self, event, name, fd, current_syscall, tid):
+        """Remove dirty pages flushed by a sync syscall: all pages for
+        sys_sync, otherwise only those matching this proc/fd; the removed
+        pages are stored in current_syscall["pages_cleared"]."""
+        cleaned = []
+        if name in ["sys_sync", "syscall_entry_sync"]:
+            # remove all the pages
+            for i in range(len(self.dirty_pages["pages"])):
+                cleaned.append(self.dirty_pages["pages"].pop(0))
+        else:
+            # remove only the pages that belong to a specific proc/fd
+            for i in range(len(self.dirty_pages["pages"])):
+                proc = self.dirty_pages["pages"][i][0]
+                page_fd = self.dirty_pages["pages"][i][3]
+                if page_fd == fd and (tid.tid == proc.tid or
+                                      tid.pid == proc.pid):
+                    cleaned.append(self.dirty_pages["pages"][i])
+            for i in cleaned:
+                self.dirty_pages["pages"].remove(i)
+        if len(cleaned) > 0:
+            current_syscall["pages_cleared"] = cleaned
+
+    def track_rw_latency(self, name, ret, c, ts, event):
+        """On syscall exit, finalize the IORequest started at entry:
+        compute its duration and attach the memory-pressure info
+        (pages written/freed, kswapd wakeups) gathered during the call."""
+        current_syscall = self.tids[c.current_tid].current_syscall
+        rq = current_syscall["iorequest"]
+#       FIXME: useless ?
+#        if "start" not in current_syscall.keys():
+#            return
+        rq.duration = (event.timestamp - current_syscall["start"])
+        rq.begin = current_syscall["start"]
+        rq.end = event.timestamp
+        rq.proc = self.tids[c.current_tid]
+        if "fd" in current_syscall.keys():
+            rq.fd = current_syscall["fd"]
+            r = current_syscall["fd"].iorequests
+            r.append(current_syscall["iorequest"])
+        elif "fd_in" in current_syscall.keys():
+            rq.fd = current_syscall["fd_in"]
+        # pages written during the latency
+        if "pages_written" in current_syscall.keys():
+            rq.page_written = current_syscall["pages_written"]
+        # dirty buffers during the latency
+        if "dirty" in current_syscall.keys():
+            rq.dirty = current_syscall["dirty"]
+        # alloc pages during the latency
+        if "alloc" in current_syscall.keys():
+            rq.page_alloc = current_syscall["alloc"]
+        # wakeup_kswapd during the latency
+        if "page_free" in current_syscall.keys():
+            rq.page_free = current_syscall["page_free"]
+        if "wakeup_kswapd" in current_syscall.keys():
+            rq.woke_kswapd = True
+        if name in sv.SyscallConsts.SYNC_SYSCALLS:
+#            self.syscall_clear_pages(event, name, fd, current_syscall,
+#                                     self.tids[c.current_tid])
+            if "pages_cleared" in current_syscall.keys():
+                rq.page_cleared = len(current_syscall["pages_cleared"])
+
+    def _process_syscall_entry(self, event):
+        """Entry callback: update global and per-thread counters, then
+        start tracking FD/read-write/sync state for this syscall."""
+        name = event.name
+        ret_string = ""
+        cpu_id = event["cpu_id"]
+        self.global_syscall_entry(name)
+        self.per_tid_syscall_entry(name, cpu_id)
+        ret_string = self.track_fds(name, event, cpu_id)
+        if name in sv.SyscallConsts.READ_SYSCALLS or \
+                name in sv.SyscallConsts.WRITE_SYSCALLS:
+            self.track_read_write(name, event, cpu_id)
+        if name in sv.SyscallConsts.SYNC_SYSCALLS:
+            self.track_sync(name, event, cpu_id)
+        return ret_string
+
+    def _process_syscall_exit(self, event):
+        """Exit callback: match the exit with the pending entry on the
+        same CPU, build the IORequest, account I/O and latency, then
+        clear the thread's current_syscall state."""
+        cpu_id = event["cpu_id"]
+        ret_string = ""
+        if cpu_id not in self.cpus:
+            return
+        c = self.cpus[cpu_id]
+        if c.current_tid == -1:
+            return
+        current_syscall = self.tids[c.current_tid].current_syscall
+        # no tracked entry for this exit (e.g. trace started mid-syscall)
+        if len(current_syscall.keys()) == 0:
+            return
+        name = current_syscall["name"]
+        ret = event["ret"]
+        current_syscall["iorequest"] = sv.IORequest()
+        current_syscall["iorequest"].iotype = sv.IORequest.IO_SYSCALL
+        current_syscall["iorequest"].name = name
+        if name in sv.SyscallConsts.OPEN_SYSCALLS:
+            self.add_tid_fd(event, c)
+            ret_string = "%s %s(%s, fd = %d)" % (
+                common.ns_to_hour_nsec(current_syscall["start"]),
+                name, current_syscall["filename"], ret)
+            if ret < 0:
+                return ret_string
+            t = self.tids[c.current_tid]
+            current_syscall["fd"] = self.get_fd(t, ret)
+            current_syscall["count"] = 0
+            current_syscall["fd"].fdtype = current_syscall["fdtype"]
+            current_syscall["iorequest"].operation = sv.IORequest.OP_OPEN
+            self.track_rw_latency(name, ret, c,
+                                  event.timestamp, event)
+        elif name in sv.SyscallConsts.READ_SYSCALLS or \
+                name in sv.SyscallConsts.WRITE_SYSCALLS:
+            self.track_read_write_return(name, ret, c)
+            self.track_rw_latency(name, ret, c, event.timestamp, event)
+        elif name in sv.SyscallConsts.SYNC_SYSCALLS:
+            current_syscall["iorequest"].operation = sv.IORequest.OP_SYNC
+            self.track_rw_latency(name, ret, c, event.timestamp, event)
+            # sys_sync has no FD to hang the request on: attach to the thread
+            if name in ["sys_sync", "syscall_entry_sync"]:
+                t = self.tids[c.current_tid]
+                t.iorequests.append(current_syscall["iorequest"])
+        self.tids[c.current_tid].current_syscall = {}
+        if self.tids[c.current_tid] in self.pending_syscalls:
+            self.pending_syscalls.remove(self.tids[c.current_tid])
+        return ret_string
+
+    def wb_pages(self, event):
+        """writeback_pages_written: attribute the written page count to
+        every syscall currently in flight on any CPU."""
+        for c in self.cpus.values():
+            if c.current_tid <= 0:
+                continue
+            current_syscall = self.tids[c.current_tid].current_syscall
+            if len(current_syscall.keys()) == 0:
+                continue
+            current_syscall["pages_written"] = event["pages"]
+
+    def wakeup_kswapd(self, event):
+        """mm_vmscan_wakeup_kswapd: flag the syscall in flight on this CPU
+        as having woken kswapd."""
+        cpu_id = event["cpu_id"]
+        if cpu_id not in self.cpus:
+            return
+        c = self.cpus[cpu_id]
+        if c.current_tid == -1:
+            return
+        current_syscall = self.tids[c.current_tid].current_syscall
+        if len(current_syscall.keys()) == 0:
+            return
+        current_syscall["wakeup_kswapd"] = 1
+
+    def page_free(self, event):
+        """mm_page_free: count freed pages against syscalls that woke
+        kswapd (attributing kswapd0's frees to the waker)."""
+        for c in self.cpus.values():
+            if c.current_tid <= 0:
+                continue
+            p = self.tids[c.current_tid]
+            # if the current process is kswapd0, we need to
+            # attribute the page freed to the process that
+            # woke it up.
+            if p.comm == "kswapd0" and p.prev_tid > 0:
+                p = self.tids[p.prev_tid]
+            current_syscall = p.current_syscall
+            if len(current_syscall.keys()) == 0:
+                continue
+            if "wakeup_kswapd" in current_syscall.keys():
+                if "page_free" in current_syscall.keys():
+                    current_syscall["page_free"] += 1
+                else:
+                    current_syscall["page_free"] = 1
diff --git a/lttnganalyses/lttnganalyses/syscalls.py b/lttnganalyses/lttnganalyses/syscalls.py
new file mode 100644 (file)
index 0000000..7e31b1a
--- /dev/null
@@ -0,0 +1,9 @@
+from .analysis import Analysis
+
+
+class SyscallsAnalysis(Analysis):
+    """Placeholder analysis: state is maintained entirely by the
+    linuxautomaton state providers, so no per-event work is done here."""
+
+    def __init__(self, state):
+        self._state = state
+
+    def process_event(self, ev):
+        # intentionally empty: see class docstring
+        pass
diff --git a/lttnganalysescli/lttnganalysescli/io.py b/lttnganalysescli/lttnganalysescli/io.py
new file mode 100644 (file)
index 0000000..8e52957
--- /dev/null
@@ -0,0 +1,698 @@
+from .command import Command
+import lttnganalyses.syscalls
+from linuxautomaton import common, sv
+from ascii_graph import Pyasciigraph
+import operator
+import statistics
+
+
+class IoAnalysis(Command):
+    _VERSION = '0.1.0'
+    _DESC = """The I/O command."""
+
+    def __init__(self):
+        """Enable the CLI argument groups relevant to I/O analysis
+        (process filters, min/max latency and size, freq/log/stats)."""
+        super().__init__(self._add_arguments,
+                         enable_proc_filter_args=True,
+                         enable_max_min_args=True,
+                         enable_max_min_size_arg=True,
+                         enable_freq_arg=True,
+                         enable_log_arg=True,
+                         enable_stats_arg=True)
+
+    def _validate_transform_args(self):
+        """Save command-specific arguments after parsing."""
+        self._arg_extra = self._args.extra
+
+    def run(self):
+        """Full command lifecycle: parse args, open the trace, run the
+        analysis, print the final results and close the trace."""
+        # parse arguments first
+        self._parse_args()
+        # validate, transform and save specific arguments
+        self._validate_transform_args()
+        # open the trace
+        self._open_trace()
+        # create the appropriate analysis/analyses
+        self._create_analysis()
+        # run the analysis
+        self._run_analysis(self._reset_total, self._refresh)
+        # process the results
+        self._compute_stats()
+        # print results
+        self._print_results(self.start_ns, self.trace_end_ts, final=1)
+        # close the trace
+        self._close_trace()
+
+    def _create_analysis(self):
+        """Instantiate the syscalls analysis over the automaton state."""
+        self._analysis = lttnganalyses.syscalls.SyscallsAnalysis(
+            self._automaton.state)
+
+    def _compute_stats(self):
+        self.state = self._automaton.state
+        pass
+
+    def _refresh(self, begin, end):
+        """Periodic callback: print intermediate results for [begin, end)
+        then reset the interval counters."""
+        self._compute_stats()
+        self._print_results(begin, end, final=0)
+        self._reset_total(end)
+
+    def add_fd_dict(self, tid, fd, files):
+        """Fold one FD's read/write counters into the `files` dict.
+        Anonymous FDs (pipes, sockets, ...) are keyed per-process to avoid
+        merging unrelated streams; named files are merged across FDs."""
+        if fd.read == 0 and fd.write == 0:
+            return
+        if fd.filename.startswith("pipe") or \
+                fd.filename.startswith("socket") or \
+                fd.filename.startswith("anon_inode") or \
+                fd.filename.startswith("unknown"):
+            filename = "%s (%s)" % (fd.filename, tid.comm)
+            files[filename] = {}
+            files[filename]["read"] = fd.read
+            files[filename]["write"] = fd.write
+            files[filename]["name"] = filename
+            files[filename]["other"] = ["fd %d in %s (%d)" % (fd.fd,
+                                        tid.comm, tid.pid)]
+        else:
+            # merge counters of shared files
+            filename = fd.filename
+            if filename not in files.keys():
+                files[filename] = {}
+                files[filename]["read"] = fd.read
+                files[filename]["write"] = fd.write
+                files[filename]["name"] = filename
+                files[filename]["other"] = ["fd %d in %s (%d)" %
+                                            (fd.fd, tid.comm, tid.pid)]
+                files[filename]["tids"] = [tid.tid]
+            else:
+                files[filename]["read"] += fd.read
+                files[filename]["write"] += fd.write
+                files[filename]["other"].append("fd %d in %s (%d)" %
+                                                (fd.fd, tid.comm,
+                                                 tid.pid))
+
+    def create_files_dict(self):
+        """Build the {filename: counters} dict over all open and closed
+        FDs of every process passing the process filter."""
+        files = {}
+        for tid in self.state.tids.values():
+            if not self.filter_process(tid):
+                continue
+            for fd in tid.fds.values():
+                self.add_fd_dict(tid, fd, files)
+            for fd in tid.closed_fds.values():
+                self.add_fd_dict(tid, fd, files)
+        return files
+
+    # iotop functions
+    def iotop_output_print_file_read(self, files):
+        """Print an ASCII bar graph of per-file bytes read, top entries
+        first, honoring the --limit argument."""
+        count = 0
+        limit = self._arg_limit
+        graph = Pyasciigraph()
+        values = []
+        sorted_f = sorted(files.items(), key=lambda files: files[1]['read'],
+                          reverse=True)
+        for f in sorted_f:
+            if f[1]["read"] == 0:
+                continue
+            info_fmt = "{:>10}".format(common.convert_size(f[1]["read"],
+                                       padding_after=True))
+            values.append(("%s %s %s" % (info_fmt,
+                                         f[1]["name"],
+                                         str(f[1]["other"])[1:-1]),
+                           f[1]["read"]))
+            count = count + 1
+            if limit > 0 and count >= limit:
+                break
+        for line in graph.graph('Files Read', values, sort=2,
+                                with_value=False):
+            print(line)
+
+    def iotop_output_print_file_write(self, files):
+        """Print an ASCII bar graph of per-file bytes written, top entries
+        first, honoring the --limit argument."""
+        count = 0
+        limit = self._arg_limit
+        graph = Pyasciigraph()
+        values = []
+        sorted_f = sorted(files.items(), key=lambda files: files[1]['write'],
+                          reverse=True)
+        for f in sorted_f:
+            if f[1]["write"] == 0:
+                continue
+            info_fmt = "{:>10}".format(common.convert_size(f[1]["write"],
+                                       padding_after=True))
+            values.append(("%s %s %s" % (info_fmt,
+                                         f[1]["name"],
+                                         str(f[1]["other"])[1:-1]),
+                           f[1]["write"]))
+            count = count + 1
+            if limit > 0 and count >= limit:
+                break
+        for line in graph.graph('Files Write', values, sort=2,
+                                with_value=False):
+            print(line)
+
+    def iotop_output_file_read_write(self):
+        """Print both the per-file read and write graphs."""
+        files = self.create_files_dict()
+        self.iotop_output_print_file_read(files)
+        self.iotop_output_print_file_write(files)
+
+    def filter_process(self, proc):
+        """Return True if `proc` passes the --procname/--pid filters."""
+        if self._arg_proc_list and proc.comm not in self._arg_proc_list:
+            return False
+        if self._arg_pid_list and str(proc.pid) not in self._arg_pid_list:
+            return False
+        return True
+
+    def filter_size(self, size):
+        """Return True if `size` passes the --minsize/--maxsize filters."""
+        # don't filter sync and open
+        if size is None:
+            return True
+        if self._arg_maxsize is not None and size > self._arg_maxsize:
+            return False
+        if self._arg_minsize is not None and size < self._arg_minsize:
+            return False
+        return True
+
+    def filter_latency(self, duration):
+        """Return True if `duration` (ns) passes the --min/--max latency
+        filters, which are expressed in microseconds."""
+        if self._arg_max is not None and (duration/1000) > self._arg_max:
+            return False
+        if self._arg_min is not None and (duration/1000) < self._arg_min:
+            return False
+        return True
+
+    def iotop_output_read(self):
+        """Print an ASCII bar graph of per-process bytes read, split by
+        file/net/unknown, honoring the process filter and --limit."""
+        count = 0
+        limit = self._arg_limit
+        graph = Pyasciigraph()
+        values = []
+        for tid in sorted(self.state.tids.values(),
+                          key=operator.attrgetter('read'), reverse=True):
+            if not self.filter_process(tid):
+                continue
+            info_fmt = "{:>10} {:<25} {:>9} file {:>9} net {:>9} unknown"
+            values.append((info_fmt.format(
+                           common.convert_size(tid.read, padding_after=True),
+                           "%s (%d)" % (tid.comm, tid.pid),
+                           common.convert_size(tid.disk_read,
+                                               padding_after=True),
+                           common.convert_size(tid.net_read,
+                                               padding_after=True),
+                           common.convert_size(tid.unk_read,
+                                               padding_after=True)),
+                           tid.read))
+            count = count + 1
+            if limit > 0 and count >= limit:
+                break
+        for line in graph.graph('Per-process I/O Read', values,
+                                with_value=False):
+            print(line)
+
+    def iotop_output_write(self):
+        count = 0
+        limit = self._arg_limit
+        graph = Pyasciigraph()
+        values = []
+        for tid in sorted(self.state.tids.values(),
+                          key=operator.attrgetter('write'), reverse=True):
+            if not self.filter_process(tid):
+                continue
+            info_fmt = "{:>10} {:<25} {:>9} file {:>9} net {:>9} unknown "
+            values.append((info_fmt.format(
+                           common.convert_size(tid.write, padding_after=True),
+                           "%s (%d)" % (tid.comm, tid.pid),
+                           common.convert_size(tid.disk_write,
+                                               padding_after=True),
+                           common.convert_size(tid.net_write,
+                                               padding_after=True),
+                           common.convert_size(tid.unk_write,
+                                               padding_after=True)),
+                           tid.write))
+            count = count + 1
+            if limit > 0 and count >= limit:
+                break
+        for line in graph.graph('Per-process I/O Write', values,
+                                with_value=False):
+            print(line)
+
+    def iotop_output_disk_read(self):
+        count = 0
+        limit = self._arg_limit
+        graph = Pyasciigraph()
+        values = []
+        for tid in sorted(self.state.tids.values(),
+                          key=operator.attrgetter('block_read'), reverse=True):
+            if not self.filter_process(tid):
+                continue
+            if tid.block_read == 0:
+                continue
+            info_fmt = "{:>10} {:<22}"
+            values.append((info_fmt.format(common.convert_size(tid.block_read,
+                                           padding_after=True),
+                                           "%s (pid=%d)" % (tid.comm,
+                                                            tid.pid)),
+                           tid.block_read))
+            count = count + 1
+            if limit > 0 and count >= limit:
+                break
+        for line in graph.graph('Block I/O Read', values, with_value=False):
+            print(line)
+
+    def iotop_output_disk_write(self):
+        count = 0
+        limit = self._arg_limit
+        graph = Pyasciigraph()
+        values = []
+        for tid in sorted(self.state.tids.values(),
+                          key=operator.attrgetter('block_write'),
+                          reverse=True):
+            if not self.filter_process(tid):
+                continue
+            if tid.block_write == 0:
+                continue
+            info_fmt = "{:>10} {:<22}"
+            values.append((info_fmt.format(common.convert_size(tid.block_write,
+                                           padding_after=True),
+                                           "%s (pid=%d)" % (tid.comm,
+                                                            tid.pid)),
+                           tid.block_write))
+            count = count + 1
+            if limit > 0 and count >= limit:
+                break
+        for line in graph.graph('Block I/O Write', values, with_value=False):
+            print(line)
+
+    def iotop_output_nr_sector(self):
+        graph = Pyasciigraph()
+        values = []
+        for disk in sorted(self.state.disks.values(),
+                           key=operator.attrgetter('nr_sector'), reverse=True):
+            if disk.nr_sector == 0:
+                continue
+            values.append((disk.prettyname, disk.nr_sector))
+        for line in graph.graph('Disk nr_sector', values, unit=" sectors"):
+            print(line)
+
+    def iotop_output_nr_requests(self):
+        graph = Pyasciigraph()
+        values = []
+        for disk in sorted(self.state.disks.values(),
+                           key=operator.attrgetter('nr_requests'),
+                           reverse=True):
+            if disk.nr_sector == 0:
+                continue
+            values.append((disk.prettyname, disk.nr_requests))
+        for line in graph.graph('Disk nr_requests', values, unit=" requests"):
+            print(line)
+
+    def iotop_output_dev_latency(self):
+        graph = Pyasciigraph()
+        values = []
+        for disk in self.state.disks.values():
+            if disk.completed_requests == 0:
+                continue
+            total = (disk.request_time / disk.completed_requests) \
+                / sv.MSEC_PER_NSEC
+            total = float("%0.03f" % total)
+            values.append(("%s" % disk.prettyname, total))
+        for line in graph.graph('Disk request time/sector', values, sort=2,
+                                unit=" ms"):
+            print(line)
+
+    def iotop_output_net_recv_bytes(self):
+        graph = Pyasciigraph()
+        values = []
+        for iface in sorted(self.state.ifaces.values(),
+                            key=operator.attrgetter('recv_bytes'),
+                            reverse=True):
+            values.append(("%s %s" % (common.convert_size(iface.recv_bytes),
+                                      iface.name),
+                          iface.recv_bytes))
+        for line in graph.graph('Network recv_bytes', values,
+                                with_value=False):
+            print(line)
+
+    def iotop_output_net_sent_bytes(self):
+        graph = Pyasciigraph()
+        values = []
+        for iface in sorted(self.state.ifaces.values(),
+                            key=operator.attrgetter('send_bytes'),
+                            reverse=True):
+            values.append(("%s %s" % (common.convert_size(iface.send_bytes),
+                                      iface.name),
+                          iface.send_bytes))
+        for line in graph.graph('Network sent_bytes', values,
+                                with_value=False):
+            print(line)
+
+    def iotop_output(self):
+        self.iotop_output_read()
+        self.iotop_output_write()
+        self.iotop_output_file_read_write()
+        self.iotop_output_disk_read()
+        self.iotop_output_disk_write()
+        self.iotop_output_nr_sector()
+        self.iotop_output_nr_requests()
+        self.iotop_output_dev_latency()
+        self.iotop_output_net_recv_bytes()
+        self.iotop_output_net_sent_bytes()
+#        self.output_latencies()
+
+    def iolatency_freq_histogram(self, _min, _max, res, rq_list, title):
+        step = (_max - _min) / res
+        if step == 0:
+            return
+        buckets = []
+        values = []
+        graph = Pyasciigraph()
+        for i in range(res):
+            buckets.append(i * step)
+            values.append(0)
+        for i in rq_list:
+            v = i / 1000
+            b = min(int((v-_min)/step), res - 1)
+            values[b] += 1
+        g = []
+        i = 0
+        for v in values:
+            g.append(("%0.03f" % (i * step + _min), v))
+            i += 1
+        for line in graph.graph(title, g, info_before=True):
+            print(line)
+        print("")
+
+    def compute_disk_stats(self, dev):
+        _max = 0
+        _min = -1
+        total = 0
+        values = []
+        count = len(dev.rq_list)
+        if count == 0:
+            return
+        for rq in dev.rq_list:
+            if rq.duration > _max:
+                _max = rq.duration
+            if _min == -1 or rq.duration < _min:
+                _min = rq.duration
+            total += rq.duration
+            values.append(rq.duration)
+        if count > 2:
+            stdev = statistics.stdev(values) / 1000
+        else:
+            stdev = "?"
+        dev.min = _min / 1000
+        dev.max = _max / 1000
+        dev.total = total / 1000
+        dev.count = count
+        dev.rq_values = values
+        dev.stdev = stdev
+
+    # iolatency functions
+    def iolatency_output_disk(self):
+        for dev in self.state.disks.keys():
+            d = self.state.disks[dev]
+            if d.max is None:
+                self.compute_disk_stats(d)
+            if d.count is not None:
+                self.iolatency_freq_histogram(d.min, d.max,
+                                              self._arg_freq_resolution,
+                                              d.rq_values,
+                                              "Frequency distribution for "
+                                              "disk %s (usec)" %
+                                              (d.prettyname))
+
+    def iolatency_output(self):
+        self.iolatency_output_disk()
+
+#    def output_latencies(self):
+#        graph = Pyasciigraph()
+#        for proc in self.latency_hist.keys():
+#            values = []
+#            for v in self.latency_hist[proc]:
+#                values.append(("%s" % (v[0]), v[1]))
+#            for line in graph.graph('%s requests latency (ms)' % proc, values,
+#                                    unit=" ms"):
+#                print(line)
+
+    def iostats_minmax(self, duration, current_min, current_max):
+        _min = current_min
+        _max = current_max
+        if current_min is None or duration < current_min:
+            _min = duration
+        if duration > current_max:
+            _max = duration
+        return (_min, _max)
+
+    def iostats_syscalls_line(self, fmt, name, count, _min, _max, total, rq):
+        if count < 2:
+            stdev = "?"
+        else:
+            stdev = "%0.03f" % (statistics.stdev(rq) / 1000)
+        if count < 1:
+            avg = "0.000"
+        else:
+            avg = "%0.03f" % (total / (count * 1000))
+        if _min is None:
+            _min = 0
+        _min = "%0.03f" % (_min / 1000)
+        _max = "%0.03f" % (_max / 1000)
+        print(fmt.format(name, count, _min, avg, _max, stdev))
+
+    def account_syscall_iorequests(self, s, iorequests):
+        for rq in iorequests:
+            # filter out if completely out of range but accept the
+            # union to show the real begin/end time
+            if self._arg_begin and self._arg_end and rq.end and \
+                    rq.begin > self._arg_end:
+                continue
+            if rq.iotype != sv.IORequest.IO_SYSCALL:
+                continue
+            if not self.filter_size(rq.size):
+                continue
+            if not self.filter_latency(rq.duration):
+                continue
+            if rq.operation == sv.IORequest.OP_READ:
+                s.read_count += 1
+                s.read_total += rq.duration
+                s.read_rq.append(rq.duration)
+                s.all_read.append(rq)
+                s.read_min, s.read_max = self.iostats_minmax(
+                    rq.duration, s.read_min, s.read_max)
+            elif rq.operation == sv.IORequest.OP_WRITE:
+                s.write_count += 1
+                s.write_total += rq.duration
+                s.write_rq.append(rq.duration)
+                s.all_write.append(rq)
+                s.write_min, s.write_max = self.iostats_minmax(
+                    rq.duration, s.write_min, s.write_max)
+            elif rq.operation == sv.IORequest.OP_SYNC:
+                s.sync_count += 1
+                s.sync_total += rq.duration
+                s.sync_rq.append(rq.duration)
+                s.all_sync.append(rq)
+                s.sync_min, s.sync_max = self.iostats_minmax(
+                    rq.duration, s.sync_min, s.sync_max)
+            elif rq.operation == sv.IORequest.OP_OPEN:
+                s.open_count += 1
+                s.open_total += rq.duration
+                s.open_rq.append(rq.duration)
+                s.all_open.append(rq)
+                s.open_min, s.open_max = self.iostats_minmax(
+                    rq.duration, s.open_min, s.open_max)
+
+    def compute_syscalls_latency_stats(self, end_ns):
+        s = sv.Syscalls_stats()
+        for tid in self.state.tids.values():
+            if not self.filter_process(tid):
+                continue
+            self.account_syscall_iorequests(s, tid.iorequests)
+            for fd in tid.fds.values():
+                self.account_syscall_iorequests(s, fd.iorequests)
+            for fd in tid.closed_fds.values():
+                self.account_syscall_iorequests(s, fd.iorequests)
+        return s
+
+    def iostats_output_syscalls(self):
+        s = self.syscalls_stats
+        print("\nSyscalls latency statistics (usec):")
+        fmt = "{:<14} {:>14} {:>14} {:>14} {:>14} {:>14}"
+        print(fmt.format("Type", "Count", "Min", "Average",
+                         "Max", "Stdev"))
+        print("-" * 89)
+        self.iostats_syscalls_line(fmt, "Open", s.open_count, s.open_min,
+                                   s.open_max, s.open_total, s.open_rq)
+        self.iostats_syscalls_line(fmt, "Read", s.read_count, s.read_min,
+                                   s.read_max, s.read_total, s.read_rq)
+        self.iostats_syscalls_line(fmt, "Write", s.write_count, s.write_min,
+                                   s.write_max, s.write_total, s.write_rq)
+        self.iostats_syscalls_line(fmt, "Sync", s.sync_count, s.sync_min,
+                                   s.sync_max, s.sync_total, s.sync_rq)
+
+    def iolatency_syscalls_output(self):
+        s = self.syscalls_stats
+        print("")
+        if s.open_count > 0:
+            self.iolatency_freq_histogram(s.open_min/1000, s.open_max/1000,
+                                          self._arg_freq_resolution, s.open_rq,
+                                          "Open latency distribution (usec)")
+        if s.read_count > 0:
+            self.iolatency_freq_histogram(s.read_min/1000, s.read_max/1000,
+                                          self._arg_freq_resolution, s.read_rq,
+                                          "Read latency distribution (usec)")
+        if s.write_count > 0:
+            self.iolatency_freq_histogram(s.write_min/1000, s.write_max/1000,
+                                          self._arg_freq_resolution,
+                                          s.write_rq,
+                                          "Write latency distribution (usec)")
+        if s.sync_count > 0:
+            self.iolatency_freq_histogram(s.sync_min/1000, s.sync_max/1000,
+                                          self._arg_freq_resolution, s.sync_rq,
+                                          "Sync latency distribution (usec)")
+
+    def iolatency_syscalls_list_output(self, title, rq_list,
+                                       sortkey, reverse):
+        limit = self._arg_limit
+        count = 0
+        outrange_legend = False
+        if len(rq_list) == 0:
+            return
+        print(title)
+        if self._arg_extra:
+            extra_fmt = "{:<48}"
+            extra_title = "{:<8} {:<8} {:<8} {:<8} {:<8} {:<8} ".format(
+                "Dirtied", "Alloc", "Free", "Written", "Kswap", "Cleared")
+        else:
+            extra_fmt = "{:<0}"
+            extra_title = ""
+        title_fmt = "{:<19} {:<20} {:<16} {:<23} {:<5} {:<24} {:<8} " + \
+            extra_fmt + "{:<14}"
+        fmt = "{:<40} {:<16} {:>16} {:>12}  {:<24} {:<8} " + \
+            extra_fmt + "{:<14}"
+        print(title_fmt.format("Begin", "End", "Name", "Duration (usec)",
+                               "Size", "Proc", "PID", extra_title, "Filename"))
+        for rq in sorted(rq_list,
+                         key=operator.attrgetter(sortkey), reverse=reverse):
+            # only limit the output if in the "top" view
+            if reverse and count > limit:
+                break
+            if rq.size is None:
+                size = "N/A"
+            else:
+                size = common.convert_size(rq.size)
+            if self._arg_extra:
+                extra = "{:<8} {:<8} {:<8} {:<8} {:<8} {:<8} ".format(
+                    rq.dirty, rq.page_alloc, rq.page_free, rq.page_written,
+                    rq.woke_kswapd, rq.page_cleared)
+            else:
+                extra = ""
+            name = rq.name.replace("syscall_entry_", "").replace("sys_", "")
+            if rq.fd is None:
+                filename = "None"
+                fd = "None"
+            else:
+                filename = rq.fd.filename
+                fd = rq.fd.fd
+            end = common.ns_to_hour_nsec(rq.end, self._arg_multi_day,
+                                         self._arg_gmt)
+
+            outrange = " "
+            duration = rq.duration
+            if self._arg_begin and rq.begin < self._arg_begin:
+                outrange = "*"
+                outrange_legend = True
+            if self._arg_end and rq.end > self._arg_end:
+                outrange = "*"
+                outrange_legend = True
+
+            print(fmt.format("[" + common.ns_to_hour_nsec(
+                rq.begin, self._arg_multi_day, self._arg_gmt) + "," +
+                end + "]" + outrange,
+                name,
+                "%0.03f" % (duration/1000) + outrange,
+                size, rq.proc.comm,
+                rq.proc.pid, extra,
+                "%s (fd=%s)" % (filename, fd)))
+            count += 1
+        if outrange_legend:
+            print("*: Syscalls started and/or completed outside of the "
+                  "range specified")
+
+    def iolatency_syscalls_top_output(self):
+        s = self.syscalls_stats
+        self.iolatency_syscalls_list_output(
+            "\nTop open syscall latencies (usec)", s.all_open,
+            "duration", True)
+        self.iolatency_syscalls_list_output(
+            "\nTop read syscall latencies (usec)", s.all_read,
+            "duration", True)
+        self.iolatency_syscalls_list_output(
+            "\nTop write syscall latencies (usec)", s.all_write,
+            "duration", True)
+        self.iolatency_syscalls_list_output(
+            "\nTop sync syscall latencies (usec)", s.all_sync,
+            "duration", True)
+
+    def iolatency_syscalls_log_output(self):
+        s = self.syscalls_stats
+        self.iolatency_syscalls_list_output(
+            "\nLog of all I/O system calls",
+            s.all_open + s.all_read + s.all_write + s.all_sync,
+            "begin", False)
+
+    # iostats functions
+    def iostats_output_disk(self):
+        # TODO same with network
+        if len(self.state.disks.keys()) == 0:
+            return
+        print("\nDisk latency statistics (usec):")
+        fmt = "{:<14} {:>14} {:>14} {:>14} {:>14} {:>14}"
+        print(fmt.format("Name", "Count", "Min", "Average", "Max", "Stdev"))
+        print("-" * 89)
+
+        for dev in self.state.disks.keys():
+            d = self.state.disks[dev]
+            if d.max is None:
+                d = self.state.disks[dev]
+            self.compute_disk_stats(d)
+            if d.count is not None:
+                self.iostats_syscalls_line(fmt, d.prettyname, d.count, d.min,
+                                           d.max, d.total, d.rq_values)
+
+    def iostats_output(self):
+        self.iostats_output_syscalls()
+        self.iostats_output_disk()
+
    def _print_results(self, begin_ns, end_ns, final=0):
        """Print every report selected on the command line for the
        [begin_ns, end_ns] time window.

        final -- unused here; presumably part of a shared analysis
        interface (TODO confirm against sibling analyses).
        """
        print('%s to %s' % (common.ns_to_asctime(begin_ns),
                            common.ns_to_asctime(end_ns)))
        self.iotop_output()
        # Latency stats feed the stats, freq and log views below.
        self.syscalls_stats = self.compute_syscalls_latency_stats(end_ns)
        if self._arg_stats:
            self.iostats_output()
            self.iolatency_syscalls_top_output()
        if self._arg_freq:
            self.iolatency_syscalls_output()
            self.iolatency_output()
        if self._arg_log:
            self.iolatency_syscalls_log_output()
+
+    def _reset_total(self, start_ts):
+        for dev in self.state.disks.keys():
+            self.state.disks[dev].init_counts()
+
+        for iface in self.state.ifaces.keys():
+            self.state.ifaces[iface].init_counts()
+
+        for tid in self.state.tids.values():
+            for fd in tid.fds.values():
+                fd.init_counts()
+            for fd in tid.closed_fds.values():
+                fd.init_counts()
+            tid.init_counts()
+
+    def _add_arguments(self, ap):
+        ap.add_argument('--extra', type=str, default=0,
+                        help='Show extra information in stats (beta)')
+        # specific argument
+        pass
+
+
+# entry point
def run():
    """Command-line entry point: build the io analysis and execute it."""
    IoAnalysis().run()
This page took 0.035367 seconds and 5 git commands to generate.