remove useless code in proc-info
author Julien Desfossez <jdesfossez@efficios.com>
Fri, 28 Nov 2014 19:38:02 +0000 (14:38 -0500)
committer Julien Desfossez <jdesfossez@efficios.com>
Fri, 28 Nov 2014 19:38:02 +0000 (14:38 -0500)
Signed-off-by: Julien Desfossez <jdesfossez@efficios.com>
proc-info.py

index 880bddd163ec1d9d8d365e59b39518da82e5a688..65033152f20395f197e1f778671daf9ece84eae1 100755
@@ -11,7 +11,6 @@
 # all copies or substantial portions of the Software.
 
 import argparse
-import operator
 import sys
 
 try:
@@ -21,14 +20,12 @@ except ImportError:
     sys.path.append("/usr/local/lib/python%d.%d/site-packages" %
                     (sys.version_info.major, sys.version_info.minor))
     from babeltrace import TraceCollection
-from LTTngAnalyzes.common import NSEC_PER_SEC, MSEC_PER_NSEC, \
-    convert_size, ns_to_asctime, ns_to_hour_nsec
+from LTTngAnalyzes.common import ns_to_hour_nsec
 from LTTngAnalyzes.sched import Sched
 from LTTngAnalyzes.syscalls import Syscalls
 from LTTngAnalyzes.block import Block
 from LTTngAnalyzes.net import Net
 from LTTngAnalyzes.statedump import Statedump
-from ascii_graph import Pyasciigraph
 
 
 class ProcInfo():
@@ -122,171 +119,6 @@ class ProcInfo():
             if payload:
                 print("%s" % (payload))
 
-    def check_refresh(self, args, event):
-        """Check if we need to output something"""
-        if args.refresh == 0:
-            return
-        event_sec = event.timestamp / NSEC_PER_SEC
-        if self.current_sec == 0:
-            self.current_sec = event_sec
-        elif self.current_sec != event_sec and \
-                (self.current_sec + args.refresh) <= event_sec:
-            self.output(args, self.start_ns, event.timestamp)
-            self.reset_total(event.timestamp)
-            self.current_sec = event_sec
-            self.start_ns = event.timestamp
-
-    def output_file_read(self, args):
-        count = 0
-        limit = args.top
-        graph = Pyasciigraph()
-        values = []
-        files = {}
-        for tid in self.tids.values():
-            for fd in tid.fds.values():
-                if fd.filename not in files.keys():
-                    files[fd.filename] = {}
-                    files[fd.filename]["read"] = fd.read
-                    files[fd.filename]["write"] = fd.write
-                    if fd.filename.startswith("pipe") or \
-                            fd.filename.startswith("socket") or \
-                            fd.filename.startswith("anon_inode"):
-                        files[fd.filename]["name"] = "%s (%s)" % (fd.filename,
-                                                                  tid.comm)
-                    else:
-                        files[fd.filename]["name"] = fd.filename
-                    files[fd.filename]["other"] = "(%d %d)" % (fd.fd, tid.tid)
-                else:
-                    files[fd.filename]["read"] += fd.read
-                    files[fd.filename]["write"] += fd.write
-        for f in files.values():
-            if f["read"] == 0:
-                continue
-            values.append(("%s %s %s" % (
-                f["name"], convert_size(f["read"]), f["other"]), f["read"]))
-            count = count + 1
-            if limit > 0 and count >= limit:
-                break
-        for line in graph.graph('Files Read', values, sort=2):
-            print(line)
-
-    def output_read(self, args):
-        count = 0
-        limit = args.top
-        graph = Pyasciigraph()
-        values = []
-        for tid in sorted(
-                self.tids.values(),
-                key=operator.attrgetter('read'), reverse=True):
-            if len(args.proc_list) > 0 and tid.comm not in args.proc_list:
-                continue
-            values.append(("%s %s (%d)" % (convert_size(tid.read), tid.comm,
-                                           tid.tid), tid.read))
-            count = count + 1
-            if limit > 0 and count >= limit:
-                break
-        for line in graph.graph('I/O Read', values):
-            print(line)
-
-    def output_write(self, args):
-        count = 0
-        limit = args.top
-        graph = Pyasciigraph()
-        values = []
-        for tid in sorted(self.tids.values(),
-                          key=operator.attrgetter('write'), reverse=True):
-            if len(args.proc_list) > 0 and tid.comm not in args.proc_list:
-                continue
-            values.append(("%s %s (%d)" % (convert_size(tid.write), tid.comm,
-                                           tid.tid), tid.write))
-            count = count + 1
-            if limit > 0 and count >= limit:
-                break
-        for line in graph.graph('I/O Write', values):
-            print(line)
-
-    def output_nr_sector(self, args):
-        graph = Pyasciigraph()
-        values = []
-        for disk in sorted(self.disks.values(),
-                           key=operator.attrgetter('nr_sector'), reverse=True):
-            if disk.nr_sector == 0:
-                continue
-            values.append((disk.prettyname, disk.nr_sector))
-        for line in graph.graph('Disk nr_sector', values):
-            print(line)
-
-    def output_nr_requests(self, args):
-        graph = Pyasciigraph()
-        values = []
-        for disk in sorted(self.disks.values(),
-                           key=operator.attrgetter('nr_requests'),
-                           reverse=True):
-            if disk.nr_sector == 0:
-                continue
-            values.append((disk.prettyname, disk.nr_requests))
-        for line in graph.graph('Disk nr_requests', values):
-            print(line)
-
-    def output_dev_latency(self, args):
-        graph = Pyasciigraph()
-        values = []
-        for disk in self.disks.values():
-            if disk.completed_requests == 0:
-                continue
-            total = (disk.request_time / disk.completed_requests) \
-                / MSEC_PER_NSEC
-            total = float("%0.03f" % total)
-            values.append(("ms %s" % disk.prettyname, total))
-        for line in graph.graph('Disk request time/sector', values, sort=2):
-            print(line)
-
-    def output_net_recv_bytes(self, args):
-        graph = Pyasciigraph()
-        values = []
-        for iface in sorted(self.ifaces.values(),
-                            key=operator.attrgetter('recv_bytes'),
-                            reverse=True):
-            values.append(("%s %s" % (convert_size(iface.recv_bytes),
-                          iface.name), iface.recv_bytes))
-        for line in graph.graph('Network recv_bytes', values):
-            print(line)
-
-    def output_net_sent_bytes(self, args):
-        graph = Pyasciigraph()
-        values = []
-        for iface in sorted(self.ifaces.values(),
-                            key=operator.attrgetter('send_bytes'),
-                            reverse=True):
-            values.append(("%s %s" % (convert_size(iface.send_bytes),
-                                      iface.name), iface.send_bytes))
-        for line in graph.graph('Network sent_bytes', values):
-            print(line)
-
-    def output(self, args, begin_ns, end_ns, final=0):
-        print('%s to %s' % (ns_to_asctime(begin_ns), ns_to_asctime(end_ns)))
-        self.output_read(args)
-        self.output_file_read(args)
-        self.output_write(args)
-        self.output_nr_sector(args)
-        self.output_nr_requests(args)
-        self.output_dev_latency(args)
-        self.output_net_recv_bytes(args)
-        self.output_net_sent_bytes(args)
-
-    def reset_total(self, start_ts):
-        for dev in self.disks.keys():
-            self.disks[dev].nr_sector = 0
-            self.disks[dev].nr_requests = 0
-            self.disks[dev].completed_requests = 0
-            self.disks[dev].request_time = 0
-
-        for iface in self.ifaces.keys():
-            self.ifaces[iface].recv_bytes = 0
-            self.ifaces[iface].recv_packets = 0
-            self.ifaces[iface].send_bytes = 0
-            self.ifaces[iface].send_packets = 0
-
 if __name__ == "__main__":
     parser = argparse.ArgumentParser(description='I/O usage analysis')
     parser.add_argument('path', metavar="<path/to/trace>", help='Trace path')
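
Note: the removed output_* helpers all rendered their per-process, per-disk and
per-interface counters through the same ascii_graph call pattern. As a point of
reference only (this sketch is not part of the commit, and the sample values
are made up), the pattern looks like this:

    from ascii_graph import Pyasciigraph

    # Sample (label, value) pairs; the removed helpers built these from
    # per-TID read/write byte counts, disk sector counts, or interface
    # byte counters.
    values = [('firefox (1234)', 4096), ('sshd (567)', 512)]

    graph = Pyasciigraph()
    for line in graph.graph('I/O Read', values):
        print(line)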