+++ /dev/null
-from LTTngAnalyzes.common import Process, get_disk, IORequest
-
-
class Block():
    """Track block-layer I/O: remapped requests, back merges, request
    issue and completion, aggregating results per disk (self.disks)
    and per process (self.tids)."""

    def __init__(self, cpus, disks, tids):
        self.cpus = cpus
        self.disks = disks
        self.tids = tids
        # in-flight remapped requests; each entry is a dict with the
        # keys "orig_dev", "dev" and "sector"
        self.remap_requests = []

    def remap(self, event):
        """Follow a block remap event: update the tracked request if it
        was already remapped, otherwise start tracking it."""
        dev = event["dev"]
        sector = event["sector"]
        old_dev = event["old_dev"]
        old_sector = event["old_sector"]

        for req in self.remap_requests:
            if req["dev"] == old_dev and req["sector"] == old_sector:
                req["dev"] = dev
                req["sector"] = sector
                return

        req = {}
        req["orig_dev"] = old_dev
        req["dev"] = dev
        req["sector"] = sector
        self.remap_requests.append(req)

    # For backmerge requests, just remove the request from the
    # remap_requests queue, because we rely later on the nr_sector
    # which has all the info we need.
    def backmerge(self, event):
        dev = event["dev"]
        sector = event["sector"]
        # rebuild the list instead of calling remove() while iterating,
        # which skipped the element following each removal
        self.remap_requests = [req for req in self.remap_requests
                               if req["dev"] != dev
                               or req["sector"] != sector]

    def issue(self, event):
        """Account a request-issue event on the right disk and, when a
        tid context is available, on the current process."""
        dev = event["dev"]
        sector = event["sector"]
        nr_sector = event["nr_sector"]
        # Note: since we don't know, we assume a sector is 512 bytes
        block_size = 512
        if nr_sector == 0:
            return

        rq = {}
        rq["nr_sector"] = nr_sector
        rq["rq_time"] = event.timestamp
        rq["iorequest"] = IORequest()
        rq["iorequest"].iotype = IORequest.IO_BLOCK
        rq["iorequest"].begin = event.timestamp
        rq["iorequest"].size = nr_sector * block_size

        # credit the request to the original device if it was remapped
        d = None
        for req in self.remap_requests:
            if req["dev"] == dev and req["sector"] == sector:
                d = get_disk(req["orig_dev"], self.disks)
                break
        if not d:
            d = get_disk(dev, self.disks)

        d.nr_requests += 1
        d.nr_sector += nr_sector
        d.pending_requests[sector] = rq

        if "tid" in event.keys():
            tid = event["tid"]
            if tid not in self.tids:
                p = Process()
                p.tid = tid
                self.tids[tid] = p
            else:
                p = self.tids[tid]
            # account on the thread-group leader when we know it
            if p.pid != -1 and p.tid != p.pid:
                p = self.tids[p.pid]
            rq["pid"] = p
            # even rwbs means read, odd means write
            if event["rwbs"] % 2 == 0:
                p.block_read += nr_sector * block_size
                rq["iorequest"].operation = IORequest.OP_READ
            else:
                p.block_write += nr_sector * block_size
                rq["iorequest"].operation = IORequest.OP_WRITE

    def complete(self, event):
        """Account a request-complete event: compute the latency and
        file the finished IORequest on its disk and process."""
        dev = event["dev"]
        sector = event["sector"]
        nr_sector = event["nr_sector"]
        if nr_sector == 0:
            return

        d = None
        for req in self.remap_requests:
            if req["dev"] == dev and req["sector"] == sector:
                d = get_disk(req["orig_dev"], self.disks)
                # request is done: stop tracking the remap; break so we
                # never mutate the list while still iterating over it
                self.remap_requests.remove(req)
                break

        if not d:
            d = get_disk(dev, self.disks)

        # ignore the completion of requests we didn't see the issue
        # because it would mess up the latency totals
        if sector not in d.pending_requests.keys():
            return

        rq = d.pending_requests[sector]
        if rq["nr_sector"] != nr_sector:
            return
        d.completed_requests += 1
        if rq["rq_time"] > event.timestamp:
            print("Weird request TS", event.timestamp)
        time_per_sector = (event.timestamp - rq["rq_time"]) / rq["nr_sector"]
        d.request_time += time_per_sector
        rq["iorequest"].duration = time_per_sector
        rq["iorequest"].end = event.timestamp
        d.rq_list.append(rq["iorequest"])
        if "pid" in rq.keys():
            rq["pid"].iorequests.append(rq["iorequest"])
        del d.pending_requests[sector]

    def dump_orphan_requests(self):
        """Print remapped requests that never saw a completion."""
        for req in self.remap_requests:
            print("Orphan : %d : %d %d" % (req["orig_dev"], req["dev"],
                                           req["sector"]))
+++ /dev/null
-import math
-import re
-import time
-import datetime
-import socket
-import struct
-import sys
-
-NSEC_PER_SEC = 1000000000
-MSEC_PER_NSEC = 1000000
-
-O_CLOEXEC = 0o2000000
-
-
class Process():
    """Per-thread state and accounting counters, indexed by tid."""

    def __init__(self):
        self.tid = -1
        self.pid = -1
        self.comm = ""
        # open file descriptors, indexed by fd number
        self.fds = {}
        # FDs that were closed, indexed by filename
        self.closed_fds = {}
        # state of the syscall currently in flight
        self.current_syscall = {}
        self.init_counts()

    def init_counts(self):
        """Reset every accounting counter."""
        self.cpu_ns = 0
        self.migrate_count = 0
        # network read/write
        self.net_read = self.net_write = 0
        # disk read/write (might be cached)
        self.disk_read = self.disk_write = 0
        # actual block access read/write
        self.block_read = self.block_write = 0
        # unclassified read/write (FD passing and statedump)
        self.unk_read = self.unk_write = 0
        # total I/O read/write
        self.read = self.write = 0
        # last TS where the process was scheduled in
        self.last_sched = 0
        # the process scheduled before this one
        self.prev_tid = -1
        # per-syscall statistics, indexed by syscall_name
        self.syscalls = {}
        # perf PMU counter values
        self.perf = {}
        self.dirty = 0
        self.allocated_pages = 0
        self.freed_pages = 0
        self.total_syscalls = 0
        # IORequest objects for freq analysis later (block and
        # syscalls with no FD like sys_sync)
        self.iorequests = []
-
-
class CPU():
    """Per-CPU scheduling and perf-counter state."""

    def __init__(self):
        # CPU number
        self.cpu_id = -1
        # time spent executing tasks, in ns
        self.cpu_ns = 0
        # tid currently running on this CPU (-1 when unknown/idle)
        self.current_tid = -1
        # timestamp at which the current task was scheduled in
        self.start_task_ns = 0
        # last perf PMU counter values seen on this CPU
        self.perf = {}
        # wakeups waiting for their matching sched_switch
        self.wakeup_queue = []
-
-
class Syscall():
    """A syscall name paired with its invocation count."""

    def __init__(self):
        # syscall name, e.g. "sys_read"
        self.name = ""
        # number of times it was seen
        self.count = 0
-
-
class Disk():
    """Per-device block I/O counters and request history."""

    def __init__(self):
        self.name = ""
        self.prettyname = ""
        self.init_counts()

    def init_counts(self):
        """Reset counters and drop tracked requests."""
        self.nr_sector = 0
        self.nr_requests = 0
        self.completed_requests = 0
        self.request_time = 0
        # issued requests awaiting completion, indexed by sector
        self.pending_requests = {}
        # completed IORequest objects
        self.rq_list = []
        # latency statistics, filled in after the fact
        self.max = self.min = None
        self.total = self.count = None
        self.rq_values = self.stdev = None
-
-
class Iface():
    """Per-network-interface packet and byte counters."""

    def __init__(self):
        self.name = ""
        self.init_counts()

    def init_counts(self):
        """Reset the receive/transmit counters."""
        self.recv_bytes = self.recv_packets = 0
        self.send_bytes = self.send_packets = 0
-
-
class FDType():
    """Enumeration of the possible backing types of a file descriptor."""
    unknown = 0
    disk = 1
    net = 2
    # not 100% sure they are network FDs (assumed when net_dev_xmit is
    # called during a write syscall and the type in unknown).
    maybe_net = 3
-
-
class FD():
    """State of one open file descriptor within a Process."""

    def __init__(self):
        self.filename = ""
        self.fd = -1
        # address family
        self.family = socket.AF_UNSPEC
        self.fdtype = FDType.unknown
        # if FD was inherited, parent PID
        self.parent = -1
        self.init_counts()

    def init_counts(self):
        """Reset every I/O counter on this FD."""
        # network read/write
        self.net_read = self.net_write = 0
        # disk read/write (might be cached)
        self.disk_read = self.disk_write = 0
        # unclassified read/write (FD passing and statedump)
        self.unk_read = self.unk_write = 0
        # total read/write
        self.read = self.write = 0
        self.open = 0
        self.close = 0
        self.cloexec = 0
        # syscall IORequest objects for freq analysis later
        self.iorequests = []
-
-
class IRQ():
    """One hard or soft interrupt occurrence."""
    HARD_IRQ = 1
    SOFT_IRQ = 2
    # from include/linux/interrupt.h
    soft_names = {0: "HI_SOFTIRQ",
                  1: "TIMER_SOFTIRQ",
                  2: "NET_TX_SOFTIRQ",
                  3: "NET_RX_SOFTIRQ",
                  4: "BLOCK_SOFTIRQ",
                  5: "BLOCK_IOPOLL_SOFTIRQ",
                  6: "TASKLET_SOFTIRQ",
                  7: "SCHED_SOFTIRQ",
                  8: "HRTIMER_SOFTIRQ",
                  9: "RCU_SOFTIRQ"}

    def __init__(self):
        # interrupt number (hard) or softirq vector (soft)
        self.nr = -1
        # IRQ.HARD_IRQ or IRQ.SOFT_IRQ
        self.irqclass = 0
        # entry, exit and raise timestamps; -1 while unknown
        self.start_ts = self.stop_ts = self.raise_ts = -1
        self.cpu_id = -1
-
-
class IORequest():
    """One I/O request (block, syscall or network) and its metadata."""
    # I/O "type"
    IO_SYSCALL = 1
    IO_BLOCK = 2
    IO_NET = 3
    # I/O operations
    OP_OPEN = 1
    OP_READ = 2
    OP_WRITE = 3
    OP_CLOSE = 4
    OP_SYNC = 5

    def __init__(self):
        # IORequest.IO_*
        self.iotype = None
        # bytes for syscalls and net, sectors for block
        # FIXME: syscalls handling vectors (vector size missing)
        self.size = None
        # for syscalls and block: delay between issue and completion
        # of the request
        self.duration = None
        # IORequest.OP_*
        self.operation = None
        # syscall name
        self.name = None
        # begin and end syscall timestamps
        self.begin = self.end = None
        # current process
        self.proc = None
        # current FD (for syscalls)
        self.fd = None
        # buffers dirtied during the operation
        self.dirty = 0
        # pages allocated / freed / written to disk during the operation
        self.page_alloc = self.page_free = self.page_written = 0
        # kswapd was forced to wakeup during the operation
        self.woke_kswapd = False
        # estimated pages flushed during a sync operation
        self.page_cleared = 0
-
-
class Syscalls_stats():
    """Aggregated latency statistics for read/write/open/sync syscalls."""

    def __init__(self):
        # identical counter set for each syscall category:
        # <op>_max/_min/_total/_count, <op>_rq and all_<op>
        for op in ("read", "write", "open", "sync"):
            setattr(self, op + "_max", 0)
            setattr(self, op + "_min", None)
            setattr(self, op + "_total", 0)
            setattr(self, op + "_count", 0)
            setattr(self, op + "_rq", [])
            setattr(self, "all_" + op, [])
-
-
class SyscallConsts():
    """Classification tables for syscall names used by the analyses."""
    # TODO: decouple socket/family logic from this class
    INET_FAMILIES = [socket.AF_INET, socket.AF_INET6]
    DISK_FAMILIES = [socket.AF_UNIX]
    # list of syscalls that open a FD on disk (in the exit_syscall event)
    DISK_OPEN_SYSCALLS = ["sys_open", "syscall_entry_open",
                          "sys_openat", "syscall_entry_openat"]
    # list of syscalls that open a FD on the network
    # (in the exit_syscall event)
    NET_OPEN_SYSCALLS = ["sys_accept", "syscall_entry_accept",
                         "sys_socket", "syscall_entry_socket"]
    # list of syscalls that can duplicate a FD
    DUP_OPEN_SYSCALLS = ["sys_fcntl", "syscall_entry_fcntl",
                         "sys_dup2", "syscall_entry_dup2"]
    SYNC_SYSCALLS = ["sys_sync", "syscall_entry_sync",
                     "sys_sync_file_range", "syscall_entry_sync_file_range",
                     "sys_fsync", "syscall_entry_fsync",
                     "sys_fdatasync", "syscall_entry_fdatasync"]
    # merge the 3 open lists
    OPEN_SYSCALLS = DISK_OPEN_SYSCALLS + NET_OPEN_SYSCALLS + DUP_OPEN_SYSCALLS
    # list of syscalls that close a FD (in the "fd =" field)
    CLOSE_SYSCALLS = ["sys_close", "syscall_entry_close"]
    # list of syscall that read on a FD, value in the exit_syscall following
    READ_SYSCALLS = ["sys_read", "syscall_entry_read",
                     "sys_recvmsg", "syscall_entry_recvmsg",
                     "sys_recvfrom", "syscall_entry_recvfrom",
                     "sys_splice", "syscall_entry_splice",
                     "sys_readv", "syscall_entry_readv",
                     "sys_sendfile64", "syscall_entry_sendfile64"]
    # list of syscall that write on a FD, value in the exit_syscall following
    WRITE_SYSCALLS = ["sys_write", "syscall_entry_write",
                      "sys_sendmsg", "syscall_entry_sendmsg",
                      "sys_sendto", "syscall_entry_sendto",
                      "sys_writev", "syscall_entry_writev"]
    # generic names assigned to special FDs, don't try to match these in the
    # closed_fds dict
    GENERIC_NAMES = ["unknown", "socket"]

    def __init__(self):
        # fixed: the original signature was "def __init__():" without
        # self, which made the class impossible to instantiate
        pass
-
-
-# imported from include/linux/kdev_t.h
def kdev_major_minor(dev):
    """Decode a kernel dev_t into a "(major,minor)" string.

    Mirrors the MAJOR()/MINOR() macros imported from
    include/linux/kdev_t.h (20 minor bits).
    """
    minorbits = 20
    minormask = (1 << minorbits) - 1
    return "(%d,%d)" % (dev >> minorbits, dev & minormask)
-
-
def get_disk(dev, disks):
    """Return the Disk tracked for dev, creating and registering a new
    one on first use."""
    d = disks.get(dev)
    if d is None:
        d = Disk()
        d.name = "%d" % dev
        d.prettyname = kdev_major_minor(dev)
        disks[dev] = d
    return d
-
-
def convert_size(size, padding_after=False, padding_before=False):
    """Format a byte count with a binary-scaled unit (B, KB, MB, ...).

    padding_after/padding_before add a space after/before the unit for
    sub-KB sizes, to keep columns aligned in tabular output.
    """
    if padding_after and size < 1024:
        space_after = " "
    else:
        space_after = ""
    if padding_before and size < 1024:
        space_before = " "
    else:
        space_before = ""
    if size <= 0:
        return "0 " + space_before + "B" + space_after
    size_name = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
    i = int(math.floor(math.log(size, 1024)))
    p = math.pow(1024, i)
    s = round(size / p, 2)
    if s > 0:
        try:
            return '%s %s%s%s' % (s, space_before, size_name[i], space_after)
        # only an out-of-range unit index can fail here; the original
        # bare "except:" would also have hidden unrelated errors
        except IndexError:
            print(i, size_name)
            raise Exception("Too big to be true")
    else:
        return '0 B'
-
-
def is_multi_day_trace_collection(handle):
    """Return True when any stream ends on a different calendar day
    (local time) than the first stream's begin day."""
    first_day = None
    for h in handle.values():
        if first_day is None:
            begin = time.localtime(h.timestamp_begin / NSEC_PER_SEC)
            first_day = (begin.tm_year, begin.tm_mon, begin.tm_mday)
        end = time.localtime(h.timestamp_end / NSEC_PER_SEC)
        if first_day != (end.tm_year, end.tm_mon, end.tm_mday):
            return True
    return False
-
-
def trace_collection_date(handle):
    """Return the (year, month, day) of a single-day trace collection,
    or None when the collection spans multiple days."""
    if is_multi_day_trace_collection(handle):
        return None
    # all streams share the day, so the first one is enough
    for h in handle.values():
        begin = time.localtime(h.timestamp_begin / NSEC_PER_SEC)
        return (begin.tm_year, begin.tm_mon, begin.tm_mday)
-
-
def extract_timerange(handle, timerange, gmt):
    """Parse a "[begin,end]" range string into (begin_ns, end_ns).

    Returns None when the string is malformed, and (None, None) when
    either bound cannot be converted to a timestamp.
    """
    m = re.match(r'^\[(?P<begin>.*),(?P<end>.*)\]$', timerange)
    if m is None:
        return None
    begin = date_to_epoch_nsec(handle, m.group("begin").strip(), gmt)
    if begin is None:
        return (None, None)
    end = date_to_epoch_nsec(handle, m.group("end").strip(), gmt)
    if end is None:
        return (None, None)
    return (begin, end)
-
-
def date_to_epoch_nsec(handle, date, gmt):
    """Convert a date/time string to a UNIX timestamp in nanoseconds.

    Accepted formats: "yyyy-mm-dd hh:mm:ss[.nnnnnnnnn]" (a 'T'/'t' may
    separate date and time) and "hh:mm:ss[.nnnnnnnnn]"; the time-only
    forms borrow the calendar date from the (single-day) trace
    collection. Returns None when the string cannot be parsed. When gmt
    is set the input is interpreted as GMT instead of local time.
    """
    # match 2014-12-12 17:29:43.802588035 or 2014-12-12T17:29:43.802588035
    p1 = re.compile(r'^(?P<year>\d\d\d\d)-(?P<mon>[01]\d)-'
                    r'(?P<day>[0123]\d)[\sTt]'
                    r'(?P<hour>\d\d):(?P<min>\d\d):(?P<sec>\d\d).'
                    r'(?P<nsec>\d\d\d\d\d\d\d\d\d)$')
    # match 2014-12-12 17:29:43 or 2014-12-12T17:29:43
    p2 = re.compile(r'^(?P<year>\d\d\d\d)-(?P<mon>[01]\d)-'
                    r'(?P<day>[0123]\d)[\sTt]'
                    r'(?P<hour>\d\d):(?P<min>\d\d):(?P<sec>\d\d)$')
    # match 17:29:43.802588035
    p3 = re.compile(r'^(?P<hour>\d\d):(?P<min>\d\d):(?P<sec>\d\d).'
                    r'(?P<nsec>\d\d\d\d\d\d\d\d\d)$')
    # match 17:29:43
    p4 = re.compile(r'^(?P<hour>\d\d):(?P<min>\d\d):(?P<sec>\d\d)$')

    # match each pattern once and pull all fields from the match object,
    # instead of re-running a search per extracted group as before
    m = p1.match(date) or p2.match(date)
    if m is not None:
        fields = m.groupdict()
        year = fields["year"]
        month = fields["mon"]
        day = fields["day"]
    else:
        m = p3.match(date) or p4.match(date)
        if m is None:
            return None
        # time-only format: take the date from the trace collection
        d = trace_collection_date(handle)
        if d is None:
            print("Use the format 'yyyy-mm-dd hh:mm:ss[.nnnnnnnnn]' "
                  "for multi-day traces")
            return None
        year, month, day = d
        fields = m.groupdict()
    hour = fields["hour"]
    minute = fields["min"]
    sec = fields["sec"]
    # patterns without a nanosecond group contribute 0
    nsec = fields.get("nsec", 0)

    d = datetime.datetime(int(year), int(month), int(day), int(hour),
                          int(minute), int(sec))
    if gmt:
        d = d + datetime.timedelta(seconds=time.timezone)
    return int(d.timestamp()) * NSEC_PER_SEC + int(nsec)
-
-
def process_date_args(args, handle):
    """Resolve args.begin/args.end/--timerange into ns timestamps on
    args; exits the program on an unparseable time string."""
    def _invalid():
        print("Invalid timeformat")
        sys.exit(1)

    args.multi_day = is_multi_day_trace_collection(handle)
    if args.timerange:
        (args.begin, args.end) = extract_timerange(handle, args.timerange,
                                                   args.gmt)
        if args.begin is None or args.end is None:
            _invalid()
    else:
        if args.begin:
            args.begin = date_to_epoch_nsec(handle, args.begin, args.gmt)
            if args.begin is None:
                _invalid()
        if args.end:
            args.end = date_to_epoch_nsec(handle, args.end, args.gmt)
            if args.end is None:
                _invalid()
-
-
def ns_to_asctime(ns):
    """Render a nanosecond timestamp as an asctime() string (local time)."""
    local = time.localtime(ns / NSEC_PER_SEC)
    return time.asctime(local)
-
-
def ns_to_hour(ns):
    """Format a nanosecond timestamp as HH:MM:SS (local time)."""
    t = time.localtime(ns / NSEC_PER_SEC)
    return "%02d:%02d:%02d" % (t.tm_hour, t.tm_min, t.tm_sec)
-
-
def ns_to_hour_nsec(ns, multi_day=False, gmt=False):
    """Format a ns timestamp as HH:MM:SS.nnnnnnnnn, optionally prefixed
    with the date (multi_day) and optionally in GMT instead of local
    time."""
    if gmt:
        t = time.gmtime(ns / NSEC_PER_SEC)
    else:
        t = time.localtime(ns / NSEC_PER_SEC)
    frac = ns % NSEC_PER_SEC
    if multi_day:
        return "%04d-%02d-%02d %02d:%02d:%02d.%09d" % (
            t.tm_year, t.tm_mon, t.tm_mday,
            t.tm_hour, t.tm_min, t.tm_sec, frac)
    return "%02d:%02d:%02d.%09d" % (t.tm_hour, t.tm_min, t.tm_sec, frac)
-
-
def ns_to_sec(ns):
    """Format a ns timestamp as a "seconds.nanoseconds" string."""
    sec = ns / NSEC_PER_SEC
    nsec = ns % NSEC_PER_SEC
    return "%lu.%09u" % (sec, nsec)
-
-
def ns_to_day(ns):
    """Format a nanosecond timestamp as YYYY-MM-DD (local time)."""
    t = time.localtime(ns / NSEC_PER_SEC)
    return "%04d-%02d-%02d" % (t.tm_year, t.tm_mon, t.tm_mday)
-
-
-def sec_to_hour(ns):
- d = time.localtime(ns)
- return "%02d:%02d:%02d" % (d.tm_hour, d.tm_min, d.tm_sec)
-
-
def sec_to_nsec(sec):
    """Convert a duration in seconds to nanoseconds."""
    return sec * NSEC_PER_SEC
-
-
def seq_to_ipv4(ip):
    """Join the first four octets of a sequence into dotted-quad form."""
    octets = (ip[0], ip[1], ip[2], ip[3])
    return "{}.{}.{}.{}".format(*octets)
-
-
def int_to_ipv4(ip):
    """Convert a host-order 32-bit integer into dotted-quad notation."""
    packed = struct.pack("!I", ip)
    return socket.inet_ntoa(packed)
-
-
def str_to_bytes(value):
    """Parse a human-readable size string ("10K", "1.5MB", ...) into a
    byte count; returns None (and prints a warning) when the unit is
    not recognized."""
    digits = []
    letters = []
    for ch in value:
        if ch.isdigit() or ch == ".":
            digits.append(ch)
        elif ch.isalnum():
            letters.append(ch)
    num = float("".join(digits))
    unit = "".join(letters)
    multipliers = {
        "": 1,
        "B": 1,
        "k": 1024, "K": 1024, "kB": 1024, "KB": 1024,
        "m": 1024 ** 2, "M": 1024 ** 2, "mB": 1024 ** 2, "MB": 1024 ** 2,
        "g": 1024 ** 3, "G": 1024 ** 3, "gB": 1024 ** 3, "GB": 1024 ** 3,
        "t": 1024 ** 4, "T": 1024 ** 4, "tB": 1024 ** 4, "TB": 1024 ** 4,
    }
    if unit not in multipliers:
        print("Unit", unit, "not understood")
        return None
    return int(num * multipliers[unit])
-
-
def get_v4_addr_str(ip):
    """Render an IPv4 address field as a dotted-quad string.

    Depending on the version of lttng-modules the field is either a
    sequence of octets (>= 2.6) or a packed integer (< 2.6); try the
    sequence form first, fall back on the integer one.
    """
    try:
        return seq_to_ipv4(ip)
    except TypeError:
        return int_to_ipv4(ip)
+++ /dev/null
-import os
-import sys
-from socket import socket
-from LTTngAnalyzes.common import NSEC_PER_SEC
-
-CARBON_SERVER = '10.0.3.185'
-CARBON_PORT = 2003
-
-
class GraphiteReport():
    """Push per-CPU, per-disk and per-interface metrics to a Carbon
    (Graphite) server using the plaintext line protocol."""

    def __init__(self, trace_start_ts, trace_end_ts, cpus, tids,
                 syscalls, disks, ifaces):
        self.trace_start_ts = trace_start_ts
        self.trace_end_ts = trace_end_ts
        self.cpus = cpus
        self.tids = tids
        self.syscalls = syscalls
        self.disks = disks
        self.ifaces = ifaces
        self.hostname = os.uname()[1]

    def report(self, begin_ns, end_ns, final, args):
        """Send the reports selected in args for [begin_ns, end_ns];
        exits when the Carbon server cannot be reached."""
        if not (args.info or args.cpu or args.tid or args.global_syscalls
                or args.tid_syscalls):
            return

        sock = socket()
        try:
            sock.connect((CARBON_SERVER, CARBON_PORT))
        # connection failures raise OSError; the original bare "except:"
        # would also have swallowed KeyboardInterrupt and real bugs
        except OSError:
            print("Couldn't connect to %(server)s on port %(port)d, is "
                  "carbon-agent.py running?"
                  % {'server': CARBON_SERVER, 'port': CARBON_PORT})
            sys.exit(1)

        total_ns = end_ns - begin_ns

        if args.cpu:
            self.per_cpu_report(total_ns, end_ns, sock)
        if args.disk:
            self.per_disk_report(end_ns, sock)
        if args.net:
            self.per_iface_report(end_ns, sock)
        # if args.tid:
        #     self.per_tid_report(end_ns, total_ns, sock)

    def per_cpu_report(self, total_ns, end_ns, sock):
        """Send per-CPU and average CPU usage percentages."""
        total_cpu_pc = 0
        nb_cpu = len(self.cpus.keys())
        lines = []
        for cpu in self.cpus.keys():
            cpu_pc = self.cpus[cpu].cpu_pc
            total_cpu_pc += cpu_pc
            lines.append("hosts.%s.cpu.cpu%d %d %lu" % (self.hostname,
                         cpu, cpu_pc,
                         end_ns/NSEC_PER_SEC))
        lines.append("hosts.%s.cpu.totalcpu %d %lu"
                     % (self.hostname, total_cpu_pc/nb_cpu,
                        end_ns/NSEC_PER_SEC))
        message = '\n'.join(lines) + '\n'  # all lines must end in a newline
        sock.sendall(message.encode())
        print("Sent cpu at", end_ns/NSEC_PER_SEC)

    def per_disk_report(self, end_ns, sock):
        """Send request count, sector count and average latency per disk."""
        lines = []
        ts = end_ns/NSEC_PER_SEC
        for dev in self.disks:
            lines.append("hosts.%s.disk.%d.rq %d %lu"
                         % (self.hostname, dev,
                            self.disks[dev].nr_requests, ts))
            lines.append("hosts.%s.disk.%d.sectors %d %lu"
                         % (self.hostname, dev, self.disks[dev].nr_sector, ts))
            if self.disks[dev].completed_requests > 0:
                total = (self.disks[dev].request_time /
                         self.disks[dev].completed_requests)
                lines.append("hosts.%s.disk.%d.latency %d %lu" %
                             (self.hostname, dev, total, ts))
        message = '\n'.join(lines) + '\n'  # all lines must end in a newline
        sock.sendall(message.encode())
        print("Sent block at", end_ns/NSEC_PER_SEC)

    def per_iface_report(self, end_ns, sock):
        """Send packet and byte counters for each network interface."""
        lines = []
        ts = end_ns/NSEC_PER_SEC
        for iface in self.ifaces:
            lines.append("hosts.%s.net.%s.recv_bytes %d %lu"
                         % (self.hostname, iface,
                            self.ifaces[iface].recv_bytes, ts))
            lines.append("hosts.%s.net.%s.recv_packets %d %lu"
                         % (self.hostname, iface,
                            self.ifaces[iface].recv_packets, ts))
            lines.append("hosts.%s.net.%s.send_bytes %d %lu"
                         % (self.hostname, iface,
                            self.ifaces[iface].send_bytes, ts))
            lines.append("hosts.%s.net.%s.send_packets %d %lu"
                         % (self.hostname, iface,
                            self.ifaces[iface].send_packets, ts))
        message = '\n'.join(lines) + '\n'  # all lines must end in a newline
        sock.sendall(message.encode())
        print("Sent net at", end_ns/NSEC_PER_SEC)

    def per_tid_report(self, end_ns, total_ns, sock):
        """Send per-thread CPU usage (percent of the interval)."""
        lines = []
        ts = end_ns/NSEC_PER_SEC
        for tid in self.tids.values():
            # skip the swapper pseudo-task
            if tid.tid == 0:
                continue
            lines.append("hosts.%s.tid.%s-%d %d %lu"
                         % (self.hostname, tid.comm.replace("/", "|"),
                            tid.tid, ((tid.cpu_ns * 100) / total_ns), ts))
        message = '\n'.join(lines) + '\n'  # all lines must end in a newline
        sock.sendall(message.encode())
        print("Sent TIDs at", end_ns/NSEC_PER_SEC)
+++ /dev/null
-from LTTngAnalyzes.common import IRQ
-
-
class Interrupt():
    """Hard/soft IRQ accounting.

    All state is kept in the shared `irq` dict: per-CPU in-flight
    interrupts, per-vector statistics and the flat list of completed
    IRQ objects. The entry/exit/raise handlers must be fed events in
    timestamp order for the per-CPU pairing to be meaningful.
    """
    def __init__(self, irq, cpus, tids):
        self.irq = irq
        self.cpus = cpus
        self.tids = tids
        self.irq["hard_count"] = 0
        self.irq["soft_count"] = 0
        # in-flight IRQ per CPU, paired on the matching *_exit event
        self.irq["hard-per-cpu"] = {}
        self.irq["soft-per-cpu"] = {}
        # pending softirq_raise (ts, vec) per CPU
        self.irq["raise-per-cpu"] = {}
        # hard IRQ number -> name
        self.irq["names"] = {}
        # per-IRQ statistics dicts (see init_irq)
        self.irq["hard-irqs"] = {}
        self.irq["soft-irqs"] = {}
        self.irq["raise-latency"] = {}
        # every completed IRQ, in completion order
        self.irq["irq-list"] = []

    def init_irq(self):
        """Return a fresh statistics dict for one IRQ number/vector."""
        irq = {}
        irq["list"] = []
        irq["max"] = 0
        irq["min"] = -1
        irq["count"] = 0
        irq["total"] = 0
        # raise->entry latency statistics (softirqs only)
        irq["raise_max"] = 0
        irq["raise_min"] = -1
        irq["raise_count"] = 0
        irq["raise_total"] = 0
        return irq

    def entry(self, event, irqclass, idfield):
        """Build an IRQ object for an entry event; idfield selects the
        event field holding the IRQ number ("irq") or vector ("vec")."""
        cpu_id = event["cpu_id"]
        i = IRQ()
        i.irqclass = irqclass
        i.start_ts = event.timestamp
        i.cpu_id = cpu_id
        i.nr = event[idfield]
        return i

    def hard_entry(self, event):
        """Handle irq_handler_entry: remember the name and open the
        per-CPU in-flight record."""
        cpu_id = event["cpu_id"]
        self.irq["names"][event["irq"]] = event["name"]
        self.irq["hard_count"] += 1
        i = self.entry(event, IRQ.HARD_IRQ, "irq")
        self.irq["hard-per-cpu"][cpu_id] = i

    def soft_entry(self, event):
        """Handle softirq_entry; consume a pending raise on the same
        CPU/vector to record the raise timestamp."""
        cpu_id = event["cpu_id"]
        self.irq["soft_count"] += 1
        i = self.entry(event, IRQ.SOFT_IRQ, "vec")
        self.irq["soft-per-cpu"][cpu_id] = i
        if cpu_id in self.irq["raise-per-cpu"].keys() and \
                self.irq["raise-per-cpu"][cpu_id] is not None and \
                self.irq["raise-per-cpu"][cpu_id][1] == event["vec"]:
            i.raise_ts = self.irq["raise-per-cpu"][cpu_id][0]
            self.irq["raise-per-cpu"][cpu_id] = None

    def compute_stats(self, irq_entry, i):
        """Fold one completed IRQ into its statistics dict (duration
        and, when a raise was seen, raise latency)."""
        duration = i.stop_ts - i.start_ts
        if duration > irq_entry["max"]:
            irq_entry["max"] = duration
        if irq_entry["min"] == -1 or duration < irq_entry["min"]:
            irq_entry["min"] = duration
        irq_entry["count"] += 1
        irq_entry["total"] += duration
        # compute raise latency if applicable
        if i.raise_ts == -1:
            return True
        latency = i.start_ts - i.raise_ts
        if latency > irq_entry["raise_max"]:
            irq_entry["raise_max"] = latency
        if irq_entry["raise_min"] == -1 or latency < irq_entry["raise_min"]:
            irq_entry["raise_min"] = latency
        irq_entry["raise_count"] += 1
        irq_entry["raise_total"] += latency
        return True

    def exit(self, event, idfield, per_cpu_key, irq_type, args):
        """Close the in-flight IRQ on this CPU and record it.

        Returns the IRQ object, False when it was filtered out by the
        args.max/args.min duration bounds (in microseconds), or None
        when there was no matching in-flight entry.
        """
        cpu_id = event["cpu_id"]
        if cpu_id not in self.irq[per_cpu_key].keys() or \
                self.irq[per_cpu_key][cpu_id] is None:
            return
        i = self.irq[per_cpu_key][cpu_id]
        # mismatched number: stale record, drop it
        if i.nr != event[idfield]:
            self.irq[per_cpu_key][cpu_id] = None
            return
        i.stop_ts = event.timestamp
        if not i.nr in self.irq[irq_type].keys():
            self.irq[irq_type][i.nr] = self.init_irq()

        # filter out max/min
        duration = i.stop_ts - i.start_ts
        if args.max and duration > args.max * 1000:
            return False
        if args.min and duration < args.min * 1000:
            return False
        self.irq[irq_type][i.nr]["list"].append(i)
        self.compute_stats(self.irq[irq_type][i.nr], i)
        self.irq["irq-list"].append(i)
        return i

    def hard_exit(self, event, args):
        """Handle irq_handler_exit; also stores the handler return value."""
        i = self.exit(event, "irq", "hard-per-cpu", "hard-irqs", args)
        if not i:
            return
        i.ret = event["ret"]

    def soft_exit(self, event, args):
        """Handle softirq_exit."""
        self.exit(event, "vec", "soft-per-cpu", "soft-irqs", args)

    def soft_raise(self, event):
        """Handle softirq_raise: remember (ts, vec) for this CPU."""
        cpu_id = event["cpu_id"]
        self.irq["raise-per-cpu"][cpu_id] = ((event.timestamp, event["vec"]))
+++ /dev/null
-from LTTngAnalyzes.common import NSEC_PER_SEC
-import json
-
-
class JsonReport():
    """Emit the analysis results as JSON on stdout."""

    def __init__(self, trace_start_ts, trace_end_ts, cpus, tids):
        self.trace_start_ts = trace_start_ts
        self.trace_end_ts = trace_end_ts
        self.cpus = cpus
        self.tids = tids

    def json_per_cpu_report(self, start, end):
        """Print per-CPU usage percentages and their average."""
        per_cpu = {cpu: int(c.cpu_pc) for cpu, c in self.cpus.items()}
        out = {
            "per-cpu": per_cpu,
            "timestamp": {"start": int(start), "end": int(end)},
            "total-cpu": int(sum(per_cpu.values()) / len(self.cpus.keys())),
        }
        print(json.dumps(out, indent=4))

    def json_per_tid_report(self, start, end, proc_list):
        """Print per-tid CPU usage (percent of the interval), restricted
        to the comm names in proc_list when it is non-empty."""
        total_ns = end - start
        per_tid = {}
        for tid, task in self.tids.items():
            if len(proc_list) > 0 and task.comm not in proc_list:
                continue
            per_tid[tid] = {
                "procname": task.comm,
                "percent": int((task.cpu_ns * 100) / total_ns),
            }
        out = {
            "per-tid": per_tid,
            "timestamp": {"start": int(start), "end": int(end)},
        }
        print(json.dumps(out, indent=4))

    def json_global_per_cpu_report(self):
        """Print the whole-trace usage series of every CPU."""
        series = [{"key": "CPU %d" % cpu,
                   "values": self.cpus[cpu].total_per_cpu_pc_list}
                  for cpu in self.cpus.keys()]
        print(json.dumps(series))

    def json_trace_info(self):
        """Print the trace time span."""
        total_ns = self.trace_end_ts - self.trace_start_ts
        out = {
            "start": self.trace_start_ts,
            "end": self.trace_end_ts,
            "total_ns": total_ns,
            "total_sec": "%lu.%0.09lus" % ((total_ns / NSEC_PER_SEC,
                                            total_ns % NSEC_PER_SEC)),
        }
        print(json.dumps(out, indent=4))

    def report(self, begin_ns, end_ns, final, args):
        """Entry point: emit the JSON reports selected in args."""
        if not (args.info or args.cpu or args.tid or args.overall):
            return
        if args.info and final:
            self.json_trace_info()
        if args.cpu:
            self.json_per_cpu_report(begin_ns, end_ns)
        if args.tid:
            self.json_per_tid_report(begin_ns, end_ns, args.display_proc_list)
        if args.overall and final:
            self.json_global_per_cpu_report()
+++ /dev/null
-from LTTngAnalyzes.common import ns_to_hour_nsec
-
-
class Mm():
    """Memory-management accounting: page allocations/frees and dirty
    buffer tracking, shared through the mm and dirty_pages dicts."""

    def __init__(self, mm, cpus, tids, dirty_pages):
        self.mm = mm
        self.cpus = cpus
        self.tids = tids
        self.dirty_pages = dirty_pages
        for counter in ("allocated_pages", "freed_pages", "count", "dirty"):
            self.mm[counter] = 0
        self.dirty_pages["pages"] = []
        self.dirty_pages["global_nr_dirty"] = -1
        self.dirty_pages["base_nr_dirty"] = -1

    def get_current_proc(self, event):
        """Return the Process currently scheduled on the event's CPU,
        or None when the CPU or its current task is unknown."""
        cpu = self.cpus.get(event["cpu_id"])
        if cpu is None or cpu.current_tid == -1:
            return None
        return self.tids[cpu.current_tid]

    def page_alloc(self, event):
        """Account one allocated page globally, on every in-flight
        syscall and on the currently running process."""
        self.mm["count"] += 1
        self.mm["allocated_pages"] += 1
        for task in self.tids.values():
            if not task.current_syscall:
                continue
            task.current_syscall["alloc"] = \
                task.current_syscall.get("alloc", 0) + 1
        task = self.get_current_proc(event)
        if task is not None:
            task.allocated_pages += 1

    def page_free(self, event):
        """Account one freed page; the global live-page count is never
        decremented below zero."""
        self.mm["freed_pages"] += 1
        if self.mm["count"] == 0:
            return
        self.mm["count"] -= 1
        task = self.get_current_proc(event)
        if task is not None:
            task.freed_pages += 1

    def block_dirty_buffer(self, event):
        """Record a dirtied buffer; when it happens inside a syscall
        with a known FD, remember (process, syscall, filename, fd)."""
        self.mm["dirty"] += 1
        cpu = self.cpus.get(event["cpu_id"])
        if cpu is None or cpu.current_tid <= 0:
            return
        task = self.tids[cpu.current_tid]
        current_syscall = task.current_syscall
        if not current_syscall:
            return
        if self.dirty_pages is None:
            return
        if "fd" in current_syscall:
            self.dirty_pages["pages"].append((task, current_syscall["name"],
                                              current_syscall["fd"].filename,
                                              current_syscall["fd"].fd))

    def writeback_global_dirty_state(self, event):
        """Print the kernel's global dirty-page state next to our own
        counters, then reset the local dirty counter."""
        print("%s count : %d, count dirty : %d, nr_dirty : %d, "
              "nr_writeback : %d, nr_dirtied : %d, nr_written : %d" %
              (ns_to_hour_nsec(event.timestamp), self.mm["count"],
               self.mm["dirty"], event["nr_dirty"],
               event["nr_writeback"], event["nr_dirtied"],
               event["nr_written"]))
        self.mm["dirty"] = 0
+++ /dev/null
-from LTTngAnalyzes.common import Iface, FDType, SyscallConsts
-
-
class Net():
    """Network accounting: per-interface traffic counters and tagging
    of FDs discovered to be sockets."""

    def __init__(self, ifaces, cpus, tids):
        self.ifaces = ifaces
        self.cpus = cpus
        self.tids = tids

    def get_dev(self, dev):
        """Return the Iface tracked for dev, creating it on first use."""
        iface = self.ifaces.get(dev)
        if iface is None:
            iface = Iface()
            iface.name = dev
            self.ifaces[dev] = iface
        return iface

    def send(self, event):
        """Account a transmitted packet; when it happens during a write
        syscall on an FD of unknown type, mark the FD as maybe_net."""
        iface = self.get_dev(event["name"])
        iface.send_packets += 1
        iface.send_bytes += event["len"]

        cpu = self.cpus.get(event["cpu_id"])
        if cpu is None or cpu.current_tid == -1:
            return
        task = self.tids[cpu.current_tid]
        if not task.current_syscall:
            return
        if task.current_syscall["name"] in SyscallConsts.WRITE_SYSCALLS:
            if task.current_syscall["fd"].fdtype == FDType.unknown:
                task.current_syscall["fd"].fdtype = FDType.maybe_net

    def recv(self, event):
        """Account a received packet on its interface."""
        iface = self.get_dev(event["name"])
        iface.recv_packets += 1
        iface.recv_bytes += event["len"]
+++ /dev/null
-import os
-import sys
-
-try:
- from progressbar import ETA, Bar, Percentage, ProgressBar
- progressbar_available = True
-except ImportError:
- progressbar_available = False
-
-# approximation for the progress bar
-BYTES_PER_EVENT = 30
-
-
def getFolderSize(folder):
    """Return the total size in bytes of folder and everything below
    it (recursively), including the directory inodes themselves."""
    total = os.path.getsize(folder)
    for entry in os.listdir(folder):
        path = os.path.join(folder, entry)
        if os.path.isfile(path):
            total += os.path.getsize(path)
        elif os.path.isdir(path):
            total += getFolderSize(path)
    return total
-
-
def progressbar_setup(obj, args):
    """Attach a ProgressBar to obj.pbar sized from the trace folder, or
    disable progress reporting when unavailable or unwanted."""
    if hasattr(args, "no_progress") and args.no_progress:
        obj.pbar = None
        return

    if not progressbar_available:
        print("Warning: progressbar module not available, "
              "using --no-progress.", file=sys.stderr)
        args.no_progress = True
        obj.pbar = None
    else:
        size = getFolderSize(args.path)
        # see the progressbar docs for other widget options
        widgets = ['Processing the trace: ', Percentage(), ' ',
                   Bar(marker='#', left='[', right=']'),
                   ' ', ETA(), ' ']
        obj.pbar = ProgressBar(widgets=widgets,
                               maxval=size/BYTES_PER_EVENT)
        obj.pbar.start()
    obj.event_count = 0
-
-
def progressbar_update(obj, args):
    """Advance the progress bar by one processed event, when enabled."""
    disabled = hasattr(args, "no_progress") and \
        (args.no_progress or obj.pbar is None)
    if disabled:
        return
    try:
        obj.pbar.update(obj.event_count)
    except ValueError:
        # maxval is only an estimate, ignore overshoot near the end
        pass
    obj.event_count += 1
-
-
def progressbar_finish(obj, args):
    """Terminate the progress bar display, when progress was enabled."""
    skip = hasattr(args, "no_progress") and args.no_progress
    if not skip:
        obj.pbar.finish()
+++ /dev/null
-from babeltrace import CTFScope
-from LTTngAnalyzes.common import Process, CPU, FD
-
-
-class Sched():
- def __init__(self, cpus, tids, dirty_pages={}):
- self.cpus = cpus
- self.tids = tids
- self.dirty_pages = dirty_pages
-
- def sched_switch_per_cpu(self, cpu_id, ts, next_tid, event):
- """Compute per-cpu usage"""
- if cpu_id in self.cpus:
- c = self.cpus[cpu_id]
- if c.start_task_ns != 0:
- c.cpu_ns += ts - c.start_task_ns
- # exclude swapper process
- if next_tid != 0:
- c.start_task_ns = ts
- c.current_tid = next_tid
- else:
- c.start_task_ns = 0
- c.current_tid = -1
- else:
- self.add_cpu(cpu_id, ts, next_tid)
- for context in event.keys():
- if context.startswith("perf_"):
- c.perf[context] = event[context]
-
- def add_cpu(self, cpu_id, ts, next_tid):
- c = CPU()
- c.cpu_id = cpu_id
- c.current_tid = next_tid
- # when we schedule a real task (not swapper)
- c.start_task_ns = ts
- # first activity on the CPU
- self.cpus[cpu_id] = c
- self.cpus[cpu_id].total_per_cpu_pc_list = []
-
- def sched_switch_per_tid(self, ts, prev_tid, next_tid,
- next_comm, cpu_id, event, ret):
- """Compute per-tid usage"""
- # if we don't know yet the CPU, skip this
- if cpu_id not in self.cpus.keys():
- self.add_cpu(cpu_id, ts, next_tid)
- c = self.cpus[cpu_id]
- # per-tid usage
- if prev_tid in self.tids:
- p = self.tids[prev_tid]
- p.cpu_ns += (ts - p.last_sched)
- # perf PMU counters checks
- for context in event.field_list_with_scope(
- CTFScope.STREAM_EVENT_CONTEXT):
- if context.startswith("perf_"):
- if context not in c.perf.keys():
- c.perf[context] = event[context]
- # add the difference between the last known value
- # for this counter on the current CPU
- diff = event[context] - c.perf[context]
- if context not in p.perf.keys():
- p.perf[context] = diff
- else:
- p.perf[context] += diff
- if diff > 0:
- ret[context] = diff
-
- # exclude swapper process
- if next_tid == 0:
- return ret
-
- if next_tid not in self.tids:
- p = Process()
- p.tid = next_tid
- p.comm = next_comm
- self.tids[next_tid] = p
- else:
- p = self.tids[next_tid]
- p.comm = next_comm
- p.last_sched = ts
- for q in c.wakeup_queue:
- if q["task"] == p:
- ret["sched_latency"] = ts - q["ts"]
- ret["next_tid"] = next_tid
- c.wakeup_queue.remove(q)
- return ret
-
- def clear_dirty_pages(self, to_clean, reason):
- cleaned = []
-# print("%s Cleaning nr : %d, current : %d, base : %d,
-# " cleaning %d, global %d" % \
-# (ns_to_hour_nsec(event.timestamp), nr, current,
-# self.dirty_pages["base_nr_dirty"],
-# to_clean, self.dirty_pages["global_nr_dirty"]))
- if to_clean > len(self.dirty_pages["pages"]):
- to_clean = len(self.dirty_pages["pages"])
- for i in range(to_clean):
- a = self.dirty_pages["pages"].pop(0)
- cleaned.append(a)
-
- # don't account background kernel threads emptying the
- # page cache
- if reason == "counter":
- return
-
- # flag all processes with a syscall in progress
- for p in self.tids.values():
- if len(p.current_syscall.keys()) == 0:
- continue
- p.current_syscall["pages_cleared"] = cleaned
- return
-
- def track_dirty_pages(self, event):
- if "pages" not in self.dirty_pages.keys():
- return
- if "nr_dirty" not in event.keys():
- # if the context is not available, only keep the
- # last 1000 pages inserted (arbitrary)
- if len(self.dirty_pages["pages"]) > 1000:
- for i in range(len(self.dirty_pages["pages"]) - 1000):
- self.dirty_pages["pages"].pop(0)
- return
- nr = event["nr_dirty"]
-# current = len(self.dirty_pages["pages"])
-
- if self.dirty_pages["global_nr_dirty"] == -1:
- self.dirty_pages["global_nr_dirty"] = nr
- self.dirty_pages["base_nr_dirty"] = nr
- return
-
- # only cleanup when the counter goes down
- if nr >= self.dirty_pages["global_nr_dirty"]:
- self.dirty_pages["global_nr_dirty"] = nr
- return
-
- if nr <= self.dirty_pages["base_nr_dirty"]:
- self.dirty_pages["base_nr_dirty"] = nr
- self.dirty_pages["global_nr_dirty"] = nr
-# to_clean = current
-# elif (self.dirty_pages["global_nr_dirty"] - nr) < 0:
-# to_clean = current
-# else:
-# to_clean = self.dirty_pages["global_nr_dirty"] - nr
-# if to_clean > 0:
-# self.clear_dirty_pages(to_clean, "counter")
- self.dirty_pages["global_nr_dirty"] = nr
-
- def switch(self, event):
- """Handle sched_switch event, returns a dict of changed values"""
- prev_tid = event["prev_tid"]
- next_comm = event["next_comm"]
- next_tid = event["next_tid"]
- cpu_id = event["cpu_id"]
- ret = {}
-
- self.sched_switch_per_tid(event.timestamp, prev_tid,
- next_tid, next_comm,
- cpu_id, event, ret)
- # because of perf events check, we need to do the CPU analysis after
- # the per-tid analysis
- self.sched_switch_per_cpu(cpu_id, event.timestamp, next_tid, event)
- if next_tid > 0:
- self.tids[next_tid].prev_tid = prev_tid
- self.track_dirty_pages(event)
-
- return ret
-
- def migrate_task(self, event):
- tid = event["tid"]
- if tid not in self.tids:
- p = Process()
- p.tid = tid
- p.comm = event["comm"]
- self.tids[tid] = p
- else:
- p = self.tids[tid]
- p.migrate_count += 1
-
- def wakeup(self, event):
- """Stores the sched_wakeup infos to compute scheduling latencies"""
- target_cpu = event["target_cpu"]
- tid = event["tid"]
- if target_cpu not in self.cpus.keys():
- c = CPU()
- c.cpu_id = target_cpu
- self.cpus[target_cpu] = c
- else:
- c = self.cpus[target_cpu]
-
- if tid not in self.tids:
- p = Process()
- p.tid = tid
- self.tids[tid] = p
- else:
- p = self.tids[tid]
- c.wakeup_queue.append({"ts": event.timestamp, "task": p})
-
- def fix_process(self, name, tid, pid):
- if tid not in self.tids:
- p = Process()
- p.tid = tid
- self.tids[tid] = p
- else:
- p = self.tids[tid]
- p.pid = pid
- p.comm = name
-
- if pid not in self.tids:
- p = Process()
- p.tid = pid
- self.tids[pid] = p
- else:
- p = self.tids[pid]
- p.pid = pid
- p.comm = name
-
- def dup_fd(self, fd):
- f = FD()
- f.filename = fd.filename
- f.fd = fd.fd
- f.fdtype = fd.fdtype
- return f
-
- def process_fork(self, event):
- child_tid = event["child_tid"]
- child_pid = event["child_pid"]
- child_comm = event["child_comm"]
- parent_pid = event["parent_pid"]
- parent_tid = event["parent_pid"]
- parent_comm = event["parent_comm"]
- f = Process()
- f.tid = child_tid
- f.pid = child_pid
- f.comm = child_comm
-
- # make sure the parent exists
- self.fix_process(parent_comm, parent_tid, parent_pid)
- p = self.tids[parent_pid]
- for fd in p.fds.keys():
- f.fds[fd] = self.dup_fd(p.fds[fd])
- f.fds[fd].parent = parent_pid
-
- self.tids[child_tid] = f
-
- def process_exec(self, event):
- tid = event["tid"]
- if tid not in self.tids:
- p = Process()
- p.tid = tid
- self.tids[tid] = p
- else:
- p = self.tids[tid]
- if "procname" in event.keys():
- p.comm = event["procname"]
- toremove = []
- for fd in p.fds.keys():
- if p.fds[fd].cloexec == 1:
- toremove.append(fd)
- for fd in toremove:
- p.fds.pop(fd, None)
+++ /dev/null
-from LTTngAnalyzes.sched import Sched
-from LTTngAnalyzes.net import Net
-from LTTngAnalyzes.block import Block
-from LTTngAnalyzes.statedump import Statedump
-from LTTngAnalyzes.syscalls import Syscalls
-from LTTngAnalyzes.mm import Mm
-from LTTngAnalyzes.irq import Interrupt
-
-
-class State():
- def __init__(self):
- self.cpus = {}
- self.tids = {}
- self.disks = {}
- self.syscalls = {}
- self.mm = {}
- self.ifaces = {}
- self.dirty_pages = {}
- self.interrupts = {}
- self.pending_syscalls = []
-
- self.sched = Sched(self.cpus, self.tids)
- self.syscall = Syscalls(self.cpus, self.tids, self.syscalls,
- self.pending_syscalls)
- self.statedump = Statedump(self.tids, self.disks)
- self.mem = Mm(self.mm, self.cpus, self.tids, self.dirty_pages)
- self.block = Block(self.cpus, self.disks, self.tids)
- self.net = Net(self.ifaces, self.cpus, self.tids)
- self.irq = Interrupt(self.interrupts, self.cpus, self.tids)
+++ /dev/null
-from LTTngAnalyzes.common import Process, FD, get_disk
-
-
-class Statedump():
- def __init__(self, tids, disks):
- self.tids = tids
- self.disks = disks
-
- def merge_fd_dict(self, p, parent):
- if len(p.fds.keys()) != 0:
- toremove = []
- for fd in p.fds.keys():
- if fd not in parent.fds.keys():
- parent.fds[fd] = p.fds[fd]
- else:
- # best effort to fix the filename
- if len(parent.fds[fd].filename) == 0:
- parent.fds[fd].filename = p.fds[fd].filename
- # merge the values as they are for the same FD
- parent.fds[fd].net_read += p.fds[fd].net_read
- parent.fds[fd].net_write += p.fds[fd].net_write
- parent.fds[fd].disk_read += p.fds[fd].disk_read
- parent.fds[fd].disk_write += p.fds[fd].disk_write
- parent.fds[fd].open += p.fds[fd].open
- parent.fds[fd].close += p.fds[fd].close
- toremove.append(fd)
- for fd in toremove:
- p.fds.pop(fd, None)
- if len(p.closed_fds.keys()) != 0:
- for fd in p.closed_fds.keys():
- if fd not in parent.closed_fds.keys():
- parent.closed_fds[fd] = p.closed_fds[fd]
- else:
- # best effort to fix the filename
- if len(parent.closed_fds[fd].name) == 0:
- parent.closed_fds[fd].name = p.closed_fds[fd].name
- # merge the values as they are for the same FD
- parent.closed_fds[fd].read += p.closed_fds[fd].read
- parent.closed_fds[fd].write += p.closed_fds[fd].write
- parent.closed_fds[fd].open += p.closed_fds[fd].open
- parent.closed_fds[fd].close += p.closed_fds[fd].close
- p.closed_fds.pop(fd, None)
-
- def process_state(self, event):
- tid = event["tid"]
- pid = event["pid"]
- name = event["name"]
- if tid not in self.tids:
- p = Process()
- p.tid = tid
- self.tids[tid] = p
- else:
- p = self.tids[tid]
- # Even if the process got created earlier, some info might be
- # missing, add it now.
- p.pid = pid
- p.comm = name
-
- if pid != tid:
- # create the parent
- if pid not in self.tids:
- parent = Process()
- parent.tid = pid
- parent.pid = pid
- parent.comm = name
- self.tids[pid] = parent
- else:
- parent = self.tids[pid]
- # If the thread had opened FDs, they need to be assigned
- # to the parent.
- self.merge_fd_dict(p, parent)
-
- def file_descriptor(self, event):
- pid = event["pid"]
- fd = event["fd"]
- filename = event["filename"]
-
- if pid not in self.tids:
- p = Process()
- p.pid = pid
- p.tid = pid
- self.tids[pid] = p
- else:
- p = self.tids[pid]
-
- if fd not in p.fds.keys():
- newfile = FD()
- newfile.filename = filename
- newfile.fd = fd
- # FIXME: we don't have the info, just assume for now
- newfile.cloexec = 1
- p.fds[fd] = newfile
- else:
- # just fix the filename
- p.fds[fd].filename = filename
-
- def block_device(self, event):
- d = get_disk(event["dev"], self.disks)
- d.prettyname = event["diskname"]
+++ /dev/null
-from LTTngAnalyzes.common import FDType, FD, ns_to_hour_nsec, Syscall, \
- O_CLOEXEC, get_v4_addr_str, Process, IORequest, SyscallConsts
-import socket
-import operator
-
-
-class IOCategory():
- """Defines an enumeration mapping IO categories to integer values.
- Used mainly to export syscall metadata (to JSON)."""
-
- invalid = 0
- # Can't use open as a name given that is is a built-in function
- # TODO: find less stupid name
- opn = 1
- close = 2
- read = 3
- write = 4
-
-
-class Syscalls():
- def get_syscall_category(name):
- """Receives a syscall name and returns an enum value
- representing its IO category (open, close, read, or write)"
-
- This is used to produce json data for visualization"""
-
- if name in SyscallConsts.OPEN_SYSCALLS:
- return IOCategory.opn
- if name in SyscallConsts.CLOSE_SYSCALLS:
- return IOCategory.close
- if name in SyscallConsts.READ_SYSCALLS:
- return IOCategory.read
- if name in SyscallConsts.WRITE_SYSCALLS:
- return IOCategory.write
-
- return IOCategory.invalid
-
- def get_fd_type(name, family):
- if name in SyscallConsts.NET_OPEN_SYSCALLS:
- if family in SyscallConsts.INET_FAMILIES:
- return FDType.net
- if family in SyscallConsts.DISK_FAMILIES:
- return FDType.disk
-
- if name in SyscallConsts.DISK_OPEN_SYSCALLS:
- return FDType.disk
-
- return FDType.unknown
-
- def __init__(self, cpus, tids, syscalls, pending_syscalls, dirty_pages={},
- names=None, latency=-1, latency_hist=None, seconds=False):
- self.cpus = cpus
- self.tids = tids
- self.syscalls = syscalls
- self.pending_syscalls = pending_syscalls
- self.syscalls["total"] = 0
- self.dirty_pages = dirty_pages
- self.names = names
- self.latency = latency
- self.latency_hist = latency_hist
- self.seconds = seconds
-
- def global_syscall_entry(self, name):
- if name not in self.syscalls:
- s = Syscall()
- s.name = name
- s.count = 0
- self.syscalls[name] = s
- else:
- s = self.syscalls[name]
- s.count += 1
- self.syscalls["total"] += 1
-
- def per_tid_syscall_entry(self, name, cpu_id):
- # we don't know which process is currently on this CPU
- if cpu_id not in self.cpus:
- return
- c = self.cpus[cpu_id]
- if c.current_tid == -1:
- return
- t = self.tids[c.current_tid]
- t.total_syscalls += 1
- if name not in t.syscalls:
- s = Syscall()
- s.name = name
- t.syscalls[name] = s
- else:
- s = t.syscalls[name]
- s.count += 1
-
- def track_open(self, name, proc, event, cpu):
- self.tids[cpu.current_tid].current_syscall = {}
- current_syscall = self.tids[cpu.current_tid].current_syscall
- if name in SyscallConsts.DISK_OPEN_SYSCALLS:
- current_syscall["filename"] = event["filename"]
- if event["flags"] & O_CLOEXEC == O_CLOEXEC:
- current_syscall["cloexec"] = 1
- elif name in ["sys_accept", "syscall_entry_accept"]:
- if "family" in event.keys() and event["family"] == socket.AF_INET:
- ipport = "%s:%d" % (get_v4_addr_str(event["v4addr"]),
- event["sport"])
- current_syscall["filename"] = ipport
- else:
- current_syscall["filename"] = "socket"
- elif name in SyscallConsts.NET_OPEN_SYSCALLS:
- current_syscall["filename"] = "socket"
- elif name in ["sys_dup2", "syscall_entry_dup2"]:
- newfd = event["newfd"]
- oldfd = event["oldfd"]
- if newfd in proc.fds.keys():
- self.close_fd(proc, newfd)
- if oldfd in proc.fds.keys():
- current_syscall["filename"] = proc.fds[oldfd].filename
- current_syscall["fdtype"] = proc.fds[oldfd].fdtype
- else:
- current_syscall["filename"] = ""
- elif name in ["sys_fcntl", "syscall_entry_fcntl"]:
- # F_DUPFD
- if event["cmd"] != 0:
- return
- oldfd = event["fd"]
- if oldfd in proc.fds.keys():
- current_syscall["filename"] = proc.fds[oldfd].filename
- current_syscall["fdtype"] = proc.fds[oldfd].fdtype
- else:
- current_syscall["filename"] = ""
-
- if name in SyscallConsts.NET_OPEN_SYSCALLS and \
- "family" in event.keys():
- family = event["family"]
- current_syscall["family"] = family
- else:
- family = socket.AF_UNSPEC
- current_syscall["family"] = family
-
- current_syscall["name"] = name
- current_syscall["start"] = event.timestamp
- current_syscall["fdtype"] = Syscalls.get_fd_type(name, family)
-
- def close_fd(self, proc, fd):
- filename = proc.fds[fd].filename
- if filename not in SyscallConsts.GENERIC_NAMES \
- and filename in proc.closed_fds.keys():
- f = proc.closed_fds[filename]
- f.close += 1
- f.net_read += proc.fds[fd].net_read
- f.disk_read += proc.fds[fd].disk_read
- f.net_write += proc.fds[fd].net_write
- f.disk_write += proc.fds[fd].disk_write
- else:
- proc.closed_fds[filename] = proc.fds[fd]
- proc.closed_fds[filename].close = 1
-# print("Close FD %s in %d (%d, %d, %d, %d)" %
-# (filename, proc.tid, proc.fds[fd].read, proc.fds[fd].write,
-# proc.fds[fd].open, proc.fds[fd].close))
- proc.fds.pop(fd, None)
-
- def track_close(self, name, proc, event, cpu):
- fd = event["fd"]
- if fd not in proc.fds.keys():
- return
-
- tid = self.tids[cpu.current_tid]
- tid.current_syscall = {}
- current_syscall = tid.current_syscall
- current_syscall["filename"] = proc.fds[fd].filename
- current_syscall["name"] = name
- current_syscall["start"] = event.timestamp
-
- self.close_fd(proc, fd)
-
- def track_fds(self, name, event, cpu_id):
- # we don't know which process is currently on this CPU
- ret_string = ""
- if cpu_id not in self.cpus:
- return
- c = self.cpus[cpu_id]
- if c.current_tid == -1:
- return
- t = self.tids[c.current_tid]
- # check if we can fix the pid from a context
- if t.pid == -1 and "pid" in event.keys():
- t.pid = event["pid"]
- p = Process()
- p.tid = t.pid
- p.pid = t.pid
- p.comm = t.comm
- self.tids[p.pid] = p
- # if it's a thread, we want the parent
- if t.pid != -1 and t.tid != t.pid:
- t = self.tids[t.pid]
- if name in SyscallConsts.OPEN_SYSCALLS:
- self.track_open(name, t, event, c)
- elif name in SyscallConsts.CLOSE_SYSCALLS:
- ret_string = "%s %s(%d)" % (ns_to_hour_nsec(event.timestamp),
- name, event["fd"])
- self.track_close(name, t, event, c)
- # when a connect occurs, no new FD is returned, but we can fix
- # the "filename" if we have the destination info
- elif name in ["sys_connect", "syscall_entry_connect"] \
- and "family" in event.keys():
- if event["family"] == socket.AF_INET:
- fd = self.get_fd(t, event["fd"])
- ipport = "%s:%d" % (get_v4_addr_str(event["v4addr"]),
- event["dport"])
- fd.filename = ipport
- return ret_string
-
- def get_fd(self, proc, fd):
- if fd not in proc.fds.keys():
- f = FD()
- f.fd = fd
- f.filename = "unknown (origin not found)"
- proc.fds[fd] = f
- else:
- f = proc.fds[fd]
- return f
-
- def track_sync(self, name, event, cpu_id):
- # we don't know which process is currently on this CPU
- if cpu_id not in self.cpus:
- return
- c = self.cpus[cpu_id]
- if c.current_tid == -1:
- return
- t = self.tids[c.current_tid]
- self.pending_syscalls.append(t)
- # if it's a thread, we want the parent
- if t.pid != -1 and t.tid != t.pid:
- t = self.tids[t.pid]
- current_syscall = self.tids[c.current_tid].current_syscall
- current_syscall["name"] = name
- current_syscall["start"] = event.timestamp
- if name not in ["sys_sync", "syscall_entry_sync"]:
- fd = event["fd"]
- f = self.get_fd(t, fd)
- current_syscall["fd"] = f
- current_syscall["filename"] = f.filename
-
- def track_read_write(self, name, event, cpu_id):
- # we don't know which process is currently on this CPU
- if cpu_id not in self.cpus:
- return
- c = self.cpus[cpu_id]
- if c.current_tid == -1:
- return
- t = self.tids[c.current_tid]
- self.pending_syscalls.append(t)
- # if it's a thread, we want the parent
- if t.pid != -1 and t.tid != t.pid:
- t = self.tids[t.pid]
- current_syscall = self.tids[c.current_tid].current_syscall
- current_syscall["name"] = name
- current_syscall["start"] = event.timestamp
- if name in ["sys_splice", "syscall_entry_splice"]:
- current_syscall["fd_in"] = self.get_fd(t, event["fd_in"])
- current_syscall["fd_out"] = self.get_fd(t, event["fd_out"])
- current_syscall["count"] = event["len"]
- current_syscall["filename"] = current_syscall["fd_in"].filename
- return
- elif name in ["sys_sendfile64", "syscall_entry_sendfile64"]:
- current_syscall["fd_in"] = self.get_fd(t, event["in_fd"])
- current_syscall["fd_out"] = self.get_fd(t, event["out_fd"])
- current_syscall["count"] = event["count"]
- current_syscall["filename"] = current_syscall["fd_in"].filename
- return
- fd = event["fd"]
- f = self.get_fd(t, fd)
- current_syscall["fd"] = f
- if name in ["sys_writev", "syscall_entry_writev",
- "sys_readv", "syscall_entry_readv"]:
- current_syscall["count"] = event["vlen"]
- elif name in ["sys_recvfrom", "syscall_entry_recvfrom"]:
- current_syscall["count"] = event["size"]
- elif name in ["sys_recvmsg", "syscall_entry_recvmsg",
- "sys_sendmsg", "syscall_entry_sendmsg"]:
- current_syscall["count"] = ""
- elif name in ["sys_sendto", "syscall_entry_sendto"]:
- current_syscall["count"] = event["len"]
- else:
- try:
- current_syscall["count"] = event["count"]
- except:
- print("Missing count argument for syscall",
- current_syscall["name"])
- current_syscall["count"] = 0
-
- current_syscall["filename"] = f.filename
-
- def add_tid_fd(self, event, cpu):
- ret = event["ret"]
- t = self.tids[cpu.current_tid]
- # if it's a thread, we want the parent
- if t.pid != -1 and t.tid != t.pid:
- t = self.tids[t.pid]
- current_syscall = self.tids[cpu.current_tid].current_syscall
-
- name = current_syscall["filename"]
- if name not in SyscallConsts.GENERIC_NAMES \
- and name in t.closed_fds.keys():
- fd = t.closed_fds[name]
- fd.open += 1
- else:
- fd = FD()
- fd.filename = name
- if current_syscall["name"] in SyscallConsts.NET_OPEN_SYSCALLS:
- fd.family = current_syscall["family"]
- if fd.family in SyscallConsts.INET_FAMILIES:
- fd.fdtype = FDType.net
- fd.open = 1
- if ret >= 0:
- fd.fd = ret
- else:
- return
-# if fd.fd in t.fds.keys():
-# print("%lu : FD %d in tid %d was already there, untracked close" %
-# (event.timestamp, fd.fd, t.tid))
- if "cloexec" in current_syscall.keys():
- fd.cloexec = 1
- t.fds[fd.fd] = fd
- # print("%lu : %s opened %s (%d times)" % (event.timestamp, t.comm,
- # fd.filename, fd.open))
-
- def read_append(self, fd, proc, count, rq):
- rq.operation = IORequest.OP_READ
- rq.size = count
- if fd.fdtype in [FDType.net, FDType.maybe_net]:
- fd.net_read += count
- proc.net_read += count
- elif fd.fdtype == FDType.disk:
- fd.disk_read += count
- proc.disk_read += count
- else:
- fd.unk_read += count
- proc.unk_read += count
- fd.read += count
- proc.read += count
-
- def write_append(self, fd, proc, count, rq):
- rq.operation = IORequest.OP_WRITE
- rq.size = count
- if fd.fdtype in [FDType.net, FDType.maybe_net]:
- fd.net_write += count
- proc.net_write += count
- elif fd.fdtype == FDType.disk:
- fd.disk_write += count
- proc.disk_write += count
- else:
- fd.unk_write += count
- proc.unk_write += count
- fd.write += count
- proc.write += count
-
- def track_read_write_return(self, name, ret, cpu):
- if ret < 0:
- # TODO: track errors
- return
- proc = self.tids[cpu.current_tid]
- # if it's a thread, we want the parent
- if proc.pid != -1 and proc.tid != proc.pid:
- proc = self.tids[proc.pid]
- current_syscall = self.tids[cpu.current_tid].current_syscall
- if name in ["sys_splice", "syscall_entry_splice",
- "sys_sendfile64", "syscall_entry_sendfile64"]:
- self.read_append(current_syscall["fd_in"], proc, ret,
- current_syscall["iorequest"])
- self.write_append(current_syscall["fd_out"], proc, ret,
- current_syscall["iorequest"])
- elif name in SyscallConsts.READ_SYSCALLS:
- if ret > 0:
- self.read_append(current_syscall["fd"], proc, ret,
- current_syscall["iorequest"])
- elif name in SyscallConsts.WRITE_SYSCALLS:
- if ret > 0:
- self.write_append(current_syscall["fd"], proc, ret,
- current_syscall["iorequest"])
-
- def get_page_queue_stats(self, page_list):
- processes = {}
- for i in page_list:
- procname = i[0].comm
- tid = i[0].tid
- filename = i[2]
- if tid not in processes.keys():
- processes[tid] = {}
- processes[tid]["procname"] = procname
- processes[tid]["count"] = 1
- processes[tid]["files"] = {}
- processes[tid]["files"][filename] = 1
- else:
- processes[tid]["count"] += 1
- if filename not in processes[tid]["files"].keys():
- processes[tid]["files"][filename] = 1
- else:
- processes[tid]["files"][filename] += 1
- return processes
-
- def print_page_table(self, event, pages):
- spaces = (41 + 6) * " "
- for i in pages.keys():
- p = pages[i]
- print("%s %s (%d): %d pages" % (spaces, p["procname"],
- i, p["count"]))
- files = sorted(p["files"].items(), key=operator.itemgetter(1),
- reverse=True)
- for f in files:
- print("%s - %s : %d pages" % (spaces, f[0], f[1]))
-
- def syscall_clear_pages(self, event, name, fd, current_syscall, tid):
- cleaned = []
- if name in ["sys_sync", "syscall_entry_sync"]:
- # remove all the pages
- for i in range(len(self.dirty_pages["pages"])):
- cleaned.append(self.dirty_pages["pages"].pop(0))
- else:
- # remove only the pages that belong to a specific proc/fd
- for i in range(len(self.dirty_pages["pages"])):
- proc = self.dirty_pages["pages"][i][0]
- page_fd = self.dirty_pages["pages"][i][3]
- if page_fd == fd and (tid.tid == proc.tid or
- tid.pid == proc.pid):
- cleaned.append(self.dirty_pages["pages"][i])
- for i in cleaned:
- self.dirty_pages["pages"].remove(i)
- if len(cleaned) > 0:
- current_syscall["pages_cleared"] = cleaned
-
- def track_rw_latency(self, name, ret, c, ts, event):
- current_syscall = self.tids[c.current_tid].current_syscall
- rq = current_syscall["iorequest"]
-# FIXME: useless ?
-# if "start" not in current_syscall.keys():
-# return
- rq.duration = (event.timestamp - current_syscall["start"])
- rq.begin = current_syscall["start"]
- rq.end = event.timestamp
- rq.proc = self.tids[c.current_tid]
- if "fd" in current_syscall.keys():
- rq.fd = current_syscall["fd"]
- r = current_syscall["fd"].iorequests
- r.append(current_syscall["iorequest"])
- elif "fd_in" in current_syscall.keys():
- rq.fd = current_syscall["fd_in"]
- # pages written during the latency
- if "pages_written" in current_syscall.keys():
- rq.page_written = current_syscall["pages_written"]
- # dirty buffers during the latency
- if "dirty" in current_syscall.keys():
- rq.dirty = current_syscall["dirty"]
- # alloc pages during the latency
- if "alloc" in current_syscall.keys():
- rq.page_alloc = current_syscall["alloc"]
- # wakeup_kswapd during the latency
- if "page_free" in current_syscall.keys():
- rq.page_free = current_syscall["page_free"]
- if "wakeup_kswapd" in current_syscall.keys():
- rq.woke_kswapd = True
- if name in SyscallConsts.SYNC_SYSCALLS:
-# self.syscall_clear_pages(event, name, fd, current_syscall,
-# self.tids[c.current_tid])
- if "pages_cleared" in current_syscall.keys():
- rq.page_cleared = len(current_syscall["pages_cleared"])
-
- def entry(self, event):
- name = event.name
- ret_string = ""
- cpu_id = event["cpu_id"]
- self.global_syscall_entry(name)
- self.per_tid_syscall_entry(name, cpu_id)
- ret_string = self.track_fds(name, event, cpu_id)
- if name in SyscallConsts.READ_SYSCALLS or \
- name in SyscallConsts.WRITE_SYSCALLS:
- self.track_read_write(name, event, cpu_id)
- if name in SyscallConsts.SYNC_SYSCALLS:
- self.track_sync(name, event, cpu_id)
- return ret_string
-
- def exit(self, event):
- cpu_id = event["cpu_id"]
- ret_string = ""
- if cpu_id not in self.cpus:
- return
- c = self.cpus[cpu_id]
- if c.current_tid == -1:
- return
- current_syscall = self.tids[c.current_tid].current_syscall
- if len(current_syscall.keys()) == 0:
- return
- name = current_syscall["name"]
- ret = event["ret"]
- current_syscall["iorequest"] = IORequest()
- current_syscall["iorequest"].iotype = IORequest.IO_SYSCALL
- current_syscall["iorequest"].name = name
- if name in SyscallConsts.OPEN_SYSCALLS:
- self.add_tid_fd(event, c)
- ret_string = "%s %s(%s, fd = %d)" % (
- ns_to_hour_nsec(current_syscall["start"]),
- name, current_syscall["filename"], ret)
- if ret < 0:
- return ret_string
- t = self.tids[c.current_tid]
- current_syscall["fd"] = self.get_fd(t, ret)
- current_syscall["count"] = 0
- current_syscall["fd"].fdtype = current_syscall["fdtype"]
- current_syscall["iorequest"].operation = IORequest.OP_OPEN
- self.track_rw_latency(name, ret, c,
- event.timestamp, event)
- elif name in SyscallConsts.READ_SYSCALLS or \
- name in SyscallConsts.WRITE_SYSCALLS:
- self.track_read_write_return(name, ret, c)
- self.track_rw_latency(name, ret, c, event.timestamp, event)
- elif name in SyscallConsts.SYNC_SYSCALLS:
- current_syscall["iorequest"].operation = IORequest.OP_SYNC
- self.track_rw_latency(name, ret, c, event.timestamp, event)
- if name in ["sys_sync", "syscall_entry_sync"]:
- t = self.tids[c.current_tid]
- t.iorequests.append(current_syscall["iorequest"])
- self.tids[c.current_tid].current_syscall = {}
- if self.tids[c.current_tid] in self.pending_syscalls:
- self.pending_syscalls.remove(self.tids[c.current_tid])
- return ret_string
-
- def wb_pages(self, event):
- """writeback_pages_written"""
- for c in self.cpus.values():
- if c.current_tid <= 0:
- continue
- current_syscall = self.tids[c.current_tid].current_syscall
- if len(current_syscall.keys()) == 0:
- continue
- current_syscall["pages_written"] = event["pages"]
-
- def wakeup_kswapd(self, event):
- """mm_vmscan_wakeup_kswapd"""
- cpu_id = event["cpu_id"]
- if cpu_id not in self.cpus:
- return
- c = self.cpus[cpu_id]
- if c.current_tid == -1:
- return
- current_syscall = self.tids[c.current_tid].current_syscall
- if len(current_syscall.keys()) == 0:
- return
- current_syscall["wakeup_kswapd"] = 1
-
- def page_free(self, event):
- """mm_page_free"""
- for c in self.cpus.values():
- if c.current_tid <= 0:
- continue
- p = self.tids[c.current_tid]
- # if the current process is kswapd0, we need to
- # attribute the page freed to the process that
- # woke it up.
- if p.comm == "kswapd0" and p.prev_tid > 0:
- p = self.tids[p.prev_tid]
- current_syscall = p.current_syscall
- if len(current_syscall.keys()) == 0:
- continue
- if "wakeup_kswapd" in current_syscall.keys():
- if "page_free" in current_syscall.keys():
- current_syscall["page_free"] += 1
- else:
- current_syscall["page_free"] = 1
+++ /dev/null
-from LTTngAnalyzes.common import NSEC_PER_SEC, MSEC_PER_NSEC, convert_size
-import operator
-
-
-# class CPUComplexEncoder(json.JSONEncoder):
-# def default(self, obj):
-# if isinstance(obj, CPU):
-# return obj.cpu_pc
-# # Let the base class default method raise the TypeError
-# return json.JSONEncoder.default(self, obj)
-
-class TextReport():
- def __init__(self, trace_start_ts, trace_end_ts, cpus, tids, syscalls,
- disks, ifaces, mm):
- self.trace_start_ts = trace_start_ts
- self.trace_end_ts = trace_end_ts
- self.cpus = cpus
- self.tids = tids
- self.syscalls = syscalls
- self.disks = disks
- self.ifaces = ifaces
- self.mm = mm
-
- def text_trace_info(self):
- total_ns = self.trace_end_ts - self.trace_start_ts
- print("### Trace info ###")
- print("Start : %lu\nEnd: %lu" % (self.trace_start_ts,
- self.trace_end_ts))
- print("Total ns : %lu" % (total_ns))
- print("Total : %lu.%0.09lus" % (total_ns / NSEC_PER_SEC,
- total_ns % NSEC_PER_SEC))
-
- def report(self, begin_ns, end_ns, final, args):
- if not (args.info or args.cpu or args.tid or args.global_syscalls
- or args.tid_syscalls or args.disk or args.fds or args.net or
- args.mem):
- return
- if args.cpu or args.tid or args.global_syscalls \
- or args.tid_syscalls or args.disk or args.fds or args.net \
- or args.mem:
- print("[%lu:%lu]" % (begin_ns/NSEC_PER_SEC, end_ns/NSEC_PER_SEC))
-
- total_ns = end_ns - begin_ns
-
- if args.info and final:
- self.text_trace_info()
- print("")
- if args.cpu:
- self.text_per_cpu_report(total_ns)
- print("")
- if args.tid:
- self.text_per_tid_report(total_ns, args.display_proc_list,
- limit=args.top,
- syscalls=args.tid_syscalls, fds=args.fds,
- mem=args.mem)
- print("")
- if args.global_syscalls:
- self.text_global_syscall_report()
- print("")
- if args.disk:
- self.text_disks_report(total_ns)
- print("")
- if args.net:
- self.text_net_report(total_ns)
- print("")
- if args.mem:
- self.text_global_mm_report(self.mm)
- print("")
-
- def text_disks_report(self, total_ns):
- print("### Disks stats ###")
- for dev in self.disks:
- if self.disks[dev].completed_requests == 0:
- totalstr = "0 completed requests"
- else:
- total = (self.disks[dev].request_time /
- self.disks[dev].completed_requests) / MSEC_PER_NSEC
- totalstr = ("%d completed requests (%0.04fms/sector)" %
- (self.disks[dev].completed_requests, total))
- print("Dev %d, %d requests, %d sectors, %s" %
- (dev, self.disks[dev].nr_requests,
- self.disks[dev].nr_sector, totalstr))
-
- def text_net_report(self, total_ns):
- print("### Network stats ###")
- for iface in self.ifaces.keys():
- dev = self.ifaces[iface]
- print("%s : %d bytes received (%d packets), "
- "%d bytes sent (%d packets)" %
- (iface, dev.recv_bytes, dev.recv_packets, dev.send_bytes,
- dev.send_packets))
-
- def text_global_mm_report(self, mm):
- print("### Global memory usage ###")
- print("%d allocated pages" % (mm["allocated_pages"]))
- print("%d freed pages" % (mm["freed_pages"]))
-
- def text_global_syscall_report(self):
- print("### Global syscall ###")
- for syscall in sorted(self.syscalls.values(),
- key=operator.attrgetter("count"), reverse=True):
- if syscall.count == 0:
- continue
- print("%s : %d" % (syscall.name, syscall.count))
-
- def text_per_tid_report(self, total_ns, proc_list, limit=0, syscalls=0,
- fds=0, mem=0):
- print("### Per-TID Usage ###")
- count = 0
- for tid in sorted(self.tids.values(),
- key=operator.attrgetter('cpu_ns'), reverse=True):
- if len(proc_list) > 0 and tid.comm not in proc_list:
- continue
- print("%s (%d) : %0.02f%%, read %s, write %s"
- % (tid.comm, tid.tid, ((tid.cpu_ns * 100) / total_ns),
- convert_size(tid.read), convert_size(tid.write)), end="")
- if tid.migrate_count > 0:
- print(""" (%d migration(s))""" % tid.migrate_count)
- else:
- print("")
- count = count + 1
- if fds:
- if tid.tid == tid.pid:
- if len(tid.fds.keys()) > 0:
- print("- Still opened files :")
- for fd in tid.fds.values():
- if fd.parent != -1 and fd.parent != tid.tid:
- inherit = " (inherited by %s (%d))" % \
- (self.tids[fd.parent].comm, fd.parent)
- else:
- inherit = ""
- print(" - %s (%d), read = %s, write = %s, "
- "open = %d, close = %d%s" %
- (fd.filename, fd.fd, convert_size(fd.read),
- convert_size(fd.write), fd.open,
- fd.close, inherit))
- if len(tid.closed_fds.keys()) > 0:
- print("- Closed files :")
- for fd in tid.closed_fds.values():
- if fd.parent != -1 and fd.parent != tid.tid:
- inherit = " (inherited by %s (%d))" % \
- (self.tids[fd.parent].comm, fd.parent)
- else:
- inherit = ""
- print(" - %s (%d), read = %s, write = %s, "
- "open = %d, close = %d%s" %
- (fd.filename, fd.fd, convert_size(fd.read),
- convert_size(fd.write), fd.open,
- fd.close, inherit))
- if syscalls:
- if len(tid.syscalls.keys()) > 0:
- print("- Syscalls")
- for syscall in sorted(tid.syscalls.values(),
- key=operator.attrgetter('count'),
- reverse=True):
- if syscall.count == 0:
- continue
- print(" - %s : %d" % (syscall.name, syscall.count))
- if mem:
- print("- Memory")
- print(" - Allocated %d pages" % tid.allocated_pages)
- print(" - Freed %d pages" % tid.freed_pages)
- if limit > 0 and count >= limit:
- break
-
- def text_per_cpu_report(self, total_ns):
- print("### Per-CPU Usage ###")
- total_cpu_pc = 0
- nb_cpu = len(self.cpus.keys())
- for cpu in self.cpus.keys():
- cpu_total_ns = self.cpus[cpu].cpu_ns
- cpu_pc = self.cpus[cpu].cpu_pc
- total_cpu_pc += cpu_pc
- print("CPU %d : %d ns (%0.02f%%)" % (cpu, cpu_total_ns, cpu_pc))
- if nb_cpu == 0:
- return
- print("Total CPU Usage : %0.02f%%" % (total_cpu_pc / nb_cpu))
-# print(json.dumps(self.cpus, cls=CPUComplexEncoder))
+++ /dev/null
-#!/usr/bin/env python3
-#
-# Permission is hereby granted, free of charge, to any person obtaining a copy
-# of this software and associated documentation files (the "Software"), to deal
-# in the Software without restriction, including without limitation the rights
-# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-# copies of the Software, and to permit persons to whom the Software is
-# furnished to do so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice shall be included in
-# all copies or substantial portions of the Software.
-
-import sys
-import argparse
-import os
-import sqlite3
-try:
- from babeltrace import TraceCollection
-except ImportError:
- # quick fix for debian-based distros
- sys.path.append("/usr/local/lib/python%d.%d/site-packages" %
- (sys.version_info.major, sys.version_info.minor))
- from babeltrace import TraceCollection
-from LTTngAnalyzes.common import CPU, Process, Syscall
-
-DB_NAME = "proc.db"
-
-
-class Analyzes():
- def __init__(self, traces):
- self.traces = traces
- self.processes = {}
- self.cpus = {}
-
- def connect_db(self):
- self.conn = sqlite3.connect(DB_NAME)
- self.cur = self.conn.cursor()
-
- def init_db(self):
- self.connect_db()
- self.cur.execute("DROP TABLE IF EXISTS processes")
- self.cur.execute("CREATE TABLE processes (name TEXT)")
- self.cur.execute("DROP TABLE IF EXISTS syscalls")
- self.cur.execute("CREATE TABLE syscalls "
- "(proc_name TEXT, syscall_name TEXT)")
-
- self.cur.execute("DROP TABLE IF EXISTS staging_processes")
- self.cur.execute("CREATE TABLE staging_processes (name TEXT)")
- self.cur.execute("DROP TABLE IF EXISTS staging_syscalls")
- self.cur.execute("CREATE TABLE staging_syscalls "
- "(proc_name TEST, syscall_name TEXT)")
-
- def check_process(self, proc):
- self.cur.execute("SELECT * FROM processes WHERE name=:name",
- {"name": proc})
- p = self.cur.fetchall()
- if p:
- return
- self.cur.execute("SELECT * FROM staging_processes WHERE name=:name",
- {"name": proc})
- p = self.cur.fetchall()
- if not p:
- self.cur.execute("INSERT INTO staging_processes VALUES (:proc)",
- {"proc": proc})
-
- def check_syscall(self, proc, syscall):
- self.cur.execute("SELECT * FROM syscalls WHERE proc_name=:proc_name "
- "AND syscall_name=:syscall_name",
- {"proc_name": proc, "syscall_name": syscall})
- p = self.cur.fetchall()
- if p:
- return
- self.cur.execute("SELECT * FROM staging_syscalls "
- "WHERE proc_name=:proc_name "
- "AND syscall_name=:syscall_name",
- {"proc_name": proc, "syscall_name": syscall})
- p = self.cur.fetchall()
- if not p:
- self.cur.execute("INSERT INTO staging_syscalls VALUES(?,?)",
- (proc, syscall))
-
- def add_proc(self, p):
- self.cur.execute("INSERT INTO processes VALUES (:proc)",
- {"proc": p})
- self.cur.execute("DELETE FROM staging_processes WHERE name=:proc",
- {"proc": p})
-
- def add_syscall(self, p, s):
- self.cur.execute("INSERT INTO syscalls VALUES (:proc, :syscall)",
- {"proc": p, "syscall": s})
- self.cur.execute("DELETE FROM staging_syscalls WHERE proc_name=:proc "
- "AND syscall_name=:syscall",
- {"proc": p, "syscall": s})
-
- def review_processes(self):
- self.cur.execute("SELECT * FROM staging_processes")
- proc = self.cur.fetchall()
- if not proc:
- return
- add_all = 0
- for p in proc:
- if add_all:
- print("Adding %s" % p[0])
- self.add_proc(p[0])
- continue
-
- print("Found new process running: %s, "
- "add it to the DB (Y/n/a/q) ?" % (p))
- a = sys.stdin.readline().strip()
- if a in ["y", "Y", ""]:
- self.add_proc(p[0])
- elif a == "a":
- add_all = 1
- self.add_proc(p[0])
- elif a == "q":
- return
- else:
- continue
-
- def review_syscalls(self):
- self.cur.execute("SELECT * FROM staging_syscalls")
- sysc = self.cur.fetchall()
- if not sysc:
- return
- add_all = 0
- for p in sysc:
- if add_all:
- print("Adding %s to %s" % (p[1], p[0]))
- self.add_syscall(p[0], p[1])
- continue
-
- print("Found new syscall %s for proc %s, "
- "add it to the DB (Y/n/a/q) ?" %
- (p[1], p[0]))
- a = sys.stdin.readline().strip()
- if a in ["y", "Y", ""]:
- self.add_syscall(p[0], p[1])
- elif a == "a":
- add_all = 1
- self.add_syscall(p[0], p[1])
- elif a == "q":
- return
- else:
- continue
-
- def lttng_statedump_process_state(self, event):
- name = event["name"]
- if name not in self.processes.keys():
- self.processes[name] = Process()
- self.check_process(name)
-
- def sched_switch(self, event):
- next_comm = event["next_comm"]
- cpu_id = event["cpu_id"]
- if cpu_id not in self.cpus.keys():
- self.cpus[cpu_id] = CPU()
- self.cpus[cpu_id].current_comm = next_comm
- if next_comm not in self.processes.keys():
- self.processes[next_comm] = Process()
- self.check_process(next_comm)
-
- def syscall_entry(self, event):
- cpu_id = event["cpu_id"]
- if cpu_id not in self.cpus.keys():
- return
- p = self.processes[self.cpus[cpu_id].current_comm]
- p.syscalls[event.name] = Syscall()
- self.check_syscall(self.cpus[cpu_id].current_comm, event.name)
-
- def run(self, args):
- for event in self.traces.events:
- if event.name == "sched_switch":
- self.sched_switch(event)
- elif event.name == "lttng_statedump_process_state":
- self.lttng_statedump_process_state(event)
- elif event.name[0:4] == "sys_":
- self.syscall_entry(event)
- self.conn.commit()
- self.review_processes()
- self.review_syscalls()
- self.conn.commit()
-
- def report(self):
- for p in self.processes.keys():
- print(p)
- for s in self.processes[p].syscalls.keys():
- print(" %s" % s)
-
-if __name__ == "__main__":
- parser = argparse.ArgumentParser(description='Activity tracker')
- parser.add_argument('path', metavar="<path/to/trace>", help='Trace path')
- parser.add_argument('--reset', action="store_true",
- help='Destroy and init the database')
- parser.add_argument('--accept', action="store_true",
- help='Accept all (non-interactive)')
- parser.add_argument('--report', action="store_true",
- help='Report the difference between the DB '
- '(non-interactive)')
- args = parser.parse_args()
-
- traces = TraceCollection()
- handle = traces.add_traces_recursive(args.path, "ctf")
- if handle is None:
- sys.exit(1)
-
- c = Analyzes(traces)
- if not os.path.isfile(DB_NAME):
- print("Creating the database for the first time")
- c.init_db()
- elif args.reset:
- print("Resetting the database")
- c.init_db()
- else:
- c.connect_db()
-
- c.run(args)
-
- for h in handle.values():
- traces.remove_trace(h)
+++ /dev/null
-#!/usr/bin/env python3
-#
-# Permission is hereby granted, free of charge, to any person obtaining a copy
-# of this software and associated documentation files (the "Software"), to deal
-# in the Software without restriction, including without limitation the rights
-# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-# copies of the Software, and to permit persons to whom the Software is
-# furnished to do so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice shall be included in
-# all copies or substantial portions of the Software.
-
-import argparse
-import os
-import sys
-import time
-try:
- from babeltrace import TraceCollection
-except ImportError:
- # quick fix for debian-based distros
- sys.path.append("/usr/local/lib/python%d.%d/site-packages" %
- (sys.version_info.major, sys.version_info.minor))
- from babeltrace import TraceCollection
-from LTTngAnalyzes.common import NSEC_PER_SEC
-from LTTngAnalyzes.jsonreport import JsonReport
-from LTTngAnalyzes.textreport import TextReport
-from LTTngAnalyzes.graphitereport import GraphiteReport
-from LTTngAnalyzes.state import State
-from LTTngAnalyzes.progressbar import progressbar_setup, progressbar_update, \
- progressbar_finish
-
-
-class Analyzes():
- def __init__(self, traces):
- self.trace_start_ts = 0
- self.trace_end_ts = 0
- self.traces = traces
- self.state = State()
-
- def output(self, args, begin_ns, end_ns, final=0):
- if args.text:
- r = TextReport(self.trace_start_ts, self.trace_end_ts,
- self.state.cpus, self.state.tids,
- self.state.syscalls, self.state.disks,
- self.state.ifaces, self.state.mm)
- r.report(begin_ns, end_ns, final, args)
- if not final and (args.cpu or args.tid or args.disk or args.net):
- print("")
- if args.json:
- r = JsonReport(self.trace_start_ts, self.trace_end_ts,
- self.state.cpus, self.state.tids)
- r.report(begin_ns, end_ns, final, args)
- if args.graphite:
- r = GraphiteReport(self.trace_start_ts, self.trace_end_ts,
- self.state.cpus, self.state.tids,
- self.state.syscalls, self.state.disks,
- self.state.ifaces)
- r.report(begin_ns, end_ns, final, args)
-
- def check_refresh(self, args, event):
- """Check if we need to output something"""
- if args.refresh == 0:
- return
- event_sec = event.timestamp / NSEC_PER_SEC
- if self.current_sec == 0:
- self.current_sec = event_sec
- elif self.current_sec != event_sec and \
- (self.current_sec + args.refresh) <= event_sec:
- self.compute_stats()
- self.output(args, self.start_ns, event.timestamp)
- self.reset_total(event.timestamp)
- self.current_sec = event_sec
- self.start_ns = event.timestamp
-
- def reset_total(self, start_ts):
- for cpu in self.state.cpus.keys():
- current_cpu = self.state.cpus[cpu]
- current_cpu.cpu_ns = 0
- if current_cpu.start_task_ns != 0:
- current_cpu.start_task_ns = start_ts
- if current_cpu.current_tid >= 0:
- self.state.tids[current_cpu.current_tid].last_sched = start_ts
-
- for tid in self.state.tids.keys():
- self.state.tids[tid].cpu_ns = 0
- self.state.tids[tid].migrate_count = 0
- self.state.tids[tid].read = 0
- self.state.tids[tid].write = 0
- self.state.tids[tid].allocated_pages = 0
- self.state.tids[tid].freed_pages = 0
- for syscall in self.state.tids[tid].syscalls.keys():
- self.state.tids[tid].syscalls[syscall].count = 0
-
- for syscall in self.state.syscalls.keys():
- if syscall == "total":
- continue
- self.state.syscalls[syscall].count = 0
-
- for dev in self.state.disks.keys():
- self.state.disks[dev].nr_sector = 0
- self.state.disks[dev].nr_requests = 0
- self.state.disks[dev].completed_requests = 0
- self.state.disks[dev].request_time = 0
-
- for iface in self.state.ifaces.keys():
- self.state.ifaces[iface].recv_bytes = 0
- self.state.ifaces[iface].recv_packets = 0
- self.state.ifaces[iface].send_bytes = 0
- self.state.ifaces[iface].send_packets = 0
-
- def clear(self):
- self.trace_start_ts = 0
- self.trace_end_ts = 0
- self.traces = traces
- self.state.tids = {}
- self.state.cpus = {}
- self.state.syscalls = {}
- self.state.disks = {}
- self.state.ifaces = {}
-
- def compute_stats(self):
- for cpu in self.state.cpus.keys():
- current_cpu = self.state.cpus[cpu]
- total_ns = self.end_ns - self.start_ns
- if current_cpu.start_task_ns != 0:
- current_cpu.cpu_ns += self.end_ns - current_cpu.start_task_ns
- cpu_total_ns = current_cpu.cpu_ns
- current_cpu.cpu_pc = (cpu_total_ns * 100)/total_ns
- if current_cpu.current_tid >= 0:
- self.state.tids[current_cpu.current_tid].cpu_ns += \
- self.end_ns - current_cpu.start_task_ns
-
- def run(self, args):
- """Process the trace"""
- self.current_sec = 0
- self.start_ns = 0
- self.end_ns = 0
-
- progressbar_setup(self, args)
-
- for event in self.traces.events:
- progressbar_update(self, args)
- if self.start_ns == 0:
- self.start_ns = event.timestamp
- if self.trace_start_ts == 0:
- self.trace_start_ts = event.timestamp
- self.end_ns = event.timestamp
- self.check_refresh(args, event)
- self.trace_end_ts = event.timestamp
-
- if event.name == "sched_switch":
- self.state.sched.switch(event)
- elif event.name == "sched_migrate_task":
- self.state.sched.migrate_task(event)
- elif event.name == "sched_process_fork":
- self.state.sched.process_fork(event)
- elif event.name == "sched_process_exec":
- self.state.sched.process_exec(event)
- elif (event.name[0:4] == "sys_" or event.name[0:14] ==
- "syscall_entry_") and (args.global_syscalls or
- args.tid_syscalls or
- args.fds):
- self.state.syscall.entry(event)
- elif (event.name == "exit_syscall" or event.name[0:13] ==
- "syscall_exit_") and (args.global_syscalls or
- args.tid_syscalls or
- args.fds):
- self.state.syscall.exit(event)
- elif event.name == "block_rq_complete":
- self.state.block.complete(event)
- elif event.name == "block_rq_issue":
- self.state.block.issue(event)
- elif event.name == "netif_receive_skb":
- self.state.net.recv(event)
- elif event.name == "net_dev_xmit":
- self.state.net.send(event)
- elif event.name == "lttng_statedump_process_state":
- self.state.statedump.process_state(event)
- elif event.name == "lttng_statedump_file_descriptor":
- self.state.statedump.file_descriptor(event)
- elif event.name == "self.state.mm_page_alloc":
- self.state.mm.page_alloc(event)
- elif event.name == "self.state.mm_page_free":
- self.state.mm.page_free(event)
- progressbar_finish(self, args)
- if args.refresh == 0:
- # stats for the whole trace
- self.compute_stats()
- self.output(args, self.trace_start_ts, self.trace_end_ts, final=1)
- else:
- # stats only for the last segment
- self.compute_stats()
- self.output(args, self.start_ns, self.trace_end_ts,
- final=1)
-
-if __name__ == "__main__":
- parser = argparse.ArgumentParser(description='CPU usage analysis')
- parser.add_argument('path', metavar="<path/to/trace>", help='Trace path')
- parser.add_argument('-r', '--refresh', type=int,
- help='Refresh period in seconds', default=0)
- parser.add_argument('--text', action="store_true",
- help='Output in text (default)')
- parser.add_argument('--json', action="store_true",
- help='Output in JSON')
- parser.add_argument('--graphite', action="store_true",
- help='Output to graphite')
- parser.add_argument('--cpu', action="store_true",
- help='Per-CPU stats (default)')
- parser.add_argument('--mem', action="store_true",
- help='Memory usage stats (default)')
- parser.add_argument('--disk', action="store_true",
- help='Per-Disk stats (default)')
- parser.add_argument('--tid', action="store_true",
- help='Per-TID stats (default)')
- parser.add_argument('--net', action="store_true",
- help='Per-interface network stats (default)')
- parser.add_argument('--global-syscalls', action="store_true",
- help='Global syscalls (default)')
- parser.add_argument('--tid-syscalls', action="store_true",
- help='Per-TID syscalls (default)')
- parser.add_argument('--fds', action="store_true",
- help='Per-PID FD stats (default)')
- parser.add_argument('--overall', action="store_true",
- help='Overall CPU Usage (default)')
- parser.add_argument('--info', action="store_true",
- help='Trace info (default)')
- parser.add_argument('--top', type=int, default=0,
- help='Limit to top X TIDs')
- parser.add_argument('--name', type=str, default=0,
- help='Show results only for the list of processes')
- parser.add_argument('--no-progress', action="store_true",
- help='Don\'t display the progress bar')
- args = parser.parse_args()
-
- if not args.json and not args.graphite:
- args.text = True
-
- if args.tid_syscalls or args.fds:
- args.tid = True
-
- if not (args.cpu or args.tid or args.overall or args.info or
- args.global_syscalls or args.tid_syscalls or args.disk
- or args.net or args.fds or args.mem):
- args.cpu = True
- args.tid = True
- args.overall = True
- args.disk = True
- args.info = True
- args.global_syscalls = True
- args.tid_syscalls = True
- args.net = True
- args.fds = True
- args.mem = True
- if args.name:
- args.global_syscalls = False
- args.display_proc_list = []
- if args.name:
- args.display_proc_list = args.name.split(",")
-
- while True:
- if args.graphite:
- events = "sched_switch,block_rq_complete,block_rq_issue," \
- "netif_receive_skb,net_dev_xmit"
- os.system("lttng create graphite -o graphite-live >/dev/null")
- os.system("lttng enable-event -k %s -s graphite >/dev/null"
- % events)
- os.system("lttng start graphite >/dev/null")
- time.sleep(2)
- os.system("lttng stop graphite >/dev/null")
- os.system("lttng destroy graphite >/dev/null")
- traces = TraceCollection()
- handle = traces.add_traces_recursive(args.path, "ctf")
- if handle is None:
- sys.exit(1)
-
- c = Analyzes(traces)
- c.run(args)
- c.clear()
-
- for h in handle.values():
- traces.remove_trace(h)
-
- if not args.graphite:
- break
+++ /dev/null
-#!/usr/bin/env python3
-#
-# Copyright (C) 2014 - Julien Desfossez <jdesfossez@efficios.com>
-#
-# Permission is hereby granted, free of charge, to any person obtaining a copy
-# of this software and associated documentation files (the "Software"), to deal
-# in the Software without restriction, including without limitation the rights
-# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-# copies of the Software, and to permit persons to whom the Software is
-# furnished to do so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice shall be included in
-# all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-# SOFTWARE.
-
-import sys
-import argparse
-try:
- from babeltrace import TraceCollection
-except ImportError:
- # quick fix for debian-based distros
- sys.path.append("/usr/local/lib/python%d.%d/site-packages" %
- (sys.version_info.major, sys.version_info.minor))
- from babeltrace import TraceCollection
-from LTTngAnalyzes.common import NSEC_PER_SEC, sec_to_hour
-from LTTngAnalyzes.state import State
-from ascii_graph import Pyasciigraph
-
-
-class CPUTop():
- def __init__(self, traces):
- self.trace_start_ts = 0
- self.trace_end_ts = 0
- self.traces = traces
- self.history = {}
- self.state = State()
-
- def run(self, args):
- """Process the trace"""
- self.current_sec = 0
- self.start_ns = 0
- self.end_ns = 0
-
- for event in self.traces.events:
- if self.start_ns == 0:
- self.start_ns = event.timestamp
- if self.trace_start_ts == 0:
- self.trace_start_ts = event.timestamp
- self.end_ns = event.timestamp
- self.check_refresh(args, event)
- self.trace_end_ts = event.timestamp
-
- if event.name == "sched_switch":
- self.state.sched.switch(event)
- # stats for the whole trace
- self.compute_stats()
- # self.output(args, self.trace_start_ts, self.trace_end_ts, final=1)
- self.graph_output(args, self.trace_start_ts,
- self.trace_end_ts, final=1)
-
- def update_history(self, args, sec):
- self.history[sec] = {}
- self.history[sec]["total_ns"] = self.end_ns - self.start_ns
- self.history[sec]["proc"] = {}
- h = self.history[sec]["proc"]
- for tid in self.state.tids.values():
- if tid.comm not in args.proc_list:
- continue
- if tid.comm not in h.keys():
- h[tid.comm] = tid.cpu_ns
- else:
- h[tid.comm] += tid.cpu_ns
- total_cpu_pc = 0
- for cpu in self.state.cpus.values():
- total_cpu_pc += cpu.cpu_pc
- total_cpu_pc = total_cpu_pc / len(self.state.cpus.keys())
- self.history[sec]["cpu"] = total_cpu_pc
-
- def check_refresh(self, args, event):
- """Check if we need to output something"""
- if args.refresh == 0:
- return
- event_sec = event.timestamp / NSEC_PER_SEC
- if self.current_sec == 0:
- self.current_sec = event_sec
- elif self.current_sec != event_sec and \
- (self.current_sec + args.refresh) <= event_sec:
- self.compute_stats()
- self.update_history(args, event_sec)
- self.reset_total(event.timestamp)
- self.current_sec = event_sec
- self.start_ns = event.timestamp
-
- def compute_stats(self):
- for cpu in self.state.cpus.keys():
- current_cpu = self.state.cpus[cpu]
- total_ns = self.end_ns - self.start_ns
- if current_cpu.start_task_ns != 0:
- current_cpu.cpu_ns += self.end_ns - current_cpu.start_task_ns
- cpu_total_ns = current_cpu.cpu_ns
- current_cpu.cpu_pc = (cpu_total_ns * 100)/total_ns
- if current_cpu.current_tid >= 0:
- self.state.tids[current_cpu.current_tid].cpu_ns += \
- self.end_ns - current_cpu.start_task_ns
-
- def output(self, args, begin_ns, end_ns, final=0):
- for sec in self.history.keys():
- s = self.history[sec]
- print("sec : %lu, total_ns : %lu" % (sec, s["total_ns"]))
- for p in s["proc"].keys():
- print("%s : %lu" % (p, s["proc"][p]))
-
- def graph_output(self, args, begin_ns, end_ns, final=0):
- for comm in args.proc_list:
- graph = Pyasciigraph()
- values = []
- for sec in sorted(self.history.keys()):
- if comm not in self.history[sec]["proc"].keys():
- break
- pc = float("%0.02f" % (
- (self.history[sec]["proc"][comm] * 100) /
- self.history[sec]["total_ns"]))
- values.append(("%s" % sec_to_hour(sec), pc))
- for line in graph.graph("%s CPU Usage" % comm, values, unit=" %"):
- print(line)
- graph = Pyasciigraph()
- values = []
- for sec in sorted(self.history.keys()):
- pc = float("%0.02f" % (self.history[sec]["cpu"]))
- values.append(("%s" % sec_to_hour(sec), pc))
- for line in graph.graph("Total CPU Usage", values, unit=" %"):
- print(line)
-
- def reset_total(self, start_ts):
- for cpu in self.state.cpus.keys():
- current_cpu = self.state.cpus[cpu]
- current_cpu.cpu_ns = 0
- if current_cpu.start_task_ns != 0:
- current_cpu.start_task_ns = start_ts
- if current_cpu.current_tid >= 0:
- self.state.tids[current_cpu.current_tid].last_sched = start_ts
- for tid in self.state.tids.keys():
- self.state.tids[tid].cpu_ns = 0
- self.state.tids[tid].migrate_count = 0
- self.state.tids[tid].read = 0
- self.state.tids[tid].write = 0
- for syscall in self.state.tids[tid].syscalls.keys():
- self.state.tids[tid].syscalls[syscall].count = 0
-
-if __name__ == "__main__":
- parser = argparse.ArgumentParser(description='CPU usage analysis')
- parser.add_argument('path', metavar="<path/to/trace>", help='Trace path')
- parser.add_argument('-r', '--refresh', type=int,
- help='Aggregate period in seconds (default = 1)',
- default=1)
- parser.add_argument('--names', type=str, default=0,
- help='Only this coma-separated list of process names')
- args = parser.parse_args()
- args.proc_list = []
- if args.names:
- args.proc_list = args.names.split(",")
-
- if args.refresh < 1:
- print("Refresh period must be >= 1 sec")
- sys.exit(1)
-
- traces = TraceCollection()
- handle = traces.add_traces_recursive(args.path, "ctf")
- if handle is None:
- sys.exit(1)
-
- c = CPUTop(traces)
-
- c.run(args)
-
- for h in handle.values():
- traces.remove_trace(h)
+++ /dev/null
-#!/usr/bin/env python3
-#
-# Permission is hereby granted, free of charge, to any person obtaining a copy
-# of this software and associated documentation files (the 'Software'), to deal
-# in the Software without restriction, including without limitation the rights
-# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-# copies of the Software, and to permit persons to whom the Software is
-# furnished to do so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice shall be included in
-# all copies or substantial portions of the Software.
-
-import argparse
-import sys
-try:
- from babeltrace import TraceCollection
-except ImportError:
- # quick fix for debian-based distros
- sys.path.append("/usr/local/lib/python%d.%d/site-packages" %
- (sys.version_info.major, sys.version_info.minor))
- from babeltrace import TraceCollection
-from LTTngAnalyzes.progressbar import progressbar_setup, progressbar_update, \
- progressbar_finish
-from babeltrace import CTFWriter, CTFScope, CTFStringEncoding
-
-
-# These declarations will go in their own file
-# They have been put here temporarily for testing
-char8_type = CTFWriter.IntegerFieldDeclaration(8)
-char8_type.signed = True
-char8_type.encoding = CTFStringEncoding.UTF8
-char8_type.alignment = 8
-
-int8_type = CTFWriter.IntegerFieldDeclaration(8)
-int8_type.signed = True
-int8_type.alignment = 8
-
-uint8_type = CTFWriter.IntegerFieldDeclaration(8)
-uint8_type.signed = False
-uint8_type.alignment = 8
-
-int16_type = CTFWriter.IntegerFieldDeclaration(16)
-int16_type.signed = True
-int16_type.alignment = 8
-
-uint16_type = CTFWriter.IntegerFieldDeclaration(16)
-uint16_type.signed = False
-uint16_type.alignment = 8
-
-int32_type = CTFWriter.IntegerFieldDeclaration(32)
-int32_type.signed = True
-int32_type.alignment = 8
-
-uint32_type = CTFWriter.IntegerFieldDeclaration(32)
-uint32_type.signed = False
-uint32_type.alignment = 8
-
-int64_type = CTFWriter.IntegerFieldDeclaration(64)
-int64_type.signed = True
-int64_type.alignment = 8
-
-uint64_type = CTFWriter.IntegerFieldDeclaration(64)
-uint64_type.signed = False
-uint64_type.alignment = 8
-
-string_type = CTFWriter.StringFieldDeclaration()
-
-
-class CTFFilter():
- def __init__(self, args, handle, traces):
- self.args = args
- self.handle = handle
- self.traces = traces
-
- self.clock = CTFWriter.Clock('monotonic')
- self.clock.description = 'Monotonic Clock'
- self.clock.freq = 1000000000
-
- self.writer = CTFWriter.Writer(self.args.output)
- self.writer.add_clock(self.clock)
-
- self.stream_class = CTFWriter.StreamClass('test_stream')
- self.stream_class.clock = self.clock
-
- self.event_classes = {}
-
- def process_event_metadata(self, event):
- if self.args.discard and event.name == self.args.name\
- or not self.args.discard and event.name != self.args.name:
- return
-
- if event.name not in self.event_classes.keys():
- event_class = CTFWriter.EventClass(event.name)
- for field in event.fields_scope(CTFScope.EVENT_FIELDS):
- self.add_field(event_class, field)
-
- self.event_classes[event.name] = event_class
- self.stream_class.add_event_class(event_class)
-
- def add_field(self, event_class, field):
- field_type = type(field)
-
- if field_type is CTFWriter.IntegerFieldDeclaration:
- self.add_int_field(event_class, field)
- elif field_type is CTFWriter.StringFieldDeclaration:
- self.add_string_field(event_class, field)
- elif field_type is CTFWriter.ArrayFieldDeclaration:
- self.add_array_field(event_class, field)
- elif field_type is CTFWriter.SequenceFieldDeclaration:
- self.add_sequence_field(event_class, field)
- else:
- raise RuntimeError('Unsupported field type: '
- + field_type.__name__)
-
- def add_int_field(self, event_class, field):
- # signed int
- if field.signedness == 1:
- if field.length == 8:
- event_class.add_field(int8_type, '_' + field.name)
- elif field.length == 16:
- event_class.add_field(int16_type, '_' + field.name)
- elif field.length == 32:
- event_class.add_field(int32_type, '_' + field.name)
- elif field.length == 64:
- event_class.add_field(int64_type, '_' + field.name)
- else:
- raise RuntimeError(
- 'Error, unsupported field length {0} bits of field {1}'
- .format(field.length, field.name))
- # unsigned int
- elif field.signedness == 0:
- if field.length == 8:
- event_class.add_field(uint8_type, '_' + field.name)
- elif field.length == 16:
- event_class.add_field(uint16_type, '_' + field.name)
- elif field.length == 32:
- event_class.add_field(uint32_type, '_' + field.name)
- elif field.length == 64:
- event_class.add_field(uint64_type, '_' + field.name)
- else:
- raise RuntimeError(
- 'Error, unsupported field length {0} bits of field {1}'
- .format(field.length, field.name))
- else:
- raise RuntimeError('Error, could not determine signedness of field'
- + field.name)
-
- def add_string_field(self, event_class, field):
- string_type = CTFWriter.ArrayFieldDeclaration(char8_type, 16)
- event_class.add_field(string_type, '_' + field.name)
-
- def add_array_field(self, event_class, field):
- array_type = CTFWriter.ArrayFieldDeclaration(char8_type, field.length)
- event_class.add_field(array_type, '_' + field.name)
-
- def add_sequence_field(self, event_class, field):
- # stuff
- print('seq')
-
- def process_event(self, event):
- if self.args.discard and event.name == self.args.name\
- or not self.args.discard and event.name != self.args.name:
- return
-
- if event.name in ['lttng_statedump_start', 'lttng_statedump_end',
- 'sys_unknown', 'sys_geteuid', 'sys_getuid',
- 'sys_getegid']:
- return
-
- self.clock.time = event.timestamp
- writeable_event = CTFWriter.Event(self.event_classes[event.name])
-
- field_names = event.field_list_with_scope(CTFScope.EVENT_FIELDS)
-
- for field_name in field_names:
- self.set_field(writeable_event, field_name, event[field_name])
-
- try:
- self.stream.append_event(writeable_event)
- except ValueError:
- print(event.name)
- pass
-
- def set_field(self, writeable_event, field_name, value):
- field_type = type(value)
-
- if field_type is str:
- self.set_char_array(writeable_event.payload('_' + field_name),
- value)
- elif field_type is int:
- self.set_int(writeable_event.payload('_' + field_name), value)
- elif field_type is list:
- pass
- else:
- raise RuntimeError('Error, unsupported field type '
- + field_type.__name__)
-
- def set_char_array(self, writeable_event, string):
- if len(string) > 16:
- string = string[0:16]
- else:
- string = "%s" % (string + "\0" * (16 - len(string)))
-
- for i in range(len(string)):
- a = writeable_event.field(i)
- a.value = ord(string[i])
-
- def set_int(self, writeable_event, value):
- writeable_event.value = value
-
- def run(self):
- progressbar_setup(self, args)
- for event in self.handle.events:
- progressbar_update(self, args)
- self.process_event_metadata(event)
-
- self.stream = self.writer.create_stream(self.stream_class)
-
- for event in self.traces.events:
- progressbar_update(self, args)
- self.process_event(event)
-
- progressbar_finish(self, args)
-
- self.stream.flush()
-
-if __name__ == '__main__':
- parser = argparse.ArgumentParser(description='')
- parser.add_argument('path', metavar='<path/to/trace>', help='Trace path')
- parser.add_argument('output', metavar='<path/to/new/trace>',
- help='Location of file to which the resulting filtered\
- trace will be written')
- parser.add_argument('-n', '--name', type=str, required=True,
- help='Name of events to keep\
- (or discard when --discard is used)')
- parser.add_argument('--discard', action='store_true',
- help='Discard specifed events instead of keeping them')
- parser.add_argument('--no-progress', action="store_true",
- help='Don\'t display the progress bar')
-
- args = parser.parse_args()
-
- traces = TraceCollection()
- handle = traces.add_traces_recursive(args.path, 'ctf')
- if handle is None:
- sys.exit(1)
-
- ctf_filter = CTFFilter(args, handle, traces)
-
- ctf_filter.run()
-
- for h in handle.values():
- traces.remove_trace(h)
+++ /dev/null
-#!/usr/bin/env python3
-#
-# Permission is hereby granted, free of charge, to any person obtaining a copy
-# of this software and associated documentation files (the 'Software'), to deal
-# in the Software without restriction, including without limitation the rights
-# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-# copies of the Software, and to permit persons to whom the Software is
-# furnished to do so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice shall be included in
-# all copies or substantial portions of the Software.
-
-import argparse
-import errno
-import json
-from os.path import join
-import socket
-import sys
-from collections import OrderedDict
-try:
- from babeltrace import TraceCollection
-except ImportError:
- # quick fix for debian-based distros
- sys.path.append("/usr/local/lib/python%d.%d/site-packages" %
- (sys.version_info.major, sys.version_info.minor))
- from babeltrace import TraceCollection
-from LTTngAnalyzes.common import FDType, ns_to_hour_nsec, NSEC_PER_SEC
-from LTTngAnalyzes.state import State
-from LTTngAnalyzes.syscalls import Syscalls, IOCategory
-try:
- from pymongo import MongoClient
- from pymongo.errors import CollectionInvalid
-except ImportError:
- nomongolib = 1
-
# Nanoseconds per second / millisecond / microsecond.
NS_IN_S = 1000000000
NS_IN_MS = 1000000
NS_IN_US = 1000


def parse_errname(errname):
    """Map an errno name (case-insensitive, e.g. 'EACCES') to its number.

    Prints an error message and exits the program when the name is not a
    known errno symbol.
    """
    upper = errname.upper()
    if not hasattr(errno, upper):
        print('Invalid errno name: ' + upper)
        sys.exit(1)
    return getattr(errno, upper)


def parse_duration(duration):
    """Convert a duration string with a unit suffix to nanoseconds.

    Accepted suffixes: 'ns', 'us', 'ms', 's'. The sentinel '-1' (user
    entered no duration) is passed through as the integer -1. Any other
    format prints an error and exits.
    """
    if duration == '-1':
        return -1

    # 'ns' values must be whole integers, so it is handled separately
    # (and must be tested before the plain 's' suffix).
    if duration.endswith('ns'):
        return int(duration[:-2])
    for suffix, scale in (('us', NS_IN_US), ('ms', NS_IN_MS)):
        if duration.endswith(suffix):
            return int(float(duration[:-2]) * scale)
    if duration.endswith('s'):
        return int(float(duration[:-1]) * NS_IN_S)

    print('Invalid duration: ' + duration)
    sys.exit(1)
-
-
class FDInfo():
    """File-descriptor syscall analysis over an LTTng kernel trace.

    Replays the trace through a State machine and, for each completed
    open/close/read/write syscall, prints and/or records an "fd event".
    Output sinks: stdout, JSON files, MongoDB, graphite, or local
    average-latency files.

    BUGFIX (class-wide): several methods referenced the module-level
    globals ``args`` and ``err_number`` instead of the instance copies
    passed to __init__; those only exist when the file runs as a script.
    All accesses now go through ``self.args`` / ``self.err_number``.
    """

    DUMP_FORMAT = '{0:18} {1:20} {2:<8} {3:20} {4:60}'
    SUCCESS_FORMAT = '{0:18} ({1:8f}) {2:20} {3:<8} {4:15} res={5:<3} {6:60}'
    FAILURE_FORMAT = '{0:18} ({1:8f}) {2:20} {3:<8} {4:15} res={5:<3} ({6}) \
        {7:60}'
    # ANSI escapes used to highlight failed syscalls on a tty.
    FAILURE_RED = '\033[31m'
    NORMAL_WHITE = '\033[37m'

    def __init__(self, args, traces, output_enabled, err_number):
        """
        Args:
            args: parsed argparse namespace with the script options.
            traces: babeltrace TraceCollection to replay.
            output_enabled: dict mapping event type name -> bool.
            err_number: errno value to filter on, or None.
        """
        self.args = args
        self.traces = traces
        self.output_enabled = output_enabled
        self.err_number = err_number

        # Color output only makes sense on a terminal.
        self.is_interactive = sys.stdout.isatty()

        self.fd_events = []
        # Stores metadata about processes when outputting to json
        # Keys: PID, values: {pname, fds, threads}
        self.json_metadata = {}
        # Used to identify session in database: second-to-last path
        # component (accounting for a possible trailing slash).
        if self.args.path[-1] == '/':
            self.session_name = self.args.path.split('/')[-3]
        else:
            self.session_name = self.args.path.split('/')[-2]
        # Hyphens in collection names are an inconvenience in mongo
        self.session_name = self.session_name.replace('-', '_')
        self.state = State()

    def process_event(self, event):
        """Dispatch one trace event to the state machine / outputs."""
        if event.name == 'sched_switch':
            self.state.sched.switch(event)
        elif event.name.startswith('sys_') or \
                event.name.startswith('syscall_entry_'):
            self.state.syscall.entry(event)
        elif event.name == 'exit_syscall' or \
                event.name.startswith('syscall_exit_'):
            self.handle_syscall_exit(event)
        elif event.name == 'sched_process_fork':
            self.state.sched.process_fork(event)
        elif event.name == 'lttng_statedump_process_state':
            self.state.statedump.process_state(event)
        elif event.name == 'lttng_statedump_file_descriptor':
            self.state.statedump.file_descriptor(event)
            if self.output_enabled['dump']:
                self.output_dump(event)

    def handle_syscall_exit(self, event, started=1):
        """On syscall exit, emit an fd event when its type is enabled,
        then let the state machine complete the syscall."""
        cpu_id = event['cpu_id']
        if cpu_id not in self.state.cpus:
            return

        cpu = self.state.cpus[cpu_id]
        if cpu.current_tid == -1:
            return

        current_syscall = self.state.tids[cpu.current_tid].current_syscall
        if len(current_syscall.keys()) == 0:
            return

        name = current_syscall['name']
        if name in Syscalls.OPEN_SYSCALLS and self.output_enabled['open'] or\
           name in Syscalls.CLOSE_SYSCALLS and self.output_enabled['close'] or\
           name in Syscalls.READ_SYSCALLS and self.output_enabled['read'] or\
           name in Syscalls.WRITE_SYSCALLS and self.output_enabled['write']:
            self.output_fd_event(event, current_syscall)

        self.state.syscall.exit(event, started)

    def run(self):
        '''Process the trace and emit the selected outputs'''
        for event in self.traces.events:
            self.process_event(event)

        if self.args.json:
            self.output_json()

        if self.args.mongo:
            self.store_mongo()

        if self.args.graphite:
            self.store_graphite()

        if self.args.localfile:
            self.store_localfile()

    def output_json(self):
        """Dump collected fd events and process metadata to JSON files."""
        fd_events_name = 'fd_events_' + self.session_name + '.json'
        fd_events_path = join(self.args.json, fd_events_name)
        # 'with' guarantees the files are closed even on a write error.
        with open(fd_events_path, 'w') as f:
            json.dump(self.fd_events, f)

        with open(join(self.args.json, 'metadata.json'), 'w') as f:
            json.dump(self.json_metadata, f)

    def store_graphite(self):
        """Send read/write latency metrics to a carbon/graphite socket."""
        sock = None
        try:
            sock = socket.create_connection((self.args.graphite_host,
                                             self.args.graphite_port))
        # BUGFIX: was a bare 'except:', which also swallowed
        # KeyboardInterrupt/SystemExit; connection errors are OSError.
        except OSError:
            print("Couldn't connect to %(server)s on port %(port)d, is "
                  "carbon-agent.py running?" % {'server':
                                                self.args.graphite_host,
                                                'port':
                                                self.args.graphite_port})
            sys.exit(1)

        lines = []
        for event in self.fd_events:
            ts = event["ts_start"]/NSEC_PER_SEC
            if event["category"] == IOCategory.write:
                metric = "hosts.test.latencies.write %d %lu" % \
                    (event["duration"]/(NSEC_PER_SEC), ts)
            elif event["category"] == IOCategory.read:
                metric = "hosts.test.latencies.read %d %lu" % \
                    (event["duration"]/(NSEC_PER_SEC), ts)
            else:
                continue
            lines.append(metric)
        message = '\n'.join(lines) + '\n'  # all lines must end in a newline
        sock.sendall(message.encode())

    def store_localfile(self):
        """Write average R/W latencies (in us) to files under --localfile.

        Also writes per-PID averages for the PIDs listed via
        --pidlatencies (self.args.pid_latency_list).
        """
        nb_read = 0
        total_read = 0
        nb_write = 0
        total_write = 0
        pid_latencies = {}
        # BUGFIX: previously read the module-level 'args' global.
        track = self.args.pid_latency_list

        for event in self.fd_events:
            pid = event["pid"]
            tracked = track is not None and str(pid) in track
            if tracked and pid not in pid_latencies:
                pid_latencies[pid] = {"r": 0, "nb_r": 0, "w": 0, "nb_w": 0}
            if event["category"] == IOCategory.write:
                total_write += event["duration"]
                nb_write += 1
                if tracked:
                    pid_latencies[pid]["w"] += event["duration"]
                    pid_latencies[pid]["nb_w"] += 1
            elif event["category"] == IOCategory.read:
                total_read += event["duration"]
                nb_read += 1
                if tracked:
                    pid_latencies[pid]["r"] += event["duration"]
                    pid_latencies[pid]["nb_r"] += 1

        # BUGFIX: guard against ZeroDivisionError when the trace has no
        # write (or read) events; previously this crashed.
        if nb_write > 0:
            with open(join(self.args.localfile, "avg_w_latency"), "w") as f:
                f.write("%f\n" % ((total_write / nb_write) /
                                  (NSEC_PER_SEC / 1000000)))
        if nb_read > 0:
            with open(join(self.args.localfile, "avg_r_latency"), "w") as f:
                f.write("%f\n" % ((total_read / nb_read) /
                                  (NSEC_PER_SEC / 1000000)))

        for pid, lat in pid_latencies.items():
            if lat["nb_r"] > 0:
                with open(join(self.args.localfile,
                               "%d_r_latency" % pid), "w") as f:
                    f.write("%f\n" % ((lat["r"] / lat["nb_r"]) /
                                      (NSEC_PER_SEC / 1000000)))
            if lat["nb_w"] > 0:
                with open(join(self.args.localfile,
                               "%d_w_latency" % pid), "w") as f:
                    f.write("%f\n" % ((lat["w"] / lat["nb_w"]) /
                                      (NSEC_PER_SEC / 1000000)))

    def store_mongo(self):
        """Insert fd events and per-PID metadata into MongoDB."""
        client = MongoClient(self.args.mongo_host, self.args.mongo_port)
        db = client.analyses

        fd_events_name = 'fd_events_' + self.session_name
        metadata_name = 'metadata_' + self.session_name

        try:
            db.create_collection(fd_events_name)
        except CollectionInvalid as ex:
            print('Failed to create collection: ')
            print(ex)
            print('Data will not be stored to MongoDB')
            return

        for event in self.fd_events:
            db[fd_events_name].insert(event)

        # Ascending timestamp index
        db[fd_events_name].create_index('ts_start')

        if metadata_name not in db.collection_names():
            try:
                db.create_collection(metadata_name)
            except CollectionInvalid as ex:
                print('Failed to create collection: ')
                print(ex)
                print('Metadata will not be stored to MongoDB')
                return

            # Register the session the first time its metadata is stored.
            db.sessions.insert({'name': self.session_name})

        for pid in self.json_metadata:
            metadatum = self.json_metadata[pid]
            metadatum['pid'] = pid
            db[metadata_name].update({'pid': pid}, metadatum, upsert=True)

        # Ascending PID index
        db[metadata_name].create_index('pid')

    def output_dump(self, event):
        """Print one statedump file_descriptor event, subject to the
        tid/pname/syscall/time/prefix filters."""
        # dump events can't fail, and don't have a duration, so ignore
        if self.args.failed or self.err_number or self.args.duration > 0:
            return

        # using tid as variable name for consistency with other events
        tid = event['pid']
        if self.args.tid >= 0 and self.args.tid != tid:
            return

        comm = self.state.tids[tid].comm
        if self.args.pname is not None and self.args.pname != comm:
            return

        name = event.name
        # BUGFIX: referenced the module-level 'args' global.
        if self.args.syscall and self.args.syscall != name:
            return

        filename = event['filename']

        endtime = event.timestamp
        if self.args.start and endtime < self.args.start:
            return
        if self.args.end and endtime > self.args.end:
            return

        if not self.args.unixtime:
            endtime = ns_to_hour_nsec(endtime)
        else:
            endtime = '{:.9f}'.format(endtime / NS_IN_S)

        if filename.startswith(self.args.prefix) and not self.args.quiet:
            print(FDInfo.DUMP_FORMAT.format(endtime, comm, tid, name,
                                            filename))

    def output_fd_event(self, exit_event, entry):
        """Print and/or record one completed fd syscall.

        Applies the failed/errno/tid/pname/syscall/time/duration filters,
        then prints (color-coded on a tty) and forwards the event to the
        JSON/mongo/graphite/localfile log when any of those is enabled.
        """
        ret = exit_event['ret']
        failed = ret < 0

        if self.args.failed and not failed:
            return

        # BUGFIX: compared against the bare 'err_number' global.
        if self.err_number and ret != -self.err_number:
            return

        tid = self.state.cpus[exit_event['cpu_id']].current_tid
        if self.args.tid >= 0 and self.args.tid != tid:
            return

        pid = self.state.tids[tid].pid
        if pid == -1:
            pid = tid

        comm = self.state.tids[tid].comm
        if self.args.pname is not None and self.args.pname != comm:
            return

        filename = entry['filename']
        if filename is None:
            return

        name = entry['name']

        # BUGFIX: referenced the module-level 'args' global.
        if self.args.syscall and self.args.syscall != name:
            return

        if self.args.start and entry['start'] < self.args.start:
            return

        if self.args.end and exit_event.timestamp > self.args.end:
            return

        endtime = exit_event.timestamp
        if not self.args.unixtime:
            endtime = ns_to_hour_nsec(endtime)
        else:
            endtime = '{:.9f}'.format(endtime / NS_IN_S)

        duration_ns = (exit_event.timestamp - entry['start'])

        if self.args.duration > 0 and duration_ns < self.args.duration:
            return

        duration = duration_ns / NS_IN_S

        if self.args.json or self.args.mongo or self.args.graphite or \
                self.args.localfile:
            self.log_fd_event_json(tid, pid, comm, entry, name, duration_ns,
                                   filename, ret)

        if self.is_interactive and failed and not self.args.no_color:
            sys.stdout.write(FDInfo.FAILURE_RED)

        if filename.startswith(self.args.prefix) and not self.args.quiet:
            if not failed:
                print(FDInfo.SUCCESS_FORMAT.format(endtime, duration, comm,
                                                   tid, name, ret, filename))
            else:
                try:
                    err_name = errno.errorcode[-ret]
                    print(FDInfo.FAILURE_FORMAT.format(endtime, duration,
                                                       comm, tid, name, ret,
                                                       err_name, filename))
                except KeyError:
                    print('Invalid error code:', -ret)

        if self.is_interactive and failed and not self.args.no_color:
            sys.stdout.write(FDInfo.NORMAL_WHITE)

    def log_fd_event_json(self, tid, pid, comm, entry, name, duration_ns,
                          filename, ret):
        """Record process/fd metadata and append a serializable fd event."""
        self.track_thread(tid, pid, comm)

        fd = None
        fd_in = None
        fd_out = None

        # Single-fd syscalls carry 'fd'; splice/sendfile carry a pair.
        if 'fd' in entry.keys():
            fd = entry['fd'].fd
        elif 'fd_in' in entry.keys():
            fd_in = entry['fd_in'].fd
            fd_out = entry['fd_out'].fd

        if fd:
            self.track_fd(fd, filename, tid, pid, entry)
        elif fd_in and fd_out:
            self.track_fd(fd_in, filename, tid, pid, entry)
            self.track_fd(fd_out, filename, tid, pid, entry)

        category = Syscalls.get_syscall_category(name)

        fd_event = {'ts_start': entry['start'],
                    'duration': duration_ns,
                    'tid': tid,
                    'pid': pid,
                    'category': category}

        if fd is not None:
            fd_event['fd'] = fd
        elif fd_in is not None and fd_out is not None:
            fd_event['fd_in'] = fd_in
            fd_event['fd_out'] = fd_out

        if ret < 0:
            fd_event['errno'] = -ret
        else:
            # splice/sendfile both read and write the returned byte count.
            if name in ['sys_splice', 'sys_sendfile64']:
                fd_event['read'] = ret
                fd_event['write'] = ret
            elif name in Syscalls.READ_SYSCALLS:
                fd_event['read'] = ret
            elif name in Syscalls.WRITE_SYSCALLS:
                fd_event['write'] = ret

        self.fd_events.append(fd_event)

    def track_thread(self, tid, pid, comm):
        """Record (or refresh) the process/thread entry in json_metadata."""
        # Dealing with plain old process
        if pid == tid:
            if pid not in self.json_metadata:
                self.json_metadata[pid] = {
                    'pname': comm,
                    'fds': {},
                    'threads': {}
                }
            else:
                if self.json_metadata[pid]['pname'] != comm:
                    self.json_metadata[pid]['pname'] = comm
        # Dealing with a thread
        else:
            if pid not in self.json_metadata:
                self.json_metadata[pid] = {
                    'pname': 'unknown',
                    'fds': {},
                    'threads': {}
                }

            tid_str = str(tid)
            if tid_str not in self.json_metadata[pid]['threads']:
                self.json_metadata[pid]['threads'][tid_str] = {
                    'pname': comm
                }
            else:
                if self.json_metadata[pid]['threads'][tid_str]['pname'] \
                        != comm:
                    self.json_metadata[pid]['threads'][tid_str]['pname'] = comm

    def track_fd(self, fd, filename, tid, pid, entry):
        """Record the fd's filename/type history, keyed by start timestamp.

        A new history entry is only appended when the filename changed
        since the most recent one.
        """
        fd_str = str(fd)
        fdtype = FDType.unknown

        if fd in self.state.tids[tid].fds:
            fdtype = self.state.tids[tid].fds[fd].fdtype

        fd_metadata = {}
        fd_metadata['filename'] = filename
        fd_metadata['fdtype'] = fdtype

        if fd_str not in self.json_metadata[pid]['fds']:
            fds = self.json_metadata[pid]['fds']
            fds[fd_str] = OrderedDict()
            fds[fd_str][str(entry['start'])] = fd_metadata
        else:
            chrono_fd = self.json_metadata[pid]['fds'][fd_str]
            last_ts = next(reversed(chrono_fd))
            if filename != chrono_fd[last_ts]['filename']:
                chrono_fd[str(entry['start'])] = fd_metadata
-
-
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='FD syscalls analysis')
    parser.add_argument('path', metavar='<path/to/trace>', help='Trace path')
    parser.add_argument('-p', '--prefix', type=str, default='',
                        help='Prefix in which to search')
    parser.add_argument('-t', '--type', type=str, default='all',
                        help='Types of events to display. Possible values:\
                        all, open, close, read, write dump')
    parser.add_argument('--tid', type=int, default='-1',
                        help='TID for which to display events')
    parser.add_argument('--pname', type=str, default=None,
                        help='Process name for which to display events')
    parser.add_argument('-d', '--duration', type=str, default='-1',
                        help='Minimum duration in ms of syscalls to display')
    parser.add_argument('-e', '--errname', type=str,
                        help='Only display syscalls whose return value matches\
                        that corresponding to the given errno name')
    parser.add_argument('--syscall', type=str, default=None,
                        help='Name of syscall to display')
    parser.add_argument('--start', type=int, default=None,
                        help='Start time from which to display events (unix\
                        time)')
    parser.add_argument('--end', type=int, default=None,
                        help='End time after which events are not displayed\
                        (unix time)')
    parser.add_argument('--failed', action='store_true',
                        help='Display only failed syscalls')
    parser.add_argument('--unixtime', action='store_true',
                        help='Display timestamps in unix time format')
    parser.add_argument('--no-color', action='store_true',
                        help='Disable color output')
    parser.add_argument('--json', type=str, default=None,
                        help='Store FD events as JSON in specified directory')
    parser.add_argument('--mongo', type=str, default=None,
                        help='Store FD events into MongoDB at specified ip\
                        and port')
    parser.add_argument('--graphite', type=str, default=None,
                        help='Store FD events into graphite at specified ip\
                        and port')
    parser.add_argument('--localfile', type=str, default=None,
                        help='Store average latencies in local files '
                        '(one file per metric)')
    parser.add_argument('--pidlatencies', type=str, default=None,
                        help='Compute R/W latencies for these processes for '
                        'localfile mode')
    parser.add_argument('-q', '--quiet', action='store_true',
                        help='Don\'t output fd events to stdout')

    args = parser.parse_args()

    # Build the event-type -> enabled map from the comma-separated --type.
    types = args.type.split(',')

    possibleTypes = ['open', 'close', 'read', 'write', 'dump']

    if 'all' in types:
        output_enabled = {x: True for x in possibleTypes}
    else:
        output_enabled = {x: False for x in possibleTypes}
        for event_type in types:
            if event_type in possibleTypes:
                output_enabled[event_type] = True
            else:
                print('Invalid type:', event_type)
                parser.print_help()
                sys.exit(1)

    # Allow passing the syscall name with or without the 'sys_' prefix.
    if args.syscall and not args.syscall.startswith('sys_'):
        args.syscall = 'sys_' + args.syscall

    traces = TraceCollection()
    handle = traces.add_trace(args.path, 'ctf')
    if handle is None:
        sys.exit(1)

    if args.errname:
        err_number = parse_errname(args.errname)
    else:
        err_number = None

    # Convert start/endtime from seconds to nanoseconds
    if args.start:
        args.start = args.start * NS_IN_S
    if args.end:
        args.end = args.end * NS_IN_S

    # Parse duration option
    args.duration = parse_duration(args.duration)

    if args.mongo:
        # BUGFIX: 'nomongolib' is only bound when the pymongo import
        # failed; the old test 'nomongolib == 1' raised NameError
        # whenever pymongo WAS installed and --mongo was used.
        if globals().get('nomongolib', 0) == 1:
            print("Missing pymongo library")
            sys.exit(1)
        try:
            (args.mongo_host, args.mongo_port) = args.mongo.split(':')
            socket.inet_aton(args.mongo_host)
            args.mongo_port = int(args.mongo_port)
        except ValueError:
            print('Invalid MongoDB address format: ', args.mongo)
            print('Expected format: IPV4:PORT')
            sys.exit(1)
        except socket.error:
            print('Invalid MongoDB ip ', args.mongo_host)
            sys.exit(1)

    if args.graphite:
        try:
            (args.graphite_host, args.graphite_port) = args.graphite.split(':')
            socket.inet_aton(args.graphite_host)
            args.graphite_port = int(args.graphite_port)
        except ValueError:
            print('Invalid graphite address format: ', args.graphite)
            print('Expected format: IPV4:PORT')
            sys.exit(1)
        except socket.error:
            print('Invalid graphite ip ', args.graphite_host)
            sys.exit(1)

    if args.pidlatencies:
        args.pid_latency_list = args.pidlatencies.split(",")
    else:
        args.pid_latency_list = None

    analyser = FDInfo(args, traces, output_enabled, err_number)

    analyser.run()

    traces.remove_trace(handle)
+++ /dev/null
-#!/usr/bin/env python3
-#
-# Permission is hereby granted, free of charge, to any person obtaining a copy
-# of this software and associated documentation files (the 'Software'), to deal
-# in the Software without restriction, including without limitation the rights
-# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-# copies of the Software, and to permit persons to whom the Software is
-# furnished to do so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice shall be included in
-# all copies or substantial portions of the Software.
-
-# KNOWN LIMITATIONS: Does not account for net IO on sockets opened before
-# start of trace
-
-import sys
-import argparse
-import socket
-from babeltrace import TraceCollection
-from LTTngAnalyzes.common import convert_size, FDType
-from LTTngAnalyzes.state import State
-from LTTngAnalyzes.progressbar import progressbar_setup, progressbar_update, \
- progressbar_finish
-
-
class NetTop():
    """Rank processes by network I/O volume over an LTTng kernel trace.

    Replays the trace through a State machine, then sums per-thread
    socket read/write byte counters and prints the top consumers.
    """

    TOTAL_FORMAT = '{0:20} {1:<10} total: {2:10}'

    def __init__(self, traces, is_io_measured, is_connection_measured, number):
        """
        Args:
            traces: babeltrace TraceCollection to replay.
            is_io_measured: dict {'up': bool, 'down': bool}.
            is_connection_measured: dict {'ipv4': bool, 'ipv6': bool}.
            number: maximum number of processes to display.
        """
        self.traces = traces
        self.is_io_measured = is_io_measured
        self.is_connection_measured = is_connection_measured
        self.number = number
        self.state = State()

    def get_total_transfer(self, transfer):
        """Sum the bytes of the measured directions and address families.

        BUGFIX: previously read the module-level globals
        'is_connection_measured'/'is_io_measured', which only exist when
        the file is run as a script; now uses the instance attributes.
        """
        total = 0

        if self.is_connection_measured['ipv4']:
            if self.is_io_measured['up']:
                total += transfer['ipv4']['up']
            if self.is_io_measured['down']:
                total += transfer['ipv4']['down']
        if self.is_connection_measured['ipv6']:
            if self.is_io_measured['up']:
                total += transfer['ipv6']['up']
            if self.is_io_measured['down']:
                total += transfer['ipv6']['down']

        return total

    def process_event(self, event):
        """Feed one event to the scheduling/syscall state machine."""
        if event.name == 'sched_switch':
            self.state.sched.switch(event)
        elif event.name == 'sched_process_fork':
            self.state.sched.process_fork(event)
        elif event.name[0:4] == 'sys_' or event.name[0:14] == "syscall_entry_":
            self.state.syscall.entry(event)
        elif event.name == 'exit_syscall' or \
                event.name[0:13] == "syscall_exit_":
            self.state.syscall.exit(event)

    def run(self, args):
        """Replay the whole trace, then print the per-process totals."""
        progressbar_setup(self, args)
        for event in self.traces.events:
            progressbar_update(self, args)
            self.process_event(event)

        progressbar_finish(self, args)

        self.output()

    def output(self):
        """Aggregate per-TID socket counters and print the top talkers."""
        transferred = {}

        for tid in self.state.tids.keys():
            # Per-family up/down byte counters for this thread.
            transferred[tid] = {'ipv4': {'up': 0, 'down': 0},
                                'ipv6': {'up': 0, 'down': 0}}

            for fd in self.state.tids[tid].fds.values():
                if fd.fdtype is FDType.net:
                    if fd.family == socket.AF_INET:
                        transferred[tid]['ipv4']['up'] += fd.net_write
                        transferred[tid]['ipv4']['down'] += fd.net_read
                    elif fd.family == socket.AF_INET6:
                        transferred[tid]['ipv6']['up'] += fd.net_write
                        transferred[tid]['ipv6']['down'] += fd.net_read

        print('Processes by Network I/O')
        print('#' * 80)

        for tid in sorted(transferred, key=lambda tid:
                          self.get_total_transfer(transferred[tid]),
                          reverse=True)[:self.number]:

            total = self.get_total_transfer(transferred[tid])

            if total != 0:
                print(NetTop.TOTAL_FORMAT.format(self.state.tids[tid].comm,
                                                 '(' + str(tid) + ')',
                                                 convert_size(total)))
-
-
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Network usage \
                                     analysis by process')
    parser.add_argument('path', metavar='<path/to/trace>', help='Trace path')
    parser.add_argument('-t', '--type', type=str, default='all',
                        help='Types of network IO to measure. '
                        'Possible values: all, up, down')
    parser.add_argument('-c', '--connection', type=str, default='all',
                        help='Types of connections to measure.'
                        ' Possible values: all, ipv4, ipv6')
    parser.add_argument('-n', '--number', type=int, default=10,
                        help='Number of processes to display')
    parser.add_argument('--no-progress', action="store_true",
                        help='Don\'t display the progress bar')

    args = parser.parse_args()

    # Translate the comma-separated --type list into a bool map.
    io_types = args.type.split(',')
    possible_io_types = ['up', 'down']

    if 'all' in io_types:
        is_io_measured = {x: True for x in possible_io_types}
    else:
        is_io_measured = {x: False for x in possible_io_types}
        # IDIOM FIX: loop variable renamed from 'type', which shadowed
        # the builtin of the same name.
        for io_type in io_types:
            if io_type in possible_io_types:
                is_io_measured[io_type] = True
            else:
                print('Invalid type:', io_type)
                parser.print_help()
                sys.exit(1)

    # Same translation for the comma-separated --connection list.
    connection_types = args.connection.split(',')
    possible_connection_types = ['ipv4', 'ipv6']

    if 'all' in connection_types:
        is_connection_measured = {x: True for x in possible_connection_types}
    else:
        is_connection_measured = {x: False for x in possible_connection_types}
        for conn_type in connection_types:
            if conn_type in possible_connection_types:
                is_connection_measured[conn_type] = True
            else:
                print('Invalid type:', conn_type)
                parser.print_help()
                sys.exit(1)

    if args.number < 0:
        print('Number of processes must be non-negative')
        parser.print_help()
        sys.exit(1)

    traces = TraceCollection()
    handle = traces.add_traces_recursive(args.path, 'ctf')

    c = NetTop(traces, is_io_measured, is_connection_measured, args.number)
    c.run(args)

    for h in handle.values():
        traces.remove_trace(h)
+++ /dev/null
-#!/usr/bin/env python3
-#
-# Permission is hereby granted, free of charge, to any person obtaining a copy
-# of this software and associated documentation files (the 'Software'), to deal
-# in the Software without restriction, including without limitation the rights
-# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-# copies of the Software, and to permit persons to whom the Software is
-# furnished to do so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice shall be included in
-# all copies or substantial portions of the Software.
-
-import argparse
-import json
-import os.path
-import socket
-import sys
-try:
- from babeltrace import TraceCollection
-except ImportError:
- # quick fix for debian-based distros
- sys.path.append("/usr/local/lib/python%d.%d/site-packages" %
- (sys.version_info.major, sys.version_info.minor))
- from babeltrace import TraceCollection
-from LTTngAnalyzes.common import ns_to_hour_nsec
-from LTTngAnalyzes.state import State
-from pymongo import MongoClient
-from pymongo.errors import CollectionInvalid
-
-NS_IN_S = 1000000000
-
-
class Perf():
    """Report per-thread perf counter values at each sched_switch.

    Counter values are produced by the State scheduling machine; each
    set is printed to stdout and optionally dumped to JSON / stored in
    MongoDB.
    """

    # endtime, comm, tid, context name, value
    PERF_FORMAT = '{0:18} {1:20} {2:<8} {3:20} {4:<8}'

    def __init__(self, args, traces, types):
        # args: parsed argparse namespace; traces: TraceCollection;
        # types: list of perf context names to keep, or None for all.
        self.args = args

        self.traces = traces
        self.types = types
        self.is_interactive = sys.stdout.isatty()

        self.state = State()
        self.perf = []

        # Stores metadata about processes when outputting to json
        # Keys: PID, values: {pname, threads}
        self.json_metadata = {}
        # Used to identify session in database
        self.session_name = self.args.path.split('/')[-2]
        # Hyphens in collections names are an inconvenience in mongo
        self.session_name = self.session_name.replace('-', '_')

    def process_event(self, event):
        """On sched_switch, collect the perf contexts of the thread being
        switched out and forward them to output_perf()."""
        if event.name == 'sched_switch':
            ret = self.state.sched.switch(event)
            tid = event['prev_tid']
            if len(ret.keys()) > 0:
                d = {'ts': event.timestamp,
                     'tid': tid}
                for context in ret.keys():
                    if self.types and context not in self.types:
                        continue
                    if context.startswith('perf_'):
                        # --delta reports the change since the previous
                        # switch, otherwise the cumulative per-thread count.
                        if self.args.delta:
                            d[context] = ret[context]
                        else:
                            d[context] = self.state.tids[tid].perf[context]
                self.output_perf(event, d)
        elif event.name == 'lttng_statedump_process_state':
            self.state.statedump.process_state(event)

    def run(self):
        '''Process the trace'''
        for event in self.traces.events:
            self.process_event(event)

        if self.args.json:
            self.output_json()

        if self.args.mongo:
            self.store_mongo()

    def output_json(self):
        """Dump collected counters and process metadata to JSON files."""
        perf_name = 'perf_' + self.session_name + '.json'
        perf_path = os.path.join(self.args.json, perf_name)
        f = open(perf_path, 'w')
        json.dump(self.perf, f)
        f.close()

        f = open(os.path.join(self.args.json, 'metadata.json'), 'w')
        json.dump(self.json_metadata, f)
        f.close()

    def store_mongo(self):
        """Insert counters and per-PID metadata into MongoDB."""
        client = MongoClient(self.args.mongo_host, self.args.mongo_port)
        db = client.analyses

        perf_name = 'perf_' + self.session_name
        metadata_name = 'metadata_' + self.session_name

        try:
            db.create_collection(perf_name)
        except CollectionInvalid as ex:
            print('Failed to create collection: ')
            print(ex)
            print('Data will not be stored to MongoDB')
            return

        for event in self.perf:
            db[perf_name].insert(event)

        # Ascending timestamp index
        db[perf_name].create_index('ts')

        if metadata_name not in db.collection_names():
            try:
                db.create_collection(metadata_name)
            except CollectionInvalid as ex:
                print('Failed to create collection: ')
                print(ex)
                print('Metadata will not be stored to MongoDB')
                return

            # Register the session the first time its metadata is stored.
            db.sessions.insert({'name': self.session_name})

        for pid in self.json_metadata:
            metadatum = self.json_metadata[pid]
            metadatum['pid'] = pid
            db[metadata_name].update({'pid': pid}, metadatum, upsert=True)

        # Ascending PID index
        db[metadata_name].create_index('pid')

    def output_perf(self, event, ret):
        """Filter and print one counter set; forward it to the JSON/mongo
        log when those outputs are enabled."""
        tid = event['prev_tid']
        # self.args.tid is a list of TID strings (or None for no filter).
        if self.args.tid and str(tid) not in self.args.tid:
            return

        pid = self.state.tids[tid].pid
        if pid == -1:
            pid = tid

        comm = self.state.tids[tid].comm
        if self.args.pname is not None and self.args.pname != comm:
            return

        name = event.name
        if name != 'sched_switch':
            return

        endtime = event.timestamp
        if self.args.start and endtime < self.args.start:
            return
        if self.args.end and endtime > self.args.end:
            return

        if not self.args.unixtime:
            endtime = ns_to_hour_nsec(endtime)
        else:
            endtime = '{:.9f}'.format(endtime / NS_IN_S)

        insert = 0
        for context in ret.keys():
            if context.startswith('perf_'):
                if self.args.json or self.args.mongo:
                    insert = 1
                if not self.args.quiet:
                    print(Perf.PERF_FORMAT.format(endtime, comm, tid, context,
                                                  ret[context]))
        if insert:
            self.log_perf_event_json(endtime, comm, tid, pid, ret)

    def log_perf_event_json(self, ts, comm, tid, pid, ret):
        """Record process/thread metadata, then append the counter dict.

        NOTE(review): ts and comm are only used for the metadata; the
        appended dict keeps the raw timestamp set in process_event().
        """
        if pid == tid:
            if pid not in self.json_metadata:
                self.json_metadata[pid] = {'pname': comm, 'threads': {}}
            elif self.json_metadata[pid]['pname'] != comm:
                self.json_metadata[pid]['pname'] = comm
        else:
            if pid not in self.json_metadata:
                self.json_metadata[pid] = {'pname': 'unknown', 'threads': {}}

            tid_str = str(tid)
            if tid_str not in self.json_metadata[pid]['threads']:
                self.json_metadata[pid]['threads'][tid_str] = {
                    'pname': comm
                }
            else:
                if self.json_metadata[pid]['threads'][tid_str]['pname'] \
                        != comm:
                    self.json_metadata[pid]['threads'][tid_str]['pname'] = comm

        self.perf.append(ret)
-
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Perf counter analysis')
    parser.add_argument('path', metavar='<path/to/trace>', help='Trace path')
    parser.add_argument('-t', '--type', type=str, default='all',
                        help='Types of perf counters to display')
    # NOTE(review): default is int 0 although type=str; 0 is falsy, so
    # the 'if args.tid' below treats the default as "no TID filter".
    parser.add_argument('--tid', type=str, default=0,
                        help='TID for which to display events')
    parser.add_argument('--pname', type=str, default=None,
                        help='Process name for which to display events')
    parser.add_argument('--start', type=int, default=None,
                        help='Start time from which to display events (unix\
                        time)')
    parser.add_argument('--end', type=int, default=None,
                        help='End time after which events are not displayed\
                        (unix time)')
    parser.add_argument('--unixtime', action='store_true',
                        help='Display timestamps in unix time format')
    parser.add_argument('--delta', action='store_true',
                        help='Display deltas instead of total count')
    parser.add_argument('--json', type=str, default=None,
                        help='Store perf counter changes as JSON in specified\
                        directory')
    parser.add_argument('--mongo', type=str, default=None,
                        help='Store perf counter changes into MongoDB at\
                        specified ip and port')
    parser.add_argument('-q', '--quiet', action='store_true',
                        help='Don\'t output fd events to stdout')

    args = parser.parse_args()

    # None means "keep every perf context".
    if args.type != 'all':
        types = args.type.split(',')
    else:
        types = None

    # Normalize --tid to a list of TID strings, or None for no filter.
    if args.tid:
        args.tid = args.tid.split(',')
    else:
        args.tid = None

    traces = TraceCollection()
    handle = traces.add_traces_recursive(args.path, 'ctf')
    if handle is None:
        sys.exit(1)

    # Convert start/endtime from seconds to nanoseconds
    if args.start:
        args.start = args.start * NS_IN_S
    if args.end:
        args.end = args.end * NS_IN_S

    if args.mongo:
        try:
            (args.mongo_host, args.mongo_port) = args.mongo.split(':')
            socket.inet_aton(args.mongo_host)
            args.mongo_port = int(args.mongo_port)
        except ValueError:
            print('Invalid MongoDB address format: ', args.mongo)
            print('Expected format: IPV4:PORT')
            sys.exit(1)
        except socket.error:
            print('Invalid MongoDB ip ', args.mongo_host)
            sys.exit(1)

    analyser = Perf(args, traces, types)

    analyser.run()

    # Release the babeltrace handles once the analysis is done.
    for h in handle.values():
        traces.remove_trace(h)
+++ /dev/null
-#!/usr/bin/env python3
-#
-# Permission is hereby granted, free of charge, to any person obtaining a copy
-# of this software and associated documentation files (the "Software"), to deal
-# in the Software without restriction, including without limitation the rights
-# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-# copies of the Software, and to permit persons to whom the Software is
-# furnished to do so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice shall be included in
-# all copies or substantial portions of the Software.
-
-import argparse
-import sys
-
-try:
- from babeltrace import TraceCollection
-except ImportError:
- # quick fix for debian-based distros
- sys.path.append("/usr/local/lib/python%d.%d/site-packages" %
- (sys.version_info.major, sys.version_info.minor))
- from babeltrace import TraceCollection
-from LTTngAnalyzes.common import ns_to_hour_nsec
-from LTTngAnalyzes.state import State
-
-
-class ProcInfo():
- def __init__(self, traces):
- self.trace_start_ts = 0
- self.trace_end_ts = 0
- self.traces = traces
- self.state = State()
-
- def run(self, args):
- """Process the trace"""
- self.current_sec = 0
- self.start_ns = 0
- self.end_ns = 0
-
- for event in self.traces.events:
- if self.start_ns == 0:
- self.start_ns = event.timestamp
- if self.trace_start_ts == 0:
- self.trace_start_ts = event.timestamp
- self.end_ns = event.timestamp
- self.trace_end_ts = event.timestamp
- payload = ""
- override_tid = 0
-
- if event.name == "sched_switch":
- self.state.sched.switch(event)
- elif event.name[0:4] == "sys_" or \
- event.name[0:14] == "syscall_entry_":
- payload = self.state.syscall.entry(event)
- elif event.name == "exit_syscall" or \
- event.name[0:13] == "syscall_exit_":
- payload = self.state.syscall.exit(event)
- elif event.name == "block_complete" or \
- event.name == "block_rq_complete":
- self.state.block.complete(event)
- elif event.name == "block_queue":
- self.state.block.queue(event)
- elif event.name == "netif_receive_skb":
- self.state.net.recv(event)
- elif event.name == "net_dev_xmit":
- self.state.net.send(event)
- elif event.name == "sched_process_fork":
- self.state.sched.process_fork(event)
- if int(event["child_tid"]) == int(args.pid):
- override_tid = 1
- payload = "%s created by : %d" % (
- ns_to_hour_nsec(event.timestamp),
- event["parent_tid"])
- else:
- payload = "%s fork child_tid : %d" % (
- ns_to_hour_nsec(event.timestamp),
- event["child_tid"])
- elif event.name == "sched_process_exec":
- payload = "%s exec %s" % (
- ns_to_hour_nsec(event.timestamp),
- event["filename"])
- elif event.name == "lttng_statedump_process_state":
- self.state.statedump.process_state(event)
- if event["pid"] == int(args.pid):
- override_tid = 1
- payload = "%s existed at statedump" % \
- ns_to_hour_nsec(event.timestamp)
- elif event.name == "lttng_statedump_file_descriptor":
- self.state.statedump.file_descriptor(event)
- if event["pid"] == int(args.pid):
- override_tid = 1
- payload = "%s statedump file : %s, fd : %d" % (
- ns_to_hour_nsec(event.timestamp),
- event["filename"], event["fd"])
- elif event.name == "lttng_statedump_block_device":
- self.state.statedump.block_device(event)
-
- cpu_id = event["cpu_id"]
- if cpu_id not in self.state.cpus.keys():
- continue
- c = self.state.cpus[cpu_id]
- if c.current_tid not in self.state.tids.keys():
- continue
- pid = self.state.tids[c.current_tid].pid
- if int(args.pid) != pid and override_tid == 0:
- continue
- if payload:
- print("%s" % (payload))
-
-if __name__ == "__main__":
- parser = argparse.ArgumentParser(description='I/O usage analysis')
- parser.add_argument('path', metavar="<path/to/trace>", help='Trace path')
- parser.add_argument('pid', help='PID')
- args = parser.parse_args()
- args.proc_list = []
-
- traces = TraceCollection()
- handle = traces.add_traces_recursive(args.path, "ctf")
- if handle is None:
- sys.exit(1)
-
- c = ProcInfo(traces)
-
- c.run(args)
-
- for h in handle.values():
- traces.remove_trace(h)
+++ /dev/null
-#!/usr/bin/env python3
-
-import sys
-import tempfile
-try:
- from babeltrace import CTFWriter, CTFStringEncoding
-except ImportError:
- # quick fix for debian-based distros
- sys.path.append("/usr/local/lib/python%d.%d/site-packages" %
- (sys.version_info.major, sys.version_info.minor))
- from babeltrace import CTFWriter, CTFStringEncoding
-
-trace_path = tempfile.mkdtemp()
-
-print("Writing trace at {}".format(trace_path))
-writer = CTFWriter.Writer(trace_path)
-
-clock = CTFWriter.Clock("A_clock")
-clock.description = "Simple clock"
-
-writer.add_clock(clock)
-writer.add_environment_field("Python_version", str(sys.version_info))
-
-stream_class = CTFWriter.StreamClass("test_stream")
-stream_class.clock = clock
-
-char8_type = CTFWriter.IntegerFieldDeclaration(8)
-char8_type.signed = True
-char8_type.encoding = CTFStringEncoding.UTF8
-char8_type.alignment = 8
-
-int32_type = CTFWriter.IntegerFieldDeclaration(32)
-int32_type.signed = True
-int32_type.alignment = 8
-
-uint32_type = CTFWriter.IntegerFieldDeclaration(32)
-uint32_type.signed = False
-uint32_type.alignment = 8
-
-int64_type = CTFWriter.IntegerFieldDeclaration(64)
-int64_type.signed = True
-int64_type.alignment = 8
-
-array_type = CTFWriter.ArrayFieldDeclaration(char8_type, 16)
-
-sched_switch = CTFWriter.EventClass("sched_switch")
-
-sched_switch.add_field(array_type, "_prev_comm")
-sched_switch.add_field(int32_type, "_prev_tid")
-sched_switch.add_field(int32_type, "_prev_prio")
-sched_switch.add_field(int64_type, "_prev_state")
-sched_switch.add_field(array_type, "_next_comm")
-sched_switch.add_field(int32_type, "_next_tid")
-sched_switch.add_field(int32_type, "_next_prio")
-sched_switch.add_field(uint32_type, "_cpu_id")
-
-stream_class.add_event_class(sched_switch)
-stream = writer.create_stream(stream_class)
-
-
-def set_char_array(event, string):
- if len(string) > 16:
- string = string[0:16]
- else:
- string = "%s" % (string + "\0" * (16 - len(string)))
-
- for i in range(len(string)):
- a = event.field(i)
- a.value = ord(string[i])
-
-
-def set_int(event, value):
- event.value = value
-
-
-def write_sched_switch(time_ms, cpu_id, prev_comm, prev_tid, next_comm,
- next_tid, prev_prio=20, prev_state=1,
- next_prio=20):
- event = CTFWriter.Event(sched_switch)
- clock.time = time_ms * 1000000
- set_char_array(event.payload("_prev_comm"), prev_comm)
- set_int(event.payload("_prev_tid"), prev_tid)
- set_int(event.payload("_prev_prio"), prev_prio)
- set_int(event.payload("_prev_state"), prev_state)
- set_char_array(event.payload("_next_comm"), next_comm)
- set_int(event.payload("_next_tid"), next_tid)
- set_int(event.payload("_next_prio"), next_prio)
- set_int(event.payload("_cpu_id"), cpu_id)
- stream.append_event(event)
- stream.flush()
-
-
-def sched_switch_50pc(start_time_ms, end_time_ms, cpu_id, period,
- comm1, tid1, comm2, tid2):
- current = start_time_ms
- while current < end_time_ms:
- write_sched_switch(current, cpu_id, comm1, tid1, comm2, tid2)
- current += period
- write_sched_switch(current, cpu_id, comm2, tid2, comm1, tid1)
- current += period
-
-
-def sched_switch_rr(start_time_ms, end_time_ms, cpu_id, period, task_list):
- current = start_time_ms
- while current < end_time_ms:
- current_task = task_list[len(task_list) - 1]
- for i in task_list:
- write_sched_switch(current, cpu_id, current_task[0],
- current_task[1], i[0], i[1])
- current_task = i
- current += period
-
-write_sched_switch(1393345613900, 5, "swapper/5", 0, "prog100pc-cpu5", 42)
-sched_switch_50pc(1393345614000, 1393345615000, 0, 100,
- "swapper/0", 0, "prog50pc-cpu0", 30664)
-sched_switch_50pc(1393345615000, 1393345616000, 1, 100,
- "swapper/1", 0, "prog50pc-cpu1", 30665)
-sched_switch_50pc(1393345616000, 1393345617000, 2, 100,
- "swapper/2", 0, "prog50pc-cpu2", 30666)
-sched_switch_50pc(1393345617000, 1393345618000, 3, 100,
- "swapper/3", 0, "prog50pc-cpu3", 30667)
-sched_switch_50pc(1393345618000, 1393345619000, 0, 100,
- "swapper/0", 0, "prog50pc-cpu0", 30664)
-
-proc_list = [("prog1", 10), ("prog2", 11), ("prog3", 12), ("prog4", 13)]
-sched_switch_rr(1393345619000, 1393345622000, 4, 100, proc_list)
-write_sched_switch(1393345622300, 5, "prog100pc-cpu5", 42, "swapper/5", 0)