#!/usr/bin/env python3
#
# The MIT License (MIT)
#
# Copyright (C) 2015 - Julien Desfossez <jdesfosez@efficios.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

from .command import Command
import lttnganalyses.syscalls
from linuxautomaton import common, sv
from ascii_graph import Pyasciigraph
import operator
import statistics


class IoAnalysis(Command):
    _VERSION = '0.1.0'
    _DESC = """The I/O command."""

    def __init__(self):
        super().__init__(self._add_arguments,
                         enable_proc_filter_args=True,
                         enable_max_min_args=True,
                         enable_max_min_size_arg=True,
                         enable_log_arg=True)

    def _validate_transform_args(self):
        self._arg_extra = self._args.extra
        self._arg_usage = self._args.usage
        self._arg_stats = self._args.latencystats
        self._arg_freq = self._args.latencyfreq
        self._arg_freq_resolution = self._args.freq_resolution

    def _default_args(self, stats, log, freq, usage):
        if stats:
            self._arg_stats = True
        if log:
            self._arg_log = True
        if freq:
            self._arg_freq = True
        if usage:
            self._arg_usage = True

    def run(self, stats=False, log=False, freq=False, usage=False):
        # parse arguments first
        self._parse_args()
        # validate, transform and save specific arguments
        self._validate_transform_args()
        # handle the default args for different executables
        self._default_args(stats, log, freq, usage)
        # open the trace
        self._open_trace()
        # create the appropriate analysis/analyses
        self._create_analysis()
        # run the analysis
        self._run_analysis(self._reset_total, self._refresh,
                           break_cb=self._breakcb)
        # process the results
        self._compute_stats()
        # print results
        self._print_results(self.start_ns, self.trace_end_ts, final=1)
        # close the trace
        self._close_trace()

    def run_stats(self):
        self.run(stats=True)

    def run_log(self):
        self.run(log=True)

    def run_freq(self):
        self.run(freq=True)

    def run_usage(self):
        self.run(usage=True)

    def _create_analysis(self):
        self._analysis = lttnganalyses.syscalls.SyscallsAnalysis(
            self._automaton.state)
        self.state = self._automaton.state

    def _compute_stats(self):
        pass

    def _breakcb(self):
        if len(self.state.pending_syscalls) > 0:
            return False
        return True

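    # Periodic output: a sketch of the intended flow (assuming the base
    # Command class calls the refresh callback at the end of each interval)
    # is that _refresh() prints the statistics gathered since the previous
    # interval and _reset_total() then clears the counters for the next one.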
    def _refresh(self, begin, end):
        self._compute_stats()
        self._print_results(begin, end, final=0)
        self._reset_total(end)

    def add_fd_dict(self, tid, fd, files):
        if fd.read == 0 and fd.write == 0:
            return
        if fd.filename.startswith("pipe") or \
                fd.filename.startswith("socket") or \
                fd.filename.startswith("anon_inode") or \
                fd.filename.startswith("unknown"):
            filename = "%s (%s)" % (fd.filename, tid.comm)
            files[filename] = {}
            files[filename]["read"] = fd.read
            files[filename]["write"] = fd.write
            files[filename]["name"] = filename
            files[filename]["other"] = ["fd %d in %s (%d)" % (fd.fd,
                                        tid.comm, tid.pid)]
        else:
            # merge counters of shared files
            filename = fd.filename
            if filename not in files.keys():
                files[filename] = {}
                files[filename]["read"] = fd.read
                files[filename]["write"] = fd.write
                files[filename]["name"] = filename
                files[filename]["other"] = ["fd %d in %s (%d)" %
                                            (fd.fd, tid.comm, tid.pid)]
                files[filename]["tids"] = [tid.tid]
            else:
                files[filename]["read"] += fd.read
                files[filename]["write"] += fd.write
                files[filename]["other"].append("fd %d in %s (%d)" %
                                                (fd.fd, tid.comm,
                                                 tid.pid))

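    # The dict built by add_fd_dict()/create_files_dict() maps a file name to
    # its aggregated counters.  A hypothetical entry (illustrative values
    # only) looks like:
    #
    #   files["/var/log/syslog"] = {
    #       "name": "/var/log/syslog",
    #       "read": 4096,    # bytes read through this file
    #       "write": 512,    # bytes written through this file
    #       "other": ["fd 3 in rsyslogd (612)"],
    #       "tids": [612],
    #   }
    #
    # Pipe, socket, anon_inode and unknown FDs are keyed per process instead,
    # so their counters are not merged across processes.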
    def create_files_dict(self):
        files = {}
        for tid in self.state.tids.values():
            if not self.filter_process(tid):
                continue
            for fd in tid.fds.values():
                self.add_fd_dict(tid, fd, files)
            for fd in tid.closed_fds.values():
                self.add_fd_dict(tid, fd, files)
        return files

    # iotop functions
    def iotop_output_print_file_read(self, files):
        count = 0
        limit = self._arg_limit
        graph = Pyasciigraph()
        values = []
        sorted_f = sorted(files.items(), key=lambda files: files[1]['read'],
                          reverse=True)
        for f in sorted_f:
            if f[1]["read"] == 0:
                continue
            info_fmt = "{:>10}".format(common.convert_size(f[1]["read"],
                                       padding_after=True))
            values.append(("%s %s %s" % (info_fmt,
                                         f[1]["name"],
                                         str(f[1]["other"])[1:-1]),
                           f[1]["read"]))
            count = count + 1
            if limit > 0 and count >= limit:
                break
        for line in graph.graph('Files Read', values, sort=2,
                                with_value=False):
            print(line)
    def iotop_output_print_file_write(self, files):
        # Compute files written
        count = 0
        limit = self._arg_limit
        graph = Pyasciigraph()
        values = []
        sorted_f = sorted(files.items(), key=lambda files: files[1]['write'],
                          reverse=True)
        for f in sorted_f:
            if f[1]["write"] == 0:
                continue
            info_fmt = "{:>10}".format(common.convert_size(f[1]["write"],
                                       padding_after=True))
            values.append(("%s %s %s" % (info_fmt,
                                         f[1]["name"],
                                         str(f[1]["other"])[1:-1]),
                           f[1]["write"]))
            count = count + 1
            if limit > 0 and count >= limit:
                break
        for line in graph.graph('Files Write', values, sort=2,
                                with_value=False):
            print(line)

    def iotop_output_file_read_write(self):
        files = self.create_files_dict()
        self.iotop_output_print_file_read(files)
        self.iotop_output_print_file_write(files)

    def filter_process(self, proc):
        if self._arg_proc_list and proc.comm not in self._arg_proc_list:
            return False
        if self._arg_pid_list and str(proc.pid) not in self._arg_pid_list:
            return False
        return True

    def filter_size(self, size):
        # don't filter sync and open
        if size is None:
            return True
        if self._arg_maxsize is not None and size > self._arg_maxsize:
            return False
        if self._arg_minsize is not None and size < self._arg_minsize:
            return False
        return True

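    # Note on units: request durations (rq.duration) are tracked in
    # nanoseconds while the --min/--max latency arguments are given in
    # microseconds, hence the division by 1000 below.  Illustrative example:
    # with --min 50, a 30000 ns (30 usec) request is filtered out.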
    def filter_latency(self, duration):
        if self._arg_max is not None and (duration/1000) > self._arg_max:
            return False
        if self._arg_min is not None and (duration/1000) < self._arg_min:
            return False
        return True

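    # Per-process output: each bar shows the total bytes read (or written),
    # followed by the per-category breakdown kept by the state machine:
    # "file" (disk_read/disk_write), "net" (net_read/net_write) and
    # "unknown" (unk_read/unk_write) for FDs whose type could not be
    # resolved.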
    def iotop_output_read(self):
        count = 0
        limit = self._arg_limit
        graph = Pyasciigraph()
        values = []
        for tid in sorted(self.state.tids.values(),
                          key=operator.attrgetter('read'), reverse=True):
            if not self.filter_process(tid):
                continue
            info_fmt = "{:>10} {:<25} {:>9} file {:>9} net {:>9} unknown"
            values.append((info_fmt.format(
                           common.convert_size(tid.read, padding_after=True),
                           "%s (%d)" % (tid.comm, tid.pid),
                           common.convert_size(tid.disk_read,
                                               padding_after=True),
                           common.convert_size(tid.net_read,
                                               padding_after=True),
                           common.convert_size(tid.unk_read,
                                               padding_after=True)),
                           tid.read))
            count = count + 1
            if limit > 0 and count >= limit:
                break
        for line in graph.graph('Per-process I/O Read', values,
                                with_value=False):
            print(line)

    def iotop_output_write(self):
        count = 0
        limit = self._arg_limit
        graph = Pyasciigraph()
        values = []
        for tid in sorted(self.state.tids.values(),
                          key=operator.attrgetter('write'), reverse=True):
            if not self.filter_process(tid):
                continue
            info_fmt = "{:>10} {:<25} {:>9} file {:>9} net {:>9} unknown "
            values.append((info_fmt.format(
                           common.convert_size(tid.write, padding_after=True),
                           "%s (%d)" % (tid.comm, tid.pid),
                           common.convert_size(tid.disk_write,
                                               padding_after=True),
                           common.convert_size(tid.net_write,
                                               padding_after=True),
                           common.convert_size(tid.unk_write,
                                               padding_after=True)),
                           tid.write))
            count = count + 1
            if limit > 0 and count >= limit:
                break
        for line in graph.graph('Per-process I/O Write', values,
                                with_value=False):
            print(line)

    def iotop_output_disk_read(self):
        count = 0
        limit = self._arg_limit
        graph = Pyasciigraph()
        values = []
        for tid in sorted(self.state.tids.values(),
                          key=operator.attrgetter('block_read'),
                          reverse=True):
            if not self.filter_process(tid):
                continue
            if tid.block_read == 0:
                continue
            info_fmt = "{:>10} {:<22}"
            values.append((info_fmt.format(
                           common.convert_size(tid.block_read,
                                               padding_after=True),
                           "%s (pid=%d)" % (tid.comm, tid.pid)),
                           tid.block_read))
            count = count + 1
            if limit > 0 and count >= limit:
                break
        for line in graph.graph('Block I/O Read', values, with_value=False):
            print(line)

    def iotop_output_disk_write(self):
        count = 0
        limit = self._arg_limit
        graph = Pyasciigraph()
        values = []
        for tid in sorted(self.state.tids.values(),
                          key=operator.attrgetter('block_write'),
                          reverse=True):
            if not self.filter_process(tid):
                continue
            if tid.block_write == 0:
                continue
            info_fmt = "{:>10} {:<22}"
            values.append((info_fmt.format(
                           common.convert_size(tid.block_write,
                                               padding_after=True),
                           "%s (pid=%d)" % (tid.comm, tid.pid)),
                           tid.block_write))
            count = count + 1
            if limit > 0 and count >= limit:
                break
        for line in graph.graph('Block I/O Write', values, with_value=False):
            print(line)

    def iotop_output_nr_sector(self):
        graph = Pyasciigraph()
        values = []
        for disk in sorted(self.state.disks.values(),
                           key=operator.attrgetter('nr_sector'),
                           reverse=True):
            if disk.nr_sector == 0:
                continue
            values.append((disk.prettyname, disk.nr_sector))
        for line in graph.graph('Disk nr_sector', values, unit=" sectors"):
            print(line)

    def iotop_output_nr_requests(self):
        graph = Pyasciigraph()
        values = []
        for disk in sorted(self.state.disks.values(),
                           key=operator.attrgetter('nr_requests'),
                           reverse=True):
            if disk.nr_sector == 0:
                continue
            values.append((disk.prettyname, disk.nr_requests))
        for line in graph.graph('Disk nr_requests', values, unit=" requests"):
            print(line)

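    # The per-disk value below is the average completion time of a block
    # request: request_time / completed_requests, scaled by MSEC_PER_NSEC
    # (assuming request_time is accumulated in nanoseconds, which the
    # millisecond unit of the graph implies).  Illustrative numbers:
    # 8,000,000 ns over 4 completed requests gives 2,000,000 ns, i.e. 2.000 ms.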
    def iotop_output_dev_latency(self):
        graph = Pyasciigraph()
        values = []
        for disk in self.state.disks.values():
            if disk.completed_requests == 0:
                continue
            total = (disk.request_time / disk.completed_requests) \
                / common.MSEC_PER_NSEC
            total = float("%0.03f" % total)
            values.append(("%s" % disk.prettyname, total))
        for line in graph.graph('Disk request time/sector', values, sort=2,
                                unit=" ms"):
            print(line)

    def iotop_output_net_recv_bytes(self):
        graph = Pyasciigraph()
        values = []
        for iface in sorted(self.state.ifaces.values(),
                            key=operator.attrgetter('recv_bytes'),
                            reverse=True):
            values.append(("%s %s" % (common.convert_size(iface.recv_bytes),
                                      iface.name),
                           iface.recv_bytes))
        for line in graph.graph('Network recv_bytes', values,
                                with_value=False):
            print(line)

    def iotop_output_net_sent_bytes(self):
        graph = Pyasciigraph()
        values = []
        for iface in sorted(self.state.ifaces.values(),
                            key=operator.attrgetter('send_bytes'),
                            reverse=True):
            values.append(("%s %s" % (common.convert_size(iface.send_bytes),
                                      iface.name),
                           iface.send_bytes))
        for line in graph.graph('Network sent_bytes', values,
                                with_value=False):
            print(line)

    def iotop_output(self):
        self.iotop_output_read()
        self.iotop_output_write()
        self.iotop_output_file_read_write()
        self.iotop_output_disk_read()
        self.iotop_output_disk_write()
        self.iotop_output_nr_sector()
        self.iotop_output_nr_requests()
        self.iotop_output_dev_latency()
        self.iotop_output_net_recv_bytes()
        self.iotop_output_net_sent_bytes()
#        self.output_latencies()

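    # Frequency distribution: the [_min, _max] range (in usec) is split into
    # `res` equal buckets of width step = (_max - _min) / res, and each
    # request duration (converted from ns to usec) increments the bucket
    # min(int((v - _min) / step), res - 1).  Illustrative example: with
    # _min=0, _max=1000 and res=20, step is 50 usec, so a 237 usec request
    # lands in bucket 4, drawn as the "200.000" bar.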
    def iolatency_freq_histogram(self, _min, _max, res, rq_list, title):
        step = (_max - _min) / res
        if step == 0:
            return
        buckets = []
        values = []
        graph = Pyasciigraph()
        for i in range(res):
            buckets.append(i * step)
            values.append(0)
        for i in rq_list:
            v = i / 1000
            b = min(int((v-_min)/step), res - 1)
            values[b] += 1
        g = []
        i = 0
        for v in values:
            g.append(("%0.03f" % (i * step + _min), v))
            i += 1
        for line in graph.graph(title, g, info_before=True):
            print(line)
        print("")

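    # compute_disk_stats() caches the per-disk statistics on the disk object:
    # min, max and total are converted from nanoseconds to microseconds,
    # rq_values keeps the raw nanosecond durations for the frequency
    # histogram, and stdev (also in usec) falls back to "?" when there are
    # two requests or fewer.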
    def compute_disk_stats(self, dev):
        _max = 0
        _min = -1
        total = 0
        values = []
        count = len(dev.rq_list)
        if count == 0:
            return
        for rq in dev.rq_list:
            if rq.duration > _max:
                _max = rq.duration
            if _min == -1 or rq.duration < _min:
                _min = rq.duration
            total += rq.duration
            values.append(rq.duration)
        if count > 2:
            stdev = statistics.stdev(values) / 1000
        else:
            stdev = "?"
        dev.min = _min / 1000
        dev.max = _max / 1000
        dev.total = total / 1000
        dev.count = count
        dev.rq_values = values
        dev.stdev = stdev

    # iolatency functions
    def iolatency_output_disk(self):
        for dev in self.state.disks.keys():
            d = self.state.disks[dev]
            if d.max is None:
                self.compute_disk_stats(d)
            if d.count is not None:
                self.iolatency_freq_histogram(d.min, d.max,
                                              self._arg_freq_resolution,
                                              d.rq_values,
                                              "Frequency distribution for "
                                              "disk %s (usec)" %
                                              (d.prettyname))

    def iolatency_output(self):
        self.iolatency_output_disk()

#    def output_latencies(self):
#        graph = Pyasciigraph()
#        for proc in self.latency_hist.keys():
#            values = []
#            for v in self.latency_hist[proc]:
#                values.append(("%s" % (v[0]), v[1]))
#            for line in graph.graph('%s requests latency (ms)' % proc,
#                                    values, unit=" ms"):
#                print(line)

    def iostats_minmax(self, duration, current_min, current_max):
        _min = current_min
        _max = current_max
        if current_min is None or duration < current_min:
            _min = duration
        if duration > current_max:
            _max = duration
        return (_min, _max)

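    # One line of the latency statistics table: min, average and max are
    # printed in microseconds, so the nanosecond values are divided by 1000
    # (and the average by count * 1000).  Illustrative example: count=4 and
    # total=8,000,000 ns yield avg = 8,000,000 / (4 * 1000) = "2000.000".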
    def iostats_syscalls_line(self, fmt, name, count, _min, _max, total, rq):
        if count < 2:
            stdev = "?"
        else:
            stdev = "%0.03f" % (statistics.stdev(rq) / 1000)
        if count < 1:
            avg = "0.000"
        else:
            avg = "%0.03f" % (total / (count * 1000))
        if _min is None:
            _min = 0
        _min = "%0.03f" % (_min / 1000)
        _max = "%0.03f" % (_max / 1000)
        print(fmt.format(name, count, _min, avg, _max, stdev))

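    # Accounting: each I/O syscall request is classified by operation
    # (read/write/sync/open) and, for each class, the Syscalls_stats object
    # accumulates a count, a total duration, the list of durations (for the
    # stdev and frequency views), the raw requests and the min/max pair.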
    def account_syscall_iorequests(self, s, iorequests):
        for rq in iorequests:
            # filter out if completely out of range but accept the
            # union to show the real begin/end time
            if self._arg_begin and self._arg_end and rq.end and \
                    rq.begin > self._arg_end:
                continue
            if rq.iotype != sv.IORequest.IO_SYSCALL:
                continue
            if not self.filter_size(rq.size):
                continue
            if not self.filter_latency(rq.duration):
                continue
            if rq.operation == sv.IORequest.OP_READ:
                s.read_count += 1
                s.read_total += rq.duration
                s.read_rq.append(rq.duration)
                s.all_read.append(rq)
                s.read_min, s.read_max = self.iostats_minmax(
                    rq.duration, s.read_min, s.read_max)
            elif rq.operation == sv.IORequest.OP_WRITE:
                s.write_count += 1
                s.write_total += rq.duration
                s.write_rq.append(rq.duration)
                s.all_write.append(rq)
                s.write_min, s.write_max = self.iostats_minmax(
                    rq.duration, s.write_min, s.write_max)
            elif rq.operation == sv.IORequest.OP_SYNC:
                s.sync_count += 1
                s.sync_total += rq.duration
                s.sync_rq.append(rq.duration)
                s.all_sync.append(rq)
                s.sync_min, s.sync_max = self.iostats_minmax(
                    rq.duration, s.sync_min, s.sync_max)
            elif rq.operation == sv.IORequest.OP_OPEN:
                s.open_count += 1
                s.open_total += rq.duration
                s.open_rq.append(rq.duration)
                s.all_open.append(rq)
                s.open_min, s.open_max = self.iostats_minmax(
                    rq.duration, s.open_min, s.open_max)

    def compute_syscalls_latency_stats(self, end_ns):
        s = sv.Syscalls_stats()
        for tid in self.state.tids.values():
            if not self.filter_process(tid):
                continue
            self.account_syscall_iorequests(s, tid.iorequests)
            for fd in tid.fds.values():
                self.account_syscall_iorequests(s, fd.iorequests)
            for fd in tid.closed_fds.values():
                self.account_syscall_iorequests(s, fd.iorequests)
        return s

    def iostats_output_syscalls(self):
        s = self.syscalls_stats
        print("\nSyscalls latency statistics (usec):")
        fmt = "{:<14} {:>14} {:>14} {:>14} {:>14} {:>14}"
        print(fmt.format("Type", "Count", "Min", "Average",
                         "Max", "Stdev"))
        print("-" * 89)
        self.iostats_syscalls_line(fmt, "Open", s.open_count, s.open_min,
                                   s.open_max, s.open_total, s.open_rq)
        self.iostats_syscalls_line(fmt, "Read", s.read_count, s.read_min,
                                   s.read_max, s.read_total, s.read_rq)
        self.iostats_syscalls_line(fmt, "Write", s.write_count, s.write_min,
                                   s.write_max, s.write_total, s.write_rq)
        self.iostats_syscalls_line(fmt, "Sync", s.sync_count, s.sync_min,
                                   s.sync_max, s.sync_total, s.sync_rq)

    def iolatency_syscalls_output(self):
        s = self.syscalls_stats
        print("")
        if s.open_count > 0:
            self.iolatency_freq_histogram(s.open_min/1000, s.open_max/1000,
                                          self._arg_freq_resolution, s.open_rq,
                                          "Open latency distribution (usec)")
        if s.read_count > 0:
            self.iolatency_freq_histogram(s.read_min/1000, s.read_max/1000,
                                          self._arg_freq_resolution, s.read_rq,
                                          "Read latency distribution (usec)")
        if s.write_count > 0:
            self.iolatency_freq_histogram(s.write_min/1000, s.write_max/1000,
                                          self._arg_freq_resolution,
                                          s.write_rq,
                                          "Write latency distribution (usec)")
        if s.sync_count > 0:
            self.iolatency_freq_histogram(s.sync_min/1000, s.sync_max/1000,
                                          self._arg_freq_resolution, s.sync_rq,
                                          "Sync latency distribution (usec)")

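    # Detailed request listing shared by the "top" and "log" views: one line
    # per syscall with its time range, duration (usec), size, process and
    # file.  Rows whose begin or end falls outside the requested time range
    # are flagged with "*", and the legend is printed once at the bottom.
    # The "top" views are duration-sorted and truncated to --limit entries;
    # the log view is sorted by begin time and unlimited.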
    def iolatency_syscalls_list_output(self, title, rq_list,
                                       sortkey, reverse):
        limit = self._arg_limit
        count = 0
        outrange_legend = False
        if len(rq_list) == 0:
            return
        print(title)
        if self._arg_extra:
            extra_fmt = "{:<48}"
            extra_title = "{:<8} {:<8} {:<8} {:<8} {:<8} {:<8} ".format(
                "Dirtied", "Alloc", "Free", "Written", "Kswap", "Cleared")
        else:
            extra_fmt = "{:<0}"
            extra_title = ""
        title_fmt = "{:<19} {:<20} {:<16} {:<23} {:<5} {:<24} {:<8} " + \
            extra_fmt + "{:<14}"
        fmt = "{:<40} {:<16} {:>16} {:>11} {:<24} {:<8} " + \
            extra_fmt + "{:<14}"
        print(title_fmt.format("Begin", "End", "Name", "Duration (usec)",
                               "Size", "Proc", "PID", extra_title,
                               "Filename"))
        for rq in sorted(rq_list,
                         key=operator.attrgetter(sortkey), reverse=reverse):
            # only limit the output if in the "top" view
            if reverse and count > limit:
                break
            if rq.size is None:
                size = "N/A"
            else:
                size = common.convert_size(rq.size)
            if self._arg_extra:
                extra = "{:<8} {:<8} {:<8} {:<8} {:<8} {:<8} ".format(
                    rq.dirty, rq.page_alloc, rq.page_free, rq.page_written,
                    rq.woke_kswapd, rq.page_cleared)
            else:
                extra = ""
            name = rq.name.replace("syscall_entry_", "").replace("sys_", "")
            if rq.fd is None:
                filename = "None"
                fd = "None"
            else:
                filename = rq.fd.filename
                fd = rq.fd.fd
            end = common.ns_to_hour_nsec(rq.end, self._arg_multi_day,
                                         self._arg_gmt)

            outrange = " "
            duration = rq.duration
            if self._arg_begin and rq.begin < self._arg_begin:
                outrange = "*"
                outrange_legend = True
            if self._arg_end and rq.end > self._arg_end:
                outrange = "*"
                outrange_legend = True

            print(fmt.format("[" + common.ns_to_hour_nsec(
                rq.begin, self._arg_multi_day, self._arg_gmt) + "," +
                end + "]" + outrange,
                name,
                "%0.03f" % (duration/1000) + outrange,
                size, rq.proc.comm,
                rq.proc.pid, extra,
                "%s (fd=%s)" % (filename, fd)))
            count += 1
        if outrange_legend:
            print("*: Syscalls started and/or completed outside of the "
                  "range specified")

    def iolatency_syscalls_top_output(self):
        s = self.syscalls_stats
        self.iolatency_syscalls_list_output(
            "\nTop open syscall latencies (usec)", s.all_open,
            "duration", True)
        self.iolatency_syscalls_list_output(
            "\nTop read syscall latencies (usec)", s.all_read,
            "duration", True)
        self.iolatency_syscalls_list_output(
            "\nTop write syscall latencies (usec)", s.all_write,
            "duration", True)
        self.iolatency_syscalls_list_output(
            "\nTop sync syscall latencies (usec)", s.all_sync,
            "duration", True)

    def iolatency_syscalls_log_output(self):
        s = self.syscalls_stats
        self.iolatency_syscalls_list_output(
            "\nLog of all I/O system calls",
            s.all_open + s.all_read + s.all_write + s.all_sync,
            "begin", False)

    # iostats functions
    def iostats_output_disk(self):
        # TODO same with network
        if len(self.state.disks.keys()) == 0:
            return
        print("\nDisk latency statistics (usec):")
        fmt = "{:<14} {:>14} {:>14} {:>14} {:>14} {:>14}"
        print(fmt.format("Name", "Count", "Min", "Average", "Max", "Stdev"))
        print("-" * 89)

        for dev in self.state.disks.keys():
            d = self.state.disks[dev]
            if d.max is None:
                self.compute_disk_stats(d)
            if d.count is not None:
                self.iostats_syscalls_line(fmt, d.prettyname, d.count, d.min,
                                           d.max, d.total, d.rq_values)

    def iostats_output(self):
        self.iostats_output_syscalls()
        self.iostats_output_disk()

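    # Result printing: the --usage, --latencystats, --latencyfreq and --log
    # flags (or the per-command defaults set in run()) select which of the
    # iotop, iostats, iolatency and log outputs are produced for the current
    # time range.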
    def _print_results(self, begin_ns, end_ns, final=0):
        print('Timerange: [%s, %s]' % (
            common.ns_to_hour_nsec(begin_ns, gmt=self._arg_gmt,
                                   multi_day=True),
            common.ns_to_hour_nsec(end_ns, gmt=self._arg_gmt,
                                   multi_day=True)))
        if self._arg_usage:
            self.iotop_output()
        self.syscalls_stats = self.compute_syscalls_latency_stats(end_ns)
        if self._arg_stats:
            self.iostats_output()
            self.iolatency_syscalls_top_output()
        if self._arg_freq:
            self.iolatency_syscalls_output()
            self.iolatency_output()
        if self._arg_log:
            self.iolatency_syscalls_log_output()

    def _reset_total(self, start_ts):
        for dev in self.state.disks.keys():
            self.state.disks[dev].init_counts()

        for iface in self.state.ifaces.keys():
            self.state.ifaces[iface].init_counts()

        for tid in self.state.tids.values():
            for fd in tid.fds.values():
                fd.init_counts()
            for fd in tid.closed_fds.values():
                fd.init_counts()
            tid.init_counts()

    def _add_arguments(self, ap):
        ap.add_argument('--usage', action="store_true",
                        help='Show the I/O usage')
        ap.add_argument('--latencystats', action="store_true",
                        help='Show the I/O latency statistics')
        ap.add_argument('--latencyfreq', action="store_true",
                        help='Show the I/O latency frequency distribution')
        ap.add_argument('--freq-resolution', type=int, default=20,
                        help='Frequency distribution resolution '
                             '(default 20)')
        ap.add_argument('--extra', type=str, default=0,
                        help='Show extra information in stats (beta)')
        # specific argument


# entry point
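# Module-level entry points, one per executable.  These are presumably wired
# up as console_scripts entry points in the package setup (an assumption; the
# mapping is not visible in this file): each one instantiates IoAnalysis and
# runs it with the matching output enabled by default.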
def runstats():
    # create command
    iocmd = IoAnalysis()
    # execute command
    iocmd.run_stats()


def runlog():
    # create command
    iocmd = IoAnalysis()
    # execute command
    iocmd.run_log()


def runfreq():
    # create command
    iocmd = IoAnalysis()
    # execute command
    iocmd.run_freq()


def runusage():
    # create command
    iocmd = IoAnalysis()
    # execute command
    iocmd.run_usage()