Refactor io top and log views
[deliverable/lttng-analyses.git] / lttnganalysescli / lttnganalysescli / io.py
1 #!/usr/bin/env python3
2 #
3 # The MIT License (MIT)
4 #
5 # Copyright (C) 2015 - Julien Desfossez <jdesfossez@efficios.com>
6 # 2015 - Antoine Busque <abusque@efficios.com>
7 #
8 # Permission is hereby granted, free of charge, to any person obtaining a copy
9 # of this software and associated documentation files (the "Software"), to deal
10 # in the Software without restriction, including without limitation the rights
11 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
12 # copies of the Software, and to permit persons to whom the Software is
13 # furnished to do so, subject to the following conditions:
14 #
15 # The above copyright notice and this permission notice shall be included in
16 # all copies or substantial portions of the Software.
17 #
18 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
21 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
23 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
24 # SOFTWARE.
25
26 from .command import Command
27 import lttnganalyses.io
28 from linuxautomaton import common, sv
29 from ascii_graph import Pyasciigraph
30 import operator
31 import statistics
32
33
class IoAnalysisCommand(Command):
    """Command-line front end for the LTTng I/O analyses.

    Drives trace parsing and prints the I/O views selected by the CLI
    flags: usage top, latency top/log/frequency, and latency statistics.
    """

    _VERSION = '0.1.0'
    _DESC = """The I/O command."""

    # Row format shared by the syscall and disk latency statistics
    # tables: name, count, min, average, max, stdev columns.
    _LATENCY_STATS_FORMAT = '{:<14} {:>14} {:>14} {:>14} {:>14} {:>14}'
    # Horizontal rule printed under the statistics table headers.
    _SECTION_SEPARATOR_STRING = '-' * 89
40
    def __init__(self):
        # Register this command's argument callback plus the optional
        # argument groups provided by the base Command: process filters,
        # min/max latency, min/max request size, and the --log flag.
        super().__init__(self._add_arguments,
                         enable_proc_filter_args=True,
                         enable_max_min_args=True,
                         enable_max_min_size_arg=True,
                         enable_log_arg=True)
47
48 def _validate_transform_args(self):
49 self._arg_usage = self._args.usage
50 self._arg_stats = self._args.latencystats
51 self._arg_latencytop = self._args.latencytop
52 self._arg_freq = self._args.latencyfreq
53 self._arg_freq_resolution = self._args.freq_resolution
54
55 def _default_args(self, stats, log, freq, usage, latencytop):
56 if stats:
57 self._arg_stats = True
58 if log:
59 self._arg_log = True
60 if freq:
61 self._arg_freq = True
62 if usage:
63 self._arg_usage = True
64 if latencytop:
65 self._arg_latencytop = True
66
    def run(self, stats=False, log=False, freq=False, usage=False,
            latencytop=False):
        """Execute the full analysis pipeline.

        The keyword flags force-enable the corresponding output views
        on top of whatever was requested on the command line; they are
        set by the per-view wrappers (run_stats, run_log, ...).
        """
        # parse arguments first
        self._parse_args()
        # validate, transform and save specific arguments
        self._validate_transform_args()
        # handle the default args for different executables
        self._default_args(stats, log, freq, usage, latencytop)
        # open the trace
        self._open_trace()
        # create the appropriate analysis/analyses
        self._create_analysis()
        # run the analysis
        self._run_analysis(self._reset_total, self._refresh)
        # print results
        self._print_results(self.start_ns, self.trace_end_ts)
        # close the trace
        self._close_trace()
85
    def run_stats(self):
        # Per-view wrapper used by the module-level entry points.
        self.run(stats=True)

    def run_latencytop(self):
        # Per-view wrapper used by the module-level entry points.
        self.run(latencytop=True)

    def run_log(self):
        # Per-view wrapper used by the module-level entry points.
        self.run(log=True)

    def run_freq(self):
        # Per-view wrapper used by the module-level entry points.
        self.run(freq=True)

    def run_usage(self):
        # Per-view wrapper used by the module-level entry points.
        self.run(usage=True)
100
    def _create_analysis(self):
        # Build the I/O analysis over the shared automaton state.
        self._analysis = lttnganalyses.io.IoAnalysis(self.state)
103
    def _refresh(self, begin, end):
        # Period callback: print the results accumulated over
        # [begin, end], then reset the analysis for the next period.
        self._print_results(begin, end)
        self._reset_total(end)
107
108 # Filter predicates
109 def _filter_size(self, size):
110 if size is None:
111 return True
112 if self._arg_maxsize is not None and size > self._arg_maxsize:
113 return False
114 if self._arg_minsize is not None and size < self._arg_minsize:
115 return False
116 return True
117
118 def _filter_latency(self, duration):
119 if self._arg_max is not None and (duration/1000) > self._arg_max:
120 return False
121 if self._arg_min is not None and (duration/1000) < self._arg_min:
122 return False
123 return True
124
    def _filter_time_range(self, begin, end):
        # Reject requests that began after the --end boundary.
        # NOTE(review): `end` is only tested for truthiness here and is
        # never compared against the range; presumably requests ending
        # before --begin are pruned elsewhere -- confirm.
        return not (self._arg_begin and self._arg_end and end and
                    begin > self._arg_end)
128
129 def _filter_io_request(self, io_rq):
130 proc = self._analysis.tids[io_rq.tid]
131 return self._filter_process(proc) and \
132 self._filter_size(io_rq.size) and \
133 self._filter_latency(io_rq.duration) and \
134 self._filter_time_range(io_rq.begin_ts, io_rq.end_ts)
135
136 def _is_io_rq_out_of_range(self, io_rq):
137 return self._arg_begin and io_rq.begin_ts < self._arg_begin or \
138 self._arg_end and io_rq.end_ts > self._arg_end
139
140 def _print_ascii_graph(self, input_list, get_datum_cb, graph_label,
141 graph_args={}):
142 """Print an ascii graph for given data
143
144 This method wraps the ascii_graph module and facilitates the
145 printing of a graph with a limited number of lines.
146
147 Args:
148 input_list (list): A list of objects from which the data
149 for the graph will be generated.
150
151 get_datum_cb (function): function that takes a single
152 object from the input list as an argument, and returns a
153 datum tuple for the graph, of the form (string, int). The
154 string element is printed as is in the graph, and the int
155 is the numeric value corresponding to this graph entry.
156
157 graph_label (string): Label used to identify the printed
158 graph.
159
160 graph_args (dict, optional): Dict of keyword args to be
161 passed to the graph() function as is.
162 """
163 count = 0
164 limit = self._arg_limit
165 graph = Pyasciigraph()
166 data = []
167
168 for elem in input_list:
169 datum = get_datum_cb(elem)
170 if datum is not None:
171 data.append(datum)
172 count += 1
173 if limit is not None and count >= limit:
174 break
175
176 for line in graph.graph(graph_label, data, **graph_args):
177 print(line)
178
179 # I/O Top output methods
180 def _get_read_datum(self, proc_stats):
181 if not self._filter_process(proc_stats):
182 return None
183
184 if proc_stats.pid is None:
185 pid_str = 'unknown (tid=%d)' % (proc_stats.tid)
186 else:
187 pid_str = str(proc_stats.pid)
188
189 format_str = '{:>10} {:<25} {:>9} file {:>9} net {:>9} unknown'
190 output_str = format_str.format(
191 common.convert_size(proc_stats.total_read, padding_after=True),
192 '%s (%s)' % (proc_stats.comm, pid_str),
193 common.convert_size(proc_stats.disk_read, padding_after=True),
194 common.convert_size(proc_stats.net_read, padding_after=True),
195 common.convert_size(proc_stats.unk_read, padding_after=True))
196
197 return (output_str, proc_stats.total_read)
198
199 def _get_write_datum(self, proc_stats):
200 if not self._filter_process(proc_stats):
201 return None
202
203 if proc_stats.pid is None:
204 pid_str = 'unknown (tid=%d)' % (proc_stats.tid)
205 else:
206 pid_str = str(proc_stats.pid)
207
208 format_str = '{:>10} {:<25} {:>9} file {:>9} net {:>9} unknown'
209 output_str = format_str.format(
210 common.convert_size(proc_stats.total_write, padding_after=True),
211 '%s (%s)' % (proc_stats.comm, pid_str),
212 common.convert_size(proc_stats.disk_write, padding_after=True),
213 common.convert_size(proc_stats.net_write, padding_after=True),
214 common.convert_size(proc_stats.unk_write, padding_after=True))
215
216 return (output_str, proc_stats.total_write)
217
218 def _get_block_read_datum(self, proc_stats):
219 if not self._filter_process(proc_stats) or proc_stats.block_read == 0:
220 return None
221
222 comm = proc_stats.comm
223 if not comm:
224 comm = 'unknown'
225
226 if proc_stats.pid is None:
227 pid_str = 'unknown (tid=%d)' % (proc_stats.tid)
228 else:
229 pid_str = str(proc_stats.pid)
230
231 format_str = '{:>10} {:<22}'
232 output_str = format_str.format(
233 common.convert_size(proc_stats.block_read, padding_after=True),
234 '%s (pid=%s)' % (comm, pid_str))
235
236 return (output_str, proc_stats.block_read)
237
238 def _get_block_write_datum(self, proc_stats):
239 if not self._filter_process(proc_stats) or \
240 proc_stats.block_write == 0:
241 return None
242
243 comm = proc_stats.comm
244 if not comm:
245 comm = 'unknown'
246
247 if proc_stats.pid is None:
248 pid_str = 'unknown (tid=%d)' % (proc_stats.tid)
249 else:
250 pid_str = str(proc_stats.pid)
251
252 format_str = '{:>10} {:<22}'
253 output_str = format_str.format(
254 common.convert_size(proc_stats.block_write, padding_after=True),
255 '%s (pid=%s)' % (comm, pid_str))
256
257 return (output_str, proc_stats.block_write)
258
259 def _get_total_rq_sectors_datum(self, disk):
260 if disk.total_rq_sectors == 0:
261 return None
262
263 return (disk.disk_name, disk.total_rq_sectors)
264
265 def _get_rq_count_datum(self, disk):
266 if disk.rq_count == 0:
267 return None
268
269 return (disk.disk_name, disk.rq_count)
270
271 def _get_avg_disk_latency_datum(self, disk):
272 if disk.rq_count == 0:
273 return None
274
275 avg_latency = ((disk.total_rq_duration / disk.rq_count) /
276 common.MSEC_PER_NSEC)
277 avg_latency = round(avg_latency, 3)
278
279 return ('%s' % disk.disk_name, avg_latency)
280
281 def _get_net_recv_bytes_datum(self, iface):
282 return ('%s %s' % (common.convert_size(iface.recv_bytes), iface.name),
283 iface.recv_bytes)
284
285 def _get_net_sent_bytes_datum(self, iface):
286 return ('%s %s' % (common.convert_size(iface.sent_bytes), iface.name),
287 iface.sent_bytes)
288
289 def _get_file_read_datum(self, file_stats):
290 if file_stats.read == 0:
291 return None
292
293 fd_by_pid_str = ''
294 for pid, fd in file_stats.fd_by_pid.items():
295 comm = self._analysis.tids[pid].comm
296 fd_by_pid_str += 'fd %d in %s (%s) ' % (fd, comm, pid)
297
298 format_str = '{:>10} {} {}'
299 output_str = format_str.format(
300 common.convert_size(file_stats.read, padding_after=True),
301 file_stats.filename,
302 fd_by_pid_str)
303
304 return (output_str, file_stats.read)
305
306 def _get_file_write_datum(self, file_stats):
307 if file_stats.write == 0:
308 return None
309
310 fd_by_pid_str = ''
311 for pid, fd in file_stats.fd_by_pid.items():
312 comm = self._analysis.tids[pid].comm
313 fd_by_pid_str += 'fd %d in %s (%s) ' % (fd, comm, pid)
314
315 format_str = '{:>10} {} {}'
316 output_str = format_str.format(
317 common.convert_size(file_stats.write, padding_after=True),
318 file_stats.filename,
319 fd_by_pid_str)
320
321 return (output_str, file_stats.write)
322
323 def _output_read(self):
324 input_list = sorted(self._analysis.tids.values(),
325 key=operator.attrgetter('total_read'),
326 reverse=True)
327 label = 'Per-process I/O Read'
328 graph_args = {'with_value': False}
329 self._print_ascii_graph(input_list, self._get_read_datum, label,
330 graph_args)
331
332 def _output_write(self):
333 input_list = sorted(self._analysis.tids.values(),
334 key=operator.attrgetter('total_write'),
335 reverse=True)
336 label = 'Per-process I/O Write'
337 graph_args = {'with_value': False}
338 self._print_ascii_graph(input_list, self._get_write_datum, label,
339 graph_args)
340
341 def _output_block_read(self):
342 input_list = sorted(self._analysis.tids.values(),
343 key=operator.attrgetter('block_read'),
344 reverse=True)
345 label = 'Block I/O Read'
346 graph_args = {'with_value': False}
347 self._print_ascii_graph(input_list, self._get_block_read_datum,
348 label, graph_args)
349
350 def _output_block_write(self):
351 input_list = sorted(self._analysis.tids.values(),
352 key=operator.attrgetter('block_write'),
353 reverse=True)
354 label = 'Block I/O Write'
355 graph_args = {'with_value': False}
356 self._print_ascii_graph(input_list, self._get_block_write_datum,
357 label, graph_args)
358
359 def _output_total_rq_sectors(self):
360 input_list = sorted(self._analysis.disks.values(),
361 key=operator.attrgetter('total_rq_sectors'),
362 reverse=True)
363 label = 'Disk requests sector count'
364 graph_args = {'unit': ' sectors'}
365 self._print_ascii_graph(input_list, self._get_total_rq_sectors_datum,
366 label, graph_args)
367
368 def _output_rq_count(self):
369 input_list = sorted(self._analysis.disks.values(),
370 key=operator.attrgetter('rq_count'),
371 reverse=True)
372 label = 'Disk request count'
373 graph_args = {'unit': ' requests'}
374 self._print_ascii_graph(input_list, self._get_rq_count_datum,
375 label, graph_args)
376
377 def _output_avg_disk_latency(self):
378 input_list = self._analysis.disks.values()
379 label = 'Disk request average latency'
380 graph_args = {'unit': ' ms', 'sort': 2}
381 self._print_ascii_graph(input_list, self._get_avg_disk_latency_datum,
382 label, graph_args)
383
384 def _output_net_recv_bytes(self):
385 input_list = sorted(self._analysis.ifaces.values(),
386 key=operator.attrgetter('recv_bytes'),
387 reverse=True)
388 label = 'Network received bytes'
389 graph_args = {'with_value': False}
390 self._print_ascii_graph(input_list, self._get_net_recv_bytes_datum,
391 label, graph_args)
392
393 def _output_net_sent_bytes(self):
394 input_list = sorted(self._analysis.ifaces.values(),
395 key=operator.attrgetter('sent_bytes'),
396 reverse=True)
397 label = 'Network sent bytes'
398 graph_args = {'with_value': False}
399 self._print_ascii_graph(input_list, self._get_net_sent_bytes_datum,
400 label, graph_args)
401
402 def _output_file_read(self, files):
403 input_list = sorted(files.values(),
404 key=lambda file_stats: file_stats.read,
405 reverse=True)
406 label = 'Files read'
407 graph_args = {'with_value': False, 'sort': 2}
408 self._print_ascii_graph(input_list, self._get_file_read_datum,
409 label, graph_args)
410
411 def _output_file_write(self, files):
412 input_list = sorted(files.values(),
413 key=lambda file_stats: file_stats.write,
414 reverse=True)
415 label = 'Files write'
416 graph_args = {'with_value': False, 'sort': 2}
417 self._print_ascii_graph(input_list, self._get_file_write_datum,
418 label, graph_args)
419
    def _output_file_read_write(self):
        # Gather the per-file statistics (restricted to the filtered
        # pids/procs) once, then print both the read and write views.
        files = self._analysis.get_files_stats(self._arg_pid_list,
                                               self._arg_proc_list)
        self._output_file_read(files)
        self._output_file_write(files)
425
    def iotop_output(self):
        """Print every I/O usage view, in a fixed order."""
        self._output_read()
        self._output_write()
        self._output_file_read_write()
        self._output_block_read()
        self._output_block_write()
        self._output_total_rq_sectors()
        self._output_rq_count()
        self._output_avg_disk_latency()
        self._output_net_recv_bytes()
        self._output_net_sent_bytes()
437
438 # IO Latency output methods
439 def iolatency_freq_histogram(self, _min, _max, res, rq_list, title):
440 step = (_max - _min) / res
441 if step == 0:
442 return
443 buckets = []
444 values = []
445 graph = Pyasciigraph()
446 for i in range(res):
447 buckets.append(i * step)
448 values.append(0)
449 for i in rq_list:
450 v = i / 1000
451 b = min(int((v-_min)/step), res - 1)
452 values[b] += 1
453 g = []
454 i = 0
455 for v in values:
456 g.append(('%0.03f' % (i * step + _min), v))
457 i += 1
458 for line in graph.graph(title, g, info_before=True, count=True):
459 print(line)
460 print('')
461
    # iolatency functions
    def iolatency_output_disk(self):
        # Per-disk latency frequency output driven by the raw automaton
        # state (self.state) rather than by self._analysis.
        # NOTE(review): compute_disk_stats is not defined in this module;
        # presumably provided by the base class -- confirm before
        # relying on this path.
        for dev in self.state.disks.keys():
            d = self.state.disks[dev]
            if d.max is None:
                self.compute_disk_stats(d)
            if d.count is not None:
                self.iolatency_freq_histogram(d.min, d.max,
                                              self._arg_freq_resolution,
                                              d.rq_values,
                                              'Frequency distribution for '
                                              'disk %s (usec)' %
                                              (d.prettyname))
475
    def iolatency_output(self):
        # NOTE(review): _output_disk_latency_freq is not defined in this
        # module; this call raises AttributeError unless the base class
        # or a subclass provides it -- confirm.
        self._output_disk_latency_freq()
478
479 def iolatency_syscalls_output(self):
480 s = self.syscalls_stats
481 print('')
482 if s.open_count > 0:
483 self.iolatency_freq_histogram(s.open_min/1000, s.open_max/1000,
484 self._arg_freq_resolution, s.open_rq,
485 'Open latency distribution (usec)')
486 if s.read_count > 0:
487 self.iolatency_freq_histogram(s.read_min/1000, s.read_max/1000,
488 self._arg_freq_resolution, s.read_rq,
489 'Read latency distribution (usec)')
490 if s.write_count > 0:
491 self.iolatency_freq_histogram(s.write_min/1000, s.write_max/1000,
492 self._arg_freq_resolution,
493 s.write_rq,
494 'Write latency distribution (usec)')
495 if s.sync_count > 0:
496 self.iolatency_freq_histogram(s.sync_min/1000, s.sync_max/1000,
497 self._arg_freq_resolution, s.sync_rq,
498 'Sync latency distribution (usec)')
499
    def _output_io_request(self, io_rq):
        """Print one formatted request line for the log/top tables."""
        fmt = '{:<40} {:<16} {:>16} {:>11} {:<24} {:<8} {:<14}'

        begin_time = common.ns_to_hour_nsec(io_rq.begin_ts,
                                            self._arg_multi_day,
                                            self._arg_gmt)
        end_time = common.ns_to_hour_nsec(io_rq.end_ts,
                                          self._arg_multi_day,
                                          self._arg_gmt)
        time_range_str = '[' + begin_time + ',' + end_time + ']'
        # Duration is stored in nsec, displayed in usec.
        duration_str = '%0.03f' % (io_rq.duration / 1000)

        if io_rq.size is None:
            size = 'N/A'
        else:
            size = common.convert_size(io_rq.size)

        tid = io_rq.tid
        proc_stats = self._analysis.tids[tid]
        comm = proc_stats.comm

        # TODO: handle fd_in/fd_out for RW type operations
        if io_rq.fd is None:
            file_str = 'N/A'
        else:
            fd = io_rq.fd

            # Resolve the fd on the parent process when this thread has
            # a known pid -- presumably fd tables are tracked on the
            # thread-group leader; confirm.
            parent_proc = proc_stats
            if parent_proc.pid is not None:
                parent_proc = self._analysis.tids[parent_proc.pid]

            fd_stats = parent_proc.get_fd(fd, io_rq.end_ts)
            if fd_stats is not None:
                filename = fd_stats.filename
            else:
                filename = 'unknown'

            file_str = '%s (fd=%s)' % (filename, fd)

        # Requests partially outside the requested time range are
        # flagged with '*'; the caller prints the matching legend.
        if self._is_io_rq_out_of_range(io_rq):
            time_range_str += '*'
            duration_str += '*'
        else:
            time_range_str += ' '
            duration_str += ' '

        print(fmt.format(time_range_str, io_rq.syscall_name, duration_str,
                         size, comm, tid, file_str))
548
549 def _output_io_requests_list(self, rq_list, title, sort_key, is_top=False):
550 if not rq_list:
551 return
552
553 count = 0
554 has_out_of_range_rq = False
555
556 print()
557 print(title)
558
559 header_fmt = '{:<19} {:<20} {:<16} {:<23} {:<5} {:<24} {:<8} {:<14}'
560 print(header_fmt.format(
561 'Begin', 'End', 'Name', 'Duration (usec)', 'Size', 'Proc', 'PID',
562 'Filename'))
563
564 for io_rq in sorted(rq_list, key=operator.attrgetter(sort_key),
565 reverse=is_top):
566 if is_top and count > self._arg_limit:
567 break
568
569 self._output_io_request(io_rq)
570 if not has_out_of_range_rq and self._is_io_rq_out_of_range(io_rq):
571 has_out_of_range_rq = True
572
573 count += 1
574
575 if has_out_of_range_rq:
576 print('*: Syscalls started and/or completed outside of the '
577 'range specified')
578
579 def _output_latency_log_from_requests(self, io_requests, title, sort_key,
580 is_top=False):
581 io_requests = [io_rq for io_rq in io_requests if
582 self._filter_io_request(io_rq)]
583 self._output_io_requests_list(io_requests, title, sort_key, is_top)
584
585 def iolatency_syscalls_top_output(self):
586 self._output_latency_log_from_requests(
587 [io_rq for io_rq in self._analysis.open_io_requests if
588 self._filter_io_request(io_rq)],
589 'Top open syscall latencies (usec)',
590 'duration', is_top=True)
591 self._output_io_requests_list(
592 [io_rq for io_rq in self._analysis.read_io_requests if
593 self._filter_io_request(io_rq)],
594 'Top read syscall latencies (usec)',
595 'duration', is_top=True)
596 self._output_io_requests_list(
597 [io_rq for io_rq in self._analysis.write_io_requests if
598 self._filter_io_request(io_rq)],
599 'Top write syscall latencies (usec)',
600 'duration', is_top=True)
601 self._output_io_requests_list(
602 [io_rq for io_rq in self._analysis.sync_io_requests if
603 self._filter_io_request(io_rq)],
604 'Top sync syscall latencies (usec)',
605 'duration', is_top=True)
606
    def iolatency_syscalls_log_output(self):
        # Chronological (begin timestamp) log of every recorded I/O
        # system call; the list is passed through unfiltered.
        self._output_io_requests_list(
            self._analysis.io_requests,
            'Log of all I/O system calls',
            'begin_ts')
612
613 # IO Stats output methods
614 def _output_latency_stats(self, name, rq_count, min_duration, max_duration,
615 total_duration, rq_durations):
616 if rq_count < 2:
617 stdev = '?'
618 else:
619 stdev = '%0.03f' % (statistics.stdev(rq_durations) / 1000)
620
621 avg = '%0.03f' % (total_duration / (rq_count) / 1000)
622 min_duration = '%0.03f' % (min_duration / 1000)
623 max_duration = '%0.03f' % (max_duration / 1000)
624
625 print(IoAnalysisCommand._LATENCY_STATS_FORMAT.format(
626 name, rq_count, min_duration, avg, max_duration, stdev))
627
628 def _output_latency_stats_from_requests(self, io_requests, name):
629 rq_durations = [io_rq.duration for io_rq in io_requests if
630 self._filter_io_request(io_rq)]
631 rq_count = len(rq_durations)
632 min_duration = min(rq_durations)
633 max_duration = max(rq_durations)
634 total_duration = sum(rq_durations)
635
636 self._output_latency_stats(name, rq_count, min_duration,
637 max_duration, total_duration,
638 rq_durations)
639
640 def _output_syscalls_latency_stats(self):
641 print('\nSyscalls latency statistics (usec):')
642 print(IoAnalysisCommand._LATENCY_STATS_FORMAT.format(
643 'Type', 'Count', 'Min', 'Average', 'Max', 'Stdev'))
644 print(IoAnalysisCommand._SECTION_SEPARATOR_STRING)
645
646 self._output_latency_stats_from_requests(
647 self._analysis.open_io_requests, 'Open')
648 self._output_latency_stats_from_requests(
649 self._analysis.read_io_requests, 'Read')
650 self._output_latency_stats_from_requests(
651 self._analysis.write_io_requests, 'Write')
652 self._output_latency_stats_from_requests(
653 self._analysis.sync_io_requests, 'Sync')
654
655 def _output_disk_latency_stats(self):
656 if not self._analysis.disks:
657 return
658
659 print('\nDisk latency statistics (usec):')
660 print(IoAnalysisCommand._LATENCY_STATS_FORMAT.format(
661 'Name', 'Count', 'Min', 'Average', 'Max', 'Stdev'))
662 print(IoAnalysisCommand._SECTION_SEPARATOR_STRING)
663
664 for disk in self._analysis.disks.values():
665 if disk.rq_count:
666 rq_durations = [rq.duration for rq in disk.rq_list]
667 self._output_latency_stats(disk.disk_name,
668 disk.rq_count,
669 disk.min_rq_duration,
670 disk.max_rq_duration,
671 disk.total_rq_duration,
672 rq_durations)
673
    def iostats_output(self):
        # Print both latency statistics tables: syscalls, then disks.
        self._output_syscalls_latency_stats()
        self._output_disk_latency_stats()
677
    def _print_results(self, begin_ns, end_ns):
        # Print the date header, then each view enabled on the command
        # line. Note that --latencyfreq prints both the syscall
        # histograms and the disk latency output.
        self._print_date(begin_ns, end_ns)
        if self._arg_usage:
            self.iotop_output()
        if self._arg_stats:
            self.iostats_output()
        if self._arg_latencytop:
            self.iolatency_syscalls_top_output()
        if self._arg_freq:
            self.iolatency_syscalls_output()
            self.iolatency_output()
        if self._arg_log:
            self.iolatency_syscalls_log_output()
691
    def _reset_total(self, start_ts):
        # Drop all accumulated analysis state at a period boundary;
        # start_ts is unused but required by the _run_analysis callback
        # signature.
        self._analysis.reset()
694
    def _add_arguments(self, ap):
        """Add the I/O-command-specific arguments to the parser.

        The declaration order here is the order shown in --help.
        """
        ap.add_argument('--usage', action='store_true',
                        help='Show the I/O usage')
        ap.add_argument('--latencystats', action='store_true',
                        help='Show the I/O latency statistics')
        ap.add_argument('--latencytop', action='store_true',
                        help='Show the I/O latency top')
        ap.add_argument('--latencyfreq', action='store_true',
                        help='Show the I/O latency frequency distribution')
        ap.add_argument('--freq-resolution', type=int, default=20,
                        help='Frequency distribution resolution '
                        '(default 20)')
707
708
# entry point
def runstats():
    """Console entry point for the latency-statistics view."""
    IoAnalysisCommand().run_stats()
715
716
def runlatencytop():
    """Console entry point for the latency-top view."""
    IoAnalysisCommand().run_latencytop()
722
723
def runlog():
    """Console entry point for the latency-log view."""
    IoAnalysisCommand().run_log()
729
730
def runfreq():
    """Console entry point for the latency-frequency view."""
    IoAnalysisCommand().run_freq()
736
737
def runusage():
    """Console entry point for the usage-top view."""
    IoAnalysisCommand().run_usage()
This page took 0.045848 seconds and 5 git commands to generate.