fix: stats with 0 requests
[deliverable/lttng-analyses.git] / lttnganalysescli / lttnganalysescli / io.py
1 #!/usr/bin/env python3
2 #
3 # The MIT License (MIT)
4 #
5 # Copyright (C) 2015 - Julien Desfossez <jdesfossez@efficios.com>
6 # 2015 - Antoine Busque <abusque@efficios.com>
7 #
8 # Permission is hereby granted, free of charge, to any person obtaining a copy
9 # of this software and associated documentation files (the "Software"), to deal
10 # in the Software without restriction, including without limitation the rights
11 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
12 # copies of the Software, and to permit persons to whom the Software is
13 # furnished to do so, subject to the following conditions:
14 #
15 # The above copyright notice and this permission notice shall be included in
16 # all copies or substantial portions of the Software.
17 #
18 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
21 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
23 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
24 # SOFTWARE.
25
26 from .command import Command
27 import lttnganalyses.io
28 from linuxautomaton import common
29 from ascii_graph import Pyasciigraph
30 import operator
31 import statistics
32
33
class IoAnalysisCommand(Command):
    # Command driving the I/O analyses; which reports are printed (usage,
    # latency stats/top/freq, request log) is selected by the CLI flags
    # and the per-executable entry points below.
    _DESC = """The I/O command."""

    # Row format shared by the syscall and disk latency statistics tables:
    # name, count, min, average, max, stdev.
    _LATENCY_STATS_FORMAT = '{:<14} {:>14} {:>14} {:>14} {:>14} {:>14}'
    # Horizontal rule printed under the statistics table headers.
    _SECTION_SEPARATOR_STRING = '-' * 89

    def __init__(self):
        # Enable the optional argument groups this command supports;
        # the base Command class consumes these flags and also calls
        # self._add_arguments() to register the I/O-specific options.
        super().__init__(self._add_arguments,
                         enable_proc_filter_args=True,
                         enable_max_min_args=True,
                         enable_max_min_size_arg=True,
                         enable_log_arg=True)
46
47 def _validate_transform_args(self):
48 self._arg_usage = self._args.usage
49 self._arg_stats = self._args.latencystats
50 self._arg_latencytop = self._args.latencytop
51 self._arg_freq = self._args.latencyfreq
52 self._arg_freq_resolution = self._args.freq_resolution
53
54 def _default_args(self, stats, log, freq, usage, latencytop):
55 if stats:
56 self._arg_stats = True
57 if log:
58 self._arg_log = True
59 if freq:
60 self._arg_freq = True
61 if usage:
62 self._arg_usage = True
63 if latencytop:
64 self._arg_latencytop = True
65
    def run(self, stats=False, log=False, freq=False, usage=False,
            latencytop=False):
        """Run the full analysis pipeline.

        The keyword flags select which reports _print_results() emits, in
        addition to any flags set on the command line.  The steps below
        are order-dependent: arguments must be parsed before the trace is
        opened, and the analysis object must exist before it is run.
        """
        # parse arguments first
        self._parse_args()
        # validate, transform and save specific arguments
        self._validate_transform_args()
        # handle the default args for different executables
        self._default_args(stats, log, freq, usage, latencytop)
        # open the trace
        self._open_trace()
        # create the appropriate analysis/analyses
        self._create_analysis()
        # run the analysis
        self._run_analysis(self._reset_total, self._refresh)
        # print results
        self._print_results(self.start_ns, self.trace_end_ts)
        # close the trace
        self._close_trace()
84
    def run_stats(self):
        """Run the analysis with the latency-statistics report enabled."""
        self.run(stats=True)

    def run_latencytop(self):
        """Run the analysis with the latency-top report enabled."""
        self.run(latencytop=True)

    def run_log(self):
        """Run the analysis with the I/O request log enabled."""
        self.run(log=True)

    def run_freq(self):
        """Run the analysis with the latency-frequency report enabled."""
        self.run(freq=True)

    def run_usage(self):
        """Run the analysis with the I/O usage report enabled."""
        self.run(usage=True)
99
    def _create_analysis(self):
        # Instantiate the I/O analysis over the shared automaton state.
        self._analysis = lttnganalyses.io.IoAnalysis(self.state)
102
    def _refresh(self, begin, end):
        # Called at the end of each refresh period: print the results
        # accumulated so far, then reset the counters for the next period.
        # The order matters -- resetting first would lose the data.
        self._print_results(begin, end)
        self._reset_total(end)
106
107 # Filter predicates
108 def _filter_size(self, size):
109 if size is None:
110 return True
111 if self._arg_maxsize is not None and size > self._arg_maxsize:
112 return False
113 if self._arg_minsize is not None and size < self._arg_minsize:
114 return False
115 return True
116
117 def _filter_latency(self, duration):
118 if self._arg_max is not None and (duration/1000) > self._arg_max:
119 return False
120 if self._arg_min is not None and (duration/1000) < self._arg_min:
121 return False
122 return True
123
    def _filter_time_range(self, begin, end):
        # Keep the request unless it begins after the end of the analysis
        # window.  Only applies when both --begin and --end were given and
        # the request has an end timestamp.
        # NOTE(review): `end` is only tested for truthiness while `begin`
        # is the value compared against self._arg_end -- possibly
        # intentional (only completed requests are filtered), but worth
        # confirming against the callers.
        return not (self._arg_begin and self._arg_end and end and
                    begin > self._arg_end)
127
128 def _filter_io_request(self, io_rq):
129 proc = self._analysis.tids[io_rq.tid]
130 return self._filter_process(proc) and \
131 self._filter_size(io_rq.size) and \
132 self._filter_latency(io_rq.duration) and \
133 self._filter_time_range(io_rq.begin_ts, io_rq.end_ts)
134
135 def _is_io_rq_out_of_range(self, io_rq):
136 return self._arg_begin and io_rq.begin_ts < self._arg_begin or \
137 self._arg_end and io_rq.end_ts > self._arg_end
138
139 def _print_ascii_graph(self, input_list, get_datum_cb, graph_label,
140 graph_args=None):
141 """Print an ascii graph for given data
142
143 This method wraps the ascii_graph module and facilitates the
144 printing of a graph with a limited number of lines.
145
146 Args:
147 input_list (list): A list of objects from which the data
148 for the graph will be generated.
149
150 get_datum_cb (function): function that takes a single
151 object from the input list as an argument, and returns a
152 datum tuple for the graph, of the form (string, int). The
153 string element is printed as is in the graph, and the int
154 is the numeric value corresponding to this graph entry.
155
156 graph_label (string): Label used to identify the printed
157 graph.
158
159 graph_args (dict, optional): Dict of keyword args to be
160 passed to the graph() function as is.
161 """
162 count = 0
163 limit = self._arg_limit
164 graph = Pyasciigraph()
165 data = []
166 if graph_args is None:
167 graph_args = {}
168
169 for elem in input_list:
170 datum = get_datum_cb(elem)
171 if datum is not None:
172 data.append(datum)
173 count += 1
174 if limit is not None and count >= limit:
175 break
176
177 for line in graph.graph(graph_label, data, **graph_args):
178 print(line)
179
180 # I/O Top output methods
181 def _get_read_datum(self, proc_stats):
182 if not self._filter_process(proc_stats):
183 return None
184
185 if proc_stats.pid is None:
186 pid_str = 'unknown (tid=%d)' % (proc_stats.tid)
187 else:
188 pid_str = str(proc_stats.pid)
189
190 format_str = '{:>10} {:<25} {:>9} file {:>9} net {:>9} unknown'
191 output_str = format_str.format(
192 common.convert_size(proc_stats.total_read, padding_after=True),
193 '%s (%s)' % (proc_stats.comm, pid_str),
194 common.convert_size(proc_stats.disk_read, padding_after=True),
195 common.convert_size(proc_stats.net_read, padding_after=True),
196 common.convert_size(proc_stats.unk_read, padding_after=True))
197
198 return (output_str, proc_stats.total_read)
199
200 def _get_write_datum(self, proc_stats):
201 if not self._filter_process(proc_stats):
202 return None
203
204 if proc_stats.pid is None:
205 pid_str = 'unknown (tid=%d)' % (proc_stats.tid)
206 else:
207 pid_str = str(proc_stats.pid)
208
209 format_str = '{:>10} {:<25} {:>9} file {:>9} net {:>9} unknown'
210 output_str = format_str.format(
211 common.convert_size(proc_stats.total_write, padding_after=True),
212 '%s (%s)' % (proc_stats.comm, pid_str),
213 common.convert_size(proc_stats.disk_write, padding_after=True),
214 common.convert_size(proc_stats.net_write, padding_after=True),
215 common.convert_size(proc_stats.unk_write, padding_after=True))
216
217 return (output_str, proc_stats.total_write)
218
219 def _get_block_read_datum(self, proc_stats):
220 if not self._filter_process(proc_stats) or proc_stats.block_read == 0:
221 return None
222
223 comm = proc_stats.comm
224 if not comm:
225 comm = 'unknown'
226
227 if proc_stats.pid is None:
228 pid_str = 'unknown (tid=%d)' % (proc_stats.tid)
229 else:
230 pid_str = str(proc_stats.pid)
231
232 format_str = '{:>10} {:<22}'
233 output_str = format_str.format(
234 common.convert_size(proc_stats.block_read, padding_after=True),
235 '%s (pid=%s)' % (comm, pid_str))
236
237 return (output_str, proc_stats.block_read)
238
239 def _get_block_write_datum(self, proc_stats):
240 if not self._filter_process(proc_stats) or \
241 proc_stats.block_write == 0:
242 return None
243
244 comm = proc_stats.comm
245 if not comm:
246 comm = 'unknown'
247
248 if proc_stats.pid is None:
249 pid_str = 'unknown (tid=%d)' % (proc_stats.tid)
250 else:
251 pid_str = str(proc_stats.pid)
252
253 format_str = '{:>10} {:<22}'
254 output_str = format_str.format(
255 common.convert_size(proc_stats.block_write, padding_after=True),
256 '%s (pid=%s)' % (comm, pid_str))
257
258 return (output_str, proc_stats.block_write)
259
260 def _get_total_rq_sectors_datum(self, disk):
261 if disk.total_rq_sectors == 0:
262 return None
263
264 return (disk.disk_name, disk.total_rq_sectors)
265
266 def _get_rq_count_datum(self, disk):
267 if disk.rq_count == 0:
268 return None
269
270 return (disk.disk_name, disk.rq_count)
271
272 def _get_avg_disk_latency_datum(self, disk):
273 if disk.rq_count == 0:
274 return None
275
276 avg_latency = ((disk.total_rq_duration / disk.rq_count) /
277 common.MSEC_PER_NSEC)
278 avg_latency = round(avg_latency, 3)
279
280 return ('%s' % disk.disk_name, avg_latency)
281
282 def _get_net_recv_bytes_datum(self, iface):
283 return ('%s %s' % (common.convert_size(iface.recv_bytes), iface.name),
284 iface.recv_bytes)
285
286 def _get_net_sent_bytes_datum(self, iface):
287 return ('%s %s' % (common.convert_size(iface.sent_bytes), iface.name),
288 iface.sent_bytes)
289
290 def _get_file_read_datum(self, file_stats):
291 if file_stats.read == 0:
292 return None
293
294 fd_by_pid_str = ''
295 for pid, fd in file_stats.fd_by_pid.items():
296 comm = self._analysis.tids[pid].comm
297 fd_by_pid_str += 'fd %d in %s (%s) ' % (fd, comm, pid)
298
299 format_str = '{:>10} {} {}'
300 output_str = format_str.format(
301 common.convert_size(file_stats.read, padding_after=True),
302 file_stats.filename,
303 fd_by_pid_str)
304
305 return (output_str, file_stats.read)
306
307 def _get_file_write_datum(self, file_stats):
308 if file_stats.write == 0:
309 return None
310
311 fd_by_pid_str = ''
312 for pid, fd in file_stats.fd_by_pid.items():
313 comm = self._analysis.tids[pid].comm
314 fd_by_pid_str += 'fd %d in %s (%s) ' % (fd, comm, pid)
315
316 format_str = '{:>10} {} {}'
317 output_str = format_str.format(
318 common.convert_size(file_stats.write, padding_after=True),
319 file_stats.filename,
320 fd_by_pid_str)
321
322 return (output_str, file_stats.write)
323
324 def _output_read(self):
325 input_list = sorted(self._analysis.tids.values(),
326 key=operator.attrgetter('total_read'),
327 reverse=True)
328 label = 'Per-process I/O Read'
329 graph_args = {'with_value': False}
330 self._print_ascii_graph(input_list, self._get_read_datum, label,
331 graph_args)
332
333 def _output_write(self):
334 input_list = sorted(self._analysis.tids.values(),
335 key=operator.attrgetter('total_write'),
336 reverse=True)
337 label = 'Per-process I/O Write'
338 graph_args = {'with_value': False}
339 self._print_ascii_graph(input_list, self._get_write_datum, label,
340 graph_args)
341
342 def _output_block_read(self):
343 input_list = sorted(self._analysis.tids.values(),
344 key=operator.attrgetter('block_read'),
345 reverse=True)
346 label = 'Block I/O Read'
347 graph_args = {'with_value': False}
348 self._print_ascii_graph(input_list, self._get_block_read_datum,
349 label, graph_args)
350
351 def _output_block_write(self):
352 input_list = sorted(self._analysis.tids.values(),
353 key=operator.attrgetter('block_write'),
354 reverse=True)
355 label = 'Block I/O Write'
356 graph_args = {'with_value': False}
357 self._print_ascii_graph(input_list, self._get_block_write_datum,
358 label, graph_args)
359
360 def _output_total_rq_sectors(self):
361 input_list = sorted(self._analysis.disks.values(),
362 key=operator.attrgetter('total_rq_sectors'),
363 reverse=True)
364 label = 'Disk requests sector count'
365 graph_args = {'unit': ' sectors'}
366 self._print_ascii_graph(input_list, self._get_total_rq_sectors_datum,
367 label, graph_args)
368
369 def _output_rq_count(self):
370 input_list = sorted(self._analysis.disks.values(),
371 key=operator.attrgetter('rq_count'),
372 reverse=True)
373 label = 'Disk request count'
374 graph_args = {'unit': ' requests'}
375 self._print_ascii_graph(input_list, self._get_rq_count_datum,
376 label, graph_args)
377
378 def _output_avg_disk_latency(self):
379 input_list = self._analysis.disks.values()
380 label = 'Disk request average latency'
381 graph_args = {'unit': ' ms', 'sort': 2}
382 self._print_ascii_graph(input_list, self._get_avg_disk_latency_datum,
383 label, graph_args)
384
385 def _output_net_recv_bytes(self):
386 input_list = sorted(self._analysis.ifaces.values(),
387 key=operator.attrgetter('recv_bytes'),
388 reverse=True)
389 label = 'Network received bytes'
390 graph_args = {'with_value': False}
391 self._print_ascii_graph(input_list, self._get_net_recv_bytes_datum,
392 label, graph_args)
393
394 def _output_net_sent_bytes(self):
395 input_list = sorted(self._analysis.ifaces.values(),
396 key=operator.attrgetter('sent_bytes'),
397 reverse=True)
398 label = 'Network sent bytes'
399 graph_args = {'with_value': False}
400 self._print_ascii_graph(input_list, self._get_net_sent_bytes_datum,
401 label, graph_args)
402
403 def _output_file_read(self, files):
404 input_list = sorted(files.values(),
405 key=lambda file_stats: file_stats.read,
406 reverse=True)
407 label = 'Files read'
408 graph_args = {'with_value': False, 'sort': 2}
409 self._print_ascii_graph(input_list, self._get_file_read_datum,
410 label, graph_args)
411
412 def _output_file_write(self, files):
413 input_list = sorted(files.values(),
414 key=lambda file_stats: file_stats.write,
415 reverse=True)
416 label = 'Files write'
417 graph_args = {'with_value': False, 'sort': 2}
418 self._print_ascii_graph(input_list, self._get_file_write_datum,
419 label, graph_args)
420
421 def _output_file_read_write(self):
422 files = self._analysis.get_files_stats(self._arg_pid_list,
423 self._arg_proc_list)
424 self._output_file_read(files)
425 self._output_file_write(files)
426
    def iotop_output(self):
        """Print all the I/O usage reports: per-process reads/writes,
        per-file usage, block I/O, disk requests and network traffic."""
        self._output_read()
        self._output_write()
        self._output_file_read_write()
        self._output_block_read()
        self._output_block_write()
        self._output_total_rq_sectors()
        self._output_rq_count()
        self._output_avg_disk_latency()
        self._output_net_recv_bytes()
        self._output_net_sent_bytes()
438
439 # I/O Latency frequency output methods
440 def _print_frequency_distribution(self, duration_list, title):
441 if not duration_list:
442 return
443
444 # The number of bins for the histogram
445 resolution = self._arg_freq_resolution
446
447 min_duration = min(duration_list)
448 max_duration = max(duration_list)
449 # ns to µs
450 min_duration /= 1000
451 max_duration /= 1000
452
453 step = (max_duration - min_duration) / resolution
454 if step == 0:
455 return
456
457 buckets = []
458 values = []
459 graph = Pyasciigraph()
460 for i in range(resolution):
461 buckets.append(i * step)
462 values.append(0)
463 for duration in duration_list:
464 duration /= 1000
465 index = min(int((duration - min_duration) / step), resolution - 1)
466 values[index] += 1
467
468 graph_data = []
469 for index, value in enumerate(values):
470 # The graph data format is a tuple (info, value). Here info
471 # is the lower bound of the bucket, value the bucket's count
472 graph_data.append(('%0.03f' % (index * step + min_duration),
473 value))
474
475 graph_lines = graph.graph(
476 title,
477 graph_data,
478 info_before=True,
479 count=True
480 )
481
482 for line in graph_lines:
483 print(line)
484
485 print()
486
487 def _output_disk_latency_freq(self):
488 for disk in self._analysis.disks.values():
489 rq_durations = [rq.duration for rq in disk.rq_list]
490 self._print_frequency_distribution(
491 rq_durations,
492 'Frequency distribution for disk %s (usec)' % (disk.disk_name))
493
    def iolatency_output(self):
        """Print the disk-level latency frequency distributions."""
        self._output_disk_latency_freq()
496
497 def iolatency_syscalls_output(self):
498 print()
499 self._print_frequency_distribution([io_rq.duration for io_rq in
500 self._analysis.open_io_requests if
501 self._filter_io_request(io_rq)],
502 'Open latency distribution (usec)')
503 self._print_frequency_distribution([io_rq.duration for io_rq in
504 self._analysis.read_io_requests if
505 self._filter_io_request(io_rq)],
506 'Read latency distribution (usec)')
507 self._print_frequency_distribution([io_rq.duration for io_rq in
508 self._analysis.write_io_requests if
509 self._filter_io_request(io_rq)],
510 'Write latency distribution (usec)')
511 self._print_frequency_distribution([io_rq.duration for io_rq in
512 self._analysis.sync_io_requests if
513 self._filter_io_request(io_rq)],
514 'Sync latency distribution (usec)')
515
516 # I/O latency top and log output methods
    def _output_io_request(self, io_rq):
        """Print one formatted table row for a single I/O request.

        The row shows the time range, syscall name, duration (usec),
        size, process, TID and the filename resolved from the request's
        fd.  Entries outside the --begin/--end range get a '*' suffix on
        the time range and duration columns.
        """
        fmt = '{:<40} {:<16} {:>16} {:>11} {:<24} {:<8} {:<14}'

        begin_time = common.ns_to_hour_nsec(io_rq.begin_ts,
                                            self._arg_multi_day,
                                            self._arg_gmt)
        end_time = common.ns_to_hour_nsec(io_rq.end_ts,
                                          self._arg_multi_day,
                                          self._arg_gmt)
        time_range_str = '[' + begin_time + ',' + end_time + ']'
        duration_str = '%0.03f' % (io_rq.duration / 1000)

        # Some request types carry no size (e.g. no data transferred).
        if io_rq.size is None:
            size = 'N/A'
        else:
            size = common.convert_size(io_rq.size)

        tid = io_rq.tid
        proc_stats = self._analysis.tids[tid]
        comm = proc_stats.comm

        # TODO: handle fd_in/fd_out for RW type operations
        if io_rq.fd is None:
            file_str = 'N/A'
        else:
            fd = io_rq.fd

            # Resolve the fd against the thread group leader's entry --
            # presumably the fd table is tracked there; confirm against
            # the proc_stats implementation.
            parent_proc = proc_stats
            if parent_proc.pid is not None:
                parent_proc = self._analysis.tids[parent_proc.pid]

            fd_stats = parent_proc.get_fd(fd, io_rq.end_ts)
            if fd_stats is not None:
                filename = fd_stats.filename
            else:
                filename = 'unknown'

            file_str = '%s (fd=%s)' % (filename, fd)

        # Pad with '*' or a space so the columns stay aligned either way.
        if self._is_io_rq_out_of_range(io_rq):
            time_range_str += '*'
            duration_str += '*'
        else:
            time_range_str += ' '
            duration_str += ' '

        print(fmt.format(time_range_str, io_rq.syscall_name, duration_str,
                         size, comm, tid, file_str))
565
566 def _output_io_requests_list(self, rq_list, title, sort_key, is_top=False):
567 if not rq_list:
568 return
569
570 count = 0
571 has_out_of_range_rq = False
572
573 print()
574 print(title)
575
576 header_fmt = '{:<19} {:<20} {:<16} {:<23} {:<5} {:<24} {:<8} {:<14}'
577 print(header_fmt.format(
578 'Begin', 'End', 'Name', 'Duration (usec)', 'Size', 'Proc', 'PID',
579 'Filename'))
580
581 for io_rq in sorted(rq_list, key=operator.attrgetter(sort_key),
582 reverse=is_top):
583 if is_top and count > self._arg_limit:
584 break
585
586 self._output_io_request(io_rq)
587 if not has_out_of_range_rq and self._is_io_rq_out_of_range(io_rq):
588 has_out_of_range_rq = True
589
590 count += 1
591
592 if has_out_of_range_rq:
593 print('*: Syscalls started and/or completed outside of the '
594 'range specified')
595
596 def _output_latency_log_from_requests(self, io_requests, title, sort_key,
597 is_top=False):
598 io_requests = [io_rq for io_rq in io_requests if
599 self._filter_io_request(io_rq)]
600 self._output_io_requests_list(io_requests, title, sort_key, is_top)
601
602 def iolatency_syscalls_top_output(self):
603 self._output_latency_log_from_requests(
604 [io_rq for io_rq in self._analysis.open_io_requests if
605 self._filter_io_request(io_rq)],
606 'Top open syscall latencies (usec)',
607 'duration', is_top=True)
608 self._output_io_requests_list(
609 [io_rq for io_rq in self._analysis.read_io_requests if
610 self._filter_io_request(io_rq)],
611 'Top read syscall latencies (usec)',
612 'duration', is_top=True)
613 self._output_io_requests_list(
614 [io_rq for io_rq in self._analysis.write_io_requests if
615 self._filter_io_request(io_rq)],
616 'Top write syscall latencies (usec)',
617 'duration', is_top=True)
618 self._output_io_requests_list(
619 [io_rq for io_rq in self._analysis.sync_io_requests if
620 self._filter_io_request(io_rq)],
621 'Top sync syscall latencies (usec)',
622 'duration', is_top=True)
623
    def iolatency_syscalls_log_output(self):
        """Print the chronological log of every I/O system call."""
        self._output_io_requests_list(
            self._analysis.io_requests,
            'Log of all I/O system calls',
            'begin_ts')
629
630 # I/O Stats output methods
631 def _output_latency_stats(self, name, rq_count, min_duration, max_duration,
632 total_duration, rq_durations):
633 if rq_count < 2:
634 stdev = '?'
635 else:
636 stdev = '%0.03f' % (statistics.stdev(rq_durations) / 1000)
637
638 if rq_count > 0:
639 avg = '%0.03f' % (total_duration / (rq_count) / 1000)
640 else:
641 avg = "0.000"
642 min_duration = '%0.03f' % (min_duration / 1000)
643 max_duration = '%0.03f' % (max_duration / 1000)
644
645 print(IoAnalysisCommand._LATENCY_STATS_FORMAT.format(
646 name, rq_count, min_duration, avg, max_duration, stdev))
647
648 def _output_latency_stats_from_requests(self, io_requests, name):
649 rq_durations = [io_rq.duration for io_rq in io_requests if
650 self._filter_io_request(io_rq)]
651 rq_count = len(rq_durations)
652 if len(rq_durations) > 0:
653 min_duration = min(rq_durations)
654 max_duration = max(rq_durations)
655 else:
656 min_duration = 0
657 max_duration = 0
658 total_duration = sum(rq_durations)
659
660 self._output_latency_stats(name, rq_count, min_duration,
661 max_duration, total_duration,
662 rq_durations)
663
664 def _output_syscalls_latency_stats(self):
665 print('\nSyscalls latency statistics (usec):')
666 print(IoAnalysisCommand._LATENCY_STATS_FORMAT.format(
667 'Type', 'Count', 'Min', 'Average', 'Max', 'Stdev'))
668 print(IoAnalysisCommand._SECTION_SEPARATOR_STRING)
669
670 self._output_latency_stats_from_requests(
671 self._analysis.open_io_requests, 'Open')
672 self._output_latency_stats_from_requests(
673 self._analysis.read_io_requests, 'Read')
674 self._output_latency_stats_from_requests(
675 self._analysis.write_io_requests, 'Write')
676 self._output_latency_stats_from_requests(
677 self._analysis.sync_io_requests, 'Sync')
678
679 def _output_disk_latency_stats(self):
680 if not self._analysis.disks:
681 return
682
683 print('\nDisk latency statistics (usec):')
684 print(IoAnalysisCommand._LATENCY_STATS_FORMAT.format(
685 'Name', 'Count', 'Min', 'Average', 'Max', 'Stdev'))
686 print(IoAnalysisCommand._SECTION_SEPARATOR_STRING)
687
688 for disk in self._analysis.disks.values():
689 if disk.rq_count:
690 rq_durations = [rq.duration for rq in disk.rq_list]
691 self._output_latency_stats(disk.disk_name,
692 disk.rq_count,
693 disk.min_rq_duration,
694 disk.max_rq_duration,
695 disk.total_rq_duration,
696 rq_durations)
697
    def iostats_output(self):
        """Print the latency statistics reports: syscalls, then disks."""
        self._output_syscalls_latency_stats()
        self._output_disk_latency_stats()
701
    def _print_results(self, begin_ns, end_ns):
        """Print every report enabled by the current flags for the
        [begin_ns, end_ns] period, preceded by the period's date line."""
        self._print_date(begin_ns, end_ns)
        if self._arg_usage:
            self.iotop_output()
        if self._arg_stats:
            self.iostats_output()
        if self._arg_latencytop:
            self.iolatency_syscalls_top_output()
        if self._arg_freq:
            # --latencyfreq prints both the per-syscall and the per-disk
            # frequency distributions.
            self.iolatency_syscalls_output()
            self.iolatency_output()
        if self._arg_log:
            self.iolatency_syscalls_log_output()
715
    def _reset_total(self, start_ts):
        # Drop all accumulated analysis state between refresh periods.
        # start_ts is unused here but the parameter is part of the
        # callback signature passed to _run_analysis().
        self._analysis.reset()
718
    def _add_arguments(self, ap):
        """Register the I/O-specific options on the argparse parser *ap*.

        The declaration order below is also the order the options appear
        in the --help output.
        """
        ap.add_argument('--usage', action='store_true',
                        help='Show the I/O usage')
        ap.add_argument('--latencystats', action='store_true',
                        help='Show the I/O latency statistics')
        ap.add_argument('--latencytop', action='store_true',
                        help='Show the I/O latency top')
        ap.add_argument('--latencyfreq', action='store_true',
                        help='Show the I/O latency frequency distribution')
        ap.add_argument('--freq-resolution', type=int, default=20,
                        help='Frequency distribution resolution '
                             '(default 20)')
731
732
733 # entry point
def runstats():
    """Entry point: run the I/O analysis with latency statistics."""
    IoAnalysisCommand().run_stats()


def runlatencytop():
    """Entry point: run the I/O analysis with the latency top view."""
    IoAnalysisCommand().run_latencytop()


def runlog():
    """Entry point: run the I/O analysis with the request log."""
    IoAnalysisCommand().run_log()


def runfreq():
    """Entry point: run the I/O analysis with the latency frequency
    distributions."""
    IoAnalysisCommand().run_freq()


def runusage():
    """Entry point: run the I/O analysis with the usage view."""
    IoAnalysisCommand().run_usage()
This page took 0.065303 seconds and 5 git commands to generate.