3 # The MIT License (MIT)
5 # Copyright (C) 2015 - Julien Desfossez <jdesfossez@efficios.com>
6 # 2015 - Antoine Busque <abusque@efficios.com>
8 # Permission is hereby granted, free of charge, to any person obtaining a copy
9 # of this software and associated documentation files (the "Software"), to deal
10 # in the Software without restriction, including without limitation the rights
11 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
12 # copies of the Software, and to permit persons to whom the Software is
13 # furnished to do so, subject to the following conditions:
15 # The above copyright notice and this permission notice shall be included in
16 # all copies or substantial portions of the Software.
18 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
21 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
23 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
26 from .command
import Command
27 import lttnganalyses
.io
28 from linuxautomaton
import common
29 from ascii_graph
import Pyasciigraph
34 class IoAnalysisCommand(Command
):
35 _DESC
= """The I/O command."""
37 _LATENCY_STATS_FORMAT
= '{:<14} {:>14} {:>14} {:>14} {:>14} {:>14}'
38 _SECTION_SEPARATOR_STRING
= '-' * 89
41 super().__init
__(self
._add
_arguments
,
42 enable_proc_filter_args
=True,
43 enable_max_min_args
=True,
44 enable_max_min_size_arg
=True,
47 def _validate_transform_args(self
):
48 self
._arg
_usage
= self
._args
.usage
49 self
._arg
_stats
= self
._args
.latencystats
50 self
._arg
_latencytop
= self
._args
.latencytop
51 self
._arg
_freq
= self
._args
.latencyfreq
52 self
._arg
_freq
_resolution
= self
._args
.freq_resolution
54 def _default_args(self
, stats
, log
, freq
, usage
, latencytop
):
56 self
._arg
_stats
= True
62 self
._arg
_usage
= True
64 self
._arg
_latencytop
= True
66 def run(self
, stats
=False, log
=False, freq
=False, usage
=False,
68 # parse arguments first
70 # validate, transform and save specific arguments
71 self
._validate
_transform
_args
()
72 # handle the default args for different executables
73 self
._default
_args
(stats
, log
, freq
, usage
, latencytop
)
76 # create the appropriate analysis/analyses
77 self
._create
_analysis
()
79 self
._run
_analysis
(self
._reset
_total
, self
._refresh
)
81 self
._print
_results
(self
.start_ns
, self
.trace_end_ts
)
88 def run_latencytop(self
):
89 self
.run(latencytop
=True)
100 def _create_analysis(self
):
101 self
._analysis
= lttnganalyses
.io
.IoAnalysis(self
.state
)
103 def _refresh(self
, begin
, end
):
104 self
._print
_results
(begin
, end
)
105 self
._reset
_total
(end
)
108 def _filter_size(self
, size
):
111 if self
._arg
_maxsize
is not None and size
> self
._arg
_maxsize
:
113 if self
._arg
_minsize
is not None and size
< self
._arg
_minsize
:
117 def _filter_latency(self
, duration
):
118 if self
._arg
_max
is not None and (duration
/1000) > self
._arg
_max
:
120 if self
._arg
_min
is not None and (duration
/1000) < self
._arg
_min
:
124 def _filter_time_range(self
, begin
, end
):
125 return not (self
._arg
_begin
and self
._arg
_end
and end
and
126 begin
> self
._arg
_end
)
128 def _filter_io_request(self
, io_rq
):
129 proc
= self
._analysis
.tids
[io_rq
.tid
]
130 return self
._filter
_process
(proc
) and \
131 self
._filter
_size
(io_rq
.size
) and \
132 self
._filter
_latency
(io_rq
.duration
) and \
133 self
._filter
_time
_range
(io_rq
.begin_ts
, io_rq
.end_ts
)
135 def _is_io_rq_out_of_range(self
, io_rq
):
136 return self
._arg
_begin
and io_rq
.begin_ts
< self
._arg
_begin
or \
137 self
._arg
_end
and io_rq
.end_ts
> self
._arg
_end
139 def _print_ascii_graph(self
, input_list
, get_datum_cb
, graph_label
,
141 """Print an ascii graph for given data
143 This method wraps the ascii_graph module and facilitates the
144 printing of a graph with a limited number of lines.
147 input_list (list): A list of objects from which the data
148 for the graph will be generated.
150 get_datum_cb (function): function that takes a single
151 object from the input list as an argument, and returns a
152 datum tuple for the graph, of the form (string, int). The
153 string element is printed as is in the graph, and the int
154 is the numeric value corresponding to this graph entry.
156 graph_label (string): Label used to identify the printed
159 graph_args (dict, optional): Dict of keyword args to be
160 passed to the graph() function as is.
163 limit
= self
._arg
_limit
164 graph
= Pyasciigraph()
166 if graph_args
is None:
169 for elem
in input_list
:
170 datum
= get_datum_cb(elem
)
171 if datum
is not None:
174 if limit
is not None and count
>= limit
:
177 for line
in graph
.graph(graph_label
, data
, **graph_args
):
180 # I/O Top output methods
181 def _get_read_datum(self
, proc_stats
):
182 if not self
._filter
_process
(proc_stats
):
185 if proc_stats
.pid
is None:
186 pid_str
= 'unknown (tid=%d)' % (proc_stats
.tid
)
188 pid_str
= str(proc_stats
.pid
)
190 format_str
= '{:>10} {:<25} {:>9} file {:>9} net {:>9} unknown'
191 output_str
= format_str
.format(
192 common
.convert_size(proc_stats
.total_read
, padding_after
=True),
193 '%s (%s)' % (proc_stats
.comm
, pid_str
),
194 common
.convert_size(proc_stats
.disk_read
, padding_after
=True),
195 common
.convert_size(proc_stats
.net_read
, padding_after
=True),
196 common
.convert_size(proc_stats
.unk_read
, padding_after
=True))
198 return (output_str
, proc_stats
.total_read
)
200 def _get_write_datum(self
, proc_stats
):
201 if not self
._filter
_process
(proc_stats
):
204 if proc_stats
.pid
is None:
205 pid_str
= 'unknown (tid=%d)' % (proc_stats
.tid
)
207 pid_str
= str(proc_stats
.pid
)
209 format_str
= '{:>10} {:<25} {:>9} file {:>9} net {:>9} unknown'
210 output_str
= format_str
.format(
211 common
.convert_size(proc_stats
.total_write
, padding_after
=True),
212 '%s (%s)' % (proc_stats
.comm
, pid_str
),
213 common
.convert_size(proc_stats
.disk_write
, padding_after
=True),
214 common
.convert_size(proc_stats
.net_write
, padding_after
=True),
215 common
.convert_size(proc_stats
.unk_write
, padding_after
=True))
217 return (output_str
, proc_stats
.total_write
)
219 def _get_block_read_datum(self
, proc_stats
):
220 if not self
._filter
_process
(proc_stats
) or proc_stats
.block_read
== 0:
223 comm
= proc_stats
.comm
227 if proc_stats
.pid
is None:
228 pid_str
= 'unknown (tid=%d)' % (proc_stats
.tid
)
230 pid_str
= str(proc_stats
.pid
)
232 format_str
= '{:>10} {:<22}'
233 output_str
= format_str
.format(
234 common
.convert_size(proc_stats
.block_read
, padding_after
=True),
235 '%s (pid=%s)' % (comm
, pid_str
))
237 return (output_str
, proc_stats
.block_read
)
239 def _get_block_write_datum(self
, proc_stats
):
240 if not self
._filter
_process
(proc_stats
) or \
241 proc_stats
.block_write
== 0:
244 comm
= proc_stats
.comm
248 if proc_stats
.pid
is None:
249 pid_str
= 'unknown (tid=%d)' % (proc_stats
.tid
)
251 pid_str
= str(proc_stats
.pid
)
253 format_str
= '{:>10} {:<22}'
254 output_str
= format_str
.format(
255 common
.convert_size(proc_stats
.block_write
, padding_after
=True),
256 '%s (pid=%s)' % (comm
, pid_str
))
258 return (output_str
, proc_stats
.block_write
)
260 def _get_total_rq_sectors_datum(self
, disk
):
261 if disk
.total_rq_sectors
== 0:
264 return (disk
.disk_name
, disk
.total_rq_sectors
)
266 def _get_rq_count_datum(self
, disk
):
267 if disk
.rq_count
== 0:
270 return (disk
.disk_name
, disk
.rq_count
)
272 def _get_avg_disk_latency_datum(self
, disk
):
273 if disk
.rq_count
== 0:
276 avg_latency
= ((disk
.total_rq_duration
/ disk
.rq_count
) /
277 common
.MSEC_PER_NSEC
)
278 avg_latency
= round(avg_latency
, 3)
280 return ('%s' % disk
.disk_name
, avg_latency
)
282 def _get_net_recv_bytes_datum(self
, iface
):
283 return ('%s %s' % (common
.convert_size(iface
.recv_bytes
), iface
.name
),
286 def _get_net_sent_bytes_datum(self
, iface
):
287 return ('%s %s' % (common
.convert_size(iface
.sent_bytes
), iface
.name
),
290 def _get_file_read_datum(self
, file_stats
):
291 if file_stats
.read
== 0:
295 for pid
, fd
in file_stats
.fd_by_pid
.items():
296 comm
= self
._analysis
.tids
[pid
].comm
297 fd_by_pid_str
+= 'fd %d in %s (%s) ' % (fd
, comm
, pid
)
299 format_str
= '{:>10} {} {}'
300 output_str
= format_str
.format(
301 common
.convert_size(file_stats
.read
, padding_after
=True),
305 return (output_str
, file_stats
.read
)
307 def _get_file_write_datum(self
, file_stats
):
308 if file_stats
.write
== 0:
312 for pid
, fd
in file_stats
.fd_by_pid
.items():
313 comm
= self
._analysis
.tids
[pid
].comm
314 fd_by_pid_str
+= 'fd %d in %s (%s) ' % (fd
, comm
, pid
)
316 format_str
= '{:>10} {} {}'
317 output_str
= format_str
.format(
318 common
.convert_size(file_stats
.write
, padding_after
=True),
322 return (output_str
, file_stats
.write
)
324 def _output_read(self
):
325 input_list
= sorted(self
._analysis
.tids
.values(),
326 key
=operator
.attrgetter('total_read'),
328 label
= 'Per-process I/O Read'
329 graph_args
= {'with_value': False}
330 self
._print
_ascii
_graph
(input_list
, self
._get
_read
_datum
, label
,
333 def _output_write(self
):
334 input_list
= sorted(self
._analysis
.tids
.values(),
335 key
=operator
.attrgetter('total_write'),
337 label
= 'Per-process I/O Write'
338 graph_args
= {'with_value': False}
339 self
._print
_ascii
_graph
(input_list
, self
._get
_write
_datum
, label
,
342 def _output_block_read(self
):
343 input_list
= sorted(self
._analysis
.tids
.values(),
344 key
=operator
.attrgetter('block_read'),
346 label
= 'Block I/O Read'
347 graph_args
= {'with_value': False}
348 self
._print
_ascii
_graph
(input_list
, self
._get
_block
_read
_datum
,
351 def _output_block_write(self
):
352 input_list
= sorted(self
._analysis
.tids
.values(),
353 key
=operator
.attrgetter('block_write'),
355 label
= 'Block I/O Write'
356 graph_args
= {'with_value': False}
357 self
._print
_ascii
_graph
(input_list
, self
._get
_block
_write
_datum
,
360 def _output_total_rq_sectors(self
):
361 input_list
= sorted(self
._analysis
.disks
.values(),
362 key
=operator
.attrgetter('total_rq_sectors'),
364 label
= 'Disk requests sector count'
365 graph_args
= {'unit': ' sectors'}
366 self
._print
_ascii
_graph
(input_list
, self
._get
_total
_rq
_sectors
_datum
,
369 def _output_rq_count(self
):
370 input_list
= sorted(self
._analysis
.disks
.values(),
371 key
=operator
.attrgetter('rq_count'),
373 label
= 'Disk request count'
374 graph_args
= {'unit': ' requests'}
375 self
._print
_ascii
_graph
(input_list
, self
._get
_rq
_count
_datum
,
378 def _output_avg_disk_latency(self
):
379 input_list
= self
._analysis
.disks
.values()
380 label
= 'Disk request average latency'
381 graph_args
= {'unit': ' ms', 'sort': 2}
382 self
._print
_ascii
_graph
(input_list
, self
._get
_avg
_disk
_latency
_datum
,
385 def _output_net_recv_bytes(self
):
386 input_list
= sorted(self
._analysis
.ifaces
.values(),
387 key
=operator
.attrgetter('recv_bytes'),
389 label
= 'Network received bytes'
390 graph_args
= {'with_value': False}
391 self
._print
_ascii
_graph
(input_list
, self
._get
_net
_recv
_bytes
_datum
,
394 def _output_net_sent_bytes(self
):
395 input_list
= sorted(self
._analysis
.ifaces
.values(),
396 key
=operator
.attrgetter('sent_bytes'),
398 label
= 'Network sent bytes'
399 graph_args
= {'with_value': False}
400 self
._print
_ascii
_graph
(input_list
, self
._get
_net
_sent
_bytes
_datum
,
403 def _output_file_read(self
, files
):
404 input_list
= sorted(files
.values(),
405 key
=lambda file_stats
: file_stats
.read
,
408 graph_args
= {'with_value': False, 'sort': 2}
409 self
._print
_ascii
_graph
(input_list
, self
._get
_file
_read
_datum
,
412 def _output_file_write(self
, files
):
413 input_list
= sorted(files
.values(),
414 key
=lambda file_stats
: file_stats
.write
,
416 label
= 'Files write'
417 graph_args
= {'with_value': False, 'sort': 2}
418 self
._print
_ascii
_graph
(input_list
, self
._get
_file
_write
_datum
,
421 def _output_file_read_write(self
):
422 files
= self
._analysis
.get_files_stats(self
._arg
_pid
_list
,
424 self
._output
_file
_read
(files
)
425 self
._output
_file
_write
(files
)
427 def iotop_output(self
):
430 self
._output
_file
_read
_write
()
431 self
._output
_block
_read
()
432 self
._output
_block
_write
()
433 self
._output
_total
_rq
_sectors
()
434 self
._output
_rq
_count
()
435 self
._output
_avg
_disk
_latency
()
436 self
._output
_net
_recv
_bytes
()
437 self
._output
_net
_sent
_bytes
()
439 # I/O Latency frequency output methods
440 def _print_frequency_distribution(self
, duration_list
, title
):
441 if not duration_list
:
444 # The number of bins for the histogram
445 resolution
= self
._arg
_freq
_resolution
447 min_duration
= min(duration_list
)
448 max_duration
= max(duration_list
)
453 step
= (max_duration
- min_duration
) / resolution
459 graph
= Pyasciigraph()
460 for i
in range(resolution
):
461 buckets
.append(i
* step
)
463 for duration
in duration_list
:
465 index
= min(int((duration
- min_duration
) / step
), resolution
- 1)
469 for index
, value
in enumerate(values
):
470 # The graph data format is a tuple (info, value). Here info
471 # is the lower bound of the bucket, value the bucket's count
472 graph_data
.append(('%0.03f' % (index
* step
+ min_duration
),
475 graph_lines
= graph
.graph(
482 for line
in graph_lines
:
487 def _output_disk_latency_freq(self
):
488 for disk
in self
._analysis
.disks
.values():
489 rq_durations
= [rq
.duration
for rq
in disk
.rq_list
]
490 self
._print
_frequency
_distribution
(
492 'Frequency distribution for disk %s (usec)' % (disk
.disk_name
))
494 def iolatency_output(self
):
495 self
._output
_disk
_latency
_freq
()
497 def iolatency_syscalls_output(self
):
499 self
._print
_frequency
_distribution
([io_rq
.duration
for io_rq
in
500 self
._analysis
.open_io_requests
if
501 self
._filter
_io
_request
(io_rq
)],
502 'Open latency distribution (usec)')
503 self
._print
_frequency
_distribution
([io_rq
.duration
for io_rq
in
504 self
._analysis
.read_io_requests
if
505 self
._filter
_io
_request
(io_rq
)],
506 'Read latency distribution (usec)')
507 self
._print
_frequency
_distribution
([io_rq
.duration
for io_rq
in
508 self
._analysis
.write_io_requests
if
509 self
._filter
_io
_request
(io_rq
)],
510 'Write latency distribution (usec)')
511 self
._print
_frequency
_distribution
([io_rq
.duration
for io_rq
in
512 self
._analysis
.sync_io_requests
if
513 self
._filter
_io
_request
(io_rq
)],
514 'Sync latency distribution (usec)')
516 # I/O latency top and log output methods
517 def _output_io_request(self
, io_rq
):
518 fmt
= '{:<40} {:<16} {:>16} {:>11} {:<24} {:<8} {:<14}'
520 begin_time
= common
.ns_to_hour_nsec(io_rq
.begin_ts
,
523 end_time
= common
.ns_to_hour_nsec(io_rq
.end_ts
,
526 time_range_str
= '[' + begin_time
+ ',' + end_time
+ ']'
527 duration_str
= '%0.03f' % (io_rq
.duration
/ 1000)
529 if io_rq
.size
is None:
532 size
= common
.convert_size(io_rq
.size
)
535 proc_stats
= self
._analysis
.tids
[tid
]
536 comm
= proc_stats
.comm
538 # TODO: handle fd_in/fd_out for RW type operations
544 parent_proc
= proc_stats
545 if parent_proc
.pid
is not None:
546 parent_proc
= self
._analysis
.tids
[parent_proc
.pid
]
548 fd_stats
= parent_proc
.get_fd(fd
, io_rq
.end_ts
)
549 if fd_stats
is not None:
550 filename
= fd_stats
.filename
554 file_str
= '%s (fd=%s)' % (filename
, fd
)
556 if self
._is
_io
_rq
_out
_of
_range
(io_rq
):
557 time_range_str
+= '*'
560 time_range_str
+= ' '
563 print(fmt
.format(time_range_str
, io_rq
.syscall_name
, duration_str
,
564 size
, comm
, tid
, file_str
))
566 def _output_io_requests_list(self
, rq_list
, title
, sort_key
, is_top
=False):
571 has_out_of_range_rq
= False
576 header_fmt
= '{:<19} {:<20} {:<16} {:<23} {:<5} {:<24} {:<8} {:<14}'
577 print(header_fmt
.format(
578 'Begin', 'End', 'Name', 'Duration (usec)', 'Size', 'Proc', 'PID',
581 for io_rq
in sorted(rq_list
, key
=operator
.attrgetter(sort_key
),
583 if is_top
and count
> self
._arg
_limit
:
586 self
._output
_io
_request
(io_rq
)
587 if not has_out_of_range_rq
and self
._is
_io
_rq
_out
_of
_range
(io_rq
):
588 has_out_of_range_rq
= True
592 if has_out_of_range_rq
:
593 print('*: Syscalls started and/or completed outside of the '
596 def _output_latency_log_from_requests(self
, io_requests
, title
, sort_key
,
598 io_requests
= [io_rq
for io_rq
in io_requests
if
599 self
._filter
_io
_request
(io_rq
)]
600 self
._output
_io
_requests
_list
(io_requests
, title
, sort_key
, is_top
)
602 def iolatency_syscalls_top_output(self
):
603 self
._output
_latency
_log
_from
_requests
(
604 [io_rq
for io_rq
in self
._analysis
.open_io_requests
if
605 self
._filter
_io
_request
(io_rq
)],
606 'Top open syscall latencies (usec)',
607 'duration', is_top
=True)
608 self
._output
_io
_requests
_list
(
609 [io_rq
for io_rq
in self
._analysis
.read_io_requests
if
610 self
._filter
_io
_request
(io_rq
)],
611 'Top read syscall latencies (usec)',
612 'duration', is_top
=True)
613 self
._output
_io
_requests
_list
(
614 [io_rq
for io_rq
in self
._analysis
.write_io_requests
if
615 self
._filter
_io
_request
(io_rq
)],
616 'Top write syscall latencies (usec)',
617 'duration', is_top
=True)
618 self
._output
_io
_requests
_list
(
619 [io_rq
for io_rq
in self
._analysis
.sync_io_requests
if
620 self
._filter
_io
_request
(io_rq
)],
621 'Top sync syscall latencies (usec)',
622 'duration', is_top
=True)
624 def iolatency_syscalls_log_output(self
):
625 self
._output
_io
_requests
_list
(
626 self
._analysis
.io_requests
,
627 'Log of all I/O system calls',
630 # I/O Stats output methods
631 def _output_latency_stats(self
, name
, rq_count
, min_duration
, max_duration
,
632 total_duration
, rq_durations
):
636 stdev
= '%0.03f' % (statistics
.stdev(rq_durations
) / 1000)
639 avg
= '%0.03f' % (total_duration
/ (rq_count
) / 1000)
642 min_duration
= '%0.03f' % (min_duration
/ 1000)
643 max_duration
= '%0.03f' % (max_duration
/ 1000)
645 print(IoAnalysisCommand
._LATENCY
_STATS
_FORMAT
.format(
646 name
, rq_count
, min_duration
, avg
, max_duration
, stdev
))
648 def _output_latency_stats_from_requests(self
, io_requests
, name
):
649 rq_durations
= [io_rq
.duration
for io_rq
in io_requests
if
650 self
._filter
_io
_request
(io_rq
)]
651 rq_count
= len(rq_durations
)
652 if len(rq_durations
) > 0:
653 min_duration
= min(rq_durations
)
654 max_duration
= max(rq_durations
)
658 total_duration
= sum(rq_durations
)
660 self
._output
_latency
_stats
(name
, rq_count
, min_duration
,
661 max_duration
, total_duration
,
664 def _output_syscalls_latency_stats(self
):
665 print('\nSyscalls latency statistics (usec):')
666 print(IoAnalysisCommand
._LATENCY
_STATS
_FORMAT
.format(
667 'Type', 'Count', 'Min', 'Average', 'Max', 'Stdev'))
668 print(IoAnalysisCommand
._SECTION
_SEPARATOR
_STRING
)
670 self
._output
_latency
_stats
_from
_requests
(
671 self
._analysis
.open_io_requests
, 'Open')
672 self
._output
_latency
_stats
_from
_requests
(
673 self
._analysis
.read_io_requests
, 'Read')
674 self
._output
_latency
_stats
_from
_requests
(
675 self
._analysis
.write_io_requests
, 'Write')
676 self
._output
_latency
_stats
_from
_requests
(
677 self
._analysis
.sync_io_requests
, 'Sync')
679 def _output_disk_latency_stats(self
):
680 if not self
._analysis
.disks
:
683 print('\nDisk latency statistics (usec):')
684 print(IoAnalysisCommand
._LATENCY
_STATS
_FORMAT
.format(
685 'Name', 'Count', 'Min', 'Average', 'Max', 'Stdev'))
686 print(IoAnalysisCommand
._SECTION
_SEPARATOR
_STRING
)
688 for disk
in self
._analysis
.disks
.values():
690 rq_durations
= [rq
.duration
for rq
in disk
.rq_list
]
691 self
._output
_latency
_stats
(disk
.disk_name
,
693 disk
.min_rq_duration
,
694 disk
.max_rq_duration
,
695 disk
.total_rq_duration
,
698 def iostats_output(self
):
699 self
._output
_syscalls
_latency
_stats
()
700 self
._output
_disk
_latency
_stats
()
702 def _print_results(self
, begin_ns
, end_ns
):
703 self
._print
_date
(begin_ns
, end_ns
)
707 self
.iostats_output()
708 if self
._arg
_latencytop
:
709 self
.iolatency_syscalls_top_output()
711 self
.iolatency_syscalls_output()
712 self
.iolatency_output()
714 self
.iolatency_syscalls_log_output()
716 def _reset_total(self
, start_ts
):
717 self
._analysis
.reset()
719 def _add_arguments(self
, ap
):
720 ap
.add_argument('--usage', action
='store_true',
721 help='Show the I/O usage')
722 ap
.add_argument('--latencystats', action
='store_true',
723 help='Show the I/O latency statistics')
724 ap
.add_argument('--latencytop', action
='store_true',
725 help='Show the I/O latency top')
726 ap
.add_argument('--latencyfreq', action
='store_true',
727 help='Show the I/O latency frequency distribution')
728 ap
.add_argument('--freq-resolution', type=int, default
=20,
729 help='Frequency distribution resolution '
736 iocmd
= IoAnalysisCommand()
743 iocmd
= IoAnalysisCommand()
745 iocmd
.run_latencytop()
750 iocmd
= IoAnalysisCommand()
757 iocmd
= IoAnalysisCommand()
764 iocmd
= IoAnalysisCommand()