cli: remove shebangs
deliverable/lttng-analyses.git: lttnganalyses/cli/io.py
# The MIT License (MIT)
#
# Copyright (C) 2015 - Julien Desfossez <jdesfossez@efficios.com>
#               2015 - Antoine Busque <abusque@efficios.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

from .command import Command
from ..core import io
from ..linuxautomaton import common
from ..ascii_graph import Pyasciigraph
from . import mi
import operator
import statistics
import collections
import sys


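# Result tables produced for the --usage views, bundled so they can be
# passed around (and appended to the MI output) as a single object.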
_UsageTables = collections.namedtuple('_UsageTables', [
    'per_proc_read',
    'per_proc_write',
    'per_file_read',
    'per_file_write',
    'per_proc_block_read',
    'per_proc_block_write',
    'per_disk_sector',
    'per_disk_request',
    'per_disk_rtps',
    'per_netif_recv',
    'per_netif_send',
])


class IoAnalysisCommand(Command):
    _DESC = """The I/O command."""
    _ANALYSIS_CLASS = io.IoAnalysis
    _MI_TITLE = 'I/O analysis'
    _MI_DESCRIPTION = 'System call/disk latency statistics, system call ' + \
                      'latency distribution, system call top latencies, ' + \
                      'I/O usage top, and I/O operations log'
    _MI_TAGS = [
        mi.Tags.IO,
        mi.Tags.SYSCALL,
        mi.Tags.STATS,
        mi.Tags.FREQ,
        mi.Tags.LOG,
        mi.Tags.TOP,
    ]
    _MI_TABLE_CLASS_SYSCALL_LATENCY_STATS = 'syscall-latency-stats'
    _MI_TABLE_CLASS_PART_LATENCY_STATS = 'disk-latency-stats'
    _MI_TABLE_CLASS_FREQ = 'freq'
    _MI_TABLE_CLASS_TOP_SYSCALL = 'top-syscall'
    _MI_TABLE_CLASS_LOG = 'log'
    _MI_TABLE_CLASS_PER_PROCESS_TOP = 'per-process-top'
    _MI_TABLE_CLASS_PER_FILE_TOP = 'per-file-top'
    _MI_TABLE_CLASS_PER_PROCESS_TOP_BLOCK = 'per-process-top-block'
    _MI_TABLE_CLASS_PER_DISK_TOP_SECTOR = 'per-disk-top-sector'
    _MI_TABLE_CLASS_PER_DISK_TOP_REQUEST = 'per-disk-top-request'
    _MI_TABLE_CLASS_PER_DISK_TOP_RTPS = 'per-disk-top-rps'
    _MI_TABLE_CLASS_PER_NETIF_TOP = 'per-netif-top'
    _MI_TABLE_CLASSES = [
        (
            _MI_TABLE_CLASS_SYSCALL_LATENCY_STATS,
            'System call latency statistics', [
                ('obj', 'System call category', mi.String),
                ('count', 'Call count', mi.Integer, 'calls'),
                ('min_latency', 'Minimum call latency', mi.Duration),
                ('avg_latency', 'Average call latency', mi.Duration),
                ('max_latency', 'Maximum call latency', mi.Duration),
                ('stdev_latency', 'System call latency standard deviation',
                 mi.Duration),
            ]
        ),
        (
            _MI_TABLE_CLASS_PART_LATENCY_STATS,
            'Partition latency statistics', [
                ('obj', 'Partition', mi.Disk),
                ('count', 'Access count', mi.Integer, 'accesses'),
                ('min_latency', 'Minimum access latency', mi.Duration),
                ('avg_latency', 'Average access latency', mi.Duration),
                ('max_latency', 'Maximum access latency', mi.Duration),
                ('stdev_latency', 'Access latency standard deviation',
                 mi.Duration),
            ]
        ),
        (
            _MI_TABLE_CLASS_FREQ,
            'I/O request latency distribution', [
                ('latency_lower', 'Latency (lower bound)', mi.Duration),
                ('latency_upper', 'Latency (upper bound)', mi.Duration),
                ('count', 'Request count', mi.Integer, 'requests'),
            ]
        ),
        (
            _MI_TABLE_CLASS_TOP_SYSCALL,
            'Top system call latencies', [
                ('time_range', 'Call time range', mi.TimeRange),
                ('out_of_range', 'System call out of range?', mi.Boolean),
                ('duration', 'Call duration', mi.Duration),
                ('syscall', 'System call', mi.Syscall),
                ('size', 'Read/write size', mi.Size),
                ('process', 'Process', mi.Process),
                ('path', 'File path', mi.Path),
                ('fd', 'File descriptor', mi.Fd),
            ]
        ),
        (
            _MI_TABLE_CLASS_LOG,
            'I/O operations log', [
                ('time_range', 'Call time range', mi.TimeRange),
                ('out_of_range', 'System call out of range?', mi.Boolean),
                ('duration', 'Call duration', mi.Duration),
                ('syscall', 'System call', mi.Syscall),
                ('size', 'Read/write size', mi.Size),
                ('process', 'Process', mi.Process),
                ('path', 'File path', mi.Path),
                ('fd', 'File descriptor', mi.Fd),
            ]
        ),
        (
            _MI_TABLE_CLASS_PER_PROCESS_TOP,
            'Per-process top I/O operations', [
                ('process', 'Process', mi.Process),
                ('size', 'Total operations size', mi.Size),
                ('disk_size', 'Disk operations size', mi.Size),
                ('net_size', 'Network operations size', mi.Size),
                ('unknown_size', 'Unknown operations size', mi.Size),
            ]
        ),
        (
            _MI_TABLE_CLASS_PER_FILE_TOP,
            'Per-file top I/O operations', [
                ('path', 'File path/info', mi.Path),
                ('size', 'Operations size', mi.Size),
                ('fd_owners', 'File descriptor owners', mi.String),
            ]
        ),
        (
            _MI_TABLE_CLASS_PER_PROCESS_TOP_BLOCK,
            'Per-process top block I/O operations', [
                ('process', 'Process', mi.Process),
                ('size', 'Operations size', mi.Size),
            ]
        ),
        (
            _MI_TABLE_CLASS_PER_DISK_TOP_SECTOR,
            'Per-disk top sector I/O operations', [
                ('disk', 'Disk', mi.Disk),
                ('count', 'Sector count', mi.Integer, 'sectors'),
            ]
        ),
        (
            _MI_TABLE_CLASS_PER_DISK_TOP_REQUEST,
            'Per-disk top I/O requests', [
                ('disk', 'Disk', mi.Disk),
                ('count', 'Request count', mi.Integer, 'I/O requests'),
            ]
        ),
        (
            _MI_TABLE_CLASS_PER_DISK_TOP_RTPS,
            'Per-disk top I/O request time/sector', [
                ('disk', 'Disk', mi.Disk),
                ('rtps', 'Request time/sector', mi.Duration),
            ]
        ),
        (
            _MI_TABLE_CLASS_PER_NETIF_TOP,
            'Per-network interface top I/O operations', [
                ('netif', 'Network interface', mi.NetIf),
                ('size', 'Operations size', mi.Size),
            ]
        ),
    ]
    _LATENCY_STATS_FORMAT = '{:<14} {:>14} {:>14} {:>14} {:>14} {:>14}'
    _SECTION_SEPARATOR_STRING = '-' * 89

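    # Called for each completed analysis period [begin_ns, end_ns]: builds
    # the result tables requested on the command line, then either records
    # them for machine interface (MI) output or prints them directly.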
    def _analysis_tick(self, begin_ns, end_ns):
        syscall_latency_stats_table = None
        disk_latency_stats_table = None
        freq_tables = None
        top_tables = None
        log_table = None
        usage_tables = None

        if self._args.stats:
            syscall_latency_stats_table, disk_latency_stats_table = \
                self._get_latency_stats_result_tables(begin_ns, end_ns)

        if self._args.freq:
            freq_tables = self._get_freq_result_tables(begin_ns, end_ns)

        if self._args.usage:
            usage_tables = self._get_usage_result_tables(begin_ns, end_ns)

        if self._args.top:
            top_tables = self._get_top_result_tables(begin_ns, end_ns)

        if self._args.log:
            log_table = self._get_log_result_table(begin_ns, end_ns)

        if self._mi_mode:
            self._mi_append_result_tables([
                log_table,
                syscall_latency_stats_table,
                disk_latency_stats_table,
            ])
            self._mi_append_result_tables(top_tables)
            self._mi_append_result_tables(usage_tables)
            self._mi_append_result_tables(freq_tables)
        else:
            self._print_date(begin_ns, end_ns)

            if self._args.usage:
                self._print_usage(usage_tables)

            if self._args.stats:
                self._print_latency_stats(syscall_latency_stats_table,
                                          disk_latency_stats_table)

            if self._args.top:
                self._print_top(top_tables)

            if self._args.freq:
                self._print_freq(freq_tables)

            if self._args.log:
                self._print_log(log_table)

    def _create_summary_result_tables(self):
        # TODO: create a summary table here
        self._mi_clear_result_tables()

    # Filter predicates
    def _filter_size(self, size):
        if size is None:
            return True
        if self._args.maxsize is not None and size > self._args.maxsize:
            return False
        if self._args.minsize is not None and size < self._args.minsize:
            return False
        return True

    def _filter_latency(self, duration):
        if self._args.max is not None and duration > self._args.max:
            return False
        if self._args.min is not None and duration < self._args.min:
            return False
        return True

    def _filter_time_range(self, begin, end):
        # Note: we only want to return False when a request has ended and
        # is completely outside the time range (i.e. began after the end
        # of the time range).
        return not (self._args.begin and self._args.end and end and
                    begin > self._args.end)

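    # Combined predicate: an I/O request is kept only if it passes the
    # process, size, latency and time range filters above.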
    def _filter_io_request(self, io_rq):
        if io_rq.tid in self._analysis.tids:
            proc = self._analysis.tids[io_rq.tid]
        else:
            proc = None

        return self._filter_process(proc) and \
            self._filter_size(io_rq.size) and \
            self._filter_latency(io_rq.duration) and \
            self._filter_time_range(io_rq.begin_ts, io_rq.end_ts)

    def _is_io_rq_out_of_range(self, io_rq):
        return self._args.begin and io_rq.begin_ts < self._args.begin or \
            self._args.end and io_rq.end_ts > self._args.end

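    # Each _append_*_usage_row() helper appends one row to the given result
    # table and returns True, or returns a falsy value when the element is
    # filtered out; _fill_usage_result_table() relies on that return value
    # to count rows against the user-supplied limit.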
    def _append_per_proc_read_usage_row(self, proc_stats, result_table):
        if not self._filter_process(proc_stats):
            return False

        result_table.append_row(
            process=mi.Process(proc_stats.comm, pid=proc_stats.pid,
                               tid=proc_stats.tid),
            size=mi.Size(proc_stats.total_read),
            disk_size=mi.Size(proc_stats.disk_read),
            net_size=mi.Size(proc_stats.net_read),
            unknown_size=mi.Size(proc_stats.unk_read),
        )

        return True

    def _append_per_proc_write_usage_row(self, proc_stats, result_table):
        if not self._filter_process(proc_stats):
            return False

        result_table.append_row(
            process=mi.Process(proc_stats.comm, pid=proc_stats.pid,
                               tid=proc_stats.tid),
            size=mi.Size(proc_stats.total_write),
            disk_size=mi.Size(proc_stats.disk_write),
            net_size=mi.Size(proc_stats.net_write),
            unknown_size=mi.Size(proc_stats.unk_write),
        )

        return True

    def _append_per_proc_block_read_usage_row(self, proc_stats, result_table):
        if not self._filter_process(proc_stats) or proc_stats.block_read == 0:
            return False

        if proc_stats.comm:
            proc_name = proc_stats.comm
        else:
            proc_name = None

        result_table.append_row(
            process=mi.Process(proc_name, pid=proc_stats.pid,
                               tid=proc_stats.tid),
            size=mi.Size(proc_stats.block_read),
        )

        return True

    def _append_per_proc_block_write_usage_row(self, proc_stats, result_table):
        if not self._filter_process(proc_stats) or proc_stats.block_write == 0:
            return False

        if proc_stats.comm:
            proc_name = proc_stats.comm
        else:
            proc_name = None

        result_table.append_row(
            process=mi.Process(proc_name, pid=proc_stats.pid,
                               tid=proc_stats.tid),
            size=mi.Size(proc_stats.block_write),
        )

        return True

    def _append_disk_sector_usage_row(self, disk_stats, result_table):
        if disk_stats.total_rq_sectors == 0:
            return None

        result_table.append_row(
            disk=mi.Disk(disk_stats.disk_name),
            count=mi.Integer(disk_stats.total_rq_sectors),
        )

        return True

    def _append_disk_request_usage_row(self, disk_stats, result_table):
        if disk_stats.rq_count == 0:
            return False

        result_table.append_row(
            disk=mi.Disk(disk_stats.disk_name),
            count=mi.Integer(disk_stats.rq_count),
        )

        return True

    def _append_disk_rtps_usage_row(self, disk_stats, result_table):
        if disk_stats.rq_count == 0:
            return False

        avg_latency = (disk_stats.total_rq_duration / disk_stats.rq_count)
        result_table.append_row(
            disk=mi.Disk(disk_stats.disk_name),
            rtps=mi.Duration(avg_latency),
        )

        return True

    def _append_netif_recv_usage_row(self, netif_stats, result_table):
        result_table.append_row(
            netif=mi.NetIf(netif_stats.name),
            size=mi.Size(netif_stats.recv_bytes)
        )

        return True

    def _append_netif_send_usage_row(self, netif_stats, result_table):
        result_table.append_row(
            netif=mi.NetIf(netif_stats.name),
            size=mi.Size(netif_stats.sent_bytes)
        )

        return True

    def _get_file_stats_fd_owners_str(self, file_stats):
        fd_by_pid_str = ''

        for pid, fd in file_stats.fd_by_pid.items():
            comm = self._analysis.tids[pid].comm
            fd_by_pid_str += 'fd %d in %s (%s) ' % (fd, comm, pid)

        return fd_by_pid_str

    def _append_file_read_usage_row(self, file_stats, result_table):
        if file_stats.read == 0:
            return False

        fd_owners = self._get_file_stats_fd_owners_str(file_stats)
        result_table.append_row(
            path=mi.Path(file_stats.filename),
            size=mi.Size(file_stats.read),
            fd_owners=mi.String(fd_owners),
        )

        return True

    def _append_file_write_usage_row(self, file_stats, result_table):
        if file_stats.write == 0:
            return False

        fd_owners = self._get_file_stats_fd_owners_str(file_stats)
        result_table.append_row(
            path=mi.Path(file_stats.filename),
            size=mi.Size(file_stats.write),
            fd_owners=mi.String(fd_owners),
        )

        return True

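    # Generic filler: walks input_list (already sorted by the caller),
    # appends rows through append_row_cb and stops once the row limit has
    # been reached.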
    def _fill_usage_result_table(self, input_list, append_row_cb, result_table):
        count = 0
        limit = self._args.limit

        for elem in input_list:
            if append_row_cb(elem, result_table):
                count += 1

                if limit is not None and count >= limit:
                    break

    def _fill_per_process_read_usage_result_table(self, result_table):
        input_list = sorted(self._analysis.tids.values(),
                            key=operator.attrgetter('total_read'),
                            reverse=True)
        self._fill_usage_result_table(input_list,
                                      self._append_per_proc_read_usage_row,
                                      result_table)

    def _fill_per_process_write_usage_result_table(self, result_table):
        input_list = sorted(self._analysis.tids.values(),
                            key=operator.attrgetter('total_write'),
                            reverse=True)
        self._fill_usage_result_table(input_list,
                                      self._append_per_proc_write_usage_row,
                                      result_table)

    def _fill_per_process_block_read_usage_result_table(self, result_table):
        input_list = sorted(self._analysis.tids.values(),
                            key=operator.attrgetter('block_read'),
                            reverse=True)
        self._fill_usage_result_table(input_list,
                                      self._append_per_proc_block_read_usage_row,
                                      result_table)

    def _fill_per_process_block_write_usage_result_table(self, result_table):
        input_list = sorted(self._analysis.tids.values(),
                            key=operator.attrgetter('block_write'),
                            reverse=True)
        self._fill_usage_result_table(input_list,
                                      self._append_per_proc_block_write_usage_row,
                                      result_table)

    def _fill_disk_sector_usage_result_table(self, result_table):
        input_list = sorted(self._analysis.disks.values(),
                            key=operator.attrgetter('total_rq_sectors'),
                            reverse=True)
        self._fill_usage_result_table(input_list,
                                      self._append_disk_sector_usage_row,
                                      result_table)

    def _fill_disk_request_usage_result_table(self, result_table):
        input_list = sorted(self._analysis.disks.values(),
                            key=operator.attrgetter('rq_count'),
                            reverse=True)
        self._fill_usage_result_table(input_list,
                                      self._append_disk_request_usage_row,
                                      result_table)

    def _fill_disk_rtps_usage_result_table(self, result_table):
        input_list = self._analysis.disks.values()
        self._fill_usage_result_table(input_list,
                                      self._append_disk_rtps_usage_row,
                                      result_table)

    def _fill_netif_recv_usage_result_table(self, result_table):
        input_list = sorted(self._analysis.ifaces.values(),
                            key=operator.attrgetter('recv_bytes'),
                            reverse=True)
        self._fill_usage_result_table(input_list,
                                      self._append_netif_recv_usage_row,
                                      result_table)

    def _fill_netif_send_usage_result_table(self, result_table):
        input_list = sorted(self._analysis.ifaces.values(),
                            key=operator.attrgetter('sent_bytes'),
                            reverse=True)
        self._fill_usage_result_table(input_list,
                                      self._append_netif_send_usage_row,
                                      result_table)

    def _fill_file_read_usage_result_table(self, files, result_table):
        input_list = sorted(files.values(),
                            key=lambda file_stats: file_stats.read,
                            reverse=True)
        self._fill_usage_result_table(input_list,
                                      self._append_file_read_usage_row,
                                      result_table)

    def _fill_file_write_usage_result_table(self, files, result_table):
        input_list = sorted(files.values(),
                            key=lambda file_stats: file_stats.write,
                            reverse=True)
        self._fill_usage_result_table(input_list,
                                      self._append_file_write_usage_row,
                                      result_table)

    def _fill_file_usage_result_tables(self, read_table, write_table):
        files = self._analysis.get_files_stats(self._args.pid_list,
                                               self._args.proc_list)
        self._fill_file_read_usage_result_table(files, read_table)
        self._fill_file_write_usage_result_table(files, write_table)

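    # Creates one result table per usage view, fills it and returns the
    # whole set as a _UsageTables instance.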
    def _get_usage_result_tables(self, begin, end):
        # create result tables
        per_proc_read_table = self._mi_create_result_table(
            self._MI_TABLE_CLASS_PER_PROCESS_TOP, begin, end, 'read')
        per_proc_write_table = self._mi_create_result_table(
            self._MI_TABLE_CLASS_PER_PROCESS_TOP, begin, end, 'written')
        per_file_read_table = self._mi_create_result_table(
            self._MI_TABLE_CLASS_PER_FILE_TOP, begin, end, 'read')
        per_file_write_table = self._mi_create_result_table(
            self._MI_TABLE_CLASS_PER_FILE_TOP, begin, end, 'written')
        per_proc_block_read_table = self._mi_create_result_table(
            self._MI_TABLE_CLASS_PER_PROCESS_TOP_BLOCK, begin, end, 'read')
        per_proc_block_write_table = self._mi_create_result_table(
            self._MI_TABLE_CLASS_PER_PROCESS_TOP_BLOCK, begin, end, 'written')
        per_disk_sector_table = self._mi_create_result_table(
            self._MI_TABLE_CLASS_PER_DISK_TOP_SECTOR, begin, end)
        per_disk_request_table = self._mi_create_result_table(
            self._MI_TABLE_CLASS_PER_DISK_TOP_REQUEST, begin, end)
        per_disk_rtps_table = self._mi_create_result_table(
            self._MI_TABLE_CLASS_PER_DISK_TOP_RTPS, begin, end)
        per_netif_recv_table = self._mi_create_result_table(
            self._MI_TABLE_CLASS_PER_NETIF_TOP, begin, end, 'received')
        per_netif_send_table = self._mi_create_result_table(
            self._MI_TABLE_CLASS_PER_NETIF_TOP, begin, end, 'sent')

        # fill result tables
        self._fill_per_process_read_usage_result_table(per_proc_read_table)
        self._fill_per_process_write_usage_result_table(per_proc_write_table)
        self._fill_file_usage_result_tables(per_file_read_table,
                                            per_file_write_table)
        self._fill_per_process_block_read_usage_result_table(
            per_proc_block_read_table)
        self._fill_per_process_block_write_usage_result_table(
            per_proc_block_write_table)
        self._fill_disk_sector_usage_result_table(per_disk_sector_table)
        self._fill_disk_request_usage_result_table(per_disk_request_table)
        self._fill_disk_rtps_usage_result_table(per_disk_rtps_table)
        self._fill_netif_recv_usage_result_table(per_netif_recv_table)
        self._fill_netif_send_usage_result_table(per_netif_send_table)

        return _UsageTables(
            per_proc_read=per_proc_read_table,
            per_proc_write=per_proc_write_table,
            per_file_read=per_file_read_table,
            per_file_write=per_file_write_table,
            per_proc_block_read=per_proc_block_read_table,
            per_proc_block_write=per_proc_block_write_table,
            per_disk_sector=per_disk_sector_table,
            per_disk_request=per_disk_request_table,
            per_disk_rtps=per_disk_rtps_table,
            per_netif_recv=per_netif_recv_table,
            per_netif_send=per_netif_send_table,
        )

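    # The _get_*_usage_datum() helpers convert one result table row into a
    # (label, value) pair suitable for Pyasciigraph.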
    def _get_per_process_read_write_usage_datum(self, row):
        if row.process.pid is None:
            pid_str = 'unknown (tid=%d)' % (row.process.tid)
        else:
            pid_str = str(row.process.pid)

        format_str = '{:>10} {:<25} {:>9} file {:>9} net {:>9} unknown'
        output_str = format_str.format(
            common.convert_size(row.size.value, padding_after=True),
            '%s (%s)' % (row.process.name, pid_str),
            common.convert_size(row.disk_size.value, padding_after=True),
            common.convert_size(row.net_size.value, padding_after=True),
            common.convert_size(row.unknown_size.value, padding_after=True))

        return (output_str, row.size.value)

    def _get_per_process_block_read_write_usage_datum(self, row):
        proc_name = row.process.name

        if not proc_name:
            proc_name = 'unknown'

        if row.process.pid is None:
            pid_str = 'unknown (tid=%d)' % (row.process.tid)
        else:
            pid_str = str(row.process.pid)

        format_str = '{:>10} {:<22}'
        output_str = format_str.format(
            common.convert_size(row.size.value, padding_after=True),
            '%s (pid=%s)' % (proc_name, pid_str))

        return (output_str, row.size.value)

    def _get_per_disk_count_usage_datum(self, row):
        return (row.disk.name, row.count.value)

    def _get_per_disk_rtps_usage_datum(self, row):
        avg_latency = row.rtps.value / common.NSEC_PER_MSEC
        avg_latency = round(avg_latency, 3)

        return (row.disk.name, avg_latency)

    def _get_per_netif_recv_send_usage_datum(self, row):
        return ('%s %s' % (common.convert_size(row.size.value), row.netif.name),
                row.size.value)

    def _get_per_file_read_write_usage_datum(self, row):
        format_str = '{:>10} {} {}'
        output_str = format_str.format(
            common.convert_size(row.size.value, padding_after=True),
            row.path.path, row.fd_owners.value)

        return (output_str, row.size.value)

    def _print_usage_ascii_graph(self, result_table, get_datum_cb, graph_label,
                                 graph_args=None):
        graph = Pyasciigraph()
        data = []

        if graph_args is None:
            graph_args = {}

        for row in result_table.rows:
            datum = get_datum_cb(row)
            data.append(datum)

        for line in graph.graph(graph_label, data, **graph_args):
            print(line)

    def _print_per_process_read(self, result_table):
        label = 'Per-process I/O Read'
        graph_args = {'with_value': False}
        self._print_usage_ascii_graph(
            result_table, self._get_per_process_read_write_usage_datum,
            label, graph_args)

    def _print_per_process_write(self, result_table):
        label = 'Per-process I/O Write'
        graph_args = {'with_value': False}
        self._print_usage_ascii_graph(
            result_table, self._get_per_process_read_write_usage_datum,
            label, graph_args)

    def _print_per_process_block_read(self, result_table):
        label = 'Block I/O Read'
        graph_args = {'with_value': False}
        self._print_usage_ascii_graph(
            result_table, self._get_per_process_block_read_write_usage_datum,
            label, graph_args)

    def _print_per_process_block_write(self, result_table):
        label = 'Block I/O Write'
        graph_args = {'with_value': False}
        self._print_usage_ascii_graph(
            result_table, self._get_per_process_block_read_write_usage_datum,
            label, graph_args)

    def _print_per_disk_sector(self, result_table):
        label = 'Disk requests sector count'
        graph_args = {'unit': ' sectors'}
        self._print_usage_ascii_graph(result_table,
                                      self._get_per_disk_count_usage_datum,
                                      label, graph_args)

    def _print_per_disk_request(self, result_table):
        label = 'Disk request count'
        graph_args = {'unit': ' requests'}
        self._print_usage_ascii_graph(result_table,
                                      self._get_per_disk_count_usage_datum,
                                      label, graph_args)

    def _print_per_disk_rtps(self, result_table):
        label = 'Disk request average latency'
        graph_args = {'unit': ' ms', 'sort': 2}
        self._print_usage_ascii_graph(result_table,
                                      self._get_per_disk_rtps_usage_datum,
                                      label, graph_args)

    def _print_per_netif_recv(self, result_table):
        label = 'Network received bytes'
        graph_args = {'with_value': False}
        self._print_usage_ascii_graph(result_table,
                                      self._get_per_netif_recv_send_usage_datum,
                                      label, graph_args)

    def _print_per_netif_send(self, result_table):
        label = 'Network sent bytes'
        graph_args = {'with_value': False}
        self._print_usage_ascii_graph(result_table,
                                      self._get_per_netif_recv_send_usage_datum,
                                      label, graph_args)

    def _print_per_file_read(self, result_table):
        label = 'Files read'
        graph_args = {'with_value': False, 'sort': 2}
        self._print_usage_ascii_graph(result_table,
                                      self._get_per_file_read_write_usage_datum,
                                      label, graph_args)

    def _print_per_file_write(self, result_table):
        label = 'Files write'
        graph_args = {'with_value': False, 'sort': 2}
        self._print_usage_ascii_graph(result_table,
                                      self._get_per_file_read_write_usage_datum,
                                      label, graph_args)

    def _print_usage(self, usage_tables):
        self._print_per_process_read(usage_tables.per_proc_read)
        self._print_per_process_write(usage_tables.per_proc_write)
        self._print_per_file_read(usage_tables.per_file_read)
        self._print_per_file_write(usage_tables.per_file_write)
        self._print_per_process_block_read(usage_tables.per_proc_block_read)
        self._print_per_process_block_write(usage_tables.per_proc_block_write)
        self._print_per_disk_sector(usage_tables.per_disk_sector)
        self._print_per_disk_request(usage_tables.per_disk_request)
        self._print_per_disk_rtps(usage_tables.per_disk_rtps)
        self._print_per_netif_recv(usage_tables.per_netif_recv)
        self._print_per_netif_send(usage_tables.per_netif_send)

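    # Builds a latency histogram: durations are converted from ns to µs and
    # spread over self._args.freq_resolution buckets of equal width between
    # the minimum and maximum observed latencies.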
    def _fill_freq_result_table(self, duration_list, result_table):
        if not duration_list:
            return

        # The number of bins for the histogram
        resolution = self._args.freq_resolution

        min_duration = min(duration_list)
        max_duration = max(duration_list)
        # ns to µs
        min_duration /= 1000
        max_duration /= 1000

        step = (max_duration - min_duration) / resolution

        if step == 0:
            return

        buckets = []
        values = []

        for i in range(resolution):
            buckets.append(i * step)
            values.append(0)

        for duration in duration_list:
            duration /= 1000
            index = min(int((duration - min_duration) / step), resolution - 1)
            values[index] += 1

        for index, value in enumerate(values):
            result_table.append_row(
                latency_lower=mi.Duration.from_us(index * step + min_duration),
                latency_upper=mi.Duration.from_us(
                    (index + 1) * step + min_duration),
                count=mi.Integer(value),
            )

    def _get_disk_freq_result_tables(self, begin, end):
        result_tables = []

        for disk in self._analysis.disks.values():
            rq_durations = [rq.duration for rq in disk.rq_list if
                            self._filter_io_request(rq)]
            subtitle = 'disk: {}'.format(disk.disk_name)
            result_table = \
                self._mi_create_result_table(self._MI_TABLE_CLASS_FREQ,
                                             begin, end, subtitle)
            self._fill_freq_result_table(rq_durations, result_table)
            result_tables.append(result_table)

        return result_tables

    def _get_syscall_freq_result_tables(self, begin, end):
        open_table = \
            self._mi_create_result_table(self._MI_TABLE_CLASS_FREQ,
                                         begin, end, 'open')
        read_table = \
            self._mi_create_result_table(self._MI_TABLE_CLASS_FREQ,
                                         begin, end, 'read')
        write_table = \
            self._mi_create_result_table(self._MI_TABLE_CLASS_FREQ,
                                         begin, end, 'write')
        sync_table = \
            self._mi_create_result_table(self._MI_TABLE_CLASS_FREQ,
                                         begin, end, 'sync')
        self._fill_freq_result_table([io_rq.duration for io_rq in
                                      self._analysis.open_io_requests if
                                      self._filter_io_request(io_rq)],
                                     open_table)
        self._fill_freq_result_table([io_rq.duration for io_rq in
                                      self._analysis.read_io_requests if
                                      self._filter_io_request(io_rq)],
                                     read_table)
        self._fill_freq_result_table([io_rq.duration for io_rq in
                                      self._analysis.write_io_requests if
                                      self._filter_io_request(io_rq)],
                                     write_table)
        self._fill_freq_result_table([io_rq.duration for io_rq in
                                      self._analysis.sync_io_requests if
                                      self._filter_io_request(io_rq)],
                                     sync_table)

        return [open_table, read_table, write_table, sync_table]

    def _get_freq_result_tables(self, begin, end):
        syscall_tables = self._get_syscall_freq_result_tables(begin, end)
        disk_tables = self._get_disk_freq_result_tables(begin, end)

        return syscall_tables + disk_tables

    def _print_one_freq(self, result_table):
        if not result_table.rows:
            return

        values = []
        graph = Pyasciigraph()
        graph_data = []

        for row in result_table.rows:
            graph_data.append(('%0.03f' % row.latency_lower.to_us(),
                               row.count.value))

        title = '{} {} (usec)'.format(result_table.title, result_table.subtitle)
        graph_lines = graph.graph(
            title,
            graph_data,
            info_before=True,
            count=True
        )

        for line in graph_lines:
            print(line)

        print()

    def _print_freq(self, freq_tables):
        for freq_table in freq_tables:
            self._print_one_freq(freq_table)

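    # Appends one log/top row for an I/O request, resolving the file path
    # through the owning process's FD table at the time the request ended.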
a0acc08c 879 def _append_log_row(self, io_rq, result_table):
7e73fe34 880 if io_rq.size is None:
a0acc08c 881 size = mi.Empty()
7e73fe34 882 else:
a0acc08c 883 size = mi.Size(io_rq.size)
7e73fe34
AB
884
885 tid = io_rq.tid
886 proc_stats = self._analysis.tids[tid]
a0acc08c 887 proc_name = proc_stats.comm
7e73fe34
AB
888
889 # TODO: handle fd_in/fd_out for RW type operations
890 if io_rq.fd is None:
a0acc08c
PP
891 path = mi.Empty()
892 fd = mi.Empty()
7e73fe34 893 else:
a0acc08c 894 fd = mi.Fd(io_rq.fd)
7e73fe34 895 parent_proc = proc_stats
a0acc08c 896
7e73fe34
AB
897 if parent_proc.pid is not None:
898 parent_proc = self._analysis.tids[parent_proc.pid]
899
a0acc08c
PP
900 fd_stats = parent_proc.get_fd(io_rq.fd, io_rq.end_ts)
901
7e73fe34 902 if fd_stats is not None:
a0acc08c 903 path = mi.Path(fd_stats.filename)
7e73fe34 904 else:
a0acc08c
PP
905 path = mi.Unknown()
906
907 result_table.append_row(
908 time_range=mi.TimeRange(io_rq.begin_ts, io_rq.end_ts),
909 out_of_range=mi.Boolean(self._is_io_rq_out_of_range(io_rq)),
910 duration=mi.Duration(io_rq.duration),
911 syscall=mi.Syscall(io_rq.syscall_name),
912 size=size,
913 process=mi.Process(proc_name, tid=tid),
914 path=path,
915 fd=fd,
916 )
917
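    # Appends rows sorted by sort_key; in top mode (is_top) the requests are
    # sorted in descending order and the output stops once the row limit is
    # exceeded.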
    def _fill_log_result_table(self, rq_list, sort_key, is_top, result_table):
        if not rq_list:
            return

        count = 0

        for io_rq in sorted(rq_list, key=operator.attrgetter(sort_key),
                            reverse=is_top):
            if is_top and count > self._args.limit:
                break

            self._append_log_row(io_rq, result_table)
            count += 1

    def _fill_log_result_table_from_io_requests(self, io_requests, sort_key,
                                                is_top, result_table):
        io_requests = [io_rq for io_rq in io_requests if
                       self._filter_io_request(io_rq)]
        self._fill_log_result_table(io_requests, sort_key, is_top, result_table)

    def _get_top_result_tables(self, begin, end):
        open_table = \
            self._mi_create_result_table(self._MI_TABLE_CLASS_TOP_SYSCALL,
                                         begin, end, 'open')
        read_table = \
            self._mi_create_result_table(self._MI_TABLE_CLASS_TOP_SYSCALL,
                                         begin, end, 'read')
        write_table = \
            self._mi_create_result_table(self._MI_TABLE_CLASS_TOP_SYSCALL,
                                         begin, end, 'write')
        sync_table = \
            self._mi_create_result_table(self._MI_TABLE_CLASS_TOP_SYSCALL,
                                         begin, end, 'sync')
        self._fill_log_result_table_from_io_requests(
            self._analysis.open_io_requests, 'duration', True, open_table)
        self._fill_log_result_table_from_io_requests(
            self._analysis.read_io_requests, 'duration', True, read_table)
        self._fill_log_result_table_from_io_requests(
            self._analysis.write_io_requests, 'duration', True, write_table)
        self._fill_log_result_table_from_io_requests(
            self._analysis.sync_io_requests, 'duration', True, sync_table)

        return [open_table, read_table, write_table, sync_table]

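    # Prints one I/O request; a trailing '*' on the time range and duration
    # marks requests that began or ended outside the requested time range.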
    def _print_log_row(self, row):
        fmt = '{:<40} {:<16} {:>16} {:>11} {:<24} {:<8} {:<14}'
        begin_time = common.ns_to_hour_nsec(row.time_range.begin,
                                            self._args.multi_day,
                                            self._args.gmt)
        end_time = common.ns_to_hour_nsec(row.time_range.end,
                                          self._args.multi_day,
                                          self._args.gmt)
        time_range_str = '[' + begin_time + ',' + end_time + ']'
        duration_str = '%0.03f' % row.duration.to_us()

        if type(row.size) is mi.Empty:
            size = 'N/A'
        else:
            size = common.convert_size(row.size.value)

        tid = row.process.tid
        proc_name = row.process.name

        if type(row.fd) is mi.Empty:
            file_str = 'N/A'
        else:
            if type(row.path) is mi.Unknown:
                path = 'unknown'
            else:
                path = row.path.path

            file_str = '%s (fd=%s)' % (path, row.fd.fd)

        if row.out_of_range.value:
            time_range_str += '*'
            duration_str += '*'
        else:
            time_range_str += ' '
            duration_str += ' '

        print(fmt.format(time_range_str, row.syscall.name, duration_str,
                         size, proc_name, tid, file_str))

    def _print_log(self, result_table):
        if not result_table.rows:
            return

        has_out_of_range_rq = False

        print()
        fmt = '{} {} (usec)'
        print(fmt.format(result_table.title, result_table.subtitle))
        header_fmt = '{:<19} {:<20} {:<16} {:<23} {:<5} {:<24} {:<8} {:<14}'
        print(header_fmt.format(
            'Begin', 'End', 'Name', 'Duration (usec)', 'Size', 'Proc', 'PID',
            'Filename'))

        for row in result_table.rows:
            self._print_log_row(row)

            if not has_out_of_range_rq and row.out_of_range.value:
                has_out_of_range_rq = True

        if has_out_of_range_rq:
            print('*: Syscalls started and/or completed outside of the '
                  'range specified')

    def _print_top(self, top_tables):
        for table in top_tables:
            self._print_log(table)

    def _get_log_result_table(self, begin, end):
        log_table = self._mi_create_result_table(self._MI_TABLE_CLASS_LOG,
                                                 begin, end)
        self._fill_log_result_table_from_io_requests(
            self._analysis.io_requests, 'begin_ts', False, log_table)

        return log_table

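    # Appends a count/min/avg/max/stdev row; the standard deviation needs at
    # least two samples and is reported as unknown otherwise.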
    def _append_latency_stats_row(self, obj, rq_durations, result_table):
        rq_count = len(rq_durations)
        total_duration = sum(rq_durations)

        if len(rq_durations) > 0:
            min_duration = min(rq_durations)
            max_duration = max(rq_durations)
        else:
            min_duration = 0
            max_duration = 0

        if rq_count < 2:
            stdev = mi.Unknown()
        else:
            stdev = mi.Duration(statistics.stdev(rq_durations))

        if rq_count > 0:
            avg = total_duration / rq_count
        else:
            avg = 0

        result_table.append_row(
            obj=obj,
            count=mi.Integer(rq_count),
            min_latency=mi.Duration(min_duration),
            avg_latency=mi.Duration(avg),
            max_latency=mi.Duration(max_duration),
            stdev_latency=stdev,
        )

    def _append_latency_stats_row_from_requests(self, obj, io_requests,
                                                result_table):
        rq_durations = [io_rq.duration for io_rq in io_requests if
                        self._filter_io_request(io_rq)]
        self._append_latency_stats_row(obj, rq_durations, result_table)

    def _get_syscall_latency_stats_result_table(self, begin, end):
        result_table = self._mi_create_result_table(
            self._MI_TABLE_CLASS_SYSCALL_LATENCY_STATS, begin, end)
        append_fn = self._append_latency_stats_row_from_requests
        append_fn(mi.String('Open'), self._analysis.open_io_requests,
                  result_table)
        append_fn(mi.String('Read'), self._analysis.read_io_requests,
                  result_table)
        append_fn(mi.String('Write'), self._analysis.write_io_requests,
                  result_table)
        append_fn(mi.String('Sync'), self._analysis.sync_io_requests,
                  result_table)

        return result_table

    def _get_disk_latency_stats_result_table(self, begin, end):
        if not self._analysis.disks:
            return

        result_table = self._mi_create_result_table(
            self._MI_TABLE_CLASS_PART_LATENCY_STATS, begin, end)
        append_fn = self._append_latency_stats_row_from_requests

        for disk in self._analysis.disks.values():
            if disk.rq_count:
                rq_durations = [rq.duration for rq in disk.rq_list if
                                self._filter_io_request(rq)]
                disk = mi.Disk(disk.disk_name)
                self._append_latency_stats_row(disk, rq_durations, result_table)

        return result_table

    def _get_latency_stats_result_tables(self, begin, end):
        syscall_tbl = self._get_syscall_latency_stats_result_table(begin, end)
        disk_tbl = self._get_disk_latency_stats_result_table(begin, end)

        return syscall_tbl, disk_tbl

    def _print_latency_stats_row(self, row):
        if type(row.stdev_latency) is mi.Unknown:
            stdev = '?'
        else:
            stdev = '%0.03f' % row.stdev_latency.to_us()

        avg = '%0.03f' % row.avg_latency.to_us()
        min_duration = '%0.03f' % row.min_latency.to_us()
        max_duration = '%0.03f' % row.max_latency.to_us()

        print(IoAnalysisCommand._LATENCY_STATS_FORMAT.format(
            str(row.obj), row.count.value, min_duration,
            avg, max_duration, stdev))

    def _print_syscall_latency_stats(self, stats_table):
        print('\nSyscalls latency statistics (usec):')
        print(IoAnalysisCommand._LATENCY_STATS_FORMAT.format(
            'Type', 'Count', 'Min', 'Average', 'Max', 'Stdev'))
        print(IoAnalysisCommand._SECTION_SEPARATOR_STRING)

        for row in stats_table.rows:
            self._print_latency_stats_row(row)

    def _print_disk_latency_stats(self, stats_table):
        if not stats_table.rows:
            return

        print('\nDisk latency statistics (usec):')
        print(IoAnalysisCommand._LATENCY_STATS_FORMAT.format(
            'Name', 'Count', 'Min', 'Average', 'Max', 'Stdev'))
        print(IoAnalysisCommand._SECTION_SEPARATOR_STRING)

        for row in stats_table.rows:
            self._print_latency_stats_row(row)

    def _print_latency_stats(self, syscall_latency_stats_table,
                             disk_latency_stats_table):
        self._print_syscall_latency_stats(syscall_latency_stats_table)
        self._print_disk_latency_stats(disk_latency_stats_table)

a9c9a2a6 1153 def _add_arguments(self, ap):
b6d9132b
AB
1154 Command._add_proc_filter_args(ap)
1155 Command._add_min_max_args(ap)
1156 Command._add_log_args(
1157 ap, help='Output the I/O requests in chronological order')
1158 Command._add_stats_args(ap, help='Output the I/O latency statistics')
1159 Command._add_freq_args(
1160 ap, help='Output the I/O latency frequency distribution')
73b71522 1161 ap.add_argument('--usage', action='store_true',
b6d9132b
AB
1162 help='Output the I/O usage')
1163 ap.add_argument('--minsize', type=float,
1164 help='Filter out, I/O operations working with '
1165 'less that minsize bytes')
1166 ap.add_argument('--maxsize', type=float,
1167 help='Filter out, I/O operations working with '
1168 'more that maxsize bytes')
1169 ap.add_argument('--top', action='store_true',
1170 help='Output the top I/O latencies by category')
1171
1172
a0acc08c
PP
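# Entry points: each run*() helper injects the matching command-line option
# into sys.argv before running the I/O command, so a single
# IoAnalysisCommand implementation backs every io subcommand (the *_mi
# variants enable machine interface output).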
def _run(mi_mode):
    iocmd = IoAnalysisCommand(mi_mode=mi_mode)
    iocmd.run()


def _runstats(mi_mode):
    sys.argv.insert(1, '--stats')
    _run(mi_mode)


def _runlog(mi_mode):
    sys.argv.insert(1, '--log')
    _run(mi_mode)


def _runfreq(mi_mode):
    sys.argv.insert(1, '--freq')
    _run(mi_mode)


def _runlatencytop(mi_mode):
    sys.argv.insert(1, '--top')
    _run(mi_mode)


def _runusage(mi_mode):
    sys.argv.insert(1, '--usage')
    _run(mi_mode)


def runstats():
    _runstats(mi_mode=False)


def runlog():
    _runlog(mi_mode=False)


def runfreq():
    _runfreq(mi_mode=False)


def runlatencytop():
    _runlatencytop(mi_mode=False)


def runusage():
    _runusage(mi_mode=False)


def runstats_mi():
    _runstats(mi_mode=True)


def runlog_mi():
    _runlog(mi_mode=True)


def runfreq_mi():
    _runfreq(mi_mode=True)


def runlatencytop_mi():
    _runlatencytop(mi_mode=True)


def runusage_mi():
    _runusage(mi_mode=True)