1 # The MIT License (MIT)
3 # Copyright (C) 2015 - Julien Desfossez <jdesfossez@efficios.com>
4 # 2015 - Antoine Busque <abusque@efficios.com>
5 # 2015 - Philippe Proulx <pproulx@efficios.com>
7 # Permission is hereby granted, free of charge, to any person obtaining a copy
8 # of this software and associated documentation files (the "Software"), to deal
9 # in the Software without restriction, including without limitation the rights
10 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11 # copies of the Software, and to permit persons to whom the Software is
12 # furnished to do so, subject to the following conditions:
14 # The above copyright notice and this permission notice shall be included in
15 # all copies or substantial portions of the Software.
17 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
20 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
23 # SOFTWARE.
30 from . import termgraph
32 from ..common
import format_utils
33 from .command
import Command
36 _UsageTables
= collections
.namedtuple('_UsageTables', [
41 'per_proc_block_read',
42 'per_proc_block_write',
51 class IoAnalysisCommand(Command
):
52 _DESC
= """The I/O command."""
53 _ANALYSIS_CLASS
= io
.IoAnalysis
54 _MI_TITLE
= 'I/O analysis'
55 _MI_DESCRIPTION
= 'System call/disk latency statistics, system call ' + \
56 'latency distribution, system call top latencies, ' + \
57 'I/O usage top, and I/O operations log'
66 _MI_TABLE_CLASS_SYSCALL_LATENCY_STATS
= 'syscall-latency-stats'
67 _MI_TABLE_CLASS_PART_LATENCY_STATS
= 'disk-latency-stats'
68 _MI_TABLE_CLASS_FREQ
= 'freq'
69 _MI_TABLE_CLASS_TOP_SYSCALL
= 'top-syscall'
70 _MI_TABLE_CLASS_LOG
= 'log'
71 _MI_TABLE_CLASS_PER_PROCESS_TOP
= 'per-process-top'
72 _MI_TABLE_CLASS_PER_FILE_TOP
= 'per-file-top'
73 _MI_TABLE_CLASS_PER_PROCESS_TOP_BLOCK
= 'per-process-top-block'
74 _MI_TABLE_CLASS_PER_DISK_TOP_SECTOR
= 'per-disk-top-sector'
75 _MI_TABLE_CLASS_PER_DISK_TOP_REQUEST
= 'per-disk-top-request'
76 _MI_TABLE_CLASS_PER_DISK_TOP_RTPS
= 'per-disk-top-rps'
77 _MI_TABLE_CLASS_PER_NETIF_TOP
= 'per-netif-top'
80 _MI_TABLE_CLASS_SYSCALL_LATENCY_STATS
,
81 'System call latency statistics', [
82 ('obj', 'System call category', mi
.String
),
83 ('count', 'Call count', mi
.Integer
, 'calls'),
84 ('min_latency', 'Minimum call latency', mi
.Duration
),
85 ('avg_latency', 'Average call latency', mi
.Duration
),
86 ('max_latency', 'Maximum call latency', mi
.Duration
),
87 ('stdev_latency', 'System call latency standard deviation',
92 _MI_TABLE_CLASS_PART_LATENCY_STATS
,
93 'Partition latency statistics', [
94 ('obj', 'Partition', mi
.Disk
),
95 ('count', 'Access count', mi
.Integer
, 'accesses'),
96 ('min_latency', 'Minimum access latency', mi
.Duration
),
97 ('avg_latency', 'Average access latency', mi
.Duration
),
98 ('max_latency', 'Maximum access latency', mi
.Duration
),
99 ('stdev_latency', 'System access latency standard deviation',
104 _MI_TABLE_CLASS_FREQ
,
105 'I/O request latency distribution', [
106 ('latency_lower', 'Latency (lower bound)', mi
.Duration
),
107 ('latency_upper', 'Latency (upper bound)', mi
.Duration
),
108 ('count', 'Request count', mi
.Integer
, 'requests'),
112 _MI_TABLE_CLASS_TOP_SYSCALL
,
113 'Top system call latencies', [
114 ('time_range', 'Call time range', mi
.TimeRange
),
115 ('out_of_range', 'System call out of range?', mi
.Boolean
),
116 ('duration', 'Call duration', mi
.Duration
),
117 ('syscall', 'System call', mi
.Syscall
),
118 ('size', 'Read/write size', mi
.Size
),
119 ('process', 'Process', mi
.Process
),
120 ('path', 'File path', mi
.Path
),
121 ('fd', 'File descriptor', mi
.Fd
),
126 'I/O operations log', [
127 ('time_range', 'Call time range', mi
.TimeRange
),
128 ('out_of_range', 'System call out of range?', mi
.Boolean
),
129 ('duration', 'Call duration', mi
.Duration
),
130 ('syscall', 'System call', mi
.Syscall
),
131 ('size', 'Read/write size', mi
.Size
),
132 ('process', 'Process', mi
.Process
),
133 ('path', 'File path', mi
.Path
),
134 ('fd', 'File descriptor', mi
.Fd
),
138 _MI_TABLE_CLASS_PER_PROCESS_TOP
,
139 'Per-process top I/O operations', [
140 ('process', 'Process', mi
.Process
),
141 ('size', 'Total operations size', mi
.Size
),
142 ('disk_size', 'Disk operations size', mi
.Size
),
143 ('net_size', 'Network operations size', mi
.Size
),
144 ('unknown_size', 'Unknown operations size', mi
.Size
),
148 _MI_TABLE_CLASS_PER_FILE_TOP
,
149 'Per-file top I/O operations', [
150 ('path', 'File path/info', mi
.Path
),
151 ('size', 'Operations size', mi
.Size
),
152 ('fd_owners', 'File descriptor owners', mi
.String
),
156 _MI_TABLE_CLASS_PER_PROCESS_TOP_BLOCK
,
157 'Per-process top block I/O operations', [
158 ('process', 'Process', mi
.Process
),
159 ('size', 'Operations size', mi
.Size
),
163 _MI_TABLE_CLASS_PER_DISK_TOP_SECTOR
,
164 'Per-disk top sector I/O operations', [
165 ('disk', 'Disk', mi
.Disk
),
166 ('count', 'Sector count', mi
.Integer
, 'sectors'),
170 _MI_TABLE_CLASS_PER_DISK_TOP_REQUEST
,
171 'Per-disk top I/O requests', [
172 ('disk', 'Disk', mi
.Disk
),
173 ('count', 'Request count', mi
.Integer
, 'I/O requests'),
177 _MI_TABLE_CLASS_PER_DISK_TOP_RTPS
,
178 'Per-disk top I/O request time/sector', [
179 ('disk', 'Disk', mi
.Disk
),
180 ('rtps', 'Request time/sector', mi
.Duration
),
184 _MI_TABLE_CLASS_PER_NETIF_TOP
,
185 'Per-network interface top I/O operations', [
186 ('netif', 'Network interface', mi
.NetIf
),
187 ('size', 'Operations size', mi
.Size
),
191 _LATENCY_STATS_FORMAT
= '{:<14} {:>14} {:>14} {:>14} {:>14} {:>14}'
192 _SECTION_SEPARATOR_STRING
= '-' * 89
194 def _analysis_tick(self
, begin_ns
, end_ns
):
195 syscall_latency_stats_table
= None
196 disk_latency_stats_table
= None
203 syscall_latency_stats_table
, disk_latency_stats_table
= \
204 self
._get
_latency
_stats
_result
_tables
(begin_ns
, end_ns
)
207 freq_tables
= self
._get
_freq
_result
_tables
(begin_ns
, end_ns
)
210 usage_tables
= self
._get
_usage
_result
_tables
(begin_ns
, end_ns
)
213 top_tables
= self
._get
_top
_result
_tables
(begin_ns
, end_ns
)
216 log_table
= self
._get
_log
_result
_table
(begin_ns
, end_ns
)
219 self
._mi
_append
_result
_tables
([
221 syscall_latency_stats_table
,
222 disk_latency_stats_table
,
224 self
._mi
_append
_result
_tables
(top_tables
)
225 self
._mi
_append
_result
_tables
(usage_tables
)
226 self
._mi
_append
_result
_tables
(freq_tables
)
228 self
._print
_date
(begin_ns
, end_ns
)
231 self
._print
_usage
(usage_tables
)
234 self
._print
_latency
_stats
(syscall_latency_stats_table
,
235 disk_latency_stats_table
)
238 self
._print
_top
(top_tables
)
241 self
._print
_freq
(freq_tables
)
244 self
._print
_log
(log_table
)
246 def _create_summary_result_tables(self
):
247 # TODO: create a summary table here
248 self
._mi
_clear
_result
_tables
()
251 def _filter_size(self
, size
):
254 if self
._args
.maxsize
is not None and size
> self
._args
.maxsize
:
256 if self
._args
.minsize
is not None and size
< self
._args
.minsize
:
260 def _filter_latency(self
, duration
):
261 if self
._args
.max is not None and duration
> self
._args
.max:
263 if self
._args
.min is not None and duration
< self
._args
.min:
267 def _filter_time_range(self
, begin
, end
):
268 # Note: we only want to return False only when a request has
269 # ended and is completely outside the timerange (i.e. begun
270 # after the end of the time range).
272 self
._analysis
_conf
.begin_ts
and self
._analysis
_conf
.end_ts
and
273 end
and begin
> self
._analysis
_conf
.end_ts
276 def _filter_io_request(self
, io_rq
):
277 return self
._filter
_size
(io_rq
.size
) and \
278 self
._filter
_latency
(io_rq
.duration
) and \
279 self
._filter
_time
_range
(io_rq
.begin_ts
, io_rq
.end_ts
)
281 def _is_io_rq_out_of_range(self
, io_rq
):
283 self
._analysis
_conf
.begin_ts
and
284 io_rq
.begin_ts
< self
._analysis
_conf
.begin_ts
or
285 self
._analysis
_conf
.end_ts
and
286 io_rq
.end_ts
> self
._analysis
_conf
.end_ts
289 def _append_per_proc_read_usage_row(self
, proc_stats
, result_table
):
290 result_table
.append_row(
291 process
=mi
.Process(proc_stats
.comm
, pid
=proc_stats
.pid
,
293 size
=mi
.Size(proc_stats
.total_read
),
294 disk_size
=mi
.Size(proc_stats
.disk_io
.read
),
295 net_size
=mi
.Size(proc_stats
.net_io
.read
),
296 unknown_size
=mi
.Size(proc_stats
.unk_io
.read
),
301 def _append_per_proc_write_usage_row(self
, proc_stats
, result_table
):
302 result_table
.append_row(
303 process
=mi
.Process(proc_stats
.comm
, pid
=proc_stats
.pid
,
305 size
=mi
.Size(proc_stats
.total_write
),
306 disk_size
=mi
.Size(proc_stats
.disk_io
.write
),
307 net_size
=mi
.Size(proc_stats
.net_io
.write
),
308 unknown_size
=mi
.Size(proc_stats
.unk_io
.write
),
313 def _append_per_proc_block_read_usage_row(self
, proc_stats
, result_table
):
314 if proc_stats
.block_io
.read
== 0:
318 proc_name
= proc_stats
.comm
322 result_table
.append_row(
323 process
=mi
.Process(proc_name
, pid
=proc_stats
.pid
,
325 size
=mi
.Size(proc_stats
.block_io
.read
),
330 def _append_per_proc_block_write_usage_row(self
, proc_stats
, result_table
):
331 if proc_stats
.block_io
.write
== 0:
335 proc_name
= proc_stats
.comm
339 result_table
.append_row(
340 process
=mi
.Process(proc_name
, pid
=proc_stats
.pid
,
342 size
=mi
.Size(proc_stats
.block_io
.write
),
347 def _append_disk_sector_usage_row(self
, disk_stats
, result_table
):
348 if disk_stats
.total_rq_sectors
== 0:
351 result_table
.append_row(
352 disk
=mi
.Disk(disk_stats
.disk_name
),
353 count
=mi
.Integer(disk_stats
.total_rq_sectors
),
358 def _append_disk_request_usage_row(self
, disk_stats
, result_table
):
359 if disk_stats
.rq_count
== 0:
362 result_table
.append_row(
363 disk
=mi
.Disk(disk_stats
.disk_name
),
364 count
=mi
.Integer(disk_stats
.rq_count
),
369 def _append_disk_rtps_usage_row(self
, disk_stats
, result_table
):
370 if disk_stats
.rq_count
== 0:
373 avg_latency
= (disk_stats
.total_rq_duration
/ disk_stats
.rq_count
)
374 result_table
.append_row(
375 disk
=mi
.Disk(disk_stats
.disk_name
),
376 rtps
=mi
.Duration(avg_latency
),
381 def _append_netif_recv_usage_row(self
, netif_stats
, result_table
):
382 result_table
.append_row(
383 netif
=mi
.NetIf(netif_stats
.name
),
384 size
=mi
.Size(netif_stats
.recv_bytes
)
389 def _append_netif_send_usage_row(self
, netif_stats
, result_table
):
390 result_table
.append_row(
391 netif
=mi
.NetIf(netif_stats
.name
),
392 size
=mi
.Size(netif_stats
.sent_bytes
)
397 def _get_file_stats_fd_owners_str(self
, file_stats
):
400 for pid
, fd
in file_stats
.fd_by_pid
.items():
401 comm
= self
._analysis
.tids
[pid
].comm
402 fd_by_pid_str
+= 'fd %d in %s (%s) ' % (fd
, comm
, pid
)
406 def _append_file_read_usage_row(self
, file_stats
, result_table
):
407 if file_stats
.io
.read
== 0:
410 fd_owners
= self
._get
_file
_stats
_fd
_owners
_str
(file_stats
)
411 result_table
.append_row(
412 path
=mi
.Path(file_stats
.filename
),
413 size
=mi
.Size(file_stats
.io
.read
),
414 fd_owners
=mi
.String(fd_owners
),
419 def _append_file_write_usage_row(self
, file_stats
, result_table
):
420 if file_stats
.io
.write
== 0:
423 fd_owners
= self
._get
_file
_stats
_fd
_owners
_str
(file_stats
)
424 result_table
.append_row(
425 path
=mi
.Path(file_stats
.filename
),
426 size
=mi
.Size(file_stats
.io
.write
),
427 fd_owners
=mi
.String(fd_owners
),
432 def _fill_usage_result_table(self
, input_list
, append_row_cb
,
435 limit
= self
._args
.limit
437 for elem
in input_list
:
438 if append_row_cb(elem
, result_table
):
441 if limit
is not None and count
>= limit
:
444 def _fill_per_process_read_usage_result_table(self
, result_table
):
445 input_list
= sorted(self
._analysis
.tids
.values(),
446 key
=operator
.attrgetter('total_read'),
448 self
._fill
_usage
_result
_table
(input_list
,
449 self
._append
_per
_proc
_read
_usage
_row
,
452 def _fill_per_process_write_usage_result_table(self
, result_table
):
453 input_list
= sorted(self
._analysis
.tids
.values(),
454 key
=operator
.attrgetter('total_write'),
456 self
._fill
_usage
_result
_table
(input_list
,
457 self
._append
_per
_proc
_write
_usage
_row
,
460 def _fill_per_process_block_read_usage_result_table(self
, result_table
):
461 input_list
= sorted(self
._analysis
.tids
.values(),
462 key
=operator
.attrgetter('block_io.read'),
464 self
._fill
_usage
_result
_table
(
465 input_list
, self
._append
_per
_proc
_block
_read
_usage
_row
,
468 def _fill_per_process_block_write_usage_result_table(self
, result_table
):
469 input_list
= sorted(self
._analysis
.tids
.values(),
470 key
=operator
.attrgetter('block_io.write'),
472 self
._fill
_usage
_result
_table
(
473 input_list
, self
._append
_per
_proc
_block
_write
_usage
_row
,
476 def _fill_disk_sector_usage_result_table(self
, result_table
):
477 input_list
= sorted(self
._analysis
.disks
.values(),
478 key
=operator
.attrgetter('total_rq_sectors'),
480 self
._fill
_usage
_result
_table
(input_list
,
481 self
._append
_disk
_sector
_usage
_row
,
484 def _fill_disk_request_usage_result_table(self
, result_table
):
485 input_list
= sorted(self
._analysis
.disks
.values(),
486 key
=operator
.attrgetter('rq_count'),
488 self
._fill
_usage
_result
_table
(input_list
,
489 self
._append
_disk
_request
_usage
_row
,
492 def _fill_disk_rtps_usage_result_table(self
, result_table
):
493 input_list
= self
._analysis
.disks
.values()
494 self
._fill
_usage
_result
_table
(input_list
,
495 self
._append
_disk
_rtps
_usage
_row
,
498 def _fill_netif_recv_usage_result_table(self
, result_table
):
499 input_list
= sorted(self
._analysis
.ifaces
.values(),
500 key
=operator
.attrgetter('recv_bytes'),
502 self
._fill
_usage
_result
_table
(input_list
,
503 self
._append
_netif
_recv
_usage
_row
,
506 def _fill_netif_send_usage_result_table(self
, result_table
):
507 input_list
= sorted(self
._analysis
.ifaces
.values(),
508 key
=operator
.attrgetter('sent_bytes'),
510 self
._fill
_usage
_result
_table
(input_list
,
511 self
._append
_netif
_send
_usage
_row
,
514 def _fill_file_read_usage_result_table(self
, files
, result_table
):
515 input_list
= sorted(files
.values(),
516 key
=lambda file_stats
: file_stats
.io
.read
,
518 self
._fill
_usage
_result
_table
(input_list
,
519 self
._append
_file
_read
_usage
_row
,
522 def _fill_file_write_usage_result_table(self
, files
, result_table
):
523 input_list
= sorted(files
.values(),
524 key
=lambda file_stats
: file_stats
.io
.write
,
526 self
._fill
_usage
_result
_table
(input_list
,
527 self
._append
_file
_write
_usage
_row
,
def _fill_file_usage_result_tables(self, read_table, write_table):
    """Fill both per-file usage tables (read and write).

    The per-file statistics are queried from the analysis once and
    reused for both tables.
    """
    file_stats = self._analysis.get_files_stats()
    self._fill_file_read_usage_result_table(file_stats, read_table)
    self._fill_file_write_usage_result_table(file_stats, write_table)
535 def _get_usage_result_tables(self
, begin
, end
):
536 # create result tables
537 per_proc_read_table
= self
._mi
_create
_result
_table
(
538 self
._MI
_TABLE
_CLASS
_PER
_PROCESS
_TOP
, begin
, end
, 'read')
539 per_proc_write_table
= self
._mi
_create
_result
_table
(
540 self
._MI
_TABLE
_CLASS
_PER
_PROCESS
_TOP
, begin
, end
, 'written')
541 per_file_read_table
= self
._mi
_create
_result
_table
(
542 self
._MI
_TABLE
_CLASS
_PER
_FILE
_TOP
, begin
, end
, 'read')
543 per_file_write_table
= self
._mi
_create
_result
_table
(
544 self
._MI
_TABLE
_CLASS
_PER
_FILE
_TOP
, begin
, end
, 'written')
545 per_proc_block_read_table
= self
._mi
_create
_result
_table
(
546 self
._MI
_TABLE
_CLASS
_PER
_PROCESS
_TOP
_BLOCK
, begin
, end
, 'read')
547 per_proc_block_write_table
= self
._mi
_create
_result
_table
(
548 self
._MI
_TABLE
_CLASS
_PER
_PROCESS
_TOP
_BLOCK
, begin
, end
, 'written')
549 per_disk_sector_table
= self
._mi
_create
_result
_table
(
550 self
._MI
_TABLE
_CLASS
_PER
_DISK
_TOP
_SECTOR
, begin
, end
)
551 per_disk_request_table
= self
._mi
_create
_result
_table
(
552 self
._MI
_TABLE
_CLASS
_PER
_DISK
_TOP
_REQUEST
, begin
, end
)
553 per_disk_rtps_table
= self
._mi
_create
_result
_table
(
554 self
._MI
_TABLE
_CLASS
_PER
_DISK
_TOP
_RTPS
, begin
, end
)
555 per_netif_recv_table
= self
._mi
_create
_result
_table
(
556 self
._MI
_TABLE
_CLASS
_PER
_NETIF
_TOP
, begin
, end
, 'received')
557 per_netif_send_table
= self
._mi
_create
_result
_table
(
558 self
._MI
_TABLE
_CLASS
_PER
_NETIF
_TOP
, begin
, end
, 'sent')
561 self
._fill
_per
_process
_read
_usage
_result
_table
(per_proc_read_table
)
562 self
._fill
_per
_process
_write
_usage
_result
_table
(per_proc_write_table
)
563 self
._fill
_file
_usage
_result
_tables
(per_file_read_table
,
564 per_file_write_table
)
565 self
._fill
_per
_process
_block
_read
_usage
_result
_table
(
566 per_proc_block_read_table
)
567 self
._fill
_per
_process
_block
_write
_usage
_result
_table
(
568 per_proc_block_write_table
)
569 self
._fill
_disk
_sector
_usage
_result
_table
(per_disk_sector_table
)
570 self
._fill
_disk
_request
_usage
_result
_table
(per_disk_request_table
)
571 self
._fill
_disk
_rtps
_usage
_result
_table
(per_disk_rtps_table
)
572 self
._fill
_netif
_recv
_usage
_result
_table
(per_netif_recv_table
)
573 self
._fill
_netif
_send
_usage
_result
_table
(per_netif_send_table
)
576 per_proc_read
=per_proc_read_table
,
577 per_proc_write
=per_proc_write_table
,
578 per_file_read
=per_file_read_table
,
579 per_file_write
=per_file_write_table
,
580 per_proc_block_read
=per_proc_block_read_table
,
581 per_proc_block_write
=per_proc_block_write_table
,
582 per_disk_sector
=per_disk_sector_table
,
583 per_disk_request
=per_disk_request_table
,
584 per_disk_rtps
=per_disk_rtps_table
,
585 per_netif_recv
=per_netif_recv_table
,
586 per_netif_send
=per_netif_send_table
,
589 def _print_per_proc_io(self
, result_table
, title
):
590 header_format
= '{:<25} {:<10} {:<10} {:<10}'
591 label_header
= header_format
.format(
592 'Process', 'Disk', 'Net', 'Unknown'
596 label_format
= '{:<25} {:>10} {:>10} {:>10}'
597 if row
.process
.pid
is None:
598 pid_str
= 'unknown (tid=%d)' % (row
.process
.tid
)
600 pid_str
= str(row
.process
.pid
)
602 label
= label_format
.format(
603 '%s (%s)' % (row
.process
.name
, pid_str
),
604 format_utils
.format_size(row
.disk_size
.value
),
605 format_utils
.format_size(row
.net_size
.value
),
606 format_utils
.format_size(row
.unknown_size
.value
)
611 graph
= termgraph
.BarGraph(
612 title
='Per-process I/O ' + title
,
613 label_header
=label_header
,
614 get_value
=lambda row
: row
.size
.value
,
615 get_value_str
=format_utils
.format_size
,
617 data
=result_table
.rows
622 def _print_per_proc_block_io(self
, result_table
, title
):
624 proc_name
= row
.process
.name
627 proc_name
= 'unknown'
629 if row
.process
.pid
is None:
630 pid_str
= 'unknown (tid={})'.format(row
.process
.tid
)
632 pid_str
= str(row
.process
.pid
)
634 return '{} (pid={})'.format(proc_name
, pid_str
)
636 graph
= termgraph
.BarGraph(
637 title
='Block I/O ' + title
,
638 label_header
='Process',
639 get_value
=lambda row
: row
.size
.value
,
640 get_value_str
=format_utils
.format_size
,
642 data
=result_table
.rows
647 def _print_per_disk_sector(self
, result_table
):
648 graph
= termgraph
.BarGraph(
649 title
='Disk Requests Sector Count',
652 get_value
=lambda row
: row
.count
.value
,
653 get_label
=lambda row
: row
.disk
.name
,
654 data
=result_table
.rows
659 def _print_per_disk_request(self
, result_table
):
660 graph
= termgraph
.BarGraph(
661 title
='Disk Request Count',
664 get_value
=lambda row
: row
.count
.value
,
665 get_label
=lambda row
: row
.disk
.name
,
666 data
=result_table
.rows
671 def _print_per_disk_rtps(self
, result_table
):
672 graph
= termgraph
.BarGraph(
673 title
='Disk Request Average Latency',
676 get_value
=lambda row
: row
.rtps
.value
/ 1000000,
677 get_label
=lambda row
: row
.disk
.name
,
678 data
=result_table
.rows
683 def _print_per_netif_io(self
, result_table
, title
):
684 graph
= termgraph
.BarGraph(
685 title
='Network ' + title
+ ' Bytes',
686 label_header
='Interface',
687 get_value
=lambda row
: row
.size
.value
,
688 get_value_str
=format_utils
.format_size
,
689 get_label
=lambda row
: row
.netif
.name
,
690 data
=result_table
.rows
695 def _print_per_file_io(self
, result_table
, title
):
696 # FIXME add option to show FD owners
697 # FIXME why are read and write values the same?
698 graph
= termgraph
.BarGraph(
699 title
='Per-file I/O ' + title
,
701 get_value
=lambda row
: row
.size
.value
,
702 get_value_str
=format_utils
.format_size
,
703 get_label
=lambda row
: row
.path
.path
,
704 data
=result_table
.rows
def _print_usage(self, usage_tables):
    """Print every I/O usage view, in a fixed order.

    Covers per-process, per-file, per-process block, per-disk
    (sector count, request count, request time/sector) and
    per-network-interface tables.
    """
    tables = usage_tables
    self._print_per_proc_io(tables.per_proc_read, 'Read')
    self._print_per_proc_io(tables.per_proc_write, 'Write')
    self._print_per_file_io(tables.per_file_read, 'Read')
    self._print_per_file_io(tables.per_file_write, 'Write')
    self._print_per_proc_block_io(tables.per_proc_block_read, 'Read')
    self._print_per_proc_block_io(tables.per_proc_block_write, 'Write')
    self._print_per_disk_sector(tables.per_disk_sector)
    self._print_per_disk_request(tables.per_disk_request)
    self._print_per_disk_rtps(tables.per_disk_rtps)
    self._print_per_netif_io(tables.per_netif_recv, 'Received')
    self._print_per_netif_io(tables.per_netif_send, 'Sent')
724 def _fill_freq_result_table(self
, duration_list
, result_table
):
725 if not duration_list
:
728 # The number of bins for the histogram
729 resolution
= self
._args
.freq_resolution
731 min_duration
= min(duration_list
)
732 max_duration
= max(duration_list
)
737 step
= (max_duration
- min_duration
) / resolution
745 for i
in range(resolution
):
746 buckets
.append(i
* step
)
749 for duration
in duration_list
:
751 index
= min(int((duration
- min_duration
) / step
), resolution
- 1)
754 for index
, value
in enumerate(values
):
755 result_table
.append_row(
756 latency_lower
=mi
.Duration
.from_us(index
* step
+ min_duration
),
757 latency_upper
=mi
.Duration
.from_us((index
+ 1) * step
+
759 count
=mi
.Integer(value
),
762 def _get_disk_freq_result_tables(self
, begin
, end
):
765 for disk
in self
._analysis
.disks
.values():
766 rq_durations
= [rq
.duration
for rq
in disk
.rq_list
if
767 self
._filter
_io
_request
(rq
)]
768 subtitle
= 'disk: {}'.format(disk
.disk_name
)
770 self
._mi
_create
_result
_table
(self
._MI
_TABLE
_CLASS
_FREQ
,
771 begin
, end
, subtitle
)
772 self
._fill
_freq
_result
_table
(rq_durations
, result_table
)
773 result_tables
.append(result_table
)
777 def _get_syscall_freq_result_tables(self
, begin
, end
):
779 self
._mi
_create
_result
_table
(self
._MI
_TABLE
_CLASS
_FREQ
,
782 self
._mi
_create
_result
_table
(self
._MI
_TABLE
_CLASS
_FREQ
,
785 self
._mi
_create
_result
_table
(self
._MI
_TABLE
_CLASS
_FREQ
,
788 self
._mi
_create
_result
_table
(self
._MI
_TABLE
_CLASS
_FREQ
,
790 self
._fill
_freq
_result
_table
([io_rq
.duration
for io_rq
in
791 self
._analysis
.open_io_requests
if
792 self
._filter
_io
_request
(io_rq
)],
794 self
._fill
_freq
_result
_table
([io_rq
.duration
for io_rq
in
795 self
._analysis
.read_io_requests
if
796 self
._filter
_io
_request
(io_rq
)],
798 self
._fill
_freq
_result
_table
([io_rq
.duration
for io_rq
in
799 self
._analysis
.write_io_requests
if
800 self
._filter
_io
_request
(io_rq
)],
802 self
._fill
_freq
_result
_table
([io_rq
.duration
for io_rq
in
803 self
._analysis
.sync_io_requests
if
804 self
._filter
_io
_request
(io_rq
)],
807 return [open_table
, read_table
, write_table
, sync_table
]
def _get_freq_result_tables(self, begin, end):
    """Return the frequency tables: syscall tables first, disk tables after."""
    return (self._get_syscall_freq_result_tables(begin, end) +
            self._get_disk_freq_result_tables(begin, end))
815 def _print_one_freq(self
, result_table
):
816 graph
= termgraph
.FreqGraph(
817 data
=result_table
.rows
,
818 get_value
=lambda row
: row
.count
.value
,
819 get_lower_bound
=lambda row
: row
.latency_lower
.to_us(),
820 title
='{} {}'.format(result_table
.title
, result_table
.subtitle
),
def _print_freq(self, freq_tables):
    """Print one latency-frequency graph per result table."""
    for table in freq_tables:
        self._print_one_freq(table)
830 def _append_log_row(self
, io_rq
, result_table
):
831 if io_rq
.size
is None:
834 size
= mi
.Size(io_rq
.size
)
837 proc_stats
= self
._analysis
.tids
[tid
]
838 proc_name
= proc_stats
.comm
840 # TODO: handle fd_in/fd_out for RW type operations
846 parent_proc
= proc_stats
848 if parent_proc
.pid
is not None:
849 parent_proc
= self
._analysis
.tids
[parent_proc
.pid
]
851 fd_stats
= parent_proc
.get_fd(io_rq
.fd
, io_rq
.end_ts
)
853 if fd_stats
is not None:
854 path
= mi
.Path(fd_stats
.filename
)
858 result_table
.append_row(
859 time_range
=mi
.TimeRange(io_rq
.begin_ts
, io_rq
.end_ts
),
860 out_of_range
=mi
.Boolean(self
._is
_io
_rq
_out
_of
_range
(io_rq
)),
861 duration
=mi
.Duration(io_rq
.duration
),
862 syscall
=mi
.Syscall(io_rq
.syscall_name
),
864 process
=mi
.Process(proc_name
, tid
=tid
),
869 def _fill_log_result_table(self
, rq_list
, sort_key
, is_top
, result_table
):
875 for io_rq
in sorted(rq_list
, key
=operator
.attrgetter(sort_key
),
877 if is_top
and count
> self
._args
.limit
:
880 self
._append
_log
_row
(io_rq
, result_table
)
883 def _fill_log_result_table_from_io_requests(self
, io_requests
, sort_key
,
884 is_top
, result_table
):
885 io_requests
= [io_rq
for io_rq
in io_requests
if
886 self
._filter
_io
_request
(io_rq
)]
887 self
._fill
_log
_result
_table
(io_requests
, sort_key
, is_top
,
890 def _get_top_result_tables(self
, begin
, end
):
892 self
._mi
_create
_result
_table
(self
._MI
_TABLE
_CLASS
_TOP
_SYSCALL
,
895 self
._mi
_create
_result
_table
(self
._MI
_TABLE
_CLASS
_TOP
_SYSCALL
,
898 self
._mi
_create
_result
_table
(self
._MI
_TABLE
_CLASS
_TOP
_SYSCALL
,
901 self
._mi
_create
_result
_table
(self
._MI
_TABLE
_CLASS
_TOP
_SYSCALL
,
903 self
._fill
_log
_result
_table
_from
_io
_requests
(
904 self
._analysis
.open_io_requests
, 'duration', True, open_table
)
905 self
._fill
_log
_result
_table
_from
_io
_requests
(
906 self
._analysis
.read_io_requests
, 'duration', True, read_table
)
907 self
._fill
_log
_result
_table
_from
_io
_requests
(
908 self
._analysis
.write_io_requests
, 'duration', True, write_table
)
909 self
._fill
_log
_result
_table
_from
_io
_requests
(
910 self
._analysis
.sync_io_requests
, 'duration', True, sync_table
)
912 return [open_table
, read_table
, write_table
, sync_table
]
914 def _print_log_row(self
, row
):
915 fmt
= '{:<40} {:<16} {:>16} {:>11} {:<24} {:<8} {:<14}'
916 time_range_str
= format_utils
.format_time_range(
917 row
.time_range
.begin
,
919 self
._args
.multi_day
,
922 duration_str
= '%0.03f' % row
.duration
.to_us()
924 if type(row
.size
) is mi
.Empty
:
927 size
= format_utils
.format_size(row
.size
.value
)
929 tid
= row
.process
.tid
930 proc_name
= row
.process
.name
932 if type(row
.fd
) is mi
.Empty
:
935 if type(row
.path
) is mi
.Unknown
:
940 file_str
= '%s (fd=%s)' % (path
, row
.fd
.fd
)
942 if row
.out_of_range
.value
:
943 time_range_str
+= '*'
946 time_range_str
+= ' '
949 print(fmt
.format(time_range_str
, row
.syscall
.name
, duration_str
,
950 size
, proc_name
, tid
, file_str
))
952 def _print_log(self
, result_table
):
953 if not result_table
.rows
:
956 has_out_of_range_rq
= False
960 print(fmt
.format(result_table
.title
, result_table
.subtitle
))
961 header_fmt
= '{:<20} {:<20} {:<16} {:<23} {:<5} {:<24} {:<8} {:<14}'
962 print(header_fmt
.format(
963 'Begin', 'End', 'Name', 'Duration (usec)', 'Size', 'Proc', 'PID',
966 for row
in result_table
.rows
:
967 self
._print
_log
_row
(row
)
969 if not has_out_of_range_rq
and row
.out_of_range
.value
:
970 has_out_of_range_rq
= True
972 if has_out_of_range_rq
:
973 print('*: Syscalls started and/or completed outside of the '
def _print_top(self, top_tables):
    """Print each top-latencies table, reusing the log renderer."""
    for top_table in top_tables:
        self._print_log(top_table)
980 def _get_log_result_table(self
, begin
, end
):
981 log_table
= self
._mi
_create
_result
_table
(self
._MI
_TABLE
_CLASS
_LOG
,
983 self
._fill
_log
_result
_table
_from
_io
_requests
(
984 self
._analysis
.io_requests
, 'begin_ts', False, log_table
)
988 def _append_latency_stats_row(self
, obj
, rq_durations
, result_table
):
989 rq_count
= len(rq_durations
)
990 total_duration
= sum(rq_durations
)
992 if len(rq_durations
) > 0:
993 min_duration
= min(rq_durations
)
994 max_duration
= max(rq_durations
)
1000 stdev
= mi
.Unknown()
1002 stdev
= mi
.Duration(statistics
.stdev(rq_durations
))
1005 avg
= total_duration
/ rq_count
1009 result_table
.append_row(
1011 count
=mi
.Integer(rq_count
),
1012 min_latency
=mi
.Duration(min_duration
),
1013 avg_latency
=mi
.Duration(avg
),
1014 max_latency
=mi
.Duration(max_duration
),
1015 stdev_latency
=stdev
,
1018 def _append_latency_stats_row_from_requests(self
, obj
, io_requests
,
1020 rq_durations
= [io_rq
.duration
for io_rq
in io_requests
if
1021 self
._filter
_io
_request
(io_rq
)]
1022 self
._append
_latency
_stats
_row
(obj
, rq_durations
, result_table
)
1024 def _get_syscall_latency_stats_result_table(self
, begin
, end
):
1025 result_table
= self
._mi
_create
_result
_table
(
1026 self
._MI
_TABLE
_CLASS
_SYSCALL
_LATENCY
_STATS
, begin
, end
)
1027 append_fn
= self
._append
_latency
_stats
_row
_from
_requests
1028 append_fn(mi
.String('Open'), self
._analysis
.open_io_requests
,
1030 append_fn(mi
.String('Read'), self
._analysis
.read_io_requests
,
1032 append_fn(mi
.String('Write'), self
._analysis
.write_io_requests
,
1034 append_fn(mi
.String('Sync'), self
._analysis
.sync_io_requests
,
1039 def _get_disk_latency_stats_result_table(self
, begin
, end
):
1040 if not self
._analysis
.disks
:
1043 result_table
= self
._mi
_create
_result
_table
(
1044 self
._MI
_TABLE
_CLASS
_PART
_LATENCY
_STATS
, begin
, end
)
1046 for disk
in self
._analysis
.disks
.values():
1048 rq_durations
= [rq
.duration
for rq
in disk
.rq_list
if
1049 self
._filter
_io
_request
(rq
)]
1050 disk
= mi
.Disk(disk
.disk_name
)
1051 self
._append
_latency
_stats
_row
(disk
, rq_durations
,
def _get_latency_stats_result_tables(self, begin, end):
    """Return the (syscall, disk) latency statistics tables for [begin, end]."""
    return (
        self._get_syscall_latency_stats_result_table(begin, end),
        self._get_disk_latency_stats_result_table(begin, end),
    )
1062 def _print_latency_stats_row(self
, row
):
1063 if type(row
.stdev_latency
) is mi
.Unknown
:
1066 stdev
= '%0.03f' % row
.stdev_latency
.to_us()
1068 avg
= '%0.03f' % row
.avg_latency
.to_us()
1069 min_duration
= '%0.03f' % row
.min_latency
.to_us()
1070 max_duration
= '%0.03f' % row
.max_latency
.to_us()
1072 print(IoAnalysisCommand
._LATENCY
_STATS
_FORMAT
.format(
1073 str(row
.obj
), row
.count
.value
, min_duration
,
1074 avg
, max_duration
, stdev
))
def _print_syscall_latency_stats(self, stats_table):
    """Print the system call latency statistics section as text."""
    fmt = IoAnalysisCommand._LATENCY_STATS_FORMAT
    print('\nSyscalls latency statistics (usec):')
    print(fmt.format('Type', 'Count', 'Min', 'Average', 'Max', 'Stdev'))
    print(IoAnalysisCommand._SECTION_SEPARATOR_STRING)

    for row in stats_table.rows:
        self._print_latency_stats_row(row)
1085 def _print_disk_latency_stats(self
, stats_table
):
1086 if not stats_table
.rows
:
1089 print('\nDisk latency statistics (usec):')
1090 print(IoAnalysisCommand
._LATENCY
_STATS
_FORMAT
.format(
1091 'Name', 'Count', 'Min', 'Average', 'Max', 'Stdev'))
1092 print(IoAnalysisCommand
._SECTION
_SEPARATOR
_STRING
)
1094 for row
in stats_table
.rows
:
1095 self
._print
_latency
_stats
_row
(row
)
def _print_latency_stats(self, syscall_latency_stats_table,
                         disk_latency_stats_table):
    """Print the latency statistics: syscall section, then disk section."""
    self._print_syscall_latency_stats(syscall_latency_stats_table)
    self._print_disk_latency_stats(disk_latency_stats_table)
def _add_arguments(self, ap):
    """Register the I/O command's command-line arguments on *ap*.

    Adds the shared filter/output option groups provided by Command
    (process filters, min/max, --log, --top, --stats, --freq) plus the
    I/O-specific --usage, --minsize and --maxsize options.
    """
    Command._add_proc_filter_args(ap)
    Command._add_min_max_args(ap)
    Command._add_log_args(
        ap, help='Output the I/O requests in chronological order')
    Command._add_top_args(
        ap, help='Output the top I/O latencies by category')
    Command._add_stats_args(ap, help='Output the I/O latency statistics')
    Command._add_freq_args(
        ap, help='Output the I/O latency frequency distribution')
    ap.add_argument('--usage', action='store_true',
                    help='Output the I/O usage')
    # Fix: the original help texts read "Filter out, ... less/more that
    # <size> bytes", which is ungrammatical user-facing output.
    ap.add_argument('--minsize', type=float,
                    help='Filter out I/O operations working with '
                         'less than minsize bytes')
    ap.add_argument('--maxsize', type=float,
                    help='Filter out I/O operations working with '
                         'more than maxsize bytes')
1123 iocmd
= IoAnalysisCommand(mi_mode
=mi_mode
)
1127 def _runstats(mi_mode
):
1128 sys
.argv
.insert(1, '--stats')
1132 def _runlog(mi_mode
):
1133 sys
.argv
.insert(1, '--log')
1137 def _runfreq(mi_mode
):
1138 sys
.argv
.insert(1, '--freq')
1142 def _runlatencytop(mi_mode
):
1143 sys
.argv
.insert(1, '--top')
1147 def _runusage(mi_mode
):
1148 sys
.argv
.insert(1, '--usage')
1153 _runstats(mi_mode
=False)
1157 _runlog(mi_mode
=False)
1161 _runfreq(mi_mode
=False)
def runlatencytop():
    # Entry point: top I/O latencies view in plain-text (non-MI) mode.
    _runlatencytop(mi_mode=False)
1169 _runusage(mi_mode
=False)
1173 _runstats(mi_mode
=True)
1177 _runlog(mi_mode
=True)
1181 _runfreq(mi_mode
=True)
def runlatencytop_mi():
    # Entry point: top I/O latencies view in machine-interface (MI) mode.
    _runlatencytop(mi_mode=True)
1189 _runusage(mi_mode
=True)