3 # The MIT License (MIT)
5 # Copyright (C) 2015 - Julien Desfossez <jdesfossez@efficios.com>
6 # 2015 - Antoine Busque <abusque@efficios.com>
8 # Permission is hereby granted, free of charge, to any person obtaining a copy
9 # of this software and associated documentation files (the "Software"), to deal
10 # in the Software without restriction, including without limitation the rights
11 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
12 # copies of the Software, and to permit persons to whom the Software is
13 # furnished to do so, subject to the following conditions:
15 # The above copyright notice and this permission notice shall be included in
16 # all copies or substantial portions of the Software.
18 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
21 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
26 from .command
import Command
27 import lttnganalyses
.io
28 from linuxautomaton
import common
, sv
29 from ascii_graph
import Pyasciigraph
34 class IoAnalysisCommand(Command
):
36 _DESC
= """The I/O command."""
38 _LATENCY_STATS_FORMAT
= '{:<14} {:>14} {:>14} {:>14} {:>14} {:>14}'
39 _SECTION_SEPARATOR_STRING
= '-' * 89
42 super().__init
__(self
._add
_arguments
,
43 enable_proc_filter_args
=True,
44 enable_max_min_args
=True,
45 enable_max_min_size_arg
=True,
48 def _validate_transform_args(self
):
49 self
._arg
_usage
= self
._args
.usage
50 self
._arg
_stats
= self
._args
.latencystats
51 self
._arg
_latencytop
= self
._args
.latencytop
52 self
._arg
_freq
= self
._args
.latencyfreq
53 self
._arg
_freq
_resolution
= self
._args
.freq_resolution
55 def _default_args(self
, stats
, log
, freq
, usage
, latencytop
):
57 self
._arg
_stats
= True
63 self
._arg
_usage
= True
65 self
._arg
_latencytop
= True
67 def run(self
, stats
=False, log
=False, freq
=False, usage
=False,
69 # parse arguments first
71 # validate, transform and save specific arguments
72 self
._validate
_transform
_args
()
73 # handle the default args for different executables
74 self
._default
_args
(stats
, log
, freq
, usage
, latencytop
)
77 # create the appropriate analysis/analyses
78 self
._create
_analysis
()
80 self
._run
_analysis
(self
._reset
_total
, self
._refresh
)
82 self
._print
_results
(self
.start_ns
, self
.trace_end_ts
)
    def run_latencytop(self):
        """Entry point for the latency-top executable: run the analysis
        with only the latency-top output enabled."""
        self.run(latencytop=True)
    def _create_analysis(self):
        # Instantiate the I/O analysis, fed by the automaton state
        # (self.state) built while reading the trace.
        self._analysis = lttnganalyses.io.IoAnalysis(self.state)
    def _refresh(self, begin, end):
        """Print the results for the period [begin, end] and reset the
        accumulated analysis state to start the next period."""
        self._print_results(begin, end)
        self._reset_total(end)
109 def _filter_size(self
, size
):
112 if self
._arg
_maxsize
is not None and size
> self
._arg
_maxsize
:
114 if self
._arg
_minsize
is not None and size
< self
._arg
_minsize
:
118 def _filter_latency(self
, duration
):
119 if self
._arg
_max
is not None and (duration
/1000) > self
._arg
_max
:
121 if self
._arg
_min
is not None and (duration
/1000) < self
._arg
_min
:
125 def _filter_time_range(self
, begin
, end
):
126 return not (self
._arg
_begin
and self
._arg
_end
and end
and
127 begin
> self
._arg
_end
)
129 def _filter_io_request(self
, io_rq
):
130 proc
= self
._analysis
.tids
[io_rq
.tid
]
131 return self
._filter
_process
(proc
) and \
132 self
._filter
_size
(io_rq
.size
) and \
133 self
._filter
_latency
(io_rq
.duration
) and \
134 self
._filter
_time
_range
(io_rq
.begin_ts
, io_rq
.end_ts
)
136 def _is_io_rq_out_of_range(self
, io_rq
):
137 return self
._arg
_begin
and io_rq
.begin_ts
< self
._arg
_begin
or \
138 self
._arg
_end
and io_rq
.end_ts
> self
._arg
_end
140 def _print_ascii_graph(self
, input_list
, get_datum_cb
, graph_label
,
142 """Print an ascii graph for given data
144 This method wraps the ascii_graph module and facilitates the
145 printing of a graph with a limited number of lines.
148 input_list (list): A list of objects from which the data
149 for the graph will be generated.
151 get_datum_cb (function): function that takes a single
152 object from the input list as an argument, and returns a
153 datum tuple for the graph, of the form (string, int). The
154 string element is printed as is in the graph, and the int
155 is the numeric value corresponding to this graph entry.
157 graph_label (string): Label used to identify the printed
160 graph_args (dict, optional): Dict of keyword args to be
161 passed to the graph() function as is.
164 limit
= self
._arg
_limit
165 graph
= Pyasciigraph()
168 for elem
in input_list
:
169 datum
= get_datum_cb(elem
)
170 if datum
is not None:
173 if limit
is not None and count
>= limit
:
176 for line
in graph
.graph(graph_label
, data
, **graph_args
):
179 # I/O Top output methods
180 def _get_read_datum(self
, proc_stats
):
181 if not self
._filter
_process
(proc_stats
):
184 if proc_stats
.pid
is None:
185 pid_str
= 'unknown (tid=%d)' % (proc_stats
.tid
)
187 pid_str
= str(proc_stats
.pid
)
189 format_str
= '{:>10} {:<25} {:>9} file {:>9} net {:>9} unknown'
190 output_str
= format_str
.format(
191 common
.convert_size(proc_stats
.total_read
, padding_after
=True),
192 '%s (%s)' % (proc_stats
.comm
, pid_str
),
193 common
.convert_size(proc_stats
.disk_read
, padding_after
=True),
194 common
.convert_size(proc_stats
.net_read
, padding_after
=True),
195 common
.convert_size(proc_stats
.unk_read
, padding_after
=True))
197 return (output_str
, proc_stats
.total_read
)
199 def _get_write_datum(self
, proc_stats
):
200 if not self
._filter
_process
(proc_stats
):
203 if proc_stats
.pid
is None:
204 pid_str
= 'unknown (tid=%d)' % (proc_stats
.tid
)
206 pid_str
= str(proc_stats
.pid
)
208 format_str
= '{:>10} {:<25} {:>9} file {:>9} net {:>9} unknown'
209 output_str
= format_str
.format(
210 common
.convert_size(proc_stats
.total_write
, padding_after
=True),
211 '%s (%s)' % (proc_stats
.comm
, pid_str
),
212 common
.convert_size(proc_stats
.disk_write
, padding_after
=True),
213 common
.convert_size(proc_stats
.net_write
, padding_after
=True),
214 common
.convert_size(proc_stats
.unk_write
, padding_after
=True))
216 return (output_str
, proc_stats
.total_write
)
218 def _get_block_read_datum(self
, proc_stats
):
219 if not self
._filter
_process
(proc_stats
) or proc_stats
.block_read
== 0:
222 comm
= proc_stats
.comm
226 if proc_stats
.pid
is None:
227 pid_str
= 'unknown (tid=%d)' % (proc_stats
.tid
)
229 pid_str
= str(proc_stats
.pid
)
231 format_str
= '{:>10} {:<22}'
232 output_str
= format_str
.format(
233 common
.convert_size(proc_stats
.block_read
, padding_after
=True),
234 '%s (pid=%s)' % (comm
, pid_str
))
236 return (output_str
, proc_stats
.block_read
)
238 def _get_block_write_datum(self
, proc_stats
):
239 if not self
._filter
_process
(proc_stats
) or \
240 proc_stats
.block_write
== 0:
243 comm
= proc_stats
.comm
247 if proc_stats
.pid
is None:
248 pid_str
= 'unknown (tid=%d)' % (proc_stats
.tid
)
250 pid_str
= str(proc_stats
.pid
)
252 format_str
= '{:>10} {:<22}'
253 output_str
= format_str
.format(
254 common
.convert_size(proc_stats
.block_write
, padding_after
=True),
255 '%s (pid=%s)' % (comm
, pid_str
))
257 return (output_str
, proc_stats
.block_write
)
259 def _get_total_rq_sectors_datum(self
, disk
):
260 if disk
.total_rq_sectors
== 0:
263 return (disk
.disk_name
, disk
.total_rq_sectors
)
265 def _get_rq_count_datum(self
, disk
):
266 if disk
.rq_count
== 0:
269 return (disk
.disk_name
, disk
.rq_count
)
271 def _get_avg_disk_latency_datum(self
, disk
):
272 if disk
.rq_count
== 0:
275 avg_latency
= ((disk
.total_rq_duration
/ disk
.rq_count
) /
276 common
.MSEC_PER_NSEC
)
277 avg_latency
= round(avg_latency
, 3)
279 return ('%s' % disk
.disk_name
, avg_latency
)
281 def _get_net_recv_bytes_datum(self
, iface
):
282 return ('%s %s' % (common
.convert_size(iface
.recv_bytes
), iface
.name
),
285 def _get_net_sent_bytes_datum(self
, iface
):
286 return ('%s %s' % (common
.convert_size(iface
.sent_bytes
), iface
.name
),
289 def _get_file_read_datum(self
, file_stats
):
290 if file_stats
.read
== 0:
294 for pid
, fd
in file_stats
.fd_by_pid
.items():
295 comm
= self
._analysis
.tids
[pid
].comm
296 fd_by_pid_str
+= 'fd %d in %s (%s) ' % (fd
, comm
, pid
)
298 format_str
= '{:>10} {} {}'
299 output_str
= format_str
.format(
300 common
.convert_size(file_stats
.read
, padding_after
=True),
304 return (output_str
, file_stats
.read
)
306 def _get_file_write_datum(self
, file_stats
):
307 if file_stats
.write
== 0:
311 for pid
, fd
in file_stats
.fd_by_pid
.items():
312 comm
= self
._analysis
.tids
[pid
].comm
313 fd_by_pid_str
+= 'fd %d in %s (%s) ' % (fd
, comm
, pid
)
315 format_str
= '{:>10} {} {}'
316 output_str
= format_str
.format(
317 common
.convert_size(file_stats
.write
, padding_after
=True),
321 return (output_str
, file_stats
.write
)
323 def _output_read(self
):
324 input_list
= sorted(self
._analysis
.tids
.values(),
325 key
=operator
.attrgetter('total_read'),
327 label
= 'Per-process I/O Read'
328 graph_args
= {'with_value': False}
329 self
._print
_ascii
_graph
(input_list
, self
._get
_read
_datum
, label
,
332 def _output_write(self
):
333 input_list
= sorted(self
._analysis
.tids
.values(),
334 key
=operator
.attrgetter('total_write'),
336 label
= 'Per-process I/O Write'
337 graph_args
= {'with_value': False}
338 self
._print
_ascii
_graph
(input_list
, self
._get
_write
_datum
, label
,
341 def _output_block_read(self
):
342 input_list
= sorted(self
._analysis
.tids
.values(),
343 key
=operator
.attrgetter('block_read'),
345 label
= 'Block I/O Read'
346 graph_args
= {'with_value': False}
347 self
._print
_ascii
_graph
(input_list
, self
._get
_block
_read
_datum
,
350 def _output_block_write(self
):
351 input_list
= sorted(self
._analysis
.tids
.values(),
352 key
=operator
.attrgetter('block_write'),
354 label
= 'Block I/O Write'
355 graph_args
= {'with_value': False}
356 self
._print
_ascii
_graph
(input_list
, self
._get
_block
_write
_datum
,
359 def _output_total_rq_sectors(self
):
360 input_list
= sorted(self
._analysis
.disks
.values(),
361 key
=operator
.attrgetter('total_rq_sectors'),
363 label
= 'Disk requests sector count'
364 graph_args
= {'unit': ' sectors'}
365 self
._print
_ascii
_graph
(input_list
, self
._get
_total
_rq
_sectors
_datum
,
368 def _output_rq_count(self
):
369 input_list
= sorted(self
._analysis
.disks
.values(),
370 key
=operator
.attrgetter('rq_count'),
372 label
= 'Disk request count'
373 graph_args
= {'unit': ' requests'}
374 self
._print
_ascii
_graph
(input_list
, self
._get
_rq
_count
_datum
,
377 def _output_avg_disk_latency(self
):
378 input_list
= self
._analysis
.disks
.values()
379 label
= 'Disk request average latency'
380 graph_args
= {'unit': ' ms', 'sort': 2}
381 self
._print
_ascii
_graph
(input_list
, self
._get
_avg
_disk
_latency
_datum
,
384 def _output_net_recv_bytes(self
):
385 input_list
= sorted(self
._analysis
.ifaces
.values(),
386 key
=operator
.attrgetter('recv_bytes'),
388 label
= 'Network received bytes'
389 graph_args
= {'with_value': False}
390 self
._print
_ascii
_graph
(input_list
, self
._get
_net
_recv
_bytes
_datum
,
393 def _output_net_sent_bytes(self
):
394 input_list
= sorted(self
._analysis
.ifaces
.values(),
395 key
=operator
.attrgetter('sent_bytes'),
397 label
= 'Network sent bytes'
398 graph_args
= {'with_value': False}
399 self
._print
_ascii
_graph
(input_list
, self
._get
_net
_sent
_bytes
_datum
,
402 def _output_file_read(self
, files
):
403 input_list
= sorted(files
.values(),
404 key
=lambda file_stats
: file_stats
.read
,
407 graph_args
= {'with_value': False, 'sort': 2}
408 self
._print
_ascii
_graph
(input_list
, self
._get
_file
_read
_datum
,
411 def _output_file_write(self
, files
):
412 input_list
= sorted(files
.values(),
413 key
=lambda file_stats
: file_stats
.write
,
415 label
= 'Files write'
416 graph_args
= {'with_value': False, 'sort': 2}
417 self
._print
_ascii
_graph
(input_list
, self
._get
_file
_write
_datum
,
420 def _output_file_read_write(self
):
421 files
= self
._analysis
.get_files_stats(self
._arg
_pid
_list
,
423 self
._output
_file
_read
(files
)
424 self
._output
_file
_write
(files
)
426 def iotop_output(self
):
429 self
._output
_file
_read
_write
()
430 self
._output
_block
_read
()
431 self
._output
_block
_write
()
432 self
._output
_total
_rq
_sectors
()
433 self
._output
_rq
_count
()
434 self
._output
_avg
_disk
_latency
()
435 self
._output
_net
_recv
_bytes
()
436 self
._output
_net
_sent
_bytes
()
438 # IO Latency output methods
439 def iolatency_freq_histogram(self
, _min
, _max
, res
, rq_list
, title
):
440 step
= (_max
- _min
) / res
445 graph
= Pyasciigraph()
447 buckets
.append(i
* step
)
451 b
= min(int((v
-_min
)/step
), res
- 1)
456 g
.append(('%0.03f' % (i
* step
+ _min
), v
))
458 for line
in graph
.graph(title
, g
, info_before
=True, count
=True):
462 # iolatency functions
463 def iolatency_output_disk(self
):
464 for dev
in self
.state
.disks
.keys():
465 d
= self
.state
.disks
[dev
]
467 self
.compute_disk_stats(d
)
468 if d
.count
is not None:
469 self
.iolatency_freq_histogram(d
.min, d
.max,
470 self
._arg
_freq
_resolution
,
472 'Frequency distribution for '
    def iolatency_output(self):
        # Print the per-disk latency frequency distributions.
        self._output_disk_latency_freq()
479 def iolatency_syscalls_output(self
):
480 s
= self
.syscalls_stats
483 self
.iolatency_freq_histogram(s
.open_min
/1000, s
.open_max
/1000,
484 self
._arg
_freq
_resolution
, s
.open_rq
,
485 'Open latency distribution (usec)')
487 self
.iolatency_freq_histogram(s
.read_min
/1000, s
.read_max
/1000,
488 self
._arg
_freq
_resolution
, s
.read_rq
,
489 'Read latency distribution (usec)')
490 if s
.write_count
> 0:
491 self
.iolatency_freq_histogram(s
.write_min
/1000, s
.write_max
/1000,
492 self
._arg
_freq
_resolution
,
494 'Write latency distribution (usec)')
496 self
.iolatency_freq_histogram(s
.sync_min
/1000, s
.sync_max
/1000,
497 self
._arg
_freq
_resolution
, s
.sync_rq
,
498 'Sync latency distribution (usec)')
500 def _output_io_request(self
, io_rq
):
501 fmt
= '{:<40} {:<16} {:>16} {:>11} {:<24} {:<8} {:<14}'
503 begin_time
= common
.ns_to_hour_nsec(io_rq
.begin_ts
,
506 end_time
= common
.ns_to_hour_nsec(io_rq
.end_ts
,
509 time_range_str
= '[' + begin_time
+ ',' + end_time
+ ']'
510 duration_str
= '%0.03f' % (io_rq
.duration
/ 1000)
512 if io_rq
.size
is None:
515 size
= common
.convert_size(io_rq
.size
)
518 proc_stats
= self
._analysis
.tids
[tid
]
519 comm
= proc_stats
.comm
521 # TODO: handle fd_in/fd_out for RW type operations
527 parent_proc
= proc_stats
528 if parent_proc
.pid
is not None:
529 parent_proc
= self
._analysis
.tids
[parent_proc
.pid
]
531 fd_stats
= parent_proc
.get_fd(fd
, io_rq
.end_ts
)
532 if fd_stats
is not None:
533 filename
= fd_stats
.filename
537 file_str
= '%s (fd=%s)' % (filename
, fd
)
539 if self
._is
_io
_rq
_out
_of
_range
(io_rq
):
540 time_range_str
+= '*'
543 time_range_str
+= ' '
546 print(fmt
.format(time_range_str
, io_rq
.syscall_name
, duration_str
,
547 size
, comm
, tid
, file_str
))
549 def _output_io_requests_list(self
, rq_list
, title
, sort_key
, is_top
=False):
554 has_out_of_range_rq
= False
559 header_fmt
= '{:<19} {:<20} {:<16} {:<23} {:<5} {:<24} {:<8} {:<14}'
560 print(header_fmt
.format(
561 'Begin', 'End', 'Name', 'Duration (usec)', 'Size', 'Proc', 'PID',
564 for io_rq
in sorted(rq_list
, key
=operator
.attrgetter(sort_key
),
566 if is_top
and count
> self
._arg
_limit
:
569 self
._output
_io
_request
(io_rq
)
570 if not has_out_of_range_rq
and self
._is
_io
_rq
_out
_of
_range
(io_rq
):
571 has_out_of_range_rq
= True
575 if has_out_of_range_rq
:
576 print('*: Syscalls started and/or completed outside of the '
579 def _output_latency_log_from_requests(self
, io_requests
, title
, sort_key
,
581 io_requests
= [io_rq
for io_rq
in io_requests
if
582 self
._filter
_io
_request
(io_rq
)]
583 self
._output
_io
_requests
_list
(io_requests
, title
, sort_key
, is_top
)
585 def iolatency_syscalls_top_output(self
):
586 self
._output
_latency
_log
_from
_requests
(
587 [io_rq
for io_rq
in self
._analysis
.open_io_requests
if
588 self
._filter
_io
_request
(io_rq
)],
589 'Top open syscall latencies (usec)',
590 'duration', is_top
=True)
591 self
._output
_io
_requests
_list
(
592 [io_rq
for io_rq
in self
._analysis
.read_io_requests
if
593 self
._filter
_io
_request
(io_rq
)],
594 'Top read syscall latencies (usec)',
595 'duration', is_top
=True)
596 self
._output
_io
_requests
_list
(
597 [io_rq
for io_rq
in self
._analysis
.write_io_requests
if
598 self
._filter
_io
_request
(io_rq
)],
599 'Top write syscall latencies (usec)',
600 'duration', is_top
=True)
601 self
._output
_io
_requests
_list
(
602 [io_rq
for io_rq
in self
._analysis
.sync_io_requests
if
603 self
._filter
_io
_request
(io_rq
)],
604 'Top sync syscall latencies (usec)',
605 'duration', is_top
=True)
607 def iolatency_syscalls_log_output(self
):
608 self
._output
_io
_requests
_list
(
609 self
._analysis
.io_requests
,
610 'Log of all I/O system calls',
613 # IO Stats output methods
614 def _output_latency_stats(self
, name
, rq_count
, min_duration
, max_duration
,
615 total_duration
, rq_durations
):
619 stdev
= '%0.03f' % (statistics
.stdev(rq_durations
) / 1000)
621 avg
= '%0.03f' % (total_duration
/ (rq_count
) / 1000)
622 min_duration
= '%0.03f' % (min_duration
/ 1000)
623 max_duration
= '%0.03f' % (max_duration
/ 1000)
625 print(IoAnalysisCommand
._LATENCY
_STATS
_FORMAT
.format(
626 name
, rq_count
, min_duration
, avg
, max_duration
, stdev
))
628 def _output_latency_stats_from_requests(self
, io_requests
, name
):
629 rq_durations
= [io_rq
.duration
for io_rq
in io_requests
if
630 self
._filter
_io
_request
(io_rq
)]
631 rq_count
= len(rq_durations
)
632 min_duration
= min(rq_durations
)
633 max_duration
= max(rq_durations
)
634 total_duration
= sum(rq_durations
)
636 self
._output
_latency
_stats
(name
, rq_count
, min_duration
,
637 max_duration
, total_duration
,
640 def _output_syscalls_latency_stats(self
):
641 print('\nSyscalls latency statistics (usec):')
642 print(IoAnalysisCommand
._LATENCY
_STATS
_FORMAT
.format(
643 'Type', 'Count', 'Min', 'Average', 'Max', 'Stdev'))
644 print(IoAnalysisCommand
._SECTION
_SEPARATOR
_STRING
)
646 self
._output
_latency
_stats
_from
_requests
(
647 self
._analysis
.open_io_requests
, 'Open')
648 self
._output
_latency
_stats
_from
_requests
(
649 self
._analysis
.read_io_requests
, 'Read')
650 self
._output
_latency
_stats
_from
_requests
(
651 self
._analysis
.write_io_requests
, 'Write')
652 self
._output
_latency
_stats
_from
_requests
(
653 self
._analysis
.sync_io_requests
, 'Sync')
655 def _output_disk_latency_stats(self
):
656 if not self
._analysis
.disks
:
659 print('\nDisk latency statistics (usec):')
660 print(IoAnalysisCommand
._LATENCY
_STATS
_FORMAT
.format(
661 'Name', 'Count', 'Min', 'Average', 'Max', 'Stdev'))
662 print(IoAnalysisCommand
._SECTION
_SEPARATOR
_STRING
)
664 for disk
in self
._analysis
.disks
.values():
666 rq_durations
= [rq
.duration
for rq
in disk
.rq_list
]
667 self
._output
_latency
_stats
(disk
.disk_name
,
669 disk
.min_rq_duration
,
670 disk
.max_rq_duration
,
671 disk
.total_rq_duration
,
    def iostats_output(self):
        # Print the syscall latency statistics, then the per-disk
        # latency statistics.
        self._output_syscalls_latency_stats()
        self._output_disk_latency_stats()
678 def _print_results(self
, begin_ns
, end_ns
):
679 self
._print
_date
(begin_ns
, end_ns
)
683 self
.iostats_output()
684 if self
._arg
_latencytop
:
685 self
.iolatency_syscalls_top_output()
687 self
.iolatency_syscalls_output()
688 self
.iolatency_output()
690 self
.iolatency_syscalls_log_output()
    def _reset_total(self, start_ts):
        # Reset the accumulated analysis state at the start of a new
        # period.  start_ts is part of the reset-callback signature but
        # is not used by the I/O analysis reset.
        self._analysis.reset()
695 def _add_arguments(self
, ap
):
696 ap
.add_argument('--usage', action
='store_true',
697 help='Show the I/O usage')
698 ap
.add_argument('--latencystats', action
='store_true',
699 help='Show the I/O latency statistics')
700 ap
.add_argument('--latencytop', action
='store_true',
701 help='Show the I/O latency top')
702 ap
.add_argument('--latencyfreq', action
='store_true',
703 help='Show the I/O latency frequency distribution')
704 ap
.add_argument('--freq-resolution', type=int, default
=20,
705 help='Frequency distribution resolution '
712 iocmd
= IoAnalysisCommand()
719 iocmd
= IoAnalysisCommand()
721 iocmd
.run_latencytop()
726 iocmd
= IoAnalysisCommand()
733 iocmd
= IoAnalysisCommand()
740 iocmd
= IoAnalysisCommand()