cli: refactor progress indication
[deliverable/lttng-analyses.git] / lttnganalyses / cli / command.py
1 # The MIT License (MIT)
2 #
3 # Copyright (C) 2015 - Julien Desfossez <jdesfossez@efficios.com>
4 # 2015 - Philippe Proulx <pproulx@efficios.com>
5 # 2015 - Antoine Busque <abusque@efficios.com>
6 #
7 # Permission is hereby granted, free of charge, to any person obtaining a copy
8 # of this software and associated documentation files (the "Software"), to deal
9 # in the Software without restriction, including without limitation the rights
10 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11 # copies of the Software, and to permit persons to whom the Software is
12 # furnished to do so, subject to the following conditions:
13 #
14 # The above copyright notice and this permission notice shall be included in
15 # all copies or substantial portions of the Software.
16 #
17 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
20 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
23 # SOFTWARE.
24
25 import argparse
26 import json
27 import os
28 import re
29 import sys
30 import subprocess
31 from babeltrace import TraceCollection
32 from . import mi, progressbar
33 from .. import _version, __version__
34 from ..core import analysis
35 from ..common import (
36 format_utils, parse_utils, time_utils, trace_utils, version_utils
37 )
38 from ..linuxautomaton import automaton
39
40
class Command:
    """Base class for lttng-analyses command-line commands.

    Concrete subclasses supply the analysis-specific class attributes
    used below (e.g. _ANALYSIS_CLASS, _DESC, _MI_TABLE_CLASSES,
    _MI_TITLE, _MI_TAGS, _MI_DESCRIPTION) and implement
    _analysis_tick().
    """

    # Tags shared by every analysis's machine-interface metadata.
    _MI_BASE_TAGS = ['linux-kernel', 'lttng-analyses']
    _MI_AUTHORS = [
        'Julien Desfossez',
        'Antoine Busque',
        'Philippe Proulx',
    ]
    _MI_URL = 'https://github.com/lttng/lttng-analyses'

    def __init__(self, mi_mode=False):
        """Initialize common command state.

        mi_mode -- True to emit machine-interface (JSON) output instead
        of human-readable text.
        """
        self._analysis = None
        self._analysis_conf = None
        self._args = None
        self._handles = None
        self._traces = None
        # Number of analysis periods reported so far (see
        # _analysis_tick_cb()).
        self._ticks = 0
        self._mi_mode = mi_mode
        self._create_automaton()
        self._mi_setup()
60
    @property
    def mi_mode(self):
        """Whether machine-interface (JSON) output mode is enabled."""
        return self._mi_mode
64
    def run(self):
        """Execute the whole command pipeline.

        Parses the command line, opens the trace, creates and runs the
        analysis, then closes the trace.  A keyboard interrupt makes
        the command exit quietly with status 0.
        """
        try:
            self._parse_args()
            self._open_trace()
            self._create_analysis()
            self._run_analysis()
            self._close_trace()
        except KeyboardInterrupt:
            sys.exit(0)
74
    def _mi_error(self, msg, code=None):
        # Emit a machine-interface error object as JSON on stdout.
        print(json.dumps(mi.get_error(msg, code)))
77
78 def _non_mi_error(self, msg):
79 try:
80 import termcolor
81
82 msg = termcolor.colored(msg, 'red', attrs=['bold'])
83 except ImportError:
84 pass
85
86 print(msg, file=sys.stderr)
87
88 def _error(self, msg, code, exit_code=1):
89 if self._mi_mode:
90 self._mi_error(msg)
91 else:
92 self._non_mi_error(msg)
93
94 sys.exit(exit_code)
95
96 def _gen_error(self, msg, exit_code=1):
97 self._error('Error: {}'.format(msg), exit_code)
98
99 def _cmdline_error(self, msg, exit_code=1):
100 self._error('Command line error: {}'.format(msg), exit_code)
101
102 def _print(self, msg):
103 if not self._mi_mode:
104 print(msg)
105
    def _mi_create_result_table(self, table_class_name, begin, end,
                                subtitle=None):
        """Create an empty MI result table of the registered class
        table_class_name, covering the period [begin, end]."""
        return mi.ResultTable(self._mi_table_classes[table_class_name],
                              begin, end, subtitle)
110
111 def _mi_setup(self):
112 self._mi_table_classes = {}
113
114 for tc_tuple in self._MI_TABLE_CLASSES:
115 table_class = mi.TableClass(tc_tuple[0], tc_tuple[1], tc_tuple[2])
116 self._mi_table_classes[table_class.name] = table_class
117
118 self._mi_clear_result_tables()
119
120 def _mi_print_metadata(self):
121 tags = self._MI_BASE_TAGS + self._MI_TAGS
122 infos = mi.get_metadata(version=self._MI_VERSION, title=self._MI_TITLE,
123 description=self._MI_DESCRIPTION,
124 authors=self._MI_AUTHORS, url=self._MI_URL,
125 tags=tags,
126 table_classes=self._mi_table_classes.values())
127 print(json.dumps(infos))
128
129 def _mi_append_result_table(self, result_table):
130 if not result_table or not result_table.rows:
131 return
132
133 tc_name = result_table.table_class.name
134 self._mi_get_result_tables(tc_name).append(result_table)
135
136 def _mi_append_result_tables(self, result_tables):
137 if not result_tables:
138 return
139
140 for result_table in result_tables:
141 self._mi_append_result_table(result_table)
142
    def _mi_clear_result_tables(self):
        # Drop all accumulated result tables (keyed by table class name).
        self._result_tables = {}
145
146 def _mi_get_result_tables(self, table_class_name):
147 if table_class_name not in self._result_tables:
148 self._result_tables[table_class_name] = []
149
150 return self._result_tables[table_class_name]
151
152 def _mi_print(self):
153 results = []
154
155 for result_tables in self._result_tables.values():
156 for result_table in result_tables:
157 results.append(result_table.to_native_object())
158
159 obj = {
160 'results': results,
161 }
162
163 print(json.dumps(obj))
164
    def _create_summary_result_tables(self):
        """Hook for subclasses: build summary result tables when more
        than one analysis period was reported (MI mode only)."""
        pass
167
168 def _open_trace(self):
169 traces = TraceCollection()
170 handles = traces.add_traces_recursive(self._args.path, 'ctf')
171 if handles == {}:
172 self._gen_error('Failed to open ' + self._args.path, -1)
173 self._handles = handles
174 self._traces = traces
175 self._ts_begin = traces.timestamp_begin
176 self._ts_end = traces.timestamp_end
177 self._process_date_args()
178 self._read_tracer_version()
179 if not self._args.skip_validation:
180 self._check_lost_events()
181
182 def _close_trace(self):
183 for handle in self._handles.values():
184 self._traces.remove_trace(handle)
185
186 def _read_tracer_version(self):
187 kernel_path = None
188 # remove the trailing /
189 while self._args.path.endswith('/'):
190 self._args.path = self._args.path[:-1]
191 for root, _, _ in os.walk(self._args.path):
192 if root.endswith('kernel'):
193 kernel_path = root
194 break
195
196 if kernel_path is None:
197 self._gen_error('Could not find kernel trace directory')
198
199 try:
200 ret, metadata = subprocess.getstatusoutput(
201 'babeltrace -o ctf-metadata "%s"' % kernel_path)
202 except subprocess.CalledProcessError:
203 self._gen_error('Cannot run babeltrace on the trace, cannot read'
204 ' tracer version')
205
206 # fallback to reading the text metadata if babeltrace failed to
207 # output the CTF metadata
208 if ret != 0:
209 try:
210 metadata = subprocess.getoutput(
211 'cat "%s"' % os.path.join(kernel_path, 'metadata'))
212 except subprocess.CalledProcessError:
213 self._gen_error('Cannot read the metadata of the trace, cannot'
214 'extract tracer version')
215
216 major_match = re.search(r'tracer_major = "*(\d+)"*', metadata)
217 minor_match = re.search(r'tracer_minor = "*(\d+)"*', metadata)
218 patch_match = re.search(r'tracer_patchlevel = "*(\d+)"*', metadata)
219
220 if not major_match or not minor_match or not patch_match:
221 self._gen_error('Malformed metadata, cannot read tracer version')
222
223 self.state.tracer_version = version_utils.Version(
224 int(major_match.group(1)),
225 int(minor_match.group(1)),
226 int(patch_match.group(1)),
227 )
228
    def _check_lost_events(self):
        """Run babeltrace over the whole trace before the analysis.

        check_output() captures stdout only, so babeltrace's own
        warnings — presumably including lost-event notices on stderr —
        still reach the user (NOTE(review): confirm babeltrace prints
        these on stderr).
        """
        msg = 'Checking the trace for lost events...'
        self._print(msg)

        # In MI mode, report this step as a progress indication line.
        if self._mi_mode and self._args.output_progress:
            mi.print_progress(0, msg)

        # NOTE(review): the path is interpolated into a shell command
        # line; a path containing double quotes would break this call.
        try:
            subprocess.check_output('babeltrace "%s"' % self._args.path,
                                    shell=True)
        except subprocess.CalledProcessError:
            self._gen_error('Cannot run babeltrace on the trace, cannot verify'
                            ' if events were lost during the trace recording')
242
    def _pre_analysis(self):
        """Hook for subclasses: called once before the event loop."""
        pass
245
246 def _post_analysis(self):
247 if not self._mi_mode:
248 return
249
250 if self._ticks > 1:
251 self._create_summary_result_tables()
252
253 self._mi_print()
254
255 def _pb_setup(self):
256 if self._args.no_progress:
257 return
258
259 ts_end = self._ts_end
260
261 if self._analysis_conf.end_ts is not None:
262 ts_end = self._analysis_conf.end_ts
263
264 if self._mi_mode:
265 cls = progressbar.MiProgress
266 else:
267 cls = progressbar.FancyProgressBar
268
269 self._progress = cls(self._ts_begin, ts_end, self._args.path,
270 self._args.progress_use_size)
271
272 def _pb_update(self, event):
273 if self._args.no_progress:
274 return
275
276 self._progress.update(event)
277
278 def _pb_finish(self):
279 if self._args.no_progress:
280 return
281
282 self._progress.finalize()
283
    def _run_analysis(self):
        """Feed every trace event to the analysis and the state
        automaton, then finalize the analysis.

        The ordering inside the loop matters: the analysis sees each
        event before the automaton updates the shared kernel state,
        and the loop breaks before the automaton processes the event
        that ended the analysis.
        """
        self._pre_analysis()
        self._pb_setup()

        for event in self._traces.events:
            self._pb_update(event)
            self._analysis.process_event(event)
            if self._analysis.ended:
                break
            self._automaton.process_event(event)

        self._pb_finish()
        self._analysis.end()
        self._post_analysis()
298
299 def _print_date(self, begin_ns, end_ns):
300 time_range_str = format_utils.format_time_range(
301 begin_ns, end_ns, print_date=True, gmt=self._args.gmt
302 )
303 date = 'Timerange: {}'.format(time_range_str)
304
305 self._print(date)
306
    def _format_timestamp(self, timestamp):
        """Format a timestamp for display; the date is included only
        when the trace collection spans several days."""
        return format_utils.format_timestamp(
            timestamp, print_date=self._args.multi_day, gmt=self._args.gmt
        )
311
312 def _get_uniform_freq_values(self, durations):
313 if self._args.uniform_step is not None:
314 return (self._args.uniform_min, self._args.uniform_max,
315 self._args.uniform_step)
316
317 if self._args.min is not None:
318 self._args.uniform_min = self._args.min
319 else:
320 self._args.uniform_min = min(durations)
321 if self._args.max is not None:
322 self._args.uniform_max = self._args.max
323 else:
324 self._args.uniform_max = max(durations)
325
326 # ns to µs
327 self._args.uniform_min /= 1000
328 self._args.uniform_max /= 1000
329 self._args.uniform_step = (
330 (self._args.uniform_max - self._args.uniform_min) /
331 self._args.freq_resolution
332 )
333
334 return self._args.uniform_min, self._args.uniform_max, \
335 self._args.uniform_step
336
    def _validate_transform_common_args(self, args):
        """Validate the common CLI arguments and build the analysis
        configuration (self._analysis_conf) from them.

        Mutates `args` in place (unit conversions, computed fields)
        and exits via _cmdline_error() on invalid values.
        """
        refresh_period_ns = None
        if args.refresh is not None:
            try:
                refresh_period_ns = parse_utils.parse_duration(args.refresh)
            except ValueError as e:
                self._cmdline_error(str(e))

        self._analysis_conf = analysis.AnalysisConfig()
        self._analysis_conf.refresh_period = refresh_period_ns
        self._analysis_conf.period_begin_ev_name = args.period_begin
        self._analysis_conf.period_end_ev_name = args.period_end
        self._analysis_conf.period_begin_key_fields = \
            args.period_begin_key.split(',')

        # The end key defaults to the begin key when not specified.
        if args.period_end_key:
            self._analysis_conf.period_end_key_fields = \
                args.period_end_key.split(',')
        else:
            self._analysis_conf.period_end_key_fields = \
                self._analysis_conf.period_begin_key_fields

        if args.period_key_value:
            self._analysis_conf.period_key_value = \
                tuple(args.period_key_value.split(','))

        if args.cpu:
            self._analysis_conf.cpu_list = args.cpu.split(',')
            self._analysis_conf.cpu_list = [int(cpu) for cpu in
                                            self._analysis_conf.cpu_list]

        # convert min/max args from µs to ns, if needed
        # (hasattr: these options exist only for commands that called
        # _add_min_max_args())
        if hasattr(args, 'min') and args.min is not None:
            args.min *= 1000
            self._analysis_conf.min_duration = args.min
        if hasattr(args, 'max') and args.max is not None:
            args.max *= 1000
            self._analysis_conf.max_duration = args.max

        if hasattr(args, 'procname'):
            if args.procname:
                self._analysis_conf.proc_list = args.procname.split(',')

        if hasattr(args, 'tid'):
            if args.tid:
                self._analysis_conf.tid_list = args.tid.split(',')
                self._analysis_conf.tid_list = [int(tid) for tid in
                                                self._analysis_conf.tid_list]

        if hasattr(args, 'freq'):
            # Computed lazily by _get_uniform_freq_values().
            args.uniform_min = None
            args.uniform_max = None
            args.uniform_step = None

            if args.freq_series:
                # implies uniform buckets
                args.freq_uniform = True

        if self._mi_mode:
            # print MI metadata if required
            if args.metadata:
                self._mi_print_metadata()
                sys.exit(0)

            # validate path argument (required at this point; in MI
            # mode the path is declared with nargs='*')
            if not args.path:
                self._cmdline_error('Please specify a trace path')

            if type(args.path) is list:
                args.path = args.path[0]
407
    def _validate_transform_args(self, args):
        """Hook for subclasses: validate/transform command-specific
        arguments.  Called after the common validation."""
        pass
410
    def _parse_args(self):
        """Build the argument parser, parse the command line, validate
        the result, and store it in self._args.

        _DESC is provided by the concrete subclass.
        """
        ap = argparse.ArgumentParser(description=self._DESC)

        # common arguments
        ap.add_argument('-r', '--refresh', type=str,
                        help='Refresh period, with optional units suffix '
                        '(default units: s)')
        ap.add_argument('--gmt', action='store_true',
                        help='Manipulate timestamps based on GMT instead '
                        'of local time')
        ap.add_argument('--skip-validation', action='store_true',
                        help='Skip the trace validation')
        ap.add_argument('--begin', type=str, help='start time: '
                        'hh:mm:ss[.nnnnnnnnn]')
        ap.add_argument('--end', type=str, help='end time: '
                        'hh:mm:ss[.nnnnnnnnn]')
        ap.add_argument('--period-begin', type=str,
                        help='Analysis period start marker event name')
        ap.add_argument('--period-end', type=str,
                        help='Analysis period end marker event name '
                        '(requires --period-begin)')
        ap.add_argument('--period-begin-key', type=str, default='cpu_id',
                        help='Optional, list of event field names used to '
                        'match period markers (default: cpu_id)')
        ap.add_argument('--period-end-key', type=str,
                        help='Optional, list of event field names used to '
                        'match period marker. If none specified, use the same '
                        ' --period-begin-key')
        ap.add_argument('--period-key-value', type=str,
                        help='Optional, define a fixed key value to which a'
                        ' period must correspond to be considered.')
        ap.add_argument('--cpu', type=str,
                        help='Filter the results only for this list of '
                        'CPU IDs')
        ap.add_argument('--timerange', type=str, help='time range: '
                        '[begin,end]')
        ap.add_argument('--progress-use-size', action='store_true',
                        help='use trace size to approximate progress')
        ap.add_argument('-V', '--version', action='version',
                        version='LTTng Analyses v' + __version__)

        # MI mode-dependent arguments: MI accepts a list of paths and
        # opt-in progress lines; non-MI has a single required path and
        # an opt-out progress bar.
        if self._mi_mode:
            ap.add_argument('--metadata', action='store_true',
                            help='Show analysis\'s metadata')
            ap.add_argument('path', metavar='<path/to/trace>',
                            help='trace path', nargs='*')
            ap.add_argument('--output-progress', action='store_true',
                            help='Print progress indication lines')
        else:
            ap.add_argument('--no-progress', action='store_true',
                            help='Don\'t display the progress bar')
            ap.add_argument('path', metavar='<path/to/trace>',
                            help='trace path')

        # Used to add command-specific args
        self._add_arguments(ap)

        args = ap.parse_args()

        if self._mi_mode:
            # In MI mode, progress is disabled unless explicitly
            # requested with --output-progress.
            args.no_progress = True

            if args.output_progress:
                args.no_progress = False

        self._validate_transform_common_args(args)
        self._validate_transform_args(args)
        self._args = args
480
481 @staticmethod
482 def _add_proc_filter_args(ap):
483 ap.add_argument('--procname', type=str,
484 help='Filter the results only for this list of '
485 'process names')
486 ap.add_argument('--tid', type=str,
487 help='Filter the results only for this list of TIDs')
488
489 @staticmethod
490 def _add_min_max_args(ap):
491 ap.add_argument('--min', type=float,
492 help='Filter out durations shorter than min usec')
493 ap.add_argument('--max', type=float,
494 help='Filter out durations longer than max usec')
495
496 @staticmethod
497 def _add_freq_args(ap, help=None):
498 if not help:
499 help = 'Output the frequency distribution'
500
501 ap.add_argument('--freq', action='store_true', help=help)
502 ap.add_argument('--freq-resolution', type=int, default=20,
503 help='Frequency distribution resolution '
504 '(default 20)')
505 ap.add_argument('--freq-uniform', action='store_true',
506 help='Use a uniform resolution across distributions')
507 ap.add_argument('--freq-series', action='store_true',
508 help='Consolidate frequency distribution histogram '
509 'as a single one')
510
511 @staticmethod
512 def _add_log_args(ap, help=None):
513 if not help:
514 help = 'Output the events in chronological order'
515
516 ap.add_argument('--log', action='store_true', help=help)
517
518 @staticmethod
519 def _add_top_args(ap, help=None):
520 if not help:
521 help = 'Output the top results'
522
523 ap.add_argument('--limit', type=int, default=10,
524 help='Limit to top X (default = 10)')
525 ap.add_argument('--top', action='store_true', help=help)
526
527 @staticmethod
528 def _add_stats_args(ap, help=None):
529 if not help:
530 help = 'Output statistics'
531
532 ap.add_argument('--stats', action='store_true', help=help)
533
    def _add_arguments(self, ap):
        """Hook for subclasses: add command-specific CLI arguments to
        the parser ap."""
        pass
536
    def _process_date_args(self):
        """Resolve --begin/--end/--timerange into begin/end timestamps
        (ns) on the analysis configuration.

        Also sets self._args.multi_day so later timestamp formatting
        includes the date when the trace spans several days.  Exits via
        _cmdline_error() on invalid date arguments.
        """
        def parse_date(date):
            # Translate a date/time string into a timestamp within the
            # trace collection; exits on parse error.
            try:
                ts = parse_utils.parse_trace_collection_date(
                    self._traces, date, self._args.gmt
                )
            except ValueError as e:
                self._cmdline_error(str(e))

            return ts

        self._args.multi_day = trace_utils.is_multi_day_trace_collection(
            self._traces
        )
        begin_ts = None
        end_ts = None

        # --timerange takes precedence over --begin/--end.
        if self._args.timerange:
            try:
                begin_ts, end_ts = (
                    parse_utils.parse_trace_collection_time_range(
                        self._traces, self._args.timerange, self._args.gmt
                    )
                )
            except ValueError as e:
                self._cmdline_error(str(e))
        else:
            if self._args.begin:
                begin_ts = parse_date(self._args.begin)
            if self._args.end:
                end_ts = parse_date(self._args.end)

                # We have to check if timestamp_begin is None, which
                # it always is in older versions of babeltrace. In
                # that case, the test is simply skipped and an invalid
                # --end value will cause an empty analysis
                if self._ts_begin is not None and \
                        end_ts < self._ts_begin:
                    self._cmdline_error(
                        '--end timestamp before beginning of trace')

        self._analysis_conf.begin_ts = begin_ts
        self._analysis_conf.end_ts = end_ts
580
    def _create_analysis(self):
        """Instantiate the command's analysis class and hook its TICK
        notification to _analysis_tick_cb().

        _ANALYSIS_CLASS is provided by the concrete subclass.
        """
        notification_cbs = {
            analysis.Analysis.TICK_CB: self._analysis_tick_cb
        }

        self._analysis = self._ANALYSIS_CLASS(self.state, self._analysis_conf)
        self._analysis.register_notification_cbs(notification_cbs)
588
    def _create_automaton(self):
        # The automaton rebuilds kernel state from the event stream;
        # its state object is shared with the analysis (see
        # _create_analysis()).
        self._automaton = automaton.Automaton()
        self.state = self._automaton.state
592
593 def _analysis_tick_cb(self, **kwargs):
594 begin_ns = kwargs['begin_ns']
595 end_ns = kwargs['end_ns']
596
597 self._analysis_tick(begin_ns, end_ns)
598 self._ticks += 1
599
    def _analysis_tick(self, begin_ns, end_ns):
        """Produce the command's output for one analysis period
        [begin_ns, end_ns].  Must be implemented by subclasses."""
        raise NotImplementedError()
602
603
# create MI version from the package version string
_cmd_version = _version.get_versions()['version']
# NOTE(review): assumes the version string starts with "X.Y.Z"; if it
# does not, _version_match is None and the lines below raise
# AttributeError — confirm _version always yields that shape.
_version_match = re.match(r'(\d+)\.(\d+)\.(\d+)(.*)', _cmd_version)
Command._MI_VERSION = version_utils.Version(
    int(_version_match.group(1)),
    int(_version_match.group(2)),
    int(_version_match.group(3)),
    _version_match.group(4),
)
This page took 0.044222 seconds and 5 git commands to generate.