cli: add "catch all" for exceptions to sanitize output
[deliverable/lttng-analyses.git] / lttnganalyses / cli / command.py
1 # The MIT License (MIT)
2 #
3 # Copyright (C) 2015 - Julien Desfossez <jdesfossez@efficios.com>
4 # 2015 - Philippe Proulx <pproulx@efficios.com>
5 # 2015 - Antoine Busque <abusque@efficios.com>
6 #
7 # Permission is hereby granted, free of charge, to any person obtaining a copy
8 # of this software and associated documentation files (the "Software"), to deal
9 # in the Software without restriction, including without limitation the rights
10 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11 # copies of the Software, and to permit persons to whom the Software is
12 # furnished to do so, subject to the following conditions:
13 #
14 # The above copyright notice and this permission notice shall be included in
15 # all copies or substantial portions of the Software.
16 #
17 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
20 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
23 # SOFTWARE.
24
25 import argparse
26 import json
27 import os
28 import re
29 import sys
30 import subprocess
31 from babeltrace import TraceCollection
32 from . import mi, progressbar
33 from .. import _version, __version__
34 from ..core import analysis
35 from ..common import (
36 format_utils, parse_utils, time_utils, trace_utils, version_utils
37 )
38 from ..linuxautomaton import automaton
39
40
class Command:
    """Base class of every LTTng analyses command-line command.

    Subclasses supply the analysis-specific attributes this base class
    reads (_MI_TABLE_CLASSES, _MI_TITLE, _MI_DESCRIPTION, _MI_TAGS,
    _DESC, _ANALYSIS_CLASS) and it drives the common steps: argument
    parsing, trace opening, analysis execution and machine interface
    (MI) output.
    """

    # Tags common to every analysis, merged with the subclass's
    # _MI_TAGS in the MI metadata.
    _MI_BASE_TAGS = ['linux-kernel', 'lttng-analyses']
    # Authors advertised in the MI metadata.
    _MI_AUTHORS = [
        'Julien Desfossez',
        'Antoine Busque',
        'Philippe Proulx',
    ]
    # Project URL advertised in the MI metadata.
    _MI_URL = 'https://github.com/lttng/lttng-analyses'
    def __init__(self, mi_mode=False):
        """Initialize the command.

        Args:
            mi_mode (bool): when True, output machine interface (MI)
                JSON on stdout instead of human-readable text.
        """
        self._analysis = None
        self._analysis_conf = None
        self._args = None
        self._handles = None
        self._traces = None
        self._ticks = 0
        self._mi_mode = mi_mode
        # Both steps go through _run_step() so any failure (or Ctrl+C)
        # results in a clean exit instead of a raw traceback.
        self._run_step('create automaton', self._create_automaton)
        self._run_step('setup MI', self._mi_setup)
60
    @property
    def mi_mode(self):
        # True when the command outputs machine interface (MI) JSON.
        return self._mi_mode
64
    def _run_step(self, action_title, fn):
        """Run `fn()`, sanitizing any failure into a clean exit.

        A KeyboardInterrupt exits with status 0.  Any other exception is
        caught — the broad `except Exception` is deliberate ("catch all"
        to sanitize output) — and reported through _gen_error(), which
        terminates the process.
        """
        try:
            fn()
        except KeyboardInterrupt:
            self._print('Cancelled by user')
            sys.exit(0)
        except Exception as e:
            self._gen_error('Cannot {}: {}'.format(action_title, e))
73
74 def run(self):
75 self._run_step('parse arguments', self._parse_args)
76 self._run_step('open trace', self._open_trace)
77 self._run_step('create analysis', self._create_analysis)
78
79 if self._mi_mode and not self._args.test_compatibility:
80 self._run_step('run analysis', self._run_analysis)
81
82 self._run_step('close trace', self._close_trace)
83
84 def _mi_error(self, msg, code=None):
85 print(json.dumps(mi.get_error(msg, code)))
86
87 def _non_mi_error(self, msg):
88 try:
89 import termcolor
90
91 msg = termcolor.colored(msg, 'red', attrs=['bold'])
92 except ImportError:
93 pass
94
95 print(msg, file=sys.stderr)
96
97 def _error(self, msg, code, exit_code=1):
98 if self._mi_mode:
99 self._mi_error(msg)
100 else:
101 self._non_mi_error(msg)
102
103 sys.exit(exit_code)
104
    def _gen_error(self, msg, exit_code=1):
        # Report a generic error and exit the process (never returns).
        self._error('Error: {}'.format(msg), exit_code)
107
    def _cmdline_error(self, msg, exit_code=1):
        # Report a command-line usage error and exit (never returns).
        self._error('Command line error: {}'.format(msg), exit_code)
110
111 def _print(self, msg):
112 if not self._mi_mode:
113 print(msg)
114
    def _mi_create_result_table(self, table_class_name, begin, end,
                                subtitle=None):
        # Build an empty MI result table for a registered table class,
        # covering the [begin, end] time range.
        return mi.ResultTable(self._mi_table_classes[table_class_name],
                              begin, end, subtitle)
119
120 def _mi_setup(self):
121 self._mi_table_classes = {}
122
123 for tc_tuple in self._MI_TABLE_CLASSES:
124 table_class = mi.TableClass(tc_tuple[0], tc_tuple[1], tc_tuple[2])
125 self._mi_table_classes[table_class.name] = table_class
126
127 self._mi_clear_result_tables()
128
129 def _mi_print_metadata(self):
130 tags = self._MI_BASE_TAGS + self._MI_TAGS
131 infos = mi.get_metadata(version=self._MI_VERSION, title=self._MI_TITLE,
132 description=self._MI_DESCRIPTION,
133 authors=self._MI_AUTHORS, url=self._MI_URL,
134 tags=tags,
135 table_classes=self._mi_table_classes.values())
136 print(json.dumps(infos))
137
138 def _mi_append_result_table(self, result_table):
139 if not result_table or not result_table.rows:
140 return
141
142 tc_name = result_table.table_class.name
143 self._mi_get_result_tables(tc_name).append(result_table)
144
145 def _mi_append_result_tables(self, result_tables):
146 if not result_tables:
147 return
148
149 for result_table in result_tables:
150 self._mi_append_result_table(result_table)
151
    def _mi_clear_result_tables(self):
        # Drop all accumulated result tables (dict keyed by table class
        # name, values are lists of ResultTable).
        self._result_tables = {}
154
155 def _mi_get_result_tables(self, table_class_name):
156 if table_class_name not in self._result_tables:
157 self._result_tables[table_class_name] = []
158
159 return self._result_tables[table_class_name]
160
161 def _mi_print(self):
162 results = []
163
164 for result_tables in self._result_tables.values():
165 for result_table in result_tables:
166 results.append(result_table.to_native_object())
167
168 obj = {
169 'results': results,
170 }
171
172 print(json.dumps(obj))
173
    def _create_summary_result_tables(self):
        # Hook for subclasses: build summary result tables once every
        # tick has run (see _post_analysis).  Default: do nothing.
        pass
176
    def _open_trace(self):
        """Open the CTF trace(s) under self._args.path, then resolve the
        requested time range, read the tracer version and (unless
        --skip-validation) check for lost events."""
        traces = TraceCollection()
        handles = traces.add_traces_recursive(self._args.path, 'ctf')
        if handles == {}:
            # _gen_error() exits the process.
            self._gen_error('Failed to open ' + self._args.path, -1)
        self._handles = handles
        self._traces = traces
        # May be None with older babeltrace versions (see
        # _process_date_args).
        self._ts_begin = traces.timestamp_begin
        self._ts_end = traces.timestamp_end
        self._process_date_args()
        self._read_tracer_version()
        if not self._args.skip_validation:
            self._check_lost_events()
190
191 def _close_trace(self):
192 for handle in self._handles.values():
193 self._traces.remove_trace(handle)
194
195 def _read_tracer_version(self):
196 kernel_path = None
197 # remove the trailing /
198 while self._args.path.endswith('/'):
199 self._args.path = self._args.path[:-1]
200 for root, _, _ in os.walk(self._args.path):
201 if root.endswith('kernel'):
202 kernel_path = root
203 break
204
205 if kernel_path is None:
206 self._gen_error('Could not find kernel trace directory')
207
208 try:
209 ret, metadata = subprocess.getstatusoutput(
210 'babeltrace -o ctf-metadata "%s"' % kernel_path)
211 except subprocess.CalledProcessError:
212 self._gen_error('Cannot run babeltrace on the trace, cannot read'
213 ' tracer version')
214
215 # fallback to reading the text metadata if babeltrace failed to
216 # output the CTF metadata
217 if ret != 0:
218 try:
219 metadata = subprocess.getoutput(
220 'cat "%s"' % os.path.join(kernel_path, 'metadata'))
221 except subprocess.CalledProcessError:
222 self._gen_error('Cannot read the metadata of the trace, cannot'
223 'extract tracer version')
224
225 major_match = re.search(r'tracer_major = "*(\d+)"*', metadata)
226 minor_match = re.search(r'tracer_minor = "*(\d+)"*', metadata)
227 patch_match = re.search(r'tracer_patchlevel = "*(\d+)"*', metadata)
228
229 if not major_match or not minor_match or not patch_match:
230 self._gen_error('Malformed metadata, cannot read tracer version')
231
232 self.state.tracer_version = version_utils.Version(
233 int(major_match.group(1)),
234 int(minor_match.group(1)),
235 int(patch_match.group(1)),
236 )
237
    def _check_lost_events(self):
        """Run babeltrace over the whole trace as a validation pass
        before the analysis starts; exits via _gen_error() if babeltrace
        fails."""
        msg = 'Checking the trace for lost events...'
        self._print(msg)

        # In MI mode, progress lines are only emitted when explicitly
        # requested.
        if self._mi_mode and self._args.output_progress:
            mi.print_progress(0, msg)

        try:
            # NOTE(review): shell=True with an interpolated path; fine
            # for a local CLI argument, but a path containing double
            # quotes would break the command — consider the list argv
            # form of check_output.
            subprocess.check_output('babeltrace "%s"' % self._args.path,
                                    shell=True)
        except subprocess.CalledProcessError:
            self._gen_error('Cannot run babeltrace on the trace, cannot verify'
                            ' if events were lost during the trace recording')
251
    def _pre_analysis(self):
        # Hook for subclasses: called right before the event loop
        # starts (see _run_analysis).  Default: do nothing.
        pass
254
    def _post_analysis(self):
        """Emit the accumulated MI results; no-op in non-MI mode."""
        if not self._mi_mode:
            return

        # More than one tick means several periods were produced: let
        # the subclass build summary tables before printing.
        if self._ticks > 1:
            self._create_summary_result_tables()

        self._mi_print()
263
264 def _pb_setup(self):
265 if self._args.no_progress:
266 return
267
268 ts_end = self._ts_end
269
270 if self._analysis_conf.end_ts is not None:
271 ts_end = self._analysis_conf.end_ts
272
273 if self._mi_mode:
274 cls = progressbar.MiProgress
275 else:
276 cls = progressbar.FancyProgressBar
277
278 self._progress = cls(self._ts_begin, ts_end, self._args.path,
279 self._args.progress_use_size)
280
    def _pb_update(self, event):
        # Advance the progress indicator for one event, unless disabled.
        if self._args.no_progress:
            return

        self._progress.update(event)
286
    def _pb_finish(self):
        # Terminate the progress indicator, unless disabled.
        if self._args.no_progress:
            return

        self._progress.finalize()
292
    def _run_analysis(self):
        """Main event loop: feed every trace event to the analysis and
        to the state-tracking automaton, updating progress as we go."""
        self._pre_analysis()
        self._pb_setup()

        for event in self._traces.events:
            self._pb_update(event)
            self._analysis.process_event(event)
            # Stop as soon as the analysis reports it has ended; the
            # automaton deliberately sees the event only after the
            # analysis.
            if self._analysis.ended:
                break
            self._automaton.process_event(event)

        self._pb_finish()
        self._analysis.end()
        self._post_analysis()
307
308 def _print_date(self, begin_ns, end_ns):
309 time_range_str = format_utils.format_time_range(
310 begin_ns, end_ns, print_date=True, gmt=self._args.gmt
311 )
312 date = 'Timerange: {}'.format(time_range_str)
313
314 self._print(date)
315
    def _format_timestamp(self, timestamp):
        # Include the date only when the trace spans several days
        # (multi_day is set in _process_date_args).
        return format_utils.format_timestamp(
            timestamp, print_date=self._args.multi_day, gmt=self._args.gmt
        )
320
    def _get_uniform_freq_values(self, durations):
        """Return (min, max, step) for uniform frequency buckets, in µs.

        Computed once from `durations` (in ns) and the --min/--max/
        --freq-resolution arguments, then cached on the
        self._args.uniform_* attributes for subsequent calls.
        """
        if self._args.uniform_step is not None:
            # Already computed by a previous call.
            return (self._args.uniform_min, self._args.uniform_max,
                    self._args.uniform_step)

        if self._args.min is not None:
            self._args.uniform_min = self._args.min
        else:
            # NOTE(review): min()/max() raise ValueError if `durations`
            # is empty — confirm callers never pass an empty sequence.
            self._args.uniform_min = min(durations)
        if self._args.max is not None:
            self._args.uniform_max = self._args.max
        else:
            self._args.uniform_max = max(durations)

        # ns to µs
        self._args.uniform_min /= 1000
        self._args.uniform_max /= 1000
        self._args.uniform_step = (
            (self._args.uniform_max - self._args.uniform_min) /
            self._args.freq_resolution
        )

        return self._args.uniform_min, self._args.uniform_max, \
            self._args.uniform_step
345
    def _validate_transform_common_args(self, args):
        """Validate and normalize the arguments shared by all commands,
        filling in self._analysis_conf.

        Handles the MI --mi-version/--metadata early exits and exits
        through _cmdline_error() on invalid values.
        """
        refresh_period_ns = None
        if args.refresh is not None:
            try:
                refresh_period_ns = parse_utils.parse_duration(args.refresh)
            except ValueError as e:
                self._cmdline_error(str(e))

        self._analysis_conf = analysis.AnalysisConfig()
        self._analysis_conf.refresh_period = refresh_period_ns
        self._analysis_conf.period_begin_ev_name = args.period_begin
        self._analysis_conf.period_end_ev_name = args.period_end
        self._analysis_conf.period_begin_key_fields = \
            args.period_begin_key.split(',')

        # The period end key defaults to the begin key.
        if args.period_end_key:
            self._analysis_conf.period_end_key_fields = \
                args.period_end_key.split(',')
        else:
            self._analysis_conf.period_end_key_fields = \
                self._analysis_conf.period_begin_key_fields

        if args.period_key_value:
            self._analysis_conf.period_key_value = \
                tuple(args.period_key_value.split(','))

        if args.cpu:
            self._analysis_conf.cpu_list = args.cpu.split(',')
            self._analysis_conf.cpu_list = [int(cpu) for cpu in
                                            self._analysis_conf.cpu_list]

        # convert min/max args from µs to ns, if needed (only commands
        # using _add_min_max_args() have these attributes)
        if hasattr(args, 'min') and args.min is not None:
            args.min *= 1000
            self._analysis_conf.min_duration = args.min
        if hasattr(args, 'max') and args.max is not None:
            args.max *= 1000
            self._analysis_conf.max_duration = args.max

        if hasattr(args, 'procname'):
            if args.procname:
                self._analysis_conf.proc_list = args.procname.split(',')

        if hasattr(args, 'tid'):
            if args.tid:
                self._analysis_conf.tid_list = args.tid.split(',')
                self._analysis_conf.tid_list = [int(tid) for tid in
                                                self._analysis_conf.tid_list]

        if hasattr(args, 'freq'):
            # Computed lazily by _get_uniform_freq_values().
            args.uniform_min = None
            args.uniform_max = None
            args.uniform_step = None

            if args.freq_series:
                # implies uniform buckets
                args.freq_uniform = True

        if self._mi_mode:
            # print MI version if required
            if args.mi_version:
                print(mi.get_version_string())
                sys.exit(0)

            # print MI metadata if required
            if args.metadata:
                self._mi_print_metadata()
                sys.exit(0)

        # validate path argument (required at this point)
        if not args.path:
            self._cmdline_error('Please specify a trace path')

        # In MI mode the path is declared with nargs='*', so it is a
        # list here.
        if type(args.path) is list:
            args.path = args.path[0]
421
    def _validate_transform_args(self, args):
        # Hook for subclasses: validate/normalize command-specific
        # arguments.  Default: do nothing.
        pass
424
    def _parse_args(self):
        """Build the argument parser (common, MI/non-MI and subclass
        arguments), parse the command line, validate it and store the
        result in self._args."""
        ap = argparse.ArgumentParser(description=self._DESC)

        # common arguments
        ap.add_argument('-r', '--refresh', type=str,
                        help='Refresh period, with optional units suffix '
                        '(default units: s)')
        ap.add_argument('--gmt', action='store_true',
                        help='Manipulate timestamps based on GMT instead '
                        'of local time')
        ap.add_argument('--skip-validation', action='store_true',
                        help='Skip the trace validation')
        ap.add_argument('--begin', type=str, help='start time: '
                        'hh:mm:ss[.nnnnnnnnn]')
        ap.add_argument('--end', type=str, help='end time: '
                        'hh:mm:ss[.nnnnnnnnn]')
        ap.add_argument('--period-begin', type=str,
                        help='Analysis period start marker event name')
        ap.add_argument('--period-end', type=str,
                        help='Analysis period end marker event name '
                        '(requires --period-begin)')
        ap.add_argument('--period-begin-key', type=str, default='cpu_id',
                        help='Optional, list of event field names used to '
                        'match period markers (default: cpu_id)')
        ap.add_argument('--period-end-key', type=str,
                        help='Optional, list of event field names used to '
                        'match period marker. If none specified, use the same '
                        ' --period-begin-key')
        ap.add_argument('--period-key-value', type=str,
                        help='Optional, define a fixed key value to which a'
                        ' period must correspond to be considered.')
        ap.add_argument('--cpu', type=str,
                        help='Filter the results only for this list of '
                        'CPU IDs')
        ap.add_argument('--timerange', type=str, help='time range: '
                        '[begin,end]')
        ap.add_argument('--progress-use-size', action='store_true',
                        help='use trace size to approximate progress')
        ap.add_argument('-V', '--version', action='version',
                        version='LTTng Analyses v' + __version__)

        # MI mode-dependent arguments
        if self._mi_mode:
            ap.add_argument('--mi-version', action='store_true',
                            help='Print MI version')
            ap.add_argument('--metadata', action='store_true',
                            help='Print analysis\' metadata')
            ap.add_argument('--test-compatibility', action='store_true',
                            help='Check if the provided trace is supported and exit')
            ap.add_argument('path', metavar='<path/to/trace>',
                            help='trace path', nargs='*')
            ap.add_argument('--output-progress', action='store_true',
                            help='Print progress indication lines')
        else:
            ap.add_argument('--no-progress', action='store_true',
                            help='Don\'t display the progress bar')
            ap.add_argument('path', metavar='<path/to/trace>',
                            help='trace path')

        # Used to add command-specific args
        self._add_arguments(ap)

        args = ap.parse_args()

        # In MI mode, progress lines are opt-in via --output-progress.
        if self._mi_mode:
            args.no_progress = True

            if args.output_progress:
                args.no_progress = False

        self._validate_transform_common_args(args)
        self._validate_transform_args(args)
        self._args = args
498
499 @staticmethod
500 def _add_proc_filter_args(ap):
501 ap.add_argument('--procname', type=str,
502 help='Filter the results only for this list of '
503 'process names')
504 ap.add_argument('--tid', type=str,
505 help='Filter the results only for this list of TIDs')
506
    @staticmethod
    def _add_min_max_args(ap):
        # Register the --min/--max duration filters (in µs; converted
        # to ns by _validate_transform_common_args()).
        ap.add_argument('--min', type=float,
                        help='Filter out durations shorter than min usec')
        ap.add_argument('--max', type=float,
                        help='Filter out durations longer than max usec')
513
    @staticmethod
    def _add_freq_args(ap, help=None):
        # Register the frequency distribution options; `help` lets the
        # command customize the --freq description.
        if not help:
            help = 'Output the frequency distribution'

        ap.add_argument('--freq', action='store_true', help=help)
        ap.add_argument('--freq-resolution', type=int, default=20,
                        help='Frequency distribution resolution '
                        '(default 20)')
        ap.add_argument('--freq-uniform', action='store_true',
                        help='Use a uniform resolution across distributions')
        ap.add_argument('--freq-series', action='store_true',
                        help='Consolidate frequency distribution histogram '
                        'as a single one')
528
    @staticmethod
    def _add_log_args(ap, help=None):
        # Register the --log option, with an optional command-specific
        # help string.
        if not help:
            help = 'Output the events in chronological order'

        ap.add_argument('--log', action='store_true', help=help)
535
    @staticmethod
    def _add_top_args(ap, help=None):
        # Register the --top/--limit options, with an optional
        # command-specific help string for --top.
        if not help:
            help = 'Output the top results'

        ap.add_argument('--limit', type=int, default=10,
                        help='Limit to top X (default = 10)')
        ap.add_argument('--top', action='store_true', help=help)
544
    @staticmethod
    def _add_stats_args(ap, help=None):
        # Register the --stats option, with an optional command-specific
        # help string.
        if not help:
            help = 'Output statistics'

        ap.add_argument('--stats', action='store_true', help=help)
551
    def _add_arguments(self, ap):
        # Hook for subclasses: register command-specific arguments on
        # the parser.  Default: do nothing.
        pass
554
    def _process_date_args(self):
        """Resolve --begin/--end/--timerange into begin_ts/end_ts
        timestamps on self._analysis_conf, and set
        self._args.multi_day."""
        def parse_date(date):
            # Exits through _cmdline_error() on an invalid date.
            try:
                ts = parse_utils.parse_trace_collection_date(
                    self._traces, date, self._args.gmt
                )
            except ValueError as e:
                self._cmdline_error(str(e))

            return ts

        self._args.multi_day = trace_utils.is_multi_day_trace_collection(
            self._traces
        )
        begin_ts = None
        end_ts = None

        # --timerange takes precedence over --begin/--end.
        if self._args.timerange:
            try:
                begin_ts, end_ts = (
                    parse_utils.parse_trace_collection_time_range(
                        self._traces, self._args.timerange, self._args.gmt
                    )
                )
            except ValueError as e:
                self._cmdline_error(str(e))
        else:
            if self._args.begin:
                begin_ts = parse_date(self._args.begin)
            if self._args.end:
                end_ts = parse_date(self._args.end)

                # We have to check if timestamp_begin is None, which
                # it always is in older versions of babeltrace. In
                # that case, the test is simply skipped and an invalid
                # --end value will cause an empty analysis
                if self._ts_begin is not None and \
                        end_ts < self._ts_begin:
                    self._cmdline_error(
                        '--end timestamp before beginning of trace')

        self._analysis_conf.begin_ts = begin_ts
        self._analysis_conf.end_ts = end_ts
598
    def _create_analysis(self):
        """Instantiate the subclass's analysis class and subscribe to
        its per-period tick notifications."""
        notification_cbs = {
            analysis.Analysis.TICK_CB: self._analysis_tick_cb
        }

        # The analysis shares the automaton's state object (see
        # _create_automaton).
        self._analysis = self._ANALYSIS_CLASS(self.state, self._analysis_conf)
        self._analysis.register_notification_cbs(notification_cbs)
606
    def _create_automaton(self):
        # The automaton tracks state from the event stream; its state
        # object is shared with the analysis (see _create_analysis) and
        # with _read_tracer_version.
        self._automaton = automaton.Automaton()
        self.state = self._automaton.state
610
611 def _analysis_tick_cb(self, **kwargs):
612 begin_ns = kwargs['begin_ns']
613 end_ns = kwargs['end_ns']
614
615 self._analysis_tick(begin_ns, end_ns)
616 self._ticks += 1
617
    def _analysis_tick(self, begin_ns, end_ns):
        # Subclasses must implement this to produce their per-period
        # results.
        raise NotImplementedError()
620
621
# create MI version from the package version string
_cmd_version = _version.get_versions()['version']
_version_match = re.match(r'(\d+)\.(\d+)\.(\d+)(.*)', _cmd_version)
# NOTE(review): assumes the version string always starts with "X.Y.Z";
# a non-matching string would leave _version_match as None and raise
# AttributeError below — confirm versioneer always complies.
Command._MI_VERSION = version_utils.Version(
    int(_version_match.group(1)),
    int(_version_match.group(2)),
    int(_version_match.group(3)),
    _version_match.group(4),  # extra suffix (e.g. dev/rc tag)
)
This page took 0.042535 seconds and 5 git commands to generate.