602d7dd722d5baa05e184b4897dadc530925d72f
[deliverable/lttng-analyses.git] / lttnganalyses / cli / command.py
1 # The MIT License (MIT)
2 #
3 # Copyright (C) 2015 - Julien Desfossez <jdesfossez@efficios.com>
4 # 2015 - Philippe Proulx <pproulx@efficios.com>
5 # 2015 - Antoine Busque <abusque@efficios.com>
6 #
7 # Permission is hereby granted, free of charge, to any person obtaining a copy
8 # of this software and associated documentation files (the "Software"), to deal
9 # in the Software without restriction, including without limitation the rights
10 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11 # copies of the Software, and to permit persons to whom the Software is
12 # furnished to do so, subject to the following conditions:
13 #
14 # The above copyright notice and this permission notice shall be included in
15 # all copies or substantial portions of the Software.
16 #
17 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
20 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
23 # SOFTWARE.
24
25 import argparse
26 import json
27 import os
28 import re
29 import sys
30 import subprocess
31 from babeltrace import TraceCollection
32 from . import mi, progressbar
33 from .. import _version, __version__
34 from ..core import analysis
35 from ..common import (
36 format_utils, parse_utils, time_utils, trace_utils, version_utils
37 )
38 from ..linuxautomaton import automaton
39
40
class Command:
    """Base class for all lttng-analyses command-line entry points.

    Handles argument parsing, opening/validating the trace, feeding the
    events to the analysis and the state-tracking automaton, and producing
    either human-readable output or machine interface (MI, JSON) output.
    """

    # Tags common to every analysis; subclasses extend them via _MI_TAGS
    # (see _mi_print_metadata)
    _MI_BASE_TAGS = ['linux-kernel', 'lttng-analyses']
    _MI_AUTHORS = [
        'Julien Desfossez',
        'Antoine Busque',
        'Philippe Proulx',
    ]
    _MI_URL = 'https://github.com/lttng/lttng-analyses'

    def __init__(self, mi_mode=False):
        """Initialize the command.

        Args:
            mi_mode (bool): if True, emit machine interface (JSON) output
                instead of human-readable text.
        """
        self._analysis = None
        self._analysis_conf = None
        self._args = None
        self._handles = None
        self._traces = None
        # Number of analysis periods processed so far (see _analysis_tick_cb)
        self._ticks = 0
        self._mi_mode = mi_mode
        # The automaton must be created first: it owns the state object
        # that the analysis is constructed with (see _create_analysis)
        self._create_automaton()
        # Uses the subclass-provided _MI_TABLE_CLASSES attribute
        self._mi_setup()
61 @property
62 def mi_mode(self):
63 return self._mi_mode
64
65 def run(self):
66 try:
67 self._parse_args()
68 self._open_trace()
69 self._create_analysis()
70 self._run_analysis()
71 self._close_trace()
72 except KeyboardInterrupt:
73 sys.exit(0)
74
75 def _mi_error(self, msg, code=None):
76 print(json.dumps(mi.get_error(msg, code)))
77
78 def _non_mi_error(self, msg):
79 try:
80 import termcolor
81
82 msg = termcolor.colored(msg, 'red', attrs=['bold'])
83 except ImportError:
84 pass
85
86 print(msg, file=sys.stderr)
87
88 def _error(self, msg, code, exit_code=1):
89 if self._mi_mode:
90 self._mi_error(msg)
91 else:
92 self._non_mi_error(msg)
93
94 sys.exit(exit_code)
95
96 def _gen_error(self, msg, exit_code=1):
97 self._error('Error: {}'.format(msg), exit_code)
98
99 def _cmdline_error(self, msg, exit_code=1):
100 self._error('Command line error: {}'.format(msg), exit_code)
101
102 def _print(self, msg):
103 if not self._mi_mode:
104 print(msg)
105
106 def _mi_create_result_table(self, table_class_name, begin, end,
107 subtitle=None):
108 return mi.ResultTable(self._mi_table_classes[table_class_name],
109 begin, end, subtitle)
110
111 def _mi_setup(self):
112 self._mi_table_classes = {}
113
114 for tc_tuple in self._MI_TABLE_CLASSES:
115 table_class = mi.TableClass(tc_tuple[0], tc_tuple[1], tc_tuple[2])
116 self._mi_table_classes[table_class.name] = table_class
117
118 self._mi_clear_result_tables()
119
120 def _mi_print_metadata(self):
121 tags = self._MI_BASE_TAGS + self._MI_TAGS
122 infos = mi.get_metadata(version=self._MI_VERSION, title=self._MI_TITLE,
123 description=self._MI_DESCRIPTION,
124 authors=self._MI_AUTHORS, url=self._MI_URL,
125 tags=tags,
126 table_classes=self._mi_table_classes.values())
127 print(json.dumps(infos))
128
129 def _mi_append_result_table(self, result_table):
130 if not result_table or not result_table.rows:
131 return
132
133 tc_name = result_table.table_class.name
134 self._mi_get_result_tables(tc_name).append(result_table)
135
136 def _mi_append_result_tables(self, result_tables):
137 if not result_tables:
138 return
139
140 for result_table in result_tables:
141 self._mi_append_result_table(result_table)
142
143 def _mi_clear_result_tables(self):
144 self._result_tables = {}
145
146 def _mi_get_result_tables(self, table_class_name):
147 if table_class_name not in self._result_tables:
148 self._result_tables[table_class_name] = []
149
150 return self._result_tables[table_class_name]
151
152 def _mi_print(self):
153 results = []
154
155 for result_tables in self._result_tables.values():
156 for result_table in result_tables:
157 results.append(result_table.to_native_object())
158
159 obj = {
160 'results': results,
161 }
162
163 print(json.dumps(obj))
164
165 def _create_summary_result_tables(self):
166 pass
167
    def _open_trace(self):
        # Recursively open every CTF trace under the given path;
        # add_traces_recursive() returns a dict of handles, empty on failure
        traces = TraceCollection()
        handles = traces.add_traces_recursive(self._args.path, 'ctf')
        if handles == {}:
            self._gen_error('Failed to open ' + self._args.path, -1)
        self._handles = handles
        self._traces = traces
        # NOTE(review): with older babeltrace versions these can be None
        # (see the comment in _process_date_args)
        self._ts_begin = traces.timestamp_begin
        self._ts_end = traces.timestamp_end
        self._process_date_args()
        self._read_tracer_version()
        if not self._args.skip_validation:
            self._check_lost_events()

    def _close_trace(self):
        # Detach every trace handle opened by _open_trace()
        for handle in self._handles.values():
            self._traces.remove_trace(handle)
185
186 def _read_tracer_version(self):
187 kernel_path = None
188 # remove the trailing /
189 while self._args.path.endswith('/'):
190 self._args.path = self._args.path[:-1]
191 for root, _, _ in os.walk(self._args.path):
192 if root.endswith('kernel'):
193 kernel_path = root
194 break
195
196 if kernel_path is None:
197 self._gen_error('Could not find kernel trace directory')
198
199 try:
200 ret, metadata = subprocess.getstatusoutput(
201 'babeltrace -o ctf-metadata "%s"' % kernel_path)
202 except subprocess.CalledProcessError:
203 self._gen_error('Cannot run babeltrace on the trace, cannot read'
204 ' tracer version')
205
206 # fallback to reading the text metadata if babeltrace failed to
207 # output the CTF metadata
208 if ret != 0:
209 try:
210 metadata = subprocess.getoutput(
211 'cat "%s"' % os.path.join(kernel_path, 'metadata'))
212 except subprocess.CalledProcessError:
213 self._gen_error('Cannot read the metadata of the trace, cannot'
214 'extract tracer version')
215
216 major_match = re.search(r'tracer_major = "*(\d+)"*', metadata)
217 minor_match = re.search(r'tracer_minor = "*(\d+)"*', metadata)
218 patch_match = re.search(r'tracer_patchlevel = "*(\d+)"*', metadata)
219
220 if not major_match or not minor_match or not patch_match:
221 self._gen_error('Malformed metadata, cannot read tracer version')
222
223 self.state.tracer_version = version_utils.Version(
224 int(major_match.group(1)),
225 int(minor_match.group(1)),
226 int(patch_match.group(1)),
227 )
228
229 def _check_lost_events(self):
230 msg = 'Checking the trace for lost events...'
231 self._print(msg)
232
233 if self._mi_mode and self._args.output_progress:
234 mi.print_progress(0, msg)
235
236 try:
237 subprocess.check_output('babeltrace "%s"' % self._args.path,
238 shell=True)
239 except subprocess.CalledProcessError:
240 self._gen_error('Cannot run babeltrace on the trace, cannot verify'
241 ' if events were lost during the trace recording')
242
    def _pre_analysis(self):
        # Hook for subclasses: runs before any event is processed
        pass

    def _post_analysis(self):
        # In MI mode, emit the accumulated result tables once the whole
        # trace has been processed; no-op in human-readable mode
        if not self._mi_mode:
            return

        # More than one tick means multiple periods: let the subclass
        # build summary tables covering all of them
        if self._ticks > 1:
            self._create_summary_result_tables()

        self._mi_print()
254
255 def _pb_setup(self):
256 if self._args.no_progress:
257 return
258
259 ts_end = self._ts_end
260
261 if self._analysis_conf.end_ts is not None:
262 ts_end = self._analysis_conf.end_ts
263
264 if self._mi_mode:
265 cls = progressbar.MiProgress
266 else:
267 cls = progressbar.FancyProgressBar
268
269 self._progress = cls(self._ts_begin, ts_end, self._args.path,
270 self._args.progress_use_size)
271
272 def _pb_update(self, event):
273 if self._args.no_progress:
274 return
275
276 self._progress.update(event)
277
278 def _pb_finish(self):
279 if self._args.no_progress:
280 return
281
282 self._progress.finalize()
283
    def _run_analysis(self):
        # Main event loop: each event goes to the analysis first, then to
        # the state-tracking automaton (order matters — the analysis sees
        # the state as it was *before* this event is applied)
        self._pre_analysis()
        self._pb_setup()

        for event in self._traces.events:
            self._pb_update(event)
            self._analysis.process_event(event)
            # Stop reading as soon as the analysis reports it is done
            if self._analysis.ended:
                break
            self._automaton.process_event(event)

        self._pb_finish()
        self._analysis.end()
        self._post_analysis()
298
299 def _print_date(self, begin_ns, end_ns):
300 time_range_str = format_utils.format_time_range(
301 begin_ns, end_ns, print_date=True, gmt=self._args.gmt
302 )
303 date = 'Timerange: {}'.format(time_range_str)
304
305 self._print(date)
306
307 def _format_timestamp(self, timestamp):
308 return format_utils.format_timestamp(
309 timestamp, print_date=self._args.multi_day, gmt=self._args.gmt
310 )
311
312 def _get_uniform_freq_values(self, durations):
313 if self._args.uniform_step is not None:
314 return (self._args.uniform_min, self._args.uniform_max,
315 self._args.uniform_step)
316
317 if self._args.min is not None:
318 self._args.uniform_min = self._args.min
319 else:
320 self._args.uniform_min = min(durations)
321 if self._args.max is not None:
322 self._args.uniform_max = self._args.max
323 else:
324 self._args.uniform_max = max(durations)
325
326 # ns to µs
327 self._args.uniform_min /= 1000
328 self._args.uniform_max /= 1000
329 self._args.uniform_step = (
330 (self._args.uniform_max - self._args.uniform_min) /
331 self._args.freq_resolution
332 )
333
334 return self._args.uniform_min, self._args.uniform_max, \
335 self._args.uniform_step
336
    def _validate_transform_common_args(self, args):
        """Validate the arguments shared by all commands and transfer them
        into a fresh AnalysisConfig (self._analysis_conf).

        Exits via _cmdline_error() on invalid values, or via sys.exit(0)
        after handling --mi-version/--metadata in MI mode.
        """
        refresh_period_ns = None
        if args.refresh is not None:
            try:
                # --refresh takes a duration with an optional unit suffix
                refresh_period_ns = parse_utils.parse_duration(args.refresh)
            except ValueError as e:
                self._cmdline_error(str(e))

        self._analysis_conf = analysis.AnalysisConfig()
        self._analysis_conf.refresh_period = refresh_period_ns
        self._analysis_conf.period_begin_ev_name = args.period_begin
        self._analysis_conf.period_end_ev_name = args.period_end
        self._analysis_conf.period_begin_key_fields = \
            args.period_begin_key.split(',')

        # The period end key defaults to the begin key when not given
        if args.period_end_key:
            self._analysis_conf.period_end_key_fields = \
                args.period_end_key.split(',')
        else:
            self._analysis_conf.period_end_key_fields = \
                self._analysis_conf.period_begin_key_fields

        if args.period_key_value:
            self._analysis_conf.period_key_value = \
                tuple(args.period_key_value.split(','))

        if args.cpu:
            self._analysis_conf.cpu_list = args.cpu.split(',')
            self._analysis_conf.cpu_list = [int(cpu) for cpu in
                                            self._analysis_conf.cpu_list]

        # convert min/max args from µs to ns, if needed; these attributes
        # only exist for commands that called _add_min_max_args()
        if hasattr(args, 'min') and args.min is not None:
            args.min *= 1000
            self._analysis_conf.min_duration = args.min
        if hasattr(args, 'max') and args.max is not None:
            args.max *= 1000
            self._analysis_conf.max_duration = args.max

        if hasattr(args, 'procname'):
            if args.procname:
                self._analysis_conf.proc_list = args.procname.split(',')

        if hasattr(args, 'tid'):
            if args.tid:
                self._analysis_conf.tid_list = args.tid.split(',')
                self._analysis_conf.tid_list = [int(tid) for tid in
                                                self._analysis_conf.tid_list]

        if hasattr(args, 'freq'):
            # Placeholders filled lazily by _get_uniform_freq_values()
            args.uniform_min = None
            args.uniform_max = None
            args.uniform_step = None

            if args.freq_series:
                # implies uniform buckets
                args.freq_uniform = True

        if self._mi_mode:
            # print MI version if required
            if args.mi_version:
                print(mi.get_version_string())
                sys.exit(0)

            # print MI metadata if required
            if args.metadata:
                self._mi_print_metadata()
                sys.exit(0)

        # validate path argument (required at this point)
        if not args.path:
            self._cmdline_error('Please specify a trace path')

        # In MI mode the path is parsed with nargs='*': unwrap the list
        if type(args.path) is list:
            args.path = args.path[0]

    def _validate_transform_args(self, args):
        # Hook for subclasses: validate command-specific arguments
        pass
415
    def _parse_args(self):
        """Build the command-line parser, then parse and validate sys.argv.

        The validated namespace is stored in self._args.
        """
        ap = argparse.ArgumentParser(description=self._DESC)

        # common arguments
        ap.add_argument('-r', '--refresh', type=str,
                        help='Refresh period, with optional units suffix '
                        '(default units: s)')
        ap.add_argument('--gmt', action='store_true',
                        help='Manipulate timestamps based on GMT instead '
                        'of local time')
        ap.add_argument('--skip-validation', action='store_true',
                        help='Skip the trace validation')
        ap.add_argument('--begin', type=str, help='start time: '
                        'hh:mm:ss[.nnnnnnnnn]')
        ap.add_argument('--end', type=str, help='end time: '
                        'hh:mm:ss[.nnnnnnnnn]')
        ap.add_argument('--period-begin', type=str,
                        help='Analysis period start marker event name')
        ap.add_argument('--period-end', type=str,
                        help='Analysis period end marker event name '
                        '(requires --period-begin)')
        ap.add_argument('--period-begin-key', type=str, default='cpu_id',
                        help='Optional, list of event field names used to '
                        'match period markers (default: cpu_id)')
        ap.add_argument('--period-end-key', type=str,
                        help='Optional, list of event field names used to '
                        'match period marker. If none specified, use the same '
                        ' --period-begin-key')
        ap.add_argument('--period-key-value', type=str,
                        help='Optional, define a fixed key value to which a'
                        ' period must correspond to be considered.')
        ap.add_argument('--cpu', type=str,
                        help='Filter the results only for this list of '
                        'CPU IDs')
        ap.add_argument('--timerange', type=str, help='time range: '
                        '[begin,end]')
        ap.add_argument('--progress-use-size', action='store_true',
                        help='use trace size to approximate progress')
        ap.add_argument('-V', '--version', action='version',
                        version='LTTng Analyses v' + __version__)

        # MI mode-dependent arguments
        if self._mi_mode:
            ap.add_argument('--mi-version', action='store_true',
                            help='Print MI version')
            ap.add_argument('--metadata', action='store_true',
                            help='Print analysis\' metadata')
            # nargs='*': MI consumers pass the path as an optional list,
            # unwrapped later in _validate_transform_common_args()
            ap.add_argument('path', metavar='<path/to/trace>',
                            help='trace path', nargs='*')
            ap.add_argument('--output-progress', action='store_true',
                            help='Print progress indication lines')
        else:
            ap.add_argument('--no-progress', action='store_true',
                            help='Don\'t display the progress bar')
            ap.add_argument('path', metavar='<path/to/trace>',
                            help='trace path')

        # Used to add command-specific args
        self._add_arguments(ap)

        args = ap.parse_args()

        if self._mi_mode:
            # progress is off by default in MI mode; --output-progress
            # turns the MI progress lines back on
            args.no_progress = True

            if args.output_progress:
                args.no_progress = False

        self._validate_transform_common_args(args)
        self._validate_transform_args(args)
        self._args = args
487
488 @staticmethod
489 def _add_proc_filter_args(ap):
490 ap.add_argument('--procname', type=str,
491 help='Filter the results only for this list of '
492 'process names')
493 ap.add_argument('--tid', type=str,
494 help='Filter the results only for this list of TIDs')
495
496 @staticmethod
497 def _add_min_max_args(ap):
498 ap.add_argument('--min', type=float,
499 help='Filter out durations shorter than min usec')
500 ap.add_argument('--max', type=float,
501 help='Filter out durations longer than max usec')
502
503 @staticmethod
504 def _add_freq_args(ap, help=None):
505 if not help:
506 help = 'Output the frequency distribution'
507
508 ap.add_argument('--freq', action='store_true', help=help)
509 ap.add_argument('--freq-resolution', type=int, default=20,
510 help='Frequency distribution resolution '
511 '(default 20)')
512 ap.add_argument('--freq-uniform', action='store_true',
513 help='Use a uniform resolution across distributions')
514 ap.add_argument('--freq-series', action='store_true',
515 help='Consolidate frequency distribution histogram '
516 'as a single one')
517
518 @staticmethod
519 def _add_log_args(ap, help=None):
520 if not help:
521 help = 'Output the events in chronological order'
522
523 ap.add_argument('--log', action='store_true', help=help)
524
525 @staticmethod
526 def _add_top_args(ap, help=None):
527 if not help:
528 help = 'Output the top results'
529
530 ap.add_argument('--limit', type=int, default=10,
531 help='Limit to top X (default = 10)')
532 ap.add_argument('--top', action='store_true', help=help)
533
534 @staticmethod
535 def _add_stats_args(ap, help=None):
536 if not help:
537 help = 'Output statistics'
538
539 ap.add_argument('--stats', action='store_true', help=help)
540
541 def _add_arguments(self, ap):
542 pass
543
    def _process_date_args(self):
        """Resolve --begin/--end/--timerange into begin/end timestamps (ns)
        and store them on the analysis configuration.

        Exits via _cmdline_error() on unparsable values or when --end
        precedes the start of the trace.
        """
        def parse_date(date):
            # The trace collection is needed to resolve time-only values
            # to full dates
            try:
                ts = parse_utils.parse_trace_collection_date(
                    self._traces, date, self._args.gmt
                )
            except ValueError as e:
                self._cmdline_error(str(e))

            return ts

        self._args.multi_day = trace_utils.is_multi_day_trace_collection(
            self._traces
        )
        begin_ts = None
        end_ts = None

        if self._args.timerange:
            # --timerange supersedes --begin/--end
            try:
                begin_ts, end_ts = (
                    parse_utils.parse_trace_collection_time_range(
                        self._traces, self._args.timerange, self._args.gmt
                    )
                )
            except ValueError as e:
                self._cmdline_error(str(e))
        else:
            if self._args.begin:
                begin_ts = parse_date(self._args.begin)
            if self._args.end:
                end_ts = parse_date(self._args.end)

                # We have to check if timestamp_begin is None, which
                # it always is in older versions of babeltrace. In
                # that case, the test is simply skipped and an invalid
                # --end value will cause an empty analysis
                if self._ts_begin is not None and \
                        end_ts < self._ts_begin:
                    self._cmdline_error(
                        '--end timestamp before beginning of trace')

        self._analysis_conf.begin_ts = begin_ts
        self._analysis_conf.end_ts = end_ts
587
    def _create_analysis(self):
        # Subscribe to the analysis' tick notification so results can be
        # emitted once per period (see _analysis_tick_cb)
        notification_cbs = {
            analysis.Analysis.TICK_CB: self._analysis_tick_cb
        }

        # self.state was created by _create_automaton(), which runs first
        # (see __init__)
        self._analysis = self._ANALYSIS_CLASS(self.state, self._analysis_conf)
        self._analysis.register_notification_cbs(notification_cbs)

    def _create_automaton(self):
        # The automaton tracks kernel state from the raw events; its state
        # object is shared with the analysis
        self._automaton = automaton.Automaton()
        self.state = self._automaton.state

    def _analysis_tick_cb(self, **kwargs):
        # Called by the analysis at the end of each period with the
        # period's boundaries
        begin_ns = kwargs['begin_ns']
        end_ns = kwargs['end_ns']

        self._analysis_tick(begin_ns, end_ns)
        self._ticks += 1

    def _analysis_tick(self, begin_ns, end_ns):
        # Subclasses must implement this to output one period's results
        raise NotImplementedError()
609
610
# create MI version: parse the package version string (presumably produced
# by versioneer via _version.get_versions() — confirm) into an MI Version
# object; the trailing group captures any extra qualifier (e.g. a dev or
# dirty suffix)
_cmd_version = _version.get_versions()['version']
_version_match = re.match(r'(\d+)\.(\d+)\.(\d+)(.*)', _cmd_version)
Command._MI_VERSION = version_utils.Version(
    int(_version_match.group(1)),
    int(_version_match.group(2)),
    int(_version_match.group(3)),
    _version_match.group(4),
)
This page took 0.043792 seconds and 4 git commands to generate.