Exit gracefully on SIGINT in run()
[deliverable/lttng-analyses.git] / lttnganalyses / cli / command.py
1 # The MIT License (MIT)
2 #
3 # Copyright (C) 2015 - Julien Desfossez <jdesfossez@efficios.com>
4 # 2015 - Philippe Proulx <pproulx@efficios.com>
5 #
6 # Permission is hereby granted, free of charge, to any person obtaining a copy
7 # of this software and associated documentation files (the "Software"), to deal
8 # in the Software without restriction, including without limitation the rights
9 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 # copies of the Software, and to permit persons to whom the Software is
11 # furnished to do so, subject to the following conditions:
12 #
13 # The above copyright notice and this permission notice shall be included in
14 # all copies or substantial portions of the Software.
15 #
16 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 # SOFTWARE.
23
24 from ..core import analysis
25 from ..linuxautomaton import automaton
26 from .. import __version__
27 from . import progressbar
28 from ..linuxautomaton import common
29 from .. import _version
30 from babeltrace import TraceCollection
31 import argparse
32 import sys
33 import subprocess
34 import json
35 import re
36 from . import mi
37
38
class Command:
    """Base class of all lttng-analyses command-line commands.

    Concrete subclasses are expected to provide the class attributes
    referenced by this base class:

      _DESC              command-line description string
      _ANALYSIS_CLASS    analysis.Analysis subclass to instantiate
      _MI_TITLE          MI metadata: analysis title
      _MI_DESCRIPTION    MI metadata: analysis description
      _MI_TAGS           MI metadata: analysis-specific tags
      _MI_TABLE_CLASSES  MI metadata: (name, title, columns) tuples

    and to implement _analysis_tick().
    """
    _MI_BASE_TAGS = ['linux-kernel', 'lttng-analyses']
    _MI_AUTHORS = [
        'Julien Desfossez',
        'Antoine Busque',
        'Philippe Proulx',
    ]
    _MI_URL = 'https://github.com/lttng/lttng-analyses'

    def __init__(self, mi_mode=False):
        self._analysis = None
        self._analysis_conf = None
        self._args = None
        self._handles = None
        self._traces = None
        self._ticks = 0
        self._mi_mode = mi_mode
        self._create_automaton()
        self._mi_setup()

    @property
    def mi_mode(self):
        # True when the command emits machine interface (JSON) output
        return self._mi_mode

    def run(self):
        """Execute the full command: parse arguments, open the trace,
        run the analysis over its events, then close the trace.

        Exits with status 0 on SIGINT (Ctrl+C) instead of printing a
        traceback.
        """
        try:
            self._parse_args()
            self._open_trace()
            self._create_analysis()
            self._run_analysis()
            self._close_trace()
        except KeyboardInterrupt:
            sys.exit(0)

    def _error(self, msg, exit_code=1):
        """Print msg to stderr (in bold red when termcolor is
        available) and exit with exit_code."""
        try:
            import termcolor

            msg = termcolor.colored(msg, 'red', attrs=['bold'])
        except ImportError:
            # termcolor is an optional dependency: fall back to
            # uncolored output
            pass

        print(msg, file=sys.stderr)
        sys.exit(exit_code)

    def _gen_error(self, msg, exit_code=1):
        """Print a general error message and exit."""
        self._error('Error: {}'.format(msg), exit_code)

    def _cmdline_error(self, msg, exit_code=1):
        """Print a command-line usage error message and exit."""
        self._error('Command line error: {}'.format(msg), exit_code)

    def _print(self, msg):
        # Human-readable output is suppressed in MI mode, where only
        # JSON may be written to stdout
        if not self._mi_mode:
            print(msg)

    def _mi_create_result_table(self, table_class_name, begin, end,
                                subtitle=None):
        """Create an empty MI result table of the given registered
        table class, covering the [begin, end] time range."""
        return mi.ResultTable(self._mi_table_classes[table_class_name],
                              begin, end, subtitle)

    def _mi_setup(self):
        """Build the MI table class registry from the subclass's
        _MI_TABLE_CLASSES and reset the accumulated result tables."""
        self._mi_table_classes = {}

        for tc_tuple in self._MI_TABLE_CLASSES:
            table_class = mi.TableClass(tc_tuple[0], tc_tuple[1], tc_tuple[2])
            self._mi_table_classes[table_class.name] = table_class

        self._mi_clear_result_tables()

    def _mi_print_metadata(self):
        """Print this analysis's MI metadata object as JSON on
        stdout."""
        tags = self._MI_BASE_TAGS + self._MI_TAGS
        infos = mi.get_metadata(version=self._MI_VERSION, title=self._MI_TITLE,
                                description=self._MI_DESCRIPTION,
                                authors=self._MI_AUTHORS, url=self._MI_URL,
                                tags=tags,
                                table_classes=self._mi_table_classes.values())
        print(json.dumps(infos))

    def _mi_append_result_table(self, result_table):
        """Append result_table to the tables of its class; None and
        row-less tables are ignored."""
        if not result_table or not result_table.rows:
            return

        tc_name = result_table.table_class.name
        self._mi_get_result_tables(tc_name).append(result_table)

    def _mi_append_result_tables(self, result_tables):
        """Append each table of the result_tables iterable."""
        if not result_tables:
            return

        for result_table in result_tables:
            self._mi_append_result_table(result_table)

    def _mi_clear_result_tables(self):
        self._result_tables = {}

    def _mi_get_result_tables(self, table_class_name):
        """Return the list of accumulated result tables of the given
        table class, creating it on first access."""
        if table_class_name not in self._result_tables:
            self._result_tables[table_class_name] = []

        return self._result_tables[table_class_name]

    def _mi_print(self):
        """Print all accumulated MI result tables as one JSON object
        on stdout."""
        results = []

        for result_tables in self._result_tables.values():
            for result_table in result_tables:
                results.append(result_table.to_native_object())

        obj = {
            'results': results,
        }

        print(json.dumps(obj))

    def _create_summary_result_tables(self):
        # Hook for subclasses: build summary tables when the analysis
        # produced more than one period (see _post_analysis())
        pass

    def _open_trace(self):
        """Open the CTF trace(s) under self._args.path, process the
        date arguments and optionally validate the trace."""
        traces = TraceCollection()
        handles = traces.add_traces_recursive(self._args.path, 'ctf')
        if not handles:
            self._gen_error('Failed to open ' + self._args.path, -1)
        self._handles = handles
        self._traces = traces
        self._process_date_args()
        if not self._args.skip_validation:
            self._check_lost_events()

    def _close_trace(self):
        """Remove every opened trace handle from the collection."""
        for handle in self._handles.values():
            self._traces.remove_trace(handle)

    def _check_lost_events(self):
        """Run babeltrace over the trace; babeltrace warns when events
        were lost during the trace recording."""
        self._print('Checking the trace for lost events...')
        try:
            # Pass the path as an argument vector instead of through a
            # shell so that paths containing quotes or other special
            # characters cannot break (or inject into) the command.
            # FileNotFoundError is also caught so that a missing
            # babeltrace binary degrades to the same warning it did
            # when the shell reported the failure.
            subprocess.check_output(['babeltrace', self._args.path])
        except (subprocess.CalledProcessError, FileNotFoundError):
            self._gen_error('Cannot run babeltrace on the trace, cannot verify'
                            ' if events were lost during the trace recording')

    def _pre_analysis(self):
        # Hook for subclasses: called before event processing starts
        pass

    def _post_analysis(self):
        """Called once event processing is done; in MI mode, print the
        accumulated results (plus summary tables when more than one
        period ticked)."""
        if not self._mi_mode:
            return

        if self._ticks > 1:
            self._create_summary_result_tables()

        self._mi_print()

    def _run_analysis(self):
        """Feed every trace event to the analysis and to the state
        automaton, updating the progress bar along the way."""
        self._pre_analysis()
        progressbar.progressbar_setup(self)

        for event in self._traces.events:
            progressbar.progressbar_update(self)
            self._analysis.process_event(event)
            if self._analysis.ended:
                break
            self._automaton.process_event(event)

        progressbar.progressbar_finish(self)
        self._analysis.end()
        self._post_analysis()

    def _print_date(self, begin_ns, end_ns):
        """Print the analyzed time range in human-readable form."""
        date = 'Timerange: [%s, %s]' % (
            common.ns_to_hour_nsec(begin_ns, gmt=self._args.gmt,
                                   multi_day=True),
            common.ns_to_hour_nsec(end_ns, gmt=self._args.gmt,
                                   multi_day=True))
        self._print(date)

    def _get_uniform_freq_values(self, durations):
        """Return (min, max, step) bucket parameters, in µs, for
        uniform frequency distributions.

        On the first call they are computed from the durations list
        (in ns) and from the --min/--max arguments, then cached on
        self._args for subsequent calls.
        """
        if self._args.uniform_step is not None:
            return (self._args.uniform_min, self._args.uniform_max,
                    self._args.uniform_step)

        if self._args.min is not None:
            self._args.uniform_min = self._args.min
        else:
            self._args.uniform_min = min(durations)
        if self._args.max is not None:
            self._args.uniform_max = self._args.max
        else:
            self._args.uniform_max = max(durations)

        # ns to µs
        self._args.uniform_min /= 1000
        self._args.uniform_max /= 1000
        self._args.uniform_step = (
            (self._args.uniform_max - self._args.uniform_min) /
            self._args.freq_resolution
        )

        return self._args.uniform_min, self._args.uniform_max, \
            self._args.uniform_step

    def _validate_transform_common_args(self, args):
        """Validate and normalize the arguments shared by every
        command, filling self._analysis_conf as a side effect."""
        refresh_period_ns = None
        if args.refresh is not None:
            try:
                refresh_period_ns = common.duration_str_to_ns(args.refresh)
            except ValueError as e:
                self._cmdline_error(str(e))

        self._analysis_conf = analysis.AnalysisConfig()
        self._analysis_conf.refresh_period = refresh_period_ns
        self._analysis_conf.period_begin_ev_name = args.period_begin
        self._analysis_conf.period_end_ev_name = args.period_end
        self._analysis_conf.period_key_fields = args.period_key.split(',')
        if args.cpu:
            self._analysis_conf.cpu_list = args.cpu.split(',')
            self._analysis_conf.cpu_list = [int(cpu) for cpu in
                                            self._analysis_conf.cpu_list]

        # convert min/max args from µs to ns, if needed
        if hasattr(args, 'min') and args.min is not None:
            args.min *= 1000
            self._analysis_conf.min_duration = args.min
        if hasattr(args, 'max') and args.max is not None:
            args.max *= 1000
            self._analysis_conf.max_duration = args.max

        if hasattr(args, 'procname'):
            if args.procname:
                self._analysis_conf.proc_list = args.procname.split(',')

        if hasattr(args, 'tid'):
            if args.tid:
                self._analysis_conf.tid_list = args.tid.split(',')
                self._analysis_conf.tid_list = [int(tid) for tid in
                                                self._analysis_conf.tid_list]

        if hasattr(args, 'freq'):
            args.uniform_min = None
            args.uniform_max = None
            args.uniform_step = None

            if args.freq_series:
                # implies uniform buckets
                args.freq_uniform = True

        if self._mi_mode:
            # force no progress in MI mode
            args.no_progress = True

            # print MI metadata if required
            if args.metadata:
                self._mi_print_metadata()
                sys.exit(0)

        # validate path argument (required at this point)
        if not args.path:
            self._cmdline_error('Please specify a trace path')

        # in MI mode the path is collected with nargs='*', so unwrap it
        if isinstance(args.path, list):
            args.path = args.path[0]

    def _validate_transform_args(self, args):
        # Hook for subclasses: validate command-specific arguments
        pass

    def _parse_args(self):
        """Build the argument parser (common arguments, MI-dependent
        arguments and command-specific ones), parse and validate."""
        ap = argparse.ArgumentParser(description=self._DESC)

        # common arguments
        ap.add_argument('-r', '--refresh', type=str,
                        help='Refresh period, with optional units suffix '
                        '(default units: s)')
        ap.add_argument('--gmt', action='store_true',
                        help='Manipulate timestamps based on GMT instead '
                        'of local time')
        ap.add_argument('--skip-validation', action='store_true',
                        help='Skip the trace validation')
        ap.add_argument('--begin', type=str, help='start time: '
                        'hh:mm:ss[.nnnnnnnnn]')
        ap.add_argument('--end', type=str, help='end time: '
                        'hh:mm:ss[.nnnnnnnnn]')
        ap.add_argument('--period-begin', type=str,
                        help='Analysis period start marker event name')
        ap.add_argument('--period-end', type=str,
                        help='Analysis period end marker event name '
                        '(requires --period-begin)')
        ap.add_argument('--period-key', type=str, default='cpu_id',
                        help='Optional, list of event field names used to '
                        'match period markers (default: cpu_id)')
        ap.add_argument('--cpu', type=str,
                        help='Filter the results only for this list of '
                        'CPU IDs')
        ap.add_argument('--timerange', type=str, help='time range: '
                        '[begin,end]')
        ap.add_argument('-V', '--version', action='version',
                        version='LTTng Analyses v' + __version__)

        # MI mode-dependent arguments
        if self._mi_mode:
            ap.add_argument('--metadata', action='store_true',
                            help='Show analysis\'s metadata')
            ap.add_argument('path', metavar='<path/to/trace>',
                            help='trace path', nargs='*')
        else:
            ap.add_argument('--no-progress', action='store_true',
                            help='Don\'t display the progress bar')
            ap.add_argument('path', metavar='<path/to/trace>',
                            help='trace path')

        # Used to add command-specific args
        self._add_arguments(ap)

        args = ap.parse_args()
        self._validate_transform_common_args(args)
        self._validate_transform_args(args)
        self._args = args

    @staticmethod
    def _add_proc_filter_args(ap):
        """Add the --procname/--tid filtering arguments."""
        ap.add_argument('--procname', type=str,
                        help='Filter the results only for this list of '
                        'process names')
        ap.add_argument('--tid', type=str,
                        help='Filter the results only for this list of TIDs')

    @staticmethod
    def _add_min_max_args(ap):
        """Add the --min/--max duration filtering arguments (µs)."""
        ap.add_argument('--min', type=float,
                        help='Filter out durations shorter than min usec')
        ap.add_argument('--max', type=float,
                        help='Filter out durations longer than max usec')

    @staticmethod
    def _add_freq_args(ap, help=None):
        """Add the frequency distribution arguments."""
        if not help:
            help = 'Output the frequency distribution'

        ap.add_argument('--freq', action='store_true', help=help)
        ap.add_argument('--freq-resolution', type=int, default=20,
                        help='Frequency distribution resolution '
                        '(default 20)')
        ap.add_argument('--freq-uniform', action='store_true',
                        help='Use a uniform resolution across distributions')
        ap.add_argument('--freq-series', action='store_true',
                        help='Consolidate frequency distribution histogram '
                        'as a single one')

    @staticmethod
    def _add_log_args(ap, help=None):
        """Add the --log argument."""
        if not help:
            help = 'Output the events in chronological order'

        ap.add_argument('--log', action='store_true', help=help)

    @staticmethod
    def _add_top_args(ap, help=None):
        """Add the --top/--limit arguments."""
        if not help:
            help = 'Output the top results'

        ap.add_argument('--limit', type=int, default=10,
                        help='Limit to top X (default = 10)')
        ap.add_argument('--top', action='store_true', help=help)

    @staticmethod
    def _add_stats_args(ap, help=None):
        """Add the --stats argument."""
        if not help:
            help = 'Output statistics'

        ap.add_argument('--stats', action='store_true', help=help)

    def _add_arguments(self, ap):
        # Hook for subclasses: register command-specific arguments
        pass

    def _process_date_args(self):
        """Resolve --begin/--end/--timerange into begin/end timestamps
        (ns since epoch) stored on self._analysis_conf."""
        def date_to_epoch_nsec(date):
            # exits with a command-line error on an invalid date format
            ts = common.date_to_epoch_nsec(self._handles, date, self._args.gmt)
            if ts is None:
                self._cmdline_error('Invalid date format: "{}"'.format(date))

            return ts

        self._args.multi_day = common.is_multi_day_trace_collection(
            self._handles)
        begin_ts = None
        end_ts = None

        if self._args.timerange:
            begin_ts, end_ts = common.extract_timerange(self._handles,
                                                        self._args.timerange,
                                                        self._args.gmt)
            if None in [begin_ts, end_ts]:
                self._cmdline_error(
                    'Invalid time format: "{}"'.format(self._args.timerange))
        else:
            if self._args.begin:
                begin_ts = date_to_epoch_nsec(self._args.begin)
            if self._args.end:
                end_ts = date_to_epoch_nsec(self._args.end)

                # We have to check if timestamp_begin is None, which
                # it always is in older versions of babeltrace. In
                # that case, the test is simply skipped and an invalid
                # --end value will cause an empty analysis
                if self._traces.timestamp_begin is not None and \
                        end_ts < self._traces.timestamp_begin:
                    self._cmdline_error(
                        '--end timestamp before beginning of trace')

        self._analysis_conf.begin_ts = begin_ts
        self._analysis_conf.end_ts = end_ts

    def _create_analysis(self):
        """Instantiate the command's analysis class and register the
        tick notification callback."""
        notification_cbs = {
            analysis.Analysis.TICK_CB: self._analysis_tick_cb
        }

        self._analysis = self._ANALYSIS_CLASS(self.state, self._analysis_conf)
        self._analysis.register_notification_cbs(notification_cbs)

    def _create_automaton(self):
        """Create the Linux state automaton and expose its state."""
        self._automaton = automaton.Automaton()
        self.state = self._automaton.state

    def _analysis_tick_cb(self, **kwargs):
        """Analysis tick callback: forward the period's time range to
        the subclass and count the tick."""
        begin_ns = kwargs['begin_ns']
        end_ns = kwargs['end_ns']

        self._analysis_tick(begin_ns, end_ns)
        self._ticks += 1

    def _analysis_tick(self, begin_ns, end_ns):
        """Produce the command's output for one analysis period; must
        be implemented by subclasses."""
        raise NotImplementedError()
471
472
# Compute the MI version of every analysis from the package version:
# 'major.minor.patch<extra>' becomes [major, minor, patch, extra].
_cmd_version = _version.get_versions()['version']
_version_match = re.match(r'(\d+)\.(\d+)\.(\d+)(.*)', _cmd_version)
Command._MI_VERSION = [int(_version_match.group(i)) for i in range(1, 4)]
Command._MI_VERSION.append(_version_match.group(4))
This page took 0.042419 seconds and 6 git commands to generate.