1 # The MIT License (MIT)
3 # Copyright (C) 2015 - Julien Desfossez <jdesfossez@efficios.com>
4 # 2015 - Philippe Proulx <pproulx@efficios.com>
5 # 2015 - Antoine Busque <abusque@efficios.com>
7 # Permission is hereby granted, free of charge, to any person obtaining a copy
8 # of this software and associated documentation files (the "Software"), to deal
9 # in the Software without restriction, including without limitation the rights
10 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11 # copies of the Software, and to permit persons to whom the Software is
12 # furnished to do so, subject to the following conditions:
14 # The above copyright notice and this permission notice shall be included in
15 # all copies or substantial portions of the Software.
17 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
20 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
import argparse
import json
import os
import re
import subprocess
import sys

from babeltrace import TraceCollection

from . import mi, progressbar
from .. import _version, __version__
from ..core import analysis
from ..common import (format_utils, parse_utils, time_utils, trace_utils,
                      version_utils)
from ..linuxautomaton import automaton
42 _MI_BASE_TAGS
= ['linux-kernel', 'lttng-analyses']
48 _MI_URL
= 'https://github.com/lttng/lttng-analyses'
50 def __init__(self
, mi_mode
=False):
52 self
._analysis
_conf
= None
57 self
._mi
_mode
= mi_mode
58 self
._run
_step
('create automaton', self
._create
_automaton
)
59 self
._run
_step
('setup MI', self
._mi
_setup
)
65 def _run_step(self
, action_title
, fn
):
68 except KeyboardInterrupt:
69 self
._print
('Cancelled by user')
71 except Exception as e
:
72 self
._gen
_error
('Cannot {}: {}'.format(action_title
, e
))
75 self
._run
_step
('parse arguments', self
._parse
_args
)
76 self
._run
_step
('open trace', self
._open
_trace
)
77 self
._run
_step
('create analysis', self
._create
_analysis
)
79 if self
._mi
_mode
and not self
._args
.test_compatibility
:
80 self
._run
_step
('run analysis', self
._run
_analysis
)
82 self
._run
_step
('close trace', self
._close
_trace
)
84 def _mi_error(self
, msg
, code
=None):
85 print(json
.dumps(mi
.get_error(msg
, code
)))
87 def _non_mi_error(self
, msg
):
91 msg
= termcolor
.colored(msg
, 'red', attrs
=['bold'])
95 print(msg
, file=sys
.stderr
)
97 def _error(self
, msg
, code
, exit_code
=1):
101 self
._non
_mi
_error
(msg
)
105 def _gen_error(self
, msg
, exit_code
=1):
106 self
._error
('Error: {}'.format(msg
), exit_code
)
108 def _cmdline_error(self
, msg
, exit_code
=1):
109 self
._error
('Command line error: {}'.format(msg
), exit_code
)
111 def _print(self
, msg
):
112 if not self
._mi
_mode
:
115 def _mi_create_result_table(self
, table_class_name
, begin
, end
,
117 return mi
.ResultTable(self
._mi
_table
_classes
[table_class_name
],
118 begin
, end
, subtitle
)
121 self
._mi
_table
_classes
= {}
123 for tc_tuple
in self
._MI
_TABLE
_CLASSES
:
124 table_class
= mi
.TableClass(tc_tuple
[0], tc_tuple
[1], tc_tuple
[2])
125 self
._mi
_table
_classes
[table_class
.name
] = table_class
127 self
._mi
_clear
_result
_tables
()
129 def _mi_print_metadata(self
):
130 tags
= self
._MI
_BASE
_TAGS
+ self
._MI
_TAGS
131 infos
= mi
.get_metadata(version
=self
._MI
_VERSION
, title
=self
._MI
_TITLE
,
132 description
=self
._MI
_DESCRIPTION
,
133 authors
=self
._MI
_AUTHORS
, url
=self
._MI
_URL
,
135 table_classes
=self
._mi
_table
_classes
.values())
136 print(json
.dumps(infos
))
138 def _mi_append_result_table(self
, result_table
):
139 if not result_table
or not result_table
.rows
:
142 tc_name
= result_table
.table_class
.name
143 self
._mi
_get
_result
_tables
(tc_name
).append(result_table
)
145 def _mi_append_result_tables(self
, result_tables
):
146 if not result_tables
:
149 for result_table
in result_tables
:
150 self
._mi
_append
_result
_table
(result_table
)
152 def _mi_clear_result_tables(self
):
153 self
._result
_tables
= {}
155 def _mi_get_result_tables(self
, table_class_name
):
156 if table_class_name
not in self
._result
_tables
:
157 self
._result
_tables
[table_class_name
] = []
159 return self
._result
_tables
[table_class_name
]
164 for result_tables
in self
._result
_tables
.values():
165 for result_table
in result_tables
:
166 results
.append(result_table
.to_native_object())
172 print(json
.dumps(obj
))
174 def _create_summary_result_tables(self
):
177 def _open_trace(self
):
178 traces
= TraceCollection()
179 handles
= traces
.add_traces_recursive(self
._args
.path
, 'ctf')
181 self
._gen
_error
('Failed to open ' + self
._args
.path
, -1)
182 self
._handles
= handles
183 self
._traces
= traces
184 self
._ts
_begin
= traces
.timestamp_begin
185 self
._ts
_end
= traces
.timestamp_end
186 self
._process
_date
_args
()
187 self
._read
_tracer
_version
()
188 if not self
._args
.skip_validation
:
189 self
._check
_lost
_events
()
191 def _close_trace(self
):
192 for handle
in self
._handles
.values():
193 self
._traces
.remove_trace(handle
)
195 def _read_tracer_version(self
):
197 # remove the trailing /
198 while self
._args
.path
.endswith('/'):
199 self
._args
.path
= self
._args
.path
[:-1]
200 for root
, _
, _
in os
.walk(self
._args
.path
):
201 if root
.endswith('kernel'):
205 if kernel_path
is None:
206 self
._gen
_error
('Could not find kernel trace directory')
209 ret
, metadata
= subprocess
.getstatusoutput(
210 'babeltrace -o ctf-metadata "%s"' % kernel_path
)
211 except subprocess
.CalledProcessError
:
212 self
._gen
_error
('Cannot run babeltrace on the trace, cannot read'
215 # fallback to reading the text metadata if babeltrace failed to
216 # output the CTF metadata
219 metadata
= subprocess
.getoutput(
220 'cat "%s"' % os
.path
.join(kernel_path
, 'metadata'))
221 except subprocess
.CalledProcessError
:
222 self
._gen
_error
('Cannot read the metadata of the trace, cannot'
223 'extract tracer version')
225 major_match
= re
.search(r
'tracer_major = "*(\d+)"*', metadata
)
226 minor_match
= re
.search(r
'tracer_minor = "*(\d+)"*', metadata
)
227 patch_match
= re
.search(r
'tracer_patchlevel = "*(\d+)"*', metadata
)
229 if not major_match
or not minor_match
or not patch_match
:
230 self
._gen
_error
('Malformed metadata, cannot read tracer version')
232 self
.state
.tracer_version
= version_utils
.Version(
233 int(major_match
.group(1)),
234 int(minor_match
.group(1)),
235 int(patch_match
.group(1)),
238 def _check_lost_events(self
):
239 msg
= 'Checking the trace for lost events...'
242 if self
._mi
_mode
and self
._args
.output_progress
:
243 mi
.print_progress(0, msg
)
246 subprocess
.check_output('babeltrace "%s"' % self
._args
.path
,
248 except subprocess
.CalledProcessError
:
249 self
._gen
_error
('Cannot run babeltrace on the trace, cannot verify'
250 ' if events were lost during the trace recording')
252 def _pre_analysis(self
):
255 def _post_analysis(self
):
256 if not self
._mi
_mode
:
260 self
._create
_summary
_result
_tables
()
265 if self
._args
.no_progress
:
268 ts_end
= self
._ts
_end
270 if self
._analysis
_conf
.end_ts
is not None:
271 ts_end
= self
._analysis
_conf
.end_ts
274 cls
= progressbar
.MiProgress
276 cls
= progressbar
.FancyProgressBar
278 self
._progress
= cls(self
._ts
_begin
, ts_end
, self
._args
.path
,
279 self
._args
.progress_use_size
)
281 def _pb_update(self
, event
):
282 if self
._args
.no_progress
:
285 self
._progress
.update(event
)
287 def _pb_finish(self
):
288 if self
._args
.no_progress
:
291 self
._progress
.finalize()
293 def _run_analysis(self
):
297 for event
in self
._traces
.events
:
298 self
._pb
_update
(event
)
299 self
._analysis
.process_event(event
)
300 if self
._analysis
.ended
:
302 self
._automaton
.process_event(event
)
306 self
._post
_analysis
()
308 def _print_date(self
, begin_ns
, end_ns
):
309 time_range_str
= format_utils
.format_time_range(
310 begin_ns
, end_ns
, print_date
=True, gmt
=self
._args
.gmt
312 date
= 'Timerange: {}'.format(time_range_str
)
316 def _format_timestamp(self
, timestamp
):
317 return format_utils
.format_timestamp(
318 timestamp
, print_date
=self
._args
.multi_day
, gmt
=self
._args
.gmt
321 def _get_uniform_freq_values(self
, durations
):
322 if self
._args
.uniform_step
is not None:
323 return (self
._args
.uniform_min
, self
._args
.uniform_max
,
324 self
._args
.uniform_step
)
326 if self
._args
.min is not None:
327 self
._args
.uniform_min
= self
._args
.min
329 self
._args
.uniform_min
= min(durations
)
330 if self
._args
.max is not None:
331 self
._args
.uniform_max
= self
._args
.max
333 self
._args
.uniform_max
= max(durations
)
336 self
._args
.uniform_min
/= 1000
337 self
._args
.uniform_max
/= 1000
338 self
._args
.uniform_step
= (
339 (self
._args
.uniform_max
- self
._args
.uniform_min
) /
340 self
._args
.freq_resolution
343 return self
._args
.uniform_min
, self
._args
.uniform_max
, \
344 self
._args
.uniform_step
346 def _validate_transform_common_args(self
, args
):
347 refresh_period_ns
= None
348 if args
.refresh
is not None:
350 refresh_period_ns
= parse_utils
.parse_duration(args
.refresh
)
351 except ValueError as e
:
352 self
._cmdline
_error
(str(e
))
354 self
._analysis
_conf
= analysis
.AnalysisConfig()
355 self
._analysis
_conf
.refresh_period
= refresh_period_ns
356 self
._analysis
_conf
.period_begin_ev_name
= args
.period_begin
357 self
._analysis
_conf
.period_end_ev_name
= args
.period_end
358 self
._analysis
_conf
.period_begin_key_fields
= \
359 args
.period_begin_key
.split(',')
361 if args
.period_end_key
:
362 self
._analysis
_conf
.period_end_key_fields
= \
363 args
.period_end_key
.split(',')
365 self
._analysis
_conf
.period_end_key_fields
= \
366 self
._analysis
_conf
.period_begin_key_fields
368 if args
.period_key_value
:
369 self
._analysis
_conf
.period_key_value
= \
370 tuple(args
.period_key_value
.split(','))
373 self
._analysis
_conf
.cpu_list
= args
.cpu
.split(',')
374 self
._analysis
_conf
.cpu_list
= [int(cpu
) for cpu
in
375 self
._analysis
_conf
.cpu_list
]
377 # convert min/max args from µs to ns, if needed
378 if hasattr(args
, 'min') and args
.min is not None:
380 self
._analysis
_conf
.min_duration
= args
.min
381 if hasattr(args
, 'max') and args
.max is not None:
383 self
._analysis
_conf
.max_duration
= args
.max
385 if hasattr(args
, 'procname'):
387 self
._analysis
_conf
.proc_list
= args
.procname
.split(',')
389 if hasattr(args
, 'tid'):
391 self
._analysis
_conf
.tid_list
= args
.tid
.split(',')
392 self
._analysis
_conf
.tid_list
= [int(tid
) for tid
in
393 self
._analysis
_conf
.tid_list
]
395 if hasattr(args
, 'freq'):
396 args
.uniform_min
= None
397 args
.uniform_max
= None
398 args
.uniform_step
= None
401 # implies uniform buckets
402 args
.freq_uniform
= True
405 # print MI version if required
407 print(mi
.get_version_string())
410 # print MI metadata if required
412 self
._mi
_print
_metadata
()
415 # validate path argument (required at this point)
417 self
._cmdline
_error
('Please specify a trace path')
419 if type(args
.path
) is list:
420 args
.path
= args
.path
[0]
422 def _validate_transform_args(self
, args
):
425 def _parse_args(self
):
426 ap
= argparse
.ArgumentParser(description
=self
._DESC
)
429 ap
.add_argument('-r', '--refresh', type=str,
430 help='Refresh period, with optional units suffix '
431 '(default units: s)')
432 ap
.add_argument('--gmt', action
='store_true',
433 help='Manipulate timestamps based on GMT instead '
435 ap
.add_argument('--skip-validation', action
='store_true',
436 help='Skip the trace validation')
437 ap
.add_argument('--begin', type=str, help='start time: '
438 'hh:mm:ss[.nnnnnnnnn]')
439 ap
.add_argument('--end', type=str, help='end time: '
440 'hh:mm:ss[.nnnnnnnnn]')
441 ap
.add_argument('--period-begin', type=str,
442 help='Analysis period start marker event name')
443 ap
.add_argument('--period-end', type=str,
444 help='Analysis period end marker event name '
445 '(requires --period-begin)')
446 ap
.add_argument('--period-begin-key', type=str, default
='cpu_id',
447 help='Optional, list of event field names used to '
448 'match period markers (default: cpu_id)')
449 ap
.add_argument('--period-end-key', type=str,
450 help='Optional, list of event field names used to '
451 'match period marker. If none specified, use the same '
452 ' --period-begin-key')
453 ap
.add_argument('--period-key-value', type=str,
454 help='Optional, define a fixed key value to which a'
455 ' period must correspond to be considered.')
456 ap
.add_argument('--cpu', type=str,
457 help='Filter the results only for this list of '
459 ap
.add_argument('--timerange', type=str, help='time range: '
461 ap
.add_argument('--progress-use-size', action
='store_true',
462 help='use trace size to approximate progress')
463 ap
.add_argument('-V', '--version', action
='version',
464 version
='LTTng Analyses v' + __version__
)
466 # MI mode-dependent arguments
468 ap
.add_argument('--mi-version', action
='store_true',
469 help='Print MI version')
470 ap
.add_argument('--metadata', action
='store_true',
471 help='Print analysis\' metadata')
472 ap
.add_argument('--test-compatibility', action
='store_true',
473 help='Check if the provided trace is supported and exit')
474 ap
.add_argument('path', metavar
='<path/to/trace>',
475 help='trace path', nargs
='*')
476 ap
.add_argument('--output-progress', action
='store_true',
477 help='Print progress indication lines')
479 ap
.add_argument('--no-progress', action
='store_true',
480 help='Don\'t display the progress bar')
481 ap
.add_argument('path', metavar
='<path/to/trace>',
484 # Used to add command-specific args
485 self
._add
_arguments
(ap
)
487 args
= ap
.parse_args()
490 args
.no_progress
= True
492 if args
.output_progress
:
493 args
.no_progress
= False
495 self
._validate
_transform
_common
_args
(args
)
496 self
._validate
_transform
_args
(args
)
500 def _add_proc_filter_args(ap
):
501 ap
.add_argument('--procname', type=str,
502 help='Filter the results only for this list of '
504 ap
.add_argument('--tid', type=str,
505 help='Filter the results only for this list of TIDs')
508 def _add_min_max_args(ap
):
509 ap
.add_argument('--min', type=float,
510 help='Filter out durations shorter than min usec')
511 ap
.add_argument('--max', type=float,
512 help='Filter out durations longer than max usec')
515 def _add_freq_args(ap
, help=None):
517 help = 'Output the frequency distribution'
519 ap
.add_argument('--freq', action
='store_true', help=help)
520 ap
.add_argument('--freq-resolution', type=int, default
=20,
521 help='Frequency distribution resolution '
523 ap
.add_argument('--freq-uniform', action
='store_true',
524 help='Use a uniform resolution across distributions')
525 ap
.add_argument('--freq-series', action
='store_true',
526 help='Consolidate frequency distribution histogram '
530 def _add_log_args(ap
, help=None):
532 help = 'Output the events in chronological order'
534 ap
.add_argument('--log', action
='store_true', help=help)
537 def _add_top_args(ap
, help=None):
539 help = 'Output the top results'
541 ap
.add_argument('--limit', type=int, default
=10,
542 help='Limit to top X (default = 10)')
543 ap
.add_argument('--top', action
='store_true', help=help)
546 def _add_stats_args(ap
, help=None):
548 help = 'Output statistics'
550 ap
.add_argument('--stats', action
='store_true', help=help)
552 def _add_arguments(self
, ap
):
555 def _process_date_args(self
):
556 def parse_date(date
):
558 ts
= parse_utils
.parse_trace_collection_date(
559 self
._traces
, date
, self
._args
.gmt
561 except ValueError as e
:
562 self
._cmdline
_error
(str(e
))
566 self
._args
.multi_day
= trace_utils
.is_multi_day_trace_collection(
572 if self
._args
.timerange
:
575 parse_utils
.parse_trace_collection_time_range(
576 self
._traces
, self
._args
.timerange
, self
._args
.gmt
579 except ValueError as e
:
580 self
._cmdline
_error
(str(e
))
583 begin_ts
= parse_date(self
._args
.begin
)
585 end_ts
= parse_date(self
._args
.end
)
587 # We have to check if timestamp_begin is None, which
588 # it always is in older versions of babeltrace. In
589 # that case, the test is simply skipped and an invalid
590 # --end value will cause an empty analysis
591 if self
._ts
_begin
is not None and \
592 end_ts
< self
._ts
_begin
:
594 '--end timestamp before beginning of trace')
596 self
._analysis
_conf
.begin_ts
= begin_ts
597 self
._analysis
_conf
.end_ts
= end_ts
599 def _create_analysis(self
):
601 analysis
.Analysis
.TICK_CB
: self
._analysis
_tick
_cb
604 self
._analysis
= self
._ANALYSIS
_CLASS
(self
.state
, self
._analysis
_conf
)
605 self
._analysis
.register_notification_cbs(notification_cbs
)
607 def _create_automaton(self
):
608 self
._automaton
= automaton
.Automaton()
609 self
.state
= self
._automaton
.state
611 def _analysis_tick_cb(self
, **kwargs
):
612 begin_ns
= kwargs
['begin_ns']
613 end_ns
= kwargs
['end_ns']
615 self
._analysis
_tick
(begin_ns
, end_ns
)
618 def _analysis_tick(self
, begin_ns
, end_ns
):
619 raise NotImplementedError()
623 _cmd_version
= _version
.get_versions()['version']
624 _version_match
= re
.match(r
'(\d+)\.(\d+)\.(\d+)(.*)', _cmd_version
)
625 Command
._MI
_VERSION
= version_utils
.Version(
626 int(_version_match
.group(1)),
627 int(_version_match
.group(2)),
628 int(_version_match
.group(3)),
629 _version_match
.group(4),