cf256cf0cb8b9f8a3525a73f5e3486f549e38058
1 # The MIT License (MIT)
3 # Copyright (C) 2015 - Julien Desfossez <jdesfossez@efficios.com>
4 # 2015 - Philippe Proulx <pproulx@efficios.com>
5 # 2015 - Antoine Busque <abusque@efficios.com>
7 # Permission is hereby granted, free of charge, to any person obtaining a copy
8 # of this software and associated documentation files (the "Software"), to deal
9 # in the Software without restriction, including without limitation the rights
10 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11 # copies of the Software, and to permit persons to whom the Software is
12 # furnished to do so, subject to the following conditions:
14 # The above copyright notice and this permission notice shall be included in
15 # all copies or substantial portions of the Software.
17 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
20 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 from babeltrace
import TraceCollection
32 from . import mi
, progressbar
33 from .. import __version__
34 from ..core
import analysis
35 from ..common
import (
36 format_utils
, parse_utils
, time_utils
, trace_utils
, version_utils
38 from ..linuxautomaton
import automaton
42 _MI_BASE_TAGS
= ['linux-kernel', 'lttng-analyses']
48 _MI_URL
= 'https://github.com/lttng/lttng-analyses'
49 _VERSION
= version_utils
.Version
.new_from_string(__version__
)
51 def __init__(self
, mi_mode
=False):
53 self
._analysis
_conf
= None
58 self
._mi
_mode
= mi_mode
59 self
._run
_step
('create automaton', self
._create
_automaton
)
60 self
._run
_step
('setup MI', self
._mi
_setup
)
66 def _run_step(self
, action_title
, fn
):
69 except KeyboardInterrupt:
70 self
._print
('Cancelled by user')
72 except Exception as e
:
73 self
._gen
_error
('Cannot {}: {}'.format(action_title
, e
))
76 self
._run
_step
('parse arguments', self
._parse
_args
)
77 self
._run
_step
('open trace', self
._open
_trace
)
78 self
._run
_step
('create analysis', self
._create
_analysis
)
80 if not self
._mi
_mode
or not self
._args
.test_compatibility
:
81 self
._run
_step
('run analysis', self
._run
_analysis
)
83 self
._run
_step
('close trace', self
._close
_trace
)
def _mi_error(self, msg, code=None):
    """Emit a machine-interface error object as a JSON line on stdout.

    `msg` is the error message; `code` is an optional error code
    forwarded to `mi.get_error()`.
    """
    error_obj = mi.get_error(msg, code)
    print(json.dumps(error_obj))
88 def _non_mi_error(self
, msg
):
92 msg
= termcolor
.colored(msg
, 'red', attrs
=['bold'])
96 print(msg
, file=sys
.stderr
)
98 def _error(self
, msg
, code
, exit_code
=1):
102 self
._non
_mi
_error
(msg
)
106 def _gen_error(self
, msg
, exit_code
=1):
107 self
._error
('Error: {}'.format(msg
), exit_code
)
109 def _cmdline_error(self
, msg
, exit_code
=1):
110 self
._error
('Command line error: {}'.format(msg
), exit_code
)
112 def _print(self
, msg
):
113 if not self
._mi
_mode
:
116 def _mi_create_result_table(self
, table_class_name
, begin
, end
,
118 return mi
.ResultTable(self
._mi
_table
_classes
[table_class_name
],
119 begin
, end
, subtitle
)
122 self
._mi
_table
_classes
= {}
124 for tc_tuple
in self
._MI
_TABLE
_CLASSES
:
125 table_class
= mi
.TableClass(tc_tuple
[0], tc_tuple
[1], tc_tuple
[2])
126 self
._mi
_table
_classes
[table_class
.name
] = table_class
128 self
._mi
_clear
_result
_tables
()
130 def _mi_print_metadata(self
):
131 tags
= self
._MI
_BASE
_TAGS
+ self
._MI
_TAGS
132 infos
= mi
.get_metadata(version
=self
._VERSION
, title
=self
._MI
_TITLE
,
133 description
=self
._MI
_DESCRIPTION
,
134 authors
=self
._MI
_AUTHORS
, url
=self
._MI
_URL
,
136 table_classes
=self
._mi
_table
_classes
.values())
137 print(json
.dumps(infos
))
139 def _mi_append_result_table(self
, result_table
):
140 if not result_table
or not result_table
.rows
:
143 tc_name
= result_table
.table_class
.name
144 self
._mi
_get
_result
_tables
(tc_name
).append(result_table
)
146 def _mi_append_result_tables(self
, result_tables
):
147 if not result_tables
:
150 for result_table
in result_tables
:
151 self
._mi
_append
_result
_table
(result_table
)
153 def _mi_clear_result_tables(self
):
154 self
._result
_tables
= {}
156 def _mi_get_result_tables(self
, table_class_name
):
157 if table_class_name
not in self
._result
_tables
:
158 self
._result
_tables
[table_class_name
] = []
160 return self
._result
_tables
[table_class_name
]
165 for result_tables
in self
._result
_tables
.values():
166 for result_table
in result_tables
:
167 results
.append(result_table
.to_native_object())
173 print(json
.dumps(obj
))
175 def _create_summary_result_tables(self
):
178 def _open_trace(self
):
179 traces
= TraceCollection()
180 handles
= traces
.add_traces_recursive(self
._args
.path
, 'ctf')
182 self
._gen
_error
('Failed to open ' + self
._args
.path
, -1)
183 self
._handles
= handles
184 self
._traces
= traces
185 self
._ts
_begin
= traces
.timestamp_begin
186 self
._ts
_end
= traces
.timestamp_end
187 self
._process
_date
_args
()
188 self
._read
_tracer
_version
()
189 if not self
._args
.skip_validation
:
190 self
._check
_lost
_events
()
192 def _close_trace(self
):
193 for handle
in self
._handles
.values():
194 self
._traces
.remove_trace(handle
)
196 def _read_tracer_version(self
):
198 # remove the trailing /
199 while self
._args
.path
.endswith('/'):
200 self
._args
.path
= self
._args
.path
[:-1]
201 for root
, _
, _
in os
.walk(self
._args
.path
):
202 if root
.endswith('kernel'):
206 if kernel_path
is None:
207 self
._gen
_error
('Could not find kernel trace directory')
210 ret
, metadata
= subprocess
.getstatusoutput(
211 'babeltrace -o ctf-metadata "%s"' % kernel_path
)
212 except subprocess
.CalledProcessError
:
213 self
._gen
_error
('Cannot run babeltrace on the trace, cannot read'
216 # fallback to reading the text metadata if babeltrace failed to
217 # output the CTF metadata
220 metadata
= subprocess
.getoutput(
221 'cat "%s"' % os
.path
.join(kernel_path
, 'metadata'))
222 except subprocess
.CalledProcessError
:
223 self
._gen
_error
('Cannot read the metadata of the trace, cannot'
224 'extract tracer version')
226 major_match
= re
.search(r
'tracer_major = "*(\d+)"*', metadata
)
227 minor_match
= re
.search(r
'tracer_minor = "*(\d+)"*', metadata
)
228 patch_match
= re
.search(r
'tracer_patchlevel = "*(\d+)"*', metadata
)
230 if not major_match
or not minor_match
or not patch_match
:
231 self
._gen
_error
('Malformed metadata, cannot read tracer version')
233 self
.state
.tracer_version
= version_utils
.Version(
234 int(major_match
.group(1)),
235 int(minor_match
.group(1)),
236 int(patch_match
.group(1)),
239 def _check_lost_events(self
):
240 msg
= 'Checking the trace for lost events...'
243 if self
._mi
_mode
and self
._args
.output_progress
:
244 mi
.print_progress(0, msg
)
247 subprocess
.check_output('babeltrace "%s"' % self
._args
.path
,
249 except subprocess
.CalledProcessError
:
250 self
._gen
_error
('Cannot run babeltrace on the trace, cannot verify'
251 ' if events were lost during the trace recording')
253 def _pre_analysis(self
):
256 def _post_analysis(self
):
257 if not self
._mi
_mode
:
261 self
._create
_summary
_result
_tables
()
266 if self
._args
.no_progress
:
269 ts_end
= self
._ts
_end
271 if self
._analysis
_conf
.end_ts
is not None:
272 ts_end
= self
._analysis
_conf
.end_ts
275 cls
= progressbar
.MiProgress
277 cls
= progressbar
.FancyProgressBar
279 self
._progress
= cls(self
._ts
_begin
, ts_end
, self
._args
.path
,
280 self
._args
.progress_use_size
)
282 def _pb_update(self
, event
):
283 if self
._args
.no_progress
:
286 self
._progress
.update(event
)
288 def _pb_finish(self
):
289 if self
._args
.no_progress
:
292 self
._progress
.finalize()
294 def _run_analysis(self
):
298 for event
in self
._traces
.events
:
299 self
._pb
_update
(event
)
300 self
._analysis
.process_event(event
)
301 if self
._analysis
.ended
:
303 self
._automaton
.process_event(event
)
307 self
._post
_analysis
()
309 def _print_date(self
, begin_ns
, end_ns
):
310 time_range_str
= format_utils
.format_time_range(
311 begin_ns
, end_ns
, print_date
=True, gmt
=self
._args
.gmt
313 date
= 'Timerange: {}'.format(time_range_str
)
def _format_timestamp(self, timestamp):
    """Format a single timestamp for display.

    Delegates to `format_utils.format_timestamp()`, including the
    date when the trace spans multiple days and honouring the
    user's GMT preference.
    """
    return format_utils.format_timestamp(
        timestamp, print_date=self._args.multi_day, gmt=self._args.gmt)
322 def _get_uniform_freq_values(self
, durations
):
323 if self
._args
.uniform_step
is not None:
324 return (self
._args
.uniform_min
, self
._args
.uniform_max
,
325 self
._args
.uniform_step
)
327 if self
._args
.min is not None:
328 self
._args
.uniform_min
= self
._args
.min
330 self
._args
.uniform_min
= min(durations
)
331 if self
._args
.max is not None:
332 self
._args
.uniform_max
= self
._args
.max
334 self
._args
.uniform_max
= max(durations
)
337 self
._args
.uniform_min
/= 1000
338 self
._args
.uniform_max
/= 1000
339 self
._args
.uniform_step
= (
340 (self
._args
.uniform_max
- self
._args
.uniform_min
) /
341 self
._args
.freq_resolution
344 return self
._args
.uniform_min
, self
._args
.uniform_max
, \
345 self
._args
.uniform_step
347 def _validate_transform_common_args(self
, args
):
348 refresh_period_ns
= None
349 if args
.refresh
is not None:
351 refresh_period_ns
= parse_utils
.parse_duration(args
.refresh
)
352 except ValueError as e
:
353 self
._cmdline
_error
(str(e
))
355 self
._analysis
_conf
= analysis
.AnalysisConfig()
356 self
._analysis
_conf
.refresh_period
= refresh_period_ns
357 self
._analysis
_conf
.period_begin_ev_name
= args
.period_begin
358 self
._analysis
_conf
.period_end_ev_name
= args
.period_end
359 self
._analysis
_conf
.period_begin_key_fields
= \
360 args
.period_begin_key
.split(',')
362 if args
.period_end_key
:
363 self
._analysis
_conf
.period_end_key_fields
= \
364 args
.period_end_key
.split(',')
366 self
._analysis
_conf
.period_end_key_fields
= \
367 self
._analysis
_conf
.period_begin_key_fields
369 if args
.period_key_value
:
370 self
._analysis
_conf
.period_key_value
= \
371 tuple(args
.period_key_value
.split(','))
374 self
._analysis
_conf
.cpu_list
= args
.cpu
.split(',')
375 self
._analysis
_conf
.cpu_list
= [int(cpu
) for cpu
in
376 self
._analysis
_conf
.cpu_list
]
378 # convert min/max args from µs to ns, if needed
379 if hasattr(args
, 'min') and args
.min is not None:
381 self
._analysis
_conf
.min_duration
= args
.min
382 if hasattr(args
, 'max') and args
.max is not None:
384 self
._analysis
_conf
.max_duration
= args
.max
386 if hasattr(args
, 'procname'):
388 self
._analysis
_conf
.proc_list
= args
.procname
.split(',')
390 if hasattr(args
, 'tid'):
392 self
._analysis
_conf
.tid_list
= args
.tid
.split(',')
393 self
._analysis
_conf
.tid_list
= [int(tid
) for tid
in
394 self
._analysis
_conf
.tid_list
]
396 if hasattr(args
, 'freq'):
397 args
.uniform_min
= None
398 args
.uniform_max
= None
399 args
.uniform_step
= None
402 # implies uniform buckets
403 args
.freq_uniform
= True
406 # print MI version if required
408 print(mi
.get_version_string())
411 # print MI metadata if required
413 self
._mi
_print
_metadata
()
416 # validate path argument (required at this point)
418 self
._cmdline
_error
('Please specify a trace path')
420 if type(args
.path
) is list:
421 args
.path
= args
.path
[0]
423 def _validate_transform_args(self
, args
):
426 def _parse_args(self
):
427 ap
= argparse
.ArgumentParser(description
=self
._DESC
)
430 ap
.add_argument('-r', '--refresh', type=str,
431 help='Refresh period, with optional units suffix '
432 '(default units: s)')
433 ap
.add_argument('--gmt', action
='store_true',
434 help='Manipulate timestamps based on GMT instead '
436 ap
.add_argument('--skip-validation', action
='store_true',
437 help='Skip the trace validation')
438 ap
.add_argument('--begin', type=str, help='start time: '
439 'hh:mm:ss[.nnnnnnnnn]')
440 ap
.add_argument('--end', type=str, help='end time: '
441 'hh:mm:ss[.nnnnnnnnn]')
442 ap
.add_argument('--period-begin', type=str,
443 help='Analysis period start marker event name')
444 ap
.add_argument('--period-end', type=str,
445 help='Analysis period end marker event name '
446 '(requires --period-begin)')
447 ap
.add_argument('--period-begin-key', type=str, default
='cpu_id',
448 help='Optional, list of event field names used to '
449 'match period markers (default: cpu_id)')
450 ap
.add_argument('--period-end-key', type=str,
451 help='Optional, list of event field names used to '
452 'match period marker. If none specified, use the same '
453 ' --period-begin-key')
454 ap
.add_argument('--period-key-value', type=str,
455 help='Optional, define a fixed key value to which a'
456 ' period must correspond to be considered.')
457 ap
.add_argument('--cpu', type=str,
458 help='Filter the results only for this list of '
460 ap
.add_argument('--timerange', type=str, help='time range: '
462 ap
.add_argument('--progress-use-size', action
='store_true',
463 help='use trace size to approximate progress')
464 ap
.add_argument('-V', '--version', action
='version',
465 version
='LTTng Analyses v{}'.format(self
._VERSION
))
467 # MI mode-dependent arguments
469 ap
.add_argument('--mi-version', action
='store_true',
470 help='Print MI version')
471 ap
.add_argument('--metadata', action
='store_true',
472 help='Print analysis\' metadata')
473 ap
.add_argument('--test-compatibility', action
='store_true',
474 help='Check if the provided trace is supported and exit')
475 ap
.add_argument('path', metavar
='<path/to/trace>',
476 help='trace path', nargs
='*')
477 ap
.add_argument('--output-progress', action
='store_true',
478 help='Print progress indication lines')
480 ap
.add_argument('--no-progress', action
='store_true',
481 help='Don\'t display the progress bar')
482 ap
.add_argument('path', metavar
='<path/to/trace>',
485 # Used to add command-specific args
486 self
._add
_arguments
(ap
)
488 args
= ap
.parse_args()
491 args
.no_progress
= True
493 if args
.output_progress
:
494 args
.no_progress
= False
496 self
._validate
_transform
_common
_args
(args
)
497 self
._validate
_transform
_args
(args
)
501 def _add_proc_filter_args(ap
):
502 ap
.add_argument('--procname', type=str,
503 help='Filter the results only for this list of '
505 ap
.add_argument('--tid', type=str,
506 help='Filter the results only for this list of TIDs')
509 def _add_min_max_args(ap
):
510 ap
.add_argument('--min', type=float,
511 help='Filter out durations shorter than min usec')
512 ap
.add_argument('--max', type=float,
513 help='Filter out durations longer than max usec')
516 def _add_freq_args(ap
, help=None):
518 help = 'Output the frequency distribution'
520 ap
.add_argument('--freq', action
='store_true', help=help)
521 ap
.add_argument('--freq-resolution', type=int, default
=20,
522 help='Frequency distribution resolution '
524 ap
.add_argument('--freq-uniform', action
='store_true',
525 help='Use a uniform resolution across distributions')
526 ap
.add_argument('--freq-series', action
='store_true',
527 help='Consolidate frequency distribution histogram '
531 def _add_log_args(ap
, help=None):
533 help = 'Output the events in chronological order'
535 ap
.add_argument('--log', action
='store_true', help=help)
538 def _add_top_args(ap
, help=None):
540 help = 'Output the top results'
542 ap
.add_argument('--limit', type=int, default
=10,
543 help='Limit to top X (default = 10)')
544 ap
.add_argument('--top', action
='store_true', help=help)
547 def _add_stats_args(ap
, help=None):
549 help = 'Output statistics'
551 ap
.add_argument('--stats', action
='store_true', help=help)
553 def _add_arguments(self
, ap
):
556 def _process_date_args(self
):
557 def parse_date(date
):
559 ts
= parse_utils
.parse_trace_collection_date(
560 self
._traces
, date
, self
._args
.gmt
562 except ValueError as e
:
563 self
._cmdline
_error
(str(e
))
567 self
._args
.multi_day
= trace_utils
.is_multi_day_trace_collection(
573 if self
._args
.timerange
:
576 parse_utils
.parse_trace_collection_time_range(
577 self
._traces
, self
._args
.timerange
, self
._args
.gmt
580 except ValueError as e
:
581 self
._cmdline
_error
(str(e
))
584 begin_ts
= parse_date(self
._args
.begin
)
586 end_ts
= parse_date(self
._args
.end
)
588 # We have to check if timestamp_begin is None, which
589 # it always is in older versions of babeltrace. In
590 # that case, the test is simply skipped and an invalid
591 # --end value will cause an empty analysis
592 if self
._ts
_begin
is not None and \
593 end_ts
< self
._ts
_begin
:
595 '--end timestamp before beginning of trace')
597 self
._analysis
_conf
.begin_ts
= begin_ts
598 self
._analysis
_conf
.end_ts
= end_ts
600 def _create_analysis(self
):
602 analysis
.Analysis
.TICK_CB
: self
._analysis
_tick
_cb
605 self
._analysis
= self
._ANALYSIS
_CLASS
(self
.state
, self
._analysis
_conf
)
606 self
._analysis
.register_notification_cbs(notification_cbs
)
def _create_automaton(self):
    """Instantiate the kernel-state automaton and expose its state."""
    aut = automaton.Automaton()
    self._automaton = aut
    # The automaton owns the state object; keep a direct reference.
    self.state = aut.state
612 def _analysis_tick_cb(self
, **kwargs
):
613 begin_ns
= kwargs
['begin_ns']
614 end_ns
= kwargs
['end_ns']
616 self
._analysis
_tick
(begin_ns
, end_ns
)
619 def _analysis_tick(self
, begin_ns
, end_ns
):
620 raise NotImplementedError()
This page took 0.044626 seconds and 4 git commands to generate.