1 # The MIT License (MIT)
3 # Copyright (C) 2015 - Julien Desfossez <jdesfossez@efficios.com>
4 # 2015 - Philippe Proulx <pproulx@efficios.com>
6 # Permission is hereby granted, free of charge, to any person obtaining a copy
7 # of this software and associated documentation files (the "Software"), to deal
8 # in the Software without restriction, including without limitation the rights
9 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 # copies of the Software, and to permit persons to whom the Software is
11 # furnished to do so, subject to the following conditions:
13 # The above copyright notice and this permission notice shall be included in
14 # all copies or substantial portions of the Software.
16 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# Reassembled import block: statements were split across physical lines in the
# capture. Stdlib imports (argparse, json, re, subprocess, sys) and the local
# `mi` module are used by the visible code below but their import lines are
# missing from this capture — re-added here; confirm against the full file.
import argparse
import json
import re
import subprocess
import sys

from babeltrace import TraceCollection

from . import mi
from . import progressbar
from .. import __version__
from .. import _version
from ..core import analysis
from ..linuxautomaton import automaton
from ..linuxautomaton import common
40 _MI_BASE_TAGS
= ['linux-kernel', 'lttng-analyses']
46 _MI_URL
= 'https://github.com/lttng/lttng-analyses'
48 def __init__(self
, mi_mode
=False):
50 self
._analysis
_conf
= None
55 self
._mi
_mode
= mi_mode
56 self
._create
_automaton
()
67 self
._create
_analysis
()
70 except KeyboardInterrupt:
73 def _error(self
, msg
, exit_code
=1):
77 msg
= termcolor
.colored(msg
, 'red', attrs
=['bold'])
81 print(msg
, file=sys
.stderr
)
84 def _gen_error(self
, msg
, exit_code
=1):
85 self
._error
('Error: {}'.format(msg
), exit_code
)
87 def _cmdline_error(self
, msg
, exit_code
=1):
88 self
._error
('Command line error: {}'.format(msg
), exit_code
)
90 def _print(self
, msg
):
def _mi_create_result_table(self, table_class_name, begin, end,
                            subtitle=None):
    """Create an MI result table for the registered table class
    *table_class_name* covering [begin, end].

    NOTE(review): the `subtitle=None` parameter and the second argument
    line of the ResultTable() call are reconstructed — those lines are
    missing from this capture. Confirm against the full file.
    """
    return mi.ResultTable(self._mi_table_classes[table_class_name],
                          begin, end, subtitle)
# NOTE(review): the `def` line of this method is missing from this capture;
# the name `_mi_setup` is an assumption — confirm against the full file.
def _mi_setup(self):
    """Build the MI table-class registry from self._MI_TABLE_CLASSES
    (tuples of (name, title, column list)) and reset the result-table
    store."""
    self._mi_table_classes = {}

    for tc_tuple in self._MI_TABLE_CLASSES:
        table_class = mi.TableClass(tc_tuple[0], tc_tuple[1], tc_tuple[2])
        self._mi_table_classes[table_class.name] = table_class

    self._mi_clear_result_tables()
def _mi_print_metadata(self):
    """Print this command's MI metadata (version, title, authors, tags,
    table classes, ...) as a JSON document on stdout."""
    tags = self._MI_BASE_TAGS + self._MI_TAGS
    infos = mi.get_metadata(version=self._MI_VERSION, title=self._MI_TITLE,
                            description=self._MI_DESCRIPTION,
                            authors=self._MI_AUTHORS, url=self._MI_URL,
                            # reconstructed: the line passing `tags` is
                            # missing from this capture, but `tags` is
                            # computed above and otherwise unused — confirm
                            tags=tags,
                            table_classes=self._mi_table_classes.values())
    print(json.dumps(infos))
117 def _mi_append_result_table(self
, result_table
):
118 if not result_table
or not result_table
.rows
:
121 tc_name
= result_table
.table_class
.name
122 self
._mi
_get
_result
_tables
(tc_name
).append(result_table
)
124 def _mi_append_result_tables(self
, result_tables
):
125 if not result_tables
:
128 for result_table
in result_tables
:
129 self
._mi
_append
_result
_table
(result_table
)
131 def _mi_clear_result_tables(self
):
132 self
._result
_tables
= {}
134 def _mi_get_result_tables(self
, table_class_name
):
135 if table_class_name
not in self
._result
_tables
:
136 self
._result
_tables
[table_class_name
] = []
138 return self
._result
_tables
[table_class_name
]
143 for result_tables
in self
._result
_tables
.values():
144 for result_table
in result_tables
:
145 results
.append(result_table
.to_native_object())
151 print(json
.dumps(obj
))
153 def _create_summary_result_tables(self
):
def _open_trace(self):
    """Open the CTF trace(s) under self._args.path, record the handles,
    process date arguments, and (unless --skip-validation) check for
    lost events. Exits via _gen_error() when the path cannot be
    opened."""
    traces = TraceCollection()
    handles = traces.add_traces_recursive(self._args.path, 'ctf')
    # reconstructed guard: the condition line is missing from this capture,
    # but the error call below only makes sense when nothing was opened —
    # confirm the exact test against the full file
    if handles == {}:
        self._gen_error('Failed to open ' + self._args.path, -1)
    self._handles = handles
    self._traces = traces
    self._process_date_args()
    if not self._args.skip_validation:
        self._check_lost_events()
167 def _close_trace(self
):
168 for handle
in self
._handles
.values():
169 self
._traces
.remove_trace(handle
)
171 def _check_lost_events(self
):
172 self
._print
('Checking the trace for lost events...')
174 subprocess
.check_output('babeltrace "%s"' % self
._args
.path
,
176 except subprocess
.CalledProcessError
:
177 self
._gen
_error
('Cannot run babeltrace on the trace, cannot verify'
178 ' if events were lost during the trace recording')
180 def _pre_analysis(self
):
183 def _post_analysis(self
):
184 if not self
._mi
_mode
:
188 self
._create
_summary
_result
_tables
()
def _run_analysis(self):
    """Main event loop: feed every trace event to the analysis and the
    state automaton (with progress display), stop early when the
    analysis signals completion, then run the post-analysis step."""
    # NOTE(review): the line before progressbar_setup is missing from this
    # capture; calling the _pre_analysis hook here mirrors the visible
    # _post_analysis call at the end — confirm
    self._pre_analysis()
    progressbar.progressbar_setup(self)

    for event in self._traces.events:
        progressbar.progressbar_update(self)
        self._analysis.process_event(event)
        if self._analysis.ended:
            # reconstructed: the if-body line is missing; `break` (rather
            # than `return`) is assumed so the finish/post steps still run
            break
        self._automaton.process_event(event)

    progressbar.progressbar_finish(self)
    # NOTE(review): the line between finish and _post_analysis is missing
    # from this capture; finalizing the analysis here is an assumption —
    # confirm against the full file
    self._analysis.end()
    self._post_analysis()
def _print_date(self, begin_ns, end_ns):
    """Print the analyzed time range, formatting both nanosecond
    timestamps with common.ns_to_hour_nsec().

    NOTE(review): the continuation lines of both ns_to_hour_nsec()
    calls and the final output statement are missing from this capture;
    the multi_day keyword and the self._print() call are reconstructed
    — confirm against the full file.
    """
    date = 'Timerange: [%s, %s]' % (
        common.ns_to_hour_nsec(begin_ns, gmt=self._args.gmt,
                               multi_day=self._args.multi_day),
        common.ns_to_hour_nsec(end_ns, gmt=self._args.gmt,
                               multi_day=self._args.multi_day))

    self._print(date)
215 def _get_uniform_freq_values(self
, durations
):
216 if self
._args
.uniform_step
is not None:
217 return (self
._args
.uniform_min
, self
._args
.uniform_max
,
218 self
._args
.uniform_step
)
220 if self
._args
.min is not None:
221 self
._args
.uniform_min
= self
._args
.min
223 self
._args
.uniform_min
= min(durations
)
224 if self
._args
.max is not None:
225 self
._args
.uniform_max
= self
._args
.max
227 self
._args
.uniform_max
= max(durations
)
230 self
._args
.uniform_min
/= 1000
231 self
._args
.uniform_max
/= 1000
232 self
._args
.uniform_step
= (
233 (self
._args
.uniform_max
- self
._args
.uniform_min
) /
234 self
._args
.freq_resolution
237 return self
._args
.uniform_min
, self
._args
.uniform_max
, \
238 self
._args
.uniform_step
def _validate_transform_common_args(self, args):
    """Validate the arguments shared by all commands and transfer them
    into a fresh analysis.AnalysisConfig on self._analysis_conf.

    Exits via _cmdline_error() on invalid refresh durations or a
    missing trace path. Several guard lines below are reconstructed
    (marked) — the capture of this file is missing them.
    """
    refresh_period_ns = None
    if args.refresh is not None:
        # reconstructed `try:` — forced by the visible `except ValueError`
        try:
            refresh_period_ns = common.duration_str_to_ns(args.refresh)
        except ValueError as e:
            self._cmdline_error(str(e))

    self._analysis_conf = analysis.AnalysisConfig()
    self._analysis_conf.refresh_period = refresh_period_ns
    self._analysis_conf.period_begin_ev_name = args.period_begin
    self._analysis_conf.period_end_ev_name = args.period_end
    self._analysis_conf.period_key_fields = args.period_key.split(',')
    # reconstructed guard: args.cpu is optional, so split() must be gated —
    # confirm exact condition
    if args.cpu:
        self._analysis_conf.cpu_list = args.cpu.split(',')
        self._analysis_conf.cpu_list = [int(cpu) for cpu in
                                        self._analysis_conf.cpu_list]

    # convert min/max args from µs to ns, if needed
    if hasattr(args, 'min') and args.min is not None:
        # reconstructed conversion line (missing from capture) — the
        # comment above says µs→ns; confirm the factor
        args.min *= 1000
        self._analysis_conf.min_duration = args.min
    if hasattr(args, 'max') and args.max is not None:
        # reconstructed conversion line — see note above
        args.max *= 1000
        self._analysis_conf.max_duration = args.max

    if hasattr(args, 'procname'):
        # reconstructed inner guard: procname is optional — confirm
        if args.procname:
            self._analysis_conf.proc_list = args.procname.split(',')

    if hasattr(args, 'tid'):
        # reconstructed inner guard: tid is optional — confirm
        if args.tid:
            self._analysis_conf.tid_list = args.tid.split(',')
            self._analysis_conf.tid_list = [int(tid) for tid in
                                            self._analysis_conf.tid_list]

    if hasattr(args, 'freq'):
        args.uniform_min = None
        args.uniform_max = None
        args.uniform_step = None

        # reconstructed guard: the '# implies uniform buckets' comment and
        # the freq_uniform assignment suggest gating on --freq-series —
        # confirm
        if args.freq_series:
            # implies uniform buckets
            args.freq_uniform = True

    # reconstructed guard: the two MI-mode comments below only make sense
    # inside an `if self._mi_mode:` block — confirm
    if self._mi_mode:
        # force no progress in MI mode
        args.no_progress = True

        # print MI metadata if required
        if args.metadata:
            self._mi_print_metadata()
            sys.exit(0)

    # validate path argument (required at this point)
    # reconstructed guard for the error call below — confirm
    if not args.path:
        self._cmdline_error('Please specify a trace path')

    if type(args.path) is list:
        args.path = args.path[0]
301 def _validate_transform_args(self
, args
):
def _parse_args(self):
    """Build the argument parser (common options, MI-mode-dependent
    options, then subclass options via _add_arguments()), parse
    sys.argv, and run common + command-specific validation.

    Several lines are reconstructed (marked) — the capture of this file
    is missing them; confirm help-string continuations and the MI-mode
    if/else against the full file.
    """
    ap = argparse.ArgumentParser(description=self._DESC)

    ap.add_argument('-r', '--refresh', type=str,
                    help='Refresh period, with optional units suffix '
                    '(default units: s)')
    ap.add_argument('--gmt', action='store_true',
                    help='Manipulate timestamps based on GMT instead '
                    'of local time')  # continuation reconstructed
    ap.add_argument('--skip-validation', action='store_true',
                    help='Skip the trace validation')
    ap.add_argument('--begin', type=str, help='start time: '
                    'hh:mm:ss[.nnnnnnnnn]')
    ap.add_argument('--end', type=str, help='end time: '
                    'hh:mm:ss[.nnnnnnnnn]')
    ap.add_argument('--period-begin', type=str,
                    help='Analysis period start marker event name')
    ap.add_argument('--period-end', type=str,
                    help='Analysis period end marker event name '
                    '(requires --period-begin)')
    ap.add_argument('--period-key', type=str, default='cpu_id',
                    help='Optional, list of event field names used to '
                    'match period markers (default: cpu_id)')
    ap.add_argument('--cpu', type=str,
                    help='Filter the results only for this list of '
                    'CPU IDs')  # continuation reconstructed
    ap.add_argument('--timerange', type=str, help='time range: '
                    '[begin,end]')  # continuation reconstructed
    ap.add_argument('-V', '--version', action='version',
                    version='LTTng Analyses v' + __version__)

    # reconstructed branch: the '--metadata' option and the nargs='*' path
    # argument only apply in MI mode, the '--no-progress' variant otherwise
    # — the if/else lines are missing from this capture; confirm
    if self._mi_mode:
        # MI mode-dependent arguments
        ap.add_argument('--metadata', action='store_true',
                        help='Show analysis\'s metadata')
        ap.add_argument('path', metavar='<path/to/trace>',
                        help='trace path', nargs='*')
    else:
        ap.add_argument('--no-progress', action='store_true',
                        help='Don\'t display the progress bar')
        ap.add_argument('path', metavar='<path/to/trace>',
                        help='trace path')  # continuation reconstructed

    # Used to add command-specific args
    self._add_arguments(ap)

    args = ap.parse_args()
    self._validate_transform_common_args(args)
    self._validate_transform_args(args)
    # reconstructed: the parsed namespace must be stored — the rest of the
    # class reads self._args throughout; the assignment line is missing
    # from this capture
    self._args = args
357 def _add_proc_filter_args(ap
):
358 ap
.add_argument('--procname', type=str,
359 help='Filter the results only for this list of '
361 ap
.add_argument('--tid', type=str,
362 help='Filter the results only for this list of TIDs')
365 def _add_min_max_args(ap
):
366 ap
.add_argument('--min', type=float,
367 help='Filter out durations shorter than min usec')
368 ap
.add_argument('--max', type=float,
369 help='Filter out durations longer than max usec')
372 def _add_freq_args(ap
, help=None):
374 help = 'Output the frequency distribution'
376 ap
.add_argument('--freq', action
='store_true', help=help)
377 ap
.add_argument('--freq-resolution', type=int, default
=20,
378 help='Frequency distribution resolution '
380 ap
.add_argument('--freq-uniform', action
='store_true',
381 help='Use a uniform resolution across distributions')
382 ap
.add_argument('--freq-series', action
='store_true',
383 help='Consolidate frequency distribution histogram '
387 def _add_log_args(ap
, help=None):
389 help = 'Output the events in chronological order'
391 ap
.add_argument('--log', action
='store_true', help=help)
394 def _add_top_args(ap
, help=None):
396 help = 'Output the top results'
398 ap
.add_argument('--limit', type=int, default
=10,
399 help='Limit to top X (default = 10)')
400 ap
.add_argument('--top', action
='store_true', help=help)
403 def _add_stats_args(ap
, help=None):
405 help = 'Output statistics'
407 ap
.add_argument('--stats', action
='store_true', help=help)
409 def _add_arguments(self
, ap
):
def _process_date_args(self):
    """Resolve --begin/--end/--timerange into nanosecond timestamps,
    record whether the trace spans multiple days, and store the result
    on self._analysis_conf. Exits via _cmdline_error() on bad formats.

    Some guard/initializer lines below are reconstructed (marked) —
    the capture of this file is missing them.
    """
    def date_to_epoch_nsec(date):
        ts = common.date_to_epoch_nsec(self._handles, date, self._args.gmt)
        # reconstructed guard for the visible error call — confirm
        if ts is None:
            self._cmdline_error('Invalid date format: "{}"'.format(date))

        return ts

    self._args.multi_day = common.is_multi_day_trace_collection(
        self._handles)
    # reconstructed initializers: both values are assigned unconditionally
    # at the end of the method, so they must default to None
    begin_ts = None
    end_ts = None

    if self._args.timerange:
        begin_ts, end_ts = common.extract_timerange(self._handles,
                                                    self._args.timerange,
                                                    self._args.gmt)
        if None in [begin_ts, end_ts]:
            self._cmdline_error(
                'Invalid time format: "{}"'.format(self._args.timerange))
    else:
        # reconstructed else-branch structure (--begin/--end handled
        # individually when no --timerange) — confirm
        if self._args.begin:
            begin_ts = date_to_epoch_nsec(self._args.begin)
        if self._args.end:
            end_ts = date_to_epoch_nsec(self._args.end)

            # We have to check if timestamp_begin is None, which
            # it always is in older versions of babeltrace. In
            # that case, the test is simply skipped and an invalid
            # --end value will cause an empty analysis
            if self._traces.timestamp_begin is not None and \
                    end_ts < self._traces.timestamp_begin:
                self._cmdline_error(
                    '--end timestamp before beginning of trace')

    self._analysis_conf.begin_ts = begin_ts
    self._analysis_conf.end_ts = end_ts
def _create_analysis(self):
    """Instantiate the command's analysis class with the shared state
    and config, and register the tick notification callback."""
    # reconstructed dict braces: the open/close lines are missing from this
    # capture, but the visible key/value pair and the use of
    # `notification_cbs` below force a dict literal
    notification_cbs = {
        analysis.Analysis.TICK_CB: self._analysis_tick_cb
    }

    self._analysis = self._ANALYSIS_CLASS(self.state, self._analysis_conf)
    self._analysis.register_notification_cbs(notification_cbs)
def _create_automaton(self):
    """Create the Linux state automaton and expose its state object on
    self.state for the analysis."""
    self._automaton = automaton.Automaton()
    self.state = self._automaton.state
462 def _analysis_tick_cb(self
, **kwargs
):
463 begin_ns
= kwargs
['begin_ns']
464 end_ns
= kwargs
['end_ns']
466 self
._analysis
_tick
(begin_ns
, end_ns
)
469 def _analysis_tick(self
, begin_ns
, end_ns
):
470 raise NotImplementedError()
474 _cmd_version
= _version
.get_versions()['version']
475 _version_match
= re
.match(r
'(\d+)\.(\d+)\.(\d+)(.*)', _cmd_version
)
476 Command
._MI
_VERSION
= [
477 int(_version_match
.group(1)),
478 int(_version_match
.group(2)),
479 int(_version_match
.group(3)),
480 _version_match
.group(4),
# (gitweb scrape residue, not part of the source) This page took 0.042419 seconds and 6 git commands to generate.