602d7dd722d5baa05e184b4897dadc530925d72f
1 # The MIT License (MIT)
3 # Copyright (C) 2015 - Julien Desfossez <jdesfossez@efficios.com>
4 # 2015 - Philippe Proulx <pproulx@efficios.com>
5 # 2015 - Antoine Busque <abusque@efficios.com>
7 # Permission is hereby granted, free of charge, to any person obtaining a copy
8 # of this software and associated documentation files (the "Software"), to deal
9 # in the Software without restriction, including without limitation the rights
10 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11 # copies of the Software, and to permit persons to whom the Software is
12 # furnished to do so, subject to the following conditions:
14 # The above copyright notice and this permission notice shall be included in
15 # all copies or substantial portions of the Software.
17 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
20 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
import argparse
import json
import os
import re
import subprocess
import sys

from babeltrace import TraceCollection

from . import mi, progressbar
from .. import _version, __version__
from ..core import analysis
from ..common import (
    format_utils, parse_utils, time_utils, trace_utils, version_utils
)
from ..linuxautomaton import automaton
# Tags shared by every analysis shipped in this package (MI metadata).
_MI_BASE_TAGS = ['linux-kernel', 'lttng-analyses']
# Project URL advertised in the MI metadata.
_MI_URL = 'https://github.com/lttng/lttng-analyses'
def __init__(self, mi_mode=False):
    """Initialize the command.

    mi_mode -- True to emit machine-interface (JSON) output instead of
               human-readable output.
    """
    # NOTE(review): several initializer lines were lost in extraction;
    # the attribute defaults below are reconstructed — verify upstream.
    self._analysis_conf = None
    self._args = None
    self._handles = None
    self._traces = None
    # number of analysis ticks seen so far (used by _post_analysis)
    self._ticks = 0
    self._mi_mode = mi_mode
    self._create_automaton()
    self._mi_setup()
def run(self):
    """Run the full command pipeline: parse args, open the trace,
    run the analysis, close the trace.

    A KeyboardInterrupt (Ctrl+C) exits cleanly with status 0.
    """
    # NOTE(review): the def line and most of this body were lost in
    # extraction; reconstructed from the visible lines — verify upstream.
    try:
        self._parse_args()
        self._open_trace()
        self._create_analysis()
        self._run_analysis()
        self._close_trace()
    except KeyboardInterrupt:
        sys.exit(0)
def _mi_error(self, msg, code=None):
    """Print a machine-interface error object as one JSON line on stdout.

    msg  -- human-readable error message
    code -- optional machine-readable error code
    """
    print(json.dumps(mi.get_error(msg, code)))
def _non_mi_error(self, msg):
    """Print a human-readable error on stderr.

    Uses bold red text when the optional termcolor package is
    installed; falls back to plain text otherwise.
    """
    try:
        import termcolor

        msg = termcolor.colored(msg, 'red', attrs=['bold'])
    except ImportError:
        # best-effort colouring only; never fail because of it
        pass

    print(msg, file=sys.stderr)
def _error(self, msg, exit_code=1, code=None):
    """Report an error (MI or human form, depending on mode) and exit.

    msg       -- error message
    exit_code -- process exit status (default 1)
    code      -- optional MI error code, forwarded to _mi_error

    NOTE(review): the parameter order in the garbled source is
    ambiguous (`msg, code, exit_code`); callers pass the exit code as
    the second positional argument, so it is kept second here — verify
    against upstream.
    """
    if self._mi_mode:
        self._mi_error(msg, code)
    else:
        self._non_mi_error(msg)

    sys.exit(exit_code)
def _gen_error(self, msg, exit_code=1):
    """Report a general error ('Error: ...') and exit with exit_code."""
    self._error('Error: {}'.format(msg), exit_code)
def _cmdline_error(self, msg, exit_code=1):
    """Report a command-line usage error and exit with exit_code."""
    self._error('Command line error: {}'.format(msg), exit_code)
def _print(self, msg):
    """Print msg on stdout, but only outside MI mode (MI output must
    stay pure JSON)."""
    if not self._mi_mode:
        print(msg)
def _mi_create_result_table(self, table_class_name, begin, end,
                            subtitle=None):
    """Create an MI result table of the registered class
    table_class_name, spanning [begin, end], with an optional
    subtitle."""
    return mi.ResultTable(self._mi_table_classes[table_class_name],
                          begin, end, subtitle)
def _mi_setup(self):
    """Build the MI table-class registry from the subclass-provided
    _MI_TABLE_CLASSES tuples (name, title, column list) and reset the
    result tables.

    NOTE(review): the def line was lost in extraction; the method name
    is reconstructed — verify upstream.
    """
    self._mi_table_classes = {}

    for tc_tuple in self._MI_TABLE_CLASSES:
        table_class = mi.TableClass(tc_tuple[0], tc_tuple[1], tc_tuple[2])
        self._mi_table_classes[table_class.name] = table_class

    self._mi_clear_result_tables()
def _mi_print_metadata(self):
    """Print this analysis's MI metadata (version, title, authors,
    tags, table classes) as one JSON object on stdout."""
    tags = self._MI_BASE_TAGS + self._MI_TAGS
    infos = mi.get_metadata(version=self._MI_VERSION, title=self._MI_TITLE,
                            description=self._MI_DESCRIPTION,
                            authors=self._MI_AUTHORS, url=self._MI_URL,
                            tags=tags,
                            table_classes=self._mi_table_classes.values())
    print(json.dumps(infos))
def _mi_append_result_table(self, result_table):
    """Append a non-empty result table to the per-class result list;
    tables that are None or have no rows are silently ignored."""
    if not result_table or not result_table.rows:
        return

    tc_name = result_table.table_class.name
    self._mi_get_result_tables(tc_name).append(result_table)
def _mi_append_result_tables(self, result_tables):
    """Append each table of result_tables (may be None/empty) via
    _mi_append_result_table."""
    if not result_tables:
        return

    for result_table in result_tables:
        self._mi_append_result_table(result_table)
def _mi_clear_result_tables(self):
    """Drop all accumulated MI result tables."""
    self._result_tables = {}
def _mi_get_result_tables(self, table_class_name):
    """Return the (mutable) list of result tables for
    table_class_name, creating an empty list on first access."""
    if table_class_name not in self._result_tables:
        self._result_tables[table_class_name] = []

    return self._result_tables[table_class_name]
def _mi_print(self):
    """Print every accumulated result table as one JSON object
    ({'results': [...]}) on stdout.

    NOTE(review): the def line and the obj literal were lost in
    extraction; reconstructed — verify upstream.
    """
    results = []

    for result_tables in self._result_tables.values():
        for result_table in result_tables:
            results.append(result_table.to_native_object())

    obj = {
        'results': results,
    }

    print(json.dumps(obj))
def _create_summary_result_tables(self):
    """Hook for subclasses: build summary tables after a multi-tick
    analysis. Default does nothing."""
    pass
def _open_trace(self):
    """Open all CTF traces under self._args.path and record the trace
    collection, its handles and its begin/end timestamps; then process
    date arguments, read the tracer version and (unless skipped)
    validate the trace."""
    traces = TraceCollection()
    handles = traces.add_traces_recursive(self._args.path, 'ctf')
    # NOTE(review): the guard around this error call was lost in
    # extraction; add_traces_recursive returns an empty dict on failure.
    if handles == {}:
        self._gen_error('Failed to open ' + self._args.path, -1)
    self._handles = handles
    self._traces = traces
    self._ts_begin = traces.timestamp_begin
    self._ts_end = traces.timestamp_end
    self._process_date_args()
    self._read_tracer_version()
    if not self._args.skip_validation:
        self._check_lost_events()
def _close_trace(self):
    """Remove every opened trace handle from the trace collection."""
    for handle in self._handles.values():
        self._traces.remove_trace(handle)
def _read_tracer_version(self):
    """Extract the LTTng tracer version from the kernel trace metadata
    and store it in self.state.tracer_version.

    Tries `babeltrace -o ctf-metadata` first, falling back to reading
    the text metadata file directly.
    """
    kernel_path = None
    # remove the trailing /
    while self._args.path.endswith('/'):
        self._args.path = self._args.path[:-1]
    for root, _, _ in os.walk(self._args.path):
        if root.endswith('kernel'):
            kernel_path = root
            break

    if kernel_path is None:
        self._gen_error('Could not find kernel trace directory')

    try:
        # NOTE(review): shell-string invocation with an interpolated
        # path — quoted, but a parameterized argv list would be safer.
        ret, metadata = subprocess.getstatusoutput(
            'babeltrace -o ctf-metadata "%s"' % kernel_path)
    except subprocess.CalledProcessError:
        self._gen_error('Cannot run babeltrace on the trace, cannot read'
                        ' tracer version')

    # fallback to reading the text metadata if babeltrace failed to
    # output the CTF metadata
    if ret != 0:
        try:
            metadata = subprocess.getoutput(
                'cat "%s"' % os.path.join(kernel_path, 'metadata'))
        except subprocess.CalledProcessError:
            # bug fix: the original adjacent string literals were
            # missing a separating space ('cannotextract')
            self._gen_error('Cannot read the metadata of the trace, cannot'
                            ' extract tracer version')

    major_match = re.search(r'tracer_major = "*(\d+)"*', metadata)
    minor_match = re.search(r'tracer_minor = "*(\d+)"*', metadata)
    patch_match = re.search(r'tracer_patchlevel = "*(\d+)"*', metadata)

    if not major_match or not minor_match or not patch_match:
        self._gen_error('Malformed metadata, cannot read tracer version')

    self.state.tracer_version = version_utils.Version(
        int(major_match.group(1)),
        int(minor_match.group(1)),
        int(patch_match.group(1)),
    )
def _check_lost_events(self):
    """Run babeltrace over the whole trace so its warnings reveal
    events lost during recording."""
    msg = 'Checking the trace for lost events...'
    self._print(msg)
    if self._mi_mode and self._args.output_progress:
        mi.print_progress(0, msg)

    try:
        # NOTE(review): shell-string invocation with an interpolated
        # path — quoted, but a parameterized argv list would be safer.
        subprocess.check_output('babeltrace "%s"' % self._args.path,
                                shell=True)
    except subprocess.CalledProcessError:
        self._gen_error('Cannot run babeltrace on the trace, cannot verify'
                        ' if events were lost during the trace recording')
def _pre_analysis(self):
    """Hook for subclasses: run before event processing starts.
    Default does nothing."""
    pass
def _post_analysis(self):
    """Finish the analysis: in MI mode, emit summary tables (for
    multi-tick runs) and print the accumulated results; otherwise do
    nothing.

    NOTE(review): several lines of this body were lost in extraction;
    the tick condition and final _mi_print call are reconstructed —
    verify upstream.
    """
    if not self._mi_mode:
        return

    if self._ticks > 1:
        self._create_summary_result_tables()

    self._mi_print()
def _pb_setup(self):
    """Create the progress indicator (MI line-based or fancy terminal
    bar) unless --no-progress was given.

    NOTE(review): the def line was lost in extraction; the method name
    is reconstructed from the _pb_update/_pb_finish siblings — verify
    upstream.
    """
    if self._args.no_progress:
        return

    ts_end = self._ts_end

    if self._analysis_conf.end_ts is not None:
        # the analysis stops before the trace end
        ts_end = self._analysis_conf.end_ts

    if self._mi_mode:
        cls = progressbar.MiProgress
    else:
        cls = progressbar.FancyProgressBar

    self._progress = cls(self._ts_begin, ts_end, self._args.path,
                         self._args.progress_use_size)
def _pb_update(self, event):
    """Advance the progress indicator with event; no-op when progress
    display is disabled."""
    if self._args.no_progress:
        return

    self._progress.update(event)
def _pb_finish(self):
    """Finalize the progress indicator; no-op when progress display is
    disabled."""
    if self._args.no_progress:
        return

    self._progress.finalize()
def _run_analysis(self):
    """Feed every trace event to the analysis and the state automaton,
    driving the progress indicator, until the trace or the analysis
    ends."""
    self._pre_analysis()
    self._pb_setup()

    for event in self._traces.events:
        self._pb_update(event)
        self._analysis.process_event(event)
        if self._analysis.ended:
            break
        # keep the kernel state machine in sync with the event stream
        self._automaton.process_event(event)

    self._pb_finish()
    self._post_analysis()
def _print_date(self, begin_ns, end_ns):
    """Print the analysis time range [begin_ns, end_ns] (honouring
    --gmt) outside MI mode."""
    time_range_str = format_utils.format_time_range(
        begin_ns, end_ns, print_date=True, gmt=self._args.gmt
    )
    date = 'Timerange: {}'.format(time_range_str)

    self._print(date)
def _format_timestamp(self, timestamp):
    """Format one timestamp for display, including the date for
    multi-day traces and honouring --gmt."""
    return format_utils.format_timestamp(
        timestamp, print_date=self._args.multi_day, gmt=self._args.gmt
    )
def _get_uniform_freq_values(self, durations):
    """Return (min, max, step) bucket parameters, in µs, for uniform
    frequency distributions.

    The first call computes them from --min/--max (falling back to the
    observed durations, in ns) and --freq-resolution, caching the
    result in self._args; subsequent calls return the cached values.
    """
    if self._args.uniform_step is not None:
        return (self._args.uniform_min, self._args.uniform_max,
                self._args.uniform_step)

    if self._args.min is not None:
        self._args.uniform_min = self._args.min
    else:
        self._args.uniform_min = min(durations)
    if self._args.max is not None:
        self._args.uniform_max = self._args.max
    else:
        self._args.uniform_max = max(durations)

    # ns to µs
    self._args.uniform_min /= 1000
    self._args.uniform_max /= 1000
    self._args.uniform_step = (
        (self._args.uniform_max - self._args.uniform_min) /
        self._args.freq_resolution
    )

    return self._args.uniform_min, self._args.uniform_max, \
        self._args.uniform_step
def _validate_transform_common_args(self, args):
    """Validate and normalize arguments shared by all analyses, and
    build self._analysis_conf from them.

    Exits via _cmdline_error on invalid values. In MI mode, handles
    --mi-version and --metadata (both print and exit).

    NOTE(review): a number of guard/else lines were lost in
    extraction; reconstructed — verify against upstream.
    """
    refresh_period_ns = None
    if args.refresh is not None:
        try:
            refresh_period_ns = parse_utils.parse_duration(args.refresh)
        except ValueError as e:
            self._cmdline_error(str(e))

    self._analysis_conf = analysis.AnalysisConfig()
    self._analysis_conf.refresh_period = refresh_period_ns
    self._analysis_conf.period_begin_ev_name = args.period_begin
    self._analysis_conf.period_end_ev_name = args.period_end
    self._analysis_conf.period_begin_key_fields = \
        args.period_begin_key.split(',')

    if args.period_end_key:
        self._analysis_conf.period_end_key_fields = \
            args.period_end_key.split(',')
    else:
        # default: match period end markers on the same keys as begin
        self._analysis_conf.period_end_key_fields = \
            self._analysis_conf.period_begin_key_fields

    if args.period_key_value:
        self._analysis_conf.period_key_value = \
            tuple(args.period_key_value.split(','))

    if args.cpu:
        self._analysis_conf.cpu_list = args.cpu.split(',')
        self._analysis_conf.cpu_list = [int(cpu) for cpu in
                                        self._analysis_conf.cpu_list]

    # convert min/max args from µs to ns, if needed
    if hasattr(args, 'min') and args.min is not None:
        args.min *= 1000
        self._analysis_conf.min_duration = args.min
    if hasattr(args, 'max') and args.max is not None:
        args.max *= 1000
        self._analysis_conf.max_duration = args.max

    if hasattr(args, 'procname'):
        if args.procname:
            self._analysis_conf.proc_list = args.procname.split(',')

    if hasattr(args, 'tid'):
        if args.tid:
            self._analysis_conf.tid_list = args.tid.split(',')
            self._analysis_conf.tid_list = [int(tid) for tid in
                                            self._analysis_conf.tid_list]

    if hasattr(args, 'freq'):
        args.uniform_min = None
        args.uniform_max = None
        args.uniform_step = None

        if args.freq_series:
            # implies uniform buckets
            args.freq_uniform = True

    if self._mi_mode:
        # print MI version if required
        if args.mi_version:
            print(mi.get_version_string())
            sys.exit(0)

        # print MI metadata if required
        if args.metadata:
            self._mi_print_metadata()
            sys.exit(0)

    # validate path argument (required at this point)
    if not args.path:
        self._cmdline_error('Please specify a trace path')

    if type(args.path) is list:
        args.path = args.path[0]
def _validate_transform_args(self, args):
    """Hook for subclasses: validate command-specific arguments.
    Default does nothing."""
    pass
def _parse_args(self):
    """Build the argument parser (common + MI-mode-dependent +
    command-specific arguments), parse the command line, and validate
    the result into self._args.

    NOTE(review): several help-string continuation lines were lost in
    extraction; reconstructed — verify against upstream.
    """
    ap = argparse.ArgumentParser(description=self._DESC)

    # common arguments
    ap.add_argument('-r', '--refresh', type=str,
                    help='Refresh period, with optional units suffix '
                    '(default units: s)')
    ap.add_argument('--gmt', action='store_true',
                    help='Manipulate timestamps based on GMT instead '
                    'of local time')
    ap.add_argument('--skip-validation', action='store_true',
                    help='Skip the trace validation')
    ap.add_argument('--begin', type=str, help='start time: '
                    'hh:mm:ss[.nnnnnnnnn]')
    ap.add_argument('--end', type=str, help='end time: '
                    'hh:mm:ss[.nnnnnnnnn]')
    ap.add_argument('--period-begin', type=str,
                    help='Analysis period start marker event name')
    ap.add_argument('--period-end', type=str,
                    help='Analysis period end marker event name '
                    '(requires --period-begin)')
    ap.add_argument('--period-begin-key', type=str, default='cpu_id',
                    help='Optional, list of event field names used to '
                    'match period markers (default: cpu_id)')
    ap.add_argument('--period-end-key', type=str,
                    help='Optional, list of event field names used to '
                    'match period marker. If none specified, use the same '
                    ' --period-begin-key')
    ap.add_argument('--period-key-value', type=str,
                    help='Optional, define a fixed key value to which a'
                    ' period must correspond to be considered.')
    ap.add_argument('--cpu', type=str,
                    help='Filter the results only for this list of '
                    'CPU IDs')
    ap.add_argument('--timerange', type=str, help='time range: '
                    '[begin,end]')
    ap.add_argument('--progress-use-size', action='store_true',
                    help='use trace size to approximate progress')
    ap.add_argument('-V', '--version', action='version',
                    version='LTTng Analyses v' + __version__)

    # MI mode-dependent arguments
    if self._mi_mode:
        ap.add_argument('--mi-version', action='store_true',
                        help='Print MI version')
        ap.add_argument('--metadata', action='store_true',
                        help='Print analysis\' metadata')
        ap.add_argument('path', metavar='<path/to/trace>',
                        help='trace path', nargs='*')
        ap.add_argument('--output-progress', action='store_true',
                        help='Print progress indication lines')
    else:
        ap.add_argument('--no-progress', action='store_true',
                        help='Don\'t display the progress bar')
        ap.add_argument('path', metavar='<path/to/trace>',
                        help='trace path')

    # Used to add command-specific args
    self._add_arguments(ap)

    args = ap.parse_args()

    if self._mi_mode:
        # in MI mode, progress is opt-in via --output-progress
        args.no_progress = True

        if args.output_progress:
            args.no_progress = False

    self._validate_transform_common_args(args)
    self._validate_transform_args(args)
    self._args = args
@staticmethod
def _add_proc_filter_args(ap):
    """Add --procname/--tid process-filter options to parser ap."""
    ap.add_argument('--procname', type=str,
                    help='Filter the results only for this list of '
                    'process names')
    ap.add_argument('--tid', type=str,
                    help='Filter the results only for this list of TIDs')
@staticmethod
def _add_min_max_args(ap):
    """Add --min/--max duration-filter options (in µs) to parser ap."""
    ap.add_argument('--min', type=float,
                    help='Filter out durations shorter than min usec')
    ap.add_argument('--max', type=float,
                    help='Filter out durations longer than max usec')
@staticmethod
def _add_freq_args(ap, help=None):
    """Add frequency-distribution options to parser ap.

    help -- custom help text for --freq (defaults to a generic one).
    """
    if not help:
        help = 'Output the frequency distribution'

    ap.add_argument('--freq', action='store_true', help=help)
    ap.add_argument('--freq-resolution', type=int, default=20,
                    help='Frequency distribution resolution '
                    '(default 20)')
    ap.add_argument('--freq-uniform', action='store_true',
                    help='Use a uniform resolution across distributions')
    ap.add_argument('--freq-series', action='store_true',
                    help='Consolidate frequency distribution histogram '
                    'as a single one')
@staticmethod
def _add_log_args(ap, help=None):
    """Add the --log option to parser ap, with optional custom help."""
    if not help:
        help = 'Output the events in chronological order'

    ap.add_argument('--log', action='store_true', help=help)
@staticmethod
def _add_top_args(ap, help=None):
    """Add --top/--limit options to parser ap, with optional custom
    help for --top."""
    if not help:
        help = 'Output the top results'

    ap.add_argument('--limit', type=int, default=10,
                    help='Limit to top X (default = 10)')
    ap.add_argument('--top', action='store_true', help=help)
@staticmethod
def _add_stats_args(ap, help=None):
    """Add the --stats option to parser ap, with optional custom
    help."""
    if not help:
        help = 'Output statistics'

    ap.add_argument('--stats', action='store_true', help=help)
def _add_arguments(self, ap):
    """Hook for subclasses: add command-specific arguments to parser
    ap. Default adds none."""
    pass
def _process_date_args(self):
    """Resolve --begin/--end/--timerange into begin/end timestamps on
    self._analysis_conf, relative to the opened trace collection.

    Exits via _cmdline_error on invalid values or when --end precedes
    the trace beginning.

    NOTE(review): several lines of this body were lost in extraction;
    the else branch and intermediate assignments are reconstructed —
    verify against upstream.
    """
    def parse_date(date):
        try:
            ts = parse_utils.parse_trace_collection_date(
                self._traces, date, self._args.gmt
            )
        except ValueError as e:
            self._cmdline_error(str(e))

        return ts

    self._args.multi_day = trace_utils.is_multi_day_trace_collection(
        self._traces
    )
    begin_ts = None
    end_ts = None

    if self._args.timerange:
        try:
            begin_ts, end_ts = (
                parse_utils.parse_trace_collection_time_range(
                    self._traces, self._args.timerange, self._args.gmt
                )
            )
        except ValueError as e:
            self._cmdline_error(str(e))
    else:
        if self._args.begin:
            begin_ts = parse_date(self._args.begin)
        if self._args.end:
            end_ts = parse_date(self._args.end)

            # We have to check if timestamp_begin is None, which
            # it always is in older versions of babeltrace. In
            # that case, the test is simply skipped and an invalid
            # --end value will cause an empty analysis
            if self._ts_begin is not None and \
                    end_ts < self._ts_begin:
                self._cmdline_error(
                    '--end timestamp before beginning of trace')

    self._analysis_conf.begin_ts = begin_ts
    self._analysis_conf.end_ts = end_ts
def _create_analysis(self):
    """Instantiate the command's analysis class with the shared kernel
    state and configuration, and subscribe to its tick notifications."""
    notification_cbs = {
        analysis.Analysis.TICK_CB: self._analysis_tick_cb
    }

    self._analysis = self._ANALYSIS_CLASS(self.state, self._analysis_conf)
    self._analysis.register_notification_cbs(notification_cbs)
def _create_automaton(self):
    """Create the kernel state-machine automaton and expose its state
    as self.state."""
    self._automaton = automaton.Automaton()
    self.state = self._automaton.state
def _analysis_tick_cb(self, **kwargs):
    """Analysis tick notification callback: forward the tick window to
    the subclass's _analysis_tick and count ticks.

    Expected kwargs: begin_ns, end_ns.
    """
    begin_ns = kwargs['begin_ns']
    end_ns = kwargs['end_ns']

    self._analysis_tick(begin_ns, end_ns)
    # NOTE(review): the tick-counter line was lost in extraction;
    # reconstructed (used by _post_analysis) — verify upstream.
    self._ticks += 1
def _analysis_tick(self, begin_ns, end_ns):
    """Handle one analysis tick for the window [begin_ns, end_ns].

    Must be implemented by subclasses.
    """
    raise NotImplementedError()
# Derive the MI version of the Command class from the package version
# string, expected as "major.minor.patch[extra]".
_cmd_version = _version.get_versions()['version']
_version_match = re.match(r'(\d+)\.(\d+)\.(\d+)(.*)', _cmd_version)
Command._MI_VERSION = version_utils.Version(
    int(_version_match.group(1)),
    int(_version_match.group(2)),
    int(_version_match.group(3)),
    _version_match.group(4),
)