+handle_pt_insn_event_flags (struct btrace_thread_info *btinfo,
+ struct pt_insn_decoder *decoder,
+ const struct pt_insn &insn,
+ std::vector<unsigned int> &gaps)
+{
+#if defined (HAVE_STRUCT_PT_INSN_ENABLED)
+ /* Tracing is disabled and re-enabled each time we enter the kernel. Most
+ times, we continue from the same instruction we stopped before. This is
+ indicated via the RESUMED instruction flag. The ENABLED instruction flag
+ means that we continued from some other instruction. Indicate this as a
+ trace gap except when tracing just started. */
+ if (insn.enabled && !btinfo->functions.empty ())
+ {
+ struct btrace_function *bfun;
+ uint64_t offset;
+
+ bfun = ftrace_new_gap (btinfo, BDE_PT_DISABLED, gaps);
+
+ /* NOTE(review): the return value of pt_insn_get_offset is ignored; if
+ the call fails, OFFSET is printed uninitialized below — confirm the
+ decoder guarantees success at this point. */
+ pt_insn_get_offset (decoder, &offset);
+
+ /* Presumably BFUN->insn_offset - 1 names the last instruction before
+ the gap — verify against ftrace_new_gap. */
+ warning (_("Non-contiguous trace at instruction %u (offset = 0x%" PRIx64
+ ", pc = 0x%" PRIx64 ")."), bfun->insn_offset - 1, offset,
+ insn.ip);
+ }
+#endif /* defined (HAVE_STRUCT_PT_INSN_ENABLED) */
+
+#if defined (HAVE_STRUCT_PT_INSN_RESYNCED)
+ /* Indicate trace overflows. */
+ if (insn.resynced)
+ {
+ struct btrace_function *bfun;
+ uint64_t offset;
+
+ bfun = ftrace_new_gap (btinfo, BDE_PT_OVERFLOW, gaps);
+
+ /* NOTE(review): return value ignored here as well; OFFSET may be
+ indeterminate in the warning if the call fails — confirm. */
+ pt_insn_get_offset (decoder, &offset);
+
+ warning (_("Overflow at instruction %u (offset = 0x%" PRIx64 ", pc = 0x%"
+ PRIx64 ")."), bfun->insn_offset - 1, offset, insn.ip);
+ }
+#endif /* defined (HAVE_STRUCT_PT_INSN_RESYNCED) */
+}
+
+/* Add function branch trace to BTINFO using DECODER. */
+
+static void
+ftrace_add_pt (struct btrace_thread_info *btinfo,
+ struct pt_insn_decoder *decoder,
+ int *plevel,
+ std::vector<unsigned int> &gaps)