+ CATCH (error, RETURN_MASK_ERROR)
+ {
+ }
+ END_CATCH
+
+ return iclass;
+}
+
+/* Try to match the back trace at LHS to the back trace at RHS. Returns the
+ number of matching function segments or zero if the back traces do not
+ match. BTINFO is the branch trace information for the current thread. */
+
+static int
+ftrace_match_backtrace (struct btrace_thread_info *btinfo,
+ struct btrace_function *lhs,
+ struct btrace_function *rhs)
+{
+ int matches;
+
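+ /* Walk both back traces in parallel. A switch to a different function at
+ any level fails the whole match; running out of callers on either side
+ ends the walk and we return the number of segments matched so far. */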
+ for (matches = 0; lhs != NULL && rhs != NULL; ++matches)
+ {
+ if (ftrace_function_switched (lhs, rhs->msym, rhs->sym))
+ return 0;
+
+ lhs = ftrace_get_caller (btinfo, lhs);
+ rhs = ftrace_get_caller (btinfo, rhs);
+ }
+
+ return matches;
+}
+
+/* Add ADJUSTMENT to the level of BFUN and succeeding function segments.
+ BTINFO is the branch trace information for the current thread. */
+
+static void
+ftrace_fixup_level (struct btrace_thread_info *btinfo,
+ struct btrace_function *bfun, int adjustment)
+{
+ if (adjustment == 0)
+ return;
+
+ DEBUG_FTRACE ("fixup level (%+d)", adjustment);
+ ftrace_debug (bfun, "..bfun");
+
+ while (bfun != NULL)
+ {
+ bfun->level += adjustment;
+ bfun = ftrace_find_call_by_number (btinfo, bfun->number + 1);
+ }
+}
+
+/* Recompute the global level offset. Traverse the function trace and compute
+ the global level offset as the negative of the minimal function level. */
+
+static void
+ftrace_compute_global_level_offset (struct btrace_thread_info *btinfo)
+{
+ int level = INT_MAX;
+
+ if (btinfo == NULL)
+ return;
+
+ if (btinfo->functions.empty ())
+ return;
+
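+ /* For example (illustrative values): function levels of -2, -1, and 0
+ give a global level offset of 2, so the levels presented to the user
+ start at zero. */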
+ unsigned int length = btinfo->functions.size () - 1;
+ for (unsigned int i = 0; i < length; ++i)
+ level = std::min (level, btinfo->functions[i].level);
+
+ /* The last function segment contains the current instruction, which is not
+ really part of the trace. If it contains just this one instruction, we
+ ignore the segment. */
+ struct btrace_function *last = &btinfo->functions.back ();
+ if (last->insn.size () != 1)
+ level = std::min (level, last->level);
+
+ DEBUG_FTRACE ("setting global level offset: %d", -level);
+ btinfo->level = -level;
+}
+
+/* Connect the function segments PREV and NEXT in a bottom-to-top walk as in
+ ftrace_connect_backtrace. BTINFO is the branch trace information for the
+ current thread. */
+
+static void
+ftrace_connect_bfun (struct btrace_thread_info *btinfo,
+ struct btrace_function *prev,
+ struct btrace_function *next)
+{
+ DEBUG_FTRACE ("connecting...");
+ ftrace_debug (prev, "..prev");
+ ftrace_debug (next, "..next");
+
+ /* The function segments are not yet connected. */
+ gdb_assert (prev->next == 0);
+ gdb_assert (next->prev == 0);
+
+ prev->next = next->number;
+ next->prev = prev->number;
+
+ /* We may have moved NEXT to a different function level. */
+ ftrace_fixup_level (btinfo, next, prev->level - next->level);
+
+ /* If we run out of back trace for one, let's use the other's. */
+ if (prev->up == 0)
+ {
+ const btrace_function_flags flags = next->flags;
+
+ next = ftrace_find_call_by_number (btinfo, next->up);
+ if (next != NULL)
+ {
+ DEBUG_FTRACE ("using next's callers");
+ ftrace_fixup_caller (btinfo, prev, next, flags);
+ }
+ }
+ else if (next->up == 0)
+ {
+ const btrace_function_flags flags = prev->flags;
+
+ prev = ftrace_find_call_by_number (btinfo, prev->up);
+ if (prev != NULL)
+ {
+ DEBUG_FTRACE ("using prev's callers");
+ ftrace_fixup_caller (btinfo, next, prev, flags);
+ }
+ }
+ else
+ {
+ /* PREV may have a tailcall caller, NEXT can't. If it does, fix up the
+ up link to add the tail callers to NEXT's back trace.
+
+ This removes NEXT->UP from NEXT's back trace. It will be added back
+ when connecting NEXT and PREV's callers - provided they exist.
+
+ If PREV's back trace consists of a series of tail calls without an
+ actual call, there will be no further connection and NEXT's caller will
+ be removed for good. To catch this case, we handle it here and connect
+ the top of PREV's back trace to NEXT's caller. */
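+
+ /* For example (hypothetical trace): if PREV was reached from foo via a
+ tail call and NEXT's caller is main, we add foo to NEXT's back trace
+ and, at the top of the tail call chain, connect foo to main. */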
+ if ((prev->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
+ {
+ struct btrace_function *caller;
+ btrace_function_flags next_flags, prev_flags;
+
+ /* We checked NEXT->UP above so CALLER can't be NULL. */
+ caller = ftrace_find_call_by_number (btinfo, next->up);
+ next_flags = next->flags;
+ prev_flags = prev->flags;
+
+ DEBUG_FTRACE ("adding prev's tail calls to next");
+
+ prev = ftrace_find_call_by_number (btinfo, prev->up);
+ ftrace_fixup_caller (btinfo, next, prev, prev_flags);
+
+ for (; prev != NULL; prev = ftrace_find_call_by_number (btinfo,
+ prev->up))
+ {
+ /* At the end of PREV's back trace, continue with CALLER. */
+ if (prev->up == 0)
+ {
+ DEBUG_FTRACE ("fixing up link for tailcall chain");
+ ftrace_debug (prev, "..top");
+ ftrace_debug (caller, "..up");
+
+ ftrace_fixup_caller (btinfo, prev, caller, next_flags);
+
+ /* If we skipped any tail calls, this may move CALLER to a
+ different function level.
+
+ Note that changing CALLER's level is only OK because we
+ know that this is the last iteration of the bottom-to-top
+ walk in ftrace_connect_backtrace.
+
+ Otherwise we will fix up CALLER's level when we connect it
+ to PREV's caller in the next iteration. */
+ ftrace_fixup_level (btinfo, caller,
+ prev->level - caller->level - 1);
+ break;
+ }
+
+ /* There's nothing to do if we find a real call. */
+ if ((prev->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
+ {
+ DEBUG_FTRACE ("will fix up link in next iteration");
+ break;
+ }
+ }
+ }
+ }
+}
+
+/* Connect function segments on the same level in the back trace at LHS and RHS.
+ The back traces at LHS and RHS are expected to match according to
+ ftrace_match_backtrace. BTINFO is the branch trace information for the
+ current thread. */
+
+static void
+ftrace_connect_backtrace (struct btrace_thread_info *btinfo,
+ struct btrace_function *lhs,
+ struct btrace_function *rhs)
+{
+ while (lhs != NULL && rhs != NULL)
+ {
+ struct btrace_function *prev, *next;
+
+ gdb_assert (!ftrace_function_switched (lhs, rhs->msym, rhs->sym));
+
+ /* Connecting LHS and RHS may change the up link. */
+ prev = lhs;
+ next = rhs;
+
+ lhs = ftrace_get_caller (btinfo, lhs);
+ rhs = ftrace_get_caller (btinfo, rhs);
+
+ ftrace_connect_bfun (btinfo, prev, next);
+ }
+}
+
+/* Bridge the gap between two function segments left and right of a gap if their
+ respective back traces match in at least MIN_MATCHES functions. BTINFO is
+ the branch trace information for the current thread.
+
+ Returns non-zero if the gap could be bridged, zero otherwise. */
+
+static int
+ftrace_bridge_gap (struct btrace_thread_info *btinfo,
+ struct btrace_function *lhs, struct btrace_function *rhs,
+ int min_matches)
+{
+ struct btrace_function *best_l, *best_r, *cand_l, *cand_r;
+ int best_matches;
+
+ DEBUG_FTRACE ("checking gap at insn %u (req matches: %d)",
+ rhs->insn_offset - 1, min_matches);
+
+ best_matches = 0;
+ best_l = NULL;
+ best_r = NULL;
+
+ /* We search the back traces of LHS and RHS for valid connections and connect
+ the two function segments that give the longest combined back trace. */
+
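+ /* This checks every pairing of a segment from LHS's back trace with one
+ from RHS's back trace, i.e. N * M calls to ftrace_match_backtrace for
+ back traces of depth N and M. */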
+ for (cand_l = lhs; cand_l != NULL;
+ cand_l = ftrace_get_caller (btinfo, cand_l))
+ for (cand_r = rhs; cand_r != NULL;
+ cand_r = ftrace_get_caller (btinfo, cand_r))
+ {
+ int matches;
+
+ matches = ftrace_match_backtrace (btinfo, cand_l, cand_r);
+ if (best_matches < matches)
+ {
+ best_matches = matches;
+ best_l = cand_l;
+ best_r = cand_r;
+ }
+ }
+
+ /* We need at least MIN_MATCHES matches. */
+ gdb_assert (min_matches > 0);
+ if (best_matches < min_matches)
+ return 0;
+
+ DEBUG_FTRACE ("..matches: %d", best_matches);
+
+ /* We will fix up the level of BEST_R and succeeding function segments such
+ that BEST_R's level matches BEST_L's when we connect BEST_L to BEST_R.
+
+ This will ignore the level of RHS and following if BEST_R != RHS. I.e. if
+ BEST_R is a successor of RHS in the back trace of RHS (phases 1 and 3).
+
+ To catch this, we already fix up the level here where we can start at RHS
+ instead of at BEST_R. We will ignore the level fixup when connecting
+ BEST_L to BEST_R as they will already be on the same level. */
+ ftrace_fixup_level (btinfo, rhs, best_l->level - best_r->level);
+
+ ftrace_connect_backtrace (btinfo, best_l, best_r);
+
+ return best_matches;
+}
+
+/* Try to bridge gaps due to overflow or decode errors by connecting the
+ function segments that are separated by the gap. */
+
+static void
+btrace_bridge_gaps (struct thread_info *tp, std::vector<unsigned int> &gaps)
+{
+ struct btrace_thread_info *btinfo = &tp->btrace;
+ std::vector<unsigned int> remaining;
+ int min_matches;
+
+ DEBUG ("bridge gaps");
+
+ /* We require a minimum number of matches for bridging a gap. The number of
+ required matches will be lowered with each iteration.
+
+ The more matches the higher our confidence that the bridging is correct.
+ For big gaps or small traces, however, it may not be feasible to require a
+ high number of matches. */
+ for (min_matches = 5; min_matches > 0; --min_matches)
+ {
+ /* Let's try to bridge as many gaps as we can. In some cases, we need to
+ skip a gap and revisit it after we have closed later gaps. */
+ while (!gaps.empty ())
+ {
+ for (const unsigned int number : gaps)
+ {
+ struct btrace_function *gap, *lhs, *rhs;
+ int bridged;
+
+ gap = ftrace_find_call_by_number (btinfo, number);
+
+ /* We may have a sequence of gaps if we run from one error into
+ the next as we try to re-sync onto the trace stream. Ignore
+ all but the leftmost gap in such a sequence.
+
+ Also ignore gaps at the beginning of the trace. */
+ lhs = ftrace_find_call_by_number (btinfo, gap->number - 1);
+ if (lhs == NULL || lhs->errcode != 0)
+ continue;
+
+ /* Skip gaps to the right. */
+ rhs = ftrace_find_call_by_number (btinfo, gap->number + 1);
+ while (rhs != NULL && rhs->errcode != 0)
+ rhs = ftrace_find_call_by_number (btinfo, rhs->number + 1);
+
+ /* Ignore gaps at the end of the trace. */
+ if (rhs == NULL)
+ continue;
+
+ bridged = ftrace_bridge_gap (btinfo, lhs, rhs, min_matches);
+
+ /* Keep track of gaps we were not able to bridge and try again.
+ If we just pushed them to the end of GAPS we would risk an
+ infinite loop in case we simply cannot bridge a gap. */
+ if (bridged == 0)
+ remaining.push_back (number);
+ }
+
+ /* Let's see if we made any progress. */
+ if (remaining.size () == gaps.size ())
+ break;
+
+ gaps.clear ();
+ gaps.swap (remaining);
+ }
+
+ /* We get here if either GAPS is empty or if GAPS equals REMAINING. */
+ if (gaps.empty ())
+ break;
+
+ remaining.clear ();
+ }
+
+ /* We may omit this in some cases. Not sure it is worth the extra
+ complication, though. */
+ ftrace_compute_global_level_offset (btinfo);
+}
+
+/* Compute the function branch trace from a branch trace in BTS format. */
+
+static void
+btrace_compute_ftrace_bts (struct thread_info *tp,
+ const struct btrace_data_bts *btrace,
+ std::vector<unsigned int> &gaps)
+{
+ struct btrace_thread_info *btinfo;
+ struct gdbarch *gdbarch;
+ unsigned int blk;
+ int level;
+
+ gdbarch = target_gdbarch ();
+ btinfo = &tp->btrace;
+ blk = VEC_length (btrace_block_s, btrace->blocks);
+
+ if (btinfo->functions.empty ())
+ level = INT_MAX;
+ else
+ level = -btinfo->level;
+
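+ /* The block vector starts with the most recent block; walking BLK from
+ the end processes the trace in chronological order. */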
+ while (blk != 0)
+ {
+ btrace_block_s *block;
+ CORE_ADDR pc;
+
+ blk -= 1;
+
+ block = VEC_index (btrace_block_s, btrace->blocks, blk);
+ pc = block->begin;
+
+ for (;;)
+ {
+ struct btrace_function *bfun;
+ struct btrace_insn insn;
+ int size;
+
+ /* We should hit the end of the block. Warn if we went too far. */
+ if (block->end < pc)
+ {
+ /* Indicate the gap in the trace. */
+ bfun = ftrace_new_gap (btinfo, BDE_BTS_OVERFLOW, gaps);
+
+ warning (_("Recorded trace may be corrupted at instruction "
+ "%u (pc = %s)."), bfun->insn_offset - 1,
+ core_addr_to_string_nz (pc));
+
+ break;
+ }
+
+ bfun = ftrace_update_function (btinfo, pc);
+
+ /* Maintain the function level offset.
+ For all but the last block, we do it here. */
+ if (blk != 0)
+ level = std::min (level, bfun->level);
+
+ size = 0;
+ TRY
+ {
+ size = gdb_insn_length (gdbarch, pc);
+ }
+ CATCH (error, RETURN_MASK_ERROR)
+ {
+ }
+ END_CATCH
+
+ insn.pc = pc;
+ insn.size = size;
+ insn.iclass = ftrace_classify_insn (gdbarch, pc);
+ insn.flags = 0;
+
+ ftrace_update_insns (bfun, insn);
+
+ /* We're done once we pushed the instruction at the end. */
+ if (block->end == pc)
+ break;
+
+ /* We can't continue if we fail to compute the size. */
+ if (size <= 0)
+ {
+ /* Indicate the gap in the trace. We just added INSN so we're
+ not at the beginning. */
+ bfun = ftrace_new_gap (btinfo, BDE_BTS_INSN_SIZE, gaps);
+
+ warning (_("Recorded trace may be incomplete at instruction %u "
+ "(pc = %s)."), bfun->insn_offset - 1,
+ core_addr_to_string_nz (pc));
+
+ break;
+ }
+
+ pc += size;
+
+ /* Maintain the function level offset.
+ For the last block, we do it here to not consider the last
+ instruction.
+ Since the last instruction corresponds to the current instruction
+ and is not really part of the execution history, it shouldn't
+ affect the level. */
+ if (blk == 0)
+ level = std::min (level, bfun->level);
+ }
+ }
+
+ /* LEVEL is the minimal function level of all btrace function segments.
+ Define the global level offset to -LEVEL so all function levels are
+ normalized to start at zero. */
+ btinfo->level = -level;
+}
+
+#if defined (HAVE_LIBIPT)
+
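+/* Translate the libipt instruction class ICLASS into a btrace
+ instruction class. */
+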
+static enum btrace_insn_class
+pt_reclassify_insn (enum pt_insn_class iclass)
+{
+ switch (iclass)
+ {
+ case ptic_call:
+ return BTRACE_INSN_CALL;
+
+ case ptic_return:
+ return BTRACE_INSN_RETURN;
+
+ case ptic_jump:
+ return BTRACE_INSN_JUMP;
+
+ default:
+ return BTRACE_INSN_OTHER;
+ }
+}
+
+/* Return the btrace instruction flags for INSN. */
+
+static btrace_insn_flags
+pt_btrace_insn_flags (const struct pt_insn &insn)
+{
+ btrace_insn_flags flags = 0;
+
+ if (insn.speculative)
+ flags |= BTRACE_INSN_FLAG_SPECULATIVE;
+
+ return flags;
+}
+
+/* Return the btrace instruction for INSN. */
+
+static btrace_insn
+pt_btrace_insn (const struct pt_insn &insn)
+{
+ return {(CORE_ADDR) insn.ip, (gdb_byte) insn.size,
+ pt_reclassify_insn (insn.iclass),
+ pt_btrace_insn_flags (insn)};
+}
+
+/* Handle instruction decode events (libipt-v2). */
+
+static int
+handle_pt_insn_events (struct btrace_thread_info *btinfo,
+ struct pt_insn_decoder *decoder,
+ std::vector<unsigned int> &gaps, int status)
+{
+#if defined (HAVE_PT_INSN_EVENT)
+ while (status & pts_event_pending)
+ {
+ struct btrace_function *bfun;
+ struct pt_event event;
+ uint64_t offset;
+
+ status = pt_insn_event (decoder, &event, sizeof (event));
+ if (status < 0)
+ break;
+
+ switch (event.type)
+ {
+ default:
+ break;
+
+ case ptev_enabled:
+ if (event.variant.enabled.resumed == 0 && !btinfo->functions.empty ())
+ {
+ bfun = ftrace_new_gap (btinfo, BDE_PT_DISABLED, gaps);
+
+ pt_insn_get_offset (decoder, &offset);
+
+ warning (_("Non-contiguous trace at instruction %u (offset = 0x%"
+ PRIx64 ")."), bfun->insn_offset - 1, offset);
+ }
+
+ break;
+
+ case ptev_overflow:
+ bfun = ftrace_new_gap (btinfo, BDE_PT_OVERFLOW, gaps);
+
+ pt_insn_get_offset (decoder, &offset);
+
+ warning (_("Overflow at instruction %u (offset = 0x%" PRIx64 ")."),
+ bfun->insn_offset - 1, offset);
+
+ break;
+ }
+ }
+#endif /* defined (HAVE_PT_INSN_EVENT) */
+
+ return status;
+}
+
+/* Handle events indicated by flags in INSN (libipt-v1). */
+
+static void
+handle_pt_insn_event_flags (struct btrace_thread_info *btinfo,
+ struct pt_insn_decoder *decoder,
+ const struct pt_insn &insn,
+ std::vector<unsigned int> &gaps)
+{
+#if defined (HAVE_STRUCT_PT_INSN_ENABLED)
+ /* Tracing is disabled and re-enabled each time we enter the kernel. Most
+ times, we continue from the same instruction we stopped before. This is
+ indicated via the RESUMED instruction flag. The ENABLED instruction flag
+ means that we continued from some other instruction. Indicate this as a
+ trace gap except when tracing just started. */
+ if (insn.enabled && !btinfo->functions.empty ())
+ {
+ struct btrace_function *bfun;
+ uint64_t offset;
+
+ bfun = ftrace_new_gap (btinfo, BDE_PT_DISABLED, gaps);
+
+ pt_insn_get_offset (decoder, &offset);
+
+ warning (_("Non-contiguous trace at instruction %u (offset = 0x%" PRIx64
+ ", pc = 0x%" PRIx64 ")."), bfun->insn_offset - 1, offset,
+ insn.ip);
+ }
+#endif /* defined (HAVE_STRUCT_PT_INSN_ENABLED) */
+
+#if defined (HAVE_STRUCT_PT_INSN_RESYNCED)
+ /* Indicate trace overflows. */
+ if (insn.resynced)
+ {
+ struct btrace_function *bfun;
+ uint64_t offset;
+
+ bfun = ftrace_new_gap (btinfo, BDE_PT_OVERFLOW, gaps);
+
+ pt_insn_get_offset (decoder, &offset);
+
+ warning (_("Overflow at instruction %u (offset = 0x%" PRIx64 ", pc = 0x%"
+ PRIx64 ")."), bfun->insn_offset - 1, offset, insn.ip);
+ }
+#endif /* defined (HAVE_STRUCT_PT_INSN_RESYNCED) */
+}
+
+/* Add function branch trace to BTINFO using DECODER. */
+
+static void
+ftrace_add_pt (struct btrace_thread_info *btinfo,
+ struct pt_insn_decoder *decoder,
+ int *plevel,
+ std::vector<unsigned int> &gaps)
+{
+ struct btrace_function *bfun;
+ uint64_t offset;
+ int status;
+
+ for (;;)
+ {
+ struct pt_insn insn;
+
+ status = pt_insn_sync_forward (decoder);
+ if (status < 0)
+ {
+ if (status != -pte_eos)
+ warning (_("Failed to synchronize onto the Intel Processor "
+ "Trace stream: %s."), pt_errstr (pt_errcode (status)));
+ break;
+ }
+
+ for (;;)
+ {
+ /* Handle events from the previous iteration or synchronization. */
+ status = handle_pt_insn_events (btinfo, decoder, gaps, status);
+ if (status < 0)
+ break;
+
+ status = pt_insn_next (decoder, &insn, sizeof (insn));
+ if (status < 0)
+ break;
+
+ /* Handle events indicated by flags in INSN. */
+ handle_pt_insn_event_flags (btinfo, decoder, insn, gaps);
+
+ bfun = ftrace_update_function (btinfo, insn.ip);
+
+ /* Maintain the function level offset. */
+ *plevel = std::min (*plevel, bfun->level);
+
+ ftrace_update_insns (bfun, pt_btrace_insn (insn));
+ }
+
+ if (status == -pte_eos)
+ break;
+
+ /* Indicate the gap in the trace. */
+ bfun = ftrace_new_gap (btinfo, status, gaps);
+
+ pt_insn_get_offset (decoder, &offset);
+
+ warning (_("Decode error (%d) at instruction %u (offset = 0x%" PRIx64
+ ", pc = 0x%" PRIx64 "): %s."), status, bfun->insn_offset - 1,
+ offset, insn.ip, pt_errstr (pt_errcode (status)));
+ }
+}
+
+/* A callback function to allow the trace decoder to read the inferior's
+ memory. */
+
+static int
+btrace_pt_readmem_callback (gdb_byte *buffer, size_t size,
+ const struct pt_asid *asid, uint64_t pc,
+ void *context)
+{
+ int result, errcode;
+
+ result = (int) size;
+ TRY
+ {
+ errcode = target_read_code ((CORE_ADDR) pc, buffer, size);
+ if (errcode != 0)
+ result = -pte_nomap;
+ }
+ CATCH (error, RETURN_MASK_ERROR)
+ {
+ result = -pte_nomap;
+ }
+ END_CATCH
+
+ return result;
+}
+
+/* Translate the CPU vendor from the btrace enum to the libipt enum. */
+
+static enum pt_cpu_vendor
+pt_translate_cpu_vendor (enum btrace_cpu_vendor vendor)
+{
+ switch (vendor)
+ {
+ default:
+ return pcv_unknown;
+
+ case CV_INTEL:
+ return pcv_intel;
+ }
+}
+
+/* Finalize the function branch trace after decode. */
+
+static void
+btrace_finalize_ftrace_pt (struct pt_insn_decoder *decoder,
+ struct thread_info *tp, int level)
+{
+ pt_insn_free_decoder (decoder);
+
+ /* LEVEL is the minimal function level of all btrace function segments.
+ Define the global level offset to -LEVEL so all function levels are
+ normalized to start at zero. */
+ tp->btrace.level = -level;
+
+ /* Add a single last instruction entry for the current PC.
+ This allows us to compute the backtrace at the current PC using both
+ standard unwind and btrace unwind.
+ This extra entry is ignored by all record commands. */
+ btrace_add_pc (tp);
+}
+
+/* Compute the function branch trace from a branch trace in Intel Processor
+ Trace format. */
+
+static void
+btrace_compute_ftrace_pt (struct thread_info *tp,
+ const struct btrace_data_pt *btrace,
+ std::vector<unsigned int> &gaps)
+{
+ struct btrace_thread_info *btinfo;
+ struct pt_insn_decoder *decoder;
+ struct pt_config config;
+ int level, errcode;
+
+ if (btrace->size == 0)
+ return;
+
+ btinfo = &tp->btrace;
+ if (btinfo->functions.empty ())
+ level = INT_MAX;
+ else
+ level = -btinfo->level;
+
+ pt_config_init (&config);
+ config.begin = btrace->data;
+ config.end = btrace->data + btrace->size;
+
+ config.cpu.vendor = pt_translate_cpu_vendor (btrace->config.cpu.vendor);
+ config.cpu.family = btrace->config.cpu.family;
+ config.cpu.model = btrace->config.cpu.model;
+ config.cpu.stepping = btrace->config.cpu.stepping;
+
+ errcode = pt_cpu_errata (&config.errata, &config.cpu);
+ if (errcode < 0)
+ error (_("Failed to configure the Intel Processor Trace decoder: %s."),
+ pt_errstr (pt_errcode (errcode)));
+
+ decoder = pt_insn_alloc_decoder (&config);
+ if (decoder == NULL)
+ error (_("Failed to allocate the Intel Processor Trace decoder."));
+
+ TRY
+ {
+ struct pt_image *image;
+
+ image = pt_insn_get_image (decoder);
+ if (image == NULL)
+ error (_("Failed to configure the Intel Processor Trace decoder."));
+
+ errcode = pt_image_set_callback (image, btrace_pt_readmem_callback, NULL);
+ if (errcode < 0)
+ error (_("Failed to configure the Intel Processor Trace decoder: "
+ "%s."), pt_errstr (pt_errcode (errcode)));
+
+ ftrace_add_pt (btinfo, decoder, &level, gaps);
+ }
+ CATCH (error, RETURN_MASK_ALL)
+ {
+ /* Indicate a gap in the trace if we quit trace processing. */
+ if (error.reason == RETURN_QUIT && !btinfo->functions.empty ())
+ ftrace_new_gap (btinfo, BDE_PT_USER_QUIT, gaps);
+
+ btrace_finalize_ftrace_pt (decoder, tp, level);
+
+ throw_exception (error);
+ }
+ END_CATCH
+
+ btrace_finalize_ftrace_pt (decoder, tp, level);
+}
+
+#else /* defined (HAVE_LIBIPT) */
+
+static void
+btrace_compute_ftrace_pt (struct thread_info *tp,
+ const struct btrace_data_pt *btrace,
+ std::vector<unsigned int> &gaps)
+{
+ internal_error (__FILE__, __LINE__, _("Unexpected branch trace format."));
+}
+
+#endif /* defined (HAVE_LIBIPT) */
+
+/* Compute the function branch trace from the branch trace data BTRACE for
+ thread TP. Collect trace gaps in GAPS. */
+
+static void
+btrace_compute_ftrace_1 (struct thread_info *tp, struct btrace_data *btrace,
+ std::vector<unsigned int> &gaps)
+{
+ DEBUG ("compute ftrace");
+
+ switch (btrace->format)
+ {
+ case BTRACE_FORMAT_NONE:
+ return;
+
+ case BTRACE_FORMAT_BTS:
+ btrace_compute_ftrace_bts (tp, &btrace->variant.bts, gaps);
+ return;
+
+ case BTRACE_FORMAT_PT:
+ btrace_compute_ftrace_pt (tp, &btrace->variant.pt, gaps);
+ return;
+ }
+
+ internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
+}
+
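+/* Update the gap count in TP and try to bridge the gaps in GAPS. */
+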
+static void
+btrace_finalize_ftrace (struct thread_info *tp, std::vector<unsigned int> &gaps)
+{
+ if (!gaps.empty ())
+ {
+ tp->btrace.ngaps += gaps.size ();
+ btrace_bridge_gaps (tp, gaps);
+ }
+}
+
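+/* Compute the function branch trace for TP from BTRACE. Gaps are counted
+ and bridged even if trace processing is interrupted by an error. */
+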
+static void
+btrace_compute_ftrace (struct thread_info *tp, struct btrace_data *btrace)
+{
+ std::vector<unsigned int> gaps;
+
+ TRY
+ {
+ btrace_compute_ftrace_1 (tp, btrace, gaps);
+ }
+ CATCH (error, RETURN_MASK_ALL)
+ {
+ btrace_finalize_ftrace (tp, gaps);
+
+ throw_exception (error);
+ }
+ END_CATCH
+
+ btrace_finalize_ftrace (tp, gaps);
+}
+
+/* Add an entry for the current PC. */
+
+static void
+btrace_add_pc (struct thread_info *tp)
+{
+ struct btrace_data btrace;
+ struct btrace_block *block;
+ struct regcache *regcache;
+ struct cleanup *cleanup;
+ CORE_ADDR pc;
+
+ regcache = get_thread_regcache (tp->ptid);
+ pc = regcache_read_pc (regcache);
+
+ btrace_data_init (&btrace);
+ btrace.format = BTRACE_FORMAT_BTS;
+ btrace.variant.bts.blocks = NULL;
+
+ cleanup = make_cleanup_btrace_data (&btrace);
+
+ block = VEC_safe_push (btrace_block_s, btrace.variant.bts.blocks, NULL);
+ block->begin = pc;
+ block->end = pc;
+
+ btrace_compute_ftrace (tp, &btrace);
+
+ do_cleanups (cleanup);
+}
+
+/* See btrace.h. */
+
+void
+btrace_enable (struct thread_info *tp, const struct btrace_config *conf)
+{
+ if (tp->btrace.target != NULL)
+ return;
+
+#if !defined (HAVE_LIBIPT)
+ if (conf->format == BTRACE_FORMAT_PT)
+ error (_("GDB does not support Intel Processor Trace."));
+#endif /* !defined (HAVE_LIBIPT) */
+
+ if (!target_supports_btrace (conf->format))
+ error (_("Target does not support branch tracing."));
+
+ DEBUG ("enable thread %s (%s)", print_thread_id (tp),
+ target_pid_to_str (tp->ptid));
+
+ tp->btrace.target = target_enable_btrace (tp->ptid, conf);
+
+ /* We're done if we failed to enable tracing. */
+ if (tp->btrace.target == NULL)
+ return;
+
+ /* We need to undo the enable in case of errors. */
+ TRY
+ {
+ /* Add an entry for the current PC so we start tracing from where we
+ enabled it.
+
+ If we can't access TP's registers, TP is most likely running. In this
+ case, we can't really say where tracing was enabled so it should be
+ safe to simply skip this step.
+
+ This is not relevant for BTRACE_FORMAT_PT since the trace will already
+ start at the PC at which tracing was enabled. */
+ if (conf->format != BTRACE_FORMAT_PT
+ && can_access_registers_ptid (tp->ptid))
+ btrace_add_pc (tp);
+ }
+ CATCH (exception, RETURN_MASK_ALL)
+ {
+ btrace_disable (tp);
+
+ throw_exception (exception);
+ }
+ END_CATCH
+}
+
+/* See btrace.h. */
+
+const struct btrace_config *
+btrace_conf (const struct btrace_thread_info *btinfo)
+{
+ if (btinfo->target == NULL)
+ return NULL;
+
+ return target_btrace_conf (btinfo->target);
+}
+
+/* See btrace.h. */
+
+void
+btrace_disable (struct thread_info *tp)
+{
+ struct btrace_thread_info *btp = &tp->btrace;
+
+ if (btp->target == NULL)
+ return;
+
+ DEBUG ("disable thread %s (%s)", print_thread_id (tp),
+ target_pid_to_str (tp->ptid));
+
+ target_disable_btrace (btp->target);
+ btp->target = NULL;
+
+ btrace_clear (tp);
+}
+
+/* See btrace.h. */
+
+void
+btrace_teardown (struct thread_info *tp)
+{
+ struct btrace_thread_info *btp = &tp->btrace;
+
+ if (btp->target == NULL)
+ return;
+
+ DEBUG ("teardown thread %s (%s)", print_thread_id (tp),
+ target_pid_to_str (tp->ptid));
+
+ target_teardown_btrace (btp->target);
+ btp->target = NULL;
+
+ btrace_clear (tp);
+}
+
+/* Stitch branch trace in BTS format. */
+
+static int
+btrace_stitch_bts (struct btrace_data_bts *btrace, struct thread_info *tp)
+{
+ struct btrace_thread_info *btinfo;
+ struct btrace_function *last_bfun;
+ btrace_block_s *first_new_block;
+
+ btinfo = &tp->btrace;
+ gdb_assert (!btinfo->functions.empty ());
+ gdb_assert (!VEC_empty (btrace_block_s, btrace->blocks));
+
+ last_bfun = &btinfo->functions.back ();
+
+ /* If the existing trace ends with a gap, we just glue the traces
+ together. We need to drop the last (i.e. chronologically first) block
+ of the new trace, though, since we can't fill in the start address. */
+ if (last_bfun->insn.empty ())
+ {
+ VEC_pop (btrace_block_s, btrace->blocks);
+ return 0;
+ }
+
+ /* Beware that block trace starts with the most recent block, so the
+ chronologically first block in the new trace is the last block in
+ the new trace's block vector. */
+ first_new_block = VEC_last (btrace_block_s, btrace->blocks);
+ const btrace_insn &last_insn = last_bfun->insn.back ();
+
+ /* If the current PC at the end of the block is the same as in our current
+ trace, there are two explanations:
+ 1. we executed the instruction and some branch brought us back.
+ 2. we have not made any progress.
+ In the first case, the delta trace vector should contain at least two
+ entries.
+ In the second case, the delta trace vector should contain exactly one
+ entry for the partial block containing the current PC. Remove it. */
+ if (first_new_block->end == last_insn.pc
+ && VEC_length (btrace_block_s, btrace->blocks) == 1)
+ {
+ VEC_pop (btrace_block_s, btrace->blocks);
+ return 0;
+ }
+
+ DEBUG ("stitching %s to %s", ftrace_print_insn_addr (&last_insn),
+ core_addr_to_string_nz (first_new_block->end));
+
+ /* Do a simple sanity check to make sure we don't accidentally end up
+ with a bad block. This should not occur in practice. */
+ if (first_new_block->end < last_insn.pc)
+ {
+ warning (_("Error while trying to read delta trace. Falling back to "
+ "a full read."));
+ return -1;
+ }
+
+ /* We adjust the last block to start at the end of our current trace. */
+ gdb_assert (first_new_block->begin == 0);
+ first_new_block->begin = last_insn.pc;
+
+ /* We simply pop the last insn so we can insert it again as part of
+ the normal branch trace computation.
+ Since instruction iterators are based on indices in the instructions
+ vector, we don't leave any pointers dangling. */
+ DEBUG ("pruning insn at %s for stitching",
+ ftrace_print_insn_addr (&last_insn));
+
+ last_bfun->insn.pop_back ();
+
+ /* The instructions vector may become empty temporarily if this has
+ been the only instruction in this function segment.
+ This violates the invariant but will be remedied shortly by
+ btrace_compute_ftrace when we add the new trace. */
+
+ /* The only case where this would hurt is if the entire trace consisted
+ of just that one instruction. If we remove it, we might turn the now
+ empty btrace function segment into a gap. But we don't want gaps at
+ the beginning. To avoid this, we remove the entire old trace. */
+ if (last_bfun->number == 1 && last_bfun->insn.empty ())
+ btrace_clear (tp);
+
+ return 0;
+}
+
+/* Adjust the block trace in order to stitch old and new trace together.
+ BTRACE is the new delta trace between the last and the current stop.
+ TP is the traced thread.
+ May modify BTRACE as well as the existing trace in TP.
+ Return 0 on success, -1 otherwise. */
+
+static int
+btrace_stitch_trace (struct btrace_data *btrace, struct thread_info *tp)
+{
+ /* If we don't have trace, there's nothing to do. */
+ if (btrace_data_empty (btrace))
+ return 0;
+
+ switch (btrace->format)
+ {
+ case BTRACE_FORMAT_NONE:
+ return 0;
+
+ case BTRACE_FORMAT_BTS:
+ return btrace_stitch_bts (&btrace->variant.bts, tp);
+
+ case BTRACE_FORMAT_PT:
+ /* Delta reads are not supported. */
+ return -1;
+ }
+
+ internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
+}
+
+/* Clear the branch trace histories in BTINFO. */
+
+static void
+btrace_clear_history (struct btrace_thread_info *btinfo)
+{
+ xfree (btinfo->insn_history);
+ xfree (btinfo->call_history);
+ xfree (btinfo->replay);
+
+ btinfo->insn_history = NULL;
+ btinfo->call_history = NULL;
+ btinfo->replay = NULL;
+}
+
+/* Clear the branch trace maintenance histories in BTINFO. */
+
+static void
+btrace_maint_clear (struct btrace_thread_info *btinfo)
+{
+ switch (btinfo->data.format)
+ {
+ default:
+ break;
+
+ case BTRACE_FORMAT_BTS:
+ btinfo->maint.variant.bts.packet_history.begin = 0;
+ btinfo->maint.variant.bts.packet_history.end = 0;
+ break;
+
+#if defined (HAVE_LIBIPT)
+ case BTRACE_FORMAT_PT:
+ xfree (btinfo->maint.variant.pt.packets);
+
+ btinfo->maint.variant.pt.packets = NULL;
+ btinfo->maint.variant.pt.packet_history.begin = 0;
+ btinfo->maint.variant.pt.packet_history.end = 0;
+ break;
+#endif /* defined (HAVE_LIBIPT) */
+ }
+}
+
+/* See btrace.h. */
+
+const char *
+btrace_decode_error (enum btrace_format format, int errcode)
+{
+ switch (format)
+ {
+ case BTRACE_FORMAT_BTS:
+ switch (errcode)
+ {
+ case BDE_BTS_OVERFLOW:
+ return _("instruction overflow");
+
+ case BDE_BTS_INSN_SIZE:
+ return _("unknown instruction");
+
+ default:
+ break;
+ }
+ break;
+
+#if defined (HAVE_LIBIPT)
+ case BTRACE_FORMAT_PT:
+ switch (errcode)
+ {
+ case BDE_PT_USER_QUIT:
+ return _("trace decode cancelled");
+
+ case BDE_PT_DISABLED:
+ return _("disabled");
+
+ case BDE_PT_OVERFLOW:
+ return _("overflow");
+
+ default:
+ if (errcode < 0)
+ return pt_errstr (pt_errcode (errcode));
+ break;
+ }
+ break;
+#endif /* defined (HAVE_LIBIPT) */
+
+ default:
+ break;
+ }
+
+ return _("unknown");
+}
+
+/* See btrace.h. */
+
+void
+btrace_fetch (struct thread_info *tp)
+{
+ struct btrace_thread_info *btinfo;
+ struct btrace_target_info *tinfo;
+ struct btrace_data btrace;
+ struct cleanup *cleanup;
+ int errcode;
+
+ DEBUG ("fetch thread %s (%s)", print_thread_id (tp),
+ target_pid_to_str (tp->ptid));
+
+ btinfo = &tp->btrace;
+ tinfo = btinfo->target;
+ if (tinfo == NULL)
+ return;
+
+ /* There's no way we could get new trace while replaying.
+ On the other hand, delta trace would return a partial record with the
+ current PC, which is the replay PC, not the last PC, as expected. */
+ if (btinfo->replay != NULL)
+ return;
+
+ /* With CLI usage, TP->PTID always equals INFERIOR_PTID here. Now that we
+ can store a gdb.Record object in Python referring to a different thread
+ than the current one, temporarily set INFERIOR_PTID. */
+ scoped_restore save_inferior_ptid = make_scoped_restore (&inferior_ptid);
+ inferior_ptid = tp->ptid;
+
+ /* We should not be called on running or exited threads. */
+ gdb_assert (can_access_registers_ptid (tp->ptid));
+
+ btrace_data_init (&btrace);
+ cleanup = make_cleanup_btrace_data (&btrace);
+
+ /* Let's first try to extend the trace we already have. */
+ if (!btinfo->functions.empty ())
+ {
+ errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_DELTA);
+ if (errcode == 0)
+ {
+ /* Success. Let's try to stitch the traces together. */
+ errcode = btrace_stitch_trace (&btrace, tp);
+ }
+ else
+ {
+ /* We failed to read delta trace. Let's try to read new trace. */
+ errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_NEW);
+
+ /* If we got any new trace, discard what we have. */
+ if (errcode == 0 && !btrace_data_empty (&btrace))
+ btrace_clear (tp);
+ }
+
+ /* If we were not able to read the trace, we start over. */
+ if (errcode != 0)
+ {
+ btrace_clear (tp);
+ errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);
+ }
+ }
+ else
+ errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);
+
+ /* If we were not able to read the branch trace, signal an error. */
+ if (errcode != 0)
+ error (_("Failed to read branch trace."));
+
+ /* Compute the trace, provided we have any. */
+ if (!btrace_data_empty (&btrace))
+ {
+ /* Store the raw trace data. The stored data will be cleared in
+ btrace_clear, so we always append the new trace. */
+ btrace_data_append (&btinfo->data, &btrace);
+ btrace_maint_clear (btinfo);
+
+ btrace_clear_history (btinfo);
+ btrace_compute_ftrace (tp, &btrace);
+ }
+
+ do_cleanups (cleanup);
+}
+
+/* See btrace.h. */
+
+void
+btrace_clear (struct thread_info *tp)
+{
+ struct btrace_thread_info *btinfo;
+
+ DEBUG ("clear thread %s (%s)", print_thread_id (tp),
+ target_pid_to_str (tp->ptid));
+
+ /* Make sure btrace frames that may hold a pointer into the branch
+ trace data are destroyed. */
+ reinit_frame_cache ();
+
+ btinfo = &tp->btrace;
+
+ btinfo->functions.clear ();
+ btinfo->ngaps = 0;
+
+ /* Must clear the maint data before - it depends on BTINFO->DATA. */
+ btrace_maint_clear (btinfo);
+ btrace_data_clear (&btinfo->data);
+ btrace_clear_history (btinfo);
+}
+
+/* See btrace.h. */
+
+void
+btrace_free_objfile (struct objfile *objfile)
+{
+ struct thread_info *tp;
+
+ DEBUG ("free objfile");
+
+ ALL_NON_EXITED_THREADS (tp)
+ btrace_clear (tp);
+}
+
+#if defined (HAVE_LIBEXPAT)
+
+/* Check the btrace document version. */
+
+static void
+check_xml_btrace_version (struct gdb_xml_parser *parser,
+ const struct gdb_xml_element *element,
+ void *user_data, VEC (gdb_xml_value_s) *attributes)
+{
+ const char *version
+ = (const char *) xml_find_attribute (attributes, "version")->value;
+
+ if (strcmp (version, "1.0") != 0)
+ gdb_xml_error (parser, _("Unsupported btrace version: \"%s\""), version);
+}
+
+/* Parse a btrace "block" xml record. */
+
+static void
+parse_xml_btrace_block (struct gdb_xml_parser *parser,
+ const struct gdb_xml_element *element,
+ void *user_data, VEC (gdb_xml_value_s) *attributes)
+{
+ struct btrace_data *btrace;
+ struct btrace_block *block;
+ ULONGEST *begin, *end;
+
+ btrace = (struct btrace_data *) user_data;
+
+ switch (btrace->format)
+ {
+ case BTRACE_FORMAT_BTS:
+ break;
+
+ case BTRACE_FORMAT_NONE:
+ btrace->format = BTRACE_FORMAT_BTS;
+ btrace->variant.bts.blocks = NULL;
+ break;
+
+ default:
+ gdb_xml_error (parser, _("Btrace format error."));
+ }
+
+ begin = (ULONGEST *) xml_find_attribute (attributes, "begin")->value;
+ end = (ULONGEST *) xml_find_attribute (attributes, "end")->value;
+
+ block = VEC_safe_push (btrace_block_s, btrace->variant.bts.blocks, NULL);
+ block->begin = *begin;
+ block->end = *end;
+}
+
+/* Parse a "raw" xml record. */
+
+static void
+parse_xml_raw (struct gdb_xml_parser *parser, const char *body_text,
+ gdb_byte **pdata, size_t *psize)
+{
+ struct cleanup *cleanup;
+ gdb_byte *data, *bin;
+ size_t len, size;
+
+ len = strlen (body_text);
+ if (len % 2 != 0)
+ gdb_xml_error (parser, _("Bad raw data size."));
+
+ size = len / 2;
+
+ bin = data = (gdb_byte *) xmalloc (size);
+ cleanup = make_cleanup (xfree, data);
+
+ /* We use hex encoding - see common/rsp-low.h. */
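+ /* For example, the two characters "2a" decode to the single byte 0x2a. */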
+ while (len > 0)
+ {
+ char hi, lo;
+
+ hi = *body_text++;
+ lo = *body_text++;
+
+ if (hi == 0 || lo == 0)
+ gdb_xml_error (parser, _("Bad hex encoding."));
+
+ *bin++ = fromhex (hi) * 16 + fromhex (lo);
+ len -= 2;
+ }
+
+ discard_cleanups (cleanup);
+
+ *pdata = data;
+ *psize = size;
+}
+
+/* Parse a btrace pt-config "cpu" xml record. */
+
+static void
+parse_xml_btrace_pt_config_cpu (struct gdb_xml_parser *parser,
+ const struct gdb_xml_element *element,
+ void *user_data,
+ VEC (gdb_xml_value_s) *attributes)
+{
+ struct btrace_data *btrace;
+ const char *vendor;
+ ULONGEST *family, *model, *stepping;
+
+ vendor = (const char *) xml_find_attribute (attributes, "vendor")->value;
+ family = (ULONGEST *) xml_find_attribute (attributes, "family")->value;
+ model = (ULONGEST *) xml_find_attribute (attributes, "model")->value;
+ stepping = (ULONGEST *) xml_find_attribute (attributes, "stepping")->value;
+
+ btrace = (struct btrace_data *) user_data;
+
+ if (strcmp (vendor, "GenuineIntel") == 0)
+ btrace->variant.pt.config.cpu.vendor = CV_INTEL;
+
+ btrace->variant.pt.config.cpu.family = *family;
+ btrace->variant.pt.config.cpu.model = *model;
+ btrace->variant.pt.config.cpu.stepping = *stepping;
+}
+
+/* Parse a btrace pt "raw" xml record. */
+
+static void
+parse_xml_btrace_pt_raw (struct gdb_xml_parser *parser,
+ const struct gdb_xml_element *element,
+ void *user_data, const char *body_text)
+{
+ struct btrace_data *btrace;
+
+ btrace = (struct btrace_data *) user_data;
+ parse_xml_raw (parser, body_text, &btrace->variant.pt.data,
+ &btrace->variant.pt.size);
+}
+
+/* Parse a btrace "pt" xml record. */
+
+static void
+parse_xml_btrace_pt (struct gdb_xml_parser *parser,
+ const struct gdb_xml_element *element,
+ void *user_data, VEC (gdb_xml_value_s) *attributes)
+{
+ struct btrace_data *btrace;
+
+ btrace = (struct btrace_data *) user_data;
+ btrace->format = BTRACE_FORMAT_PT;
+ btrace->variant.pt.config.cpu.vendor = CV_UNKNOWN;
+ btrace->variant.pt.data = NULL;
+ btrace->variant.pt.size = 0;
+}
+
+static const struct gdb_xml_attribute block_attributes[] = {
+ { "begin", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
+ { "end", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
+ { NULL, GDB_XML_AF_NONE, NULL, NULL }
+};
+
+static const struct gdb_xml_attribute btrace_pt_config_cpu_attributes[] = {
+ { "vendor", GDB_XML_AF_NONE, NULL, NULL },
+ { "family", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
+ { "model", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
+ { "stepping", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
+ { NULL, GDB_XML_AF_NONE, NULL, NULL }
+};
+
+static const struct gdb_xml_element btrace_pt_config_children[] = {
+ { "cpu", btrace_pt_config_cpu_attributes, NULL, GDB_XML_EF_OPTIONAL,
+ parse_xml_btrace_pt_config_cpu, NULL },
+ { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
+};
+
+static const struct gdb_xml_element btrace_pt_children[] = {
+ { "pt-config", NULL, btrace_pt_config_children, GDB_XML_EF_OPTIONAL, NULL,
+ NULL },
+ { "raw", NULL, NULL, GDB_XML_EF_OPTIONAL, NULL, parse_xml_btrace_pt_raw },
+ { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
+};
+
+static const struct gdb_xml_attribute btrace_attributes[] = {
+ { "version", GDB_XML_AF_NONE, NULL, NULL },
+ { NULL, GDB_XML_AF_NONE, NULL, NULL }
+};
+
+static const struct gdb_xml_element btrace_children[] = {
+ { "block", block_attributes, NULL,
+ GDB_XML_EF_REPEATABLE | GDB_XML_EF_OPTIONAL, parse_xml_btrace_block, NULL },
+ { "pt", NULL, btrace_pt_children, GDB_XML_EF_OPTIONAL, parse_xml_btrace_pt,
+ NULL },
+ { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
+};
+
+static const struct gdb_xml_element btrace_elements[] = {
+ { "btrace", btrace_attributes, btrace_children, GDB_XML_EF_NONE,
+ check_xml_btrace_version, NULL },
+ { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
+};
+
+#endif /* defined (HAVE_LIBEXPAT) */
+
+/* See btrace.h. */
+
+void
+parse_xml_btrace (struct btrace_data *btrace, const char *buffer)
+{
+ struct cleanup *cleanup;
+ int errcode;
+
+#if defined (HAVE_LIBEXPAT)
+
+ btrace->format = BTRACE_FORMAT_NONE;
+
+ cleanup = make_cleanup_btrace_data (btrace);
+ errcode = gdb_xml_parse_quick (_("btrace"), "btrace.dtd", btrace_elements,
+ buffer, btrace);
+ if (errcode != 0)
+ error (_("Error parsing branch trace."));
+
+ /* Keep parse results. */
+ discard_cleanups (cleanup);
+
+#else /* !defined (HAVE_LIBEXPAT) */
+
+ error (_("Cannot process branch trace. XML parsing is not supported."));
+
+#endif /* !defined (HAVE_LIBEXPAT) */
+}
+
+#if defined (HAVE_LIBEXPAT)
+
+/* Parse a btrace-conf "bts" xml record. */
+
+static void
+parse_xml_btrace_conf_bts (struct gdb_xml_parser *parser,
+ const struct gdb_xml_element *element,
+ void *user_data, VEC (gdb_xml_value_s) *attributes)
+{
+ struct btrace_config *conf;
+ struct gdb_xml_value *size;
+
+ conf = (struct btrace_config *) user_data;
+ conf->format = BTRACE_FORMAT_BTS;
+ conf->bts.size = 0;
+
+ size = xml_find_attribute (attributes, "size");
+ if (size != NULL)
+ conf->bts.size = (unsigned int) *(ULONGEST *) size->value;
+}
+
+/* Parse a btrace-conf "pt" xml record. */
+
+static void
+parse_xml_btrace_conf_pt (struct gdb_xml_parser *parser,
+ const struct gdb_xml_element *element,
+ void *user_data, VEC (gdb_xml_value_s) *attributes)
+{
+ struct btrace_config *conf;
+ struct gdb_xml_value *size;
+
+ conf = (struct btrace_config *) user_data;
+ conf->format = BTRACE_FORMAT_PT;
+ conf->pt.size = 0;
+
+ size = xml_find_attribute (attributes, "size");
+ if (size != NULL)
+ conf->pt.size = (unsigned int) *(ULONGEST *) size->value;
+}
+
+static const struct gdb_xml_attribute btrace_conf_pt_attributes[] = {
+ { "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
+ { NULL, GDB_XML_AF_NONE, NULL, NULL }
+};
+
+static const struct gdb_xml_attribute btrace_conf_bts_attributes[] = {
+ { "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
+ { NULL, GDB_XML_AF_NONE, NULL, NULL }
+};
+
+static const struct gdb_xml_element btrace_conf_children[] = {
+ { "bts", btrace_conf_bts_attributes, NULL, GDB_XML_EF_OPTIONAL,
+ parse_xml_btrace_conf_bts, NULL },
+ { "pt", btrace_conf_pt_attributes, NULL, GDB_XML_EF_OPTIONAL,
+ parse_xml_btrace_conf_pt, NULL },
+ { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
+};
+
+static const struct gdb_xml_attribute btrace_conf_attributes[] = {
+ { "version", GDB_XML_AF_NONE, NULL, NULL },
+ { NULL, GDB_XML_AF_NONE, NULL, NULL }
+};
+
+static const struct gdb_xml_element btrace_conf_elements[] = {
+ { "btrace-conf", btrace_conf_attributes, btrace_conf_children,
+ GDB_XML_EF_NONE, NULL, NULL },
+ { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
+};
+
+#endif /* defined (HAVE_LIBEXPAT) */
+
+/* See btrace.h. */
+
+void
+parse_xml_btrace_conf (struct btrace_config *conf, const char *xml)
+{
+ int errcode;
+
+#if defined (HAVE_LIBEXPAT)
+
+ errcode = gdb_xml_parse_quick (_("btrace-conf"), "btrace-conf.dtd",
+ btrace_conf_elements, xml, conf);
+ if (errcode != 0)
+ error (_("Error parsing branch trace configuration."));
+
+#else /* !defined (HAVE_LIBEXPAT) */
+
+ error (_("XML parsing is not supported."));
+
+#endif /* !defined (HAVE_LIBEXPAT) */
+}
+
+/* See btrace.h. */
+
+const struct btrace_insn *
+btrace_insn_get (const struct btrace_insn_iterator *it)
+{
+ const struct btrace_function *bfun;
+ unsigned int index, end;
+
+ index = it->insn_index;
+ bfun = &it->btinfo->functions[it->call_index];
+
+ /* Check if the iterator points to a gap in the trace. */
+ if (bfun->errcode != 0)
+ return NULL;
+
+ /* The index is within the bounds of this function's instruction vector. */
+ end = bfun->insn.size ();
+ gdb_assert (0 < end);
+ gdb_assert (index < end);
+
+ return &bfun->insn[index];
+}
+
+/* See btrace.h. */
+
+int
+btrace_insn_get_error (const struct btrace_insn_iterator *it)
+{
+ return it->btinfo->functions[it->call_index].errcode;
+}
+
+/* See btrace.h. */
+
+unsigned int
+btrace_insn_number (const struct btrace_insn_iterator *it)
+{
+ return it->btinfo->functions[it->call_index].insn_offset + it->insn_index;
+}
+
+/* See btrace.h. */
+
+void
+btrace_insn_begin (struct btrace_insn_iterator *it,
+ const struct btrace_thread_info *btinfo)
+{
+ if (btinfo->functions.empty ())
+ error (_("No trace."));
+
+ it->btinfo = btinfo;
+ it->call_index = 0;
+ it->insn_index = 0;
+}
+
+/* See btrace.h. */
+
+void
+btrace_insn_end (struct btrace_insn_iterator *it,
+ const struct btrace_thread_info *btinfo)
+{
+ const struct btrace_function *bfun;
+ unsigned int length;
+
+ if (btinfo->functions.empty ())
+ error (_("No trace."));
+
+ bfun = &btinfo->functions.back ();
+ length = bfun->insn.size ();
+
+ /* The last function may either be a gap or it contains the current
+ instruction, which is one past the end of the execution trace; ignore
+ it. */
+ if (length > 0)
+ length -= 1;
+
+ it->btinfo = btinfo;
+ it->call_index = bfun->number - 1;
+ it->insn_index = length;
+}
+
+/* See btrace.h. */
+
+unsigned int
+btrace_insn_next (struct btrace_insn_iterator *it, unsigned int stride)
+{
+ const struct btrace_function *bfun;
+ unsigned int index, steps;
+
+ bfun = &it->btinfo->functions[it->call_index];
+ steps = 0;
+ index = it->insn_index;
+
+ while (stride != 0)
+ {
+ unsigned int end, space, adv;
+
+ end = bfun->insn.size ();
+
+ /* An empty function segment represents a gap in the trace. We count
+ it as one instruction. */
+ if (end == 0)
+ {
+ const struct btrace_function *next;
+
+ next = ftrace_find_call_by_number (it->btinfo, bfun->number + 1);
+ if (next == NULL)
+ break;
+
+ stride -= 1;
+ steps += 1;
+
+ bfun = next;
+ index = 0;
+
+ continue;
+ }
+
+ gdb_assert (0 < end);
+ gdb_assert (index < end);
+
+ /* Compute the number of instructions remaining in this segment. */
+ space = end - index;
+
+ /* Advance the iterator as far as possible within this segment. */
+ adv = std::min (space, stride);
+ stride -= adv;
+ index += adv;
+ steps += adv;
+
+ /* Move to the next function if we're at the end of this one. */
+ if (index == end)
+ {
+ const struct btrace_function *next;
+
+ next = ftrace_find_call_by_number (it->btinfo, bfun->number + 1);
+ if (next == NULL)
+ {
+ /* We stepped past the last function.
+
+ Let's adjust the index to point to the last instruction in
+ the previous function. */
+ index -= 1;
+ steps -= 1;
+ break;
+ }
+
+ /* We now point to the first instruction in the new function. */
+ bfun = next;
+ index = 0;
+ }
+
+ /* We did make progress. */
+ gdb_assert (adv > 0);
+ }
+
+ /* Update the iterator. */
+ it->call_index = bfun->number - 1;
+ it->insn_index = index;
+
+ return steps;
+}
+
+/* See btrace.h. */
+
+unsigned int
+btrace_insn_prev (struct btrace_insn_iterator *it, unsigned int stride)
+{
+ const struct btrace_function *bfun;
+ unsigned int index, steps;
+
+ bfun = &it->btinfo->functions[it->call_index];
+ steps = 0;
+ index = it->insn_index;
+
+ while (stride != 0)
+ {
+ unsigned int adv;
+
+ /* Move to the previous function if we're at the start of this one. */
+ if (index == 0)
+ {
+ const struct btrace_function *prev;
+
+ prev = ftrace_find_call_by_number (it->btinfo, bfun->number - 1);
+ if (prev == NULL)
+ break;
+
+ /* We point to one after the last instruction in the new function. */
+ bfun = prev;
+ index = bfun->insn.size ();
+
+ /* An empty function segment represents a gap in the trace. We count
+ it as one instruction. */
+ if (index == 0)
+ {
+ stride -= 1;
+ steps += 1;
+
+ continue;
+ }
+ }
+
+ /* Advance the iterator as far as possible within this segment. */
+ adv = std::min (index, stride);
+
+ stride -= adv;
+ index -= adv;
+ steps += adv;
+
+ /* We did make progress. */
+ gdb_assert (adv > 0);
+ }
+
+ /* Update the iterator. */
+ it->call_index = bfun->number - 1;
+ it->insn_index = index;
+
+ return steps;
+}
+
+/* See btrace.h. */
+
+int
+btrace_insn_cmp (const struct btrace_insn_iterator *lhs,
+ const struct btrace_insn_iterator *rhs)
+{
+ gdb_assert (lhs->btinfo == rhs->btinfo);
+
+ if (lhs->call_index != rhs->call_index)
+ return lhs->call_index - rhs->call_index;
+
+ return lhs->insn_index - rhs->insn_index;
+}
+
+/* See btrace.h. */
+
+int
+btrace_find_insn_by_number (struct btrace_insn_iterator *it,
+ const struct btrace_thread_info *btinfo,
+ unsigned int number)
+{
+ const struct btrace_function *bfun;
+ unsigned int upper, lower;
+
+ if (btinfo->functions.empty ())
+ return 0;
+
+ lower = 0;
+ bfun = &btinfo->functions[lower];
+ if (number < bfun->insn_offset)
+ return 0;
+
+ upper = btinfo->functions.size () - 1;
+ bfun = &btinfo->functions[upper];
+ if (number >= bfun->insn_offset + ftrace_call_num_insn (bfun))
+ return 0;
+
+ /* We assume that there are no holes in the numbering. */
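+ /* Binary search for the function segment whose instruction range
+ [insn_offset, insn_offset + num_insn) contains NUMBER. */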
+ for (;;)
+ {
+ const unsigned int average = lower + (upper - lower) / 2;
+
+ bfun = &btinfo->functions[average];
+
+ if (number < bfun->insn_offset)
+ {
+ upper = average - 1;
+ continue;
+ }
+
+ if (number >= bfun->insn_offset + ftrace_call_num_insn (bfun))
+ {
+ lower = average + 1;
+ continue;
+ }
+
+ break;
+ }
+
+ it->btinfo = btinfo;
+ it->call_index = bfun->number - 1;
+ it->insn_index = number - bfun->insn_offset;
+ return 1;
+}
+
+/* Returns true if the recording ends with a function segment that
+ contains only a single (i.e. the current) instruction. */
+
+static bool
+btrace_ends_with_single_insn (const struct btrace_thread_info *btinfo)
+{
+ const btrace_function *bfun;
+
+ if (btinfo->functions.empty ())
+ return false;
+
+ bfun = &btinfo->functions.back ();
+ if (bfun->errcode != 0)
+ return false;
+
+ return ftrace_call_num_insn (bfun) == 1;
+}
+
+/* See btrace.h. */
+
+const struct btrace_function *
+btrace_call_get (const struct btrace_call_iterator *it)
+{
+ if (it->index >= it->btinfo->functions.size ())
+ return NULL;
+
+ return &it->btinfo->functions[it->index];
+}
+
+/* See btrace.h. */
+
+unsigned int
+btrace_call_number (const struct btrace_call_iterator *it)
+{
+ const unsigned int length = it->btinfo->functions.size ();
+
+ /* If the last function segment contains only a single instruction (i.e. the
+ current instruction), skip it. */
+ if ((it->index == length) && btrace_ends_with_single_insn (it->btinfo))
+ return length;
+
+ return it->index + 1;
+}
+
+/* See btrace.h. */
+
+void
+btrace_call_begin (struct btrace_call_iterator *it,
+ const struct btrace_thread_info *btinfo)
+{
+ if (btinfo->functions.empty ())
+ error (_("No trace."));
+
+ it->btinfo = btinfo;
+ it->index = 0;
+}
+
+/* See btrace.h. */
+
+void
+btrace_call_end (struct btrace_call_iterator *it,
+ const struct btrace_thread_info *btinfo)
+{
+ if (btinfo->functions.empty ())
+ error (_("No trace."));
+
+ it->btinfo = btinfo;
+ it->index = btinfo->functions.size ();
+}
+
+/* See btrace.h. */
+
+unsigned int
+btrace_call_next (struct btrace_call_iterator *it, unsigned int stride)
+{
+ const unsigned int length = it->btinfo->functions.size ();
+
+ if (it->index + stride < length - 1)
+ /* Default case: Simply advance the iterator. */
+ it->index += stride;
+ else if (it->index + stride == length - 1)
+ {
+ /* We land exactly at the last function segment. If it contains only one
+ instruction (i.e. the current instruction) it is not actually part of
+ the trace. */
+ if (btrace_ends_with_single_insn (it->btinfo))
+ it->index = length;
+ else
+ it->index = length - 1;
+ }
+ else
+ {
+ /* We land past the last function segment and have to adjust the stride.
+ If the last function segment contains only one instruction (i.e. the
+ current instruction) it is not actually part of the trace. */
+ if (btrace_ends_with_single_insn (it->btinfo))
+ stride = length - it->index - 1;
+ else
+ stride = length - it->index;
+
+ it->index = length;
+ }
+
+ return stride;
+}
+
+/* See btrace.h. */
+
+unsigned int
+btrace_call_prev (struct btrace_call_iterator *it, unsigned int stride)
+{
+ const unsigned int length = it->btinfo->functions.size ();
+ int steps = 0;
+
+ gdb_assert (it->index <= length);
+
+ if (stride == 0 || it->index == 0)
+ return 0;
+
+ /* If we are at the end, the first step is a special case. If the last
+ function segment contains only one instruction (i.e. the current
+ instruction) it is not actually part of the trace. To be able to step
+ over this instruction, we need at least one more function segment. */
+ if ((it->index == length) && (length > 1))
+ {
+ if (btrace_ends_with_single_insn (it->btinfo))
+ it->index = length - 2;
+ else
+ it->index = length - 1;
+
+ steps = 1;
+ stride -= 1;
+ }
+
+ stride = std::min (stride, it->index);
+
+ it->index -= stride;
+ return steps + stride;
+}
+
+/* See btrace.h. */
+
+int
+btrace_call_cmp (const struct btrace_call_iterator *lhs,
+ const struct btrace_call_iterator *rhs)
+{
+ gdb_assert (lhs->btinfo == rhs->btinfo);
+ return (int) (lhs->index - rhs->index);
+}
+
+/* See btrace.h. */
+
+int
+btrace_find_call_by_number (struct btrace_call_iterator *it,
+ const struct btrace_thread_info *btinfo,
+ unsigned int number)
+{
+ const unsigned int length = btinfo->functions.size ();
+
+ if ((number == 0) || (number > length))
+ return 0;
+
+ it->btinfo = btinfo;
+ it->index = number - 1;
+ return 1;
+}
+
+/* See btrace.h. */
+
+void
+btrace_set_insn_history (struct btrace_thread_info *btinfo,
+ const struct btrace_insn_iterator *begin,
+ const struct btrace_insn_iterator *end)
+{
+ if (btinfo->insn_history == NULL)
+ btinfo->insn_history = XCNEW (struct btrace_insn_history);