+/* Try to match the back trace at LHS to the back trace at RHS. Returns the
+ number of matching function segments or zero if the back traces do not
+ match. BTINFO is the branch trace information for the current thread. */
+
+static int
+ftrace_match_backtrace (struct btrace_thread_info *btinfo,
+ struct btrace_function *lhs,
+ struct btrace_function *rhs)
+{
+ int matches;
+
+ for (matches = 0; lhs != NULL && rhs != NULL; ++matches)
+ {
+ if (ftrace_function_switched (lhs, rhs->msym, rhs->sym))
+ return 0;
+
+ lhs = ftrace_get_caller (btinfo, lhs);
+ rhs = ftrace_get_caller (btinfo, rhs);
+ }
+
+ return matches;
+}
+
+/* Add ADJUSTMENT to the level of BFUN and succeeding function segments.
+ BTINFO is the branch trace information for the current thread. */
+
+static void
+ftrace_fixup_level (struct btrace_thread_info *btinfo,
+ struct btrace_function *bfun, int adjustment)
+{
+ if (adjustment == 0)
+ return;
+
+ DEBUG_FTRACE ("fixup level (%+d)", adjustment);
+ ftrace_debug (bfun, "..bfun");
+
+ while (bfun != NULL)
+ {
+ bfun->level += adjustment;
+ bfun = ftrace_find_call_by_number (btinfo, bfun->number + 1);
+ }
+}
+
+/* Recompute the global level offset. Traverse the function trace and compute
+ the global level offset as the negative of the minimal function level. */
+
+static void
+ftrace_compute_global_level_offset (struct btrace_thread_info *btinfo)
+{
+ int level = INT_MAX;
+
+ if (btinfo == NULL)
+ return;
+
+ if (btinfo->functions.empty ())
+ return;
+
+ unsigned int length = btinfo->functions.size() - 1;
+ for (unsigned int i = 0; i < length; ++i)
+ level = std::min (level, btinfo->functions[i].level);
+
+ /* The last function segment contains the current instruction, which is not
+ really part of the trace. If it contains just this one instruction, we
+ ignore the segment. */
+ struct btrace_function *last = &btinfo->functions.back();
+ if (last->insn.size () != 1)
+ level = std::min (level, last->level);
+
+ DEBUG_FTRACE ("setting global level offset: %d", -level);
+ btinfo->level = -level;
+}
+
/* Connect the function segments PREV and NEXT in a bottom-to-top walk as in
   ftrace_connect_backtrace.  BTINFO is the branch trace information for the
   current thread.

   Besides linking PREV and NEXT, this propagates caller (up-link)
   information from whichever side still has a back trace onto the side
   that ran out, and re-aligns function levels accordingly.  */

static void
ftrace_connect_bfun (struct btrace_thread_info *btinfo,
		     struct btrace_function *prev,
		     struct btrace_function *next)
{
  DEBUG_FTRACE ("connecting...");
  ftrace_debug (prev, "..prev");
  ftrace_debug (next, "..next");

  /* The function segments are not yet connected.  A segment number of
     zero means "no segment".  */
  gdb_assert (prev->next == 0);
  gdb_assert (next->prev == 0);

  /* Link the two segments by their segment numbers.  */
  prev->next = next->number;
  next->prev = prev->number;

  /* We may have moved NEXT to a different function level.  */
  ftrace_fixup_level (btinfo, next, prev->level - next->level);

  /* If we run out of back trace for one, let's use the other's.  */
  if (prev->up == 0)
    {
      /* Save NEXT's caller flags before we rewrite its up link below.  */
      const btrace_function_flags flags = next->flags;

      next = ftrace_find_call_by_number (btinfo, next->up);
      if (next != NULL)
	{
	  DEBUG_FTRACE ("using next's callers");
	  ftrace_fixup_caller (btinfo, prev, next, flags);
	}
    }
  else if (next->up == 0)
    {
      /* Symmetric case: give NEXT's side PREV's caller chain.  */
      const btrace_function_flags flags = prev->flags;

      prev = ftrace_find_call_by_number (btinfo, prev->up);
      if (prev != NULL)
	{
	  DEBUG_FTRACE ("using prev's callers");
	  ftrace_fixup_caller (btinfo, next, prev, flags);
	}
    }
  else
    {
      /* PREV may have a tailcall caller, NEXT can't.  If it does, fixup the up
	 link to add the tail callers to NEXT's back trace.

	 This removes NEXT->UP from NEXT's back trace.  It will be added back
	 when connecting NEXT and PREV's callers - provided they exist.

	 If PREV's back trace consists of a series of tail calls without an
	 actual call, there will be no further connection and NEXT's caller will
	 be removed for good.  To catch this case, we handle it here and connect
	 the top of PREV's back trace to NEXT's caller.  */
      if ((prev->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
	{
	  struct btrace_function *caller;
	  btrace_function_flags next_flags, prev_flags;

	  /* We checked NEXT->UP above so CALLER can't be NULL.  */
	  caller = ftrace_find_call_by_number (btinfo, next->up);
	  next_flags = next->flags;
	  prev_flags = prev->flags;

	  DEBUG_FTRACE ("adding prev's tail calls to next");

	  prev = ftrace_find_call_by_number (btinfo, prev->up);
	  ftrace_fixup_caller (btinfo, next, prev, prev_flags);

	  /* Walk up PREV's (tail-call) caller chain until we either reach
	     its top or hit a real (non-tailcall) call.  */
	  for (; prev != NULL; prev = ftrace_find_call_by_number (btinfo,
								  prev->up))
	    {
	      /* At the end of PREV's back trace, continue with CALLER.  */
	      if (prev->up == 0)
		{
		  DEBUG_FTRACE ("fixing up link for tailcall chain");
		  ftrace_debug (prev, "..top");
		  ftrace_debug (caller, "..up");

		  ftrace_fixup_caller (btinfo, prev, caller, next_flags);

		  /* If we skipped any tail calls, this may move CALLER to a
		     different function level.

		     Note that changing CALLER's level is only OK because we
		     know that this is the last iteration of the bottom-to-top
		     walk in ftrace_connect_backtrace.

		     Otherwise we will fix up CALLER's level when we connect it
		     to PREV's caller in the next iteration.  */
		  ftrace_fixup_level (btinfo, caller,
				      prev->level - caller->level - 1);
		  break;
		}

	      /* There's nothing to do if we find a real call.  */
	      if ((prev->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
		{
		  DEBUG_FTRACE ("will fix up link in next iteration");
		  break;
		}
	    }
	}
    }
}
+
+/* Connect function segments on the same level in the back trace at LHS and RHS.
+ The back traces at LHS and RHS are expected to match according to
+ ftrace_match_backtrace. BTINFO is the branch trace information for the
+ current thread. */
+
+static void
+ftrace_connect_backtrace (struct btrace_thread_info *btinfo,
+ struct btrace_function *lhs,
+ struct btrace_function *rhs)
+{
+ while (lhs != NULL && rhs != NULL)
+ {
+ struct btrace_function *prev, *next;
+
+ gdb_assert (!ftrace_function_switched (lhs, rhs->msym, rhs->sym));
+
+ /* Connecting LHS and RHS may change the up link. */
+ prev = lhs;
+ next = rhs;
+
+ lhs = ftrace_get_caller (btinfo, lhs);
+ rhs = ftrace_get_caller (btinfo, rhs);
+
+ ftrace_connect_bfun (btinfo, prev, next);
+ }
+}
+
+/* Bridge the gap between two function segments left and right of a gap if their
+ respective back traces match in at least MIN_MATCHES functions. BTINFO is
+ the branch trace information for the current thread.
+
+ Returns non-zero if the gap could be bridged, zero otherwise. */
+
+static int
+ftrace_bridge_gap (struct btrace_thread_info *btinfo,
+ struct btrace_function *lhs, struct btrace_function *rhs,
+ int min_matches)
+{
+ struct btrace_function *best_l, *best_r, *cand_l, *cand_r;
+ int best_matches;
+
+ DEBUG_FTRACE ("checking gap at insn %u (req matches: %d)",
+ rhs->insn_offset - 1, min_matches);
+
+ best_matches = 0;
+ best_l = NULL;
+ best_r = NULL;
+
+ /* We search the back traces of LHS and RHS for valid connections and connect
+ the two function segments that give the longest combined back trace. */
+
+ for (cand_l = lhs; cand_l != NULL;
+ cand_l = ftrace_get_caller (btinfo, cand_l))
+ for (cand_r = rhs; cand_r != NULL;
+ cand_r = ftrace_get_caller (btinfo, cand_r))
+ {
+ int matches;
+
+ matches = ftrace_match_backtrace (btinfo, cand_l, cand_r);
+ if (best_matches < matches)
+ {
+ best_matches = matches;
+ best_l = cand_l;
+ best_r = cand_r;
+ }
+ }
+
+ /* We need at least MIN_MATCHES matches. */
+ gdb_assert (min_matches > 0);
+ if (best_matches < min_matches)
+ return 0;
+
+ DEBUG_FTRACE ("..matches: %d", best_matches);
+
+ /* We will fix up the level of BEST_R and succeeding function segments such
+ that BEST_R's level matches BEST_L's when we connect BEST_L to BEST_R.
+
+ This will ignore the level of RHS and following if BEST_R != RHS. I.e. if
+ BEST_R is a successor of RHS in the back trace of RHS (phases 1 and 3).
+
+ To catch this, we already fix up the level here where we can start at RHS
+ instead of at BEST_R. We will ignore the level fixup when connecting
+ BEST_L to BEST_R as they will already be on the same level. */
+ ftrace_fixup_level (btinfo, rhs, best_l->level - best_r->level);
+
+ ftrace_connect_backtrace (btinfo, best_l, best_r);
+
+ return best_matches;
+}
+
/* Try to bridge gaps due to overflow or decode errors by connecting the
   function segments that are separated by the gap.

   TP is the thread whose branch trace is being fixed up; GAPS holds the
   segment numbers of the gap (error) segments and is consumed in the
   process.  */

static void
btrace_bridge_gaps (struct thread_info *tp, std::vector<unsigned int> &gaps)
{
  struct btrace_thread_info *btinfo = &tp->btrace;
  std::vector<unsigned int> remaining;
  int min_matches;

  DEBUG ("bridge gaps");

  /* We require a minimum amount of matches for bridging a gap.  The number of
     required matches will be lowered with each iteration.

     The more matches the higher our confidence that the bridging is correct.
     For big gaps or small traces, however, it may not be feasible to require a
     high number of matches.  */
  for (min_matches = 5; min_matches > 0; --min_matches)
    {
      /* Let's try to bridge as many gaps as we can.  In some cases, we need to
	 skip a gap and revisit it again after we closed later gaps.  */
      while (!gaps.empty ())
	{
	  for (const unsigned int number : gaps)
	    {
	      struct btrace_function *gap, *lhs, *rhs;
	      int bridged;

	      gap = ftrace_find_call_by_number (btinfo, number);

	      /* We may have a sequence of gaps if we run from one error into
		 the next as we try to re-sync onto the trace stream.  Ignore
		 all but the leftmost gap in such a sequence.

		 Also ignore gaps at the beginning of the trace.  */
	      lhs = ftrace_find_call_by_number (btinfo, gap->number - 1);
	      if (lhs == NULL || lhs->errcode != 0)
		continue;

	      /* Skip gaps to the right.  A non-zero ERRCODE marks a gap
		 segment.  */
	      rhs = ftrace_find_call_by_number (btinfo, gap->number + 1);
	      while (rhs != NULL && rhs->errcode != 0)
		rhs = ftrace_find_call_by_number (btinfo, rhs->number + 1);

	      /* Ignore gaps at the end of the trace.  */
	      if (rhs == NULL)
		continue;

	      bridged = ftrace_bridge_gap (btinfo, lhs, rhs, min_matches);

	      /* Keep track of gaps we were not able to bridge and try again.
		 If we just pushed them to the end of GAPS we would risk an
		 infinite loop in case we simply cannot bridge a gap.  */
	      if (bridged == 0)
		remaining.push_back (number);
	    }

	  /* Let's see if we made any progress.  If not, stop retrying at
	     this MIN_MATCHES level; a lower threshold may still succeed.  */
	  if (remaining.size () == gaps.size ())
	    break;

	  /* Move the unbridged gaps back into GAPS for the next pass;
	     REMAINING ends up empty after the swap.  */
	  gaps.clear ();
	  gaps.swap (remaining);
	}

      /* We get here if either GAPS is empty or if GAPS equals REMAINING.  */
      if (gaps.empty ())
	break;

      remaining.clear ();
    }

  /* We may omit this in some cases.  Not sure it is worth the extra
     complication, though.  */
  ftrace_compute_global_level_offset (btinfo);
}
+