level = bfun->level;
ibegin = bfun->insn_offset;
- iend = ibegin + VEC_length (btrace_insn_s, bfun->insn);
+ iend = ibegin + bfun->insn.size ();
DEBUG_FTRACE ("%s: fun = %s, file = %s, level = %d, insn = [%u; %u)",
prefix, fun, file, level, ibegin, iend);
if (bfun->errcode != 0)
return 1;
- return VEC_length (btrace_insn_s, bfun->insn);
+ return bfun->insn.size ();
}
/* Return the function segment with the given NUMBER or NULL if no such segment
exists. BTINFO is the branch trace information for the current thread. */
static struct btrace_function *
+ftrace_find_call_by_number (struct btrace_thread_info *btinfo,
+ unsigned int number)
+{
+ if (number == 0 || number > btinfo->functions.size ())
+ return NULL;
+
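+ /* Function segments are numbered starting at one, so segment NUMBER
+ lives at vector index NUMBER - 1. */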
+ return &btinfo->functions[number - 1];
+}
+
+/* A const version of the function above. */
+
+static const struct btrace_function *
ftrace_find_call_by_number (const struct btrace_thread_info *btinfo,
unsigned int number)
{
if (number == 0 || number > btinfo->functions.size ())
return NULL;
- return btinfo->functions[number - 1];
+ return &btinfo->functions[number - 1];
}
/* Return non-zero if BFUN does not match MFUN and FUN,
/* Allocate and initialize a new branch trace function segment at the end of
the trace.
BTINFO is the branch trace information for the current thread.
- MFUN and FUN are the symbol information we have for this function. */
+ MFUN and FUN are the symbol information we have for this function.
+ This invalidates all struct btrace_function pointers currently held. */
static struct btrace_function *
ftrace_new_function (struct btrace_thread_info *btinfo,
struct minimal_symbol *mfun,
struct symbol *fun)
{
- struct btrace_function *bfun;
-
- bfun = XCNEW (struct btrace_function);
-
- bfun->msym = mfun;
- bfun->sym = fun;
+ int level;
+ unsigned int number, insn_offset;
if (btinfo->functions.empty ())
{
- /* Start counting at one. */
- bfun->number = 1;
- bfun->insn_offset = 1;
+ /* Start counting NUMBER and INSN_OFFSET at one. */
+ level = 0;
+ number = 1;
+ insn_offset = 1;
}
else
{
- struct btrace_function *prev = btinfo->functions.back ();
-
- bfun->number = prev->number + 1;
- bfun->insn_offset = prev->insn_offset + ftrace_call_num_insn (prev);
- bfun->level = prev->level;
+ const struct btrace_function *prev = &btinfo->functions.back ();
+ level = prev->level;
+ number = prev->number + 1;
+ insn_offset = prev->insn_offset + ftrace_call_num_insn (prev);
}
- btinfo->functions.push_back (bfun);
- return bfun;
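+ /* Appending may reallocate the vector and thereby invalidate all
+ btrace_function pointers held by callers; hand out a fresh one. */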
+ btinfo->functions.emplace_back (mfun, fun, number, insn_offset, level);
+ return &btinfo->functions.back ();
}
/* Update the UP field of a function segment. */
{
for (; bfun != NULL; bfun = ftrace_find_call_by_number (btinfo, bfun->up))
{
- struct btrace_insn *last;
-
/* Skip gaps. */
if (bfun->errcode != 0)
continue;
- last = VEC_last (btrace_insn_s, bfun->insn);
+ btrace_insn &last = bfun->insn.back ();
- if (last->iclass == BTRACE_INSN_CALL)
+ if (last.iclass == BTRACE_INSN_CALL)
break;
}
struct minimal_symbol *mfun,
struct symbol *fun)
{
- struct btrace_function *prev = btinfo->functions.back ();
- struct btrace_function *bfun, *caller;
+ struct btrace_function *prev, *bfun, *caller;
bfun = ftrace_new_function (btinfo, mfun, fun);
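+ /* Look up PREV only after the call to ftrace_new_function above, which
+ may have invalidated all previously held pointers by reallocating. */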
+ prev = ftrace_find_call_by_number (btinfo, bfun->number - 1);
/* It is important to start at PREV's caller. Otherwise, we might find
PREV itself, if PREV is a recursive function. */
struct minimal_symbol *mfun,
struct symbol *fun)
{
- struct btrace_function *prev = btinfo->functions.back ();
- struct btrace_function *bfun;
+ struct btrace_function *prev, *bfun;
/* This is an unexplained function switch. We can't really be sure about the
call stack, yet the best I can think of right now is to preserve it. */
bfun = ftrace_new_function (btinfo, mfun, fun);
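+ /* As above, fetch PREV by number only after the potentially
+ reallocating call to ftrace_new_function. */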
+ prev = ftrace_find_call_by_number (btinfo, bfun->number - 1);
bfun->up = prev->up;
bfun->flags = prev->flags;
else
{
/* We hijack the previous function segment if it was empty. */
- bfun = btinfo->functions.back ();
- if (bfun->errcode != 0 || !VEC_empty (btrace_insn_s, bfun->insn))
+ bfun = &btinfo->functions.back ();
+ if (bfun->errcode != 0 || !bfun->insn.empty ())
bfun = ftrace_new_function (btinfo, NULL, NULL);
}
struct bound_minimal_symbol bmfun;
struct minimal_symbol *mfun;
struct symbol *fun;
- struct btrace_insn *last;
struct btrace_function *bfun;
/* Try to determine the function we're in. We use both types of symbols
return ftrace_new_function (btinfo, mfun, fun);
/* If we had a gap before, we create a function. */
- bfun = btinfo->functions.back ();
+ bfun = &btinfo->functions.back ();
if (bfun->errcode != 0)
return ftrace_new_function (btinfo, mfun, fun);
/* Check the last instruction, if we have one.
We do this check first, since it allows us to fill in the call stack
links in addition to the normal flow links. */
- last = NULL;
- if (!VEC_empty (btrace_insn_s, bfun->insn))
- last = VEC_last (btrace_insn_s, bfun->insn);
+ btrace_insn *last = NULL;
+ if (!bfun->insn.empty ())
+ last = &bfun->insn.back ();
if (last != NULL)
{
/* Add the instruction at PC to BFUN's instructions. */
static void
-ftrace_update_insns (struct btrace_function *bfun,
- const struct btrace_insn *insn)
+ftrace_update_insns (struct btrace_function *bfun, const btrace_insn &insn)
{
- VEC_safe_push (btrace_insn_s, bfun->insn, insn);
+ bfun->insn.push_back (insn);
if (record_debug > 1)
ftrace_debug (bfun, "update insn");
unsigned int length = btinfo->functions.size() - 1;
for (unsigned int i = 0; i < length; ++i)
- level = std::min (level, btinfo->functions[i]->level);
+ level = std::min (level, btinfo->functions[i].level);
/* The last function segment contains the current instruction, which is not
really part of the trace. If it contains just this one instruction, we
ignore the segment. */
- struct btrace_function *last = btinfo->functions.back();
- if (VEC_length (btrace_insn_s, last->insn) != 1)
+ struct btrace_function *last = &btinfo->functions.back ();
+ if (last->insn.size () != 1)
level = std::min (level, last->level);
DEBUG_FTRACE ("setting global level offset: %d", -level);
insn.iclass = ftrace_classify_insn (gdbarch, pc);
insn.flags = 0;
- ftrace_update_insns (bfun, &insn);
+ ftrace_update_insns (bfun, insn);
/* We're done once we pushed the instruction at the end. */
if (block->end == pc)
pt_btrace_insn_flags (insn)};
}
+/* Handle instruction decode events (libipt-v2). */
+
+static int
+handle_pt_insn_events (struct btrace_thread_info *btinfo,
+ struct pt_insn_decoder *decoder,
+ std::vector<unsigned int> &gaps, int status)
+{
+#if defined (HAVE_PT_INSN_EVENT)
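+ /* Drain all queued events; each successful pt_insn_event call returns
+ a status whose pts_event_pending flag tells whether more follow. */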
+ while (status & pts_event_pending)
+ {
+ struct btrace_function *bfun;
+ struct pt_event event;
+ uint64_t offset;
+
+ status = pt_insn_event (decoder, &event, sizeof (event));
+ if (status < 0)
+ break;
+
+ switch (event.type)
+ {
+ default:
+ break;
+
+ case ptev_enabled:
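+ /* Tracing was enabled at a new location rather than resumed where it
+ had stopped; unless the trace just started, this is a gap. */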
+ if (event.variant.enabled.resumed == 0 && !btinfo->functions.empty ())
+ {
+ bfun = ftrace_new_gap (btinfo, BDE_PT_DISABLED, gaps);
+
+ pt_insn_get_offset (decoder, &offset);
+
+ warning (_("Non-contiguous trace at instruction %u (offset = 0x%"
+ PRIx64 ")."), bfun->insn_offset - 1, offset);
+ }
+
+ break;
+
+ case ptev_overflow:
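+ /* The trace buffer overflowed and instructions were lost. */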
+ bfun = ftrace_new_gap (btinfo, BDE_PT_OVERFLOW, gaps);
+
+ pt_insn_get_offset (decoder, &offset);
+
+ warning (_("Overflow at instruction %u (offset = 0x%" PRIx64 ")."),
+ bfun->insn_offset - 1, offset);
+
+ break;
+ }
+ }
+#endif /* defined (HAVE_PT_INSN_EVENT) */
+
+ return status;
+}
+
+/* Handle events indicated by flags in INSN (libipt-v1). */
+
+static void
+handle_pt_insn_event_flags (struct btrace_thread_info *btinfo,
+ struct pt_insn_decoder *decoder,
+ const struct pt_insn &insn,
+ std::vector<unsigned int> &gaps)
+{
+#if defined (HAVE_STRUCT_PT_INSN_ENABLED)
+ /* Tracing is disabled and re-enabled each time we enter the kernel. Most
+ times, we continue from the same instruction we stopped before. This is
+ indicated via the RESUMED instruction flag. The ENABLED instruction flag
+ means that we continued from some other instruction. Indicate this as a
+ trace gap except when tracing just started. */
+ if (insn.enabled && !btinfo->functions.empty ())
+ {
+ struct btrace_function *bfun;
+ uint64_t offset;
+
+ bfun = ftrace_new_gap (btinfo, BDE_PT_DISABLED, gaps);
+
+ pt_insn_get_offset (decoder, &offset);
+
+ warning (_("Non-contiguous trace at instruction %u (offset = 0x%" PRIx64
+ ", pc = 0x%" PRIx64 ")."), bfun->insn_offset - 1, offset,
+ insn.ip);
+ }
+#endif /* defined (HAVE_STRUCT_PT_INSN_ENABLED) */
+
+#if defined (HAVE_STRUCT_PT_INSN_RESYNCED)
+ /* Indicate trace overflows. */
+ if (insn.resynced)
+ {
+ struct btrace_function *bfun;
+ uint64_t offset;
+
+ bfun = ftrace_new_gap (btinfo, BDE_PT_OVERFLOW, gaps);
+
+ pt_insn_get_offset (decoder, &offset);
+
+ warning (_("Overflow at instruction %u (offset = 0x%" PRIx64 ", pc = 0x%"
+ PRIx64 ")."), bfun->insn_offset - 1, offset, insn.ip);
+ }
+#endif /* defined (HAVE_STRUCT_PT_INSN_RESYNCED) */
+}
/* Add function branch trace to BTINFO using DECODER. */
{
struct btrace_function *bfun;
uint64_t offset;
- int errcode;
+ int status;
for (;;)
{
struct pt_insn insn;
- errcode = pt_insn_sync_forward (decoder);
- if (errcode < 0)
+ status = pt_insn_sync_forward (decoder);
+ if (status < 0)
{
- if (errcode != -pte_eos)
+ if (status != -pte_eos)
warning (_("Failed to synchronize onto the Intel Processor "
- "Trace stream: %s."), pt_errstr (pt_errcode (errcode)));
+ "Trace stream: %s."), pt_errstr (pt_errcode (status)));
break;
}
for (;;)
{
- errcode = pt_insn_next (decoder, &insn, sizeof(insn));
- if (errcode < 0)
+ /* Handle events from the previous iteration or synchronization. */
+ status = handle_pt_insn_events (btinfo, decoder, gaps, status);
+ if (status < 0)
+ break;
+
+ status = pt_insn_next (decoder, &insn, sizeof(insn));
+ if (status < 0)
break;
- /* Look for gaps in the trace - unless we're at the beginning. */
- if (!btinfo->functions.empty ())
- {
- /* Tracing is disabled and re-enabled each time we enter the
- kernel. Most times, we continue from the same instruction we
- stopped before. This is indicated via the RESUMED instruction
- flag. The ENABLED instruction flag means that we continued
- from some other instruction. Indicate this as a trace gap. */
- if (insn.enabled)
- {
- bfun = ftrace_new_gap (btinfo, BDE_PT_DISABLED, gaps);
-
- pt_insn_get_offset (decoder, &offset);
-
- warning (_("Non-contiguous trace at instruction %u (offset "
- "= 0x%" PRIx64 ", pc = 0x%" PRIx64 ")."),
- bfun->insn_offset - 1, offset, insn.ip);
- }
- }
-
- /* Indicate trace overflows. */
- if (insn.resynced)
- {
- bfun = ftrace_new_gap (btinfo, BDE_PT_OVERFLOW, gaps);
-
- pt_insn_get_offset (decoder, &offset);
-
- warning (_("Overflow at instruction %u (offset = 0x%" PRIx64
- ", pc = 0x%" PRIx64 ")."), bfun->insn_offset - 1,
- offset, insn.ip);
- }
+ /* Handle events indicated by flags in INSN. */
+ handle_pt_insn_event_flags (btinfo, decoder, insn, gaps);
bfun = ftrace_update_function (btinfo, insn.ip);
/* Maintain the function level offset. */
*plevel = std::min (*plevel, bfun->level);
- btrace_insn btinsn = pt_btrace_insn (insn);
- ftrace_update_insns (bfun, &btinsn);
+ ftrace_update_insns (bfun, pt_btrace_insn (insn));
}
- if (errcode == -pte_eos)
+ if (status == -pte_eos)
break;
/* Indicate the gap in the trace. */
- bfun = ftrace_new_gap (btinfo, errcode, gaps);
+ bfun = ftrace_new_gap (btinfo, status, gaps);
pt_insn_get_offset (decoder, &offset);
warning (_("Decode error (%d) at instruction %u (offset = 0x%" PRIx64
- ", pc = 0x%" PRIx64 "): %s."), errcode, bfun->insn_offset - 1,
- offset, insn.ip, pt_errstr (pt_errcode (errcode)));
+ ", pc = 0x%" PRIx64 "): %s."), status, bfun->insn_offset - 1,
+ offset, insn.ip, pt_errstr (pt_errcode (status)));
}
}
{
struct btrace_thread_info *btinfo;
struct btrace_function *last_bfun;
- struct btrace_insn *last_insn;
btrace_block_s *first_new_block;
btinfo = &tp->btrace;
gdb_assert (!btinfo->functions.empty ());
gdb_assert (!VEC_empty (btrace_block_s, btrace->blocks));
- last_bfun = btinfo->functions.back ();
+ last_bfun = &btinfo->functions.back ();
/* If the existing trace ends with a gap, we just glue the traces
together. We need to drop the last (i.e. chronologically first) block
of the new trace, though, since we can't fill in the start address. */
- if (VEC_empty (btrace_insn_s, last_bfun->insn))
+ if (last_bfun->insn.empty ())
{
VEC_pop (btrace_block_s, btrace->blocks);
return 0;
chronologically first block in the new trace is the last block in
the new trace's block vector. */
first_new_block = VEC_last (btrace_block_s, btrace->blocks);
- last_insn = VEC_last (btrace_insn_s, last_bfun->insn);
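+ /* The empty case was handled above, so the segment has at least one
+ instruction and back () is safe. */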
+ const btrace_insn &last_insn = last_bfun->insn.back ();
/* If the current PC at the end of the block is the same as in our current
trace, there are two explanations:
entries.
In the second case, the delta trace vector should contain exactly one
entry for the partial block containing the current PC. Remove it. */
- if (first_new_block->end == last_insn->pc
+ if (first_new_block->end == last_insn.pc
&& VEC_length (btrace_block_s, btrace->blocks) == 1)
{
VEC_pop (btrace_block_s, btrace->blocks);
return 0;
}
- DEBUG ("stitching %s to %s", ftrace_print_insn_addr (last_insn),
+ DEBUG ("stitching %s to %s", ftrace_print_insn_addr (&last_insn),
core_addr_to_string_nz (first_new_block->end));
/* Do a simple sanity check to make sure we don't accidentally end up
with a bad block. This should not occur in practice. */
- if (first_new_block->end < last_insn->pc)
+ if (first_new_block->end < last_insn.pc)
{
warning (_("Error while trying to read delta trace. Falling back to "
"a full read."));
/* We adjust the last block to start at the end of our current trace. */
gdb_assert (first_new_block->begin == 0);
- first_new_block->begin = last_insn->pc;
+ first_new_block->begin = last_insn.pc;
/* We simply pop the last insn so we can insert it again as part of
the normal branch trace computation.
Since instruction iterators are based on indices in the instructions
vector, we don't leave any pointers dangling. */
DEBUG ("pruning insn at %s for stitching",
- ftrace_print_insn_addr (last_insn));
+ ftrace_print_insn_addr (&last_insn));
- VEC_pop (btrace_insn_s, last_bfun->insn);
+ last_bfun->insn.pop_back ();
/* The instructions vector may become empty temporarily if this has
been the only instruction in this function segment.
of just that one instruction. If we remove it, we might turn the now
empty btrace function segment into a gap. But we don't want gaps at
the beginning. To avoid this, we remove the entire old trace. */
- if (last_bfun->number == 1 && VEC_empty (btrace_insn_s, last_bfun->insn))
+ if (last_bfun->number == 1 && last_bfun->insn.empty ())
btrace_clear (tp);
return 0;
/* With CLI usage, TP->PTID always equals INFERIOR_PTID here. Now that we
can store a gdb.Record object in Python referring to a different thread
than the current one, temporarily set INFERIOR_PTID. */
- cleanup = save_inferior_ptid ();
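+ /* The scoped_restore puts INFERIOR_PTID back when it goes out of
+ scope, so no explicit cleanup is needed for it. */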
+ scoped_restore save_inferior_ptid = make_scoped_restore (&inferior_ptid);
inferior_ptid = tp->ptid;
/* We should not be called on running or exited threads. */
gdb_assert (can_access_registers_ptid (tp->ptid));
btrace_data_init (&btrace);
- make_cleanup_btrace_data (&btrace);
+ cleanup = make_cleanup_btrace_data (&btrace);
/* Let's first try to extend the trace we already have. */
if (!btinfo->functions.empty ())
reinit_frame_cache ();
btinfo = &tp->btrace;
- for (auto &bfun : btinfo->functions)
- {
- VEC_free (btrace_insn_s, bfun->insn);
- xfree (bfun);
- }
btinfo->functions.clear ();
btinfo->ngaps = 0;
unsigned int index, end;
index = it->insn_index;
- bfun = it->btinfo->functions[it->call_index];
+ bfun = &it->btinfo->functions[it->call_index];
/* Check if the iterator points to a gap in the trace. */
if (bfun->errcode != 0)
return NULL;
/* The index is within the bounds of this function's instruction vector. */
- end = VEC_length (btrace_insn_s, bfun->insn);
+ end = bfun->insn.size ();
gdb_assert (0 < end);
gdb_assert (index < end);
- return VEC_index (btrace_insn_s, bfun->insn, index);
+ return &bfun->insn[index];
}
/* See btrace.h. */
int
btrace_insn_get_error (const struct btrace_insn_iterator *it)
{
- const struct btrace_function *bfun;
-
- bfun = it->btinfo->functions[it->call_index];
- return bfun->errcode;
+ return it->btinfo->functions[it->call_index].errcode;
}
/* See btrace.h. */
unsigned int
btrace_insn_number (const struct btrace_insn_iterator *it)
{
- const struct btrace_function *bfun;
-
- bfun = it->btinfo->functions[it->call_index];
- return bfun->insn_offset + it->insn_index;
+ return it->btinfo->functions[it->call_index].insn_offset + it->insn_index;
}
/* See btrace.h. */
if (btinfo->functions.empty ())
error (_("No trace."));
- bfun = btinfo->functions.back ();
- length = VEC_length (btrace_insn_s, bfun->insn);
+ bfun = &btinfo->functions.back ();
+ length = bfun->insn.size ();
/* The last function may either be a gap or it contains the current
instruction, which is one past the end of the execution trace; ignore
const struct btrace_function *bfun;
unsigned int index, steps;
- bfun = it->btinfo->functions[it->call_index];
+ bfun = &it->btinfo->functions[it->call_index];
steps = 0;
index = it->insn_index;
{
unsigned int end, space, adv;
- end = VEC_length (btrace_insn_s, bfun->insn);
+ end = bfun->insn.size ();
/* An empty function segment represents a gap in the trace. We count
it as one instruction. */
const struct btrace_function *bfun;
unsigned int index, steps;
- bfun = it->btinfo->functions[it->call_index];
+ bfun = &it->btinfo->functions[it->call_index];
steps = 0;
index = it->insn_index;
/* We point to one after the last instruction in the new function. */
bfun = prev;
- index = VEC_length (btrace_insn_s, bfun->insn);
+ index = bfun->insn.size ();
/* An empty function segment represents a gap in the trace. We count
it as one instruction. */
return 0;
lower = 0;
- bfun = btinfo->functions[lower];
+ bfun = &btinfo->functions[lower];
if (number < bfun->insn_offset)
return 0;
upper = btinfo->functions.size () - 1;
- bfun = btinfo->functions[upper];
+ bfun = &btinfo->functions[upper];
if (number >= bfun->insn_offset + ftrace_call_num_insn (bfun))
return 0;
{
const unsigned int average = lower + (upper - lower) / 2;
- bfun = btinfo->functions[average];
+ bfun = &btinfo->functions[average];
if (number < bfun->insn_offset)
{
if (btinfo->functions.empty ())
return false;
- bfun = btinfo->functions.back ();
+ bfun = &btinfo->functions.back ();
if (bfun->errcode != 0)
return false;
if (it->index >= it->btinfo->functions.size ())
return NULL;
- return it->btinfo->functions[it->index];
+ return &it->btinfo->functions[it->index];
}
/* See btrace.h. */
/* Read a number from an argument string. */
static unsigned int
-get_uint (char **arg)
+get_uint (const char **arg)
{
- char *begin, *end, *pos;
+ const char *begin, *pos;
+ char *end;
unsigned long number;
begin = *arg;
/* Read a context size from an argument string. */
static int
-get_context_size (char **arg)
+get_context_size (const char **arg)
{
- char *pos;
- int number;
-
- pos = skip_spaces (*arg);
+ const char *pos = skip_spaces (*arg);
if (!isdigit (*pos))
error (_("Expected positive number, got: %s."), pos);
- return strtol (pos, arg, 10);
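+ /* strtol needs a mutable end pointer; parse into a local and advance
+ the caller's const pointer past the digits afterwards. */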
+ char *end;
+ long result = strtol (pos, &end, 10);
+ *arg = end;
+ return result;
}
/* Complain about junk at the end of an argument string. */
static void
-no_chunk (char *arg)
+no_chunk (const char *arg)
{
if (*arg != 0)
error (_("Junk after argument: %s."), arg);
/* The "maintenance btrace packet-history" command. */
static void
-maint_btrace_packet_history_cmd (char *arg, int from_tty)
+maint_btrace_packet_history_cmd (const char *arg, int from_tty)
{
struct btrace_thread_info *btinfo;
struct thread_info *tp;
/* The "maintenance btrace clear-packet-history" command. */
static void
-maint_btrace_clear_packet_history_cmd (char *args, int from_tty)
+maint_btrace_clear_packet_history_cmd (const char *args, int from_tty)
{
struct btrace_thread_info *btinfo;
struct thread_info *tp;
/* The "maintenance btrace clear" command. */
static void
-maint_btrace_clear_cmd (char *args, int from_tty)
+maint_btrace_clear_cmd (const char *args, int from_tty)
{
struct btrace_thread_info *btinfo;
struct thread_info *tp;
/* The "maintenance info btrace" command. */
static void
-maint_info_btrace_cmd (char *args, int from_tty)
+maint_info_btrace_cmd (const char *args, int from_tty)
{
struct btrace_thread_info *btinfo;
struct thread_info *tp;
/* Initialize btrace maintenance commands. */
-void _initialize_btrace (void);
void
_initialize_btrace (void)
{