+btrace_enable (struct thread_info *tp, const struct btrace_config *conf)
+{
+ if (tp->btrace.target != NULL)
+ return;
+
+#if !defined (HAVE_LIBIPT)
+ if (conf->format == BTRACE_FORMAT_PT)
+ error (_("Intel Processor Trace support was disabled at compile time."));
+#endif /* !defined (HAVE_LIBIPT) */
+
+ DEBUG ("enable thread %s (%s)", print_thread_id (tp),
+ target_pid_to_str (tp->ptid).c_str ());
+
+ tp->btrace.target = target_enable_btrace (tp->ptid, conf);
+
+ /* We're done if we failed to enable tracing. */
+ if (tp->btrace.target == NULL)
+ return;
+
+ /* We need to undo the enable in case of errors. */
+ try
+ {
+ /* Add an entry for the current PC so we start tracing from where we
+ enabled it.
+
+ If we can't access TP's registers, TP is most likely running. In this
+ case, we can't really say where tracing was enabled so it should be
+ safe to simply skip this step.
+
+ This is not relevant for BTRACE_FORMAT_PT since the trace will already
+ start at the PC at which tracing was enabled. */
+ if (conf->format != BTRACE_FORMAT_PT
+ && can_access_registers_thread (tp))
+ btrace_add_pc (tp);
+ }
+ catch (const gdb_exception &exception)
+ {
+ btrace_disable (tp);
+
+ throw;
+ }
+}
+
+/* See btrace.h. */
+
+const struct btrace_config *
+btrace_conf (const struct btrace_thread_info *btinfo)
+{
+ if (btinfo->target == NULL)
+ return NULL;
+
+ return target_btrace_conf (btinfo->target);
+}
+
+/* See btrace.h. */
+
+void
+btrace_disable (struct thread_info *tp)
+{
+ struct btrace_thread_info *btp = &tp->btrace;
+
+ if (btp->target == NULL)
+ return;
+
+ DEBUG ("disable thread %s (%s)", print_thread_id (tp),
+ target_pid_to_str (tp->ptid).c_str ());
+
+ target_disable_btrace (btp->target);
+ btp->target = NULL;
+
+ btrace_clear (tp);
+}
+
+/* See btrace.h. */
+
+void
+btrace_teardown (struct thread_info *tp)
+{
+ struct btrace_thread_info *btp = &tp->btrace;
+
+ if (btp->target == NULL)
+ return;
+
+ DEBUG ("teardown thread %s (%s)", print_thread_id (tp),
+ target_pid_to_str (tp->ptid).c_str ());
+
+ target_teardown_btrace (btp->target);
+ btp->target = NULL;
+
+ btrace_clear (tp);
+}
+
+/* Stitch branch trace in BTS format. */
+
+static int
+btrace_stitch_bts (struct btrace_data_bts *btrace, struct thread_info *tp)
+{
+ struct btrace_thread_info *btinfo;
+ struct btrace_function *last_bfun;
+ btrace_block *first_new_block;
+
+ btinfo = &tp->btrace;
+ gdb_assert (!btinfo->functions.empty ());
+ gdb_assert (!btrace->blocks->empty ());
+
+ last_bfun = &btinfo->functions.back ();
+
+ /* If the existing trace ends with a gap, we just glue the traces
+ together. We need to drop the last (i.e. chronologically first) block
+ of the new trace, though, since we can't fill in the start address.*/
+ if (last_bfun->insn.empty ())
+ {
+ btrace->blocks->pop_back ();
+ return 0;
+ }
+
+ /* Beware that block trace starts with the most recent block, so the
+ chronologically first block in the new trace is the last block in
+ the new trace's block vector. */
+ first_new_block = &btrace->blocks->back ();
+ const btrace_insn &last_insn = last_bfun->insn.back ();
+
+ /* If the current PC at the end of the block is the same as in our current
+ trace, there are two explanations:
+ 1. we executed the instruction and some branch brought us back.
+ 2. we have not made any progress.
+ In the first case, the delta trace vector should contain at least two
+ entries.
+ In the second case, the delta trace vector should contain exactly one
+ entry for the partial block containing the current PC. Remove it. */
+ if (first_new_block->end == last_insn.pc && btrace->blocks->size () == 1)
+ {
+ btrace->blocks->pop_back ();
+ return 0;
+ }
+
+ DEBUG ("stitching %s to %s", ftrace_print_insn_addr (&last_insn),
+ core_addr_to_string_nz (first_new_block->end));
+
+ /* Do a simple sanity check to make sure we don't accidentally end up
+ with a bad block. This should not occur in practice. */
+ if (first_new_block->end < last_insn.pc)
+ {
+ warning (_("Error while trying to read delta trace. Falling back to "
+ "a full read."));
+ return -1;
+ }
+
+ /* We adjust the last block to start at the end of our current trace. */
+ gdb_assert (first_new_block->begin == 0);
+ first_new_block->begin = last_insn.pc;
+
+ /* We simply pop the last insn so we can insert it again as part of
+ the normal branch trace computation.
+ Since instruction iterators are based on indices in the instructions
+ vector, we don't leave any pointers dangling. */
+ DEBUG ("pruning insn at %s for stitching",
+ ftrace_print_insn_addr (&last_insn));
+
+ last_bfun->insn.pop_back ();
+
+ /* The instructions vector may become empty temporarily if this has
+ been the only instruction in this function segment.
+ This violates the invariant but will be remedied shortly by
+ btrace_compute_ftrace when we add the new trace. */
+
+ /* The only case where this would hurt is if the entire trace consisted
+ of just that one instruction. If we remove it, we might turn the now
+ empty btrace function segment into a gap. But we don't want gaps at
+ the beginning. To avoid this, we remove the entire old trace. */
+ if (last_bfun->number == 1 && last_bfun->insn.empty ())
+ btrace_clear (tp);
+
+ return 0;
+}
+
+/* Adjust the block trace in order to stitch old and new trace together.
+ BTRACE is the new delta trace between the last and the current stop.
+ TP is the traced thread.
+ May modifx BTRACE as well as the existing trace in TP.
+ Return 0 on success, -1 otherwise. */
+
+static int
+btrace_stitch_trace (struct btrace_data *btrace, struct thread_info *tp)
+{
+ /* If we don't have trace, there's nothing to do. */
+ if (btrace->empty ())
+ return 0;
+
+ switch (btrace->format)
+ {
+ case BTRACE_FORMAT_NONE:
+ return 0;
+
+ case BTRACE_FORMAT_BTS:
+ return btrace_stitch_bts (&btrace->variant.bts, tp);
+
+ case BTRACE_FORMAT_PT:
+ /* Delta reads are not supported. */
+ return -1;
+ }
+
+ internal_error (__FILE__, __LINE__, _("Unkown branch trace format."));
+}
+
+/* Clear the branch trace histories in BTINFO. */
+
+static void
+btrace_clear_history (struct btrace_thread_info *btinfo)
+{
+ xfree (btinfo->insn_history);
+ xfree (btinfo->call_history);
+ xfree (btinfo->replay);
+
+ btinfo->insn_history = NULL;
+ btinfo->call_history = NULL;
+ btinfo->replay = NULL;
+}
+
+/* Clear the branch trace maintenance histories in BTINFO. */
+
+static void
+btrace_maint_clear (struct btrace_thread_info *btinfo)
+{
+ switch (btinfo->data.format)
+ {
+ default:
+ break;
+
+ case BTRACE_FORMAT_BTS:
+ btinfo->maint.variant.bts.packet_history.begin = 0;
+ btinfo->maint.variant.bts.packet_history.end = 0;
+ break;
+
+#if defined (HAVE_LIBIPT)
+ case BTRACE_FORMAT_PT:
+ delete btinfo->maint.variant.pt.packets;
+
+ btinfo->maint.variant.pt.packets = NULL;
+ btinfo->maint.variant.pt.packet_history.begin = 0;
+ btinfo->maint.variant.pt.packet_history.end = 0;
+ break;
+#endif /* defined (HAVE_LIBIPT) */
+ }
+}
+
+/* See btrace.h. */
+
+const char *
+btrace_decode_error (enum btrace_format format, int errcode)
+{
+ switch (format)
+ {
+ case BTRACE_FORMAT_BTS:
+ switch (errcode)
+ {
+ case BDE_BTS_OVERFLOW:
+ return _("instruction overflow");
+
+ case BDE_BTS_INSN_SIZE:
+ return _("unknown instruction");
+
+ default:
+ break;
+ }
+ break;
+
+#if defined (HAVE_LIBIPT)
+ case BTRACE_FORMAT_PT:
+ switch (errcode)
+ {
+ case BDE_PT_USER_QUIT:
+ return _("trace decode cancelled");
+
+ case BDE_PT_DISABLED:
+ return _("disabled");
+
+ case BDE_PT_OVERFLOW:
+ return _("overflow");
+
+ default:
+ if (errcode < 0)
+ return pt_errstr (pt_errcode (errcode));
+ break;
+ }
+ break;
+#endif /* defined (HAVE_LIBIPT) */
+
+ default:
+ break;
+ }
+
+ return _("unknown");
+}
+
+/* See btrace.h. */
+
+void
+btrace_fetch (struct thread_info *tp, const struct btrace_cpu *cpu)
+{
+ struct btrace_thread_info *btinfo;
+ struct btrace_target_info *tinfo;
+ struct btrace_data btrace;
+ int errcode;
+
+ DEBUG ("fetch thread %s (%s)", print_thread_id (tp),
+ target_pid_to_str (tp->ptid).c_str ());
+
+ btinfo = &tp->btrace;
+ tinfo = btinfo->target;
+ if (tinfo == NULL)
+ return;
+
+ /* There's no way we could get new trace while replaying.
+ On the other hand, delta trace would return a partial record with the
+ current PC, which is the replay PC, not the last PC, as expected. */
+ if (btinfo->replay != NULL)
+ return;
+
+ /* With CLI usage, TP->PTID always equals INFERIOR_PTID here. Now that we
+ can store a gdb.Record object in Python referring to a different thread
+ than the current one, temporarily set INFERIOR_PTID. */
+ scoped_restore save_inferior_ptid = make_scoped_restore (&inferior_ptid);
+ inferior_ptid = tp->ptid;
+
+ /* We should not be called on running or exited threads. */
+ gdb_assert (can_access_registers_thread (tp));
+
+ /* Let's first try to extend the trace we already have. */
+ if (!btinfo->functions.empty ())
+ {
+ errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_DELTA);
+ if (errcode == 0)
+ {
+ /* Success. Let's try to stitch the traces together. */
+ errcode = btrace_stitch_trace (&btrace, tp);
+ }
+ else
+ {
+ /* We failed to read delta trace. Let's try to read new trace. */
+ errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_NEW);
+
+ /* If we got any new trace, discard what we have. */
+ if (errcode == 0 && !btrace.empty ())
+ btrace_clear (tp);
+ }
+
+ /* If we were not able to read the trace, we start over. */
+ if (errcode != 0)
+ {
+ btrace_clear (tp);
+ errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);
+ }
+ }
+ else
+ errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);
+
+ /* If we were not able to read the branch trace, signal an error. */
+ if (errcode != 0)
+ error (_("Failed to read branch trace."));
+
+ /* Compute the trace, provided we have any. */
+ if (!btrace.empty ())
+ {
+ /* Store the raw trace data. The stored data will be cleared in
+ btrace_clear, so we always append the new trace. */
+ btrace_data_append (&btinfo->data, &btrace);
+ btrace_maint_clear (btinfo);
+
+ btrace_clear_history (btinfo);
+ btrace_compute_ftrace (tp, &btrace, cpu);
+ }
+}
+
+/* See btrace.h. */
+
+void
+btrace_clear (struct thread_info *tp)
+{
+ struct btrace_thread_info *btinfo;
+
+ DEBUG ("clear thread %s (%s)", print_thread_id (tp),
+ target_pid_to_str (tp->ptid).c_str ());
+
+ /* Make sure btrace frames that may hold a pointer into the branch
+ trace data are destroyed. */
+ reinit_frame_cache ();
+
+ btinfo = &tp->btrace;
+
+ btinfo->functions.clear ();
+ btinfo->ngaps = 0;
+
+ /* Must clear the maint data before - it depends on BTINFO->DATA. */
+ btrace_maint_clear (btinfo);
+ btinfo->data.clear ();
+ btrace_clear_history (btinfo);
+}
+
+/* See btrace.h. */
+
+void
+btrace_free_objfile (struct objfile *objfile)
+{
+ DEBUG ("free objfile");
+
+ for (thread_info *tp : all_non_exited_threads ())
+ btrace_clear (tp);
+}
+
+#if defined (HAVE_LIBEXPAT)
+
+/* Check the btrace document version. */
+
+static void
+check_xml_btrace_version (struct gdb_xml_parser *parser,
+ const struct gdb_xml_element *element,
+ void *user_data,
+ std::vector<gdb_xml_value> &attributes)
+{
+ const char *version
+ = (const char *) xml_find_attribute (attributes, "version")->value.get ();
+
+ if (strcmp (version, "1.0") != 0)
+ gdb_xml_error (parser, _("Unsupported btrace version: \"%s\""), version);
+}
+
+/* Parse a btrace "block" xml record. */
+
+static void
+parse_xml_btrace_block (struct gdb_xml_parser *parser,
+ const struct gdb_xml_element *element,
+ void *user_data,
+ std::vector<gdb_xml_value> &attributes)
+{
+ struct btrace_data *btrace;
+ ULONGEST *begin, *end;
+
+ btrace = (struct btrace_data *) user_data;
+
+ switch (btrace->format)
+ {
+ case BTRACE_FORMAT_BTS:
+ break;
+
+ case BTRACE_FORMAT_NONE:
+ btrace->format = BTRACE_FORMAT_BTS;
+ btrace->variant.bts.blocks = new std::vector<btrace_block>;
+ break;
+
+ default:
+ gdb_xml_error (parser, _("Btrace format error."));
+ }
+
+ begin = (ULONGEST *) xml_find_attribute (attributes, "begin")->value.get ();
+ end = (ULONGEST *) xml_find_attribute (attributes, "end")->value.get ();
+ btrace->variant.bts.blocks->emplace_back (*begin, *end);
+}
+
+/* Parse a "raw" xml record. */
+
+static void
+parse_xml_raw (struct gdb_xml_parser *parser, const char *body_text,
+ gdb_byte **pdata, size_t *psize)
+{
+ gdb_byte *bin;
+ size_t len, size;
+
+ len = strlen (body_text);
+ if (len % 2 != 0)
+ gdb_xml_error (parser, _("Bad raw data size."));
+
+ size = len / 2;
+
+ gdb::unique_xmalloc_ptr<gdb_byte> data ((gdb_byte *) xmalloc (size));
+ bin = data.get ();
+
+ /* We use hex encoding - see gdbsupport/rsp-low.h. */
+ while (len > 0)
+ {
+ char hi, lo;
+
+ hi = *body_text++;
+ lo = *body_text++;
+
+ if (hi == 0 || lo == 0)
+ gdb_xml_error (parser, _("Bad hex encoding."));
+
+ *bin++ = fromhex (hi) * 16 + fromhex (lo);
+ len -= 2;
+ }
+
+ *pdata = data.release ();
+ *psize = size;
+}
+
+/* Parse a btrace pt-config "cpu" xml record. */
+
+static void
+parse_xml_btrace_pt_config_cpu (struct gdb_xml_parser *parser,
+ const struct gdb_xml_element *element,
+ void *user_data,
+ std::vector<gdb_xml_value> &attributes)
+{
+ struct btrace_data *btrace;
+ const char *vendor;
+ ULONGEST *family, *model, *stepping;
+
+ vendor =
+ (const char *) xml_find_attribute (attributes, "vendor")->value.get ();
+ family
+ = (ULONGEST *) xml_find_attribute (attributes, "family")->value.get ();
+ model
+ = (ULONGEST *) xml_find_attribute (attributes, "model")->value.get ();
+ stepping
+ = (ULONGEST *) xml_find_attribute (attributes, "stepping")->value.get ();
+
+ btrace = (struct btrace_data *) user_data;
+
+ if (strcmp (vendor, "GenuineIntel") == 0)
+ btrace->variant.pt.config.cpu.vendor = CV_INTEL;
+
+ btrace->variant.pt.config.cpu.family = *family;
+ btrace->variant.pt.config.cpu.model = *model;
+ btrace->variant.pt.config.cpu.stepping = *stepping;
+}
+
+/* Parse a btrace pt "raw" xml record. */
+
+static void
+parse_xml_btrace_pt_raw (struct gdb_xml_parser *parser,
+ const struct gdb_xml_element *element,
+ void *user_data, const char *body_text)
+{
+ struct btrace_data *btrace;
+
+ btrace = (struct btrace_data *) user_data;
+ parse_xml_raw (parser, body_text, &btrace->variant.pt.data,
+ &btrace->variant.pt.size);
+}
+
+/* Parse a btrace "pt" xml record. */
+
+static void
+parse_xml_btrace_pt (struct gdb_xml_parser *parser,
+ const struct gdb_xml_element *element,
+ void *user_data,
+ std::vector<gdb_xml_value> &attributes)
+{
+ struct btrace_data *btrace;
+
+ btrace = (struct btrace_data *) user_data;
+ btrace->format = BTRACE_FORMAT_PT;
+ btrace->variant.pt.config.cpu.vendor = CV_UNKNOWN;
+ btrace->variant.pt.data = NULL;
+ btrace->variant.pt.size = 0;
+}
+
+static const struct gdb_xml_attribute block_attributes[] = {
+ { "begin", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
+ { "end", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
+ { NULL, GDB_XML_AF_NONE, NULL, NULL }
+};
+
+static const struct gdb_xml_attribute btrace_pt_config_cpu_attributes[] = {
+ { "vendor", GDB_XML_AF_NONE, NULL, NULL },
+ { "family", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
+ { "model", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
+ { "stepping", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
+ { NULL, GDB_XML_AF_NONE, NULL, NULL }
+};
+
+static const struct gdb_xml_element btrace_pt_config_children[] = {
+ { "cpu", btrace_pt_config_cpu_attributes, NULL, GDB_XML_EF_OPTIONAL,
+ parse_xml_btrace_pt_config_cpu, NULL },
+ { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
+};
+
+static const struct gdb_xml_element btrace_pt_children[] = {
+ { "pt-config", NULL, btrace_pt_config_children, GDB_XML_EF_OPTIONAL, NULL,
+ NULL },
+ { "raw", NULL, NULL, GDB_XML_EF_OPTIONAL, NULL, parse_xml_btrace_pt_raw },
+ { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
+};
+
+static const struct gdb_xml_attribute btrace_attributes[] = {
+ { "version", GDB_XML_AF_NONE, NULL, NULL },
+ { NULL, GDB_XML_AF_NONE, NULL, NULL }
+};
+
+static const struct gdb_xml_element btrace_children[] = {
+ { "block", block_attributes, NULL,
+ GDB_XML_EF_REPEATABLE | GDB_XML_EF_OPTIONAL, parse_xml_btrace_block, NULL },
+ { "pt", NULL, btrace_pt_children, GDB_XML_EF_OPTIONAL, parse_xml_btrace_pt,
+ NULL },
+ { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
+};
+
+static const struct gdb_xml_element btrace_elements[] = {
+ { "btrace", btrace_attributes, btrace_children, GDB_XML_EF_NONE,
+ check_xml_btrace_version, NULL },
+ { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
+};
+
+#endif /* defined (HAVE_LIBEXPAT) */
+
+/* See btrace.h. */
+
+void
+parse_xml_btrace (struct btrace_data *btrace, const char *buffer)
+{
+#if defined (HAVE_LIBEXPAT)
+
+ int errcode;
+ btrace_data result;
+ result.format = BTRACE_FORMAT_NONE;
+
+ errcode = gdb_xml_parse_quick (_("btrace"), "btrace.dtd", btrace_elements,
+ buffer, &result);
+ if (errcode != 0)
+ error (_("Error parsing branch trace."));
+
+ /* Keep parse results. */
+ *btrace = std::move (result);
+
+#else /* !defined (HAVE_LIBEXPAT) */
+
+ error (_("Cannot process branch trace. XML support was disabled at "
+ "compile time."));
+
+#endif /* !defined (HAVE_LIBEXPAT) */
+}
+
+#if defined (HAVE_LIBEXPAT)
+
+/* Parse a btrace-conf "bts" xml record. */
+
+static void
+parse_xml_btrace_conf_bts (struct gdb_xml_parser *parser,
+ const struct gdb_xml_element *element,
+ void *user_data,
+ std::vector<gdb_xml_value> &attributes)
+{
+ struct btrace_config *conf;
+ struct gdb_xml_value *size;
+
+ conf = (struct btrace_config *) user_data;
+ conf->format = BTRACE_FORMAT_BTS;
+ conf->bts.size = 0;
+
+ size = xml_find_attribute (attributes, "size");
+ if (size != NULL)
+ conf->bts.size = (unsigned int) *(ULONGEST *) size->value.get ();
+}
+
+/* Parse a btrace-conf "pt" xml record. */
+
+static void
+parse_xml_btrace_conf_pt (struct gdb_xml_parser *parser,
+ const struct gdb_xml_element *element,
+ void *user_data,
+ std::vector<gdb_xml_value> &attributes)
+{
+ struct btrace_config *conf;
+ struct gdb_xml_value *size;
+
+ conf = (struct btrace_config *) user_data;
+ conf->format = BTRACE_FORMAT_PT;
+ conf->pt.size = 0;
+
+ size = xml_find_attribute (attributes, "size");
+ if (size != NULL)
+ conf->pt.size = (unsigned int) *(ULONGEST *) size->value.get ();
+}
+
+static const struct gdb_xml_attribute btrace_conf_pt_attributes[] = {
+ { "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
+ { NULL, GDB_XML_AF_NONE, NULL, NULL }
+};
+
+static const struct gdb_xml_attribute btrace_conf_bts_attributes[] = {
+ { "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
+ { NULL, GDB_XML_AF_NONE, NULL, NULL }
+};
+
+static const struct gdb_xml_element btrace_conf_children[] = {
+ { "bts", btrace_conf_bts_attributes, NULL, GDB_XML_EF_OPTIONAL,
+ parse_xml_btrace_conf_bts, NULL },
+ { "pt", btrace_conf_pt_attributes, NULL, GDB_XML_EF_OPTIONAL,
+ parse_xml_btrace_conf_pt, NULL },
+ { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
+};
+
+static const struct gdb_xml_attribute btrace_conf_attributes[] = {
+ { "version", GDB_XML_AF_NONE, NULL, NULL },
+ { NULL, GDB_XML_AF_NONE, NULL, NULL }
+};
+
+static const struct gdb_xml_element btrace_conf_elements[] = {
+ { "btrace-conf", btrace_conf_attributes, btrace_conf_children,
+ GDB_XML_EF_NONE, NULL, NULL },
+ { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
+};
+
+#endif /* defined (HAVE_LIBEXPAT) */
+
+/* See btrace.h. */
+
+void
+parse_xml_btrace_conf (struct btrace_config *conf, const char *xml)
+{
+#if defined (HAVE_LIBEXPAT)
+
+ int errcode;
+ errcode = gdb_xml_parse_quick (_("btrace-conf"), "btrace-conf.dtd",
+ btrace_conf_elements, xml, conf);
+ if (errcode != 0)
+ error (_("Error parsing branch trace configuration."));
+
+#else /* !defined (HAVE_LIBEXPAT) */
+
+ error (_("Cannot process the branch trace configuration. XML support "
+ "was disabled at compile time."));
+
+#endif /* !defined (HAVE_LIBEXPAT) */
+}
+
+/* See btrace.h. */
+
+const struct btrace_insn *
+btrace_insn_get (const struct btrace_insn_iterator *it)
+{
+ const struct btrace_function *bfun;
+ unsigned int index, end;
+
+ index = it->insn_index;
+ bfun = &it->btinfo->functions[it->call_index];
+
+ /* Check if the iterator points to a gap in the trace. */
+ if (bfun->errcode != 0)
+ return NULL;
+
+ /* The index is within the bounds of this function's instruction vector. */
+ end = bfun->insn.size ();
+ gdb_assert (0 < end);
+ gdb_assert (index < end);
+
+ return &bfun->insn[index];
+}
+
+/* See btrace.h. */
+
+int
+btrace_insn_get_error (const struct btrace_insn_iterator *it)
+{
+ return it->btinfo->functions[it->call_index].errcode;
+}
+
+/* See btrace.h. */
+
+unsigned int
+btrace_insn_number (const struct btrace_insn_iterator *it)
+{
+ return it->btinfo->functions[it->call_index].insn_offset + it->insn_index;
+}
+
+/* See btrace.h. */
+
+void
+btrace_insn_begin (struct btrace_insn_iterator *it,