1 /* Branch trace support for GDB, the GNU debugger.
3 Copyright (C) 2013-2014 Free Software Foundation, Inc.
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "gdbthread.h"
29 #include "exceptions.h"
30 #include "cli/cli-utils.h"
34 #include "filenames.h"
36 #include "frame-unwind.h"
40 /* The target_ops of record-btrace. */
41 static struct target_ops record_btrace_ops
;
43 /* A new thread observer enabling branch tracing for the new thread. */
44 static struct observer
*record_btrace_thread_observer
;
46 /* Temporarily allow memory accesses. */
47 static int record_btrace_allow_memory_access
;
49 /* Print a record-btrace debug message. Use do ... while (0) to avoid
50 ambiguities when used in if statements. */
/* Print a record-btrace debug message.  Use do ... while (0) to avoid
   ambiguities when used in if statements.  */

#define DEBUG(msg, args...)						\
  do									\
    {									\
      if (record_debug != 0)						\
	fprintf_unfiltered (gdb_stdlog,					\
			    "[record-btrace] " msg "\n", ##args);	\
    }									\
  while (0)
62 /* Update the branch trace for the current thread and return a pointer to its
65 Throws an error if there is no thread or no trace. This function never
68 static struct thread_info
*
69 require_btrace_thread (void)
71 struct thread_info
*tp
;
75 tp
= find_thread_ptid (inferior_ptid
);
77 error (_("No thread."));
81 if (btrace_is_empty (tp
))
82 error (_("No trace."));
87 /* Update the branch trace for the current thread and return a pointer to its
88 branch trace information struct.
90 Throws an error if there is no thread or no trace. This function never
93 static struct btrace_thread_info
*
96 struct thread_info
*tp
;
98 tp
= require_btrace_thread ();
103 /* Enable branch tracing for one thread. Warn on errors. */
106 record_btrace_enable_warn (struct thread_info
*tp
)
108 volatile struct gdb_exception error
;
110 TRY_CATCH (error
, RETURN_MASK_ERROR
)
113 if (error
.message
!= NULL
)
114 warning ("%s", error
.message
);
/* Callback function to disable branch tracing for one thread.  ARG is the
   thread_info pointer, passed as void * for use as a cleanup callback.  */

static void
record_btrace_disable_callback (void *arg)
{
  struct thread_info *tp;

  tp = arg;

  btrace_disable (tp);
}
129 /* Enable automatic tracing of new threads. */
132 record_btrace_auto_enable (void)
134 DEBUG ("attach thread observer");
136 record_btrace_thread_observer
137 = observer_attach_new_thread (record_btrace_enable_warn
);
140 /* Disable automatic tracing of new threads. */
143 record_btrace_auto_disable (void)
145 /* The observer may have been detached, already. */
146 if (record_btrace_thread_observer
== NULL
)
149 DEBUG ("detach thread observer");
151 observer_detach_new_thread (record_btrace_thread_observer
);
152 record_btrace_thread_observer
= NULL
;
155 /* The to_open method of target record-btrace. */
158 record_btrace_open (char *args
, int from_tty
)
160 struct cleanup
*disable_chain
;
161 struct thread_info
*tp
;
167 if (!target_has_execution
)
168 error (_("The program is not being run."));
170 if (!target_supports_btrace ())
171 error (_("Target does not support branch tracing."));
174 error (_("Record btrace can't debug inferior in non-stop mode."));
176 gdb_assert (record_btrace_thread_observer
== NULL
);
178 disable_chain
= make_cleanup (null_cleanup
, NULL
);
180 if (args
== NULL
|| *args
== 0 || number_is_in_list (args
, tp
->num
))
184 make_cleanup (record_btrace_disable_callback
, tp
);
187 record_btrace_auto_enable ();
189 push_target (&record_btrace_ops
);
191 observer_notify_record_changed (current_inferior (), 1);
193 discard_cleanups (disable_chain
);
196 /* The to_stop_recording method of target record-btrace. */
199 record_btrace_stop_recording (struct target_ops
*self
)
201 struct thread_info
*tp
;
203 DEBUG ("stop recording");
205 record_btrace_auto_disable ();
208 if (tp
->btrace
.target
!= NULL
)
212 /* The to_close method of target record-btrace. */
215 record_btrace_close (struct target_ops
*self
)
217 struct thread_info
*tp
;
219 /* Make sure automatic recording gets disabled even if we did not stop
220 recording before closing the record-btrace target. */
221 record_btrace_auto_disable ();
223 /* We should have already stopped recording.
224 Tear down btrace in case we have not. */
226 btrace_teardown (tp
);
229 /* The to_info_record method of target record-btrace. */
232 record_btrace_info (struct target_ops
*self
)
234 struct btrace_thread_info
*btinfo
;
235 struct thread_info
*tp
;
236 unsigned int insns
, calls
;
240 tp
= find_thread_ptid (inferior_ptid
);
242 error (_("No thread."));
249 btinfo
= &tp
->btrace
;
251 if (!btrace_is_empty (tp
))
253 struct btrace_call_iterator call
;
254 struct btrace_insn_iterator insn
;
256 btrace_call_end (&call
, btinfo
);
257 btrace_call_prev (&call
, 1);
258 calls
= btrace_call_number (&call
);
260 btrace_insn_end (&insn
, btinfo
);
261 btrace_insn_prev (&insn
, 1);
262 insns
= btrace_insn_number (&insn
);
265 printf_unfiltered (_("Recorded %u instructions in %u functions for thread "
266 "%d (%s).\n"), insns
, calls
, tp
->num
,
267 target_pid_to_str (tp
->ptid
));
269 if (btrace_is_replaying (tp
))
270 printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
271 btrace_insn_number (btinfo
->replay
));
/* Print an unsigned int VAL in field FLD of UIOUT.  ui-out has no native
   unsigned field printer, so format it explicitly with "%u".  */

static void
ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
{
  ui_out_field_fmt (uiout, fld, "%u", val);
}
282 /* Disassemble a section of the recorded instruction trace. */
285 btrace_insn_history (struct ui_out
*uiout
,
286 const struct btrace_insn_iterator
*begin
,
287 const struct btrace_insn_iterator
*end
, int flags
)
289 struct gdbarch
*gdbarch
;
290 struct btrace_insn_iterator it
;
292 DEBUG ("itrace (0x%x): [%u; %u)", flags
, btrace_insn_number (begin
),
293 btrace_insn_number (end
));
295 gdbarch
= target_gdbarch ();
297 for (it
= *begin
; btrace_insn_cmp (&it
, end
) != 0; btrace_insn_next (&it
, 1))
299 const struct btrace_insn
*insn
;
301 insn
= btrace_insn_get (&it
);
303 /* Print the instruction index. */
304 ui_out_field_uint (uiout
, "index", btrace_insn_number (&it
));
305 ui_out_text (uiout
, "\t");
307 /* Disassembly with '/m' flag may not produce the expected result.
309 gdb_disassembly (gdbarch
, uiout
, NULL
, flags
, 1, insn
->pc
, insn
->pc
+ 1);
313 /* The to_insn_history method of target record-btrace. */
316 record_btrace_insn_history (struct target_ops
*self
, int size
, int flags
)
318 struct btrace_thread_info
*btinfo
;
319 struct btrace_insn_history
*history
;
320 struct btrace_insn_iterator begin
, end
;
321 struct cleanup
*uiout_cleanup
;
322 struct ui_out
*uiout
;
323 unsigned int context
, covered
;
325 uiout
= current_uiout
;
326 uiout_cleanup
= make_cleanup_ui_out_tuple_begin_end (uiout
,
328 context
= abs (size
);
330 error (_("Bad record instruction-history-size."));
332 btinfo
= require_btrace ();
333 history
= btinfo
->insn_history
;
336 struct btrace_insn_iterator
*replay
;
338 DEBUG ("insn-history (0x%x): %d", flags
, size
);
340 /* If we're replaying, we start at the replay position. Otherwise, we
341 start at the tail of the trace. */
342 replay
= btinfo
->replay
;
346 btrace_insn_end (&begin
, btinfo
);
348 /* We start from here and expand in the requested direction. Then we
349 expand in the other direction, as well, to fill up any remaining
354 /* We want the current position covered, as well. */
355 covered
= btrace_insn_next (&end
, 1);
356 covered
+= btrace_insn_prev (&begin
, context
- covered
);
357 covered
+= btrace_insn_next (&end
, context
- covered
);
361 covered
= btrace_insn_next (&end
, context
);
362 covered
+= btrace_insn_prev (&begin
, context
- covered
);
367 begin
= history
->begin
;
370 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags
, size
,
371 btrace_insn_number (&begin
), btrace_insn_number (&end
));
376 covered
= btrace_insn_prev (&begin
, context
);
381 covered
= btrace_insn_next (&end
, context
);
386 btrace_insn_history (uiout
, &begin
, &end
, flags
);
390 printf_unfiltered (_("At the start of the branch trace record.\n"));
392 printf_unfiltered (_("At the end of the branch trace record.\n"));
395 btrace_set_insn_history (btinfo
, &begin
, &end
);
396 do_cleanups (uiout_cleanup
);
399 /* The to_insn_history_range method of target record-btrace. */
402 record_btrace_insn_history_range (struct target_ops
*self
,
403 ULONGEST from
, ULONGEST to
, int flags
)
405 struct btrace_thread_info
*btinfo
;
406 struct btrace_insn_history
*history
;
407 struct btrace_insn_iterator begin
, end
;
408 struct cleanup
*uiout_cleanup
;
409 struct ui_out
*uiout
;
410 unsigned int low
, high
;
413 uiout
= current_uiout
;
414 uiout_cleanup
= make_cleanup_ui_out_tuple_begin_end (uiout
,
419 DEBUG ("insn-history (0x%x): [%u; %u)", flags
, low
, high
);
421 /* Check for wrap-arounds. */
422 if (low
!= from
|| high
!= to
)
423 error (_("Bad range."));
426 error (_("Bad range."));
428 btinfo
= require_btrace ();
430 found
= btrace_find_insn_by_number (&begin
, btinfo
, low
);
432 error (_("Range out of bounds."));
434 found
= btrace_find_insn_by_number (&end
, btinfo
, high
);
437 /* Silently truncate the range. */
438 btrace_insn_end (&end
, btinfo
);
442 /* We want both begin and end to be inclusive. */
443 btrace_insn_next (&end
, 1);
446 btrace_insn_history (uiout
, &begin
, &end
, flags
);
447 btrace_set_insn_history (btinfo
, &begin
, &end
);
449 do_cleanups (uiout_cleanup
);
452 /* The to_insn_history_from method of target record-btrace. */
455 record_btrace_insn_history_from (struct target_ops
*self
,
456 ULONGEST from
, int size
, int flags
)
458 ULONGEST begin
, end
, context
;
460 context
= abs (size
);
462 error (_("Bad record instruction-history-size."));
471 begin
= from
- context
+ 1;
476 end
= from
+ context
- 1;
478 /* Check for wrap-around. */
483 record_btrace_insn_history_range (self
, begin
, end
, flags
);
486 /* Print the instruction number range for a function call history line. */
489 btrace_call_history_insn_range (struct ui_out
*uiout
,
490 const struct btrace_function
*bfun
)
492 unsigned int begin
, end
, size
;
494 size
= VEC_length (btrace_insn_s
, bfun
->insn
);
495 gdb_assert (size
> 0);
497 begin
= bfun
->insn_offset
;
498 end
= begin
+ size
- 1;
500 ui_out_field_uint (uiout
, "insn begin", begin
);
501 ui_out_text (uiout
, ",");
502 ui_out_field_uint (uiout
, "insn end", end
);
505 /* Print the source line information for a function call history line. */
508 btrace_call_history_src_line (struct ui_out
*uiout
,
509 const struct btrace_function
*bfun
)
518 ui_out_field_string (uiout
, "file",
519 symtab_to_filename_for_display (sym
->symtab
));
521 begin
= bfun
->lbegin
;
527 ui_out_text (uiout
, ":");
528 ui_out_field_int (uiout
, "min line", begin
);
533 ui_out_text (uiout
, ",");
534 ui_out_field_int (uiout
, "max line", end
);
537 /* Get the name of a branch trace function. */
540 btrace_get_bfun_name (const struct btrace_function
*bfun
)
542 struct minimal_symbol
*msym
;
552 return SYMBOL_PRINT_NAME (sym
);
553 else if (msym
!= NULL
)
554 return MSYMBOL_PRINT_NAME (msym
);
559 /* Disassemble a section of the recorded function trace. */
562 btrace_call_history (struct ui_out
*uiout
,
563 const struct btrace_thread_info
*btinfo
,
564 const struct btrace_call_iterator
*begin
,
565 const struct btrace_call_iterator
*end
,
566 enum record_print_flag flags
)
568 struct btrace_call_iterator it
;
570 DEBUG ("ftrace (0x%x): [%u; %u)", flags
, btrace_call_number (begin
),
571 btrace_call_number (end
));
573 for (it
= *begin
; btrace_call_cmp (&it
, end
) < 0; btrace_call_next (&it
, 1))
575 const struct btrace_function
*bfun
;
576 struct minimal_symbol
*msym
;
579 bfun
= btrace_call_get (&it
);
583 /* Print the function index. */
584 ui_out_field_uint (uiout
, "index", bfun
->number
);
585 ui_out_text (uiout
, "\t");
587 if ((flags
& RECORD_PRINT_INDENT_CALLS
) != 0)
589 int level
= bfun
->level
+ btinfo
->level
, i
;
591 for (i
= 0; i
< level
; ++i
)
592 ui_out_text (uiout
, " ");
596 ui_out_field_string (uiout
, "function", SYMBOL_PRINT_NAME (sym
));
597 else if (msym
!= NULL
)
598 ui_out_field_string (uiout
, "function", MSYMBOL_PRINT_NAME (msym
));
599 else if (!ui_out_is_mi_like_p (uiout
))
600 ui_out_field_string (uiout
, "function", "??");
602 if ((flags
& RECORD_PRINT_INSN_RANGE
) != 0)
604 ui_out_text (uiout
, _("\tinst "));
605 btrace_call_history_insn_range (uiout
, bfun
);
608 if ((flags
& RECORD_PRINT_SRC_LINE
) != 0)
610 ui_out_text (uiout
, _("\tat "));
611 btrace_call_history_src_line (uiout
, bfun
);
614 ui_out_text (uiout
, "\n");
618 /* The to_call_history method of target record-btrace. */
621 record_btrace_call_history (struct target_ops
*self
, int size
, int flags
)
623 struct btrace_thread_info
*btinfo
;
624 struct btrace_call_history
*history
;
625 struct btrace_call_iterator begin
, end
;
626 struct cleanup
*uiout_cleanup
;
627 struct ui_out
*uiout
;
628 unsigned int context
, covered
;
630 uiout
= current_uiout
;
631 uiout_cleanup
= make_cleanup_ui_out_tuple_begin_end (uiout
,
633 context
= abs (size
);
635 error (_("Bad record function-call-history-size."));
637 btinfo
= require_btrace ();
638 history
= btinfo
->call_history
;
641 struct btrace_insn_iterator
*replay
;
643 DEBUG ("call-history (0x%x): %d", flags
, size
);
645 /* If we're replaying, we start at the replay position. Otherwise, we
646 start at the tail of the trace. */
647 replay
= btinfo
->replay
;
650 begin
.function
= replay
->function
;
651 begin
.btinfo
= btinfo
;
654 btrace_call_end (&begin
, btinfo
);
656 /* We start from here and expand in the requested direction. Then we
657 expand in the other direction, as well, to fill up any remaining
662 /* We want the current position covered, as well. */
663 covered
= btrace_call_next (&end
, 1);
664 covered
+= btrace_call_prev (&begin
, context
- covered
);
665 covered
+= btrace_call_next (&end
, context
- covered
);
669 covered
= btrace_call_next (&end
, context
);
670 covered
+= btrace_call_prev (&begin
, context
- covered
);
675 begin
= history
->begin
;
678 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", flags
, size
,
679 btrace_call_number (&begin
), btrace_call_number (&end
));
684 covered
= btrace_call_prev (&begin
, context
);
689 covered
= btrace_call_next (&end
, context
);
694 btrace_call_history (uiout
, btinfo
, &begin
, &end
, flags
);
698 printf_unfiltered (_("At the start of the branch trace record.\n"));
700 printf_unfiltered (_("At the end of the branch trace record.\n"));
703 btrace_set_call_history (btinfo
, &begin
, &end
);
704 do_cleanups (uiout_cleanup
);
707 /* The to_call_history_range method of target record-btrace. */
710 record_btrace_call_history_range (struct target_ops
*self
,
711 ULONGEST from
, ULONGEST to
, int flags
)
713 struct btrace_thread_info
*btinfo
;
714 struct btrace_call_history
*history
;
715 struct btrace_call_iterator begin
, end
;
716 struct cleanup
*uiout_cleanup
;
717 struct ui_out
*uiout
;
718 unsigned int low
, high
;
721 uiout
= current_uiout
;
722 uiout_cleanup
= make_cleanup_ui_out_tuple_begin_end (uiout
,
727 DEBUG ("call-history (0x%x): [%u; %u)", flags
, low
, high
);
729 /* Check for wrap-arounds. */
730 if (low
!= from
|| high
!= to
)
731 error (_("Bad range."));
734 error (_("Bad range."));
736 btinfo
= require_btrace ();
738 found
= btrace_find_call_by_number (&begin
, btinfo
, low
);
740 error (_("Range out of bounds."));
742 found
= btrace_find_call_by_number (&end
, btinfo
, high
);
745 /* Silently truncate the range. */
746 btrace_call_end (&end
, btinfo
);
750 /* We want both begin and end to be inclusive. */
751 btrace_call_next (&end
, 1);
754 btrace_call_history (uiout
, btinfo
, &begin
, &end
, flags
);
755 btrace_set_call_history (btinfo
, &begin
, &end
);
757 do_cleanups (uiout_cleanup
);
760 /* The to_call_history_from method of target record-btrace. */
763 record_btrace_call_history_from (struct target_ops
*self
,
764 ULONGEST from
, int size
, int flags
)
766 ULONGEST begin
, end
, context
;
768 context
= abs (size
);
770 error (_("Bad record function-call-history-size."));
779 begin
= from
- context
+ 1;
784 end
= from
+ context
- 1;
786 /* Check for wrap-around. */
791 record_btrace_call_history_range (self
, begin
, end
, flags
);
794 /* The to_record_is_replaying method of target record-btrace. */
797 record_btrace_is_replaying (struct target_ops
*self
)
799 struct thread_info
*tp
;
802 if (btrace_is_replaying (tp
))
808 /* The to_xfer_partial method of target record-btrace. */
810 static enum target_xfer_status
811 record_btrace_xfer_partial (struct target_ops
*ops
, enum target_object object
,
812 const char *annex
, gdb_byte
*readbuf
,
813 const gdb_byte
*writebuf
, ULONGEST offset
,
814 ULONGEST len
, ULONGEST
*xfered_len
)
816 struct target_ops
*t
;
818 /* Filter out requests that don't make sense during replay. */
819 if (!record_btrace_allow_memory_access
&& record_btrace_is_replaying (ops
))
823 case TARGET_OBJECT_MEMORY
:
825 struct target_section
*section
;
827 /* We do not allow writing memory in general. */
828 if (writebuf
!= NULL
)
831 return TARGET_XFER_UNAVAILABLE
;
834 /* We allow reading readonly memory. */
835 section
= target_section_by_addr (ops
, offset
);
838 /* Check if the section we found is readonly. */
839 if ((bfd_get_section_flags (section
->the_bfd_section
->owner
,
840 section
->the_bfd_section
)
841 & SEC_READONLY
) != 0)
843 /* Truncate the request to fit into this section. */
844 len
= min (len
, section
->endaddr
- offset
);
850 return TARGET_XFER_UNAVAILABLE
;
855 /* Forward the request. */
856 for (ops
= ops
->beneath
; ops
!= NULL
; ops
= ops
->beneath
)
857 if (ops
->to_xfer_partial
!= NULL
)
858 return ops
->to_xfer_partial (ops
, object
, annex
, readbuf
, writebuf
,
859 offset
, len
, xfered_len
);
862 return TARGET_XFER_UNAVAILABLE
;
865 /* The to_insert_breakpoint method of target record-btrace. */
868 record_btrace_insert_breakpoint (struct target_ops
*ops
,
869 struct gdbarch
*gdbarch
,
870 struct bp_target_info
*bp_tgt
)
872 volatile struct gdb_exception except
;
875 /* Inserting breakpoints requires accessing memory. Allow it for the
876 duration of this function. */
877 old
= record_btrace_allow_memory_access
;
878 record_btrace_allow_memory_access
= 1;
881 TRY_CATCH (except
, RETURN_MASK_ALL
)
882 ret
= ops
->beneath
->to_insert_breakpoint (ops
->beneath
, gdbarch
, bp_tgt
);
884 record_btrace_allow_memory_access
= old
;
886 if (except
.reason
< 0)
887 throw_exception (except
);
892 /* The to_remove_breakpoint method of target record-btrace. */
895 record_btrace_remove_breakpoint (struct target_ops
*ops
,
896 struct gdbarch
*gdbarch
,
897 struct bp_target_info
*bp_tgt
)
899 volatile struct gdb_exception except
;
902 /* Removing breakpoints requires accessing memory. Allow it for the
903 duration of this function. */
904 old
= record_btrace_allow_memory_access
;
905 record_btrace_allow_memory_access
= 1;
908 TRY_CATCH (except
, RETURN_MASK_ALL
)
909 ret
= ops
->beneath
->to_remove_breakpoint (ops
->beneath
, gdbarch
, bp_tgt
);
911 record_btrace_allow_memory_access
= old
;
913 if (except
.reason
< 0)
914 throw_exception (except
);
919 /* The to_fetch_registers method of target record-btrace. */
922 record_btrace_fetch_registers (struct target_ops
*ops
,
923 struct regcache
*regcache
, int regno
)
925 struct btrace_insn_iterator
*replay
;
926 struct thread_info
*tp
;
928 tp
= find_thread_ptid (inferior_ptid
);
929 gdb_assert (tp
!= NULL
);
931 replay
= tp
->btrace
.replay
;
934 const struct btrace_insn
*insn
;
935 struct gdbarch
*gdbarch
;
938 gdbarch
= get_regcache_arch (regcache
);
939 pcreg
= gdbarch_pc_regnum (gdbarch
);
943 /* We can only provide the PC register. */
944 if (regno
>= 0 && regno
!= pcreg
)
947 insn
= btrace_insn_get (replay
);
948 gdb_assert (insn
!= NULL
);
950 regcache_raw_supply (regcache
, regno
, &insn
->pc
);
954 struct target_ops
*t
;
956 for (t
= ops
->beneath
; t
!= NULL
; t
= t
->beneath
)
957 if (t
->to_fetch_registers
!= NULL
)
959 t
->to_fetch_registers (t
, regcache
, regno
);
965 /* The to_store_registers method of target record-btrace. */
968 record_btrace_store_registers (struct target_ops
*ops
,
969 struct regcache
*regcache
, int regno
)
971 struct target_ops
*t
;
973 if (record_btrace_is_replaying (ops
))
974 error (_("This record target does not allow writing registers."));
976 gdb_assert (may_write_registers
!= 0);
978 for (t
= ops
->beneath
; t
!= NULL
; t
= t
->beneath
)
979 if (t
->to_store_registers
!= NULL
)
981 t
->to_store_registers (t
, regcache
, regno
);
988 /* The to_prepare_to_store method of target record-btrace. */
991 record_btrace_prepare_to_store (struct target_ops
*ops
,
992 struct regcache
*regcache
)
994 struct target_ops
*t
;
996 if (record_btrace_is_replaying (ops
))
999 for (t
= ops
->beneath
; t
!= NULL
; t
= t
->beneath
)
1000 if (t
->to_prepare_to_store
!= NULL
)
1002 t
->to_prepare_to_store (t
, regcache
);
1007 /* The branch trace frame cache. */
1009 struct btrace_frame_cache
1012 struct thread_info
*tp
;
1014 /* The frame info. */
1015 struct frame_info
*frame
;
1017 /* The branch trace function segment. */
1018 const struct btrace_function
*bfun
;
1021 /* A struct btrace_frame_cache hash table indexed by NEXT. */
1023 static htab_t bfcache
;
1025 /* hash_f for htab_create_alloc of bfcache. */
1028 bfcache_hash (const void *arg
)
1030 const struct btrace_frame_cache
*cache
= arg
;
1032 return htab_hash_pointer (cache
->frame
);
1035 /* eq_f for htab_create_alloc of bfcache. */
1038 bfcache_eq (const void *arg1
, const void *arg2
)
1040 const struct btrace_frame_cache
*cache1
= arg1
;
1041 const struct btrace_frame_cache
*cache2
= arg2
;
1043 return cache1
->frame
== cache2
->frame
;
1046 /* Create a new btrace frame cache. */
1048 static struct btrace_frame_cache
*
1049 bfcache_new (struct frame_info
*frame
)
1051 struct btrace_frame_cache
*cache
;
1054 cache
= FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache
);
1055 cache
->frame
= frame
;
1057 slot
= htab_find_slot (bfcache
, cache
, INSERT
);
1058 gdb_assert (*slot
== NULL
);
1064 /* Extract the branch trace function from a branch trace frame. */
1066 static const struct btrace_function
*
1067 btrace_get_frame_function (struct frame_info
*frame
)
1069 const struct btrace_frame_cache
*cache
;
1070 const struct btrace_function
*bfun
;
1071 struct btrace_frame_cache pattern
;
1074 pattern
.frame
= frame
;
1076 slot
= htab_find_slot (bfcache
, &pattern
, NO_INSERT
);
1084 /* Implement stop_reason method for record_btrace_frame_unwind. */
1086 static enum unwind_stop_reason
1087 record_btrace_frame_unwind_stop_reason (struct frame_info
*this_frame
,
1090 const struct btrace_frame_cache
*cache
;
1091 const struct btrace_function
*bfun
;
1093 cache
= *this_cache
;
1095 gdb_assert (bfun
!= NULL
);
1097 if (bfun
->up
== NULL
)
1098 return UNWIND_UNAVAILABLE
;
1100 return UNWIND_NO_REASON
;
1103 /* Implement this_id method for record_btrace_frame_unwind. */
1106 record_btrace_frame_this_id (struct frame_info
*this_frame
, void **this_cache
,
1107 struct frame_id
*this_id
)
1109 const struct btrace_frame_cache
*cache
;
1110 const struct btrace_function
*bfun
;
1111 CORE_ADDR code
, special
;
1113 cache
= *this_cache
;
1116 gdb_assert (bfun
!= NULL
);
1118 while (bfun
->segment
.prev
!= NULL
)
1119 bfun
= bfun
->segment
.prev
;
1121 code
= get_frame_func (this_frame
);
1122 special
= bfun
->number
;
1124 *this_id
= frame_id_build_unavailable_stack_special (code
, special
);
1126 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1127 btrace_get_bfun_name (cache
->bfun
),
1128 core_addr_to_string_nz (this_id
->code_addr
),
1129 core_addr_to_string_nz (this_id
->special_addr
));
1132 /* Implement prev_register method for record_btrace_frame_unwind. */
1134 static struct value
*
1135 record_btrace_frame_prev_register (struct frame_info
*this_frame
,
1139 const struct btrace_frame_cache
*cache
;
1140 const struct btrace_function
*bfun
, *caller
;
1141 const struct btrace_insn
*insn
;
1142 struct gdbarch
*gdbarch
;
1146 gdbarch
= get_frame_arch (this_frame
);
1147 pcreg
= gdbarch_pc_regnum (gdbarch
);
1148 if (pcreg
< 0 || regnum
!= pcreg
)
1149 throw_error (NOT_AVAILABLE_ERROR
,
1150 _("Registers are not available in btrace record history"));
1152 cache
= *this_cache
;
1154 gdb_assert (bfun
!= NULL
);
1158 throw_error (NOT_AVAILABLE_ERROR
,
1159 _("No caller in btrace record history"));
1161 if ((bfun
->flags
& BFUN_UP_LINKS_TO_RET
) != 0)
1163 insn
= VEC_index (btrace_insn_s
, caller
->insn
, 0);
1168 insn
= VEC_last (btrace_insn_s
, caller
->insn
);
1171 pc
+= gdb_insn_length (gdbarch
, pc
);
1174 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1175 btrace_get_bfun_name (bfun
), bfun
->level
,
1176 core_addr_to_string_nz (pc
));
1178 return frame_unwind_got_address (this_frame
, regnum
, pc
);
1181 /* Implement sniffer method for record_btrace_frame_unwind. */
1184 record_btrace_frame_sniffer (const struct frame_unwind
*self
,
1185 struct frame_info
*this_frame
,
1188 const struct btrace_function
*bfun
;
1189 struct btrace_frame_cache
*cache
;
1190 struct thread_info
*tp
;
1191 struct frame_info
*next
;
1193 /* THIS_FRAME does not contain a reference to its thread. */
1194 tp
= find_thread_ptid (inferior_ptid
);
1195 gdb_assert (tp
!= NULL
);
1198 next
= get_next_frame (this_frame
);
1201 const struct btrace_insn_iterator
*replay
;
1203 replay
= tp
->btrace
.replay
;
1205 bfun
= replay
->function
;
1209 const struct btrace_function
*callee
;
1211 callee
= btrace_get_frame_function (next
);
1212 if (callee
!= NULL
&& (callee
->flags
& BFUN_UP_LINKS_TO_TAILCALL
) == 0)
1219 DEBUG ("[frame] sniffed frame for %s on level %d",
1220 btrace_get_bfun_name (bfun
), bfun
->level
);
1222 /* This is our frame. Initialize the frame cache. */
1223 cache
= bfcache_new (this_frame
);
1227 *this_cache
= cache
;
1231 /* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1234 record_btrace_tailcall_frame_sniffer (const struct frame_unwind
*self
,
1235 struct frame_info
*this_frame
,
1238 const struct btrace_function
*bfun
, *callee
;
1239 struct btrace_frame_cache
*cache
;
1240 struct frame_info
*next
;
1242 next
= get_next_frame (this_frame
);
1246 callee
= btrace_get_frame_function (next
);
1250 if ((callee
->flags
& BFUN_UP_LINKS_TO_TAILCALL
) == 0)
1257 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1258 btrace_get_bfun_name (bfun
), bfun
->level
);
1260 /* This is our frame. Initialize the frame cache. */
1261 cache
= bfcache_new (this_frame
);
1262 cache
->tp
= find_thread_ptid (inferior_ptid
);
1265 *this_cache
= cache
;
1270 record_btrace_frame_dealloc_cache (struct frame_info
*self
, void *this_cache
)
1272 struct btrace_frame_cache
*cache
;
1277 slot
= htab_find_slot (bfcache
, cache
, NO_INSERT
);
1278 gdb_assert (slot
!= NULL
);
1280 htab_remove_elt (bfcache
, cache
);
1283 /* btrace recording does not store previous memory content, neither the stack
1284 frames content. Any unwinding would return errorneous results as the stack
1285 contents no longer matches the changed PC value restored from history.
1286 Therefore this unwinder reports any possibly unwound registers as
1289 const struct frame_unwind record_btrace_frame_unwind
=
1292 record_btrace_frame_unwind_stop_reason
,
1293 record_btrace_frame_this_id
,
1294 record_btrace_frame_prev_register
,
1296 record_btrace_frame_sniffer
,
1297 record_btrace_frame_dealloc_cache
1300 const struct frame_unwind record_btrace_tailcall_frame_unwind
=
1303 record_btrace_frame_unwind_stop_reason
,
1304 record_btrace_frame_this_id
,
1305 record_btrace_frame_prev_register
,
1307 record_btrace_tailcall_frame_sniffer
,
1308 record_btrace_frame_dealloc_cache
1311 /* Implement the to_get_unwinder method. */
1313 static const struct frame_unwind
*
1314 record_btrace_to_get_unwinder (struct target_ops
*self
)
1316 return &record_btrace_frame_unwind
;
1319 /* Implement the to_get_tailcall_unwinder method. */
1321 static const struct frame_unwind
*
1322 record_btrace_to_get_tailcall_unwinder (struct target_ops
*self
)
1324 return &record_btrace_tailcall_frame_unwind
;
1327 /* Indicate that TP should be resumed according to FLAG. */
1330 record_btrace_resume_thread (struct thread_info
*tp
,
1331 enum btrace_thread_flag flag
)
1333 struct btrace_thread_info
*btinfo
;
1335 DEBUG ("resuming %d (%s): %u", tp
->num
, target_pid_to_str (tp
->ptid
), flag
);
1337 btinfo
= &tp
->btrace
;
1339 if ((btinfo
->flags
& BTHR_MOVE
) != 0)
1340 error (_("Thread already moving."));
1342 /* Fetch the latest branch trace. */
1345 btinfo
->flags
|= flag
;
1348 /* Find the thread to resume given a PTID. */
1350 static struct thread_info
*
1351 record_btrace_find_resume_thread (ptid_t ptid
)
1353 struct thread_info
*tp
;
1355 /* When asked to resume everything, we pick the current thread. */
1356 if (ptid_equal (minus_one_ptid
, ptid
) || ptid_is_pid (ptid
))
1357 ptid
= inferior_ptid
;
1359 return find_thread_ptid (ptid
);
1362 /* Start replaying a thread. */
1364 static struct btrace_insn_iterator
*
1365 record_btrace_start_replaying (struct thread_info
*tp
)
1367 volatile struct gdb_exception except
;
1368 struct btrace_insn_iterator
*replay
;
1369 struct btrace_thread_info
*btinfo
;
1372 btinfo
= &tp
->btrace
;
1375 /* We can't start replaying without trace. */
1376 if (btinfo
->begin
== NULL
)
1379 /* Clear the executing flag to allow changes to the current frame.
1380 We are not actually running, yet. We just started a reverse execution
1381 command or a record goto command.
1382 For the latter, EXECUTING is false and this has no effect.
1383 For the former, EXECUTING is true and we're in to_wait, about to
1384 move the thread. Since we need to recompute the stack, we temporarily
1385 set EXECUTING to flase. */
1386 executing
= is_executing (tp
->ptid
);
1387 set_executing (tp
->ptid
, 0);
1389 /* GDB stores the current frame_id when stepping in order to detects steps
1391 Since frames are computed differently when we're replaying, we need to
1392 recompute those stored frames and fix them up so we can still detect
1393 subroutines after we started replaying. */
1394 TRY_CATCH (except
, RETURN_MASK_ALL
)
1396 struct frame_info
*frame
;
1397 struct frame_id frame_id
;
1398 int upd_step_frame_id
, upd_step_stack_frame_id
;
1400 /* The current frame without replaying - computed via normal unwind. */
1401 frame
= get_current_frame ();
1402 frame_id
= get_frame_id (frame
);
1404 /* Check if we need to update any stepping-related frame id's. */
1405 upd_step_frame_id
= frame_id_eq (frame_id
,
1406 tp
->control
.step_frame_id
);
1407 upd_step_stack_frame_id
= frame_id_eq (frame_id
,
1408 tp
->control
.step_stack_frame_id
);
1410 /* We start replaying at the end of the branch trace. This corresponds
1411 to the current instruction. */
1412 replay
= xmalloc (sizeof (*replay
));
1413 btrace_insn_end (replay
, btinfo
);
1415 /* We're not replaying, yet. */
1416 gdb_assert (btinfo
->replay
== NULL
);
1417 btinfo
->replay
= replay
;
1419 /* Make sure we're not using any stale registers. */
1420 registers_changed_ptid (tp
->ptid
);
1422 /* The current frame with replaying - computed via btrace unwind. */
1423 frame
= get_current_frame ();
1424 frame_id
= get_frame_id (frame
);
1426 /* Replace stepping related frames where necessary. */
1427 if (upd_step_frame_id
)
1428 tp
->control
.step_frame_id
= frame_id
;
1429 if (upd_step_stack_frame_id
)
1430 tp
->control
.step_stack_frame_id
= frame_id
;
1433 /* Restore the previous execution state. */
1434 set_executing (tp
->ptid
, executing
);
1436 if (except
.reason
< 0)
1438 xfree (btinfo
->replay
);
1439 btinfo
->replay
= NULL
;
1441 registers_changed_ptid (tp
->ptid
);
1443 throw_exception (except
);
1449 /* Stop replaying a thread. */
1452 record_btrace_stop_replaying (struct thread_info
*tp
)
1454 struct btrace_thread_info
*btinfo
;
1456 btinfo
= &tp
->btrace
;
1458 xfree (btinfo
->replay
);
1459 btinfo
->replay
= NULL
;
1461 /* Make sure we're not leaving any stale registers. */
1462 registers_changed_ptid (tp
->ptid
);
1465 /* The to_resume method of target record-btrace. */
1468 record_btrace_resume (struct target_ops
*ops
, ptid_t ptid
, int step
,
1469 enum gdb_signal signal
)
1471 struct thread_info
*tp
, *other
;
1472 enum btrace_thread_flag flag
;
1474 DEBUG ("resume %s: %s", target_pid_to_str (ptid
), step
? "step" : "cont");
1476 tp
= record_btrace_find_resume_thread (ptid
);
1478 error (_("Cannot find thread to resume."));
1480 /* Stop replaying other threads if the thread to resume is not replaying. */
1481 if (!btrace_is_replaying (tp
) && execution_direction
!= EXEC_REVERSE
)
1483 record_btrace_stop_replaying (other
);
1485 /* As long as we're not replaying, just forward the request. */
1486 if (!record_btrace_is_replaying (ops
) && execution_direction
!= EXEC_REVERSE
)
1488 for (ops
= ops
->beneath
; ops
!= NULL
; ops
= ops
->beneath
)
1489 if (ops
->to_resume
!= NULL
)
1490 return ops
->to_resume (ops
, ptid
, step
, signal
);
1492 error (_("Cannot find target for stepping."));
1495 /* Compute the btrace thread flag for the requested move. */
1497 flag
= execution_direction
== EXEC_REVERSE
? BTHR_RCONT
: BTHR_CONT
;
1499 flag
= execution_direction
== EXEC_REVERSE
? BTHR_RSTEP
: BTHR_STEP
;
1501 /* At the moment, we only move a single thread. We could also move
1502 all threads in parallel by single-stepping each resumed thread
1503 until the first runs into an event.
1504 When we do that, we would want to continue all other threads.
1505 For now, just resume one thread to not confuse to_wait. */
1506 record_btrace_resume_thread (tp
, flag
);
1508 /* We just indicate the resume intent here. The actual stepping happens in
1509 record_btrace_wait below. */
1512 /* Find a thread to move. */
1514 static struct thread_info
*
1515 record_btrace_find_thread_to_move (ptid_t ptid
)
1517 struct thread_info
*tp
;
1519 /* First check the parameter thread. */
1520 tp
= find_thread_ptid (ptid
);
1521 if (tp
!= NULL
&& (tp
->btrace
.flags
& BTHR_MOVE
) != 0)
1524 /* Otherwise, find one other thread that has been resumed. */
1526 if ((tp
->btrace
.flags
& BTHR_MOVE
) != 0)
1532 /* Return a target_waitstatus indicating that we ran out of history. */
1534 static struct target_waitstatus
1535 btrace_step_no_history (void)
1537 struct target_waitstatus status
;
1539 status
.kind
= TARGET_WAITKIND_NO_HISTORY
;
1544 /* Return a target_waitstatus indicating that a step finished. */
1546 static struct target_waitstatus
1547 btrace_step_stopped (void)
1549 struct target_waitstatus status
;
1551 status
.kind
= TARGET_WAITKIND_STOPPED
;
1552 status
.value
.sig
= GDB_SIGNAL_TRAP
;
1557 /* Clear the record histories. */
1560 record_btrace_clear_histories (struct btrace_thread_info
*btinfo
)
1562 xfree (btinfo
->insn_history
);
1563 xfree (btinfo
->call_history
);
1565 btinfo
->insn_history
= NULL
;
1566 btinfo
->call_history
= NULL
;
1569 /* Step a single thread. */
1571 static struct target_waitstatus
1572 record_btrace_step_thread (struct thread_info
*tp
)
1574 struct btrace_insn_iterator
*replay
, end
;
1575 struct btrace_thread_info
*btinfo
;
1576 struct address_space
*aspace
;
1577 struct inferior
*inf
;
1578 enum btrace_thread_flag flags
;
1581 /* We can't step without an execution history. */
1582 if (btrace_is_empty (tp
))
1583 return btrace_step_no_history ();
1585 btinfo
= &tp
->btrace
;
1586 replay
= btinfo
->replay
;
1588 flags
= btinfo
->flags
& BTHR_MOVE
;
1589 btinfo
->flags
&= ~BTHR_MOVE
;
1591 DEBUG ("stepping %d (%s): %u", tp
->num
, target_pid_to_str (tp
->ptid
), flags
);
1596 internal_error (__FILE__
, __LINE__
, _("invalid stepping type."));
1599 /* We're done if we're not replaying. */
1601 return btrace_step_no_history ();
1603 /* We are always able to step at least once. */
1604 steps
= btrace_insn_next (replay
, 1);
1605 gdb_assert (steps
== 1);
1607 /* Determine the end of the instruction trace. */
1608 btrace_insn_end (&end
, btinfo
);
1610 /* We stop replaying if we reached the end of the trace. */
1611 if (btrace_insn_cmp (replay
, &end
) == 0)
1612 record_btrace_stop_replaying (tp
);
1614 return btrace_step_stopped ();
1617 /* Start replaying if we're not already doing so. */
1619 replay
= record_btrace_start_replaying (tp
);
1621 /* If we can't step any further, we reached the end of the history. */
1622 steps
= btrace_insn_prev (replay
, 1);
1624 return btrace_step_no_history ();
1626 return btrace_step_stopped ();
1629 /* We're done if we're not replaying. */
1631 return btrace_step_no_history ();
1633 inf
= find_inferior_pid (ptid_get_pid (tp
->ptid
));
1634 aspace
= inf
->aspace
;
1636 /* Determine the end of the instruction trace. */
1637 btrace_insn_end (&end
, btinfo
);
1641 const struct btrace_insn
*insn
;
1643 /* We are always able to step at least once. */
1644 steps
= btrace_insn_next (replay
, 1);
1645 gdb_assert (steps
== 1);
1647 /* We stop replaying if we reached the end of the trace. */
1648 if (btrace_insn_cmp (replay
, &end
) == 0)
1650 record_btrace_stop_replaying (tp
);
1651 return btrace_step_no_history ();
1654 insn
= btrace_insn_get (replay
);
1657 DEBUG ("stepping %d (%s) ... %s", tp
->num
,
1658 target_pid_to_str (tp
->ptid
),
1659 core_addr_to_string_nz (insn
->pc
));
1661 if (breakpoint_here_p (aspace
, insn
->pc
))
1662 return btrace_step_stopped ();
1666 /* Start replaying if we're not already doing so. */
1668 replay
= record_btrace_start_replaying (tp
);
1670 inf
= find_inferior_pid (ptid_get_pid (tp
->ptid
));
1671 aspace
= inf
->aspace
;
1675 const struct btrace_insn
*insn
;
1677 /* If we can't step any further, we're done. */
1678 steps
= btrace_insn_prev (replay
, 1);
1680 return btrace_step_no_history ();
1682 insn
= btrace_insn_get (replay
);
1685 DEBUG ("reverse-stepping %d (%s) ... %s", tp
->num
,
1686 target_pid_to_str (tp
->ptid
),
1687 core_addr_to_string_nz (insn
->pc
));
1689 if (breakpoint_here_p (aspace
, insn
->pc
))
1690 return btrace_step_stopped ();
1695 /* The to_wait method of target record-btrace. */
1698 record_btrace_wait (struct target_ops
*ops
, ptid_t ptid
,
1699 struct target_waitstatus
*status
, int options
)
1701 struct thread_info
*tp
, *other
;
1703 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid
), options
);
1705 /* As long as we're not replaying, just forward the request. */
1706 if (!record_btrace_is_replaying (ops
) && execution_direction
!= EXEC_REVERSE
)
1708 for (ops
= ops
->beneath
; ops
!= NULL
; ops
= ops
->beneath
)
1709 if (ops
->to_wait
!= NULL
)
1710 return ops
->to_wait (ops
, ptid
, status
, options
);
1712 error (_("Cannot find target for waiting."));
1715 /* Let's find a thread to move. */
1716 tp
= record_btrace_find_thread_to_move (ptid
);
1719 DEBUG ("wait %s: no thread", target_pid_to_str (ptid
));
1721 status
->kind
= TARGET_WAITKIND_IGNORE
;
1722 return minus_one_ptid
;
1725 /* We only move a single thread. We're not able to correlate threads. */
1726 *status
= record_btrace_step_thread (tp
);
1728 /* Stop all other threads. */
1731 other
->btrace
.flags
&= ~BTHR_MOVE
;
1733 /* Start record histories anew from the current position. */
1734 record_btrace_clear_histories (&tp
->btrace
);
1736 /* We moved the replay position but did not update registers. */
1737 registers_changed_ptid (tp
->ptid
);
/* The to_can_execute_reverse method of target record-btrace.  */

static int
record_btrace_can_execute_reverse (struct target_ops *self)
{
  /* Replaying from recorded branch trace always supports reverse
     execution.  */
  return 1;
}
1750 /* The to_decr_pc_after_break method of target record-btrace. */
1753 record_btrace_decr_pc_after_break (struct target_ops
*ops
,
1754 struct gdbarch
*gdbarch
)
1756 /* When replaying, we do not actually execute the breakpoint instruction
1757 so there is no need to adjust the PC after hitting a breakpoint. */
1758 if (record_btrace_is_replaying (ops
))
1761 return ops
->beneath
->to_decr_pc_after_break (ops
->beneath
, gdbarch
);
1764 /* The to_find_new_threads method of target record-btrace. */
1767 record_btrace_find_new_threads (struct target_ops
*ops
)
1769 /* Don't expect new threads if we're replaying. */
1770 if (record_btrace_is_replaying (ops
))
1773 /* Forward the request. */
1774 for (ops
= ops
->beneath
; ops
!= NULL
; ops
= ops
->beneath
)
1775 if (ops
->to_find_new_threads
!= NULL
)
1777 ops
->to_find_new_threads (ops
);
1782 /* The to_thread_alive method of target record-btrace. */
1785 record_btrace_thread_alive (struct target_ops
*ops
, ptid_t ptid
)
1787 /* We don't add or remove threads during replay. */
1788 if (record_btrace_is_replaying (ops
))
1789 return find_thread_ptid (ptid
) != NULL
;
1791 /* Forward the request. */
1792 for (ops
= ops
->beneath
; ops
!= NULL
; ops
= ops
->beneath
)
1793 if (ops
->to_thread_alive
!= NULL
)
1794 return ops
->to_thread_alive (ops
, ptid
);
1799 /* Set the replay branch trace instruction iterator. If IT is NULL, replay
1803 record_btrace_set_replay (struct thread_info
*tp
,
1804 const struct btrace_insn_iterator
*it
)
1806 struct btrace_thread_info
*btinfo
;
1808 btinfo
= &tp
->btrace
;
1810 if (it
== NULL
|| it
->function
== NULL
)
1811 record_btrace_stop_replaying (tp
);
1814 if (btinfo
->replay
== NULL
)
1815 record_btrace_start_replaying (tp
);
1816 else if (btrace_insn_cmp (btinfo
->replay
, it
) == 0)
1819 *btinfo
->replay
= *it
;
1820 registers_changed_ptid (tp
->ptid
);
1823 /* Start anew from the new replay position. */
1824 record_btrace_clear_histories (btinfo
);
1827 /* The to_goto_record_begin method of target record-btrace. */
1830 record_btrace_goto_begin (struct target_ops
*self
)
1832 struct thread_info
*tp
;
1833 struct btrace_insn_iterator begin
;
1835 tp
= require_btrace_thread ();
1837 btrace_insn_begin (&begin
, &tp
->btrace
);
1838 record_btrace_set_replay (tp
, &begin
);
1840 print_stack_frame (get_selected_frame (NULL
), 1, SRC_AND_LOC
, 1);
1843 /* The to_goto_record_end method of target record-btrace. */
1846 record_btrace_goto_end (struct target_ops
*ops
)
1848 struct thread_info
*tp
;
1850 tp
= require_btrace_thread ();
1852 record_btrace_set_replay (tp
, NULL
);
1854 print_stack_frame (get_selected_frame (NULL
), 1, SRC_AND_LOC
, 1);
1857 /* The to_goto_record method of target record-btrace. */
1860 record_btrace_goto (struct target_ops
*self
, ULONGEST insn
)
1862 struct thread_info
*tp
;
1863 struct btrace_insn_iterator it
;
1864 unsigned int number
;
1869 /* Check for wrap-arounds. */
1871 error (_("Instruction number out of range."));
1873 tp
= require_btrace_thread ();
1875 found
= btrace_find_insn_by_number (&it
, &tp
->btrace
, number
);
1877 error (_("No such instruction."));
1879 record_btrace_set_replay (tp
, &it
);
1881 print_stack_frame (get_selected_frame (NULL
), 1, SRC_AND_LOC
, 1);
1884 /* Initialize the record-btrace target ops. */
1887 init_record_btrace_ops (void)
1889 struct target_ops
*ops
;
1891 ops
= &record_btrace_ops
;
1892 ops
->to_shortname
= "record-btrace";
1893 ops
->to_longname
= "Branch tracing target";
1894 ops
->to_doc
= "Collect control-flow trace and provide the execution history.";
1895 ops
->to_open
= record_btrace_open
;
1896 ops
->to_close
= record_btrace_close
;
1897 ops
->to_detach
= record_detach
;
1898 ops
->to_disconnect
= record_disconnect
;
1899 ops
->to_mourn_inferior
= record_mourn_inferior
;
1900 ops
->to_kill
= record_kill
;
1901 ops
->to_stop_recording
= record_btrace_stop_recording
;
1902 ops
->to_info_record
= record_btrace_info
;
1903 ops
->to_insn_history
= record_btrace_insn_history
;
1904 ops
->to_insn_history_from
= record_btrace_insn_history_from
;
1905 ops
->to_insn_history_range
= record_btrace_insn_history_range
;
1906 ops
->to_call_history
= record_btrace_call_history
;
1907 ops
->to_call_history_from
= record_btrace_call_history_from
;
1908 ops
->to_call_history_range
= record_btrace_call_history_range
;
1909 ops
->to_record_is_replaying
= record_btrace_is_replaying
;
1910 ops
->to_xfer_partial
= record_btrace_xfer_partial
;
1911 ops
->to_remove_breakpoint
= record_btrace_remove_breakpoint
;
1912 ops
->to_insert_breakpoint
= record_btrace_insert_breakpoint
;
1913 ops
->to_fetch_registers
= record_btrace_fetch_registers
;
1914 ops
->to_store_registers
= record_btrace_store_registers
;
1915 ops
->to_prepare_to_store
= record_btrace_prepare_to_store
;
1916 ops
->to_get_unwinder
= &record_btrace_to_get_unwinder
;
1917 ops
->to_get_tailcall_unwinder
= &record_btrace_to_get_tailcall_unwinder
;
1918 ops
->to_resume
= record_btrace_resume
;
1919 ops
->to_wait
= record_btrace_wait
;
1920 ops
->to_find_new_threads
= record_btrace_find_new_threads
;
1921 ops
->to_thread_alive
= record_btrace_thread_alive
;
1922 ops
->to_goto_record_begin
= record_btrace_goto_begin
;
1923 ops
->to_goto_record_end
= record_btrace_goto_end
;
1924 ops
->to_goto_record
= record_btrace_goto
;
1925 ops
->to_can_execute_reverse
= record_btrace_can_execute_reverse
;
1926 ops
->to_decr_pc_after_break
= record_btrace_decr_pc_after_break
;
1927 ops
->to_stratum
= record_stratum
;
1928 ops
->to_magic
= OPS_MAGIC
;
1931 /* Alias for "target record". */
1934 cmd_record_btrace_start (char *args
, int from_tty
)
1936 if (args
!= NULL
&& *args
!= 0)
1937 error (_("Invalid argument."));
1939 execute_command ("target record-btrace", from_tty
);
1942 void _initialize_record_btrace (void);
1944 /* Initialize btrace commands. */
1947 _initialize_record_btrace (void)
1949 add_cmd ("btrace", class_obscure
, cmd_record_btrace_start
,
1950 _("Start branch trace recording."),
1952 add_alias_cmd ("b", "btrace", class_obscure
, 1, &record_cmdlist
);
1954 init_record_btrace_ops ();
1955 add_target (&record_btrace_ops
);
1957 bfcache
= htab_create_alloc (50, bfcache_hash
, bfcache_eq
, NULL
,