1 /* Branch trace support for GDB, the GNU debugger.
3 Copyright (C) 2013-2015 Free Software Foundation, Inc.
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "gdbthread.h"
29 #include "cli/cli-utils.h"
33 #include "filenames.h"
35 #include "frame-unwind.h"
38 #include "event-loop.h"
/* The target_ops of record-btrace.  */
static struct target_ops record_btrace_ops;

/* A new thread observer enabling branch tracing for the new thread.  */
static struct observer *record_btrace_thread_observer;

/* Memory access types used in set/show record btrace replay-memory-access.  */
static const char replay_memory_access_read_only[] = "read-only";
static const char replay_memory_access_read_write[] = "read-write";
/* NOTE(review): the initializer braces and trailing NULL sentinel of this
   enum-string table appear to be missing from this extraction — verify.  */
static const char *const replay_memory_access_types[] =
replay_memory_access_read_only,
replay_memory_access_read_write,

/* The currently allowed replay memory access type.  */
static const char *replay_memory_access = replay_memory_access_read_only;

/* Command lists for "set/show record btrace".  */
static struct cmd_list_element *set_record_btrace_cmdlist;
static struct cmd_list_element *show_record_btrace_cmdlist;

/* The execution direction of the last resume we got.  See record-full.c.  */
static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;

/* The async event handler for reverse/replay execution.  */
static struct async_event_handler *record_btrace_async_inferior_event_handler;

/* A flag indicating that we are currently generating a core file.  */
static int record_btrace_generating_corefile;

/* Print a record-btrace debug message.  Use do ... while (0) to avoid
   ambiguities when used in if statements.
   NOTE(review): the do/while wrapper lines seem to have been dropped by
   this extraction; only the body remains.  */

#define DEBUG(msg, args...) \
  if (record_debug != 0) \
    fprintf_unfiltered (gdb_stdlog, \
			"[record-btrace] " msg "\n", ##args); \
/* Update the branch trace for the current thread and return a pointer to its
   thread_info.
   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */
/* NOTE(review): the function braces, the NULL-thread guard and the final
   return statement are missing from this extraction — verify upstream.  */
static struct thread_info *
require_btrace_thread (void)
  struct thread_info *tp;
  tp = find_thread_ptid (inferior_ptid);
  error (_("No thread."));
  if (btrace_is_empty (tp))
  error (_("No trace."));
/* Update the branch trace for the current thread and return a pointer to its
   branch trace information struct.
   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */
/* NOTE(review): braces and the return of &tp->btrace are missing here.  */
static struct btrace_thread_info *
require_btrace (void)
  struct thread_info *tp;
  tp = require_btrace_thread ();
/* Enable branch tracing for one thread.  Warn on errors.  */
/* NOTE(review): return type, braces and the btrace_enable call inside the
   TRY_CATCH body are missing from this extraction.  */
record_btrace_enable_warn (struct thread_info *tp)
  volatile struct gdb_exception error;
  TRY_CATCH (error, RETURN_MASK_ERROR)
  /* Report a caught error as a warning rather than failing the command.  */
  if (error.message != NULL)
    warning ("%s", error.message);
/* Callback function to disable branch tracing for one thread.  */
/* NOTE(review): only the declaration of TP survives; the body that converts
   ARG and disables tracing is missing from this extraction.  */
record_btrace_disable_callback (void *arg)
  struct thread_info *tp;
/* Enable automatic tracing of new threads.  */
record_btrace_auto_enable (void)
  DEBUG ("attach thread observer");
  /* Remember the observer so it can be detached again later.  */
  record_btrace_thread_observer
    = observer_attach_new_thread (record_btrace_enable_warn);
/* Disable automatic tracing of new threads.  */
record_btrace_auto_disable (void)
  /* The observer may have been detached, already.  */
  if (record_btrace_thread_observer == NULL)
  DEBUG ("detach thread observer");
  observer_detach_new_thread (record_btrace_thread_observer);
  /* Mark the observer detached so a second call is a no-op.  */
  record_btrace_thread_observer = NULL;
/* The record-btrace async event handler function.  */
record_btrace_handle_async_inferior_event (gdb_client_data data)
  /* Dispatch a regular inferior event; DATA is unused.  */
  inferior_event_handler (INF_REG_EVENT, NULL);
/* The to_open method of target record-btrace.
   ARGS optionally selects the threads to trace by number; FROM_TTY is the
   usual command origin flag.  */
/* NOTE(review): several lines (braces, non-stop check condition, btrace_enable
   call, handler argument) are missing from this extraction — verify.  */
record_btrace_open (const char *args, int from_tty)
  struct cleanup *disable_chain;
  struct thread_info *tp;
  if (!target_has_execution)
    error (_("The program is not being run."));
  if (!target_supports_btrace ())
    error (_("Target does not support branch tracing."));
  error (_("Record btrace can't debug inferior in non-stop mode."));
  gdb_assert (record_btrace_thread_observer == NULL);
  /* Collect per-thread disable cleanups so a failure undoes prior enables.  */
  disable_chain = make_cleanup (null_cleanup, NULL);
  ALL_NON_EXITED_THREADS (tp)
    if (args == NULL || *args == 0 || number_is_in_list (args, tp->num))
  make_cleanup (record_btrace_disable_callback, tp);
  record_btrace_auto_enable ();
  push_target (&record_btrace_ops);
  record_btrace_async_inferior_event_handler
    = create_async_event_handler (record_btrace_handle_async_inferior_event,
  record_btrace_generating_corefile = 0;
  observer_notify_record_changed (current_inferior (), 1);
  /* Success: keep tracing enabled.  */
  discard_cleanups (disable_chain);
/* The to_stop_recording method of target record-btrace.  */
/* NOTE(review): braces and the btrace_disable call under the if are missing
   from this extraction.  */
record_btrace_stop_recording (struct target_ops *self)
  struct thread_info *tp;
  DEBUG ("stop recording");
  record_btrace_auto_disable ();
  ALL_NON_EXITED_THREADS (tp)
    if (tp->btrace.target != NULL)
/* The to_close method of target record-btrace.  */
record_btrace_close (struct target_ops *self)
  struct thread_info *tp;
  if (record_btrace_async_inferior_event_handler != NULL)
    delete_async_event_handler (&record_btrace_async_inferior_event_handler);
  /* Make sure automatic recording gets disabled even if we did not stop
     recording before closing the record-btrace target.  */
  record_btrace_auto_disable ();
  /* We should have already stopped recording.
     Tear down btrace in case we have not.  */
  ALL_NON_EXITED_THREADS (tp)
    btrace_teardown (tp);
/* The to_async method of target record-btrace.
   A non-NULL CALLBACK enables async mode; NULL disables it.  */
/* NOTE(review): the rest of the parameter list (context) and the else branch
   structure are missing from this extraction.  */
record_btrace_async (struct target_ops *ops,
		     void (*callback) (enum inferior_event_type event_type,
  if (callback != NULL)
    mark_async_event_handler (record_btrace_async_inferior_event_handler);
  clear_async_event_handler (record_btrace_async_inferior_event_handler);
  /* Forward the request to the target beneath.  */
  ops->beneath->to_async (ops->beneath, callback, context);
/* The to_info_record method of target record-btrace.
   Prints instruction/function counts and the replay position, if any.  */
/* NOTE(review): braces, the NULL-thread guard condition and the btrace_fetch
   call are missing from this extraction.  */
record_btrace_info (struct target_ops *self)
  struct btrace_thread_info *btinfo;
  struct thread_info *tp;
  unsigned int insns, calls;
  tp = find_thread_ptid (inferior_ptid);
  error (_("No thread."));
  btinfo = &tp->btrace;
  if (!btrace_is_empty (tp))
    struct btrace_call_iterator call;
    struct btrace_insn_iterator insn;
    /* The highest call/insn numbers are found one step before the end
       iterators.  */
    btrace_call_end (&call, btinfo);
    btrace_call_prev (&call, 1);
    calls = btrace_call_number (&call);
    btrace_insn_end (&insn, btinfo);
    btrace_insn_prev (&insn, 1);
    insns = btrace_insn_number (&insn);
  printf_unfiltered (_("Recorded %u instructions in %u functions for thread "
		       "%d (%s).\n"), insns, calls, tp->num,
		     target_pid_to_str (tp->ptid));
  if (btrace_is_replaying (tp))
    printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
		       btrace_insn_number (btinfo->replay));
/* Print an unsigned int VAL into field FLD of UIOUT.  */
ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
  ui_out_field_fmt (uiout, fld, "%u", val);
/* Disassemble a section of the recorded instruction trace.
   Prints each instruction in [BEGIN; END) with its trace index.  */
btrace_insn_history (struct ui_out *uiout,
		     const struct btrace_insn_iterator *begin,
		     const struct btrace_insn_iterator *end, int flags)
  struct gdbarch *gdbarch;
  struct btrace_insn_iterator it;
  DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
	 btrace_insn_number (end));
  gdbarch = target_gdbarch ();
  for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
    const struct btrace_insn *insn;
    insn = btrace_insn_get (&it);
    /* Print the instruction index.  */
    ui_out_field_uint (uiout, "index", btrace_insn_number (&it));
    ui_out_text (uiout, "\t");
    /* Disassembly with '/m' flag may not produce the expected result.
       [comment truncated in extraction — verify upstream]  */
    /* Disassemble exactly one instruction at INSN->pc.  */
    gdb_disassembly (gdbarch, uiout, NULL, flags, 1, insn->pc, insn->pc + 1);
/* The to_insn_history method of target record-btrace.
   Shows |SIZE| instructions around the current browsing position; the sign
   of SIZE selects the direction.  */
/* NOTE(review): braces, several guard conditions and the cleanup-tuple
   argument are missing from this extraction — verify upstream.  */
record_btrace_insn_history (struct target_ops *self, int size, int flags)
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int context, covered;
  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
  context = abs (size);
  error (_("Bad record instruction-history-size."));
  btinfo = require_btrace ();
  history = btinfo->insn_history;
    struct btrace_insn_iterator *replay;
    DEBUG ("insn-history (0x%x): %d", flags, size);
    /* If we're replaying, we start at the replay position.  Otherwise, we
       start at the tail of the trace.  */
    replay = btinfo->replay;
    btrace_insn_end (&begin, btinfo);
    /* We start from here and expand in the requested direction.  Then we
       expand in the other direction, as well, to fill up any remaining
       context.  [comment truncated in extraction]  */
    /* We want the current position covered, as well.  */
    covered = btrace_insn_next (&end, 1);
    covered += btrace_insn_prev (&begin, context - covered);
    covered += btrace_insn_next (&end, context - covered);
    covered = btrace_insn_next (&end, context);
    covered += btrace_insn_prev (&begin, context - covered);
  /* Continue browsing from the previous request.  */
  begin = history->begin;
  DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
	 btrace_insn_number (&begin), btrace_insn_number (&end));
  covered = btrace_insn_prev (&begin, context);
  covered = btrace_insn_next (&end, context);
  btrace_insn_history (uiout, &begin, &end, flags);
  printf_unfiltered (_("At the start of the branch trace record.\n"));
  printf_unfiltered (_("At the end of the branch trace record.\n"));
  /* Remember the printed range for the next browsing request.  */
  btrace_set_insn_history (btinfo, &begin, &end);
  do_cleanups (uiout_cleanup);
/* The to_insn_history_range method of target record-btrace.
   Prints instructions [FROM; TO] inclusive.  */
/* NOTE(review): braces, the declaration of FOUND, and the low/high
   assignments are missing from this extraction.  */
record_btrace_insn_history_range (struct target_ops *self,
				  ULONGEST from, ULONGEST to, int flags)
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int low, high;
  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
  DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);
  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));
  error (_("Bad range."));
  btinfo = require_btrace ();
  found = btrace_find_insn_by_number (&begin, btinfo, low);
  error (_("Range out of bounds."));
  found = btrace_find_insn_by_number (&end, btinfo, high);
  /* Silently truncate the range.  */
  btrace_insn_end (&end, btinfo);
  /* We want both begin and end to be inclusive.  */
  btrace_insn_next (&end, 1);
  btrace_insn_history (uiout, &begin, &end, flags);
  btrace_set_insn_history (btinfo, &begin, &end);
  do_cleanups (uiout_cleanup);
/* The to_insn_history_from method of target record-btrace.
   Computes an inclusive [BEGIN; END] window of |SIZE| instructions anchored
   at FROM and delegates to the range method.  */
/* NOTE(review): the direction checks and the wrap-around clamping code are
   missing from this extraction.  */
record_btrace_insn_history_from (struct target_ops *self,
				 ULONGEST from, int size, int flags)
  ULONGEST begin, end, context;
  context = abs (size);
  error (_("Bad record instruction-history-size."));
  begin = from - context + 1;
  end = from + context - 1;
  /* Check for wrap-around.  */
  record_btrace_insn_history_range (self, begin, end, flags);
/* Print the instruction number range for a function call history line.
   The range is [BFUN->insn_offset; BFUN->insn_offset + #insns - 1].  */
btrace_call_history_insn_range (struct ui_out *uiout,
				const struct btrace_function *bfun)
  unsigned int begin, end, size;
  size = VEC_length (btrace_insn_s, bfun->insn);
  gdb_assert (size > 0);
  begin = bfun->insn_offset;
  end = begin + size - 1;
  ui_out_field_uint (uiout, "insn begin", begin);
  ui_out_text (uiout, ",");
  ui_out_field_uint (uiout, "insn end", end);
/* Print the source line information for a function call history line.  */
/* NOTE(review): the SYM lookup, the END computation and the begin==end
   short-cut are missing from this extraction — verify upstream.  */
btrace_call_history_src_line (struct ui_out *uiout,
			      const struct btrace_function *bfun)
  ui_out_field_string (uiout, "file",
		       symtab_to_filename_for_display (symbol_symtab (sym)));
  begin = bfun->lbegin;
  ui_out_text (uiout, ":");
  ui_out_field_int (uiout, "min line", begin);
  ui_out_text (uiout, ",");
  ui_out_field_int (uiout, "max line", end);
/* Get the name of a branch trace function.
   Prefers the full symbol, falls back to the minimal symbol.  */
/* NOTE(review): the SYM declaration/checks and the final fallback return are
   missing from this extraction.  */
btrace_get_bfun_name (const struct btrace_function *bfun)
  struct minimal_symbol *msym;
  return SYMBOL_PRINT_NAME (sym);
  else if (msym != NULL)
    return MSYMBOL_PRINT_NAME (msym);
/* Disassemble a section of the recorded function trace.
   Prints each call segment in [BEGIN; END) with index, optional call-depth
   indentation, function name, instruction range and source line.  */
/* NOTE(review): braces and the sym/msym lookups per segment are missing from
   this extraction — verify upstream.  */
btrace_call_history (struct ui_out *uiout,
		     const struct btrace_thread_info *btinfo,
		     const struct btrace_call_iterator *begin,
		     const struct btrace_call_iterator *end,
		     enum record_print_flag flags)
  struct btrace_call_iterator it;
  DEBUG ("ftrace (0x%x): [%u; %u)", flags, btrace_call_number (begin),
	 btrace_call_number (end));
  for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
    const struct btrace_function *bfun;
    struct minimal_symbol *msym;
    bfun = btrace_call_get (&it);
    /* Print the function index.  */
    ui_out_field_uint (uiout, "index", bfun->number);
    ui_out_text (uiout, "\t");
    if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
      /* Indent proportionally to the call depth.  */
      int level = bfun->level + btinfo->level, i;
      for (i = 0; i < level; ++i)
	ui_out_text (uiout, "  ");
    ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (sym));
    else if (msym != NULL)
      ui_out_field_string (uiout, "function", MSYMBOL_PRINT_NAME (msym));
    else if (!ui_out_is_mi_like_p (uiout))
      ui_out_field_string (uiout, "function", "??");
    if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
      ui_out_text (uiout, _("\tinst "));
      btrace_call_history_insn_range (uiout, bfun);
    if ((flags & RECORD_PRINT_SRC_LINE) != 0)
      ui_out_text (uiout, _("\tat "));
      btrace_call_history_src_line (uiout, bfun);
    ui_out_text (uiout, "\n");
/* The to_call_history method of target record-btrace.
   Shows |SIZE| function-call segments around the current browsing position;
   the sign of SIZE selects the direction.  */
/* NOTE(review): braces, several guards and the cleanup-tuple argument are
   missing from this extraction — verify upstream.  */
record_btrace_call_history (struct target_ops *self, int size, int flags)
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int context, covered;
  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
  context = abs (size);
  error (_("Bad record function-call-history-size."));
  btinfo = require_btrace ();
  history = btinfo->call_history;
    struct btrace_insn_iterator *replay;
    DEBUG ("call-history (0x%x): %d", flags, size);
    /* If we're replaying, we start at the replay position.  Otherwise, we
       start at the tail of the trace.  */
    replay = btinfo->replay;
    /* Seed the call iterator from the replayed instruction's segment.  */
    begin.function = replay->function;
    begin.btinfo = btinfo;
    btrace_call_end (&begin, btinfo);
    /* We start from here and expand in the requested direction.  Then we
       expand in the other direction, as well, to fill up any remaining
       context.  [comment truncated in extraction]  */
    /* We want the current position covered, as well.  */
    covered = btrace_call_next (&end, 1);
    covered += btrace_call_prev (&begin, context - covered);
    covered += btrace_call_next (&end, context - covered);
    covered = btrace_call_next (&end, context);
    covered += btrace_call_prev (&begin, context - covered);
  /* Continue browsing from the previous request.  */
  begin = history->begin;
  DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", flags, size,
	 btrace_call_number (&begin), btrace_call_number (&end));
  covered = btrace_call_prev (&begin, context);
  covered = btrace_call_next (&end, context);
  btrace_call_history (uiout, btinfo, &begin, &end, flags);
  printf_unfiltered (_("At the start of the branch trace record.\n"));
  printf_unfiltered (_("At the end of the branch trace record.\n"));
  /* Remember the printed range for the next browsing request.  */
  btrace_set_call_history (btinfo, &begin, &end);
  do_cleanups (uiout_cleanup);
/* The to_call_history_range method of target record-btrace.
   Prints call segments [FROM; TO] inclusive.  */
/* NOTE(review): braces, the declaration of FOUND, and the low/high
   assignments are missing from this extraction.  */
record_btrace_call_history_range (struct target_ops *self,
				  ULONGEST from, ULONGEST to, int flags)
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int low, high;
  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
  DEBUG ("call-history (0x%x): [%u; %u)", flags, low, high);
  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));
  error (_("Bad range."));
  btinfo = require_btrace ();
  found = btrace_find_call_by_number (&begin, btinfo, low);
  error (_("Range out of bounds."));
  found = btrace_find_call_by_number (&end, btinfo, high);
  /* Silently truncate the range.  */
  btrace_call_end (&end, btinfo);
  /* We want both begin and end to be inclusive.  */
  btrace_call_next (&end, 1);
  btrace_call_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_call_history (btinfo, &begin, &end);
  do_cleanups (uiout_cleanup);
/* The to_call_history_from method of target record-btrace.
   Computes an inclusive [BEGIN; END] window of |SIZE| call segments anchored
   at FROM and delegates to the range method.  */
/* NOTE(review): the direction checks and the wrap-around clamping code are
   missing from this extraction.  */
record_btrace_call_history_from (struct target_ops *self,
				 ULONGEST from, int size, int flags)
  ULONGEST begin, end, context;
  context = abs (size);
  error (_("Bad record function-call-history-size."));
  begin = from - context + 1;
  end = from + context - 1;
  /* Check for wrap-around.  */
  record_btrace_call_history_range (self, begin, end, flags);
/* The to_record_is_replaying method of target record-btrace.
   Returns non-zero if any non-exited thread is replaying.  */
/* NOTE(review): braces and the return statements are missing from this
   extraction.  */
record_btrace_is_replaying (struct target_ops *self)
  struct thread_info *tp;
  ALL_NON_EXITED_THREADS (tp)
    if (btrace_is_replaying (tp))
/* The to_xfer_partial method of target record-btrace.
   While replaying in read-only mode, restricts memory access to reads of
   read-only sections; everything else is forwarded beneath.  */
/* NOTE(review): the switch statement, several braces and the beneath-target
   iteration before the forwarding call are missing from this extraction.  */
static enum target_xfer_status
record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
			    const char *annex, gdb_byte *readbuf,
			    const gdb_byte *writebuf, ULONGEST offset,
			    ULONGEST len, ULONGEST *xfered_len)
  struct target_ops *t;
  /* Filter out requests that don't make sense during replay.  */
  if (replay_memory_access == replay_memory_access_read_only
      && !record_btrace_generating_corefile
      && record_btrace_is_replaying (ops))
      case TARGET_OBJECT_MEMORY:
	  struct target_section *section;
	  /* We do not allow writing memory in general.  */
	  if (writebuf != NULL)
	      return TARGET_XFER_UNAVAILABLE;
	  /* We allow reading readonly memory.  */
	  section = target_section_by_addr (ops, offset);
	      /* Check if the section we found is readonly.  */
	      if ((bfd_get_section_flags (section->the_bfd_section->owner,
					  section->the_bfd_section)
		   & SEC_READONLY) != 0)
		  /* Truncate the request to fit into this section.  */
		  len = min (len, section->endaddr - offset);
	      return TARGET_XFER_UNAVAILABLE;
  /* Forward the request.  */
  return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
			       offset, len, xfered_len);
/* The to_insert_breakpoint method of target record-btrace.
   Temporarily lifts the replay memory-access restriction so the breakpoint
   can be written, then restores it even on error.  */
/* NOTE(review): the declarations of OLD and RET and the final return are
   missing from this extraction.  */
record_btrace_insert_breakpoint (struct target_ops *ops,
				 struct gdbarch *gdbarch,
				 struct bp_target_info *bp_tgt)
  volatile struct gdb_exception except;
  /* Inserting breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;
  TRY_CATCH (except, RETURN_MASK_ALL)
    ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);
  /* Restore the access mode before re-throwing any error.  */
  replay_memory_access = old;
  if (except.reason < 0)
    throw_exception (except);
/* The to_remove_breakpoint method of target record-btrace.
   Mirror of the insert method: lift the replay restriction for the call.  */
/* NOTE(review): the declarations of OLD and RET and the final return are
   missing from this extraction.  */
record_btrace_remove_breakpoint (struct target_ops *ops,
				 struct gdbarch *gdbarch,
				 struct bp_target_info *bp_tgt)
  volatile struct gdb_exception except;
  /* Removing breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;
  TRY_CATCH (except, RETURN_MASK_ALL)
    ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt);
  /* Restore the access mode before re-throwing any error.  */
  replay_memory_access = old;
  if (except.reason < 0)
    throw_exception (except);
/* The to_fetch_registers method of target record-btrace.
   While replaying, only the PC (taken from the replayed instruction) is
   available; otherwise the request is forwarded beneath.  */
/* NOTE(review): braces, the PCREG declaration and the regno normalization
   are missing from this extraction.  */
record_btrace_fetch_registers (struct target_ops *ops,
			       struct regcache *regcache, int regno)
  struct btrace_insn_iterator *replay;
  struct thread_info *tp;
  tp = find_thread_ptid (inferior_ptid);
  gdb_assert (tp != NULL);
  replay = tp->btrace.replay;
  if (replay != NULL && !record_btrace_generating_corefile)
      const struct btrace_insn *insn;
      struct gdbarch *gdbarch;
      gdbarch = get_regcache_arch (regcache);
      pcreg = gdbarch_pc_regnum (gdbarch);
      /* We can only provide the PC register.  */
      if (regno >= 0 && regno != pcreg)
      insn = btrace_insn_get (replay);
      gdb_assert (insn != NULL);
      regcache_raw_supply (regcache, regno, &insn->pc);
      struct target_ops *t = ops->beneath;
      t->to_fetch_registers (t, regcache, regno);
/* The to_store_registers method of target record-btrace.
   Refuses writes while replaying; otherwise forwards beneath.  */
/* NOTE(review): the assignment of T (find_target_beneath) is missing from
   this extraction.  */
record_btrace_store_registers (struct target_ops *ops,
			       struct regcache *regcache, int regno)
  struct target_ops *t;
  if (!record_btrace_generating_corefile && record_btrace_is_replaying (ops))
    error (_("This record target does not allow writing registers."));
  gdb_assert (may_write_registers != 0);
  t->to_store_registers (t, regcache, regno);
/* The to_prepare_to_store method of target record-btrace.
   A no-op while replaying; otherwise forwards beneath.  */
/* NOTE(review): the early return under the replay check and the assignment
   of T are missing from this extraction.  */
record_btrace_prepare_to_store (struct target_ops *ops,
				struct regcache *regcache)
  struct target_ops *t;
  if (!record_btrace_generating_corefile && record_btrace_is_replaying (ops))
  t->to_prepare_to_store (t, regcache);
/* The branch trace frame cache.  */
struct btrace_frame_cache
  /* The thread this frame belongs to.
     NOTE(review): this field comment was likely dropped by the extraction.  */
  struct thread_info *tp;
  /* The frame info.  */
  struct frame_info *frame;
  /* The branch trace function segment.  */
  const struct btrace_function *bfun;

/* A struct btrace_frame_cache hash table indexed by NEXT.  */
static htab_t bfcache;
/* hash_f for htab_create_alloc of bfcache.
   Hashes a cache entry by its frame pointer.  */
bfcache_hash (const void *arg)
  const struct btrace_frame_cache *cache = arg;
  return htab_hash_pointer (cache->frame);
/* eq_f for htab_create_alloc of bfcache.
   Two cache entries are equal iff they refer to the same frame.  */
bfcache_eq (const void *arg1, const void *arg2)
  const struct btrace_frame_cache *cache1 = arg1;
  const struct btrace_frame_cache *cache2 = arg2;
  return cache1->frame == cache2->frame;
/* Create a new btrace frame cache for FRAME and insert it into bfcache.  */
/* NOTE(review): the SLOT declaration, the *slot assignment and the return
   are missing from this extraction.  */
static struct btrace_frame_cache *
bfcache_new (struct frame_info *frame)
  struct btrace_frame_cache *cache;
  /* Allocated on the frame obstack, so it lives as long as the frame.  */
  cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
  cache->frame = frame;
  slot = htab_find_slot (bfcache, cache, INSERT);
  gdb_assert (*slot == NULL);
/* Extract the branch trace function from a branch trace frame.
   Looks FRAME up in bfcache; the not-found path and the return of
   cache->bfun are missing from this extraction — verify upstream.  */
static const struct btrace_function *
btrace_get_frame_function (struct frame_info *frame)
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;
  struct btrace_frame_cache pattern;
  pattern.frame = frame;
  slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
/* Implement stop_reason method for record_btrace_frame_unwind.
   Unwinding stops (UNWIND_UNAVAILABLE) when the segment has no caller.  */
/* NOTE(review): the this_cache parameter and the bfun assignment are missing
   from this extraction.  */
static enum unwind_stop_reason
record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;
  cache = *this_cache;
  gdb_assert (bfun != NULL);
  if (bfun->up == NULL)
    return UNWIND_UNAVAILABLE;
  return UNWIND_NO_REASON;
/* Implement this_id method for record_btrace_frame_unwind.
   Builds an unavailable-stack frame id from the frame's function start
   address and the first segment's number.  */
/* NOTE(review): the bfun assignment from the cache is missing from this
   extraction.  */
record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
			     struct frame_id *this_id)
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;
  CORE_ADDR code, special;
  cache = *this_cache;
  gdb_assert (bfun != NULL);
  /* Use the first segment of this function chain as the id anchor.  */
  while (bfun->segment.prev != NULL)
    bfun = bfun->segment.prev;
  code = get_frame_func (this_frame);
  special = bfun->number;
  *this_id = frame_id_build_unavailable_stack_special (code, special);
  DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
	 btrace_get_bfun_name (cache->bfun),
	 core_addr_to_string_nz (this_id->code_addr),
	 core_addr_to_string_nz (this_id->special_addr));
/* Implement prev_register method for record_btrace_frame_unwind.
   Only the PC can be unwound: for a return link it is the instruction after
   the call site in the caller; for other links the caller's first insn.  */
/* NOTE(review): remaining parameters (this_cache, regnum), the PC/PCREG
   declarations, the caller lookup and branch structure are missing from this
   extraction — verify upstream.  */
static struct value *
record_btrace_frame_prev_register (struct frame_info *this_frame,
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun, *caller;
  const struct btrace_insn *insn;
  struct gdbarch *gdbarch;
  gdbarch = get_frame_arch (this_frame);
  pcreg = gdbarch_pc_regnum (gdbarch);
  if (pcreg < 0 || regnum != pcreg)
    throw_error (NOT_AVAILABLE_ERROR,
		 _("Registers are not available in btrace record history"));
  cache = *this_cache;
  gdb_assert (bfun != NULL);
  throw_error (NOT_AVAILABLE_ERROR,
	       _("No caller in btrace record history"));
  if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
      insn = VEC_index (btrace_insn_s, caller->insn, 0);
      insn = VEC_last (btrace_insn_s, caller->insn);
      /* Skip past the call instruction to get the return address.  */
      pc += gdb_insn_length (gdbarch, pc);
  DEBUG ("[frame] unwound PC in %s on level %d: %s",
	 btrace_get_bfun_name (bfun), bfun->level,
	 core_addr_to_string_nz (pc));
  return frame_unwind_got_address (this_frame, regnum, pc);
/* Implement sniffer method for record_btrace_frame_unwind.
   Claims the frame when the thread is replaying: the innermost frame comes
   from the replay iterator, outer frames from the callee's "up" link.  */
/* NOTE(review): the this_cache parameter, several braces/guards, the bfun
   assignment for outer frames and the cache->tp/bfun stores are missing from
   this extraction — verify upstream.  */
record_btrace_frame_sniffer (const struct frame_unwind *self,
			     struct frame_info *this_frame,
  const struct btrace_function *bfun;
  struct btrace_frame_cache *cache;
  struct thread_info *tp;
  struct frame_info *next;
  /* THIS_FRAME does not contain a reference to its thread.  */
  tp = find_thread_ptid (inferior_ptid);
  gdb_assert (tp != NULL);
  next = get_next_frame (this_frame);
      const struct btrace_insn_iterator *replay;
      replay = tp->btrace.replay;
	bfun = replay->function;
      const struct btrace_function *callee;
      callee = btrace_get_frame_function (next);
      if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
  DEBUG ("[frame] sniffed frame for %s on level %d",
	 btrace_get_bfun_name (bfun), bfun->level);
  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  *this_cache = cache;
/* Implement sniffer method for record_btrace_tailcall_frame_unwind.
   Claims a frame only when the next (callee) frame was entered via a tail
   call, as indicated by BFUN_UP_LINKS_TO_TAILCALL.  */
/* NOTE(review): the this_cache parameter, NULL checks on NEXT/CALLEE, the
   bfun assignment and the cache->bfun store are missing from this
   extraction.  */
record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
				      struct frame_info *this_frame,
  const struct btrace_function *bfun, *callee;
  struct btrace_frame_cache *cache;
  struct frame_info *next;
  next = get_next_frame (this_frame);
  callee = btrace_get_frame_function (next);
  if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
  DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
	 btrace_get_bfun_name (bfun), bfun->level);
  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = find_thread_ptid (inferior_ptid);
  *this_cache = cache;
/* Frame-cache destructor: remove THIS_CACHE's entry from bfcache.
   The entry itself lives on the frame obstack and is not freed here.  */
/* NOTE(review): the CACHE assignment from THIS_CACHE and the SLOT declaration
   are missing from this extraction.  */
record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
  struct btrace_frame_cache *cache;
  slot = htab_find_slot (bfcache, cache, NO_INSERT);
  gdb_assert (slot != NULL);
  htab_remove_elt (bfcache, cache);
/* btrace recording does not store previous memory content, neither the stack
   frames content.  Any unwinding would return erroneous results as the stack
   contents no longer matches the changed PC value restored from history.
   Therefore this unwinder reports any possibly unwound registers as
   unavailable.  */
/* NOTE(review): the initializer braces, the frame-type members and the
   trailing default members appear to be missing from this extraction.  */
const struct frame_unwind record_btrace_frame_unwind =
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  record_btrace_frame_sniffer,
  record_btrace_frame_dealloc_cache

const struct frame_unwind record_btrace_tailcall_frame_unwind =
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  record_btrace_tailcall_frame_sniffer,
  record_btrace_frame_dealloc_cache
/* Implement the to_get_unwinder method.  */
static const struct frame_unwind *
record_btrace_to_get_unwinder (struct target_ops *self)
  return &record_btrace_frame_unwind;
/* Implement the to_get_tailcall_unwinder method.  */
static const struct frame_unwind *
record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
  return &record_btrace_tailcall_frame_unwind;
/* Indicate that TP should be resumed according to FLAG.  */
/* NOTE(review): the return type, braces and the btrace_fetch call under the
   "Fetch the latest branch trace" comment are missing from this
   extraction.  */
record_btrace_resume_thread (struct thread_info *tp,
			     enum btrace_thread_flag flag)
  struct btrace_thread_info *btinfo;
  DEBUG ("resuming %d (%s): %u", tp->num, target_pid_to_str (tp->ptid), flag);
  btinfo = &tp->btrace;
  /* A thread may only have one pending move at a time.  */
  if ((btinfo->flags & BTHR_MOVE) != 0)
    error (_("Thread already moving."));
  /* Fetch the latest branch trace.  */
  btinfo->flags |= flag;
/* Find the thread to resume given a PTID.  May return NULL if no such
   thread exists.  */
static struct thread_info *
record_btrace_find_resume_thread (ptid_t ptid)
  struct thread_info *tp;
  /* When asked to resume everything, we pick the current thread.  */
  if (ptid_equal (minus_one_ptid, ptid) || ptid_is_pid (ptid))
    ptid = inferior_ptid;
  return find_thread_ptid (ptid);
/* Start replaying a thread.
   Creates the replay iterator positioned at the end of the trace, fixes up
   stepping-related frame ids, and returns the iterator.  */
/* NOTE(review): braces, the btrace_fetch call, the EXECUTING declaration and
   the final return are missing from this extraction — verify upstream.  */
static struct btrace_insn_iterator *
record_btrace_start_replaying (struct thread_info *tp)
  volatile struct gdb_exception except;
  struct btrace_insn_iterator *replay;
  struct btrace_thread_info *btinfo;
  btinfo = &tp->btrace;
  /* We can't start replaying without trace.  */
  if (btinfo->begin == NULL)
  /* Clear the executing flag to allow changes to the current frame.
     We are not actually running, yet.  We just started a reverse execution
     command or a record goto command.
     For the latter, EXECUTING is false and this has no effect.
     For the former, EXECUTING is true and we're in to_wait, about to
     move the thread.  Since we need to recompute the stack, we temporarily
     set EXECUTING to false.  */
  executing = is_executing (tp->ptid);
  set_executing (tp->ptid, 0);
  /* GDB stores the current frame_id when stepping in order to detect steps
     into subroutines.
     Since frames are computed differently when we're replaying, we need to
     recompute those stored frames and fix them up so we can still detect
     subroutines after we started replaying.  */
  TRY_CATCH (except, RETURN_MASK_ALL)
      struct frame_info *frame;
      struct frame_id frame_id;
      int upd_step_frame_id, upd_step_stack_frame_id;
      /* The current frame without replaying - computed via normal unwind.  */
      frame = get_current_frame ();
      frame_id = get_frame_id (frame);
      /* Check if we need to update any stepping-related frame id's.  */
      upd_step_frame_id = frame_id_eq (frame_id,
				       tp->control.step_frame_id);
      upd_step_stack_frame_id = frame_id_eq (frame_id,
					     tp->control.step_stack_frame_id);
      /* We start replaying at the end of the branch trace.  This corresponds
	 to the current instruction.  */
      replay = xmalloc (sizeof (*replay));
      btrace_insn_end (replay, btinfo);
      /* We're not replaying, yet.  */
      gdb_assert (btinfo->replay == NULL);
      btinfo->replay = replay;
      /* Make sure we're not using any stale registers.  */
      registers_changed_ptid (tp->ptid);
      /* The current frame with replaying - computed via btrace unwind.  */
      frame = get_current_frame ();
      frame_id = get_frame_id (frame);
      /* Replace stepping related frames where necessary.  */
      if (upd_step_frame_id)
	tp->control.step_frame_id = frame_id;
      if (upd_step_stack_frame_id)
	tp->control.step_stack_frame_id = frame_id;
  /* Restore the previous execution state.  */
  set_executing (tp->ptid, executing);
  /* On error, undo the partial replay setup before re-throwing.  */
  if (except.reason < 0)
      xfree (btinfo->replay);
      btinfo->replay = NULL;
      registers_changed_ptid (tp->ptid);
      throw_exception (except);
1490 /* Stop replaying a thread. */
1493 record_btrace_stop_replaying (struct thread_info
*tp
)
1495 struct btrace_thread_info
*btinfo
;
1497 btinfo
= &tp
->btrace
;
1499 xfree (btinfo
->replay
);
1500 btinfo
->replay
= NULL
;
1502 /* Make sure we're not leaving any stale registers. */
1503 registers_changed_ptid (tp
->ptid
);
1506 /* The to_resume method of target record-btrace. */
/* Records the resume intent for the thread matching PTID.  When we are
   not replaying and not executing in reverse, the request is forwarded
   to the target beneath.  The actual stepping happens later in
   record_btrace_wait.  */
1509 record_btrace_resume (struct target_ops
*ops
, ptid_t ptid
, int step
,
1510 enum gdb_signal signal
)
1512 struct thread_info
*tp
, *other
;
1513 enum btrace_thread_flag flag
;
1515 DEBUG ("resume %s: %s", target_pid_to_str (ptid
), step
? "step" : "cont");
1517 /* Store the execution direction of the last resume. */
1518 record_btrace_resume_exec_dir
= execution_direction
;
1520 tp
= record_btrace_find_resume_thread (ptid
);
/* NOTE(review): a "tp == NULL" guard before this error call is not
   visible in this copy -- as written the error would be unconditional;
   confirm against the upstream source.  */
1522 error (_("Cannot find thread to resume."));
1524 /* Stop replaying other threads if the thread to resume is not replaying. */
1525 if (!btrace_is_replaying (tp
) && execution_direction
!= EXEC_REVERSE
)
1526 ALL_NON_EXITED_THREADS (other
)
1527 record_btrace_stop_replaying (other
);
1529 /* As long as we're not replaying, just forward the request. */
1530 if (!record_btrace_is_replaying (ops
) && execution_direction
!= EXEC_REVERSE
)
1533 return ops
->to_resume (ops
, ptid
, step
, signal
);
1536 /* Compute the btrace thread flag for the requested move. */
/* NOTE(review): the STEP/continue branch selecting between the two
   assignments below is not visible in this copy -- as written the
   second assignment would always win; confirm upstream.  */
1538 flag
= execution_direction
== EXEC_REVERSE
? BTHR_RCONT
: BTHR_CONT
;
1540 flag
= execution_direction
== EXEC_REVERSE
? BTHR_RSTEP
: BTHR_STEP
;
1542 /* At the moment, we only move a single thread. We could also move
1543 all threads in parallel by single-stepping each resumed thread
1544 until the first runs into an event.
1545 When we do that, we would want to continue all other threads.
1546 For now, just resume one thread to not confuse to_wait. */
1547 record_btrace_resume_thread (tp
, flag
);
1549 /* We just indicate the resume intent here. The actual stepping happens in
1550 record_btrace_wait below. */
1552 /* Async support. */
1553 if (target_can_async_p ())
1555 target_async (inferior_event_handler
, 0);
1556 mark_async_event_handler (record_btrace_async_inferior_event_handler
);
1560 /* Find a thread to move. */
/* Returns a thread with a pending BTHR_MOVE request: preferably the
   thread named by PTID, otherwise any other non-exited thread with a
   pending move.  */
1562 static struct thread_info
*
1563 record_btrace_find_thread_to_move (ptid_t ptid
)
1565 struct thread_info
*tp
;
1567 /* First check the parameter thread. */
1568 tp
= find_thread_ptid (ptid
);
1569 if (tp
!= NULL
&& (tp
->btrace
.flags
& BTHR_MOVE
) != 0)
/* NOTE(review): the "return tp;" for this case is not visible in this
   copy -- confirm upstream.  */
1572 /* Otherwise, find one other thread that has been resumed. */
1573 ALL_NON_EXITED_THREADS (tp
)
1574 if ((tp
->btrace
.flags
& BTHR_MOVE
) != 0)
/* NOTE(review): the "return tp;" inside the loop and the trailing
   "return NULL;" are not visible in this copy -- as written the
   function has no visible return; confirm upstream.  */
1580 /* Return a target_waitstatus indicating that we ran out of history. */
1582 static struct target_waitstatus
1583 btrace_step_no_history (void)
1585 struct target_waitstatus status
;
1587 status
.kind
= TARGET_WAITKIND_NO_HISTORY
;
1592 /* Return a target_waitstatus indicating that a step finished. */
1594 static struct target_waitstatus
1595 btrace_step_stopped (void)
1597 struct target_waitstatus status
;
1599 status
.kind
= TARGET_WAITKIND_STOPPED
;
1600 status
.value
.sig
= GDB_SIGNAL_TRAP
;
1605 /* Clear the record histories. */
1608 record_btrace_clear_histories (struct btrace_thread_info
*btinfo
)
1610 xfree (btinfo
->insn_history
);
1611 xfree (btinfo
->call_history
);
1613 btinfo
->insn_history
= NULL
;
1614 btinfo
->call_history
= NULL
;
1617 /* Step a single thread. */
/* Executes TP's pending BTHR_* move request against the recorded trace
   and returns the resulting wait status.  NOTE(review): the switch on
   FLAGS, its case labels, braces and several returns appear to have
   been lost in this copy; the four sections below presumably
   correspond to single-step, reverse-step, continue and
   reverse-continue -- confirm against the upstream source.  */
1619 static struct target_waitstatus
1620 record_btrace_step_thread (struct thread_info
*tp
)
1622 struct btrace_insn_iterator
*replay
, end
;
1623 struct btrace_thread_info
*btinfo
;
1624 struct address_space
*aspace
;
1625 struct inferior
*inf
;
1626 enum btrace_thread_flag flags
;
1629 /* We can't step without an execution history. */
1630 if (btrace_is_empty (tp
))
1631 return btrace_step_no_history ();
1633 btinfo
= &tp
->btrace
;
1634 replay
= btinfo
->replay
;
/* Consume the pending move request: remember it in FLAGS and clear it
   from the thread so it is not executed twice.  */
1636 flags
= btinfo
->flags
& BTHR_MOVE
;
1637 btinfo
->flags
&= ~BTHR_MOVE
;
1639 DEBUG ("stepping %d (%s): %u", tp
->num
, target_pid_to_str (tp
->ptid
), flags
);
/* Unknown stepping request.  */
1644 internal_error (__FILE__
, __LINE__
, _("invalid stepping type."));
/* Presumably the forward single-step (BTHR_STEP) case -- confirm.  */
1647 /* We're done if we're not replaying. */
1649 return btrace_step_no_history ();
1651 /* We are always able to step at least once. */
1652 steps
= btrace_insn_next (replay
, 1);
1653 gdb_assert (steps
== 1);
1655 /* Determine the end of the instruction trace. */
1656 btrace_insn_end (&end
, btinfo
);
1658 /* We stop replaying if we reached the end of the trace. */
1659 if (btrace_insn_cmp (replay
, &end
) == 0)
1660 record_btrace_stop_replaying (tp
);
1662 return btrace_step_stopped ();
/* Presumably the reverse single-step (BTHR_RSTEP) case -- confirm.  */
1665 /* Start replaying if we're not already doing so. */
1667 replay
= record_btrace_start_replaying (tp
);
1669 /* If we can't step any further, we reached the end of the history. */
1670 steps
= btrace_insn_prev (replay
, 1);
1672 return btrace_step_no_history ();
1674 return btrace_step_stopped ();
/* Presumably the forward continue (BTHR_CONT) case -- confirm.  */
1677 /* We're done if we're not replaying. */
1679 return btrace_step_no_history ();
1681 inf
= find_inferior_ptid (tp
->ptid
);
1682 aspace
= inf
->aspace
;
1684 /* Determine the end of the instruction trace. */
1685 btrace_insn_end (&end
, btinfo
);
1689 const struct btrace_insn
*insn
;
1691 /* We are always able to step at least once. */
1692 steps
= btrace_insn_next (replay
, 1);
1693 gdb_assert (steps
== 1);
1695 /* We stop replaying if we reached the end of the trace. */
1696 if (btrace_insn_cmp (replay
, &end
) == 0)
1698 record_btrace_stop_replaying (tp
);
1699 return btrace_step_no_history ();
1702 insn
= btrace_insn_get (replay
);
1705 DEBUG ("stepping %d (%s) ... %s", tp
->num
,
1706 target_pid_to_str (tp
->ptid
),
1707 core_addr_to_string_nz (insn
->pc
));
/* Stop when the replayed instruction hits a breakpoint.  */
1709 if (breakpoint_here_p (aspace
, insn
->pc
))
1710 return btrace_step_stopped ();
/* Presumably the reverse continue (BTHR_RCONT) case -- confirm.  */
1714 /* Start replaying if we're not already doing so. */
1716 replay
= record_btrace_start_replaying (tp
);
1718 inf
= find_inferior_ptid (tp
->ptid
);
1719 aspace
= inf
->aspace
;
1723 const struct btrace_insn
*insn
;
1725 /* If we can't step any further, we're done. */
1726 steps
= btrace_insn_prev (replay
, 1);
1728 return btrace_step_no_history ();
1730 insn
= btrace_insn_get (replay
);
1733 DEBUG ("reverse-stepping %d (%s) ... %s", tp
->num
,
1734 target_pid_to_str (tp
->ptid
),
1735 core_addr_to_string_nz (insn
->pc
));
/* Stop when the replayed instruction hits a breakpoint.  */
1737 if (breakpoint_here_p (aspace
, insn
->pc
))
1738 return btrace_step_stopped ();
1743 /* The to_wait method of target record-btrace. */
/* Executes one pending move request (recorded by to_resume) against
   the branch trace and reports the resulting stop in STATUS.  When we
   are not replaying forward, the request is forwarded beneath.  */
1746 record_btrace_wait (struct target_ops
*ops
, ptid_t ptid
,
1747 struct target_waitstatus
*status
, int options
)
1749 struct thread_info
*tp
, *other
;
1751 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid
), options
);
1753 /* As long as we're not replaying, just forward the request. */
1754 if (!record_btrace_is_replaying (ops
) && execution_direction
!= EXEC_REVERSE
)
1757 return ops
->to_wait (ops
, ptid
, status
, options
);
1760 /* Let's find a thread to move. */
1761 tp
= record_btrace_find_thread_to_move (ptid
);
/* NOTE(review): the "tp == NULL" guard around the ignore-and-return
   block below is not visible in this copy -- confirm upstream.  */
1764 DEBUG ("wait %s: no thread", target_pid_to_str (ptid
));
/* Nothing has a pending move: report an ignorable event.  */
1766 status
->kind
= TARGET_WAITKIND_IGNORE
;
1767 return minus_one_ptid
;
1770 /* We only move a single thread. We're not able to correlate threads. */
1771 *status
= record_btrace_step_thread (tp
);
1773 /* Stop all other threads. */
1775 ALL_NON_EXITED_THREADS (other
)
1776 other
->btrace
.flags
&= ~BTHR_MOVE
;
1778 /* Start record histories anew from the current position. */
1779 record_btrace_clear_histories (&tp
->btrace
);
1781 /* We moved the replay position but did not update registers. */
1782 registers_changed_ptid (tp
->ptid
);
/* NOTE(review): the final "return tp->ptid;" is not visible in this
   copy -- confirm upstream.  */
1787 /* The to_can_execute_reverse method of target record-btrace. */
/* NOTE(review): only the signature is visible here; the return type
   line and the body have been lost in this copy.  A record target
   supports reverse execution, so presumably "return 1;" -- confirm
   against the upstream source.  */
1790 record_btrace_can_execute_reverse (struct target_ops
*self
)
1795 /* The to_decr_pc_after_break method of target record-btrace. */
1798 record_btrace_decr_pc_after_break (struct target_ops
*ops
,
1799 struct gdbarch
*gdbarch
)
1801 /* When replaying, we do not actually execute the breakpoint instruction
1802 so there is no need to adjust the PC after hitting a breakpoint. */
1803 if (record_btrace_is_replaying (ops
))
/* NOTE(review): the "return 0;" for the replaying case is not visible
   in this copy -- confirm upstream.  */
/* Otherwise defer to the target beneath.  */
1806 return ops
->beneath
->to_decr_pc_after_break (ops
->beneath
, gdbarch
);
1809 /* The to_update_thread_list method of target record-btrace. */
1812 record_btrace_update_thread_list (struct target_ops
*ops
)
1814 /* We don't add or remove threads during replay. */
1815 if (record_btrace_is_replaying (ops
))
/* NOTE(review): the early "return" for the replaying case is not
   visible in this copy -- confirm upstream.  */
1818 /* Forward the request. */
/* NOTE(review): forwarding through OPS itself would recurse into this
   method; an "ops = ops->beneath;" line appears to have been lost in
   this copy -- confirm upstream.  */
1820 ops
->to_update_thread_list (ops
);
1823 /* The to_thread_alive method of target record-btrace. */
1826 record_btrace_thread_alive (struct target_ops
*ops
, ptid_t ptid
)
1828 /* We don't add or remove threads during replay. */
/* During replay the thread set is frozen: a thread is alive iff we
   still know about it.  */
1829 if (record_btrace_is_replaying (ops
))
1830 return find_thread_ptid (ptid
) != NULL
;
1832 /* Forward the request. */
/* NOTE(review): forwarding through OPS itself would recurse into this
   method; an "ops = ops->beneath;" line appears to have been lost in
   this copy -- confirm upstream.  */
1834 return ops
->to_thread_alive (ops
, ptid
);
1837 /* Set the replay branch trace instruction iterator. If IT is NULL, replay
/* ... stops and TP returns to the live position; otherwise TP's replay
   iterator is set to *IT.  Either way the record histories are rebuilt
   from the new position.  */
1841 record_btrace_set_replay (struct thread_info
*tp
,
1842 const struct btrace_insn_iterator
*it
)
1844 struct btrace_thread_info
*btinfo
;
1846 btinfo
= &tp
->btrace
;
1848 if (it
== NULL
|| it
->function
== NULL
)
1849 record_btrace_stop_replaying (tp
);
/* NOTE(review): the "else" arm bracketing the block below is not
   visible in this copy -- confirm upstream.  */
1852 if (btinfo
->replay
== NULL
)
1853 record_btrace_start_replaying (tp
);
/* If the replay position is unchanged, there is nothing to do.  */
1854 else if (btrace_insn_cmp (btinfo
->replay
, it
) == 0)
/* NOTE(review): the "return;" for the unchanged-position case is not
   visible in this copy -- confirm upstream.  */
1857 *btinfo
->replay
= *it
;
1858 registers_changed_ptid (tp
->ptid
);
1861 /* Start anew from the new replay position. */
1862 record_btrace_clear_histories (btinfo
);
1865 /* The to_goto_record_begin method of target record-btrace. */
1868 record_btrace_goto_begin (struct target_ops
*self
)
1870 struct thread_info
*tp
;
1871 struct btrace_insn_iterator begin
;
1873 tp
= require_btrace_thread ();
1875 btrace_insn_begin (&begin
, &tp
->btrace
);
1876 record_btrace_set_replay (tp
, &begin
);
1878 print_stack_frame (get_selected_frame (NULL
), 1, SRC_AND_LOC
, 1);
1881 /* The to_goto_record_end method of target record-btrace. */
1884 record_btrace_goto_end (struct target_ops
*ops
)
1886 struct thread_info
*tp
;
1888 tp
= require_btrace_thread ();
1890 record_btrace_set_replay (tp
, NULL
);
1892 print_stack_frame (get_selected_frame (NULL
), 1, SRC_AND_LOC
, 1);
1895 /* The to_goto_record method of target record-btrace. */
/* Moves the replay position to the recorded instruction number INSN.  */
1898 record_btrace_goto (struct target_ops
*self
, ULONGEST insn
)
1900 struct thread_info
*tp
;
1901 struct btrace_insn_iterator it
;
1902 unsigned int number
;
/* NOTE(review): a declaration of FOUND (used below) is not visible in
   this copy -- confirm upstream.  */
1907 /* Check for wrap-arounds. */
/* NOTE(review): the truncation of INSN into NUMBER and the comparison
   guarding this error are not visible in this copy -- as written the
   error would be unconditional; confirm upstream.  */
1909 error (_("Instruction number out of range."));
1911 tp
= require_btrace_thread ();
1913 found
= btrace_find_insn_by_number (&it
, &tp
->btrace
, number
);
/* NOTE(review): the "found == 0" guard before this error is not
   visible in this copy -- confirm upstream.  */
1915 error (_("No such instruction."));
1917 record_btrace_set_replay (tp
, &it
);
/* Show the user where the replay position ended up.  */
1919 print_stack_frame (get_selected_frame (NULL
), 1, SRC_AND_LOC
, 1);
1922 /* The to_execution_direction target method. */
1924 static enum exec_direction_kind
1925 record_btrace_execution_direction (struct target_ops
*self
)
1927 return record_btrace_resume_exec_dir
;
1930 /* The to_prepare_to_generate_core target method. */
1933 record_btrace_prepare_to_generate_core (struct target_ops
*self
)
1935 record_btrace_generating_corefile
= 1;
1938 /* The to_done_generating_core target method. */
1941 record_btrace_done_generating_core (struct target_ops
*self
)
1943 record_btrace_generating_corefile
= 0;
1946 /* Initialize the record-btrace target ops. */
1949 init_record_btrace_ops (void)
1951 struct target_ops
*ops
;
1953 ops
= &record_btrace_ops
;
1954 ops
->to_shortname
= "record-btrace";
1955 ops
->to_longname
= "Branch tracing target";
1956 ops
->to_doc
= "Collect control-flow trace and provide the execution history.";
1957 ops
->to_open
= record_btrace_open
;
1958 ops
->to_close
= record_btrace_close
;
1959 ops
->to_async
= record_btrace_async
;
1960 ops
->to_detach
= record_detach
;
1961 ops
->to_disconnect
= record_disconnect
;
1962 ops
->to_mourn_inferior
= record_mourn_inferior
;
1963 ops
->to_kill
= record_kill
;
1964 ops
->to_stop_recording
= record_btrace_stop_recording
;
1965 ops
->to_info_record
= record_btrace_info
;
1966 ops
->to_insn_history
= record_btrace_insn_history
;
1967 ops
->to_insn_history_from
= record_btrace_insn_history_from
;
1968 ops
->to_insn_history_range
= record_btrace_insn_history_range
;
1969 ops
->to_call_history
= record_btrace_call_history
;
1970 ops
->to_call_history_from
= record_btrace_call_history_from
;
1971 ops
->to_call_history_range
= record_btrace_call_history_range
;
1972 ops
->to_record_is_replaying
= record_btrace_is_replaying
;
1973 ops
->to_xfer_partial
= record_btrace_xfer_partial
;
1974 ops
->to_remove_breakpoint
= record_btrace_remove_breakpoint
;
1975 ops
->to_insert_breakpoint
= record_btrace_insert_breakpoint
;
1976 ops
->to_fetch_registers
= record_btrace_fetch_registers
;
1977 ops
->to_store_registers
= record_btrace_store_registers
;
1978 ops
->to_prepare_to_store
= record_btrace_prepare_to_store
;
1979 ops
->to_get_unwinder
= &record_btrace_to_get_unwinder
;
1980 ops
->to_get_tailcall_unwinder
= &record_btrace_to_get_tailcall_unwinder
;
1981 ops
->to_resume
= record_btrace_resume
;
1982 ops
->to_wait
= record_btrace_wait
;
1983 ops
->to_update_thread_list
= record_btrace_update_thread_list
;
1984 ops
->to_thread_alive
= record_btrace_thread_alive
;
1985 ops
->to_goto_record_begin
= record_btrace_goto_begin
;
1986 ops
->to_goto_record_end
= record_btrace_goto_end
;
1987 ops
->to_goto_record
= record_btrace_goto
;
1988 ops
->to_can_execute_reverse
= record_btrace_can_execute_reverse
;
1989 ops
->to_decr_pc_after_break
= record_btrace_decr_pc_after_break
;
1990 ops
->to_execution_direction
= record_btrace_execution_direction
;
1991 ops
->to_prepare_to_generate_core
= record_btrace_prepare_to_generate_core
;
1992 ops
->to_done_generating_core
= record_btrace_done_generating_core
;
1993 ops
->to_stratum
= record_stratum
;
1994 ops
->to_magic
= OPS_MAGIC
;
1997 /* Alias for "target record". */
2000 cmd_record_btrace_start (char *args
, int from_tty
)
2002 if (args
!= NULL
&& *args
!= 0)
2003 error (_("Invalid argument."));
2005 execute_command ("target record-btrace", from_tty
);
2008 /* The "set record btrace" command. */
2011 cmd_set_record_btrace (char *args
, int from_tty
)
2013 cmd_show_list (set_record_btrace_cmdlist
, from_tty
, "");
2016 /* The "show record btrace" command. */
2019 cmd_show_record_btrace (char *args
, int from_tty
)
2021 cmd_show_list (show_record_btrace_cmdlist
, from_tty
, "");
2024 /* The "show record btrace replay-memory-access" command. */
2027 cmd_show_replay_memory_access (struct ui_file
*file
, int from_tty
,
2028 struct cmd_list_element
*c
, const char *value
)
2030 fprintf_filtered (gdb_stdout
, _("Replay memory access is %s.\n"),
2031 replay_memory_access
);
2034 void _initialize_record_btrace (void);
2036 /* Initialize btrace commands. */
2039 _initialize_record_btrace (void)
2041 add_cmd ("btrace", class_obscure
, cmd_record_btrace_start
,
2042 _("Start branch trace recording."),
2044 add_alias_cmd ("b", "btrace", class_obscure
, 1, &record_cmdlist
);
2046 add_prefix_cmd ("btrace", class_support
, cmd_set_record_btrace
,
2047 _("Set record options"), &set_record_btrace_cmdlist
,
2048 "set record btrace ", 0, &set_record_cmdlist
);
2050 add_prefix_cmd ("btrace", class_support
, cmd_show_record_btrace
,
2051 _("Show record options"), &show_record_btrace_cmdlist
,
2052 "show record btrace ", 0, &show_record_cmdlist
);
2054 add_setshow_enum_cmd ("replay-memory-access", no_class
,
2055 replay_memory_access_types
, &replay_memory_access
, _("\
2056 Set what memory accesses are allowed during replay."), _("\
2057 Show what memory accesses are allowed during replay."),
2058 _("Default is READ-ONLY.\n\n\
2059 The btrace record target does not trace data.\n\
2060 The memory therefore corresponds to the live target and not \
2061 to the current replay position.\n\n\
2062 When READ-ONLY, allow accesses to read-only memory during replay.\n\
2063 When READ-WRITE, allow accesses to read-only and read-write memory during \
2065 NULL
, cmd_show_replay_memory_access
,
2066 &set_record_btrace_cmdlist
,
2067 &show_record_btrace_cmdlist
);
2069 init_record_btrace_ops ();
2070 add_target (&record_btrace_ops
);
2072 bfcache
= htab_create_alloc (50, bfcache_hash
, bfcache_eq
, NULL
,