1 /* Branch trace support for GDB, the GNU debugger.
3 Copyright (C) 2013-2015 Free Software Foundation, Inc.
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "gdbthread.h"
29 #include "cli/cli-utils.h"
33 #include "filenames.h"
35 #include "frame-unwind.h"
38 #include "event-loop.h"
41 /* The target_ops of record-btrace. */
42 static struct target_ops record_btrace_ops
;
44 /* A new thread observer enabling branch tracing for the new thread. */
45 static struct observer
*record_btrace_thread_observer
;
47 /* Memory access types used in set/show record btrace replay-memory-access. */
48 static const char replay_memory_access_read_only
[] = "read-only";
49 static const char replay_memory_access_read_write
[] = "read-write";
50 static const char *const replay_memory_access_types
[] =
52 replay_memory_access_read_only
,
53 replay_memory_access_read_write
,
57 /* The currently allowed replay memory access type. */
58 static const char *replay_memory_access
= replay_memory_access_read_only
;
60 /* Command lists for "set/show record btrace". */
61 static struct cmd_list_element
*set_record_btrace_cmdlist
;
62 static struct cmd_list_element
*show_record_btrace_cmdlist
;
64 /* The execution direction of the last resume we got. See record-full.c. */
65 static enum exec_direction_kind record_btrace_resume_exec_dir
= EXEC_FORWARD
;
67 /* The async event handler for reverse/replay execution. */
68 static struct async_event_handler
*record_btrace_async_inferior_event_handler
;
70 /* A flag indicating that we are currently generating a core file. */
71 static int record_btrace_generating_corefile
;
73 /* The current branch trace configuration. */
74 static struct btrace_config record_btrace_conf
;
76 /* Command list for "record btrace". */
77 static struct cmd_list_element
*record_btrace_cmdlist
;
79 /* Command lists for "set/show record btrace bts". */
80 static struct cmd_list_element
*set_record_btrace_bts_cmdlist
;
81 static struct cmd_list_element
*show_record_btrace_bts_cmdlist
;
83 /* Print a record-btrace debug message. Use do ... while (0) to avoid
84 ambiguities when used in if statements. */
86 #define DEBUG(msg, args...) \
89 if (record_debug != 0) \
90 fprintf_unfiltered (gdb_stdlog, \
91 "[record-btrace] " msg "\n", ##args); \
96 /* Update the branch trace for the current thread and return a pointer to its
99 Throws an error if there is no thread or no trace. This function never
102 static struct thread_info
*
103 require_btrace_thread (void)
105 struct thread_info
*tp
;
109 tp
= find_thread_ptid (inferior_ptid
);
111 error (_("No thread."));
115 if (btrace_is_empty (tp
))
116 error (_("No trace."));
121 /* Update the branch trace for the current thread and return a pointer to its
122 branch trace information struct.
124 Throws an error if there is no thread or no trace. This function never
127 static struct btrace_thread_info
*
128 require_btrace (void)
130 struct thread_info
*tp
;
132 tp
= require_btrace_thread ();
137 /* Enable branch tracing for one thread. Warn on errors. */
140 record_btrace_enable_warn (struct thread_info
*tp
)
144 btrace_enable (tp
, &record_btrace_conf
);
146 CATCH (error
, RETURN_MASK_ERROR
)
148 warning ("%s", error
.message
);
153 /* Callback function to disable branch tracing for one thread. */
156 record_btrace_disable_callback (void *arg
)
158 struct thread_info
*tp
;
165 /* Enable automatic tracing of new threads. */
168 record_btrace_auto_enable (void)
170 DEBUG ("attach thread observer");
172 record_btrace_thread_observer
173 = observer_attach_new_thread (record_btrace_enable_warn
);
176 /* Disable automatic tracing of new threads. */
179 record_btrace_auto_disable (void)
181 /* The observer may have been detached, already. */
182 if (record_btrace_thread_observer
== NULL
)
185 DEBUG ("detach thread observer");
187 observer_detach_new_thread (record_btrace_thread_observer
);
188 record_btrace_thread_observer
= NULL
;
191 /* The record-btrace async event handler function. */
194 record_btrace_handle_async_inferior_event (gdb_client_data data
)
196 inferior_event_handler (INF_REG_EVENT
, NULL
);
199 /* The to_open method of target record-btrace. */
202 record_btrace_open (const char *args
, int from_tty
)
204 struct cleanup
*disable_chain
;
205 struct thread_info
*tp
;
211 if (!target_has_execution
)
212 error (_("The program is not being run."));
215 error (_("Record btrace can't debug inferior in non-stop mode."));
217 gdb_assert (record_btrace_thread_observer
== NULL
);
219 disable_chain
= make_cleanup (null_cleanup
, NULL
);
220 ALL_NON_EXITED_THREADS (tp
)
221 if (args
== NULL
|| *args
== 0 || number_is_in_list (args
, tp
->num
))
223 btrace_enable (tp
, &record_btrace_conf
);
225 make_cleanup (record_btrace_disable_callback
, tp
);
228 record_btrace_auto_enable ();
230 push_target (&record_btrace_ops
);
232 record_btrace_async_inferior_event_handler
233 = create_async_event_handler (record_btrace_handle_async_inferior_event
,
235 record_btrace_generating_corefile
= 0;
237 observer_notify_record_changed (current_inferior (), 1);
239 discard_cleanups (disable_chain
);
242 /* The to_stop_recording method of target record-btrace. */
245 record_btrace_stop_recording (struct target_ops
*self
)
247 struct thread_info
*tp
;
249 DEBUG ("stop recording");
251 record_btrace_auto_disable ();
253 ALL_NON_EXITED_THREADS (tp
)
254 if (tp
->btrace
.target
!= NULL
)
258 /* The to_close method of target record-btrace. */
261 record_btrace_close (struct target_ops
*self
)
263 struct thread_info
*tp
;
265 if (record_btrace_async_inferior_event_handler
!= NULL
)
266 delete_async_event_handler (&record_btrace_async_inferior_event_handler
);
268 /* Make sure automatic recording gets disabled even if we did not stop
269 recording before closing the record-btrace target. */
270 record_btrace_auto_disable ();
272 /* We should have already stopped recording.
273 Tear down btrace in case we have not. */
274 ALL_NON_EXITED_THREADS (tp
)
275 btrace_teardown (tp
);
278 /* The to_async method of target record-btrace. */
281 record_btrace_async (struct target_ops
*ops
,
282 void (*callback
) (enum inferior_event_type event_type
,
286 if (callback
!= NULL
)
287 mark_async_event_handler (record_btrace_async_inferior_event_handler
);
289 clear_async_event_handler (record_btrace_async_inferior_event_handler
);
291 ops
->beneath
->to_async (ops
->beneath
, callback
, context
);
294 /* Adjusts the size and returns a human readable size suffix. */
297 record_btrace_adjust_size (unsigned int *size
)
303 if ((sz
& ((1u << 30) - 1)) == 0)
308 else if ((sz
& ((1u << 20) - 1)) == 0)
313 else if ((sz
& ((1u << 10) - 1)) == 0)
322 /* Print a BTS configuration. */
325 record_btrace_print_bts_conf (const struct btrace_config_bts
*conf
)
333 suffix
= record_btrace_adjust_size (&size
);
334 printf_unfiltered (_("Buffer size: %u%s.\n"), size
, suffix
);
338 /* Print a branch tracing configuration. */
341 record_btrace_print_conf (const struct btrace_config
*conf
)
343 printf_unfiltered (_("Recording format: %s.\n"),
344 btrace_format_string (conf
->format
));
346 switch (conf
->format
)
348 case BTRACE_FORMAT_NONE
:
351 case BTRACE_FORMAT_BTS
:
352 record_btrace_print_bts_conf (&conf
->bts
);
356 internal_error (__FILE__
, __LINE__
, _("Unkown branch trace format."));
359 /* The to_info_record method of target record-btrace. */
362 record_btrace_info (struct target_ops
*self
)
364 struct btrace_thread_info
*btinfo
;
365 const struct btrace_config
*conf
;
366 struct thread_info
*tp
;
367 unsigned int insns
, calls
, gaps
;
371 tp
= find_thread_ptid (inferior_ptid
);
373 error (_("No thread."));
375 btinfo
= &tp
->btrace
;
377 conf
= btrace_conf (btinfo
);
379 record_btrace_print_conf (conf
);
387 if (!btrace_is_empty (tp
))
389 struct btrace_call_iterator call
;
390 struct btrace_insn_iterator insn
;
392 btrace_call_end (&call
, btinfo
);
393 btrace_call_prev (&call
, 1);
394 calls
= btrace_call_number (&call
);
396 btrace_insn_end (&insn
, btinfo
);
398 insns
= btrace_insn_number (&insn
);
401 /* The last instruction does not really belong to the trace. */
408 /* Skip gaps at the end. */
411 steps
= btrace_insn_prev (&insn
, 1);
415 insns
= btrace_insn_number (&insn
);
420 gaps
= btinfo
->ngaps
;
423 printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
424 "for thread %d (%s).\n"), insns
, calls
, gaps
,
425 tp
->num
, target_pid_to_str (tp
->ptid
));
427 if (btrace_is_replaying (tp
))
428 printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
429 btrace_insn_number (btinfo
->replay
));
432 /* Print a decode error. */
435 btrace_ui_out_decode_error (struct ui_out
*uiout
, int errcode
,
436 enum btrace_format format
)
441 errstr
= _("unknown");
449 case BTRACE_FORMAT_BTS
:
455 case BDE_BTS_OVERFLOW
:
456 errstr
= _("instruction overflow");
459 case BDE_BTS_INSN_SIZE
:
460 errstr
= _("unknown instruction");
466 ui_out_text (uiout
, _("["));
469 ui_out_text (uiout
, _("decode error ("));
470 ui_out_field_int (uiout
, "errcode", errcode
);
471 ui_out_text (uiout
, _("): "));
473 ui_out_text (uiout
, errstr
);
474 ui_out_text (uiout
, _("]\n"));
477 /* Print an unsigned int. */
static void
ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
{
  /* There is no unsigned-int field printer, so format VAL ourselves.  */
  ui_out_field_fmt (uiout, fld, "%u", val);
}
485 /* Disassemble a section of the recorded instruction trace. */
488 btrace_insn_history (struct ui_out
*uiout
,
489 const struct btrace_thread_info
*btinfo
,
490 const struct btrace_insn_iterator
*begin
,
491 const struct btrace_insn_iterator
*end
, int flags
)
493 struct gdbarch
*gdbarch
;
494 struct btrace_insn_iterator it
;
496 DEBUG ("itrace (0x%x): [%u; %u)", flags
, btrace_insn_number (begin
),
497 btrace_insn_number (end
));
499 gdbarch
= target_gdbarch ();
501 for (it
= *begin
; btrace_insn_cmp (&it
, end
) != 0; btrace_insn_next (&it
, 1))
503 const struct btrace_insn
*insn
;
505 insn
= btrace_insn_get (&it
);
507 /* A NULL instruction indicates a gap in the trace. */
510 const struct btrace_config
*conf
;
512 conf
= btrace_conf (btinfo
);
514 /* We have trace so we must have a configuration. */
515 gdb_assert (conf
!= NULL
);
517 btrace_ui_out_decode_error (uiout
, it
.function
->errcode
,
522 /* Print the instruction index. */
523 ui_out_field_uint (uiout
, "index", btrace_insn_number (&it
));
524 ui_out_text (uiout
, "\t");
526 /* Disassembly with '/m' flag may not produce the expected result.
528 gdb_disassembly (gdbarch
, uiout
, NULL
, flags
, 1, insn
->pc
,
534 /* The to_insn_history method of target record-btrace. */
537 record_btrace_insn_history (struct target_ops
*self
, int size
, int flags
)
539 struct btrace_thread_info
*btinfo
;
540 struct btrace_insn_history
*history
;
541 struct btrace_insn_iterator begin
, end
;
542 struct cleanup
*uiout_cleanup
;
543 struct ui_out
*uiout
;
544 unsigned int context
, covered
;
546 uiout
= current_uiout
;
547 uiout_cleanup
= make_cleanup_ui_out_tuple_begin_end (uiout
,
549 context
= abs (size
);
551 error (_("Bad record instruction-history-size."));
553 btinfo
= require_btrace ();
554 history
= btinfo
->insn_history
;
557 struct btrace_insn_iterator
*replay
;
559 DEBUG ("insn-history (0x%x): %d", flags
, size
);
561 /* If we're replaying, we start at the replay position. Otherwise, we
562 start at the tail of the trace. */
563 replay
= btinfo
->replay
;
567 btrace_insn_end (&begin
, btinfo
);
569 /* We start from here and expand in the requested direction. Then we
570 expand in the other direction, as well, to fill up any remaining
575 /* We want the current position covered, as well. */
576 covered
= btrace_insn_next (&end
, 1);
577 covered
+= btrace_insn_prev (&begin
, context
- covered
);
578 covered
+= btrace_insn_next (&end
, context
- covered
);
582 covered
= btrace_insn_next (&end
, context
);
583 covered
+= btrace_insn_prev (&begin
, context
- covered
);
588 begin
= history
->begin
;
591 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags
, size
,
592 btrace_insn_number (&begin
), btrace_insn_number (&end
));
597 covered
= btrace_insn_prev (&begin
, context
);
602 covered
= btrace_insn_next (&end
, context
);
607 btrace_insn_history (uiout
, btinfo
, &begin
, &end
, flags
);
611 printf_unfiltered (_("At the start of the branch trace record.\n"));
613 printf_unfiltered (_("At the end of the branch trace record.\n"));
616 btrace_set_insn_history (btinfo
, &begin
, &end
);
617 do_cleanups (uiout_cleanup
);
620 /* The to_insn_history_range method of target record-btrace. */
623 record_btrace_insn_history_range (struct target_ops
*self
,
624 ULONGEST from
, ULONGEST to
, int flags
)
626 struct btrace_thread_info
*btinfo
;
627 struct btrace_insn_history
*history
;
628 struct btrace_insn_iterator begin
, end
;
629 struct cleanup
*uiout_cleanup
;
630 struct ui_out
*uiout
;
631 unsigned int low
, high
;
634 uiout
= current_uiout
;
635 uiout_cleanup
= make_cleanup_ui_out_tuple_begin_end (uiout
,
640 DEBUG ("insn-history (0x%x): [%u; %u)", flags
, low
, high
);
642 /* Check for wrap-arounds. */
643 if (low
!= from
|| high
!= to
)
644 error (_("Bad range."));
647 error (_("Bad range."));
649 btinfo
= require_btrace ();
651 found
= btrace_find_insn_by_number (&begin
, btinfo
, low
);
653 error (_("Range out of bounds."));
655 found
= btrace_find_insn_by_number (&end
, btinfo
, high
);
658 /* Silently truncate the range. */
659 btrace_insn_end (&end
, btinfo
);
663 /* We want both begin and end to be inclusive. */
664 btrace_insn_next (&end
, 1);
667 btrace_insn_history (uiout
, btinfo
, &begin
, &end
, flags
);
668 btrace_set_insn_history (btinfo
, &begin
, &end
);
670 do_cleanups (uiout_cleanup
);
673 /* The to_insn_history_from method of target record-btrace. */
676 record_btrace_insn_history_from (struct target_ops
*self
,
677 ULONGEST from
, int size
, int flags
)
679 ULONGEST begin
, end
, context
;
681 context
= abs (size
);
683 error (_("Bad record instruction-history-size."));
692 begin
= from
- context
+ 1;
697 end
= from
+ context
- 1;
699 /* Check for wrap-around. */
704 record_btrace_insn_history_range (self
, begin
, end
, flags
);
707 /* Print the instruction number range for a function call history line. */
710 btrace_call_history_insn_range (struct ui_out
*uiout
,
711 const struct btrace_function
*bfun
)
713 unsigned int begin
, end
, size
;
715 size
= VEC_length (btrace_insn_s
, bfun
->insn
);
716 gdb_assert (size
> 0);
718 begin
= bfun
->insn_offset
;
719 end
= begin
+ size
- 1;
721 ui_out_field_uint (uiout
, "insn begin", begin
);
722 ui_out_text (uiout
, ",");
723 ui_out_field_uint (uiout
, "insn end", end
);
726 /* Compute the lowest and highest source line for the instructions in BFUN
727 and return them in PBEGIN and PEND.
728 Ignore instructions that can't be mapped to BFUN, e.g. instructions that
729 result from inlining or macro expansion. */
732 btrace_compute_src_line_range (const struct btrace_function
*bfun
,
733 int *pbegin
, int *pend
)
735 struct btrace_insn
*insn
;
736 struct symtab
*symtab
;
748 symtab
= symbol_symtab (sym
);
750 for (idx
= 0; VEC_iterate (btrace_insn_s
, bfun
->insn
, idx
, insn
); ++idx
)
752 struct symtab_and_line sal
;
754 sal
= find_pc_line (insn
->pc
, 0);
755 if (sal
.symtab
!= symtab
|| sal
.line
== 0)
758 begin
= min (begin
, sal
.line
);
759 end
= max (end
, sal
.line
);
767 /* Print the source line information for a function call history line. */
770 btrace_call_history_src_line (struct ui_out
*uiout
,
771 const struct btrace_function
*bfun
)
780 ui_out_field_string (uiout
, "file",
781 symtab_to_filename_for_display (symbol_symtab (sym
)));
783 btrace_compute_src_line_range (bfun
, &begin
, &end
);
787 ui_out_text (uiout
, ":");
788 ui_out_field_int (uiout
, "min line", begin
);
793 ui_out_text (uiout
, ",");
794 ui_out_field_int (uiout
, "max line", end
);
797 /* Get the name of a branch trace function. */
800 btrace_get_bfun_name (const struct btrace_function
*bfun
)
802 struct minimal_symbol
*msym
;
812 return SYMBOL_PRINT_NAME (sym
);
813 else if (msym
!= NULL
)
814 return MSYMBOL_PRINT_NAME (msym
);
819 /* Disassemble a section of the recorded function trace. */
822 btrace_call_history (struct ui_out
*uiout
,
823 const struct btrace_thread_info
*btinfo
,
824 const struct btrace_call_iterator
*begin
,
825 const struct btrace_call_iterator
*end
,
826 enum record_print_flag flags
)
828 struct btrace_call_iterator it
;
830 DEBUG ("ftrace (0x%x): [%u; %u)", flags
, btrace_call_number (begin
),
831 btrace_call_number (end
));
833 for (it
= *begin
; btrace_call_cmp (&it
, end
) < 0; btrace_call_next (&it
, 1))
835 const struct btrace_function
*bfun
;
836 struct minimal_symbol
*msym
;
839 bfun
= btrace_call_get (&it
);
843 /* Print the function index. */
844 ui_out_field_uint (uiout
, "index", bfun
->number
);
845 ui_out_text (uiout
, "\t");
847 /* Indicate gaps in the trace. */
848 if (bfun
->errcode
!= 0)
850 const struct btrace_config
*conf
;
852 conf
= btrace_conf (btinfo
);
854 /* We have trace so we must have a configuration. */
855 gdb_assert (conf
!= NULL
);
857 btrace_ui_out_decode_error (uiout
, bfun
->errcode
, conf
->format
);
862 if ((flags
& RECORD_PRINT_INDENT_CALLS
) != 0)
864 int level
= bfun
->level
+ btinfo
->level
, i
;
866 for (i
= 0; i
< level
; ++i
)
867 ui_out_text (uiout
, " ");
871 ui_out_field_string (uiout
, "function", SYMBOL_PRINT_NAME (sym
));
872 else if (msym
!= NULL
)
873 ui_out_field_string (uiout
, "function", MSYMBOL_PRINT_NAME (msym
));
874 else if (!ui_out_is_mi_like_p (uiout
))
875 ui_out_field_string (uiout
, "function", "??");
877 if ((flags
& RECORD_PRINT_INSN_RANGE
) != 0)
879 ui_out_text (uiout
, _("\tinst "));
880 btrace_call_history_insn_range (uiout
, bfun
);
883 if ((flags
& RECORD_PRINT_SRC_LINE
) != 0)
885 ui_out_text (uiout
, _("\tat "));
886 btrace_call_history_src_line (uiout
, bfun
);
889 ui_out_text (uiout
, "\n");
893 /* The to_call_history method of target record-btrace. */
896 record_btrace_call_history (struct target_ops
*self
, int size
, int flags
)
898 struct btrace_thread_info
*btinfo
;
899 struct btrace_call_history
*history
;
900 struct btrace_call_iterator begin
, end
;
901 struct cleanup
*uiout_cleanup
;
902 struct ui_out
*uiout
;
903 unsigned int context
, covered
;
905 uiout
= current_uiout
;
906 uiout_cleanup
= make_cleanup_ui_out_tuple_begin_end (uiout
,
908 context
= abs (size
);
910 error (_("Bad record function-call-history-size."));
912 btinfo
= require_btrace ();
913 history
= btinfo
->call_history
;
916 struct btrace_insn_iterator
*replay
;
918 DEBUG ("call-history (0x%x): %d", flags
, size
);
920 /* If we're replaying, we start at the replay position. Otherwise, we
921 start at the tail of the trace. */
922 replay
= btinfo
->replay
;
925 begin
.function
= replay
->function
;
926 begin
.btinfo
= btinfo
;
929 btrace_call_end (&begin
, btinfo
);
931 /* We start from here and expand in the requested direction. Then we
932 expand in the other direction, as well, to fill up any remaining
937 /* We want the current position covered, as well. */
938 covered
= btrace_call_next (&end
, 1);
939 covered
+= btrace_call_prev (&begin
, context
- covered
);
940 covered
+= btrace_call_next (&end
, context
- covered
);
944 covered
= btrace_call_next (&end
, context
);
945 covered
+= btrace_call_prev (&begin
, context
- covered
);
950 begin
= history
->begin
;
953 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", flags
, size
,
954 btrace_call_number (&begin
), btrace_call_number (&end
));
959 covered
= btrace_call_prev (&begin
, context
);
964 covered
= btrace_call_next (&end
, context
);
969 btrace_call_history (uiout
, btinfo
, &begin
, &end
, flags
);
973 printf_unfiltered (_("At the start of the branch trace record.\n"));
975 printf_unfiltered (_("At the end of the branch trace record.\n"));
978 btrace_set_call_history (btinfo
, &begin
, &end
);
979 do_cleanups (uiout_cleanup
);
982 /* The to_call_history_range method of target record-btrace. */
985 record_btrace_call_history_range (struct target_ops
*self
,
986 ULONGEST from
, ULONGEST to
, int flags
)
988 struct btrace_thread_info
*btinfo
;
989 struct btrace_call_history
*history
;
990 struct btrace_call_iterator begin
, end
;
991 struct cleanup
*uiout_cleanup
;
992 struct ui_out
*uiout
;
993 unsigned int low
, high
;
996 uiout
= current_uiout
;
997 uiout_cleanup
= make_cleanup_ui_out_tuple_begin_end (uiout
,
1002 DEBUG ("call-history (0x%x): [%u; %u)", flags
, low
, high
);
1004 /* Check for wrap-arounds. */
1005 if (low
!= from
|| high
!= to
)
1006 error (_("Bad range."));
1009 error (_("Bad range."));
1011 btinfo
= require_btrace ();
1013 found
= btrace_find_call_by_number (&begin
, btinfo
, low
);
1015 error (_("Range out of bounds."));
1017 found
= btrace_find_call_by_number (&end
, btinfo
, high
);
1020 /* Silently truncate the range. */
1021 btrace_call_end (&end
, btinfo
);
1025 /* We want both begin and end to be inclusive. */
1026 btrace_call_next (&end
, 1);
1029 btrace_call_history (uiout
, btinfo
, &begin
, &end
, flags
);
1030 btrace_set_call_history (btinfo
, &begin
, &end
);
1032 do_cleanups (uiout_cleanup
);
1035 /* The to_call_history_from method of target record-btrace. */
1038 record_btrace_call_history_from (struct target_ops
*self
,
1039 ULONGEST from
, int size
, int flags
)
1041 ULONGEST begin
, end
, context
;
1043 context
= abs (size
);
1045 error (_("Bad record function-call-history-size."));
1054 begin
= from
- context
+ 1;
1059 end
= from
+ context
- 1;
1061 /* Check for wrap-around. */
1066 record_btrace_call_history_range (self
, begin
, end
, flags
);
1069 /* The to_record_is_replaying method of target record-btrace. */
1072 record_btrace_is_replaying (struct target_ops
*self
)
1074 struct thread_info
*tp
;
1076 ALL_NON_EXITED_THREADS (tp
)
1077 if (btrace_is_replaying (tp
))
1083 /* The to_xfer_partial method of target record-btrace. */
1085 static enum target_xfer_status
1086 record_btrace_xfer_partial (struct target_ops
*ops
, enum target_object object
,
1087 const char *annex
, gdb_byte
*readbuf
,
1088 const gdb_byte
*writebuf
, ULONGEST offset
,
1089 ULONGEST len
, ULONGEST
*xfered_len
)
1091 struct target_ops
*t
;
1093 /* Filter out requests that don't make sense during replay. */
1094 if (replay_memory_access
== replay_memory_access_read_only
1095 && !record_btrace_generating_corefile
1096 && record_btrace_is_replaying (ops
))
1100 case TARGET_OBJECT_MEMORY
:
1102 struct target_section
*section
;
1104 /* We do not allow writing memory in general. */
1105 if (writebuf
!= NULL
)
1108 return TARGET_XFER_UNAVAILABLE
;
1111 /* We allow reading readonly memory. */
1112 section
= target_section_by_addr (ops
, offset
);
1113 if (section
!= NULL
)
1115 /* Check if the section we found is readonly. */
1116 if ((bfd_get_section_flags (section
->the_bfd_section
->owner
,
1117 section
->the_bfd_section
)
1118 & SEC_READONLY
) != 0)
1120 /* Truncate the request to fit into this section. */
1121 len
= min (len
, section
->endaddr
- offset
);
1127 return TARGET_XFER_UNAVAILABLE
;
1132 /* Forward the request. */
1134 return ops
->to_xfer_partial (ops
, object
, annex
, readbuf
, writebuf
,
1135 offset
, len
, xfered_len
);
1138 /* The to_insert_breakpoint method of target record-btrace. */
1141 record_btrace_insert_breakpoint (struct target_ops
*ops
,
1142 struct gdbarch
*gdbarch
,
1143 struct bp_target_info
*bp_tgt
)
1148 /* Inserting breakpoints requires accessing memory. Allow it for the
1149 duration of this function. */
1150 old
= replay_memory_access
;
1151 replay_memory_access
= replay_memory_access_read_write
;
1156 ret
= ops
->beneath
->to_insert_breakpoint (ops
->beneath
, gdbarch
, bp_tgt
);
1158 CATCH (except
, RETURN_MASK_ALL
)
1160 replay_memory_access
= old
;
1161 throw_exception (except
);
1164 replay_memory_access
= old
;
1169 /* The to_remove_breakpoint method of target record-btrace. */
1172 record_btrace_remove_breakpoint (struct target_ops
*ops
,
1173 struct gdbarch
*gdbarch
,
1174 struct bp_target_info
*bp_tgt
)
1179 /* Removing breakpoints requires accessing memory. Allow it for the
1180 duration of this function. */
1181 old
= replay_memory_access
;
1182 replay_memory_access
= replay_memory_access_read_write
;
1187 ret
= ops
->beneath
->to_remove_breakpoint (ops
->beneath
, gdbarch
, bp_tgt
);
1189 CATCH (except
, RETURN_MASK_ALL
)
1191 replay_memory_access
= old
;
1192 throw_exception (except
);
1195 replay_memory_access
= old
;
1200 /* The to_fetch_registers method of target record-btrace. */
1203 record_btrace_fetch_registers (struct target_ops
*ops
,
1204 struct regcache
*regcache
, int regno
)
1206 struct btrace_insn_iterator
*replay
;
1207 struct thread_info
*tp
;
1209 tp
= find_thread_ptid (inferior_ptid
);
1210 gdb_assert (tp
!= NULL
);
1212 replay
= tp
->btrace
.replay
;
1213 if (replay
!= NULL
&& !record_btrace_generating_corefile
)
1215 const struct btrace_insn
*insn
;
1216 struct gdbarch
*gdbarch
;
1219 gdbarch
= get_regcache_arch (regcache
);
1220 pcreg
= gdbarch_pc_regnum (gdbarch
);
1224 /* We can only provide the PC register. */
1225 if (regno
>= 0 && regno
!= pcreg
)
1228 insn
= btrace_insn_get (replay
);
1229 gdb_assert (insn
!= NULL
);
1231 regcache_raw_supply (regcache
, regno
, &insn
->pc
);
1235 struct target_ops
*t
= ops
->beneath
;
1237 t
->to_fetch_registers (t
, regcache
, regno
);
1241 /* The to_store_registers method of target record-btrace. */
1244 record_btrace_store_registers (struct target_ops
*ops
,
1245 struct regcache
*regcache
, int regno
)
1247 struct target_ops
*t
;
1249 if (!record_btrace_generating_corefile
&& record_btrace_is_replaying (ops
))
1250 error (_("This record target does not allow writing registers."));
1252 gdb_assert (may_write_registers
!= 0);
1255 t
->to_store_registers (t
, regcache
, regno
);
1258 /* The to_prepare_to_store method of target record-btrace. */
1261 record_btrace_prepare_to_store (struct target_ops
*ops
,
1262 struct regcache
*regcache
)
1264 struct target_ops
*t
;
1266 if (!record_btrace_generating_corefile
&& record_btrace_is_replaying (ops
))
1270 t
->to_prepare_to_store (t
, regcache
);
1273 /* The branch trace frame cache. */
1275 struct btrace_frame_cache
1278 struct thread_info
*tp
;
1280 /* The frame info. */
1281 struct frame_info
*frame
;
1283 /* The branch trace function segment. */
1284 const struct btrace_function
*bfun
;
1287 /* A struct btrace_frame_cache hash table indexed by NEXT. */
1289 static htab_t bfcache
;
1291 /* hash_f for htab_create_alloc of bfcache. */
1294 bfcache_hash (const void *arg
)
1296 const struct btrace_frame_cache
*cache
= arg
;
1298 return htab_hash_pointer (cache
->frame
);
1301 /* eq_f for htab_create_alloc of bfcache. */
1304 bfcache_eq (const void *arg1
, const void *arg2
)
1306 const struct btrace_frame_cache
*cache1
= arg1
;
1307 const struct btrace_frame_cache
*cache2
= arg2
;
1309 return cache1
->frame
== cache2
->frame
;
1312 /* Create a new btrace frame cache. */
1314 static struct btrace_frame_cache
*
1315 bfcache_new (struct frame_info
*frame
)
1317 struct btrace_frame_cache
*cache
;
1320 cache
= FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache
);
1321 cache
->frame
= frame
;
1323 slot
= htab_find_slot (bfcache
, cache
, INSERT
);
1324 gdb_assert (*slot
== NULL
);
1330 /* Extract the branch trace function from a branch trace frame. */
1332 static const struct btrace_function
*
1333 btrace_get_frame_function (struct frame_info
*frame
)
1335 const struct btrace_frame_cache
*cache
;
1336 const struct btrace_function
*bfun
;
1337 struct btrace_frame_cache pattern
;
1340 pattern
.frame
= frame
;
1342 slot
= htab_find_slot (bfcache
, &pattern
, NO_INSERT
);
1350 /* Implement stop_reason method for record_btrace_frame_unwind. */
1352 static enum unwind_stop_reason
1353 record_btrace_frame_unwind_stop_reason (struct frame_info
*this_frame
,
1356 const struct btrace_frame_cache
*cache
;
1357 const struct btrace_function
*bfun
;
1359 cache
= *this_cache
;
1361 gdb_assert (bfun
!= NULL
);
1363 if (bfun
->up
== NULL
)
1364 return UNWIND_UNAVAILABLE
;
1366 return UNWIND_NO_REASON
;
1369 /* Implement this_id method for record_btrace_frame_unwind. */
1372 record_btrace_frame_this_id (struct frame_info
*this_frame
, void **this_cache
,
1373 struct frame_id
*this_id
)
1375 const struct btrace_frame_cache
*cache
;
1376 const struct btrace_function
*bfun
;
1377 CORE_ADDR code
, special
;
1379 cache
= *this_cache
;
1382 gdb_assert (bfun
!= NULL
);
1384 while (bfun
->segment
.prev
!= NULL
)
1385 bfun
= bfun
->segment
.prev
;
1387 code
= get_frame_func (this_frame
);
1388 special
= bfun
->number
;
1390 *this_id
= frame_id_build_unavailable_stack_special (code
, special
);
1392 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1393 btrace_get_bfun_name (cache
->bfun
),
1394 core_addr_to_string_nz (this_id
->code_addr
),
1395 core_addr_to_string_nz (this_id
->special_addr
));
1398 /* Implement prev_register method for record_btrace_frame_unwind. */
1400 static struct value
*
1401 record_btrace_frame_prev_register (struct frame_info
*this_frame
,
1405 const struct btrace_frame_cache
*cache
;
1406 const struct btrace_function
*bfun
, *caller
;
1407 const struct btrace_insn
*insn
;
1408 struct gdbarch
*gdbarch
;
1412 gdbarch
= get_frame_arch (this_frame
);
1413 pcreg
= gdbarch_pc_regnum (gdbarch
);
1414 if (pcreg
< 0 || regnum
!= pcreg
)
1415 throw_error (NOT_AVAILABLE_ERROR
,
1416 _("Registers are not available in btrace record history"));
1418 cache
= *this_cache
;
1420 gdb_assert (bfun
!= NULL
);
1424 throw_error (NOT_AVAILABLE_ERROR
,
1425 _("No caller in btrace record history"));
1427 if ((bfun
->flags
& BFUN_UP_LINKS_TO_RET
) != 0)
1429 insn
= VEC_index (btrace_insn_s
, caller
->insn
, 0);
1434 insn
= VEC_last (btrace_insn_s
, caller
->insn
);
1437 pc
+= gdb_insn_length (gdbarch
, pc
);
1440 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1441 btrace_get_bfun_name (bfun
), bfun
->level
,
1442 core_addr_to_string_nz (pc
));
1444 return frame_unwind_got_address (this_frame
, regnum
, pc
);
1447 /* Implement sniffer method for record_btrace_frame_unwind. */
1450 record_btrace_frame_sniffer (const struct frame_unwind
*self
,
1451 struct frame_info
*this_frame
,
1454 const struct btrace_function
*bfun
;
1455 struct btrace_frame_cache
*cache
;
1456 struct thread_info
*tp
;
1457 struct frame_info
*next
;
1459 /* THIS_FRAME does not contain a reference to its thread. */
1460 tp
= find_thread_ptid (inferior_ptid
);
1461 gdb_assert (tp
!= NULL
);
1464 next
= get_next_frame (this_frame
);
1467 const struct btrace_insn_iterator
*replay
;
1469 replay
= tp
->btrace
.replay
;
1471 bfun
= replay
->function
;
1475 const struct btrace_function
*callee
;
1477 callee
= btrace_get_frame_function (next
);
1478 if (callee
!= NULL
&& (callee
->flags
& BFUN_UP_LINKS_TO_TAILCALL
) == 0)
1485 DEBUG ("[frame] sniffed frame for %s on level %d",
1486 btrace_get_bfun_name (bfun
), bfun
->level
);
1488 /* This is our frame. Initialize the frame cache. */
1489 cache
= bfcache_new (this_frame
);
1493 *this_cache
= cache
;
1497 /* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1500 record_btrace_tailcall_frame_sniffer (const struct frame_unwind
*self
,
1501 struct frame_info
*this_frame
,
1504 const struct btrace_function
*bfun
, *callee
;
1505 struct btrace_frame_cache
*cache
;
1506 struct frame_info
*next
;
1508 next
= get_next_frame (this_frame
);
1512 callee
= btrace_get_frame_function (next
);
1516 if ((callee
->flags
& BFUN_UP_LINKS_TO_TAILCALL
) == 0)
1523 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1524 btrace_get_bfun_name (bfun
), bfun
->level
);
1526 /* This is our frame. Initialize the frame cache. */
1527 cache
= bfcache_new (this_frame
);
1528 cache
->tp
= find_thread_ptid (inferior_ptid
);
1531 *this_cache
= cache
;
1536 record_btrace_frame_dealloc_cache (struct frame_info
*self
, void *this_cache
)
1538 struct btrace_frame_cache
*cache
;
1543 slot
= htab_find_slot (bfcache
, cache
, NO_INSERT
);
1544 gdb_assert (slot
!= NULL
);
1546 htab_remove_elt (bfcache
, cache
);
1549 /* btrace recording does not store previous memory content, neither the stack
1550 frames content. Any unwinding would return errorneous results as the stack
1551 contents no longer matches the changed PC value restored from history.
1552 Therefore this unwinder reports any possibly unwound registers as
1555 const struct frame_unwind record_btrace_frame_unwind
=
1558 record_btrace_frame_unwind_stop_reason
,
1559 record_btrace_frame_this_id
,
1560 record_btrace_frame_prev_register
,
1562 record_btrace_frame_sniffer
,
1563 record_btrace_frame_dealloc_cache
1566 const struct frame_unwind record_btrace_tailcall_frame_unwind
=
1569 record_btrace_frame_unwind_stop_reason
,
1570 record_btrace_frame_this_id
,
1571 record_btrace_frame_prev_register
,
1573 record_btrace_tailcall_frame_sniffer
,
1574 record_btrace_frame_dealloc_cache
1577 /* Implement the to_get_unwinder method. */
1579 static const struct frame_unwind
*
1580 record_btrace_to_get_unwinder (struct target_ops
*self
)
1582 return &record_btrace_frame_unwind
;
1585 /* Implement the to_get_tailcall_unwinder method. */
1587 static const struct frame_unwind
*
1588 record_btrace_to_get_tailcall_unwinder (struct target_ops
*self
)
1590 return &record_btrace_tailcall_frame_unwind
;
1593 /* Indicate that TP should be resumed according to FLAG. */
1596 record_btrace_resume_thread (struct thread_info
*tp
,
1597 enum btrace_thread_flag flag
)
1599 struct btrace_thread_info
*btinfo
;
1601 DEBUG ("resuming %d (%s): %u", tp
->num
, target_pid_to_str (tp
->ptid
), flag
);
1603 btinfo
= &tp
->btrace
;
1605 if ((btinfo
->flags
& BTHR_MOVE
) != 0)
1606 error (_("Thread already moving."));
1608 /* Fetch the latest branch trace. */
1611 btinfo
->flags
|= flag
;
1614 /* Find the thread to resume given a PTID. */
1616 static struct thread_info
*
1617 record_btrace_find_resume_thread (ptid_t ptid
)
1619 struct thread_info
*tp
;
1621 /* When asked to resume everything, we pick the current thread. */
1622 if (ptid_equal (minus_one_ptid
, ptid
) || ptid_is_pid (ptid
))
1623 ptid
= inferior_ptid
;
1625 return find_thread_ptid (ptid
);
1628 /* Start replaying a thread. */
1630 static struct btrace_insn_iterator
*
1631 record_btrace_start_replaying (struct thread_info
*tp
)
1633 struct btrace_insn_iterator
*replay
;
1634 struct btrace_thread_info
*btinfo
;
1637 btinfo
= &tp
->btrace
;
1640 /* We can't start replaying without trace. */
1641 if (btinfo
->begin
== NULL
)
1644 /* Clear the executing flag to allow changes to the current frame.
1645 We are not actually running, yet. We just started a reverse execution
1646 command or a record goto command.
1647 For the latter, EXECUTING is false and this has no effect.
1648 For the former, EXECUTING is true and we're in to_wait, about to
1649 move the thread. Since we need to recompute the stack, we temporarily
1650 set EXECUTING to flase. */
1651 executing
= is_executing (tp
->ptid
);
1652 set_executing (tp
->ptid
, 0);
1654 /* GDB stores the current frame_id when stepping in order to detects steps
1656 Since frames are computed differently when we're replaying, we need to
1657 recompute those stored frames and fix them up so we can still detect
1658 subroutines after we started replaying. */
1661 struct frame_info
*frame
;
1662 struct frame_id frame_id
;
1663 int upd_step_frame_id
, upd_step_stack_frame_id
;
1665 /* The current frame without replaying - computed via normal unwind. */
1666 frame
= get_current_frame ();
1667 frame_id
= get_frame_id (frame
);
1669 /* Check if we need to update any stepping-related frame id's. */
1670 upd_step_frame_id
= frame_id_eq (frame_id
,
1671 tp
->control
.step_frame_id
);
1672 upd_step_stack_frame_id
= frame_id_eq (frame_id
,
1673 tp
->control
.step_stack_frame_id
);
1675 /* We start replaying at the end of the branch trace. This corresponds
1676 to the current instruction. */
1677 replay
= xmalloc (sizeof (*replay
));
1678 btrace_insn_end (replay
, btinfo
);
1680 /* Skip gaps at the end of the trace. */
1681 while (btrace_insn_get (replay
) == NULL
)
1685 steps
= btrace_insn_prev (replay
, 1);
1687 error (_("No trace."));
1690 /* We're not replaying, yet. */
1691 gdb_assert (btinfo
->replay
== NULL
);
1692 btinfo
->replay
= replay
;
1694 /* Make sure we're not using any stale registers. */
1695 registers_changed_ptid (tp
->ptid
);
1697 /* The current frame with replaying - computed via btrace unwind. */
1698 frame
= get_current_frame ();
1699 frame_id
= get_frame_id (frame
);
1701 /* Replace stepping related frames where necessary. */
1702 if (upd_step_frame_id
)
1703 tp
->control
.step_frame_id
= frame_id
;
1704 if (upd_step_stack_frame_id
)
1705 tp
->control
.step_stack_frame_id
= frame_id
;
1707 CATCH (except
, RETURN_MASK_ALL
)
1709 /* Restore the previous execution state. */
1710 set_executing (tp
->ptid
, executing
);
1712 xfree (btinfo
->replay
);
1713 btinfo
->replay
= NULL
;
1715 registers_changed_ptid (tp
->ptid
);
1717 throw_exception (except
);
1721 /* Restore the previous execution state. */
1722 set_executing (tp
->ptid
, executing
);
1727 /* Stop replaying a thread. */
1730 record_btrace_stop_replaying (struct thread_info
*tp
)
1732 struct btrace_thread_info
*btinfo
;
1734 btinfo
= &tp
->btrace
;
1736 xfree (btinfo
->replay
);
1737 btinfo
->replay
= NULL
;
1739 /* Make sure we're not leaving any stale registers. */
1740 registers_changed_ptid (tp
->ptid
);
1743 /* The to_resume method of target record-btrace. */
1746 record_btrace_resume (struct target_ops
*ops
, ptid_t ptid
, int step
,
1747 enum gdb_signal signal
)
1749 struct thread_info
*tp
, *other
;
1750 enum btrace_thread_flag flag
;
1752 DEBUG ("resume %s: %s", target_pid_to_str (ptid
), step
? "step" : "cont");
1754 /* Store the execution direction of the last resume. */
1755 record_btrace_resume_exec_dir
= execution_direction
;
1757 tp
= record_btrace_find_resume_thread (ptid
);
1759 error (_("Cannot find thread to resume."));
1761 /* Stop replaying other threads if the thread to resume is not replaying. */
1762 if (!btrace_is_replaying (tp
) && execution_direction
!= EXEC_REVERSE
)
1763 ALL_NON_EXITED_THREADS (other
)
1764 record_btrace_stop_replaying (other
);
1766 /* As long as we're not replaying, just forward the request. */
1767 if (!record_btrace_is_replaying (ops
) && execution_direction
!= EXEC_REVERSE
)
1770 return ops
->to_resume (ops
, ptid
, step
, signal
);
1773 /* Compute the btrace thread flag for the requested move. */
1775 flag
= execution_direction
== EXEC_REVERSE
? BTHR_RCONT
: BTHR_CONT
;
1777 flag
= execution_direction
== EXEC_REVERSE
? BTHR_RSTEP
: BTHR_STEP
;
1779 /* At the moment, we only move a single thread. We could also move
1780 all threads in parallel by single-stepping each resumed thread
1781 until the first runs into an event.
1782 When we do that, we would want to continue all other threads.
1783 For now, just resume one thread to not confuse to_wait. */
1784 record_btrace_resume_thread (tp
, flag
);
1786 /* We just indicate the resume intent here. The actual stepping happens in
1787 record_btrace_wait below. */
1789 /* Async support. */
1790 if (target_can_async_p ())
1792 target_async (inferior_event_handler
, 0);
1793 mark_async_event_handler (record_btrace_async_inferior_event_handler
);
1797 /* Find a thread to move. */
1799 static struct thread_info
*
1800 record_btrace_find_thread_to_move (ptid_t ptid
)
1802 struct thread_info
*tp
;
1804 /* First check the parameter thread. */
1805 tp
= find_thread_ptid (ptid
);
1806 if (tp
!= NULL
&& (tp
->btrace
.flags
& BTHR_MOVE
) != 0)
1809 /* Otherwise, find one other thread that has been resumed. */
1810 ALL_NON_EXITED_THREADS (tp
)
1811 if ((tp
->btrace
.flags
& BTHR_MOVE
) != 0)
1817 /* Return a target_waitstatus indicating that we ran out of history. */
1819 static struct target_waitstatus
1820 btrace_step_no_history (void)
1822 struct target_waitstatus status
;
1824 status
.kind
= TARGET_WAITKIND_NO_HISTORY
;
1829 /* Return a target_waitstatus indicating that a step finished. */
1831 static struct target_waitstatus
1832 btrace_step_stopped (void)
1834 struct target_waitstatus status
;
1836 status
.kind
= TARGET_WAITKIND_STOPPED
;
1837 status
.value
.sig
= GDB_SIGNAL_TRAP
;
1842 /* Clear the record histories. */
1845 record_btrace_clear_histories (struct btrace_thread_info
*btinfo
)
1847 xfree (btinfo
->insn_history
);
1848 xfree (btinfo
->call_history
);
1850 btinfo
->insn_history
= NULL
;
1851 btinfo
->call_history
= NULL
;
1854 /* Step a single thread. */
1856 static struct target_waitstatus
1857 record_btrace_step_thread (struct thread_info
*tp
)
1859 struct btrace_insn_iterator
*replay
, end
;
1860 struct btrace_thread_info
*btinfo
;
1861 struct address_space
*aspace
;
1862 struct inferior
*inf
;
1863 enum btrace_thread_flag flags
;
1866 /* We can't step without an execution history. */
1867 if (btrace_is_empty (tp
))
1868 return btrace_step_no_history ();
1870 btinfo
= &tp
->btrace
;
1871 replay
= btinfo
->replay
;
1873 flags
= btinfo
->flags
& BTHR_MOVE
;
1874 btinfo
->flags
&= ~BTHR_MOVE
;
1876 DEBUG ("stepping %d (%s): %u", tp
->num
, target_pid_to_str (tp
->ptid
), flags
);
1881 internal_error (__FILE__
, __LINE__
, _("invalid stepping type."));
1884 /* We're done if we're not replaying. */
1886 return btrace_step_no_history ();
1888 /* Skip gaps during replay. */
1891 steps
= btrace_insn_next (replay
, 1);
1894 record_btrace_stop_replaying (tp
);
1895 return btrace_step_no_history ();
1898 while (btrace_insn_get (replay
) == NULL
);
1900 /* Determine the end of the instruction trace. */
1901 btrace_insn_end (&end
, btinfo
);
1903 /* We stop replaying if we reached the end of the trace. */
1904 if (btrace_insn_cmp (replay
, &end
) == 0)
1905 record_btrace_stop_replaying (tp
);
1907 return btrace_step_stopped ();
1910 /* Start replaying if we're not already doing so. */
1912 replay
= record_btrace_start_replaying (tp
);
1914 /* If we can't step any further, we reached the end of the history.
1915 Skip gaps during replay. */
1918 steps
= btrace_insn_prev (replay
, 1);
1920 return btrace_step_no_history ();
1923 while (btrace_insn_get (replay
) == NULL
);
1925 return btrace_step_stopped ();
1928 /* We're done if we're not replaying. */
1930 return btrace_step_no_history ();
1932 inf
= find_inferior_ptid (tp
->ptid
);
1933 aspace
= inf
->aspace
;
1935 /* Determine the end of the instruction trace. */
1936 btrace_insn_end (&end
, btinfo
);
1940 const struct btrace_insn
*insn
;
1942 /* Skip gaps during replay. */
1945 steps
= btrace_insn_next (replay
, 1);
1948 record_btrace_stop_replaying (tp
);
1949 return btrace_step_no_history ();
1952 insn
= btrace_insn_get (replay
);
1954 while (insn
== NULL
);
1956 /* We stop replaying if we reached the end of the trace. */
1957 if (btrace_insn_cmp (replay
, &end
) == 0)
1959 record_btrace_stop_replaying (tp
);
1960 return btrace_step_no_history ();
1963 DEBUG ("stepping %d (%s) ... %s", tp
->num
,
1964 target_pid_to_str (tp
->ptid
),
1965 core_addr_to_string_nz (insn
->pc
));
1967 if (record_check_stopped_by_breakpoint (aspace
, insn
->pc
,
1968 &btinfo
->stop_reason
))
1969 return btrace_step_stopped ();
1973 /* Start replaying if we're not already doing so. */
1975 replay
= record_btrace_start_replaying (tp
);
1977 inf
= find_inferior_ptid (tp
->ptid
);
1978 aspace
= inf
->aspace
;
1982 const struct btrace_insn
*insn
;
1984 /* If we can't step any further, we reached the end of the history.
1985 Skip gaps during replay. */
1988 steps
= btrace_insn_prev (replay
, 1);
1990 return btrace_step_no_history ();
1992 insn
= btrace_insn_get (replay
);
1994 while (insn
== NULL
);
1996 DEBUG ("reverse-stepping %d (%s) ... %s", tp
->num
,
1997 target_pid_to_str (tp
->ptid
),
1998 core_addr_to_string_nz (insn
->pc
));
2000 if (record_check_stopped_by_breakpoint (aspace
, insn
->pc
,
2001 &btinfo
->stop_reason
))
2002 return btrace_step_stopped ();
2007 /* The to_wait method of target record-btrace. */
2010 record_btrace_wait (struct target_ops
*ops
, ptid_t ptid
,
2011 struct target_waitstatus
*status
, int options
)
2013 struct thread_info
*tp
, *other
;
2015 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid
), options
);
2017 /* As long as we're not replaying, just forward the request. */
2018 if (!record_btrace_is_replaying (ops
) && execution_direction
!= EXEC_REVERSE
)
2021 return ops
->to_wait (ops
, ptid
, status
, options
);
2024 /* Let's find a thread to move. */
2025 tp
= record_btrace_find_thread_to_move (ptid
);
2028 DEBUG ("wait %s: no thread", target_pid_to_str (ptid
));
2030 status
->kind
= TARGET_WAITKIND_IGNORE
;
2031 return minus_one_ptid
;
2034 /* We only move a single thread. We're not able to correlate threads. */
2035 *status
= record_btrace_step_thread (tp
);
2037 /* Stop all other threads. */
2039 ALL_NON_EXITED_THREADS (other
)
2040 other
->btrace
.flags
&= ~BTHR_MOVE
;
2042 /* Start record histories anew from the current position. */
2043 record_btrace_clear_histories (&tp
->btrace
);
2045 /* We moved the replay position but did not update registers. */
2046 registers_changed_ptid (tp
->ptid
);
/* The to_can_execute_reverse method of target record-btrace.  */

static int
record_btrace_can_execute_reverse (struct target_ops *self)
{
  return 1;
}
2059 /* The to_stopped_by_sw_breakpoint method of target record-btrace. */
2062 record_btrace_stopped_by_sw_breakpoint (struct target_ops
*ops
)
2064 if (record_btrace_is_replaying (ops
))
2066 struct thread_info
*tp
= inferior_thread ();
2068 return tp
->btrace
.stop_reason
== TARGET_STOPPED_BY_SW_BREAKPOINT
;
2071 return ops
->beneath
->to_stopped_by_sw_breakpoint (ops
->beneath
);
2074 /* The to_supports_stopped_by_sw_breakpoint method of target
2078 record_btrace_supports_stopped_by_sw_breakpoint (struct target_ops
*ops
)
2080 if (record_btrace_is_replaying (ops
))
2083 return ops
->beneath
->to_supports_stopped_by_sw_breakpoint (ops
->beneath
);
2086 /* The to_stopped_by_sw_breakpoint method of target record-btrace. */
2089 record_btrace_stopped_by_hw_breakpoint (struct target_ops
*ops
)
2091 if (record_btrace_is_replaying (ops
))
2093 struct thread_info
*tp
= inferior_thread ();
2095 return tp
->btrace
.stop_reason
== TARGET_STOPPED_BY_HW_BREAKPOINT
;
2098 return ops
->beneath
->to_stopped_by_hw_breakpoint (ops
->beneath
);
2101 /* The to_supports_stopped_by_hw_breakpoint method of target
2105 record_btrace_supports_stopped_by_hw_breakpoint (struct target_ops
*ops
)
2107 if (record_btrace_is_replaying (ops
))
2110 return ops
->beneath
->to_supports_stopped_by_hw_breakpoint (ops
->beneath
);
2113 /* The to_update_thread_list method of target record-btrace. */
2116 record_btrace_update_thread_list (struct target_ops
*ops
)
2118 /* We don't add or remove threads during replay. */
2119 if (record_btrace_is_replaying (ops
))
2122 /* Forward the request. */
2124 ops
->to_update_thread_list (ops
);
2127 /* The to_thread_alive method of target record-btrace. */
2130 record_btrace_thread_alive (struct target_ops
*ops
, ptid_t ptid
)
2132 /* We don't add or remove threads during replay. */
2133 if (record_btrace_is_replaying (ops
))
2134 return find_thread_ptid (ptid
) != NULL
;
2136 /* Forward the request. */
2138 return ops
->to_thread_alive (ops
, ptid
);
2141 /* Set the replay branch trace instruction iterator. If IT is NULL, replay
2145 record_btrace_set_replay (struct thread_info
*tp
,
2146 const struct btrace_insn_iterator
*it
)
2148 struct btrace_thread_info
*btinfo
;
2150 btinfo
= &tp
->btrace
;
2152 if (it
== NULL
|| it
->function
== NULL
)
2153 record_btrace_stop_replaying (tp
);
2156 if (btinfo
->replay
== NULL
)
2157 record_btrace_start_replaying (tp
);
2158 else if (btrace_insn_cmp (btinfo
->replay
, it
) == 0)
2161 *btinfo
->replay
= *it
;
2162 registers_changed_ptid (tp
->ptid
);
2165 /* Start anew from the new replay position. */
2166 record_btrace_clear_histories (btinfo
);
2169 /* The to_goto_record_begin method of target record-btrace. */
2172 record_btrace_goto_begin (struct target_ops
*self
)
2174 struct thread_info
*tp
;
2175 struct btrace_insn_iterator begin
;
2177 tp
= require_btrace_thread ();
2179 btrace_insn_begin (&begin
, &tp
->btrace
);
2180 record_btrace_set_replay (tp
, &begin
);
2182 print_stack_frame (get_selected_frame (NULL
), 1, SRC_AND_LOC
, 1);
2185 /* The to_goto_record_end method of target record-btrace. */
2188 record_btrace_goto_end (struct target_ops
*ops
)
2190 struct thread_info
*tp
;
2192 tp
= require_btrace_thread ();
2194 record_btrace_set_replay (tp
, NULL
);
2196 print_stack_frame (get_selected_frame (NULL
), 1, SRC_AND_LOC
, 1);
2199 /* The to_goto_record method of target record-btrace. */
2202 record_btrace_goto (struct target_ops
*self
, ULONGEST insn
)
2204 struct thread_info
*tp
;
2205 struct btrace_insn_iterator it
;
2206 unsigned int number
;
2211 /* Check for wrap-arounds. */
2213 error (_("Instruction number out of range."));
2215 tp
= require_btrace_thread ();
2217 found
= btrace_find_insn_by_number (&it
, &tp
->btrace
, number
);
2219 error (_("No such instruction."));
2221 record_btrace_set_replay (tp
, &it
);
2223 print_stack_frame (get_selected_frame (NULL
), 1, SRC_AND_LOC
, 1);
2226 /* The to_execution_direction target method. */
2228 static enum exec_direction_kind
2229 record_btrace_execution_direction (struct target_ops
*self
)
2231 return record_btrace_resume_exec_dir
;
2234 /* The to_prepare_to_generate_core target method. */
2237 record_btrace_prepare_to_generate_core (struct target_ops
*self
)
2239 record_btrace_generating_corefile
= 1;
2242 /* The to_done_generating_core target method. */
2245 record_btrace_done_generating_core (struct target_ops
*self
)
2247 record_btrace_generating_corefile
= 0;
2250 /* Initialize the record-btrace target ops. */
2253 init_record_btrace_ops (void)
2255 struct target_ops
*ops
;
2257 ops
= &record_btrace_ops
;
2258 ops
->to_shortname
= "record-btrace";
2259 ops
->to_longname
= "Branch tracing target";
2260 ops
->to_doc
= "Collect control-flow trace and provide the execution history.";
2261 ops
->to_open
= record_btrace_open
;
2262 ops
->to_close
= record_btrace_close
;
2263 ops
->to_async
= record_btrace_async
;
2264 ops
->to_detach
= record_detach
;
2265 ops
->to_disconnect
= record_disconnect
;
2266 ops
->to_mourn_inferior
= record_mourn_inferior
;
2267 ops
->to_kill
= record_kill
;
2268 ops
->to_stop_recording
= record_btrace_stop_recording
;
2269 ops
->to_info_record
= record_btrace_info
;
2270 ops
->to_insn_history
= record_btrace_insn_history
;
2271 ops
->to_insn_history_from
= record_btrace_insn_history_from
;
2272 ops
->to_insn_history_range
= record_btrace_insn_history_range
;
2273 ops
->to_call_history
= record_btrace_call_history
;
2274 ops
->to_call_history_from
= record_btrace_call_history_from
;
2275 ops
->to_call_history_range
= record_btrace_call_history_range
;
2276 ops
->to_record_is_replaying
= record_btrace_is_replaying
;
2277 ops
->to_xfer_partial
= record_btrace_xfer_partial
;
2278 ops
->to_remove_breakpoint
= record_btrace_remove_breakpoint
;
2279 ops
->to_insert_breakpoint
= record_btrace_insert_breakpoint
;
2280 ops
->to_fetch_registers
= record_btrace_fetch_registers
;
2281 ops
->to_store_registers
= record_btrace_store_registers
;
2282 ops
->to_prepare_to_store
= record_btrace_prepare_to_store
;
2283 ops
->to_get_unwinder
= &record_btrace_to_get_unwinder
;
2284 ops
->to_get_tailcall_unwinder
= &record_btrace_to_get_tailcall_unwinder
;
2285 ops
->to_resume
= record_btrace_resume
;
2286 ops
->to_wait
= record_btrace_wait
;
2287 ops
->to_update_thread_list
= record_btrace_update_thread_list
;
2288 ops
->to_thread_alive
= record_btrace_thread_alive
;
2289 ops
->to_goto_record_begin
= record_btrace_goto_begin
;
2290 ops
->to_goto_record_end
= record_btrace_goto_end
;
2291 ops
->to_goto_record
= record_btrace_goto
;
2292 ops
->to_can_execute_reverse
= record_btrace_can_execute_reverse
;
2293 ops
->to_stopped_by_sw_breakpoint
= record_btrace_stopped_by_sw_breakpoint
;
2294 ops
->to_supports_stopped_by_sw_breakpoint
2295 = record_btrace_supports_stopped_by_sw_breakpoint
;
2296 ops
->to_stopped_by_hw_breakpoint
= record_btrace_stopped_by_hw_breakpoint
;
2297 ops
->to_supports_stopped_by_hw_breakpoint
2298 = record_btrace_supports_stopped_by_hw_breakpoint
;
2299 ops
->to_execution_direction
= record_btrace_execution_direction
;
2300 ops
->to_prepare_to_generate_core
= record_btrace_prepare_to_generate_core
;
2301 ops
->to_done_generating_core
= record_btrace_done_generating_core
;
2302 ops
->to_stratum
= record_stratum
;
2303 ops
->to_magic
= OPS_MAGIC
;
2306 /* Start recording in BTS format. */
2309 cmd_record_btrace_bts_start (char *args
, int from_tty
)
2312 if (args
!= NULL
&& *args
!= 0)
2313 error (_("Invalid argument."));
2315 record_btrace_conf
.format
= BTRACE_FORMAT_BTS
;
2319 execute_command ("target record-btrace", from_tty
);
2321 CATCH (exception
, RETURN_MASK_ALL
)
2323 record_btrace_conf
.format
= BTRACE_FORMAT_NONE
;
2324 throw_exception (exception
);
2329 /* Alias for "target record". */
2332 cmd_record_btrace_start (char *args
, int from_tty
)
2335 if (args
!= NULL
&& *args
!= 0)
2336 error (_("Invalid argument."));
2338 record_btrace_conf
.format
= BTRACE_FORMAT_BTS
;
2342 execute_command ("target record-btrace", from_tty
);
2344 CATCH (exception
, RETURN_MASK_ALL
)
2346 record_btrace_conf
.format
= BTRACE_FORMAT_NONE
;
2347 throw_exception (exception
);
2352 /* The "set record btrace" command. */
2355 cmd_set_record_btrace (char *args
, int from_tty
)
2357 cmd_show_list (set_record_btrace_cmdlist
, from_tty
, "");
2360 /* The "show record btrace" command. */
2363 cmd_show_record_btrace (char *args
, int from_tty
)
2365 cmd_show_list (show_record_btrace_cmdlist
, from_tty
, "");
2368 /* The "show record btrace replay-memory-access" command. */
2371 cmd_show_replay_memory_access (struct ui_file
*file
, int from_tty
,
2372 struct cmd_list_element
*c
, const char *value
)
2374 fprintf_filtered (gdb_stdout
, _("Replay memory access is %s.\n"),
2375 replay_memory_access
);
2378 /* The "set record btrace bts" command. */
2381 cmd_set_record_btrace_bts (char *args
, int from_tty
)
2383 printf_unfiltered (_("\"set record btrace bts\" must be followed "
2384 "by an apporpriate subcommand.\n"));
2385 help_list (set_record_btrace_bts_cmdlist
, "set record btrace bts ",
2386 all_commands
, gdb_stdout
);
2389 /* The "show record btrace bts" command. */
2392 cmd_show_record_btrace_bts (char *args
, int from_tty
)
2394 cmd_show_list (show_record_btrace_bts_cmdlist
, from_tty
, "");
2397 void _initialize_record_btrace (void);
2399 /* Initialize btrace commands. */
2402 _initialize_record_btrace (void)
2404 add_prefix_cmd ("btrace", class_obscure
, cmd_record_btrace_start
,
2405 _("Start branch trace recording."), &record_btrace_cmdlist
,
2406 "record btrace ", 0, &record_cmdlist
);
2407 add_alias_cmd ("b", "btrace", class_obscure
, 1, &record_cmdlist
);
2409 add_cmd ("bts", class_obscure
, cmd_record_btrace_bts_start
,
2411 Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
2412 The processor stores a from/to record for each branch into a cyclic buffer.\n\
2413 This format may not be available on all processors."),
2414 &record_btrace_cmdlist
);
2415 add_alias_cmd ("bts", "btrace bts", class_obscure
, 1, &record_cmdlist
);
2417 add_prefix_cmd ("btrace", class_support
, cmd_set_record_btrace
,
2418 _("Set record options"), &set_record_btrace_cmdlist
,
2419 "set record btrace ", 0, &set_record_cmdlist
);
2421 add_prefix_cmd ("btrace", class_support
, cmd_show_record_btrace
,
2422 _("Show record options"), &show_record_btrace_cmdlist
,
2423 "show record btrace ", 0, &show_record_cmdlist
);
2425 add_setshow_enum_cmd ("replay-memory-access", no_class
,
2426 replay_memory_access_types
, &replay_memory_access
, _("\
2427 Set what memory accesses are allowed during replay."), _("\
2428 Show what memory accesses are allowed during replay."),
2429 _("Default is READ-ONLY.\n\n\
2430 The btrace record target does not trace data.\n\
2431 The memory therefore corresponds to the live target and not \
2432 to the current replay position.\n\n\
2433 When READ-ONLY, allow accesses to read-only memory during replay.\n\
2434 When READ-WRITE, allow accesses to read-only and read-write memory during \
2436 NULL
, cmd_show_replay_memory_access
,
2437 &set_record_btrace_cmdlist
,
2438 &show_record_btrace_cmdlist
);
2440 add_prefix_cmd ("bts", class_support
, cmd_set_record_btrace_bts
,
2441 _("Set record btrace bts options"),
2442 &set_record_btrace_bts_cmdlist
,
2443 "set record btrace bts ", 0, &set_record_btrace_cmdlist
);
2445 add_prefix_cmd ("bts", class_support
, cmd_show_record_btrace_bts
,
2446 _("Show record btrace bts options"),
2447 &show_record_btrace_bts_cmdlist
,
2448 "show record btrace bts ", 0, &show_record_btrace_cmdlist
);
2450 add_setshow_uinteger_cmd ("buffer-size", no_class
,
2451 &record_btrace_conf
.bts
.size
,
2452 _("Set the record/replay bts buffer size."),
2453 _("Show the record/replay bts buffer size."), _("\
2454 When starting recording request a trace buffer of this size. \
2455 The actual buffer size may differ from the requested size. \
2456 Use \"info record\" to see the actual buffer size.\n\n\
2457 Bigger buffers allow longer recording but also take more time to process \
2458 the recorded execution trace.\n\n\
2459 The trace buffer size may not be changed while recording."), NULL
, NULL
,
2460 &set_record_btrace_bts_cmdlist
,
2461 &show_record_btrace_bts_cmdlist
);
2463 init_record_btrace_ops ();
2464 add_target (&record_btrace_ops
);
2466 bfcache
= htab_create_alloc (50, bfcache_hash
, bfcache_eq
, NULL
,
2469 record_btrace_conf
.bts
.size
= 64 * 1024;