1 /* Branch trace support for GDB, the GNU debugger.
3 Copyright (C) 2013-2015 Free Software Foundation, Inc.
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "gdbthread.h"
29 #include "cli/cli-utils.h"
33 #include "filenames.h"
35 #include "frame-unwind.h"
38 #include "event-loop.h"
41 /* The target_ops of record-btrace. */
42 static struct target_ops record_btrace_ops
;
44 /* A new thread observer enabling branch tracing for the new thread. */
45 static struct observer
*record_btrace_thread_observer
;
47 /* Memory access types used in set/show record btrace replay-memory-access. */
48 static const char replay_memory_access_read_only
[] = "read-only";
49 static const char replay_memory_access_read_write
[] = "read-write";
50 static const char *const replay_memory_access_types
[] =
52 replay_memory_access_read_only
,
53 replay_memory_access_read_write
,
57 /* The currently allowed replay memory access type. */
58 static const char *replay_memory_access
= replay_memory_access_read_only
;
60 /* Command lists for "set/show record btrace". */
61 static struct cmd_list_element
*set_record_btrace_cmdlist
;
62 static struct cmd_list_element
*show_record_btrace_cmdlist
;
64 /* The execution direction of the last resume we got. See record-full.c. */
65 static enum exec_direction_kind record_btrace_resume_exec_dir
= EXEC_FORWARD
;
67 /* The async event handler for reverse/replay execution. */
68 static struct async_event_handler
*record_btrace_async_inferior_event_handler
;
70 /* A flag indicating that we are currently generating a core file. */
71 static int record_btrace_generating_corefile
;
73 /* The current branch trace configuration. */
74 static struct btrace_config record_btrace_conf
;
76 /* Command list for "record btrace". */
77 static struct cmd_list_element
*record_btrace_cmdlist
;
79 /* Command lists for "set/show record btrace bts". */
80 static struct cmd_list_element
*set_record_btrace_bts_cmdlist
;
81 static struct cmd_list_element
*show_record_btrace_bts_cmdlist
;
83 /* Command lists for "set/show record btrace pt". */
84 static struct cmd_list_element
*set_record_btrace_pt_cmdlist
;
85 static struct cmd_list_element
*show_record_btrace_pt_cmdlist
;
87 /* Print a record-btrace debug message. Use do ... while (0) to avoid
88 ambiguities when used in if statements. */
90 #define DEBUG(msg, args...) \
93 if (record_debug != 0) \
94 fprintf_unfiltered (gdb_stdlog, \
95 "[record-btrace] " msg "\n", ##args); \
100 /* Update the branch trace for the current thread and return a pointer to its
103 Throws an error if there is no thread or no trace. This function never
106 static struct thread_info
*
107 require_btrace_thread (void)
109 struct thread_info
*tp
;
113 tp
= find_thread_ptid (inferior_ptid
);
115 error (_("No thread."));
119 if (btrace_is_empty (tp
))
120 error (_("No trace."));
125 /* Update the branch trace for the current thread and return a pointer to its
126 branch trace information struct.
128 Throws an error if there is no thread or no trace. This function never
131 static struct btrace_thread_info
*
132 require_btrace (void)
134 struct thread_info
*tp
;
136 tp
= require_btrace_thread ();
141 /* Enable branch tracing for one thread. Warn on errors. */
144 record_btrace_enable_warn (struct thread_info
*tp
)
148 btrace_enable (tp
, &record_btrace_conf
);
150 CATCH (error
, RETURN_MASK_ERROR
)
152 warning ("%s", error
.message
);
157 /* Callback function to disable branch tracing for one thread. */
160 record_btrace_disable_callback (void *arg
)
162 struct thread_info
*tp
;
169 /* Enable automatic tracing of new threads. */
172 record_btrace_auto_enable (void)
174 DEBUG ("attach thread observer");
176 record_btrace_thread_observer
177 = observer_attach_new_thread (record_btrace_enable_warn
);
180 /* Disable automatic tracing of new threads. */
183 record_btrace_auto_disable (void)
185 /* The observer may have been detached, already. */
186 if (record_btrace_thread_observer
== NULL
)
189 DEBUG ("detach thread observer");
191 observer_detach_new_thread (record_btrace_thread_observer
);
192 record_btrace_thread_observer
= NULL
;
195 /* The record-btrace async event handler function. */
198 record_btrace_handle_async_inferior_event (gdb_client_data data
)
200 inferior_event_handler (INF_REG_EVENT
, NULL
);
203 /* The to_open method of target record-btrace. */
206 record_btrace_open (const char *args
, int from_tty
)
208 struct cleanup
*disable_chain
;
209 struct thread_info
*tp
;
215 if (!target_has_execution
)
216 error (_("The program is not being run."));
219 error (_("Record btrace can't debug inferior in non-stop mode."));
221 gdb_assert (record_btrace_thread_observer
== NULL
);
223 disable_chain
= make_cleanup (null_cleanup
, NULL
);
224 ALL_NON_EXITED_THREADS (tp
)
225 if (args
== NULL
|| *args
== 0 || number_is_in_list (args
, tp
->num
))
227 btrace_enable (tp
, &record_btrace_conf
);
229 make_cleanup (record_btrace_disable_callback
, tp
);
232 record_btrace_auto_enable ();
234 push_target (&record_btrace_ops
);
236 record_btrace_async_inferior_event_handler
237 = create_async_event_handler (record_btrace_handle_async_inferior_event
,
239 record_btrace_generating_corefile
= 0;
241 observer_notify_record_changed (current_inferior (), 1);
243 discard_cleanups (disable_chain
);
246 /* The to_stop_recording method of target record-btrace. */
249 record_btrace_stop_recording (struct target_ops
*self
)
251 struct thread_info
*tp
;
253 DEBUG ("stop recording");
255 record_btrace_auto_disable ();
257 ALL_NON_EXITED_THREADS (tp
)
258 if (tp
->btrace
.target
!= NULL
)
262 /* The to_close method of target record-btrace. */
265 record_btrace_close (struct target_ops
*self
)
267 struct thread_info
*tp
;
269 if (record_btrace_async_inferior_event_handler
!= NULL
)
270 delete_async_event_handler (&record_btrace_async_inferior_event_handler
);
272 /* Make sure automatic recording gets disabled even if we did not stop
273 recording before closing the record-btrace target. */
274 record_btrace_auto_disable ();
276 /* We should have already stopped recording.
277 Tear down btrace in case we have not. */
278 ALL_NON_EXITED_THREADS (tp
)
279 btrace_teardown (tp
);
282 /* The to_async method of target record-btrace. */
285 record_btrace_async (struct target_ops
*ops
, int enable
)
288 mark_async_event_handler (record_btrace_async_inferior_event_handler
);
290 clear_async_event_handler (record_btrace_async_inferior_event_handler
);
292 ops
->beneath
->to_async (ops
->beneath
, enable
);
295 /* Adjusts the size and returns a human readable size suffix. */
298 record_btrace_adjust_size (unsigned int *size
)
304 if ((sz
& ((1u << 30) - 1)) == 0)
309 else if ((sz
& ((1u << 20) - 1)) == 0)
314 else if ((sz
& ((1u << 10) - 1)) == 0)
323 /* Print a BTS configuration. */
326 record_btrace_print_bts_conf (const struct btrace_config_bts
*conf
)
334 suffix
= record_btrace_adjust_size (&size
);
335 printf_unfiltered (_("Buffer size: %u%s.\n"), size
, suffix
);
339 /* Print an Intel(R) Processor Trace configuration. */
342 record_btrace_print_pt_conf (const struct btrace_config_pt
*conf
)
350 suffix
= record_btrace_adjust_size (&size
);
351 printf_unfiltered (_("Buffer size: %u%s.\n"), size
, suffix
);
355 /* Print a branch tracing configuration. */
358 record_btrace_print_conf (const struct btrace_config
*conf
)
360 printf_unfiltered (_("Recording format: %s.\n"),
361 btrace_format_string (conf
->format
));
363 switch (conf
->format
)
365 case BTRACE_FORMAT_NONE
:
368 case BTRACE_FORMAT_BTS
:
369 record_btrace_print_bts_conf (&conf
->bts
);
372 case BTRACE_FORMAT_PT
:
373 record_btrace_print_pt_conf (&conf
->pt
);
377 internal_error (__FILE__
, __LINE__
, _("Unkown branch trace format."));
380 /* The to_info_record method of target record-btrace. */
383 record_btrace_info (struct target_ops
*self
)
385 struct btrace_thread_info
*btinfo
;
386 const struct btrace_config
*conf
;
387 struct thread_info
*tp
;
388 unsigned int insns
, calls
, gaps
;
392 tp
= find_thread_ptid (inferior_ptid
);
394 error (_("No thread."));
396 btinfo
= &tp
->btrace
;
398 conf
= btrace_conf (btinfo
);
400 record_btrace_print_conf (conf
);
408 if (!btrace_is_empty (tp
))
410 struct btrace_call_iterator call
;
411 struct btrace_insn_iterator insn
;
413 btrace_call_end (&call
, btinfo
);
414 btrace_call_prev (&call
, 1);
415 calls
= btrace_call_number (&call
);
417 btrace_insn_end (&insn
, btinfo
);
419 insns
= btrace_insn_number (&insn
);
422 /* The last instruction does not really belong to the trace. */
429 /* Skip gaps at the end. */
432 steps
= btrace_insn_prev (&insn
, 1);
436 insns
= btrace_insn_number (&insn
);
441 gaps
= btinfo
->ngaps
;
444 printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
445 "for thread %d (%s).\n"), insns
, calls
, gaps
,
446 tp
->num
, target_pid_to_str (tp
->ptid
));
448 if (btrace_is_replaying (tp
))
449 printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
450 btrace_insn_number (btinfo
->replay
));
453 /* Print a decode error. */
456 btrace_ui_out_decode_error (struct ui_out
*uiout
, int errcode
,
457 enum btrace_format format
)
462 errstr
= _("unknown");
470 case BTRACE_FORMAT_BTS
:
476 case BDE_BTS_OVERFLOW
:
477 errstr
= _("instruction overflow");
480 case BDE_BTS_INSN_SIZE
:
481 errstr
= _("unknown instruction");
486 #if defined (HAVE_LIBIPT)
487 case BTRACE_FORMAT_PT
:
490 case BDE_PT_USER_QUIT
:
492 errstr
= _("trace decode cancelled");
495 case BDE_PT_DISABLED
:
497 errstr
= _("disabled");
500 case BDE_PT_OVERFLOW
:
502 errstr
= _("overflow");
507 errstr
= pt_errstr (pt_errcode (errcode
));
511 #endif /* defined (HAVE_LIBIPT) */
514 ui_out_text (uiout
, _("["));
517 ui_out_text (uiout
, _("decode error ("));
518 ui_out_field_int (uiout
, "errcode", errcode
);
519 ui_out_text (uiout
, _("): "));
521 ui_out_text (uiout
, errstr
);
522 ui_out_text (uiout
, _("]\n"));
525 /* Print an unsigned int. */
528 ui_out_field_uint (struct ui_out
*uiout
, const char *fld
, unsigned int val
)
530 ui_out_field_fmt (uiout
, fld
, "%u", val
);
533 /* Disassemble a section of the recorded instruction trace. */
536 btrace_insn_history (struct ui_out
*uiout
,
537 const struct btrace_thread_info
*btinfo
,
538 const struct btrace_insn_iterator
*begin
,
539 const struct btrace_insn_iterator
*end
, int flags
)
541 struct gdbarch
*gdbarch
;
542 struct btrace_insn_iterator it
;
544 DEBUG ("itrace (0x%x): [%u; %u)", flags
, btrace_insn_number (begin
),
545 btrace_insn_number (end
));
547 gdbarch
= target_gdbarch ();
549 for (it
= *begin
; btrace_insn_cmp (&it
, end
) != 0; btrace_insn_next (&it
, 1))
551 const struct btrace_insn
*insn
;
553 insn
= btrace_insn_get (&it
);
555 /* A NULL instruction indicates a gap in the trace. */
558 const struct btrace_config
*conf
;
560 conf
= btrace_conf (btinfo
);
562 /* We have trace so we must have a configuration. */
563 gdb_assert (conf
!= NULL
);
565 btrace_ui_out_decode_error (uiout
, it
.function
->errcode
,
570 /* Print the instruction index. */
571 ui_out_field_uint (uiout
, "index", btrace_insn_number (&it
));
572 ui_out_text (uiout
, "\t");
574 /* Disassembly with '/m' flag may not produce the expected result.
576 gdb_disassembly (gdbarch
, uiout
, NULL
, flags
, 1, insn
->pc
,
582 /* The to_insn_history method of target record-btrace. */
585 record_btrace_insn_history (struct target_ops
*self
, int size
, int flags
)
587 struct btrace_thread_info
*btinfo
;
588 struct btrace_insn_history
*history
;
589 struct btrace_insn_iterator begin
, end
;
590 struct cleanup
*uiout_cleanup
;
591 struct ui_out
*uiout
;
592 unsigned int context
, covered
;
594 uiout
= current_uiout
;
595 uiout_cleanup
= make_cleanup_ui_out_tuple_begin_end (uiout
,
597 context
= abs (size
);
599 error (_("Bad record instruction-history-size."));
601 btinfo
= require_btrace ();
602 history
= btinfo
->insn_history
;
605 struct btrace_insn_iterator
*replay
;
607 DEBUG ("insn-history (0x%x): %d", flags
, size
);
609 /* If we're replaying, we start at the replay position. Otherwise, we
610 start at the tail of the trace. */
611 replay
= btinfo
->replay
;
615 btrace_insn_end (&begin
, btinfo
);
617 /* We start from here and expand in the requested direction. Then we
618 expand in the other direction, as well, to fill up any remaining
623 /* We want the current position covered, as well. */
624 covered
= btrace_insn_next (&end
, 1);
625 covered
+= btrace_insn_prev (&begin
, context
- covered
);
626 covered
+= btrace_insn_next (&end
, context
- covered
);
630 covered
= btrace_insn_next (&end
, context
);
631 covered
+= btrace_insn_prev (&begin
, context
- covered
);
636 begin
= history
->begin
;
639 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags
, size
,
640 btrace_insn_number (&begin
), btrace_insn_number (&end
));
645 covered
= btrace_insn_prev (&begin
, context
);
650 covered
= btrace_insn_next (&end
, context
);
655 btrace_insn_history (uiout
, btinfo
, &begin
, &end
, flags
);
659 printf_unfiltered (_("At the start of the branch trace record.\n"));
661 printf_unfiltered (_("At the end of the branch trace record.\n"));
664 btrace_set_insn_history (btinfo
, &begin
, &end
);
665 do_cleanups (uiout_cleanup
);
668 /* The to_insn_history_range method of target record-btrace. */
671 record_btrace_insn_history_range (struct target_ops
*self
,
672 ULONGEST from
, ULONGEST to
, int flags
)
674 struct btrace_thread_info
*btinfo
;
675 struct btrace_insn_history
*history
;
676 struct btrace_insn_iterator begin
, end
;
677 struct cleanup
*uiout_cleanup
;
678 struct ui_out
*uiout
;
679 unsigned int low
, high
;
682 uiout
= current_uiout
;
683 uiout_cleanup
= make_cleanup_ui_out_tuple_begin_end (uiout
,
688 DEBUG ("insn-history (0x%x): [%u; %u)", flags
, low
, high
);
690 /* Check for wrap-arounds. */
691 if (low
!= from
|| high
!= to
)
692 error (_("Bad range."));
695 error (_("Bad range."));
697 btinfo
= require_btrace ();
699 found
= btrace_find_insn_by_number (&begin
, btinfo
, low
);
701 error (_("Range out of bounds."));
703 found
= btrace_find_insn_by_number (&end
, btinfo
, high
);
706 /* Silently truncate the range. */
707 btrace_insn_end (&end
, btinfo
);
711 /* We want both begin and end to be inclusive. */
712 btrace_insn_next (&end
, 1);
715 btrace_insn_history (uiout
, btinfo
, &begin
, &end
, flags
);
716 btrace_set_insn_history (btinfo
, &begin
, &end
);
718 do_cleanups (uiout_cleanup
);
721 /* The to_insn_history_from method of target record-btrace. */
724 record_btrace_insn_history_from (struct target_ops
*self
,
725 ULONGEST from
, int size
, int flags
)
727 ULONGEST begin
, end
, context
;
729 context
= abs (size
);
731 error (_("Bad record instruction-history-size."));
740 begin
= from
- context
+ 1;
745 end
= from
+ context
- 1;
747 /* Check for wrap-around. */
752 record_btrace_insn_history_range (self
, begin
, end
, flags
);
755 /* Print the instruction number range for a function call history line. */
758 btrace_call_history_insn_range (struct ui_out
*uiout
,
759 const struct btrace_function
*bfun
)
761 unsigned int begin
, end
, size
;
763 size
= VEC_length (btrace_insn_s
, bfun
->insn
);
764 gdb_assert (size
> 0);
766 begin
= bfun
->insn_offset
;
767 end
= begin
+ size
- 1;
769 ui_out_field_uint (uiout
, "insn begin", begin
);
770 ui_out_text (uiout
, ",");
771 ui_out_field_uint (uiout
, "insn end", end
);
774 /* Compute the lowest and highest source line for the instructions in BFUN
775 and return them in PBEGIN and PEND.
776 Ignore instructions that can't be mapped to BFUN, e.g. instructions that
777 result from inlining or macro expansion. */
780 btrace_compute_src_line_range (const struct btrace_function
*bfun
,
781 int *pbegin
, int *pend
)
783 struct btrace_insn
*insn
;
784 struct symtab
*symtab
;
796 symtab
= symbol_symtab (sym
);
798 for (idx
= 0; VEC_iterate (btrace_insn_s
, bfun
->insn
, idx
, insn
); ++idx
)
800 struct symtab_and_line sal
;
802 sal
= find_pc_line (insn
->pc
, 0);
803 if (sal
.symtab
!= symtab
|| sal
.line
== 0)
806 begin
= min (begin
, sal
.line
);
807 end
= max (end
, sal
.line
);
815 /* Print the source line information for a function call history line. */
818 btrace_call_history_src_line (struct ui_out
*uiout
,
819 const struct btrace_function
*bfun
)
828 ui_out_field_string (uiout
, "file",
829 symtab_to_filename_for_display (symbol_symtab (sym
)));
831 btrace_compute_src_line_range (bfun
, &begin
, &end
);
835 ui_out_text (uiout
, ":");
836 ui_out_field_int (uiout
, "min line", begin
);
841 ui_out_text (uiout
, ",");
842 ui_out_field_int (uiout
, "max line", end
);
845 /* Get the name of a branch trace function. */
848 btrace_get_bfun_name (const struct btrace_function
*bfun
)
850 struct minimal_symbol
*msym
;
860 return SYMBOL_PRINT_NAME (sym
);
861 else if (msym
!= NULL
)
862 return MSYMBOL_PRINT_NAME (msym
);
867 /* Disassemble a section of the recorded function trace. */
870 btrace_call_history (struct ui_out
*uiout
,
871 const struct btrace_thread_info
*btinfo
,
872 const struct btrace_call_iterator
*begin
,
873 const struct btrace_call_iterator
*end
,
874 enum record_print_flag flags
)
876 struct btrace_call_iterator it
;
878 DEBUG ("ftrace (0x%x): [%u; %u)", flags
, btrace_call_number (begin
),
879 btrace_call_number (end
));
881 for (it
= *begin
; btrace_call_cmp (&it
, end
) < 0; btrace_call_next (&it
, 1))
883 const struct btrace_function
*bfun
;
884 struct minimal_symbol
*msym
;
887 bfun
= btrace_call_get (&it
);
891 /* Print the function index. */
892 ui_out_field_uint (uiout
, "index", bfun
->number
);
893 ui_out_text (uiout
, "\t");
895 /* Indicate gaps in the trace. */
896 if (bfun
->errcode
!= 0)
898 const struct btrace_config
*conf
;
900 conf
= btrace_conf (btinfo
);
902 /* We have trace so we must have a configuration. */
903 gdb_assert (conf
!= NULL
);
905 btrace_ui_out_decode_error (uiout
, bfun
->errcode
, conf
->format
);
910 if ((flags
& RECORD_PRINT_INDENT_CALLS
) != 0)
912 int level
= bfun
->level
+ btinfo
->level
, i
;
914 for (i
= 0; i
< level
; ++i
)
915 ui_out_text (uiout
, " ");
919 ui_out_field_string (uiout
, "function", SYMBOL_PRINT_NAME (sym
));
920 else if (msym
!= NULL
)
921 ui_out_field_string (uiout
, "function", MSYMBOL_PRINT_NAME (msym
));
922 else if (!ui_out_is_mi_like_p (uiout
))
923 ui_out_field_string (uiout
, "function", "??");
925 if ((flags
& RECORD_PRINT_INSN_RANGE
) != 0)
927 ui_out_text (uiout
, _("\tinst "));
928 btrace_call_history_insn_range (uiout
, bfun
);
931 if ((flags
& RECORD_PRINT_SRC_LINE
) != 0)
933 ui_out_text (uiout
, _("\tat "));
934 btrace_call_history_src_line (uiout
, bfun
);
937 ui_out_text (uiout
, "\n");
941 /* The to_call_history method of target record-btrace. */
944 record_btrace_call_history (struct target_ops
*self
, int size
, int flags
)
946 struct btrace_thread_info
*btinfo
;
947 struct btrace_call_history
*history
;
948 struct btrace_call_iterator begin
, end
;
949 struct cleanup
*uiout_cleanup
;
950 struct ui_out
*uiout
;
951 unsigned int context
, covered
;
953 uiout
= current_uiout
;
954 uiout_cleanup
= make_cleanup_ui_out_tuple_begin_end (uiout
,
956 context
= abs (size
);
958 error (_("Bad record function-call-history-size."));
960 btinfo
= require_btrace ();
961 history
= btinfo
->call_history
;
964 struct btrace_insn_iterator
*replay
;
966 DEBUG ("call-history (0x%x): %d", flags
, size
);
968 /* If we're replaying, we start at the replay position. Otherwise, we
969 start at the tail of the trace. */
970 replay
= btinfo
->replay
;
973 begin
.function
= replay
->function
;
974 begin
.btinfo
= btinfo
;
977 btrace_call_end (&begin
, btinfo
);
979 /* We start from here and expand in the requested direction. Then we
980 expand in the other direction, as well, to fill up any remaining
985 /* We want the current position covered, as well. */
986 covered
= btrace_call_next (&end
, 1);
987 covered
+= btrace_call_prev (&begin
, context
- covered
);
988 covered
+= btrace_call_next (&end
, context
- covered
);
992 covered
= btrace_call_next (&end
, context
);
993 covered
+= btrace_call_prev (&begin
, context
- covered
);
998 begin
= history
->begin
;
1001 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", flags
, size
,
1002 btrace_call_number (&begin
), btrace_call_number (&end
));
1007 covered
= btrace_call_prev (&begin
, context
);
1012 covered
= btrace_call_next (&end
, context
);
1017 btrace_call_history (uiout
, btinfo
, &begin
, &end
, flags
);
1021 printf_unfiltered (_("At the start of the branch trace record.\n"));
1023 printf_unfiltered (_("At the end of the branch trace record.\n"));
1026 btrace_set_call_history (btinfo
, &begin
, &end
);
1027 do_cleanups (uiout_cleanup
);
1030 /* The to_call_history_range method of target record-btrace. */
1033 record_btrace_call_history_range (struct target_ops
*self
,
1034 ULONGEST from
, ULONGEST to
, int flags
)
1036 struct btrace_thread_info
*btinfo
;
1037 struct btrace_call_history
*history
;
1038 struct btrace_call_iterator begin
, end
;
1039 struct cleanup
*uiout_cleanup
;
1040 struct ui_out
*uiout
;
1041 unsigned int low
, high
;
1044 uiout
= current_uiout
;
1045 uiout_cleanup
= make_cleanup_ui_out_tuple_begin_end (uiout
,
1050 DEBUG ("call-history (0x%x): [%u; %u)", flags
, low
, high
);
1052 /* Check for wrap-arounds. */
1053 if (low
!= from
|| high
!= to
)
1054 error (_("Bad range."));
1057 error (_("Bad range."));
1059 btinfo
= require_btrace ();
1061 found
= btrace_find_call_by_number (&begin
, btinfo
, low
);
1063 error (_("Range out of bounds."));
1065 found
= btrace_find_call_by_number (&end
, btinfo
, high
);
1068 /* Silently truncate the range. */
1069 btrace_call_end (&end
, btinfo
);
1073 /* We want both begin and end to be inclusive. */
1074 btrace_call_next (&end
, 1);
1077 btrace_call_history (uiout
, btinfo
, &begin
, &end
, flags
);
1078 btrace_set_call_history (btinfo
, &begin
, &end
);
1080 do_cleanups (uiout_cleanup
);
1083 /* The to_call_history_from method of target record-btrace. */
1086 record_btrace_call_history_from (struct target_ops
*self
,
1087 ULONGEST from
, int size
, int flags
)
1089 ULONGEST begin
, end
, context
;
1091 context
= abs (size
);
1093 error (_("Bad record function-call-history-size."));
1102 begin
= from
- context
+ 1;
1107 end
= from
+ context
- 1;
1109 /* Check for wrap-around. */
1114 record_btrace_call_history_range (self
, begin
, end
, flags
);
1117 /* The to_record_is_replaying method of target record-btrace. */
1120 record_btrace_is_replaying (struct target_ops
*self
)
1122 struct thread_info
*tp
;
1124 ALL_NON_EXITED_THREADS (tp
)
1125 if (btrace_is_replaying (tp
))
1131 /* The to_xfer_partial method of target record-btrace. */
1133 static enum target_xfer_status
1134 record_btrace_xfer_partial (struct target_ops
*ops
, enum target_object object
,
1135 const char *annex
, gdb_byte
*readbuf
,
1136 const gdb_byte
*writebuf
, ULONGEST offset
,
1137 ULONGEST len
, ULONGEST
*xfered_len
)
1139 struct target_ops
*t
;
1141 /* Filter out requests that don't make sense during replay. */
1142 if (replay_memory_access
== replay_memory_access_read_only
1143 && !record_btrace_generating_corefile
1144 && record_btrace_is_replaying (ops
))
1148 case TARGET_OBJECT_MEMORY
:
1150 struct target_section
*section
;
1152 /* We do not allow writing memory in general. */
1153 if (writebuf
!= NULL
)
1156 return TARGET_XFER_UNAVAILABLE
;
1159 /* We allow reading readonly memory. */
1160 section
= target_section_by_addr (ops
, offset
);
1161 if (section
!= NULL
)
1163 /* Check if the section we found is readonly. */
1164 if ((bfd_get_section_flags (section
->the_bfd_section
->owner
,
1165 section
->the_bfd_section
)
1166 & SEC_READONLY
) != 0)
1168 /* Truncate the request to fit into this section. */
1169 len
= min (len
, section
->endaddr
- offset
);
1175 return TARGET_XFER_UNAVAILABLE
;
1180 /* Forward the request. */
1182 return ops
->to_xfer_partial (ops
, object
, annex
, readbuf
, writebuf
,
1183 offset
, len
, xfered_len
);
1186 /* The to_insert_breakpoint method of target record-btrace. */
1189 record_btrace_insert_breakpoint (struct target_ops
*ops
,
1190 struct gdbarch
*gdbarch
,
1191 struct bp_target_info
*bp_tgt
)
1196 /* Inserting breakpoints requires accessing memory. Allow it for the
1197 duration of this function. */
1198 old
= replay_memory_access
;
1199 replay_memory_access
= replay_memory_access_read_write
;
1204 ret
= ops
->beneath
->to_insert_breakpoint (ops
->beneath
, gdbarch
, bp_tgt
);
1206 CATCH (except
, RETURN_MASK_ALL
)
1208 replay_memory_access
= old
;
1209 throw_exception (except
);
1212 replay_memory_access
= old
;
1217 /* The to_remove_breakpoint method of target record-btrace. */
1220 record_btrace_remove_breakpoint (struct target_ops
*ops
,
1221 struct gdbarch
*gdbarch
,
1222 struct bp_target_info
*bp_tgt
)
1227 /* Removing breakpoints requires accessing memory. Allow it for the
1228 duration of this function. */
1229 old
= replay_memory_access
;
1230 replay_memory_access
= replay_memory_access_read_write
;
1235 ret
= ops
->beneath
->to_remove_breakpoint (ops
->beneath
, gdbarch
, bp_tgt
);
1237 CATCH (except
, RETURN_MASK_ALL
)
1239 replay_memory_access
= old
;
1240 throw_exception (except
);
1243 replay_memory_access
= old
;
1248 /* The to_fetch_registers method of target record-btrace. */
1251 record_btrace_fetch_registers (struct target_ops
*ops
,
1252 struct regcache
*regcache
, int regno
)
1254 struct btrace_insn_iterator
*replay
;
1255 struct thread_info
*tp
;
1257 tp
= find_thread_ptid (inferior_ptid
);
1258 gdb_assert (tp
!= NULL
);
1260 replay
= tp
->btrace
.replay
;
1261 if (replay
!= NULL
&& !record_btrace_generating_corefile
)
1263 const struct btrace_insn
*insn
;
1264 struct gdbarch
*gdbarch
;
1267 gdbarch
= get_regcache_arch (regcache
);
1268 pcreg
= gdbarch_pc_regnum (gdbarch
);
1272 /* We can only provide the PC register. */
1273 if (regno
>= 0 && regno
!= pcreg
)
1276 insn
= btrace_insn_get (replay
);
1277 gdb_assert (insn
!= NULL
);
1279 regcache_raw_supply (regcache
, regno
, &insn
->pc
);
1283 struct target_ops
*t
= ops
->beneath
;
1285 t
->to_fetch_registers (t
, regcache
, regno
);
1289 /* The to_store_registers method of target record-btrace. */
1292 record_btrace_store_registers (struct target_ops
*ops
,
1293 struct regcache
*regcache
, int regno
)
1295 struct target_ops
*t
;
1297 if (!record_btrace_generating_corefile
&& record_btrace_is_replaying (ops
))
1298 error (_("This record target does not allow writing registers."));
1300 gdb_assert (may_write_registers
!= 0);
1303 t
->to_store_registers (t
, regcache
, regno
);
1306 /* The to_prepare_to_store method of target record-btrace. */
1309 record_btrace_prepare_to_store (struct target_ops
*ops
,
1310 struct regcache
*regcache
)
1312 struct target_ops
*t
;
1314 if (!record_btrace_generating_corefile
&& record_btrace_is_replaying (ops
))
1318 t
->to_prepare_to_store (t
, regcache
);
1321 /* The branch trace frame cache. */
1323 struct btrace_frame_cache
1326 struct thread_info
*tp
;
1328 /* The frame info. */
1329 struct frame_info
*frame
;
1331 /* The branch trace function segment. */
1332 const struct btrace_function
*bfun
;
1335 /* A struct btrace_frame_cache hash table indexed by NEXT. */
1337 static htab_t bfcache
;
1339 /* hash_f for htab_create_alloc of bfcache. */
1342 bfcache_hash (const void *arg
)
1344 const struct btrace_frame_cache
*cache
= arg
;
1346 return htab_hash_pointer (cache
->frame
);
1349 /* eq_f for htab_create_alloc of bfcache. */
1352 bfcache_eq (const void *arg1
, const void *arg2
)
1354 const struct btrace_frame_cache
*cache1
= arg1
;
1355 const struct btrace_frame_cache
*cache2
= arg2
;
1357 return cache1
->frame
== cache2
->frame
;
1360 /* Create a new btrace frame cache. */
1362 static struct btrace_frame_cache
*
1363 bfcache_new (struct frame_info
*frame
)
1365 struct btrace_frame_cache
*cache
;
1368 cache
= FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache
);
1369 cache
->frame
= frame
;
1371 slot
= htab_find_slot (bfcache
, cache
, INSERT
);
1372 gdb_assert (*slot
== NULL
);
1378 /* Extract the branch trace function from a branch trace frame. */
1380 static const struct btrace_function
*
1381 btrace_get_frame_function (struct frame_info
*frame
)
1383 const struct btrace_frame_cache
*cache
;
1384 const struct btrace_function
*bfun
;
1385 struct btrace_frame_cache pattern
;
1388 pattern
.frame
= frame
;
1390 slot
= htab_find_slot (bfcache
, &pattern
, NO_INSERT
);
1398 /* Implement stop_reason method for record_btrace_frame_unwind. */
1400 static enum unwind_stop_reason
1401 record_btrace_frame_unwind_stop_reason (struct frame_info
*this_frame
,
1404 const struct btrace_frame_cache
*cache
;
1405 const struct btrace_function
*bfun
;
1407 cache
= *this_cache
;
1409 gdb_assert (bfun
!= NULL
);
1411 if (bfun
->up
== NULL
)
1412 return UNWIND_UNAVAILABLE
;
1414 return UNWIND_NO_REASON
;
1417 /* Implement this_id method for record_btrace_frame_unwind. */
1420 record_btrace_frame_this_id (struct frame_info
*this_frame
, void **this_cache
,
1421 struct frame_id
*this_id
)
1423 const struct btrace_frame_cache
*cache
;
1424 const struct btrace_function
*bfun
;
1425 CORE_ADDR code
, special
;
1427 cache
= *this_cache
;
1430 gdb_assert (bfun
!= NULL
);
1432 while (bfun
->segment
.prev
!= NULL
)
1433 bfun
= bfun
->segment
.prev
;
1435 code
= get_frame_func (this_frame
);
1436 special
= bfun
->number
;
1438 *this_id
= frame_id_build_unavailable_stack_special (code
, special
);
1440 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1441 btrace_get_bfun_name (cache
->bfun
),
1442 core_addr_to_string_nz (this_id
->code_addr
),
1443 core_addr_to_string_nz (this_id
->special_addr
));
1446 /* Implement prev_register method for record_btrace_frame_unwind. */
1448 static struct value
*
1449 record_btrace_frame_prev_register (struct frame_info
*this_frame
,
1453 const struct btrace_frame_cache
*cache
;
1454 const struct btrace_function
*bfun
, *caller
;
1455 const struct btrace_insn
*insn
;
1456 struct gdbarch
*gdbarch
;
1460 gdbarch
= get_frame_arch (this_frame
);
1461 pcreg
= gdbarch_pc_regnum (gdbarch
);
1462 if (pcreg
< 0 || regnum
!= pcreg
)
1463 throw_error (NOT_AVAILABLE_ERROR
,
1464 _("Registers are not available in btrace record history"));
1466 cache
= *this_cache
;
1468 gdb_assert (bfun
!= NULL
);
1472 throw_error (NOT_AVAILABLE_ERROR
,
1473 _("No caller in btrace record history"));
1475 if ((bfun
->flags
& BFUN_UP_LINKS_TO_RET
) != 0)
1477 insn
= VEC_index (btrace_insn_s
, caller
->insn
, 0);
1482 insn
= VEC_last (btrace_insn_s
, caller
->insn
);
1485 pc
+= gdb_insn_length (gdbarch
, pc
);
1488 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1489 btrace_get_bfun_name (bfun
), bfun
->level
,
1490 core_addr_to_string_nz (pc
));
1492 return frame_unwind_got_address (this_frame
, regnum
, pc
);
1495 /* Implement sniffer method for record_btrace_frame_unwind. */
/* NOTE(review): extraction gap -- conditionals, braces and the final
   "return" lines are missing here; code tokens kept byte-identical.  */
1498 record_btrace_frame_sniffer (const struct frame_unwind
*self
,
1499 struct frame_info
*this_frame
,
1502 const struct btrace_function
*bfun
;
1503 struct btrace_frame_cache
*cache
;
1504 struct thread_info
*tp
;
1505 struct frame_info
*next
;
1507 /* THIS_FRAME does not contain a reference to its thread. */
1508 tp
= find_thread_ptid (inferior_ptid
);
1509 gdb_assert (tp
!= NULL
);
1512 next
= get_next_frame (this_frame
);
/* For the innermost frame, take the function from the replay iterator.  */
1515 const struct btrace_insn_iterator
*replay
;
1517 replay
= tp
->btrace
.replay
;
1519 bfun
= replay
->function
;
/* For outer frames, follow the callee's "up" link unless it is a tail
   call link.  */
1523 const struct btrace_function
*callee
;
1525 callee
= btrace_get_frame_function (next
);
1526 if (callee
!= NULL
&& (callee
->flags
& BFUN_UP_LINKS_TO_TAILCALL
) == 0)
1533 DEBUG ("[frame] sniffed frame for %s on level %d",
1534 btrace_get_bfun_name (bfun
), bfun
->level
);
1536 /* This is our frame. Initialize the frame cache. */
1537 cache
= bfcache_new (this_frame
);
1541 *this_cache
= cache
;
1545 /* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
/* NOTE(review): extraction gap -- early-return conditionals and braces
   are missing here; code tokens kept byte-identical.  */
1548 record_btrace_tailcall_frame_sniffer (const struct frame_unwind
*self
,
1549 struct frame_info
*this_frame
,
1552 const struct btrace_function
*bfun
, *callee
;
1553 struct btrace_frame_cache
*cache
;
1554 struct frame_info
*next
;
1556 next
= get_next_frame (this_frame
);
1560 callee
= btrace_get_frame_function (next
);
/* Only claim this frame if the callee's "up" link marks a tail call.  */
1564 if ((callee
->flags
& BFUN_UP_LINKS_TO_TAILCALL
) == 0)
1571 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1572 btrace_get_bfun_name (bfun
), bfun
->level
);
1574 /* This is our frame. Initialize the frame cache. */
1575 cache
= bfcache_new (this_frame
);
1576 cache
->tp
= find_thread_ptid (inferior_ptid
);
1579 *this_cache
= cache
;
/* Implement the dealloc_cache method for record_btrace_frame_unwind:
   remove THIS_CACHE from the global BFCACHE hash table.  */
1584 record_btrace_frame_dealloc_cache (struct frame_info
*self
, void *this_cache
)
1586 struct btrace_frame_cache
*cache
;
/* The cache must have been registered by the sniffer via bfcache_new.  */
1591 slot
= htab_find_slot (bfcache
, cache
, NO_INSERT
);
1592 gdb_assert (slot
!= NULL
);
1594 htab_remove_elt (bfcache
, cache
);
1597 /* btrace recording does not store previous memory content, neither the stack
1598 frames content. Any unwinding would return erroneous results as the stack
1599 contents no longer matches the changed PC value restored from history.
1600 Therefore this unwinder reports any possibly unwound registers as
   <unavailable>. */
1603 const struct frame_unwind record_btrace_frame_unwind
=
1606 record_btrace_frame_unwind_stop_reason
,
1607 record_btrace_frame_this_id
,
1608 record_btrace_frame_prev_register
,
1610 record_btrace_frame_sniffer
,
1611 record_btrace_frame_dealloc_cache
/* The tail-call variant of the btrace unwinder: it shares the stop-reason,
   this-id, prev-register and dealloc callbacks above and differs only in
   its sniffer.  */
1614 const struct frame_unwind record_btrace_tailcall_frame_unwind
=
1617 record_btrace_frame_unwind_stop_reason
,
1618 record_btrace_frame_this_id
,
1619 record_btrace_frame_prev_register
,
1621 record_btrace_tailcall_frame_sniffer
,
1622 record_btrace_frame_dealloc_cache
1625 /* Implement the to_get_unwinder method. */
/* Returns the btrace frame unwinder defined above.  */
1627 static const struct frame_unwind
*
1628 record_btrace_to_get_unwinder (struct target_ops
*self
)
1630 return &record_btrace_frame_unwind
;
1633 /* Implement the to_get_tailcall_unwinder method. */
/* Returns the tail-call variant of the btrace frame unwinder.  */
1635 static const struct frame_unwind
*
1636 record_btrace_to_get_tailcall_unwinder (struct target_ops
*self
)
1638 return &record_btrace_tailcall_frame_unwind
;
1641 /* Indicate that TP should be resumed according to FLAG. */
/* Only records the intent by OR-ing FLAG into TP's btrace flags; the
   actual move happens later in record_btrace_wait.  A second request
   while BTHR_MOVE is still set is rejected.  */
1644 record_btrace_resume_thread (struct thread_info
*tp
,
1645 enum btrace_thread_flag flag
)
1647 struct btrace_thread_info
*btinfo
;
1649 DEBUG ("resuming %d (%s): %u", tp
->num
, target_pid_to_str (tp
->ptid
), flag
);
1651 btinfo
= &tp
->btrace
;
1653 if ((btinfo
->flags
& BTHR_MOVE
) != 0)
1654 error (_("Thread already moving."));
1656 /* Fetch the latest branch trace. */
1659 btinfo
->flags
|= flag
;
1662 /* Find the thread to resume given a PTID. */
/* Returns the thread_info for PTID, or for the current thread when PTID
   means "everything" (minus_one_ptid or a bare process id).  May return
   NULL if no such thread exists.  */
1664 static struct thread_info
*
1665 record_btrace_find_resume_thread (ptid_t ptid
)
1667 struct thread_info
*tp
;
1669 /* When asked to resume everything, we pick the current thread. */
1670 if (ptid_equal (minus_one_ptid
, ptid
) || ptid_is_pid (ptid
))
1671 ptid
= inferior_ptid
;
1673 return find_thread_ptid (ptid
);
1676 /* Start replaying a thread. */
/* NOTE(review): extraction gap -- the TRY block opener, some declarations
   and early-return lines are missing in this chunk; code tokens kept
   byte-identical.  Allocates the replay iterator, positions it at the end
   of the trace, and fixes up stepping-related frame ids; on error the
   previous state is restored and the exception re-thrown.  */
1678 static struct btrace_insn_iterator
*
1679 record_btrace_start_replaying (struct thread_info
*tp
)
1681 struct btrace_insn_iterator
*replay
;
1682 struct btrace_thread_info
*btinfo
;
1685 btinfo
= &tp
->btrace
;
1688 /* We can't start replaying without trace. */
1689 if (btinfo
->begin
== NULL
)
1692 /* Clear the executing flag to allow changes to the current frame.
1693 We are not actually running, yet. We just started a reverse execution
1694 command or a record goto command.
1695 For the latter, EXECUTING is false and this has no effect.
1696 For the former, EXECUTING is true and we're in to_wait, about to
1697 move the thread. Since we need to recompute the stack, we temporarily
1698 set EXECUTING to false. */
1699 executing
= is_executing (tp
->ptid
);
1700 set_executing (tp
->ptid
, 0);
1702 /* GDB stores the current frame_id when stepping in order to detect steps
1704 Since frames are computed differently when we're replaying, we need to
1705 recompute those stored frames and fix them up so we can still detect
1706 subroutines after we started replaying. */
1709 struct frame_info
*frame
;
1710 struct frame_id frame_id
;
1711 int upd_step_frame_id
, upd_step_stack_frame_id
;
1713 /* The current frame without replaying - computed via normal unwind. */
1714 frame
= get_current_frame ();
1715 frame_id
= get_frame_id (frame
);
1717 /* Check if we need to update any stepping-related frame id's. */
1718 upd_step_frame_id
= frame_id_eq (frame_id
,
1719 tp
->control
.step_frame_id
);
1720 upd_step_stack_frame_id
= frame_id_eq (frame_id
,
1721 tp
->control
.step_stack_frame_id
);
1723 /* We start replaying at the end of the branch trace. This corresponds
1724 to the current instruction. */
1725 replay
= xmalloc (sizeof (*replay
));
1726 btrace_insn_end (replay
, btinfo
);
1728 /* Skip gaps at the end of the trace. */
1729 while (btrace_insn_get (replay
) == NULL
)
1733 steps
= btrace_insn_prev (replay
, 1);
1735 error (_("No trace."));
1738 /* We're not replaying, yet. */
1739 gdb_assert (btinfo
->replay
== NULL
);
1740 btinfo
->replay
= replay
;
1742 /* Make sure we're not using any stale registers. */
1743 registers_changed_ptid (tp
->ptid
);
1745 /* The current frame with replaying - computed via btrace unwind. */
1746 frame
= get_current_frame ();
1747 frame_id
= get_frame_id (frame
);
1749 /* Replace stepping related frames where necessary. */
1750 if (upd_step_frame_id
)
1751 tp
->control
.step_frame_id
= frame_id
;
1752 if (upd_step_stack_frame_id
)
1753 tp
->control
.step_stack_frame_id
= frame_id
;
/* On any error, undo the partial state change before re-throwing.  */
1755 CATCH (except
, RETURN_MASK_ALL
)
1757 /* Restore the previous execution state. */
1758 set_executing (tp
->ptid
, executing
);
1760 xfree (btinfo
->replay
);
1761 btinfo
->replay
= NULL
;
1763 registers_changed_ptid (tp
->ptid
);
1765 throw_exception (except
);
1769 /* Restore the previous execution state. */
1770 set_executing (tp
->ptid
, executing
);
1775 /* Stop replaying a thread. */
/* Frees and clears TP's replay iterator; a NULL replay iterator is what
   marks the thread as not replaying.  */
1778 record_btrace_stop_replaying (struct thread_info
*tp
)
1780 struct btrace_thread_info
*btinfo
;
1782 btinfo
= &tp
->btrace
;
1784 xfree (btinfo
->replay
);
1785 btinfo
->replay
= NULL
;
1787 /* Make sure we're not leaving any stale registers. */
1788 registers_changed_ptid (tp
->ptid
);
1791 /* The to_resume method of target record-btrace. */
/* NOTE(review): extraction gap -- the NULL-thread check, the step/cont
   branch around the two FLAG assignments, and the beneath-target
   forwarding lines are partially missing; code tokens kept
   byte-identical.  */
1794 record_btrace_resume (struct target_ops
*ops
, ptid_t ptid
, int step
,
1795 enum gdb_signal signal
)
1797 struct thread_info
*tp
, *other
;
1798 enum btrace_thread_flag flag
;
1800 DEBUG ("resume %s: %s", target_pid_to_str (ptid
), step
? "step" : "cont");
1802 /* Store the execution direction of the last resume. */
1803 record_btrace_resume_exec_dir
= execution_direction
;
1805 tp
= record_btrace_find_resume_thread (ptid
);
1807 error (_("Cannot find thread to resume."));
1809 /* Stop replaying other threads if the thread to resume is not replaying. */
1810 if (!btrace_is_replaying (tp
) && execution_direction
!= EXEC_REVERSE
)
1811 ALL_NON_EXITED_THREADS (other
)
1812 record_btrace_stop_replaying (other
);
1814 /* As long as we're not replaying, just forward the request. */
1815 if (!record_btrace_is_replaying (ops
) && execution_direction
!= EXEC_REVERSE
)
1818 return ops
->to_resume (ops
, ptid
, step
, signal
);
1821 /* Compute the btrace thread flag for the requested move. */
1823 flag
= execution_direction
== EXEC_REVERSE
? BTHR_RCONT
: BTHR_CONT
;
1825 flag
= execution_direction
== EXEC_REVERSE
? BTHR_RSTEP
: BTHR_STEP
;
1827 /* At the moment, we only move a single thread. We could also move
1828 all threads in parallel by single-stepping each resumed thread
1829 until the first runs into an event.
1830 When we do that, we would want to continue all other threads.
1831 For now, just resume one thread to not confuse to_wait. */
1832 record_btrace_resume_thread (tp
, flag
);
1834 /* We just indicate the resume intent here. The actual stepping happens in
1835 record_btrace_wait below. */
1837 /* Async support. */
1838 if (target_can_async_p ())
1841 mark_async_event_handler (record_btrace_async_inferior_event_handler
);
1845 /* Find a thread to move. */
/* Returns a thread whose BTHR_MOVE flag is set: PTID's thread if it is
   marked, otherwise any other non-exited marked thread.  */
1847 static struct thread_info
*
1848 record_btrace_find_thread_to_move (ptid_t ptid
)
1850 struct thread_info
*tp
;
1852 /* First check the parameter thread. */
1853 tp
= find_thread_ptid (ptid
);
1854 if (tp
!= NULL
&& (tp
->btrace
.flags
& BTHR_MOVE
) != 0)
1857 /* Otherwise, find one other thread that has been resumed. */
1858 ALL_NON_EXITED_THREADS (tp
)
1859 if ((tp
->btrace
.flags
& BTHR_MOVE
) != 0)
1865 /* Return a target_waitstatus indicating that we ran out of history. */
1867 static struct target_waitstatus
1868 btrace_step_no_history (void)
1870 struct target_waitstatus status
;
1872 status
.kind
= TARGET_WAITKIND_NO_HISTORY
;
1877 /* Return a target_waitstatus indicating that a step finished. */
/* Reports a SIGTRAP stop, the conventional "step completed" status.  */
1879 static struct target_waitstatus
1880 btrace_step_stopped (void)
1882 struct target_waitstatus status
;
1884 status
.kind
= TARGET_WAITKIND_STOPPED
;
1885 status
.value
.sig
= GDB_SIGNAL_TRAP
;
1890 /* Clear the record histories. */
/* Frees and NULLs the cached instruction and call histories so they are
   rebuilt from the current replay position.  */
1893 record_btrace_clear_histories (struct btrace_thread_info
*btinfo
)
1895 xfree (btinfo
->insn_history
);
1896 xfree (btinfo
->call_history
);
1898 btinfo
->insn_history
= NULL
;
1899 btinfo
->call_history
= NULL
;
1902 /* Step a single thread. */
/* NOTE(review): extraction gap -- the switch over FLAGS (BTHR_STEP,
   BTHR_RSTEP, BTHR_CONT, BTHR_RCONT cases), do/while openers, braces and
   several duplicated statements are missing from this chunk; code tokens
   kept byte-identical.  The visible logic: fetch and clear the BTHR_MOVE
   request, then step or continue forward/backward through the trace,
   skipping gaps, stopping replay at the trace end and checking for
   breakpoint hits via record_check_stopped_by_breakpoint.  */
1904 static struct target_waitstatus
1905 record_btrace_step_thread (struct thread_info
*tp
)
1907 struct btrace_insn_iterator
*replay
, end
;
1908 struct btrace_thread_info
*btinfo
;
1909 struct address_space
*aspace
;
1910 struct inferior
*inf
;
1911 enum btrace_thread_flag flags
;
1914 /* We can't step without an execution history. */
1915 if (btrace_is_empty (tp
))
1916 return btrace_step_no_history ();
1918 btinfo
= &tp
->btrace
;
1919 replay
= btinfo
->replay
;
/* Consume the pending move request.  */
1921 flags
= btinfo
->flags
& BTHR_MOVE
;
1922 btinfo
->flags
&= ~BTHR_MOVE
;
1924 DEBUG ("stepping %d (%s): %u", tp
->num
, target_pid_to_str (tp
->ptid
), flags
);
1929 internal_error (__FILE__
, __LINE__
, _("invalid stepping type."));
1932 /* We're done if we're not replaying. */
1934 return btrace_step_no_history ();
1936 /* Skip gaps during replay. */
1939 steps
= btrace_insn_next (replay
, 1);
1942 record_btrace_stop_replaying (tp
);
1943 return btrace_step_no_history ();
1946 while (btrace_insn_get (replay
) == NULL
);
1948 /* Determine the end of the instruction trace. */
1949 btrace_insn_end (&end
, btinfo
);
1951 /* We stop replaying if we reached the end of the trace. */
1952 if (btrace_insn_cmp (replay
, &end
) == 0)
1953 record_btrace_stop_replaying (tp
);
1955 return btrace_step_stopped ();
1958 /* Start replaying if we're not already doing so. */
1960 replay
= record_btrace_start_replaying (tp
);
1962 /* If we can't step any further, we reached the end of the history.
1963 Skip gaps during replay. */
1966 steps
= btrace_insn_prev (replay
, 1);
1968 return btrace_step_no_history ();
1971 while (btrace_insn_get (replay
) == NULL
);
1973 return btrace_step_stopped ();
1976 /* We're done if we're not replaying. */
1978 return btrace_step_no_history ();
/* Forward continue: need the address space for breakpoint checks.  */
1980 inf
= find_inferior_ptid (tp
->ptid
);
1981 aspace
= inf
->aspace
;
1983 /* Determine the end of the instruction trace. */
1984 btrace_insn_end (&end
, btinfo
);
1988 const struct btrace_insn
*insn
;
1990 /* Skip gaps during replay. */
1993 steps
= btrace_insn_next (replay
, 1);
1996 record_btrace_stop_replaying (tp
);
1997 return btrace_step_no_history ();
2000 insn
= btrace_insn_get (replay
);
2002 while (insn
== NULL
);
2004 /* We stop replaying if we reached the end of the trace. */
2005 if (btrace_insn_cmp (replay
, &end
) == 0)
2007 record_btrace_stop_replaying (tp
);
2008 return btrace_step_no_history ();
2011 DEBUG ("stepping %d (%s) ... %s", tp
->num
,
2012 target_pid_to_str (tp
->ptid
),
2013 core_addr_to_string_nz (insn
->pc
));
/* Stop if the replayed instruction hits a breakpoint.  */
2015 if (record_check_stopped_by_breakpoint (aspace
, insn
->pc
,
2016 &btinfo
->stop_reason
))
2017 return btrace_step_stopped ();
/* Reverse continue.  */
2021 /* Start replaying if we're not already doing so. */
2023 replay
= record_btrace_start_replaying (tp
);
2025 inf
= find_inferior_ptid (tp
->ptid
);
2026 aspace
= inf
->aspace
;
2030 const struct btrace_insn
*insn
;
2032 /* If we can't step any further, we reached the end of the history.
2033 Skip gaps during replay. */
2036 steps
= btrace_insn_prev (replay
, 1);
2038 return btrace_step_no_history ();
2040 insn
= btrace_insn_get (replay
);
2042 while (insn
== NULL
);
2044 DEBUG ("reverse-stepping %d (%s) ... %s", tp
->num
,
2045 target_pid_to_str (tp
->ptid
),
2046 core_addr_to_string_nz (insn
->pc
));
2048 if (record_check_stopped_by_breakpoint (aspace
, insn
->pc
,
2049 &btinfo
->stop_reason
))
2050 return btrace_step_stopped ();
2055 /* The to_wait method of target record-btrace. */
/* NOTE(review): extraction gap -- the NULL-thread check, braces and the
   final "return tp->ptid;" are missing from this chunk; code tokens kept
   byte-identical.  */
2058 record_btrace_wait (struct target_ops
*ops
, ptid_t ptid
,
2059 struct target_waitstatus
*status
, int options
)
2061 struct thread_info
*tp
, *other
;
2063 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid
), options
);
2065 /* As long as we're not replaying, just forward the request. */
2066 if (!record_btrace_is_replaying (ops
) && execution_direction
!= EXEC_REVERSE
)
2069 return ops
->to_wait (ops
, ptid
, status
, options
);
2072 /* Let's find a thread to move. */
2073 tp
= record_btrace_find_thread_to_move (ptid
);
/* No resumed thread: report "ignore" so the caller keeps waiting.  */
2076 DEBUG ("wait %s: no thread", target_pid_to_str (ptid
));
2078 status
->kind
= TARGET_WAITKIND_IGNORE
;
2079 return minus_one_ptid
;
2082 /* We only move a single thread. We're not able to correlate threads. */
2083 *status
= record_btrace_step_thread (tp
);
2085 /* Stop all other threads. */
2087 ALL_NON_EXITED_THREADS (other
)
2088 other
->btrace
.flags
&= ~BTHR_MOVE
;
2090 /* Start record histories anew from the current position. */
2091 record_btrace_clear_histories (&tp
->btrace
);
2093 /* We moved the replay position but did not update registers. */
2094 registers_changed_ptid (tp
->ptid
);
2099 /* The to_can_execute_reverse method of target record-btrace. */
/* NOTE(review): extraction gap -- the return type line and body are
   missing from this chunk; signature kept byte-identical.  */
2102 record_btrace_can_execute_reverse (struct target_ops
*self
)
2107 /* The to_stopped_by_sw_breakpoint method of target record-btrace. */
/* While replaying, answer from the recorded stop reason; otherwise
   forward to the target beneath.  */
2110 record_btrace_stopped_by_sw_breakpoint (struct target_ops
*ops
)
2112 if (record_btrace_is_replaying (ops
))
2114 struct thread_info
*tp
= inferior_thread ();
2116 return tp
->btrace
.stop_reason
== TARGET_STOPPED_BY_SW_BREAKPOINT
;
2119 return ops
->beneath
->to_stopped_by_sw_breakpoint (ops
->beneath
);
2122 /* The to_supports_stopped_by_sw_breakpoint method of target
   record-btrace. */
2126 record_btrace_supports_stopped_by_sw_breakpoint (struct target_ops
*ops
)
2128 if (record_btrace_is_replaying (ops
))
2131 return ops
->beneath
->to_supports_stopped_by_sw_breakpoint (ops
->beneath
);
2134 /* The to_stopped_by_hw_breakpoint method of target record-btrace. */
/* While replaying, answer from the recorded stop reason; otherwise
   forward to the target beneath.  */
2137 record_btrace_stopped_by_hw_breakpoint (struct target_ops
*ops
)
2139 if (record_btrace_is_replaying (ops
))
2141 struct thread_info
*tp
= inferior_thread ();
2143 return tp
->btrace
.stop_reason
== TARGET_STOPPED_BY_HW_BREAKPOINT
;
2146 return ops
->beneath
->to_stopped_by_hw_breakpoint (ops
->beneath
);
2149 /* The to_supports_stopped_by_hw_breakpoint method of target
   record-btrace. */
2153 record_btrace_supports_stopped_by_hw_breakpoint (struct target_ops
*ops
)
2155 if (record_btrace_is_replaying (ops
))
2158 return ops
->beneath
->to_supports_stopped_by_hw_breakpoint (ops
->beneath
);
2161 /* The to_update_thread_list method of target record-btrace. */
2164 record_btrace_update_thread_list (struct target_ops
*ops
)
2166 /* We don't add or remove threads during replay. */
2167 if (record_btrace_is_replaying (ops
))
2170 /* Forward the request. */
2172 ops
->to_update_thread_list (ops
);
2175 /* The to_thread_alive method of target record-btrace. */
2178 record_btrace_thread_alive (struct target_ops
*ops
, ptid_t ptid
)
2180 /* We don't add or remove threads during replay. */
2181 if (record_btrace_is_replaying (ops
))
2182 return find_thread_ptid (ptid
) != NULL
;
2184 /* Forward the request. */
2186 return ops
->to_thread_alive (ops
, ptid
);
2189 /* Set the replay branch trace instruction iterator. If IT is NULL, replay
   stops. */
/* NOTE(review): extraction gap -- braces and an else-branch line are
   missing in this chunk; code tokens kept byte-identical.  */
2193 record_btrace_set_replay (struct thread_info
*tp
,
2194 const struct btrace_insn_iterator
*it
)
2196 struct btrace_thread_info
*btinfo
;
2198 btinfo
= &tp
->btrace
;
/* NULL (or an iterator with no function) means "stop replaying".  */
2200 if (it
== NULL
|| it
->function
== NULL
)
2201 record_btrace_stop_replaying (tp
);
2204 if (btinfo
->replay
== NULL
)
2205 record_btrace_start_replaying (tp
);
/* Nothing to do if we're already at the requested position.  */
2206 else if (btrace_insn_cmp (btinfo
->replay
, it
) == 0)
2209 *btinfo
->replay
= *it
;
2210 registers_changed_ptid (tp
->ptid
);
2213 /* Start anew from the new replay position. */
2214 record_btrace_clear_histories (btinfo
);
2216 stop_pc
= regcache_read_pc (get_current_regcache ());
2217 print_stack_frame (get_selected_frame (NULL
), 1, SRC_AND_LOC
, 1);
2220 /* The to_goto_record_begin method of target record-btrace. */
/* Moves the replay position to the first instruction in the trace.  */
2223 record_btrace_goto_begin (struct target_ops
*self
)
2225 struct thread_info
*tp
;
2226 struct btrace_insn_iterator begin
;
2228 tp
= require_btrace_thread ();
2230 btrace_insn_begin (&begin
, &tp
->btrace
);
2231 record_btrace_set_replay (tp
, &begin
);
2234 /* The to_goto_record_end method of target record-btrace. */
/* Passing NULL to record_btrace_set_replay stops replaying, which puts
   the thread back at the end of the trace.  */
2237 record_btrace_goto_end (struct target_ops
*ops
)
2239 struct thread_info
*tp
;
2241 tp
= require_btrace_thread ();
2243 record_btrace_set_replay (tp
, NULL
);
2246 /* The to_goto_record method of target record-btrace. */
/* NOTE(review): extraction gap -- the INSN-to-NUMBER narrowing and the
   FOUND check lines are missing in this chunk; code tokens kept
   byte-identical.  Moves the replay position to instruction INSN.  */
2249 record_btrace_goto (struct target_ops
*self
, ULONGEST insn
)
2251 struct thread_info
*tp
;
2252 struct btrace_insn_iterator it
;
2253 unsigned int number
;
2258 /* Check for wrap-arounds. */
2260 error (_("Instruction number out of range."));
2262 tp
= require_btrace_thread ();
2264 found
= btrace_find_insn_by_number (&it
, &tp
->btrace
, number
);
2266 error (_("No such instruction."));
2268 record_btrace_set_replay (tp
, &it
);
2271 /* The to_execution_direction target method. */
/* Reports the direction stored by the last record_btrace_resume call.  */
2273 static enum exec_direction_kind
2274 record_btrace_execution_direction (struct target_ops
*self
)
2276 return record_btrace_resume_exec_dir
;
2279 /* The to_prepare_to_generate_core target method. */
/* Sets the flag that marks core-file generation as in progress.  */
2282 record_btrace_prepare_to_generate_core (struct target_ops
*self
)
2284 record_btrace_generating_corefile
= 1;
2287 /* The to_done_generating_core target method. */
/* Clears the core-file-generation-in-progress flag.  */
2290 record_btrace_done_generating_core (struct target_ops
*self
)
2292 record_btrace_generating_corefile
= 0;
2295 /* Initialize the record-btrace target ops. */
/* Fills in the global record_btrace_ops target vector with the methods
   defined in this file (plus the shared record_* helpers).  */
2298 init_record_btrace_ops (void)
2300 struct target_ops
*ops
;
2302 ops
= &record_btrace_ops
;
2303 ops
->to_shortname
= "record-btrace";
2304 ops
->to_longname
= "Branch tracing target";
2305 ops
->to_doc
= "Collect control-flow trace and provide the execution history.";
2306 ops
->to_open
= record_btrace_open
;
2307 ops
->to_close
= record_btrace_close
;
2308 ops
->to_async
= record_btrace_async
;
2309 ops
->to_detach
= record_detach
;
2310 ops
->to_disconnect
= record_disconnect
;
2311 ops
->to_mourn_inferior
= record_mourn_inferior
;
2312 ops
->to_kill
= record_kill
;
2313 ops
->to_stop_recording
= record_btrace_stop_recording
;
2314 ops
->to_info_record
= record_btrace_info
;
2315 ops
->to_insn_history
= record_btrace_insn_history
;
2316 ops
->to_insn_history_from
= record_btrace_insn_history_from
;
2317 ops
->to_insn_history_range
= record_btrace_insn_history_range
;
2318 ops
->to_call_history
= record_btrace_call_history
;
2319 ops
->to_call_history_from
= record_btrace_call_history_from
;
2320 ops
->to_call_history_range
= record_btrace_call_history_range
;
2321 ops
->to_record_is_replaying
= record_btrace_is_replaying
;
2322 ops
->to_xfer_partial
= record_btrace_xfer_partial
;
2323 ops
->to_remove_breakpoint
= record_btrace_remove_breakpoint
;
2324 ops
->to_insert_breakpoint
= record_btrace_insert_breakpoint
;
2325 ops
->to_fetch_registers
= record_btrace_fetch_registers
;
2326 ops
->to_store_registers
= record_btrace_store_registers
;
2327 ops
->to_prepare_to_store
= record_btrace_prepare_to_store
;
2328 ops
->to_get_unwinder
= &record_btrace_to_get_unwinder
;
2329 ops
->to_get_tailcall_unwinder
= &record_btrace_to_get_tailcall_unwinder
;
2330 ops
->to_resume
= record_btrace_resume
;
2331 ops
->to_wait
= record_btrace_wait
;
2332 ops
->to_update_thread_list
= record_btrace_update_thread_list
;
2333 ops
->to_thread_alive
= record_btrace_thread_alive
;
2334 ops
->to_goto_record_begin
= record_btrace_goto_begin
;
2335 ops
->to_goto_record_end
= record_btrace_goto_end
;
2336 ops
->to_goto_record
= record_btrace_goto
;
2337 ops
->to_can_execute_reverse
= record_btrace_can_execute_reverse
;
2338 ops
->to_stopped_by_sw_breakpoint
= record_btrace_stopped_by_sw_breakpoint
;
2339 ops
->to_supports_stopped_by_sw_breakpoint
2340 = record_btrace_supports_stopped_by_sw_breakpoint
;
2341 ops
->to_stopped_by_hw_breakpoint
= record_btrace_stopped_by_hw_breakpoint
;
2342 ops
->to_supports_stopped_by_hw_breakpoint
2343 = record_btrace_supports_stopped_by_hw_breakpoint
;
2344 ops
->to_execution_direction
= record_btrace_execution_direction
;
2345 ops
->to_prepare_to_generate_core
= record_btrace_prepare_to_generate_core
;
2346 ops
->to_done_generating_core
= record_btrace_done_generating_core
;
2347 ops
->to_stratum
= record_stratum
;
2348 ops
->to_magic
= OPS_MAGIC
;
2351 /* Start recording in BTS format. */
/* Sets the BTS format, then opens the record-btrace target; on failure
   the format is reset to NONE and the exception re-thrown.  */
2354 cmd_record_btrace_bts_start (char *args
, int from_tty
)
2356 if (args
!= NULL
&& *args
!= 0)
2357 error (_("Invalid argument."));
2359 record_btrace_conf
.format
= BTRACE_FORMAT_BTS
;
2363 execute_command ("target record-btrace", from_tty
);
2365 CATCH (exception
, RETURN_MASK_ALL
)
2367 record_btrace_conf
.format
= BTRACE_FORMAT_NONE
;
2368 throw_exception (exception
);
2373 /* Start recording Intel(R) Processor Trace. */
/* Sets the PT format, then opens the record-btrace target; on failure
   the format is reset to NONE and the exception re-thrown.  */
2376 cmd_record_btrace_pt_start (char *args
, int from_tty
)
2378 if (args
!= NULL
&& *args
!= 0)
2379 error (_("Invalid argument."));
2381 record_btrace_conf
.format
= BTRACE_FORMAT_PT
;
2385 execute_command ("target record-btrace", from_tty
);
2387 CATCH (exception
, RETURN_MASK_ALL
)
2389 record_btrace_conf
.format
= BTRACE_FORMAT_NONE
;
2390 throw_exception (exception
);
2395 /* Alias for "target record". */
/* Tries PT format first; if opening the target fails, falls back to BTS;
   if that fails too, resets the format to NONE and re-throws.  */
2398 cmd_record_btrace_start (char *args
, int from_tty
)
2400 if (args
!= NULL
&& *args
!= 0)
2401 error (_("Invalid argument."));
2403 record_btrace_conf
.format
= BTRACE_FORMAT_PT
;
2407 execute_command ("target record-btrace", from_tty
);
2409 CATCH (exception
, RETURN_MASK_ALL
)
2411 record_btrace_conf
.format
= BTRACE_FORMAT_BTS
;
2415 execute_command ("target record-btrace", from_tty
);
2417 CATCH (exception
, RETURN_MASK_ALL
)
2419 record_btrace_conf
.format
= BTRACE_FORMAT_NONE
;
2420 throw_exception (exception
);
2427 /* The "set record btrace" command. */
/* With no subcommand, list the current settings.  */
2430 cmd_set_record_btrace (char *args
, int from_tty
)
2432 cmd_show_list (set_record_btrace_cmdlist
, from_tty
, "");
2435 /* The "show record btrace" command. */
2438 cmd_show_record_btrace (char *args
, int from_tty
)
2440 cmd_show_list (show_record_btrace_cmdlist
, from_tty
, "");
2443 /* The "show record btrace replay-memory-access" command. */
2446 cmd_show_replay_memory_access (struct ui_file
*file
, int from_tty
,
2447 struct cmd_list_element
*c
, const char *value
)
2449 fprintf_filtered (gdb_stdout
, _("Replay memory access is %s.\n"),
2450 replay_memory_access
);
2453 /* The "set record btrace bts" command. */
/* A bare "set record btrace bts" only prints usage and the subcommands.  */
2456 cmd_set_record_btrace_bts (char *args
, int from_tty
)
2458 printf_unfiltered (_("\"set record btrace bts\" must be followed "
2459 "by an appropriate subcommand.\n"));
2460 help_list (set_record_btrace_bts_cmdlist
, "set record btrace bts ",
2461 all_commands
, gdb_stdout
);
2464 /* The "show record btrace bts" command. */
2467 cmd_show_record_btrace_bts (char *args
, int from_tty
)
2469 cmd_show_list (show_record_btrace_bts_cmdlist
, from_tty
, "");
2472 /* The "set record btrace pt" command. */
/* A bare "set record btrace pt" only prints usage and the subcommands.  */
2475 cmd_set_record_btrace_pt (char *args
, int from_tty
)
2477 printf_unfiltered (_("\"set record btrace pt\" must be followed "
2478 "by an appropriate subcommand.\n"));
2479 help_list (set_record_btrace_pt_cmdlist
, "set record btrace pt ",
2480 all_commands
, gdb_stdout
);
2483 /* The "show record btrace pt" command. */
2486 cmd_show_record_btrace_pt (char *args
, int from_tty
)
2488 cmd_show_list (show_record_btrace_pt_cmdlist
, from_tty
, "");
2491 /* The "record bts buffer-size" show value function. */
/* NOTE(review): extraction gap -- the VALUE parameter line and the final
   argument of fprintf_filtered are missing from this chunk.  */
2494 show_record_bts_buffer_size_value (struct ui_file
*file
, int from_tty
,
2495 struct cmd_list_element
*c
,
2498 fprintf_filtered (file
, _("The record/replay bts buffer size is %s.\n"),
2502 /* The "record pt buffer-size" show value function. */
/* NOTE(review): extraction gap -- the VALUE parameter line and the final
   argument of fprintf_filtered are missing from this chunk.  */
2505 show_record_pt_buffer_size_value (struct ui_file
*file
, int from_tty
,
2506 struct cmd_list_element
*c
,
2509 fprintf_filtered (file
, _("The record/replay pt buffer size is %s.\n"),
2513 void _initialize_record_btrace (void);
2515 /* Initialize btrace commands. */
2518 _initialize_record_btrace (void)
2520 add_prefix_cmd ("btrace", class_obscure
, cmd_record_btrace_start
,
2521 _("Start branch trace recording."), &record_btrace_cmdlist
,
2522 "record btrace ", 0, &record_cmdlist
);
2523 add_alias_cmd ("b", "btrace", class_obscure
, 1, &record_cmdlist
);
2525 add_cmd ("bts", class_obscure
, cmd_record_btrace_bts_start
,
2527 Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
2528 The processor stores a from/to record for each branch into a cyclic buffer.\n\
2529 This format may not be available on all processors."),
2530 &record_btrace_cmdlist
);
2531 add_alias_cmd ("bts", "btrace bts", class_obscure
, 1, &record_cmdlist
);
2533 add_cmd ("pt", class_obscure
, cmd_record_btrace_pt_start
,
2535 Start branch trace recording in Intel(R) Processor Trace format.\n\n\
2536 This format may not be available on all processors."),
2537 &record_btrace_cmdlist
);
2538 add_alias_cmd ("pt", "btrace pt", class_obscure
, 1, &record_cmdlist
);
2540 add_prefix_cmd ("btrace", class_support
, cmd_set_record_btrace
,
2541 _("Set record options"), &set_record_btrace_cmdlist
,
2542 "set record btrace ", 0, &set_record_cmdlist
);
2544 add_prefix_cmd ("btrace", class_support
, cmd_show_record_btrace
,
2545 _("Show record options"), &show_record_btrace_cmdlist
,
2546 "show record btrace ", 0, &show_record_cmdlist
);
2548 add_setshow_enum_cmd ("replay-memory-access", no_class
,
2549 replay_memory_access_types
, &replay_memory_access
, _("\
2550 Set what memory accesses are allowed during replay."), _("\
2551 Show what memory accesses are allowed during replay."),
2552 _("Default is READ-ONLY.\n\n\
2553 The btrace record target does not trace data.\n\
2554 The memory therefore corresponds to the live target and not \
2555 to the current replay position.\n\n\
2556 When READ-ONLY, allow accesses to read-only memory during replay.\n\
2557 When READ-WRITE, allow accesses to read-only and read-write memory during \
2559 NULL
, cmd_show_replay_memory_access
,
2560 &set_record_btrace_cmdlist
,
2561 &show_record_btrace_cmdlist
);
2563 add_prefix_cmd ("bts", class_support
, cmd_set_record_btrace_bts
,
2564 _("Set record btrace bts options"),
2565 &set_record_btrace_bts_cmdlist
,
2566 "set record btrace bts ", 0, &set_record_btrace_cmdlist
);
2568 add_prefix_cmd ("bts", class_support
, cmd_show_record_btrace_bts
,
2569 _("Show record btrace bts options"),
2570 &show_record_btrace_bts_cmdlist
,
2571 "show record btrace bts ", 0, &show_record_btrace_cmdlist
);
2573 add_setshow_uinteger_cmd ("buffer-size", no_class
,
2574 &record_btrace_conf
.bts
.size
,
2575 _("Set the record/replay bts buffer size."),
2576 _("Show the record/replay bts buffer size."), _("\
2577 When starting recording request a trace buffer of this size. \
2578 The actual buffer size may differ from the requested size. \
2579 Use \"info record\" to see the actual buffer size.\n\n\
2580 Bigger buffers allow longer recording but also take more time to process \
2581 the recorded execution trace.\n\n\
2582 The trace buffer size may not be changed while recording."), NULL
,
2583 show_record_bts_buffer_size_value
,
2584 &set_record_btrace_bts_cmdlist
,
2585 &show_record_btrace_bts_cmdlist
);
2587 add_prefix_cmd ("pt", class_support
, cmd_set_record_btrace_pt
,
2588 _("Set record btrace pt options"),
2589 &set_record_btrace_pt_cmdlist
,
2590 "set record btrace pt ", 0, &set_record_btrace_cmdlist
);
2592 add_prefix_cmd ("pt", class_support
, cmd_show_record_btrace_pt
,
2593 _("Show record btrace pt options"),
2594 &show_record_btrace_pt_cmdlist
,
2595 "show record btrace pt ", 0, &show_record_btrace_cmdlist
);
2597 add_setshow_uinteger_cmd ("buffer-size", no_class
,
2598 &record_btrace_conf
.pt
.size
,
2599 _("Set the record/replay pt buffer size."),
2600 _("Show the record/replay pt buffer size."), _("\
2601 Bigger buffers allow longer recording but also take more time to process \
2602 the recorded execution.\n\
2603 The actual buffer size may differ from the requested size. Use \"info record\" \
2604 to see the actual buffer size."), NULL
, show_record_pt_buffer_size_value
,
2605 &set_record_btrace_pt_cmdlist
,
2606 &show_record_btrace_pt_cmdlist
);
2608 init_record_btrace_ops ();
2609 add_target (&record_btrace_ops
);
2611 bfcache
= htab_create_alloc (50, bfcache_hash
, bfcache_eq
, NULL
,
2614 record_btrace_conf
.bts
.size
= 64 * 1024;
2615 record_btrace_conf
.pt
.size
= 16 * 1024;