1 /* Branch trace support for GDB, the GNU debugger.
3 Copyright (C) 2013-2015 Free Software Foundation, Inc.
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "gdbthread.h"
29 #include "cli/cli-utils.h"
33 #include "filenames.h"
35 #include "frame-unwind.h"
38 #include "event-loop.h"
41 /* The target_ops of record-btrace. */
42 static struct target_ops record_btrace_ops
;
44 /* A new thread observer enabling branch tracing for the new thread. */
45 static struct observer
*record_btrace_thread_observer
;
47 /* Memory access types used in set/show record btrace replay-memory-access. */
48 static const char replay_memory_access_read_only
[] = "read-only";
49 static const char replay_memory_access_read_write
[] = "read-write";
50 static const char *const replay_memory_access_types
[] =
52 replay_memory_access_read_only
,
53 replay_memory_access_read_write
,
57 /* The currently allowed replay memory access type. */
58 static const char *replay_memory_access
= replay_memory_access_read_only
;
60 /* Command lists for "set/show record btrace". */
61 static struct cmd_list_element
*set_record_btrace_cmdlist
;
62 static struct cmd_list_element
*show_record_btrace_cmdlist
;
64 /* The execution direction of the last resume we got. See record-full.c. */
65 static enum exec_direction_kind record_btrace_resume_exec_dir
= EXEC_FORWARD
;
67 /* The async event handler for reverse/replay execution. */
68 static struct async_event_handler
*record_btrace_async_inferior_event_handler
;
70 /* A flag indicating that we are currently generating a core file. */
71 static int record_btrace_generating_corefile
;
73 /* The current branch trace configuration. */
74 static struct btrace_config record_btrace_conf
;
76 /* Command list for "record btrace". */
77 static struct cmd_list_element
*record_btrace_cmdlist
;
79 /* Command lists for "set/show record btrace bts". */
80 static struct cmd_list_element
*set_record_btrace_bts_cmdlist
;
81 static struct cmd_list_element
*show_record_btrace_bts_cmdlist
;
83 /* Print a record-btrace debug message. Use do ... while (0) to avoid
84 ambiguities when used in if statements. */
86 #define DEBUG(msg, args...) \
89 if (record_debug != 0) \
90 fprintf_unfiltered (gdb_stdlog, \
91 "[record-btrace] " msg "\n", ##args); \
96 /* Update the branch trace for the current thread and return a pointer to its
99 Throws an error if there is no thread or no trace. This function never
/* Look up the thread for INFERIOR_PTID and return it, throwing an error
   if there is no thread or if its branch trace is empty.
   NOTE(review): this extraction is missing interior lines — the braces,
   the trace update call, and the return statement are not visible.  */
102 static struct thread_info
*
103 require_btrace_thread (void)
105 struct thread_info
*tp
;
/* Find the thread running the current inferior.  */
109 tp
= find_thread_ptid (inferior_ptid
);
111 error (_("No thread."));
/* An empty trace means there is nothing to inspect or replay.  */
115 if (btrace_is_empty (tp
))
116 error (_("No trace."));
121 /* Update the branch trace for the current thread and return a pointer to its
122 branch trace information struct.
124 Throws an error if there is no thread or no trace. This function never
/* Return the branch trace information for the current thread, relying on
   require_btrace_thread to error out when there is no thread or no trace.
   NOTE(review): the dereference/return lines are missing from this
   extraction.  */
127 static struct btrace_thread_info
*
128 require_btrace (void)
130 struct thread_info
*tp
;
132 tp
= require_btrace_thread ();
137 /* Enable branch tracing for one thread. Warn on errors. */
/* Enable branch tracing for thread TP, converting any btrace_enable
   error into a warning instead of propagating it.  Used as the
   new-thread observer callback (see record_btrace_auto_enable).  */
140 record_btrace_enable_warn (struct thread_info
*tp
)
142 volatile struct gdb_exception error
;
/* Catch errors so a failure to trace one new thread does not abort
   the caller.  */
144 TRY_CATCH (error
, RETURN_MASK_ERROR
)
145 btrace_enable (tp
, &record_btrace_conf
);
147 if (error
.message
!= NULL
)
148 warning ("%s", error
.message
);
151 /* Callback function to disable branch tracing for one thread. */
154 record_btrace_disable_callback (void *arg
)
156 struct thread_info
*tp
;
163 /* Enable automatic tracing of new threads. */
/* Enable automatic tracing of new threads by attaching a new-thread
   observer that calls record_btrace_enable_warn.  The observer handle is
   kept in record_btrace_thread_observer so it can be detached later.  */
166 record_btrace_auto_enable (void)
168 DEBUG ("attach thread observer");
170 record_btrace_thread_observer
171 = observer_attach_new_thread (record_btrace_enable_warn
);
174 /* Disable automatic tracing of new threads. */
/* Disable automatic tracing of new threads by detaching the observer
   attached in record_btrace_auto_enable.  Safe to call more than once:
   a NULL observer means it was already detached.  */
177 record_btrace_auto_disable (void)
179 /* The observer may have been detached, already. */
180 if (record_btrace_thread_observer
== NULL
)
183 DEBUG ("detach thread observer");
185 observer_detach_new_thread (record_btrace_thread_observer
);
/* Clear the handle so a second call is a no-op.  */
186 record_btrace_thread_observer
= NULL
;
189 /* The record-btrace async event handler function. */
/* Async event handler callback for reverse/replay execution: forward a
   register event to the generic inferior event handler.  DATA is the
   client data registered with the handler (unused here).  */
192 record_btrace_handle_async_inferior_event (gdb_client_data data
)
194 inferior_event_handler (INF_REG_EVENT
, NULL
)
;
197 /* The to_open method of target record-btrace. */
/* The to_open method of target record-btrace.

   ARGS optionally lists thread numbers to trace (empty/NULL means all
   non-exited threads); FROM_TTY is the standard command flag.  Enables
   tracing per thread under a cleanup chain so that a failure part-way
   through disables what was already enabled, then pushes the target and
   installs the async event handler.
   NOTE(review): interior lines (non-stop check condition, braces, the
   NULL argument of create_async_event_handler) are missing from this
   extraction.  */
200 record_btrace_open (const char *args
, int from_tty
)
202 struct cleanup
*disable_chain
;
203 struct thread_info
*tp
;
/* Recording requires a live inferior.  */
209 if (!target_has_execution
)
210 error (_("The program is not being run."));
213 error (_("Record btrace can't debug inferior in non-stop mode."));
/* Auto-enable must not be active yet; it is turned on below.  */
215 gdb_assert (record_btrace_thread_observer
== NULL
)
;
217 disable_chain
= make_cleanup (null_cleanup
, NULL
)
;
/* Enable tracing for each requested (or every) thread; register a
   per-thread cleanup so errors roll back the already-enabled ones.  */
218 ALL_NON_EXITED_THREADS (tp
)
219 if (args
== NULL
|| *args
== 0 || number_is_in_list (args
, tp
->num
))
221 btrace_enable (tp
, &record_btrace_conf
)
;
223 make_cleanup (record_btrace_disable_callback
, tp
)
;
226 record_btrace_auto_enable ();
228 push_target (&record_btrace_ops
)
;
230 record_btrace_async_inferior_event_handler
231 = create_async_event_handler (record_btrace_handle_async_inferior_event
,
233 record_btrace_generating_corefile
= 0;
235 observer_notify_record_changed (current_inferior (), 1);
/* Success: keep tracing enabled — drop the rollback cleanups.  */
237 discard_cleanups (disable_chain
)
;
240 /* The to_stop_recording method of target record-btrace. */
/* The to_stop_recording method of target record-btrace.

   Stops auto-enabling new threads and then walks all non-exited threads;
   the per-thread disable call for threads with active btrace is missing
   from this extraction (NOTE(review): presumably btrace_disable —
   confirm against the full source).  */
243 record_btrace_stop_recording (struct target_ops
*self
)
245 struct thread_info
*tp
;
247 DEBUG ("stop recording");
249 record_btrace_auto_disable ();
/* Only threads that are actively traced need to be stopped.  */
251 ALL_NON_EXITED_THREADS (tp
)
252 if (tp
->btrace
.target
!= NULL
)
256 /* The to_close method of target record-btrace. */
/* The to_close method of target record-btrace.

   Removes the async event handler (if installed), disables automatic
   tracing of new threads, and tears down branch tracing for every
   remaining thread as a safety net.  */
259 record_btrace_close (struct target_ops
*self
)
261 struct thread_info
*tp
;
/* The handle is reset by delete_async_event_handler; only delete it
   if it was actually created.  */
263 if (record_btrace_async_inferior_event_handler
!= NULL
)
264 delete_async_event_handler (&record_btrace_async_inferior_event_handler
)
;
266 /* Make sure automatic recording gets disabled even if we did not stop
267 recording before closing the record-btrace target. */
268 record_btrace_auto_disable ();
270 /* We should have already stopped recording.
271 Tear down btrace in case we have not. */
272 ALL_NON_EXITED_THREADS (tp
)
273 btrace_teardown (tp
)
;
276 /* The to_async method of target record-btrace. */
279 record_btrace_async (struct target_ops
*ops
,
280 void (*callback
) (enum inferior_event_type event_type
,
284 if (callback
!= NULL
)
285 mark_async_event_handler (record_btrace_async_inferior_event_handler
);
287 clear_async_event_handler (record_btrace_async_inferior_event_handler
);
289 ops
->beneath
->to_async (ops
->beneath
, callback
, context
);
292 /* Adjusts the size and returns a human readable size suffix. */
295 record_btrace_adjust_size (unsigned int *size
)
301 if ((sz
& ((1u << 30) - 1)) == 0)
306 else if ((sz
& ((1u << 20) - 1)) == 0)
311 else if ((sz
& ((1u << 10) - 1)) == 0)
320 /* Print a BTS configuration. */
323 record_btrace_print_bts_conf (const struct btrace_config_bts
*conf
)
331 suffix
= record_btrace_adjust_size (&size
);
332 printf_unfiltered (_("Buffer size: %u%s.\n"), size
, suffix
);
336 /* Print a branch tracing configuration. */
339 record_btrace_print_conf (const struct btrace_config
*conf
)
341 printf_unfiltered (_("Recording format: %s.\n"),
342 btrace_format_string (conf
->format
));
344 switch (conf
->format
)
346 case BTRACE_FORMAT_NONE
:
349 case BTRACE_FORMAT_BTS
:
350 record_btrace_print_bts_conf (&conf
->bts
);
354 internal_error (__FILE__
, __LINE__
, _("Unkown branch trace format."));
357 /* The to_info_record method of target record-btrace. */
360 record_btrace_info (struct target_ops
*self
)
362 struct btrace_thread_info
*btinfo
;
363 const struct btrace_config
*conf
;
364 struct thread_info
*tp
;
365 unsigned int insns
, calls
, gaps
;
369 tp
= find_thread_ptid (inferior_ptid
);
371 error (_("No thread."));
373 btinfo
= &tp
->btrace
;
375 conf
= btrace_conf (btinfo
);
377 record_btrace_print_conf (conf
);
385 if (!btrace_is_empty (tp
))
387 struct btrace_call_iterator call
;
388 struct btrace_insn_iterator insn
;
390 btrace_call_end (&call
, btinfo
);
391 btrace_call_prev (&call
, 1);
392 calls
= btrace_call_number (&call
);
394 btrace_insn_end (&insn
, btinfo
);
396 insns
= btrace_insn_number (&insn
);
399 /* The last instruction does not really belong to the trace. */
406 /* Skip gaps at the end. */
409 steps
= btrace_insn_prev (&insn
, 1);
413 insns
= btrace_insn_number (&insn
);
418 gaps
= btinfo
->ngaps
;
421 printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
422 "for thread %d (%s).\n"), insns
, calls
, gaps
,
423 tp
->num
, target_pid_to_str (tp
->ptid
));
425 if (btrace_is_replaying (tp
))
426 printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
427 btrace_insn_number (btinfo
->replay
));
430 /* Print a decode error. */
433 btrace_ui_out_decode_error (struct ui_out
*uiout
, int errcode
,
434 enum btrace_format format
)
439 errstr
= _("unknown");
447 case BTRACE_FORMAT_BTS
:
453 case BDE_BTS_OVERFLOW
:
454 errstr
= _("instruction overflow");
457 case BDE_BTS_INSN_SIZE
:
458 errstr
= _("unknown instruction");
464 ui_out_text (uiout
, _("["));
467 ui_out_text (uiout
, _("decode error ("));
468 ui_out_field_int (uiout
, "errcode", errcode
);
469 ui_out_text (uiout
, _("): "));
471 ui_out_text (uiout
, errstr
);
472 ui_out_text (uiout
, _("]\n"));
475 /* Print an unsigned int. */
/* Print an unsigned int VAL into ui_out field FLD of UIOUT, formatted
   with "%u".  Thin convenience wrapper around ui_out_field_fmt.  */
478 ui_out_field_uint (struct ui_out
*uiout
, const char *fld
, unsigned int val
)
480 ui_out_field_fmt (uiout
, fld
, "%u", val
)
;
483 /* Disassemble a section of the recorded instruction trace. */
486 btrace_insn_history (struct ui_out
*uiout
,
487 const struct btrace_thread_info
*btinfo
,
488 const struct btrace_insn_iterator
*begin
,
489 const struct btrace_insn_iterator
*end
, int flags
)
491 struct gdbarch
*gdbarch
;
492 struct btrace_insn_iterator it
;
494 DEBUG ("itrace (0x%x): [%u; %u)", flags
, btrace_insn_number (begin
),
495 btrace_insn_number (end
));
497 gdbarch
= target_gdbarch ();
499 for (it
= *begin
; btrace_insn_cmp (&it
, end
) != 0; btrace_insn_next (&it
, 1))
501 const struct btrace_insn
*insn
;
503 insn
= btrace_insn_get (&it
);
505 /* A NULL instruction indicates a gap in the trace. */
508 const struct btrace_config
*conf
;
510 conf
= btrace_conf (btinfo
);
512 /* We have trace so we must have a configuration. */
513 gdb_assert (conf
!= NULL
);
515 btrace_ui_out_decode_error (uiout
, it
.function
->errcode
,
520 /* Print the instruction index. */
521 ui_out_field_uint (uiout
, "index", btrace_insn_number (&it
));
522 ui_out_text (uiout
, "\t");
524 /* Disassembly with '/m' flag may not produce the expected result.
526 gdb_disassembly (gdbarch
, uiout
, NULL
, flags
, 1, insn
->pc
,
532 /* The to_insn_history method of target record-btrace. */
535 record_btrace_insn_history (struct target_ops
*self
, int size
, int flags
)
537 struct btrace_thread_info
*btinfo
;
538 struct btrace_insn_history
*history
;
539 struct btrace_insn_iterator begin
, end
;
540 struct cleanup
*uiout_cleanup
;
541 struct ui_out
*uiout
;
542 unsigned int context
, covered
;
544 uiout
= current_uiout
;
545 uiout_cleanup
= make_cleanup_ui_out_tuple_begin_end (uiout
,
547 context
= abs (size
);
549 error (_("Bad record instruction-history-size."));
551 btinfo
= require_btrace ();
552 history
= btinfo
->insn_history
;
555 struct btrace_insn_iterator
*replay
;
557 DEBUG ("insn-history (0x%x): %d", flags
, size
);
559 /* If we're replaying, we start at the replay position. Otherwise, we
560 start at the tail of the trace. */
561 replay
= btinfo
->replay
;
565 btrace_insn_end (&begin
, btinfo
);
567 /* We start from here and expand in the requested direction. Then we
568 expand in the other direction, as well, to fill up any remaining
573 /* We want the current position covered, as well. */
574 covered
= btrace_insn_next (&end
, 1);
575 covered
+= btrace_insn_prev (&begin
, context
- covered
);
576 covered
+= btrace_insn_next (&end
, context
- covered
);
580 covered
= btrace_insn_next (&end
, context
);
581 covered
+= btrace_insn_prev (&begin
, context
- covered
);
586 begin
= history
->begin
;
589 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags
, size
,
590 btrace_insn_number (&begin
), btrace_insn_number (&end
));
595 covered
= btrace_insn_prev (&begin
, context
);
600 covered
= btrace_insn_next (&end
, context
);
605 btrace_insn_history (uiout
, btinfo
, &begin
, &end
, flags
);
609 printf_unfiltered (_("At the start of the branch trace record.\n"));
611 printf_unfiltered (_("At the end of the branch trace record.\n"));
614 btrace_set_insn_history (btinfo
, &begin
, &end
);
615 do_cleanups (uiout_cleanup
);
618 /* The to_insn_history_range method of target record-btrace. */
621 record_btrace_insn_history_range (struct target_ops
*self
,
622 ULONGEST from
, ULONGEST to
, int flags
)
624 struct btrace_thread_info
*btinfo
;
625 struct btrace_insn_history
*history
;
626 struct btrace_insn_iterator begin
, end
;
627 struct cleanup
*uiout_cleanup
;
628 struct ui_out
*uiout
;
629 unsigned int low
, high
;
632 uiout
= current_uiout
;
633 uiout_cleanup
= make_cleanup_ui_out_tuple_begin_end (uiout
,
638 DEBUG ("insn-history (0x%x): [%u; %u)", flags
, low
, high
);
640 /* Check for wrap-arounds. */
641 if (low
!= from
|| high
!= to
)
642 error (_("Bad range."));
645 error (_("Bad range."));
647 btinfo
= require_btrace ();
649 found
= btrace_find_insn_by_number (&begin
, btinfo
, low
);
651 error (_("Range out of bounds."));
653 found
= btrace_find_insn_by_number (&end
, btinfo
, high
);
656 /* Silently truncate the range. */
657 btrace_insn_end (&end
, btinfo
);
661 /* We want both begin and end to be inclusive. */
662 btrace_insn_next (&end
, 1);
665 btrace_insn_history (uiout
, btinfo
, &begin
, &end
, flags
);
666 btrace_set_insn_history (btinfo
, &begin
, &end
);
668 do_cleanups (uiout_cleanup
);
671 /* The to_insn_history_from method of target record-btrace. */
674 record_btrace_insn_history_from (struct target_ops
*self
,
675 ULONGEST from
, int size
, int flags
)
677 ULONGEST begin
, end
, context
;
679 context
= abs (size
);
681 error (_("Bad record instruction-history-size."));
690 begin
= from
- context
+ 1;
695 end
= from
+ context
- 1;
697 /* Check for wrap-around. */
702 record_btrace_insn_history_range (self
, begin
, end
, flags
);
705 /* Print the instruction number range for a function call history line. */
708 btrace_call_history_insn_range (struct ui_out
*uiout
,
709 const struct btrace_function
*bfun
)
711 unsigned int begin
, end
, size
;
713 size
= VEC_length (btrace_insn_s
, bfun
->insn
);
714 gdb_assert (size
> 0);
716 begin
= bfun
->insn_offset
;
717 end
= begin
+ size
- 1;
719 ui_out_field_uint (uiout
, "insn begin", begin
);
720 ui_out_text (uiout
, ",");
721 ui_out_field_uint (uiout
, "insn end", end
);
724 /* Compute the lowest and highest source line for the instructions in BFUN
725 and return them in PBEGIN and PEND.
726 Ignore instructions that can't be mapped to BFUN, e.g. instructions that
727 result from inlining or macro expansion. */
730 btrace_compute_src_line_range (const struct btrace_function
*bfun
,
731 int *pbegin
, int *pend
)
733 struct btrace_insn
*insn
;
734 struct symtab
*symtab
;
746 symtab
= symbol_symtab (sym
);
748 for (idx
= 0; VEC_iterate (btrace_insn_s
, bfun
->insn
, idx
, insn
); ++idx
)
750 struct symtab_and_line sal
;
752 sal
= find_pc_line (insn
->pc
, 0);
753 if (sal
.symtab
!= symtab
|| sal
.line
== 0)
756 begin
= min (begin
, sal
.line
);
757 end
= max (end
, sal
.line
);
765 /* Print the source line information for a function call history line. */
768 btrace_call_history_src_line (struct ui_out
*uiout
,
769 const struct btrace_function
*bfun
)
778 ui_out_field_string (uiout
, "file",
779 symtab_to_filename_for_display (symbol_symtab (sym
)));
781 btrace_compute_src_line_range (bfun
, &begin
, &end
);
785 ui_out_text (uiout
, ":");
786 ui_out_field_int (uiout
, "min line", begin
);
791 ui_out_text (uiout
, ",");
792 ui_out_field_int (uiout
, "max line", end
);
795 /* Get the name of a branch trace function. */
798 btrace_get_bfun_name (const struct btrace_function
*bfun
)
800 struct minimal_symbol
*msym
;
810 return SYMBOL_PRINT_NAME (sym
);
811 else if (msym
!= NULL
)
812 return MSYMBOL_PRINT_NAME (msym
);
817 /* Disassemble a section of the recorded function trace. */
820 btrace_call_history (struct ui_out
*uiout
,
821 const struct btrace_thread_info
*btinfo
,
822 const struct btrace_call_iterator
*begin
,
823 const struct btrace_call_iterator
*end
,
824 enum record_print_flag flags
)
826 struct btrace_call_iterator it
;
828 DEBUG ("ftrace (0x%x): [%u; %u)", flags
, btrace_call_number (begin
),
829 btrace_call_number (end
));
831 for (it
= *begin
; btrace_call_cmp (&it
, end
) < 0; btrace_call_next (&it
, 1))
833 const struct btrace_function
*bfun
;
834 struct minimal_symbol
*msym
;
837 bfun
= btrace_call_get (&it
);
841 /* Print the function index. */
842 ui_out_field_uint (uiout
, "index", bfun
->number
);
843 ui_out_text (uiout
, "\t");
845 /* Indicate gaps in the trace. */
846 if (bfun
->errcode
!= 0)
848 const struct btrace_config
*conf
;
850 conf
= btrace_conf (btinfo
);
852 /* We have trace so we must have a configuration. */
853 gdb_assert (conf
!= NULL
);
855 btrace_ui_out_decode_error (uiout
, bfun
->errcode
, conf
->format
);
860 if ((flags
& RECORD_PRINT_INDENT_CALLS
) != 0)
862 int level
= bfun
->level
+ btinfo
->level
, i
;
864 for (i
= 0; i
< level
; ++i
)
865 ui_out_text (uiout
, " ");
869 ui_out_field_string (uiout
, "function", SYMBOL_PRINT_NAME (sym
));
870 else if (msym
!= NULL
)
871 ui_out_field_string (uiout
, "function", MSYMBOL_PRINT_NAME (msym
));
872 else if (!ui_out_is_mi_like_p (uiout
))
873 ui_out_field_string (uiout
, "function", "??");
875 if ((flags
& RECORD_PRINT_INSN_RANGE
) != 0)
877 ui_out_text (uiout
, _("\tinst "));
878 btrace_call_history_insn_range (uiout
, bfun
);
881 if ((flags
& RECORD_PRINT_SRC_LINE
) != 0)
883 ui_out_text (uiout
, _("\tat "));
884 btrace_call_history_src_line (uiout
, bfun
);
887 ui_out_text (uiout
, "\n");
891 /* The to_call_history method of target record-btrace. */
894 record_btrace_call_history (struct target_ops
*self
, int size
, int flags
)
896 struct btrace_thread_info
*btinfo
;
897 struct btrace_call_history
*history
;
898 struct btrace_call_iterator begin
, end
;
899 struct cleanup
*uiout_cleanup
;
900 struct ui_out
*uiout
;
901 unsigned int context
, covered
;
903 uiout
= current_uiout
;
904 uiout_cleanup
= make_cleanup_ui_out_tuple_begin_end (uiout
,
906 context
= abs (size
);
908 error (_("Bad record function-call-history-size."));
910 btinfo
= require_btrace ();
911 history
= btinfo
->call_history
;
914 struct btrace_insn_iterator
*replay
;
916 DEBUG ("call-history (0x%x): %d", flags
, size
);
918 /* If we're replaying, we start at the replay position. Otherwise, we
919 start at the tail of the trace. */
920 replay
= btinfo
->replay
;
923 begin
.function
= replay
->function
;
924 begin
.btinfo
= btinfo
;
927 btrace_call_end (&begin
, btinfo
);
929 /* We start from here and expand in the requested direction. Then we
930 expand in the other direction, as well, to fill up any remaining
935 /* We want the current position covered, as well. */
936 covered
= btrace_call_next (&end
, 1);
937 covered
+= btrace_call_prev (&begin
, context
- covered
);
938 covered
+= btrace_call_next (&end
, context
- covered
);
942 covered
= btrace_call_next (&end
, context
);
943 covered
+= btrace_call_prev (&begin
, context
- covered
);
948 begin
= history
->begin
;
951 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", flags
, size
,
952 btrace_call_number (&begin
), btrace_call_number (&end
));
957 covered
= btrace_call_prev (&begin
, context
);
962 covered
= btrace_call_next (&end
, context
);
967 btrace_call_history (uiout
, btinfo
, &begin
, &end
, flags
);
971 printf_unfiltered (_("At the start of the branch trace record.\n"));
973 printf_unfiltered (_("At the end of the branch trace record.\n"));
976 btrace_set_call_history (btinfo
, &begin
, &end
);
977 do_cleanups (uiout_cleanup
);
980 /* The to_call_history_range method of target record-btrace. */
983 record_btrace_call_history_range (struct target_ops
*self
,
984 ULONGEST from
, ULONGEST to
, int flags
)
986 struct btrace_thread_info
*btinfo
;
987 struct btrace_call_history
*history
;
988 struct btrace_call_iterator begin
, end
;
989 struct cleanup
*uiout_cleanup
;
990 struct ui_out
*uiout
;
991 unsigned int low
, high
;
994 uiout
= current_uiout
;
995 uiout_cleanup
= make_cleanup_ui_out_tuple_begin_end (uiout
,
1000 DEBUG ("call-history (0x%x): [%u; %u)", flags
, low
, high
);
1002 /* Check for wrap-arounds. */
1003 if (low
!= from
|| high
!= to
)
1004 error (_("Bad range."));
1007 error (_("Bad range."));
1009 btinfo
= require_btrace ();
1011 found
= btrace_find_call_by_number (&begin
, btinfo
, low
);
1013 error (_("Range out of bounds."));
1015 found
= btrace_find_call_by_number (&end
, btinfo
, high
);
1018 /* Silently truncate the range. */
1019 btrace_call_end (&end
, btinfo
);
1023 /* We want both begin and end to be inclusive. */
1024 btrace_call_next (&end
, 1);
1027 btrace_call_history (uiout
, btinfo
, &begin
, &end
, flags
);
1028 btrace_set_call_history (btinfo
, &begin
, &end
);
1030 do_cleanups (uiout_cleanup
);
1033 /* The to_call_history_from method of target record-btrace. */
1036 record_btrace_call_history_from (struct target_ops
*self
,
1037 ULONGEST from
, int size
, int flags
)
1039 ULONGEST begin
, end
, context
;
1041 context
= abs (size
);
1043 error (_("Bad record function-call-history-size."));
1052 begin
= from
- context
+ 1;
1057 end
= from
+ context
- 1;
1059 /* Check for wrap-around. */
1064 record_btrace_call_history_range (self
, begin
, end
, flags
);
1067 /* The to_record_is_replaying method of target record-btrace. */
/* The to_record_is_replaying method of target record-btrace.

   Scans all non-exited threads for one that is replaying.
   NOTE(review): the return statements are missing from this extraction;
   presumably returns nonzero if any thread replays, zero otherwise.  */
1070 record_btrace_is_replaying (struct target_ops
*self
)
1072 struct thread_info
*tp
;
1074 ALL_NON_EXITED_THREADS (tp
)
1075 if (btrace_is_replaying (tp
))
1081 /* The to_xfer_partial method of target record-btrace. */
/* The to_xfer_partial method of target record-btrace.

   While replaying with read-only memory access (and not generating a
   core file), memory writes are refused and reads are restricted to
   read-only sections; everything else is forwarded to the target
   beneath.  NOTE(review): interior lines are missing from this
   extraction — among them the switch statement around TARGET_OBJECT_MEMORY
   and the reassignment of OPS to OPS->beneath before the forwarding
   call; confirm against the full source.  */
1083 static enum target_xfer_status
1084 record_btrace_xfer_partial (struct target_ops
*ops
, enum target_object object
,
1085 const char *annex
, gdb_byte
*readbuf
,
1086 const gdb_byte
*writebuf
, ULONGEST offset
,
1087 ULONGEST len
, ULONGEST
*xfered_len
)
1089 struct target_ops
*t
;
1091 /* Filter out requests that don't make sense during replay. */
1092 if (replay_memory_access
== replay_memory_access_read_only
1093 && !record_btrace_generating_corefile
1094 && record_btrace_is_replaying (ops
))
1098 case TARGET_OBJECT_MEMORY
:
1100 struct target_section
*section
;
1102 /* We do not allow writing memory in general. */
1103 if (writebuf
!= NULL
)
1106 return TARGET_XFER_UNAVAILABLE
;
1109 /* We allow reading readonly memory. */
1110 section
= target_section_by_addr (ops
, offset
)
;
1111 if (section
!= NULL
)
1113 /* Check if the section we found is readonly. */
1114 if ((bfd_get_section_flags (section
->the_bfd_section
->owner
,
1115 section
->the_bfd_section
)
1116 & SEC_READONLY
) != 0)
1118 /* Truncate the request to fit into this section. */
1119 len
= min (len
, section
->endaddr
- offset
)
;
1125 return TARGET_XFER_UNAVAILABLE
;
1130 /* Forward the request. */
1132 return ops
->to_xfer_partial (ops
, object
, annex
, readbuf
, writebuf
,
1133 offset
, len
, xfered_len
)
;
1136 /* The to_insert_breakpoint method of target record-btrace. */
1139 record_btrace_insert_breakpoint (struct target_ops
*ops
,
1140 struct gdbarch
*gdbarch
,
1141 struct bp_target_info
*bp_tgt
)
1143 volatile struct gdb_exception except
;
1147 /* Inserting breakpoints requires accessing memory. Allow it for the
1148 duration of this function. */
1149 old
= replay_memory_access
;
1150 replay_memory_access
= replay_memory_access_read_write
;
1153 TRY_CATCH (except
, RETURN_MASK_ALL
)
1154 ret
= ops
->beneath
->to_insert_breakpoint (ops
->beneath
, gdbarch
, bp_tgt
);
1156 replay_memory_access
= old
;
1158 if (except
.reason
< 0)
1159 throw_exception (except
);
1164 /* The to_remove_breakpoint method of target record-btrace. */
1167 record_btrace_remove_breakpoint (struct target_ops
*ops
,
1168 struct gdbarch
*gdbarch
,
1169 struct bp_target_info
*bp_tgt
)
1171 volatile struct gdb_exception except
;
1175 /* Removing breakpoints requires accessing memory. Allow it for the
1176 duration of this function. */
1177 old
= replay_memory_access
;
1178 replay_memory_access
= replay_memory_access_read_write
;
1181 TRY_CATCH (except
, RETURN_MASK_ALL
)
1182 ret
= ops
->beneath
->to_remove_breakpoint (ops
->beneath
, gdbarch
, bp_tgt
);
1184 replay_memory_access
= old
;
1186 if (except
.reason
< 0)
1187 throw_exception (except
);
1192 /* The to_fetch_registers method of target record-btrace. */
/* The to_fetch_registers method of target record-btrace.

   While replaying (and not generating a core file), only the PC
   register can be supplied — its value is the address of the current
   replay instruction.  Otherwise the request is forwarded to the
   target beneath.  NOTE(review): interior lines are missing from this
   extraction (braces, the early return for non-PC registers, and the
   regno adjustment before regcache_raw_supply).  */
1195 record_btrace_fetch_registers (struct target_ops
*ops
,
1196 struct regcache
*regcache
, int regno
)
1198 struct btrace_insn_iterator
*replay
;
1199 struct thread_info
*tp
;
1201 tp
= find_thread_ptid (inferior_ptid
)
;
1202 gdb_assert (tp
!= NULL
)
;
/* A non-NULL replay iterator means this thread is replaying.  */
1204 replay
= tp
->btrace
.replay
;
1205 if (replay
!= NULL
&& !record_btrace_generating_corefile
)
1207 const struct btrace_insn
*insn
;
1208 struct gdbarch
*gdbarch
;
1211 gdbarch
= get_regcache_arch (regcache
)
;
1212 pcreg
= gdbarch_pc_regnum (gdbarch
)
;
1216 /* We can only provide the PC register. */
1217 if (regno
>= 0 && regno
!= pcreg
)
/* Supply the address of the current replay instruction as PC.  */
1220 insn
= btrace_insn_get (replay
)
;
1221 gdb_assert (insn
!= NULL
)
;
1223 regcache_raw_supply (regcache
, regno
, &insn
->pc
)
;
/* Not replaying: forward the request to the target beneath.  */
1227 struct target_ops
*t
= ops
->beneath
;
1229 t
->to_fetch_registers (t
, regcache
, regno
)
;
1233 /* The to_store_registers method of target record-btrace. */
/* The to_store_registers method of target record-btrace.

   Refuses register writes while replaying (unless generating a core
   file); otherwise forwards to the target beneath.  NOTE(review): the
   assignment of T (presumably ops->beneath) is missing from this
   extraction.  */
1236 record_btrace_store_registers (struct target_ops
*ops
,
1237 struct regcache
*regcache
, int regno
)
1239 struct target_ops
*t
;
/* History is immutable; writing registers during replay would
   desynchronize it from the recorded trace.  */
1241 if (!record_btrace_generating_corefile
&& record_btrace_is_replaying (ops
))
1242 error (_("This record target does not allow writing registers."));
1244 gdb_assert (may_write_registers
!= 0);
1247 t
->to_store_registers (t
, regcache
, regno
)
;
1250 /* The to_prepare_to_store method of target record-btrace. */
/* The to_prepare_to_store method of target record-btrace.

   While replaying (and not generating a core file) this is a no-op;
   otherwise the request is forwarded to the target beneath.
   NOTE(review): the early return and the assignment of T (presumably
   ops->beneath) are missing from this extraction.  */
1253 record_btrace_prepare_to_store (struct target_ops
*ops
,
1254 struct regcache
*regcache
)
1256 struct target_ops
*t
;
1258 if (!record_btrace_generating_corefile
&& record_btrace_is_replaying (ops
))
1262 t
->to_prepare_to_store (t
, regcache
)
;
1265 /* The branch trace frame cache. */
1267 struct btrace_frame_cache
1270 struct thread_info
*tp
;
1272 /* The frame info. */
1273 struct frame_info
*frame
;
1275 /* The branch trace function segment. */
1276 const struct btrace_function
*bfun
;
1279 /* A struct btrace_frame_cache hash table indexed by NEXT. */
1281 static htab_t bfcache
;
1283 /* hash_f for htab_create_alloc of bfcache. */
/* hash_f callback for the bfcache hash table: hash a btrace_frame_cache
   entry by the pointer value of its frame member.  */
1286 bfcache_hash (const void *arg
)
1288 const struct btrace_frame_cache
*cache
= arg
;
1290 return htab_hash_pointer (cache
->frame
)
;
1293 /* eq_f for htab_create_alloc of bfcache. */
/* eq_f callback for the bfcache hash table: two entries are equal iff
   they reference the same frame (pointer identity).  */
1296 bfcache_eq (const void *arg1
, const void *arg2
)
1298 const struct btrace_frame_cache
*cache1
= arg1
;
1299 const struct btrace_frame_cache
*cache2
= arg2
;
1301 return cache1
->frame
== cache2
->frame
;
1304 /* Create a new btrace frame cache. */
/* Allocate a new btrace frame cache entry for FRAME on the frame
   obstack, insert it into the bfcache hash table keyed by FRAME, and
   return it.  Asserts the slot was previously empty — each frame gets
   at most one cache entry.  NOTE(review): the declaration of SLOT and
   the assignment/return lines are missing from this extraction.  */
1306 static struct btrace_frame_cache
*
1307 bfcache_new (struct frame_info
*frame
)
1309 struct btrace_frame_cache
*cache
;
/* FRAME_OBSTACK_ZALLOC zero-initializes the entry.  */
1312 cache
= FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache
)
;
1313 cache
->frame
= frame
;
1315 slot
= htab_find_slot (bfcache
, cache
, INSERT
)
;
1316 gdb_assert (*slot
== NULL
)
;
1322 /* Extract the branch trace function from a branch trace frame. */
1324 static const struct btrace_function
*
1325 btrace_get_frame_function (struct frame_info
*frame
)
1327 const struct btrace_frame_cache
*cache
;
1328 const struct btrace_function
*bfun
;
1329 struct btrace_frame_cache pattern
;
1332 pattern
.frame
= frame
;
1334 slot
= htab_find_slot (bfcache
, &pattern
, NO_INSERT
);
1342 /* Implement stop_reason method for record_btrace_frame_unwind. */
1344 static enum unwind_stop_reason
1345 record_btrace_frame_unwind_stop_reason (struct frame_info
*this_frame
,
1348 const struct btrace_frame_cache
*cache
;
1349 const struct btrace_function
*bfun
;
1351 cache
= *this_cache
;
1353 gdb_assert (bfun
!= NULL
);
1355 if (bfun
->up
== NULL
)
1356 return UNWIND_UNAVAILABLE
;
1358 return UNWIND_NO_REASON
;
1361 /* Implement this_id method for record_btrace_frame_unwind. */
1364 record_btrace_frame_this_id (struct frame_info
*this_frame
, void **this_cache
,
1365 struct frame_id
*this_id
)
1367 const struct btrace_frame_cache
*cache
;
1368 const struct btrace_function
*bfun
;
1369 CORE_ADDR code
, special
;
1371 cache
= *this_cache
;
1374 gdb_assert (bfun
!= NULL
);
1376 while (bfun
->segment
.prev
!= NULL
)
1377 bfun
= bfun
->segment
.prev
;
1379 code
= get_frame_func (this_frame
);
1380 special
= bfun
->number
;
1382 *this_id
= frame_id_build_unavailable_stack_special (code
, special
);
1384 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1385 btrace_get_bfun_name (cache
->bfun
),
1386 core_addr_to_string_nz (this_id
->code_addr
),
1387 core_addr_to_string_nz (this_id
->special_addr
));
1390 /* Implement prev_register method for record_btrace_frame_unwind. */
1392 static struct value
*
1393 record_btrace_frame_prev_register (struct frame_info
*this_frame
,
1397 const struct btrace_frame_cache
*cache
;
1398 const struct btrace_function
*bfun
, *caller
;
1399 const struct btrace_insn
*insn
;
1400 struct gdbarch
*gdbarch
;
1404 gdbarch
= get_frame_arch (this_frame
);
1405 pcreg
= gdbarch_pc_regnum (gdbarch
);
1406 if (pcreg
< 0 || regnum
!= pcreg
)
1407 throw_error (NOT_AVAILABLE_ERROR
,
1408 _("Registers are not available in btrace record history"));
1410 cache
= *this_cache
;
1412 gdb_assert (bfun
!= NULL
);
1416 throw_error (NOT_AVAILABLE_ERROR
,
1417 _("No caller in btrace record history"));
1419 if ((bfun
->flags
& BFUN_UP_LINKS_TO_RET
) != 0)
1421 insn
= VEC_index (btrace_insn_s
, caller
->insn
, 0);
1426 insn
= VEC_last (btrace_insn_s
, caller
->insn
);
1429 pc
+= gdb_insn_length (gdbarch
, pc
);
1432 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1433 btrace_get_bfun_name (bfun
), bfun
->level
,
1434 core_addr_to_string_nz (pc
));
1436 return frame_unwind_got_address (this_frame
, regnum
, pc
);
1439 /* Implement sniffer method for record_btrace_frame_unwind. */
1442 record_btrace_frame_sniffer (const struct frame_unwind
*self
,
1443 struct frame_info
*this_frame
,
1446 const struct btrace_function
*bfun
;
1447 struct btrace_frame_cache
*cache
;
1448 struct thread_info
*tp
;
1449 struct frame_info
*next
;
1451 /* THIS_FRAME does not contain a reference to its thread. */
1452 tp
= find_thread_ptid (inferior_ptid
);
1453 gdb_assert (tp
!= NULL
);
1456 next
= get_next_frame (this_frame
);
1459 const struct btrace_insn_iterator
*replay
;
1461 replay
= tp
->btrace
.replay
;
1463 bfun
= replay
->function
;
1467 const struct btrace_function
*callee
;
1469 callee
= btrace_get_frame_function (next
);
1470 if (callee
!= NULL
&& (callee
->flags
& BFUN_UP_LINKS_TO_TAILCALL
) == 0)
1477 DEBUG ("[frame] sniffed frame for %s on level %d",
1478 btrace_get_bfun_name (bfun
), bfun
->level
);
1480 /* This is our frame. Initialize the frame cache. */
1481 cache
= bfcache_new (this_frame
);
1485 *this_cache
= cache
;
1489 /* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1492 record_btrace_tailcall_frame_sniffer (const struct frame_unwind
*self
,
1493 struct frame_info
*this_frame
,
1496 const struct btrace_function
*bfun
, *callee
;
1497 struct btrace_frame_cache
*cache
;
1498 struct frame_info
*next
;
1500 next
= get_next_frame (this_frame
);
1504 callee
= btrace_get_frame_function (next
);
1508 if ((callee
->flags
& BFUN_UP_LINKS_TO_TAILCALL
) == 0)
1515 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1516 btrace_get_bfun_name (bfun
), bfun
->level
);
1518 /* This is our frame. Initialize the frame cache. */
1519 cache
= bfcache_new (this_frame
);
1520 cache
->tp
= find_thread_ptid (inferior_ptid
);
1523 *this_cache
= cache
;
1528 record_btrace_frame_dealloc_cache (struct frame_info
*self
, void *this_cache
)
1530 struct btrace_frame_cache
*cache
;
1535 slot
= htab_find_slot (bfcache
, cache
, NO_INSERT
);
1536 gdb_assert (slot
!= NULL
);
1538 htab_remove_elt (bfcache
, cache
);
1541 /* btrace recording does not store previous memory content, neither the stack
1542 frames content. Any unwinding would return errorneous results as the stack
1543 contents no longer matches the changed PC value restored from history.
1544 Therefore this unwinder reports any possibly unwound registers as
1547 const struct frame_unwind record_btrace_frame_unwind
=
1550 record_btrace_frame_unwind_stop_reason
,
1551 record_btrace_frame_this_id
,
1552 record_btrace_frame_prev_register
,
1554 record_btrace_frame_sniffer
,
1555 record_btrace_frame_dealloc_cache
1558 const struct frame_unwind record_btrace_tailcall_frame_unwind
=
1561 record_btrace_frame_unwind_stop_reason
,
1562 record_btrace_frame_this_id
,
1563 record_btrace_frame_prev_register
,
1565 record_btrace_tailcall_frame_sniffer
,
1566 record_btrace_frame_dealloc_cache
1569 /* Implement the to_get_unwinder method. */
1571 static const struct frame_unwind
*
1572 record_btrace_to_get_unwinder (struct target_ops
*self
)
1574 return &record_btrace_frame_unwind
;
1577 /* Implement the to_get_tailcall_unwinder method. */
1579 static const struct frame_unwind
*
1580 record_btrace_to_get_tailcall_unwinder (struct target_ops
*self
)
1582 return &record_btrace_tailcall_frame_unwind
;
1585 /* Indicate that TP should be resumed according to FLAG. */
1588 record_btrace_resume_thread (struct thread_info
*tp
,
1589 enum btrace_thread_flag flag
)
1591 struct btrace_thread_info
*btinfo
;
1593 DEBUG ("resuming %d (%s): %u", tp
->num
, target_pid_to_str (tp
->ptid
), flag
);
1595 btinfo
= &tp
->btrace
;
1597 if ((btinfo
->flags
& BTHR_MOVE
) != 0)
1598 error (_("Thread already moving."));
1600 /* Fetch the latest branch trace. */
1603 btinfo
->flags
|= flag
;
1606 /* Find the thread to resume given a PTID. */
1608 static struct thread_info
*
1609 record_btrace_find_resume_thread (ptid_t ptid
)
1611 struct thread_info
*tp
;
1613 /* When asked to resume everything, we pick the current thread. */
1614 if (ptid_equal (minus_one_ptid
, ptid
) || ptid_is_pid (ptid
))
1615 ptid
= inferior_ptid
;
1617 return find_thread_ptid (ptid
);
1620 /* Start replaying a thread. */
1622 static struct btrace_insn_iterator
*
1623 record_btrace_start_replaying (struct thread_info
*tp
)
1625 volatile struct gdb_exception except
;
1626 struct btrace_insn_iterator
*replay
;
1627 struct btrace_thread_info
*btinfo
;
1630 btinfo
= &tp
->btrace
;
1633 /* We can't start replaying without trace. */
1634 if (btinfo
->begin
== NULL
)
1637 /* Clear the executing flag to allow changes to the current frame.
1638 We are not actually running, yet. We just started a reverse execution
1639 command or a record goto command.
1640 For the latter, EXECUTING is false and this has no effect.
1641 For the former, EXECUTING is true and we're in to_wait, about to
1642 move the thread. Since we need to recompute the stack, we temporarily
1643 set EXECUTING to flase. */
1644 executing
= is_executing (tp
->ptid
);
1645 set_executing (tp
->ptid
, 0);
1647 /* GDB stores the current frame_id when stepping in order to detects steps
1649 Since frames are computed differently when we're replaying, we need to
1650 recompute those stored frames and fix them up so we can still detect
1651 subroutines after we started replaying. */
1652 TRY_CATCH (except
, RETURN_MASK_ALL
)
1654 struct frame_info
*frame
;
1655 struct frame_id frame_id
;
1656 int upd_step_frame_id
, upd_step_stack_frame_id
;
1658 /* The current frame without replaying - computed via normal unwind. */
1659 frame
= get_current_frame ();
1660 frame_id
= get_frame_id (frame
);
1662 /* Check if we need to update any stepping-related frame id's. */
1663 upd_step_frame_id
= frame_id_eq (frame_id
,
1664 tp
->control
.step_frame_id
);
1665 upd_step_stack_frame_id
= frame_id_eq (frame_id
,
1666 tp
->control
.step_stack_frame_id
);
1668 /* We start replaying at the end of the branch trace. This corresponds
1669 to the current instruction. */
1670 replay
= xmalloc (sizeof (*replay
));
1671 btrace_insn_end (replay
, btinfo
);
1673 /* Skip gaps at the end of the trace. */
1674 while (btrace_insn_get (replay
) == NULL
)
1678 steps
= btrace_insn_prev (replay
, 1);
1680 error (_("No trace."));
1683 /* We're not replaying, yet. */
1684 gdb_assert (btinfo
->replay
== NULL
);
1685 btinfo
->replay
= replay
;
1687 /* Make sure we're not using any stale registers. */
1688 registers_changed_ptid (tp
->ptid
);
1690 /* The current frame with replaying - computed via btrace unwind. */
1691 frame
= get_current_frame ();
1692 frame_id
= get_frame_id (frame
);
1694 /* Replace stepping related frames where necessary. */
1695 if (upd_step_frame_id
)
1696 tp
->control
.step_frame_id
= frame_id
;
1697 if (upd_step_stack_frame_id
)
1698 tp
->control
.step_stack_frame_id
= frame_id
;
1701 /* Restore the previous execution state. */
1702 set_executing (tp
->ptid
, executing
);
1704 if (except
.reason
< 0)
1706 xfree (btinfo
->replay
);
1707 btinfo
->replay
= NULL
;
1709 registers_changed_ptid (tp
->ptid
);
1711 throw_exception (except
);
1717 /* Stop replaying a thread. */
1720 record_btrace_stop_replaying (struct thread_info
*tp
)
1722 struct btrace_thread_info
*btinfo
;
1724 btinfo
= &tp
->btrace
;
1726 xfree (btinfo
->replay
);
1727 btinfo
->replay
= NULL
;
1729 /* Make sure we're not leaving any stale registers. */
1730 registers_changed_ptid (tp
->ptid
);
1733 /* The to_resume method of target record-btrace. */
1736 record_btrace_resume (struct target_ops
*ops
, ptid_t ptid
, int step
,
1737 enum gdb_signal signal
)
1739 struct thread_info
*tp
, *other
;
1740 enum btrace_thread_flag flag
;
1742 DEBUG ("resume %s: %s", target_pid_to_str (ptid
), step
? "step" : "cont");
1744 /* Store the execution direction of the last resume. */
1745 record_btrace_resume_exec_dir
= execution_direction
;
1747 tp
= record_btrace_find_resume_thread (ptid
);
1749 error (_("Cannot find thread to resume."));
1751 /* Stop replaying other threads if the thread to resume is not replaying. */
1752 if (!btrace_is_replaying (tp
) && execution_direction
!= EXEC_REVERSE
)
1753 ALL_NON_EXITED_THREADS (other
)
1754 record_btrace_stop_replaying (other
);
1756 /* As long as we're not replaying, just forward the request. */
1757 if (!record_btrace_is_replaying (ops
) && execution_direction
!= EXEC_REVERSE
)
1760 return ops
->to_resume (ops
, ptid
, step
, signal
);
1763 /* Compute the btrace thread flag for the requested move. */
1765 flag
= execution_direction
== EXEC_REVERSE
? BTHR_RCONT
: BTHR_CONT
;
1767 flag
= execution_direction
== EXEC_REVERSE
? BTHR_RSTEP
: BTHR_STEP
;
1769 /* At the moment, we only move a single thread. We could also move
1770 all threads in parallel by single-stepping each resumed thread
1771 until the first runs into an event.
1772 When we do that, we would want to continue all other threads.
1773 For now, just resume one thread to not confuse to_wait. */
1774 record_btrace_resume_thread (tp
, flag
);
1776 /* We just indicate the resume intent here. The actual stepping happens in
1777 record_btrace_wait below. */
1779 /* Async support. */
1780 if (target_can_async_p ())
1782 target_async (inferior_event_handler
, 0);
1783 mark_async_event_handler (record_btrace_async_inferior_event_handler
);
1787 /* Find a thread to move. */
1789 static struct thread_info
*
1790 record_btrace_find_thread_to_move (ptid_t ptid
)
1792 struct thread_info
*tp
;
1794 /* First check the parameter thread. */
1795 tp
= find_thread_ptid (ptid
);
1796 if (tp
!= NULL
&& (tp
->btrace
.flags
& BTHR_MOVE
) != 0)
1799 /* Otherwise, find one other thread that has been resumed. */
1800 ALL_NON_EXITED_THREADS (tp
)
1801 if ((tp
->btrace
.flags
& BTHR_MOVE
) != 0)
1807 /* Return a target_waitstatus indicating that we ran out of history. */
1809 static struct target_waitstatus
1810 btrace_step_no_history (void)
1812 struct target_waitstatus status
;
1814 status
.kind
= TARGET_WAITKIND_NO_HISTORY
;
1819 /* Return a target_waitstatus indicating that a step finished. */
1821 static struct target_waitstatus
1822 btrace_step_stopped (void)
1824 struct target_waitstatus status
;
1826 status
.kind
= TARGET_WAITKIND_STOPPED
;
1827 status
.value
.sig
= GDB_SIGNAL_TRAP
;
1832 /* Clear the record histories. */
1835 record_btrace_clear_histories (struct btrace_thread_info
*btinfo
)
1837 xfree (btinfo
->insn_history
);
1838 xfree (btinfo
->call_history
);
1840 btinfo
->insn_history
= NULL
;
1841 btinfo
->call_history
= NULL
;
1844 /* Step a single thread. */
1846 static struct target_waitstatus
1847 record_btrace_step_thread (struct thread_info
*tp
)
1849 struct btrace_insn_iterator
*replay
, end
;
1850 struct btrace_thread_info
*btinfo
;
1851 struct address_space
*aspace
;
1852 struct inferior
*inf
;
1853 enum btrace_thread_flag flags
;
1856 /* We can't step without an execution history. */
1857 if (btrace_is_empty (tp
))
1858 return btrace_step_no_history ();
1860 btinfo
= &tp
->btrace
;
1861 replay
= btinfo
->replay
;
1863 flags
= btinfo
->flags
& BTHR_MOVE
;
1864 btinfo
->flags
&= ~BTHR_MOVE
;
1866 DEBUG ("stepping %d (%s): %u", tp
->num
, target_pid_to_str (tp
->ptid
), flags
);
1871 internal_error (__FILE__
, __LINE__
, _("invalid stepping type."));
1874 /* We're done if we're not replaying. */
1876 return btrace_step_no_history ();
1878 /* Skip gaps during replay. */
1881 steps
= btrace_insn_next (replay
, 1);
1884 record_btrace_stop_replaying (tp
);
1885 return btrace_step_no_history ();
1888 while (btrace_insn_get (replay
) == NULL
);
1890 /* Determine the end of the instruction trace. */
1891 btrace_insn_end (&end
, btinfo
);
1893 /* We stop replaying if we reached the end of the trace. */
1894 if (btrace_insn_cmp (replay
, &end
) == 0)
1895 record_btrace_stop_replaying (tp
);
1897 return btrace_step_stopped ();
1900 /* Start replaying if we're not already doing so. */
1902 replay
= record_btrace_start_replaying (tp
);
1904 /* If we can't step any further, we reached the end of the history.
1905 Skip gaps during replay. */
1908 steps
= btrace_insn_prev (replay
, 1);
1910 return btrace_step_no_history ();
1913 while (btrace_insn_get (replay
) == NULL
);
1915 return btrace_step_stopped ();
1918 /* We're done if we're not replaying. */
1920 return btrace_step_no_history ();
1922 inf
= find_inferior_ptid (tp
->ptid
);
1923 aspace
= inf
->aspace
;
1925 /* Determine the end of the instruction trace. */
1926 btrace_insn_end (&end
, btinfo
);
1930 const struct btrace_insn
*insn
;
1932 /* Skip gaps during replay. */
1935 steps
= btrace_insn_next (replay
, 1);
1938 record_btrace_stop_replaying (tp
);
1939 return btrace_step_no_history ();
1942 insn
= btrace_insn_get (replay
);
1944 while (insn
== NULL
);
1946 /* We stop replaying if we reached the end of the trace. */
1947 if (btrace_insn_cmp (replay
, &end
) == 0)
1949 record_btrace_stop_replaying (tp
);
1950 return btrace_step_no_history ();
1953 DEBUG ("stepping %d (%s) ... %s", tp
->num
,
1954 target_pid_to_str (tp
->ptid
),
1955 core_addr_to_string_nz (insn
->pc
));
1957 if (record_check_stopped_by_breakpoint (aspace
, insn
->pc
,
1958 &btinfo
->stop_reason
))
1959 return btrace_step_stopped ();
1963 /* Start replaying if we're not already doing so. */
1965 replay
= record_btrace_start_replaying (tp
);
1967 inf
= find_inferior_ptid (tp
->ptid
);
1968 aspace
= inf
->aspace
;
1972 const struct btrace_insn
*insn
;
1974 /* If we can't step any further, we reached the end of the history.
1975 Skip gaps during replay. */
1978 steps
= btrace_insn_prev (replay
, 1);
1980 return btrace_step_no_history ();
1982 insn
= btrace_insn_get (replay
);
1984 while (insn
== NULL
);
1986 DEBUG ("reverse-stepping %d (%s) ... %s", tp
->num
,
1987 target_pid_to_str (tp
->ptid
),
1988 core_addr_to_string_nz (insn
->pc
));
1990 if (record_check_stopped_by_breakpoint (aspace
, insn
->pc
,
1991 &btinfo
->stop_reason
))
1992 return btrace_step_stopped ();
1997 /* The to_wait method of target record-btrace. */
2000 record_btrace_wait (struct target_ops
*ops
, ptid_t ptid
,
2001 struct target_waitstatus
*status
, int options
)
2003 struct thread_info
*tp
, *other
;
2005 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid
), options
);
2007 /* As long as we're not replaying, just forward the request. */
2008 if (!record_btrace_is_replaying (ops
) && execution_direction
!= EXEC_REVERSE
)
2011 return ops
->to_wait (ops
, ptid
, status
, options
);
2014 /* Let's find a thread to move. */
2015 tp
= record_btrace_find_thread_to_move (ptid
);
2018 DEBUG ("wait %s: no thread", target_pid_to_str (ptid
));
2020 status
->kind
= TARGET_WAITKIND_IGNORE
;
2021 return minus_one_ptid
;
2024 /* We only move a single thread. We're not able to correlate threads. */
2025 *status
= record_btrace_step_thread (tp
);
2027 /* Stop all other threads. */
2029 ALL_NON_EXITED_THREADS (other
)
2030 other
->btrace
.flags
&= ~BTHR_MOVE
;
2032 /* Start record histories anew from the current position. */
2033 record_btrace_clear_histories (&tp
->btrace
);
2035 /* We moved the replay position but did not update registers. */
2036 registers_changed_ptid (tp
->ptid
);
/* The to_can_execute_reverse method of target record-btrace.
   Branch tracing always supports reverse execution.  */

static int
record_btrace_can_execute_reverse (struct target_ops *self)
{
  return 1;
}
2049 /* The to_stopped_by_sw_breakpoint method of target record-btrace. */
2052 record_btrace_stopped_by_sw_breakpoint (struct target_ops
*ops
)
2054 if (record_btrace_is_replaying (ops
))
2056 struct thread_info
*tp
= inferior_thread ();
2058 return tp
->btrace
.stop_reason
== TARGET_STOPPED_BY_SW_BREAKPOINT
;
2061 return ops
->beneath
->to_stopped_by_sw_breakpoint (ops
->beneath
);
2064 /* The to_supports_stopped_by_sw_breakpoint method of target
2068 record_btrace_supports_stopped_by_sw_breakpoint (struct target_ops
*ops
)
2070 if (record_btrace_is_replaying (ops
))
2073 return ops
->beneath
->to_supports_stopped_by_sw_breakpoint (ops
->beneath
);
2076 /* The to_stopped_by_sw_breakpoint method of target record-btrace. */
2079 record_btrace_stopped_by_hw_breakpoint (struct target_ops
*ops
)
2081 if (record_btrace_is_replaying (ops
))
2083 struct thread_info
*tp
= inferior_thread ();
2085 return tp
->btrace
.stop_reason
== TARGET_STOPPED_BY_HW_BREAKPOINT
;
2088 return ops
->beneath
->to_stopped_by_hw_breakpoint (ops
->beneath
);
2091 /* The to_supports_stopped_by_hw_breakpoint method of target
2095 record_btrace_supports_stopped_by_hw_breakpoint (struct target_ops
*ops
)
2097 if (record_btrace_is_replaying (ops
))
2100 return ops
->beneath
->to_supports_stopped_by_hw_breakpoint (ops
->beneath
);
2103 /* The to_update_thread_list method of target record-btrace. */
2106 record_btrace_update_thread_list (struct target_ops
*ops
)
2108 /* We don't add or remove threads during replay. */
2109 if (record_btrace_is_replaying (ops
))
2112 /* Forward the request. */
2114 ops
->to_update_thread_list (ops
);
2117 /* The to_thread_alive method of target record-btrace. */
2120 record_btrace_thread_alive (struct target_ops
*ops
, ptid_t ptid
)
2122 /* We don't add or remove threads during replay. */
2123 if (record_btrace_is_replaying (ops
))
2124 return find_thread_ptid (ptid
) != NULL
;
2126 /* Forward the request. */
2128 return ops
->to_thread_alive (ops
, ptid
);
2131 /* Set the replay branch trace instruction iterator. If IT is NULL, replay
2135 record_btrace_set_replay (struct thread_info
*tp
,
2136 const struct btrace_insn_iterator
*it
)
2138 struct btrace_thread_info
*btinfo
;
2140 btinfo
= &tp
->btrace
;
2142 if (it
== NULL
|| it
->function
== NULL
)
2143 record_btrace_stop_replaying (tp
);
2146 if (btinfo
->replay
== NULL
)
2147 record_btrace_start_replaying (tp
);
2148 else if (btrace_insn_cmp (btinfo
->replay
, it
) == 0)
2151 *btinfo
->replay
= *it
;
2152 registers_changed_ptid (tp
->ptid
);
2155 /* Start anew from the new replay position. */
2156 record_btrace_clear_histories (btinfo
);
2159 /* The to_goto_record_begin method of target record-btrace. */
2162 record_btrace_goto_begin (struct target_ops
*self
)
2164 struct thread_info
*tp
;
2165 struct btrace_insn_iterator begin
;
2167 tp
= require_btrace_thread ();
2169 btrace_insn_begin (&begin
, &tp
->btrace
);
2170 record_btrace_set_replay (tp
, &begin
);
2172 print_stack_frame (get_selected_frame (NULL
), 1, SRC_AND_LOC
, 1);
2175 /* The to_goto_record_end method of target record-btrace. */
2178 record_btrace_goto_end (struct target_ops
*ops
)
2180 struct thread_info
*tp
;
2182 tp
= require_btrace_thread ();
2184 record_btrace_set_replay (tp
, NULL
);
2186 print_stack_frame (get_selected_frame (NULL
), 1, SRC_AND_LOC
, 1);
2189 /* The to_goto_record method of target record-btrace. */
2192 record_btrace_goto (struct target_ops
*self
, ULONGEST insn
)
2194 struct thread_info
*tp
;
2195 struct btrace_insn_iterator it
;
2196 unsigned int number
;
2201 /* Check for wrap-arounds. */
2203 error (_("Instruction number out of range."));
2205 tp
= require_btrace_thread ();
2207 found
= btrace_find_insn_by_number (&it
, &tp
->btrace
, number
);
2209 error (_("No such instruction."));
2211 record_btrace_set_replay (tp
, &it
);
2213 print_stack_frame (get_selected_frame (NULL
), 1, SRC_AND_LOC
, 1);
2216 /* The to_execution_direction target method. */
2218 static enum exec_direction_kind
2219 record_btrace_execution_direction (struct target_ops
*self
)
2221 return record_btrace_resume_exec_dir
;
2224 /* The to_prepare_to_generate_core target method. */
2227 record_btrace_prepare_to_generate_core (struct target_ops
*self
)
2229 record_btrace_generating_corefile
= 1;
2232 /* The to_done_generating_core target method. */
2235 record_btrace_done_generating_core (struct target_ops
*self
)
2237 record_btrace_generating_corefile
= 0;
2240 /* Initialize the record-btrace target ops. */
2243 init_record_btrace_ops (void)
2245 struct target_ops
*ops
;
2247 ops
= &record_btrace_ops
;
2248 ops
->to_shortname
= "record-btrace";
2249 ops
->to_longname
= "Branch tracing target";
2250 ops
->to_doc
= "Collect control-flow trace and provide the execution history.";
2251 ops
->to_open
= record_btrace_open
;
2252 ops
->to_close
= record_btrace_close
;
2253 ops
->to_async
= record_btrace_async
;
2254 ops
->to_detach
= record_detach
;
2255 ops
->to_disconnect
= record_disconnect
;
2256 ops
->to_mourn_inferior
= record_mourn_inferior
;
2257 ops
->to_kill
= record_kill
;
2258 ops
->to_stop_recording
= record_btrace_stop_recording
;
2259 ops
->to_info_record
= record_btrace_info
;
2260 ops
->to_insn_history
= record_btrace_insn_history
;
2261 ops
->to_insn_history_from
= record_btrace_insn_history_from
;
2262 ops
->to_insn_history_range
= record_btrace_insn_history_range
;
2263 ops
->to_call_history
= record_btrace_call_history
;
2264 ops
->to_call_history_from
= record_btrace_call_history_from
;
2265 ops
->to_call_history_range
= record_btrace_call_history_range
;
2266 ops
->to_record_is_replaying
= record_btrace_is_replaying
;
2267 ops
->to_xfer_partial
= record_btrace_xfer_partial
;
2268 ops
->to_remove_breakpoint
= record_btrace_remove_breakpoint
;
2269 ops
->to_insert_breakpoint
= record_btrace_insert_breakpoint
;
2270 ops
->to_fetch_registers
= record_btrace_fetch_registers
;
2271 ops
->to_store_registers
= record_btrace_store_registers
;
2272 ops
->to_prepare_to_store
= record_btrace_prepare_to_store
;
2273 ops
->to_get_unwinder
= &record_btrace_to_get_unwinder
;
2274 ops
->to_get_tailcall_unwinder
= &record_btrace_to_get_tailcall_unwinder
;
2275 ops
->to_resume
= record_btrace_resume
;
2276 ops
->to_wait
= record_btrace_wait
;
2277 ops
->to_update_thread_list
= record_btrace_update_thread_list
;
2278 ops
->to_thread_alive
= record_btrace_thread_alive
;
2279 ops
->to_goto_record_begin
= record_btrace_goto_begin
;
2280 ops
->to_goto_record_end
= record_btrace_goto_end
;
2281 ops
->to_goto_record
= record_btrace_goto
;
2282 ops
->to_can_execute_reverse
= record_btrace_can_execute_reverse
;
2283 ops
->to_stopped_by_sw_breakpoint
= record_btrace_stopped_by_sw_breakpoint
;
2284 ops
->to_supports_stopped_by_sw_breakpoint
2285 = record_btrace_supports_stopped_by_sw_breakpoint
;
2286 ops
->to_stopped_by_hw_breakpoint
= record_btrace_stopped_by_hw_breakpoint
;
2287 ops
->to_supports_stopped_by_hw_breakpoint
2288 = record_btrace_supports_stopped_by_hw_breakpoint
;
2289 ops
->to_execution_direction
= record_btrace_execution_direction
;
2290 ops
->to_prepare_to_generate_core
= record_btrace_prepare_to_generate_core
;
2291 ops
->to_done_generating_core
= record_btrace_done_generating_core
;
2292 ops
->to_stratum
= record_stratum
;
2293 ops
->to_magic
= OPS_MAGIC
;
2296 /* Start recording in BTS format. */
2299 cmd_record_btrace_bts_start (char *args
, int from_tty
)
2301 volatile struct gdb_exception exception
;
2303 if (args
!= NULL
&& *args
!= 0)
2304 error (_("Invalid argument."));
2306 record_btrace_conf
.format
= BTRACE_FORMAT_BTS
;
2308 TRY_CATCH (exception
, RETURN_MASK_ALL
)
2309 execute_command ("target record-btrace", from_tty
);
2311 if (exception
.error
!= 0)
2313 record_btrace_conf
.format
= BTRACE_FORMAT_NONE
;
2314 throw_exception (exception
);
2318 /* Alias for "target record". */
2321 cmd_record_btrace_start (char *args
, int from_tty
)
2323 volatile struct gdb_exception exception
;
2325 if (args
!= NULL
&& *args
!= 0)
2326 error (_("Invalid argument."));
2328 record_btrace_conf
.format
= BTRACE_FORMAT_BTS
;
2330 TRY_CATCH (exception
, RETURN_MASK_ALL
)
2331 execute_command ("target record-btrace", from_tty
);
2333 if (exception
.error
== 0)
2336 record_btrace_conf
.format
= BTRACE_FORMAT_NONE
;
2337 throw_exception (exception
);
2340 /* The "set record btrace" command. */
2343 cmd_set_record_btrace (char *args
, int from_tty
)
2345 cmd_show_list (set_record_btrace_cmdlist
, from_tty
, "");
2348 /* The "show record btrace" command. */
2351 cmd_show_record_btrace (char *args
, int from_tty
)
2353 cmd_show_list (show_record_btrace_cmdlist
, from_tty
, "");
2356 /* The "show record btrace replay-memory-access" command. */
2359 cmd_show_replay_memory_access (struct ui_file
*file
, int from_tty
,
2360 struct cmd_list_element
*c
, const char *value
)
2362 fprintf_filtered (gdb_stdout
, _("Replay memory access is %s.\n"),
2363 replay_memory_access
);
2366 /* The "set record btrace bts" command. */
2369 cmd_set_record_btrace_bts (char *args
, int from_tty
)
2371 printf_unfiltered (_("\"set record btrace bts\" must be followed "
2372 "by an apporpriate subcommand.\n"));
2373 help_list (set_record_btrace_bts_cmdlist
, "set record btrace bts ",
2374 all_commands
, gdb_stdout
);
2377 /* The "show record btrace bts" command. */
2380 cmd_show_record_btrace_bts (char *args
, int from_tty
)
2382 cmd_show_list (show_record_btrace_bts_cmdlist
, from_tty
, "");
2385 void _initialize_record_btrace (void);
2387 /* Initialize btrace commands. */
2390 _initialize_record_btrace (void)
2392 add_prefix_cmd ("btrace", class_obscure
, cmd_record_btrace_start
,
2393 _("Start branch trace recording."), &record_btrace_cmdlist
,
2394 "record btrace ", 0, &record_cmdlist
);
2395 add_alias_cmd ("b", "btrace", class_obscure
, 1, &record_cmdlist
);
2397 add_cmd ("bts", class_obscure
, cmd_record_btrace_bts_start
,
2399 Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
2400 The processor stores a from/to record for each branch into a cyclic buffer.\n\
2401 This format may not be available on all processors."),
2402 &record_btrace_cmdlist
);
2403 add_alias_cmd ("bts", "btrace bts", class_obscure
, 1, &record_cmdlist
);
2405 add_prefix_cmd ("btrace", class_support
, cmd_set_record_btrace
,
2406 _("Set record options"), &set_record_btrace_cmdlist
,
2407 "set record btrace ", 0, &set_record_cmdlist
);
2409 add_prefix_cmd ("btrace", class_support
, cmd_show_record_btrace
,
2410 _("Show record options"), &show_record_btrace_cmdlist
,
2411 "show record btrace ", 0, &show_record_cmdlist
);
2413 add_setshow_enum_cmd ("replay-memory-access", no_class
,
2414 replay_memory_access_types
, &replay_memory_access
, _("\
2415 Set what memory accesses are allowed during replay."), _("\
2416 Show what memory accesses are allowed during replay."),
2417 _("Default is READ-ONLY.\n\n\
2418 The btrace record target does not trace data.\n\
2419 The memory therefore corresponds to the live target and not \
2420 to the current replay position.\n\n\
2421 When READ-ONLY, allow accesses to read-only memory during replay.\n\
2422 When READ-WRITE, allow accesses to read-only and read-write memory during \
2424 NULL
, cmd_show_replay_memory_access
,
2425 &set_record_btrace_cmdlist
,
2426 &show_record_btrace_cmdlist
);
2428 add_prefix_cmd ("bts", class_support
, cmd_set_record_btrace_bts
,
2429 _("Set record btrace bts options"),
2430 &set_record_btrace_bts_cmdlist
,
2431 "set record btrace bts ", 0, &set_record_btrace_cmdlist
);
2433 add_prefix_cmd ("bts", class_support
, cmd_show_record_btrace_bts
,
2434 _("Show record btrace bts options"),
2435 &show_record_btrace_bts_cmdlist
,
2436 "show record btrace bts ", 0, &show_record_btrace_cmdlist
);
2438 add_setshow_uinteger_cmd ("buffer-size", no_class
,
2439 &record_btrace_conf
.bts
.size
,
2440 _("Set the record/replay bts buffer size."),
2441 _("Show the record/replay bts buffer size."), _("\
2442 When starting recording request a trace buffer of this size. \
2443 The actual buffer size may differ from the requested size. \
2444 Use \"info record\" to see the actual buffer size.\n\n\
2445 Bigger buffers allow longer recording but also take more time to process \
2446 the recorded execution trace.\n\n\
2447 The trace buffer size may not be changed while recording."), NULL
, NULL
,
2448 &set_record_btrace_bts_cmdlist
,
2449 &show_record_btrace_bts_cmdlist
);
2451 init_record_btrace_ops ();
2452 add_target (&record_btrace_ops
);
2454 bfcache
= htab_create_alloc (50, bfcache_hash
, bfcache_eq
, NULL
,
2457 record_btrace_conf
.bts
.size
= 64 * 1024;