1 /* Branch trace support for GDB, the GNU debugger.
3 Copyright (C) 2013-2018 Free Software Foundation, Inc.
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "record-btrace.h"
25 #include "gdbthread.h"
30 #include "cli/cli-utils.h"
34 #include "filenames.h"
36 #include "frame-unwind.h"
39 #include "event-loop.h"
44 /* The target_ops of record-btrace. */
45 static struct target_ops record_btrace_ops
;
47 /* A new thread observer enabling branch tracing for the new thread. */
48 static struct observer
*record_btrace_thread_observer
;
50 /* Memory access types used in set/show record btrace replay-memory-access. */
51 static const char replay_memory_access_read_only
[] = "read-only";
52 static const char replay_memory_access_read_write
[] = "read-write";
53 static const char *const replay_memory_access_types
[] =
55 replay_memory_access_read_only
,
56 replay_memory_access_read_write
,
60 /* The currently allowed replay memory access type. */
61 static const char *replay_memory_access
= replay_memory_access_read_only
;
63 /* Command lists for "set/show record btrace". */
64 static struct cmd_list_element
*set_record_btrace_cmdlist
;
65 static struct cmd_list_element
*show_record_btrace_cmdlist
;
67 /* The execution direction of the last resume we got. See record-full.c. */
68 static enum exec_direction_kind record_btrace_resume_exec_dir
= EXEC_FORWARD
;
70 /* The async event handler for reverse/replay execution. */
71 static struct async_event_handler
*record_btrace_async_inferior_event_handler
;
73 /* A flag indicating that we are currently generating a core file. */
74 static int record_btrace_generating_corefile
;
76 /* The current branch trace configuration. */
77 static struct btrace_config record_btrace_conf
;
79 /* Command list for "record btrace". */
80 static struct cmd_list_element
*record_btrace_cmdlist
;
82 /* Command lists for "set/show record btrace bts". */
83 static struct cmd_list_element
*set_record_btrace_bts_cmdlist
;
84 static struct cmd_list_element
*show_record_btrace_bts_cmdlist
;
86 /* Command lists for "set/show record btrace pt". */
87 static struct cmd_list_element
*set_record_btrace_pt_cmdlist
;
88 static struct cmd_list_element
*show_record_btrace_pt_cmdlist
;
/* Print a record-btrace debug message.  Use do ... while (0) to avoid
   ambiguities when used in if statements.  */

#define DEBUG(msg, args...)						\
  do									\
    {									\
      if (record_debug != 0)						\
	fprintf_unfiltered (gdb_stdlog,					\
			    "[record-btrace] " msg "\n", ##args);	\
    }									\
  while (0)
103 /* Update the branch trace for the current thread and return a pointer to its
106 Throws an error if there is no thread or no trace. This function never
109 static struct thread_info
*
110 require_btrace_thread (void)
112 struct thread_info
*tp
;
116 tp
= find_thread_ptid (inferior_ptid
);
118 error (_("No thread."));
120 validate_registers_access ();
124 if (btrace_is_empty (tp
))
125 error (_("No trace."));
130 /* Update the branch trace for the current thread and return a pointer to its
131 branch trace information struct.
133 Throws an error if there is no thread or no trace. This function never
136 static struct btrace_thread_info
*
137 require_btrace (void)
139 struct thread_info
*tp
;
141 tp
= require_btrace_thread ();
146 /* Enable branch tracing for one thread. Warn on errors. */
149 record_btrace_enable_warn (struct thread_info
*tp
)
153 btrace_enable (tp
, &record_btrace_conf
);
155 CATCH (error
, RETURN_MASK_ERROR
)
157 warning ("%s", error
.message
);
162 /* Callback function to disable branch tracing for one thread. */
165 record_btrace_disable_callback (void *arg
)
167 struct thread_info
*tp
= (struct thread_info
*) arg
;
172 /* Enable automatic tracing of new threads. */
175 record_btrace_auto_enable (void)
177 DEBUG ("attach thread observer");
179 record_btrace_thread_observer
180 = observer_attach_new_thread (record_btrace_enable_warn
);
183 /* Disable automatic tracing of new threads. */
186 record_btrace_auto_disable (void)
188 /* The observer may have been detached, already. */
189 if (record_btrace_thread_observer
== NULL
)
192 DEBUG ("detach thread observer");
194 observer_detach_new_thread (record_btrace_thread_observer
);
195 record_btrace_thread_observer
= NULL
;
198 /* The record-btrace async event handler function. */
201 record_btrace_handle_async_inferior_event (gdb_client_data data
)
203 inferior_event_handler (INF_REG_EVENT
, NULL
);
206 /* See record-btrace.h. */
209 record_btrace_push_target (void)
213 record_btrace_auto_enable ();
215 push_target (&record_btrace_ops
);
217 record_btrace_async_inferior_event_handler
218 = create_async_event_handler (record_btrace_handle_async_inferior_event
,
220 record_btrace_generating_corefile
= 0;
222 format
= btrace_format_short_string (record_btrace_conf
.format
);
223 observer_notify_record_changed (current_inferior (), 1, "btrace", format
);
226 /* The to_open method of target record-btrace. */
229 record_btrace_open (const char *args
, int from_tty
)
231 struct cleanup
*disable_chain
;
232 struct thread_info
*tp
;
238 if (!target_has_execution
)
239 error (_("The program is not being run."));
241 gdb_assert (record_btrace_thread_observer
== NULL
);
243 disable_chain
= make_cleanup (null_cleanup
, NULL
);
244 ALL_NON_EXITED_THREADS (tp
)
245 if (args
== NULL
|| *args
== 0 || number_is_in_list (args
, tp
->global_num
))
247 btrace_enable (tp
, &record_btrace_conf
);
249 make_cleanup (record_btrace_disable_callback
, tp
);
252 record_btrace_push_target ();
254 discard_cleanups (disable_chain
);
257 /* The to_stop_recording method of target record-btrace. */
260 record_btrace_stop_recording (struct target_ops
*self
)
262 struct thread_info
*tp
;
264 DEBUG ("stop recording");
266 record_btrace_auto_disable ();
268 ALL_NON_EXITED_THREADS (tp
)
269 if (tp
->btrace
.target
!= NULL
)
273 /* The to_disconnect method of target record-btrace. */
276 record_btrace_disconnect (struct target_ops
*self
, const char *args
,
279 struct target_ops
*beneath
= self
->beneath
;
281 /* Do not stop recording, just clean up GDB side. */
282 unpush_target (self
);
284 /* Forward disconnect. */
285 beneath
->to_disconnect (beneath
, args
, from_tty
);
288 /* The to_close method of target record-btrace. */
291 record_btrace_close (struct target_ops
*self
)
293 struct thread_info
*tp
;
295 if (record_btrace_async_inferior_event_handler
!= NULL
)
296 delete_async_event_handler (&record_btrace_async_inferior_event_handler
);
298 /* Make sure automatic recording gets disabled even if we did not stop
299 recording before closing the record-btrace target. */
300 record_btrace_auto_disable ();
302 /* We should have already stopped recording.
303 Tear down btrace in case we have not. */
304 ALL_NON_EXITED_THREADS (tp
)
305 btrace_teardown (tp
);
308 /* The to_async method of target record-btrace. */
311 record_btrace_async (struct target_ops
*ops
, int enable
)
314 mark_async_event_handler (record_btrace_async_inferior_event_handler
);
316 clear_async_event_handler (record_btrace_async_inferior_event_handler
);
318 ops
->beneath
->to_async (ops
->beneath
, enable
);
/* Adjusts the size and returns a human readable size suffix.

   SIZE is scaled down in place to the largest of GB/MB/kB that divides
   it exactly; the matching suffix is returned ("" if none divides).  */

static const char *
record_btrace_adjust_size (unsigned int *size)
{
  unsigned int sz;

  sz = *size;

  if ((sz & ((1u << 30) - 1)) == 0)
    {
      *size = sz >> 30;
      return "GB";
    }
  else if ((sz & ((1u << 20) - 1)) == 0)
    {
      *size = sz >> 20;
      return "MB";
    }
  else if ((sz & ((1u << 10) - 1)) == 0)
    {
      *size = sz >> 10;
      return "kB";
    }

  return "";
}
349 /* Print a BTS configuration. */
352 record_btrace_print_bts_conf (const struct btrace_config_bts
*conf
)
360 suffix
= record_btrace_adjust_size (&size
);
361 printf_unfiltered (_("Buffer size: %u%s.\n"), size
, suffix
);
365 /* Print an Intel Processor Trace configuration. */
368 record_btrace_print_pt_conf (const struct btrace_config_pt
*conf
)
376 suffix
= record_btrace_adjust_size (&size
);
377 printf_unfiltered (_("Buffer size: %u%s.\n"), size
, suffix
);
381 /* Print a branch tracing configuration. */
384 record_btrace_print_conf (const struct btrace_config
*conf
)
386 printf_unfiltered (_("Recording format: %s.\n"),
387 btrace_format_string (conf
->format
));
389 switch (conf
->format
)
391 case BTRACE_FORMAT_NONE
:
394 case BTRACE_FORMAT_BTS
:
395 record_btrace_print_bts_conf (&conf
->bts
);
398 case BTRACE_FORMAT_PT
:
399 record_btrace_print_pt_conf (&conf
->pt
);
403 internal_error (__FILE__
, __LINE__
, _("Unkown branch trace format."));
406 /* The to_info_record method of target record-btrace. */
409 record_btrace_info (struct target_ops
*self
)
411 struct btrace_thread_info
*btinfo
;
412 const struct btrace_config
*conf
;
413 struct thread_info
*tp
;
414 unsigned int insns
, calls
, gaps
;
418 tp
= find_thread_ptid (inferior_ptid
);
420 error (_("No thread."));
422 validate_registers_access ();
424 btinfo
= &tp
->btrace
;
426 conf
= btrace_conf (btinfo
);
428 record_btrace_print_conf (conf
);
436 if (!btrace_is_empty (tp
))
438 struct btrace_call_iterator call
;
439 struct btrace_insn_iterator insn
;
441 btrace_call_end (&call
, btinfo
);
442 btrace_call_prev (&call
, 1);
443 calls
= btrace_call_number (&call
);
445 btrace_insn_end (&insn
, btinfo
);
446 insns
= btrace_insn_number (&insn
);
448 /* If the last instruction is not a gap, it is the current instruction
449 that is not actually part of the record. */
450 if (btrace_insn_get (&insn
) != NULL
)
453 gaps
= btinfo
->ngaps
;
456 printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
457 "for thread %s (%s).\n"), insns
, calls
, gaps
,
458 print_thread_id (tp
), target_pid_to_str (tp
->ptid
));
460 if (btrace_is_replaying (tp
))
461 printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
462 btrace_insn_number (btinfo
->replay
));
465 /* Print a decode error. */
468 btrace_ui_out_decode_error (struct ui_out
*uiout
, int errcode
,
469 enum btrace_format format
)
471 const char *errstr
= btrace_decode_error (format
, errcode
);
473 uiout
->text (_("["));
474 /* ERRCODE > 0 indicates notifications on BTRACE_FORMAT_PT. */
475 if (!(format
== BTRACE_FORMAT_PT
&& errcode
> 0))
477 uiout
->text (_("decode error ("));
478 uiout
->field_int ("errcode", errcode
);
479 uiout
->text (_("): "));
481 uiout
->text (errstr
);
482 uiout
->text (_("]\n"));
485 /* Print an unsigned int. */
488 ui_out_field_uint (struct ui_out
*uiout
, const char *fld
, unsigned int val
)
490 uiout
->field_fmt (fld
, "%u", val
);
/* A range of source lines.  */

struct btrace_line_range
{
  /* The symtab this line is from.  */
  struct symtab *symtab;

  /* The first line (inclusive).  */
  int begin;

  /* The last line (exclusive).  */
  int end;
};
507 /* Construct a line range. */
509 static struct btrace_line_range
510 btrace_mk_line_range (struct symtab
*symtab
, int begin
, int end
)
512 struct btrace_line_range range
;
514 range
.symtab
= symtab
;
521 /* Add a line to a line range. */
523 static struct btrace_line_range
524 btrace_line_range_add (struct btrace_line_range range
, int line
)
526 if (range
.end
<= range
.begin
)
528 /* This is the first entry. */
530 range
.end
= line
+ 1;
532 else if (line
< range
.begin
)
534 else if (range
.end
< line
)
540 /* Return non-zero if RANGE is empty, zero otherwise. */
543 btrace_line_range_is_empty (struct btrace_line_range range
)
545 return range
.end
<= range
.begin
;
548 /* Return non-zero if LHS contains RHS, zero otherwise. */
551 btrace_line_range_contains_range (struct btrace_line_range lhs
,
552 struct btrace_line_range rhs
)
554 return ((lhs
.symtab
== rhs
.symtab
)
555 && (lhs
.begin
<= rhs
.begin
)
556 && (rhs
.end
<= lhs
.end
));
559 /* Find the line range associated with PC. */
561 static struct btrace_line_range
562 btrace_find_line_range (CORE_ADDR pc
)
564 struct btrace_line_range range
;
565 struct linetable_entry
*lines
;
566 struct linetable
*ltable
;
567 struct symtab
*symtab
;
570 symtab
= find_pc_line_symtab (pc
);
572 return btrace_mk_line_range (NULL
, 0, 0);
574 ltable
= SYMTAB_LINETABLE (symtab
);
576 return btrace_mk_line_range (symtab
, 0, 0);
578 nlines
= ltable
->nitems
;
579 lines
= ltable
->item
;
581 return btrace_mk_line_range (symtab
, 0, 0);
583 range
= btrace_mk_line_range (symtab
, 0, 0);
584 for (i
= 0; i
< nlines
- 1; i
++)
586 if ((lines
[i
].pc
== pc
) && (lines
[i
].line
!= 0))
587 range
= btrace_line_range_add (range
, lines
[i
].line
);
593 /* Print source lines in LINES to UIOUT.
595 UI_ITEM_CHAIN is a cleanup chain for the last source line and the
596 instructions corresponding to that source line. When printing a new source
597 line, we do the cleanups for the open chain and open a new cleanup chain for
598 the new source line. If the source line range in LINES is not empty, this
599 function will leave the cleanup chain for the last printed source line open
600 so instructions can be added to it. */
603 btrace_print_lines (struct btrace_line_range lines
, struct ui_out
*uiout
,
604 struct cleanup
**ui_item_chain
, int flags
)
606 print_source_lines_flags psl_flags
;
610 if (flags
& DISASSEMBLY_FILENAME
)
611 psl_flags
|= PRINT_SOURCE_LINES_FILENAME
;
613 for (line
= lines
.begin
; line
< lines
.end
; ++line
)
615 if (*ui_item_chain
!= NULL
)
616 do_cleanups (*ui_item_chain
);
619 = make_cleanup_ui_out_tuple_begin_end (uiout
, "src_and_asm_line");
621 print_source_lines (lines
.symtab
, line
, line
+ 1, psl_flags
);
623 make_cleanup_ui_out_list_begin_end (uiout
, "line_asm_insn");
627 /* Disassemble a section of the recorded instruction trace. */
630 btrace_insn_history (struct ui_out
*uiout
,
631 const struct btrace_thread_info
*btinfo
,
632 const struct btrace_insn_iterator
*begin
,
633 const struct btrace_insn_iterator
*end
,
634 gdb_disassembly_flags flags
)
636 struct cleanup
*cleanups
, *ui_item_chain
;
637 struct gdbarch
*gdbarch
;
638 struct btrace_insn_iterator it
;
639 struct btrace_line_range last_lines
;
641 DEBUG ("itrace (0x%x): [%u; %u)", (unsigned) flags
,
642 btrace_insn_number (begin
), btrace_insn_number (end
));
644 flags
|= DISASSEMBLY_SPECULATIVE
;
646 gdbarch
= target_gdbarch ();
647 last_lines
= btrace_mk_line_range (NULL
, 0, 0);
649 cleanups
= make_cleanup_ui_out_list_begin_end (uiout
, "asm_insns");
651 /* UI_ITEM_CHAIN is a cleanup chain for the last source line and the
652 instructions corresponding to that line. */
653 ui_item_chain
= NULL
;
655 gdb_pretty_print_disassembler
disasm (gdbarch
);
657 for (it
= *begin
; btrace_insn_cmp (&it
, end
) != 0; btrace_insn_next (&it
, 1))
659 const struct btrace_insn
*insn
;
661 insn
= btrace_insn_get (&it
);
663 /* A NULL instruction indicates a gap in the trace. */
666 const struct btrace_config
*conf
;
668 conf
= btrace_conf (btinfo
);
670 /* We have trace so we must have a configuration. */
671 gdb_assert (conf
!= NULL
);
673 uiout
->field_fmt ("insn-number", "%u",
674 btrace_insn_number (&it
));
677 btrace_ui_out_decode_error (uiout
, btrace_insn_get_error (&it
),
682 struct disasm_insn dinsn
;
684 if ((flags
& DISASSEMBLY_SOURCE
) != 0)
686 struct btrace_line_range lines
;
688 lines
= btrace_find_line_range (insn
->pc
);
689 if (!btrace_line_range_is_empty (lines
)
690 && !btrace_line_range_contains_range (last_lines
, lines
))
692 btrace_print_lines (lines
, uiout
, &ui_item_chain
, flags
);
695 else if (ui_item_chain
== NULL
)
698 = make_cleanup_ui_out_tuple_begin_end (uiout
,
700 /* No source information. */
701 make_cleanup_ui_out_list_begin_end (uiout
, "line_asm_insn");
704 gdb_assert (ui_item_chain
!= NULL
);
707 memset (&dinsn
, 0, sizeof (dinsn
));
708 dinsn
.number
= btrace_insn_number (&it
);
709 dinsn
.addr
= insn
->pc
;
711 if ((insn
->flags
& BTRACE_INSN_FLAG_SPECULATIVE
) != 0)
712 dinsn
.is_speculative
= 1;
714 disasm
.pretty_print_insn (uiout
, &dinsn
, flags
);
718 do_cleanups (cleanups
);
721 /* The to_insn_history method of target record-btrace. */
724 record_btrace_insn_history (struct target_ops
*self
, int size
,
725 gdb_disassembly_flags flags
)
727 struct btrace_thread_info
*btinfo
;
728 struct btrace_insn_history
*history
;
729 struct btrace_insn_iterator begin
, end
;
730 struct ui_out
*uiout
;
731 unsigned int context
, covered
;
733 uiout
= current_uiout
;
734 ui_out_emit_tuple
tuple_emitter (uiout
, "insn history");
735 context
= abs (size
);
737 error (_("Bad record instruction-history-size."));
739 btinfo
= require_btrace ();
740 history
= btinfo
->insn_history
;
743 struct btrace_insn_iterator
*replay
;
745 DEBUG ("insn-history (0x%x): %d", (unsigned) flags
, size
);
747 /* If we're replaying, we start at the replay position. Otherwise, we
748 start at the tail of the trace. */
749 replay
= btinfo
->replay
;
753 btrace_insn_end (&begin
, btinfo
);
755 /* We start from here and expand in the requested direction. Then we
756 expand in the other direction, as well, to fill up any remaining
761 /* We want the current position covered, as well. */
762 covered
= btrace_insn_next (&end
, 1);
763 covered
+= btrace_insn_prev (&begin
, context
- covered
);
764 covered
+= btrace_insn_next (&end
, context
- covered
);
768 covered
= btrace_insn_next (&end
, context
);
769 covered
+= btrace_insn_prev (&begin
, context
- covered
);
774 begin
= history
->begin
;
777 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", (unsigned) flags
, size
,
778 btrace_insn_number (&begin
), btrace_insn_number (&end
));
783 covered
= btrace_insn_prev (&begin
, context
);
788 covered
= btrace_insn_next (&end
, context
);
793 btrace_insn_history (uiout
, btinfo
, &begin
, &end
, flags
);
797 printf_unfiltered (_("At the start of the branch trace record.\n"));
799 printf_unfiltered (_("At the end of the branch trace record.\n"));
802 btrace_set_insn_history (btinfo
, &begin
, &end
);
805 /* The to_insn_history_range method of target record-btrace. */
808 record_btrace_insn_history_range (struct target_ops
*self
,
809 ULONGEST from
, ULONGEST to
,
810 gdb_disassembly_flags flags
)
812 struct btrace_thread_info
*btinfo
;
813 struct btrace_insn_iterator begin
, end
;
814 struct ui_out
*uiout
;
815 unsigned int low
, high
;
818 uiout
= current_uiout
;
819 ui_out_emit_tuple
tuple_emitter (uiout
, "insn history");
823 DEBUG ("insn-history (0x%x): [%u; %u)", (unsigned) flags
, low
, high
);
825 /* Check for wrap-arounds. */
826 if (low
!= from
|| high
!= to
)
827 error (_("Bad range."));
830 error (_("Bad range."));
832 btinfo
= require_btrace ();
834 found
= btrace_find_insn_by_number (&begin
, btinfo
, low
);
836 error (_("Range out of bounds."));
838 found
= btrace_find_insn_by_number (&end
, btinfo
, high
);
841 /* Silently truncate the range. */
842 btrace_insn_end (&end
, btinfo
);
846 /* We want both begin and end to be inclusive. */
847 btrace_insn_next (&end
, 1);
850 btrace_insn_history (uiout
, btinfo
, &begin
, &end
, flags
);
851 btrace_set_insn_history (btinfo
, &begin
, &end
);
854 /* The to_insn_history_from method of target record-btrace. */
857 record_btrace_insn_history_from (struct target_ops
*self
,
858 ULONGEST from
, int size
,
859 gdb_disassembly_flags flags
)
861 ULONGEST begin
, end
, context
;
863 context
= abs (size
);
865 error (_("Bad record instruction-history-size."));
874 begin
= from
- context
+ 1;
879 end
= from
+ context
- 1;
881 /* Check for wrap-around. */
886 record_btrace_insn_history_range (self
, begin
, end
, flags
);
889 /* Print the instruction number range for a function call history line. */
892 btrace_call_history_insn_range (struct ui_out
*uiout
,
893 const struct btrace_function
*bfun
)
895 unsigned int begin
, end
, size
;
897 size
= bfun
->insn
.size ();
898 gdb_assert (size
> 0);
900 begin
= bfun
->insn_offset
;
901 end
= begin
+ size
- 1;
903 ui_out_field_uint (uiout
, "insn begin", begin
);
905 ui_out_field_uint (uiout
, "insn end", end
);
908 /* Compute the lowest and highest source line for the instructions in BFUN
909 and return them in PBEGIN and PEND.
910 Ignore instructions that can't be mapped to BFUN, e.g. instructions that
911 result from inlining or macro expansion. */
914 btrace_compute_src_line_range (const struct btrace_function
*bfun
,
915 int *pbegin
, int *pend
)
917 struct symtab
*symtab
;
928 symtab
= symbol_symtab (sym
);
930 for (const btrace_insn
&insn
: bfun
->insn
)
932 struct symtab_and_line sal
;
934 sal
= find_pc_line (insn
.pc
, 0);
935 if (sal
.symtab
!= symtab
|| sal
.line
== 0)
938 begin
= std::min (begin
, sal
.line
);
939 end
= std::max (end
, sal
.line
);
947 /* Print the source line information for a function call history line. */
950 btrace_call_history_src_line (struct ui_out
*uiout
,
951 const struct btrace_function
*bfun
)
960 uiout
->field_string ("file",
961 symtab_to_filename_for_display (symbol_symtab (sym
)));
963 btrace_compute_src_line_range (bfun
, &begin
, &end
);
968 uiout
->field_int ("min line", begin
);
974 uiout
->field_int ("max line", end
);
977 /* Get the name of a branch trace function. */
980 btrace_get_bfun_name (const struct btrace_function
*bfun
)
982 struct minimal_symbol
*msym
;
992 return SYMBOL_PRINT_NAME (sym
);
993 else if (msym
!= NULL
)
994 return MSYMBOL_PRINT_NAME (msym
);
999 /* Disassemble a section of the recorded function trace. */
1002 btrace_call_history (struct ui_out
*uiout
,
1003 const struct btrace_thread_info
*btinfo
,
1004 const struct btrace_call_iterator
*begin
,
1005 const struct btrace_call_iterator
*end
,
1008 struct btrace_call_iterator it
;
1009 record_print_flags flags
= (enum record_print_flag
) int_flags
;
1011 DEBUG ("ftrace (0x%x): [%u; %u)", int_flags
, btrace_call_number (begin
),
1012 btrace_call_number (end
));
1014 for (it
= *begin
; btrace_call_cmp (&it
, end
) < 0; btrace_call_next (&it
, 1))
1016 const struct btrace_function
*bfun
;
1017 struct minimal_symbol
*msym
;
1020 bfun
= btrace_call_get (&it
);
1024 /* Print the function index. */
1025 ui_out_field_uint (uiout
, "index", bfun
->number
);
1028 /* Indicate gaps in the trace. */
1029 if (bfun
->errcode
!= 0)
1031 const struct btrace_config
*conf
;
1033 conf
= btrace_conf (btinfo
);
1035 /* We have trace so we must have a configuration. */
1036 gdb_assert (conf
!= NULL
);
1038 btrace_ui_out_decode_error (uiout
, bfun
->errcode
, conf
->format
);
1043 if ((flags
& RECORD_PRINT_INDENT_CALLS
) != 0)
1045 int level
= bfun
->level
+ btinfo
->level
, i
;
1047 for (i
= 0; i
< level
; ++i
)
1052 uiout
->field_string ("function", SYMBOL_PRINT_NAME (sym
));
1053 else if (msym
!= NULL
)
1054 uiout
->field_string ("function", MSYMBOL_PRINT_NAME (msym
));
1055 else if (!uiout
->is_mi_like_p ())
1056 uiout
->field_string ("function", "??");
1058 if ((flags
& RECORD_PRINT_INSN_RANGE
) != 0)
1060 uiout
->text (_("\tinst "));
1061 btrace_call_history_insn_range (uiout
, bfun
);
1064 if ((flags
& RECORD_PRINT_SRC_LINE
) != 0)
1066 uiout
->text (_("\tat "));
1067 btrace_call_history_src_line (uiout
, bfun
);
1074 /* The to_call_history method of target record-btrace. */
1077 record_btrace_call_history (struct target_ops
*self
, int size
,
1078 record_print_flags flags
)
1080 struct btrace_thread_info
*btinfo
;
1081 struct btrace_call_history
*history
;
1082 struct btrace_call_iterator begin
, end
;
1083 struct ui_out
*uiout
;
1084 unsigned int context
, covered
;
1086 uiout
= current_uiout
;
1087 ui_out_emit_tuple
tuple_emitter (uiout
, "insn history");
1088 context
= abs (size
);
1090 error (_("Bad record function-call-history-size."));
1092 btinfo
= require_btrace ();
1093 history
= btinfo
->call_history
;
1094 if (history
== NULL
)
1096 struct btrace_insn_iterator
*replay
;
1098 DEBUG ("call-history (0x%x): %d", (int) flags
, size
);
1100 /* If we're replaying, we start at the replay position. Otherwise, we
1101 start at the tail of the trace. */
1102 replay
= btinfo
->replay
;
1105 begin
.btinfo
= btinfo
;
1106 begin
.index
= replay
->call_index
;
1109 btrace_call_end (&begin
, btinfo
);
1111 /* We start from here and expand in the requested direction. Then we
1112 expand in the other direction, as well, to fill up any remaining
1117 /* We want the current position covered, as well. */
1118 covered
= btrace_call_next (&end
, 1);
1119 covered
+= btrace_call_prev (&begin
, context
- covered
);
1120 covered
+= btrace_call_next (&end
, context
- covered
);
1124 covered
= btrace_call_next (&end
, context
);
1125 covered
+= btrace_call_prev (&begin
, context
- covered
);
1130 begin
= history
->begin
;
1133 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", (int) flags
, size
,
1134 btrace_call_number (&begin
), btrace_call_number (&end
));
1139 covered
= btrace_call_prev (&begin
, context
);
1144 covered
= btrace_call_next (&end
, context
);
1149 btrace_call_history (uiout
, btinfo
, &begin
, &end
, flags
);
1153 printf_unfiltered (_("At the start of the branch trace record.\n"));
1155 printf_unfiltered (_("At the end of the branch trace record.\n"));
1158 btrace_set_call_history (btinfo
, &begin
, &end
);
1161 /* The to_call_history_range method of target record-btrace. */
1164 record_btrace_call_history_range (struct target_ops
*self
,
1165 ULONGEST from
, ULONGEST to
,
1166 record_print_flags flags
)
1168 struct btrace_thread_info
*btinfo
;
1169 struct btrace_call_iterator begin
, end
;
1170 struct ui_out
*uiout
;
1171 unsigned int low
, high
;
1174 uiout
= current_uiout
;
1175 ui_out_emit_tuple
tuple_emitter (uiout
, "func history");
1179 DEBUG ("call-history (0x%x): [%u; %u)", (int) flags
, low
, high
);
1181 /* Check for wrap-arounds. */
1182 if (low
!= from
|| high
!= to
)
1183 error (_("Bad range."));
1186 error (_("Bad range."));
1188 btinfo
= require_btrace ();
1190 found
= btrace_find_call_by_number (&begin
, btinfo
, low
);
1192 error (_("Range out of bounds."));
1194 found
= btrace_find_call_by_number (&end
, btinfo
, high
);
1197 /* Silently truncate the range. */
1198 btrace_call_end (&end
, btinfo
);
1202 /* We want both begin and end to be inclusive. */
1203 btrace_call_next (&end
, 1);
1206 btrace_call_history (uiout
, btinfo
, &begin
, &end
, flags
);
1207 btrace_set_call_history (btinfo
, &begin
, &end
);
1210 /* The to_call_history_from method of target record-btrace. */
1213 record_btrace_call_history_from (struct target_ops
*self
,
1214 ULONGEST from
, int size
,
1215 record_print_flags flags
)
1217 ULONGEST begin
, end
, context
;
1219 context
= abs (size
);
1221 error (_("Bad record function-call-history-size."));
1230 begin
= from
- context
+ 1;
1235 end
= from
+ context
- 1;
1237 /* Check for wrap-around. */
1242 record_btrace_call_history_range (self
, begin
, end
, flags
);
1245 /* The to_record_method method of target record-btrace. */
1247 static enum record_method
1248 record_btrace_record_method (struct target_ops
*self
, ptid_t ptid
)
1250 struct thread_info
* const tp
= find_thread_ptid (ptid
);
1253 error (_("No thread."));
1255 if (tp
->btrace
.target
== NULL
)
1256 return RECORD_METHOD_NONE
;
1258 return RECORD_METHOD_BTRACE
;
1261 /* The to_record_is_replaying method of target record-btrace. */
1264 record_btrace_is_replaying (struct target_ops
*self
, ptid_t ptid
)
1266 struct thread_info
*tp
;
1268 ALL_NON_EXITED_THREADS (tp
)
1269 if (ptid_match (tp
->ptid
, ptid
) && btrace_is_replaying (tp
))
1275 /* The to_record_will_replay method of target record-btrace. */
1278 record_btrace_will_replay (struct target_ops
*self
, ptid_t ptid
, int dir
)
1280 return dir
== EXEC_REVERSE
|| record_btrace_is_replaying (self
, ptid
);
1283 /* The to_xfer_partial method of target record-btrace. */
1285 static enum target_xfer_status
1286 record_btrace_xfer_partial (struct target_ops
*ops
, enum target_object object
,
1287 const char *annex
, gdb_byte
*readbuf
,
1288 const gdb_byte
*writebuf
, ULONGEST offset
,
1289 ULONGEST len
, ULONGEST
*xfered_len
)
1291 /* Filter out requests that don't make sense during replay. */
1292 if (replay_memory_access
== replay_memory_access_read_only
1293 && !record_btrace_generating_corefile
1294 && record_btrace_is_replaying (ops
, inferior_ptid
))
1298 case TARGET_OBJECT_MEMORY
:
1300 struct target_section
*section
;
1302 /* We do not allow writing memory in general. */
1303 if (writebuf
!= NULL
)
1306 return TARGET_XFER_UNAVAILABLE
;
1309 /* We allow reading readonly memory. */
1310 section
= target_section_by_addr (ops
, offset
);
1311 if (section
!= NULL
)
1313 /* Check if the section we found is readonly. */
1314 if ((bfd_get_section_flags (section
->the_bfd_section
->owner
,
1315 section
->the_bfd_section
)
1316 & SEC_READONLY
) != 0)
1318 /* Truncate the request to fit into this section. */
1319 len
= std::min (len
, section
->endaddr
- offset
);
1325 return TARGET_XFER_UNAVAILABLE
;
1330 /* Forward the request. */
1332 return ops
->to_xfer_partial (ops
, object
, annex
, readbuf
, writebuf
,
1333 offset
, len
, xfered_len
);
1336 /* The to_insert_breakpoint method of target record-btrace. */
1339 record_btrace_insert_breakpoint (struct target_ops
*ops
,
1340 struct gdbarch
*gdbarch
,
1341 struct bp_target_info
*bp_tgt
)
1346 /* Inserting breakpoints requires accessing memory. Allow it for the
1347 duration of this function. */
1348 old
= replay_memory_access
;
1349 replay_memory_access
= replay_memory_access_read_write
;
1354 ret
= ops
->beneath
->to_insert_breakpoint (ops
->beneath
, gdbarch
, bp_tgt
);
1356 CATCH (except
, RETURN_MASK_ALL
)
1358 replay_memory_access
= old
;
1359 throw_exception (except
);
1362 replay_memory_access
= old
;
1367 /* The to_remove_breakpoint method of target record-btrace. */
1370 record_btrace_remove_breakpoint (struct target_ops
*ops
,
1371 struct gdbarch
*gdbarch
,
1372 struct bp_target_info
*bp_tgt
,
1373 enum remove_bp_reason reason
)
1378 /* Removing breakpoints requires accessing memory. Allow it for the
1379 duration of this function. */
1380 old
= replay_memory_access
;
1381 replay_memory_access
= replay_memory_access_read_write
;
1386 ret
= ops
->beneath
->to_remove_breakpoint (ops
->beneath
, gdbarch
, bp_tgt
,
1389 CATCH (except
, RETURN_MASK_ALL
)
1391 replay_memory_access
= old
;
1392 throw_exception (except
);
1395 replay_memory_access
= old
;
1400 /* The to_fetch_registers method of target record-btrace. */
1403 record_btrace_fetch_registers (struct target_ops
*ops
,
1404 struct regcache
*regcache
, int regno
)
1406 struct btrace_insn_iterator
*replay
;
1407 struct thread_info
*tp
;
1409 tp
= find_thread_ptid (regcache_get_ptid (regcache
));
1410 gdb_assert (tp
!= NULL
);
1412 replay
= tp
->btrace
.replay
;
1413 if (replay
!= NULL
&& !record_btrace_generating_corefile
)
1415 const struct btrace_insn
*insn
;
1416 struct gdbarch
*gdbarch
;
1419 gdbarch
= regcache
->arch ();
1420 pcreg
= gdbarch_pc_regnum (gdbarch
);
1424 /* We can only provide the PC register. */
1425 if (regno
>= 0 && regno
!= pcreg
)
1428 insn
= btrace_insn_get (replay
);
1429 gdb_assert (insn
!= NULL
);
1431 regcache_raw_supply (regcache
, regno
, &insn
->pc
);
1435 struct target_ops
*t
= ops
->beneath
;
1437 t
->to_fetch_registers (t
, regcache
, regno
);
1441 /* The to_store_registers method of target record-btrace. */
1444 record_btrace_store_registers (struct target_ops
*ops
,
1445 struct regcache
*regcache
, int regno
)
1447 struct target_ops
*t
;
1449 if (!record_btrace_generating_corefile
1450 && record_btrace_is_replaying (ops
, regcache_get_ptid (regcache
)))
1451 error (_("Cannot write registers while replaying."));
1453 gdb_assert (may_write_registers
!= 0);
1456 t
->to_store_registers (t
, regcache
, regno
);
1459 /* The to_prepare_to_store method of target record-btrace. */
1462 record_btrace_prepare_to_store (struct target_ops
*ops
,
1463 struct regcache
*regcache
)
1465 struct target_ops
*t
;
1467 if (!record_btrace_generating_corefile
1468 && record_btrace_is_replaying (ops
, regcache_get_ptid (regcache
)))
1472 t
->to_prepare_to_store (t
, regcache
);
1475 /* The branch trace frame cache. */
1477 struct btrace_frame_cache
1480 struct thread_info
*tp
;
1482 /* The frame info. */
1483 struct frame_info
*frame
;
1485 /* The branch trace function segment. */
1486 const struct btrace_function
*bfun
;
1489 /* A struct btrace_frame_cache hash table indexed by NEXT. */
1491 static htab_t bfcache
;
1493 /* hash_f for htab_create_alloc of bfcache. */
1496 bfcache_hash (const void *arg
)
1498 const struct btrace_frame_cache
*cache
1499 = (const struct btrace_frame_cache
*) arg
;
1501 return htab_hash_pointer (cache
->frame
);
1504 /* eq_f for htab_create_alloc of bfcache. */
1507 bfcache_eq (const void *arg1
, const void *arg2
)
1509 const struct btrace_frame_cache
*cache1
1510 = (const struct btrace_frame_cache
*) arg1
;
1511 const struct btrace_frame_cache
*cache2
1512 = (const struct btrace_frame_cache
*) arg2
;
1514 return cache1
->frame
== cache2
->frame
;
1517 /* Create a new btrace frame cache. */
1519 static struct btrace_frame_cache
*
1520 bfcache_new (struct frame_info
*frame
)
1522 struct btrace_frame_cache
*cache
;
1525 cache
= FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache
);
1526 cache
->frame
= frame
;
1528 slot
= htab_find_slot (bfcache
, cache
, INSERT
);
1529 gdb_assert (*slot
== NULL
);
1535 /* Extract the branch trace function from a branch trace frame. */
1537 static const struct btrace_function
*
1538 btrace_get_frame_function (struct frame_info
*frame
)
1540 const struct btrace_frame_cache
*cache
;
1541 struct btrace_frame_cache pattern
;
1544 pattern
.frame
= frame
;
1546 slot
= htab_find_slot (bfcache
, &pattern
, NO_INSERT
);
1550 cache
= (const struct btrace_frame_cache
*) *slot
;
1554 /* Implement stop_reason method for record_btrace_frame_unwind. */
1556 static enum unwind_stop_reason
1557 record_btrace_frame_unwind_stop_reason (struct frame_info
*this_frame
,
1560 const struct btrace_frame_cache
*cache
;
1561 const struct btrace_function
*bfun
;
1563 cache
= (const struct btrace_frame_cache
*) *this_cache
;
1565 gdb_assert (bfun
!= NULL
);
1568 return UNWIND_UNAVAILABLE
;
1570 return UNWIND_NO_REASON
;
1573 /* Implement this_id method for record_btrace_frame_unwind. */
1576 record_btrace_frame_this_id (struct frame_info
*this_frame
, void **this_cache
,
1577 struct frame_id
*this_id
)
1579 const struct btrace_frame_cache
*cache
;
1580 const struct btrace_function
*bfun
;
1581 struct btrace_call_iterator it
;
1582 CORE_ADDR code
, special
;
1584 cache
= (const struct btrace_frame_cache
*) *this_cache
;
1587 gdb_assert (bfun
!= NULL
);
1589 while (btrace_find_call_by_number (&it
, &cache
->tp
->btrace
, bfun
->prev
) != 0)
1590 bfun
= btrace_call_get (&it
);
1592 code
= get_frame_func (this_frame
);
1593 special
= bfun
->number
;
1595 *this_id
= frame_id_build_unavailable_stack_special (code
, special
);
1597 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1598 btrace_get_bfun_name (cache
->bfun
),
1599 core_addr_to_string_nz (this_id
->code_addr
),
1600 core_addr_to_string_nz (this_id
->special_addr
));
1603 /* Implement prev_register method for record_btrace_frame_unwind. */
1605 static struct value
*
1606 record_btrace_frame_prev_register (struct frame_info
*this_frame
,
1610 const struct btrace_frame_cache
*cache
;
1611 const struct btrace_function
*bfun
, *caller
;
1612 struct btrace_call_iterator it
;
1613 struct gdbarch
*gdbarch
;
1617 gdbarch
= get_frame_arch (this_frame
);
1618 pcreg
= gdbarch_pc_regnum (gdbarch
);
1619 if (pcreg
< 0 || regnum
!= pcreg
)
1620 throw_error (NOT_AVAILABLE_ERROR
,
1621 _("Registers are not available in btrace record history"));
1623 cache
= (const struct btrace_frame_cache
*) *this_cache
;
1625 gdb_assert (bfun
!= NULL
);
1627 if (btrace_find_call_by_number (&it
, &cache
->tp
->btrace
, bfun
->up
) == 0)
1628 throw_error (NOT_AVAILABLE_ERROR
,
1629 _("No caller in btrace record history"));
1631 caller
= btrace_call_get (&it
);
1633 if ((bfun
->flags
& BFUN_UP_LINKS_TO_RET
) != 0)
1634 pc
= caller
->insn
.front ().pc
;
1637 pc
= caller
->insn
.back ().pc
;
1638 pc
+= gdb_insn_length (gdbarch
, pc
);
1641 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1642 btrace_get_bfun_name (bfun
), bfun
->level
,
1643 core_addr_to_string_nz (pc
));
1645 return frame_unwind_got_address (this_frame
, regnum
, pc
);
1648 /* Implement sniffer method for record_btrace_frame_unwind. */
1651 record_btrace_frame_sniffer (const struct frame_unwind
*self
,
1652 struct frame_info
*this_frame
,
1655 const struct btrace_function
*bfun
;
1656 struct btrace_frame_cache
*cache
;
1657 struct thread_info
*tp
;
1658 struct frame_info
*next
;
1660 /* THIS_FRAME does not contain a reference to its thread. */
1661 tp
= find_thread_ptid (inferior_ptid
);
1662 gdb_assert (tp
!= NULL
);
1665 next
= get_next_frame (this_frame
);
1668 const struct btrace_insn_iterator
*replay
;
1670 replay
= tp
->btrace
.replay
;
1672 bfun
= &replay
->btinfo
->functions
[replay
->call_index
];
1676 const struct btrace_function
*callee
;
1677 struct btrace_call_iterator it
;
1679 callee
= btrace_get_frame_function (next
);
1680 if (callee
== NULL
|| (callee
->flags
& BFUN_UP_LINKS_TO_TAILCALL
) != 0)
1683 if (btrace_find_call_by_number (&it
, &tp
->btrace
, callee
->up
) == 0)
1686 bfun
= btrace_call_get (&it
);
1692 DEBUG ("[frame] sniffed frame for %s on level %d",
1693 btrace_get_bfun_name (bfun
), bfun
->level
);
1695 /* This is our frame. Initialize the frame cache. */
1696 cache
= bfcache_new (this_frame
);
1700 *this_cache
= cache
;
1704 /* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1707 record_btrace_tailcall_frame_sniffer (const struct frame_unwind
*self
,
1708 struct frame_info
*this_frame
,
1711 const struct btrace_function
*bfun
, *callee
;
1712 struct btrace_frame_cache
*cache
;
1713 struct btrace_call_iterator it
;
1714 struct frame_info
*next
;
1715 struct thread_info
*tinfo
;
1717 next
= get_next_frame (this_frame
);
1721 callee
= btrace_get_frame_function (next
);
1725 if ((callee
->flags
& BFUN_UP_LINKS_TO_TAILCALL
) == 0)
1728 tinfo
= find_thread_ptid (inferior_ptid
);
1729 if (btrace_find_call_by_number (&it
, &tinfo
->btrace
, callee
->up
) == 0)
1732 bfun
= btrace_call_get (&it
);
1734 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1735 btrace_get_bfun_name (bfun
), bfun
->level
);
1737 /* This is our frame. Initialize the frame cache. */
1738 cache
= bfcache_new (this_frame
);
1742 *this_cache
= cache
;
1747 record_btrace_frame_dealloc_cache (struct frame_info
*self
, void *this_cache
)
1749 struct btrace_frame_cache
*cache
;
1752 cache
= (struct btrace_frame_cache
*) this_cache
;
1754 slot
= htab_find_slot (bfcache
, cache
, NO_INSERT
);
1755 gdb_assert (slot
!= NULL
);
1757 htab_remove_elt (bfcache
, cache
);
1760 /* btrace recording does not store previous memory content, neither the stack
1761 frames content. Any unwinding would return errorneous results as the stack
1762 contents no longer matches the changed PC value restored from history.
1763 Therefore this unwinder reports any possibly unwound registers as
1766 const struct frame_unwind record_btrace_frame_unwind
=
1769 record_btrace_frame_unwind_stop_reason
,
1770 record_btrace_frame_this_id
,
1771 record_btrace_frame_prev_register
,
1773 record_btrace_frame_sniffer
,
1774 record_btrace_frame_dealloc_cache
1777 const struct frame_unwind record_btrace_tailcall_frame_unwind
=
1780 record_btrace_frame_unwind_stop_reason
,
1781 record_btrace_frame_this_id
,
1782 record_btrace_frame_prev_register
,
1784 record_btrace_tailcall_frame_sniffer
,
1785 record_btrace_frame_dealloc_cache
1788 /* Implement the to_get_unwinder method. */
1790 static const struct frame_unwind
*
1791 record_btrace_to_get_unwinder (struct target_ops
*self
)
1793 return &record_btrace_frame_unwind
;
1796 /* Implement the to_get_tailcall_unwinder method. */
1798 static const struct frame_unwind
*
1799 record_btrace_to_get_tailcall_unwinder (struct target_ops
*self
)
1801 return &record_btrace_tailcall_frame_unwind
;
1804 /* Return a human-readable string for FLAG. */
1807 btrace_thread_flag_to_str (enum btrace_thread_flag flag
)
1815 return "reverse-step";
1821 return "reverse-cont";
1830 /* Indicate that TP should be resumed according to FLAG. */
1833 record_btrace_resume_thread (struct thread_info
*tp
,
1834 enum btrace_thread_flag flag
)
1836 struct btrace_thread_info
*btinfo
;
1838 DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp
),
1839 target_pid_to_str (tp
->ptid
), flag
, btrace_thread_flag_to_str (flag
));
1841 btinfo
= &tp
->btrace
;
1843 /* Fetch the latest branch trace. */
1846 /* A resume request overwrites a preceding resume or stop request. */
1847 btinfo
->flags
&= ~(BTHR_MOVE
| BTHR_STOP
);
1848 btinfo
->flags
|= flag
;
1851 /* Get the current frame for TP. */
1853 static struct frame_info
*
1854 get_thread_current_frame (struct thread_info
*tp
)
1856 struct frame_info
*frame
;
1857 ptid_t old_inferior_ptid
;
1860 /* Set INFERIOR_PTID, which is implicitly used by get_current_frame. */
1861 old_inferior_ptid
= inferior_ptid
;
1862 inferior_ptid
= tp
->ptid
;
1864 /* Clear the executing flag to allow changes to the current frame.
1865 We are not actually running, yet. We just started a reverse execution
1866 command or a record goto command.
1867 For the latter, EXECUTING is false and this has no effect.
1868 For the former, EXECUTING is true and we're in to_wait, about to
1869 move the thread. Since we need to recompute the stack, we temporarily
1870 set EXECUTING to flase. */
1871 executing
= is_executing (inferior_ptid
);
1872 set_executing (inferior_ptid
, 0);
1877 frame
= get_current_frame ();
1879 CATCH (except
, RETURN_MASK_ALL
)
1881 /* Restore the previous execution state. */
1882 set_executing (inferior_ptid
, executing
);
1884 /* Restore the previous inferior_ptid. */
1885 inferior_ptid
= old_inferior_ptid
;
1887 throw_exception (except
);
1891 /* Restore the previous execution state. */
1892 set_executing (inferior_ptid
, executing
);
1894 /* Restore the previous inferior_ptid. */
1895 inferior_ptid
= old_inferior_ptid
;
1900 /* Start replaying a thread. */
1902 static struct btrace_insn_iterator
*
1903 record_btrace_start_replaying (struct thread_info
*tp
)
1905 struct btrace_insn_iterator
*replay
;
1906 struct btrace_thread_info
*btinfo
;
1908 btinfo
= &tp
->btrace
;
1911 /* We can't start replaying without trace. */
1912 if (btinfo
->functions
.empty ())
1915 /* GDB stores the current frame_id when stepping in order to detects steps
1917 Since frames are computed differently when we're replaying, we need to
1918 recompute those stored frames and fix them up so we can still detect
1919 subroutines after we started replaying. */
1922 struct frame_info
*frame
;
1923 struct frame_id frame_id
;
1924 int upd_step_frame_id
, upd_step_stack_frame_id
;
1926 /* The current frame without replaying - computed via normal unwind. */
1927 frame
= get_thread_current_frame (tp
);
1928 frame_id
= get_frame_id (frame
);
1930 /* Check if we need to update any stepping-related frame id's. */
1931 upd_step_frame_id
= frame_id_eq (frame_id
,
1932 tp
->control
.step_frame_id
);
1933 upd_step_stack_frame_id
= frame_id_eq (frame_id
,
1934 tp
->control
.step_stack_frame_id
);
1936 /* We start replaying at the end of the branch trace. This corresponds
1937 to the current instruction. */
1938 replay
= XNEW (struct btrace_insn_iterator
);
1939 btrace_insn_end (replay
, btinfo
);
1941 /* Skip gaps at the end of the trace. */
1942 while (btrace_insn_get (replay
) == NULL
)
1946 steps
= btrace_insn_prev (replay
, 1);
1948 error (_("No trace."));
1951 /* We're not replaying, yet. */
1952 gdb_assert (btinfo
->replay
== NULL
);
1953 btinfo
->replay
= replay
;
1955 /* Make sure we're not using any stale registers. */
1956 registers_changed_ptid (tp
->ptid
);
1958 /* The current frame with replaying - computed via btrace unwind. */
1959 frame
= get_thread_current_frame (tp
);
1960 frame_id
= get_frame_id (frame
);
1962 /* Replace stepping related frames where necessary. */
1963 if (upd_step_frame_id
)
1964 tp
->control
.step_frame_id
= frame_id
;
1965 if (upd_step_stack_frame_id
)
1966 tp
->control
.step_stack_frame_id
= frame_id
;
1968 CATCH (except
, RETURN_MASK_ALL
)
1970 xfree (btinfo
->replay
);
1971 btinfo
->replay
= NULL
;
1973 registers_changed_ptid (tp
->ptid
);
1975 throw_exception (except
);
1982 /* Stop replaying a thread. */
1985 record_btrace_stop_replaying (struct thread_info
*tp
)
1987 struct btrace_thread_info
*btinfo
;
1989 btinfo
= &tp
->btrace
;
1991 xfree (btinfo
->replay
);
1992 btinfo
->replay
= NULL
;
1994 /* Make sure we're not leaving any stale registers. */
1995 registers_changed_ptid (tp
->ptid
);
1998 /* Stop replaying TP if it is at the end of its execution history. */
2001 record_btrace_stop_replaying_at_end (struct thread_info
*tp
)
2003 struct btrace_insn_iterator
*replay
, end
;
2004 struct btrace_thread_info
*btinfo
;
2006 btinfo
= &tp
->btrace
;
2007 replay
= btinfo
->replay
;
2012 btrace_insn_end (&end
, btinfo
);
2014 if (btrace_insn_cmp (replay
, &end
) == 0)
2015 record_btrace_stop_replaying (tp
);
2018 /* The to_resume method of target record-btrace. */
2021 record_btrace_resume (struct target_ops
*ops
, ptid_t ptid
, int step
,
2022 enum gdb_signal signal
)
2024 struct thread_info
*tp
;
2025 enum btrace_thread_flag flag
, cflag
;
2027 DEBUG ("resume %s: %s%s", target_pid_to_str (ptid
),
2028 execution_direction
== EXEC_REVERSE
? "reverse-" : "",
2029 step
? "step" : "cont");
2031 /* Store the execution direction of the last resume.
2033 If there is more than one to_resume call, we have to rely on infrun
2034 to not change the execution direction in-between. */
2035 record_btrace_resume_exec_dir
= execution_direction
;
2037 /* As long as we're not replaying, just forward the request.
2039 For non-stop targets this means that no thread is replaying. In order to
2040 make progress, we may need to explicitly move replaying threads to the end
2041 of their execution history. */
2042 if ((execution_direction
!= EXEC_REVERSE
)
2043 && !record_btrace_is_replaying (ops
, minus_one_ptid
))
2046 ops
->to_resume (ops
, ptid
, step
, signal
);
2050 /* Compute the btrace thread flag for the requested move. */
2051 if (execution_direction
== EXEC_REVERSE
)
2053 flag
= step
== 0 ? BTHR_RCONT
: BTHR_RSTEP
;
2058 flag
= step
== 0 ? BTHR_CONT
: BTHR_STEP
;
2062 /* We just indicate the resume intent here. The actual stepping happens in
2063 record_btrace_wait below.
2065 For all-stop targets, we only step INFERIOR_PTID and continue others. */
2066 if (!target_is_non_stop_p ())
2068 gdb_assert (ptid_match (inferior_ptid
, ptid
));
2070 ALL_NON_EXITED_THREADS (tp
)
2071 if (ptid_match (tp
->ptid
, ptid
))
2073 if (ptid_match (tp
->ptid
, inferior_ptid
))
2074 record_btrace_resume_thread (tp
, flag
);
2076 record_btrace_resume_thread (tp
, cflag
);
2081 ALL_NON_EXITED_THREADS (tp
)
2082 if (ptid_match (tp
->ptid
, ptid
))
2083 record_btrace_resume_thread (tp
, flag
);
2086 /* Async support. */
2087 if (target_can_async_p ())
2090 mark_async_event_handler (record_btrace_async_inferior_event_handler
);
2094 /* The to_commit_resume method of target record-btrace. */
2097 record_btrace_commit_resume (struct target_ops
*ops
)
2099 if ((execution_direction
!= EXEC_REVERSE
)
2100 && !record_btrace_is_replaying (ops
, minus_one_ptid
))
2101 ops
->beneath
->to_commit_resume (ops
->beneath
);
2104 /* Cancel resuming TP. */
2107 record_btrace_cancel_resume (struct thread_info
*tp
)
2109 enum btrace_thread_flag flags
;
2111 flags
= tp
->btrace
.flags
& (BTHR_MOVE
| BTHR_STOP
);
2115 DEBUG ("cancel resume thread %s (%s): %x (%s)",
2116 print_thread_id (tp
),
2117 target_pid_to_str (tp
->ptid
), flags
,
2118 btrace_thread_flag_to_str (flags
));
2120 tp
->btrace
.flags
&= ~(BTHR_MOVE
| BTHR_STOP
);
2121 record_btrace_stop_replaying_at_end (tp
);
2124 /* Return a target_waitstatus indicating that we ran out of history. */
2126 static struct target_waitstatus
2127 btrace_step_no_history (void)
2129 struct target_waitstatus status
;
2131 status
.kind
= TARGET_WAITKIND_NO_HISTORY
;
2136 /* Return a target_waitstatus indicating that a step finished. */
2138 static struct target_waitstatus
2139 btrace_step_stopped (void)
2141 struct target_waitstatus status
;
2143 status
.kind
= TARGET_WAITKIND_STOPPED
;
2144 status
.value
.sig
= GDB_SIGNAL_TRAP
;
2149 /* Return a target_waitstatus indicating that a thread was stopped as
2152 static struct target_waitstatus
2153 btrace_step_stopped_on_request (void)
2155 struct target_waitstatus status
;
2157 status
.kind
= TARGET_WAITKIND_STOPPED
;
2158 status
.value
.sig
= GDB_SIGNAL_0
;
2163 /* Return a target_waitstatus indicating a spurious stop. */
2165 static struct target_waitstatus
2166 btrace_step_spurious (void)
2168 struct target_waitstatus status
;
2170 status
.kind
= TARGET_WAITKIND_SPURIOUS
;
2175 /* Return a target_waitstatus indicating that the thread was not resumed. */
2177 static struct target_waitstatus
2178 btrace_step_no_resumed (void)
2180 struct target_waitstatus status
;
2182 status
.kind
= TARGET_WAITKIND_NO_RESUMED
;
2187 /* Return a target_waitstatus indicating that we should wait again. */
2189 static struct target_waitstatus
2190 btrace_step_again (void)
2192 struct target_waitstatus status
;
2194 status
.kind
= TARGET_WAITKIND_IGNORE
;
2199 /* Clear the record histories. */
2202 record_btrace_clear_histories (struct btrace_thread_info
*btinfo
)
2204 xfree (btinfo
->insn_history
);
2205 xfree (btinfo
->call_history
);
2207 btinfo
->insn_history
= NULL
;
2208 btinfo
->call_history
= NULL
;
2211 /* Check whether TP's current replay position is at a breakpoint. */
2214 record_btrace_replay_at_breakpoint (struct thread_info
*tp
)
2216 struct btrace_insn_iterator
*replay
;
2217 struct btrace_thread_info
*btinfo
;
2218 const struct btrace_insn
*insn
;
2219 struct inferior
*inf
;
2221 btinfo
= &tp
->btrace
;
2222 replay
= btinfo
->replay
;
2227 insn
= btrace_insn_get (replay
);
2231 inf
= find_inferior_ptid (tp
->ptid
);
2235 return record_check_stopped_by_breakpoint (inf
->aspace
, insn
->pc
,
2236 &btinfo
->stop_reason
);
2239 /* Step one instruction in forward direction. */
2241 static struct target_waitstatus
2242 record_btrace_single_step_forward (struct thread_info
*tp
)
2244 struct btrace_insn_iterator
*replay
, end
, start
;
2245 struct btrace_thread_info
*btinfo
;
2247 btinfo
= &tp
->btrace
;
2248 replay
= btinfo
->replay
;
2250 /* We're done if we're not replaying. */
2252 return btrace_step_no_history ();
2254 /* Check if we're stepping a breakpoint. */
2255 if (record_btrace_replay_at_breakpoint (tp
))
2256 return btrace_step_stopped ();
2258 /* Skip gaps during replay. If we end up at a gap (at the end of the trace),
2259 jump back to the instruction at which we started. */
2265 /* We will bail out here if we continue stepping after reaching the end
2266 of the execution history. */
2267 steps
= btrace_insn_next (replay
, 1);
2271 return btrace_step_no_history ();
2274 while (btrace_insn_get (replay
) == NULL
);
2276 /* Determine the end of the instruction trace. */
2277 btrace_insn_end (&end
, btinfo
);
2279 /* The execution trace contains (and ends with) the current instruction.
2280 This instruction has not been executed, yet, so the trace really ends
2281 one instruction earlier. */
2282 if (btrace_insn_cmp (replay
, &end
) == 0)
2283 return btrace_step_no_history ();
2285 return btrace_step_spurious ();
2288 /* Step one instruction in backward direction. */
2290 static struct target_waitstatus
2291 record_btrace_single_step_backward (struct thread_info
*tp
)
2293 struct btrace_insn_iterator
*replay
, start
;
2294 struct btrace_thread_info
*btinfo
;
2296 btinfo
= &tp
->btrace
;
2297 replay
= btinfo
->replay
;
2299 /* Start replaying if we're not already doing so. */
2301 replay
= record_btrace_start_replaying (tp
);
2303 /* If we can't step any further, we reached the end of the history.
2304 Skip gaps during replay. If we end up at a gap (at the beginning of
2305 the trace), jump back to the instruction at which we started. */
2311 steps
= btrace_insn_prev (replay
, 1);
2315 return btrace_step_no_history ();
2318 while (btrace_insn_get (replay
) == NULL
);
2320 /* Check if we're stepping a breakpoint.
2322 For reverse-stepping, this check is after the step. There is logic in
2323 infrun.c that handles reverse-stepping separately. See, for example,
2324 proceed and adjust_pc_after_break.
2326 This code assumes that for reverse-stepping, PC points to the last
2327 de-executed instruction, whereas for forward-stepping PC points to the
2328 next to-be-executed instruction. */
2329 if (record_btrace_replay_at_breakpoint (tp
))
2330 return btrace_step_stopped ();
2332 return btrace_step_spurious ();
2335 /* Step a single thread. */
2337 static struct target_waitstatus
2338 record_btrace_step_thread (struct thread_info
*tp
)
2340 struct btrace_thread_info
*btinfo
;
2341 struct target_waitstatus status
;
2342 enum btrace_thread_flag flags
;
2344 btinfo
= &tp
->btrace
;
2346 flags
= btinfo
->flags
& (BTHR_MOVE
| BTHR_STOP
);
2347 btinfo
->flags
&= ~(BTHR_MOVE
| BTHR_STOP
);
2349 DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp
),
2350 target_pid_to_str (tp
->ptid
), flags
,
2351 btrace_thread_flag_to_str (flags
));
2353 /* We can't step without an execution history. */
2354 if ((flags
& BTHR_MOVE
) != 0 && btrace_is_empty (tp
))
2355 return btrace_step_no_history ();
2360 internal_error (__FILE__
, __LINE__
, _("invalid stepping type."));
2363 return btrace_step_stopped_on_request ();
2366 status
= record_btrace_single_step_forward (tp
);
2367 if (status
.kind
!= TARGET_WAITKIND_SPURIOUS
)
2370 return btrace_step_stopped ();
2373 status
= record_btrace_single_step_backward (tp
);
2374 if (status
.kind
!= TARGET_WAITKIND_SPURIOUS
)
2377 return btrace_step_stopped ();
2380 status
= record_btrace_single_step_forward (tp
);
2381 if (status
.kind
!= TARGET_WAITKIND_SPURIOUS
)
2384 btinfo
->flags
|= flags
;
2385 return btrace_step_again ();
2388 status
= record_btrace_single_step_backward (tp
);
2389 if (status
.kind
!= TARGET_WAITKIND_SPURIOUS
)
2392 btinfo
->flags
|= flags
;
2393 return btrace_step_again ();
2396 /* We keep threads moving at the end of their execution history. The to_wait
2397 method will stop the thread for whom the event is reported. */
2398 if (status
.kind
== TARGET_WAITKIND_NO_HISTORY
)
2399 btinfo
->flags
|= flags
;
2404 /* A vector of threads. */
2406 typedef struct thread_info
* tp_t
;
2409 /* Announce further events if necessary. */
2412 record_btrace_maybe_mark_async_event (const VEC (tp_t
) *moving
,
2413 const VEC (tp_t
) *no_history
)
2415 int more_moving
, more_no_history
;
2417 more_moving
= !VEC_empty (tp_t
, moving
);
2418 more_no_history
= !VEC_empty (tp_t
, no_history
);
2420 if (!more_moving
&& !more_no_history
)
2424 DEBUG ("movers pending");
2426 if (more_no_history
)
2427 DEBUG ("no-history pending");
2429 mark_async_event_handler (record_btrace_async_inferior_event_handler
);
2432 /* The to_wait method of target record-btrace. */
2435 record_btrace_wait (struct target_ops
*ops
, ptid_t ptid
,
2436 struct target_waitstatus
*status
, int options
)
2438 VEC (tp_t
) *moving
, *no_history
;
2439 struct thread_info
*tp
, *eventing
;
2440 struct cleanup
*cleanups
= make_cleanup (null_cleanup
, NULL
);
2442 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid
), options
);
2444 /* As long as we're not replaying, just forward the request. */
2445 if ((execution_direction
!= EXEC_REVERSE
)
2446 && !record_btrace_is_replaying (ops
, minus_one_ptid
))
2449 return ops
->to_wait (ops
, ptid
, status
, options
);
2455 make_cleanup (VEC_cleanup (tp_t
), &moving
);
2456 make_cleanup (VEC_cleanup (tp_t
), &no_history
);
2458 /* Keep a work list of moving threads. */
2459 ALL_NON_EXITED_THREADS (tp
)
2460 if (ptid_match (tp
->ptid
, ptid
)
2461 && ((tp
->btrace
.flags
& (BTHR_MOVE
| BTHR_STOP
)) != 0))
2462 VEC_safe_push (tp_t
, moving
, tp
);
2464 if (VEC_empty (tp_t
, moving
))
2466 *status
= btrace_step_no_resumed ();
2468 DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid
),
2469 target_waitstatus_to_string (status
).c_str ());
2471 do_cleanups (cleanups
);
2475 /* Step moving threads one by one, one step each, until either one thread
2476 reports an event or we run out of threads to step.
2478 When stepping more than one thread, chances are that some threads reach
2479 the end of their execution history earlier than others. If we reported
2480 this immediately, all-stop on top of non-stop would stop all threads and
2481 resume the same threads next time. And we would report the same thread
2482 having reached the end of its execution history again.
2484 In the worst case, this would starve the other threads. But even if other
2485 threads would be allowed to make progress, this would result in far too
2486 many intermediate stops.
2488 We therefore delay the reporting of "no execution history" until we have
2489 nothing else to report. By this time, all threads should have moved to
2490 either the beginning or the end of their execution history. There will
2491 be a single user-visible stop. */
2493 while ((eventing
== NULL
) && !VEC_empty (tp_t
, moving
))
2498 while ((eventing
== NULL
) && VEC_iterate (tp_t
, moving
, ix
, tp
))
2500 *status
= record_btrace_step_thread (tp
);
2502 switch (status
->kind
)
2504 case TARGET_WAITKIND_IGNORE
:
2508 case TARGET_WAITKIND_NO_HISTORY
:
2509 VEC_safe_push (tp_t
, no_history
,
2510 VEC_ordered_remove (tp_t
, moving
, ix
));
2514 eventing
= VEC_unordered_remove (tp_t
, moving
, ix
);
2520 if (eventing
== NULL
)
2522 /* We started with at least one moving thread. This thread must have
2523 either stopped or reached the end of its execution history.
2525 In the former case, EVENTING must not be NULL.
2526 In the latter case, NO_HISTORY must not be empty. */
2527 gdb_assert (!VEC_empty (tp_t
, no_history
));
2529 /* We kept threads moving at the end of their execution history. Stop
2530 EVENTING now that we are going to report its stop. */
2531 eventing
= VEC_unordered_remove (tp_t
, no_history
, 0);
2532 eventing
->btrace
.flags
&= ~BTHR_MOVE
;
2534 *status
= btrace_step_no_history ();
2537 gdb_assert (eventing
!= NULL
);
2539 /* We kept threads replaying at the end of their execution history. Stop
2540 replaying EVENTING now that we are going to report its stop. */
2541 record_btrace_stop_replaying_at_end (eventing
);
2543 /* Stop all other threads. */
2544 if (!target_is_non_stop_p ())
2545 ALL_NON_EXITED_THREADS (tp
)
2546 record_btrace_cancel_resume (tp
);
2548 /* In async mode, we need to announce further events. */
2549 if (target_is_async_p ())
2550 record_btrace_maybe_mark_async_event (moving
, no_history
);
2552 /* Start record histories anew from the current position. */
2553 record_btrace_clear_histories (&eventing
->btrace
);
2555 /* We moved the replay position but did not update registers. */
2556 registers_changed_ptid (eventing
->ptid
);
2558 DEBUG ("wait ended by thread %s (%s): %s",
2559 print_thread_id (eventing
),
2560 target_pid_to_str (eventing
->ptid
),
2561 target_waitstatus_to_string (status
).c_str ());
2563 do_cleanups (cleanups
);
2564 return eventing
->ptid
;
2567 /* The to_stop method of target record-btrace. */
2570 record_btrace_stop (struct target_ops
*ops
, ptid_t ptid
)
2572 DEBUG ("stop %s", target_pid_to_str (ptid
));
2574 /* As long as we're not replaying, just forward the request. */
2575 if ((execution_direction
!= EXEC_REVERSE
)
2576 && !record_btrace_is_replaying (ops
, minus_one_ptid
))
2579 ops
->to_stop (ops
, ptid
);
2583 struct thread_info
*tp
;
2585 ALL_NON_EXITED_THREADS (tp
)
2586 if (ptid_match (tp
->ptid
, ptid
))
2588 tp
->btrace
.flags
&= ~BTHR_MOVE
;
2589 tp
->btrace
.flags
|= BTHR_STOP
;
/* The to_can_execute_reverse method of target record-btrace.  */

static int
record_btrace_can_execute_reverse (struct target_ops *self)
{
  return 1;
}
2602 /* The to_stopped_by_sw_breakpoint method of target record-btrace. */
2605 record_btrace_stopped_by_sw_breakpoint (struct target_ops
*ops
)
2607 if (record_btrace_is_replaying (ops
, minus_one_ptid
))
2609 struct thread_info
*tp
= inferior_thread ();
2611 return tp
->btrace
.stop_reason
== TARGET_STOPPED_BY_SW_BREAKPOINT
;
2614 return ops
->beneath
->to_stopped_by_sw_breakpoint (ops
->beneath
);
2617 /* The to_supports_stopped_by_sw_breakpoint method of target
2621 record_btrace_supports_stopped_by_sw_breakpoint (struct target_ops
*ops
)
2623 if (record_btrace_is_replaying (ops
, minus_one_ptid
))
2626 return ops
->beneath
->to_supports_stopped_by_sw_breakpoint (ops
->beneath
);
2629 /* The to_stopped_by_sw_breakpoint method of target record-btrace. */
2632 record_btrace_stopped_by_hw_breakpoint (struct target_ops
*ops
)
2634 if (record_btrace_is_replaying (ops
, minus_one_ptid
))
2636 struct thread_info
*tp
= inferior_thread ();
2638 return tp
->btrace
.stop_reason
== TARGET_STOPPED_BY_HW_BREAKPOINT
;
2641 return ops
->beneath
->to_stopped_by_hw_breakpoint (ops
->beneath
);
2644 /* The to_supports_stopped_by_hw_breakpoint method of target
2648 record_btrace_supports_stopped_by_hw_breakpoint (struct target_ops
*ops
)
2650 if (record_btrace_is_replaying (ops
, minus_one_ptid
))
2653 return ops
->beneath
->to_supports_stopped_by_hw_breakpoint (ops
->beneath
);
2656 /* The to_update_thread_list method of target record-btrace. */
2659 record_btrace_update_thread_list (struct target_ops
*ops
)
2661 /* We don't add or remove threads during replay. */
2662 if (record_btrace_is_replaying (ops
, minus_one_ptid
))
2665 /* Forward the request. */
2667 ops
->to_update_thread_list (ops
);
2670 /* The to_thread_alive method of target record-btrace. */
2673 record_btrace_thread_alive (struct target_ops
*ops
, ptid_t ptid
)
2675 /* We don't add or remove threads during replay. */
2676 if (record_btrace_is_replaying (ops
, minus_one_ptid
))
2677 return find_thread_ptid (ptid
) != NULL
;
2679 /* Forward the request. */
2681 return ops
->to_thread_alive (ops
, ptid
);
2684 /* Set the replay branch trace instruction iterator. If IT is NULL, replay
2688 record_btrace_set_replay (struct thread_info
*tp
,
2689 const struct btrace_insn_iterator
*it
)
2691 struct btrace_thread_info
*btinfo
;
2693 btinfo
= &tp
->btrace
;
2696 record_btrace_stop_replaying (tp
);
2699 if (btinfo
->replay
== NULL
)
2700 record_btrace_start_replaying (tp
);
2701 else if (btrace_insn_cmp (btinfo
->replay
, it
) == 0)
2704 *btinfo
->replay
= *it
;
2705 registers_changed_ptid (tp
->ptid
);
2708 /* Start anew from the new replay position. */
2709 record_btrace_clear_histories (btinfo
);
2711 stop_pc
= regcache_read_pc (get_current_regcache ());
2712 print_stack_frame (get_selected_frame (NULL
), 1, SRC_AND_LOC
, 1);
2715 /* The to_goto_record_begin method of target record-btrace. */
2718 record_btrace_goto_begin (struct target_ops
*self
)
2720 struct thread_info
*tp
;
2721 struct btrace_insn_iterator begin
;
2723 tp
= require_btrace_thread ();
2725 btrace_insn_begin (&begin
, &tp
->btrace
);
2727 /* Skip gaps at the beginning of the trace. */
2728 while (btrace_insn_get (&begin
) == NULL
)
2732 steps
= btrace_insn_next (&begin
, 1);
2734 error (_("No trace."));
2737 record_btrace_set_replay (tp
, &begin
);
2740 /* The to_goto_record_end method of target record-btrace. */
2743 record_btrace_goto_end (struct target_ops
*ops
)
2745 struct thread_info
*tp
;
2747 tp
= require_btrace_thread ();
2749 record_btrace_set_replay (tp
, NULL
);
2752 /* The to_goto_record method of target record-btrace. */
2755 record_btrace_goto (struct target_ops
*self
, ULONGEST insn
)
2757 struct thread_info
*tp
;
2758 struct btrace_insn_iterator it
;
2759 unsigned int number
;
2764 /* Check for wrap-arounds. */
2766 error (_("Instruction number out of range."));
2768 tp
= require_btrace_thread ();
2770 found
= btrace_find_insn_by_number (&it
, &tp
->btrace
, number
);
2772 /* Check if the instruction could not be found or is a gap. */
2773 if (found
== 0 || btrace_insn_get (&it
) == NULL
)
2774 error (_("No such instruction."));
2776 record_btrace_set_replay (tp
, &it
);
2779 /* The to_record_stop_replaying method of target record-btrace. */
2782 record_btrace_stop_replaying_all (struct target_ops
*self
)
2784 struct thread_info
*tp
;
2786 ALL_NON_EXITED_THREADS (tp
)
2787 record_btrace_stop_replaying (tp
);
2790 /* The to_execution_direction target method. */
2792 static enum exec_direction_kind
2793 record_btrace_execution_direction (struct target_ops
*self
)
2795 return record_btrace_resume_exec_dir
;
2798 /* The to_prepare_to_generate_core target method. */
2801 record_btrace_prepare_to_generate_core (struct target_ops
*self
)
2803 record_btrace_generating_corefile
= 1;
2806 /* The to_done_generating_core target method. */
2809 record_btrace_done_generating_core (struct target_ops
*self
)
2811 record_btrace_generating_corefile
= 0;
2814 /* Initialize the record-btrace target ops. */
2817 init_record_btrace_ops (void)
2819 struct target_ops
*ops
;
2821 ops
= &record_btrace_ops
;
2822 ops
->to_shortname
= "record-btrace";
2823 ops
->to_longname
= "Branch tracing target";
2824 ops
->to_doc
= "Collect control-flow trace and provide the execution history.";
2825 ops
->to_open
= record_btrace_open
;
2826 ops
->to_close
= record_btrace_close
;
2827 ops
->to_async
= record_btrace_async
;
2828 ops
->to_detach
= record_detach
;
2829 ops
->to_disconnect
= record_btrace_disconnect
;
2830 ops
->to_mourn_inferior
= record_mourn_inferior
;
2831 ops
->to_kill
= record_kill
;
2832 ops
->to_stop_recording
= record_btrace_stop_recording
;
2833 ops
->to_info_record
= record_btrace_info
;
2834 ops
->to_insn_history
= record_btrace_insn_history
;
2835 ops
->to_insn_history_from
= record_btrace_insn_history_from
;
2836 ops
->to_insn_history_range
= record_btrace_insn_history_range
;
2837 ops
->to_call_history
= record_btrace_call_history
;
2838 ops
->to_call_history_from
= record_btrace_call_history_from
;
2839 ops
->to_call_history_range
= record_btrace_call_history_range
;
2840 ops
->to_record_method
= record_btrace_record_method
;
2841 ops
->to_record_is_replaying
= record_btrace_is_replaying
;
2842 ops
->to_record_will_replay
= record_btrace_will_replay
;
2843 ops
->to_record_stop_replaying
= record_btrace_stop_replaying_all
;
2844 ops
->to_xfer_partial
= record_btrace_xfer_partial
;
2845 ops
->to_remove_breakpoint
= record_btrace_remove_breakpoint
;
2846 ops
->to_insert_breakpoint
= record_btrace_insert_breakpoint
;
2847 ops
->to_fetch_registers
= record_btrace_fetch_registers
;
2848 ops
->to_store_registers
= record_btrace_store_registers
;
2849 ops
->to_prepare_to_store
= record_btrace_prepare_to_store
;
2850 ops
->to_get_unwinder
= &record_btrace_to_get_unwinder
;
2851 ops
->to_get_tailcall_unwinder
= &record_btrace_to_get_tailcall_unwinder
;
2852 ops
->to_resume
= record_btrace_resume
;
2853 ops
->to_commit_resume
= record_btrace_commit_resume
;
2854 ops
->to_wait
= record_btrace_wait
;
2855 ops
->to_stop
= record_btrace_stop
;
2856 ops
->to_update_thread_list
= record_btrace_update_thread_list
;
2857 ops
->to_thread_alive
= record_btrace_thread_alive
;
2858 ops
->to_goto_record_begin
= record_btrace_goto_begin
;
2859 ops
->to_goto_record_end
= record_btrace_goto_end
;
2860 ops
->to_goto_record
= record_btrace_goto
;
2861 ops
->to_can_execute_reverse
= record_btrace_can_execute_reverse
;
2862 ops
->to_stopped_by_sw_breakpoint
= record_btrace_stopped_by_sw_breakpoint
;
2863 ops
->to_supports_stopped_by_sw_breakpoint
2864 = record_btrace_supports_stopped_by_sw_breakpoint
;
2865 ops
->to_stopped_by_hw_breakpoint
= record_btrace_stopped_by_hw_breakpoint
;
2866 ops
->to_supports_stopped_by_hw_breakpoint
2867 = record_btrace_supports_stopped_by_hw_breakpoint
;
2868 ops
->to_execution_direction
= record_btrace_execution_direction
;
2869 ops
->to_prepare_to_generate_core
= record_btrace_prepare_to_generate_core
;
2870 ops
->to_done_generating_core
= record_btrace_done_generating_core
;
2871 ops
->to_stratum
= record_stratum
;
2872 ops
->to_magic
= OPS_MAGIC
;
2875 /* Start recording in BTS format. */
2878 cmd_record_btrace_bts_start (const char *args
, int from_tty
)
2880 if (args
!= NULL
&& *args
!= 0)
2881 error (_("Invalid argument."));
2883 record_btrace_conf
.format
= BTRACE_FORMAT_BTS
;
2887 execute_command ("target record-btrace", from_tty
);
2889 CATCH (exception
, RETURN_MASK_ALL
)
2891 record_btrace_conf
.format
= BTRACE_FORMAT_NONE
;
2892 throw_exception (exception
);
2897 /* Start recording in Intel Processor Trace format. */
2900 cmd_record_btrace_pt_start (const char *args
, int from_tty
)
2902 if (args
!= NULL
&& *args
!= 0)
2903 error (_("Invalid argument."));
2905 record_btrace_conf
.format
= BTRACE_FORMAT_PT
;
2909 execute_command ("target record-btrace", from_tty
);
2911 CATCH (exception
, RETURN_MASK_ALL
)
2913 record_btrace_conf
.format
= BTRACE_FORMAT_NONE
;
2914 throw_exception (exception
);
2919 /* Alias for "target record". */
2922 cmd_record_btrace_start (const char *args
, int from_tty
)
2924 if (args
!= NULL
&& *args
!= 0)
2925 error (_("Invalid argument."));
2927 record_btrace_conf
.format
= BTRACE_FORMAT_PT
;
2931 execute_command ("target record-btrace", from_tty
);
2933 CATCH (exception
, RETURN_MASK_ALL
)
2935 record_btrace_conf
.format
= BTRACE_FORMAT_BTS
;
2939 execute_command ("target record-btrace", from_tty
);
2941 CATCH (exception
, RETURN_MASK_ALL
)
2943 record_btrace_conf
.format
= BTRACE_FORMAT_NONE
;
2944 throw_exception (exception
);
2951 /* The "set record btrace" command. */
2954 cmd_set_record_btrace (const char *args
, int from_tty
)
2956 cmd_show_list (set_record_btrace_cmdlist
, from_tty
, "");
2959 /* The "show record btrace" command. */
2962 cmd_show_record_btrace (const char *args
, int from_tty
)
2964 cmd_show_list (show_record_btrace_cmdlist
, from_tty
, "");
2967 /* The "show record btrace replay-memory-access" command. */
2970 cmd_show_replay_memory_access (struct ui_file
*file
, int from_tty
,
2971 struct cmd_list_element
*c
, const char *value
)
2973 fprintf_filtered (gdb_stdout
, _("Replay memory access is %s.\n"),
2974 replay_memory_access
);
2977 /* The "set record btrace bts" command. */
2980 cmd_set_record_btrace_bts (const char *args
, int from_tty
)
2982 printf_unfiltered (_("\"set record btrace bts\" must be followed "
2983 "by an appropriate subcommand.\n"));
2984 help_list (set_record_btrace_bts_cmdlist
, "set record btrace bts ",
2985 all_commands
, gdb_stdout
);
2988 /* The "show record btrace bts" command. */
2991 cmd_show_record_btrace_bts (const char *args
, int from_tty
)
2993 cmd_show_list (show_record_btrace_bts_cmdlist
, from_tty
, "");
2996 /* The "set record btrace pt" command. */
2999 cmd_set_record_btrace_pt (const char *args
, int from_tty
)
3001 printf_unfiltered (_("\"set record btrace pt\" must be followed "
3002 "by an appropriate subcommand.\n"));
3003 help_list (set_record_btrace_pt_cmdlist
, "set record btrace pt ",
3004 all_commands
, gdb_stdout
);
3007 /* The "show record btrace pt" command. */
3010 cmd_show_record_btrace_pt (const char *args
, int from_tty
)
3012 cmd_show_list (show_record_btrace_pt_cmdlist
, from_tty
, "");
/* The "record bts buffer-size" show value function.  */

static void
show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
				   struct cmd_list_element *c,
				   const char *value)
{
  fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
		    value);
}
/* The "record pt buffer-size" show value function.  */

static void
show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
				  struct cmd_list_element *c,
				  const char *value)
{
  fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
		    value);
}
3037 /* Initialize btrace commands. */
3040 _initialize_record_btrace (void)
3042 add_prefix_cmd ("btrace", class_obscure
, cmd_record_btrace_start
,
3043 _("Start branch trace recording."), &record_btrace_cmdlist
,
3044 "record btrace ", 0, &record_cmdlist
);
3045 add_alias_cmd ("b", "btrace", class_obscure
, 1, &record_cmdlist
);
3047 add_cmd ("bts", class_obscure
, cmd_record_btrace_bts_start
,
3049 Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
3050 The processor stores a from/to record for each branch into a cyclic buffer.\n\
3051 This format may not be available on all processors."),
3052 &record_btrace_cmdlist
);
3053 add_alias_cmd ("bts", "btrace bts", class_obscure
, 1, &record_cmdlist
);
3055 add_cmd ("pt", class_obscure
, cmd_record_btrace_pt_start
,
3057 Start branch trace recording in Intel Processor Trace format.\n\n\
3058 This format may not be available on all processors."),
3059 &record_btrace_cmdlist
);
3060 add_alias_cmd ("pt", "btrace pt", class_obscure
, 1, &record_cmdlist
);
3062 add_prefix_cmd ("btrace", class_support
, cmd_set_record_btrace
,
3063 _("Set record options"), &set_record_btrace_cmdlist
,
3064 "set record btrace ", 0, &set_record_cmdlist
);
3066 add_prefix_cmd ("btrace", class_support
, cmd_show_record_btrace
,
3067 _("Show record options"), &show_record_btrace_cmdlist
,
3068 "show record btrace ", 0, &show_record_cmdlist
);
3070 add_setshow_enum_cmd ("replay-memory-access", no_class
,
3071 replay_memory_access_types
, &replay_memory_access
, _("\
3072 Set what memory accesses are allowed during replay."), _("\
3073 Show what memory accesses are allowed during replay."),
3074 _("Default is READ-ONLY.\n\n\
3075 The btrace record target does not trace data.\n\
3076 The memory therefore corresponds to the live target and not \
3077 to the current replay position.\n\n\
3078 When READ-ONLY, allow accesses to read-only memory during replay.\n\
3079 When READ-WRITE, allow accesses to read-only and read-write memory during \
3081 NULL
, cmd_show_replay_memory_access
,
3082 &set_record_btrace_cmdlist
,
3083 &show_record_btrace_cmdlist
);
3085 add_prefix_cmd ("bts", class_support
, cmd_set_record_btrace_bts
,
3086 _("Set record btrace bts options"),
3087 &set_record_btrace_bts_cmdlist
,
3088 "set record btrace bts ", 0, &set_record_btrace_cmdlist
);
3090 add_prefix_cmd ("bts", class_support
, cmd_show_record_btrace_bts
,
3091 _("Show record btrace bts options"),
3092 &show_record_btrace_bts_cmdlist
,
3093 "show record btrace bts ", 0, &show_record_btrace_cmdlist
);
3095 add_setshow_uinteger_cmd ("buffer-size", no_class
,
3096 &record_btrace_conf
.bts
.size
,
3097 _("Set the record/replay bts buffer size."),
3098 _("Show the record/replay bts buffer size."), _("\
3099 When starting recording request a trace buffer of this size. \
3100 The actual buffer size may differ from the requested size. \
3101 Use \"info record\" to see the actual buffer size.\n\n\
3102 Bigger buffers allow longer recording but also take more time to process \
3103 the recorded execution trace.\n\n\
3104 The trace buffer size may not be changed while recording."), NULL
,
3105 show_record_bts_buffer_size_value
,
3106 &set_record_btrace_bts_cmdlist
,
3107 &show_record_btrace_bts_cmdlist
);
3109 add_prefix_cmd ("pt", class_support
, cmd_set_record_btrace_pt
,
3110 _("Set record btrace pt options"),
3111 &set_record_btrace_pt_cmdlist
,
3112 "set record btrace pt ", 0, &set_record_btrace_cmdlist
);
3114 add_prefix_cmd ("pt", class_support
, cmd_show_record_btrace_pt
,
3115 _("Show record btrace pt options"),
3116 &show_record_btrace_pt_cmdlist
,
3117 "show record btrace pt ", 0, &show_record_btrace_cmdlist
);
3119 add_setshow_uinteger_cmd ("buffer-size", no_class
,
3120 &record_btrace_conf
.pt
.size
,
3121 _("Set the record/replay pt buffer size."),
3122 _("Show the record/replay pt buffer size."), _("\
3123 Bigger buffers allow longer recording but also take more time to process \
3124 the recorded execution.\n\
3125 The actual buffer size may differ from the requested size. Use \"info record\" \
3126 to see the actual buffer size."), NULL
, show_record_pt_buffer_size_value
,
3127 &set_record_btrace_pt_cmdlist
,
3128 &show_record_btrace_pt_cmdlist
);
3130 init_record_btrace_ops ();
3131 add_target (&record_btrace_ops
);
3133 bfcache
= htab_create_alloc (50, bfcache_hash
, bfcache_eq
, NULL
,
3136 record_btrace_conf
.bts
.size
= 64 * 1024;
3137 record_btrace_conf
.pt
.size
= 16 * 1024;