1 /* Branch trace support for GDB, the GNU debugger.
3 Copyright (C) 2013-2017 Free Software Foundation, Inc.
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "record-btrace.h"
25 #include "gdbthread.h"
30 #include "cli/cli-utils.h"
34 #include "filenames.h"
36 #include "frame-unwind.h"
39 #include "event-loop.h"
44 /* The target_ops of record-btrace. */
45 static struct target_ops record_btrace_ops
;
47 /* A new thread observer enabling branch tracing for the new thread. */
48 static struct observer
*record_btrace_thread_observer
;
50 /* Memory access types used in set/show record btrace replay-memory-access. */
51 static const char replay_memory_access_read_only
[] = "read-only";
52 static const char replay_memory_access_read_write
[] = "read-write";
53 static const char *const replay_memory_access_types
[] =
55 replay_memory_access_read_only
,
56 replay_memory_access_read_write
,
60 /* The currently allowed replay memory access type. */
61 static const char *replay_memory_access
= replay_memory_access_read_only
;
63 /* Command lists for "set/show record btrace". */
64 static struct cmd_list_element
*set_record_btrace_cmdlist
;
65 static struct cmd_list_element
*show_record_btrace_cmdlist
;
67 /* The execution direction of the last resume we got. See record-full.c. */
68 static enum exec_direction_kind record_btrace_resume_exec_dir
= EXEC_FORWARD
;
70 /* The async event handler for reverse/replay execution. */
71 static struct async_event_handler
*record_btrace_async_inferior_event_handler
;
73 /* A flag indicating that we are currently generating a core file. */
74 static int record_btrace_generating_corefile
;
76 /* The current branch trace configuration. */
77 static struct btrace_config record_btrace_conf
;
79 /* Command list for "record btrace". */
80 static struct cmd_list_element
*record_btrace_cmdlist
;
82 /* Command lists for "set/show record btrace bts". */
83 static struct cmd_list_element
*set_record_btrace_bts_cmdlist
;
84 static struct cmd_list_element
*show_record_btrace_bts_cmdlist
;
86 /* Command lists for "set/show record btrace pt". */
87 static struct cmd_list_element
*set_record_btrace_pt_cmdlist
;
88 static struct cmd_list_element
*show_record_btrace_pt_cmdlist
;
90 /* Print a record-btrace debug message. Use do ... while (0) to avoid
91 ambiguities when used in if statements. */
/* Print a record-btrace debug message.  Use do ... while (0) to avoid
   ambiguities when used in if statements.  */

#define DEBUG(msg, args...)						\
  do									\
    {									\
      if (record_debug != 0)						\
	fprintf_unfiltered (gdb_stdlog,					\
			    "[record-btrace] " msg "\n", ##args);	\
    }									\
  while (0)
103 /* Update the branch trace for the current thread and return a pointer to its
106 Throws an error if there is no thread or no trace. This function never
109 static struct thread_info
*
110 require_btrace_thread (void)
112 struct thread_info
*tp
;
116 tp
= find_thread_ptid (inferior_ptid
);
118 error (_("No thread."));
120 validate_registers_access ();
124 if (btrace_is_empty (tp
))
125 error (_("No trace."));
130 /* Update the branch trace for the current thread and return a pointer to its
131 branch trace information struct.
133 Throws an error if there is no thread or no trace. This function never
136 static struct btrace_thread_info
*
137 require_btrace (void)
139 struct thread_info
*tp
;
141 tp
= require_btrace_thread ();
146 /* Enable branch tracing for one thread. Warn on errors. */
149 record_btrace_enable_warn (struct thread_info
*tp
)
153 btrace_enable (tp
, &record_btrace_conf
);
155 CATCH (error
, RETURN_MASK_ERROR
)
157 warning ("%s", error
.message
);
162 /* Callback function to disable branch tracing for one thread. */
165 record_btrace_disable_callback (void *arg
)
167 struct thread_info
*tp
= (struct thread_info
*) arg
;
172 /* Enable automatic tracing of new threads. */
175 record_btrace_auto_enable (void)
177 DEBUG ("attach thread observer");
179 record_btrace_thread_observer
180 = observer_attach_new_thread (record_btrace_enable_warn
);
183 /* Disable automatic tracing of new threads. */
186 record_btrace_auto_disable (void)
188 /* The observer may have been detached, already. */
189 if (record_btrace_thread_observer
== NULL
)
192 DEBUG ("detach thread observer");
194 observer_detach_new_thread (record_btrace_thread_observer
);
195 record_btrace_thread_observer
= NULL
;
198 /* The record-btrace async event handler function. */
201 record_btrace_handle_async_inferior_event (gdb_client_data data
)
203 inferior_event_handler (INF_REG_EVENT
, NULL
);
206 /* See record-btrace.h. */
209 record_btrace_push_target (void)
213 record_btrace_auto_enable ();
215 push_target (&record_btrace_ops
);
217 record_btrace_async_inferior_event_handler
218 = create_async_event_handler (record_btrace_handle_async_inferior_event
,
220 record_btrace_generating_corefile
= 0;
222 format
= btrace_format_short_string (record_btrace_conf
.format
);
223 observer_notify_record_changed (current_inferior (), 1, "btrace", format
);
226 /* The to_open method of target record-btrace. */
229 record_btrace_open (const char *args
, int from_tty
)
231 struct cleanup
*disable_chain
;
232 struct thread_info
*tp
;
238 if (!target_has_execution
)
239 error (_("The program is not being run."));
241 gdb_assert (record_btrace_thread_observer
== NULL
);
243 disable_chain
= make_cleanup (null_cleanup
, NULL
);
244 ALL_NON_EXITED_THREADS (tp
)
245 if (args
== NULL
|| *args
== 0 || number_is_in_list (args
, tp
->global_num
))
247 btrace_enable (tp
, &record_btrace_conf
);
249 make_cleanup (record_btrace_disable_callback
, tp
);
252 record_btrace_push_target ();
254 discard_cleanups (disable_chain
);
257 /* The to_stop_recording method of target record-btrace. */
260 record_btrace_stop_recording (struct target_ops
*self
)
262 struct thread_info
*tp
;
264 DEBUG ("stop recording");
266 record_btrace_auto_disable ();
268 ALL_NON_EXITED_THREADS (tp
)
269 if (tp
->btrace
.target
!= NULL
)
273 /* The to_disconnect method of target record-btrace. */
276 record_btrace_disconnect (struct target_ops
*self
, const char *args
,
279 struct target_ops
*beneath
= self
->beneath
;
281 /* Do not stop recording, just clean up GDB side. */
282 unpush_target (self
);
284 /* Forward disconnect. */
285 beneath
->to_disconnect (beneath
, args
, from_tty
);
288 /* The to_close method of target record-btrace. */
291 record_btrace_close (struct target_ops
*self
)
293 struct thread_info
*tp
;
295 if (record_btrace_async_inferior_event_handler
!= NULL
)
296 delete_async_event_handler (&record_btrace_async_inferior_event_handler
);
298 /* Make sure automatic recording gets disabled even if we did not stop
299 recording before closing the record-btrace target. */
300 record_btrace_auto_disable ();
302 /* We should have already stopped recording.
303 Tear down btrace in case we have not. */
304 ALL_NON_EXITED_THREADS (tp
)
305 btrace_teardown (tp
);
308 /* The to_async method of target record-btrace. */
311 record_btrace_async (struct target_ops
*ops
, int enable
)
314 mark_async_event_handler (record_btrace_async_inferior_event_handler
);
316 clear_async_event_handler (record_btrace_async_inferior_event_handler
);
318 ops
->beneath
->to_async (ops
->beneath
, enable
);
/* Adjusts the size and returns a human readable size suffix.

   *SIZE is divided down by the largest power-of-two unit (GB, MB, kB)
   that divides it evenly; the matching suffix is returned.  If no unit
   divides it evenly, *SIZE is left unchanged and "" is returned.  */

static const char *
record_btrace_adjust_size (unsigned int *size)
{
  unsigned int sz;

  sz = *size;

  if ((sz & ((1u << 30) - 1)) == 0)
    {
      *size = sz >> 30;
      return "GB";
    }
  else if ((sz & ((1u << 20) - 1)) == 0)
    {
      *size = sz >> 20;
      return "MB";
    }
  else if ((sz & ((1u << 10) - 1)) == 0)
    {
      *size = sz >> 10;
      return "kB";
    }

  return "";
}
349 /* Print a BTS configuration. */
352 record_btrace_print_bts_conf (const struct btrace_config_bts
*conf
)
360 suffix
= record_btrace_adjust_size (&size
);
361 printf_unfiltered (_("Buffer size: %u%s.\n"), size
, suffix
);
365 /* Print an Intel Processor Trace configuration. */
368 record_btrace_print_pt_conf (const struct btrace_config_pt
*conf
)
376 suffix
= record_btrace_adjust_size (&size
);
377 printf_unfiltered (_("Buffer size: %u%s.\n"), size
, suffix
);
381 /* Print a branch tracing configuration. */
384 record_btrace_print_conf (const struct btrace_config
*conf
)
386 printf_unfiltered (_("Recording format: %s.\n"),
387 btrace_format_string (conf
->format
));
389 switch (conf
->format
)
391 case BTRACE_FORMAT_NONE
:
394 case BTRACE_FORMAT_BTS
:
395 record_btrace_print_bts_conf (&conf
->bts
);
398 case BTRACE_FORMAT_PT
:
399 record_btrace_print_pt_conf (&conf
->pt
);
403 internal_error (__FILE__
, __LINE__
, _("Unkown branch trace format."));
406 /* The to_info_record method of target record-btrace. */
409 record_btrace_info (struct target_ops
*self
)
411 struct btrace_thread_info
*btinfo
;
412 const struct btrace_config
*conf
;
413 struct thread_info
*tp
;
414 unsigned int insns
, calls
, gaps
;
418 tp
= find_thread_ptid (inferior_ptid
);
420 error (_("No thread."));
422 validate_registers_access ();
424 btinfo
= &tp
->btrace
;
426 conf
= btrace_conf (btinfo
);
428 record_btrace_print_conf (conf
);
436 if (!btrace_is_empty (tp
))
438 struct btrace_call_iterator call
;
439 struct btrace_insn_iterator insn
;
441 btrace_call_end (&call
, btinfo
);
442 btrace_call_prev (&call
, 1);
443 calls
= btrace_call_number (&call
);
445 btrace_insn_end (&insn
, btinfo
);
446 insns
= btrace_insn_number (&insn
);
448 /* If the last instruction is not a gap, it is the current instruction
449 that is not actually part of the record. */
450 if (btrace_insn_get (&insn
) != NULL
)
453 gaps
= btinfo
->ngaps
;
456 printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
457 "for thread %s (%s).\n"), insns
, calls
, gaps
,
458 print_thread_id (tp
), target_pid_to_str (tp
->ptid
));
460 if (btrace_is_replaying (tp
))
461 printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
462 btrace_insn_number (btinfo
->replay
));
465 /* Print a decode error. */
468 btrace_ui_out_decode_error (struct ui_out
*uiout
, int errcode
,
469 enum btrace_format format
)
471 const char *errstr
= btrace_decode_error (format
, errcode
);
473 uiout
->text (_("["));
474 /* ERRCODE > 0 indicates notifications on BTRACE_FORMAT_PT. */
475 if (!(format
== BTRACE_FORMAT_PT
&& errcode
> 0))
477 uiout
->text (_("decode error ("));
478 uiout
->field_int ("errcode", errcode
);
479 uiout
->text (_("): "));
481 uiout
->text (errstr
);
482 uiout
->text (_("]\n"));
485 /* Print an unsigned int. */
488 ui_out_field_uint (struct ui_out
*uiout
, const char *fld
, unsigned int val
)
490 uiout
->field_fmt (fld
, "%u", val
);
/* A range of source lines.  */

struct btrace_line_range
{
  /* The symtab this line is from.  */
  struct symtab *symtab;

  /* The first line (inclusive).  */
  int begin;

  /* The last line (exclusive).  */
  int end;
};
507 /* Construct a line range. */
509 static struct btrace_line_range
510 btrace_mk_line_range (struct symtab
*symtab
, int begin
, int end
)
512 struct btrace_line_range range
;
514 range
.symtab
= symtab
;
521 /* Add a line to a line range. */
523 static struct btrace_line_range
524 btrace_line_range_add (struct btrace_line_range range
, int line
)
526 if (range
.end
<= range
.begin
)
528 /* This is the first entry. */
530 range
.end
= line
+ 1;
532 else if (line
< range
.begin
)
534 else if (range
.end
< line
)
540 /* Return non-zero if RANGE is empty, zero otherwise. */
543 btrace_line_range_is_empty (struct btrace_line_range range
)
545 return range
.end
<= range
.begin
;
548 /* Return non-zero if LHS contains RHS, zero otherwise. */
551 btrace_line_range_contains_range (struct btrace_line_range lhs
,
552 struct btrace_line_range rhs
)
554 return ((lhs
.symtab
== rhs
.symtab
)
555 && (lhs
.begin
<= rhs
.begin
)
556 && (rhs
.end
<= lhs
.end
));
559 /* Find the line range associated with PC. */
561 static struct btrace_line_range
562 btrace_find_line_range (CORE_ADDR pc
)
564 struct btrace_line_range range
;
565 struct linetable_entry
*lines
;
566 struct linetable
*ltable
;
567 struct symtab
*symtab
;
570 symtab
= find_pc_line_symtab (pc
);
572 return btrace_mk_line_range (NULL
, 0, 0);
574 ltable
= SYMTAB_LINETABLE (symtab
);
576 return btrace_mk_line_range (symtab
, 0, 0);
578 nlines
= ltable
->nitems
;
579 lines
= ltable
->item
;
581 return btrace_mk_line_range (symtab
, 0, 0);
583 range
= btrace_mk_line_range (symtab
, 0, 0);
584 for (i
= 0; i
< nlines
- 1; i
++)
586 if ((lines
[i
].pc
== pc
) && (lines
[i
].line
!= 0))
587 range
= btrace_line_range_add (range
, lines
[i
].line
);
593 /* Print source lines in LINES to UIOUT.
595 UI_ITEM_CHAIN is a cleanup chain for the last source line and the
596 instructions corresponding to that source line. When printing a new source
597 line, we do the cleanups for the open chain and open a new cleanup chain for
598 the new source line. If the source line range in LINES is not empty, this
599 function will leave the cleanup chain for the last printed source line open
600 so instructions can be added to it. */
603 btrace_print_lines (struct btrace_line_range lines
, struct ui_out
*uiout
,
604 struct cleanup
**ui_item_chain
, int flags
)
606 print_source_lines_flags psl_flags
;
610 if (flags
& DISASSEMBLY_FILENAME
)
611 psl_flags
|= PRINT_SOURCE_LINES_FILENAME
;
613 for (line
= lines
.begin
; line
< lines
.end
; ++line
)
615 if (*ui_item_chain
!= NULL
)
616 do_cleanups (*ui_item_chain
);
619 = make_cleanup_ui_out_tuple_begin_end (uiout
, "src_and_asm_line");
621 print_source_lines (lines
.symtab
, line
, line
+ 1, psl_flags
);
623 make_cleanup_ui_out_list_begin_end (uiout
, "line_asm_insn");
627 /* Disassemble a section of the recorded instruction trace. */
630 btrace_insn_history (struct ui_out
*uiout
,
631 const struct btrace_thread_info
*btinfo
,
632 const struct btrace_insn_iterator
*begin
,
633 const struct btrace_insn_iterator
*end
,
634 gdb_disassembly_flags flags
)
636 struct cleanup
*cleanups
, *ui_item_chain
;
637 struct gdbarch
*gdbarch
;
638 struct btrace_insn_iterator it
;
639 struct btrace_line_range last_lines
;
641 DEBUG ("itrace (0x%x): [%u; %u)", (unsigned) flags
,
642 btrace_insn_number (begin
), btrace_insn_number (end
));
644 flags
|= DISASSEMBLY_SPECULATIVE
;
646 gdbarch
= target_gdbarch ();
647 last_lines
= btrace_mk_line_range (NULL
, 0, 0);
649 cleanups
= make_cleanup_ui_out_list_begin_end (uiout
, "asm_insns");
651 /* UI_ITEM_CHAIN is a cleanup chain for the last source line and the
652 instructions corresponding to that line. */
653 ui_item_chain
= NULL
;
655 gdb_pretty_print_disassembler
disasm (gdbarch
);
657 for (it
= *begin
; btrace_insn_cmp (&it
, end
) != 0; btrace_insn_next (&it
, 1))
659 const struct btrace_insn
*insn
;
661 insn
= btrace_insn_get (&it
);
663 /* A NULL instruction indicates a gap in the trace. */
666 const struct btrace_config
*conf
;
668 conf
= btrace_conf (btinfo
);
670 /* We have trace so we must have a configuration. */
671 gdb_assert (conf
!= NULL
);
673 uiout
->field_fmt ("insn-number", "%u",
674 btrace_insn_number (&it
));
677 btrace_ui_out_decode_error (uiout
, btrace_insn_get_error (&it
),
682 struct disasm_insn dinsn
;
684 if ((flags
& DISASSEMBLY_SOURCE
) != 0)
686 struct btrace_line_range lines
;
688 lines
= btrace_find_line_range (insn
->pc
);
689 if (!btrace_line_range_is_empty (lines
)
690 && !btrace_line_range_contains_range (last_lines
, lines
))
692 btrace_print_lines (lines
, uiout
, &ui_item_chain
, flags
);
695 else if (ui_item_chain
== NULL
)
698 = make_cleanup_ui_out_tuple_begin_end (uiout
,
700 /* No source information. */
701 make_cleanup_ui_out_list_begin_end (uiout
, "line_asm_insn");
704 gdb_assert (ui_item_chain
!= NULL
);
707 memset (&dinsn
, 0, sizeof (dinsn
));
708 dinsn
.number
= btrace_insn_number (&it
);
709 dinsn
.addr
= insn
->pc
;
711 if ((insn
->flags
& BTRACE_INSN_FLAG_SPECULATIVE
) != 0)
712 dinsn
.is_speculative
= 1;
714 disasm
.pretty_print_insn (uiout
, &dinsn
, flags
);
718 do_cleanups (cleanups
);
721 /* The to_insn_history method of target record-btrace. */
724 record_btrace_insn_history (struct target_ops
*self
, int size
,
725 gdb_disassembly_flags flags
)
727 struct btrace_thread_info
*btinfo
;
728 struct btrace_insn_history
*history
;
729 struct btrace_insn_iterator begin
, end
;
730 struct ui_out
*uiout
;
731 unsigned int context
, covered
;
733 uiout
= current_uiout
;
734 ui_out_emit_tuple
tuple_emitter (uiout
, "insn history");
735 context
= abs (size
);
737 error (_("Bad record instruction-history-size."));
739 btinfo
= require_btrace ();
740 history
= btinfo
->insn_history
;
743 struct btrace_insn_iterator
*replay
;
745 DEBUG ("insn-history (0x%x): %d", (unsigned) flags
, size
);
747 /* If we're replaying, we start at the replay position. Otherwise, we
748 start at the tail of the trace. */
749 replay
= btinfo
->replay
;
753 btrace_insn_end (&begin
, btinfo
);
755 /* We start from here and expand in the requested direction. Then we
756 expand in the other direction, as well, to fill up any remaining
761 /* We want the current position covered, as well. */
762 covered
= btrace_insn_next (&end
, 1);
763 covered
+= btrace_insn_prev (&begin
, context
- covered
);
764 covered
+= btrace_insn_next (&end
, context
- covered
);
768 covered
= btrace_insn_next (&end
, context
);
769 covered
+= btrace_insn_prev (&begin
, context
- covered
);
774 begin
= history
->begin
;
777 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", (unsigned) flags
, size
,
778 btrace_insn_number (&begin
), btrace_insn_number (&end
));
783 covered
= btrace_insn_prev (&begin
, context
);
788 covered
= btrace_insn_next (&end
, context
);
793 btrace_insn_history (uiout
, btinfo
, &begin
, &end
, flags
);
797 printf_unfiltered (_("At the start of the branch trace record.\n"));
799 printf_unfiltered (_("At the end of the branch trace record.\n"));
802 btrace_set_insn_history (btinfo
, &begin
, &end
);
805 /* The to_insn_history_range method of target record-btrace. */
808 record_btrace_insn_history_range (struct target_ops
*self
,
809 ULONGEST from
, ULONGEST to
,
810 gdb_disassembly_flags flags
)
812 struct btrace_thread_info
*btinfo
;
813 struct btrace_insn_iterator begin
, end
;
814 struct ui_out
*uiout
;
815 unsigned int low
, high
;
818 uiout
= current_uiout
;
819 ui_out_emit_tuple
tuple_emitter (uiout
, "insn history");
823 DEBUG ("insn-history (0x%x): [%u; %u)", (unsigned) flags
, low
, high
);
825 /* Check for wrap-arounds. */
826 if (low
!= from
|| high
!= to
)
827 error (_("Bad range."));
830 error (_("Bad range."));
832 btinfo
= require_btrace ();
834 found
= btrace_find_insn_by_number (&begin
, btinfo
, low
);
836 error (_("Range out of bounds."));
838 found
= btrace_find_insn_by_number (&end
, btinfo
, high
);
841 /* Silently truncate the range. */
842 btrace_insn_end (&end
, btinfo
);
846 /* We want both begin and end to be inclusive. */
847 btrace_insn_next (&end
, 1);
850 btrace_insn_history (uiout
, btinfo
, &begin
, &end
, flags
);
851 btrace_set_insn_history (btinfo
, &begin
, &end
);
854 /* The to_insn_history_from method of target record-btrace. */
857 record_btrace_insn_history_from (struct target_ops
*self
,
858 ULONGEST from
, int size
,
859 gdb_disassembly_flags flags
)
861 ULONGEST begin
, end
, context
;
863 context
= abs (size
);
865 error (_("Bad record instruction-history-size."));
874 begin
= from
- context
+ 1;
879 end
= from
+ context
- 1;
881 /* Check for wrap-around. */
886 record_btrace_insn_history_range (self
, begin
, end
, flags
);
889 /* Print the instruction number range for a function call history line. */
892 btrace_call_history_insn_range (struct ui_out
*uiout
,
893 const struct btrace_function
*bfun
)
895 unsigned int begin
, end
, size
;
897 size
= bfun
->insn
.size ();
898 gdb_assert (size
> 0);
900 begin
= bfun
->insn_offset
;
901 end
= begin
+ size
- 1;
903 ui_out_field_uint (uiout
, "insn begin", begin
);
905 ui_out_field_uint (uiout
, "insn end", end
);
908 /* Compute the lowest and highest source line for the instructions in BFUN
909 and return them in PBEGIN and PEND.
910 Ignore instructions that can't be mapped to BFUN, e.g. instructions that
911 result from inlining or macro expansion. */
914 btrace_compute_src_line_range (const struct btrace_function
*bfun
,
915 int *pbegin
, int *pend
)
917 struct symtab
*symtab
;
928 symtab
= symbol_symtab (sym
);
930 for (const btrace_insn
&insn
: bfun
->insn
)
932 struct symtab_and_line sal
;
934 sal
= find_pc_line (insn
.pc
, 0);
935 if (sal
.symtab
!= symtab
|| sal
.line
== 0)
938 begin
= std::min (begin
, sal
.line
);
939 end
= std::max (end
, sal
.line
);
947 /* Print the source line information for a function call history line. */
950 btrace_call_history_src_line (struct ui_out
*uiout
,
951 const struct btrace_function
*bfun
)
960 uiout
->field_string ("file",
961 symtab_to_filename_for_display (symbol_symtab (sym
)));
963 btrace_compute_src_line_range (bfun
, &begin
, &end
);
968 uiout
->field_int ("min line", begin
);
974 uiout
->field_int ("max line", end
);
977 /* Get the name of a branch trace function. */
980 btrace_get_bfun_name (const struct btrace_function
*bfun
)
982 struct minimal_symbol
*msym
;
992 return SYMBOL_PRINT_NAME (sym
);
993 else if (msym
!= NULL
)
994 return MSYMBOL_PRINT_NAME (msym
);
999 /* Disassemble a section of the recorded function trace. */
1002 btrace_call_history (struct ui_out
*uiout
,
1003 const struct btrace_thread_info
*btinfo
,
1004 const struct btrace_call_iterator
*begin
,
1005 const struct btrace_call_iterator
*end
,
1008 struct btrace_call_iterator it
;
1009 record_print_flags flags
= (enum record_print_flag
) int_flags
;
1011 DEBUG ("ftrace (0x%x): [%u; %u)", int_flags
, btrace_call_number (begin
),
1012 btrace_call_number (end
));
1014 for (it
= *begin
; btrace_call_cmp (&it
, end
) < 0; btrace_call_next (&it
, 1))
1016 const struct btrace_function
*bfun
;
1017 struct minimal_symbol
*msym
;
1020 bfun
= btrace_call_get (&it
);
1024 /* Print the function index. */
1025 ui_out_field_uint (uiout
, "index", bfun
->number
);
1028 /* Indicate gaps in the trace. */
1029 if (bfun
->errcode
!= 0)
1031 const struct btrace_config
*conf
;
1033 conf
= btrace_conf (btinfo
);
1035 /* We have trace so we must have a configuration. */
1036 gdb_assert (conf
!= NULL
);
1038 btrace_ui_out_decode_error (uiout
, bfun
->errcode
, conf
->format
);
1043 if ((flags
& RECORD_PRINT_INDENT_CALLS
) != 0)
1045 int level
= bfun
->level
+ btinfo
->level
, i
;
1047 for (i
= 0; i
< level
; ++i
)
1052 uiout
->field_string ("function", SYMBOL_PRINT_NAME (sym
));
1053 else if (msym
!= NULL
)
1054 uiout
->field_string ("function", MSYMBOL_PRINT_NAME (msym
));
1055 else if (!uiout
->is_mi_like_p ())
1056 uiout
->field_string ("function", "??");
1058 if ((flags
& RECORD_PRINT_INSN_RANGE
) != 0)
1060 uiout
->text (_("\tinst "));
1061 btrace_call_history_insn_range (uiout
, bfun
);
1064 if ((flags
& RECORD_PRINT_SRC_LINE
) != 0)
1066 uiout
->text (_("\tat "));
1067 btrace_call_history_src_line (uiout
, bfun
);
1074 /* The to_call_history method of target record-btrace. */
1077 record_btrace_call_history (struct target_ops
*self
, int size
, int int_flags
)
1079 struct btrace_thread_info
*btinfo
;
1080 struct btrace_call_history
*history
;
1081 struct btrace_call_iterator begin
, end
;
1082 struct ui_out
*uiout
;
1083 unsigned int context
, covered
;
1084 record_print_flags flags
= (enum record_print_flag
) int_flags
;
1086 uiout
= current_uiout
;
1087 ui_out_emit_tuple
tuple_emitter (uiout
, "insn history");
1088 context
= abs (size
);
1090 error (_("Bad record function-call-history-size."));
1092 btinfo
= require_btrace ();
1093 history
= btinfo
->call_history
;
1094 if (history
== NULL
)
1096 struct btrace_insn_iterator
*replay
;
1098 DEBUG ("call-history (0x%x): %d", int_flags
, size
);
1100 /* If we're replaying, we start at the replay position. Otherwise, we
1101 start at the tail of the trace. */
1102 replay
= btinfo
->replay
;
1105 begin
.btinfo
= btinfo
;
1106 begin
.index
= replay
->call_index
;
1109 btrace_call_end (&begin
, btinfo
);
1111 /* We start from here and expand in the requested direction. Then we
1112 expand in the other direction, as well, to fill up any remaining
1117 /* We want the current position covered, as well. */
1118 covered
= btrace_call_next (&end
, 1);
1119 covered
+= btrace_call_prev (&begin
, context
- covered
);
1120 covered
+= btrace_call_next (&end
, context
- covered
);
1124 covered
= btrace_call_next (&end
, context
);
1125 covered
+= btrace_call_prev (&begin
, context
- covered
);
1130 begin
= history
->begin
;
1133 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", int_flags
, size
,
1134 btrace_call_number (&begin
), btrace_call_number (&end
));
1139 covered
= btrace_call_prev (&begin
, context
);
1144 covered
= btrace_call_next (&end
, context
);
1149 btrace_call_history (uiout
, btinfo
, &begin
, &end
, flags
);
1153 printf_unfiltered (_("At the start of the branch trace record.\n"));
1155 printf_unfiltered (_("At the end of the branch trace record.\n"));
1158 btrace_set_call_history (btinfo
, &begin
, &end
);
1161 /* The to_call_history_range method of target record-btrace. */
1164 record_btrace_call_history_range (struct target_ops
*self
,
1165 ULONGEST from
, ULONGEST to
,
1168 struct btrace_thread_info
*btinfo
;
1169 struct btrace_call_iterator begin
, end
;
1170 struct ui_out
*uiout
;
1171 unsigned int low
, high
;
1173 record_print_flags flags
= (enum record_print_flag
) int_flags
;
1175 uiout
= current_uiout
;
1176 ui_out_emit_tuple
tuple_emitter (uiout
, "func history");
1180 DEBUG ("call-history (0x%x): [%u; %u)", int_flags
, low
, high
);
1182 /* Check for wrap-arounds. */
1183 if (low
!= from
|| high
!= to
)
1184 error (_("Bad range."));
1187 error (_("Bad range."));
1189 btinfo
= require_btrace ();
1191 found
= btrace_find_call_by_number (&begin
, btinfo
, low
);
1193 error (_("Range out of bounds."));
1195 found
= btrace_find_call_by_number (&end
, btinfo
, high
);
1198 /* Silently truncate the range. */
1199 btrace_call_end (&end
, btinfo
);
1203 /* We want both begin and end to be inclusive. */
1204 btrace_call_next (&end
, 1);
1207 btrace_call_history (uiout
, btinfo
, &begin
, &end
, flags
);
1208 btrace_set_call_history (btinfo
, &begin
, &end
);
1211 /* The to_call_history_from method of target record-btrace. */
1214 record_btrace_call_history_from (struct target_ops
*self
,
1215 ULONGEST from
, int size
,
1218 ULONGEST begin
, end
, context
;
1219 record_print_flags flags
= (enum record_print_flag
) int_flags
;
1221 context
= abs (size
);
1223 error (_("Bad record function-call-history-size."));
1232 begin
= from
- context
+ 1;
1237 end
= from
+ context
- 1;
1239 /* Check for wrap-around. */
1244 record_btrace_call_history_range (self
, begin
, end
, flags
);
1247 /* The to_record_method method of target record-btrace. */
1249 static enum record_method
1250 record_btrace_record_method (struct target_ops
*self
, ptid_t ptid
)
1252 struct thread_info
* const tp
= find_thread_ptid (ptid
);
1255 error (_("No thread."));
1257 if (tp
->btrace
.target
== NULL
)
1258 return RECORD_METHOD_NONE
;
1260 return RECORD_METHOD_BTRACE
;
1263 /* The to_record_is_replaying method of target record-btrace. */
1266 record_btrace_is_replaying (struct target_ops
*self
, ptid_t ptid
)
1268 struct thread_info
*tp
;
1270 ALL_NON_EXITED_THREADS (tp
)
1271 if (ptid_match (tp
->ptid
, ptid
) && btrace_is_replaying (tp
))
1277 /* The to_record_will_replay method of target record-btrace. */
1280 record_btrace_will_replay (struct target_ops
*self
, ptid_t ptid
, int dir
)
1282 return dir
== EXEC_REVERSE
|| record_btrace_is_replaying (self
, ptid
);
1285 /* The to_xfer_partial method of target record-btrace. */
1287 static enum target_xfer_status
1288 record_btrace_xfer_partial (struct target_ops
*ops
, enum target_object object
,
1289 const char *annex
, gdb_byte
*readbuf
,
1290 const gdb_byte
*writebuf
, ULONGEST offset
,
1291 ULONGEST len
, ULONGEST
*xfered_len
)
1293 /* Filter out requests that don't make sense during replay. */
1294 if (replay_memory_access
== replay_memory_access_read_only
1295 && !record_btrace_generating_corefile
1296 && record_btrace_is_replaying (ops
, inferior_ptid
))
1300 case TARGET_OBJECT_MEMORY
:
1302 struct target_section
*section
;
1304 /* We do not allow writing memory in general. */
1305 if (writebuf
!= NULL
)
1308 return TARGET_XFER_UNAVAILABLE
;
1311 /* We allow reading readonly memory. */
1312 section
= target_section_by_addr (ops
, offset
);
1313 if (section
!= NULL
)
1315 /* Check if the section we found is readonly. */
1316 if ((bfd_get_section_flags (section
->the_bfd_section
->owner
,
1317 section
->the_bfd_section
)
1318 & SEC_READONLY
) != 0)
1320 /* Truncate the request to fit into this section. */
1321 len
= std::min (len
, section
->endaddr
- offset
);
1327 return TARGET_XFER_UNAVAILABLE
;
1332 /* Forward the request. */
1334 return ops
->to_xfer_partial (ops
, object
, annex
, readbuf
, writebuf
,
1335 offset
, len
, xfered_len
);
1338 /* The to_insert_breakpoint method of target record-btrace. */
1341 record_btrace_insert_breakpoint (struct target_ops
*ops
,
1342 struct gdbarch
*gdbarch
,
1343 struct bp_target_info
*bp_tgt
)
1348 /* Inserting breakpoints requires accessing memory. Allow it for the
1349 duration of this function. */
1350 old
= replay_memory_access
;
1351 replay_memory_access
= replay_memory_access_read_write
;
1356 ret
= ops
->beneath
->to_insert_breakpoint (ops
->beneath
, gdbarch
, bp_tgt
);
1358 CATCH (except
, RETURN_MASK_ALL
)
1360 replay_memory_access
= old
;
1361 throw_exception (except
);
1364 replay_memory_access
= old
;
1369 /* The to_remove_breakpoint method of target record-btrace. */
1372 record_btrace_remove_breakpoint (struct target_ops
*ops
,
1373 struct gdbarch
*gdbarch
,
1374 struct bp_target_info
*bp_tgt
,
1375 enum remove_bp_reason reason
)
1380 /* Removing breakpoints requires accessing memory. Allow it for the
1381 duration of this function. */
1382 old
= replay_memory_access
;
1383 replay_memory_access
= replay_memory_access_read_write
;
1388 ret
= ops
->beneath
->to_remove_breakpoint (ops
->beneath
, gdbarch
, bp_tgt
,
1391 CATCH (except
, RETURN_MASK_ALL
)
1393 replay_memory_access
= old
;
1394 throw_exception (except
);
1397 replay_memory_access
= old
;
1402 /* The to_fetch_registers method of target record-btrace. */
1405 record_btrace_fetch_registers (struct target_ops
*ops
,
1406 struct regcache
*regcache
, int regno
)
1408 struct btrace_insn_iterator
*replay
;
1409 struct thread_info
*tp
;
1411 tp
= find_thread_ptid (regcache_get_ptid (regcache
));
1412 gdb_assert (tp
!= NULL
);
1414 replay
= tp
->btrace
.replay
;
1415 if (replay
!= NULL
&& !record_btrace_generating_corefile
)
1417 const struct btrace_insn
*insn
;
1418 struct gdbarch
*gdbarch
;
1421 gdbarch
= regcache
->arch ();
1422 pcreg
= gdbarch_pc_regnum (gdbarch
);
1426 /* We can only provide the PC register. */
1427 if (regno
>= 0 && regno
!= pcreg
)
1430 insn
= btrace_insn_get (replay
);
1431 gdb_assert (insn
!= NULL
);
1433 regcache_raw_supply (regcache
, regno
, &insn
->pc
);
1437 struct target_ops
*t
= ops
->beneath
;
1439 t
->to_fetch_registers (t
, regcache
, regno
);
1443 /* The to_store_registers method of target record-btrace. */
1446 record_btrace_store_registers (struct target_ops
*ops
,
1447 struct regcache
*regcache
, int regno
)
1449 struct target_ops
*t
;
1451 if (!record_btrace_generating_corefile
1452 && record_btrace_is_replaying (ops
, regcache_get_ptid (regcache
)))
1453 error (_("Cannot write registers while replaying."));
1455 gdb_assert (may_write_registers
!= 0);
1458 t
->to_store_registers (t
, regcache
, regno
);
1461 /* The to_prepare_to_store method of target record-btrace. */
1464 record_btrace_prepare_to_store (struct target_ops
*ops
,
1465 struct regcache
*regcache
)
1467 struct target_ops
*t
;
1469 if (!record_btrace_generating_corefile
1470 && record_btrace_is_replaying (ops
, regcache_get_ptid (regcache
)))
1474 t
->to_prepare_to_store (t
, regcache
);
/* The branch trace frame cache.  */

struct btrace_frame_cache
{
  /* The thread this frame belongs to.  */
  struct thread_info *tp;

  /* The frame info.  */
  struct frame_info *frame;

  /* The branch trace function segment.  */
  const struct btrace_function *bfun;
};
1491 /* A struct btrace_frame_cache hash table indexed by NEXT. */
1493 static htab_t bfcache
;
1495 /* hash_f for htab_create_alloc of bfcache. */
1498 bfcache_hash (const void *arg
)
1500 const struct btrace_frame_cache
*cache
1501 = (const struct btrace_frame_cache
*) arg
;
1503 return htab_hash_pointer (cache
->frame
);
1506 /* eq_f for htab_create_alloc of bfcache. */
1509 bfcache_eq (const void *arg1
, const void *arg2
)
1511 const struct btrace_frame_cache
*cache1
1512 = (const struct btrace_frame_cache
*) arg1
;
1513 const struct btrace_frame_cache
*cache2
1514 = (const struct btrace_frame_cache
*) arg2
;
1516 return cache1
->frame
== cache2
->frame
;
1519 /* Create a new btrace frame cache. */
1521 static struct btrace_frame_cache
*
1522 bfcache_new (struct frame_info
*frame
)
1524 struct btrace_frame_cache
*cache
;
1527 cache
= FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache
);
1528 cache
->frame
= frame
;
1530 slot
= htab_find_slot (bfcache
, cache
, INSERT
);
1531 gdb_assert (*slot
== NULL
);
1537 /* Extract the branch trace function from a branch trace frame. */
1539 static const struct btrace_function
*
1540 btrace_get_frame_function (struct frame_info
*frame
)
1542 const struct btrace_frame_cache
*cache
;
1543 struct btrace_frame_cache pattern
;
1546 pattern
.frame
= frame
;
1548 slot
= htab_find_slot (bfcache
, &pattern
, NO_INSERT
);
1552 cache
= (const struct btrace_frame_cache
*) *slot
;
1556 /* Implement stop_reason method for record_btrace_frame_unwind. */
1558 static enum unwind_stop_reason
1559 record_btrace_frame_unwind_stop_reason (struct frame_info
*this_frame
,
1562 const struct btrace_frame_cache
*cache
;
1563 const struct btrace_function
*bfun
;
1565 cache
= (const struct btrace_frame_cache
*) *this_cache
;
1567 gdb_assert (bfun
!= NULL
);
1570 return UNWIND_UNAVAILABLE
;
1572 return UNWIND_NO_REASON
;
1575 /* Implement this_id method for record_btrace_frame_unwind. */
1578 record_btrace_frame_this_id (struct frame_info
*this_frame
, void **this_cache
,
1579 struct frame_id
*this_id
)
1581 const struct btrace_frame_cache
*cache
;
1582 const struct btrace_function
*bfun
;
1583 struct btrace_call_iterator it
;
1584 CORE_ADDR code
, special
;
1586 cache
= (const struct btrace_frame_cache
*) *this_cache
;
1589 gdb_assert (bfun
!= NULL
);
1591 while (btrace_find_call_by_number (&it
, &cache
->tp
->btrace
, bfun
->prev
) != 0)
1592 bfun
= btrace_call_get (&it
);
1594 code
= get_frame_func (this_frame
);
1595 special
= bfun
->number
;
1597 *this_id
= frame_id_build_unavailable_stack_special (code
, special
);
1599 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1600 btrace_get_bfun_name (cache
->bfun
),
1601 core_addr_to_string_nz (this_id
->code_addr
),
1602 core_addr_to_string_nz (this_id
->special_addr
));
1605 /* Implement prev_register method for record_btrace_frame_unwind. */
1607 static struct value
*
1608 record_btrace_frame_prev_register (struct frame_info
*this_frame
,
1612 const struct btrace_frame_cache
*cache
;
1613 const struct btrace_function
*bfun
, *caller
;
1614 struct btrace_call_iterator it
;
1615 struct gdbarch
*gdbarch
;
1619 gdbarch
= get_frame_arch (this_frame
);
1620 pcreg
= gdbarch_pc_regnum (gdbarch
);
1621 if (pcreg
< 0 || regnum
!= pcreg
)
1622 throw_error (NOT_AVAILABLE_ERROR
,
1623 _("Registers are not available in btrace record history"));
1625 cache
= (const struct btrace_frame_cache
*) *this_cache
;
1627 gdb_assert (bfun
!= NULL
);
1629 if (btrace_find_call_by_number (&it
, &cache
->tp
->btrace
, bfun
->up
) == 0)
1630 throw_error (NOT_AVAILABLE_ERROR
,
1631 _("No caller in btrace record history"));
1633 caller
= btrace_call_get (&it
);
1635 if ((bfun
->flags
& BFUN_UP_LINKS_TO_RET
) != 0)
1636 pc
= caller
->insn
.front ().pc
;
1639 pc
= caller
->insn
.back ().pc
;
1640 pc
+= gdb_insn_length (gdbarch
, pc
);
1643 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1644 btrace_get_bfun_name (bfun
), bfun
->level
,
1645 core_addr_to_string_nz (pc
));
1647 return frame_unwind_got_address (this_frame
, regnum
, pc
);
1650 /* Implement sniffer method for record_btrace_frame_unwind. */
1653 record_btrace_frame_sniffer (const struct frame_unwind
*self
,
1654 struct frame_info
*this_frame
,
1657 const struct btrace_function
*bfun
;
1658 struct btrace_frame_cache
*cache
;
1659 struct thread_info
*tp
;
1660 struct frame_info
*next
;
1662 /* THIS_FRAME does not contain a reference to its thread. */
1663 tp
= find_thread_ptid (inferior_ptid
);
1664 gdb_assert (tp
!= NULL
);
1667 next
= get_next_frame (this_frame
);
1670 const struct btrace_insn_iterator
*replay
;
1672 replay
= tp
->btrace
.replay
;
1674 bfun
= &replay
->btinfo
->functions
[replay
->call_index
];
1678 const struct btrace_function
*callee
;
1679 struct btrace_call_iterator it
;
1681 callee
= btrace_get_frame_function (next
);
1682 if (callee
== NULL
|| (callee
->flags
& BFUN_UP_LINKS_TO_TAILCALL
) != 0)
1685 if (btrace_find_call_by_number (&it
, &tp
->btrace
, callee
->up
) == 0)
1688 bfun
= btrace_call_get (&it
);
1694 DEBUG ("[frame] sniffed frame for %s on level %d",
1695 btrace_get_bfun_name (bfun
), bfun
->level
);
1697 /* This is our frame. Initialize the frame cache. */
1698 cache
= bfcache_new (this_frame
);
1702 *this_cache
= cache
;
1706 /* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1709 record_btrace_tailcall_frame_sniffer (const struct frame_unwind
*self
,
1710 struct frame_info
*this_frame
,
1713 const struct btrace_function
*bfun
, *callee
;
1714 struct btrace_frame_cache
*cache
;
1715 struct btrace_call_iterator it
;
1716 struct frame_info
*next
;
1717 struct thread_info
*tinfo
;
1719 next
= get_next_frame (this_frame
);
1723 callee
= btrace_get_frame_function (next
);
1727 if ((callee
->flags
& BFUN_UP_LINKS_TO_TAILCALL
) == 0)
1730 tinfo
= find_thread_ptid (inferior_ptid
);
1731 if (btrace_find_call_by_number (&it
, &tinfo
->btrace
, callee
->up
) == 0)
1734 bfun
= btrace_call_get (&it
);
1736 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1737 btrace_get_bfun_name (bfun
), bfun
->level
);
1739 /* This is our frame. Initialize the frame cache. */
1740 cache
= bfcache_new (this_frame
);
1744 *this_cache
= cache
;
1749 record_btrace_frame_dealloc_cache (struct frame_info
*self
, void *this_cache
)
1751 struct btrace_frame_cache
*cache
;
1754 cache
= (struct btrace_frame_cache
*) this_cache
;
1756 slot
= htab_find_slot (bfcache
, cache
, NO_INSERT
);
1757 gdb_assert (slot
!= NULL
);
1759 htab_remove_elt (bfcache
, cache
);
1762 /* btrace recording does not store previous memory content, neither the stack
1763 frames content. Any unwinding would return errorneous results as the stack
1764 contents no longer matches the changed PC value restored from history.
1765 Therefore this unwinder reports any possibly unwound registers as
1768 const struct frame_unwind record_btrace_frame_unwind
=
1771 record_btrace_frame_unwind_stop_reason
,
1772 record_btrace_frame_this_id
,
1773 record_btrace_frame_prev_register
,
1775 record_btrace_frame_sniffer
,
1776 record_btrace_frame_dealloc_cache
1779 const struct frame_unwind record_btrace_tailcall_frame_unwind
=
1782 record_btrace_frame_unwind_stop_reason
,
1783 record_btrace_frame_this_id
,
1784 record_btrace_frame_prev_register
,
1786 record_btrace_tailcall_frame_sniffer
,
1787 record_btrace_frame_dealloc_cache
1790 /* Implement the to_get_unwinder method. */
1792 static const struct frame_unwind
*
1793 record_btrace_to_get_unwinder (struct target_ops
*self
)
1795 return &record_btrace_frame_unwind
;
1798 /* Implement the to_get_tailcall_unwinder method. */
1800 static const struct frame_unwind
*
1801 record_btrace_to_get_tailcall_unwinder (struct target_ops
*self
)
1803 return &record_btrace_tailcall_frame_unwind
;
1806 /* Return a human-readable string for FLAG. */
1809 btrace_thread_flag_to_str (enum btrace_thread_flag flag
)
1817 return "reverse-step";
1823 return "reverse-cont";
1832 /* Indicate that TP should be resumed according to FLAG. */
1835 record_btrace_resume_thread (struct thread_info
*tp
,
1836 enum btrace_thread_flag flag
)
1838 struct btrace_thread_info
*btinfo
;
1840 DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp
),
1841 target_pid_to_str (tp
->ptid
), flag
, btrace_thread_flag_to_str (flag
));
1843 btinfo
= &tp
->btrace
;
1845 /* Fetch the latest branch trace. */
1848 /* A resume request overwrites a preceding resume or stop request. */
1849 btinfo
->flags
&= ~(BTHR_MOVE
| BTHR_STOP
);
1850 btinfo
->flags
|= flag
;
1853 /* Get the current frame for TP. */
1855 static struct frame_info
*
1856 get_thread_current_frame (struct thread_info
*tp
)
1858 struct frame_info
*frame
;
1859 ptid_t old_inferior_ptid
;
1862 /* Set INFERIOR_PTID, which is implicitly used by get_current_frame. */
1863 old_inferior_ptid
= inferior_ptid
;
1864 inferior_ptid
= tp
->ptid
;
1866 /* Clear the executing flag to allow changes to the current frame.
1867 We are not actually running, yet. We just started a reverse execution
1868 command or a record goto command.
1869 For the latter, EXECUTING is false and this has no effect.
1870 For the former, EXECUTING is true and we're in to_wait, about to
1871 move the thread. Since we need to recompute the stack, we temporarily
1872 set EXECUTING to flase. */
1873 executing
= is_executing (inferior_ptid
);
1874 set_executing (inferior_ptid
, 0);
1879 frame
= get_current_frame ();
1881 CATCH (except
, RETURN_MASK_ALL
)
1883 /* Restore the previous execution state. */
1884 set_executing (inferior_ptid
, executing
);
1886 /* Restore the previous inferior_ptid. */
1887 inferior_ptid
= old_inferior_ptid
;
1889 throw_exception (except
);
1893 /* Restore the previous execution state. */
1894 set_executing (inferior_ptid
, executing
);
1896 /* Restore the previous inferior_ptid. */
1897 inferior_ptid
= old_inferior_ptid
;
1902 /* Start replaying a thread. */
1904 static struct btrace_insn_iterator
*
1905 record_btrace_start_replaying (struct thread_info
*tp
)
1907 struct btrace_insn_iterator
*replay
;
1908 struct btrace_thread_info
*btinfo
;
1910 btinfo
= &tp
->btrace
;
1913 /* We can't start replaying without trace. */
1914 if (btinfo
->functions
.empty ())
1917 /* GDB stores the current frame_id when stepping in order to detects steps
1919 Since frames are computed differently when we're replaying, we need to
1920 recompute those stored frames and fix them up so we can still detect
1921 subroutines after we started replaying. */
1924 struct frame_info
*frame
;
1925 struct frame_id frame_id
;
1926 int upd_step_frame_id
, upd_step_stack_frame_id
;
1928 /* The current frame without replaying - computed via normal unwind. */
1929 frame
= get_thread_current_frame (tp
);
1930 frame_id
= get_frame_id (frame
);
1932 /* Check if we need to update any stepping-related frame id's. */
1933 upd_step_frame_id
= frame_id_eq (frame_id
,
1934 tp
->control
.step_frame_id
);
1935 upd_step_stack_frame_id
= frame_id_eq (frame_id
,
1936 tp
->control
.step_stack_frame_id
);
1938 /* We start replaying at the end of the branch trace. This corresponds
1939 to the current instruction. */
1940 replay
= XNEW (struct btrace_insn_iterator
);
1941 btrace_insn_end (replay
, btinfo
);
1943 /* Skip gaps at the end of the trace. */
1944 while (btrace_insn_get (replay
) == NULL
)
1948 steps
= btrace_insn_prev (replay
, 1);
1950 error (_("No trace."));
1953 /* We're not replaying, yet. */
1954 gdb_assert (btinfo
->replay
== NULL
);
1955 btinfo
->replay
= replay
;
1957 /* Make sure we're not using any stale registers. */
1958 registers_changed_ptid (tp
->ptid
);
1960 /* The current frame with replaying - computed via btrace unwind. */
1961 frame
= get_thread_current_frame (tp
);
1962 frame_id
= get_frame_id (frame
);
1964 /* Replace stepping related frames where necessary. */
1965 if (upd_step_frame_id
)
1966 tp
->control
.step_frame_id
= frame_id
;
1967 if (upd_step_stack_frame_id
)
1968 tp
->control
.step_stack_frame_id
= frame_id
;
1970 CATCH (except
, RETURN_MASK_ALL
)
1972 xfree (btinfo
->replay
);
1973 btinfo
->replay
= NULL
;
1975 registers_changed_ptid (tp
->ptid
);
1977 throw_exception (except
);
1984 /* Stop replaying a thread. */
1987 record_btrace_stop_replaying (struct thread_info
*tp
)
1989 struct btrace_thread_info
*btinfo
;
1991 btinfo
= &tp
->btrace
;
1993 xfree (btinfo
->replay
);
1994 btinfo
->replay
= NULL
;
1996 /* Make sure we're not leaving any stale registers. */
1997 registers_changed_ptid (tp
->ptid
);
2000 /* Stop replaying TP if it is at the end of its execution history. */
2003 record_btrace_stop_replaying_at_end (struct thread_info
*tp
)
2005 struct btrace_insn_iterator
*replay
, end
;
2006 struct btrace_thread_info
*btinfo
;
2008 btinfo
= &tp
->btrace
;
2009 replay
= btinfo
->replay
;
2014 btrace_insn_end (&end
, btinfo
);
2016 if (btrace_insn_cmp (replay
, &end
) == 0)
2017 record_btrace_stop_replaying (tp
);
2020 /* The to_resume method of target record-btrace. */
2023 record_btrace_resume (struct target_ops
*ops
, ptid_t ptid
, int step
,
2024 enum gdb_signal signal
)
2026 struct thread_info
*tp
;
2027 enum btrace_thread_flag flag
, cflag
;
2029 DEBUG ("resume %s: %s%s", target_pid_to_str (ptid
),
2030 execution_direction
== EXEC_REVERSE
? "reverse-" : "",
2031 step
? "step" : "cont");
2033 /* Store the execution direction of the last resume.
2035 If there is more than one to_resume call, we have to rely on infrun
2036 to not change the execution direction in-between. */
2037 record_btrace_resume_exec_dir
= execution_direction
;
2039 /* As long as we're not replaying, just forward the request.
2041 For non-stop targets this means that no thread is replaying. In order to
2042 make progress, we may need to explicitly move replaying threads to the end
2043 of their execution history. */
2044 if ((execution_direction
!= EXEC_REVERSE
)
2045 && !record_btrace_is_replaying (ops
, minus_one_ptid
))
2048 ops
->to_resume (ops
, ptid
, step
, signal
);
2052 /* Compute the btrace thread flag for the requested move. */
2053 if (execution_direction
== EXEC_REVERSE
)
2055 flag
= step
== 0 ? BTHR_RCONT
: BTHR_RSTEP
;
2060 flag
= step
== 0 ? BTHR_CONT
: BTHR_STEP
;
2064 /* We just indicate the resume intent here. The actual stepping happens in
2065 record_btrace_wait below.
2067 For all-stop targets, we only step INFERIOR_PTID and continue others. */
2068 if (!target_is_non_stop_p ())
2070 gdb_assert (ptid_match (inferior_ptid
, ptid
));
2072 ALL_NON_EXITED_THREADS (tp
)
2073 if (ptid_match (tp
->ptid
, ptid
))
2075 if (ptid_match (tp
->ptid
, inferior_ptid
))
2076 record_btrace_resume_thread (tp
, flag
);
2078 record_btrace_resume_thread (tp
, cflag
);
2083 ALL_NON_EXITED_THREADS (tp
)
2084 if (ptid_match (tp
->ptid
, ptid
))
2085 record_btrace_resume_thread (tp
, flag
);
2088 /* Async support. */
2089 if (target_can_async_p ())
2092 mark_async_event_handler (record_btrace_async_inferior_event_handler
);
2096 /* The to_commit_resume method of target record-btrace. */
2099 record_btrace_commit_resume (struct target_ops
*ops
)
2101 if ((execution_direction
!= EXEC_REVERSE
)
2102 && !record_btrace_is_replaying (ops
, minus_one_ptid
))
2103 ops
->beneath
->to_commit_resume (ops
->beneath
);
2106 /* Cancel resuming TP. */
2109 record_btrace_cancel_resume (struct thread_info
*tp
)
2111 enum btrace_thread_flag flags
;
2113 flags
= tp
->btrace
.flags
& (BTHR_MOVE
| BTHR_STOP
);
2117 DEBUG ("cancel resume thread %s (%s): %x (%s)",
2118 print_thread_id (tp
),
2119 target_pid_to_str (tp
->ptid
), flags
,
2120 btrace_thread_flag_to_str (flags
));
2122 tp
->btrace
.flags
&= ~(BTHR_MOVE
| BTHR_STOP
);
2123 record_btrace_stop_replaying_at_end (tp
);
2126 /* Return a target_waitstatus indicating that we ran out of history. */
2128 static struct target_waitstatus
2129 btrace_step_no_history (void)
2131 struct target_waitstatus status
;
2133 status
.kind
= TARGET_WAITKIND_NO_HISTORY
;
2138 /* Return a target_waitstatus indicating that a step finished. */
2140 static struct target_waitstatus
2141 btrace_step_stopped (void)
2143 struct target_waitstatus status
;
2145 status
.kind
= TARGET_WAITKIND_STOPPED
;
2146 status
.value
.sig
= GDB_SIGNAL_TRAP
;
2151 /* Return a target_waitstatus indicating that a thread was stopped as
2154 static struct target_waitstatus
2155 btrace_step_stopped_on_request (void)
2157 struct target_waitstatus status
;
2159 status
.kind
= TARGET_WAITKIND_STOPPED
;
2160 status
.value
.sig
= GDB_SIGNAL_0
;
2165 /* Return a target_waitstatus indicating a spurious stop. */
2167 static struct target_waitstatus
2168 btrace_step_spurious (void)
2170 struct target_waitstatus status
;
2172 status
.kind
= TARGET_WAITKIND_SPURIOUS
;
2177 /* Return a target_waitstatus indicating that the thread was not resumed. */
2179 static struct target_waitstatus
2180 btrace_step_no_resumed (void)
2182 struct target_waitstatus status
;
2184 status
.kind
= TARGET_WAITKIND_NO_RESUMED
;
2189 /* Return a target_waitstatus indicating that we should wait again. */
2191 static struct target_waitstatus
2192 btrace_step_again (void)
2194 struct target_waitstatus status
;
2196 status
.kind
= TARGET_WAITKIND_IGNORE
;
2201 /* Clear the record histories. */
2204 record_btrace_clear_histories (struct btrace_thread_info
*btinfo
)
2206 xfree (btinfo
->insn_history
);
2207 xfree (btinfo
->call_history
);
2209 btinfo
->insn_history
= NULL
;
2210 btinfo
->call_history
= NULL
;
2213 /* Check whether TP's current replay position is at a breakpoint. */
2216 record_btrace_replay_at_breakpoint (struct thread_info
*tp
)
2218 struct btrace_insn_iterator
*replay
;
2219 struct btrace_thread_info
*btinfo
;
2220 const struct btrace_insn
*insn
;
2221 struct inferior
*inf
;
2223 btinfo
= &tp
->btrace
;
2224 replay
= btinfo
->replay
;
2229 insn
= btrace_insn_get (replay
);
2233 inf
= find_inferior_ptid (tp
->ptid
);
2237 return record_check_stopped_by_breakpoint (inf
->aspace
, insn
->pc
,
2238 &btinfo
->stop_reason
);
2241 /* Step one instruction in forward direction. */
2243 static struct target_waitstatus
2244 record_btrace_single_step_forward (struct thread_info
*tp
)
2246 struct btrace_insn_iterator
*replay
, end
, start
;
2247 struct btrace_thread_info
*btinfo
;
2249 btinfo
= &tp
->btrace
;
2250 replay
= btinfo
->replay
;
2252 /* We're done if we're not replaying. */
2254 return btrace_step_no_history ();
2256 /* Check if we're stepping a breakpoint. */
2257 if (record_btrace_replay_at_breakpoint (tp
))
2258 return btrace_step_stopped ();
2260 /* Skip gaps during replay. If we end up at a gap (at the end of the trace),
2261 jump back to the instruction at which we started. */
2267 /* We will bail out here if we continue stepping after reaching the end
2268 of the execution history. */
2269 steps
= btrace_insn_next (replay
, 1);
2273 return btrace_step_no_history ();
2276 while (btrace_insn_get (replay
) == NULL
);
2278 /* Determine the end of the instruction trace. */
2279 btrace_insn_end (&end
, btinfo
);
2281 /* The execution trace contains (and ends with) the current instruction.
2282 This instruction has not been executed, yet, so the trace really ends
2283 one instruction earlier. */
2284 if (btrace_insn_cmp (replay
, &end
) == 0)
2285 return btrace_step_no_history ();
2287 return btrace_step_spurious ();
2290 /* Step one instruction in backward direction. */
2292 static struct target_waitstatus
2293 record_btrace_single_step_backward (struct thread_info
*tp
)
2295 struct btrace_insn_iterator
*replay
, start
;
2296 struct btrace_thread_info
*btinfo
;
2298 btinfo
= &tp
->btrace
;
2299 replay
= btinfo
->replay
;
2301 /* Start replaying if we're not already doing so. */
2303 replay
= record_btrace_start_replaying (tp
);
2305 /* If we can't step any further, we reached the end of the history.
2306 Skip gaps during replay. If we end up at a gap (at the beginning of
2307 the trace), jump back to the instruction at which we started. */
2313 steps
= btrace_insn_prev (replay
, 1);
2317 return btrace_step_no_history ();
2320 while (btrace_insn_get (replay
) == NULL
);
2322 /* Check if we're stepping a breakpoint.
2324 For reverse-stepping, this check is after the step. There is logic in
2325 infrun.c that handles reverse-stepping separately. See, for example,
2326 proceed and adjust_pc_after_break.
2328 This code assumes that for reverse-stepping, PC points to the last
2329 de-executed instruction, whereas for forward-stepping PC points to the
2330 next to-be-executed instruction. */
2331 if (record_btrace_replay_at_breakpoint (tp
))
2332 return btrace_step_stopped ();
2334 return btrace_step_spurious ();
2337 /* Step a single thread. */
2339 static struct target_waitstatus
2340 record_btrace_step_thread (struct thread_info
*tp
)
2342 struct btrace_thread_info
*btinfo
;
2343 struct target_waitstatus status
;
2344 enum btrace_thread_flag flags
;
2346 btinfo
= &tp
->btrace
;
2348 flags
= btinfo
->flags
& (BTHR_MOVE
| BTHR_STOP
);
2349 btinfo
->flags
&= ~(BTHR_MOVE
| BTHR_STOP
);
2351 DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp
),
2352 target_pid_to_str (tp
->ptid
), flags
,
2353 btrace_thread_flag_to_str (flags
));
2355 /* We can't step without an execution history. */
2356 if ((flags
& BTHR_MOVE
) != 0 && btrace_is_empty (tp
))
2357 return btrace_step_no_history ();
2362 internal_error (__FILE__
, __LINE__
, _("invalid stepping type."));
2365 return btrace_step_stopped_on_request ();
2368 status
= record_btrace_single_step_forward (tp
);
2369 if (status
.kind
!= TARGET_WAITKIND_SPURIOUS
)
2372 return btrace_step_stopped ();
2375 status
= record_btrace_single_step_backward (tp
);
2376 if (status
.kind
!= TARGET_WAITKIND_SPURIOUS
)
2379 return btrace_step_stopped ();
2382 status
= record_btrace_single_step_forward (tp
);
2383 if (status
.kind
!= TARGET_WAITKIND_SPURIOUS
)
2386 btinfo
->flags
|= flags
;
2387 return btrace_step_again ();
2390 status
= record_btrace_single_step_backward (tp
);
2391 if (status
.kind
!= TARGET_WAITKIND_SPURIOUS
)
2394 btinfo
->flags
|= flags
;
2395 return btrace_step_again ();
2398 /* We keep threads moving at the end of their execution history. The to_wait
2399 method will stop the thread for whom the event is reported. */
2400 if (status
.kind
== TARGET_WAITKIND_NO_HISTORY
)
2401 btinfo
->flags
|= flags
;
/* A vector of threads.  */

typedef struct thread_info * tp_t;
DEF_VEC_P (tp_t);
2411 /* Announce further events if necessary. */
2414 record_btrace_maybe_mark_async_event (const VEC (tp_t
) *moving
,
2415 const VEC (tp_t
) *no_history
)
2417 int more_moving
, more_no_history
;
2419 more_moving
= !VEC_empty (tp_t
, moving
);
2420 more_no_history
= !VEC_empty (tp_t
, no_history
);
2422 if (!more_moving
&& !more_no_history
)
2426 DEBUG ("movers pending");
2428 if (more_no_history
)
2429 DEBUG ("no-history pending");
2431 mark_async_event_handler (record_btrace_async_inferior_event_handler
);
2434 /* The to_wait method of target record-btrace. */
2437 record_btrace_wait (struct target_ops
*ops
, ptid_t ptid
,
2438 struct target_waitstatus
*status
, int options
)
2440 VEC (tp_t
) *moving
, *no_history
;
2441 struct thread_info
*tp
, *eventing
;
2442 struct cleanup
*cleanups
= make_cleanup (null_cleanup
, NULL
);
2444 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid
), options
);
2446 /* As long as we're not replaying, just forward the request. */
2447 if ((execution_direction
!= EXEC_REVERSE
)
2448 && !record_btrace_is_replaying (ops
, minus_one_ptid
))
2451 return ops
->to_wait (ops
, ptid
, status
, options
);
2457 make_cleanup (VEC_cleanup (tp_t
), &moving
);
2458 make_cleanup (VEC_cleanup (tp_t
), &no_history
);
2460 /* Keep a work list of moving threads. */
2461 ALL_NON_EXITED_THREADS (tp
)
2462 if (ptid_match (tp
->ptid
, ptid
)
2463 && ((tp
->btrace
.flags
& (BTHR_MOVE
| BTHR_STOP
)) != 0))
2464 VEC_safe_push (tp_t
, moving
, tp
);
2466 if (VEC_empty (tp_t
, moving
))
2468 *status
= btrace_step_no_resumed ();
2470 DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid
),
2471 target_waitstatus_to_string (status
).c_str ());
2473 do_cleanups (cleanups
);
2477 /* Step moving threads one by one, one step each, until either one thread
2478 reports an event or we run out of threads to step.
2480 When stepping more than one thread, chances are that some threads reach
2481 the end of their execution history earlier than others. If we reported
2482 this immediately, all-stop on top of non-stop would stop all threads and
2483 resume the same threads next time. And we would report the same thread
2484 having reached the end of its execution history again.
2486 In the worst case, this would starve the other threads. But even if other
2487 threads would be allowed to make progress, this would result in far too
2488 many intermediate stops.
2490 We therefore delay the reporting of "no execution history" until we have
2491 nothing else to report. By this time, all threads should have moved to
2492 either the beginning or the end of their execution history. There will
2493 be a single user-visible stop. */
2495 while ((eventing
== NULL
) && !VEC_empty (tp_t
, moving
))
2500 while ((eventing
== NULL
) && VEC_iterate (tp_t
, moving
, ix
, tp
))
2502 *status
= record_btrace_step_thread (tp
);
2504 switch (status
->kind
)
2506 case TARGET_WAITKIND_IGNORE
:
2510 case TARGET_WAITKIND_NO_HISTORY
:
2511 VEC_safe_push (tp_t
, no_history
,
2512 VEC_ordered_remove (tp_t
, moving
, ix
));
2516 eventing
= VEC_unordered_remove (tp_t
, moving
, ix
);
2522 if (eventing
== NULL
)
2524 /* We started with at least one moving thread. This thread must have
2525 either stopped or reached the end of its execution history.
2527 In the former case, EVENTING must not be NULL.
2528 In the latter case, NO_HISTORY must not be empty. */
2529 gdb_assert (!VEC_empty (tp_t
, no_history
));
2531 /* We kept threads moving at the end of their execution history. Stop
2532 EVENTING now that we are going to report its stop. */
2533 eventing
= VEC_unordered_remove (tp_t
, no_history
, 0);
2534 eventing
->btrace
.flags
&= ~BTHR_MOVE
;
2536 *status
= btrace_step_no_history ();
2539 gdb_assert (eventing
!= NULL
);
2541 /* We kept threads replaying at the end of their execution history. Stop
2542 replaying EVENTING now that we are going to report its stop. */
2543 record_btrace_stop_replaying_at_end (eventing
);
2545 /* Stop all other threads. */
2546 if (!target_is_non_stop_p ())
2547 ALL_NON_EXITED_THREADS (tp
)
2548 record_btrace_cancel_resume (tp
);
2550 /* In async mode, we need to announce further events. */
2551 if (target_is_async_p ())
2552 record_btrace_maybe_mark_async_event (moving
, no_history
);
2554 /* Start record histories anew from the current position. */
2555 record_btrace_clear_histories (&eventing
->btrace
);
2557 /* We moved the replay position but did not update registers. */
2558 registers_changed_ptid (eventing
->ptid
);
2560 DEBUG ("wait ended by thread %s (%s): %s",
2561 print_thread_id (eventing
),
2562 target_pid_to_str (eventing
->ptid
),
2563 target_waitstatus_to_string (status
).c_str ());
2565 do_cleanups (cleanups
);
2566 return eventing
->ptid
;
2569 /* The to_stop method of target record-btrace. */
2572 record_btrace_stop (struct target_ops
*ops
, ptid_t ptid
)
2574 DEBUG ("stop %s", target_pid_to_str (ptid
));
2576 /* As long as we're not replaying, just forward the request. */
2577 if ((execution_direction
!= EXEC_REVERSE
)
2578 && !record_btrace_is_replaying (ops
, minus_one_ptid
))
2581 ops
->to_stop (ops
, ptid
);
2585 struct thread_info
*tp
;
2587 ALL_NON_EXITED_THREADS (tp
)
2588 if (ptid_match (tp
->ptid
, ptid
))
2590 tp
->btrace
.flags
&= ~BTHR_MOVE
;
2591 tp
->btrace
.flags
|= BTHR_STOP
;
/* The to_can_execute_reverse method of target record-btrace.  */

static int
record_btrace_can_execute_reverse (struct target_ops *self)
{
  return 1;
}
2604 /* The to_stopped_by_sw_breakpoint method of target record-btrace. */
2607 record_btrace_stopped_by_sw_breakpoint (struct target_ops
*ops
)
2609 if (record_btrace_is_replaying (ops
, minus_one_ptid
))
2611 struct thread_info
*tp
= inferior_thread ();
2613 return tp
->btrace
.stop_reason
== TARGET_STOPPED_BY_SW_BREAKPOINT
;
2616 return ops
->beneath
->to_stopped_by_sw_breakpoint (ops
->beneath
);
2619 /* The to_supports_stopped_by_sw_breakpoint method of target
2623 record_btrace_supports_stopped_by_sw_breakpoint (struct target_ops
*ops
)
2625 if (record_btrace_is_replaying (ops
, minus_one_ptid
))
2628 return ops
->beneath
->to_supports_stopped_by_sw_breakpoint (ops
->beneath
);
2631 /* The to_stopped_by_sw_breakpoint method of target record-btrace. */
2634 record_btrace_stopped_by_hw_breakpoint (struct target_ops
*ops
)
2636 if (record_btrace_is_replaying (ops
, minus_one_ptid
))
2638 struct thread_info
*tp
= inferior_thread ();
2640 return tp
->btrace
.stop_reason
== TARGET_STOPPED_BY_HW_BREAKPOINT
;
2643 return ops
->beneath
->to_stopped_by_hw_breakpoint (ops
->beneath
);
2646 /* The to_supports_stopped_by_hw_breakpoint method of target
2650 record_btrace_supports_stopped_by_hw_breakpoint (struct target_ops
*ops
)
2652 if (record_btrace_is_replaying (ops
, minus_one_ptid
))
2655 return ops
->beneath
->to_supports_stopped_by_hw_breakpoint (ops
->beneath
);
2658 /* The to_update_thread_list method of target record-btrace. */
2661 record_btrace_update_thread_list (struct target_ops
*ops
)
2663 /* We don't add or remove threads during replay. */
2664 if (record_btrace_is_replaying (ops
, minus_one_ptid
))
2667 /* Forward the request. */
2669 ops
->to_update_thread_list (ops
);
2672 /* The to_thread_alive method of target record-btrace. */
2675 record_btrace_thread_alive (struct target_ops
*ops
, ptid_t ptid
)
2677 /* We don't add or remove threads during replay. */
2678 if (record_btrace_is_replaying (ops
, minus_one_ptid
))
2679 return find_thread_ptid (ptid
) != NULL
;
2681 /* Forward the request. */
2683 return ops
->to_thread_alive (ops
, ptid
);
2686 /* Set the replay branch trace instruction iterator. If IT is NULL, replay
2690 record_btrace_set_replay (struct thread_info
*tp
,
2691 const struct btrace_insn_iterator
*it
)
2693 struct btrace_thread_info
*btinfo
;
2695 btinfo
= &tp
->btrace
;
2698 record_btrace_stop_replaying (tp
);
2701 if (btinfo
->replay
== NULL
)
2702 record_btrace_start_replaying (tp
);
2703 else if (btrace_insn_cmp (btinfo
->replay
, it
) == 0)
2706 *btinfo
->replay
= *it
;
2707 registers_changed_ptid (tp
->ptid
);
2710 /* Start anew from the new replay position. */
2711 record_btrace_clear_histories (btinfo
);
2713 stop_pc
= regcache_read_pc (get_current_regcache ());
2714 print_stack_frame (get_selected_frame (NULL
), 1, SRC_AND_LOC
, 1);
2717 /* The to_goto_record_begin method of target record-btrace. */
2720 record_btrace_goto_begin (struct target_ops
*self
)
2722 struct thread_info
*tp
;
2723 struct btrace_insn_iterator begin
;
2725 tp
= require_btrace_thread ();
2727 btrace_insn_begin (&begin
, &tp
->btrace
);
2729 /* Skip gaps at the beginning of the trace. */
2730 while (btrace_insn_get (&begin
) == NULL
)
2734 steps
= btrace_insn_next (&begin
, 1);
2736 error (_("No trace."));
2739 record_btrace_set_replay (tp
, &begin
);
2742 /* The to_goto_record_end method of target record-btrace. */
2745 record_btrace_goto_end (struct target_ops
*ops
)
2747 struct thread_info
*tp
;
2749 tp
= require_btrace_thread ();
2751 record_btrace_set_replay (tp
, NULL
);
2754 /* The to_goto_record method of target record-btrace. */
2757 record_btrace_goto (struct target_ops
*self
, ULONGEST insn
)
2759 struct thread_info
*tp
;
2760 struct btrace_insn_iterator it
;
2761 unsigned int number
;
2766 /* Check for wrap-arounds. */
2768 error (_("Instruction number out of range."));
2770 tp
= require_btrace_thread ();
2772 found
= btrace_find_insn_by_number (&it
, &tp
->btrace
, number
);
2774 /* Check if the instruction could not be found or is a gap. */
2775 if (found
== 0 || btrace_insn_get (&it
) == NULL
)
2776 error (_("No such instruction."));
2778 record_btrace_set_replay (tp
, &it
);
2781 /* The to_record_stop_replaying method of target record-btrace. */
2784 record_btrace_stop_replaying_all (struct target_ops
*self
)
2786 struct thread_info
*tp
;
2788 ALL_NON_EXITED_THREADS (tp
)
2789 record_btrace_stop_replaying (tp
);
2792 /* The to_execution_direction target method. */
2794 static enum exec_direction_kind
2795 record_btrace_execution_direction (struct target_ops
*self
)
2797 return record_btrace_resume_exec_dir
;
2800 /* The to_prepare_to_generate_core target method. */
2803 record_btrace_prepare_to_generate_core (struct target_ops
*self
)
2805 record_btrace_generating_corefile
= 1;
2808 /* The to_done_generating_core target method. */
2811 record_btrace_done_generating_core (struct target_ops
*self
)
2813 record_btrace_generating_corefile
= 0;
2816 /* Initialize the record-btrace target ops. */
2819 init_record_btrace_ops (void)
2821 struct target_ops
*ops
;
2823 ops
= &record_btrace_ops
;
2824 ops
->to_shortname
= "record-btrace";
2825 ops
->to_longname
= "Branch tracing target";
2826 ops
->to_doc
= "Collect control-flow trace and provide the execution history.";
2827 ops
->to_open
= record_btrace_open
;
2828 ops
->to_close
= record_btrace_close
;
2829 ops
->to_async
= record_btrace_async
;
2830 ops
->to_detach
= record_detach
;
2831 ops
->to_disconnect
= record_btrace_disconnect
;
2832 ops
->to_mourn_inferior
= record_mourn_inferior
;
2833 ops
->to_kill
= record_kill
;
2834 ops
->to_stop_recording
= record_btrace_stop_recording
;
2835 ops
->to_info_record
= record_btrace_info
;
2836 ops
->to_insn_history
= record_btrace_insn_history
;
2837 ops
->to_insn_history_from
= record_btrace_insn_history_from
;
2838 ops
->to_insn_history_range
= record_btrace_insn_history_range
;
2839 ops
->to_call_history
= record_btrace_call_history
;
2840 ops
->to_call_history_from
= record_btrace_call_history_from
;
2841 ops
->to_call_history_range
= record_btrace_call_history_range
;
2842 ops
->to_record_method
= record_btrace_record_method
;
2843 ops
->to_record_is_replaying
= record_btrace_is_replaying
;
2844 ops
->to_record_will_replay
= record_btrace_will_replay
;
2845 ops
->to_record_stop_replaying
= record_btrace_stop_replaying_all
;
2846 ops
->to_xfer_partial
= record_btrace_xfer_partial
;
2847 ops
->to_remove_breakpoint
= record_btrace_remove_breakpoint
;
2848 ops
->to_insert_breakpoint
= record_btrace_insert_breakpoint
;
2849 ops
->to_fetch_registers
= record_btrace_fetch_registers
;
2850 ops
->to_store_registers
= record_btrace_store_registers
;
2851 ops
->to_prepare_to_store
= record_btrace_prepare_to_store
;
2852 ops
->to_get_unwinder
= &record_btrace_to_get_unwinder
;
2853 ops
->to_get_tailcall_unwinder
= &record_btrace_to_get_tailcall_unwinder
;
2854 ops
->to_resume
= record_btrace_resume
;
2855 ops
->to_commit_resume
= record_btrace_commit_resume
;
2856 ops
->to_wait
= record_btrace_wait
;
2857 ops
->to_stop
= record_btrace_stop
;
2858 ops
->to_update_thread_list
= record_btrace_update_thread_list
;
2859 ops
->to_thread_alive
= record_btrace_thread_alive
;
2860 ops
->to_goto_record_begin
= record_btrace_goto_begin
;
2861 ops
->to_goto_record_end
= record_btrace_goto_end
;
2862 ops
->to_goto_record
= record_btrace_goto
;
2863 ops
->to_can_execute_reverse
= record_btrace_can_execute_reverse
;
2864 ops
->to_stopped_by_sw_breakpoint
= record_btrace_stopped_by_sw_breakpoint
;
2865 ops
->to_supports_stopped_by_sw_breakpoint
2866 = record_btrace_supports_stopped_by_sw_breakpoint
;
2867 ops
->to_stopped_by_hw_breakpoint
= record_btrace_stopped_by_hw_breakpoint
;
2868 ops
->to_supports_stopped_by_hw_breakpoint
2869 = record_btrace_supports_stopped_by_hw_breakpoint
;
2870 ops
->to_execution_direction
= record_btrace_execution_direction
;
2871 ops
->to_prepare_to_generate_core
= record_btrace_prepare_to_generate_core
;
2872 ops
->to_done_generating_core
= record_btrace_done_generating_core
;
2873 ops
->to_stratum
= record_stratum
;
2874 ops
->to_magic
= OPS_MAGIC
;
2877 /* Start recording in BTS format. */
2880 cmd_record_btrace_bts_start (const char *args
, int from_tty
)
2882 if (args
!= NULL
&& *args
!= 0)
2883 error (_("Invalid argument."));
2885 record_btrace_conf
.format
= BTRACE_FORMAT_BTS
;
2889 execute_command ("target record-btrace", from_tty
);
2891 CATCH (exception
, RETURN_MASK_ALL
)
2893 record_btrace_conf
.format
= BTRACE_FORMAT_NONE
;
2894 throw_exception (exception
);
2899 /* Start recording in Intel Processor Trace format. */
2902 cmd_record_btrace_pt_start (const char *args
, int from_tty
)
2904 if (args
!= NULL
&& *args
!= 0)
2905 error (_("Invalid argument."));
2907 record_btrace_conf
.format
= BTRACE_FORMAT_PT
;
2911 execute_command ("target record-btrace", from_tty
);
2913 CATCH (exception
, RETURN_MASK_ALL
)
2915 record_btrace_conf
.format
= BTRACE_FORMAT_NONE
;
2916 throw_exception (exception
);
2921 /* Alias for "target record". */
2924 cmd_record_btrace_start (const char *args
, int from_tty
)
2926 if (args
!= NULL
&& *args
!= 0)
2927 error (_("Invalid argument."));
2929 record_btrace_conf
.format
= BTRACE_FORMAT_PT
;
2933 execute_command ("target record-btrace", from_tty
);
2935 CATCH (exception
, RETURN_MASK_ALL
)
2937 record_btrace_conf
.format
= BTRACE_FORMAT_BTS
;
2941 execute_command ("target record-btrace", from_tty
);
2943 CATCH (exception
, RETURN_MASK_ALL
)
2945 record_btrace_conf
.format
= BTRACE_FORMAT_NONE
;
2946 throw_exception (exception
);
2953 /* The "set record btrace" command. */
2956 cmd_set_record_btrace (const char *args
, int from_tty
)
2958 cmd_show_list (set_record_btrace_cmdlist
, from_tty
, "");
2961 /* The "show record btrace" command. */
2964 cmd_show_record_btrace (const char *args
, int from_tty
)
2966 cmd_show_list (show_record_btrace_cmdlist
, from_tty
, "");
2969 /* The "show record btrace replay-memory-access" command. */
2972 cmd_show_replay_memory_access (struct ui_file
*file
, int from_tty
,
2973 struct cmd_list_element
*c
, const char *value
)
2975 fprintf_filtered (gdb_stdout
, _("Replay memory access is %s.\n"),
2976 replay_memory_access
);
2979 /* The "set record btrace bts" command. */
2982 cmd_set_record_btrace_bts (const char *args
, int from_tty
)
2984 printf_unfiltered (_("\"set record btrace bts\" must be followed "
2985 "by an appropriate subcommand.\n"));
2986 help_list (set_record_btrace_bts_cmdlist
, "set record btrace bts ",
2987 all_commands
, gdb_stdout
);
2990 /* The "show record btrace bts" command. */
2993 cmd_show_record_btrace_bts (const char *args
, int from_tty
)
2995 cmd_show_list (show_record_btrace_bts_cmdlist
, from_tty
, "");
2998 /* The "set record btrace pt" command. */
3001 cmd_set_record_btrace_pt (const char *args
, int from_tty
)
3003 printf_unfiltered (_("\"set record btrace pt\" must be followed "
3004 "by an appropriate subcommand.\n"));
3005 help_list (set_record_btrace_pt_cmdlist
, "set record btrace pt ",
3006 all_commands
, gdb_stdout
);
3009 /* The "show record btrace pt" command. */
3012 cmd_show_record_btrace_pt (const char *args
, int from_tty
)
3014 cmd_show_list (show_record_btrace_pt_cmdlist
, from_tty
, "");
/* The "record bts buffer-size" show value function.
   VALUE is the already-formatted setting string supplied by the
   set/show machinery.  */

static void
show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
				   struct cmd_list_element *c,
				   const char *value)
{
  fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
		    value);
}
/* The "record pt buffer-size" show value function.
   VALUE is the already-formatted setting string supplied by the
   set/show machinery.  */

static void
show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
				  struct cmd_list_element *c,
				  const char *value)
{
  fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
		    value);
}
3039 /* Initialize btrace commands. */
3042 _initialize_record_btrace (void)
3044 add_prefix_cmd ("btrace", class_obscure
, cmd_record_btrace_start
,
3045 _("Start branch trace recording."), &record_btrace_cmdlist
,
3046 "record btrace ", 0, &record_cmdlist
);
3047 add_alias_cmd ("b", "btrace", class_obscure
, 1, &record_cmdlist
);
3049 add_cmd ("bts", class_obscure
, cmd_record_btrace_bts_start
,
3051 Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
3052 The processor stores a from/to record for each branch into a cyclic buffer.\n\
3053 This format may not be available on all processors."),
3054 &record_btrace_cmdlist
);
3055 add_alias_cmd ("bts", "btrace bts", class_obscure
, 1, &record_cmdlist
);
3057 add_cmd ("pt", class_obscure
, cmd_record_btrace_pt_start
,
3059 Start branch trace recording in Intel Processor Trace format.\n\n\
3060 This format may not be available on all processors."),
3061 &record_btrace_cmdlist
);
3062 add_alias_cmd ("pt", "btrace pt", class_obscure
, 1, &record_cmdlist
);
3064 add_prefix_cmd ("btrace", class_support
, cmd_set_record_btrace
,
3065 _("Set record options"), &set_record_btrace_cmdlist
,
3066 "set record btrace ", 0, &set_record_cmdlist
);
3068 add_prefix_cmd ("btrace", class_support
, cmd_show_record_btrace
,
3069 _("Show record options"), &show_record_btrace_cmdlist
,
3070 "show record btrace ", 0, &show_record_cmdlist
);
3072 add_setshow_enum_cmd ("replay-memory-access", no_class
,
3073 replay_memory_access_types
, &replay_memory_access
, _("\
3074 Set what memory accesses are allowed during replay."), _("\
3075 Show what memory accesses are allowed during replay."),
3076 _("Default is READ-ONLY.\n\n\
3077 The btrace record target does not trace data.\n\
3078 The memory therefore corresponds to the live target and not \
3079 to the current replay position.\n\n\
3080 When READ-ONLY, allow accesses to read-only memory during replay.\n\
3081 When READ-WRITE, allow accesses to read-only and read-write memory during \
3083 NULL
, cmd_show_replay_memory_access
,
3084 &set_record_btrace_cmdlist
,
3085 &show_record_btrace_cmdlist
);
3087 add_prefix_cmd ("bts", class_support
, cmd_set_record_btrace_bts
,
3088 _("Set record btrace bts options"),
3089 &set_record_btrace_bts_cmdlist
,
3090 "set record btrace bts ", 0, &set_record_btrace_cmdlist
);
3092 add_prefix_cmd ("bts", class_support
, cmd_show_record_btrace_bts
,
3093 _("Show record btrace bts options"),
3094 &show_record_btrace_bts_cmdlist
,
3095 "show record btrace bts ", 0, &show_record_btrace_cmdlist
);
3097 add_setshow_uinteger_cmd ("buffer-size", no_class
,
3098 &record_btrace_conf
.bts
.size
,
3099 _("Set the record/replay bts buffer size."),
3100 _("Show the record/replay bts buffer size."), _("\
3101 When starting recording request a trace buffer of this size. \
3102 The actual buffer size may differ from the requested size. \
3103 Use \"info record\" to see the actual buffer size.\n\n\
3104 Bigger buffers allow longer recording but also take more time to process \
3105 the recorded execution trace.\n\n\
3106 The trace buffer size may not be changed while recording."), NULL
,
3107 show_record_bts_buffer_size_value
,
3108 &set_record_btrace_bts_cmdlist
,
3109 &show_record_btrace_bts_cmdlist
);
3111 add_prefix_cmd ("pt", class_support
, cmd_set_record_btrace_pt
,
3112 _("Set record btrace pt options"),
3113 &set_record_btrace_pt_cmdlist
,
3114 "set record btrace pt ", 0, &set_record_btrace_cmdlist
);
3116 add_prefix_cmd ("pt", class_support
, cmd_show_record_btrace_pt
,
3117 _("Show record btrace pt options"),
3118 &show_record_btrace_pt_cmdlist
,
3119 "show record btrace pt ", 0, &show_record_btrace_cmdlist
);
3121 add_setshow_uinteger_cmd ("buffer-size", no_class
,
3122 &record_btrace_conf
.pt
.size
,
3123 _("Set the record/replay pt buffer size."),
3124 _("Show the record/replay pt buffer size."), _("\
3125 Bigger buffers allow longer recording but also take more time to process \
3126 the recorded execution.\n\
3127 The actual buffer size may differ from the requested size. Use \"info record\" \
3128 to see the actual buffer size."), NULL
, show_record_pt_buffer_size_value
,
3129 &set_record_btrace_pt_cmdlist
,
3130 &show_record_btrace_pt_cmdlist
);
3132 init_record_btrace_ops ();
3133 add_target (&record_btrace_ops
);
3135 bfcache
= htab_create_alloc (50, bfcache_hash
, bfcache_eq
, NULL
,
3138 record_btrace_conf
.bts
.size
= 64 * 1024;
3139 record_btrace_conf
.pt
.size
= 16 * 1024;