1 /* Branch trace support for GDB, the GNU debugger.
3 Copyright (C) 2013-2019 Free Software Foundation, Inc.
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "record-btrace.h"
25 #include "gdbthread.h"
29 #include "observable.h"
30 #include "cli/cli-utils.h"
34 #include "filenames.h"
36 #include "frame-unwind.h"
39 #include "event-loop.h"
41 #include "gdbsupport/vec.h"
45 #include "cli/cli-style.h"
47 static const target_info record_btrace_target_info
= {
49 N_("Branch tracing target"),
50 N_("Collect control-flow trace and provide the execution history.")
53 /* The target_ops of record-btrace. */
55 class record_btrace_target final
: public target_ops
58 const target_info
&info () const override
59 { return record_btrace_target_info
; }
61 strata
stratum () const override
{ return record_stratum
; }
63 void close () override
;
64 void async (int) override
;
66 void detach (inferior
*inf
, int from_tty
) override
67 { record_detach (this, inf
, from_tty
); }
69 void disconnect (const char *, int) override
;
71 void mourn_inferior () override
72 { record_mourn_inferior (this); }
75 { record_kill (this); }
77 enum record_method
record_method (ptid_t ptid
) override
;
79 void stop_recording () override
;
80 void info_record () override
;
82 void insn_history (int size
, gdb_disassembly_flags flags
) override
;
83 void insn_history_from (ULONGEST from
, int size
,
84 gdb_disassembly_flags flags
) override
;
85 void insn_history_range (ULONGEST begin
, ULONGEST end
,
86 gdb_disassembly_flags flags
) override
;
87 void call_history (int size
, record_print_flags flags
) override
;
88 void call_history_from (ULONGEST begin
, int size
, record_print_flags flags
)
90 void call_history_range (ULONGEST begin
, ULONGEST end
, record_print_flags flags
)
93 bool record_is_replaying (ptid_t ptid
) override
;
94 bool record_will_replay (ptid_t ptid
, int dir
) override
;
95 void record_stop_replaying () override
;
97 enum target_xfer_status
xfer_partial (enum target_object object
,
100 const gdb_byte
*writebuf
,
101 ULONGEST offset
, ULONGEST len
,
102 ULONGEST
*xfered_len
) override
;
104 int insert_breakpoint (struct gdbarch
*,
105 struct bp_target_info
*) override
;
106 int remove_breakpoint (struct gdbarch
*, struct bp_target_info
*,
107 enum remove_bp_reason
) override
;
109 void fetch_registers (struct regcache
*, int) override
;
111 void store_registers (struct regcache
*, int) override
;
112 void prepare_to_store (struct regcache
*) override
;
114 const struct frame_unwind
*get_unwinder () override
;
116 const struct frame_unwind
*get_tailcall_unwinder () override
;
118 void commit_resume () override
;
119 void resume (ptid_t
, int, enum gdb_signal
) override
;
120 ptid_t
wait (ptid_t
, struct target_waitstatus
*, int) override
;
122 void stop (ptid_t
) override
;
123 void update_thread_list () override
;
124 bool thread_alive (ptid_t ptid
) override
;
125 void goto_record_begin () override
;
126 void goto_record_end () override
;
127 void goto_record (ULONGEST insn
) override
;
129 bool can_execute_reverse () override
;
131 bool stopped_by_sw_breakpoint () override
;
132 bool supports_stopped_by_sw_breakpoint () override
;
134 bool stopped_by_hw_breakpoint () override
;
135 bool supports_stopped_by_hw_breakpoint () override
;
137 enum exec_direction_kind
execution_direction () override
;
138 void prepare_to_generate_core () override
;
139 void done_generating_core () override
;
142 static record_btrace_target record_btrace_ops
;
144 /* Initialize the record-btrace target ops. */
146 /* Token associated with a new-thread observer enabling branch tracing
147 for the new thread. */
148 static const gdb::observers::token record_btrace_thread_observer_token
{};
150 /* Memory access types used in set/show record btrace replay-memory-access. */
151 static const char replay_memory_access_read_only
[] = "read-only";
152 static const char replay_memory_access_read_write
[] = "read-write";
153 static const char *const replay_memory_access_types
[] =
155 replay_memory_access_read_only
,
156 replay_memory_access_read_write
,
160 /* The currently allowed replay memory access type. */
161 static const char *replay_memory_access
= replay_memory_access_read_only
;
163 /* The cpu state kinds. */
164 enum record_btrace_cpu_state_kind
171 /* The current cpu state. */
172 static enum record_btrace_cpu_state_kind record_btrace_cpu_state
= CS_AUTO
;
174 /* The current cpu for trace decode. */
175 static struct btrace_cpu record_btrace_cpu
;
177 /* Command lists for "set/show record btrace". */
178 static struct cmd_list_element
*set_record_btrace_cmdlist
;
179 static struct cmd_list_element
*show_record_btrace_cmdlist
;
181 /* The execution direction of the last resume we got. See record-full.c. */
182 static enum exec_direction_kind record_btrace_resume_exec_dir
= EXEC_FORWARD
;
184 /* The async event handler for reverse/replay execution. */
185 static struct async_event_handler
*record_btrace_async_inferior_event_handler
;
187 /* A flag indicating that we are currently generating a core file. */
188 static int record_btrace_generating_corefile
;
190 /* The current branch trace configuration. */
191 static struct btrace_config record_btrace_conf
;
193 /* Command list for "record btrace". */
194 static struct cmd_list_element
*record_btrace_cmdlist
;
196 /* Command lists for "set/show record btrace bts". */
197 static struct cmd_list_element
*set_record_btrace_bts_cmdlist
;
198 static struct cmd_list_element
*show_record_btrace_bts_cmdlist
;
200 /* Command lists for "set/show record btrace pt". */
201 static struct cmd_list_element
*set_record_btrace_pt_cmdlist
;
202 static struct cmd_list_element
*show_record_btrace_pt_cmdlist
;
204 /* Command list for "set record btrace cpu". */
205 static struct cmd_list_element
*set_record_btrace_cpu_cmdlist
;
207 /* Print a record-btrace debug message. Use do ... while (0) to avoid
208 ambiguities when used in if statements. */
210 #define DEBUG(msg, args...) \
213 if (record_debug != 0) \
214 fprintf_unfiltered (gdb_stdlog, \
215 "[record-btrace] " msg "\n", ##args); \
220 /* Return the cpu configured by the user. Returns NULL if the cpu was
221 configured as auto. */
222 const struct btrace_cpu
*
223 record_btrace_get_cpu (void)
225 switch (record_btrace_cpu_state
)
231 record_btrace_cpu
.vendor
= CV_UNKNOWN
;
234 return &record_btrace_cpu
;
237 error (_("Internal error: bad record btrace cpu state."));
240 /* Update the branch trace for the current thread and return a pointer to its
243 Throws an error if there is no thread or no trace. This function never
246 static struct thread_info
*
247 require_btrace_thread (void)
251 if (inferior_ptid
== null_ptid
)
252 error (_("No thread."));
254 thread_info
*tp
= inferior_thread ();
256 validate_registers_access ();
258 btrace_fetch (tp
, record_btrace_get_cpu ());
260 if (btrace_is_empty (tp
))
261 error (_("No trace."));
266 /* Update the branch trace for the current thread and return a pointer to its
267 branch trace information struct.
269 Throws an error if there is no thread or no trace. This function never
272 static struct btrace_thread_info
*
273 require_btrace (void)
275 struct thread_info
*tp
;
277 tp
= require_btrace_thread ();
282 /* Enable branch tracing for one thread. Warn on errors. */
285 record_btrace_enable_warn (struct thread_info
*tp
)
289 btrace_enable (tp
, &record_btrace_conf
);
291 catch (const gdb_exception_error
&error
)
293 warning ("%s", error
.what ());
297 /* Enable automatic tracing of new threads. */
300 record_btrace_auto_enable (void)
302 DEBUG ("attach thread observer");
304 gdb::observers::new_thread
.attach (record_btrace_enable_warn
,
305 record_btrace_thread_observer_token
);
308 /* Disable automatic tracing of new threads. */
311 record_btrace_auto_disable (void)
313 DEBUG ("detach thread observer");
315 gdb::observers::new_thread
.detach (record_btrace_thread_observer_token
);
318 /* The record-btrace async event handler function. */
321 record_btrace_handle_async_inferior_event (gdb_client_data data
)
323 inferior_event_handler (INF_REG_EVENT
, NULL
);
326 /* See record-btrace.h. */
329 record_btrace_push_target (void)
333 record_btrace_auto_enable ();
335 push_target (&record_btrace_ops
);
337 record_btrace_async_inferior_event_handler
338 = create_async_event_handler (record_btrace_handle_async_inferior_event
,
340 record_btrace_generating_corefile
= 0;
342 format
= btrace_format_short_string (record_btrace_conf
.format
);
343 gdb::observers::record_changed
.notify (current_inferior (), 1, "btrace", format
);
346 /* Disable btrace on a set of threads on scope exit. */
348 struct scoped_btrace_disable
350 scoped_btrace_disable () = default;
352 DISABLE_COPY_AND_ASSIGN (scoped_btrace_disable
);
354 ~scoped_btrace_disable ()
356 for (thread_info
*tp
: m_threads
)
360 void add_thread (thread_info
*thread
)
362 m_threads
.push_front (thread
);
371 std::forward_list
<thread_info
*> m_threads
;
374 /* Open target record-btrace. */
377 record_btrace_target_open (const char *args
, int from_tty
)
379 /* If we fail to enable btrace for one thread, disable it for the threads for
380 which it was successfully enabled. */
381 scoped_btrace_disable btrace_disable
;
387 if (!target_has_execution
)
388 error (_("The program is not being run."));
390 for (thread_info
*tp
: all_non_exited_threads ())
391 if (args
== NULL
|| *args
== 0 || number_is_in_list (args
, tp
->global_num
))
393 btrace_enable (tp
, &record_btrace_conf
);
395 btrace_disable
.add_thread (tp
);
398 record_btrace_push_target ();
400 btrace_disable
.discard ();
403 /* The stop_recording method of target record-btrace. */
406 record_btrace_target::stop_recording ()
408 DEBUG ("stop recording");
410 record_btrace_auto_disable ();
412 for (thread_info
*tp
: all_non_exited_threads ())
413 if (tp
->btrace
.target
!= NULL
)
417 /* The disconnect method of target record-btrace. */
420 record_btrace_target::disconnect (const char *args
,
423 struct target_ops
*beneath
= this->beneath ();
425 /* Do not stop recording, just clean up GDB side. */
426 unpush_target (this);
428 /* Forward disconnect. */
429 beneath
->disconnect (args
, from_tty
);
432 /* The close method of target record-btrace. */
435 record_btrace_target::close ()
437 if (record_btrace_async_inferior_event_handler
!= NULL
)
438 delete_async_event_handler (&record_btrace_async_inferior_event_handler
);
440 /* Make sure automatic recording gets disabled even if we did not stop
441 recording before closing the record-btrace target. */
442 record_btrace_auto_disable ();
444 /* We should have already stopped recording.
445 Tear down btrace in case we have not. */
446 for (thread_info
*tp
: all_non_exited_threads ())
447 btrace_teardown (tp
);
450 /* The async method of target record-btrace. */
453 record_btrace_target::async (int enable
)
456 mark_async_event_handler (record_btrace_async_inferior_event_handler
);
458 clear_async_event_handler (record_btrace_async_inferior_event_handler
);
460 this->beneath ()->async (enable
);
/* Adjusts the size and returns a human readable size suffix.

   If *SIZE is an exact multiple of 1 GB / 1 MB / 1 kB, scale it down to the
   largest such unit and return the matching suffix; otherwise leave *SIZE
   unchanged and return the empty string.  */
static const char *
record_btrace_adjust_size (unsigned int *size)
{
  unsigned int sz;

  sz = *size;

  if ((sz & ((1u << 30) - 1)) == 0)
    {
      *size = sz >> 30;
      return "GB";
    }
  else if ((sz & ((1u << 20) - 1)) == 0)
    {
      *size = sz >> 20;
      return "MB";
    }
  else if ((sz & ((1u << 10) - 1)) == 0)
    {
      *size = sz >> 10;
      return "kB";
    }
  else
    return "";
}
491 /* Print a BTS configuration. */
494 record_btrace_print_bts_conf (const struct btrace_config_bts
*conf
)
502 suffix
= record_btrace_adjust_size (&size
);
503 printf_unfiltered (_("Buffer size: %u%s.\n"), size
, suffix
);
507 /* Print an Intel Processor Trace configuration. */
510 record_btrace_print_pt_conf (const struct btrace_config_pt
*conf
)
518 suffix
= record_btrace_adjust_size (&size
);
519 printf_unfiltered (_("Buffer size: %u%s.\n"), size
, suffix
);
523 /* Print a branch tracing configuration. */
526 record_btrace_print_conf (const struct btrace_config
*conf
)
528 printf_unfiltered (_("Recording format: %s.\n"),
529 btrace_format_string (conf
->format
));
531 switch (conf
->format
)
533 case BTRACE_FORMAT_NONE
:
536 case BTRACE_FORMAT_BTS
:
537 record_btrace_print_bts_conf (&conf
->bts
);
540 case BTRACE_FORMAT_PT
:
541 record_btrace_print_pt_conf (&conf
->pt
);
545 internal_error (__FILE__
, __LINE__
, _("Unkown branch trace format."));
548 /* The info_record method of target record-btrace. */
551 record_btrace_target::info_record ()
553 struct btrace_thread_info
*btinfo
;
554 const struct btrace_config
*conf
;
555 struct thread_info
*tp
;
556 unsigned int insns
, calls
, gaps
;
560 tp
= find_thread_ptid (inferior_ptid
);
562 error (_("No thread."));
564 validate_registers_access ();
566 btinfo
= &tp
->btrace
;
568 conf
= ::btrace_conf (btinfo
);
570 record_btrace_print_conf (conf
);
572 btrace_fetch (tp
, record_btrace_get_cpu ());
578 if (!btrace_is_empty (tp
))
580 struct btrace_call_iterator call
;
581 struct btrace_insn_iterator insn
;
583 btrace_call_end (&call
, btinfo
);
584 btrace_call_prev (&call
, 1);
585 calls
= btrace_call_number (&call
);
587 btrace_insn_end (&insn
, btinfo
);
588 insns
= btrace_insn_number (&insn
);
590 /* If the last instruction is not a gap, it is the current instruction
591 that is not actually part of the record. */
592 if (btrace_insn_get (&insn
) != NULL
)
595 gaps
= btinfo
->ngaps
;
598 printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
599 "for thread %s (%s).\n"), insns
, calls
, gaps
,
600 print_thread_id (tp
),
601 target_pid_to_str (tp
->ptid
).c_str ());
603 if (btrace_is_replaying (tp
))
604 printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
605 btrace_insn_number (btinfo
->replay
));
608 /* Print a decode error. */
611 btrace_ui_out_decode_error (struct ui_out
*uiout
, int errcode
,
612 enum btrace_format format
)
614 const char *errstr
= btrace_decode_error (format
, errcode
);
616 uiout
->text (_("["));
617 /* ERRCODE > 0 indicates notifications on BTRACE_FORMAT_PT. */
618 if (!(format
== BTRACE_FORMAT_PT
&& errcode
> 0))
620 uiout
->text (_("decode error ("));
621 uiout
->field_signed ("errcode", errcode
);
622 uiout
->text (_("): "));
624 uiout
->text (errstr
);
625 uiout
->text (_("]\n"));
/* A range of source lines.  */
struct btrace_line_range
{
  /* The symtab this line is from.  */
  struct symtab *symtab;

  /* The first line (inclusive).  */
  int begin;

  /* The last line (exclusive).  */
  int end;
};

/* Construct a line range.  */
static struct btrace_line_range
btrace_mk_line_range (struct symtab *symtab, int begin, int end)
{
  struct btrace_line_range range;

  range.symtab = symtab;
  range.begin = begin;
  range.end = end;

  return range;
}

/* Add a line to a line range.  */
static struct btrace_line_range
btrace_line_range_add (struct btrace_line_range range, int line)
{
  if (range.end <= range.begin)
    {
      /* This is the first entry.  */
      range.begin = line;
      range.end = line + 1;
    }
  else if (line < range.begin)
    range.begin = line;
  else if (range.end < line)
    range.end = line + 1;

  return range;
}

/* Return non-zero if RANGE is empty, zero otherwise.  */
static int
btrace_line_range_is_empty (struct btrace_line_range range)
{
  return range.end <= range.begin;
}

/* Return non-zero if LHS contains RHS, zero otherwise.  */
static int
btrace_line_range_contains_range (struct btrace_line_range lhs,
                                  struct btrace_line_range rhs)
{
  return ((lhs.symtab == rhs.symtab)
          && (lhs.begin <= rhs.begin)
          && (rhs.end <= lhs.end));
}
694 /* Find the line range associated with PC. */
696 static struct btrace_line_range
697 btrace_find_line_range (CORE_ADDR pc
)
699 struct btrace_line_range range
;
700 struct linetable_entry
*lines
;
701 struct linetable
*ltable
;
702 struct symtab
*symtab
;
705 symtab
= find_pc_line_symtab (pc
);
707 return btrace_mk_line_range (NULL
, 0, 0);
709 ltable
= SYMTAB_LINETABLE (symtab
);
711 return btrace_mk_line_range (symtab
, 0, 0);
713 nlines
= ltable
->nitems
;
714 lines
= ltable
->item
;
716 return btrace_mk_line_range (symtab
, 0, 0);
718 range
= btrace_mk_line_range (symtab
, 0, 0);
719 for (i
= 0; i
< nlines
- 1; i
++)
721 if ((lines
[i
].pc
== pc
) && (lines
[i
].line
!= 0))
722 range
= btrace_line_range_add (range
, lines
[i
].line
);
728 /* Print source lines in LINES to UIOUT.
730 UI_ITEM_CHAIN is a cleanup chain for the last source line and the
731 instructions corresponding to that source line. When printing a new source
732 line, we do the cleanups for the open chain and open a new cleanup chain for
733 the new source line. If the source line range in LINES is not empty, this
734 function will leave the cleanup chain for the last printed source line open
735 so instructions can be added to it. */
738 btrace_print_lines (struct btrace_line_range lines
, struct ui_out
*uiout
,
739 gdb::optional
<ui_out_emit_tuple
> *src_and_asm_tuple
,
740 gdb::optional
<ui_out_emit_list
> *asm_list
,
741 gdb_disassembly_flags flags
)
743 print_source_lines_flags psl_flags
;
745 if (flags
& DISASSEMBLY_FILENAME
)
746 psl_flags
|= PRINT_SOURCE_LINES_FILENAME
;
748 for (int line
= lines
.begin
; line
< lines
.end
; ++line
)
752 src_and_asm_tuple
->emplace (uiout
, "src_and_asm_line");
754 print_source_lines (lines
.symtab
, line
, line
+ 1, psl_flags
);
756 asm_list
->emplace (uiout
, "line_asm_insn");
760 /* Disassemble a section of the recorded instruction trace. */
763 btrace_insn_history (struct ui_out
*uiout
,
764 const struct btrace_thread_info
*btinfo
,
765 const struct btrace_insn_iterator
*begin
,
766 const struct btrace_insn_iterator
*end
,
767 gdb_disassembly_flags flags
)
769 DEBUG ("itrace (0x%x): [%u; %u)", (unsigned) flags
,
770 btrace_insn_number (begin
), btrace_insn_number (end
));
772 flags
|= DISASSEMBLY_SPECULATIVE
;
774 struct gdbarch
*gdbarch
= target_gdbarch ();
775 btrace_line_range last_lines
= btrace_mk_line_range (NULL
, 0, 0);
777 ui_out_emit_list
list_emitter (uiout
, "asm_insns");
779 gdb::optional
<ui_out_emit_tuple
> src_and_asm_tuple
;
780 gdb::optional
<ui_out_emit_list
> asm_list
;
782 gdb_pretty_print_disassembler
disasm (gdbarch
, uiout
);
784 for (btrace_insn_iterator it
= *begin
; btrace_insn_cmp (&it
, end
) != 0;
785 btrace_insn_next (&it
, 1))
787 const struct btrace_insn
*insn
;
789 insn
= btrace_insn_get (&it
);
791 /* A NULL instruction indicates a gap in the trace. */
794 const struct btrace_config
*conf
;
796 conf
= btrace_conf (btinfo
);
798 /* We have trace so we must have a configuration. */
799 gdb_assert (conf
!= NULL
);
801 uiout
->field_fmt ("insn-number", "%u",
802 btrace_insn_number (&it
));
805 btrace_ui_out_decode_error (uiout
, btrace_insn_get_error (&it
),
810 struct disasm_insn dinsn
;
812 if ((flags
& DISASSEMBLY_SOURCE
) != 0)
814 struct btrace_line_range lines
;
816 lines
= btrace_find_line_range (insn
->pc
);
817 if (!btrace_line_range_is_empty (lines
)
818 && !btrace_line_range_contains_range (last_lines
, lines
))
820 btrace_print_lines (lines
, uiout
, &src_and_asm_tuple
, &asm_list
,
824 else if (!src_and_asm_tuple
.has_value ())
826 gdb_assert (!asm_list
.has_value ());
828 src_and_asm_tuple
.emplace (uiout
, "src_and_asm_line");
830 /* No source information. */
831 asm_list
.emplace (uiout
, "line_asm_insn");
834 gdb_assert (src_and_asm_tuple
.has_value ());
835 gdb_assert (asm_list
.has_value ());
838 memset (&dinsn
, 0, sizeof (dinsn
));
839 dinsn
.number
= btrace_insn_number (&it
);
840 dinsn
.addr
= insn
->pc
;
842 if ((insn
->flags
& BTRACE_INSN_FLAG_SPECULATIVE
) != 0)
843 dinsn
.is_speculative
= 1;
845 disasm
.pretty_print_insn (&dinsn
, flags
);
850 /* The insn_history method of target record-btrace. */
853 record_btrace_target::insn_history (int size
, gdb_disassembly_flags flags
)
855 struct btrace_thread_info
*btinfo
;
856 struct btrace_insn_history
*history
;
857 struct btrace_insn_iterator begin
, end
;
858 struct ui_out
*uiout
;
859 unsigned int context
, covered
;
861 uiout
= current_uiout
;
862 ui_out_emit_tuple
tuple_emitter (uiout
, "insn history");
863 context
= abs (size
);
865 error (_("Bad record instruction-history-size."));
867 btinfo
= require_btrace ();
868 history
= btinfo
->insn_history
;
871 struct btrace_insn_iterator
*replay
;
873 DEBUG ("insn-history (0x%x): %d", (unsigned) flags
, size
);
875 /* If we're replaying, we start at the replay position. Otherwise, we
876 start at the tail of the trace. */
877 replay
= btinfo
->replay
;
881 btrace_insn_end (&begin
, btinfo
);
883 /* We start from here and expand in the requested direction. Then we
884 expand in the other direction, as well, to fill up any remaining
889 /* We want the current position covered, as well. */
890 covered
= btrace_insn_next (&end
, 1);
891 covered
+= btrace_insn_prev (&begin
, context
- covered
);
892 covered
+= btrace_insn_next (&end
, context
- covered
);
896 covered
= btrace_insn_next (&end
, context
);
897 covered
+= btrace_insn_prev (&begin
, context
- covered
);
902 begin
= history
->begin
;
905 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", (unsigned) flags
, size
,
906 btrace_insn_number (&begin
), btrace_insn_number (&end
));
911 covered
= btrace_insn_prev (&begin
, context
);
916 covered
= btrace_insn_next (&end
, context
);
921 btrace_insn_history (uiout
, btinfo
, &begin
, &end
, flags
);
925 printf_unfiltered (_("At the start of the branch trace record.\n"));
927 printf_unfiltered (_("At the end of the branch trace record.\n"));
930 btrace_set_insn_history (btinfo
, &begin
, &end
);
933 /* The insn_history_range method of target record-btrace. */
936 record_btrace_target::insn_history_range (ULONGEST from
, ULONGEST to
,
937 gdb_disassembly_flags flags
)
939 struct btrace_thread_info
*btinfo
;
940 struct btrace_insn_iterator begin
, end
;
941 struct ui_out
*uiout
;
942 unsigned int low
, high
;
945 uiout
= current_uiout
;
946 ui_out_emit_tuple
tuple_emitter (uiout
, "insn history");
950 DEBUG ("insn-history (0x%x): [%u; %u)", (unsigned) flags
, low
, high
);
952 /* Check for wrap-arounds. */
953 if (low
!= from
|| high
!= to
)
954 error (_("Bad range."));
957 error (_("Bad range."));
959 btinfo
= require_btrace ();
961 found
= btrace_find_insn_by_number (&begin
, btinfo
, low
);
963 error (_("Range out of bounds."));
965 found
= btrace_find_insn_by_number (&end
, btinfo
, high
);
968 /* Silently truncate the range. */
969 btrace_insn_end (&end
, btinfo
);
973 /* We want both begin and end to be inclusive. */
974 btrace_insn_next (&end
, 1);
977 btrace_insn_history (uiout
, btinfo
, &begin
, &end
, flags
);
978 btrace_set_insn_history (btinfo
, &begin
, &end
);
981 /* The insn_history_from method of target record-btrace. */
984 record_btrace_target::insn_history_from (ULONGEST from
, int size
,
985 gdb_disassembly_flags flags
)
987 ULONGEST begin
, end
, context
;
989 context
= abs (size
);
991 error (_("Bad record instruction-history-size."));
1000 begin
= from
- context
+ 1;
1005 end
= from
+ context
- 1;
1007 /* Check for wrap-around. */
1012 insn_history_range (begin
, end
, flags
);
1015 /* Print the instruction number range for a function call history line. */
1018 btrace_call_history_insn_range (struct ui_out
*uiout
,
1019 const struct btrace_function
*bfun
)
1021 unsigned int begin
, end
, size
;
1023 size
= bfun
->insn
.size ();
1024 gdb_assert (size
> 0);
1026 begin
= bfun
->insn_offset
;
1027 end
= begin
+ size
- 1;
1029 uiout
->field_unsigned ("insn begin", begin
);
1031 uiout
->field_unsigned ("insn end", end
);
1034 /* Compute the lowest and highest source line for the instructions in BFUN
1035 and return them in PBEGIN and PEND.
1036 Ignore instructions that can't be mapped to BFUN, e.g. instructions that
1037 result from inlining or macro expansion. */
1040 btrace_compute_src_line_range (const struct btrace_function
*bfun
,
1041 int *pbegin
, int *pend
)
1043 struct symtab
*symtab
;
1054 symtab
= symbol_symtab (sym
);
1056 for (const btrace_insn
&insn
: bfun
->insn
)
1058 struct symtab_and_line sal
;
1060 sal
= find_pc_line (insn
.pc
, 0);
1061 if (sal
.symtab
!= symtab
|| sal
.line
== 0)
1064 begin
= std::min (begin
, sal
.line
);
1065 end
= std::max (end
, sal
.line
);
1073 /* Print the source line information for a function call history line. */
1076 btrace_call_history_src_line (struct ui_out
*uiout
,
1077 const struct btrace_function
*bfun
)
1086 uiout
->field_string ("file",
1087 symtab_to_filename_for_display (symbol_symtab (sym
)),
1088 file_name_style
.style ());
1090 btrace_compute_src_line_range (bfun
, &begin
, &end
);
1095 uiout
->field_signed ("min line", begin
);
1101 uiout
->field_signed ("max line", end
);
1104 /* Get the name of a branch trace function. */
1107 btrace_get_bfun_name (const struct btrace_function
*bfun
)
1109 struct minimal_symbol
*msym
;
1119 return SYMBOL_PRINT_NAME (sym
);
1120 else if (msym
!= NULL
)
1121 return MSYMBOL_PRINT_NAME (msym
);
1126 /* Disassemble a section of the recorded function trace. */
1129 btrace_call_history (struct ui_out
*uiout
,
1130 const struct btrace_thread_info
*btinfo
,
1131 const struct btrace_call_iterator
*begin
,
1132 const struct btrace_call_iterator
*end
,
1135 struct btrace_call_iterator it
;
1136 record_print_flags flags
= (enum record_print_flag
) int_flags
;
1138 DEBUG ("ftrace (0x%x): [%u; %u)", int_flags
, btrace_call_number (begin
),
1139 btrace_call_number (end
));
1141 for (it
= *begin
; btrace_call_cmp (&it
, end
) < 0; btrace_call_next (&it
, 1))
1143 const struct btrace_function
*bfun
;
1144 struct minimal_symbol
*msym
;
1147 bfun
= btrace_call_get (&it
);
1151 /* Print the function index. */
1152 uiout
->field_unsigned ("index", bfun
->number
);
1155 /* Indicate gaps in the trace. */
1156 if (bfun
->errcode
!= 0)
1158 const struct btrace_config
*conf
;
1160 conf
= btrace_conf (btinfo
);
1162 /* We have trace so we must have a configuration. */
1163 gdb_assert (conf
!= NULL
);
1165 btrace_ui_out_decode_error (uiout
, bfun
->errcode
, conf
->format
);
1170 if ((flags
& RECORD_PRINT_INDENT_CALLS
) != 0)
1172 int level
= bfun
->level
+ btinfo
->level
, i
;
1174 for (i
= 0; i
< level
; ++i
)
1179 uiout
->field_string ("function", SYMBOL_PRINT_NAME (sym
),
1180 function_name_style
.style ());
1181 else if (msym
!= NULL
)
1182 uiout
->field_string ("function", MSYMBOL_PRINT_NAME (msym
),
1183 function_name_style
.style ());
1184 else if (!uiout
->is_mi_like_p ())
1185 uiout
->field_string ("function", "??",
1186 function_name_style
.style ());
1188 if ((flags
& RECORD_PRINT_INSN_RANGE
) != 0)
1190 uiout
->text (_("\tinst "));
1191 btrace_call_history_insn_range (uiout
, bfun
);
1194 if ((flags
& RECORD_PRINT_SRC_LINE
) != 0)
1196 uiout
->text (_("\tat "));
1197 btrace_call_history_src_line (uiout
, bfun
);
1204 /* The call_history method of target record-btrace. */
1207 record_btrace_target::call_history (int size
, record_print_flags flags
)
1209 struct btrace_thread_info
*btinfo
;
1210 struct btrace_call_history
*history
;
1211 struct btrace_call_iterator begin
, end
;
1212 struct ui_out
*uiout
;
1213 unsigned int context
, covered
;
1215 uiout
= current_uiout
;
1216 ui_out_emit_tuple
tuple_emitter (uiout
, "insn history");
1217 context
= abs (size
);
1219 error (_("Bad record function-call-history-size."));
1221 btinfo
= require_btrace ();
1222 history
= btinfo
->call_history
;
1223 if (history
== NULL
)
1225 struct btrace_insn_iterator
*replay
;
1227 DEBUG ("call-history (0x%x): %d", (int) flags
, size
);
1229 /* If we're replaying, we start at the replay position. Otherwise, we
1230 start at the tail of the trace. */
1231 replay
= btinfo
->replay
;
1234 begin
.btinfo
= btinfo
;
1235 begin
.index
= replay
->call_index
;
1238 btrace_call_end (&begin
, btinfo
);
1240 /* We start from here and expand in the requested direction. Then we
1241 expand in the other direction, as well, to fill up any remaining
1246 /* We want the current position covered, as well. */
1247 covered
= btrace_call_next (&end
, 1);
1248 covered
+= btrace_call_prev (&begin
, context
- covered
);
1249 covered
+= btrace_call_next (&end
, context
- covered
);
1253 covered
= btrace_call_next (&end
, context
);
1254 covered
+= btrace_call_prev (&begin
, context
- covered
);
1259 begin
= history
->begin
;
1262 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", (int) flags
, size
,
1263 btrace_call_number (&begin
), btrace_call_number (&end
));
1268 covered
= btrace_call_prev (&begin
, context
);
1273 covered
= btrace_call_next (&end
, context
);
1278 btrace_call_history (uiout
, btinfo
, &begin
, &end
, flags
);
1282 printf_unfiltered (_("At the start of the branch trace record.\n"));
1284 printf_unfiltered (_("At the end of the branch trace record.\n"));
1287 btrace_set_call_history (btinfo
, &begin
, &end
);
1290 /* The call_history_range method of target record-btrace. */
1293 record_btrace_target::call_history_range (ULONGEST from
, ULONGEST to
,
1294 record_print_flags flags
)
1296 struct btrace_thread_info
*btinfo
;
1297 struct btrace_call_iterator begin
, end
;
1298 struct ui_out
*uiout
;
1299 unsigned int low
, high
;
1302 uiout
= current_uiout
;
1303 ui_out_emit_tuple
tuple_emitter (uiout
, "func history");
1307 DEBUG ("call-history (0x%x): [%u; %u)", (int) flags
, low
, high
);
1309 /* Check for wrap-arounds. */
1310 if (low
!= from
|| high
!= to
)
1311 error (_("Bad range."));
1314 error (_("Bad range."));
1316 btinfo
= require_btrace ();
1318 found
= btrace_find_call_by_number (&begin
, btinfo
, low
);
1320 error (_("Range out of bounds."));
1322 found
= btrace_find_call_by_number (&end
, btinfo
, high
);
1325 /* Silently truncate the range. */
1326 btrace_call_end (&end
, btinfo
);
1330 /* We want both begin and end to be inclusive. */
1331 btrace_call_next (&end
, 1);
1334 btrace_call_history (uiout
, btinfo
, &begin
, &end
, flags
);
1335 btrace_set_call_history (btinfo
, &begin
, &end
);
1338 /* The call_history_from method of target record-btrace. */
1341 record_btrace_target::call_history_from (ULONGEST from
, int size
,
1342 record_print_flags flags
)
1344 ULONGEST begin
, end
, context
;
1346 context
= abs (size
);
1348 error (_("Bad record function-call-history-size."));
1357 begin
= from
- context
+ 1;
1362 end
= from
+ context
- 1;
1364 /* Check for wrap-around. */
1369 call_history_range ( begin
, end
, flags
);
1372 /* The record_method method of target record-btrace. */
1375 record_btrace_target::record_method (ptid_t ptid
)
1377 struct thread_info
* const tp
= find_thread_ptid (ptid
);
1380 error (_("No thread."));
1382 if (tp
->btrace
.target
== NULL
)
1383 return RECORD_METHOD_NONE
;
1385 return RECORD_METHOD_BTRACE
;
1388 /* The record_is_replaying method of target record-btrace. */
1391 record_btrace_target::record_is_replaying (ptid_t ptid
)
1393 for (thread_info
*tp
: all_non_exited_threads (ptid
))
1394 if (btrace_is_replaying (tp
))
1400 /* The record_will_replay method of target record-btrace. */
1403 record_btrace_target::record_will_replay (ptid_t ptid
, int dir
)
1405 return dir
== EXEC_REVERSE
|| record_is_replaying (ptid
);
1408 /* The xfer_partial method of target record-btrace. */
1410 enum target_xfer_status
1411 record_btrace_target::xfer_partial (enum target_object object
,
1412 const char *annex
, gdb_byte
*readbuf
,
1413 const gdb_byte
*writebuf
, ULONGEST offset
,
1414 ULONGEST len
, ULONGEST
*xfered_len
)
1416 /* Filter out requests that don't make sense during replay. */
1417 if (replay_memory_access
== replay_memory_access_read_only
1418 && !record_btrace_generating_corefile
1419 && record_is_replaying (inferior_ptid
))
1423 case TARGET_OBJECT_MEMORY
:
1425 struct target_section
*section
;
1427 /* We do not allow writing memory in general. */
1428 if (writebuf
!= NULL
)
1431 return TARGET_XFER_UNAVAILABLE
;
1434 /* We allow reading readonly memory. */
1435 section
= target_section_by_addr (this, offset
);
1436 if (section
!= NULL
)
1438 /* Check if the section we found is readonly. */
1439 if ((bfd_section_flags (section
->the_bfd_section
)
1440 & SEC_READONLY
) != 0)
1442 /* Truncate the request to fit into this section. */
1443 len
= std::min (len
, section
->endaddr
- offset
);
1449 return TARGET_XFER_UNAVAILABLE
;
1454 /* Forward the request. */
1455 return this->beneath ()->xfer_partial (object
, annex
, readbuf
, writebuf
,
1456 offset
, len
, xfered_len
);
1459 /* The insert_breakpoint method of target record-btrace. */
1462 record_btrace_target::insert_breakpoint (struct gdbarch
*gdbarch
,
1463 struct bp_target_info
*bp_tgt
)
1468 /* Inserting breakpoints requires accessing memory. Allow it for the
1469 duration of this function. */
1470 old
= replay_memory_access
;
1471 replay_memory_access
= replay_memory_access_read_write
;
1476 ret
= this->beneath ()->insert_breakpoint (gdbarch
, bp_tgt
);
1478 catch (const gdb_exception
&except
)
1480 replay_memory_access
= old
;
1483 replay_memory_access
= old
;
1488 /* The remove_breakpoint method of target record-btrace. */
1491 record_btrace_target::remove_breakpoint (struct gdbarch
*gdbarch
,
1492 struct bp_target_info
*bp_tgt
,
1493 enum remove_bp_reason reason
)
1498 /* Removing breakpoints requires accessing memory. Allow it for the
1499 duration of this function. */
1500 old
= replay_memory_access
;
1501 replay_memory_access
= replay_memory_access_read_write
;
1506 ret
= this->beneath ()->remove_breakpoint (gdbarch
, bp_tgt
, reason
);
1508 catch (const gdb_exception
&except
)
1510 replay_memory_access
= old
;
1513 replay_memory_access
= old
;
1518 /* The fetch_registers method of target record-btrace. */
1521 record_btrace_target::fetch_registers (struct regcache
*regcache
, int regno
)
1523 struct btrace_insn_iterator
*replay
;
1524 struct thread_info
*tp
;
1526 tp
= find_thread_ptid (regcache
->ptid ());
1527 gdb_assert (tp
!= NULL
);
1529 replay
= tp
->btrace
.replay
;
1530 if (replay
!= NULL
&& !record_btrace_generating_corefile
)
1532 const struct btrace_insn
*insn
;
1533 struct gdbarch
*gdbarch
;
1536 gdbarch
= regcache
->arch ();
1537 pcreg
= gdbarch_pc_regnum (gdbarch
);
1541 /* We can only provide the PC register. */
1542 if (regno
>= 0 && regno
!= pcreg
)
1545 insn
= btrace_insn_get (replay
);
1546 gdb_assert (insn
!= NULL
);
1548 regcache
->raw_supply (regno
, &insn
->pc
);
1551 this->beneath ()->fetch_registers (regcache
, regno
);
1554 /* The store_registers method of target record-btrace. */
1557 record_btrace_target::store_registers (struct regcache
*regcache
, int regno
)
1559 if (!record_btrace_generating_corefile
1560 && record_is_replaying (regcache
->ptid ()))
1561 error (_("Cannot write registers while replaying."));
1563 gdb_assert (may_write_registers
);
1565 this->beneath ()->store_registers (regcache
, regno
);
1568 /* The prepare_to_store method of target record-btrace. */
1571 record_btrace_target::prepare_to_store (struct regcache
*regcache
)
1573 if (!record_btrace_generating_corefile
1574 && record_is_replaying (regcache
->ptid ()))
1577 this->beneath ()->prepare_to_store (regcache
);
1580 /* The branch trace frame cache. */
1582 struct btrace_frame_cache
1585 struct thread_info
*tp
;
1587 /* The frame info. */
1588 struct frame_info
*frame
;
1590 /* The branch trace function segment. */
1591 const struct btrace_function
*bfun
;
1594 /* A struct btrace_frame_cache hash table indexed by NEXT. */
1596 static htab_t bfcache
;
1598 /* hash_f for htab_create_alloc of bfcache. */
1601 bfcache_hash (const void *arg
)
1603 const struct btrace_frame_cache
*cache
1604 = (const struct btrace_frame_cache
*) arg
;
1606 return htab_hash_pointer (cache
->frame
);
1609 /* eq_f for htab_create_alloc of bfcache. */
1612 bfcache_eq (const void *arg1
, const void *arg2
)
1614 const struct btrace_frame_cache
*cache1
1615 = (const struct btrace_frame_cache
*) arg1
;
1616 const struct btrace_frame_cache
*cache2
1617 = (const struct btrace_frame_cache
*) arg2
;
1619 return cache1
->frame
== cache2
->frame
;
1622 /* Create a new btrace frame cache. */
1624 static struct btrace_frame_cache
*
1625 bfcache_new (struct frame_info
*frame
)
1627 struct btrace_frame_cache
*cache
;
1630 cache
= FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache
);
1631 cache
->frame
= frame
;
1633 slot
= htab_find_slot (bfcache
, cache
, INSERT
);
1634 gdb_assert (*slot
== NULL
);
1640 /* Extract the branch trace function from a branch trace frame. */
1642 static const struct btrace_function
*
1643 btrace_get_frame_function (struct frame_info
*frame
)
1645 const struct btrace_frame_cache
*cache
;
1646 struct btrace_frame_cache pattern
;
1649 pattern
.frame
= frame
;
1651 slot
= htab_find_slot (bfcache
, &pattern
, NO_INSERT
);
1655 cache
= (const struct btrace_frame_cache
*) *slot
;
1659 /* Implement stop_reason method for record_btrace_frame_unwind. */
1661 static enum unwind_stop_reason
1662 record_btrace_frame_unwind_stop_reason (struct frame_info
*this_frame
,
1665 const struct btrace_frame_cache
*cache
;
1666 const struct btrace_function
*bfun
;
1668 cache
= (const struct btrace_frame_cache
*) *this_cache
;
1670 gdb_assert (bfun
!= NULL
);
1673 return UNWIND_UNAVAILABLE
;
1675 return UNWIND_NO_REASON
;
1678 /* Implement this_id method for record_btrace_frame_unwind. */
1681 record_btrace_frame_this_id (struct frame_info
*this_frame
, void **this_cache
,
1682 struct frame_id
*this_id
)
1684 const struct btrace_frame_cache
*cache
;
1685 const struct btrace_function
*bfun
;
1686 struct btrace_call_iterator it
;
1687 CORE_ADDR code
, special
;
1689 cache
= (const struct btrace_frame_cache
*) *this_cache
;
1692 gdb_assert (bfun
!= NULL
);
1694 while (btrace_find_call_by_number (&it
, &cache
->tp
->btrace
, bfun
->prev
) != 0)
1695 bfun
= btrace_call_get (&it
);
1697 code
= get_frame_func (this_frame
);
1698 special
= bfun
->number
;
1700 *this_id
= frame_id_build_unavailable_stack_special (code
, special
);
1702 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1703 btrace_get_bfun_name (cache
->bfun
),
1704 core_addr_to_string_nz (this_id
->code_addr
),
1705 core_addr_to_string_nz (this_id
->special_addr
));
1708 /* Implement prev_register method for record_btrace_frame_unwind. */
1710 static struct value
*
1711 record_btrace_frame_prev_register (struct frame_info
*this_frame
,
1715 const struct btrace_frame_cache
*cache
;
1716 const struct btrace_function
*bfun
, *caller
;
1717 struct btrace_call_iterator it
;
1718 struct gdbarch
*gdbarch
;
1722 gdbarch
= get_frame_arch (this_frame
);
1723 pcreg
= gdbarch_pc_regnum (gdbarch
);
1724 if (pcreg
< 0 || regnum
!= pcreg
)
1725 throw_error (NOT_AVAILABLE_ERROR
,
1726 _("Registers are not available in btrace record history"));
1728 cache
= (const struct btrace_frame_cache
*) *this_cache
;
1730 gdb_assert (bfun
!= NULL
);
1732 if (btrace_find_call_by_number (&it
, &cache
->tp
->btrace
, bfun
->up
) == 0)
1733 throw_error (NOT_AVAILABLE_ERROR
,
1734 _("No caller in btrace record history"));
1736 caller
= btrace_call_get (&it
);
1738 if ((bfun
->flags
& BFUN_UP_LINKS_TO_RET
) != 0)
1739 pc
= caller
->insn
.front ().pc
;
1742 pc
= caller
->insn
.back ().pc
;
1743 pc
+= gdb_insn_length (gdbarch
, pc
);
1746 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1747 btrace_get_bfun_name (bfun
), bfun
->level
,
1748 core_addr_to_string_nz (pc
));
1750 return frame_unwind_got_address (this_frame
, regnum
, pc
);
1753 /* Implement sniffer method for record_btrace_frame_unwind. */
1756 record_btrace_frame_sniffer (const struct frame_unwind
*self
,
1757 struct frame_info
*this_frame
,
1760 const struct btrace_function
*bfun
;
1761 struct btrace_frame_cache
*cache
;
1762 struct thread_info
*tp
;
1763 struct frame_info
*next
;
1765 /* THIS_FRAME does not contain a reference to its thread. */
1766 tp
= inferior_thread ();
1769 next
= get_next_frame (this_frame
);
1772 const struct btrace_insn_iterator
*replay
;
1774 replay
= tp
->btrace
.replay
;
1776 bfun
= &replay
->btinfo
->functions
[replay
->call_index
];
1780 const struct btrace_function
*callee
;
1781 struct btrace_call_iterator it
;
1783 callee
= btrace_get_frame_function (next
);
1784 if (callee
== NULL
|| (callee
->flags
& BFUN_UP_LINKS_TO_TAILCALL
) != 0)
1787 if (btrace_find_call_by_number (&it
, &tp
->btrace
, callee
->up
) == 0)
1790 bfun
= btrace_call_get (&it
);
1796 DEBUG ("[frame] sniffed frame for %s on level %d",
1797 btrace_get_bfun_name (bfun
), bfun
->level
);
1799 /* This is our frame. Initialize the frame cache. */
1800 cache
= bfcache_new (this_frame
);
1804 *this_cache
= cache
;
1808 /* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1811 record_btrace_tailcall_frame_sniffer (const struct frame_unwind
*self
,
1812 struct frame_info
*this_frame
,
1815 const struct btrace_function
*bfun
, *callee
;
1816 struct btrace_frame_cache
*cache
;
1817 struct btrace_call_iterator it
;
1818 struct frame_info
*next
;
1819 struct thread_info
*tinfo
;
1821 next
= get_next_frame (this_frame
);
1825 callee
= btrace_get_frame_function (next
);
1829 if ((callee
->flags
& BFUN_UP_LINKS_TO_TAILCALL
) == 0)
1832 tinfo
= inferior_thread ();
1833 if (btrace_find_call_by_number (&it
, &tinfo
->btrace
, callee
->up
) == 0)
1836 bfun
= btrace_call_get (&it
);
1838 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1839 btrace_get_bfun_name (bfun
), bfun
->level
);
1841 /* This is our frame. Initialize the frame cache. */
1842 cache
= bfcache_new (this_frame
);
1846 *this_cache
= cache
;
1851 record_btrace_frame_dealloc_cache (struct frame_info
*self
, void *this_cache
)
1853 struct btrace_frame_cache
*cache
;
1856 cache
= (struct btrace_frame_cache
*) this_cache
;
1858 slot
= htab_find_slot (bfcache
, cache
, NO_INSERT
);
1859 gdb_assert (slot
!= NULL
);
1861 htab_remove_elt (bfcache
, cache
);
/* btrace recording does not store previous memory content, nor the stack
   frames' content.  Any unwinding would return erroneous results as the
   stack contents no longer match the changed PC value restored from
   history.  Therefore this unwinder reports any possibly unwound
   registers as <unavailable>.  */
1870 const struct frame_unwind record_btrace_frame_unwind
=
1873 record_btrace_frame_unwind_stop_reason
,
1874 record_btrace_frame_this_id
,
1875 record_btrace_frame_prev_register
,
1877 record_btrace_frame_sniffer
,
1878 record_btrace_frame_dealloc_cache
1881 const struct frame_unwind record_btrace_tailcall_frame_unwind
=
1884 record_btrace_frame_unwind_stop_reason
,
1885 record_btrace_frame_this_id
,
1886 record_btrace_frame_prev_register
,
1888 record_btrace_tailcall_frame_sniffer
,
1889 record_btrace_frame_dealloc_cache
1892 /* Implement the get_unwinder method. */
1894 const struct frame_unwind
*
1895 record_btrace_target::get_unwinder ()
1897 return &record_btrace_frame_unwind
;
1900 /* Implement the get_tailcall_unwinder method. */
1902 const struct frame_unwind
*
1903 record_btrace_target::get_tailcall_unwinder ()
1905 return &record_btrace_tailcall_frame_unwind
;
1908 /* Return a human-readable string for FLAG. */
1911 btrace_thread_flag_to_str (enum btrace_thread_flag flag
)
1919 return "reverse-step";
1925 return "reverse-cont";
1934 /* Indicate that TP should be resumed according to FLAG. */
1937 record_btrace_resume_thread (struct thread_info
*tp
,
1938 enum btrace_thread_flag flag
)
1940 struct btrace_thread_info
*btinfo
;
1942 DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp
),
1943 target_pid_to_str (tp
->ptid
).c_str (), flag
,
1944 btrace_thread_flag_to_str (flag
));
1946 btinfo
= &tp
->btrace
;
1948 /* Fetch the latest branch trace. */
1949 btrace_fetch (tp
, record_btrace_get_cpu ());
1951 /* A resume request overwrites a preceding resume or stop request. */
1952 btinfo
->flags
&= ~(BTHR_MOVE
| BTHR_STOP
);
1953 btinfo
->flags
|= flag
;
1956 /* Get the current frame for TP. */
1958 static struct frame_id
1959 get_thread_current_frame_id (struct thread_info
*tp
)
1964 /* Set current thread, which is implicitly used by
1965 get_current_frame. */
1966 scoped_restore_current_thread restore_thread
;
1968 switch_to_thread (tp
);
1970 /* Clear the executing flag to allow changes to the current frame.
1971 We are not actually running, yet. We just started a reverse execution
1972 command or a record goto command.
1973 For the latter, EXECUTING is false and this has no effect.
1974 For the former, EXECUTING is true and we're in wait, about to
1975 move the thread. Since we need to recompute the stack, we temporarily
1976 set EXECUTING to flase. */
1977 executing
= tp
->executing
;
1978 set_executing (inferior_ptid
, false);
1983 id
= get_frame_id (get_current_frame ());
1985 catch (const gdb_exception
&except
)
1987 /* Restore the previous execution state. */
1988 set_executing (inferior_ptid
, executing
);
1993 /* Restore the previous execution state. */
1994 set_executing (inferior_ptid
, executing
);
1999 /* Start replaying a thread. */
2001 static struct btrace_insn_iterator
*
2002 record_btrace_start_replaying (struct thread_info
*tp
)
2004 struct btrace_insn_iterator
*replay
;
2005 struct btrace_thread_info
*btinfo
;
2007 btinfo
= &tp
->btrace
;
2010 /* We can't start replaying without trace. */
2011 if (btinfo
->functions
.empty ())
2014 /* GDB stores the current frame_id when stepping in order to detects steps
2016 Since frames are computed differently when we're replaying, we need to
2017 recompute those stored frames and fix them up so we can still detect
2018 subroutines after we started replaying. */
2021 struct frame_id frame_id
;
2022 int upd_step_frame_id
, upd_step_stack_frame_id
;
2024 /* The current frame without replaying - computed via normal unwind. */
2025 frame_id
= get_thread_current_frame_id (tp
);
2027 /* Check if we need to update any stepping-related frame id's. */
2028 upd_step_frame_id
= frame_id_eq (frame_id
,
2029 tp
->control
.step_frame_id
);
2030 upd_step_stack_frame_id
= frame_id_eq (frame_id
,
2031 tp
->control
.step_stack_frame_id
);
2033 /* We start replaying at the end of the branch trace. This corresponds
2034 to the current instruction. */
2035 replay
= XNEW (struct btrace_insn_iterator
);
2036 btrace_insn_end (replay
, btinfo
);
2038 /* Skip gaps at the end of the trace. */
2039 while (btrace_insn_get (replay
) == NULL
)
2043 steps
= btrace_insn_prev (replay
, 1);
2045 error (_("No trace."));
2048 /* We're not replaying, yet. */
2049 gdb_assert (btinfo
->replay
== NULL
);
2050 btinfo
->replay
= replay
;
2052 /* Make sure we're not using any stale registers. */
2053 registers_changed_thread (tp
);
2055 /* The current frame with replaying - computed via btrace unwind. */
2056 frame_id
= get_thread_current_frame_id (tp
);
2058 /* Replace stepping related frames where necessary. */
2059 if (upd_step_frame_id
)
2060 tp
->control
.step_frame_id
= frame_id
;
2061 if (upd_step_stack_frame_id
)
2062 tp
->control
.step_stack_frame_id
= frame_id
;
2064 catch (const gdb_exception
&except
)
2066 xfree (btinfo
->replay
);
2067 btinfo
->replay
= NULL
;
2069 registers_changed_thread (tp
);
2077 /* Stop replaying a thread. */
2080 record_btrace_stop_replaying (struct thread_info
*tp
)
2082 struct btrace_thread_info
*btinfo
;
2084 btinfo
= &tp
->btrace
;
2086 xfree (btinfo
->replay
);
2087 btinfo
->replay
= NULL
;
2089 /* Make sure we're not leaving any stale registers. */
2090 registers_changed_thread (tp
);
2093 /* Stop replaying TP if it is at the end of its execution history. */
2096 record_btrace_stop_replaying_at_end (struct thread_info
*tp
)
2098 struct btrace_insn_iterator
*replay
, end
;
2099 struct btrace_thread_info
*btinfo
;
2101 btinfo
= &tp
->btrace
;
2102 replay
= btinfo
->replay
;
2107 btrace_insn_end (&end
, btinfo
);
2109 if (btrace_insn_cmp (replay
, &end
) == 0)
2110 record_btrace_stop_replaying (tp
);
2113 /* The resume method of target record-btrace. */
2116 record_btrace_target::resume (ptid_t ptid
, int step
, enum gdb_signal signal
)
2118 enum btrace_thread_flag flag
, cflag
;
2120 DEBUG ("resume %s: %s%s", target_pid_to_str (ptid
).c_str (),
2121 ::execution_direction
== EXEC_REVERSE
? "reverse-" : "",
2122 step
? "step" : "cont");
2124 /* Store the execution direction of the last resume.
2126 If there is more than one resume call, we have to rely on infrun
2127 to not change the execution direction in-between. */
2128 record_btrace_resume_exec_dir
= ::execution_direction
;
2130 /* As long as we're not replaying, just forward the request.
2132 For non-stop targets this means that no thread is replaying. In order to
2133 make progress, we may need to explicitly move replaying threads to the end
2134 of their execution history. */
2135 if ((::execution_direction
!= EXEC_REVERSE
)
2136 && !record_is_replaying (minus_one_ptid
))
2138 this->beneath ()->resume (ptid
, step
, signal
);
2142 /* Compute the btrace thread flag for the requested move. */
2143 if (::execution_direction
== EXEC_REVERSE
)
2145 flag
= step
== 0 ? BTHR_RCONT
: BTHR_RSTEP
;
2150 flag
= step
== 0 ? BTHR_CONT
: BTHR_STEP
;
2154 /* We just indicate the resume intent here. The actual stepping happens in
2155 record_btrace_wait below.
2157 For all-stop targets, we only step INFERIOR_PTID and continue others. */
2158 if (!target_is_non_stop_p ())
2160 gdb_assert (inferior_ptid
.matches (ptid
));
2162 for (thread_info
*tp
: all_non_exited_threads (ptid
))
2164 if (tp
->ptid
.matches (inferior_ptid
))
2165 record_btrace_resume_thread (tp
, flag
);
2167 record_btrace_resume_thread (tp
, cflag
);
2172 for (thread_info
*tp
: all_non_exited_threads (ptid
))
2173 record_btrace_resume_thread (tp
, flag
);
2176 /* Async support. */
2177 if (target_can_async_p ())
2180 mark_async_event_handler (record_btrace_async_inferior_event_handler
);
2184 /* The commit_resume method of target record-btrace. */
2187 record_btrace_target::commit_resume ()
2189 if ((::execution_direction
!= EXEC_REVERSE
)
2190 && !record_is_replaying (minus_one_ptid
))
2191 beneath ()->commit_resume ();
2194 /* Cancel resuming TP. */
2197 record_btrace_cancel_resume (struct thread_info
*tp
)
2199 enum btrace_thread_flag flags
;
2201 flags
= tp
->btrace
.flags
& (BTHR_MOVE
| BTHR_STOP
);
2205 DEBUG ("cancel resume thread %s (%s): %x (%s)",
2206 print_thread_id (tp
),
2207 target_pid_to_str (tp
->ptid
).c_str (), flags
,
2208 btrace_thread_flag_to_str (flags
));
2210 tp
->btrace
.flags
&= ~(BTHR_MOVE
| BTHR_STOP
);
2211 record_btrace_stop_replaying_at_end (tp
);
2214 /* Return a target_waitstatus indicating that we ran out of history. */
2216 static struct target_waitstatus
2217 btrace_step_no_history (void)
2219 struct target_waitstatus status
;
2221 status
.kind
= TARGET_WAITKIND_NO_HISTORY
;
2226 /* Return a target_waitstatus indicating that a step finished. */
2228 static struct target_waitstatus
2229 btrace_step_stopped (void)
2231 struct target_waitstatus status
;
2233 status
.kind
= TARGET_WAITKIND_STOPPED
;
2234 status
.value
.sig
= GDB_SIGNAL_TRAP
;
2239 /* Return a target_waitstatus indicating that a thread was stopped as
2242 static struct target_waitstatus
2243 btrace_step_stopped_on_request (void)
2245 struct target_waitstatus status
;
2247 status
.kind
= TARGET_WAITKIND_STOPPED
;
2248 status
.value
.sig
= GDB_SIGNAL_0
;
2253 /* Return a target_waitstatus indicating a spurious stop. */
2255 static struct target_waitstatus
2256 btrace_step_spurious (void)
2258 struct target_waitstatus status
;
2260 status
.kind
= TARGET_WAITKIND_SPURIOUS
;
2265 /* Return a target_waitstatus indicating that the thread was not resumed. */
2267 static struct target_waitstatus
2268 btrace_step_no_resumed (void)
2270 struct target_waitstatus status
;
2272 status
.kind
= TARGET_WAITKIND_NO_RESUMED
;
2277 /* Return a target_waitstatus indicating that we should wait again. */
2279 static struct target_waitstatus
2280 btrace_step_again (void)
2282 struct target_waitstatus status
;
2284 status
.kind
= TARGET_WAITKIND_IGNORE
;
2289 /* Clear the record histories. */
2292 record_btrace_clear_histories (struct btrace_thread_info
*btinfo
)
2294 xfree (btinfo
->insn_history
);
2295 xfree (btinfo
->call_history
);
2297 btinfo
->insn_history
= NULL
;
2298 btinfo
->call_history
= NULL
;
2301 /* Check whether TP's current replay position is at a breakpoint. */
2304 record_btrace_replay_at_breakpoint (struct thread_info
*tp
)
2306 struct btrace_insn_iterator
*replay
;
2307 struct btrace_thread_info
*btinfo
;
2308 const struct btrace_insn
*insn
;
2310 btinfo
= &tp
->btrace
;
2311 replay
= btinfo
->replay
;
2316 insn
= btrace_insn_get (replay
);
2320 return record_check_stopped_by_breakpoint (tp
->inf
->aspace
, insn
->pc
,
2321 &btinfo
->stop_reason
);
2324 /* Step one instruction in forward direction. */
2326 static struct target_waitstatus
2327 record_btrace_single_step_forward (struct thread_info
*tp
)
2329 struct btrace_insn_iterator
*replay
, end
, start
;
2330 struct btrace_thread_info
*btinfo
;
2332 btinfo
= &tp
->btrace
;
2333 replay
= btinfo
->replay
;
2335 /* We're done if we're not replaying. */
2337 return btrace_step_no_history ();
2339 /* Check if we're stepping a breakpoint. */
2340 if (record_btrace_replay_at_breakpoint (tp
))
2341 return btrace_step_stopped ();
2343 /* Skip gaps during replay. If we end up at a gap (at the end of the trace),
2344 jump back to the instruction at which we started. */
2350 /* We will bail out here if we continue stepping after reaching the end
2351 of the execution history. */
2352 steps
= btrace_insn_next (replay
, 1);
2356 return btrace_step_no_history ();
2359 while (btrace_insn_get (replay
) == NULL
);
2361 /* Determine the end of the instruction trace. */
2362 btrace_insn_end (&end
, btinfo
);
2364 /* The execution trace contains (and ends with) the current instruction.
2365 This instruction has not been executed, yet, so the trace really ends
2366 one instruction earlier. */
2367 if (btrace_insn_cmp (replay
, &end
) == 0)
2368 return btrace_step_no_history ();
2370 return btrace_step_spurious ();
2373 /* Step one instruction in backward direction. */
2375 static struct target_waitstatus
2376 record_btrace_single_step_backward (struct thread_info
*tp
)
2378 struct btrace_insn_iterator
*replay
, start
;
2379 struct btrace_thread_info
*btinfo
;
2381 btinfo
= &tp
->btrace
;
2382 replay
= btinfo
->replay
;
2384 /* Start replaying if we're not already doing so. */
2386 replay
= record_btrace_start_replaying (tp
);
2388 /* If we can't step any further, we reached the end of the history.
2389 Skip gaps during replay. If we end up at a gap (at the beginning of
2390 the trace), jump back to the instruction at which we started. */
2396 steps
= btrace_insn_prev (replay
, 1);
2400 return btrace_step_no_history ();
2403 while (btrace_insn_get (replay
) == NULL
);
2405 /* Check if we're stepping a breakpoint.
2407 For reverse-stepping, this check is after the step. There is logic in
2408 infrun.c that handles reverse-stepping separately. See, for example,
2409 proceed and adjust_pc_after_break.
2411 This code assumes that for reverse-stepping, PC points to the last
2412 de-executed instruction, whereas for forward-stepping PC points to the
2413 next to-be-executed instruction. */
2414 if (record_btrace_replay_at_breakpoint (tp
))
2415 return btrace_step_stopped ();
2417 return btrace_step_spurious ();
2420 /* Step a single thread. */
2422 static struct target_waitstatus
2423 record_btrace_step_thread (struct thread_info
*tp
)
2425 struct btrace_thread_info
*btinfo
;
2426 struct target_waitstatus status
;
2427 enum btrace_thread_flag flags
;
2429 btinfo
= &tp
->btrace
;
2431 flags
= btinfo
->flags
& (BTHR_MOVE
| BTHR_STOP
);
2432 btinfo
->flags
&= ~(BTHR_MOVE
| BTHR_STOP
);
2434 DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp
),
2435 target_pid_to_str (tp
->ptid
).c_str (), flags
,
2436 btrace_thread_flag_to_str (flags
));
2438 /* We can't step without an execution history. */
2439 if ((flags
& BTHR_MOVE
) != 0 && btrace_is_empty (tp
))
2440 return btrace_step_no_history ();
2445 internal_error (__FILE__
, __LINE__
, _("invalid stepping type."));
2448 return btrace_step_stopped_on_request ();
2451 status
= record_btrace_single_step_forward (tp
);
2452 if (status
.kind
!= TARGET_WAITKIND_SPURIOUS
)
2455 return btrace_step_stopped ();
2458 status
= record_btrace_single_step_backward (tp
);
2459 if (status
.kind
!= TARGET_WAITKIND_SPURIOUS
)
2462 return btrace_step_stopped ();
2465 status
= record_btrace_single_step_forward (tp
);
2466 if (status
.kind
!= TARGET_WAITKIND_SPURIOUS
)
2469 btinfo
->flags
|= flags
;
2470 return btrace_step_again ();
2473 status
= record_btrace_single_step_backward (tp
);
2474 if (status
.kind
!= TARGET_WAITKIND_SPURIOUS
)
2477 btinfo
->flags
|= flags
;
2478 return btrace_step_again ();
2481 /* We keep threads moving at the end of their execution history. The wait
2482 method will stop the thread for whom the event is reported. */
2483 if (status
.kind
== TARGET_WAITKIND_NO_HISTORY
)
2484 btinfo
->flags
|= flags
;
2489 /* Announce further events if necessary. */
2492 record_btrace_maybe_mark_async_event
2493 (const std::vector
<thread_info
*> &moving
,
2494 const std::vector
<thread_info
*> &no_history
)
2496 bool more_moving
= !moving
.empty ();
2497 bool more_no_history
= !no_history
.empty ();;
2499 if (!more_moving
&& !more_no_history
)
2503 DEBUG ("movers pending");
2505 if (more_no_history
)
2506 DEBUG ("no-history pending");
2508 mark_async_event_handler (record_btrace_async_inferior_event_handler
);
2511 /* The wait method of target record-btrace. */
2514 record_btrace_target::wait (ptid_t ptid
, struct target_waitstatus
*status
,
2517 std::vector
<thread_info
*> moving
;
2518 std::vector
<thread_info
*> no_history
;
2520 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid
).c_str (), options
);
2522 /* As long as we're not replaying, just forward the request. */
2523 if ((::execution_direction
!= EXEC_REVERSE
)
2524 && !record_is_replaying (minus_one_ptid
))
2526 return this->beneath ()->wait (ptid
, status
, options
);
2529 /* Keep a work list of moving threads. */
2530 for (thread_info
*tp
: all_non_exited_threads (ptid
))
2531 if ((tp
->btrace
.flags
& (BTHR_MOVE
| BTHR_STOP
)) != 0)
2532 moving
.push_back (tp
);
2534 if (moving
.empty ())
2536 *status
= btrace_step_no_resumed ();
2538 DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid
).c_str (),
2539 target_waitstatus_to_string (status
).c_str ());
2544 /* Step moving threads one by one, one step each, until either one thread
2545 reports an event or we run out of threads to step.
2547 When stepping more than one thread, chances are that some threads reach
2548 the end of their execution history earlier than others. If we reported
2549 this immediately, all-stop on top of non-stop would stop all threads and
2550 resume the same threads next time. And we would report the same thread
2551 having reached the end of its execution history again.
2553 In the worst case, this would starve the other threads. But even if other
2554 threads would be allowed to make progress, this would result in far too
2555 many intermediate stops.
2557 We therefore delay the reporting of "no execution history" until we have
2558 nothing else to report. By this time, all threads should have moved to
2559 either the beginning or the end of their execution history. There will
2560 be a single user-visible stop. */
2561 struct thread_info
*eventing
= NULL
;
2562 while ((eventing
== NULL
) && !moving
.empty ())
2564 for (unsigned int ix
= 0; eventing
== NULL
&& ix
< moving
.size ();)
2566 thread_info
*tp
= moving
[ix
];
2568 *status
= record_btrace_step_thread (tp
);
2570 switch (status
->kind
)
2572 case TARGET_WAITKIND_IGNORE
:
2576 case TARGET_WAITKIND_NO_HISTORY
:
2577 no_history
.push_back (ordered_remove (moving
, ix
));
2581 eventing
= unordered_remove (moving
, ix
);
2587 if (eventing
== NULL
)
2589 /* We started with at least one moving thread. This thread must have
2590 either stopped or reached the end of its execution history.
2592 In the former case, EVENTING must not be NULL.
2593 In the latter case, NO_HISTORY must not be empty. */
2594 gdb_assert (!no_history
.empty ());
2596 /* We kept threads moving at the end of their execution history. Stop
2597 EVENTING now that we are going to report its stop. */
2598 eventing
= unordered_remove (no_history
, 0);
2599 eventing
->btrace
.flags
&= ~BTHR_MOVE
;
2601 *status
= btrace_step_no_history ();
2604 gdb_assert (eventing
!= NULL
);
2606 /* We kept threads replaying at the end of their execution history. Stop
2607 replaying EVENTING now that we are going to report its stop. */
2608 record_btrace_stop_replaying_at_end (eventing
);
2610 /* Stop all other threads. */
2611 if (!target_is_non_stop_p ())
2613 for (thread_info
*tp
: all_non_exited_threads ())
2614 record_btrace_cancel_resume (tp
);
2617 /* In async mode, we need to announce further events. */
2618 if (target_is_async_p ())
2619 record_btrace_maybe_mark_async_event (moving
, no_history
);
2621 /* Start record histories anew from the current position. */
2622 record_btrace_clear_histories (&eventing
->btrace
);
2624 /* We moved the replay position but did not update registers. */
2625 registers_changed_thread (eventing
);
2627 DEBUG ("wait ended by thread %s (%s): %s",
2628 print_thread_id (eventing
),
2629 target_pid_to_str (eventing
->ptid
).c_str (),
2630 target_waitstatus_to_string (status
).c_str ());
2632 return eventing
->ptid
;
2635 /* The stop method of target record-btrace. */
2638 record_btrace_target::stop (ptid_t ptid
)
2640 DEBUG ("stop %s", target_pid_to_str (ptid
).c_str ());
2642 /* As long as we're not replaying, just forward the request. */
2643 if ((::execution_direction
!= EXEC_REVERSE
)
2644 && !record_is_replaying (minus_one_ptid
))
2646 this->beneath ()->stop (ptid
);
2650 for (thread_info
*tp
: all_non_exited_threads (ptid
))
2652 tp
->btrace
.flags
&= ~BTHR_MOVE
;
2653 tp
->btrace
.flags
|= BTHR_STOP
;
2658 /* The can_execute_reverse method of target record-btrace. */
2661 record_btrace_target::can_execute_reverse ()
2666 /* The stopped_by_sw_breakpoint method of target record-btrace. */
2669 record_btrace_target::stopped_by_sw_breakpoint ()
2671 if (record_is_replaying (minus_one_ptid
))
2673 struct thread_info
*tp
= inferior_thread ();
2675 return tp
->btrace
.stop_reason
== TARGET_STOPPED_BY_SW_BREAKPOINT
;
2678 return this->beneath ()->stopped_by_sw_breakpoint ();
2681 /* The supports_stopped_by_sw_breakpoint method of target
2685 record_btrace_target::supports_stopped_by_sw_breakpoint ()
2687 if (record_is_replaying (minus_one_ptid
))
2690 return this->beneath ()->supports_stopped_by_sw_breakpoint ();
2693 /* The stopped_by_sw_breakpoint method of target record-btrace. */
2696 record_btrace_target::stopped_by_hw_breakpoint ()
2698 if (record_is_replaying (minus_one_ptid
))
2700 struct thread_info
*tp
= inferior_thread ();
2702 return tp
->btrace
.stop_reason
== TARGET_STOPPED_BY_HW_BREAKPOINT
;
2705 return this->beneath ()->stopped_by_hw_breakpoint ();
2708 /* The supports_stopped_by_hw_breakpoint method of target
2712 record_btrace_target::supports_stopped_by_hw_breakpoint ()
2714 if (record_is_replaying (minus_one_ptid
))
2717 return this->beneath ()->supports_stopped_by_hw_breakpoint ();
2720 /* The update_thread_list method of target record-btrace. */
2723 record_btrace_target::update_thread_list ()
2725 /* We don't add or remove threads during replay. */
2726 if (record_is_replaying (minus_one_ptid
))
2729 /* Forward the request. */
2730 this->beneath ()->update_thread_list ();
2733 /* The thread_alive method of target record-btrace. */
2736 record_btrace_target::thread_alive (ptid_t ptid
)
2738 /* We don't add or remove threads during replay. */
2739 if (record_is_replaying (minus_one_ptid
))
2742 /* Forward the request. */
2743 return this->beneath ()->thread_alive (ptid
);
2746 /* Set the replay branch trace instruction iterator. If IT is NULL, replay
2750 record_btrace_set_replay (struct thread_info
*tp
,
2751 const struct btrace_insn_iterator
*it
)
2753 struct btrace_thread_info
*btinfo
;
2755 btinfo
= &tp
->btrace
;
2758 record_btrace_stop_replaying (tp
);
2761 if (btinfo
->replay
== NULL
)
2762 record_btrace_start_replaying (tp
);
2763 else if (btrace_insn_cmp (btinfo
->replay
, it
) == 0)
2766 *btinfo
->replay
= *it
;
2767 registers_changed_thread (tp
);
2770 /* Start anew from the new replay position. */
2771 record_btrace_clear_histories (btinfo
);
2773 inferior_thread ()->suspend
.stop_pc
2774 = regcache_read_pc (get_current_regcache ());
2775 print_stack_frame (get_selected_frame (NULL
), 1, SRC_AND_LOC
, 1);
2778 /* The goto_record_begin method of target record-btrace. */
2781 record_btrace_target::goto_record_begin ()
2783 struct thread_info
*tp
;
2784 struct btrace_insn_iterator begin
;
2786 tp
= require_btrace_thread ();
2788 btrace_insn_begin (&begin
, &tp
->btrace
);
2790 /* Skip gaps at the beginning of the trace. */
2791 while (btrace_insn_get (&begin
) == NULL
)
2795 steps
= btrace_insn_next (&begin
, 1);
2797 error (_("No trace."));
2800 record_btrace_set_replay (tp
, &begin
);
2803 /* The goto_record_end method of target record-btrace. */
2806 record_btrace_target::goto_record_end ()
2808 struct thread_info
*tp
;
2810 tp
= require_btrace_thread ();
2812 record_btrace_set_replay (tp
, NULL
);
2815 /* The goto_record method of target record-btrace. */
2818 record_btrace_target::goto_record (ULONGEST insn
)
2820 struct thread_info
*tp
;
2821 struct btrace_insn_iterator it
;
2822 unsigned int number
;
2827 /* Check for wrap-arounds. */
2829 error (_("Instruction number out of range."));
2831 tp
= require_btrace_thread ();
2833 found
= btrace_find_insn_by_number (&it
, &tp
->btrace
, number
);
2835 /* Check if the instruction could not be found or is a gap. */
2836 if (found
== 0 || btrace_insn_get (&it
) == NULL
)
2837 error (_("No such instruction."));
2839 record_btrace_set_replay (tp
, &it
);
2842 /* The record_stop_replaying method of target record-btrace. */
2845 record_btrace_target::record_stop_replaying ()
2847 for (thread_info
*tp
: all_non_exited_threads ())
2848 record_btrace_stop_replaying (tp
);
2851 /* The execution_direction target method. */
2853 enum exec_direction_kind
2854 record_btrace_target::execution_direction ()
2856 return record_btrace_resume_exec_dir
;
2859 /* The prepare_to_generate_core target method. */
2862 record_btrace_target::prepare_to_generate_core ()
2864 record_btrace_generating_corefile
= 1;
2867 /* The done_generating_core target method. */
2870 record_btrace_target::done_generating_core ()
2872 record_btrace_generating_corefile
= 0;
2875 /* Start recording in BTS format. */
2878 cmd_record_btrace_bts_start (const char *args
, int from_tty
)
2880 if (args
!= NULL
&& *args
!= 0)
2881 error (_("Invalid argument."));
2883 record_btrace_conf
.format
= BTRACE_FORMAT_BTS
;
2887 execute_command ("target record-btrace", from_tty
);
2889 catch (const gdb_exception
&exception
)
2891 record_btrace_conf
.format
= BTRACE_FORMAT_NONE
;
2896 /* Start recording in Intel Processor Trace format. */
2899 cmd_record_btrace_pt_start (const char *args
, int from_tty
)
2901 if (args
!= NULL
&& *args
!= 0)
2902 error (_("Invalid argument."));
2904 record_btrace_conf
.format
= BTRACE_FORMAT_PT
;
2908 execute_command ("target record-btrace", from_tty
);
2910 catch (const gdb_exception
&exception
)
2912 record_btrace_conf
.format
= BTRACE_FORMAT_NONE
;
2917 /* Alias for "target record". */
2920 cmd_record_btrace_start (const char *args
, int from_tty
)
2922 if (args
!= NULL
&& *args
!= 0)
2923 error (_("Invalid argument."));
2925 record_btrace_conf
.format
= BTRACE_FORMAT_PT
;
2929 execute_command ("target record-btrace", from_tty
);
2931 catch (const gdb_exception
&exception
)
2933 record_btrace_conf
.format
= BTRACE_FORMAT_BTS
;
2937 execute_command ("target record-btrace", from_tty
);
2939 catch (const gdb_exception
&ex
)
2941 record_btrace_conf
.format
= BTRACE_FORMAT_NONE
;
2947 /* The "set record btrace" command. */
2950 cmd_set_record_btrace (const char *args
, int from_tty
)
2952 printf_unfiltered (_("\"set record btrace\" must be followed "
2953 "by an appropriate subcommand.\n"));
2954 help_list (set_record_btrace_cmdlist
, "set record btrace ",
2955 all_commands
, gdb_stdout
);
2958 /* The "show record btrace" command. */
2961 cmd_show_record_btrace (const char *args
, int from_tty
)
2963 cmd_show_list (show_record_btrace_cmdlist
, from_tty
, "");
2966 /* The "show record btrace replay-memory-access" command. */
2969 cmd_show_replay_memory_access (struct ui_file
*file
, int from_tty
,
2970 struct cmd_list_element
*c
, const char *value
)
2972 fprintf_filtered (gdb_stdout
, _("Replay memory access is %s.\n"),
2973 replay_memory_access
);
2976 /* The "set record btrace cpu none" command. */
2979 cmd_set_record_btrace_cpu_none (const char *args
, int from_tty
)
2981 if (args
!= nullptr && *args
!= 0)
2982 error (_("Trailing junk: '%s'."), args
);
2984 record_btrace_cpu_state
= CS_NONE
;
2987 /* The "set record btrace cpu auto" command. */
2990 cmd_set_record_btrace_cpu_auto (const char *args
, int from_tty
)
2992 if (args
!= nullptr && *args
!= 0)
2993 error (_("Trailing junk: '%s'."), args
);
2995 record_btrace_cpu_state
= CS_AUTO
;
2998 /* The "set record btrace cpu" command. */
3001 cmd_set_record_btrace_cpu (const char *args
, int from_tty
)
3003 if (args
== nullptr)
3006 /* We use a hard-coded vendor string for now. */
3007 unsigned int family
, model
, stepping
;
3008 int l1
, l2
, matches
= sscanf (args
, "intel: %u/%u%n/%u%n", &family
,
3009 &model
, &l1
, &stepping
, &l2
);
3012 if (strlen (args
) != l2
)
3013 error (_("Trailing junk: '%s'."), args
+ l2
);
3015 else if (matches
== 2)
3017 if (strlen (args
) != l1
)
3018 error (_("Trailing junk: '%s'."), args
+ l1
);
3023 error (_("Bad format. See \"help set record btrace cpu\"."));
3025 if (USHRT_MAX
< family
)
3026 error (_("Cpu family too big."));
3028 if (UCHAR_MAX
< model
)
3029 error (_("Cpu model too big."));
3031 if (UCHAR_MAX
< stepping
)
3032 error (_("Cpu stepping too big."));
3034 record_btrace_cpu
.vendor
= CV_INTEL
;
3035 record_btrace_cpu
.family
= family
;
3036 record_btrace_cpu
.model
= model
;
3037 record_btrace_cpu
.stepping
= stepping
;
3039 record_btrace_cpu_state
= CS_CPU
;
3042 /* The "show record btrace cpu" command. */
3045 cmd_show_record_btrace_cpu (const char *args
, int from_tty
)
3047 if (args
!= nullptr && *args
!= 0)
3048 error (_("Trailing junk: '%s'."), args
);
3050 switch (record_btrace_cpu_state
)
3053 printf_unfiltered (_("btrace cpu is 'auto'.\n"));
3057 printf_unfiltered (_("btrace cpu is 'none'.\n"));
3061 switch (record_btrace_cpu
.vendor
)
3064 if (record_btrace_cpu
.stepping
== 0)
3065 printf_unfiltered (_("btrace cpu is 'intel: %u/%u'.\n"),
3066 record_btrace_cpu
.family
,
3067 record_btrace_cpu
.model
);
3069 printf_unfiltered (_("btrace cpu is 'intel: %u/%u/%u'.\n"),
3070 record_btrace_cpu
.family
,
3071 record_btrace_cpu
.model
,
3072 record_btrace_cpu
.stepping
);
3077 error (_("Internal error: bad cpu state."));
3080 /* The "s record btrace bts" command. */
3083 cmd_set_record_btrace_bts (const char *args
, int from_tty
)
3085 printf_unfiltered (_("\"set record btrace bts\" must be followed "
3086 "by an appropriate subcommand.\n"));
3087 help_list (set_record_btrace_bts_cmdlist
, "set record btrace bts ",
3088 all_commands
, gdb_stdout
);
3091 /* The "show record btrace bts" command. */
3094 cmd_show_record_btrace_bts (const char *args
, int from_tty
)
3096 cmd_show_list (show_record_btrace_bts_cmdlist
, from_tty
, "");
3099 /* The "set record btrace pt" command. */
3102 cmd_set_record_btrace_pt (const char *args
, int from_tty
)
3104 printf_unfiltered (_("\"set record btrace pt\" must be followed "
3105 "by an appropriate subcommand.\n"));
3106 help_list (set_record_btrace_pt_cmdlist
, "set record btrace pt ",
3107 all_commands
, gdb_stdout
);
3110 /* The "show record btrace pt" command. */
3113 cmd_show_record_btrace_pt (const char *args
, int from_tty
)
3115 cmd_show_list (show_record_btrace_pt_cmdlist
, from_tty
, "");
3118 /* The "record bts buffer-size" show value function. */
3121 show_record_bts_buffer_size_value (struct ui_file
*file
, int from_tty
,
3122 struct cmd_list_element
*c
,
3125 fprintf_filtered (file
, _("The record/replay bts buffer size is %s.\n"),
3129 /* The "record pt buffer-size" show value function. */
3132 show_record_pt_buffer_size_value (struct ui_file
*file
, int from_tty
,
3133 struct cmd_list_element
*c
,
3136 fprintf_filtered (file
, _("The record/replay pt buffer size is %s.\n"),
3140 /* Initialize btrace commands. */
3143 _initialize_record_btrace (void)
3145 add_prefix_cmd ("btrace", class_obscure
, cmd_record_btrace_start
,
3146 _("Start branch trace recording."), &record_btrace_cmdlist
,
3147 "record btrace ", 0, &record_cmdlist
);
3148 add_alias_cmd ("b", "btrace", class_obscure
, 1, &record_cmdlist
);
3150 add_cmd ("bts", class_obscure
, cmd_record_btrace_bts_start
,
3152 Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
3153 The processor stores a from/to record for each branch into a cyclic buffer.\n\
3154 This format may not be available on all processors."),
3155 &record_btrace_cmdlist
);
3156 add_alias_cmd ("bts", "btrace bts", class_obscure
, 1, &record_cmdlist
);
3158 add_cmd ("pt", class_obscure
, cmd_record_btrace_pt_start
,
3160 Start branch trace recording in Intel Processor Trace format.\n\n\
3161 This format may not be available on all processors."),
3162 &record_btrace_cmdlist
);
3163 add_alias_cmd ("pt", "btrace pt", class_obscure
, 1, &record_cmdlist
);
3165 add_prefix_cmd ("btrace", class_support
, cmd_set_record_btrace
,
3166 _("Set record options."), &set_record_btrace_cmdlist
,
3167 "set record btrace ", 0, &set_record_cmdlist
);
3169 add_prefix_cmd ("btrace", class_support
, cmd_show_record_btrace
,
3170 _("Show record options."), &show_record_btrace_cmdlist
,
3171 "show record btrace ", 0, &show_record_cmdlist
);
3173 add_setshow_enum_cmd ("replay-memory-access", no_class
,
3174 replay_memory_access_types
, &replay_memory_access
, _("\
3175 Set what memory accesses are allowed during replay."), _("\
3176 Show what memory accesses are allowed during replay."),
3177 _("Default is READ-ONLY.\n\n\
3178 The btrace record target does not trace data.\n\
3179 The memory therefore corresponds to the live target and not \
3180 to the current replay position.\n\n\
3181 When READ-ONLY, allow accesses to read-only memory during replay.\n\
3182 When READ-WRITE, allow accesses to read-only and read-write memory during \
3184 NULL
, cmd_show_replay_memory_access
,
3185 &set_record_btrace_cmdlist
,
3186 &show_record_btrace_cmdlist
);
3188 add_prefix_cmd ("cpu", class_support
, cmd_set_record_btrace_cpu
,
3190 Set the cpu to be used for trace decode.\n\n\
3191 The format is \"VENDOR:IDENTIFIER\" or \"none\" or \"auto\" (default).\n\
3192 For vendor \"intel\" the format is \"FAMILY/MODEL[/STEPPING]\".\n\n\
3193 When decoding branch trace, enable errata workarounds for the specified cpu.\n\
3194 The default is \"auto\", which uses the cpu on which the trace was recorded.\n\
3195 When GDB does not support that cpu, this option can be used to enable\n\
3196 workarounds for a similar cpu that GDB supports.\n\n\
3197 When set to \"none\", errata workarounds are disabled."),
3198 &set_record_btrace_cpu_cmdlist
,
3199 "set record btrace cpu ", 1,
3200 &set_record_btrace_cmdlist
);
3202 add_cmd ("auto", class_support
, cmd_set_record_btrace_cpu_auto
, _("\
3203 Automatically determine the cpu to be used for trace decode."),
3204 &set_record_btrace_cpu_cmdlist
);
3206 add_cmd ("none", class_support
, cmd_set_record_btrace_cpu_none
, _("\
3207 Do not enable errata workarounds for trace decode."),
3208 &set_record_btrace_cpu_cmdlist
);
3210 add_cmd ("cpu", class_support
, cmd_show_record_btrace_cpu
, _("\
3211 Show the cpu to be used for trace decode."),
3212 &show_record_btrace_cmdlist
);
3214 add_prefix_cmd ("bts", class_support
, cmd_set_record_btrace_bts
,
3215 _("Set record btrace bts options."),
3216 &set_record_btrace_bts_cmdlist
,
3217 "set record btrace bts ", 0, &set_record_btrace_cmdlist
);
3219 add_prefix_cmd ("bts", class_support
, cmd_show_record_btrace_bts
,
3220 _("Show record btrace bts options."),
3221 &show_record_btrace_bts_cmdlist
,
3222 "show record btrace bts ", 0, &show_record_btrace_cmdlist
);
3224 add_setshow_uinteger_cmd ("buffer-size", no_class
,
3225 &record_btrace_conf
.bts
.size
,
3226 _("Set the record/replay bts buffer size."),
3227 _("Show the record/replay bts buffer size."), _("\
3228 When starting recording request a trace buffer of this size. \
3229 The actual buffer size may differ from the requested size. \
3230 Use \"info record\" to see the actual buffer size.\n\n\
3231 Bigger buffers allow longer recording but also take more time to process \
3232 the recorded execution trace.\n\n\
3233 The trace buffer size may not be changed while recording."), NULL
,
3234 show_record_bts_buffer_size_value
,
3235 &set_record_btrace_bts_cmdlist
,
3236 &show_record_btrace_bts_cmdlist
);
3238 add_prefix_cmd ("pt", class_support
, cmd_set_record_btrace_pt
,
3239 _("Set record btrace pt options."),
3240 &set_record_btrace_pt_cmdlist
,
3241 "set record btrace pt ", 0, &set_record_btrace_cmdlist
);
3243 add_prefix_cmd ("pt", class_support
, cmd_show_record_btrace_pt
,
3244 _("Show record btrace pt options."),
3245 &show_record_btrace_pt_cmdlist
,
3246 "show record btrace pt ", 0, &show_record_btrace_cmdlist
);
3248 add_setshow_uinteger_cmd ("buffer-size", no_class
,
3249 &record_btrace_conf
.pt
.size
,
3250 _("Set the record/replay pt buffer size."),
3251 _("Show the record/replay pt buffer size."), _("\
3252 Bigger buffers allow longer recording but also take more time to process \
3253 the recorded execution.\n\
3254 The actual buffer size may differ from the requested size. Use \"info record\" \
3255 to see the actual buffer size."), NULL
, show_record_pt_buffer_size_value
,
3256 &set_record_btrace_pt_cmdlist
,
3257 &show_record_btrace_pt_cmdlist
);
3259 add_target (record_btrace_target_info
, record_btrace_target_open
);
3261 bfcache
= htab_create_alloc (50, bfcache_hash
, bfcache_eq
, NULL
,
3264 record_btrace_conf
.bts
.size
= 64 * 1024;
3265 record_btrace_conf
.pt
.size
= 16 * 1024;