1 /* Branch trace support for GDB, the GNU debugger.
3 Copyright (C) 2013-2014 Free Software Foundation, Inc.
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "gdbthread.h"
29 #include "exceptions.h"
30 #include "cli/cli-utils.h"
34 #include "filenames.h"
36 #include "frame-unwind.h"
39 #include "event-loop.h"
42 /* The target_ops of record-btrace. */
43 static struct target_ops record_btrace_ops
;
45 /* A new thread observer enabling branch tracing for the new thread. */
46 static struct observer
*record_btrace_thread_observer
;
48 /* Memory access types used in set/show record btrace replay-memory-access. */
49 static const char replay_memory_access_read_only
[] = "read-only";
50 static const char replay_memory_access_read_write
[] = "read-write";
51 static const char *const replay_memory_access_types
[] =
53 replay_memory_access_read_only
,
54 replay_memory_access_read_write
,
58 /* The currently allowed replay memory access type. */
59 static const char *replay_memory_access
= replay_memory_access_read_only
;
61 /* Command lists for "set/show record btrace". */
62 static struct cmd_list_element
*set_record_btrace_cmdlist
;
63 static struct cmd_list_element
*show_record_btrace_cmdlist
;
65 /* The execution direction of the last resume we got. See record-full.c. */
66 static enum exec_direction_kind record_btrace_resume_exec_dir
= EXEC_FORWARD
;
68 /* The async event handler for reverse/replay execution. */
69 static struct async_event_handler
*record_btrace_async_inferior_event_handler
;
/* Print a record-btrace debug message.  Use do ... while (0) to avoid
   ambiguities when used in if statements.  */

#define DEBUG(msg, args...)						\
  do									\
    {									\
      if (record_debug != 0)						\
	fprintf_unfiltered (gdb_stdlog,					\
			    "[record-btrace] " msg "\n", ##args);	\
    }									\
  while (0)
84 /* Update the branch trace for the current thread and return a pointer to its
87 Throws an error if there is no thread or no trace. This function never
90 static struct thread_info
*
91 require_btrace_thread (void)
93 struct thread_info
*tp
;
97 tp
= find_thread_ptid (inferior_ptid
);
99 error (_("No thread."));
103 if (btrace_is_empty (tp
))
104 error (_("No trace."));
109 /* Update the branch trace for the current thread and return a pointer to its
110 branch trace information struct.
112 Throws an error if there is no thread or no trace. This function never
115 static struct btrace_thread_info
*
116 require_btrace (void)
118 struct thread_info
*tp
;
120 tp
= require_btrace_thread ();
125 /* Enable branch tracing for one thread. Warn on errors. */
128 record_btrace_enable_warn (struct thread_info
*tp
)
130 volatile struct gdb_exception error
;
132 TRY_CATCH (error
, RETURN_MASK_ERROR
)
135 if (error
.message
!= NULL
)
136 warning ("%s", error
.message
);
/* Callback function to disable branch tracing for one thread.  */

static void
record_btrace_disable_callback (void *arg)
{
  struct thread_info *tp;

  tp = arg;

  btrace_disable (tp);
}
151 /* Enable automatic tracing of new threads. */
154 record_btrace_auto_enable (void)
156 DEBUG ("attach thread observer");
158 record_btrace_thread_observer
159 = observer_attach_new_thread (record_btrace_enable_warn
);
162 /* Disable automatic tracing of new threads. */
165 record_btrace_auto_disable (void)
167 /* The observer may have been detached, already. */
168 if (record_btrace_thread_observer
== NULL
)
171 DEBUG ("detach thread observer");
173 observer_detach_new_thread (record_btrace_thread_observer
);
174 record_btrace_thread_observer
= NULL
;
177 /* The record-btrace async event handler function. */
180 record_btrace_handle_async_inferior_event (gdb_client_data data
)
182 inferior_event_handler (INF_REG_EVENT
, NULL
);
185 /* The to_open method of target record-btrace. */
188 record_btrace_open (char *args
, int from_tty
)
190 struct cleanup
*disable_chain
;
191 struct thread_info
*tp
;
197 if (!target_has_execution
)
198 error (_("The program is not being run."));
200 if (!target_supports_btrace ())
201 error (_("Target does not support branch tracing."));
204 error (_("Record btrace can't debug inferior in non-stop mode."));
206 gdb_assert (record_btrace_thread_observer
== NULL
);
208 disable_chain
= make_cleanup (null_cleanup
, NULL
);
210 if (args
== NULL
|| *args
== 0 || number_is_in_list (args
, tp
->num
))
214 make_cleanup (record_btrace_disable_callback
, tp
);
217 record_btrace_auto_enable ();
219 push_target (&record_btrace_ops
);
221 record_btrace_async_inferior_event_handler
222 = create_async_event_handler (record_btrace_handle_async_inferior_event
,
225 observer_notify_record_changed (current_inferior (), 1);
227 discard_cleanups (disable_chain
);
230 /* The to_stop_recording method of target record-btrace. */
233 record_btrace_stop_recording (struct target_ops
*self
)
235 struct thread_info
*tp
;
237 DEBUG ("stop recording");
239 record_btrace_auto_disable ();
242 if (tp
->btrace
.target
!= NULL
)
246 /* The to_close method of target record-btrace. */
249 record_btrace_close (struct target_ops
*self
)
251 struct thread_info
*tp
;
253 if (record_btrace_async_inferior_event_handler
!= NULL
)
254 delete_async_event_handler (&record_btrace_async_inferior_event_handler
);
256 /* Make sure automatic recording gets disabled even if we did not stop
257 recording before closing the record-btrace target. */
258 record_btrace_auto_disable ();
260 /* We should have already stopped recording.
261 Tear down btrace in case we have not. */
263 btrace_teardown (tp
);
266 /* The to_info_record method of target record-btrace. */
269 record_btrace_info (struct target_ops
*self
)
271 struct btrace_thread_info
*btinfo
;
272 struct thread_info
*tp
;
273 unsigned int insns
, calls
;
277 tp
= find_thread_ptid (inferior_ptid
);
279 error (_("No thread."));
286 btinfo
= &tp
->btrace
;
288 if (!btrace_is_empty (tp
))
290 struct btrace_call_iterator call
;
291 struct btrace_insn_iterator insn
;
293 btrace_call_end (&call
, btinfo
);
294 btrace_call_prev (&call
, 1);
295 calls
= btrace_call_number (&call
);
297 btrace_insn_end (&insn
, btinfo
);
298 btrace_insn_prev (&insn
, 1);
299 insns
= btrace_insn_number (&insn
);
302 printf_unfiltered (_("Recorded %u instructions in %u functions for thread "
303 "%d (%s).\n"), insns
, calls
, tp
->num
,
304 target_pid_to_str (tp
->ptid
));
306 if (btrace_is_replaying (tp
))
307 printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
308 btrace_insn_number (btinfo
->replay
));
/* Print an unsigned int.  */

static void
ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
{
  ui_out_field_fmt (uiout, fld, "%u", val);
}
319 /* Disassemble a section of the recorded instruction trace. */
322 btrace_insn_history (struct ui_out
*uiout
,
323 const struct btrace_insn_iterator
*begin
,
324 const struct btrace_insn_iterator
*end
, int flags
)
326 struct gdbarch
*gdbarch
;
327 struct btrace_insn_iterator it
;
329 DEBUG ("itrace (0x%x): [%u; %u)", flags
, btrace_insn_number (begin
),
330 btrace_insn_number (end
));
332 gdbarch
= target_gdbarch ();
334 for (it
= *begin
; btrace_insn_cmp (&it
, end
) != 0; btrace_insn_next (&it
, 1))
336 const struct btrace_insn
*insn
;
338 insn
= btrace_insn_get (&it
);
340 /* Print the instruction index. */
341 ui_out_field_uint (uiout
, "index", btrace_insn_number (&it
));
342 ui_out_text (uiout
, "\t");
344 /* Disassembly with '/m' flag may not produce the expected result.
346 gdb_disassembly (gdbarch
, uiout
, NULL
, flags
, 1, insn
->pc
, insn
->pc
+ 1);
350 /* The to_insn_history method of target record-btrace. */
353 record_btrace_insn_history (struct target_ops
*self
, int size
, int flags
)
355 struct btrace_thread_info
*btinfo
;
356 struct btrace_insn_history
*history
;
357 struct btrace_insn_iterator begin
, end
;
358 struct cleanup
*uiout_cleanup
;
359 struct ui_out
*uiout
;
360 unsigned int context
, covered
;
362 uiout
= current_uiout
;
363 uiout_cleanup
= make_cleanup_ui_out_tuple_begin_end (uiout
,
365 context
= abs (size
);
367 error (_("Bad record instruction-history-size."));
369 btinfo
= require_btrace ();
370 history
= btinfo
->insn_history
;
373 struct btrace_insn_iterator
*replay
;
375 DEBUG ("insn-history (0x%x): %d", flags
, size
);
377 /* If we're replaying, we start at the replay position. Otherwise, we
378 start at the tail of the trace. */
379 replay
= btinfo
->replay
;
383 btrace_insn_end (&begin
, btinfo
);
385 /* We start from here and expand in the requested direction. Then we
386 expand in the other direction, as well, to fill up any remaining
391 /* We want the current position covered, as well. */
392 covered
= btrace_insn_next (&end
, 1);
393 covered
+= btrace_insn_prev (&begin
, context
- covered
);
394 covered
+= btrace_insn_next (&end
, context
- covered
);
398 covered
= btrace_insn_next (&end
, context
);
399 covered
+= btrace_insn_prev (&begin
, context
- covered
);
404 begin
= history
->begin
;
407 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags
, size
,
408 btrace_insn_number (&begin
), btrace_insn_number (&end
));
413 covered
= btrace_insn_prev (&begin
, context
);
418 covered
= btrace_insn_next (&end
, context
);
423 btrace_insn_history (uiout
, &begin
, &end
, flags
);
427 printf_unfiltered (_("At the start of the branch trace record.\n"));
429 printf_unfiltered (_("At the end of the branch trace record.\n"));
432 btrace_set_insn_history (btinfo
, &begin
, &end
);
433 do_cleanups (uiout_cleanup
);
436 /* The to_insn_history_range method of target record-btrace. */
439 record_btrace_insn_history_range (struct target_ops
*self
,
440 ULONGEST from
, ULONGEST to
, int flags
)
442 struct btrace_thread_info
*btinfo
;
443 struct btrace_insn_history
*history
;
444 struct btrace_insn_iterator begin
, end
;
445 struct cleanup
*uiout_cleanup
;
446 struct ui_out
*uiout
;
447 unsigned int low
, high
;
450 uiout
= current_uiout
;
451 uiout_cleanup
= make_cleanup_ui_out_tuple_begin_end (uiout
,
456 DEBUG ("insn-history (0x%x): [%u; %u)", flags
, low
, high
);
458 /* Check for wrap-arounds. */
459 if (low
!= from
|| high
!= to
)
460 error (_("Bad range."));
463 error (_("Bad range."));
465 btinfo
= require_btrace ();
467 found
= btrace_find_insn_by_number (&begin
, btinfo
, low
);
469 error (_("Range out of bounds."));
471 found
= btrace_find_insn_by_number (&end
, btinfo
, high
);
474 /* Silently truncate the range. */
475 btrace_insn_end (&end
, btinfo
);
479 /* We want both begin and end to be inclusive. */
480 btrace_insn_next (&end
, 1);
483 btrace_insn_history (uiout
, &begin
, &end
, flags
);
484 btrace_set_insn_history (btinfo
, &begin
, &end
);
486 do_cleanups (uiout_cleanup
);
489 /* The to_insn_history_from method of target record-btrace. */
492 record_btrace_insn_history_from (struct target_ops
*self
,
493 ULONGEST from
, int size
, int flags
)
495 ULONGEST begin
, end
, context
;
497 context
= abs (size
);
499 error (_("Bad record instruction-history-size."));
508 begin
= from
- context
+ 1;
513 end
= from
+ context
- 1;
515 /* Check for wrap-around. */
520 record_btrace_insn_history_range (self
, begin
, end
, flags
);
523 /* Print the instruction number range for a function call history line. */
526 btrace_call_history_insn_range (struct ui_out
*uiout
,
527 const struct btrace_function
*bfun
)
529 unsigned int begin
, end
, size
;
531 size
= VEC_length (btrace_insn_s
, bfun
->insn
);
532 gdb_assert (size
> 0);
534 begin
= bfun
->insn_offset
;
535 end
= begin
+ size
- 1;
537 ui_out_field_uint (uiout
, "insn begin", begin
);
538 ui_out_text (uiout
, ",");
539 ui_out_field_uint (uiout
, "insn end", end
);
542 /* Print the source line information for a function call history line. */
545 btrace_call_history_src_line (struct ui_out
*uiout
,
546 const struct btrace_function
*bfun
)
555 ui_out_field_string (uiout
, "file",
556 symtab_to_filename_for_display (sym
->symtab
));
558 begin
= bfun
->lbegin
;
564 ui_out_text (uiout
, ":");
565 ui_out_field_int (uiout
, "min line", begin
);
570 ui_out_text (uiout
, ",");
571 ui_out_field_int (uiout
, "max line", end
);
574 /* Get the name of a branch trace function. */
577 btrace_get_bfun_name (const struct btrace_function
*bfun
)
579 struct minimal_symbol
*msym
;
589 return SYMBOL_PRINT_NAME (sym
);
590 else if (msym
!= NULL
)
591 return MSYMBOL_PRINT_NAME (msym
);
596 /* Disassemble a section of the recorded function trace. */
599 btrace_call_history (struct ui_out
*uiout
,
600 const struct btrace_thread_info
*btinfo
,
601 const struct btrace_call_iterator
*begin
,
602 const struct btrace_call_iterator
*end
,
603 enum record_print_flag flags
)
605 struct btrace_call_iterator it
;
607 DEBUG ("ftrace (0x%x): [%u; %u)", flags
, btrace_call_number (begin
),
608 btrace_call_number (end
));
610 for (it
= *begin
; btrace_call_cmp (&it
, end
) < 0; btrace_call_next (&it
, 1))
612 const struct btrace_function
*bfun
;
613 struct minimal_symbol
*msym
;
616 bfun
= btrace_call_get (&it
);
620 /* Print the function index. */
621 ui_out_field_uint (uiout
, "index", bfun
->number
);
622 ui_out_text (uiout
, "\t");
624 if ((flags
& RECORD_PRINT_INDENT_CALLS
) != 0)
626 int level
= bfun
->level
+ btinfo
->level
, i
;
628 for (i
= 0; i
< level
; ++i
)
629 ui_out_text (uiout
, " ");
633 ui_out_field_string (uiout
, "function", SYMBOL_PRINT_NAME (sym
));
634 else if (msym
!= NULL
)
635 ui_out_field_string (uiout
, "function", MSYMBOL_PRINT_NAME (msym
));
636 else if (!ui_out_is_mi_like_p (uiout
))
637 ui_out_field_string (uiout
, "function", "??");
639 if ((flags
& RECORD_PRINT_INSN_RANGE
) != 0)
641 ui_out_text (uiout
, _("\tinst "));
642 btrace_call_history_insn_range (uiout
, bfun
);
645 if ((flags
& RECORD_PRINT_SRC_LINE
) != 0)
647 ui_out_text (uiout
, _("\tat "));
648 btrace_call_history_src_line (uiout
, bfun
);
651 ui_out_text (uiout
, "\n");
655 /* The to_call_history method of target record-btrace. */
658 record_btrace_call_history (struct target_ops
*self
, int size
, int flags
)
660 struct btrace_thread_info
*btinfo
;
661 struct btrace_call_history
*history
;
662 struct btrace_call_iterator begin
, end
;
663 struct cleanup
*uiout_cleanup
;
664 struct ui_out
*uiout
;
665 unsigned int context
, covered
;
667 uiout
= current_uiout
;
668 uiout_cleanup
= make_cleanup_ui_out_tuple_begin_end (uiout
,
670 context
= abs (size
);
672 error (_("Bad record function-call-history-size."));
674 btinfo
= require_btrace ();
675 history
= btinfo
->call_history
;
678 struct btrace_insn_iterator
*replay
;
680 DEBUG ("call-history (0x%x): %d", flags
, size
);
682 /* If we're replaying, we start at the replay position. Otherwise, we
683 start at the tail of the trace. */
684 replay
= btinfo
->replay
;
687 begin
.function
= replay
->function
;
688 begin
.btinfo
= btinfo
;
691 btrace_call_end (&begin
, btinfo
);
693 /* We start from here and expand in the requested direction. Then we
694 expand in the other direction, as well, to fill up any remaining
699 /* We want the current position covered, as well. */
700 covered
= btrace_call_next (&end
, 1);
701 covered
+= btrace_call_prev (&begin
, context
- covered
);
702 covered
+= btrace_call_next (&end
, context
- covered
);
706 covered
= btrace_call_next (&end
, context
);
707 covered
+= btrace_call_prev (&begin
, context
- covered
);
712 begin
= history
->begin
;
715 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", flags
, size
,
716 btrace_call_number (&begin
), btrace_call_number (&end
));
721 covered
= btrace_call_prev (&begin
, context
);
726 covered
= btrace_call_next (&end
, context
);
731 btrace_call_history (uiout
, btinfo
, &begin
, &end
, flags
);
735 printf_unfiltered (_("At the start of the branch trace record.\n"));
737 printf_unfiltered (_("At the end of the branch trace record.\n"));
740 btrace_set_call_history (btinfo
, &begin
, &end
);
741 do_cleanups (uiout_cleanup
);
744 /* The to_call_history_range method of target record-btrace. */
747 record_btrace_call_history_range (struct target_ops
*self
,
748 ULONGEST from
, ULONGEST to
, int flags
)
750 struct btrace_thread_info
*btinfo
;
751 struct btrace_call_history
*history
;
752 struct btrace_call_iterator begin
, end
;
753 struct cleanup
*uiout_cleanup
;
754 struct ui_out
*uiout
;
755 unsigned int low
, high
;
758 uiout
= current_uiout
;
759 uiout_cleanup
= make_cleanup_ui_out_tuple_begin_end (uiout
,
764 DEBUG ("call-history (0x%x): [%u; %u)", flags
, low
, high
);
766 /* Check for wrap-arounds. */
767 if (low
!= from
|| high
!= to
)
768 error (_("Bad range."));
771 error (_("Bad range."));
773 btinfo
= require_btrace ();
775 found
= btrace_find_call_by_number (&begin
, btinfo
, low
);
777 error (_("Range out of bounds."));
779 found
= btrace_find_call_by_number (&end
, btinfo
, high
);
782 /* Silently truncate the range. */
783 btrace_call_end (&end
, btinfo
);
787 /* We want both begin and end to be inclusive. */
788 btrace_call_next (&end
, 1);
791 btrace_call_history (uiout
, btinfo
, &begin
, &end
, flags
);
792 btrace_set_call_history (btinfo
, &begin
, &end
);
794 do_cleanups (uiout_cleanup
);
797 /* The to_call_history_from method of target record-btrace. */
800 record_btrace_call_history_from (struct target_ops
*self
,
801 ULONGEST from
, int size
, int flags
)
803 ULONGEST begin
, end
, context
;
805 context
= abs (size
);
807 error (_("Bad record function-call-history-size."));
816 begin
= from
- context
+ 1;
821 end
= from
+ context
- 1;
823 /* Check for wrap-around. */
828 record_btrace_call_history_range (self
, begin
, end
, flags
);
831 /* The to_record_is_replaying method of target record-btrace. */
834 record_btrace_is_replaying (struct target_ops
*self
)
836 struct thread_info
*tp
;
839 if (btrace_is_replaying (tp
))
845 /* The to_xfer_partial method of target record-btrace. */
847 static enum target_xfer_status
848 record_btrace_xfer_partial (struct target_ops
*ops
, enum target_object object
,
849 const char *annex
, gdb_byte
*readbuf
,
850 const gdb_byte
*writebuf
, ULONGEST offset
,
851 ULONGEST len
, ULONGEST
*xfered_len
)
853 struct target_ops
*t
;
855 /* Filter out requests that don't make sense during replay. */
856 if (replay_memory_access
== replay_memory_access_read_only
857 && record_btrace_is_replaying (ops
))
861 case TARGET_OBJECT_MEMORY
:
863 struct target_section
*section
;
865 /* We do not allow writing memory in general. */
866 if (writebuf
!= NULL
)
869 return TARGET_XFER_UNAVAILABLE
;
872 /* We allow reading readonly memory. */
873 section
= target_section_by_addr (ops
, offset
);
876 /* Check if the section we found is readonly. */
877 if ((bfd_get_section_flags (section
->the_bfd_section
->owner
,
878 section
->the_bfd_section
)
879 & SEC_READONLY
) != 0)
881 /* Truncate the request to fit into this section. */
882 len
= min (len
, section
->endaddr
- offset
);
888 return TARGET_XFER_UNAVAILABLE
;
893 /* Forward the request. */
894 for (ops
= ops
->beneath
; ops
!= NULL
; ops
= ops
->beneath
)
895 if (ops
->to_xfer_partial
!= NULL
)
896 return ops
->to_xfer_partial (ops
, object
, annex
, readbuf
, writebuf
,
897 offset
, len
, xfered_len
);
900 return TARGET_XFER_UNAVAILABLE
;
903 /* The to_insert_breakpoint method of target record-btrace. */
906 record_btrace_insert_breakpoint (struct target_ops
*ops
,
907 struct gdbarch
*gdbarch
,
908 struct bp_target_info
*bp_tgt
)
910 volatile struct gdb_exception except
;
914 /* Inserting breakpoints requires accessing memory. Allow it for the
915 duration of this function. */
916 old
= replay_memory_access
;
917 replay_memory_access
= replay_memory_access_read_write
;
920 TRY_CATCH (except
, RETURN_MASK_ALL
)
921 ret
= ops
->beneath
->to_insert_breakpoint (ops
->beneath
, gdbarch
, bp_tgt
);
923 replay_memory_access
= old
;
925 if (except
.reason
< 0)
926 throw_exception (except
);
931 /* The to_remove_breakpoint method of target record-btrace. */
934 record_btrace_remove_breakpoint (struct target_ops
*ops
,
935 struct gdbarch
*gdbarch
,
936 struct bp_target_info
*bp_tgt
)
938 volatile struct gdb_exception except
;
942 /* Removing breakpoints requires accessing memory. Allow it for the
943 duration of this function. */
944 old
= replay_memory_access
;
945 replay_memory_access
= replay_memory_access_read_write
;
948 TRY_CATCH (except
, RETURN_MASK_ALL
)
949 ret
= ops
->beneath
->to_remove_breakpoint (ops
->beneath
, gdbarch
, bp_tgt
);
951 replay_memory_access
= old
;
953 if (except
.reason
< 0)
954 throw_exception (except
);
959 /* The to_fetch_registers method of target record-btrace. */
962 record_btrace_fetch_registers (struct target_ops
*ops
,
963 struct regcache
*regcache
, int regno
)
965 struct btrace_insn_iterator
*replay
;
966 struct thread_info
*tp
;
968 tp
= find_thread_ptid (inferior_ptid
);
969 gdb_assert (tp
!= NULL
);
971 replay
= tp
->btrace
.replay
;
974 const struct btrace_insn
*insn
;
975 struct gdbarch
*gdbarch
;
978 gdbarch
= get_regcache_arch (regcache
);
979 pcreg
= gdbarch_pc_regnum (gdbarch
);
983 /* We can only provide the PC register. */
984 if (regno
>= 0 && regno
!= pcreg
)
987 insn
= btrace_insn_get (replay
);
988 gdb_assert (insn
!= NULL
);
990 regcache_raw_supply (regcache
, regno
, &insn
->pc
);
994 struct target_ops
*t
;
996 for (t
= ops
->beneath
; t
!= NULL
; t
= t
->beneath
)
997 if (t
->to_fetch_registers
!= NULL
)
999 t
->to_fetch_registers (t
, regcache
, regno
);
1005 /* The to_store_registers method of target record-btrace. */
1008 record_btrace_store_registers (struct target_ops
*ops
,
1009 struct regcache
*regcache
, int regno
)
1011 struct target_ops
*t
;
1013 if (record_btrace_is_replaying (ops
))
1014 error (_("This record target does not allow writing registers."));
1016 gdb_assert (may_write_registers
!= 0);
1018 for (t
= ops
->beneath
; t
!= NULL
; t
= t
->beneath
)
1019 if (t
->to_store_registers
!= NULL
)
1021 t
->to_store_registers (t
, regcache
, regno
);
1028 /* The to_prepare_to_store method of target record-btrace. */
1031 record_btrace_prepare_to_store (struct target_ops
*ops
,
1032 struct regcache
*regcache
)
1034 struct target_ops
*t
;
1036 if (record_btrace_is_replaying (ops
))
1039 for (t
= ops
->beneath
; t
!= NULL
; t
= t
->beneath
)
1040 if (t
->to_prepare_to_store
!= NULL
)
1042 t
->to_prepare_to_store (t
, regcache
);
1047 /* The branch trace frame cache. */
1049 struct btrace_frame_cache
1052 struct thread_info
*tp
;
1054 /* The frame info. */
1055 struct frame_info
*frame
;
1057 /* The branch trace function segment. */
1058 const struct btrace_function
*bfun
;
1061 /* A struct btrace_frame_cache hash table indexed by NEXT. */
1063 static htab_t bfcache
;
1065 /* hash_f for htab_create_alloc of bfcache. */
1068 bfcache_hash (const void *arg
)
1070 const struct btrace_frame_cache
*cache
= arg
;
1072 return htab_hash_pointer (cache
->frame
);
1075 /* eq_f for htab_create_alloc of bfcache. */
1078 bfcache_eq (const void *arg1
, const void *arg2
)
1080 const struct btrace_frame_cache
*cache1
= arg1
;
1081 const struct btrace_frame_cache
*cache2
= arg2
;
1083 return cache1
->frame
== cache2
->frame
;
1086 /* Create a new btrace frame cache. */
1088 static struct btrace_frame_cache
*
1089 bfcache_new (struct frame_info
*frame
)
1091 struct btrace_frame_cache
*cache
;
1094 cache
= FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache
);
1095 cache
->frame
= frame
;
1097 slot
= htab_find_slot (bfcache
, cache
, INSERT
);
1098 gdb_assert (*slot
== NULL
);
1104 /* Extract the branch trace function from a branch trace frame. */
1106 static const struct btrace_function
*
1107 btrace_get_frame_function (struct frame_info
*frame
)
1109 const struct btrace_frame_cache
*cache
;
1110 const struct btrace_function
*bfun
;
1111 struct btrace_frame_cache pattern
;
1114 pattern
.frame
= frame
;
1116 slot
= htab_find_slot (bfcache
, &pattern
, NO_INSERT
);
1124 /* Implement stop_reason method for record_btrace_frame_unwind. */
1126 static enum unwind_stop_reason
1127 record_btrace_frame_unwind_stop_reason (struct frame_info
*this_frame
,
1130 const struct btrace_frame_cache
*cache
;
1131 const struct btrace_function
*bfun
;
1133 cache
= *this_cache
;
1135 gdb_assert (bfun
!= NULL
);
1137 if (bfun
->up
== NULL
)
1138 return UNWIND_UNAVAILABLE
;
1140 return UNWIND_NO_REASON
;
1143 /* Implement this_id method for record_btrace_frame_unwind. */
1146 record_btrace_frame_this_id (struct frame_info
*this_frame
, void **this_cache
,
1147 struct frame_id
*this_id
)
1149 const struct btrace_frame_cache
*cache
;
1150 const struct btrace_function
*bfun
;
1151 CORE_ADDR code
, special
;
1153 cache
= *this_cache
;
1156 gdb_assert (bfun
!= NULL
);
1158 while (bfun
->segment
.prev
!= NULL
)
1159 bfun
= bfun
->segment
.prev
;
1161 code
= get_frame_func (this_frame
);
1162 special
= bfun
->number
;
1164 *this_id
= frame_id_build_unavailable_stack_special (code
, special
);
1166 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1167 btrace_get_bfun_name (cache
->bfun
),
1168 core_addr_to_string_nz (this_id
->code_addr
),
1169 core_addr_to_string_nz (this_id
->special_addr
));
1172 /* Implement prev_register method for record_btrace_frame_unwind. */
1174 static struct value
*
1175 record_btrace_frame_prev_register (struct frame_info
*this_frame
,
1179 const struct btrace_frame_cache
*cache
;
1180 const struct btrace_function
*bfun
, *caller
;
1181 const struct btrace_insn
*insn
;
1182 struct gdbarch
*gdbarch
;
1186 gdbarch
= get_frame_arch (this_frame
);
1187 pcreg
= gdbarch_pc_regnum (gdbarch
);
1188 if (pcreg
< 0 || regnum
!= pcreg
)
1189 throw_error (NOT_AVAILABLE_ERROR
,
1190 _("Registers are not available in btrace record history"));
1192 cache
= *this_cache
;
1194 gdb_assert (bfun
!= NULL
);
1198 throw_error (NOT_AVAILABLE_ERROR
,
1199 _("No caller in btrace record history"));
1201 if ((bfun
->flags
& BFUN_UP_LINKS_TO_RET
) != 0)
1203 insn
= VEC_index (btrace_insn_s
, caller
->insn
, 0);
1208 insn
= VEC_last (btrace_insn_s
, caller
->insn
);
1211 pc
+= gdb_insn_length (gdbarch
, pc
);
1214 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1215 btrace_get_bfun_name (bfun
), bfun
->level
,
1216 core_addr_to_string_nz (pc
));
1218 return frame_unwind_got_address (this_frame
, regnum
, pc
);
1221 /* Implement sniffer method for record_btrace_frame_unwind. */
1224 record_btrace_frame_sniffer (const struct frame_unwind
*self
,
1225 struct frame_info
*this_frame
,
1228 const struct btrace_function
*bfun
;
1229 struct btrace_frame_cache
*cache
;
1230 struct thread_info
*tp
;
1231 struct frame_info
*next
;
1233 /* THIS_FRAME does not contain a reference to its thread. */
1234 tp
= find_thread_ptid (inferior_ptid
);
1235 gdb_assert (tp
!= NULL
);
1238 next
= get_next_frame (this_frame
);
1241 const struct btrace_insn_iterator
*replay
;
1243 replay
= tp
->btrace
.replay
;
1245 bfun
= replay
->function
;
1249 const struct btrace_function
*callee
;
1251 callee
= btrace_get_frame_function (next
);
1252 if (callee
!= NULL
&& (callee
->flags
& BFUN_UP_LINKS_TO_TAILCALL
) == 0)
1259 DEBUG ("[frame] sniffed frame for %s on level %d",
1260 btrace_get_bfun_name (bfun
), bfun
->level
);
1262 /* This is our frame. Initialize the frame cache. */
1263 cache
= bfcache_new (this_frame
);
1267 *this_cache
= cache
;
1271 /* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1274 record_btrace_tailcall_frame_sniffer (const struct frame_unwind
*self
,
1275 struct frame_info
*this_frame
,
1278 const struct btrace_function
*bfun
, *callee
;
1279 struct btrace_frame_cache
*cache
;
1280 struct frame_info
*next
;
1282 next
= get_next_frame (this_frame
);
1286 callee
= btrace_get_frame_function (next
);
1290 if ((callee
->flags
& BFUN_UP_LINKS_TO_TAILCALL
) == 0)
1297 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1298 btrace_get_bfun_name (bfun
), bfun
->level
);
1300 /* This is our frame. Initialize the frame cache. */
1301 cache
= bfcache_new (this_frame
);
1302 cache
->tp
= find_thread_ptid (inferior_ptid
);
1305 *this_cache
= cache
;
1310 record_btrace_frame_dealloc_cache (struct frame_info
*self
, void *this_cache
)
1312 struct btrace_frame_cache
*cache
;
1317 slot
= htab_find_slot (bfcache
, cache
, NO_INSERT
);
1318 gdb_assert (slot
!= NULL
);
1320 htab_remove_elt (bfcache
, cache
);
1323 /* btrace recording does not store previous memory content, neither the stack
1324 frames content. Any unwinding would return errorneous results as the stack
1325 contents no longer matches the changed PC value restored from history.
1326 Therefore this unwinder reports any possibly unwound registers as
1329 const struct frame_unwind record_btrace_frame_unwind
=
1332 record_btrace_frame_unwind_stop_reason
,
1333 record_btrace_frame_this_id
,
1334 record_btrace_frame_prev_register
,
1336 record_btrace_frame_sniffer
,
1337 record_btrace_frame_dealloc_cache
1340 const struct frame_unwind record_btrace_tailcall_frame_unwind
=
1343 record_btrace_frame_unwind_stop_reason
,
1344 record_btrace_frame_this_id
,
1345 record_btrace_frame_prev_register
,
1347 record_btrace_tailcall_frame_sniffer
,
1348 record_btrace_frame_dealloc_cache
1351 /* Implement the to_get_unwinder method. */
1353 static const struct frame_unwind
*
1354 record_btrace_to_get_unwinder (struct target_ops
*self
)
1356 return &record_btrace_frame_unwind
;
1359 /* Implement the to_get_tailcall_unwinder method. */
1361 static const struct frame_unwind
*
1362 record_btrace_to_get_tailcall_unwinder (struct target_ops
*self
)
1364 return &record_btrace_tailcall_frame_unwind
;
1367 /* Indicate that TP should be resumed according to FLAG. */
1370 record_btrace_resume_thread (struct thread_info
*tp
,
1371 enum btrace_thread_flag flag
)
1373 struct btrace_thread_info
*btinfo
;
1375 DEBUG ("resuming %d (%s): %u", tp
->num
, target_pid_to_str (tp
->ptid
), flag
);
1377 btinfo
= &tp
->btrace
;
1379 if ((btinfo
->flags
& BTHR_MOVE
) != 0)
1380 error (_("Thread already moving."));
1382 /* Fetch the latest branch trace. */
1385 btinfo
->flags
|= flag
;
1388 /* Find the thread to resume given a PTID. */
1390 static struct thread_info
*
1391 record_btrace_find_resume_thread (ptid_t ptid
)
1393 struct thread_info
*tp
;
1395 /* When asked to resume everything, we pick the current thread. */
1396 if (ptid_equal (minus_one_ptid
, ptid
) || ptid_is_pid (ptid
))
1397 ptid
= inferior_ptid
;
1399 return find_thread_ptid (ptid
);
1402 /* Start replaying a thread. */
1404 static struct btrace_insn_iterator
*
1405 record_btrace_start_replaying (struct thread_info
*tp
)
1407 volatile struct gdb_exception except
;
1408 struct btrace_insn_iterator
*replay
;
1409 struct btrace_thread_info
*btinfo
;
1412 btinfo
= &tp
->btrace
;
1415 /* We can't start replaying without trace. */
1416 if (btinfo
->begin
== NULL
)
1419 /* Clear the executing flag to allow changes to the current frame.
1420 We are not actually running, yet. We just started a reverse execution
1421 command or a record goto command.
1422 For the latter, EXECUTING is false and this has no effect.
1423 For the former, EXECUTING is true and we're in to_wait, about to
1424 move the thread. Since we need to recompute the stack, we temporarily
1425 set EXECUTING to flase. */
1426 executing
= is_executing (tp
->ptid
);
1427 set_executing (tp
->ptid
, 0);
1429 /* GDB stores the current frame_id when stepping in order to detects steps
1431 Since frames are computed differently when we're replaying, we need to
1432 recompute those stored frames and fix them up so we can still detect
1433 subroutines after we started replaying. */
1434 TRY_CATCH (except
, RETURN_MASK_ALL
)
1436 struct frame_info
*frame
;
1437 struct frame_id frame_id
;
1438 int upd_step_frame_id
, upd_step_stack_frame_id
;
1440 /* The current frame without replaying - computed via normal unwind. */
1441 frame
= get_current_frame ();
1442 frame_id
= get_frame_id (frame
);
1444 /* Check if we need to update any stepping-related frame id's. */
1445 upd_step_frame_id
= frame_id_eq (frame_id
,
1446 tp
->control
.step_frame_id
);
1447 upd_step_stack_frame_id
= frame_id_eq (frame_id
,
1448 tp
->control
.step_stack_frame_id
);
1450 /* We start replaying at the end of the branch trace. This corresponds
1451 to the current instruction. */
1452 replay
= xmalloc (sizeof (*replay
));
1453 btrace_insn_end (replay
, btinfo
);
1455 /* We're not replaying, yet. */
1456 gdb_assert (btinfo
->replay
== NULL
);
1457 btinfo
->replay
= replay
;
1459 /* Make sure we're not using any stale registers. */
1460 registers_changed_ptid (tp
->ptid
);
1462 /* The current frame with replaying - computed via btrace unwind. */
1463 frame
= get_current_frame ();
1464 frame_id
= get_frame_id (frame
);
1466 /* Replace stepping related frames where necessary. */
1467 if (upd_step_frame_id
)
1468 tp
->control
.step_frame_id
= frame_id
;
1469 if (upd_step_stack_frame_id
)
1470 tp
->control
.step_stack_frame_id
= frame_id
;
1473 /* Restore the previous execution state. */
1474 set_executing (tp
->ptid
, executing
);
1476 if (except
.reason
< 0)
1478 xfree (btinfo
->replay
);
1479 btinfo
->replay
= NULL
;
1481 registers_changed_ptid (tp
->ptid
);
1483 throw_exception (except
);
1489 /* Stop replaying a thread. */
1492 record_btrace_stop_replaying (struct thread_info
*tp
)
1494 struct btrace_thread_info
*btinfo
;
1496 btinfo
= &tp
->btrace
;
1498 xfree (btinfo
->replay
);
1499 btinfo
->replay
= NULL
;
1501 /* Make sure we're not leaving any stale registers. */
1502 registers_changed_ptid (tp
->ptid
);
1505 /* The to_resume method of target record-btrace. */
1508 record_btrace_resume (struct target_ops
*ops
, ptid_t ptid
, int step
,
1509 enum gdb_signal signal
)
1511 struct thread_info
*tp
, *other
;
1512 enum btrace_thread_flag flag
;
1514 DEBUG ("resume %s: %s", target_pid_to_str (ptid
), step
? "step" : "cont");
1516 /* Store the execution direction of the last resume. */
1517 record_btrace_resume_exec_dir
= execution_direction
;
1519 tp
= record_btrace_find_resume_thread (ptid
);
1521 error (_("Cannot find thread to resume."));
1523 /* Stop replaying other threads if the thread to resume is not replaying. */
1524 if (!btrace_is_replaying (tp
) && execution_direction
!= EXEC_REVERSE
)
1526 record_btrace_stop_replaying (other
);
1528 /* As long as we're not replaying, just forward the request. */
1529 if (!record_btrace_is_replaying (ops
) && execution_direction
!= EXEC_REVERSE
)
1531 for (ops
= ops
->beneath
; ops
!= NULL
; ops
= ops
->beneath
)
1532 if (ops
->to_resume
!= NULL
)
1533 return ops
->to_resume (ops
, ptid
, step
, signal
);
1535 error (_("Cannot find target for stepping."));
1538 /* Compute the btrace thread flag for the requested move. */
1540 flag
= execution_direction
== EXEC_REVERSE
? BTHR_RCONT
: BTHR_CONT
;
1542 flag
= execution_direction
== EXEC_REVERSE
? BTHR_RSTEP
: BTHR_STEP
;
1544 /* At the moment, we only move a single thread. We could also move
1545 all threads in parallel by single-stepping each resumed thread
1546 until the first runs into an event.
1547 When we do that, we would want to continue all other threads.
1548 For now, just resume one thread to not confuse to_wait. */
1549 record_btrace_resume_thread (tp
, flag
);
1551 /* We just indicate the resume intent here. The actual stepping happens in
1552 record_btrace_wait below. */
1554 /* Async support. */
1555 if (target_can_async_p ())
1557 target_async (inferior_event_handler
, 0);
1558 mark_async_event_handler (record_btrace_async_inferior_event_handler
);
1562 /* Find a thread to move. */
1564 static struct thread_info
*
1565 record_btrace_find_thread_to_move (ptid_t ptid
)
1567 struct thread_info
*tp
;
1569 /* First check the parameter thread. */
1570 tp
= find_thread_ptid (ptid
);
1571 if (tp
!= NULL
&& (tp
->btrace
.flags
& BTHR_MOVE
) != 0)
1574 /* Otherwise, find one other thread that has been resumed. */
1576 if ((tp
->btrace
.flags
& BTHR_MOVE
) != 0)
1582 /* Return a target_waitstatus indicating that we ran out of history. */
1584 static struct target_waitstatus
1585 btrace_step_no_history (void)
1587 struct target_waitstatus status
;
1589 status
.kind
= TARGET_WAITKIND_NO_HISTORY
;
1594 /* Return a target_waitstatus indicating that a step finished. */
1596 static struct target_waitstatus
1597 btrace_step_stopped (void)
1599 struct target_waitstatus status
;
1601 status
.kind
= TARGET_WAITKIND_STOPPED
;
1602 status
.value
.sig
= GDB_SIGNAL_TRAP
;
1607 /* Clear the record histories. */
1610 record_btrace_clear_histories (struct btrace_thread_info
*btinfo
)
1612 xfree (btinfo
->insn_history
);
1613 xfree (btinfo
->call_history
);
1615 btinfo
->insn_history
= NULL
;
1616 btinfo
->call_history
= NULL
;
1619 /* Step a single thread. */
1621 static struct target_waitstatus
1622 record_btrace_step_thread (struct thread_info
*tp
)
1624 struct btrace_insn_iterator
*replay
, end
;
1625 struct btrace_thread_info
*btinfo
;
1626 struct address_space
*aspace
;
1627 struct inferior
*inf
;
1628 enum btrace_thread_flag flags
;
1631 /* We can't step without an execution history. */
1632 if (btrace_is_empty (tp
))
1633 return btrace_step_no_history ();
1635 btinfo
= &tp
->btrace
;
1636 replay
= btinfo
->replay
;
1638 flags
= btinfo
->flags
& BTHR_MOVE
;
1639 btinfo
->flags
&= ~BTHR_MOVE
;
1641 DEBUG ("stepping %d (%s): %u", tp
->num
, target_pid_to_str (tp
->ptid
), flags
);
1646 internal_error (__FILE__
, __LINE__
, _("invalid stepping type."));
1649 /* We're done if we're not replaying. */
1651 return btrace_step_no_history ();
1653 /* We are always able to step at least once. */
1654 steps
= btrace_insn_next (replay
, 1);
1655 gdb_assert (steps
== 1);
1657 /* Determine the end of the instruction trace. */
1658 btrace_insn_end (&end
, btinfo
);
1660 /* We stop replaying if we reached the end of the trace. */
1661 if (btrace_insn_cmp (replay
, &end
) == 0)
1662 record_btrace_stop_replaying (tp
);
1664 return btrace_step_stopped ();
1667 /* Start replaying if we're not already doing so. */
1669 replay
= record_btrace_start_replaying (tp
);
1671 /* If we can't step any further, we reached the end of the history. */
1672 steps
= btrace_insn_prev (replay
, 1);
1674 return btrace_step_no_history ();
1676 return btrace_step_stopped ();
1679 /* We're done if we're not replaying. */
1681 return btrace_step_no_history ();
1683 inf
= find_inferior_pid (ptid_get_pid (tp
->ptid
));
1684 aspace
= inf
->aspace
;
1686 /* Determine the end of the instruction trace. */
1687 btrace_insn_end (&end
, btinfo
);
1691 const struct btrace_insn
*insn
;
1693 /* We are always able to step at least once. */
1694 steps
= btrace_insn_next (replay
, 1);
1695 gdb_assert (steps
== 1);
1697 /* We stop replaying if we reached the end of the trace. */
1698 if (btrace_insn_cmp (replay
, &end
) == 0)
1700 record_btrace_stop_replaying (tp
);
1701 return btrace_step_no_history ();
1704 insn
= btrace_insn_get (replay
);
1707 DEBUG ("stepping %d (%s) ... %s", tp
->num
,
1708 target_pid_to_str (tp
->ptid
),
1709 core_addr_to_string_nz (insn
->pc
));
1711 if (breakpoint_here_p (aspace
, insn
->pc
))
1712 return btrace_step_stopped ();
1716 /* Start replaying if we're not already doing so. */
1718 replay
= record_btrace_start_replaying (tp
);
1720 inf
= find_inferior_pid (ptid_get_pid (tp
->ptid
));
1721 aspace
= inf
->aspace
;
1725 const struct btrace_insn
*insn
;
1727 /* If we can't step any further, we're done. */
1728 steps
= btrace_insn_prev (replay
, 1);
1730 return btrace_step_no_history ();
1732 insn
= btrace_insn_get (replay
);
1735 DEBUG ("reverse-stepping %d (%s) ... %s", tp
->num
,
1736 target_pid_to_str (tp
->ptid
),
1737 core_addr_to_string_nz (insn
->pc
));
1739 if (breakpoint_here_p (aspace
, insn
->pc
))
1740 return btrace_step_stopped ();
1745 /* The to_wait method of target record-btrace. */
1748 record_btrace_wait (struct target_ops
*ops
, ptid_t ptid
,
1749 struct target_waitstatus
*status
, int options
)
1751 struct thread_info
*tp
, *other
;
1753 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid
), options
);
1755 /* As long as we're not replaying, just forward the request. */
1756 if (!record_btrace_is_replaying (ops
) && execution_direction
!= EXEC_REVERSE
)
1758 for (ops
= ops
->beneath
; ops
!= NULL
; ops
= ops
->beneath
)
1759 if (ops
->to_wait
!= NULL
)
1760 return ops
->to_wait (ops
, ptid
, status
, options
);
1762 error (_("Cannot find target for waiting."));
1765 /* Let's find a thread to move. */
1766 tp
= record_btrace_find_thread_to_move (ptid
);
1769 DEBUG ("wait %s: no thread", target_pid_to_str (ptid
));
1771 status
->kind
= TARGET_WAITKIND_IGNORE
;
1772 return minus_one_ptid
;
1775 /* We only move a single thread. We're not able to correlate threads. */
1776 *status
= record_btrace_step_thread (tp
);
1778 /* Stop all other threads. */
1781 other
->btrace
.flags
&= ~BTHR_MOVE
;
1783 /* Start record histories anew from the current position. */
1784 record_btrace_clear_histories (&tp
->btrace
);
1786 /* We moved the replay position but did not update registers. */
1787 registers_changed_ptid (tp
->ptid
);
/* The to_can_execute_reverse method of target record-btrace.  */

static int
record_btrace_can_execute_reverse (struct target_ops *self)
{
  /* Replaying recorded branch trace always supports reverse execution.  */
  return 1;
}
1800 /* The to_decr_pc_after_break method of target record-btrace. */
1803 record_btrace_decr_pc_after_break (struct target_ops
*ops
,
1804 struct gdbarch
*gdbarch
)
1806 /* When replaying, we do not actually execute the breakpoint instruction
1807 so there is no need to adjust the PC after hitting a breakpoint. */
1808 if (record_btrace_is_replaying (ops
))
1811 return ops
->beneath
->to_decr_pc_after_break (ops
->beneath
, gdbarch
);
1814 /* The to_find_new_threads method of target record-btrace. */
1817 record_btrace_find_new_threads (struct target_ops
*ops
)
1819 /* Don't expect new threads if we're replaying. */
1820 if (record_btrace_is_replaying (ops
))
1823 /* Forward the request. */
1824 for (ops
= ops
->beneath
; ops
!= NULL
; ops
= ops
->beneath
)
1825 if (ops
->to_find_new_threads
!= NULL
)
1827 ops
->to_find_new_threads (ops
);
1832 /* The to_thread_alive method of target record-btrace. */
1835 record_btrace_thread_alive (struct target_ops
*ops
, ptid_t ptid
)
1837 /* We don't add or remove threads during replay. */
1838 if (record_btrace_is_replaying (ops
))
1839 return find_thread_ptid (ptid
) != NULL
;
1841 /* Forward the request. */
1842 for (ops
= ops
->beneath
; ops
!= NULL
; ops
= ops
->beneath
)
1843 if (ops
->to_thread_alive
!= NULL
)
1844 return ops
->to_thread_alive (ops
, ptid
);
1849 /* Set the replay branch trace instruction iterator. If IT is NULL, replay
1853 record_btrace_set_replay (struct thread_info
*tp
,
1854 const struct btrace_insn_iterator
*it
)
1856 struct btrace_thread_info
*btinfo
;
1858 btinfo
= &tp
->btrace
;
1860 if (it
== NULL
|| it
->function
== NULL
)
1861 record_btrace_stop_replaying (tp
);
1864 if (btinfo
->replay
== NULL
)
1865 record_btrace_start_replaying (tp
);
1866 else if (btrace_insn_cmp (btinfo
->replay
, it
) == 0)
1869 *btinfo
->replay
= *it
;
1870 registers_changed_ptid (tp
->ptid
);
1873 /* Start anew from the new replay position. */
1874 record_btrace_clear_histories (btinfo
);
1877 /* The to_goto_record_begin method of target record-btrace. */
1880 record_btrace_goto_begin (struct target_ops
*self
)
1882 struct thread_info
*tp
;
1883 struct btrace_insn_iterator begin
;
1885 tp
= require_btrace_thread ();
1887 btrace_insn_begin (&begin
, &tp
->btrace
);
1888 record_btrace_set_replay (tp
, &begin
);
1890 print_stack_frame (get_selected_frame (NULL
), 1, SRC_AND_LOC
, 1);
1893 /* The to_goto_record_end method of target record-btrace. */
1896 record_btrace_goto_end (struct target_ops
*ops
)
1898 struct thread_info
*tp
;
1900 tp
= require_btrace_thread ();
1902 record_btrace_set_replay (tp
, NULL
);
1904 print_stack_frame (get_selected_frame (NULL
), 1, SRC_AND_LOC
, 1);
1907 /* The to_goto_record method of target record-btrace. */
1910 record_btrace_goto (struct target_ops
*self
, ULONGEST insn
)
1912 struct thread_info
*tp
;
1913 struct btrace_insn_iterator it
;
1914 unsigned int number
;
1919 /* Check for wrap-arounds. */
1921 error (_("Instruction number out of range."));
1923 tp
= require_btrace_thread ();
1925 found
= btrace_find_insn_by_number (&it
, &tp
->btrace
, number
);
1927 error (_("No such instruction."));
1929 record_btrace_set_replay (tp
, &it
);
1931 print_stack_frame (get_selected_frame (NULL
), 1, SRC_AND_LOC
, 1);
1934 /* The to_execution_direction target method. */
1936 static enum exec_direction_kind
1937 record_btrace_execution_direction (struct target_ops
*self
)
1939 return record_btrace_resume_exec_dir
;
1942 /* Initialize the record-btrace target ops. */
1945 init_record_btrace_ops (void)
1947 struct target_ops
*ops
;
1949 ops
= &record_btrace_ops
;
1950 ops
->to_shortname
= "record-btrace";
1951 ops
->to_longname
= "Branch tracing target";
1952 ops
->to_doc
= "Collect control-flow trace and provide the execution history.";
1953 ops
->to_open
= record_btrace_open
;
1954 ops
->to_close
= record_btrace_close
;
1955 ops
->to_detach
= record_detach
;
1956 ops
->to_disconnect
= record_disconnect
;
1957 ops
->to_mourn_inferior
= record_mourn_inferior
;
1958 ops
->to_kill
= record_kill
;
1959 ops
->to_stop_recording
= record_btrace_stop_recording
;
1960 ops
->to_info_record
= record_btrace_info
;
1961 ops
->to_insn_history
= record_btrace_insn_history
;
1962 ops
->to_insn_history_from
= record_btrace_insn_history_from
;
1963 ops
->to_insn_history_range
= record_btrace_insn_history_range
;
1964 ops
->to_call_history
= record_btrace_call_history
;
1965 ops
->to_call_history_from
= record_btrace_call_history_from
;
1966 ops
->to_call_history_range
= record_btrace_call_history_range
;
1967 ops
->to_record_is_replaying
= record_btrace_is_replaying
;
1968 ops
->to_xfer_partial
= record_btrace_xfer_partial
;
1969 ops
->to_remove_breakpoint
= record_btrace_remove_breakpoint
;
1970 ops
->to_insert_breakpoint
= record_btrace_insert_breakpoint
;
1971 ops
->to_fetch_registers
= record_btrace_fetch_registers
;
1972 ops
->to_store_registers
= record_btrace_store_registers
;
1973 ops
->to_prepare_to_store
= record_btrace_prepare_to_store
;
1974 ops
->to_get_unwinder
= &record_btrace_to_get_unwinder
;
1975 ops
->to_get_tailcall_unwinder
= &record_btrace_to_get_tailcall_unwinder
;
1976 ops
->to_resume
= record_btrace_resume
;
1977 ops
->to_wait
= record_btrace_wait
;
1978 ops
->to_find_new_threads
= record_btrace_find_new_threads
;
1979 ops
->to_thread_alive
= record_btrace_thread_alive
;
1980 ops
->to_goto_record_begin
= record_btrace_goto_begin
;
1981 ops
->to_goto_record_end
= record_btrace_goto_end
;
1982 ops
->to_goto_record
= record_btrace_goto
;
1983 ops
->to_can_execute_reverse
= record_btrace_can_execute_reverse
;
1984 ops
->to_decr_pc_after_break
= record_btrace_decr_pc_after_break
;
1985 ops
->to_execution_direction
= record_btrace_execution_direction
;
1986 ops
->to_stratum
= record_stratum
;
1987 ops
->to_magic
= OPS_MAGIC
;
1990 /* Alias for "target record". */
1993 cmd_record_btrace_start (char *args
, int from_tty
)
1995 if (args
!= NULL
&& *args
!= 0)
1996 error (_("Invalid argument."));
1998 execute_command ("target record-btrace", from_tty
);
2001 /* The "set record btrace" command. */
2004 cmd_set_record_btrace (char *args
, int from_tty
)
2006 cmd_show_list (set_record_btrace_cmdlist
, from_tty
, "");
2009 /* The "show record btrace" command. */
2012 cmd_show_record_btrace (char *args
, int from_tty
)
2014 cmd_show_list (show_record_btrace_cmdlist
, from_tty
, "");
2017 /* The "show record btrace replay-memory-access" command. */
2020 cmd_show_replay_memory_access (struct ui_file
*file
, int from_tty
,
2021 struct cmd_list_element
*c
, const char *value
)
2023 fprintf_filtered (gdb_stdout
, _("Replay memory access is %s.\n"),
2024 replay_memory_access
);
2027 void _initialize_record_btrace (void);
2029 /* Initialize btrace commands. */
2032 _initialize_record_btrace (void)
2034 add_cmd ("btrace", class_obscure
, cmd_record_btrace_start
,
2035 _("Start branch trace recording."),
2037 add_alias_cmd ("b", "btrace", class_obscure
, 1, &record_cmdlist
);
2039 add_prefix_cmd ("btrace", class_support
, cmd_set_record_btrace
,
2040 _("Set record options"), &set_record_btrace_cmdlist
,
2041 "set record btrace ", 0, &set_record_cmdlist
);
2043 add_prefix_cmd ("btrace", class_support
, cmd_show_record_btrace
,
2044 _("Show record options"), &show_record_btrace_cmdlist
,
2045 "show record btrace ", 0, &show_record_cmdlist
);
2047 add_setshow_enum_cmd ("replay-memory-access", no_class
,
2048 replay_memory_access_types
, &replay_memory_access
, _("\
2049 Set what memory accesses are allowed during replay."), _("\
2050 Show what memory accesses are allowed during replay."),
2051 _("Default is READ-ONLY.\n\n\
2052 The btrace record target does not trace data.\n\
2053 The memory therefore corresponds to the live target and not \
2054 to the current replay position.\n\n\
2055 When READ-ONLY, allow accesses to read-only memory during replay.\n\
2056 When READ-WRITE, allow accesses to read-only and read-write memory during \
2058 NULL
, cmd_show_replay_memory_access
,
2059 &set_record_btrace_cmdlist
,
2060 &show_record_btrace_cmdlist
);
2062 init_record_btrace_ops ();
2063 add_target (&record_btrace_ops
);
2065 bfcache
= htab_create_alloc (50, bfcache_hash
, bfcache_eq
, NULL
,