1 /* Branch trace support for GDB, the GNU debugger.
3 Copyright (C) 2013-2015 Free Software Foundation, Inc.
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "gdbthread.h"
29 #include "cli/cli-utils.h"
33 #include "filenames.h"
35 #include "frame-unwind.h"
38 #include "event-loop.h"
42 /* The target_ops of record-btrace. */
43 static struct target_ops record_btrace_ops
;
45 /* A new thread observer enabling branch tracing for the new thread. */
46 static struct observer
*record_btrace_thread_observer
;
48 /* Memory access types used in set/show record btrace replay-memory-access. */
49 static const char replay_memory_access_read_only
[] = "read-only";
50 static const char replay_memory_access_read_write
[] = "read-write";
51 static const char *const replay_memory_access_types
[] =
53 replay_memory_access_read_only
,
54 replay_memory_access_read_write
,
58 /* The currently allowed replay memory access type. */
59 static const char *replay_memory_access
= replay_memory_access_read_only
;
61 /* Command lists for "set/show record btrace". */
62 static struct cmd_list_element
*set_record_btrace_cmdlist
;
63 static struct cmd_list_element
*show_record_btrace_cmdlist
;
65 /* The execution direction of the last resume we got. See record-full.c. */
66 static enum exec_direction_kind record_btrace_resume_exec_dir
= EXEC_FORWARD
;
68 /* The async event handler for reverse/replay execution. */
69 static struct async_event_handler
*record_btrace_async_inferior_event_handler
;
71 /* A flag indicating that we are currently generating a core file. */
72 static int record_btrace_generating_corefile
;
74 /* The current branch trace configuration. */
75 static struct btrace_config record_btrace_conf
;
77 /* Command list for "record btrace". */
78 static struct cmd_list_element
*record_btrace_cmdlist
;
80 /* Command lists for "set/show record btrace bts". */
81 static struct cmd_list_element
*set_record_btrace_bts_cmdlist
;
82 static struct cmd_list_element
*show_record_btrace_bts_cmdlist
;
84 /* Command lists for "set/show record btrace pt". */
85 static struct cmd_list_element
*set_record_btrace_pt_cmdlist
;
86 static struct cmd_list_element
*show_record_btrace_pt_cmdlist
;
/* Print a record-btrace debug message.  Use do ... while (0) to avoid
   ambiguities when used in if statements.  */

#define DEBUG(msg, args...)						\
  do									\
    {									\
      if (record_debug != 0)						\
	fprintf_unfiltered (gdb_stdlog,					\
			    "[record-btrace] " msg "\n", ##args);	\
    }									\
  while (0)
101 /* Update the branch trace for the current thread and return a pointer to its
104 Throws an error if there is no thread or no trace. This function never
107 static struct thread_info
*
108 require_btrace_thread (void)
110 struct thread_info
*tp
;
114 tp
= find_thread_ptid (inferior_ptid
);
116 error (_("No thread."));
120 if (btrace_is_empty (tp
))
121 error (_("No trace."));
126 /* Update the branch trace for the current thread and return a pointer to its
127 branch trace information struct.
129 Throws an error if there is no thread or no trace. This function never
132 static struct btrace_thread_info
*
133 require_btrace (void)
135 struct thread_info
*tp
;
137 tp
= require_btrace_thread ();
142 /* Enable branch tracing for one thread. Warn on errors. */
145 record_btrace_enable_warn (struct thread_info
*tp
)
149 btrace_enable (tp
, &record_btrace_conf
);
151 CATCH (error
, RETURN_MASK_ERROR
)
153 warning ("%s", error
.message
);
/* Callback function to disable branch tracing for one thread.  */

static void
record_btrace_disable_callback (void *arg)
{
  struct thread_info *tp = (struct thread_info *) arg;

  btrace_disable (tp);
}
168 /* Enable automatic tracing of new threads. */
171 record_btrace_auto_enable (void)
173 DEBUG ("attach thread observer");
175 record_btrace_thread_observer
176 = observer_attach_new_thread (record_btrace_enable_warn
);
179 /* Disable automatic tracing of new threads. */
182 record_btrace_auto_disable (void)
184 /* The observer may have been detached, already. */
185 if (record_btrace_thread_observer
== NULL
)
188 DEBUG ("detach thread observer");
190 observer_detach_new_thread (record_btrace_thread_observer
);
191 record_btrace_thread_observer
= NULL
;
194 /* The record-btrace async event handler function. */
197 record_btrace_handle_async_inferior_event (gdb_client_data data
)
199 inferior_event_handler (INF_REG_EVENT
, NULL
);
202 /* The to_open method of target record-btrace. */
205 record_btrace_open (const char *args
, int from_tty
)
207 struct cleanup
*disable_chain
;
208 struct thread_info
*tp
;
214 if (!target_has_execution
)
215 error (_("The program is not being run."));
217 gdb_assert (record_btrace_thread_observer
== NULL
);
219 disable_chain
= make_cleanup (null_cleanup
, NULL
);
220 ALL_NON_EXITED_THREADS (tp
)
221 if (args
== NULL
|| *args
== 0 || number_is_in_list (args
, tp
->num
))
223 btrace_enable (tp
, &record_btrace_conf
);
225 make_cleanup (record_btrace_disable_callback
, tp
);
228 record_btrace_auto_enable ();
230 push_target (&record_btrace_ops
);
232 record_btrace_async_inferior_event_handler
233 = create_async_event_handler (record_btrace_handle_async_inferior_event
,
235 record_btrace_generating_corefile
= 0;
237 observer_notify_record_changed (current_inferior (), 1);
239 discard_cleanups (disable_chain
);
242 /* The to_stop_recording method of target record-btrace. */
245 record_btrace_stop_recording (struct target_ops
*self
)
247 struct thread_info
*tp
;
249 DEBUG ("stop recording");
251 record_btrace_auto_disable ();
253 ALL_NON_EXITED_THREADS (tp
)
254 if (tp
->btrace
.target
!= NULL
)
258 /* The to_close method of target record-btrace. */
261 record_btrace_close (struct target_ops
*self
)
263 struct thread_info
*tp
;
265 if (record_btrace_async_inferior_event_handler
!= NULL
)
266 delete_async_event_handler (&record_btrace_async_inferior_event_handler
);
268 /* Make sure automatic recording gets disabled even if we did not stop
269 recording before closing the record-btrace target. */
270 record_btrace_auto_disable ();
272 /* We should have already stopped recording.
273 Tear down btrace in case we have not. */
274 ALL_NON_EXITED_THREADS (tp
)
275 btrace_teardown (tp
);
278 /* The to_async method of target record-btrace. */
281 record_btrace_async (struct target_ops
*ops
, int enable
)
284 mark_async_event_handler (record_btrace_async_inferior_event_handler
);
286 clear_async_event_handler (record_btrace_async_inferior_event_handler
);
288 ops
->beneath
->to_async (ops
->beneath
, enable
);
/* Adjusts the size and returns a human readable size suffix.

   SIZE is reduced in place to the corresponding unit (GB, MB, kB) when it
   is an exact multiple of that unit; otherwise it is left unchanged and
   the empty suffix is returned.  */

static const char *
record_btrace_adjust_size (unsigned int *size)
{
  unsigned int sz;

  sz = *size;

  if ((sz & ((1u << 30) - 1)) == 0)
    {
      *size = sz >> 30;
      return "GB";
    }
  else if ((sz & ((1u << 20) - 1)) == 0)
    {
      *size = sz >> 20;
      return "MB";
    }
  else if ((sz & ((1u << 10) - 1)) == 0)
    {
      *size = sz >> 10;
      return "kB";
    }

  return "";
}
319 /* Print a BTS configuration. */
322 record_btrace_print_bts_conf (const struct btrace_config_bts
*conf
)
330 suffix
= record_btrace_adjust_size (&size
);
331 printf_unfiltered (_("Buffer size: %u%s.\n"), size
, suffix
);
335 /* Print an Intel(R) Processor Trace configuration. */
338 record_btrace_print_pt_conf (const struct btrace_config_pt
*conf
)
346 suffix
= record_btrace_adjust_size (&size
);
347 printf_unfiltered (_("Buffer size: %u%s.\n"), size
, suffix
);
351 /* Print a branch tracing configuration. */
354 record_btrace_print_conf (const struct btrace_config
*conf
)
356 printf_unfiltered (_("Recording format: %s.\n"),
357 btrace_format_string (conf
->format
));
359 switch (conf
->format
)
361 case BTRACE_FORMAT_NONE
:
364 case BTRACE_FORMAT_BTS
:
365 record_btrace_print_bts_conf (&conf
->bts
);
368 case BTRACE_FORMAT_PT
:
369 record_btrace_print_pt_conf (&conf
->pt
);
373 internal_error (__FILE__
, __LINE__
, _("Unkown branch trace format."));
376 /* The to_info_record method of target record-btrace. */
379 record_btrace_info (struct target_ops
*self
)
381 struct btrace_thread_info
*btinfo
;
382 const struct btrace_config
*conf
;
383 struct thread_info
*tp
;
384 unsigned int insns
, calls
, gaps
;
388 tp
= find_thread_ptid (inferior_ptid
);
390 error (_("No thread."));
392 btinfo
= &tp
->btrace
;
394 conf
= btrace_conf (btinfo
);
396 record_btrace_print_conf (conf
);
404 if (!btrace_is_empty (tp
))
406 struct btrace_call_iterator call
;
407 struct btrace_insn_iterator insn
;
409 btrace_call_end (&call
, btinfo
);
410 btrace_call_prev (&call
, 1);
411 calls
= btrace_call_number (&call
);
413 btrace_insn_end (&insn
, btinfo
);
415 insns
= btrace_insn_number (&insn
);
418 /* The last instruction does not really belong to the trace. */
425 /* Skip gaps at the end. */
428 steps
= btrace_insn_prev (&insn
, 1);
432 insns
= btrace_insn_number (&insn
);
437 gaps
= btinfo
->ngaps
;
440 printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
441 "for thread %d (%s).\n"), insns
, calls
, gaps
,
442 tp
->num
, target_pid_to_str (tp
->ptid
));
444 if (btrace_is_replaying (tp
))
445 printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
446 btrace_insn_number (btinfo
->replay
));
449 /* Print a decode error. */
452 btrace_ui_out_decode_error (struct ui_out
*uiout
, int errcode
,
453 enum btrace_format format
)
458 errstr
= _("unknown");
466 case BTRACE_FORMAT_BTS
:
472 case BDE_BTS_OVERFLOW
:
473 errstr
= _("instruction overflow");
476 case BDE_BTS_INSN_SIZE
:
477 errstr
= _("unknown instruction");
482 #if defined (HAVE_LIBIPT)
483 case BTRACE_FORMAT_PT
:
486 case BDE_PT_USER_QUIT
:
488 errstr
= _("trace decode cancelled");
491 case BDE_PT_DISABLED
:
493 errstr
= _("disabled");
496 case BDE_PT_OVERFLOW
:
498 errstr
= _("overflow");
503 errstr
= pt_errstr (pt_errcode (errcode
));
507 #endif /* defined (HAVE_LIBIPT) */
510 ui_out_text (uiout
, _("["));
513 ui_out_text (uiout
, _("decode error ("));
514 ui_out_field_int (uiout
, "errcode", errcode
);
515 ui_out_text (uiout
, _("): "));
517 ui_out_text (uiout
, errstr
);
518 ui_out_text (uiout
, _("]\n"));
/* Print an unsigned int.  */

static void
ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
{
  ui_out_field_fmt (uiout, fld, "%u", val);
}
529 /* Disassemble a section of the recorded instruction trace. */
532 btrace_insn_history (struct ui_out
*uiout
,
533 const struct btrace_thread_info
*btinfo
,
534 const struct btrace_insn_iterator
*begin
,
535 const struct btrace_insn_iterator
*end
, int flags
)
537 struct gdbarch
*gdbarch
;
538 struct btrace_insn_iterator it
;
540 DEBUG ("itrace (0x%x): [%u; %u)", flags
, btrace_insn_number (begin
),
541 btrace_insn_number (end
));
543 gdbarch
= target_gdbarch ();
545 for (it
= *begin
; btrace_insn_cmp (&it
, end
) != 0; btrace_insn_next (&it
, 1))
547 const struct btrace_insn
*insn
;
549 insn
= btrace_insn_get (&it
);
551 /* A NULL instruction indicates a gap in the trace. */
554 const struct btrace_config
*conf
;
556 conf
= btrace_conf (btinfo
);
558 /* We have trace so we must have a configuration. */
559 gdb_assert (conf
!= NULL
);
561 btrace_ui_out_decode_error (uiout
, it
.function
->errcode
,
568 /* We may add a speculation prefix later. We use the same space
569 that is used for the pc prefix. */
570 if ((flags
& DISASSEMBLY_OMIT_PC
) == 0)
571 strncpy (prefix
, pc_prefix (insn
->pc
), 3);
580 /* Print the instruction index. */
581 ui_out_field_uint (uiout
, "index", btrace_insn_number (&it
));
582 ui_out_text (uiout
, "\t");
584 /* Indicate speculative execution by a leading '?'. */
585 if ((insn
->flags
& BTRACE_INSN_FLAG_SPECULATIVE
) != 0)
588 /* Print the prefix; we tell gdb_disassembly below to omit it. */
589 ui_out_field_fmt (uiout
, "prefix", "%s", prefix
);
591 /* Disassembly with '/m' flag may not produce the expected result.
593 gdb_disassembly (gdbarch
, uiout
, NULL
, flags
| DISASSEMBLY_OMIT_PC
,
594 1, insn
->pc
, insn
->pc
+ 1);
599 /* The to_insn_history method of target record-btrace. */
602 record_btrace_insn_history (struct target_ops
*self
, int size
, int flags
)
604 struct btrace_thread_info
*btinfo
;
605 struct btrace_insn_history
*history
;
606 struct btrace_insn_iterator begin
, end
;
607 struct cleanup
*uiout_cleanup
;
608 struct ui_out
*uiout
;
609 unsigned int context
, covered
;
611 uiout
= current_uiout
;
612 uiout_cleanup
= make_cleanup_ui_out_tuple_begin_end (uiout
,
614 context
= abs (size
);
616 error (_("Bad record instruction-history-size."));
618 btinfo
= require_btrace ();
619 history
= btinfo
->insn_history
;
622 struct btrace_insn_iterator
*replay
;
624 DEBUG ("insn-history (0x%x): %d", flags
, size
);
626 /* If we're replaying, we start at the replay position. Otherwise, we
627 start at the tail of the trace. */
628 replay
= btinfo
->replay
;
632 btrace_insn_end (&begin
, btinfo
);
634 /* We start from here and expand in the requested direction. Then we
635 expand in the other direction, as well, to fill up any remaining
640 /* We want the current position covered, as well. */
641 covered
= btrace_insn_next (&end
, 1);
642 covered
+= btrace_insn_prev (&begin
, context
- covered
);
643 covered
+= btrace_insn_next (&end
, context
- covered
);
647 covered
= btrace_insn_next (&end
, context
);
648 covered
+= btrace_insn_prev (&begin
, context
- covered
);
653 begin
= history
->begin
;
656 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags
, size
,
657 btrace_insn_number (&begin
), btrace_insn_number (&end
));
662 covered
= btrace_insn_prev (&begin
, context
);
667 covered
= btrace_insn_next (&end
, context
);
672 btrace_insn_history (uiout
, btinfo
, &begin
, &end
, flags
);
676 printf_unfiltered (_("At the start of the branch trace record.\n"));
678 printf_unfiltered (_("At the end of the branch trace record.\n"));
681 btrace_set_insn_history (btinfo
, &begin
, &end
);
682 do_cleanups (uiout_cleanup
);
685 /* The to_insn_history_range method of target record-btrace. */
688 record_btrace_insn_history_range (struct target_ops
*self
,
689 ULONGEST from
, ULONGEST to
, int flags
)
691 struct btrace_thread_info
*btinfo
;
692 struct btrace_insn_history
*history
;
693 struct btrace_insn_iterator begin
, end
;
694 struct cleanup
*uiout_cleanup
;
695 struct ui_out
*uiout
;
696 unsigned int low
, high
;
699 uiout
= current_uiout
;
700 uiout_cleanup
= make_cleanup_ui_out_tuple_begin_end (uiout
,
705 DEBUG ("insn-history (0x%x): [%u; %u)", flags
, low
, high
);
707 /* Check for wrap-arounds. */
708 if (low
!= from
|| high
!= to
)
709 error (_("Bad range."));
712 error (_("Bad range."));
714 btinfo
= require_btrace ();
716 found
= btrace_find_insn_by_number (&begin
, btinfo
, low
);
718 error (_("Range out of bounds."));
720 found
= btrace_find_insn_by_number (&end
, btinfo
, high
);
723 /* Silently truncate the range. */
724 btrace_insn_end (&end
, btinfo
);
728 /* We want both begin and end to be inclusive. */
729 btrace_insn_next (&end
, 1);
732 btrace_insn_history (uiout
, btinfo
, &begin
, &end
, flags
);
733 btrace_set_insn_history (btinfo
, &begin
, &end
);
735 do_cleanups (uiout_cleanup
);
738 /* The to_insn_history_from method of target record-btrace. */
741 record_btrace_insn_history_from (struct target_ops
*self
,
742 ULONGEST from
, int size
, int flags
)
744 ULONGEST begin
, end
, context
;
746 context
= abs (size
);
748 error (_("Bad record instruction-history-size."));
757 begin
= from
- context
+ 1;
762 end
= from
+ context
- 1;
764 /* Check for wrap-around. */
769 record_btrace_insn_history_range (self
, begin
, end
, flags
);
772 /* Print the instruction number range for a function call history line. */
775 btrace_call_history_insn_range (struct ui_out
*uiout
,
776 const struct btrace_function
*bfun
)
778 unsigned int begin
, end
, size
;
780 size
= VEC_length (btrace_insn_s
, bfun
->insn
);
781 gdb_assert (size
> 0);
783 begin
= bfun
->insn_offset
;
784 end
= begin
+ size
- 1;
786 ui_out_field_uint (uiout
, "insn begin", begin
);
787 ui_out_text (uiout
, ",");
788 ui_out_field_uint (uiout
, "insn end", end
);
791 /* Compute the lowest and highest source line for the instructions in BFUN
792 and return them in PBEGIN and PEND.
793 Ignore instructions that can't be mapped to BFUN, e.g. instructions that
794 result from inlining or macro expansion. */
797 btrace_compute_src_line_range (const struct btrace_function
*bfun
,
798 int *pbegin
, int *pend
)
800 struct btrace_insn
*insn
;
801 struct symtab
*symtab
;
813 symtab
= symbol_symtab (sym
);
815 for (idx
= 0; VEC_iterate (btrace_insn_s
, bfun
->insn
, idx
, insn
); ++idx
)
817 struct symtab_and_line sal
;
819 sal
= find_pc_line (insn
->pc
, 0);
820 if (sal
.symtab
!= symtab
|| sal
.line
== 0)
823 begin
= min (begin
, sal
.line
);
824 end
= max (end
, sal
.line
);
832 /* Print the source line information for a function call history line. */
835 btrace_call_history_src_line (struct ui_out
*uiout
,
836 const struct btrace_function
*bfun
)
845 ui_out_field_string (uiout
, "file",
846 symtab_to_filename_for_display (symbol_symtab (sym
)));
848 btrace_compute_src_line_range (bfun
, &begin
, &end
);
852 ui_out_text (uiout
, ":");
853 ui_out_field_int (uiout
, "min line", begin
);
858 ui_out_text (uiout
, ",");
859 ui_out_field_int (uiout
, "max line", end
);
862 /* Get the name of a branch trace function. */
865 btrace_get_bfun_name (const struct btrace_function
*bfun
)
867 struct minimal_symbol
*msym
;
877 return SYMBOL_PRINT_NAME (sym
);
878 else if (msym
!= NULL
)
879 return MSYMBOL_PRINT_NAME (msym
);
884 /* Disassemble a section of the recorded function trace. */
887 btrace_call_history (struct ui_out
*uiout
,
888 const struct btrace_thread_info
*btinfo
,
889 const struct btrace_call_iterator
*begin
,
890 const struct btrace_call_iterator
*end
,
891 enum record_print_flag flags
)
893 struct btrace_call_iterator it
;
895 DEBUG ("ftrace (0x%x): [%u; %u)", flags
, btrace_call_number (begin
),
896 btrace_call_number (end
));
898 for (it
= *begin
; btrace_call_cmp (&it
, end
) < 0; btrace_call_next (&it
, 1))
900 const struct btrace_function
*bfun
;
901 struct minimal_symbol
*msym
;
904 bfun
= btrace_call_get (&it
);
908 /* Print the function index. */
909 ui_out_field_uint (uiout
, "index", bfun
->number
);
910 ui_out_text (uiout
, "\t");
912 /* Indicate gaps in the trace. */
913 if (bfun
->errcode
!= 0)
915 const struct btrace_config
*conf
;
917 conf
= btrace_conf (btinfo
);
919 /* We have trace so we must have a configuration. */
920 gdb_assert (conf
!= NULL
);
922 btrace_ui_out_decode_error (uiout
, bfun
->errcode
, conf
->format
);
927 if ((flags
& RECORD_PRINT_INDENT_CALLS
) != 0)
929 int level
= bfun
->level
+ btinfo
->level
, i
;
931 for (i
= 0; i
< level
; ++i
)
932 ui_out_text (uiout
, " ");
936 ui_out_field_string (uiout
, "function", SYMBOL_PRINT_NAME (sym
));
937 else if (msym
!= NULL
)
938 ui_out_field_string (uiout
, "function", MSYMBOL_PRINT_NAME (msym
));
939 else if (!ui_out_is_mi_like_p (uiout
))
940 ui_out_field_string (uiout
, "function", "??");
942 if ((flags
& RECORD_PRINT_INSN_RANGE
) != 0)
944 ui_out_text (uiout
, _("\tinst "));
945 btrace_call_history_insn_range (uiout
, bfun
);
948 if ((flags
& RECORD_PRINT_SRC_LINE
) != 0)
950 ui_out_text (uiout
, _("\tat "));
951 btrace_call_history_src_line (uiout
, bfun
);
954 ui_out_text (uiout
, "\n");
958 /* The to_call_history method of target record-btrace. */
961 record_btrace_call_history (struct target_ops
*self
, int size
, int flags
)
963 struct btrace_thread_info
*btinfo
;
964 struct btrace_call_history
*history
;
965 struct btrace_call_iterator begin
, end
;
966 struct cleanup
*uiout_cleanup
;
967 struct ui_out
*uiout
;
968 unsigned int context
, covered
;
970 uiout
= current_uiout
;
971 uiout_cleanup
= make_cleanup_ui_out_tuple_begin_end (uiout
,
973 context
= abs (size
);
975 error (_("Bad record function-call-history-size."));
977 btinfo
= require_btrace ();
978 history
= btinfo
->call_history
;
981 struct btrace_insn_iterator
*replay
;
983 DEBUG ("call-history (0x%x): %d", flags
, size
);
985 /* If we're replaying, we start at the replay position. Otherwise, we
986 start at the tail of the trace. */
987 replay
= btinfo
->replay
;
990 begin
.function
= replay
->function
;
991 begin
.btinfo
= btinfo
;
994 btrace_call_end (&begin
, btinfo
);
996 /* We start from here and expand in the requested direction. Then we
997 expand in the other direction, as well, to fill up any remaining
1002 /* We want the current position covered, as well. */
1003 covered
= btrace_call_next (&end
, 1);
1004 covered
+= btrace_call_prev (&begin
, context
- covered
);
1005 covered
+= btrace_call_next (&end
, context
- covered
);
1009 covered
= btrace_call_next (&end
, context
);
1010 covered
+= btrace_call_prev (&begin
, context
- covered
);
1015 begin
= history
->begin
;
1018 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", flags
, size
,
1019 btrace_call_number (&begin
), btrace_call_number (&end
));
1024 covered
= btrace_call_prev (&begin
, context
);
1029 covered
= btrace_call_next (&end
, context
);
1034 btrace_call_history (uiout
, btinfo
, &begin
, &end
, flags
);
1038 printf_unfiltered (_("At the start of the branch trace record.\n"));
1040 printf_unfiltered (_("At the end of the branch trace record.\n"));
1043 btrace_set_call_history (btinfo
, &begin
, &end
);
1044 do_cleanups (uiout_cleanup
);
1047 /* The to_call_history_range method of target record-btrace. */
1050 record_btrace_call_history_range (struct target_ops
*self
,
1051 ULONGEST from
, ULONGEST to
, int flags
)
1053 struct btrace_thread_info
*btinfo
;
1054 struct btrace_call_history
*history
;
1055 struct btrace_call_iterator begin
, end
;
1056 struct cleanup
*uiout_cleanup
;
1057 struct ui_out
*uiout
;
1058 unsigned int low
, high
;
1061 uiout
= current_uiout
;
1062 uiout_cleanup
= make_cleanup_ui_out_tuple_begin_end (uiout
,
1067 DEBUG ("call-history (0x%x): [%u; %u)", flags
, low
, high
);
1069 /* Check for wrap-arounds. */
1070 if (low
!= from
|| high
!= to
)
1071 error (_("Bad range."));
1074 error (_("Bad range."));
1076 btinfo
= require_btrace ();
1078 found
= btrace_find_call_by_number (&begin
, btinfo
, low
);
1080 error (_("Range out of bounds."));
1082 found
= btrace_find_call_by_number (&end
, btinfo
, high
);
1085 /* Silently truncate the range. */
1086 btrace_call_end (&end
, btinfo
);
1090 /* We want both begin and end to be inclusive. */
1091 btrace_call_next (&end
, 1);
1094 btrace_call_history (uiout
, btinfo
, &begin
, &end
, flags
);
1095 btrace_set_call_history (btinfo
, &begin
, &end
);
1097 do_cleanups (uiout_cleanup
);
1100 /* The to_call_history_from method of target record-btrace. */
1103 record_btrace_call_history_from (struct target_ops
*self
,
1104 ULONGEST from
, int size
, int flags
)
1106 ULONGEST begin
, end
, context
;
1108 context
= abs (size
);
1110 error (_("Bad record function-call-history-size."));
1119 begin
= from
- context
+ 1;
1124 end
= from
+ context
- 1;
1126 /* Check for wrap-around. */
1131 record_btrace_call_history_range (self
, begin
, end
, flags
);
1134 /* The to_record_is_replaying method of target record-btrace. */
1137 record_btrace_is_replaying (struct target_ops
*self
, ptid_t ptid
)
1139 struct thread_info
*tp
;
1141 ALL_NON_EXITED_THREADS (tp
)
1142 if (ptid_match (tp
->ptid
, ptid
) && btrace_is_replaying (tp
))
1148 /* The to_record_will_replay method of target record-btrace. */
1151 record_btrace_will_replay (struct target_ops
*self
, ptid_t ptid
, int dir
)
1153 return dir
== EXEC_REVERSE
|| record_btrace_is_replaying (self
, ptid
);
1156 /* The to_xfer_partial method of target record-btrace. */
1158 static enum target_xfer_status
1159 record_btrace_xfer_partial (struct target_ops
*ops
, enum target_object object
,
1160 const char *annex
, gdb_byte
*readbuf
,
1161 const gdb_byte
*writebuf
, ULONGEST offset
,
1162 ULONGEST len
, ULONGEST
*xfered_len
)
1164 struct target_ops
*t
;
1166 /* Filter out requests that don't make sense during replay. */
1167 if (replay_memory_access
== replay_memory_access_read_only
1168 && !record_btrace_generating_corefile
1169 && record_btrace_is_replaying (ops
, inferior_ptid
))
1173 case TARGET_OBJECT_MEMORY
:
1175 struct target_section
*section
;
1177 /* We do not allow writing memory in general. */
1178 if (writebuf
!= NULL
)
1181 return TARGET_XFER_UNAVAILABLE
;
1184 /* We allow reading readonly memory. */
1185 section
= target_section_by_addr (ops
, offset
);
1186 if (section
!= NULL
)
1188 /* Check if the section we found is readonly. */
1189 if ((bfd_get_section_flags (section
->the_bfd_section
->owner
,
1190 section
->the_bfd_section
)
1191 & SEC_READONLY
) != 0)
1193 /* Truncate the request to fit into this section. */
1194 len
= min (len
, section
->endaddr
- offset
);
1200 return TARGET_XFER_UNAVAILABLE
;
1205 /* Forward the request. */
1207 return ops
->to_xfer_partial (ops
, object
, annex
, readbuf
, writebuf
,
1208 offset
, len
, xfered_len
);
1211 /* The to_insert_breakpoint method of target record-btrace. */
1214 record_btrace_insert_breakpoint (struct target_ops
*ops
,
1215 struct gdbarch
*gdbarch
,
1216 struct bp_target_info
*bp_tgt
)
1221 /* Inserting breakpoints requires accessing memory. Allow it for the
1222 duration of this function. */
1223 old
= replay_memory_access
;
1224 replay_memory_access
= replay_memory_access_read_write
;
1229 ret
= ops
->beneath
->to_insert_breakpoint (ops
->beneath
, gdbarch
, bp_tgt
);
1231 CATCH (except
, RETURN_MASK_ALL
)
1233 replay_memory_access
= old
;
1234 throw_exception (except
);
1237 replay_memory_access
= old
;
1242 /* The to_remove_breakpoint method of target record-btrace. */
1245 record_btrace_remove_breakpoint (struct target_ops
*ops
,
1246 struct gdbarch
*gdbarch
,
1247 struct bp_target_info
*bp_tgt
)
1252 /* Removing breakpoints requires accessing memory. Allow it for the
1253 duration of this function. */
1254 old
= replay_memory_access
;
1255 replay_memory_access
= replay_memory_access_read_write
;
1260 ret
= ops
->beneath
->to_remove_breakpoint (ops
->beneath
, gdbarch
, bp_tgt
);
1262 CATCH (except
, RETURN_MASK_ALL
)
1264 replay_memory_access
= old
;
1265 throw_exception (except
);
1268 replay_memory_access
= old
;
1273 /* The to_fetch_registers method of target record-btrace. */
1276 record_btrace_fetch_registers (struct target_ops
*ops
,
1277 struct regcache
*regcache
, int regno
)
1279 struct btrace_insn_iterator
*replay
;
1280 struct thread_info
*tp
;
1282 tp
= find_thread_ptid (inferior_ptid
);
1283 gdb_assert (tp
!= NULL
);
1285 replay
= tp
->btrace
.replay
;
1286 if (replay
!= NULL
&& !record_btrace_generating_corefile
)
1288 const struct btrace_insn
*insn
;
1289 struct gdbarch
*gdbarch
;
1292 gdbarch
= get_regcache_arch (regcache
);
1293 pcreg
= gdbarch_pc_regnum (gdbarch
);
1297 /* We can only provide the PC register. */
1298 if (regno
>= 0 && regno
!= pcreg
)
1301 insn
= btrace_insn_get (replay
);
1302 gdb_assert (insn
!= NULL
);
1304 regcache_raw_supply (regcache
, regno
, &insn
->pc
);
1308 struct target_ops
*t
= ops
->beneath
;
1310 t
->to_fetch_registers (t
, regcache
, regno
);
1314 /* The to_store_registers method of target record-btrace. */
1317 record_btrace_store_registers (struct target_ops
*ops
,
1318 struct regcache
*regcache
, int regno
)
1320 struct target_ops
*t
;
1322 if (!record_btrace_generating_corefile
1323 && record_btrace_is_replaying (ops
, inferior_ptid
))
1324 error (_("Cannot write registers while replaying."));
1326 gdb_assert (may_write_registers
!= 0);
1329 t
->to_store_registers (t
, regcache
, regno
);
1332 /* The to_prepare_to_store method of target record-btrace. */
1335 record_btrace_prepare_to_store (struct target_ops
*ops
,
1336 struct regcache
*regcache
)
1338 struct target_ops
*t
;
1340 if (!record_btrace_generating_corefile
1341 && record_btrace_is_replaying (ops
, inferior_ptid
))
1345 t
->to_prepare_to_store (t
, regcache
);
/* The branch trace frame cache.  */

struct btrace_frame_cache
{
  /* The thread.  */
  struct thread_info *tp;

  /* The frame info.  */
  struct frame_info *frame;

  /* The branch trace function segment.  */
  const struct btrace_function *bfun;
};
1362 /* A struct btrace_frame_cache hash table indexed by NEXT. */
1364 static htab_t bfcache
;
1366 /* hash_f for htab_create_alloc of bfcache. */
1369 bfcache_hash (const void *arg
)
1371 const struct btrace_frame_cache
*cache
1372 = (const struct btrace_frame_cache
*) arg
;
1374 return htab_hash_pointer (cache
->frame
);
1377 /* eq_f for htab_create_alloc of bfcache. */
1380 bfcache_eq (const void *arg1
, const void *arg2
)
1382 const struct btrace_frame_cache
*cache1
1383 = (const struct btrace_frame_cache
*) arg1
;
1384 const struct btrace_frame_cache
*cache2
1385 = (const struct btrace_frame_cache
*) arg2
;
1387 return cache1
->frame
== cache2
->frame
;
1390 /* Create a new btrace frame cache. */
1392 static struct btrace_frame_cache
*
1393 bfcache_new (struct frame_info
*frame
)
1395 struct btrace_frame_cache
*cache
;
1398 cache
= FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache
);
1399 cache
->frame
= frame
;
1401 slot
= htab_find_slot (bfcache
, cache
, INSERT
);
1402 gdb_assert (*slot
== NULL
);
1408 /* Extract the branch trace function from a branch trace frame. */
1410 static const struct btrace_function
*
1411 btrace_get_frame_function (struct frame_info
*frame
)
1413 const struct btrace_frame_cache
*cache
;
1414 const struct btrace_function
*bfun
;
1415 struct btrace_frame_cache pattern
;
1418 pattern
.frame
= frame
;
1420 slot
= htab_find_slot (bfcache
, &pattern
, NO_INSERT
);
1424 cache
= (const struct btrace_frame_cache
*) *slot
;
1428 /* Implement stop_reason method for record_btrace_frame_unwind. */
1430 static enum unwind_stop_reason
1431 record_btrace_frame_unwind_stop_reason (struct frame_info
*this_frame
,
1434 const struct btrace_frame_cache
*cache
;
1435 const struct btrace_function
*bfun
;
1437 cache
= (const struct btrace_frame_cache
*) *this_cache
;
1439 gdb_assert (bfun
!= NULL
);
1441 if (bfun
->up
== NULL
)
1442 return UNWIND_UNAVAILABLE
;
1444 return UNWIND_NO_REASON
;
1447 /* Implement this_id method for record_btrace_frame_unwind. */
1450 record_btrace_frame_this_id (struct frame_info
*this_frame
, void **this_cache
,
1451 struct frame_id
*this_id
)
1453 const struct btrace_frame_cache
*cache
;
1454 const struct btrace_function
*bfun
;
1455 CORE_ADDR code
, special
;
1457 cache
= (const struct btrace_frame_cache
*) *this_cache
;
1460 gdb_assert (bfun
!= NULL
);
1462 while (bfun
->segment
.prev
!= NULL
)
1463 bfun
= bfun
->segment
.prev
;
1465 code
= get_frame_func (this_frame
);
1466 special
= bfun
->number
;
1468 *this_id
= frame_id_build_unavailable_stack_special (code
, special
);
1470 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1471 btrace_get_bfun_name (cache
->bfun
),
1472 core_addr_to_string_nz (this_id
->code_addr
),
1473 core_addr_to_string_nz (this_id
->special_addr
));
1476 /* Implement prev_register method for record_btrace_frame_unwind. */
1478 static struct value
*
1479 record_btrace_frame_prev_register (struct frame_info
*this_frame
,
1483 const struct btrace_frame_cache
*cache
;
1484 const struct btrace_function
*bfun
, *caller
;
1485 const struct btrace_insn
*insn
;
1486 struct gdbarch
*gdbarch
;
1490 gdbarch
= get_frame_arch (this_frame
);
1491 pcreg
= gdbarch_pc_regnum (gdbarch
);
1492 if (pcreg
< 0 || regnum
!= pcreg
)
1493 throw_error (NOT_AVAILABLE_ERROR
,
1494 _("Registers are not available in btrace record history"));
1496 cache
= (const struct btrace_frame_cache
*) *this_cache
;
1498 gdb_assert (bfun
!= NULL
);
1502 throw_error (NOT_AVAILABLE_ERROR
,
1503 _("No caller in btrace record history"));
1505 if ((bfun
->flags
& BFUN_UP_LINKS_TO_RET
) != 0)
1507 insn
= VEC_index (btrace_insn_s
, caller
->insn
, 0);
1512 insn
= VEC_last (btrace_insn_s
, caller
->insn
);
1515 pc
+= gdb_insn_length (gdbarch
, pc
);
1518 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1519 btrace_get_bfun_name (bfun
), bfun
->level
,
1520 core_addr_to_string_nz (pc
));
1522 return frame_unwind_got_address (this_frame
, regnum
, pc
);
1525 /* Implement sniffer method for record_btrace_frame_unwind. */
1528 record_btrace_frame_sniffer (const struct frame_unwind
*self
,
1529 struct frame_info
*this_frame
,
1532 const struct btrace_function
*bfun
;
1533 struct btrace_frame_cache
*cache
;
1534 struct thread_info
*tp
;
1535 struct frame_info
*next
;
1537 /* THIS_FRAME does not contain a reference to its thread. */
1538 tp
= find_thread_ptid (inferior_ptid
);
1539 gdb_assert (tp
!= NULL
);
1542 next
= get_next_frame (this_frame
);
1545 const struct btrace_insn_iterator
*replay
;
1547 replay
= tp
->btrace
.replay
;
1549 bfun
= replay
->function
;
1553 const struct btrace_function
*callee
;
1555 callee
= btrace_get_frame_function (next
);
1556 if (callee
!= NULL
&& (callee
->flags
& BFUN_UP_LINKS_TO_TAILCALL
) == 0)
1563 DEBUG ("[frame] sniffed frame for %s on level %d",
1564 btrace_get_bfun_name (bfun
), bfun
->level
);
1566 /* This is our frame. Initialize the frame cache. */
1567 cache
= bfcache_new (this_frame
);
1571 *this_cache
= cache
;
1575 /* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1578 record_btrace_tailcall_frame_sniffer (const struct frame_unwind
*self
,
1579 struct frame_info
*this_frame
,
1582 const struct btrace_function
*bfun
, *callee
;
1583 struct btrace_frame_cache
*cache
;
1584 struct frame_info
*next
;
1586 next
= get_next_frame (this_frame
);
1590 callee
= btrace_get_frame_function (next
);
1594 if ((callee
->flags
& BFUN_UP_LINKS_TO_TAILCALL
) == 0)
1601 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1602 btrace_get_bfun_name (bfun
), bfun
->level
);
1604 /* This is our frame. Initialize the frame cache. */
1605 cache
= bfcache_new (this_frame
);
1606 cache
->tp
= find_thread_ptid (inferior_ptid
);
1609 *this_cache
= cache
;
1614 record_btrace_frame_dealloc_cache (struct frame_info
*self
, void *this_cache
)
1616 struct btrace_frame_cache
*cache
;
1619 cache
= (struct btrace_frame_cache
*) this_cache
;
1621 slot
= htab_find_slot (bfcache
, cache
, NO_INSERT
);
1622 gdb_assert (slot
!= NULL
);
1624 htab_remove_elt (bfcache
, cache
);
1627 /* btrace recording does not store previous memory content, neither the stack
1628 frames content. Any unwinding would return errorneous results as the stack
1629 contents no longer matches the changed PC value restored from history.
1630 Therefore this unwinder reports any possibly unwound registers as
1633 const struct frame_unwind record_btrace_frame_unwind
=
1636 record_btrace_frame_unwind_stop_reason
,
1637 record_btrace_frame_this_id
,
1638 record_btrace_frame_prev_register
,
1640 record_btrace_frame_sniffer
,
1641 record_btrace_frame_dealloc_cache
1644 const struct frame_unwind record_btrace_tailcall_frame_unwind
=
1647 record_btrace_frame_unwind_stop_reason
,
1648 record_btrace_frame_this_id
,
1649 record_btrace_frame_prev_register
,
1651 record_btrace_tailcall_frame_sniffer
,
1652 record_btrace_frame_dealloc_cache
1655 /* Implement the to_get_unwinder method. */
1657 static const struct frame_unwind
*
1658 record_btrace_to_get_unwinder (struct target_ops
*self
)
1660 return &record_btrace_frame_unwind
;
1663 /* Implement the to_get_tailcall_unwinder method. */
1665 static const struct frame_unwind
*
1666 record_btrace_to_get_tailcall_unwinder (struct target_ops
*self
)
1668 return &record_btrace_tailcall_frame_unwind
;
1671 /* Return a human-readable string for FLAG. */
1674 btrace_thread_flag_to_str (enum btrace_thread_flag flag
)
1682 return "reverse-step";
1688 return "reverse-cont";
1697 /* Indicate that TP should be resumed according to FLAG. */
1700 record_btrace_resume_thread (struct thread_info
*tp
,
1701 enum btrace_thread_flag flag
)
1703 struct btrace_thread_info
*btinfo
;
1705 DEBUG ("resuming thread %d (%s): %x (%s)", tp
->num
,
1706 target_pid_to_str (tp
->ptid
), flag
, btrace_thread_flag_to_str (flag
));
1708 btinfo
= &tp
->btrace
;
1710 /* Fetch the latest branch trace. */
1713 /* A resume request overwrites a preceding resume or stop request. */
1714 btinfo
->flags
&= ~(BTHR_MOVE
| BTHR_STOP
);
1715 btinfo
->flags
|= flag
;
1718 /* Get the current frame for TP. */
1720 static struct frame_info
*
1721 get_thread_current_frame (struct thread_info
*tp
)
1723 struct frame_info
*frame
;
1724 ptid_t old_inferior_ptid
;
1727 /* Set INFERIOR_PTID, which is implicitly used by get_current_frame. */
1728 old_inferior_ptid
= inferior_ptid
;
1729 inferior_ptid
= tp
->ptid
;
1731 /* Clear the executing flag to allow changes to the current frame.
1732 We are not actually running, yet. We just started a reverse execution
1733 command or a record goto command.
1734 For the latter, EXECUTING is false and this has no effect.
1735 For the former, EXECUTING is true and we're in to_wait, about to
1736 move the thread. Since we need to recompute the stack, we temporarily
1737 set EXECUTING to flase. */
1738 executing
= is_executing (inferior_ptid
);
1739 set_executing (inferior_ptid
, 0);
1744 frame
= get_current_frame ();
1746 CATCH (except
, RETURN_MASK_ALL
)
1748 /* Restore the previous execution state. */
1749 set_executing (inferior_ptid
, executing
);
1751 /* Restore the previous inferior_ptid. */
1752 inferior_ptid
= old_inferior_ptid
;
1754 throw_exception (except
);
1758 /* Restore the previous execution state. */
1759 set_executing (inferior_ptid
, executing
);
1761 /* Restore the previous inferior_ptid. */
1762 inferior_ptid
= old_inferior_ptid
;
1767 /* Start replaying a thread. */
1769 static struct btrace_insn_iterator
*
1770 record_btrace_start_replaying (struct thread_info
*tp
)
1772 struct btrace_insn_iterator
*replay
;
1773 struct btrace_thread_info
*btinfo
;
1775 btinfo
= &tp
->btrace
;
1778 /* We can't start replaying without trace. */
1779 if (btinfo
->begin
== NULL
)
1782 /* GDB stores the current frame_id when stepping in order to detects steps
1784 Since frames are computed differently when we're replaying, we need to
1785 recompute those stored frames and fix them up so we can still detect
1786 subroutines after we started replaying. */
1789 struct frame_info
*frame
;
1790 struct frame_id frame_id
;
1791 int upd_step_frame_id
, upd_step_stack_frame_id
;
1793 /* The current frame without replaying - computed via normal unwind. */
1794 frame
= get_thread_current_frame (tp
);
1795 frame_id
= get_frame_id (frame
);
1797 /* Check if we need to update any stepping-related frame id's. */
1798 upd_step_frame_id
= frame_id_eq (frame_id
,
1799 tp
->control
.step_frame_id
);
1800 upd_step_stack_frame_id
= frame_id_eq (frame_id
,
1801 tp
->control
.step_stack_frame_id
);
1803 /* We start replaying at the end of the branch trace. This corresponds
1804 to the current instruction. */
1805 replay
= XNEW (struct btrace_insn_iterator
);
1806 btrace_insn_end (replay
, btinfo
);
1808 /* Skip gaps at the end of the trace. */
1809 while (btrace_insn_get (replay
) == NULL
)
1813 steps
= btrace_insn_prev (replay
, 1);
1815 error (_("No trace."));
1818 /* We're not replaying, yet. */
1819 gdb_assert (btinfo
->replay
== NULL
);
1820 btinfo
->replay
= replay
;
1822 /* Make sure we're not using any stale registers. */
1823 registers_changed_ptid (tp
->ptid
);
1825 /* The current frame with replaying - computed via btrace unwind. */
1826 frame
= get_thread_current_frame (tp
);
1827 frame_id
= get_frame_id (frame
);
1829 /* Replace stepping related frames where necessary. */
1830 if (upd_step_frame_id
)
1831 tp
->control
.step_frame_id
= frame_id
;
1832 if (upd_step_stack_frame_id
)
1833 tp
->control
.step_stack_frame_id
= frame_id
;
1835 CATCH (except
, RETURN_MASK_ALL
)
1837 xfree (btinfo
->replay
);
1838 btinfo
->replay
= NULL
;
1840 registers_changed_ptid (tp
->ptid
);
1842 throw_exception (except
);
1849 /* Stop replaying a thread. */
1852 record_btrace_stop_replaying (struct thread_info
*tp
)
1854 struct btrace_thread_info
*btinfo
;
1856 btinfo
= &tp
->btrace
;
1858 xfree (btinfo
->replay
);
1859 btinfo
->replay
= NULL
;
1861 /* Make sure we're not leaving any stale registers. */
1862 registers_changed_ptid (tp
->ptid
);
1865 /* Stop replaying TP if it is at the end of its execution history. */
1868 record_btrace_stop_replaying_at_end (struct thread_info
*tp
)
1870 struct btrace_insn_iterator
*replay
, end
;
1871 struct btrace_thread_info
*btinfo
;
1873 btinfo
= &tp
->btrace
;
1874 replay
= btinfo
->replay
;
1879 btrace_insn_end (&end
, btinfo
);
1881 if (btrace_insn_cmp (replay
, &end
) == 0)
1882 record_btrace_stop_replaying (tp
);
1885 /* The to_resume method of target record-btrace. */
1888 record_btrace_resume (struct target_ops
*ops
, ptid_t ptid
, int step
,
1889 enum gdb_signal signal
)
1891 struct thread_info
*tp
;
1892 enum btrace_thread_flag flag
, cflag
;
1894 DEBUG ("resume %s: %s%s", target_pid_to_str (ptid
),
1895 execution_direction
== EXEC_REVERSE
? "reverse-" : "",
1896 step
? "step" : "cont");
1898 /* Store the execution direction of the last resume.
1900 If there is more than one to_resume call, we have to rely on infrun
1901 to not change the execution direction in-between. */
1902 record_btrace_resume_exec_dir
= execution_direction
;
1904 /* As long as we're not replaying, just forward the request.
1906 For non-stop targets this means that no thread is replaying. In order to
1907 make progress, we may need to explicitly move replaying threads to the end
1908 of their execution history. */
1909 if ((execution_direction
!= EXEC_REVERSE
)
1910 && !record_btrace_is_replaying (ops
, minus_one_ptid
))
1913 ops
->to_resume (ops
, ptid
, step
, signal
);
1917 /* Compute the btrace thread flag for the requested move. */
1918 if (execution_direction
== EXEC_REVERSE
)
1920 flag
= step
== 0 ? BTHR_RCONT
: BTHR_RSTEP
;
1925 flag
= step
== 0 ? BTHR_CONT
: BTHR_STEP
;
1929 /* We just indicate the resume intent here. The actual stepping happens in
1930 record_btrace_wait below.
1932 For all-stop targets, we only step INFERIOR_PTID and continue others. */
1933 if (!target_is_non_stop_p ())
1935 gdb_assert (ptid_match (inferior_ptid
, ptid
));
1937 ALL_NON_EXITED_THREADS (tp
)
1938 if (ptid_match (tp
->ptid
, ptid
))
1940 if (ptid_match (tp
->ptid
, inferior_ptid
))
1941 record_btrace_resume_thread (tp
, flag
);
1943 record_btrace_resume_thread (tp
, cflag
);
1948 ALL_NON_EXITED_THREADS (tp
)
1949 if (ptid_match (tp
->ptid
, ptid
))
1950 record_btrace_resume_thread (tp
, flag
);
1953 /* Async support. */
1954 if (target_can_async_p ())
1957 mark_async_event_handler (record_btrace_async_inferior_event_handler
);
1961 /* Cancel resuming TP. */
1964 record_btrace_cancel_resume (struct thread_info
*tp
)
1966 enum btrace_thread_flag flags
;
1968 flags
= tp
->btrace
.flags
& (BTHR_MOVE
| BTHR_STOP
);
1972 DEBUG ("cancel resume thread %d (%s): %x (%s)", tp
->num
,
1973 target_pid_to_str (tp
->ptid
), flags
,
1974 btrace_thread_flag_to_str (flags
));
1976 tp
->btrace
.flags
&= ~(BTHR_MOVE
| BTHR_STOP
);
1977 record_btrace_stop_replaying_at_end (tp
);
1980 /* Return a target_waitstatus indicating that we ran out of history. */
1982 static struct target_waitstatus
1983 btrace_step_no_history (void)
1985 struct target_waitstatus status
;
1987 status
.kind
= TARGET_WAITKIND_NO_HISTORY
;
1992 /* Return a target_waitstatus indicating that a step finished. */
1994 static struct target_waitstatus
1995 btrace_step_stopped (void)
1997 struct target_waitstatus status
;
1999 status
.kind
= TARGET_WAITKIND_STOPPED
;
2000 status
.value
.sig
= GDB_SIGNAL_TRAP
;
2005 /* Return a target_waitstatus indicating that a thread was stopped as
2008 static struct target_waitstatus
2009 btrace_step_stopped_on_request (void)
2011 struct target_waitstatus status
;
2013 status
.kind
= TARGET_WAITKIND_STOPPED
;
2014 status
.value
.sig
= GDB_SIGNAL_0
;
2019 /* Return a target_waitstatus indicating a spurious stop. */
2021 static struct target_waitstatus
2022 btrace_step_spurious (void)
2024 struct target_waitstatus status
;
2026 status
.kind
= TARGET_WAITKIND_SPURIOUS
;
2031 /* Return a target_waitstatus indicating that the thread was not resumed. */
2033 static struct target_waitstatus
2034 btrace_step_no_resumed (void)
2036 struct target_waitstatus status
;
2038 status
.kind
= TARGET_WAITKIND_NO_RESUMED
;
2043 /* Return a target_waitstatus indicating that we should wait again. */
2045 static struct target_waitstatus
2046 btrace_step_again (void)
2048 struct target_waitstatus status
;
2050 status
.kind
= TARGET_WAITKIND_IGNORE
;
2055 /* Clear the record histories. */
2058 record_btrace_clear_histories (struct btrace_thread_info
*btinfo
)
2060 xfree (btinfo
->insn_history
);
2061 xfree (btinfo
->call_history
);
2063 btinfo
->insn_history
= NULL
;
2064 btinfo
->call_history
= NULL
;
2067 /* Check whether TP's current replay position is at a breakpoint. */
2070 record_btrace_replay_at_breakpoint (struct thread_info
*tp
)
2072 struct btrace_insn_iterator
*replay
;
2073 struct btrace_thread_info
*btinfo
;
2074 const struct btrace_insn
*insn
;
2075 struct inferior
*inf
;
2077 btinfo
= &tp
->btrace
;
2078 replay
= btinfo
->replay
;
2083 insn
= btrace_insn_get (replay
);
2087 inf
= find_inferior_ptid (tp
->ptid
);
2091 return record_check_stopped_by_breakpoint (inf
->aspace
, insn
->pc
,
2092 &btinfo
->stop_reason
);
2095 /* Step one instruction in forward direction. */
2097 static struct target_waitstatus
2098 record_btrace_single_step_forward (struct thread_info
*tp
)
2100 struct btrace_insn_iterator
*replay
, end
;
2101 struct btrace_thread_info
*btinfo
;
2103 btinfo
= &tp
->btrace
;
2104 replay
= btinfo
->replay
;
2106 /* We're done if we're not replaying. */
2108 return btrace_step_no_history ();
2110 /* Check if we're stepping a breakpoint. */
2111 if (record_btrace_replay_at_breakpoint (tp
))
2112 return btrace_step_stopped ();
2114 /* Skip gaps during replay. */
2119 /* We will bail out here if we continue stepping after reaching the end
2120 of the execution history. */
2121 steps
= btrace_insn_next (replay
, 1);
2123 return btrace_step_no_history ();
2125 while (btrace_insn_get (replay
) == NULL
);
2127 /* Determine the end of the instruction trace. */
2128 btrace_insn_end (&end
, btinfo
);
2130 /* The execution trace contains (and ends with) the current instruction.
2131 This instruction has not been executed, yet, so the trace really ends
2132 one instruction earlier. */
2133 if (btrace_insn_cmp (replay
, &end
) == 0)
2134 return btrace_step_no_history ();
2136 return btrace_step_spurious ();
2139 /* Step one instruction in backward direction. */
2141 static struct target_waitstatus
2142 record_btrace_single_step_backward (struct thread_info
*tp
)
2144 struct btrace_insn_iterator
*replay
;
2145 struct btrace_thread_info
*btinfo
;
2147 btinfo
= &tp
->btrace
;
2148 replay
= btinfo
->replay
;
2150 /* Start replaying if we're not already doing so. */
2152 replay
= record_btrace_start_replaying (tp
);
2154 /* If we can't step any further, we reached the end of the history.
2155 Skip gaps during replay. */
2160 steps
= btrace_insn_prev (replay
, 1);
2162 return btrace_step_no_history ();
2164 while (btrace_insn_get (replay
) == NULL
);
2166 /* Check if we're stepping a breakpoint.
2168 For reverse-stepping, this check is after the step. There is logic in
2169 infrun.c that handles reverse-stepping separately. See, for example,
2170 proceed and adjust_pc_after_break.
2172 This code assumes that for reverse-stepping, PC points to the last
2173 de-executed instruction, whereas for forward-stepping PC points to the
2174 next to-be-executed instruction. */
2175 if (record_btrace_replay_at_breakpoint (tp
))
2176 return btrace_step_stopped ();
2178 return btrace_step_spurious ();
2181 /* Step a single thread. */
2183 static struct target_waitstatus
2184 record_btrace_step_thread (struct thread_info
*tp
)
2186 struct btrace_thread_info
*btinfo
;
2187 struct target_waitstatus status
;
2188 enum btrace_thread_flag flags
;
2190 btinfo
= &tp
->btrace
;
2192 flags
= btinfo
->flags
& (BTHR_MOVE
| BTHR_STOP
);
2193 btinfo
->flags
&= ~(BTHR_MOVE
| BTHR_STOP
);
2195 DEBUG ("stepping thread %d (%s): %x (%s)", tp
->num
,
2196 target_pid_to_str (tp
->ptid
), flags
,
2197 btrace_thread_flag_to_str (flags
));
2199 /* We can't step without an execution history. */
2200 if ((flags
& BTHR_MOVE
) != 0 && btrace_is_empty (tp
))
2201 return btrace_step_no_history ();
2206 internal_error (__FILE__
, __LINE__
, _("invalid stepping type."));
2209 return btrace_step_stopped_on_request ();
2212 status
= record_btrace_single_step_forward (tp
);
2213 if (status
.kind
!= TARGET_WAITKIND_SPURIOUS
)
2216 return btrace_step_stopped ();
2219 status
= record_btrace_single_step_backward (tp
);
2220 if (status
.kind
!= TARGET_WAITKIND_SPURIOUS
)
2223 return btrace_step_stopped ();
2226 status
= record_btrace_single_step_forward (tp
);
2227 if (status
.kind
!= TARGET_WAITKIND_SPURIOUS
)
2230 btinfo
->flags
|= flags
;
2231 return btrace_step_again ();
2234 status
= record_btrace_single_step_backward (tp
);
2235 if (status
.kind
!= TARGET_WAITKIND_SPURIOUS
)
2238 btinfo
->flags
|= flags
;
2239 return btrace_step_again ();
2242 /* We keep threads moving at the end of their execution history. The to_wait
2243 method will stop the thread for whom the event is reported. */
2244 if (status
.kind
== TARGET_WAITKIND_NO_HISTORY
)
2245 btinfo
->flags
|= flags
;
2250 /* A vector of threads. */
2252 typedef struct thread_info
* tp_t
;
2255 /* Announce further events if necessary. */
2258 record_btrace_maybe_mark_async_event (const VEC (tp_t
) *moving
,
2259 const VEC (tp_t
) *no_history
)
2261 int more_moving
, more_no_history
;
2263 more_moving
= !VEC_empty (tp_t
, moving
);
2264 more_no_history
= !VEC_empty (tp_t
, no_history
);
2266 if (!more_moving
&& !more_no_history
)
2270 DEBUG ("movers pending");
2272 if (more_no_history
)
2273 DEBUG ("no-history pending");
2275 mark_async_event_handler (record_btrace_async_inferior_event_handler
);
2278 /* The to_wait method of target record-btrace. */
2281 record_btrace_wait (struct target_ops
*ops
, ptid_t ptid
,
2282 struct target_waitstatus
*status
, int options
)
2284 VEC (tp_t
) *moving
, *no_history
;
2285 struct thread_info
*tp
, *eventing
;
2286 struct cleanup
*cleanups
= make_cleanup (null_cleanup
, NULL
);
2288 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid
), options
);
2290 /* As long as we're not replaying, just forward the request. */
2291 if ((execution_direction
!= EXEC_REVERSE
)
2292 && !record_btrace_is_replaying (ops
, minus_one_ptid
))
2295 return ops
->to_wait (ops
, ptid
, status
, options
);
2301 make_cleanup (VEC_cleanup (tp_t
), &moving
);
2302 make_cleanup (VEC_cleanup (tp_t
), &no_history
);
2304 /* Keep a work list of moving threads. */
2305 ALL_NON_EXITED_THREADS (tp
)
2306 if (ptid_match (tp
->ptid
, ptid
)
2307 && ((tp
->btrace
.flags
& (BTHR_MOVE
| BTHR_STOP
)) != 0))
2308 VEC_safe_push (tp_t
, moving
, tp
);
2310 if (VEC_empty (tp_t
, moving
))
2312 *status
= btrace_step_no_resumed ();
2314 DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid
),
2315 target_waitstatus_to_string (status
));
2317 do_cleanups (cleanups
);
2321 /* Step moving threads one by one, one step each, until either one thread
2322 reports an event or we run out of threads to step.
2324 When stepping more than one thread, chances are that some threads reach
2325 the end of their execution history earlier than others. If we reported
2326 this immediately, all-stop on top of non-stop would stop all threads and
2327 resume the same threads next time. And we would report the same thread
2328 having reached the end of its execution history again.
2330 In the worst case, this would starve the other threads. But even if other
2331 threads would be allowed to make progress, this would result in far too
2332 many intermediate stops.
2334 We therefore delay the reporting of "no execution history" until we have
2335 nothing else to report. By this time, all threads should have moved to
2336 either the beginning or the end of their execution history. There will
2337 be a single user-visible stop. */
2339 while ((eventing
== NULL
) && !VEC_empty (tp_t
, moving
))
2344 while ((eventing
== NULL
) && VEC_iterate (tp_t
, moving
, ix
, tp
))
2346 *status
= record_btrace_step_thread (tp
);
2348 switch (status
->kind
)
2350 case TARGET_WAITKIND_IGNORE
:
2354 case TARGET_WAITKIND_NO_HISTORY
:
2355 VEC_safe_push (tp_t
, no_history
,
2356 VEC_ordered_remove (tp_t
, moving
, ix
));
2360 eventing
= VEC_unordered_remove (tp_t
, moving
, ix
);
2366 if (eventing
== NULL
)
2368 /* We started with at least one moving thread. This thread must have
2369 either stopped or reached the end of its execution history.
2371 In the former case, EVENTING must not be NULL.
2372 In the latter case, NO_HISTORY must not be empty. */
2373 gdb_assert (!VEC_empty (tp_t
, no_history
));
2375 /* We kept threads moving at the end of their execution history. Stop
2376 EVENTING now that we are going to report its stop. */
2377 eventing
= VEC_unordered_remove (tp_t
, no_history
, 0);
2378 eventing
->btrace
.flags
&= ~BTHR_MOVE
;
2380 *status
= btrace_step_no_history ();
2383 gdb_assert (eventing
!= NULL
);
2385 /* We kept threads replaying at the end of their execution history. Stop
2386 replaying EVENTING now that we are going to report its stop. */
2387 record_btrace_stop_replaying_at_end (eventing
);
2389 /* Stop all other threads. */
2390 if (!target_is_non_stop_p ())
2391 ALL_NON_EXITED_THREADS (tp
)
2392 record_btrace_cancel_resume (tp
);
2394 /* In async mode, we need to announce further events. */
2395 if (target_is_async_p ())
2396 record_btrace_maybe_mark_async_event (moving
, no_history
);
2398 /* Start record histories anew from the current position. */
2399 record_btrace_clear_histories (&eventing
->btrace
);
2401 /* We moved the replay position but did not update registers. */
2402 registers_changed_ptid (eventing
->ptid
);
2404 DEBUG ("wait ended by thread %d (%s): %s", eventing
->num
,
2405 target_pid_to_str (eventing
->ptid
),
2406 target_waitstatus_to_string (status
));
2408 do_cleanups (cleanups
);
2409 return eventing
->ptid
;
2412 /* The to_stop method of target record-btrace. */
2415 record_btrace_stop (struct target_ops
*ops
, ptid_t ptid
)
2417 DEBUG ("stop %s", target_pid_to_str (ptid
));
2419 /* As long as we're not replaying, just forward the request. */
2420 if ((execution_direction
!= EXEC_REVERSE
)
2421 && !record_btrace_is_replaying (ops
, minus_one_ptid
))
2424 ops
->to_stop (ops
, ptid
);
2428 struct thread_info
*tp
;
2430 ALL_NON_EXITED_THREADS (tp
)
2431 if (ptid_match (tp
->ptid
, ptid
))
2433 tp
->btrace
.flags
&= ~BTHR_MOVE
;
2434 tp
->btrace
.flags
|= BTHR_STOP
;
2439 /* The to_can_execute_reverse method of target record-btrace. */
2442 record_btrace_can_execute_reverse (struct target_ops
*self
)
2447 /* The to_stopped_by_sw_breakpoint method of target record-btrace. */
2450 record_btrace_stopped_by_sw_breakpoint (struct target_ops
*ops
)
2452 if (record_btrace_is_replaying (ops
, minus_one_ptid
))
2454 struct thread_info
*tp
= inferior_thread ();
2456 return tp
->btrace
.stop_reason
== TARGET_STOPPED_BY_SW_BREAKPOINT
;
2459 return ops
->beneath
->to_stopped_by_sw_breakpoint (ops
->beneath
);
2462 /* The to_supports_stopped_by_sw_breakpoint method of target
2466 record_btrace_supports_stopped_by_sw_breakpoint (struct target_ops
*ops
)
2468 if (record_btrace_is_replaying (ops
, minus_one_ptid
))
2471 return ops
->beneath
->to_supports_stopped_by_sw_breakpoint (ops
->beneath
);
2474 /* The to_stopped_by_sw_breakpoint method of target record-btrace. */
2477 record_btrace_stopped_by_hw_breakpoint (struct target_ops
*ops
)
2479 if (record_btrace_is_replaying (ops
, minus_one_ptid
))
2481 struct thread_info
*tp
= inferior_thread ();
2483 return tp
->btrace
.stop_reason
== TARGET_STOPPED_BY_HW_BREAKPOINT
;
2486 return ops
->beneath
->to_stopped_by_hw_breakpoint (ops
->beneath
);
2489 /* The to_supports_stopped_by_hw_breakpoint method of target
2493 record_btrace_supports_stopped_by_hw_breakpoint (struct target_ops
*ops
)
2495 if (record_btrace_is_replaying (ops
, minus_one_ptid
))
2498 return ops
->beneath
->to_supports_stopped_by_hw_breakpoint (ops
->beneath
);
2501 /* The to_update_thread_list method of target record-btrace. */
2504 record_btrace_update_thread_list (struct target_ops
*ops
)
2506 /* We don't add or remove threads during replay. */
2507 if (record_btrace_is_replaying (ops
, minus_one_ptid
))
2510 /* Forward the request. */
2512 ops
->to_update_thread_list (ops
);
2515 /* The to_thread_alive method of target record-btrace. */
2518 record_btrace_thread_alive (struct target_ops
*ops
, ptid_t ptid
)
2520 /* We don't add or remove threads during replay. */
2521 if (record_btrace_is_replaying (ops
, minus_one_ptid
))
2522 return find_thread_ptid (ptid
) != NULL
;
2524 /* Forward the request. */
2526 return ops
->to_thread_alive (ops
, ptid
);
2529 /* Set the replay branch trace instruction iterator. If IT is NULL, replay
2533 record_btrace_set_replay (struct thread_info
*tp
,
2534 const struct btrace_insn_iterator
*it
)
2536 struct btrace_thread_info
*btinfo
;
2538 btinfo
= &tp
->btrace
;
2540 if (it
== NULL
|| it
->function
== NULL
)
2541 record_btrace_stop_replaying (tp
);
2544 if (btinfo
->replay
== NULL
)
2545 record_btrace_start_replaying (tp
);
2546 else if (btrace_insn_cmp (btinfo
->replay
, it
) == 0)
2549 *btinfo
->replay
= *it
;
2550 registers_changed_ptid (tp
->ptid
);
2553 /* Start anew from the new replay position. */
2554 record_btrace_clear_histories (btinfo
);
2556 stop_pc
= regcache_read_pc (get_current_regcache ());
2557 print_stack_frame (get_selected_frame (NULL
), 1, SRC_AND_LOC
, 1);
2560 /* The to_goto_record_begin method of target record-btrace. */
2563 record_btrace_goto_begin (struct target_ops
*self
)
2565 struct thread_info
*tp
;
2566 struct btrace_insn_iterator begin
;
2568 tp
= require_btrace_thread ();
2570 btrace_insn_begin (&begin
, &tp
->btrace
);
2571 record_btrace_set_replay (tp
, &begin
);
2574 /* The to_goto_record_end method of target record-btrace. */
2577 record_btrace_goto_end (struct target_ops
*ops
)
2579 struct thread_info
*tp
;
2581 tp
= require_btrace_thread ();
2583 record_btrace_set_replay (tp
, NULL
);
2586 /* The to_goto_record method of target record-btrace. */
2589 record_btrace_goto (struct target_ops
*self
, ULONGEST insn
)
2591 struct thread_info
*tp
;
2592 struct btrace_insn_iterator it
;
2593 unsigned int number
;
2598 /* Check for wrap-arounds. */
2600 error (_("Instruction number out of range."));
2602 tp
= require_btrace_thread ();
2604 found
= btrace_find_insn_by_number (&it
, &tp
->btrace
, number
);
2606 error (_("No such instruction."));
2608 record_btrace_set_replay (tp
, &it
);
2611 /* The to_record_stop_replaying method of target record-btrace. */
2614 record_btrace_stop_replaying_all (struct target_ops
*self
)
2616 struct thread_info
*tp
;
2618 ALL_NON_EXITED_THREADS (tp
)
2619 record_btrace_stop_replaying (tp
);
2622 /* The to_execution_direction target method. */
2624 static enum exec_direction_kind
2625 record_btrace_execution_direction (struct target_ops
*self
)
2627 return record_btrace_resume_exec_dir
;
2630 /* The to_prepare_to_generate_core target method. */
2633 record_btrace_prepare_to_generate_core (struct target_ops
*self
)
2635 record_btrace_generating_corefile
= 1;
2638 /* The to_done_generating_core target method. */
2641 record_btrace_done_generating_core (struct target_ops
*self
)
2643 record_btrace_generating_corefile
= 0;
2646 /* Initialize the record-btrace target ops. */
2649 init_record_btrace_ops (void)
2651 struct target_ops
*ops
;
2653 ops
= &record_btrace_ops
;
2654 ops
->to_shortname
= "record-btrace";
2655 ops
->to_longname
= "Branch tracing target";
2656 ops
->to_doc
= "Collect control-flow trace and provide the execution history.";
2657 ops
->to_open
= record_btrace_open
;
2658 ops
->to_close
= record_btrace_close
;
2659 ops
->to_async
= record_btrace_async
;
2660 ops
->to_detach
= record_detach
;
2661 ops
->to_disconnect
= record_disconnect
;
2662 ops
->to_mourn_inferior
= record_mourn_inferior
;
2663 ops
->to_kill
= record_kill
;
2664 ops
->to_stop_recording
= record_btrace_stop_recording
;
2665 ops
->to_info_record
= record_btrace_info
;
2666 ops
->to_insn_history
= record_btrace_insn_history
;
2667 ops
->to_insn_history_from
= record_btrace_insn_history_from
;
2668 ops
->to_insn_history_range
= record_btrace_insn_history_range
;
2669 ops
->to_call_history
= record_btrace_call_history
;
2670 ops
->to_call_history_from
= record_btrace_call_history_from
;
2671 ops
->to_call_history_range
= record_btrace_call_history_range
;
2672 ops
->to_record_is_replaying
= record_btrace_is_replaying
;
2673 ops
->to_record_will_replay
= record_btrace_will_replay
;
2674 ops
->to_record_stop_replaying
= record_btrace_stop_replaying_all
;
2675 ops
->to_xfer_partial
= record_btrace_xfer_partial
;
2676 ops
->to_remove_breakpoint
= record_btrace_remove_breakpoint
;
2677 ops
->to_insert_breakpoint
= record_btrace_insert_breakpoint
;
2678 ops
->to_fetch_registers
= record_btrace_fetch_registers
;
2679 ops
->to_store_registers
= record_btrace_store_registers
;
2680 ops
->to_prepare_to_store
= record_btrace_prepare_to_store
;
2681 ops
->to_get_unwinder
= &record_btrace_to_get_unwinder
;
2682 ops
->to_get_tailcall_unwinder
= &record_btrace_to_get_tailcall_unwinder
;
2683 ops
->to_resume
= record_btrace_resume
;
2684 ops
->to_wait
= record_btrace_wait
;
2685 ops
->to_stop
= record_btrace_stop
;
2686 ops
->to_update_thread_list
= record_btrace_update_thread_list
;
2687 ops
->to_thread_alive
= record_btrace_thread_alive
;
2688 ops
->to_goto_record_begin
= record_btrace_goto_begin
;
2689 ops
->to_goto_record_end
= record_btrace_goto_end
;
2690 ops
->to_goto_record
= record_btrace_goto
;
2691 ops
->to_can_execute_reverse
= record_btrace_can_execute_reverse
;
2692 ops
->to_stopped_by_sw_breakpoint
= record_btrace_stopped_by_sw_breakpoint
;
2693 ops
->to_supports_stopped_by_sw_breakpoint
2694 = record_btrace_supports_stopped_by_sw_breakpoint
;
2695 ops
->to_stopped_by_hw_breakpoint
= record_btrace_stopped_by_hw_breakpoint
;
2696 ops
->to_supports_stopped_by_hw_breakpoint
2697 = record_btrace_supports_stopped_by_hw_breakpoint
;
2698 ops
->to_execution_direction
= record_btrace_execution_direction
;
2699 ops
->to_prepare_to_generate_core
= record_btrace_prepare_to_generate_core
;
2700 ops
->to_done_generating_core
= record_btrace_done_generating_core
;
2701 ops
->to_stratum
= record_stratum
;
2702 ops
->to_magic
= OPS_MAGIC
;
2705 /* Start recording in BTS format. */
2708 cmd_record_btrace_bts_start (char *args
, int from_tty
)
2710 if (args
!= NULL
&& *args
!= 0)
2711 error (_("Invalid argument."));
2713 record_btrace_conf
.format
= BTRACE_FORMAT_BTS
;
2717 execute_command ("target record-btrace", from_tty
);
2719 CATCH (exception
, RETURN_MASK_ALL
)
2721 record_btrace_conf
.format
= BTRACE_FORMAT_NONE
;
2722 throw_exception (exception
);
2727 /* Start recording Intel(R) Processor Trace. */
2730 cmd_record_btrace_pt_start (char *args
, int from_tty
)
2732 if (args
!= NULL
&& *args
!= 0)
2733 error (_("Invalid argument."));
2735 record_btrace_conf
.format
= BTRACE_FORMAT_PT
;
2739 execute_command ("target record-btrace", from_tty
);
2741 CATCH (exception
, RETURN_MASK_ALL
)
2743 record_btrace_conf
.format
= BTRACE_FORMAT_NONE
;
2744 throw_exception (exception
);
2749 /* Alias for "target record". */
2752 cmd_record_btrace_start (char *args
, int from_tty
)
2754 if (args
!= NULL
&& *args
!= 0)
2755 error (_("Invalid argument."));
2757 record_btrace_conf
.format
= BTRACE_FORMAT_PT
;
2761 execute_command ("target record-btrace", from_tty
);
2763 CATCH (exception
, RETURN_MASK_ALL
)
2765 record_btrace_conf
.format
= BTRACE_FORMAT_BTS
;
2769 execute_command ("target record-btrace", from_tty
);
2771 CATCH (exception
, RETURN_MASK_ALL
)
2773 record_btrace_conf
.format
= BTRACE_FORMAT_NONE
;
2774 throw_exception (exception
);
2781 /* The "set record btrace" command. */
2784 cmd_set_record_btrace (char *args
, int from_tty
)
2786 cmd_show_list (set_record_btrace_cmdlist
, from_tty
, "");
2789 /* The "show record btrace" command. */
2792 cmd_show_record_btrace (char *args
, int from_tty
)
2794 cmd_show_list (show_record_btrace_cmdlist
, from_tty
, "");
2797 /* The "show record btrace replay-memory-access" command. */
2800 cmd_show_replay_memory_access (struct ui_file
*file
, int from_tty
,
2801 struct cmd_list_element
*c
, const char *value
)
2803 fprintf_filtered (gdb_stdout
, _("Replay memory access is %s.\n"),
2804 replay_memory_access
);
2807 /* The "set record btrace bts" command. */
2810 cmd_set_record_btrace_bts (char *args
, int from_tty
)
2812 printf_unfiltered (_("\"set record btrace bts\" must be followed "
2813 "by an appropriate subcommand.\n"));
2814 help_list (set_record_btrace_bts_cmdlist
, "set record btrace bts ",
2815 all_commands
, gdb_stdout
);
2818 /* The "show record btrace bts" command. */
2821 cmd_show_record_btrace_bts (char *args
, int from_tty
)
2823 cmd_show_list (show_record_btrace_bts_cmdlist
, from_tty
, "");
2826 /* The "set record btrace pt" command. */
2829 cmd_set_record_btrace_pt (char *args
, int from_tty
)
2831 printf_unfiltered (_("\"set record btrace pt\" must be followed "
2832 "by an appropriate subcommand.\n"));
2833 help_list (set_record_btrace_pt_cmdlist
, "set record btrace pt ",
2834 all_commands
, gdb_stdout
);
2837 /* The "show record btrace pt" command. */
2840 cmd_show_record_btrace_pt (char *args
, int from_tty
)
2842 cmd_show_list (show_record_btrace_pt_cmdlist
, from_tty
, "");
/* The "record bts buffer-size" show value function.  */

static void
show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
				   struct cmd_list_element *c,
				   const char *value)
{
  fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
		    value);
}
/* The "record pt buffer-size" show value function.  */

static void
show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
				  struct cmd_list_element *c,
				  const char *value)
{
  fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
		    value);
}
2867 void _initialize_record_btrace (void);
2869 /* Initialize btrace commands. */
2872 _initialize_record_btrace (void)
2874 add_prefix_cmd ("btrace", class_obscure
, cmd_record_btrace_start
,
2875 _("Start branch trace recording."), &record_btrace_cmdlist
,
2876 "record btrace ", 0, &record_cmdlist
);
2877 add_alias_cmd ("b", "btrace", class_obscure
, 1, &record_cmdlist
);
2879 add_cmd ("bts", class_obscure
, cmd_record_btrace_bts_start
,
2881 Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
2882 The processor stores a from/to record for each branch into a cyclic buffer.\n\
2883 This format may not be available on all processors."),
2884 &record_btrace_cmdlist
);
2885 add_alias_cmd ("bts", "btrace bts", class_obscure
, 1, &record_cmdlist
);
2887 add_cmd ("pt", class_obscure
, cmd_record_btrace_pt_start
,
2889 Start branch trace recording in Intel(R) Processor Trace format.\n\n\
2890 This format may not be available on all processors."),
2891 &record_btrace_cmdlist
);
2892 add_alias_cmd ("pt", "btrace pt", class_obscure
, 1, &record_cmdlist
);
2894 add_prefix_cmd ("btrace", class_support
, cmd_set_record_btrace
,
2895 _("Set record options"), &set_record_btrace_cmdlist
,
2896 "set record btrace ", 0, &set_record_cmdlist
);
2898 add_prefix_cmd ("btrace", class_support
, cmd_show_record_btrace
,
2899 _("Show record options"), &show_record_btrace_cmdlist
,
2900 "show record btrace ", 0, &show_record_cmdlist
);
2902 add_setshow_enum_cmd ("replay-memory-access", no_class
,
2903 replay_memory_access_types
, &replay_memory_access
, _("\
2904 Set what memory accesses are allowed during replay."), _("\
2905 Show what memory accesses are allowed during replay."),
2906 _("Default is READ-ONLY.\n\n\
2907 The btrace record target does not trace data.\n\
2908 The memory therefore corresponds to the live target and not \
2909 to the current replay position.\n\n\
2910 When READ-ONLY, allow accesses to read-only memory during replay.\n\
2911 When READ-WRITE, allow accesses to read-only and read-write memory during \
2913 NULL
, cmd_show_replay_memory_access
,
2914 &set_record_btrace_cmdlist
,
2915 &show_record_btrace_cmdlist
);
2917 add_prefix_cmd ("bts", class_support
, cmd_set_record_btrace_bts
,
2918 _("Set record btrace bts options"),
2919 &set_record_btrace_bts_cmdlist
,
2920 "set record btrace bts ", 0, &set_record_btrace_cmdlist
);
2922 add_prefix_cmd ("bts", class_support
, cmd_show_record_btrace_bts
,
2923 _("Show record btrace bts options"),
2924 &show_record_btrace_bts_cmdlist
,
2925 "show record btrace bts ", 0, &show_record_btrace_cmdlist
);
2927 add_setshow_uinteger_cmd ("buffer-size", no_class
,
2928 &record_btrace_conf
.bts
.size
,
2929 _("Set the record/replay bts buffer size."),
2930 _("Show the record/replay bts buffer size."), _("\
2931 When starting recording request a trace buffer of this size. \
2932 The actual buffer size may differ from the requested size. \
2933 Use \"info record\" to see the actual buffer size.\n\n\
2934 Bigger buffers allow longer recording but also take more time to process \
2935 the recorded execution trace.\n\n\
2936 The trace buffer size may not be changed while recording."), NULL
,
2937 show_record_bts_buffer_size_value
,
2938 &set_record_btrace_bts_cmdlist
,
2939 &show_record_btrace_bts_cmdlist
);
2941 add_prefix_cmd ("pt", class_support
, cmd_set_record_btrace_pt
,
2942 _("Set record btrace pt options"),
2943 &set_record_btrace_pt_cmdlist
,
2944 "set record btrace pt ", 0, &set_record_btrace_cmdlist
);
2946 add_prefix_cmd ("pt", class_support
, cmd_show_record_btrace_pt
,
2947 _("Show record btrace pt options"),
2948 &show_record_btrace_pt_cmdlist
,
2949 "show record btrace pt ", 0, &show_record_btrace_cmdlist
);
2951 add_setshow_uinteger_cmd ("buffer-size", no_class
,
2952 &record_btrace_conf
.pt
.size
,
2953 _("Set the record/replay pt buffer size."),
2954 _("Show the record/replay pt buffer size."), _("\
2955 Bigger buffers allow longer recording but also take more time to process \
2956 the recorded execution.\n\
2957 The actual buffer size may differ from the requested size. Use \"info record\" \
2958 to see the actual buffer size."), NULL
, show_record_pt_buffer_size_value
,
2959 &set_record_btrace_pt_cmdlist
,
2960 &show_record_btrace_pt_cmdlist
);
2962 init_record_btrace_ops ();
2963 add_target (&record_btrace_ops
);
2965 bfcache
= htab_create_alloc (50, bfcache_hash
, bfcache_eq
, NULL
,
2968 record_btrace_conf
.bts
.size
= 64 * 1024;
2969 record_btrace_conf
.pt
.size
= 16 * 1024;