/* Branch trace support for GDB, the GNU debugger.

   Copyright (C) 2013-2020 Free Software Foundation, Inc.

   Contributed by Intel Corp. <markus.t.metzger@intel.com>

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "record.h"
#include "record-btrace.h"
#include "gdbthread.h"
#include "target.h"
#include "gdbcmd.h"
#include "disasm.h"
#include "observable.h"
#include "cli/cli-utils.h"
#include "source.h"
#include "ui-out.h"
#include "symtab.h"
#include "filenames.h"
#include "regcache.h"
#include "frame-unwind.h"
#include "hashtab.h"
#include "infrun.h"
#include "gdbsupport/event-loop.h"
#include "inf-loop.h"
#include "inferior.h"
#include <algorithm>
#include "gdbarch.h"
#include "cli/cli-style.h"
#include "async-event.h"

static const target_info record_btrace_target_info = {
  "record-btrace",
  N_("Branch tracing target"),
  N_("Collect control-flow trace and provide the execution history.")
};

/* The target_ops of record-btrace.  */

class record_btrace_target final : public target_ops
{
public:
  const target_info &info () const override
  { return record_btrace_target_info; }

  strata stratum () const override { return record_stratum; }

  void close () override;
  void async (int) override;

  void detach (inferior *inf, int from_tty) override
  { record_detach (this, inf, from_tty); }

  void disconnect (const char *, int) override;

  void mourn_inferior () override
  { record_mourn_inferior (this); }

  void kill () override
  { record_kill (this); }

  enum record_method record_method (ptid_t ptid) override;

  void stop_recording () override;
  void info_record () override;

  void insn_history (int size, gdb_disassembly_flags flags) override;
  void insn_history_from (ULONGEST from, int size,
			  gdb_disassembly_flags flags) override;
  void insn_history_range (ULONGEST begin, ULONGEST end,
			   gdb_disassembly_flags flags) override;
  void call_history (int size, record_print_flags flags) override;
  void call_history_from (ULONGEST begin, int size, record_print_flags flags)
    override;
  void call_history_range (ULONGEST begin, ULONGEST end,
			   record_print_flags flags) override;

  bool record_is_replaying (ptid_t ptid) override;
  bool record_will_replay (ptid_t ptid, int dir) override;
  void record_stop_replaying () override;

  enum target_xfer_status xfer_partial (enum target_object object,
					const char *annex,
					gdb_byte *readbuf,
					const gdb_byte *writebuf,
					ULONGEST offset, ULONGEST len,
					ULONGEST *xfered_len) override;

  int insert_breakpoint (struct gdbarch *,
			 struct bp_target_info *) override;
  int remove_breakpoint (struct gdbarch *, struct bp_target_info *,
			 enum remove_bp_reason) override;

  void fetch_registers (struct regcache *, int) override;

  void store_registers (struct regcache *, int) override;
  void prepare_to_store (struct regcache *) override;

  const struct frame_unwind *get_unwinder () override;

  const struct frame_unwind *get_tailcall_unwinder () override;

  void commit_resume () override;
  void resume (ptid_t, int, enum gdb_signal) override;
  ptid_t wait (ptid_t, struct target_waitstatus *, int) override;

  void stop (ptid_t) override;
  void update_thread_list () override;
  bool thread_alive (ptid_t ptid) override;
  void goto_record_begin () override;
  void goto_record_end () override;
  void goto_record (ULONGEST insn) override;

  bool can_execute_reverse () override;

  bool stopped_by_sw_breakpoint () override;
  bool supports_stopped_by_sw_breakpoint () override;

  bool stopped_by_hw_breakpoint () override;
  bool supports_stopped_by_hw_breakpoint () override;

  enum exec_direction_kind execution_direction () override;
  void prepare_to_generate_core () override;
  void done_generating_core () override;
};

static record_btrace_target record_btrace_ops;

/* Initialize the record-btrace target ops.  */

/* Token associated with a new-thread observer enabling branch tracing
   for the new thread.  */
static const gdb::observers::token record_btrace_thread_observer_token {};

/* Memory access types used in set/show record btrace replay-memory-access.  */
static const char replay_memory_access_read_only[] = "read-only";
static const char replay_memory_access_read_write[] = "read-write";
static const char *const replay_memory_access_types[] =
{
  replay_memory_access_read_only,
  replay_memory_access_read_write,
  NULL
};

/* The currently allowed replay memory access type.  */
static const char *replay_memory_access = replay_memory_access_read_only;

/* The cpu state kinds.  */
enum record_btrace_cpu_state_kind
{
  CS_AUTO,
  CS_NONE,
  CS_CPU
};

/* The current cpu state.  */
static enum record_btrace_cpu_state_kind record_btrace_cpu_state = CS_AUTO;

/* The current cpu for trace decode.  */
static struct btrace_cpu record_btrace_cpu;

/* Command lists for "set/show record btrace".  */
static struct cmd_list_element *set_record_btrace_cmdlist;
static struct cmd_list_element *show_record_btrace_cmdlist;

/* The execution direction of the last resume we got.  See record-full.c.  */
static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;

/* The async event handler for reverse/replay execution.  */
static struct async_event_handler *record_btrace_async_inferior_event_handler;

/* A flag indicating that we are currently generating a core file.  */
static int record_btrace_generating_corefile;

/* The current branch trace configuration.  */
static struct btrace_config record_btrace_conf;

/* Command list for "record btrace".  */
static struct cmd_list_element *record_btrace_cmdlist;

/* Command lists for "set/show record btrace bts".  */
static struct cmd_list_element *set_record_btrace_bts_cmdlist;
static struct cmd_list_element *show_record_btrace_bts_cmdlist;

/* Command lists for "set/show record btrace pt".  */
static struct cmd_list_element *set_record_btrace_pt_cmdlist;
static struct cmd_list_element *show_record_btrace_pt_cmdlist;

/* Command list for "set record btrace cpu".  */
static struct cmd_list_element *set_record_btrace_cpu_cmdlist;

/* Print a record-btrace debug message.  Use do ... while (0) to avoid
   ambiguities when used in if statements.  */

#define DEBUG(msg, args...)						\
  do									\
    {									\
      if (record_debug != 0)						\
	fprintf_unfiltered (gdb_stdlog,					\
			    "[record-btrace] " msg "\n", ##args);	\
    }									\
  while (0)
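
/* Illustrative note (not part of the upstream sources): the do ... while (0)
   wrapper makes DEBUG expand to a single statement, so a use such as

     if (some_condition)
       DEBUG ("resume");
     else
       do_something_else ();

   parses as intended.  With a bare if inside the macro, the else would bind
   to the macro's internal if instead of the outer one.  */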

/* Return the cpu configured by the user.  Returns NULL if the cpu was
   configured as auto.  */
const struct btrace_cpu *
record_btrace_get_cpu (void)
{
  switch (record_btrace_cpu_state)
    {
    case CS_AUTO:
      return nullptr;

    case CS_NONE:
      record_btrace_cpu.vendor = CV_UNKNOWN;
      /* Fall through.  */
    case CS_CPU:
      return &record_btrace_cpu;
    }

  error (_("Internal error: bad record btrace cpu state."));
}

/* Update the branch trace for the current thread and return a pointer to its
   thread_info.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct thread_info *
require_btrace_thread (void)
{
  DEBUG ("require");

  if (inferior_ptid == null_ptid)
    error (_("No thread."));

  thread_info *tp = inferior_thread ();

  validate_registers_access ();

  btrace_fetch (tp, record_btrace_get_cpu ());

  if (btrace_is_empty (tp))
    error (_("No trace."));

  return tp;
}

/* Update the branch trace for the current thread and return a pointer to its
   branch trace information struct.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct btrace_thread_info *
require_btrace (void)
{
  struct thread_info *tp;

  tp = require_btrace_thread ();

  return &tp->btrace;
}

/* Enable branch tracing for one thread.  Warn on errors.  */

static void
record_btrace_enable_warn (struct thread_info *tp)
{
  try
    {
      btrace_enable (tp, &record_btrace_conf);
    }
  catch (const gdb_exception_error &error)
    {
      warning ("%s", error.what ());
    }
}

/* Enable automatic tracing of new threads.  */

static void
record_btrace_auto_enable (void)
{
  DEBUG ("attach thread observer");

  gdb::observers::new_thread.attach (record_btrace_enable_warn,
				     record_btrace_thread_observer_token);
}

/* Disable automatic tracing of new threads.  */

static void
record_btrace_auto_disable (void)
{
  DEBUG ("detach thread observer");

  gdb::observers::new_thread.detach (record_btrace_thread_observer_token);
}

/* The record-btrace async event handler function.  */

static void
record_btrace_handle_async_inferior_event (gdb_client_data data)
{
  inferior_event_handler (INF_REG_EVENT, NULL);
}

/* See record-btrace.h.  */

void
record_btrace_push_target (void)
{
  const char *format;

  record_btrace_auto_enable ();

  push_target (&record_btrace_ops);

  record_btrace_async_inferior_event_handler
    = create_async_event_handler (record_btrace_handle_async_inferior_event,
				  NULL);
  record_btrace_generating_corefile = 0;

  format = btrace_format_short_string (record_btrace_conf.format);
  gdb::observers::record_changed.notify (current_inferior (), 1, "btrace",
					 format);
}

/* Disable btrace on a set of threads on scope exit.  */

struct scoped_btrace_disable
{
  scoped_btrace_disable () = default;

  DISABLE_COPY_AND_ASSIGN (scoped_btrace_disable);

  ~scoped_btrace_disable ()
  {
    for (thread_info *tp : m_threads)
      btrace_disable (tp);
  }

  void add_thread (thread_info *thread)
  {
    m_threads.push_front (thread);
  }

  void discard ()
  {
    m_threads.clear ();
  }

private:
  std::forward_list<thread_info *> m_threads;
};
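
/* Illustrative note (not from the upstream sources): scoped_btrace_disable
   follows the commit/rollback idiom.  Threads are registered with add_thread
   as tracing is enabled for them; if the enclosing scope exits early, e.g.
   via an error, the destructor rolls tracing back for all registered
   threads, while a final call to discard commits the result and keeps
   tracing enabled, as in record_btrace_target_open below.  */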

/* Open target record-btrace.  */

static void
record_btrace_target_open (const char *args, int from_tty)
{
  /* If we fail to enable btrace for one thread, disable it for the threads for
     which it was successfully enabled.  */
  scoped_btrace_disable btrace_disable;

  DEBUG ("open");

  record_preopen ();

  if (!target_has_execution)
    error (_("The program is not being run."));

  for (thread_info *tp : all_non_exited_threads ())
    if (args == NULL || *args == 0 || number_is_in_list (args, tp->global_num))
      {
	btrace_enable (tp, &record_btrace_conf);

	btrace_disable.add_thread (tp);
      }

  record_btrace_push_target ();

  btrace_disable.discard ();
}

/* The stop_recording method of target record-btrace.  */

void
record_btrace_target::stop_recording ()
{
  DEBUG ("stop recording");

  record_btrace_auto_disable ();

  for (thread_info *tp : all_non_exited_threads ())
    if (tp->btrace.target != NULL)
      btrace_disable (tp);
}

/* The disconnect method of target record-btrace.  */

void
record_btrace_target::disconnect (const char *args,
				  int from_tty)
{
  struct target_ops *beneath = this->beneath ();

  /* Do not stop recording, just clean up GDB side.  */
  unpush_target (this);

  /* Forward disconnect.  */
  beneath->disconnect (args, from_tty);
}

/* The close method of target record-btrace.  */

void
record_btrace_target::close ()
{
  if (record_btrace_async_inferior_event_handler != NULL)
    delete_async_event_handler (&record_btrace_async_inferior_event_handler);

  /* Make sure automatic recording gets disabled even if we did not stop
     recording before closing the record-btrace target.  */
  record_btrace_auto_disable ();

  /* We should have already stopped recording.
     Tear down btrace in case we have not.  */
  for (thread_info *tp : all_non_exited_threads ())
    btrace_teardown (tp);
}

/* The async method of target record-btrace.  */

void
record_btrace_target::async (int enable)
{
  if (enable)
    mark_async_event_handler (record_btrace_async_inferior_event_handler);
  else
    clear_async_event_handler (record_btrace_async_inferior_event_handler);

  this->beneath ()->async (enable);
}

/* Adjusts the size and returns a human readable size suffix.  */

static const char *
record_btrace_adjust_size (unsigned int *size)
{
  unsigned int sz;

  sz = *size;

  if ((sz & ((1u << 30) - 1)) == 0)
    {
      *size = sz >> 30;
      return "GB";
    }
  else if ((sz & ((1u << 20) - 1)) == 0)
    {
      *size = sz >> 20;
      return "MB";
    }
  else if ((sz & ((1u << 10) - 1)) == 0)
    {
      *size = sz >> 10;
      return "kB";
    }
  else
    return "";
}
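
/* Example (illustrative, not from the upstream sources): only sizes that are
   an exact multiple of a power-of-two unit are rewritten, e.g.

     *size = 2048      -> returns "kB", *size becomes 2
     *size = 1u << 30  -> returns "GB", *size becomes 1
     *size = 1536      -> returns "",   *size stays 1536

   so a buffer size that is not a whole number of kB/MB/GB is printed in
   bytes.  */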

/* Print a BTS configuration.  */

static void
record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
{
  const char *suffix;
  unsigned int size;

  size = conf->size;
  if (size > 0)
    {
      suffix = record_btrace_adjust_size (&size);
      printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
    }
}

/* Print an Intel Processor Trace configuration.  */

static void
record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
{
  const char *suffix;
  unsigned int size;

  size = conf->size;
  if (size > 0)
    {
      suffix = record_btrace_adjust_size (&size);
      printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
    }
}

/* Print a branch tracing configuration.  */

static void
record_btrace_print_conf (const struct btrace_config *conf)
{
  printf_unfiltered (_("Recording format: %s.\n"),
		     btrace_format_string (conf->format));

  switch (conf->format)
    {
    case BTRACE_FORMAT_NONE:
      return;

    case BTRACE_FORMAT_BTS:
      record_btrace_print_bts_conf (&conf->bts);
      return;

    case BTRACE_FORMAT_PT:
      record_btrace_print_pt_conf (&conf->pt);
      return;
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
}

/* The info_record method of target record-btrace.  */

void
record_btrace_target::info_record ()
{
  struct btrace_thread_info *btinfo;
  const struct btrace_config *conf;
  struct thread_info *tp;
  unsigned int insns, calls, gaps;

  DEBUG ("info");

  if (inferior_ptid == null_ptid)
    error (_("No thread."));

  tp = inferior_thread ();

  validate_registers_access ();

  btinfo = &tp->btrace;

  conf = ::btrace_conf (btinfo);
  if (conf != NULL)
    record_btrace_print_conf (conf);

  btrace_fetch (tp, record_btrace_get_cpu ());

  insns = 0;
  calls = 0;
  gaps = 0;

  if (!btrace_is_empty (tp))
    {
      struct btrace_call_iterator call;
      struct btrace_insn_iterator insn;

      btrace_call_end (&call, btinfo);
      btrace_call_prev (&call, 1);
      calls = btrace_call_number (&call);

      btrace_insn_end (&insn, btinfo);
      insns = btrace_insn_number (&insn);

      /* If the last instruction is not a gap, it is the current instruction
	 that is not actually part of the record.  */
      if (btrace_insn_get (&insn) != NULL)
	insns -= 1;

      gaps = btinfo->ngaps;
    }

  printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
		       "for thread %s (%s).\n"), insns, calls, gaps,
		     print_thread_id (tp),
		     target_pid_to_str (tp->ptid).c_str ());

  if (btrace_is_replaying (tp))
    printf_unfiltered (_("Replay in progress.  At instruction %u.\n"),
		       btrace_insn_number (btinfo->replay));
}

/* Print a decode error.  */

static void
btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
			    enum btrace_format format)
{
  const char *errstr = btrace_decode_error (format, errcode);

  uiout->text (_("["));
  /* ERRCODE > 0 indicates notifications on BTRACE_FORMAT_PT.  */
  if (!(format == BTRACE_FORMAT_PT && errcode > 0))
    {
      uiout->text (_("decode error ("));
      uiout->field_signed ("errcode", errcode);
      uiout->text (_("): "));
    }
  uiout->text (errstr);
  uiout->text (_("]\n"));
}

/* A range of source lines.  */

struct btrace_line_range
{
  /* The symtab this line is from.  */
  struct symtab *symtab;

  /* The first line (inclusive).  */
  int begin;

  /* The last line (exclusive).  */
  int end;
};

/* Construct a line range.  */

static struct btrace_line_range
btrace_mk_line_range (struct symtab *symtab, int begin, int end)
{
  struct btrace_line_range range;

  range.symtab = symtab;
  range.begin = begin;
  range.end = end;

  return range;
}

/* Add a line to a line range.  */

static struct btrace_line_range
btrace_line_range_add (struct btrace_line_range range, int line)
{
  if (range.end <= range.begin)
    {
      /* This is the first entry.  */
      range.begin = line;
      range.end = line + 1;
    }
  else if (line < range.begin)
    range.begin = line;
  else if (range.end < line)
    range.end = line;

  return range;
}

/* Return non-zero if RANGE is empty, zero otherwise.  */

static int
btrace_line_range_is_empty (struct btrace_line_range range)
{
  return range.end <= range.begin;
}

/* Return non-zero if LHS contains RHS, zero otherwise.  */

static int
btrace_line_range_contains_range (struct btrace_line_range lhs,
				  struct btrace_line_range rhs)
{
  return ((lhs.symtab == rhs.symtab)
	  && (lhs.begin <= rhs.begin)
	  && (rhs.end <= lhs.end));
}

/* Find the line range associated with PC.  */

static struct btrace_line_range
btrace_find_line_range (CORE_ADDR pc)
{
  struct btrace_line_range range;
  struct linetable_entry *lines;
  struct linetable *ltable;
  struct symtab *symtab;
  int nlines, i;

  symtab = find_pc_line_symtab (pc);
  if (symtab == NULL)
    return btrace_mk_line_range (NULL, 0, 0);

  ltable = SYMTAB_LINETABLE (symtab);
  if (ltable == NULL)
    return btrace_mk_line_range (symtab, 0, 0);

  nlines = ltable->nitems;
  lines = ltable->item;
  if (nlines <= 0)
    return btrace_mk_line_range (symtab, 0, 0);

  range = btrace_mk_line_range (symtab, 0, 0);
  for (i = 0; i < nlines - 1; i++)
    {
      /* The test of is_stmt here was added when the is_stmt field was
	 introduced to the 'struct linetable_entry' structure.  This
	 ensured that this loop maintained the same behaviour as before we
	 introduced is_stmt.  That said, it might be that we would be
	 better off not checking is_stmt here, this would lead to us
	 possibly adding more line numbers to the range.  At the time this
	 change was made I was unsure how to test this so chose to go with
	 maintaining the existing experience.  */
      if ((lines[i].pc == pc) && (lines[i].line != 0)
	  && (lines[i].is_stmt == 1))
	range = btrace_line_range_add (range, lines[i].line);
    }

  return range;
}

/* Print source lines in LINES to UIOUT.

   UI_ITEM_CHAIN is a cleanup chain for the last source line and the
   instructions corresponding to that source line.  When printing a new source
   line, we do the cleanups for the open chain and open a new cleanup chain for
   the new source line.  If the source line range in LINES is not empty, this
   function will leave the cleanup chain for the last printed source line open
   so instructions can be added to it.  */

static void
btrace_print_lines (struct btrace_line_range lines, struct ui_out *uiout,
		    gdb::optional<ui_out_emit_tuple> *src_and_asm_tuple,
		    gdb::optional<ui_out_emit_list> *asm_list,
		    gdb_disassembly_flags flags)
{
  print_source_lines_flags psl_flags;

  if (flags & DISASSEMBLY_FILENAME)
    psl_flags |= PRINT_SOURCE_LINES_FILENAME;

  for (int line = lines.begin; line < lines.end; ++line)
    {
      asm_list->reset ();

      src_and_asm_tuple->emplace (uiout, "src_and_asm_line");

      print_source_lines (lines.symtab, line, line + 1, psl_flags);

      asm_list->emplace (uiout, "line_asm_insn");
    }
}

/* Disassemble a section of the recorded instruction trace.  */

static void
btrace_insn_history (struct ui_out *uiout,
		     const struct btrace_thread_info *btinfo,
		     const struct btrace_insn_iterator *begin,
		     const struct btrace_insn_iterator *end,
		     gdb_disassembly_flags flags)
{
  DEBUG ("itrace (0x%x): [%u; %u)", (unsigned) flags,
	 btrace_insn_number (begin), btrace_insn_number (end));

  flags |= DISASSEMBLY_SPECULATIVE;

  struct gdbarch *gdbarch = target_gdbarch ();
  btrace_line_range last_lines = btrace_mk_line_range (NULL, 0, 0);

  ui_out_emit_list list_emitter (uiout, "asm_insns");

  gdb::optional<ui_out_emit_tuple> src_and_asm_tuple;
  gdb::optional<ui_out_emit_list> asm_list;

  gdb_pretty_print_disassembler disasm (gdbarch, uiout);

  for (btrace_insn_iterator it = *begin; btrace_insn_cmp (&it, end) != 0;
       btrace_insn_next (&it, 1))
    {
      const struct btrace_insn *insn;

      insn = btrace_insn_get (&it);

      /* A NULL instruction indicates a gap in the trace.  */
      if (insn == NULL)
	{
	  const struct btrace_config *conf;

	  conf = btrace_conf (btinfo);

	  /* We have trace so we must have a configuration.  */
	  gdb_assert (conf != NULL);

	  uiout->field_fmt ("insn-number", "%u",
			    btrace_insn_number (&it));
	  uiout->text ("\t");

	  btrace_ui_out_decode_error (uiout, btrace_insn_get_error (&it),
				      conf->format);
	}
      else
	{
	  struct disasm_insn dinsn;

	  if ((flags & DISASSEMBLY_SOURCE) != 0)
	    {
	      struct btrace_line_range lines;

	      lines = btrace_find_line_range (insn->pc);
	      if (!btrace_line_range_is_empty (lines)
		  && !btrace_line_range_contains_range (last_lines, lines))
		{
		  btrace_print_lines (lines, uiout, &src_and_asm_tuple,
				      &asm_list, flags);
		  last_lines = lines;
		}
	      else if (!src_and_asm_tuple.has_value ())
		{
		  gdb_assert (!asm_list.has_value ());

		  src_and_asm_tuple.emplace (uiout, "src_and_asm_line");

		  /* No source information.  */
		  asm_list.emplace (uiout, "line_asm_insn");
		}

	      gdb_assert (src_and_asm_tuple.has_value ());
	      gdb_assert (asm_list.has_value ());
	    }

	  memset (&dinsn, 0, sizeof (dinsn));
	  dinsn.number = btrace_insn_number (&it);
	  dinsn.addr = insn->pc;

	  if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
	    dinsn.is_speculative = 1;

	  disasm.pretty_print_insn (&dinsn, flags);
	}
    }
}

/* The insn_history method of target record-btrace.  */

void
record_btrace_target::insn_history (int size, gdb_disassembly_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  btinfo = require_btrace ();
  history = btinfo->insn_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("insn-history (0x%x): %d", (unsigned) flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
	 start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
	begin = *replay;
      else
	btrace_insn_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
	 expand in the other direction, as well, to fill up any remaining
	 context.  */
      end = begin;
      if (size < 0)
	{
	  /* We want the current position covered, as well.  */
	  covered = btrace_insn_next (&end, 1);
	  covered += btrace_insn_prev (&begin, context - covered);
	  covered += btrace_insn_next (&end, context - covered);
	}
      else
	{
	  covered = btrace_insn_next (&end, context);
	  covered += btrace_insn_prev (&begin, context - covered);
	}
    }
  else
    {
      begin = history->begin;
      end = history->end;

      DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", (unsigned) flags, size,
	     btrace_insn_number (&begin), btrace_insn_number (&end));

      if (size < 0)
	{
	  end = begin;
	  covered = btrace_insn_prev (&begin, context);
	}
      else
	{
	  begin = end;
	  covered = btrace_insn_next (&end, context);
	}
    }

  if (covered > 0)
    btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
	printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
	printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  btrace_set_insn_history (btinfo, &begin, &end);
}

/* The insn_history_range method of target record-btrace.  */

void
record_btrace_target::insn_history_range (ULONGEST from, ULONGEST to,
					  gdb_disassembly_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_iterator begin, end;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "insn history");
  low = from;
  high = to;

  DEBUG ("insn-history (0x%x): [%u; %u)", (unsigned) flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_insn_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_insn_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_insn_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_insn_next (&end, 1);
    }

  btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_insn_history (btinfo, &begin, &end);
}

/* The insn_history_from method of target record-btrace.  */

void
record_btrace_target::insn_history_from (ULONGEST from, int size,
					 gdb_disassembly_flags flags)
{
  ULONGEST begin, end, context;

  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  if (size < 0)
    {
      end = from;

      if (from < context)
	begin = 0;
      else
	begin = from - context + 1;
    }
  else
    {
      begin = from;
      end = from + context - 1;

      /* Check for wrap-around.  */
      if (end < begin)
	end = ULONGEST_MAX;
    }

  insn_history_range (begin, end, flags);
}
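
/* Example (illustrative, not from the upstream sources): with FROM = 100 and
   SIZE = -10, the code above requests the inclusive range [91, 100]; with
   FROM = 100 and SIZE = 10, it requests [100, 109].  A range that would wrap
   past ULONGEST_MAX is clamped rather than wrapped.  */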

/* Print the instruction number range for a function call history line.  */

static void
btrace_call_history_insn_range (struct ui_out *uiout,
				const struct btrace_function *bfun)
{
  unsigned int begin, end, size;

  size = bfun->insn.size ();
  gdb_assert (size > 0);

  begin = bfun->insn_offset;
  end = begin + size - 1;

  uiout->field_unsigned ("insn begin", begin);
  uiout->text (",");
  uiout->field_unsigned ("insn end", end);
}

/* Compute the lowest and highest source line for the instructions in BFUN
   and return them in PBEGIN and PEND.
   Ignore instructions that can't be mapped to BFUN, e.g. instructions that
   result from inlining or macro expansion.  */

static void
btrace_compute_src_line_range (const struct btrace_function *bfun,
			       int *pbegin, int *pend)
{
  struct symtab *symtab;
  struct symbol *sym;
  int begin, end;

  begin = INT_MAX;
  end = INT_MIN;

  sym = bfun->sym;
  if (sym == NULL)
    goto out;

  symtab = symbol_symtab (sym);

  for (const btrace_insn &insn : bfun->insn)
    {
      struct symtab_and_line sal;

      sal = find_pc_line (insn.pc, 0);
      if (sal.symtab != symtab || sal.line == 0)
	continue;

      begin = std::min (begin, sal.line);
      end = std::max (end, sal.line);
    }

 out:
  *pbegin = begin;
  *pend = end;
}

/* Print the source line information for a function call history line.  */

static void
btrace_call_history_src_line (struct ui_out *uiout,
			      const struct btrace_function *bfun)
{
  struct symbol *sym;
  int begin, end;

  sym = bfun->sym;
  if (sym == NULL)
    return;

  uiout->field_string ("file",
		       symtab_to_filename_for_display (symbol_symtab (sym)),
		       file_name_style.style ());

  btrace_compute_src_line_range (bfun, &begin, &end);
  if (end < begin)
    return;

  uiout->text (":");
  uiout->field_signed ("min line", begin);

  if (end == begin)
    return;

  uiout->text (",");
  uiout->field_signed ("max line", end);
}

/* Get the name of a branch trace function.  */

static const char *
btrace_get_bfun_name (const struct btrace_function *bfun)
{
  struct minimal_symbol *msym;
  struct symbol *sym;

  if (bfun == NULL)
    return "??";

  msym = bfun->msym;
  sym = bfun->sym;

  if (sym != NULL)
    return sym->print_name ();
  else if (msym != NULL)
    return msym->print_name ();
  else
    return "??";
}

/* Disassemble a section of the recorded function trace.  */

static void
btrace_call_history (struct ui_out *uiout,
		     const struct btrace_thread_info *btinfo,
		     const struct btrace_call_iterator *begin,
		     const struct btrace_call_iterator *end,
		     int int_flags)
{
  struct btrace_call_iterator it;
  record_print_flags flags = (enum record_print_flag) int_flags;

  DEBUG ("ftrace (0x%x): [%u; %u)", int_flags, btrace_call_number (begin),
	 btrace_call_number (end));

  for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
    {
      const struct btrace_function *bfun;
      struct minimal_symbol *msym;
      struct symbol *sym;

      bfun = btrace_call_get (&it);
      sym = bfun->sym;
      msym = bfun->msym;

      /* Print the function index.  */
      uiout->field_unsigned ("index", bfun->number);
      uiout->text ("\t");

      /* Indicate gaps in the trace.  */
      if (bfun->errcode != 0)
	{
	  const struct btrace_config *conf;

	  conf = btrace_conf (btinfo);

	  /* We have trace so we must have a configuration.  */
	  gdb_assert (conf != NULL);

	  btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);

	  continue;
	}

      if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
	{
	  int level = bfun->level + btinfo->level, i;

	  for (i = 0; i < level; ++i)
	    uiout->text ("  ");
	}

      if (sym != NULL)
	uiout->field_string ("function", sym->print_name (),
			     function_name_style.style ());
      else if (msym != NULL)
	uiout->field_string ("function", msym->print_name (),
			     function_name_style.style ());
      else if (!uiout->is_mi_like_p ())
	uiout->field_string ("function", "??",
			     function_name_style.style ());

      if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
	{
	  uiout->text (_("\tinst "));
	  btrace_call_history_insn_range (uiout, bfun);
	}

      if ((flags & RECORD_PRINT_SRC_LINE) != 0)
	{
	  uiout->text (_("\tat "));
	  btrace_call_history_src_line (uiout, bfun);
	}

      uiout->text ("\n");
    }
}

/* The call_history method of target record-btrace.  */

void
record_btrace_target::call_history (int size, record_print_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  btinfo = require_btrace ();
  history = btinfo->call_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("call-history (0x%x): %d", (int) flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
	 start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
	{
	  begin.btinfo = btinfo;
	  begin.index = replay->call_index;
	}
      else
	btrace_call_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
	 expand in the other direction, as well, to fill up any remaining
	 context.  */
      end = begin;
      if (size < 0)
	{
	  /* We want the current position covered, as well.  */
	  covered = btrace_call_next (&end, 1);
	  covered += btrace_call_prev (&begin, context - covered);
	  covered += btrace_call_next (&end, context - covered);
	}
      else
	{
	  covered = btrace_call_next (&end, context);
	  covered += btrace_call_prev (&begin, context - covered);
	}
    }
  else
    {
      begin = history->begin;
      end = history->end;

      DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", (int) flags, size,
	     btrace_call_number (&begin), btrace_call_number (&end));

      if (size < 0)
	{
	  end = begin;
	  covered = btrace_call_prev (&begin, context);
	}
      else
	{
	  begin = end;
	  covered = btrace_call_next (&end, context);
	}
    }

  if (covered > 0)
    btrace_call_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
	printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
	printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  btrace_set_call_history (btinfo, &begin, &end);
}

/* The call_history_range method of target record-btrace.  */

void
record_btrace_target::call_history_range (ULONGEST from, ULONGEST to,
					  record_print_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_iterator begin, end;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "func history");
  low = from;
  high = to;

  DEBUG ("call-history (0x%x): [%u; %u)", (int) flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_call_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_call_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_call_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_call_next (&end, 1);
    }

  btrace_call_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_call_history (btinfo, &begin, &end);
}

/* The call_history_from method of target record-btrace.  */

void
record_btrace_target::call_history_from (ULONGEST from, int size,
					 record_print_flags flags)
{
  ULONGEST begin, end, context;

  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  if (size < 0)
    {
      end = from;

      if (from < context)
	begin = 0;
      else
	begin = from - context + 1;
    }
  else
    {
      begin = from;
      end = from + context - 1;

      /* Check for wrap-around.  */
      if (end < begin)
	end = ULONGEST_MAX;
    }

  call_history_range (begin, end, flags);
}

/* The record_method method of target record-btrace.  */

enum record_method
record_btrace_target::record_method (ptid_t ptid)
{
  process_stratum_target *proc_target = current_inferior ()->process_target ();
  thread_info *const tp = find_thread_ptid (proc_target, ptid);

  if (tp == NULL)
    error (_("No thread."));

  if (tp->btrace.target == NULL)
    return RECORD_METHOD_NONE;

  return RECORD_METHOD_BTRACE;
}

/* The record_is_replaying method of target record-btrace.  */

bool
record_btrace_target::record_is_replaying (ptid_t ptid)
{
  process_stratum_target *proc_target = current_inferior ()->process_target ();
  for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
    if (btrace_is_replaying (tp))
      return true;

  return false;
}

/* The record_will_replay method of target record-btrace.  */

bool
record_btrace_target::record_will_replay (ptid_t ptid, int dir)
{
  return dir == EXEC_REVERSE || record_is_replaying (ptid);
}

/* The xfer_partial method of target record-btrace.  */

enum target_xfer_status
record_btrace_target::xfer_partial (enum target_object object,
				    const char *annex, gdb_byte *readbuf,
				    const gdb_byte *writebuf, ULONGEST offset,
				    ULONGEST len, ULONGEST *xfered_len)
{
  /* Filter out requests that don't make sense during replay.  */
  if (replay_memory_access == replay_memory_access_read_only
      && !record_btrace_generating_corefile
      && record_is_replaying (inferior_ptid))
    {
      switch (object)
	{
	case TARGET_OBJECT_MEMORY:
	  {
	    struct target_section *section;

	    /* We do not allow writing memory in general.  */
	    if (writebuf != NULL)
	      {
		*xfered_len = len;
		return TARGET_XFER_UNAVAILABLE;
	      }

	    /* We allow reading readonly memory.  */
	    section = target_section_by_addr (this, offset);
	    if (section != NULL)
	      {
		/* Check if the section we found is readonly.  */
		if ((bfd_section_flags (section->the_bfd_section)
		     & SEC_READONLY) != 0)
		  {
		    /* Truncate the request to fit into this section.  */
		    len = std::min (len, section->endaddr - offset);
		    break;
		  }
	      }

	    *xfered_len = len;
	    return TARGET_XFER_UNAVAILABLE;
	  }
	}
    }

  /* Forward the request.  */
  return this->beneath ()->xfer_partial (object, annex, readbuf, writebuf,
					 offset, len, xfered_len);
}

/* The insert_breakpoint method of target record-btrace.  */

int
record_btrace_target::insert_breakpoint (struct gdbarch *gdbarch,
					 struct bp_target_info *bp_tgt)
{
  const char *old;
  int ret;

  /* Inserting breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  try
    {
      ret = this->beneath ()->insert_breakpoint (gdbarch, bp_tgt);
    }
  catch (const gdb_exception &except)
    {
      replay_memory_access = old;
      throw;
    }
  replay_memory_access = old;

  return ret;
}

/* The remove_breakpoint method of target record-btrace.  */

int
record_btrace_target::remove_breakpoint (struct gdbarch *gdbarch,
					 struct bp_target_info *bp_tgt,
					 enum remove_bp_reason reason)
{
  const char *old;
  int ret;

  /* Removing breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  try
    {
      ret = this->beneath ()->remove_breakpoint (gdbarch, bp_tgt, reason);
    }
  catch (const gdb_exception &except)
    {
      replay_memory_access = old;
      throw;
    }
  replay_memory_access = old;

  return ret;
}

/* The fetch_registers method of target record-btrace.  */

void
record_btrace_target::fetch_registers (struct regcache *regcache, int regno)
{
  thread_info *tp = find_thread_ptid (regcache->target (), regcache->ptid ());
  gdb_assert (tp != NULL);

  btrace_insn_iterator *replay = tp->btrace.replay;
  if (replay != NULL && !record_btrace_generating_corefile)
    {
      const struct btrace_insn *insn;
      struct gdbarch *gdbarch;
      int pcreg;

      gdbarch = regcache->arch ();
      pcreg = gdbarch_pc_regnum (gdbarch);
      if (pcreg < 0)
	return;

      /* We can only provide the PC register.  */
      if (regno >= 0 && regno != pcreg)
	return;

      insn = btrace_insn_get (replay);
      gdb_assert (insn != NULL);

      regcache->raw_supply (regno, &insn->pc);
    }
  else
    this->beneath ()->fetch_registers (regcache, regno);
}

/* The store_registers method of target record-btrace.  */

void
record_btrace_target::store_registers (struct regcache *regcache, int regno)
{
  if (!record_btrace_generating_corefile
      && record_is_replaying (regcache->ptid ()))
    error (_("Cannot write registers while replaying."));

  gdb_assert (may_write_registers);

  this->beneath ()->store_registers (regcache, regno);
}

/* The prepare_to_store method of target record-btrace.  */

void
record_btrace_target::prepare_to_store (struct regcache *regcache)
{
  if (!record_btrace_generating_corefile
      && record_is_replaying (regcache->ptid ()))
    return;

  this->beneath ()->prepare_to_store (regcache);
}

/* The branch trace frame cache.  */

struct btrace_frame_cache
{
  /* The thread.  */
  struct thread_info *tp;

  /* The frame info.  */
  struct frame_info *frame;

  /* The branch trace function segment.  */
  const struct btrace_function *bfun;
};

/* A struct btrace_frame_cache hash table indexed by NEXT.  */

static htab_t bfcache;

/* hash_f for htab_create_alloc of bfcache.  */

static hashval_t
bfcache_hash (const void *arg)
{
  const struct btrace_frame_cache *cache
    = (const struct btrace_frame_cache *) arg;

  return htab_hash_pointer (cache->frame);
}

/* eq_f for htab_create_alloc of bfcache.  */

static int
bfcache_eq (const void *arg1, const void *arg2)
{
  const struct btrace_frame_cache *cache1
    = (const struct btrace_frame_cache *) arg1;
  const struct btrace_frame_cache *cache2
    = (const struct btrace_frame_cache *) arg2;

  return cache1->frame == cache2->frame;
}

/* Create a new btrace frame cache.  */

static struct btrace_frame_cache *
bfcache_new (struct frame_info *frame)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
  cache->frame = frame;

  slot = htab_find_slot (bfcache, cache, INSERT);
  gdb_assert (*slot == NULL);
  *slot = cache;

  return cache;
}

/* Extract the branch trace function from a branch trace frame.  */

static const struct btrace_function *
btrace_get_frame_function (struct frame_info *frame)
{
  const struct btrace_frame_cache *cache;
  struct btrace_frame_cache pattern;
  void **slot;

  pattern.frame = frame;

  slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
  if (slot == NULL)
    return NULL;

  cache = (const struct btrace_frame_cache *) *slot;
  return cache->bfun;
}
/* Implement stop_reason method for record_btrace_frame_unwind.  */

static enum unwind_stop_reason
record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
					void **this_cache)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;

  cache = (const struct btrace_frame_cache *) *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  if (bfun->up == 0)
    return UNWIND_UNAVAILABLE;

  return UNWIND_NO_REASON;
}

/* Implement this_id method for record_btrace_frame_unwind.  */

static void
record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
			     struct frame_id *this_id)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;
  struct btrace_call_iterator it;
  CORE_ADDR code, special;

  cache = (const struct btrace_frame_cache *) *this_cache;

  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  while (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->prev) != 0)
    bfun = btrace_call_get (&it);

  code = get_frame_func (this_frame);
  special = bfun->number;

  *this_id = frame_id_build_unavailable_stack_special (code, special);

  DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
	 btrace_get_bfun_name (cache->bfun),
	 core_addr_to_string_nz (this_id->code_addr),
	 core_addr_to_string_nz (this_id->special_addr));
}

/* Implement prev_register method for record_btrace_frame_unwind.  */

static struct value *
record_btrace_frame_prev_register (struct frame_info *this_frame,
				   void **this_cache,
				   int regnum)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun, *caller;
  struct btrace_call_iterator it;
  struct gdbarch *gdbarch;
  CORE_ADDR pc;
  int pcreg;

  gdbarch = get_frame_arch (this_frame);
  pcreg = gdbarch_pc_regnum (gdbarch);
  if (pcreg < 0 || regnum != pcreg)
    throw_error (NOT_AVAILABLE_ERROR,
		 _("Registers are not available in btrace record history"));

  cache = (const struct btrace_frame_cache *) *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  if (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->up) == 0)
    throw_error (NOT_AVAILABLE_ERROR,
		 _("No caller in btrace record history"));

  caller = btrace_call_get (&it);

  if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
    pc = caller->insn.front ().pc;
  else
    {
      pc = caller->insn.back ().pc;
      pc += gdb_insn_length (gdbarch, pc);
    }

  DEBUG ("[frame] unwound PC in %s on level %d: %s",
	 btrace_get_bfun_name (bfun), bfun->level,
	 core_addr_to_string_nz (pc));

  return frame_unwind_got_address (this_frame, regnum, pc);
}

/* Implement sniffer method for record_btrace_frame_unwind.  */

static int
record_btrace_frame_sniffer (const struct frame_unwind *self,
			     struct frame_info *this_frame,
			     void **this_cache)
{
  const struct btrace_function *bfun;
  struct btrace_frame_cache *cache;
  struct thread_info *tp;
  struct frame_info *next;

  /* THIS_FRAME does not contain a reference to its thread.  */
  tp = inferior_thread ();

  bfun = NULL;
  next = get_next_frame (this_frame);
  if (next == NULL)
    {
      const struct btrace_insn_iterator *replay;

      replay = tp->btrace.replay;
      if (replay != NULL)
	bfun = &replay->btinfo->functions[replay->call_index];
    }
  else
    {
      const struct btrace_function *callee;
      struct btrace_call_iterator it;

      callee = btrace_get_frame_function (next);
      if (callee == NULL || (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
	return 0;

      if (btrace_find_call_by_number (&it, &tp->btrace, callee->up) == 0)
	return 0;

      bfun = btrace_call_get (&it);
    }

  if (bfun == NULL)
    return 0;

  DEBUG ("[frame] sniffed frame for %s on level %d",
	 btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = tp;
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}

/* Implement sniffer method for record_btrace_tailcall_frame_unwind.  */

static int
record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
				      struct frame_info *this_frame,
				      void **this_cache)
{
  const struct btrace_function *bfun, *callee;
  struct btrace_frame_cache *cache;
  struct btrace_call_iterator it;
  struct frame_info *next;
  struct thread_info *tinfo;

  next = get_next_frame (this_frame);
  if (next == NULL)
    return 0;

  callee = btrace_get_frame_function (next);
  if (callee == NULL)
    return 0;

  if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
    return 0;

  tinfo = inferior_thread ();
  if (btrace_find_call_by_number (&it, &tinfo->btrace, callee->up) == 0)
    return 0;

  bfun = btrace_call_get (&it);

  DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
	 btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = tinfo;
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}

static void
record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = (struct btrace_frame_cache *) this_cache;

  slot = htab_find_slot (bfcache, cache, NO_INSERT);
  gdb_assert (slot != NULL);

  htab_remove_elt (bfcache, cache);
}
/* btrace recording does not store previous memory content, nor the contents
   of the stack frames.  Any unwinding would return erroneous results as the
   stack contents no longer match the changed PC value restored from history.
   Therefore this unwinder reports any possibly unwound registers as
   <unavailable>.  */

const struct frame_unwind record_btrace_frame_unwind =
{
  NORMAL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_frame_sniffer,
  record_btrace_frame_dealloc_cache
};

const struct frame_unwind record_btrace_tailcall_frame_unwind =
{
  TAILCALL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_tailcall_frame_sniffer,
  record_btrace_frame_dealloc_cache
};

/* Implement the get_unwinder method.  */

const struct frame_unwind *
record_btrace_target::get_unwinder ()
{
  return &record_btrace_frame_unwind;
}

/* Implement the get_tailcall_unwinder method.  */

const struct frame_unwind *
record_btrace_target::get_tailcall_unwinder ()
{
  return &record_btrace_tailcall_frame_unwind;
}

/* Return a human-readable string for FLAG.  */

static const char *
btrace_thread_flag_to_str (enum btrace_thread_flag flag)
{
  switch (flag)
    {
    case BTHR_STEP:
      return "step";

    case BTHR_RSTEP:
      return "reverse-step";

    case BTHR_CONT:
      return "cont";

    case BTHR_RCONT:
      return "reverse-cont";

    case BTHR_STOP:
      return "stop";
    }

  return "<invalid>";
}

/* Indicate that TP should be resumed according to FLAG.  */

static void
record_btrace_resume_thread (struct thread_info *tp,
			     enum btrace_thread_flag flag)
{
  struct btrace_thread_info *btinfo;

  DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp),
	 target_pid_to_str (tp->ptid).c_str (), flag,
	 btrace_thread_flag_to_str (flag));

  btinfo = &tp->btrace;

  /* Fetch the latest branch trace.  */
  btrace_fetch (tp, record_btrace_get_cpu ());

  /* A resume request overwrites a preceding resume or stop request.  */
  btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
  btinfo->flags |= flag;
}

/* Get the current frame for TP.  */

static struct frame_id
get_thread_current_frame_id (struct thread_info *tp)
{
  struct frame_id id;
  bool executing;

  /* Set current thread, which is implicitly used by
     get_current_frame.  */
  scoped_restore_current_thread restore_thread;

  switch_to_thread (tp);

  process_stratum_target *proc_target = tp->inf->process_target ();

  /* Clear the executing flag to allow changes to the current frame.
     We are not actually running, yet.  We just started a reverse execution
     command or a record goto command.
     For the latter, EXECUTING is false and this has no effect.
     For the former, EXECUTING is true and we're in wait, about to
     move the thread.  Since we need to recompute the stack, we temporarily
     set EXECUTING to false.  */
  executing = tp->executing;
  set_executing (proc_target, inferior_ptid, false);

  id = null_frame_id;
  try
    {
      id = get_frame_id (get_current_frame ());
    }
  catch (const gdb_exception &except)
    {
      /* Restore the previous execution state.  */
      set_executing (proc_target, inferior_ptid, executing);

      throw;
    }

  /* Restore the previous execution state.  */
  set_executing (proc_target, inferior_ptid, executing);

  return id;
}

/* Start replaying a thread.  */

static struct btrace_insn_iterator *
record_btrace_start_replaying (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;
  replay = NULL;

  /* We can't start replaying without trace.  */
  if (btinfo->functions.empty ())
    return NULL;
  /* GDB stores the current frame_id when stepping in order to detect steps
     into subroutines.
     Since frames are computed differently when we're replaying, we need to
     recompute those stored frames and fix them up so we can still detect
     subroutines after we started replaying.  */
2030 try
2031 {
2032 struct frame_id frame_id;
2033 int upd_step_frame_id, upd_step_stack_frame_id;
2034
2035 /* The current frame without replaying - computed via normal unwind. */
2036 frame_id = get_thread_current_frame_id (tp);
2037
2038 /* Check if we need to update any stepping-related frame id's. */
2039 upd_step_frame_id = frame_id_eq (frame_id,
2040 tp->control.step_frame_id);
2041 upd_step_stack_frame_id = frame_id_eq (frame_id,
2042 tp->control.step_stack_frame_id);
2043
2044 /* We start replaying at the end of the branch trace. This corresponds
2045 to the current instruction. */
2046 replay = XNEW (struct btrace_insn_iterator);
2047 btrace_insn_end (replay, btinfo);
2048
2049 /* Skip gaps at the end of the trace. */
2050 while (btrace_insn_get (replay) == NULL)
2051 {
2052 unsigned int steps;
2053
2054 steps = btrace_insn_prev (replay, 1);
2055 if (steps == 0)
2056 error (_("No trace."));
2057 }
2058
2059 /* We're not replaying, yet. */
2060 gdb_assert (btinfo->replay == NULL);
2061 btinfo->replay = replay;
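/* From here on, BTINFO owns REPLAY; it is freed again in
record_btrace_stop_replaying or in the error path below.  */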
2062
2063 /* Make sure we're not using any stale registers. */
2064 registers_changed_thread (tp);
2065
2066 /* The current frame with replaying - computed via btrace unwind. */
2067 frame_id = get_thread_current_frame_id (tp);
2068
2069 /* Replace stepping related frames where necessary. */
2070 if (upd_step_frame_id)
2071 tp->control.step_frame_id = frame_id;
2072 if (upd_step_stack_frame_id)
2073 tp->control.step_stack_frame_id = frame_id;
2074 }
2075 catch (const gdb_exception &except)
2076 {
2077 xfree (btinfo->replay);
2078 btinfo->replay = NULL;
2079
2080 registers_changed_thread (tp);
2081
2082 throw;
2083 }
2084
2085 return replay;
2086 }
2087
2088 /* Stop replaying a thread. */
2089
2090 static void
2091 record_btrace_stop_replaying (struct thread_info *tp)
2092 {
2093 struct btrace_thread_info *btinfo;
2094
2095 btinfo = &tp->btrace;
2096
2097 xfree (btinfo->replay);
2098 btinfo->replay = NULL;
2099
2100 /* Make sure we're not leaving any stale registers. */
2101 registers_changed_thread (tp);
2102 }
2103
2104 /* Stop replaying TP if it is at the end of its execution history. */
2105
2106 static void
2107 record_btrace_stop_replaying_at_end (struct thread_info *tp)
2108 {
2109 struct btrace_insn_iterator *replay, end;
2110 struct btrace_thread_info *btinfo;
2111
2112 btinfo = &tp->btrace;
2113 replay = btinfo->replay;
2114
2115 if (replay == NULL)
2116 return;
2117
2118 btrace_insn_end (&end, btinfo);
2119
2120 if (btrace_insn_cmp (replay, &end) == 0)
2121 record_btrace_stop_replaying (tp);
2122 }
2123
2124 /* The resume method of target record-btrace. */
2125
2126 void
2127 record_btrace_target::resume (ptid_t ptid, int step, enum gdb_signal signal)
2128 {
2129 enum btrace_thread_flag flag, cflag;
2130
2131 DEBUG ("resume %s: %s%s", target_pid_to_str (ptid).c_str (),
2132 ::execution_direction == EXEC_REVERSE ? "reverse-" : "",
2133 step ? "step" : "cont");
2134
2135 /* Store the execution direction of the last resume.
2136
2137 If there is more than one resume call, we have to rely on infrun
2138 to not change the execution direction in-between. */
2139 record_btrace_resume_exec_dir = ::execution_direction;
2140
2141 /* As long as we're not replaying, just forward the request.
2142
2143 For non-stop targets this means that no thread is replaying. In order to
2144 make progress, we may need to explicitly move replaying threads to the end
2145 of their execution history. */
2146 if ((::execution_direction != EXEC_REVERSE)
2147 && !record_is_replaying (minus_one_ptid))
2148 {
2149 this->beneath ()->resume (ptid, step, signal);
2150 return;
2151 }
2152
2153 /* Compute the btrace thread flag for the requested move. */
2154 if (::execution_direction == EXEC_REVERSE)
2155 {
2156 flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;
2157 cflag = BTHR_RCONT;
2158 }
2159 else
2160 {
2161 flag = step == 0 ? BTHR_CONT : BTHR_STEP;
2162 cflag = BTHR_CONT;
2163 }
2164
2165 /* We just indicate the resume intent here. The actual stepping happens in
2166 record_btrace_wait below.
2167
2168 For all-stop targets, we only step INFERIOR_PTID and continue others. */
2169
2170 process_stratum_target *proc_target = current_inferior ()->process_target ();
2171
2172 if (!target_is_non_stop_p ())
2173 {
2174 gdb_assert (inferior_ptid.matches (ptid));
2175
2176 for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
2177 {
2178 if (tp->ptid.matches (inferior_ptid))
2179 record_btrace_resume_thread (tp, flag);
2180 else
2181 record_btrace_resume_thread (tp, cflag);
2182 }
2183 }
2184 else
2185 {
2186 for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
2187 record_btrace_resume_thread (tp, flag);
2188 }
2189
2190 /* Async support.  Mark our async event handler so the event loop calls us (via record_btrace_wait) to actually move the threads.  */
2191 if (target_can_async_p ())
2192 {
2193 target_async (1);
2194 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2195 }
2196 }
2197
2198 /* The commit_resume method of target record-btrace. */
2199
2200 void
2201 record_btrace_target::commit_resume ()
2202 {
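/* Forward the request under the same conditions as resume above.  While
replaying, resume requests are handled entirely within this target and
there is nothing to commit beneath.  */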
2203 if ((::execution_direction != EXEC_REVERSE)
2204 && !record_is_replaying (minus_one_ptid))
2205 beneath ()->commit_resume ();
2206 }
2207
2208 /* Cancel resuming TP. */
2209
2210 static void
2211 record_btrace_cancel_resume (struct thread_info *tp)
2212 {
2213 enum btrace_thread_flag flags;
2214
2215 flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
2216 if (flags == 0)
2217 return;
2218
2219 DEBUG ("cancel resume thread %s (%s): %x (%s)",
2220 print_thread_id (tp),
2221 target_pid_to_str (tp->ptid).c_str (), flags,
2222 btrace_thread_flag_to_str (flags));
2223
2224 tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
2225 record_btrace_stop_replaying_at_end (tp);
2226 }
2227
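/* The btrace_step_* helpers below construct the target_waitstatus values
that record_btrace_step_thread and record_btrace_target::wait report
back to the core.  */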
2228 /* Return a target_waitstatus indicating that we ran out of history. */
2229
2230 static struct target_waitstatus
2231 btrace_step_no_history (void)
2232 {
2233 struct target_waitstatus status;
2234
2235 status.kind = TARGET_WAITKIND_NO_HISTORY;
2236
2237 return status;
2238 }
2239
2240 /* Return a target_waitstatus indicating that a step finished. */
2241
2242 static struct target_waitstatus
2243 btrace_step_stopped (void)
2244 {
2245 struct target_waitstatus status;
2246
2247 status.kind = TARGET_WAITKIND_STOPPED;
2248 status.value.sig = GDB_SIGNAL_TRAP;
2249
2250 return status;
2251 }
2252
2253 /* Return a target_waitstatus indicating that a thread was stopped as
2254 requested. */
2255
2256 static struct target_waitstatus
2257 btrace_step_stopped_on_request (void)
2258 {
2259 struct target_waitstatus status;
2260
2261 status.kind = TARGET_WAITKIND_STOPPED;
2262 status.value.sig = GDB_SIGNAL_0;
2263
2264 return status;
2265 }
2266
2267 /* Return a target_waitstatus indicating a spurious stop. */
2268
2269 static struct target_waitstatus
2270 btrace_step_spurious (void)
2271 {
2272 struct target_waitstatus status;
2273
2274 status.kind = TARGET_WAITKIND_SPURIOUS;
2275
2276 return status;
2277 }
2278
2279 /* Return a target_waitstatus indicating that the thread was not resumed. */
2280
2281 static struct target_waitstatus
2282 btrace_step_no_resumed (void)
2283 {
2284 struct target_waitstatus status;
2285
2286 status.kind = TARGET_WAITKIND_NO_RESUMED;
2287
2288 return status;
2289 }
2290
2291 /* Return a target_waitstatus indicating that we should wait again. */
2292
2293 static struct target_waitstatus
2294 btrace_step_again (void)
2295 {
2296 struct target_waitstatus status;
2297
2298 status.kind = TARGET_WAITKIND_IGNORE;
2299
2300 return status;
2301 }
2302
2303 /* Clear the record histories. */
2304
2305 static void
2306 record_btrace_clear_histories (struct btrace_thread_info *btinfo)
2307 {
2308 xfree (btinfo->insn_history);
2309 xfree (btinfo->call_history);
2310
2311 btinfo->insn_history = NULL;
2312 btinfo->call_history = NULL;
2313 }
2314
2315 /* Check whether TP's current replay position is at a breakpoint. */
2316
2317 static int
2318 record_btrace_replay_at_breakpoint (struct thread_info *tp)
2319 {
2320 struct btrace_insn_iterator *replay;
2321 struct btrace_thread_info *btinfo;
2322 const struct btrace_insn *insn;
2323
2324 btinfo = &tp->btrace;
2325 replay = btinfo->replay;
2326
2327 if (replay == NULL)
2328 return 0;
2329
2330 insn = btrace_insn_get (replay);
2331 if (insn == NULL)
2332 return 0;
2333
2334 return record_check_stopped_by_breakpoint (tp->inf->aspace, insn->pc,
2335 &btinfo->stop_reason);
2336 }
2337
2338 /* Step one instruction in forward direction. */
2339
2340 static struct target_waitstatus
2341 record_btrace_single_step_forward (struct thread_info *tp)
2342 {
2343 struct btrace_insn_iterator *replay, end, start;
2344 struct btrace_thread_info *btinfo;
2345
2346 btinfo = &tp->btrace;
2347 replay = btinfo->replay;
2348
2349 /* We're done if we're not replaying. */
2350 if (replay == NULL)
2351 return btrace_step_no_history ();
2352
2353 /* Check if we're stepping a breakpoint. */
2354 if (record_btrace_replay_at_breakpoint (tp))
2355 return btrace_step_stopped ();
2356
2357 /* Skip gaps during replay. If we end up at a gap (at the end of the trace),
2358 jump back to the instruction at which we started. */
2359 start = *replay;
2360 do
2361 {
2362 unsigned int steps;
2363
2364 /* We will bail out here if we continue stepping after reaching the end
2365 of the execution history. */
2366 steps = btrace_insn_next (replay, 1);
2367 if (steps == 0)
2368 {
2369 *replay = start;
2370 return btrace_step_no_history ();
2371 }
2372 }
2373 while (btrace_insn_get (replay) == NULL);
2374
2375 /* Determine the end of the instruction trace. */
2376 btrace_insn_end (&end, btinfo);
2377
2378 /* The execution trace contains (and ends with) the current instruction.
2379 This instruction has not been executed, yet, so the trace really ends
2380 one instruction earlier. */
2381 if (btrace_insn_cmp (replay, &end) == 0)
2382 return btrace_step_no_history ();
2383
2384 return btrace_step_spurious ();
2385 }
2386
2387 /* Step one instruction in backward direction. */
2388
2389 static struct target_waitstatus
2390 record_btrace_single_step_backward (struct thread_info *tp)
2391 {
2392 struct btrace_insn_iterator *replay, start;
2393 struct btrace_thread_info *btinfo;
2394
2395 btinfo = &tp->btrace;
2396 replay = btinfo->replay;
2397
2398 /* Start replaying if we're not already doing so. */
2399 if (replay == NULL)
2400 replay = record_btrace_start_replaying (tp);
2401
2402 /* If we can't step any further, we reached the end of the history.
2403 Skip gaps during replay. If we end up at a gap (at the beginning of
2404 the trace), jump back to the instruction at which we started. */
2405 start = *replay;
2406 do
2407 {
2408 unsigned int steps;
2409
2410 steps = btrace_insn_prev (replay, 1);
2411 if (steps == 0)
2412 {
2413 *replay = start;
2414 return btrace_step_no_history ();
2415 }
2416 }
2417 while (btrace_insn_get (replay) == NULL);
2418
2419 /* Check if we're stepping a breakpoint.
2420
2421 For reverse-stepping, this check is after the step. There is logic in
2422 infrun.c that handles reverse-stepping separately. See, for example,
2423 proceed and adjust_pc_after_break.
2424
2425 This code assumes that for reverse-stepping, PC points to the last
2426 de-executed instruction, whereas for forward-stepping PC points to the
2427 next to-be-executed instruction. */
2428 if (record_btrace_replay_at_breakpoint (tp))
2429 return btrace_step_stopped ();
2430
2431 return btrace_step_spurious ();
2432 }
2433
2434 /* Step a single thread. */
2435
2436 static struct target_waitstatus
2437 record_btrace_step_thread (struct thread_info *tp)
2438 {
2439 struct btrace_thread_info *btinfo;
2440 struct target_waitstatus status;
2441 enum btrace_thread_flag flags;
2442
2443 btinfo = &tp->btrace;
2444
2445 flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
2446 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
2447
2448 DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp),
2449 target_pid_to_str (tp->ptid).c_str (), flags,
2450 btrace_thread_flag_to_str (flags));
2451
2452 /* We can't step without an execution history. */
2453 if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
2454 return btrace_step_no_history ();
2455
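/* A note on the cases below: a spurious status means a single step
completed without incident.  For BTHR_STEP and BTHR_RSTEP this finishes
the request, so we report a stop.  For BTHR_CONT and BTHR_RCONT we re-arm
FLAGS and ask to be stepped again.  Any other status leaves the switch
and is processed below.  */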
2456 switch (flags)
2457 {
2458 default:
2459 internal_error (__FILE__, __LINE__, _("invalid stepping type."));
2460
2461 case BTHR_STOP:
2462 return btrace_step_stopped_on_request ();
2463
2464 case BTHR_STEP:
2465 status = record_btrace_single_step_forward (tp);
2466 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2467 break;
2468
2469 return btrace_step_stopped ();
2470
2471 case BTHR_RSTEP:
2472 status = record_btrace_single_step_backward (tp);
2473 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2474 break;
2475
2476 return btrace_step_stopped ();
2477
2478 case BTHR_CONT:
2479 status = record_btrace_single_step_forward (tp);
2480 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2481 break;
2482
2483 btinfo->flags |= flags;
2484 return btrace_step_again ();
2485
2486 case BTHR_RCONT:
2487 status = record_btrace_single_step_backward (tp);
2488 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2489 break;
2490
2491 btinfo->flags |= flags;
2492 return btrace_step_again ();
2493 }
2494
2495 /* We keep threads moving at the end of their execution history. The wait
2496 method will stop the thread for which the event is reported. */
2497 if (status.kind == TARGET_WAITKIND_NO_HISTORY)
2498 btinfo->flags |= flags;
2499
2500 return status;
2501 }
2502
2503 /* Announce further events if necessary. */
2504
2505 static void
2506 record_btrace_maybe_mark_async_event
2507 (const std::vector<thread_info *> &moving,
2508 const std::vector<thread_info *> &no_history)
2509 {
2510 bool more_moving = !moving.empty ();
2511 bool more_no_history = !no_history.empty ();
2512
2513 if (!more_moving && !more_no_history)
2514 return;
2515
2516 if (more_moving)
2517 DEBUG ("movers pending");
2518
2519 if (more_no_history)
2520 DEBUG ("no-history pending");
2521
2522 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2523 }
2524
2525 /* The wait method of target record-btrace. */
2526
2527 ptid_t
2528 record_btrace_target::wait (ptid_t ptid, struct target_waitstatus *status,
2529 int options)
2530 {
2531 std::vector<thread_info *> moving;
2532 std::vector<thread_info *> no_history;
2533
2534 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid).c_str (), options);
2535
2536 /* As long as we're not replaying, just forward the request. */
2537 if ((::execution_direction != EXEC_REVERSE)
2538 && !record_is_replaying (minus_one_ptid))
2539 {
2540 return this->beneath ()->wait (ptid, status, options);
2541 }
2542
2543 /* Keep a work list of moving threads. */
2544 process_stratum_target *proc_target = current_inferior ()->process_target ();
2545 for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
2546 if ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0)
2547 moving.push_back (tp);
2548
2549 if (moving.empty ())
2550 {
2551 *status = btrace_step_no_resumed ();
2552
2553 DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid).c_str (),
2554 target_waitstatus_to_string (status).c_str ());
2555
2556 return null_ptid;
2557 }
2558
2559 /* Step moving threads one by one, one step each, until either one thread
2560 reports an event or we run out of threads to step.
2561
2562 When stepping more than one thread, chances are that some threads reach
2563 the end of their execution history earlier than others. If we reported
2564 this immediately, all-stop on top of non-stop would stop all threads and
2565 resume the same threads next time. And we would report the same thread
2566 having reached the end of its execution history again.
2567
2568 In the worst case, this would starve the other threads. But even if other
2569 threads would be allowed to make progress, this would result in far too
2570 many intermediate stops.
2571
2572 We therefore delay the reporting of "no execution history" until we have
2573 nothing else to report. By this time, all threads should have moved to
2574 either the beginning or the end of their execution history. There will
2575 be a single user-visible stop. */
2576 struct thread_info *eventing = NULL;
2577 while ((eventing == NULL) && !moving.empty ())
2578 {
2579 for (unsigned int ix = 0; eventing == NULL && ix < moving.size ();)
2580 {
2581 thread_info *tp = moving[ix];
2582
2583 *status = record_btrace_step_thread (tp);
2584
2585 switch (status->kind)
2586 {
2587 case TARGET_WAITKIND_IGNORE:
2588 ix++;
2589 break;
2590
2591 case TARGET_WAITKIND_NO_HISTORY:
2592 no_history.push_back (ordered_remove (moving, ix));
2593 break;
2594
2595 default:
2596 eventing = unordered_remove (moving, ix);
2597 break;
2598 }
2599 }
2600 }
2601
2602 if (eventing == NULL)
2603 {
2604 /* We started with at least one moving thread. This thread must have
2605 either stopped or reached the end of its execution history.
2606
2607 In the former case, EVENTING must not be NULL.
2608 In the latter case, NO_HISTORY must not be empty. */
2609 gdb_assert (!no_history.empty ());
2610
2611 /* We kept threads moving at the end of their execution history. Stop
2612 EVENTING now that we are going to report its stop. */
2613 eventing = unordered_remove (no_history, 0);
2614 eventing->btrace.flags &= ~BTHR_MOVE;
2615
2616 *status = btrace_step_no_history ();
2617 }
2618
2619 gdb_assert (eventing != NULL);
2620
2621 /* We kept threads replaying at the end of their execution history. Stop
2622 replaying EVENTING now that we are going to report its stop. */
2623 record_btrace_stop_replaying_at_end (eventing);
2624
2625 /* Stop all other threads. */
2626 if (!target_is_non_stop_p ())
2627 {
2628 for (thread_info *tp : all_non_exited_threads ())
2629 record_btrace_cancel_resume (tp);
2630 }
2631
2632 /* In async mode, we need to announce further events. */
2633 if (target_is_async_p ())
2634 record_btrace_maybe_mark_async_event (moving, no_history);
2635
2636 /* Start record histories anew from the current position. */
2637 record_btrace_clear_histories (&eventing->btrace);
2638
2639 /* We moved the replay position but did not update registers. */
2640 registers_changed_thread (eventing);
2641
2642 DEBUG ("wait ended by thread %s (%s): %s",
2643 print_thread_id (eventing),
2644 target_pid_to_str (eventing->ptid).c_str (),
2645 target_waitstatus_to_string (status).c_str ());
2646
2647 return eventing->ptid;
2648 }
2649
2650 /* The stop method of target record-btrace. */
2651
2652 void
2653 record_btrace_target::stop (ptid_t ptid)
2654 {
2655 DEBUG ("stop %s", target_pid_to_str (ptid).c_str ());
2656
2657 /* As long as we're not replaying, just forward the request. */
2658 if ((::execution_direction != EXEC_REVERSE)
2659 && !record_is_replaying (minus_one_ptid))
2660 {
2661 this->beneath ()->stop (ptid);
2662 }
2663 else
2664 {
2665 process_stratum_target *proc_target
2666 = current_inferior ()->process_target ();
2667
2668 for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
2669 {
2670 tp->btrace.flags &= ~BTHR_MOVE;
2671 tp->btrace.flags |= BTHR_STOP;
2672 }
2673 }
2674 }
2675
2676 /* The can_execute_reverse method of target record-btrace. */
2677
2678 bool
2679 record_btrace_target::can_execute_reverse ()
2680 {
2681 return true;
2682 }
2683
2684 /* The stopped_by_sw_breakpoint method of target record-btrace. */
2685
2686 bool
2687 record_btrace_target::stopped_by_sw_breakpoint ()
2688 {
2689 if (record_is_replaying (minus_one_ptid))
2690 {
2691 struct thread_info *tp = inferior_thread ();
2692
2693 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2694 }
2695
2696 return this->beneath ()->stopped_by_sw_breakpoint ();
2697 }
2698
2699 /* The supports_stopped_by_sw_breakpoint method of target
2700 record-btrace. */
2701
2702 bool
2703 record_btrace_target::supports_stopped_by_sw_breakpoint ()
2704 {
2705 if (record_is_replaying (minus_one_ptid))
2706 return true;
2707
2708 return this->beneath ()->supports_stopped_by_sw_breakpoint ();
2709 }
2710
2711 /* The stopped_by_hw_breakpoint method of target record-btrace. */
2712
2713 bool
2714 record_btrace_target::stopped_by_hw_breakpoint ()
2715 {
2716 if (record_is_replaying (minus_one_ptid))
2717 {
2718 struct thread_info *tp = inferior_thread ();
2719
2720 return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2721 }
2722
2723 return this->beneath ()->stopped_by_hw_breakpoint ();
2724 }
2725
2726 /* The supports_stopped_by_hw_breakpoint method of target
2727 record-btrace. */
2728
2729 bool
2730 record_btrace_target::supports_stopped_by_hw_breakpoint ()
2731 {
2732 if (record_is_replaying (minus_one_ptid))
2733 return true;
2734
2735 return this->beneath ()->supports_stopped_by_hw_breakpoint ();
2736 }
2737
2738 /* The update_thread_list method of target record-btrace. */
2739
2740 void
2741 record_btrace_target::update_thread_list ()
2742 {
2743 /* We don't add or remove threads during replay. */
2744 if (record_is_replaying (minus_one_ptid))
2745 return;
2746
2747 /* Forward the request. */
2748 this->beneath ()->update_thread_list ();
2749 }
2750
2751 /* The thread_alive method of target record-btrace. */
2752
2753 bool
2754 record_btrace_target::thread_alive (ptid_t ptid)
2755 {
2756 /* We don't add or remove threads during replay. */
2757 if (record_is_replaying (minus_one_ptid))
2758 return true;
2759
2760 /* Forward the request. */
2761 return this->beneath ()->thread_alive (ptid);
2762 }
2763
2764 /* Set the replay branch trace instruction iterator. If IT is NULL, replay
2765 is stopped. */
2766
2767 static void
2768 record_btrace_set_replay (struct thread_info *tp,
2769 const struct btrace_insn_iterator *it)
2770 {
2771 struct btrace_thread_info *btinfo;
2772
2773 btinfo = &tp->btrace;
2774
2775 if (it == NULL)
2776 record_btrace_stop_replaying (tp);
2777 else
2778 {
2779 if (btinfo->replay == NULL)
2780 record_btrace_start_replaying (tp);
2781 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
2782 return;
2783
2784 *btinfo->replay = *it;
2785 registers_changed_thread (tp);
2786 }
2787
2788 /* Start anew from the new replay position. */
2789 record_btrace_clear_histories (btinfo);
2790
2791 inferior_thread ()->suspend.stop_pc
2792 = regcache_read_pc (get_current_regcache ());
2793 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
2794 }
2795
2796 /* The goto_record_begin method of target record-btrace. */
2797
2798 void
2799 record_btrace_target::goto_record_begin ()
2800 {
2801 struct thread_info *tp;
2802 struct btrace_insn_iterator begin;
2803
2804 tp = require_btrace_thread ();
2805
2806 btrace_insn_begin (&begin, &tp->btrace);
2807
2808 /* Skip gaps at the beginning of the trace. */
2809 while (btrace_insn_get (&begin) == NULL)
2810 {
2811 unsigned int steps;
2812
2813 steps = btrace_insn_next (&begin, 1);
2814 if (steps == 0)
2815 error (_("No trace."));
2816 }
2817
2818 record_btrace_set_replay (tp, &begin);
2819 }
2820
2821 /* The goto_record_end method of target record-btrace. */
2822
2823 void
2824 record_btrace_target::goto_record_end ()
2825 {
2826 struct thread_info *tp;
2827
2828 tp = require_btrace_thread ();
2829
2830 record_btrace_set_replay (tp, NULL);
2831 }
2832
2833 /* The goto_record method of target record-btrace. */
2834
2835 void
2836 record_btrace_target::goto_record (ULONGEST insn)
2837 {
2838 struct thread_info *tp;
2839 struct btrace_insn_iterator it;
2840 unsigned int number;
2841 int found;
2842
2843 number = insn;
2844
2845 /* Check for wrap-arounds: INSN must fit into the unsigned int NUMBER. */
2846 if (number != insn)
2847 error (_("Instruction number out of range."));
2848
2849 tp = require_btrace_thread ();
2850
2851 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
2852
2853 /* Check if the instruction could not be found or is a gap. */
2854 if (found == 0 || btrace_insn_get (&it) == NULL)
2855 error (_("No such instruction."));
2856
2857 record_btrace_set_replay (tp, &it);
2858 }
2859
2860 /* The record_stop_replaying method of target record-btrace. */
2861
2862 void
2863 record_btrace_target::record_stop_replaying ()
2864 {
2865 for (thread_info *tp : all_non_exited_threads ())
2866 record_btrace_stop_replaying (tp);
2867 }
2868
2869 /* The execution_direction target method. */
2870
2871 enum exec_direction_kind
2872 record_btrace_target::execution_direction ()
2873 {
2874 return record_btrace_resume_exec_dir;
2875 }
2876
2877 /* The prepare_to_generate_core target method. */
2878
2879 void
2880 record_btrace_target::prepare_to_generate_core ()
2881 {
2882 record_btrace_generating_corefile = 1;
2883 }
2884
2885 /* The done_generating_core target method. */
2886
2887 void
2888 record_btrace_target::done_generating_core ()
2889 {
2890 record_btrace_generating_corefile = 0;
2891 }
2892
2893 /* Start recording in BTS format. */
2894
2895 static void
2896 cmd_record_btrace_bts_start (const char *args, int from_tty)
2897 {
2898 if (args != NULL && *args != 0)
2899 error (_("Invalid argument."));
2900
2901 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2902
2903 try
2904 {
2905 execute_command ("target record-btrace", from_tty);
2906 }
2907 catch (const gdb_exception &exception)
2908 {
2909 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2910 throw;
2911 }
2912 }
2913
2914 /* Start recording in Intel Processor Trace format. */
2915
2916 static void
2917 cmd_record_btrace_pt_start (const char *args, int from_tty)
2918 {
2919 if (args != NULL && *args != 0)
2920 error (_("Invalid argument."));
2921
2922 record_btrace_conf.format = BTRACE_FORMAT_PT;
2923
2924 try
2925 {
2926 execute_command ("target record-btrace", from_tty);
2927 }
2928 catch (const gdb_exception &exception)
2929 {
2930 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2931 throw;
2932 }
2933 }
2934
2935 /* Alias for "target record".  Try Intel PT format first; fall back to BTS if that fails. */
2936
2937 static void
2938 cmd_record_btrace_start (const char *args, int from_tty)
2939 {
2940 if (args != NULL && *args != 0)
2941 error (_("Invalid argument."));
2942
2943 record_btrace_conf.format = BTRACE_FORMAT_PT;
2944
2945 try
2946 {
2947 execute_command ("target record-btrace", from_tty);
2948 }
2949 catch (const gdb_exception &exception)
2950 {
2951 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2952
2953 try
2954 {
2955 execute_command ("target record-btrace", from_tty);
2956 }
2957 catch (const gdb_exception &ex)
2958 {
2959 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2960 throw;
2961 }
2962 }
2963 }
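/* Illustrative usage of the three commands above:
(gdb) record btrace bts
(gdb) record btrace pt
(gdb) record btrace -- tries pt first, falling back to bts.  */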
2964
2965 /* The "set record btrace" command. */
2966
2967 static void
2968 cmd_set_record_btrace (const char *args, int from_tty)
2969 {
2970 printf_unfiltered (_("\"set record btrace\" must be followed "
2971 "by an appropriate subcommand.\n"));
2972 help_list (set_record_btrace_cmdlist, "set record btrace ",
2973 all_commands, gdb_stdout);
2974 }
2975
2976 /* The "show record btrace" command. */
2977
2978 static void
2979 cmd_show_record_btrace (const char *args, int from_tty)
2980 {
2981 cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
2982 }
2983
2984 /* The "show record btrace replay-memory-access" command. */
2985
2986 static void
2987 cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2988 struct cmd_list_element *c, const char *value)
2989 {
2990 fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
2991 replay_memory_access);
2992 }
2993
2994 /* The "set record btrace cpu none" command. */
2995
2996 static void
2997 cmd_set_record_btrace_cpu_none (const char *args, int from_tty)
2998 {
2999 if (args != nullptr && *args != 0)
3000 error (_("Trailing junk: '%s'."), args);
3001
3002 record_btrace_cpu_state = CS_NONE;
3003 }
3004
3005 /* The "set record btrace cpu auto" command. */
3006
3007 static void
3008 cmd_set_record_btrace_cpu_auto (const char *args, int from_tty)
3009 {
3010 if (args != nullptr && *args != 0)
3011 error (_("Trailing junk: '%s'."), args);
3012
3013 record_btrace_cpu_state = CS_AUTO;
3014 }
3015
3016 /* The "set record btrace cpu" command. */
3017
3018 static void
3019 cmd_set_record_btrace_cpu (const char *args, int from_tty)
3020 {
3021 if (args == nullptr)
3022 args = "";
3023
3024 /* We use a hard-coded vendor string for now. */
3025 unsigned int family, model, stepping;
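/* Parse "FAMILY/MODEL[/STEPPING]" following the "intel:" prefix.  The "%n"
conversions record how many characters have been consumed so far (they do
not count towards MATCHES); this is used below to reject trailing junk.  */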
3026 int l1, l2, matches = sscanf (args, "intel: %u/%u%n/%u%n", &family,
3027 &model, &l1, &stepping, &l2);
3028 if (matches == 3)
3029 {
3030 if (strlen (args) != l2)
3031 error (_("Trailing junk: '%s'."), args + l2);
3032 }
3033 else if (matches == 2)
3034 {
3035 if (strlen (args) != l1)
3036 error (_("Trailing junk: '%s'."), args + l1);
3037
3038 stepping = 0;
3039 }
3040 else
3041 error (_("Bad format. See \"help set record btrace cpu\"."));
3042
3043 if (USHRT_MAX < family)
3044 error (_("Cpu family too big."));
3045
3046 if (UCHAR_MAX < model)
3047 error (_("Cpu model too big."));
3048
3049 if (UCHAR_MAX < stepping)
3050 error (_("Cpu stepping too big."));
3051
3052 record_btrace_cpu.vendor = CV_INTEL;
3053 record_btrace_cpu.family = family;
3054 record_btrace_cpu.model = model;
3055 record_btrace_cpu.stepping = stepping;
3056
3057 record_btrace_cpu_state = CS_CPU;
3058 }
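/* Example invocations (the family/model/stepping values are illustrative):
(gdb) set record btrace cpu intel: 6/62
(gdb) set record btrace cpu intel: 6/62/4
(gdb) set record btrace cpu auto
(gdb) set record btrace cpu none  */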
3059
3060 /* The "show record btrace cpu" command. */
3061
3062 static void
3063 cmd_show_record_btrace_cpu (const char *args, int from_tty)
3064 {
3065 if (args != nullptr && *args != 0)
3066 error (_("Trailing junk: '%s'."), args);
3067
3068 switch (record_btrace_cpu_state)
3069 {
3070 case CS_AUTO:
3071 printf_unfiltered (_("btrace cpu is 'auto'.\n"));
3072 return;
3073
3074 case CS_NONE:
3075 printf_unfiltered (_("btrace cpu is 'none'.\n"));
3076 return;
3077
3078 case CS_CPU:
3079 switch (record_btrace_cpu.vendor)
3080 {
3081 case CV_INTEL:
3082 if (record_btrace_cpu.stepping == 0)
3083 printf_unfiltered (_("btrace cpu is 'intel: %u/%u'.\n"),
3084 record_btrace_cpu.family,
3085 record_btrace_cpu.model);
3086 else
3087 printf_unfiltered (_("btrace cpu is 'intel: %u/%u/%u'.\n"),
3088 record_btrace_cpu.family,
3089 record_btrace_cpu.model,
3090 record_btrace_cpu.stepping);
3091 return;
3092 }
3093 }
3094
3095 error (_("Internal error: bad cpu state."));
3096 }
3097
3098 /* The "s record btrace bts" command. */
3099
3100 static void
3101 cmd_set_record_btrace_bts (const char *args, int from_tty)
3102 {
3103 printf_unfiltered (_("\"set record btrace bts\" must be followed "
3104 "by an appropriate subcommand.\n"));
3105 help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
3106 all_commands, gdb_stdout);
3107 }
3108
3109 /* The "show record btrace bts" command. */
3110
3111 static void
3112 cmd_show_record_btrace_bts (const char *args, int from_tty)
3113 {
3114 cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
3115 }
3116
3117 /* The "set record btrace pt" command. */
3118
3119 static void
3120 cmd_set_record_btrace_pt (const char *args, int from_tty)
3121 {
3122 printf_unfiltered (_("\"set record btrace pt\" must be followed "
3123 "by an appropriate subcommand.\n"));
3124 help_list (set_record_btrace_pt_cmdlist, "set record btrace pt ",
3125 all_commands, gdb_stdout);
3126 }
3127
3128 /* The "show record btrace pt" command. */
3129
3130 static void
3131 cmd_show_record_btrace_pt (const char *args, int from_tty)
3132 {
3133 cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, "");
3134 }
3135
3136 /* The "record bts buffer-size" show value function. */
3137
3138 static void
3139 show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
3140 struct cmd_list_element *c,
3141 const char *value)
3142 {
3143 fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
3144 value);
3145 }
3146
3147 /* The "record pt buffer-size" show value function. */
3148
3149 static void
3150 show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
3151 struct cmd_list_element *c,
3152 const char *value)
3153 {
3154 fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
3155 value);
3156 }
3157
3158 /* Initialize btrace commands. */
3159
3160 void _initialize_record_btrace ();
3161 void
3162 _initialize_record_btrace ()
3163 {
3164 add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
3165 _("Start branch trace recording."), &record_btrace_cmdlist,
3166 "record btrace ", 0, &record_cmdlist);
3167 add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
3168
3169 add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
3170 _("\
3171 Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
3172 The processor stores a from/to record for each branch into a cyclic buffer.\n\
3173 This format may not be available on all processors."),
3174 &record_btrace_cmdlist);
3175 add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);
3176
3177 add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
3178 _("\
3179 Start branch trace recording in Intel Processor Trace format.\n\n\
3180 This format may not be available on all processors."),
3181 &record_btrace_cmdlist);
3182 add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);
3183
3184 add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
3185 _("Set record options."), &set_record_btrace_cmdlist,
3186 "set record btrace ", 0, &set_record_cmdlist);
3187
3188 add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
3189 _("Show record options."), &show_record_btrace_cmdlist,
3190 "show record btrace ", 0, &show_record_cmdlist);
3191
3192 add_setshow_enum_cmd ("replay-memory-access", no_class,
3193 replay_memory_access_types, &replay_memory_access, _("\
3194 Set what memory accesses are allowed during replay."), _("\
3195 Show what memory accesses are allowed during replay."),
3196 _("Default is READ-ONLY.\n\n\
3197 The btrace record target does not trace data.\n\
3198 The memory therefore corresponds to the live target and not \
3199 to the current replay position.\n\n\
3200 When READ-ONLY, allow accesses to read-only memory during replay.\n\
3201 When READ-WRITE, allow accesses to read-only and read-write memory during \
3202 replay."),
3203 NULL, cmd_show_replay_memory_access,
3204 &set_record_btrace_cmdlist,
3205 &show_record_btrace_cmdlist);
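/* E.g., "set record btrace replay-memory-access read-write" also allows
accesses to read-write memory during replay.  */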
3206
3207 add_prefix_cmd ("cpu", class_support, cmd_set_record_btrace_cpu,
3208 _("\
3209 Set the cpu to be used for trace decode.\n\n\
3210 The format is \"VENDOR:IDENTIFIER\" or \"none\" or \"auto\" (default).\n\
3211 For vendor \"intel\" the format is \"FAMILY/MODEL[/STEPPING]\".\n\n\
3212 When decoding branch trace, enable errata workarounds for the specified cpu.\n\
3213 The default is \"auto\", which uses the cpu on which the trace was recorded.\n\
3214 When GDB does not support that cpu, this option can be used to enable\n\
3215 workarounds for a similar cpu that GDB supports.\n\n\
3216 When set to \"none\", errata workarounds are disabled."),
3217 &set_record_btrace_cpu_cmdlist,
3218 "set record btrace cpu ", 1,
3219 &set_record_btrace_cmdlist);
3220
3221 add_cmd ("auto", class_support, cmd_set_record_btrace_cpu_auto, _("\
3222 Automatically determine the cpu to be used for trace decode."),
3223 &set_record_btrace_cpu_cmdlist);
3224
3225 add_cmd ("none", class_support, cmd_set_record_btrace_cpu_none, _("\
3226 Do not enable errata workarounds for trace decode."),
3227 &set_record_btrace_cpu_cmdlist);
3228
3229 add_cmd ("cpu", class_support, cmd_show_record_btrace_cpu, _("\
3230 Show the cpu to be used for trace decode."),
3231 &show_record_btrace_cmdlist);
3232
3233 add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
3234 _("Set record btrace bts options."),
3235 &set_record_btrace_bts_cmdlist,
3236 "set record btrace bts ", 0, &set_record_btrace_cmdlist);
3237
3238 add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
3239 _("Show record btrace bts options."),
3240 &show_record_btrace_bts_cmdlist,
3241 "show record btrace bts ", 0, &show_record_btrace_cmdlist);
3242
3243 add_setshow_uinteger_cmd ("buffer-size", no_class,
3244 &record_btrace_conf.bts.size,
3245 _("Set the record/replay bts buffer size."),
3246 _("Show the record/replay bts buffer size."), _("\
3247 When starting recording, request a trace buffer of this size. \
3248 The actual buffer size may differ from the requested size. \
3249 Use \"info record\" to see the actual buffer size.\n\n\
3250 Bigger buffers allow longer recording but also take more time to process \
3251 the recorded execution trace.\n\n\
3252 The trace buffer size may not be changed while recording."), NULL,
3253 show_record_bts_buffer_size_value,
3254 &set_record_btrace_bts_cmdlist,
3255 &show_record_btrace_bts_cmdlist);
3256
3257 add_prefix_cmd ("pt", class_support, cmd_set_record_btrace_pt,
3258 _("Set record btrace pt options."),
3259 &set_record_btrace_pt_cmdlist,
3260 "set record btrace pt ", 0, &set_record_btrace_cmdlist);
3261
3262 add_prefix_cmd ("pt", class_support, cmd_show_record_btrace_pt,
3263 _("Show record btrace pt options."),
3264 &show_record_btrace_pt_cmdlist,
3265 "show record btrace pt ", 0, &show_record_btrace_cmdlist);
3266
3267 add_setshow_uinteger_cmd ("buffer-size", no_class,
3268 &record_btrace_conf.pt.size,
3269 _("Set the record/replay pt buffer size."),
3270 _("Show the record/replay pt buffer size."), _("\
3271 Bigger buffers allow longer recording but also take more time to process \
3272 the recorded execution.\n\
3273 The actual buffer size may differ from the requested size. Use \"info record\" \
3274 to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
3275 &set_record_btrace_pt_cmdlist,
3276 &show_record_btrace_pt_cmdlist);
3277
3278 add_target (record_btrace_target_info, record_btrace_target_open);
3279
3280 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
3281 xcalloc, xfree);
3282
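/* Default trace buffer sizes.  They can be changed with the
"set record btrace bts/pt buffer-size" commands registered above.  */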
3283 record_btrace_conf.bts.size = 64 * 1024;
3284 record_btrace_conf.pt.size = 16 * 1024;
3285 }