gdb: Add support for tracking the DWARF line table is-stmt field
gdb/record-btrace.c
/* Branch trace support for GDB, the GNU debugger.

   Copyright (C) 2013-2020 Free Software Foundation, Inc.

   Contributed by Intel Corp. <markus.t.metzger@intel.com>

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "record.h"
#include "record-btrace.h"
#include "gdbthread.h"
#include "target.h"
#include "gdbcmd.h"
#include "disasm.h"
#include "observable.h"
#include "cli/cli-utils.h"
#include "source.h"
#include "ui-out.h"
#include "symtab.h"
#include "filenames.h"
#include "regcache.h"
#include "frame-unwind.h"
#include "hashtab.h"
#include "infrun.h"
#include "event-loop.h"
#include "inf-loop.h"
#include "inferior.h"
#include <algorithm>
#include "gdbarch.h"
#include "cli/cli-style.h"

static const target_info record_btrace_target_info = {
  "record-btrace",
  N_("Branch tracing target"),
  N_("Collect control-flow trace and provide the execution history.")
};

/* The target_ops of record-btrace.  */

class record_btrace_target final : public target_ops
{
public:
  const target_info &info () const override
  { return record_btrace_target_info; }

  strata stratum () const override { return record_stratum; }

  void close () override;
  void async (int) override;

  void detach (inferior *inf, int from_tty) override
  { record_detach (this, inf, from_tty); }

  void disconnect (const char *, int) override;

  void mourn_inferior () override
  { record_mourn_inferior (this); }

  void kill () override
  { record_kill (this); }

  enum record_method record_method (ptid_t ptid) override;

  void stop_recording () override;
  void info_record () override;

  void insn_history (int size, gdb_disassembly_flags flags) override;
  void insn_history_from (ULONGEST from, int size,
			  gdb_disassembly_flags flags) override;
  void insn_history_range (ULONGEST begin, ULONGEST end,
			   gdb_disassembly_flags flags) override;
  void call_history (int size, record_print_flags flags) override;
  void call_history_from (ULONGEST begin, int size, record_print_flags flags)
    override;
  void call_history_range (ULONGEST begin, ULONGEST end,
			   record_print_flags flags) override;

  bool record_is_replaying (ptid_t ptid) override;
  bool record_will_replay (ptid_t ptid, int dir) override;
  void record_stop_replaying () override;

  enum target_xfer_status xfer_partial (enum target_object object,
					const char *annex,
					gdb_byte *readbuf,
					const gdb_byte *writebuf,
					ULONGEST offset, ULONGEST len,
					ULONGEST *xfered_len) override;

  int insert_breakpoint (struct gdbarch *,
			 struct bp_target_info *) override;
  int remove_breakpoint (struct gdbarch *, struct bp_target_info *,
			 enum remove_bp_reason) override;

  void fetch_registers (struct regcache *, int) override;

  void store_registers (struct regcache *, int) override;
  void prepare_to_store (struct regcache *) override;

  const struct frame_unwind *get_unwinder () override;

  const struct frame_unwind *get_tailcall_unwinder () override;

  void commit_resume () override;
  void resume (ptid_t, int, enum gdb_signal) override;
  ptid_t wait (ptid_t, struct target_waitstatus *, int) override;

  void stop (ptid_t) override;
  void update_thread_list () override;
  bool thread_alive (ptid_t ptid) override;
  void goto_record_begin () override;
  void goto_record_end () override;
  void goto_record (ULONGEST insn) override;

  bool can_execute_reverse () override;

  bool stopped_by_sw_breakpoint () override;
  bool supports_stopped_by_sw_breakpoint () override;

  bool stopped_by_hw_breakpoint () override;
  bool supports_stopped_by_hw_breakpoint () override;

  enum exec_direction_kind execution_direction () override;
  void prepare_to_generate_core () override;
  void done_generating_core () override;
};

static record_btrace_target record_btrace_ops;

/* Token associated with a new-thread observer enabling branch tracing
   for the new thread.  */
static const gdb::observers::token record_btrace_thread_observer_token {};

/* Memory access types used in set/show record btrace replay-memory-access.  */
static const char replay_memory_access_read_only[] = "read-only";
static const char replay_memory_access_read_write[] = "read-write";
static const char *const replay_memory_access_types[] =
{
  replay_memory_access_read_only,
  replay_memory_access_read_write,
  NULL
};

/* The currently allowed replay memory access type.  */
static const char *replay_memory_access = replay_memory_access_read_only;

/* The cpu state kinds.  */
enum record_btrace_cpu_state_kind
{
  CS_AUTO,
  CS_NONE,
  CS_CPU
};

/* The current cpu state.  */
static enum record_btrace_cpu_state_kind record_btrace_cpu_state = CS_AUTO;

/* The current cpu for trace decode.  */
static struct btrace_cpu record_btrace_cpu;

/* Command lists for "set/show record btrace".  */
static struct cmd_list_element *set_record_btrace_cmdlist;
static struct cmd_list_element *show_record_btrace_cmdlist;

/* The execution direction of the last resume we got.  See record-full.c.  */
static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;

/* The async event handler for reverse/replay execution.  */
static struct async_event_handler *record_btrace_async_inferior_event_handler;

/* A flag indicating that we are currently generating a core file.  */
static int record_btrace_generating_corefile;

/* The current branch trace configuration.  */
static struct btrace_config record_btrace_conf;

/* Command list for "record btrace".  */
static struct cmd_list_element *record_btrace_cmdlist;

/* Command lists for "set/show record btrace bts".  */
static struct cmd_list_element *set_record_btrace_bts_cmdlist;
static struct cmd_list_element *show_record_btrace_bts_cmdlist;

/* Command lists for "set/show record btrace pt".  */
static struct cmd_list_element *set_record_btrace_pt_cmdlist;
static struct cmd_list_element *show_record_btrace_pt_cmdlist;

/* Command list for "set record btrace cpu".  */
static struct cmd_list_element *set_record_btrace_cpu_cmdlist;

/* Print a record-btrace debug message.  Use do ... while (0) to avoid
   ambiguities when used in if statements.  */

#define DEBUG(msg, args...) \
  do \
    { \
      if (record_debug != 0) \
	fprintf_unfiltered (gdb_stdlog, \
			    "[record-btrace] " msg "\n", ##args); \
    } \
  while (0)
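
/* As a usage illustration (hypothetical call sites, not from this file):
   the do ... while (0) wrapper lets DEBUG appear as a single statement
   in an unbraced if/else without creating a dangling-else or a stray
   empty statement:

     if (stepping)
       DEBUG ("stepping thread %s", target_pid_to_str (tp->ptid).c_str ());
     else
       DEBUG ("continuing");

   Without the wrapper, the expanded braces plus the trailing semicolon
   would terminate the if before the else is reached.  */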


/* Return the cpu configured by the user.  Returns NULL if the cpu was
   configured as auto.  */
const struct btrace_cpu *
record_btrace_get_cpu (void)
{
  switch (record_btrace_cpu_state)
    {
    case CS_AUTO:
      return nullptr;

    case CS_NONE:
      record_btrace_cpu.vendor = CV_UNKNOWN;
      /* Fall through.  */
    case CS_CPU:
      return &record_btrace_cpu;
    }

  error (_("Internal error: bad record btrace cpu state."));
}

/* Update the branch trace for the current thread and return a pointer to its
   thread_info.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct thread_info *
require_btrace_thread (void)
{
  DEBUG ("require");

  if (inferior_ptid == null_ptid)
    error (_("No thread."));

  thread_info *tp = inferior_thread ();

  validate_registers_access ();

  btrace_fetch (tp, record_btrace_get_cpu ());

  if (btrace_is_empty (tp))
    error (_("No trace."));

  return tp;
}

/* Update the branch trace for the current thread and return a pointer to its
   branch trace information struct.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct btrace_thread_info *
require_btrace (void)
{
  struct thread_info *tp;

  tp = require_btrace_thread ();

  return &tp->btrace;
}

/* Enable branch tracing for one thread.  Warn on errors.  */

static void
record_btrace_enable_warn (struct thread_info *tp)
{
  try
    {
      btrace_enable (tp, &record_btrace_conf);
    }
  catch (const gdb_exception_error &error)
    {
      warning ("%s", error.what ());
    }
}

/* Enable automatic tracing of new threads.  */

static void
record_btrace_auto_enable (void)
{
  DEBUG ("attach thread observer");

  gdb::observers::new_thread.attach (record_btrace_enable_warn,
				     record_btrace_thread_observer_token);
}

/* Disable automatic tracing of new threads.  */

static void
record_btrace_auto_disable (void)
{
  DEBUG ("detach thread observer");

  gdb::observers::new_thread.detach (record_btrace_thread_observer_token);
}

/* The record-btrace async event handler function.  */

static void
record_btrace_handle_async_inferior_event (gdb_client_data data)
{
  inferior_event_handler (INF_REG_EVENT, NULL);
}

/* See record-btrace.h.  */

void
record_btrace_push_target (void)
{
  const char *format;

  record_btrace_auto_enable ();

  push_target (&record_btrace_ops);

  record_btrace_async_inferior_event_handler
    = create_async_event_handler (record_btrace_handle_async_inferior_event,
				  NULL);
  record_btrace_generating_corefile = 0;

  format = btrace_format_short_string (record_btrace_conf.format);
  gdb::observers::record_changed.notify (current_inferior (), 1, "btrace",
					 format);
}

/* Disable btrace on a set of threads on scope exit.  */

struct scoped_btrace_disable
{
  scoped_btrace_disable () = default;

  DISABLE_COPY_AND_ASSIGN (scoped_btrace_disable);

  ~scoped_btrace_disable ()
  {
    for (thread_info *tp : m_threads)
      btrace_disable (tp);
  }

  void add_thread (thread_info *thread)
  {
    m_threads.push_front (thread);
  }

  void discard ()
  {
    m_threads.clear ();
  }

private:
  std::forward_list<thread_info *> m_threads;
};

/* Open target record-btrace.  */

static void
record_btrace_target_open (const char *args, int from_tty)
{
  /* If we fail to enable btrace for one thread, disable it for the threads
     for which it was successfully enabled.  */
  scoped_btrace_disable btrace_disable;

  DEBUG ("open");

  record_preopen ();

  if (!target_has_execution)
    error (_("The program is not being run."));

  for (thread_info *tp : all_non_exited_threads ())
    if (args == NULL || *args == 0 || number_is_in_list (args, tp->global_num))
      {
	btrace_enable (tp, &record_btrace_conf);

	btrace_disable.add_thread (tp);
      }

  record_btrace_push_target ();

  btrace_disable.discard ();
}
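
/* As an illustration (hypothetical session; the exact command spelling
   that reaches this function may differ): with a NULL or empty ARGS,
   every non-exited thread is traced; otherwise ARGS is matched against
   each thread's global number via number_is_in_list, so an argument
   string like "1 3-5" would enable tracing for global threads 1 and
   3 through 5 only.  */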

/* The stop_recording method of target record-btrace.  */

void
record_btrace_target::stop_recording ()
{
  DEBUG ("stop recording");

  record_btrace_auto_disable ();

  for (thread_info *tp : all_non_exited_threads ())
    if (tp->btrace.target != NULL)
      btrace_disable (tp);
}

/* The disconnect method of target record-btrace.  */

void
record_btrace_target::disconnect (const char *args,
				  int from_tty)
{
  struct target_ops *beneath = this->beneath ();

  /* Do not stop recording, just clean up GDB side.  */
  unpush_target (this);

  /* Forward disconnect.  */
  beneath->disconnect (args, from_tty);
}

/* The close method of target record-btrace.  */

void
record_btrace_target::close ()
{
  if (record_btrace_async_inferior_event_handler != NULL)
    delete_async_event_handler (&record_btrace_async_inferior_event_handler);

  /* Make sure automatic recording gets disabled even if we did not stop
     recording before closing the record-btrace target.  */
  record_btrace_auto_disable ();

  /* We should have already stopped recording.
     Tear down btrace in case we have not.  */
  for (thread_info *tp : all_non_exited_threads ())
    btrace_teardown (tp);
}

/* The async method of target record-btrace.  */

void
record_btrace_target::async (int enable)
{
  if (enable)
    mark_async_event_handler (record_btrace_async_inferior_event_handler);
  else
    clear_async_event_handler (record_btrace_async_inferior_event_handler);

  this->beneath ()->async (enable);
}

/* Adjusts the size and returns a human-readable size suffix.  */

static const char *
record_btrace_adjust_size (unsigned int *size)
{
  unsigned int sz;

  sz = *size;

  if ((sz & ((1u << 30) - 1)) == 0)
    {
      *size = sz >> 30;
      return "GB";
    }
  else if ((sz & ((1u << 20) - 1)) == 0)
    {
      *size = sz >> 20;
      return "MB";
    }
  else if ((sz & ((1u << 10) - 1)) == 0)
    {
      *size = sz >> 10;
      return "kB";
    }
  else
    return "";
}
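
/* For example (values chosen for illustration): a size of 3145728
   (3 << 20) is an exact multiple of 1 MB, so *SIZE becomes 3 and the
   suffix is "MB"; 1572864 (1536 << 10) is not, so it falls through to
   the kB test and yields 1536 and "kB"; 1000 matches no test and is
   returned unchanged with an empty suffix.  */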

/* Print a BTS configuration.  */

static void
record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
{
  const char *suffix;
  unsigned int size;

  size = conf->size;
  if (size > 0)
    {
      suffix = record_btrace_adjust_size (&size);
      printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
    }
}

/* Print an Intel Processor Trace configuration.  */

static void
record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
{
  const char *suffix;
  unsigned int size;

  size = conf->size;
  if (size > 0)
    {
      suffix = record_btrace_adjust_size (&size);
      printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
    }
}

/* Print a branch tracing configuration.  */

static void
record_btrace_print_conf (const struct btrace_config *conf)
{
  printf_unfiltered (_("Recording format: %s.\n"),
		     btrace_format_string (conf->format));

  switch (conf->format)
    {
    case BTRACE_FORMAT_NONE:
      return;

    case BTRACE_FORMAT_BTS:
      record_btrace_print_bts_conf (&conf->bts);
      return;

    case BTRACE_FORMAT_PT:
      record_btrace_print_pt_conf (&conf->pt);
      return;
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
}

/* The info_record method of target record-btrace.  */

void
record_btrace_target::info_record ()
{
  struct btrace_thread_info *btinfo;
  const struct btrace_config *conf;
  struct thread_info *tp;
  unsigned int insns, calls, gaps;

  DEBUG ("info");

  if (inferior_ptid == null_ptid)
    error (_("No thread."));

  tp = inferior_thread ();

  validate_registers_access ();

  btinfo = &tp->btrace;

  conf = ::btrace_conf (btinfo);
  if (conf != NULL)
    record_btrace_print_conf (conf);

  btrace_fetch (tp, record_btrace_get_cpu ());

  insns = 0;
  calls = 0;
  gaps = 0;

  if (!btrace_is_empty (tp))
    {
      struct btrace_call_iterator call;
      struct btrace_insn_iterator insn;

      btrace_call_end (&call, btinfo);
      btrace_call_prev (&call, 1);
      calls = btrace_call_number (&call);

      btrace_insn_end (&insn, btinfo);
      insns = btrace_insn_number (&insn);

      /* If the last instruction is not a gap, it is the current instruction
	 that is not actually part of the record.  */
      if (btrace_insn_get (&insn) != NULL)
	insns -= 1;

      gaps = btinfo->ngaps;
    }

  printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
		       "for thread %s (%s).\n"), insns, calls, gaps,
		     print_thread_id (tp),
		     target_pid_to_str (tp->ptid).c_str ());

  if (btrace_is_replaying (tp))
    printf_unfiltered (_("Replay in progress.  At instruction %u.\n"),
		       btrace_insn_number (btinfo->replay));
}

/* Print a decode error.  */

static void
btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
			    enum btrace_format format)
{
  const char *errstr = btrace_decode_error (format, errcode);

  uiout->text (_("["));
  /* ERRCODE > 0 indicates notifications on BTRACE_FORMAT_PT.  */
  if (!(format == BTRACE_FORMAT_PT && errcode > 0))
    {
      uiout->text (_("decode error ("));
      uiout->field_signed ("errcode", errcode);
      uiout->text (_("): "));
    }
  uiout->text (errstr);
  uiout->text (_("]\n"));
}

/* A range of source lines.  */

struct btrace_line_range
{
  /* The symtab this line is from.  */
  struct symtab *symtab;

  /* The first line (inclusive).  */
  int begin;

  /* The last line (exclusive).  */
  int end;
};

/* Construct a line range.  */

static struct btrace_line_range
btrace_mk_line_range (struct symtab *symtab, int begin, int end)
{
  struct btrace_line_range range;

  range.symtab = symtab;
  range.begin = begin;
  range.end = end;

  return range;
}

/* Add a line to a line range.  */

static struct btrace_line_range
btrace_line_range_add (struct btrace_line_range range, int line)
{
  if (range.end <= range.begin)
    {
      /* This is the first entry.  */
      range.begin = line;
      range.end = line + 1;
    }
  else if (line < range.begin)
    range.begin = line;
  else if (range.end <= line)
    range.end = line + 1;

  return range;
}
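
/* For example (illustrative values): starting from an empty range,
   adding line 14 yields [14, 15); adding line 10 then widens it to
   [10, 15), and adding line 20 to [10, 21).  The range stays dense,
   so lines 11..13 and 15..19 are covered as well even if no linetable
   entry mentions them.  */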

/* Return non-zero if RANGE is empty, zero otherwise.  */

static int
btrace_line_range_is_empty (struct btrace_line_range range)
{
  return range.end <= range.begin;
}

/* Return non-zero if LHS contains RHS, zero otherwise.  */

static int
btrace_line_range_contains_range (struct btrace_line_range lhs,
				  struct btrace_line_range rhs)
{
  return ((lhs.symtab == rhs.symtab)
	  && (lhs.begin <= rhs.begin)
	  && (rhs.end <= lhs.end));
}

/* Find the line range associated with PC.  */

static struct btrace_line_range
btrace_find_line_range (CORE_ADDR pc)
{
  struct btrace_line_range range;
  struct linetable_entry *lines;
  struct linetable *ltable;
  struct symtab *symtab;
  int nlines, i;

  symtab = find_pc_line_symtab (pc);
  if (symtab == NULL)
    return btrace_mk_line_range (NULL, 0, 0);

  ltable = SYMTAB_LINETABLE (symtab);
  if (ltable == NULL)
    return btrace_mk_line_range (symtab, 0, 0);

  nlines = ltable->nitems;
  lines = ltable->item;
  if (nlines <= 0)
    return btrace_mk_line_range (symtab, 0, 0);

  range = btrace_mk_line_range (symtab, 0, 0);
  for (i = 0; i < nlines - 1; i++)
    {
      /* The is_stmt check was added when the is_stmt field was
	 introduced to 'struct linetable_entry'; it keeps this loop
	 behaving exactly as it did before is_stmt existed.  That
	 said, we might be better off not checking is_stmt here, at
	 the cost of possibly adding more line numbers to the range.
	 At the time this change was made it was unclear how to test
	 this, so the existing behaviour was kept.  */
      if ((lines[i].pc == pc) && (lines[i].line != 0)
	  && (lines[i].is_stmt == 1))
	range = btrace_line_range_add (range, lines[i].line);
    }

  return range;
}
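
/* As an illustration (made-up linetable contents): if PC 0x400 has the
   entries {line 10, is_stmt 1}, {line 12, is_stmt 0} and {line 14,
   is_stmt 1}, the loop above adds lines 10 and 14 but skips 12, so the
   resulting range is [10, 15) and the non-statement entry influences
   neither bound.  */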

/* Print source lines in LINES to UIOUT.

   SRC_AND_ASM_TUPLE holds the tuple emitter for the last source line
   and the instructions corresponding to that source line; ASM_LIST
   holds the list emitter for those instructions.  When printing a new
   source line, we close the open emitters and open new ones for the
   new source line.  If the source line range in LINES is not empty,
   this function leaves the emitters for the last printed source line
   open so instructions can be added to them.  */

static void
btrace_print_lines (struct btrace_line_range lines, struct ui_out *uiout,
		    gdb::optional<ui_out_emit_tuple> *src_and_asm_tuple,
		    gdb::optional<ui_out_emit_list> *asm_list,
		    gdb_disassembly_flags flags)
{
  print_source_lines_flags psl_flags;

  if (flags & DISASSEMBLY_FILENAME)
    psl_flags |= PRINT_SOURCE_LINES_FILENAME;

  for (int line = lines.begin; line < lines.end; ++line)
    {
      asm_list->reset ();

      src_and_asm_tuple->emplace (uiout, "src_and_asm_line");

      print_source_lines (lines.symtab, line, line + 1, psl_flags);

      asm_list->emplace (uiout, "line_asm_insn");
    }
}

/* Disassemble a section of the recorded instruction trace.  */

static void
btrace_insn_history (struct ui_out *uiout,
		     const struct btrace_thread_info *btinfo,
		     const struct btrace_insn_iterator *begin,
		     const struct btrace_insn_iterator *end,
		     gdb_disassembly_flags flags)
{
  DEBUG ("itrace (0x%x): [%u; %u)", (unsigned) flags,
	 btrace_insn_number (begin), btrace_insn_number (end));

  flags |= DISASSEMBLY_SPECULATIVE;

  struct gdbarch *gdbarch = target_gdbarch ();
  btrace_line_range last_lines = btrace_mk_line_range (NULL, 0, 0);

  ui_out_emit_list list_emitter (uiout, "asm_insns");

  gdb::optional<ui_out_emit_tuple> src_and_asm_tuple;
  gdb::optional<ui_out_emit_list> asm_list;

  gdb_pretty_print_disassembler disasm (gdbarch, uiout);

  for (btrace_insn_iterator it = *begin; btrace_insn_cmp (&it, end) != 0;
       btrace_insn_next (&it, 1))
    {
      const struct btrace_insn *insn;

      insn = btrace_insn_get (&it);

      /* A NULL instruction indicates a gap in the trace.  */
      if (insn == NULL)
	{
	  const struct btrace_config *conf;

	  conf = btrace_conf (btinfo);

	  /* We have trace so we must have a configuration.  */
	  gdb_assert (conf != NULL);

	  uiout->field_fmt ("insn-number", "%u",
			    btrace_insn_number (&it));
	  uiout->text ("\t");

	  btrace_ui_out_decode_error (uiout, btrace_insn_get_error (&it),
				      conf->format);
	}
      else
	{
	  struct disasm_insn dinsn;

	  if ((flags & DISASSEMBLY_SOURCE) != 0)
	    {
	      struct btrace_line_range lines;

	      lines = btrace_find_line_range (insn->pc);
	      if (!btrace_line_range_is_empty (lines)
		  && !btrace_line_range_contains_range (last_lines, lines))
		{
		  btrace_print_lines (lines, uiout, &src_and_asm_tuple,
				      &asm_list, flags);
		  last_lines = lines;
		}
	      else if (!src_and_asm_tuple.has_value ())
		{
		  gdb_assert (!asm_list.has_value ());

		  src_and_asm_tuple.emplace (uiout, "src_and_asm_line");

		  /* No source information.  */
		  asm_list.emplace (uiout, "line_asm_insn");
		}

	      gdb_assert (src_and_asm_tuple.has_value ());
	      gdb_assert (asm_list.has_value ());
	    }

	  memset (&dinsn, 0, sizeof (dinsn));
	  dinsn.number = btrace_insn_number (&it);
	  dinsn.addr = insn->pc;

	  if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
	    dinsn.is_speculative = 1;

	  disasm.pretty_print_insn (&dinsn, flags);
	}
    }
}

/* The insn_history method of target record-btrace.  */

void
record_btrace_target::insn_history (int size, gdb_disassembly_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  btinfo = require_btrace ();
  history = btinfo->insn_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("insn-history (0x%x): %d", (unsigned) flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
	 start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
	begin = *replay;
      else
	btrace_insn_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
	 expand in the other direction, as well, to fill up any remaining
	 context.  */
      end = begin;
      if (size < 0)
	{
	  /* We want the current position covered, as well.  */
	  covered = btrace_insn_next (&end, 1);
	  covered += btrace_insn_prev (&begin, context - covered);
	  covered += btrace_insn_next (&end, context - covered);
	}
      else
	{
	  covered = btrace_insn_next (&end, context);
	  covered += btrace_insn_prev (&begin, context - covered);
	}
    }
  else
    {
      begin = history->begin;
      end = history->end;

      DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", (unsigned) flags, size,
	     btrace_insn_number (&begin), btrace_insn_number (&end));

      if (size < 0)
	{
	  end = begin;
	  covered = btrace_insn_prev (&begin, context);
	}
      else
	{
	  begin = end;
	  covered = btrace_insn_next (&end, context);
	}
    }

  if (covered > 0)
    btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
	printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
	printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  btrace_set_insn_history (btinfo, &begin, &end);
}

/* The insn_history_range method of target record-btrace.  */

void
record_btrace_target::insn_history_range (ULONGEST from, ULONGEST to,
					  gdb_disassembly_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_iterator begin, end;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "insn history");
  low = from;
  high = to;

  DEBUG ("insn-history (0x%x): [%u; %u)", (unsigned) flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_insn_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_insn_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_insn_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_insn_next (&end, 1);
    }

  btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_insn_history (btinfo, &begin, &end);
}

/* The insn_history_from method of target record-btrace.  */

void
record_btrace_target::insn_history_from (ULONGEST from, int size,
					 gdb_disassembly_flags flags)
{
  ULONGEST begin, end, context;

  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  if (size < 0)
    {
      end = from;

      if (from < context)
	begin = 0;
      else
	begin = from - context + 1;
    }
  else
    {
      begin = from;
      end = from + context - 1;

      /* Check for wrap-around.  */
      if (end < begin)
	end = ULONGEST_MAX;
    }

  insn_history_range (begin, end, flags);
}
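
/* Worked example (illustrative numbers): with FROM = 10 and SIZE = -3,
   CONTEXT is 3, so END = 10 and BEGIN = 10 - 3 + 1 = 8, i.e. the three
   instructions 8, 9 and 10 ending at FROM.  With SIZE = +3, BEGIN = 10
   and END = 12, i.e. the three instructions starting at FROM.  */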

/* Print the instruction number range for a function call history line.  */

static void
btrace_call_history_insn_range (struct ui_out *uiout,
				const struct btrace_function *bfun)
{
  unsigned int begin, end, size;

  size = bfun->insn.size ();
  gdb_assert (size > 0);

  begin = bfun->insn_offset;
  end = begin + size - 1;

  uiout->field_unsigned ("insn begin", begin);
  uiout->text (",");
  uiout->field_unsigned ("insn end", end);
}

/* Compute the lowest and highest source line for the instructions in BFUN
   and return them in PBEGIN and PEND.
   Ignore instructions that can't be mapped to BFUN, e.g. instructions that
   result from inlining or macro expansion.  */

static void
btrace_compute_src_line_range (const struct btrace_function *bfun,
			       int *pbegin, int *pend)
{
  struct symtab *symtab;
  struct symbol *sym;
  int begin, end;

  begin = INT_MAX;
  end = INT_MIN;

  sym = bfun->sym;
  if (sym == NULL)
    goto out;

  symtab = symbol_symtab (sym);

  for (const btrace_insn &insn : bfun->insn)
    {
      struct symtab_and_line sal;

      sal = find_pc_line (insn.pc, 0);
      if (sal.symtab != symtab || sal.line == 0)
	continue;

      begin = std::min (begin, sal.line);
      end = std::max (end, sal.line);
    }

 out:
  *pbegin = begin;
  *pend = end;
}
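
/* Note the sentinel values: if no instruction maps to BFUN's symtab,
   *PBEGIN stays INT_MAX and *PEND stays INT_MIN, so *PEND < *PBEGIN.
   Callers rely on that, e.g. btrace_call_history_src_line below prints
   no line numbers for such an inverted range.  */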

/* Print the source line information for a function call history line.  */

static void
btrace_call_history_src_line (struct ui_out *uiout,
			      const struct btrace_function *bfun)
{
  struct symbol *sym;
  int begin, end;

  sym = bfun->sym;
  if (sym == NULL)
    return;

  uiout->field_string ("file",
		       symtab_to_filename_for_display (symbol_symtab (sym)),
		       file_name_style.style ());

  btrace_compute_src_line_range (bfun, &begin, &end);
  if (end < begin)
    return;

  uiout->text (":");
  uiout->field_signed ("min line", begin);

  if (end == begin)
    return;

  uiout->text (",");
  uiout->field_signed ("max line", end);
}

/* Get the name of a branch trace function.  */

static const char *
btrace_get_bfun_name (const struct btrace_function *bfun)
{
  struct minimal_symbol *msym;
  struct symbol *sym;

  if (bfun == NULL)
    return "??";

  msym = bfun->msym;
  sym = bfun->sym;

  if (sym != NULL)
    return sym->print_name ();
  else if (msym != NULL)
    return msym->print_name ();
  else
    return "??";
}

/* Disassemble a section of the recorded function trace.  */

static void
btrace_call_history (struct ui_out *uiout,
		     const struct btrace_thread_info *btinfo,
		     const struct btrace_call_iterator *begin,
		     const struct btrace_call_iterator *end,
		     int int_flags)
{
  struct btrace_call_iterator it;
  record_print_flags flags = (enum record_print_flag) int_flags;

  DEBUG ("ftrace (0x%x): [%u; %u)", int_flags, btrace_call_number (begin),
	 btrace_call_number (end));

  for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
    {
      const struct btrace_function *bfun;
      struct minimal_symbol *msym;
      struct symbol *sym;

      bfun = btrace_call_get (&it);
      sym = bfun->sym;
      msym = bfun->msym;

      /* Print the function index.  */
      uiout->field_unsigned ("index", bfun->number);
      uiout->text ("\t");

      /* Indicate gaps in the trace.  */
      if (bfun->errcode != 0)
	{
	  const struct btrace_config *conf;

	  conf = btrace_conf (btinfo);

	  /* We have trace so we must have a configuration.  */
	  gdb_assert (conf != NULL);

	  btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);

	  continue;
	}

      if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
	{
	  int level = bfun->level + btinfo->level, i;

	  for (i = 0; i < level; ++i)
	    uiout->text ("  ");
	}

      if (sym != NULL)
	uiout->field_string ("function", sym->print_name (),
			     function_name_style.style ());
      else if (msym != NULL)
	uiout->field_string ("function", msym->print_name (),
			     function_name_style.style ());
      else if (!uiout->is_mi_like_p ())
	uiout->field_string ("function", "??",
			     function_name_style.style ());

      if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
	{
	  uiout->text (_("\tinst "));
	  btrace_call_history_insn_range (uiout, bfun);
	}

      if ((flags & RECORD_PRINT_SRC_LINE) != 0)
	{
	  uiout->text (_("\tat "));
	  btrace_call_history_src_line (uiout, bfun);
	}

      uiout->text ("\n");
    }
}

/* The call_history method of target record-btrace.  */

void
record_btrace_target::call_history (int size, record_print_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  btinfo = require_btrace ();
  history = btinfo->call_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("call-history (0x%x): %d", (int) flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
	 start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
	{
	  begin.btinfo = btinfo;
	  begin.index = replay->call_index;
	}
      else
	btrace_call_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
	 expand in the other direction, as well, to fill up any remaining
	 context.  */
      end = begin;
      if (size < 0)
	{
	  /* We want the current position covered, as well.  */
	  covered = btrace_call_next (&end, 1);
	  covered += btrace_call_prev (&begin, context - covered);
	  covered += btrace_call_next (&end, context - covered);
	}
      else
	{
	  covered = btrace_call_next (&end, context);
	  covered += btrace_call_prev (&begin, context - covered);
	}
    }
  else
    {
      begin = history->begin;
      end = history->end;

      DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", (int) flags, size,
	     btrace_call_number (&begin), btrace_call_number (&end));

      if (size < 0)
	{
	  end = begin;
	  covered = btrace_call_prev (&begin, context);
	}
      else
	{
	  begin = end;
	  covered = btrace_call_next (&end, context);
	}
    }

  if (covered > 0)
    btrace_call_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
	printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
	printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  btrace_set_call_history (btinfo, &begin, &end);
}

/* The call_history_range method of target record-btrace.  */

void
record_btrace_target::call_history_range (ULONGEST from, ULONGEST to,
					  record_print_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_iterator begin, end;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "func history");
  low = from;
  high = to;

  DEBUG ("call-history (0x%x): [%u; %u)", (int) flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_call_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_call_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_call_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_call_next (&end, 1);
    }

  btrace_call_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_call_history (btinfo, &begin, &end);
}

/* The call_history_from method of target record-btrace.  */

void
record_btrace_target::call_history_from (ULONGEST from, int size,
					 record_print_flags flags)
{
  ULONGEST begin, end, context;

  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  if (size < 0)
    {
      end = from;

      if (from < context)
	begin = 0;
      else
	begin = from - context + 1;
    }
  else
    {
      begin = from;
      end = from + context - 1;

      /* Check for wrap-around.  */
      if (end < begin)
	end = ULONGEST_MAX;
    }

  call_history_range (begin, end, flags);
}

/* The record_method method of target record-btrace.  */

enum record_method
record_btrace_target::record_method (ptid_t ptid)
{
  process_stratum_target *proc_target = current_inferior ()->process_target ();
  thread_info *const tp = find_thread_ptid (proc_target, ptid);

  if (tp == NULL)
    error (_("No thread."));

  if (tp->btrace.target == NULL)
    return RECORD_METHOD_NONE;

  return RECORD_METHOD_BTRACE;
}

/* The record_is_replaying method of target record-btrace.  */

bool
record_btrace_target::record_is_replaying (ptid_t ptid)
{
  process_stratum_target *proc_target = current_inferior ()->process_target ();
  for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
    if (btrace_is_replaying (tp))
      return true;

  return false;
}

/* The record_will_replay method of target record-btrace.  */

bool
record_btrace_target::record_will_replay (ptid_t ptid, int dir)
{
  return dir == EXEC_REVERSE || record_is_replaying (ptid);
}

/* The xfer_partial method of target record-btrace.  */

enum target_xfer_status
record_btrace_target::xfer_partial (enum target_object object,
				    const char *annex, gdb_byte *readbuf,
				    const gdb_byte *writebuf, ULONGEST offset,
				    ULONGEST len, ULONGEST *xfered_len)
{
  /* Filter out requests that don't make sense during replay.  */
  if (replay_memory_access == replay_memory_access_read_only
      && !record_btrace_generating_corefile
      && record_is_replaying (inferior_ptid))
    {
      switch (object)
	{
	case TARGET_OBJECT_MEMORY:
	  {
	    struct target_section *section;

	    /* We do not allow writing memory in general.  */
	    if (writebuf != NULL)
	      {
		*xfered_len = len;
		return TARGET_XFER_UNAVAILABLE;
	      }

	    /* We allow reading readonly memory.  */
	    section = target_section_by_addr (this, offset);
	    if (section != NULL)
	      {
		/* Check if the section we found is readonly.  */
		if ((bfd_section_flags (section->the_bfd_section)
		     & SEC_READONLY) != 0)
		  {
		    /* Truncate the request to fit into this section.  */
		    len = std::min (len, section->endaddr - offset);
		    break;
		  }
	      }

	    *xfered_len = len;
	    return TARGET_XFER_UNAVAILABLE;
	  }
	}
    }

  /* Forward the request.  */
  return this->beneath ()->xfer_partial (object, annex, readbuf, writebuf,
					 offset, len, xfered_len);
}

/* The insert_breakpoint method of target record-btrace.  */

int
record_btrace_target::insert_breakpoint (struct gdbarch *gdbarch,
					 struct bp_target_info *bp_tgt)
{
  const char *old;
  int ret;

  /* Inserting breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  try
    {
      ret = this->beneath ()->insert_breakpoint (gdbarch, bp_tgt);
    }
  catch (const gdb_exception &except)
    {
      replay_memory_access = old;
      throw;
    }
  replay_memory_access = old;

  return ret;
}

/* The remove_breakpoint method of target record-btrace.  */

int
record_btrace_target::remove_breakpoint (struct gdbarch *gdbarch,
					 struct bp_target_info *bp_tgt,
					 enum remove_bp_reason reason)
{
  const char *old;
  int ret;

  /* Removing breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  try
    {
      ret = this->beneath ()->remove_breakpoint (gdbarch, bp_tgt, reason);
    }
  catch (const gdb_exception &except)
    {
      replay_memory_access = old;
      throw;
    }
  replay_memory_access = old;

  return ret;
}

/* The fetch_registers method of target record-btrace.  */

void
record_btrace_target::fetch_registers (struct regcache *regcache, int regno)
{
  thread_info *tp = find_thread_ptid (regcache->target (), regcache->ptid ());
  gdb_assert (tp != NULL);

  btrace_insn_iterator *replay = tp->btrace.replay;
  if (replay != NULL && !record_btrace_generating_corefile)
    {
      const struct btrace_insn *insn;
      struct gdbarch *gdbarch;
      int pcreg;

      gdbarch = regcache->arch ();
      pcreg = gdbarch_pc_regnum (gdbarch);
      if (pcreg < 0)
	return;

      /* We can only provide the PC register.  */
      if (regno >= 0 && regno != pcreg)
	return;

      insn = btrace_insn_get (replay);
      gdb_assert (insn != NULL);

      regcache->raw_supply (regno, &insn->pc);
    }
  else
    this->beneath ()->fetch_registers (regcache, regno);
}

/* The store_registers method of target record-btrace.  */

void
record_btrace_target::store_registers (struct regcache *regcache, int regno)
{
  if (!record_btrace_generating_corefile
      && record_is_replaying (regcache->ptid ()))
    error (_("Cannot write registers while replaying."));

  gdb_assert (may_write_registers);

  this->beneath ()->store_registers (regcache, regno);
}

/* The prepare_to_store method of target record-btrace.  */

void
record_btrace_target::prepare_to_store (struct regcache *regcache)
{
  if (!record_btrace_generating_corefile
      && record_is_replaying (regcache->ptid ()))
    return;

  this->beneath ()->prepare_to_store (regcache);
}

/* The branch trace frame cache.  */

struct btrace_frame_cache
{
  /* The thread.  */
  struct thread_info *tp;

  /* The frame info.  */
  struct frame_info *frame;

  /* The branch trace function segment.  */
  const struct btrace_function *bfun;
};

/* A struct btrace_frame_cache hash table indexed by NEXT.  */

static htab_t bfcache;

/* hash_f for htab_create_alloc of bfcache.  */

static hashval_t
bfcache_hash (const void *arg)
{
  const struct btrace_frame_cache *cache
    = (const struct btrace_frame_cache *) arg;

  return htab_hash_pointer (cache->frame);
}

/* eq_f for htab_create_alloc of bfcache.  */

static int
bfcache_eq (const void *arg1, const void *arg2)
{
  const struct btrace_frame_cache *cache1
    = (const struct btrace_frame_cache *) arg1;
  const struct btrace_frame_cache *cache2
    = (const struct btrace_frame_cache *) arg2;

  return cache1->frame == cache2->frame;
}
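
/* Both callbacks key purely on the frame pointer, so the table behaves
   like a map from frame_info * to btrace_frame_cache *.  A minimal
   sketch of how such a table is typically created (the actual call
   lives outside this excerpt; the initial size 50 is made up):

     bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq,
				  NULL, xcalloc, xfree);  */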

/* Create a new btrace frame cache.  */

static struct btrace_frame_cache *
bfcache_new (struct frame_info *frame)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
  cache->frame = frame;

  slot = htab_find_slot (bfcache, cache, INSERT);
  gdb_assert (*slot == NULL);
  *slot = cache;

  return cache;
}

/* Extract the branch trace function from a branch trace frame.  */

static const struct btrace_function *
btrace_get_frame_function (struct frame_info *frame)
{
  const struct btrace_frame_cache *cache;
  struct btrace_frame_cache pattern;
  void **slot;

  pattern.frame = frame;

  slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
  if (slot == NULL)
    return NULL;

  cache = (const struct btrace_frame_cache *) *slot;
  return cache->bfun;
}

/* Implement stop_reason method for record_btrace_frame_unwind.  */

static enum unwind_stop_reason
record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
					void **this_cache)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;

  cache = (const struct btrace_frame_cache *) *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  if (bfun->up == 0)
    return UNWIND_UNAVAILABLE;

  return UNWIND_NO_REASON;
}

/* Implement this_id method for record_btrace_frame_unwind.  */

static void
record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
			     struct frame_id *this_id)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;
  struct btrace_call_iterator it;
  CORE_ADDR code, special;

  cache = (const struct btrace_frame_cache *) *this_cache;

  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  while (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->prev) != 0)
    bfun = btrace_call_get (&it);

  code = get_frame_func (this_frame);
  special = bfun->number;

  *this_id = frame_id_build_unavailable_stack_special (code, special);

  DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
	 btrace_get_bfun_name (cache->bfun),
	 core_addr_to_string_nz (this_id->code_addr),
	 core_addr_to_string_nz (this_id->special_addr));
}

/* Implement prev_register method for record_btrace_frame_unwind.  */

static struct value *
record_btrace_frame_prev_register (struct frame_info *this_frame,
				   void **this_cache,
				   int regnum)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun, *caller;
  struct btrace_call_iterator it;
  struct gdbarch *gdbarch;
  CORE_ADDR pc;
  int pcreg;

  gdbarch = get_frame_arch (this_frame);
  pcreg = gdbarch_pc_regnum (gdbarch);
  if (pcreg < 0 || regnum != pcreg)
    throw_error (NOT_AVAILABLE_ERROR,
		 _("Registers are not available in btrace record history"));

  cache = (const struct btrace_frame_cache *) *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  if (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->up) == 0)
    throw_error (NOT_AVAILABLE_ERROR,
		 _("No caller in btrace record history"));

  caller = btrace_call_get (&it);

  if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
    pc = caller->insn.front ().pc;
  else
    {
      pc = caller->insn.back ().pc;
      pc += gdb_insn_length (gdbarch, pc);
    }

  DEBUG ("[frame] unwound PC in %s on level %d: %s",
	 btrace_get_bfun_name (bfun), bfun->level,
	 core_addr_to_string_nz (pc));

  return frame_unwind_got_address (this_frame, regnum, pc);
}

/* Implement sniffer method for record_btrace_frame_unwind.  */

static int
record_btrace_frame_sniffer (const struct frame_unwind *self,
			     struct frame_info *this_frame,
			     void **this_cache)
{
  const struct btrace_function *bfun;
  struct btrace_frame_cache *cache;
  struct thread_info *tp;
  struct frame_info *next;

  /* THIS_FRAME does not contain a reference to its thread.  */
  tp = inferior_thread ();

  bfun = NULL;
  next = get_next_frame (this_frame);
  if (next == NULL)
    {
      const struct btrace_insn_iterator *replay;

      replay = tp->btrace.replay;
      if (replay != NULL)
	bfun = &replay->btinfo->functions[replay->call_index];
    }
  else
    {
      const struct btrace_function *callee;
      struct btrace_call_iterator it;

      callee = btrace_get_frame_function (next);
      if (callee == NULL || (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
	return 0;

      if (btrace_find_call_by_number (&it, &tp->btrace, callee->up) == 0)
	return 0;

      bfun = btrace_call_get (&it);
    }

  if (bfun == NULL)
    return 0;

  DEBUG ("[frame] sniffed frame for %s on level %d",
	 btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = tp;
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}

/* Implement sniffer method for record_btrace_tailcall_frame_unwind.  */

static int
record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
				      struct frame_info *this_frame,
				      void **this_cache)
{
  const struct btrace_function *bfun, *callee;
  struct btrace_frame_cache *cache;
  struct btrace_call_iterator it;
  struct frame_info *next;
  struct thread_info *tinfo;

  next = get_next_frame (this_frame);
  if (next == NULL)
    return 0;

  callee = btrace_get_frame_function (next);
  if (callee == NULL)
    return 0;

  if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
    return 0;

  tinfo = inferior_thread ();
  if (btrace_find_call_by_number (&it, &tinfo->btrace, callee->up) == 0)
    return 0;

  bfun = btrace_call_get (&it);

  DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
	 btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = tinfo;
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}

/* Implement the dealloc_cache method for record_btrace_frame_unwind.  */

static void
record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = (struct btrace_frame_cache *) this_cache;

  slot = htab_find_slot (bfcache, cache, NO_INSERT);
  gdb_assert (slot != NULL);

  htab_remove_elt (bfcache, cache);
}

/* btrace recording does not store previous memory contents, nor the
   contents of stack frames.  Any attempt to unwind would return
   erroneous results, as the stack contents no longer match the changed
   PC value restored from history.  Therefore this unwinder reports any
   possibly unwound registers as <unavailable>.  */

const struct frame_unwind record_btrace_frame_unwind =
{
  NORMAL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_frame_sniffer,
  record_btrace_frame_dealloc_cache
};

const struct frame_unwind record_btrace_tailcall_frame_unwind =
{
  TAILCALL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_tailcall_frame_sniffer,
  record_btrace_frame_dealloc_cache
};

/* Implement the get_unwinder method.  */

const struct frame_unwind *
record_btrace_target::get_unwinder ()
{
  return &record_btrace_frame_unwind;
}

/* Implement the get_tailcall_unwinder method.  */

const struct frame_unwind *
record_btrace_target::get_tailcall_unwinder ()
{
  return &record_btrace_tailcall_frame_unwind;
}

/* Return a human-readable string for FLAG.  */

static const char *
btrace_thread_flag_to_str (enum btrace_thread_flag flag)
{
  switch (flag)
    {
    case BTHR_STEP:
      return "step";

    case BTHR_RSTEP:
      return "reverse-step";

    case BTHR_CONT:
      return "cont";

    case BTHR_RCONT:
      return "reverse-cont";

    case BTHR_STOP:
      return "stop";
    }

  return "<invalid>";
}

/* Indicate that TP should be resumed according to FLAG.  */

static void
record_btrace_resume_thread (struct thread_info *tp,
			     enum btrace_thread_flag flag)
{
  struct btrace_thread_info *btinfo;

  DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp),
	 target_pid_to_str (tp->ptid).c_str (), flag,
	 btrace_thread_flag_to_str (flag));

  btinfo = &tp->btrace;

  /* Fetch the latest branch trace.  */
  btrace_fetch (tp, record_btrace_get_cpu ());

  /* A resume request overwrites a preceding resume or stop request.  */
  btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
  btinfo->flags |= flag;
}

/* Get the current frame for TP.  */

static struct frame_id
get_thread_current_frame_id (struct thread_info *tp)
{
  struct frame_id id;
  bool executing;

  /* Set current thread, which is implicitly used by
     get_current_frame.  */
  scoped_restore_current_thread restore_thread;

  switch_to_thread (tp);

  process_stratum_target *proc_target = tp->inf->process_target ();

  /* Clear the executing flag to allow changes to the current frame.
     We are not actually running, yet.  We just started a reverse execution
     command or a record goto command.
     For the latter, EXECUTING is false and this has no effect.
     For the former, EXECUTING is true and we're in wait, about to
     move the thread.  Since we need to recompute the stack, we temporarily
     set EXECUTING to false.  */
  executing = tp->executing;
  set_executing (proc_target, inferior_ptid, false);

  id = null_frame_id;
  try
    {
      id = get_frame_id (get_current_frame ());
    }
  catch (const gdb_exception &except)
    {
      /* Restore the previous execution state.  */
      set_executing (proc_target, inferior_ptid, executing);

      throw;
    }

  /* Restore the previous execution state.  */
  set_executing (proc_target, inferior_ptid, executing);

  return id;
}

/* Start replaying a thread.  */

static struct btrace_insn_iterator *
record_btrace_start_replaying (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;
  replay = NULL;

  /* We can't start replaying without trace.  */
  if (btinfo->functions.empty ())
    return NULL;

  /* GDB stores the current frame_id when stepping in order to detect steps
     into subroutines.
     Since frames are computed differently when we're replaying, we need to
     recompute those stored frames and fix them up so we can still detect
     subroutines after we started replaying.  */
2029 try
2030 {
2031 struct frame_id frame_id;
2032 int upd_step_frame_id, upd_step_stack_frame_id;
2033
2034 /* The current frame without replaying - computed via normal unwind. */
2035 frame_id = get_thread_current_frame_id (tp);
2036
2037 /* Check if we need to update any stepping-related frame id's. */
2038 upd_step_frame_id = frame_id_eq (frame_id,
2039 tp->control.step_frame_id);
2040 upd_step_stack_frame_id = frame_id_eq (frame_id,
2041 tp->control.step_stack_frame_id);
2042
2043 /* We start replaying at the end of the branch trace. This corresponds
2044 to the current instruction. */
2045 replay = XNEW (struct btrace_insn_iterator);
2046 btrace_insn_end (replay, btinfo);
2047
2048 /* Skip gaps at the end of the trace. */
2049 while (btrace_insn_get (replay) == NULL)
2050 {
2051 unsigned int steps;
2052
2053 steps = btrace_insn_prev (replay, 1);
2054 if (steps == 0)
2055 error (_("No trace."));
2056 }
2057
2058 /* We're not replaying, yet. */
2059 gdb_assert (btinfo->replay == NULL);
2060 btinfo->replay = replay;
2061
2062 /* Make sure we're not using any stale registers. */
2063 registers_changed_thread (tp);
2064
2065 /* The current frame with replaying - computed via btrace unwind. */
2066 frame_id = get_thread_current_frame_id (tp);
2067
2068 /* Replace stepping related frames where necessary. */
2069 if (upd_step_frame_id)
2070 tp->control.step_frame_id = frame_id;
2071 if (upd_step_stack_frame_id)
2072 tp->control.step_stack_frame_id = frame_id;
2073 }
2074 catch (const gdb_exception &except)
2075 {
2076 xfree (btinfo->replay);
2077 btinfo->replay = NULL;
2078
2079 registers_changed_thread (tp);
2080
2081 throw;
2082 }
2083
2084 return replay;
2085 }
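
/* A sketch of the replay life cycle implemented above and below:
   record_btrace_start_replaying allocates BTINFO->REPLAY and positions
   it at the current instruction, the single-step functions move the
   iterator through the recorded trace, and record_btrace_stop_replaying
   frees the iterator again. A thread is considered to be replaying
   exactly while BTINFO->REPLAY is non-NULL. */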
2086
2087 /* Stop replaying a thread. */
2088
2089 static void
2090 record_btrace_stop_replaying (struct thread_info *tp)
2091 {
2092 struct btrace_thread_info *btinfo;
2093
2094 btinfo = &tp->btrace;
2095
2096 xfree (btinfo->replay);
2097 btinfo->replay = NULL;
2098
2099 /* Make sure we're not leaving any stale registers. */
2100 registers_changed_thread (tp);
2101 }
2102
2103 /* Stop replaying TP if it is at the end of its execution history. */
2104
2105 static void
2106 record_btrace_stop_replaying_at_end (struct thread_info *tp)
2107 {
2108 struct btrace_insn_iterator *replay, end;
2109 struct btrace_thread_info *btinfo;
2110
2111 btinfo = &tp->btrace;
2112 replay = btinfo->replay;
2113
2114 if (replay == NULL)
2115 return;
2116
2117 btrace_insn_end (&end, btinfo);
2118
2119 if (btrace_insn_cmp (replay, &end) == 0)
2120 record_btrace_stop_replaying (tp);
2121 }
2122
2123 /* The resume method of target record-btrace. */
2124
2125 void
2126 record_btrace_target::resume (ptid_t ptid, int step, enum gdb_signal signal)
2127 {
2128 enum btrace_thread_flag flag, cflag;
2129
2130 DEBUG ("resume %s: %s%s", target_pid_to_str (ptid).c_str (),
2131 ::execution_direction == EXEC_REVERSE ? "reverse-" : "",
2132 step ? "step" : "cont");
2133
2134 /* Store the execution direction of the last resume.
2135
2136 If there is more than one resume call, we have to rely on infrun
2137 to not change the execution direction in-between. */
2138 record_btrace_resume_exec_dir = ::execution_direction;
2139
2140 /* As long as we're not replaying, just forward the request.
2141
2142 For non-stop targets this means that no thread is replaying. In order to
2143 make progress, we may need to explicitly move replaying threads to the end
2144 of their execution history. */
2145 if ((::execution_direction != EXEC_REVERSE)
2146 && !record_is_replaying (minus_one_ptid))
2147 {
2148 this->beneath ()->resume (ptid, step, signal);
2149 return;
2150 }
2151
2152 /* Compute the btrace thread flag for the requested move. */
2153 if (::execution_direction == EXEC_REVERSE)
2154 {
2155 flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;
2156 cflag = BTHR_RCONT;
2157 }
2158 else
2159 {
2160 flag = step == 0 ? BTHR_CONT : BTHR_STEP;
2161 cflag = BTHR_CONT;
2162 }
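
/* In other words: forward stepping requests BTHR_STEP, forward
   continuing BTHR_CONT, reverse stepping BTHR_RSTEP, and reverse
   continuing BTHR_RCONT. CFLAG is the flag given to threads that are
   merely continued alongside the stepped thread on all-stop targets;
   see below. */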
2163
2164 /* We just indicate the resume intent here. The actual stepping happens in
2165 record_btrace_wait below.
2166
2167 For all-stop targets, we only step INFERIOR_PTID and continue others. */
2168
2169 process_stratum_target *proc_target = current_inferior ()->process_target ();
2170
2171 if (!target_is_non_stop_p ())
2172 {
2173 gdb_assert (inferior_ptid.matches (ptid));
2174
2175 for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
2176 {
2177 if (tp->ptid.matches (inferior_ptid))
2178 record_btrace_resume_thread (tp, flag);
2179 else
2180 record_btrace_resume_thread (tp, cflag);
2181 }
2182 }
2183 else
2184 {
2185 for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
2186 record_btrace_resume_thread (tp, flag);
2187 }
2188
2189 /* Async support. */
2190 if (target_can_async_p ())
2191 {
2192 target_async (1);
2193 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2194 }
2195 }
2196
2197 /* The commit_resume method of target record-btrace. */
2198
2199 void
2200 record_btrace_target::commit_resume ()
2201 {
2202 if ((::execution_direction != EXEC_REVERSE)
2203 && !record_is_replaying (minus_one_ptid))
2204 beneath ()->commit_resume ();
2205 }
2206
2207 /* Cancel resuming TP. */
2208
2209 static void
2210 record_btrace_cancel_resume (struct thread_info *tp)
2211 {
2212 enum btrace_thread_flag flags;
2213
2214 flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
2215 if (flags == 0)
2216 return;
2217
2218 DEBUG ("cancel resume thread %s (%s): %x (%s)",
2219 print_thread_id (tp),
2220 target_pid_to_str (tp->ptid).c_str (), flags,
2221 btrace_thread_flag_to_str (flags));
2222
2223 tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
2224 record_btrace_stop_replaying_at_end (tp);
2225 }
2226
2227 /* Return a target_waitstatus indicating that we ran out of history. */
2228
2229 static struct target_waitstatus
2230 btrace_step_no_history (void)
2231 {
2232 struct target_waitstatus status;
2233
2234 status.kind = TARGET_WAITKIND_NO_HISTORY;
2235
2236 return status;
2237 }
2238
2239 /* Return a target_waitstatus indicating that a step finished. */
2240
2241 static struct target_waitstatus
2242 btrace_step_stopped (void)
2243 {
2244 struct target_waitstatus status;
2245
2246 status.kind = TARGET_WAITKIND_STOPPED;
2247 status.value.sig = GDB_SIGNAL_TRAP;
2248
2249 return status;
2250 }
2251
2252 /* Return a target_waitstatus indicating that a thread was stopped as
2253 requested. */
2254
2255 static struct target_waitstatus
2256 btrace_step_stopped_on_request (void)
2257 {
2258 struct target_waitstatus status;
2259
2260 status.kind = TARGET_WAITKIND_STOPPED;
2261 status.value.sig = GDB_SIGNAL_0;
2262
2263 return status;
2264 }
2265
2266 /* Return a target_waitstatus indicating a spurious stop. */
2267
2268 static struct target_waitstatus
2269 btrace_step_spurious (void)
2270 {
2271 struct target_waitstatus status;
2272
2273 status.kind = TARGET_WAITKIND_SPURIOUS;
2274
2275 return status;
2276 }
2277
2278 /* Return a target_waitstatus indicating that the thread was not resumed. */
2279
2280 static struct target_waitstatus
2281 btrace_step_no_resumed (void)
2282 {
2283 struct target_waitstatus status;
2284
2285 status.kind = TARGET_WAITKIND_NO_RESUMED;
2286
2287 return status;
2288 }
2289
2290 /* Return a target_waitstatus indicating that we should wait again. */
2291
2292 static struct target_waitstatus
2293 btrace_step_again (void)
2294 {
2295 struct target_waitstatus status;
2296
2297 status.kind = TARGET_WAITKIND_IGNORE;
2298
2299 return status;
2300 }
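
/* For orientation, the helpers above cover the following situations;
   the authoritative mapping is record_btrace_step_thread below.

   ran out of execution history -> TARGET_WAITKIND_NO_HISTORY
   a single step completed -> TARGET_WAITKIND_STOPPED with SIGTRAP
   stopped on explicit request -> TARGET_WAITKIND_STOPPED with signal 0
   an uneventful single step -> TARGET_WAITKIND_SPURIOUS
   no thread was resumed -> TARGET_WAITKIND_NO_RESUMED
   keep moving, e.g. while continuing -> TARGET_WAITKIND_IGNORE */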
2301
2302 /* Clear the record histories. */
2303
2304 static void
2305 record_btrace_clear_histories (struct btrace_thread_info *btinfo)
2306 {
2307 xfree (btinfo->insn_history);
2308 xfree (btinfo->call_history);
2309
2310 btinfo->insn_history = NULL;
2311 btinfo->call_history = NULL;
2312 }
2313
2314 /* Check whether TP's current replay position is at a breakpoint. */
2315
2316 static int
2317 record_btrace_replay_at_breakpoint (struct thread_info *tp)
2318 {
2319 struct btrace_insn_iterator *replay;
2320 struct btrace_thread_info *btinfo;
2321 const struct btrace_insn *insn;
2322
2323 btinfo = &tp->btrace;
2324 replay = btinfo->replay;
2325
2326 if (replay == NULL)
2327 return 0;
2328
2329 insn = btrace_insn_get (replay);
2330 if (insn == NULL)
2331 return 0;
2332
2333 return record_check_stopped_by_breakpoint (tp->inf->aspace, insn->pc,
2334 &btinfo->stop_reason);
2335 }
2336
2337 /* Step one instruction in forward direction. */
2338
2339 static struct target_waitstatus
2340 record_btrace_single_step_forward (struct thread_info *tp)
2341 {
2342 struct btrace_insn_iterator *replay, end, start;
2343 struct btrace_thread_info *btinfo;
2344
2345 btinfo = &tp->btrace;
2346 replay = btinfo->replay;
2347
2348 /* We're done if we're not replaying. */
2349 if (replay == NULL)
2350 return btrace_step_no_history ();
2351
2352 /* Check if we're stepping a breakpoint. */
2353 if (record_btrace_replay_at_breakpoint (tp))
2354 return btrace_step_stopped ();
2355
2356 /* Skip gaps during replay. If we end up at a gap (at the end of the trace),
2357 jump back to the instruction at which we started. */
2358 start = *replay;
2359 do
2360 {
2361 unsigned int steps;
2362
2363 /* We will bail out here if we continue stepping after reaching the end
2364 of the execution history. */
2365 steps = btrace_insn_next (replay, 1);
2366 if (steps == 0)
2367 {
2368 *replay = start;
2369 return btrace_step_no_history ();
2370 }
2371 }
2372 while (btrace_insn_get (replay) == NULL);
2373
2374 /* Determine the end of the instruction trace. */
2375 btrace_insn_end (&end, btinfo);
2376
2377 /* The execution trace contains (and ends with) the current instruction.
2378 This instruction has not been executed, yet, so the trace really ends
2379 one instruction earlier. */
2380 if (btrace_insn_cmp (replay, &end) == 0)
2381 return btrace_step_no_history ();
2382
2383 return btrace_step_spurious ();
2384 }
2385
2386 /* Step one instruction in backward direction. */
2387
2388 static struct target_waitstatus
2389 record_btrace_single_step_backward (struct thread_info *tp)
2390 {
2391 struct btrace_insn_iterator *replay, start;
2392 struct btrace_thread_info *btinfo;
2393
2394 btinfo = &tp->btrace;
2395 replay = btinfo->replay;
2396
2397 /* Start replaying if we're not already doing so. */
2398 if (replay == NULL)
2399 replay = record_btrace_start_replaying (tp);
2400
2401 /* If we can't step any further, we reached the end of the history.
2402 Skip gaps during replay. If we end up at a gap (at the beginning of
2403 the trace), jump back to the instruction at which we started. */
2404 start = *replay;
2405 do
2406 {
2407 unsigned int steps;
2408
2409 steps = btrace_insn_prev (replay, 1);
2410 if (steps == 0)
2411 {
2412 *replay = start;
2413 return btrace_step_no_history ();
2414 }
2415 }
2416 while (btrace_insn_get (replay) == NULL);
2417
2418 /* Check if we're stepping a breakpoint.
2419
2420 For reverse-stepping, this check is after the step. There is logic in
2421 infrun.c that handles reverse-stepping separately. See, for example,
2422 proceed and adjust_pc_after_break.
2423
2424 This code assumes that for reverse-stepping, PC points to the last
2425 de-executed instruction, whereas for forward-stepping PC points to the
2426 next to-be-executed instruction. */
2427 if (record_btrace_replay_at_breakpoint (tp))
2428 return btrace_step_stopped ();
2429
2430 return btrace_step_spurious ();
2431 }
2432
2433 /* Step a single thread. */
2434
2435 static struct target_waitstatus
2436 record_btrace_step_thread (struct thread_info *tp)
2437 {
2438 struct btrace_thread_info *btinfo;
2439 struct target_waitstatus status;
2440 enum btrace_thread_flag flags;
2441
2442 btinfo = &tp->btrace;
2443
2444 flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
2445 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
2446
2447 DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp),
2448 target_pid_to_str (tp->ptid).c_str (), flags,
2449 btrace_thread_flag_to_str (flags));
2450
2451 /* We can't step without an execution history. */
2452 if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
2453 return btrace_step_no_history ();
2454
2455 switch (flags)
2456 {
2457 default:
2458 internal_error (__FILE__, __LINE__, _("invalid stepping type."));
2459
2460 case BTHR_STOP:
2461 return btrace_step_stopped_on_request ();
2462
2463 case BTHR_STEP:
2464 status = record_btrace_single_step_forward (tp);
2465 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2466 break;
2467
2468 return btrace_step_stopped ();
2469
2470 case BTHR_RSTEP:
2471 status = record_btrace_single_step_backward (tp);
2472 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2473 break;
2474
2475 return btrace_step_stopped ();
2476
2477 case BTHR_CONT:
2478 status = record_btrace_single_step_forward (tp);
2479 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2480 break;
2481
2482 btinfo->flags |= flags;
2483 return btrace_step_again ();
2484
2485 case BTHR_RCONT:
2486 status = record_btrace_single_step_backward (tp);
2487 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2488 break;
2489
2490 btinfo->flags |= flags;
2491 return btrace_step_again ();
2492 }
2493
2494 /* We keep threads moving at the end of their execution history. The wait
2495 method will stop the thread for which the event is reported. */
2496 if (status.kind == TARGET_WAITKIND_NO_HISTORY)
2497 btinfo->flags |= flags;
2498
2499 return status;
2500 }
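
/* Worked example: for a "reverse-continue", the resume method sets
   BTHR_RCONT. Each call of record_btrace_step_thread then moves the
   thread one instruction backward; an uneventful step re-arms the flag
   and yields btrace_step_again, so the wait method below keeps
   iterating until the thread hits a breakpoint (btrace_step_stopped)
   or the beginning of its trace (btrace_step_no_history). */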
2501
2502 /* Announce further events if necessary. */
2503
2504 static void
2505 record_btrace_maybe_mark_async_event
2506 (const std::vector<thread_info *> &moving,
2507 const std::vector<thread_info *> &no_history)
2508 {
2509 bool more_moving = !moving.empty ();
2510 bool more_no_history = !no_history.empty ();
2511
2512 if (!more_moving && !more_no_history)
2513 return;
2514
2515 if (more_moving)
2516 DEBUG ("movers pending");
2517
2518 if (more_no_history)
2519 DEBUG ("no-history pending");
2520
2521 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2522 }
2523
2524 /* The wait method of target record-btrace. */
2525
2526 ptid_t
2527 record_btrace_target::wait (ptid_t ptid, struct target_waitstatus *status,
2528 int options)
2529 {
2530 std::vector<thread_info *> moving;
2531 std::vector<thread_info *> no_history;
2532
2533 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid).c_str (), options);
2534
2535 /* As long as we're not replaying, just forward the request. */
2536 if ((::execution_direction != EXEC_REVERSE)
2537 && !record_is_replaying (minus_one_ptid))
2538 {
2539 return this->beneath ()->wait (ptid, status, options);
2540 }
2541
2542 /* Keep a work list of moving threads. */
2543 process_stratum_target *proc_target = current_inferior ()->process_target ();
2544 for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
2545 if ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0)
2546 moving.push_back (tp);
2547
2548 if (moving.empty ())
2549 {
2550 *status = btrace_step_no_resumed ();
2551
2552 DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid).c_str (),
2553 target_waitstatus_to_string (status).c_str ());
2554
2555 return null_ptid;
2556 }
2557
2558 /* Step moving threads one by one, one step each, until either one thread
2559 reports an event or we run out of threads to step.
2560
2561 When stepping more than one thread, chances are that some threads reach
2562 the end of their execution history earlier than others. If we reported
2563 this immediately, all-stop on top of non-stop would stop all threads and
2564 resume the same threads next time. And we would report the same thread
2565 having reached the end of its execution history again.
2566
2567 In the worst case, this would starve the other threads. But even if other
2568 threads would be allowed to make progress, this would result in far too
2569 many intermediate stops.
2570
2571 We therefore delay the reporting of "no execution history" until we have
2572 nothing else to report. By this time, all threads should have moved to
2573 either the beginning or the end of their execution history. There will
2574 be a single user-visible stop. */
2575 struct thread_info *eventing = NULL;
2576 while ((eventing == NULL) && !moving.empty ())
2577 {
2578 for (unsigned int ix = 0; eventing == NULL && ix < moving.size ();)
2579 {
2580 thread_info *tp = moving[ix];
2581
2582 *status = record_btrace_step_thread (tp);
2583
2584 switch (status->kind)
2585 {
2586 case TARGET_WAITKIND_IGNORE:
2587 ix++;
2588 break;
2589
2590 case TARGET_WAITKIND_NO_HISTORY:
2591 no_history.push_back (ordered_remove (moving, ix));
2592 break;
2593
2594 default:
2595 eventing = unordered_remove (moving, ix);
2596 break;
2597 }
2598 }
2599 }
2600
2601 if (eventing == NULL)
2602 {
2603 /* We started with at least one moving thread. This thread must have
2604 either stopped or reached the end of its execution history.
2605
2606 In the former case, EVENTING must not be NULL.
2607 In the latter case, NO_HISTORY must not be empty. */
2608 gdb_assert (!no_history.empty ());
2609
2610 /* We kept threads moving at the end of their execution history. Stop
2611 EVENTING now that we are going to report its stop. */
2612 eventing = unordered_remove (no_history, 0);
2613 eventing->btrace.flags &= ~BTHR_MOVE;
2614
2615 *status = btrace_step_no_history ();
2616 }
2617
2618 gdb_assert (eventing != NULL);
2619
2620 /* We kept threads replaying at the end of their execution history. Stop
2621 replaying EVENTING now that we are going to report its stop. */
2622 record_btrace_stop_replaying_at_end (eventing);
2623
2624 /* Stop all other threads. */
2625 if (!target_is_non_stop_p ())
2626 {
2627 for (thread_info *tp : all_non_exited_threads ())
2628 record_btrace_cancel_resume (tp);
2629 }
2630
2631 /* In async mode, we need to announce further events. */
2632 if (target_is_async_p ())
2633 record_btrace_maybe_mark_async_event (moving, no_history);
2634
2635 /* Start record histories anew from the current position. */
2636 record_btrace_clear_histories (&eventing->btrace);
2637
2638 /* We moved the replay position but did not update registers. */
2639 registers_changed_thread (eventing);
2640
2641 DEBUG ("wait ended by thread %s (%s): %s",
2642 print_thread_id (eventing),
2643 target_pid_to_str (eventing->ptid).c_str (),
2644 target_waitstatus_to_string (status).c_str ());
2645
2646 return eventing->ptid;
2647 }
2648
2649 /* The stop method of target record-btrace. */
2650
2651 void
2652 record_btrace_target::stop (ptid_t ptid)
2653 {
2654 DEBUG ("stop %s", target_pid_to_str (ptid).c_str ());
2655
2656 /* As long as we're not replaying, just forward the request. */
2657 if ((::execution_direction != EXEC_REVERSE)
2658 && !record_is_replaying (minus_one_ptid))
2659 {
2660 this->beneath ()->stop (ptid);
2661 }
2662 else
2663 {
2664 process_stratum_target *proc_target
2665 = current_inferior ()->process_target ();
2666
2667 for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
2668 {
2669 tp->btrace.flags &= ~BTHR_MOVE;
2670 tp->btrace.flags |= BTHR_STOP;
2671 }
2672 }
2673 }
2674
2675 /* The can_execute_reverse method of target record-btrace. */
2676
2677 bool
2678 record_btrace_target::can_execute_reverse ()
2679 {
2680 return true;
2681 }
2682
2683 /* The stopped_by_sw_breakpoint method of target record-btrace. */
2684
2685 bool
2686 record_btrace_target::stopped_by_sw_breakpoint ()
2687 {
2688 if (record_is_replaying (minus_one_ptid))
2689 {
2690 struct thread_info *tp = inferior_thread ();
2691
2692 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2693 }
2694
2695 return this->beneath ()->stopped_by_sw_breakpoint ();
2696 }
2697
2698 /* The supports_stopped_by_sw_breakpoint method of target
2699 record-btrace. */
2700
2701 bool
2702 record_btrace_target::supports_stopped_by_sw_breakpoint ()
2703 {
2704 if (record_is_replaying (minus_one_ptid))
2705 return true;
2706
2707 return this->beneath ()->supports_stopped_by_sw_breakpoint ();
2708 }
2709
2710 /* The stopped_by_hw_breakpoint method of target record-btrace. */
2711
2712 bool
2713 record_btrace_target::stopped_by_hw_breakpoint ()
2714 {
2715 if (record_is_replaying (minus_one_ptid))
2716 {
2717 struct thread_info *tp = inferior_thread ();
2718
2719 return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2720 }
2721
2722 return this->beneath ()->stopped_by_hw_breakpoint ();
2723 }
2724
2725 /* The supports_stopped_by_hw_breakpoint method of target
2726 record-btrace. */
2727
2728 bool
2729 record_btrace_target::supports_stopped_by_hw_breakpoint ()
2730 {
2731 if (record_is_replaying (minus_one_ptid))
2732 return true;
2733
2734 return this->beneath ()->supports_stopped_by_hw_breakpoint ();
2735 }
2736
2737 /* The update_thread_list method of target record-btrace. */
2738
2739 void
2740 record_btrace_target::update_thread_list ()
2741 {
2742 /* We don't add or remove threads during replay. */
2743 if (record_is_replaying (minus_one_ptid))
2744 return;
2745
2746 /* Forward the request. */
2747 this->beneath ()->update_thread_list ();
2748 }
2749
2750 /* The thread_alive method of target record-btrace. */
2751
2752 bool
2753 record_btrace_target::thread_alive (ptid_t ptid)
2754 {
2755 /* We don't add or remove threads during replay. */
2756 if (record_is_replaying (minus_one_ptid))
2757 return true;
2758
2759 /* Forward the request. */
2760 return this->beneath ()->thread_alive (ptid);
2761 }
2762
2763 /* Set the replay branch trace instruction iterator. If IT is NULL, replay
2764 is stopped. */
2765
2766 static void
2767 record_btrace_set_replay (struct thread_info *tp,
2768 const struct btrace_insn_iterator *it)
2769 {
2770 struct btrace_thread_info *btinfo;
2771
2772 btinfo = &tp->btrace;
2773
2774 if (it == NULL)
2775 record_btrace_stop_replaying (tp);
2776 else
2777 {
2778 if (btinfo->replay == NULL)
2779 record_btrace_start_replaying (tp);
2780 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
2781 return;
2782
2783 *btinfo->replay = *it;
2784 registers_changed_thread (tp);
2785 }
2786
2787 /* Start anew from the new replay position. */
2788 record_btrace_clear_histories (btinfo);
2789
2790 inferior_thread ()->suspend.stop_pc
2791 = regcache_read_pc (get_current_regcache ());
2792 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
2793 }
2794
2795 /* The goto_record_begin method of target record-btrace. */
2796
2797 void
2798 record_btrace_target::goto_record_begin ()
2799 {
2800 struct thread_info *tp;
2801 struct btrace_insn_iterator begin;
2802
2803 tp = require_btrace_thread ();
2804
2805 btrace_insn_begin (&begin, &tp->btrace);
2806
2807 /* Skip gaps at the beginning of the trace. */
2808 while (btrace_insn_get (&begin) == NULL)
2809 {
2810 unsigned int steps;
2811
2812 steps = btrace_insn_next (&begin, 1);
2813 if (steps == 0)
2814 error (_("No trace."));
2815 }
2816
2817 record_btrace_set_replay (tp, &begin);
2818 }
2819
2820 /* The goto_record_end method of target record-btrace. */
2821
2822 void
2823 record_btrace_target::goto_record_end ()
2824 {
2825 struct thread_info *tp;
2826
2827 tp = require_btrace_thread ();
2828
2829 record_btrace_set_replay (tp, NULL);
2830 }
2831
2832 /* The goto_record method of target record-btrace. */
2833
2834 void
2835 record_btrace_target::goto_record (ULONGEST insn)
2836 {
2837 struct thread_info *tp;
2838 struct btrace_insn_iterator it;
2839 unsigned int number;
2840 int found;
2841
2842 number = insn;
2843
2844 /* Check for wrap-arounds. */
2845 if (number != insn)
2846 error (_("Instruction number out of range."));
2847
2848 tp = require_btrace_thread ();
2849
2850 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
2851
2852 /* Check if the instruction could not be found or is a gap. */
2853 if (found == 0 || btrace_insn_get (&it) == NULL)
2854 error (_("No such instruction."));
2855
2856 record_btrace_set_replay (tp, &it);
2857 }
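
/* Example CLI usage; instruction numbers are those shown by
   "record instruction-history":

   (gdb) record goto begin
   (gdb) record goto 42
   (gdb) record goto end */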
2858
2859 /* The record_stop_replaying method of target record-btrace. */
2860
2861 void
2862 record_btrace_target::record_stop_replaying ()
2863 {
2864 for (thread_info *tp : all_non_exited_threads ())
2865 record_btrace_stop_replaying (tp);
2866 }
2867
2868 /* The execution_direction target method. */
2869
2870 enum exec_direction_kind
2871 record_btrace_target::execution_direction ()
2872 {
2873 return record_btrace_resume_exec_dir;
2874 }
2875
2876 /* The prepare_to_generate_core target method. */
2877
2878 void
2879 record_btrace_target::prepare_to_generate_core ()
2880 {
2881 record_btrace_generating_corefile = 1;
2882 }
2883
2884 /* The done_generating_core target method. */
2885
2886 void
2887 record_btrace_target::done_generating_core ()
2888 {
2889 record_btrace_generating_corefile = 0;
2890 }
2891
2892 /* Start recording in BTS format. */
2893
2894 static void
2895 cmd_record_btrace_bts_start (const char *args, int from_tty)
2896 {
2897 if (args != NULL && *args != 0)
2898 error (_("Invalid argument."));
2899
2900 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2901
2902 try
2903 {
2904 execute_command ("target record-btrace", from_tty);
2905 }
2906 catch (const gdb_exception &exception)
2907 {
2908 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2909 throw;
2910 }
2911 }
2912
2913 /* Start recording in Intel Processor Trace format. */
2914
2915 static void
2916 cmd_record_btrace_pt_start (const char *args, int from_tty)
2917 {
2918 if (args != NULL && *args != 0)
2919 error (_("Invalid argument."));
2920
2921 record_btrace_conf.format = BTRACE_FORMAT_PT;
2922
2923 try
2924 {
2925 execute_command ("target record-btrace", from_tty);
2926 }
2927 catch (const gdb_exception &exception)
2928 {
2929 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2930 throw;
2931 }
2932 }
2933
2934 /* Alias for "target record". */
2935
2936 static void
2937 cmd_record_btrace_start (const char *args, int from_tty)
2938 {
2939 if (args != NULL && *args != 0)
2940 error (_("Invalid argument."));
2941
2942 record_btrace_conf.format = BTRACE_FORMAT_PT;
2943
2944 try
2945 {
2946 execute_command ("target record-btrace", from_tty);
2947 }
2948 catch (const gdb_exception &exception)
2949 {
2950 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2951
2952 try
2953 {
2954 execute_command ("target record-btrace", from_tty);
2955 }
2956 catch (const gdb_exception &ex)
2957 {
2958 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2959 throw;
2960 }
2961 }
2962 }
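
/* Plain "record btrace" thus prefers Intel Processor Trace and silently
   falls back to BTS if PT is not supported; only if both attempts fail
   is the error of the second attempt propagated to the user. */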
2963
2964 /* The "set record btrace" command. */
2965
2966 static void
2967 cmd_set_record_btrace (const char *args, int from_tty)
2968 {
2969 printf_unfiltered (_("\"set record btrace\" must be followed "
2970 "by an appropriate subcommand.\n"));
2971 help_list (set_record_btrace_cmdlist, "set record btrace ",
2972 all_commands, gdb_stdout);
2973 }
2974
2975 /* The "show record btrace" command. */
2976
2977 static void
2978 cmd_show_record_btrace (const char *args, int from_tty)
2979 {
2980 cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
2981 }
2982
2983 /* The "show record btrace replay-memory-access" command. */
2984
2985 static void
2986 cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2987 struct cmd_list_element *c, const char *value)
2988 {
2989 fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
2990 replay_memory_access);
2991 }
2992
2993 /* The "set record btrace cpu none" command. */
2994
2995 static void
2996 cmd_set_record_btrace_cpu_none (const char *args, int from_tty)
2997 {
2998 if (args != nullptr && *args != 0)
2999 error (_("Trailing junk: '%s'."), args);
3000
3001 record_btrace_cpu_state = CS_NONE;
3002 }
3003
3004 /* The "set record btrace cpu auto" command. */
3005
3006 static void
3007 cmd_set_record_btrace_cpu_auto (const char *args, int from_tty)
3008 {
3009 if (args != nullptr && *args != 0)
3010 error (_("Trailing junk: '%s'."), args);
3011
3012 record_btrace_cpu_state = CS_AUTO;
3013 }
3014
3015 /* The "set record btrace cpu" command. */
3016
3017 static void
3018 cmd_set_record_btrace_cpu (const char *args, int from_tty)
3019 {
3020 if (args == nullptr)
3021 args = "";
3022
3023 /* We use a hard-coded vendor string for now. */
3024 unsigned int family, model, stepping;
3025 int l1, l2, matches = sscanf (args, "intel: %u/%u%n/%u%n", &family,
3026 &model, &l1, &stepping, &l2);
3027 if (matches == 3)
3028 {
3029 if (strlen (args) != l2)
3030 error (_("Trailing junk: '%s'."), args + l2);
3031 }
3032 else if (matches == 2)
3033 {
3034 if (strlen (args) != l1)
3035 error (_("Trailing junk: '%s'."), args + l1);
3036
3037 stepping = 0;
3038 }
3039 else
3040 error (_("Bad format. See \"help set record btrace cpu\"."));
3041
3042 if (USHRT_MAX < family)
3043 error (_("Cpu family too big."));
3044
3045 if (UCHAR_MAX < model)
3046 error (_("Cpu model too big."));
3047
3048 if (UCHAR_MAX < stepping)
3049 error (_("Cpu stepping too big."));
3050
3051 record_btrace_cpu.vendor = CV_INTEL;
3052 record_btrace_cpu.family = family;
3053 record_btrace_cpu.model = model;
3054 record_btrace_cpu.stepping = stepping;
3055
3056 record_btrace_cpu_state = CS_CPU;
3057 }
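
/* Examples of input accepted by the sscanf format above; the family and
   model numbers are illustrative:

   (gdb) set record btrace cpu intel: 6/62
   (gdb) set record btrace cpu intel: 6/62/4
   (gdb) set record btrace cpu auto
   (gdb) set record btrace cpu none */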
3058
3059 /* The "show record btrace cpu" command. */
3060
3061 static void
3062 cmd_show_record_btrace_cpu (const char *args, int from_tty)
3063 {
3064 if (args != nullptr && *args != 0)
3065 error (_("Trailing junk: '%s'."), args);
3066
3067 switch (record_btrace_cpu_state)
3068 {
3069 case CS_AUTO:
3070 printf_unfiltered (_("btrace cpu is 'auto'.\n"));
3071 return;
3072
3073 case CS_NONE:
3074 printf_unfiltered (_("btrace cpu is 'none'.\n"));
3075 return;
3076
3077 case CS_CPU:
3078 switch (record_btrace_cpu.vendor)
3079 {
3080 case CV_INTEL:
3081 if (record_btrace_cpu.stepping == 0)
3082 printf_unfiltered (_("btrace cpu is 'intel: %u/%u'.\n"),
3083 record_btrace_cpu.family,
3084 record_btrace_cpu.model);
3085 else
3086 printf_unfiltered (_("btrace cpu is 'intel: %u/%u/%u'.\n"),
3087 record_btrace_cpu.family,
3088 record_btrace_cpu.model,
3089 record_btrace_cpu.stepping);
3090 return;
3091 }
3092 }
3093
3094 error (_("Internal error: bad cpu state."));
3095 }
3096
3097 /* The "s record btrace bts" command. */
3098
3099 static void
3100 cmd_set_record_btrace_bts (const char *args, int from_tty)
3101 {
3102 printf_unfiltered (_("\"set record btrace bts\" must be followed "
3103 "by an appropriate subcommand.\n"));
3104 help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
3105 all_commands, gdb_stdout);
3106 }
3107
3108 /* The "show record btrace bts" command. */
3109
3110 static void
3111 cmd_show_record_btrace_bts (const char *args, int from_tty)
3112 {
3113 cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
3114 }
3115
3116 /* The "set record btrace pt" command. */
3117
3118 static void
3119 cmd_set_record_btrace_pt (const char *args, int from_tty)
3120 {
3121 printf_unfiltered (_("\"set record btrace pt\" must be followed "
3122 "by an appropriate subcommand.\n"));
3123 help_list (set_record_btrace_pt_cmdlist, "set record btrace pt ",
3124 all_commands, gdb_stdout);
3125 }
3126
3127 /* The "show record btrace pt" command. */
3128
3129 static void
3130 cmd_show_record_btrace_pt (const char *args, int from_tty)
3131 {
3132 cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, "");
3133 }
3134
3135 /* The "record bts buffer-size" show value function. */
3136
3137 static void
3138 show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
3139 struct cmd_list_element *c,
3140 const char *value)
3141 {
3142 fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
3143 value);
3144 }
3145
3146 /* The "record pt buffer-size" show value function. */
3147
3148 static void
3149 show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
3150 struct cmd_list_element *c,
3151 const char *value)
3152 {
3153 fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
3154 value);
3155 }
3156
3157 /* Initialize btrace commands. */
3158
3159 void _initialize_record_btrace ();
3160 void
3161 _initialize_record_btrace ()
3162 {
3163 add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
3164 _("Start branch trace recording."), &record_btrace_cmdlist,
3165 "record btrace ", 0, &record_cmdlist);
3166 add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
3167
3168 add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
3169 _("\
3170 Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
3171 The processor stores a from/to record for each branch into a cyclic buffer.\n\
3172 This format may not be available on all processors."),
3173 &record_btrace_cmdlist);
3174 add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);
3175
3176 add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
3177 _("\
3178 Start branch trace recording in Intel Processor Trace format.\n\n\
3179 This format may not be available on all processors."),
3180 &record_btrace_cmdlist);
3181 add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);
3182
3183 add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
3184 _("Set record options."), &set_record_btrace_cmdlist,
3185 "set record btrace ", 0, &set_record_cmdlist);
3186
3187 add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
3188 _("Show record options."), &show_record_btrace_cmdlist,
3189 "show record btrace ", 0, &show_record_cmdlist);
3190
3191 add_setshow_enum_cmd ("replay-memory-access", no_class,
3192 replay_memory_access_types, &replay_memory_access, _("\
3193 Set what memory accesses are allowed during replay."), _("\
3194 Show what memory accesses are allowed during replay."),
3195 _("Default is READ-ONLY.\n\n\
3196 The btrace record target does not trace data.\n\
3197 The memory therefore corresponds to the live target and not \
3198 to the current replay position.\n\n\
3199 When READ-ONLY, allow accesses to read-only memory during replay.\n\
3200 When READ-WRITE, allow accesses to read-only and read-write memory during \
3201 replay."),
3202 NULL, cmd_show_replay_memory_access,
3203 &set_record_btrace_cmdlist,
3204 &show_record_btrace_cmdlist);
3205
3206 add_prefix_cmd ("cpu", class_support, cmd_set_record_btrace_cpu,
3207 _("\
3208 Set the cpu to be used for trace decode.\n\n\
3209 The format is \"VENDOR:IDENTIFIER\" or \"none\" or \"auto\" (default).\n\
3210 For vendor \"intel\" the format is \"FAMILY/MODEL[/STEPPING]\".\n\n\
3211 When decoding branch trace, enable errata workarounds for the specified cpu.\n\
3212 The default is \"auto\", which uses the cpu on which the trace was recorded.\n\
3213 When GDB does not support that cpu, this option can be used to enable\n\
3214 workarounds for a similar cpu that GDB supports.\n\n\
3215 When set to \"none\", errata workarounds are disabled."),
3216 &set_record_btrace_cpu_cmdlist,
3217 "set record btrace cpu ", 1,
3218 &set_record_btrace_cmdlist);
3219
3220 add_cmd ("auto", class_support, cmd_set_record_btrace_cpu_auto, _("\
3221 Automatically determine the cpu to be used for trace decode."),
3222 &set_record_btrace_cpu_cmdlist);
3223
3224 add_cmd ("none", class_support, cmd_set_record_btrace_cpu_none, _("\
3225 Do not enable errata workarounds for trace decode."),
3226 &set_record_btrace_cpu_cmdlist);
3227
3228 add_cmd ("cpu", class_support, cmd_show_record_btrace_cpu, _("\
3229 Show the cpu to be used for trace decode."),
3230 &show_record_btrace_cmdlist);
3231
3232 add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
3233 _("Set record btrace bts options."),
3234 &set_record_btrace_bts_cmdlist,
3235 "set record btrace bts ", 0, &set_record_btrace_cmdlist);
3236
3237 add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
3238 _("Show record btrace bts options."),
3239 &show_record_btrace_bts_cmdlist,
3240 "show record btrace bts ", 0, &show_record_btrace_cmdlist);
3241
3242 add_setshow_uinteger_cmd ("buffer-size", no_class,
3243 &record_btrace_conf.bts.size,
3244 _("Set the record/replay bts buffer size."),
3245 _("Show the record/replay bts buffer size."), _("\
3246 When starting recording request a trace buffer of this size. \
3247 The actual buffer size may differ from the requested size. \
3248 Use \"info record\" to see the actual buffer size.\n\n\
3249 Bigger buffers allow longer recording but also take more time to process \
3250 the recorded execution trace.\n\n\
3251 The trace buffer size may not be changed while recording."), NULL,
3252 show_record_bts_buffer_size_value,
3253 &set_record_btrace_bts_cmdlist,
3254 &show_record_btrace_bts_cmdlist);
3255
3256 add_prefix_cmd ("pt", class_support, cmd_set_record_btrace_pt,
3257 _("Set record btrace pt options."),
3258 &set_record_btrace_pt_cmdlist,
3259 "set record btrace pt ", 0, &set_record_btrace_cmdlist);
3260
3261 add_prefix_cmd ("pt", class_support, cmd_show_record_btrace_pt,
3262 _("Show record btrace pt options."),
3263 &show_record_btrace_pt_cmdlist,
3264 "show record btrace pt ", 0, &show_record_btrace_cmdlist);
3265
3266 add_setshow_uinteger_cmd ("buffer-size", no_class,
3267 &record_btrace_conf.pt.size,
3268 _("Set the record/replay pt buffer size."),
3269 _("Show the record/replay pt buffer size."), _("\
3270 Bigger buffers allow longer recording but also take more time to process \
3271 the recorded execution.\n\
3272 The actual buffer size may differ from the requested size. Use \"info record\" \
3273 to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
3274 &set_record_btrace_pt_cmdlist,
3275 &show_record_btrace_pt_cmdlist);
3276
3277 add_target (record_btrace_target_info, record_btrace_target_open);
3278
3279 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
3280 xcalloc, xfree);
3281
3282 record_btrace_conf.bts.size = 64 * 1024;
3283 record_btrace_conf.pt.size = 16 * 1024;
3284 }
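
/* Example session, assuming the processor and the operating system
   support branch tracing:

   (gdb) record btrace
   (gdb) continue
   (gdb) info record
   (gdb) record instruction-history
   (gdb) reverse-stepi
   (gdb) record goto end */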