gdb/record-btrace.c
/* Branch trace support for GDB, the GNU debugger.

   Copyright (C) 2013-2020 Free Software Foundation, Inc.

   Contributed by Intel Corp. <markus.t.metzger@intel.com>

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "record.h"
#include "record-btrace.h"
#include "gdbthread.h"
#include "target.h"
#include "gdbcmd.h"
#include "disasm.h"
#include "observable.h"
#include "cli/cli-utils.h"
#include "source.h"
#include "ui-out.h"
#include "symtab.h"
#include "filenames.h"
#include "regcache.h"
#include "frame-unwind.h"
#include "hashtab.h"
#include "infrun.h"
#include "event-loop.h"
#include "inf-loop.h"
#include "inferior.h"
#include <algorithm>
#include "gdbarch.h"
#include "cli/cli-style.h"

static const target_info record_btrace_target_info = {
  "record-btrace",
  N_("Branch tracing target"),
  N_("Collect control-flow trace and provide the execution history.")
};

/* The target_ops of record-btrace.  */

class record_btrace_target final : public target_ops
{
public:
  const target_info &info () const override
  { return record_btrace_target_info; }

  strata stratum () const override { return record_stratum; }

  void close () override;
  void async (int) override;

  void detach (inferior *inf, int from_tty) override
  { record_detach (this, inf, from_tty); }

  void disconnect (const char *, int) override;

  void mourn_inferior () override
  { record_mourn_inferior (this); }

  void kill () override
  { record_kill (this); }

  enum record_method record_method (ptid_t ptid) override;

  void stop_recording () override;
  void info_record () override;

  void insn_history (int size, gdb_disassembly_flags flags) override;
  void insn_history_from (ULONGEST from, int size,
                          gdb_disassembly_flags flags) override;
  void insn_history_range (ULONGEST begin, ULONGEST end,
                           gdb_disassembly_flags flags) override;
  void call_history (int size, record_print_flags flags) override;
  void call_history_from (ULONGEST begin, int size, record_print_flags flags)
    override;
  void call_history_range (ULONGEST begin, ULONGEST end,
                           record_print_flags flags) override;

  bool record_is_replaying (ptid_t ptid) override;
  bool record_will_replay (ptid_t ptid, int dir) override;
  void record_stop_replaying () override;

  enum target_xfer_status xfer_partial (enum target_object object,
                                        const char *annex,
                                        gdb_byte *readbuf,
                                        const gdb_byte *writebuf,
                                        ULONGEST offset, ULONGEST len,
                                        ULONGEST *xfered_len) override;

  int insert_breakpoint (struct gdbarch *,
                         struct bp_target_info *) override;
  int remove_breakpoint (struct gdbarch *, struct bp_target_info *,
                         enum remove_bp_reason) override;

  void fetch_registers (struct regcache *, int) override;

  void store_registers (struct regcache *, int) override;
  void prepare_to_store (struct regcache *) override;

  const struct frame_unwind *get_unwinder () override;

  const struct frame_unwind *get_tailcall_unwinder () override;

  void commit_resume () override;
  void resume (ptid_t, int, enum gdb_signal) override;
  ptid_t wait (ptid_t, struct target_waitstatus *, int) override;

  void stop (ptid_t) override;
  void update_thread_list () override;
  bool thread_alive (ptid_t ptid) override;
  void goto_record_begin () override;
  void goto_record_end () override;
  void goto_record (ULONGEST insn) override;

  bool can_execute_reverse () override;

  bool stopped_by_sw_breakpoint () override;
  bool supports_stopped_by_sw_breakpoint () override;

  bool stopped_by_hw_breakpoint () override;
  bool supports_stopped_by_hw_breakpoint () override;

  enum exec_direction_kind execution_direction () override;
  void prepare_to_generate_core () override;
  void done_generating_core () override;
};

static record_btrace_target record_btrace_ops;

/* Initialize the record-btrace target ops.  */

/* Token associated with a new-thread observer enabling branch tracing
   for the new thread.  */
static const gdb::observers::token record_btrace_thread_observer_token {};

/* Memory access types used in set/show record btrace replay-memory-access.  */
static const char replay_memory_access_read_only[] = "read-only";
static const char replay_memory_access_read_write[] = "read-write";
static const char *const replay_memory_access_types[] =
{
  replay_memory_access_read_only,
  replay_memory_access_read_write,
  NULL
};

/* The currently allowed replay memory access type.  */
static const char *replay_memory_access = replay_memory_access_read_only;
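
/* The user selects between these modes at runtime, e.g.

     (gdb) set record btrace replay-memory-access read-write

   "read-only" (the default) keeps replay deterministic by rejecting memory
   writes while replaying; see xfer_partial below for the enforcement.  */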

/* The cpu state kinds.  */
enum record_btrace_cpu_state_kind
{
  CS_AUTO,
  CS_NONE,
  CS_CPU
};

/* The current cpu state.  */
static enum record_btrace_cpu_state_kind record_btrace_cpu_state = CS_AUTO;

/* The current cpu for trace decode.  */
static struct btrace_cpu record_btrace_cpu;

/* Command lists for "set/show record btrace".  */
static struct cmd_list_element *set_record_btrace_cmdlist;
static struct cmd_list_element *show_record_btrace_cmdlist;

/* The execution direction of the last resume we got.  See record-full.c.  */
static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;

/* The async event handler for reverse/replay execution.  */
static struct async_event_handler *record_btrace_async_inferior_event_handler;

/* A flag indicating that we are currently generating a core file.  */
static int record_btrace_generating_corefile;

/* The current branch trace configuration.  */
static struct btrace_config record_btrace_conf;

/* Command list for "record btrace".  */
static struct cmd_list_element *record_btrace_cmdlist;

/* Command lists for "set/show record btrace bts".  */
static struct cmd_list_element *set_record_btrace_bts_cmdlist;
static struct cmd_list_element *show_record_btrace_bts_cmdlist;

/* Command lists for "set/show record btrace pt".  */
static struct cmd_list_element *set_record_btrace_pt_cmdlist;
static struct cmd_list_element *show_record_btrace_pt_cmdlist;

/* Command list for "set record btrace cpu".  */
static struct cmd_list_element *set_record_btrace_cpu_cmdlist;

/* Print a record-btrace debug message.  Use do ... while (0) to avoid
   ambiguities when used in if statements.  */

#define DEBUG(msg, args...) \
  do \
    { \
      if (record_debug != 0) \
        fprintf_unfiltered (gdb_stdlog, \
                            "[record-btrace] " msg "\n", ##args); \
    } \
  while (0)
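
/* For instance (illustrative; this mirrors a call in the resume method
   below), a resume request is reported as:

     DEBUG ("resume %s", target_pid_to_str (ptid).c_str ());

   The do ... while (0) wrapper turns the macro into a single statement,
   so it nests safely under an unbraced if/else.  */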


/* Return the cpu configured by the user.  Returns NULL if the cpu was
   configured as auto.  */
const struct btrace_cpu *
record_btrace_get_cpu (void)
{
  switch (record_btrace_cpu_state)
    {
    case CS_AUTO:
      return nullptr;

    case CS_NONE:
      record_btrace_cpu.vendor = CV_UNKNOWN;
      /* Fall through.  */
    case CS_CPU:
      return &record_btrace_cpu;
    }

  error (_("Internal error: bad record btrace cpu state."));
}
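
/* Usage sketch (this is exactly how callers below use it): the result feeds
   straight into trace decode, where a nullptr selects auto-detection:

     btrace_fetch (tp, record_btrace_get_cpu ());  */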

/* Update the branch trace for the current thread and return a pointer to its
   thread_info.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct thread_info *
require_btrace_thread (void)
{
  DEBUG ("require");

  if (inferior_ptid == null_ptid)
    error (_("No thread."));

  thread_info *tp = inferior_thread ();

  validate_registers_access ();

  btrace_fetch (tp, record_btrace_get_cpu ());

  if (btrace_is_empty (tp))
    error (_("No trace."));

  return tp;
}

/* Update the branch trace for the current thread and return a pointer to its
   branch trace information struct.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct btrace_thread_info *
require_btrace (void)
{
  struct thread_info *tp;

  tp = require_btrace_thread ();

  return &tp->btrace;
}

/* Enable branch tracing for one thread.  Warn on errors.  */

static void
record_btrace_enable_warn (struct thread_info *tp)
{
  try
    {
      btrace_enable (tp, &record_btrace_conf);
    }
  catch (const gdb_exception_error &error)
    {
      warning ("%s", error.what ());
    }
}

/* Enable automatic tracing of new threads.  */

static void
record_btrace_auto_enable (void)
{
  DEBUG ("attach thread observer");

  gdb::observers::new_thread.attach (record_btrace_enable_warn,
                                     record_btrace_thread_observer_token);
}

/* Disable automatic tracing of new threads.  */

static void
record_btrace_auto_disable (void)
{
  DEBUG ("detach thread observer");

  gdb::observers::new_thread.detach (record_btrace_thread_observer_token);
}

/* The record-btrace async event handler function.  */

static void
record_btrace_handle_async_inferior_event (gdb_client_data data)
{
  inferior_event_handler (INF_REG_EVENT, NULL);
}

/* See record-btrace.h.  */

void
record_btrace_push_target (void)
{
  const char *format;

  record_btrace_auto_enable ();

  push_target (&record_btrace_ops);

  record_btrace_async_inferior_event_handler
    = create_async_event_handler (record_btrace_handle_async_inferior_event,
                                  NULL);
  record_btrace_generating_corefile = 0;

  format = btrace_format_short_string (record_btrace_conf.format);
  gdb::observers::record_changed.notify (current_inferior (), 1, "btrace",
                                         format);
}

/* Disable btrace on a set of threads on scope exit.  */

struct scoped_btrace_disable
{
  scoped_btrace_disable () = default;

  DISABLE_COPY_AND_ASSIGN (scoped_btrace_disable);

  ~scoped_btrace_disable ()
  {
    for (thread_info *tp : m_threads)
      btrace_disable (tp);
  }

  void add_thread (thread_info *thread)
  {
    m_threads.push_front (thread);
  }

  void discard ()
  {
    m_threads.clear ();
  }

private:
  std::forward_list<thread_info *> m_threads;
};
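
/* A minimal usage sketch of the commit-or-rollback idiom above (this is
   how record_btrace_target_open below uses it):

     {
       scoped_btrace_disable disable;

       disable.add_thread (tp);   // tracked: disabled again on error
       ...                        // anything here may throw
       disable.discard ();        // success: keep tracing enabled
     }
*/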

/* Open target record-btrace.  */

static void
record_btrace_target_open (const char *args, int from_tty)
{
  /* If we fail to enable btrace for one thread, disable it for the threads
     for which it was successfully enabled.  */
  scoped_btrace_disable btrace_disable;

  DEBUG ("open");

  record_preopen ();

  if (!target_has_execution)
    error (_("The program is not being run."));

  for (thread_info *tp : all_non_exited_threads ())
    if (args == NULL || *args == 0 || number_is_in_list (args, tp->global_num))
      {
        btrace_enable (tp, &record_btrace_conf);

        btrace_disable.add_thread (tp);
      }

  record_btrace_push_target ();

  btrace_disable.discard ();
}

/* The stop_recording method of target record-btrace.  */

void
record_btrace_target::stop_recording ()
{
  DEBUG ("stop recording");

  record_btrace_auto_disable ();

  for (thread_info *tp : all_non_exited_threads ())
    if (tp->btrace.target != NULL)
      btrace_disable (tp);
}

/* The disconnect method of target record-btrace.  */

void
record_btrace_target::disconnect (const char *args,
                                  int from_tty)
{
  struct target_ops *beneath = this->beneath ();

  /* Do not stop recording, just clean up GDB side.  */
  unpush_target (this);

  /* Forward disconnect.  */
  beneath->disconnect (args, from_tty);
}

/* The close method of target record-btrace.  */

void
record_btrace_target::close ()
{
  if (record_btrace_async_inferior_event_handler != NULL)
    delete_async_event_handler (&record_btrace_async_inferior_event_handler);

  /* Make sure automatic recording gets disabled even if we did not stop
     recording before closing the record-btrace target.  */
  record_btrace_auto_disable ();

  /* We should have already stopped recording.
     Tear down btrace in case we have not.  */
  for (thread_info *tp : all_non_exited_threads ())
    btrace_teardown (tp);
}

/* The async method of target record-btrace.  */

void
record_btrace_target::async (int enable)
{
  if (enable)
    mark_async_event_handler (record_btrace_async_inferior_event_handler);
  else
    clear_async_event_handler (record_btrace_async_inferior_event_handler);

  this->beneath ()->async (enable);
}

/* Adjusts the size and returns a human readable size suffix.  */

static const char *
record_btrace_adjust_size (unsigned int *size)
{
  unsigned int sz;

  sz = *size;

  if ((sz & ((1u << 30) - 1)) == 0)
    {
      *size = sz >> 30;
      return "GB";
    }
  else if ((sz & ((1u << 20) - 1)) == 0)
    {
      *size = sz >> 20;
      return "MB";
    }
  else if ((sz & ((1u << 10) - 1)) == 0)
    {
      *size = sz >> 10;
      return "kB";
    }
  else
    return "";
}
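
/* Worked example (illustrative): sizes are only scaled when they are an
   exact multiple of the unit, largest unit first:

     unsigned int size = 2u << 20;                            // 2097152
     const char *suffix = record_btrace_adjust_size (&size);  // size == 2,
                                                              // suffix == "MB"
     size = 1000;
     suffix = record_btrace_adjust_size (&size);              // size == 1000,
                                                              // suffix == ""
*/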

/* Print a BTS configuration.  */

static void
record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
{
  const char *suffix;
  unsigned int size;

  size = conf->size;
  if (size > 0)
    {
      suffix = record_btrace_adjust_size (&size);
      printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
    }
}

/* Print an Intel Processor Trace configuration.  */

static void
record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
{
  const char *suffix;
  unsigned int size;

  size = conf->size;
  if (size > 0)
    {
      suffix = record_btrace_adjust_size (&size);
      printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
    }
}

/* Print a branch tracing configuration.  */

static void
record_btrace_print_conf (const struct btrace_config *conf)
{
  printf_unfiltered (_("Recording format: %s.\n"),
                     btrace_format_string (conf->format));

  switch (conf->format)
    {
    case BTRACE_FORMAT_NONE:
      return;

    case BTRACE_FORMAT_BTS:
      record_btrace_print_bts_conf (&conf->bts);
      return;

    case BTRACE_FORMAT_PT:
      record_btrace_print_pt_conf (&conf->pt);
      return;
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
}

/* The info_record method of target record-btrace.  */

void
record_btrace_target::info_record ()
{
  struct btrace_thread_info *btinfo;
  const struct btrace_config *conf;
  struct thread_info *tp;
  unsigned int insns, calls, gaps;

  DEBUG ("info");

  if (inferior_ptid == null_ptid)
    error (_("No thread."));

  tp = inferior_thread ();

  validate_registers_access ();

  btinfo = &tp->btrace;

  conf = ::btrace_conf (btinfo);
  if (conf != NULL)
    record_btrace_print_conf (conf);

  btrace_fetch (tp, record_btrace_get_cpu ());

  insns = 0;
  calls = 0;
  gaps = 0;

  if (!btrace_is_empty (tp))
    {
      struct btrace_call_iterator call;
      struct btrace_insn_iterator insn;

      btrace_call_end (&call, btinfo);
      btrace_call_prev (&call, 1);
      calls = btrace_call_number (&call);

      btrace_insn_end (&insn, btinfo);
      insns = btrace_insn_number (&insn);

      /* If the last instruction is not a gap, it is the current instruction
         that is not actually part of the record.  */
      if (btrace_insn_get (&insn) != NULL)
        insns -= 1;

      gaps = btinfo->ngaps;
    }

  printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
                       "for thread %s (%s).\n"), insns, calls, gaps,
                     print_thread_id (tp),
                     target_pid_to_str (tp->ptid).c_str ());

  if (btrace_is_replaying (tp))
    printf_unfiltered (_("Replay in progress.  At instruction %u.\n"),
                       btrace_insn_number (btinfo->replay));
}

/* Print a decode error.  */

static void
btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
                            enum btrace_format format)
{
  const char *errstr = btrace_decode_error (format, errcode);

  uiout->text (_("["));
  /* ERRCODE > 0 indicates notifications on BTRACE_FORMAT_PT.  */
  if (!(format == BTRACE_FORMAT_PT && errcode > 0))
    {
      uiout->text (_("decode error ("));
      uiout->field_signed ("errcode", errcode);
      uiout->text (_("): "));
    }
  uiout->text (errstr);
  uiout->text (_("]\n"));
}

/* A range of source lines.  */

struct btrace_line_range
{
  /* The symtab this line is from.  */
  struct symtab *symtab;

  /* The first line (inclusive).  */
  int begin;

  /* The last line (exclusive).  */
  int end;
};

/* Construct a line range.  */

static struct btrace_line_range
btrace_mk_line_range (struct symtab *symtab, int begin, int end)
{
  struct btrace_line_range range;

  range.symtab = symtab;
  range.begin = begin;
  range.end = end;

  return range;
}

/* Add a line to a line range.  */

static struct btrace_line_range
btrace_line_range_add (struct btrace_line_range range, int line)
{
  if (range.end <= range.begin)
    {
      /* This is the first entry.  */
      range.begin = line;
      range.end = line + 1;
    }
  else if (line < range.begin)
    range.begin = line;
  else if (range.end < line)
    range.end = line;

  return range;
}

/* Return non-zero if RANGE is empty, zero otherwise.  */

static int
btrace_line_range_is_empty (struct btrace_line_range range)
{
  return range.end <= range.begin;
}

/* Return non-zero if LHS contains RHS, zero otherwise.  */

static int
btrace_line_range_contains_range (struct btrace_line_range lhs,
                                  struct btrace_line_range rhs)
{
  return ((lhs.symtab == rhs.symtab)
          && (lhs.begin <= rhs.begin)
          && (rhs.end <= lhs.end));
}

/* Find the line range associated with PC.  */

static struct btrace_line_range
btrace_find_line_range (CORE_ADDR pc)
{
  struct btrace_line_range range;
  struct linetable_entry *lines;
  struct linetable *ltable;
  struct symtab *symtab;
  int nlines, i;

  symtab = find_pc_line_symtab (pc);
  if (symtab == NULL)
    return btrace_mk_line_range (NULL, 0, 0);

  ltable = SYMTAB_LINETABLE (symtab);
  if (ltable == NULL)
    return btrace_mk_line_range (symtab, 0, 0);

  nlines = ltable->nitems;
  lines = ltable->item;
  if (nlines <= 0)
    return btrace_mk_line_range (symtab, 0, 0);

  range = btrace_mk_line_range (symtab, 0, 0);
  for (i = 0; i < nlines - 1; i++)
    {
      if ((lines[i].pc == pc) && (lines[i].line != 0))
        range = btrace_line_range_add (range, lines[i].line);
    }

  return range;
}

/* Print source lines in LINES to UIOUT.

   SRC_AND_ASM_TUPLE and ASM_LIST hold the tuple and list for the last
   printed source line and the instructions corresponding to that source
   line.  When printing a new source line, we close the open ones and open
   new ones for the new source line.  If the source line range in LINES is
   not empty, this function will leave the tuple and list for the last
   printed source line open so instructions can be added to them.  */

static void
btrace_print_lines (struct btrace_line_range lines, struct ui_out *uiout,
                    gdb::optional<ui_out_emit_tuple> *src_and_asm_tuple,
                    gdb::optional<ui_out_emit_list> *asm_list,
                    gdb_disassembly_flags flags)
{
  print_source_lines_flags psl_flags;

  if (flags & DISASSEMBLY_FILENAME)
    psl_flags |= PRINT_SOURCE_LINES_FILENAME;

  for (int line = lines.begin; line < lines.end; ++line)
    {
      asm_list->reset ();

      src_and_asm_tuple->emplace (uiout, "src_and_asm_line");

      print_source_lines (lines.symtab, line, line + 1, psl_flags);

      asm_list->emplace (uiout, "line_asm_insn");
    }
}

/* Disassemble a section of the recorded instruction trace.  */

static void
btrace_insn_history (struct ui_out *uiout,
                     const struct btrace_thread_info *btinfo,
                     const struct btrace_insn_iterator *begin,
                     const struct btrace_insn_iterator *end,
                     gdb_disassembly_flags flags)
{
  DEBUG ("itrace (0x%x): [%u; %u)", (unsigned) flags,
         btrace_insn_number (begin), btrace_insn_number (end));

  flags |= DISASSEMBLY_SPECULATIVE;

  struct gdbarch *gdbarch = target_gdbarch ();
  btrace_line_range last_lines = btrace_mk_line_range (NULL, 0, 0);

  ui_out_emit_list list_emitter (uiout, "asm_insns");

  gdb::optional<ui_out_emit_tuple> src_and_asm_tuple;
  gdb::optional<ui_out_emit_list> asm_list;

  gdb_pretty_print_disassembler disasm (gdbarch, uiout);

  for (btrace_insn_iterator it = *begin; btrace_insn_cmp (&it, end) != 0;
       btrace_insn_next (&it, 1))
    {
      const struct btrace_insn *insn;

      insn = btrace_insn_get (&it);

      /* A NULL instruction indicates a gap in the trace.  */
      if (insn == NULL)
        {
          const struct btrace_config *conf;

          conf = btrace_conf (btinfo);

          /* We have trace so we must have a configuration.  */
          gdb_assert (conf != NULL);

          uiout->field_fmt ("insn-number", "%u",
                            btrace_insn_number (&it));
          uiout->text ("\t");

          btrace_ui_out_decode_error (uiout, btrace_insn_get_error (&it),
                                      conf->format);
        }
      else
        {
          struct disasm_insn dinsn;

          if ((flags & DISASSEMBLY_SOURCE) != 0)
            {
              struct btrace_line_range lines;

              lines = btrace_find_line_range (insn->pc);
              if (!btrace_line_range_is_empty (lines)
                  && !btrace_line_range_contains_range (last_lines, lines))
                {
                  btrace_print_lines (lines, uiout, &src_and_asm_tuple,
                                      &asm_list, flags);
                  last_lines = lines;
                }
              else if (!src_and_asm_tuple.has_value ())
                {
                  gdb_assert (!asm_list.has_value ());

                  src_and_asm_tuple.emplace (uiout, "src_and_asm_line");

                  /* No source information.  */
                  asm_list.emplace (uiout, "line_asm_insn");
                }

              gdb_assert (src_and_asm_tuple.has_value ());
              gdb_assert (asm_list.has_value ());
            }

          memset (&dinsn, 0, sizeof (dinsn));
          dinsn.number = btrace_insn_number (&it);
          dinsn.addr = insn->pc;

          if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
            dinsn.is_speculative = 1;

          disasm.pretty_print_insn (&dinsn, flags);
        }
    }
}

/* The insn_history method of target record-btrace.  */

void
record_btrace_target::insn_history (int size, gdb_disassembly_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  btinfo = require_btrace ();
  history = btinfo->insn_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("insn-history (0x%x): %d", (unsigned) flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
         start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
        begin = *replay;
      else
        btrace_insn_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
         expand in the other direction, as well, to fill up any remaining
         context.  */
      end = begin;
      if (size < 0)
        {
          /* We want the current position covered, as well.  */
          covered = btrace_insn_next (&end, 1);
          covered += btrace_insn_prev (&begin, context - covered);
          covered += btrace_insn_next (&end, context - covered);
        }
      else
        {
          covered = btrace_insn_next (&end, context);
          covered += btrace_insn_prev (&begin, context - covered);
        }
    }
  else
    {
      begin = history->begin;
      end = history->end;

      DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", (unsigned) flags, size,
             btrace_insn_number (&begin), btrace_insn_number (&end));

      if (size < 0)
        {
          end = begin;
          covered = btrace_insn_prev (&begin, context);
        }
      else
        {
          begin = end;
          covered = btrace_insn_next (&end, context);
        }
    }

  if (covered > 0)
    btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
        printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
        printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  btrace_set_insn_history (btinfo, &begin, &end);
}

/* The insn_history_range method of target record-btrace.  */

void
record_btrace_target::insn_history_range (ULONGEST from, ULONGEST to,
                                          gdb_disassembly_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_iterator begin, end;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "insn history");
  low = from;
  high = to;

  DEBUG ("insn-history (0x%x): [%u; %u)", (unsigned) flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_insn_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_insn_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_insn_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_insn_next (&end, 1);
    }

  btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_insn_history (btinfo, &begin, &end);
}

/* The insn_history_from method of target record-btrace.  */

void
record_btrace_target::insn_history_from (ULONGEST from, int size,
                                         gdb_disassembly_flags flags)
{
  ULONGEST begin, end, context;

  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  if (size < 0)
    {
      end = from;

      if (from < context)
        begin = 0;
      else
        begin = from - context + 1;
    }
  else
    {
      begin = from;
      end = from + context - 1;

      /* Check for wrap-around.  */
      if (end < begin)
        end = ULONGEST_MAX;
    }

  insn_history_range (begin, end, flags);
}
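
/* Worked example (illustrative): anchored at instruction 10 with a context
   of five instructions,

     size == -5:  begin == 6,  end == 10   (the five ending at FROM)
     size ==  5:  begin == 10, end == 14   (the five starting at FROM)

   both ranges inclusive, clamped at 0 and ULONGEST_MAX respectively.  */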

/* Print the instruction number range for a function call history line.  */

static void
btrace_call_history_insn_range (struct ui_out *uiout,
                                const struct btrace_function *bfun)
{
  unsigned int begin, end, size;

  size = bfun->insn.size ();
  gdb_assert (size > 0);

  begin = bfun->insn_offset;
  end = begin + size - 1;

  uiout->field_unsigned ("insn begin", begin);
  uiout->text (",");
  uiout->field_unsigned ("insn end", end);
}

/* Compute the lowest and highest source line for the instructions in BFUN
   and return them in PBEGIN and PEND.
   Ignore instructions that can't be mapped to BFUN, e.g. instructions that
   result from inlining or macro expansion.  */

static void
btrace_compute_src_line_range (const struct btrace_function *bfun,
                               int *pbegin, int *pend)
{
  struct symtab *symtab;
  struct symbol *sym;
  int begin, end;

  begin = INT_MAX;
  end = INT_MIN;

  sym = bfun->sym;
  if (sym == NULL)
    goto out;

  symtab = symbol_symtab (sym);

  for (const btrace_insn &insn : bfun->insn)
    {
      struct symtab_and_line sal;

      sal = find_pc_line (insn.pc, 0);
      if (sal.symtab != symtab || sal.line == 0)
        continue;

      begin = std::min (begin, sal.line);
      end = std::max (end, sal.line);
    }

 out:
  *pbegin = begin;
  *pend = end;
}

/* Print the source line information for a function call history line.  */

static void
btrace_call_history_src_line (struct ui_out *uiout,
                              const struct btrace_function *bfun)
{
  struct symbol *sym;
  int begin, end;

  sym = bfun->sym;
  if (sym == NULL)
    return;

  uiout->field_string ("file",
                       symtab_to_filename_for_display (symbol_symtab (sym)),
                       file_name_style.style ());

  btrace_compute_src_line_range (bfun, &begin, &end);
  if (end < begin)
    return;

  uiout->text (":");
  uiout->field_signed ("min line", begin);

  if (end == begin)
    return;

  uiout->text (",");
  uiout->field_signed ("max line", end);
}

/* Get the name of a branch trace function.  */

static const char *
btrace_get_bfun_name (const struct btrace_function *bfun)
{
  struct minimal_symbol *msym;
  struct symbol *sym;

  if (bfun == NULL)
    return "??";

  msym = bfun->msym;
  sym = bfun->sym;

  if (sym != NULL)
    return sym->print_name ();
  else if (msym != NULL)
    return msym->print_name ();
  else
    return "??";
}

/* Disassemble a section of the recorded function trace.  */

static void
btrace_call_history (struct ui_out *uiout,
                     const struct btrace_thread_info *btinfo,
                     const struct btrace_call_iterator *begin,
                     const struct btrace_call_iterator *end,
                     int int_flags)
{
  struct btrace_call_iterator it;
  record_print_flags flags = (enum record_print_flag) int_flags;

  DEBUG ("ftrace (0x%x): [%u; %u)", int_flags, btrace_call_number (begin),
         btrace_call_number (end));

  for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
    {
      const struct btrace_function *bfun;
      struct minimal_symbol *msym;
      struct symbol *sym;

      bfun = btrace_call_get (&it);
      sym = bfun->sym;
      msym = bfun->msym;

      /* Print the function index.  */
      uiout->field_unsigned ("index", bfun->number);
      uiout->text ("\t");

      /* Indicate gaps in the trace.  */
      if (bfun->errcode != 0)
        {
          const struct btrace_config *conf;

          conf = btrace_conf (btinfo);

          /* We have trace so we must have a configuration.  */
          gdb_assert (conf != NULL);

          btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);

          continue;
        }

      if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
        {
          int level = bfun->level + btinfo->level, i;

          for (i = 0; i < level; ++i)
            uiout->text ("  ");
        }

      if (sym != NULL)
        uiout->field_string ("function", sym->print_name (),
                             function_name_style.style ());
      else if (msym != NULL)
        uiout->field_string ("function", msym->print_name (),
                             function_name_style.style ());
      else if (!uiout->is_mi_like_p ())
        uiout->field_string ("function", "??",
                             function_name_style.style ());

      if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
        {
          uiout->text (_("\tinst "));
          btrace_call_history_insn_range (uiout, bfun);
        }

      if ((flags & RECORD_PRINT_SRC_LINE) != 0)
        {
          uiout->text (_("\tat "));
          btrace_call_history_src_line (uiout, bfun);
        }

      uiout->text ("\n");
    }
}

/* The call_history method of target record-btrace.  */

void
record_btrace_target::call_history (int size, record_print_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  btinfo = require_btrace ();
  history = btinfo->call_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("call-history (0x%x): %d", (int) flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
         start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
        {
          begin.btinfo = btinfo;
          begin.index = replay->call_index;
        }
      else
        btrace_call_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
         expand in the other direction, as well, to fill up any remaining
         context.  */
      end = begin;
      if (size < 0)
        {
          /* We want the current position covered, as well.  */
          covered = btrace_call_next (&end, 1);
          covered += btrace_call_prev (&begin, context - covered);
          covered += btrace_call_next (&end, context - covered);
        }
      else
        {
          covered = btrace_call_next (&end, context);
          covered += btrace_call_prev (&begin, context - covered);
        }
    }
  else
    {
      begin = history->begin;
      end = history->end;

      DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", (int) flags, size,
             btrace_call_number (&begin), btrace_call_number (&end));

      if (size < 0)
        {
          end = begin;
          covered = btrace_call_prev (&begin, context);
        }
      else
        {
          begin = end;
          covered = btrace_call_next (&end, context);
        }
    }

  if (covered > 0)
    btrace_call_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
        printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
        printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  btrace_set_call_history (btinfo, &begin, &end);
}

/* The call_history_range method of target record-btrace.  */

void
record_btrace_target::call_history_range (ULONGEST from, ULONGEST to,
                                          record_print_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_iterator begin, end;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "func history");
  low = from;
  high = to;

  DEBUG ("call-history (0x%x): [%u; %u)", (int) flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_call_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_call_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_call_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_call_next (&end, 1);
    }

  btrace_call_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_call_history (btinfo, &begin, &end);
}

/* The call_history_from method of target record-btrace.  */

void
record_btrace_target::call_history_from (ULONGEST from, int size,
                                         record_print_flags flags)
{
  ULONGEST begin, end, context;

  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  if (size < 0)
    {
      end = from;

      if (from < context)
        begin = 0;
      else
        begin = from - context + 1;
    }
  else
    {
      begin = from;
      end = from + context - 1;

      /* Check for wrap-around.  */
      if (end < begin)
        end = ULONGEST_MAX;
    }

  call_history_range (begin, end, flags);
}

/* The record_method method of target record-btrace.  */

enum record_method
record_btrace_target::record_method (ptid_t ptid)
{
  process_stratum_target *proc_target = current_inferior ()->process_target ();
  thread_info *const tp = find_thread_ptid (proc_target, ptid);

  if (tp == NULL)
    error (_("No thread."));

  if (tp->btrace.target == NULL)
    return RECORD_METHOD_NONE;

  return RECORD_METHOD_BTRACE;
}

/* The record_is_replaying method of target record-btrace.  */

bool
record_btrace_target::record_is_replaying (ptid_t ptid)
{
  process_stratum_target *proc_target = current_inferior ()->process_target ();
  for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
    if (btrace_is_replaying (tp))
      return true;

  return false;
}

/* The record_will_replay method of target record-btrace.  */

bool
record_btrace_target::record_will_replay (ptid_t ptid, int dir)
{
  return dir == EXEC_REVERSE || record_is_replaying (ptid);
}

/* The xfer_partial method of target record-btrace.  */

enum target_xfer_status
record_btrace_target::xfer_partial (enum target_object object,
                                    const char *annex, gdb_byte *readbuf,
                                    const gdb_byte *writebuf, ULONGEST offset,
                                    ULONGEST len, ULONGEST *xfered_len)
{
  /* Filter out requests that don't make sense during replay.  */
  if (replay_memory_access == replay_memory_access_read_only
      && !record_btrace_generating_corefile
      && record_is_replaying (inferior_ptid))
    {
      switch (object)
        {
        case TARGET_OBJECT_MEMORY:
          {
            struct target_section *section;

            /* We do not allow writing memory in general.  */
            if (writebuf != NULL)
              {
                *xfered_len = len;
                return TARGET_XFER_UNAVAILABLE;
              }

            /* We allow reading readonly memory.  */
            section = target_section_by_addr (this, offset);
            if (section != NULL)
              {
                /* Check if the section we found is readonly.  */
                if ((bfd_section_flags (section->the_bfd_section)
                     & SEC_READONLY) != 0)
                  {
                    /* Truncate the request to fit into this section.  */
                    len = std::min (len, section->endaddr - offset);
                    break;
                  }
              }

            *xfered_len = len;
            return TARGET_XFER_UNAVAILABLE;
          }
        }
    }

  /* Forward the request.  */
  return this->beneath ()->xfer_partial (object, annex, readbuf, writebuf,
                                         offset, len, xfered_len);
}

/* The insert_breakpoint method of target record-btrace.  */

int
record_btrace_target::insert_breakpoint (struct gdbarch *gdbarch,
                                         struct bp_target_info *bp_tgt)
{
  const char *old;
  int ret;

  /* Inserting breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  try
    {
      ret = this->beneath ()->insert_breakpoint (gdbarch, bp_tgt);
    }
  catch (const gdb_exception &except)
    {
      replay_memory_access = old;
      throw;
    }
  replay_memory_access = old;

  return ret;
}

/* The remove_breakpoint method of target record-btrace.  */

int
record_btrace_target::remove_breakpoint (struct gdbarch *gdbarch,
                                         struct bp_target_info *bp_tgt,
                                         enum remove_bp_reason reason)
{
  const char *old;
  int ret;

  /* Removing breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  try
    {
      ret = this->beneath ()->remove_breakpoint (gdbarch, bp_tgt, reason);
    }
  catch (const gdb_exception &except)
    {
      replay_memory_access = old;
      throw;
    }
  replay_memory_access = old;

  return ret;
}

/* The fetch_registers method of target record-btrace.  */

void
record_btrace_target::fetch_registers (struct regcache *regcache, int regno)
{
  thread_info *tp = find_thread_ptid (regcache->target (), regcache->ptid ());
  gdb_assert (tp != NULL);

  btrace_insn_iterator *replay = tp->btrace.replay;
  if (replay != NULL && !record_btrace_generating_corefile)
    {
      const struct btrace_insn *insn;
      struct gdbarch *gdbarch;
      int pcreg;

      gdbarch = regcache->arch ();
      pcreg = gdbarch_pc_regnum (gdbarch);
      if (pcreg < 0)
        return;

      /* We can only provide the PC register.  */
      if (regno >= 0 && regno != pcreg)
        return;

      insn = btrace_insn_get (replay);
      gdb_assert (insn != NULL);

      regcache->raw_supply (regno, &insn->pc);
    }
  else
    this->beneath ()->fetch_registers (regcache, regno);
}

/* The store_registers method of target record-btrace.  */

void
record_btrace_target::store_registers (struct regcache *regcache, int regno)
{
  if (!record_btrace_generating_corefile
      && record_is_replaying (regcache->ptid ()))
    error (_("Cannot write registers while replaying."));

  gdb_assert (may_write_registers);

  this->beneath ()->store_registers (regcache, regno);
}

/* The prepare_to_store method of target record-btrace.  */

void
record_btrace_target::prepare_to_store (struct regcache *regcache)
{
  if (!record_btrace_generating_corefile
      && record_is_replaying (regcache->ptid ()))
    return;

  this->beneath ()->prepare_to_store (regcache);
}

/* The branch trace frame cache.  */

struct btrace_frame_cache
{
  /* The thread.  */
  struct thread_info *tp;

  /* The frame info.  */
  struct frame_info *frame;

  /* The branch trace function segment.  */
  const struct btrace_function *bfun;
};

/* A struct btrace_frame_cache hash table indexed by NEXT.  */

static htab_t bfcache;

/* hash_f for htab_create_alloc of bfcache.  */

static hashval_t
bfcache_hash (const void *arg)
{
  const struct btrace_frame_cache *cache
    = (const struct btrace_frame_cache *) arg;

  return htab_hash_pointer (cache->frame);
}

/* eq_f for htab_create_alloc of bfcache.  */

static int
bfcache_eq (const void *arg1, const void *arg2)
{
  const struct btrace_frame_cache *cache1
    = (const struct btrace_frame_cache *) arg1;
  const struct btrace_frame_cache *cache2
    = (const struct btrace_frame_cache *) arg2;

  return cache1->frame == cache2->frame;
}
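
/* Together, bfcache_hash and bfcache_eq make BFCACHE behave like a map
   keyed on the frame pointer.  A minimal lookup sketch (this is what
   btrace_get_frame_function below does):

     struct btrace_frame_cache pattern;
     pattern.frame = frame;
     void **slot = htab_find_slot (bfcache, &pattern, NO_INSERT);  */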

/* Create a new btrace frame cache.  */

static struct btrace_frame_cache *
bfcache_new (struct frame_info *frame)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
  cache->frame = frame;

  slot = htab_find_slot (bfcache, cache, INSERT);
  gdb_assert (*slot == NULL);
  *slot = cache;

  return cache;
}

/* Extract the branch trace function from a branch trace frame.  */

static const struct btrace_function *
btrace_get_frame_function (struct frame_info *frame)
{
  const struct btrace_frame_cache *cache;
  struct btrace_frame_cache pattern;
  void **slot;

  pattern.frame = frame;

  slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
  if (slot == NULL)
    return NULL;

  cache = (const struct btrace_frame_cache *) *slot;
  return cache->bfun;
}

/* Implement stop_reason method for record_btrace_frame_unwind.  */

static enum unwind_stop_reason
record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
                                        void **this_cache)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;

  cache = (const struct btrace_frame_cache *) *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  if (bfun->up == 0)
    return UNWIND_UNAVAILABLE;

  return UNWIND_NO_REASON;
}

/* Implement this_id method for record_btrace_frame_unwind.  */

static void
record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
                             struct frame_id *this_id)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;
  struct btrace_call_iterator it;
  CORE_ADDR code, special;

  cache = (const struct btrace_frame_cache *) *this_cache;

  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  while (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->prev) != 0)
    bfun = btrace_call_get (&it);

  code = get_frame_func (this_frame);
  special = bfun->number;

  *this_id = frame_id_build_unavailable_stack_special (code, special);

  DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
         btrace_get_bfun_name (cache->bfun),
         core_addr_to_string_nz (this_id->code_addr),
         core_addr_to_string_nz (this_id->special_addr));
}

/* Implement prev_register method for record_btrace_frame_unwind.  */

static struct value *
record_btrace_frame_prev_register (struct frame_info *this_frame,
                                   void **this_cache,
                                   int regnum)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun, *caller;
  struct btrace_call_iterator it;
  struct gdbarch *gdbarch;
  CORE_ADDR pc;
  int pcreg;

  gdbarch = get_frame_arch (this_frame);
  pcreg = gdbarch_pc_regnum (gdbarch);
  if (pcreg < 0 || regnum != pcreg)
    throw_error (NOT_AVAILABLE_ERROR,
                 _("Registers are not available in btrace record history"));

  cache = (const struct btrace_frame_cache *) *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  if (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->up) == 0)
    throw_error (NOT_AVAILABLE_ERROR,
                 _("No caller in btrace record history"));

  caller = btrace_call_get (&it);

  if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
    pc = caller->insn.front ().pc;
  else
    {
      pc = caller->insn.back ().pc;
      pc += gdb_insn_length (gdbarch, pc);
    }

  DEBUG ("[frame] unwound PC in %s on level %d: %s",
         btrace_get_bfun_name (bfun), bfun->level,
         core_addr_to_string_nz (pc));

  return frame_unwind_got_address (this_frame, regnum, pc);
}

/* Implement sniffer method for record_btrace_frame_unwind.  */

static int
record_btrace_frame_sniffer (const struct frame_unwind *self,
                             struct frame_info *this_frame,
                             void **this_cache)
{
  const struct btrace_function *bfun;
  struct btrace_frame_cache *cache;
  struct thread_info *tp;
  struct frame_info *next;

  /* THIS_FRAME does not contain a reference to its thread.  */
  tp = inferior_thread ();

  bfun = NULL;
  next = get_next_frame (this_frame);
  if (next == NULL)
    {
      const struct btrace_insn_iterator *replay;

      replay = tp->btrace.replay;
      if (replay != NULL)
        bfun = &replay->btinfo->functions[replay->call_index];
    }
  else
    {
      const struct btrace_function *callee;
      struct btrace_call_iterator it;

      callee = btrace_get_frame_function (next);
      if (callee == NULL || (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
        return 0;

      if (btrace_find_call_by_number (&it, &tp->btrace, callee->up) == 0)
        return 0;

      bfun = btrace_call_get (&it);
    }

  if (bfun == NULL)
    return 0;

  DEBUG ("[frame] sniffed frame for %s on level %d",
         btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = tp;
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}

/* Implement sniffer method for record_btrace_tailcall_frame_unwind.  */

static int
record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
                                      struct frame_info *this_frame,
                                      void **this_cache)
{
  const struct btrace_function *bfun, *callee;
  struct btrace_frame_cache *cache;
  struct btrace_call_iterator it;
  struct frame_info *next;
  struct thread_info *tinfo;

  next = get_next_frame (this_frame);
  if (next == NULL)
    return 0;

  callee = btrace_get_frame_function (next);
  if (callee == NULL)
    return 0;

  if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
    return 0;

  tinfo = inferior_thread ();
  if (btrace_find_call_by_number (&it, &tinfo->btrace, callee->up) == 0)
    return 0;

  bfun = btrace_call_get (&it);

  DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
         btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = tinfo;
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}

static void
record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = (struct btrace_frame_cache *) this_cache;

  slot = htab_find_slot (bfcache, cache, NO_INSERT);
  gdb_assert (slot != NULL);

  htab_remove_elt (bfcache, cache);
}

/* btrace recording does not store previous memory content, nor the stack
   frame contents.  Any unwinding would return erroneous results as the stack
   contents no longer match the changed PC value restored from history.
   Therefore this unwinder reports any possibly unwound registers as
   <unavailable>.  */

const struct frame_unwind record_btrace_frame_unwind =
{
  NORMAL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_frame_sniffer,
  record_btrace_frame_dealloc_cache
};

const struct frame_unwind record_btrace_tailcall_frame_unwind =
{
  TAILCALL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_tailcall_frame_sniffer,
  record_btrace_frame_dealloc_cache
};

/* Implement the get_unwinder method.  */

const struct frame_unwind *
record_btrace_target::get_unwinder ()
{
  return &record_btrace_frame_unwind;
}

/* Implement the get_tailcall_unwinder method.  */

const struct frame_unwind *
record_btrace_target::get_tailcall_unwinder ()
{
  return &record_btrace_tailcall_frame_unwind;
}

/* Return a human-readable string for FLAG.  */

static const char *
btrace_thread_flag_to_str (enum btrace_thread_flag flag)
{
  switch (flag)
    {
    case BTHR_STEP:
      return "step";

    case BTHR_RSTEP:
      return "reverse-step";

    case BTHR_CONT:
      return "cont";

    case BTHR_RCONT:
      return "reverse-cont";

    case BTHR_STOP:
      return "stop";
    }

  return "<invalid>";
}

/* Indicate that TP should be resumed according to FLAG.  */

static void
record_btrace_resume_thread (struct thread_info *tp,
                             enum btrace_thread_flag flag)
{
  struct btrace_thread_info *btinfo;

  DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp),
         target_pid_to_str (tp->ptid).c_str (), flag,
         btrace_thread_flag_to_str (flag));

  btinfo = &tp->btrace;

  /* Fetch the latest branch trace.  */
  btrace_fetch (tp, record_btrace_get_cpu ());

  /* A resume request overwrites a preceding resume or stop request.  */
  btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
  btinfo->flags |= flag;
}

/* Get the current frame for TP.  */

static struct frame_id
get_thread_current_frame_id (struct thread_info *tp)
{
  struct frame_id id;
  int executing;

  /* Set current thread, which is implicitly used by
     get_current_frame.  */
  scoped_restore_current_thread restore_thread;

  switch_to_thread (tp);

  process_stratum_target *proc_target = tp->inf->process_target ();

  /* Clear the executing flag to allow changes to the current frame.
     We are not actually running, yet.  We just started a reverse execution
     command or a record goto command.
     For the latter, EXECUTING is false and this has no effect.
     For the former, EXECUTING is true and we're in wait, about to
     move the thread.  Since we need to recompute the stack, we temporarily
     set EXECUTING to false.  */
  executing = tp->executing;
  set_executing (proc_target, inferior_ptid, false);

  id = null_frame_id;
  try
    {
      id = get_frame_id (get_current_frame ());
    }
  catch (const gdb_exception &except)
    {
      /* Restore the previous execution state.  */
      set_executing (proc_target, inferior_ptid, executing);

      throw;
    }

  /* Restore the previous execution state.  */
  set_executing (proc_target, inferior_ptid, executing);

  return id;
}

/* Start replaying a thread.  */

static struct btrace_insn_iterator *
record_btrace_start_replaying (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;
  replay = NULL;

  /* We can't start replaying without trace.  */
  if (btinfo->functions.empty ())
    return NULL;

  /* GDB stores the current frame_id when stepping in order to detect steps
     into subroutines.
     Since frames are computed differently when we're replaying, we need to
     recompute those stored frames and fix them up so we can still detect
     subroutines after we started replaying.  */
  try
    {
      struct frame_id frame_id;
      int upd_step_frame_id, upd_step_stack_frame_id;

      /* The current frame without replaying - computed via normal unwind.  */
      frame_id = get_thread_current_frame_id (tp);

      /* Check if we need to update any stepping-related frame id's.  */
      upd_step_frame_id = frame_id_eq (frame_id,
                                       tp->control.step_frame_id);
      upd_step_stack_frame_id = frame_id_eq (frame_id,
                                             tp->control.step_stack_frame_id);

      /* We start replaying at the end of the branch trace.  This corresponds
         to the current instruction.  */
      replay = XNEW (struct btrace_insn_iterator);
      btrace_insn_end (replay, btinfo);

      /* Skip gaps at the end of the trace.  */
      while (btrace_insn_get (replay) == NULL)
        {
          unsigned int steps;

          steps = btrace_insn_prev (replay, 1);
          if (steps == 0)
            error (_("No trace."));
        }

      /* We're not replaying, yet.  */
      gdb_assert (btinfo->replay == NULL);
      btinfo->replay = replay;

      /* Make sure we're not using any stale registers.  */
      registers_changed_thread (tp);

      /* The current frame with replaying - computed via btrace unwind.  */
      frame_id = get_thread_current_frame_id (tp);

      /* Replace stepping related frames where necessary.  */
      if (upd_step_frame_id)
        tp->control.step_frame_id = frame_id;
      if (upd_step_stack_frame_id)
        tp->control.step_stack_frame_id = frame_id;
    }
  catch (const gdb_exception &except)
    {
      xfree (btinfo->replay);
      btinfo->replay = NULL;

      registers_changed_thread (tp);

      throw;
    }

  return replay;
}

/* Stop replaying a thread.  */

static void
record_btrace_stop_replaying (struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;

  xfree (btinfo->replay);
  btinfo->replay = NULL;

  /* Make sure we're not leaving any stale registers.  */
  registers_changed_thread (tp);
}

/* Stop replaying TP if it is at the end of its execution history.  */

static void
record_btrace_stop_replaying_at_end (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay, end;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;
  replay = btinfo->replay;

  if (replay == NULL)
    return;

  btrace_insn_end (&end, btinfo);

  if (btrace_insn_cmp (replay, &end) == 0)
    record_btrace_stop_replaying (tp);
}

/* The resume method of target record-btrace.  */

void
record_btrace_target::resume (ptid_t ptid, int step, enum gdb_signal signal)
{
  enum btrace_thread_flag flag, cflag;

  DEBUG ("resume %s: %s%s", target_pid_to_str (ptid).c_str (),
         ::execution_direction == EXEC_REVERSE ? "reverse-" : "",
         step ? "step" : "cont");

  /* Store the execution direction of the last resume.

     If there is more than one resume call, we have to rely on infrun
     to not change the execution direction in-between.  */
  record_btrace_resume_exec_dir = ::execution_direction;

  /* As long as we're not replaying, just forward the request.

     For non-stop targets this means that no thread is replaying.  In order to
     make progress, we may need to explicitly move replaying threads to the
     end of their execution history.  */
  if ((::execution_direction != EXEC_REVERSE)
      && !record_is_replaying (minus_one_ptid))
    {
      this->beneath ()->resume (ptid, step, signal);
      return;
    }

  /* Compute the btrace thread flag for the requested move.  */
  if (::execution_direction == EXEC_REVERSE)
    {
      flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;
      cflag = BTHR_RCONT;
    }
  else
    {
      flag = step == 0 ? BTHR_CONT : BTHR_STEP;
      cflag = BTHR_CONT;
    }

  /* We just indicate the resume intent here.  The actual stepping happens in
     record_btrace_wait below.

     For all-stop targets, we only step INFERIOR_PTID and continue others.  */

  process_stratum_target *proc_target = current_inferior ()->process_target ();

  if (!target_is_non_stop_p ())
    {
      gdb_assert (inferior_ptid.matches (ptid));

      for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
        {
          if (tp->ptid.matches (inferior_ptid))
            record_btrace_resume_thread (tp, flag);
          else
            record_btrace_resume_thread (tp, cflag);
        }
    }
  else
    {
      for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
        record_btrace_resume_thread (tp, flag);
    }

  /* Async support.  */
  if (target_can_async_p ())
    {
      target_async (1);
      mark_async_event_handler (record_btrace_async_inferior_event_handler);
    }
}
2187
2188 /* The commit_resume method of target record-btrace. */
2189
2190 void
2191 record_btrace_target::commit_resume ()
2192 {
2193 if ((::execution_direction != EXEC_REVERSE)
2194 && !record_is_replaying (minus_one_ptid))
2195 beneath ()->commit_resume ();
2196 }
2197
2198 /* Cancel resuming TP. */
2199
2200 static void
2201 record_btrace_cancel_resume (struct thread_info *tp)
2202 {
2203 enum btrace_thread_flag flags;
2204
2205 flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
2206 if (flags == 0)
2207 return;
2208
2209 DEBUG ("cancel resume thread %s (%s): %x (%s)",
2210 print_thread_id (tp),
2211 target_pid_to_str (tp->ptid).c_str (), flags,
2212 btrace_thread_flag_to_str (flags));
2213
2214 tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
2215 record_btrace_stop_replaying_at_end (tp);
2216 }
2217
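/* The btrace_step_* helpers below construct the target_waitstatus values
   that record_btrace_step_thread and record_btrace_target::wait report.  */
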
2218 /* Return a target_waitstatus indicating that we ran out of history. */
2219
2220 static struct target_waitstatus
2221 btrace_step_no_history (void)
2222 {
2223 struct target_waitstatus status;
2224
2225 status.kind = TARGET_WAITKIND_NO_HISTORY;
2226
2227 return status;
2228 }
2229
2230 /* Return a target_waitstatus indicating that a step finished. */
2231
2232 static struct target_waitstatus
2233 btrace_step_stopped (void)
2234 {
2235 struct target_waitstatus status;
2236
2237 status.kind = TARGET_WAITKIND_STOPPED;
2238 status.value.sig = GDB_SIGNAL_TRAP;
2239
2240 return status;
2241 }
2242
2243 /* Return a target_waitstatus indicating that a thread was stopped as
2244 requested. */
2245
2246 static struct target_waitstatus
2247 btrace_step_stopped_on_request (void)
2248 {
2249 struct target_waitstatus status;
2250
2251 status.kind = TARGET_WAITKIND_STOPPED;
2252 status.value.sig = GDB_SIGNAL_0;
2253
2254 return status;
2255 }
2256
2257 /* Return a target_waitstatus indicating a spurious stop. */
2258
2259 static struct target_waitstatus
2260 btrace_step_spurious (void)
2261 {
2262 struct target_waitstatus status;
2263
2264 status.kind = TARGET_WAITKIND_SPURIOUS;
2265
2266 return status;
2267 }
2268
2269 /* Return a target_waitstatus indicating that the thread was not resumed. */
2270
2271 static struct target_waitstatus
2272 btrace_step_no_resumed (void)
2273 {
2274 struct target_waitstatus status;
2275
2276 status.kind = TARGET_WAITKIND_NO_RESUMED;
2277
2278 return status;
2279 }
2280
2281 /* Return a target_waitstatus indicating that we should wait again. */
2282
2283 static struct target_waitstatus
2284 btrace_step_again (void)
2285 {
2286 struct target_waitstatus status;
2287
2288 status.kind = TARGET_WAITKIND_IGNORE;
2289
2290 return status;
2291 }
2292
2293 /* Clear the record histories. */
2294
2295 static void
2296 record_btrace_clear_histories (struct btrace_thread_info *btinfo)
2297 {
2298 xfree (btinfo->insn_history);
2299 xfree (btinfo->call_history);
2300
2301 btinfo->insn_history = NULL;
2302 btinfo->call_history = NULL;
2303 }
2304
2305 /* Check whether TP's current replay position is at a breakpoint. */
2306
2307 static int
2308 record_btrace_replay_at_breakpoint (struct thread_info *tp)
2309 {
2310 struct btrace_insn_iterator *replay;
2311 struct btrace_thread_info *btinfo;
2312 const struct btrace_insn *insn;
2313
2314 btinfo = &tp->btrace;
2315 replay = btinfo->replay;
2316
2317 if (replay == NULL)
2318 return 0;
2319
2320 insn = btrace_insn_get (replay);
2321 if (insn == NULL)
2322 return 0;
2323
2324 return record_check_stopped_by_breakpoint (tp->inf->aspace, insn->pc,
2325 &btinfo->stop_reason);
2326 }
2327
2328 /* Step one instruction in forward direction. */
2329
2330 static struct target_waitstatus
2331 record_btrace_single_step_forward (struct thread_info *tp)
2332 {
2333 struct btrace_insn_iterator *replay, end, start;
2334 struct btrace_thread_info *btinfo;
2335
2336 btinfo = &tp->btrace;
2337 replay = btinfo->replay;
2338
2339 /* We're done if we're not replaying. */
2340 if (replay == NULL)
2341 return btrace_step_no_history ();
2342
2343 /* Check if we're stepping a breakpoint. */
2344 if (record_btrace_replay_at_breakpoint (tp))
2345 return btrace_step_stopped ();
2346
2347 /* Skip gaps during replay. If we end up at a gap (at the end of the trace),
2348 jump back to the instruction at which we started. */
2349 start = *replay;
2350 do
2351 {
2352 unsigned int steps;
2353
2354 /* We will bail out here if we continue stepping after reaching the end
2355 of the execution history. */
2356 steps = btrace_insn_next (replay, 1);
2357 if (steps == 0)
2358 {
2359 *replay = start;
2360 return btrace_step_no_history ();
2361 }
2362 }
2363 while (btrace_insn_get (replay) == NULL);
2364
2365 /* Determine the end of the instruction trace. */
2366 btrace_insn_end (&end, btinfo);
2367
2368 /* The execution trace contains (and ends with) the current instruction.
2369 This instruction has not been executed, yet, so the trace really ends
2370 one instruction earlier. */
2371 if (btrace_insn_cmp (replay, &end) == 0)
2372 return btrace_step_no_history ();
2373
2374 return btrace_step_spurious ();
2375 }
2376
2377 /* Step one instruction in backward direction. */
2378
2379 static struct target_waitstatus
2380 record_btrace_single_step_backward (struct thread_info *tp)
2381 {
2382 struct btrace_insn_iterator *replay, start;
2383 struct btrace_thread_info *btinfo;
2384
2385 btinfo = &tp->btrace;
2386 replay = btinfo->replay;
2387
2388 /* Start replaying if we're not already doing so. */
2389 if (replay == NULL)
2390 replay = record_btrace_start_replaying (tp);
2391
2392 /* If we can't step any further, we reached the beginning of the history.
2393 Skip gaps during replay. If we end up at a gap (at the beginning of
2394 the trace), jump back to the instruction at which we started. */
2395 start = *replay;
2396 do
2397 {
2398 unsigned int steps;
2399
2400 steps = btrace_insn_prev (replay, 1);
2401 if (steps == 0)
2402 {
2403 *replay = start;
2404 return btrace_step_no_history ();
2405 }
2406 }
2407 while (btrace_insn_get (replay) == NULL);
2408
2409 /* Check if we're stepping a breakpoint.
2410
2411 For reverse-stepping, this check is after the step. There is logic in
2412 infrun.c that handles reverse-stepping separately. See, for example,
2413 proceed and adjust_pc_after_break.
2414
2415 This code assumes that for reverse-stepping, PC points to the last
2416 de-executed instruction, whereas for forward-stepping PC points to the
2417 next to-be-executed instruction. */
2418 if (record_btrace_replay_at_breakpoint (tp))
2419 return btrace_step_stopped ();
2420
2421 return btrace_step_spurious ();
2422 }
2423
2424 /* Step a single thread. */
2425
2426 static struct target_waitstatus
2427 record_btrace_step_thread (struct thread_info *tp)
2428 {
2429 struct btrace_thread_info *btinfo;
2430 struct target_waitstatus status;
2431 enum btrace_thread_flag flags;
2432
2433 btinfo = &tp->btrace;
2434
2435 flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
2436 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
2437
2438 DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp),
2439 target_pid_to_str (tp->ptid).c_str (), flags,
2440 btrace_thread_flag_to_str (flags));
2441
2442 /* We can't step without an execution history. */
2443 if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
2444 return btrace_step_no_history ();
2445
2446 switch (flags)
2447 {
2448 default:
2449 internal_error (__FILE__, __LINE__, _("invalid stepping type."));
2450
2451 case BTHR_STOP:
2452 return btrace_step_stopped_on_request ();
2453
2454 case BTHR_STEP:
2455 status = record_btrace_single_step_forward (tp);
2456 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2457 break;
2458
2459 return btrace_step_stopped ();
2460
2461 case BTHR_RSTEP:
2462 status = record_btrace_single_step_backward (tp);
2463 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2464 break;
2465
2466 return btrace_step_stopped ();
2467
2468 case BTHR_CONT:
2469 status = record_btrace_single_step_forward (tp);
2470 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2471 break;
2472
2473 btinfo->flags |= flags;
2474 return btrace_step_again ();
2475
2476 case BTHR_RCONT:
2477 status = record_btrace_single_step_backward (tp);
2478 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2479 break;
2480
2481 btinfo->flags |= flags;
2482 return btrace_step_again ();
2483 }
2484
2485 /* We keep threads moving at the end of their execution history. The wait
2486 method will stop the thread for which the event is reported. */
2487 if (status.kind == TARGET_WAITKIND_NO_HISTORY)
2488 btinfo->flags |= flags;
2489
2490 return status;
2491 }
2492
2493 /* Announce further events if necessary. */
2494
2495 static void
2496 record_btrace_maybe_mark_async_event
2497 (const std::vector<thread_info *> &moving,
2498 const std::vector<thread_info *> &no_history)
2499 {
2500 bool more_moving = !moving.empty ();
2501 bool more_no_history = !no_history.empty ();
2502
2503 if (!more_moving && !more_no_history)
2504 return;
2505
2506 if (more_moving)
2507 DEBUG ("movers pending");
2508
2509 if (more_no_history)
2510 DEBUG ("no-history pending");
2511
2512 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2513 }
2514
2515 /* The wait method of target record-btrace. */
2516
2517 ptid_t
2518 record_btrace_target::wait (ptid_t ptid, struct target_waitstatus *status,
2519 int options)
2520 {
2521 std::vector<thread_info *> moving;
2522 std::vector<thread_info *> no_history;
2523
2524 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid).c_str (), options);
2525
2526 /* As long as we're not replaying, just forward the request. */
2527 if ((::execution_direction != EXEC_REVERSE)
2528 && !record_is_replaying (minus_one_ptid))
2529 {
2530 return this->beneath ()->wait (ptid, status, options);
2531 }
2532
2533 /* Keep a work list of moving threads. */
2534 process_stratum_target *proc_target = current_inferior ()->process_target ();
2535 for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
2536 if ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0)
2537 moving.push_back (tp);
2538
2539 if (moving.empty ())
2540 {
2541 *status = btrace_step_no_resumed ();
2542
2543 DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid).c_str (),
2544 target_waitstatus_to_string (status).c_str ());
2545
2546 return null_ptid;
2547 }
2548
2549 /* Step moving threads one by one, one step each, until either one thread
2550 reports an event or we run out of threads to step.
2551
2552 When stepping more than one thread, chances are that some threads reach
2553 the end of their execution history earlier than others. If we reported
2554 this immediately, all-stop on top of non-stop would stop all threads and
2555 resume the same threads next time. And we would report the same thread
2556 having reached the end of its execution history again.
2557
2558 In the worst case, this would starve the other threads. But even if other
2559 threads would be allowed to make progress, this would result in far too
2560 many intermediate stops.
2561
2562 We therefore delay the reporting of "no execution history" until we have
2563 nothing else to report. By this time, all threads should have moved to
2564 either the beginning or the end of their execution history. There will
2565 be a single user-visible stop. */
2566 struct thread_info *eventing = NULL;
2567 while ((eventing == NULL) && !moving.empty ())
2568 {
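/* Note that IX only advances when a thread reports TARGET_WAITKIND_IGNORE;
   in the other cases, removing the element at IX already moves the next
   candidate into place.  */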
2569 for (unsigned int ix = 0; eventing == NULL && ix < moving.size ();)
2570 {
2571 thread_info *tp = moving[ix];
2572
2573 *status = record_btrace_step_thread (tp);
2574
2575 switch (status->kind)
2576 {
2577 case TARGET_WAITKIND_IGNORE:
2578 ix++;
2579 break;
2580
2581 case TARGET_WAITKIND_NO_HISTORY:
2582 no_history.push_back (ordered_remove (moving, ix));
2583 break;
2584
2585 default:
2586 eventing = unordered_remove (moving, ix);
2587 break;
2588 }
2589 }
2590 }
2591
2592 if (eventing == NULL)
2593 {
2594 /* We started with at least one moving thread. This thread must have
2595 either stopped or reached the end of its execution history.
2596
2597 In the former case, EVENTING must not be NULL.
2598 In the latter case, NO_HISTORY must not be empty. */
2599 gdb_assert (!no_history.empty ());
2600
2601 /* We kept threads moving at the end of their execution history. Stop
2602 EVENTING now that we are going to report its stop. */
2603 eventing = unordered_remove (no_history, 0);
2604 eventing->btrace.flags &= ~BTHR_MOVE;
2605
2606 *status = btrace_step_no_history ();
2607 }
2608
2609 gdb_assert (eventing != NULL);
2610
2611 /* We kept threads replaying at the end of their execution history. Stop
2612 replaying EVENTING now that we are going to report its stop. */
2613 record_btrace_stop_replaying_at_end (eventing);
2614
2615 /* Stop all other threads. */
2616 if (!target_is_non_stop_p ())
2617 {
2618 for (thread_info *tp : all_non_exited_threads ())
2619 record_btrace_cancel_resume (tp);
2620 }
2621
2622 /* In async mode, we need to announce further events. */
2623 if (target_is_async_p ())
2624 record_btrace_maybe_mark_async_event (moving, no_history);
2625
2626 /* Start record histories anew from the current position. */
2627 record_btrace_clear_histories (&eventing->btrace);
2628
2629 /* We moved the replay position but did not update registers. */
2630 registers_changed_thread (eventing);
2631
2632 DEBUG ("wait ended by thread %s (%s): %s",
2633 print_thread_id (eventing),
2634 target_pid_to_str (eventing->ptid).c_str (),
2635 target_waitstatus_to_string (status).c_str ());
2636
2637 return eventing->ptid;
2638 }
2639
2640 /* The stop method of target record-btrace. */
2641
2642 void
2643 record_btrace_target::stop (ptid_t ptid)
2644 {
2645 DEBUG ("stop %s", target_pid_to_str (ptid).c_str ());
2646
2647 /* As long as we're not replaying, just forward the request. */
2648 if ((::execution_direction != EXEC_REVERSE)
2649 && !record_is_replaying (minus_one_ptid))
2650 {
2651 this->beneath ()->stop (ptid);
2652 }
2653 else
2654 {
2655 process_stratum_target *proc_target
2656 = current_inferior ()->process_target ();
2657
2658 for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
2659 {
2660 tp->btrace.flags &= ~BTHR_MOVE;
2661 tp->btrace.flags |= BTHR_STOP;
2662 }
2663 }
2664 }
2665
2666 /* The can_execute_reverse method of target record-btrace. */
2667
2668 bool
2669 record_btrace_target::can_execute_reverse ()
2670 {
2671 return true;
2672 }
2673
2674 /* The stopped_by_sw_breakpoint method of target record-btrace. */
2675
2676 bool
2677 record_btrace_target::stopped_by_sw_breakpoint ()
2678 {
2679 if (record_is_replaying (minus_one_ptid))
2680 {
2681 struct thread_info *tp = inferior_thread ();
2682
2683 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2684 }
2685
2686 return this->beneath ()->stopped_by_sw_breakpoint ();
2687 }
2688
2689 /* The supports_stopped_by_sw_breakpoint method of target
2690 record-btrace. */
2691
2692 bool
2693 record_btrace_target::supports_stopped_by_sw_breakpoint ()
2694 {
2695 if (record_is_replaying (minus_one_ptid))
2696 return true;
2697
2698 return this->beneath ()->supports_stopped_by_sw_breakpoint ();
2699 }
2700
2701 /* The stopped_by_hw_breakpoint method of target record-btrace. */
2702
2703 bool
2704 record_btrace_target::stopped_by_hw_breakpoint ()
2705 {
2706 if (record_is_replaying (minus_one_ptid))
2707 {
2708 struct thread_info *tp = inferior_thread ();
2709
2710 return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2711 }
2712
2713 return this->beneath ()->stopped_by_hw_breakpoint ();
2714 }
2715
2716 /* The supports_stopped_by_hw_breakpoint method of target
2717 record-btrace. */
2718
2719 bool
2720 record_btrace_target::supports_stopped_by_hw_breakpoint ()
2721 {
2722 if (record_is_replaying (minus_one_ptid))
2723 return true;
2724
2725 return this->beneath ()->supports_stopped_by_hw_breakpoint ();
2726 }
2727
2728 /* The update_thread_list method of target record-btrace. */
2729
2730 void
2731 record_btrace_target::update_thread_list ()
2732 {
2733 /* We don't add or remove threads during replay. */
2734 if (record_is_replaying (minus_one_ptid))
2735 return;
2736
2737 /* Forward the request. */
2738 this->beneath ()->update_thread_list ();
2739 }
2740
2741 /* The thread_alive method of target record-btrace. */
2742
2743 bool
2744 record_btrace_target::thread_alive (ptid_t ptid)
2745 {
2746 /* We don't add or remove threads during replay. */
2747 if (record_is_replaying (minus_one_ptid))
2748 return true;
2749
2750 /* Forward the request. */
2751 return this->beneath ()->thread_alive (ptid);
2752 }
2753
2754 /* Set the replay branch trace instruction iterator. If IT is NULL, replay
2755 is stopped. */
2756
2757 static void
2758 record_btrace_set_replay (struct thread_info *tp,
2759 const struct btrace_insn_iterator *it)
2760 {
2761 struct btrace_thread_info *btinfo;
2762
2763 btinfo = &tp->btrace;
2764
2765 if (it == NULL)
2766 record_btrace_stop_replaying (tp);
2767 else
2768 {
2769 if (btinfo->replay == NULL)
2770 record_btrace_start_replaying (tp);
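/* On success, record_btrace_start_replaying has set BTINFO->REPLAY.  */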
2771 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
2772 return;
2773
2774 *btinfo->replay = *it;
2775 registers_changed_thread (tp);
2776 }
2777
2778 /* Start anew from the new replay position. */
2779 record_btrace_clear_histories (btinfo);
2780
2781 inferior_thread ()->suspend.stop_pc
2782 = regcache_read_pc (get_current_regcache ());
2783 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
2784 }
2785
2786 /* The goto_record_begin method of target record-btrace. */
2787
2788 void
2789 record_btrace_target::goto_record_begin ()
2790 {
2791 struct thread_info *tp;
2792 struct btrace_insn_iterator begin;
2793
2794 tp = require_btrace_thread ();
2795
2796 btrace_insn_begin (&begin, &tp->btrace);
2797
2798 /* Skip gaps at the beginning of the trace. */
2799 while (btrace_insn_get (&begin) == NULL)
2800 {
2801 unsigned int steps;
2802
2803 steps = btrace_insn_next (&begin, 1);
2804 if (steps == 0)
2805 error (_("No trace."));
2806 }
2807
2808 record_btrace_set_replay (tp, &begin);
2809 }
2810
2811 /* The goto_record_end method of target record-btrace. */
2812
2813 void
2814 record_btrace_target::goto_record_end ()
2815 {
2816 struct thread_info *tp;
2817
2818 tp = require_btrace_thread ();
2819
2820 record_btrace_set_replay (tp, NULL);
2821 }
2822
2823 /* The goto_record method of target record-btrace. */
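/* This implements, e.g., "record goto N", where N is an instruction number
   as shown by "record instruction-history".  */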
2824
2825 void
2826 record_btrace_target::goto_record (ULONGEST insn)
2827 {
2828 struct thread_info *tp;
2829 struct btrace_insn_iterator it;
2830 unsigned int number;
2831 int found;
2832
2833 number = insn;
2834
2835 /* Check for wrap-arounds: NUMBER cannot hold all ULONGEST values. */
2836 if (number != insn)
2837 error (_("Instruction number out of range."));
2838
2839 tp = require_btrace_thread ();
2840
2841 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
2842
2843 /* Check if the instruction could not be found or is a gap. */
2844 if (found == 0 || btrace_insn_get (&it) == NULL)
2845 error (_("No such instruction."));
2846
2847 record_btrace_set_replay (tp, &it);
2848 }
2849
2850 /* The record_stop_replaying method of target record-btrace. */
2851
2852 void
2853 record_btrace_target::record_stop_replaying ()
2854 {
2855 for (thread_info *tp : all_non_exited_threads ())
2856 record_btrace_stop_replaying (tp);
2857 }
2858
2859 /* The execution_direction target method. */
2860
2861 enum exec_direction_kind
2862 record_btrace_target::execution_direction ()
2863 {
2864 return record_btrace_resume_exec_dir;
2865 }
2866
2867 /* The prepare_to_generate_core target method. */
2868
2869 void
2870 record_btrace_target::prepare_to_generate_core ()
2871 {
2872 record_btrace_generating_corefile = 1;
2873 }
2874
2875 /* The done_generating_core target method. */
2876
2877 void
2878 record_btrace_target::done_generating_core ()
2879 {
2880 record_btrace_generating_corefile = 0;
2881 }
2882
2883 /* Start recording in BTS format. */
2884
2885 static void
2886 cmd_record_btrace_bts_start (const char *args, int from_tty)
2887 {
2888 if (args != NULL && *args != 0)
2889 error (_("Invalid argument."));
2890
2891 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2892
2893 try
2894 {
2895 execute_command ("target record-btrace", from_tty);
2896 }
2897 catch (const gdb_exception &exception)
2898 {
2899 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2900 throw;
2901 }
2902 }
2903
2904 /* Start recording in Intel Processor Trace format. */
2905
2906 static void
2907 cmd_record_btrace_pt_start (const char *args, int from_tty)
2908 {
2909 if (args != NULL && *args != 0)
2910 error (_("Invalid argument."));
2911
2912 record_btrace_conf.format = BTRACE_FORMAT_PT;
2913
2914 try
2915 {
2916 execute_command ("target record-btrace", from_tty);
2917 }
2918 catch (const gdb_exception &exception)
2919 {
2920 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2921 throw;
2922 }
2923 }
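
/* Example usage of the start commands registered in
   _initialize_record_btrace below:
     (gdb) record btrace bts   -- record in BTS format
     (gdb) record btrace pt    -- record in Intel Processor Trace format  */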
2924
2925 /* Start recording, preferring Intel Processor Trace format and falling back to BTS. */
2926
2927 static void
2928 cmd_record_btrace_start (const char *args, int from_tty)
2929 {
2930 if (args != NULL && *args != 0)
2931 error (_("Invalid argument."));
2932
2933 record_btrace_conf.format = BTRACE_FORMAT_PT;
2934
2935 try
2936 {
2937 execute_command ("target record-btrace", from_tty);
2938 }
2939 catch (const gdb_exception &exception)
2940 {
2941 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2942
2943 try
2944 {
2945 execute_command ("target record-btrace", from_tty);
2946 }
2947 catch (const gdb_exception &ex)
2948 {
2949 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2950 throw;
2951 }
2952 }
2953 }
2954
2955 /* The "set record btrace" command. */
2956
2957 static void
2958 cmd_set_record_btrace (const char *args, int from_tty)
2959 {
2960 printf_unfiltered (_("\"set record btrace\" must be followed "
2961 "by an appropriate subcommand.\n"));
2962 help_list (set_record_btrace_cmdlist, "set record btrace ",
2963 all_commands, gdb_stdout);
2964 }
2965
2966 /* The "show record btrace" command. */
2967
2968 static void
2969 cmd_show_record_btrace (const char *args, int from_tty)
2970 {
2971 cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
2972 }
2973
2974 /* The "show record btrace replay-memory-access" command. */
2975
2976 static void
2977 cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2978 struct cmd_list_element *c, const char *value)
2979 {
2980 fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
2981 replay_memory_access);
2982 }
2983
2984 /* The "set record btrace cpu none" command. */
2985
2986 static void
2987 cmd_set_record_btrace_cpu_none (const char *args, int from_tty)
2988 {
2989 if (args != nullptr && *args != 0)
2990 error (_("Trailing junk: '%s'."), args);
2991
2992 record_btrace_cpu_state = CS_NONE;
2993 }
2994
2995 /* The "set record btrace cpu auto" command. */
2996
2997 static void
2998 cmd_set_record_btrace_cpu_auto (const char *args, int from_tty)
2999 {
3000 if (args != nullptr && *args != 0)
3001 error (_("Trailing junk: '%s'."), args);
3002
3003 record_btrace_cpu_state = CS_AUTO;
3004 }
3005
3006 /* The "set record btrace cpu" command. */
3007
3008 static void
3009 cmd_set_record_btrace_cpu (const char *args, int from_tty)
3010 {
3011 if (args == nullptr)
3012 args = "";
3013
3014 /* We use a hard-coded vendor string for now. */
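/* Accepted forms are, e.g., "intel: 6/62" (FAMILY/MODEL) or
   "intel: 6/62/4" (FAMILY/MODEL/STEPPING); the numbers here are purely
   illustrative.  */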
3015 unsigned int family, model, stepping;
3016 int l1, l2, matches = sscanf (args, "intel: %u/%u%n/%u%n", &family,
3017 &model, &l1, &stepping, &l2);
3018 if (matches == 3)
3019 {
3020 if (strlen (args) != l2)
3021 error (_("Trailing junk: '%s'."), args + l2);
3022 }
3023 else if (matches == 2)
3024 {
3025 if (strlen (args) != l1)
3026 error (_("Trailing junk: '%s'."), args + l1);
3027
3028 stepping = 0;
3029 }
3030 else
3031 error (_("Bad format. See \"help set record btrace cpu\"."));
3032
3033 if (USHRT_MAX < family)
3034 error (_("Cpu family too big."));
3035
3036 if (UCHAR_MAX < model)
3037 error (_("Cpu model too big."));
3038
3039 if (UCHAR_MAX < stepping)
3040 error (_("Cpu stepping too big."));
3041
3042 record_btrace_cpu.vendor = CV_INTEL;
3043 record_btrace_cpu.family = family;
3044 record_btrace_cpu.model = model;
3045 record_btrace_cpu.stepping = stepping;
3046
3047 record_btrace_cpu_state = CS_CPU;
3048 }
3049
3050 /* The "show record btrace cpu" command. */
3051
3052 static void
3053 cmd_show_record_btrace_cpu (const char *args, int from_tty)
3054 {
3055 if (args != nullptr && *args != 0)
3056 error (_("Trailing junk: '%s'."), args);
3057
3058 switch (record_btrace_cpu_state)
3059 {
3060 case CS_AUTO:
3061 printf_unfiltered (_("btrace cpu is 'auto'.\n"));
3062 return;
3063
3064 case CS_NONE:
3065 printf_unfiltered (_("btrace cpu is 'none'.\n"));
3066 return;
3067
3068 case CS_CPU:
3069 switch (record_btrace_cpu.vendor)
3070 {
3071 case CV_INTEL:
3072 if (record_btrace_cpu.stepping == 0)
3073 printf_unfiltered (_("btrace cpu is 'intel: %u/%u'.\n"),
3074 record_btrace_cpu.family,
3075 record_btrace_cpu.model);
3076 else
3077 printf_unfiltered (_("btrace cpu is 'intel: %u/%u/%u'.\n"),
3078 record_btrace_cpu.family,
3079 record_btrace_cpu.model,
3080 record_btrace_cpu.stepping);
3081 return;
3082 }
3083 }
3084
3085 error (_("Internal error: bad cpu state."));
3086 }
3087
3088 /* The "set record btrace bts" command. */
3089
3090 static void
3091 cmd_set_record_btrace_bts (const char *args, int from_tty)
3092 {
3093 printf_unfiltered (_("\"set record btrace bts\" must be followed "
3094 "by an appropriate subcommand.\n"));
3095 help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
3096 all_commands, gdb_stdout);
3097 }
3098
3099 /* The "show record btrace bts" command. */
3100
3101 static void
3102 cmd_show_record_btrace_bts (const char *args, int from_tty)
3103 {
3104 cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
3105 }
3106
3107 /* The "set record btrace pt" command. */
3108
3109 static void
3110 cmd_set_record_btrace_pt (const char *args, int from_tty)
3111 {
3112 printf_unfiltered (_("\"set record btrace pt\" must be followed "
3113 "by an appropriate subcommand.\n"));
3114 help_list (set_record_btrace_pt_cmdlist, "set record btrace pt ",
3115 all_commands, gdb_stdout);
3116 }
3117
3118 /* The "show record btrace pt" command. */
3119
3120 static void
3121 cmd_show_record_btrace_pt (const char *args, int from_tty)
3122 {
3123 cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, "");
3124 }
3125
3126 /* The "record bts buffer-size" show value function. */
3127
3128 static void
3129 show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
3130 struct cmd_list_element *c,
3131 const char *value)
3132 {
3133 fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
3134 value);
3135 }
3136
3137 /* The "record pt buffer-size" show value function. */
3138
3139 static void
3140 show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
3141 struct cmd_list_element *c,
3142 const char *value)
3143 {
3144 fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
3145 value);
3146 }
3147
3148 /* Initialize btrace commands. */
3149
3150 void
3151 _initialize_record_btrace (void)
3152 {
3153 add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
3154 _("Start branch trace recording."), &record_btrace_cmdlist,
3155 "record btrace ", 0, &record_cmdlist);
3156 add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
3157
3158 add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
3159 _("\
3160 Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
3161 The processor stores a from/to record for each branch into a cyclic buffer.\n\
3162 This format may not be available on all processors."),
3163 &record_btrace_cmdlist);
3164 add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);
3165
3166 add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
3167 _("\
3168 Start branch trace recording in Intel Processor Trace format.\n\n\
3169 This format may not be available on all processors."),
3170 &record_btrace_cmdlist);
3171 add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);
3172
3173 add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
3174 _("Set record options."), &set_record_btrace_cmdlist,
3175 "set record btrace ", 0, &set_record_cmdlist);
3176
3177 add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
3178 _("Show record options."), &show_record_btrace_cmdlist,
3179 "show record btrace ", 0, &show_record_cmdlist);
3180
3181 add_setshow_enum_cmd ("replay-memory-access", no_class,
3182 replay_memory_access_types, &replay_memory_access, _("\
3183 Set what memory accesses are allowed during replay."), _("\
3184 Show what memory accesses are allowed during replay."),
3185 _("Default is READ-ONLY.\n\n\
3186 The btrace record target does not trace data.\n\
3187 The memory therefore corresponds to the live target and not \
3188 to the current replay position.\n\n\
3189 When READ-ONLY, allow accesses to read-only memory during replay.\n\
3190 When READ-WRITE, allow accesses to read-only and read-write memory during \
3191 replay."),
3192 NULL, cmd_show_replay_memory_access,
3193 &set_record_btrace_cmdlist,
3194 &show_record_btrace_cmdlist);
3195
3196 add_prefix_cmd ("cpu", class_support, cmd_set_record_btrace_cpu,
3197 _("\
3198 Set the cpu to be used for trace decode.\n\n\
3199 The format is \"VENDOR:IDENTIFIER\" or \"none\" or \"auto\" (default).\n\
3200 For vendor \"intel\" the format is \"FAMILY/MODEL[/STEPPING]\".\n\n\
3201 When decoding branch trace, enable errata workarounds for the specified cpu.\n\
3202 The default is \"auto\", which uses the cpu on which the trace was recorded.\n\
3203 When GDB does not support that cpu, this option can be used to enable\n\
3204 workarounds for a similar cpu that GDB supports.\n\n\
3205 When set to \"none\", errata workarounds are disabled."),
3206 &set_record_btrace_cpu_cmdlist,
3207 "set record btrace cpu ", 1,
3208 &set_record_btrace_cmdlist);
3209
3210 add_cmd ("auto", class_support, cmd_set_record_btrace_cpu_auto, _("\
3211 Automatically determine the cpu to be used for trace decode."),
3212 &set_record_btrace_cpu_cmdlist);
3213
3214 add_cmd ("none", class_support, cmd_set_record_btrace_cpu_none, _("\
3215 Do not enable errata workarounds for trace decode."),
3216 &set_record_btrace_cpu_cmdlist);
3217
3218 add_cmd ("cpu", class_support, cmd_show_record_btrace_cpu, _("\
3219 Show the cpu to be used for trace decode."),
3220 &show_record_btrace_cmdlist);
3221
3222 add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
3223 _("Set record btrace bts options."),
3224 &set_record_btrace_bts_cmdlist,
3225 "set record btrace bts ", 0, &set_record_btrace_cmdlist);
3226
3227 add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
3228 _("Show record btrace bts options."),
3229 &show_record_btrace_bts_cmdlist,
3230 "show record btrace bts ", 0, &show_record_btrace_cmdlist);
3231
3232 add_setshow_uinteger_cmd ("buffer-size", no_class,
3233 &record_btrace_conf.bts.size,
3234 _("Set the record/replay bts buffer size."),
3235 _("Show the record/replay bts buffer size."), _("\
3236 When starting recording, request a trace buffer of this size. \
3237 The actual buffer size may differ from the requested size. \
3238 Use \"info record\" to see the actual buffer size.\n\n\
3239 Bigger buffers allow longer recording but also take more time to process \
3240 the recorded execution trace.\n\n\
3241 The trace buffer size may not be changed while recording."), NULL,
3242 show_record_bts_buffer_size_value,
3243 &set_record_btrace_bts_cmdlist,
3244 &show_record_btrace_bts_cmdlist);
3245
3246 add_prefix_cmd ("pt", class_support, cmd_set_record_btrace_pt,
3247 _("Set record btrace pt options."),
3248 &set_record_btrace_pt_cmdlist,
3249 "set record btrace pt ", 0, &set_record_btrace_cmdlist);
3250
3251 add_prefix_cmd ("pt", class_support, cmd_show_record_btrace_pt,
3252 _("Show record btrace pt options."),
3253 &show_record_btrace_pt_cmdlist,
3254 "show record btrace pt ", 0, &show_record_btrace_cmdlist);
3255
3256 add_setshow_uinteger_cmd ("buffer-size", no_class,
3257 &record_btrace_conf.pt.size,
3258 _("Set the record/replay pt buffer size."),
3259 _("Show the record/replay pt buffer size."), _("\
3260 Bigger buffers allow longer recording but also take more time to process \
3261 the recorded execution trace.\n\
3262 The actual buffer size may differ from the requested size. Use \"info record\" \
3263 to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
3264 &set_record_btrace_pt_cmdlist,
3265 &show_record_btrace_pt_cmdlist);
3266
3267 add_target (record_btrace_target_info, record_btrace_target_open);
3268
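/* BFCACHE maps frames to their btrace frame caches; it is used by the
   btrace frame unwinder.  Its hash and equality functions, bfcache_hash
   and bfcache_eq, are defined earlier in this file.  */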
3269 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
3270 xcalloc, xfree);
3271
3272 record_btrace_conf.bts.size = 64 * 1024;
3273 record_btrace_conf.pt.size = 16 * 1024;
3274 }