/* Branch trace support for GDB, the GNU debugger.

   Copyright (C) 2013-2020 Free Software Foundation, Inc.

   Contributed by Intel Corp. <markus.t.metzger@intel.com>

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "record.h"
#include "record-btrace.h"
#include "gdbthread.h"
#include "target.h"
#include "gdbcmd.h"
#include "disasm.h"
#include "observable.h"
#include "cli/cli-utils.h"
#include "source.h"
#include "ui-out.h"
#include "symtab.h"
#include "filenames.h"
#include "regcache.h"
#include "frame-unwind.h"
#include "hashtab.h"
#include "infrun.h"
#include "event-loop.h"
#include "inf-loop.h"
#include "inferior.h"
#include <algorithm>
#include "gdbarch.h"
#include "cli/cli-style.h"

static const target_info record_btrace_target_info = {
  "record-btrace",
  N_("Branch tracing target"),
  N_("Collect control-flow trace and provide the execution history.")
};

/* The target_ops of record-btrace.  */

class record_btrace_target final : public target_ops
{
public:
  const target_info &info () const override
  { return record_btrace_target_info; }

  strata stratum () const override { return record_stratum; }

  void close () override;
  void async (int) override;

  void detach (inferior *inf, int from_tty) override
  { record_detach (this, inf, from_tty); }

  void disconnect (const char *, int) override;

  void mourn_inferior () override
  { record_mourn_inferior (this); }

  void kill () override
  { record_kill (this); }

  enum record_method record_method (ptid_t ptid) override;

  void stop_recording () override;
  void info_record () override;

  void insn_history (int size, gdb_disassembly_flags flags) override;
  void insn_history_from (ULONGEST from, int size,
                          gdb_disassembly_flags flags) override;
  void insn_history_range (ULONGEST begin, ULONGEST end,
                           gdb_disassembly_flags flags) override;
  void call_history (int size, record_print_flags flags) override;
  void call_history_from (ULONGEST begin, int size, record_print_flags flags)
    override;
  void call_history_range (ULONGEST begin, ULONGEST end, record_print_flags flags)
    override;

  bool record_is_replaying (ptid_t ptid) override;
  bool record_will_replay (ptid_t ptid, int dir) override;
  void record_stop_replaying () override;

  enum target_xfer_status xfer_partial (enum target_object object,
                                        const char *annex,
                                        gdb_byte *readbuf,
                                        const gdb_byte *writebuf,
                                        ULONGEST offset, ULONGEST len,
                                        ULONGEST *xfered_len) override;

  int insert_breakpoint (struct gdbarch *,
                         struct bp_target_info *) override;
  int remove_breakpoint (struct gdbarch *, struct bp_target_info *,
                         enum remove_bp_reason) override;

  void fetch_registers (struct regcache *, int) override;

  void store_registers (struct regcache *, int) override;
  void prepare_to_store (struct regcache *) override;

  const struct frame_unwind *get_unwinder () override;

  const struct frame_unwind *get_tailcall_unwinder () override;

  void commit_resume () override;
  void resume (ptid_t, int, enum gdb_signal) override;
  ptid_t wait (ptid_t, struct target_waitstatus *, int) override;

  void stop (ptid_t) override;
  void update_thread_list () override;
  bool thread_alive (ptid_t ptid) override;
  void goto_record_begin () override;
  void goto_record_end () override;
  void goto_record (ULONGEST insn) override;

  bool can_execute_reverse () override;

  bool stopped_by_sw_breakpoint () override;
  bool supports_stopped_by_sw_breakpoint () override;

  bool stopped_by_hw_breakpoint () override;
  bool supports_stopped_by_hw_breakpoint () override;

  enum exec_direction_kind execution_direction () override;
  void prepare_to_generate_core () override;
  void done_generating_core () override;
};

static record_btrace_target record_btrace_ops;

/* Initialize the record-btrace target ops.  */

/* Token associated with a new-thread observer enabling branch tracing
   for the new thread.  */
static const gdb::observers::token record_btrace_thread_observer_token {};

/* Memory access types used in set/show record btrace replay-memory-access.  */
static const char replay_memory_access_read_only[] = "read-only";
static const char replay_memory_access_read_write[] = "read-write";
static const char *const replay_memory_access_types[] =
{
  replay_memory_access_read_only,
  replay_memory_access_read_write,
  NULL
};

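/* A brief usage sketch for the corresponding user setting (the session
   below is made up; the values come from the array above):

     (gdb) set record btrace replay-memory-access read-write
     (gdb) show record btrace replay-memory-access

   "read-only" (the default) restricts memory access during replay to
   read-only sections; "read-write" also allows memory writes.  */
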
/* The currently allowed replay memory access type.  */
static const char *replay_memory_access = replay_memory_access_read_only;

/* The cpu state kinds.  */
enum record_btrace_cpu_state_kind
{
  CS_AUTO,
  CS_NONE,
  CS_CPU
};

/* The current cpu state.  */
static enum record_btrace_cpu_state_kind record_btrace_cpu_state = CS_AUTO;

/* The current cpu for trace decode.  */
static struct btrace_cpu record_btrace_cpu;

/* Command lists for "set/show record btrace".  */
static struct cmd_list_element *set_record_btrace_cmdlist;
static struct cmd_list_element *show_record_btrace_cmdlist;

/* The execution direction of the last resume we got.  See record-full.c.  */
static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;

/* The async event handler for reverse/replay execution.  */
static struct async_event_handler *record_btrace_async_inferior_event_handler;

/* A flag indicating that we are currently generating a core file.  */
static int record_btrace_generating_corefile;

/* The current branch trace configuration.  */
static struct btrace_config record_btrace_conf;

/* Command list for "record btrace".  */
static struct cmd_list_element *record_btrace_cmdlist;

/* Command lists for "set/show record btrace bts".  */
static struct cmd_list_element *set_record_btrace_bts_cmdlist;
static struct cmd_list_element *show_record_btrace_bts_cmdlist;

/* Command lists for "set/show record btrace pt".  */
static struct cmd_list_element *set_record_btrace_pt_cmdlist;
static struct cmd_list_element *show_record_btrace_pt_cmdlist;

/* Command list for "set record btrace cpu".  */
static struct cmd_list_element *set_record_btrace_cpu_cmdlist;

/* Print a record-btrace debug message.  Use do ... while (0) to avoid
   ambiguities when used in if statements.  */

#define DEBUG(msg, args...) \
  do \
    { \
      if (record_debug != 0) \
        fprintf_unfiltered (gdb_stdlog, \
                            "[record-btrace] " msg "\n", ##args); \
    } \
  while (0)

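/* A minimal usage sketch for DEBUG (NTHREADS is a hypothetical variable;
   the message only appears after "set debug record 1"):

     DEBUG ("enabled %d threads", nthreads);

   This writes "[record-btrace] enabled <n> threads" to gdb_stdlog.  */
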

/* Return the cpu configured by the user.  Returns NULL if the cpu was
   configured as auto.  */
const struct btrace_cpu *
record_btrace_get_cpu (void)
{
  switch (record_btrace_cpu_state)
    {
    case CS_AUTO:
      return nullptr;

    case CS_NONE:
      record_btrace_cpu.vendor = CV_UNKNOWN;
      /* Fall through.  */
    case CS_CPU:
      return &record_btrace_cpu;
    }

  error (_("Internal error: bad record btrace cpu state."));
}

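/* A sketch of how the states above map to the "set record btrace cpu"
   command (the concrete cpu identifier is made up):

     (gdb) set record btrace cpu auto          # CS_AUTO -> nullptr
     (gdb) set record btrace cpu none          # CS_NONE -> CV_UNKNOWN
     (gdb) set record btrace cpu intel:6/158   # CS_CPU  -> that cpu

   The trace decoder uses the returned cpu to enable processor-specific
   errata workarounds.  */
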
/* Update the branch trace for the current thread and return a pointer to its
   thread_info.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct thread_info *
require_btrace_thread (void)
{
  DEBUG ("require");

  if (inferior_ptid == null_ptid)
    error (_("No thread."));

  thread_info *tp = inferior_thread ();

  validate_registers_access ();

  btrace_fetch (tp, record_btrace_get_cpu ());

  if (btrace_is_empty (tp))
    error (_("No trace."));

  return tp;
}

/* Update the branch trace for the current thread and return a pointer to its
   branch trace information struct.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct btrace_thread_info *
require_btrace (void)
{
  struct thread_info *tp;

  tp = require_btrace_thread ();

  return &tp->btrace;
}

/* Enable branch tracing for one thread.  Warn on errors.  */

static void
record_btrace_enable_warn (struct thread_info *tp)
{
  try
    {
      btrace_enable (tp, &record_btrace_conf);
    }
  catch (const gdb_exception_error &error)
    {
      warning ("%s", error.what ());
    }
}

/* Enable automatic tracing of new threads.  */

static void
record_btrace_auto_enable (void)
{
  DEBUG ("attach thread observer");

  gdb::observers::new_thread.attach (record_btrace_enable_warn,
                                     record_btrace_thread_observer_token);
}

/* Disable automatic tracing of new threads.  */

static void
record_btrace_auto_disable (void)
{
  DEBUG ("detach thread observer");

  gdb::observers::new_thread.detach (record_btrace_thread_observer_token);
}

/* The record-btrace async event handler function.  */

static void
record_btrace_handle_async_inferior_event (gdb_client_data data)
{
  inferior_event_handler (INF_REG_EVENT, NULL);
}

/* See record-btrace.h.  */

void
record_btrace_push_target (void)
{
  const char *format;

  record_btrace_auto_enable ();

  push_target (&record_btrace_ops);

  record_btrace_async_inferior_event_handler
    = create_async_event_handler (record_btrace_handle_async_inferior_event,
                                  NULL);
  record_btrace_generating_corefile = 0;

  format = btrace_format_short_string (record_btrace_conf.format);
  gdb::observers::record_changed.notify (current_inferior (), 1, "btrace", format);
}

/* Disable btrace on a set of threads on scope exit.  */

struct scoped_btrace_disable
{
  scoped_btrace_disable () = default;

  DISABLE_COPY_AND_ASSIGN (scoped_btrace_disable);

  ~scoped_btrace_disable ()
  {
    for (thread_info *tp : m_threads)
      btrace_disable (tp);
  }

  void add_thread (thread_info *thread)
  {
    m_threads.push_front (thread);
  }

  void discard ()
  {
    m_threads.clear ();
  }

private:
  std::forward_list<thread_info *> m_threads;
};

/* Open target record-btrace.  */

static void
record_btrace_target_open (const char *args, int from_tty)
{
  /* If we fail to enable btrace for one thread, disable it for the threads for
     which it was successfully enabled.  */
  scoped_btrace_disable btrace_disable;

  DEBUG ("open");

  record_preopen ();

  if (!target_has_execution)
    error (_("The program is not being run."));

  for (thread_info *tp : all_non_exited_threads ())
    if (args == NULL || *args == 0 || number_is_in_list (args, tp->global_num))
      {
        btrace_enable (tp, &record_btrace_conf);

        btrace_disable.add_thread (tp);
      }

  record_btrace_push_target ();

  btrace_disable.discard ();
}

/* The stop_recording method of target record-btrace.  */

void
record_btrace_target::stop_recording ()
{
  DEBUG ("stop recording");

  record_btrace_auto_disable ();

  for (thread_info *tp : all_non_exited_threads ())
    if (tp->btrace.target != NULL)
      btrace_disable (tp);
}

/* The disconnect method of target record-btrace.  */

void
record_btrace_target::disconnect (const char *args,
                                  int from_tty)
{
  struct target_ops *beneath = this->beneath ();

  /* Do not stop recording, just clean up GDB side.  */
  unpush_target (this);

  /* Forward disconnect.  */
  beneath->disconnect (args, from_tty);
}

/* The close method of target record-btrace.  */

void
record_btrace_target::close ()
{
  if (record_btrace_async_inferior_event_handler != NULL)
    delete_async_event_handler (&record_btrace_async_inferior_event_handler);

  /* Make sure automatic recording gets disabled even if we did not stop
     recording before closing the record-btrace target.  */
  record_btrace_auto_disable ();

  /* We should have already stopped recording.
     Tear down btrace in case we have not.  */
  for (thread_info *tp : all_non_exited_threads ())
    btrace_teardown (tp);
}

/* The async method of target record-btrace.  */

void
record_btrace_target::async (int enable)
{
  if (enable)
    mark_async_event_handler (record_btrace_async_inferior_event_handler);
  else
    clear_async_event_handler (record_btrace_async_inferior_event_handler);

  this->beneath ()->async (enable);
}

/* Adjusts the size and returns a human-readable size suffix.  */

static const char *
record_btrace_adjust_size (unsigned int *size)
{
  unsigned int sz;

  sz = *size;

  if ((sz & ((1u << 30) - 1)) == 0)
    {
      *size = sz >> 30;
      return "GB";
    }
  else if ((sz & ((1u << 20) - 1)) == 0)
    {
      *size = sz >> 20;
      return "MB";
    }
  else if ((sz & ((1u << 10) - 1)) == 0)
    {
      *size = sz >> 10;
      return "kB";
    }
  else
    return "";
}

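/* A minimal usage sketch for record_btrace_adjust_size (made-up value):

     unsigned int size = 4u << 20;   // 4 MB in bytes
     const char *suffix = record_btrace_adjust_size (&size);
     // Now size == 4 and suffix == "MB".  A size that is not an exact
     // multiple of 1 kB keeps its value and gets an empty suffix.  */
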
/* Print a BTS configuration.  */

static void
record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
{
  const char *suffix;
  unsigned int size;

  size = conf->size;
  if (size > 0)
    {
      suffix = record_btrace_adjust_size (&size);
      printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
    }
}

/* Print an Intel Processor Trace configuration.  */

static void
record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
{
  const char *suffix;
  unsigned int size;

  size = conf->size;
  if (size > 0)
    {
      suffix = record_btrace_adjust_size (&size);
      printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
    }
}

/* Print a branch tracing configuration.  */

static void
record_btrace_print_conf (const struct btrace_config *conf)
{
  printf_unfiltered (_("Recording format: %s.\n"),
                     btrace_format_string (conf->format));

  switch (conf->format)
    {
    case BTRACE_FORMAT_NONE:
      return;

    case BTRACE_FORMAT_BTS:
      record_btrace_print_bts_conf (&conf->bts);
      return;

    case BTRACE_FORMAT_PT:
      record_btrace_print_pt_conf (&conf->pt);
      return;
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
}

/* The info_record method of target record-btrace.  */

void
record_btrace_target::info_record ()
{
  struct btrace_thread_info *btinfo;
  const struct btrace_config *conf;
  struct thread_info *tp;
  unsigned int insns, calls, gaps;

  DEBUG ("info");

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  validate_registers_access ();

  btinfo = &tp->btrace;

  conf = ::btrace_conf (btinfo);
  if (conf != NULL)
    record_btrace_print_conf (conf);

  btrace_fetch (tp, record_btrace_get_cpu ());

  insns = 0;
  calls = 0;
  gaps = 0;

  if (!btrace_is_empty (tp))
    {
      struct btrace_call_iterator call;
      struct btrace_insn_iterator insn;

      btrace_call_end (&call, btinfo);
      btrace_call_prev (&call, 1);
      calls = btrace_call_number (&call);

      btrace_insn_end (&insn, btinfo);
      insns = btrace_insn_number (&insn);

      /* If the last instruction is not a gap, it is the current instruction
         that is not actually part of the record.  */
      if (btrace_insn_get (&insn) != NULL)
        insns -= 1;

      gaps = btinfo->ngaps;
    }

  printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
                       "for thread %s (%s).\n"), insns, calls, gaps,
                     print_thread_id (tp),
                     target_pid_to_str (tp->ptid).c_str ());

  if (btrace_is_replaying (tp))
    printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
                       btrace_insn_number (btinfo->replay));
}

/* Print a decode error.  */

static void
btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
                            enum btrace_format format)
{
  const char *errstr = btrace_decode_error (format, errcode);

  uiout->text (_("["));
  /* ERRCODE > 0 indicates notifications on BTRACE_FORMAT_PT.  */
  if (!(format == BTRACE_FORMAT_PT && errcode > 0))
    {
      uiout->text (_("decode error ("));
      uiout->field_signed ("errcode", errcode);
      uiout->text (_("): "));
    }
  uiout->text (errstr);
  uiout->text (_("]\n"));
}

/* A range of source lines.  */

struct btrace_line_range
{
  /* The symtab this line is from.  */
  struct symtab *symtab;

  /* The first line (inclusive).  */
  int begin;

  /* The last line (exclusive).  */
  int end;
};

/* Construct a line range.  */

static struct btrace_line_range
btrace_mk_line_range (struct symtab *symtab, int begin, int end)
{
  struct btrace_line_range range;

  range.symtab = symtab;
  range.begin = begin;
  range.end = end;

  return range;
}

/* Add a line to a line range.  */

static struct btrace_line_range
btrace_line_range_add (struct btrace_line_range range, int line)
{
  if (range.end <= range.begin)
    {
      /* This is the first entry.  */
      range.begin = line;
      range.end = line + 1;
    }
  else if (line < range.begin)
    range.begin = line;
  else if (range.end < line)
    range.end = line;

  return range;
}

/* Return non-zero if RANGE is empty, zero otherwise.  */

static int
btrace_line_range_is_empty (struct btrace_line_range range)
{
  return range.end <= range.begin;
}

/* Return non-zero if LHS contains RHS, zero otherwise.  */

static int
btrace_line_range_contains_range (struct btrace_line_range lhs,
                                  struct btrace_line_range rhs)
{
  return ((lhs.symtab == rhs.symtab)
          && (lhs.begin <= rhs.begin)
          && (rhs.end <= lhs.end));
}

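/* A minimal sketch of how the line range helpers combine (line numbers
   are made up):

     btrace_line_range range = btrace_mk_line_range (symtab, 0, 0);
     range = btrace_line_range_add (range, 42);   // [42, 43)
     range = btrace_line_range_add (range, 40);   // [40, 43)
     // btrace_line_range_is_empty (range) is now 0, and a range covering
     // [40, 50) in the same symtab would contain it.  */
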
/* Find the line range associated with PC.  */

static struct btrace_line_range
btrace_find_line_range (CORE_ADDR pc)
{
  struct btrace_line_range range;
  struct linetable_entry *lines;
  struct linetable *ltable;
  struct symtab *symtab;
  int nlines, i;

  symtab = find_pc_line_symtab (pc);
  if (symtab == NULL)
    return btrace_mk_line_range (NULL, 0, 0);

  ltable = SYMTAB_LINETABLE (symtab);
  if (ltable == NULL)
    return btrace_mk_line_range (symtab, 0, 0);

  nlines = ltable->nitems;
  lines = ltable->item;
  if (nlines <= 0)
    return btrace_mk_line_range (symtab, 0, 0);

  range = btrace_mk_line_range (symtab, 0, 0);
  for (i = 0; i < nlines - 1; i++)
    {
      if ((lines[i].pc == pc) && (lines[i].line != 0))
        range = btrace_line_range_add (range, lines[i].line);
    }

  return range;
}

/* Print source lines in LINES to UIOUT.

   UI_ITEM_CHAIN is a cleanup chain for the last source line and the
   instructions corresponding to that source line.  When printing a new source
   line, we do the cleanups for the open chain and open a new cleanup chain for
   the new source line.  If the source line range in LINES is not empty, this
   function will leave the cleanup chain for the last printed source line open
   so instructions can be added to it.  */

static void
btrace_print_lines (struct btrace_line_range lines, struct ui_out *uiout,
                    gdb::optional<ui_out_emit_tuple> *src_and_asm_tuple,
                    gdb::optional<ui_out_emit_list> *asm_list,
                    gdb_disassembly_flags flags)
{
  print_source_lines_flags psl_flags;

  if (flags & DISASSEMBLY_FILENAME)
    psl_flags |= PRINT_SOURCE_LINES_FILENAME;

  for (int line = lines.begin; line < lines.end; ++line)
    {
      asm_list->reset ();

      src_and_asm_tuple->emplace (uiout, "src_and_asm_line");

      print_source_lines (lines.symtab, line, line + 1, psl_flags);

      asm_list->emplace (uiout, "line_asm_insn");
    }
}

/* Disassemble a section of the recorded instruction trace.  */

static void
btrace_insn_history (struct ui_out *uiout,
                     const struct btrace_thread_info *btinfo,
                     const struct btrace_insn_iterator *begin,
                     const struct btrace_insn_iterator *end,
                     gdb_disassembly_flags flags)
{
  DEBUG ("itrace (0x%x): [%u; %u)", (unsigned) flags,
         btrace_insn_number (begin), btrace_insn_number (end));

  flags |= DISASSEMBLY_SPECULATIVE;

  struct gdbarch *gdbarch = target_gdbarch ();
  btrace_line_range last_lines = btrace_mk_line_range (NULL, 0, 0);

  ui_out_emit_list list_emitter (uiout, "asm_insns");

  gdb::optional<ui_out_emit_tuple> src_and_asm_tuple;
  gdb::optional<ui_out_emit_list> asm_list;

  gdb_pretty_print_disassembler disasm (gdbarch, uiout);

  for (btrace_insn_iterator it = *begin; btrace_insn_cmp (&it, end) != 0;
       btrace_insn_next (&it, 1))
    {
      const struct btrace_insn *insn;

      insn = btrace_insn_get (&it);

      /* A NULL instruction indicates a gap in the trace.  */
      if (insn == NULL)
        {
          const struct btrace_config *conf;

          conf = btrace_conf (btinfo);

          /* We have trace so we must have a configuration.  */
          gdb_assert (conf != NULL);

          uiout->field_fmt ("insn-number", "%u",
                            btrace_insn_number (&it));
          uiout->text ("\t");

          btrace_ui_out_decode_error (uiout, btrace_insn_get_error (&it),
                                      conf->format);
        }
      else
        {
          struct disasm_insn dinsn;

          if ((flags & DISASSEMBLY_SOURCE) != 0)
            {
              struct btrace_line_range lines;

              lines = btrace_find_line_range (insn->pc);
              if (!btrace_line_range_is_empty (lines)
                  && !btrace_line_range_contains_range (last_lines, lines))
                {
                  btrace_print_lines (lines, uiout, &src_and_asm_tuple, &asm_list,
                                      flags);
                  last_lines = lines;
                }
              else if (!src_and_asm_tuple.has_value ())
                {
                  gdb_assert (!asm_list.has_value ());

                  src_and_asm_tuple.emplace (uiout, "src_and_asm_line");

                  /* No source information.  */
                  asm_list.emplace (uiout, "line_asm_insn");
                }

              gdb_assert (src_and_asm_tuple.has_value ());
              gdb_assert (asm_list.has_value ());
            }

          memset (&dinsn, 0, sizeof (dinsn));
          dinsn.number = btrace_insn_number (&it);
          dinsn.addr = insn->pc;

          if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
            dinsn.is_speculative = 1;

          disasm.pretty_print_insn (&dinsn, flags);
        }
    }
}

/* The insn_history method of target record-btrace.  */

void
record_btrace_target::insn_history (int size, gdb_disassembly_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  btinfo = require_btrace ();
  history = btinfo->insn_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("insn-history (0x%x): %d", (unsigned) flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
         start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
        begin = *replay;
      else
        btrace_insn_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
         expand in the other direction, as well, to fill up any remaining
         context.  */
      end = begin;
      if (size < 0)
        {
          /* We want the current position covered, as well.  */
          covered = btrace_insn_next (&end, 1);
          covered += btrace_insn_prev (&begin, context - covered);
          covered += btrace_insn_next (&end, context - covered);
        }
      else
        {
          covered = btrace_insn_next (&end, context);
          covered += btrace_insn_prev (&begin, context - covered);
        }
    }
  else
    {
      begin = history->begin;
      end = history->end;

      DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", (unsigned) flags, size,
             btrace_insn_number (&begin), btrace_insn_number (&end));

      if (size < 0)
        {
          end = begin;
          covered = btrace_insn_prev (&begin, context);
        }
      else
        {
          begin = end;
          covered = btrace_insn_next (&end, context);
        }
    }

  if (covered > 0)
    btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
        printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
        printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  btrace_set_insn_history (btinfo, &begin, &end);
}

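/* A worked example for the context expansion above (numbers are made
   up): for "record instruction-history -10" with the iterator starting
   at instruction 100, CONTEXT is 10; END is first advanced to keep the
   current position covered, then BEGIN walks back by the remaining
   context, yielding roughly [91; 101).  Steps that cannot be covered on
   one side are made up for on the other.  */
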
/* The insn_history_range method of target record-btrace.  */

void
record_btrace_target::insn_history_range (ULONGEST from, ULONGEST to,
                                          gdb_disassembly_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_iterator begin, end;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "insn history");
  low = from;
  high = to;

  DEBUG ("insn-history (0x%x): [%u; %u)", (unsigned) flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_insn_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_insn_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_insn_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_insn_next (&end, 1);
    }

  btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_insn_history (btinfo, &begin, &end);
}

/* The insn_history_from method of target record-btrace.  */

void
record_btrace_target::insn_history_from (ULONGEST from, int size,
                                         gdb_disassembly_flags flags)
{
  ULONGEST begin, end, context;

  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  if (size < 0)
    {
      end = from;

      if (from < context)
        begin = 0;
      else
        begin = from - context + 1;
    }
  else
    {
      begin = from;
      end = from + context - 1;

      /* Check for wrap-around.  */
      if (end < begin)
        end = ULONGEST_MAX;
    }

  insn_history_range (begin, end, flags);
}

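/* A quick example of the FROM/SIZE arithmetic above (made-up numbers):
   FROM = 100 with SIZE = -10 yields the inclusive range [91, 100], and
   FROM = 100 with SIZE = 10 yields [100, 109].  A range that would wrap
   around is clamped to ULONGEST_MAX.  */
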
/* Print the instruction number range for a function call history line.  */

static void
btrace_call_history_insn_range (struct ui_out *uiout,
                                const struct btrace_function *bfun)
{
  unsigned int begin, end, size;

  size = bfun->insn.size ();
  gdb_assert (size > 0);

  begin = bfun->insn_offset;
  end = begin + size - 1;

  uiout->field_unsigned ("insn begin", begin);
  uiout->text (",");
  uiout->field_unsigned ("insn end", end);
}

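/* For example (made-up numbers), a function segment whose first
   instruction has number 42 and which contains 5 instructions prints
   "42,46": INSN_OFFSET numbers the first instruction and the printed
   range is inclusive on both ends.  */
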
/* Compute the lowest and highest source line for the instructions in BFUN
   and return them in PBEGIN and PEND.
   Ignore instructions that can't be mapped to BFUN, e.g. instructions that
   result from inlining or macro expansion.  */

static void
btrace_compute_src_line_range (const struct btrace_function *bfun,
                               int *pbegin, int *pend)
{
  struct symtab *symtab;
  struct symbol *sym;
  int begin, end;

  begin = INT_MAX;
  end = INT_MIN;

  sym = bfun->sym;
  if (sym == NULL)
    goto out;

  symtab = symbol_symtab (sym);

  for (const btrace_insn &insn : bfun->insn)
    {
      struct symtab_and_line sal;

      sal = find_pc_line (insn.pc, 0);
      if (sal.symtab != symtab || sal.line == 0)
        continue;

      begin = std::min (begin, sal.line);
      end = std::max (end, sal.line);
    }

 out:
  *pbegin = begin;
  *pend = end;
}

/* Print the source line information for a function call history line.  */

static void
btrace_call_history_src_line (struct ui_out *uiout,
                              const struct btrace_function *bfun)
{
  struct symbol *sym;
  int begin, end;

  sym = bfun->sym;
  if (sym == NULL)
    return;

  uiout->field_string ("file",
                       symtab_to_filename_for_display (symbol_symtab (sym)),
                       file_name_style.style ());

  btrace_compute_src_line_range (bfun, &begin, &end);
  if (end < begin)
    return;

  uiout->text (":");
  uiout->field_signed ("min line", begin);

  if (end == begin)
    return;

  uiout->text (",");
  uiout->field_signed ("max line", end);
}

/* Get the name of a branch trace function.  */

static const char *
btrace_get_bfun_name (const struct btrace_function *bfun)
{
  struct minimal_symbol *msym;
  struct symbol *sym;

  if (bfun == NULL)
    return "??";

  msym = bfun->msym;
  sym = bfun->sym;

  if (sym != NULL)
    return sym->print_name ();
  else if (msym != NULL)
    return msym->print_name ();
  else
    return "??";
}

/* Disassemble a section of the recorded function trace.  */

static void
btrace_call_history (struct ui_out *uiout,
                     const struct btrace_thread_info *btinfo,
                     const struct btrace_call_iterator *begin,
                     const struct btrace_call_iterator *end,
                     int int_flags)
{
  struct btrace_call_iterator it;
  record_print_flags flags = (enum record_print_flag) int_flags;

  DEBUG ("ftrace (0x%x): [%u; %u)", int_flags, btrace_call_number (begin),
         btrace_call_number (end));

  for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
    {
      const struct btrace_function *bfun;
      struct minimal_symbol *msym;
      struct symbol *sym;

      bfun = btrace_call_get (&it);
      sym = bfun->sym;
      msym = bfun->msym;

      /* Print the function index.  */
      uiout->field_unsigned ("index", bfun->number);
      uiout->text ("\t");

      /* Indicate gaps in the trace.  */
      if (bfun->errcode != 0)
        {
          const struct btrace_config *conf;

          conf = btrace_conf (btinfo);

          /* We have trace so we must have a configuration.  */
          gdb_assert (conf != NULL);

          btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);

          continue;
        }

      if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
        {
          int level = bfun->level + btinfo->level, i;

          for (i = 0; i < level; ++i)
            uiout->text (" ");
        }

      if (sym != NULL)
        uiout->field_string ("function", sym->print_name (),
                             function_name_style.style ());
      else if (msym != NULL)
        uiout->field_string ("function", msym->print_name (),
                             function_name_style.style ());
      else if (!uiout->is_mi_like_p ())
        uiout->field_string ("function", "??",
                             function_name_style.style ());

      if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
        {
          uiout->text (_("\tinst "));
          btrace_call_history_insn_range (uiout, bfun);
        }

      if ((flags & RECORD_PRINT_SRC_LINE) != 0)
        {
          uiout->text (_("\tat "));
          btrace_call_history_src_line (uiout, bfun);
        }

      uiout->text ("\n");
    }
}

/* The call_history method of target record-btrace.  */

void
record_btrace_target::call_history (int size, record_print_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  btinfo = require_btrace ();
  history = btinfo->call_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("call-history (0x%x): %d", (int) flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
         start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
        {
          begin.btinfo = btinfo;
          begin.index = replay->call_index;
        }
      else
        btrace_call_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
         expand in the other direction, as well, to fill up any remaining
         context.  */
      end = begin;
      if (size < 0)
        {
          /* We want the current position covered, as well.  */
          covered = btrace_call_next (&end, 1);
          covered += btrace_call_prev (&begin, context - covered);
          covered += btrace_call_next (&end, context - covered);
        }
      else
        {
          covered = btrace_call_next (&end, context);
          covered += btrace_call_prev (&begin, context - covered);
        }
    }
  else
    {
      begin = history->begin;
      end = history->end;

      DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", (int) flags, size,
             btrace_call_number (&begin), btrace_call_number (&end));

      if (size < 0)
        {
          end = begin;
          covered = btrace_call_prev (&begin, context);
        }
      else
        {
          begin = end;
          covered = btrace_call_next (&end, context);
        }
    }

  if (covered > 0)
    btrace_call_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
        printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
        printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  btrace_set_call_history (btinfo, &begin, &end);
}

/* The call_history_range method of target record-btrace.  */

void
record_btrace_target::call_history_range (ULONGEST from, ULONGEST to,
                                          record_print_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_iterator begin, end;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "func history");
  low = from;
  high = to;

  DEBUG ("call-history (0x%x): [%u; %u)", (int) flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_call_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_call_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_call_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_call_next (&end, 1);
    }

  btrace_call_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_call_history (btinfo, &begin, &end);
}

/* The call_history_from method of target record-btrace.  */

void
record_btrace_target::call_history_from (ULONGEST from, int size,
                                         record_print_flags flags)
{
  ULONGEST begin, end, context;

  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  if (size < 0)
    {
      end = from;

      if (from < context)
        begin = 0;
      else
        begin = from - context + 1;
    }
  else
    {
      begin = from;
      end = from + context - 1;

      /* Check for wrap-around.  */
      if (end < begin)
        end = ULONGEST_MAX;
    }

  call_history_range (begin, end, flags);
}

/* The record_method method of target record-btrace.  */

enum record_method
record_btrace_target::record_method (ptid_t ptid)
{
  struct thread_info * const tp = find_thread_ptid (ptid);

  if (tp == NULL)
    error (_("No thread."));

  if (tp->btrace.target == NULL)
    return RECORD_METHOD_NONE;

  return RECORD_METHOD_BTRACE;
}

/* The record_is_replaying method of target record-btrace.  */

bool
record_btrace_target::record_is_replaying (ptid_t ptid)
{
  for (thread_info *tp : all_non_exited_threads (ptid))
    if (btrace_is_replaying (tp))
      return true;

  return false;
}

/* The record_will_replay method of target record-btrace.  */

bool
record_btrace_target::record_will_replay (ptid_t ptid, int dir)
{
  return dir == EXEC_REVERSE || record_is_replaying (ptid);
}

/* The xfer_partial method of target record-btrace.  */

enum target_xfer_status
record_btrace_target::xfer_partial (enum target_object object,
                                    const char *annex, gdb_byte *readbuf,
                                    const gdb_byte *writebuf, ULONGEST offset,
                                    ULONGEST len, ULONGEST *xfered_len)
{
  /* Filter out requests that don't make sense during replay.  */
  if (replay_memory_access == replay_memory_access_read_only
      && !record_btrace_generating_corefile
      && record_is_replaying (inferior_ptid))
    {
      switch (object)
        {
        case TARGET_OBJECT_MEMORY:
          {
            struct target_section *section;

            /* We do not allow writing memory in general.  */
            if (writebuf != NULL)
              {
                *xfered_len = len;
                return TARGET_XFER_UNAVAILABLE;
              }

            /* We allow reading readonly memory.  */
            section = target_section_by_addr (this, offset);
            if (section != NULL)
              {
                /* Check if the section we found is readonly.  */
                if ((bfd_section_flags (section->the_bfd_section)
                     & SEC_READONLY) != 0)
                  {
                    /* Truncate the request to fit into this section.  */
                    len = std::min (len, section->endaddr - offset);
                    break;
                  }
              }

            *xfered_len = len;
            return TARGET_XFER_UNAVAILABLE;
          }
        }
    }

  /* Forward the request.  */
  return this->beneath ()->xfer_partial (object, annex, readbuf, writebuf,
                                         offset, len, xfered_len);
}

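/* To illustrate the filter above (behavior, not a literal transcript):
   while replaying with the default "read-only" setting, a memory write
   to any address and a read from a writable section such as .data both
   fail with TARGET_XFER_UNAVAILABLE, whereas a read from a read-only
   section such as .text is truncated to that section and forwarded to
   the target beneath.  */
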
/* The insert_breakpoint method of target record-btrace.  */

int
record_btrace_target::insert_breakpoint (struct gdbarch *gdbarch,
                                         struct bp_target_info *bp_tgt)
{
  const char *old;
  int ret;

  /* Inserting breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  try
    {
      ret = this->beneath ()->insert_breakpoint (gdbarch, bp_tgt);
    }
  catch (const gdb_exception &except)
    {
      replay_memory_access = old;
      throw;
    }
  replay_memory_access = old;

  return ret;
}

/* The remove_breakpoint method of target record-btrace.  */

int
record_btrace_target::remove_breakpoint (struct gdbarch *gdbarch,
                                         struct bp_target_info *bp_tgt,
                                         enum remove_bp_reason reason)
{
  const char *old;
  int ret;

  /* Removing breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  try
    {
      ret = this->beneath ()->remove_breakpoint (gdbarch, bp_tgt, reason);
    }
  catch (const gdb_exception &except)
    {
      replay_memory_access = old;
      throw;
    }
  replay_memory_access = old;

  return ret;
}

/* The fetch_registers method of target record-btrace.  */

void
record_btrace_target::fetch_registers (struct regcache *regcache, int regno)
{
  struct btrace_insn_iterator *replay;
  struct thread_info *tp;

  tp = find_thread_ptid (regcache->ptid ());
  gdb_assert (tp != NULL);

  replay = tp->btrace.replay;
  if (replay != NULL && !record_btrace_generating_corefile)
    {
      const struct btrace_insn *insn;
      struct gdbarch *gdbarch;
      int pcreg;

      gdbarch = regcache->arch ();
      pcreg = gdbarch_pc_regnum (gdbarch);
      if (pcreg < 0)
        return;

      /* We can only provide the PC register.  */
      if (regno >= 0 && regno != pcreg)
        return;

      insn = btrace_insn_get (replay);
      gdb_assert (insn != NULL);

      regcache->raw_supply (regno, &insn->pc);
    }
  else
    this->beneath ()->fetch_registers (regcache, regno);
}

/* The store_registers method of target record-btrace.  */

void
record_btrace_target::store_registers (struct regcache *regcache, int regno)
{
  if (!record_btrace_generating_corefile
      && record_is_replaying (regcache->ptid ()))
    error (_("Cannot write registers while replaying."));

  gdb_assert (may_write_registers);

  this->beneath ()->store_registers (regcache, regno);
}

/* The prepare_to_store method of target record-btrace.  */

void
record_btrace_target::prepare_to_store (struct regcache *regcache)
{
  if (!record_btrace_generating_corefile
      && record_is_replaying (regcache->ptid ()))
    return;

  this->beneath ()->prepare_to_store (regcache);
}

/* The branch trace frame cache.  */

struct btrace_frame_cache
{
  /* The thread.  */
  struct thread_info *tp;

  /* The frame info.  */
  struct frame_info *frame;

  /* The branch trace function segment.  */
  const struct btrace_function *bfun;
};

/* A struct btrace_frame_cache hash table indexed by FRAME.  */

static htab_t bfcache;

/* hash_f for htab_create_alloc of bfcache.  */

static hashval_t
bfcache_hash (const void *arg)
{
  const struct btrace_frame_cache *cache
    = (const struct btrace_frame_cache *) arg;

  return htab_hash_pointer (cache->frame);
}

/* eq_f for htab_create_alloc of bfcache.  */

static int
bfcache_eq (const void *arg1, const void *arg2)
{
  const struct btrace_frame_cache *cache1
    = (const struct btrace_frame_cache *) arg1;
  const struct btrace_frame_cache *cache2
    = (const struct btrace_frame_cache *) arg2;

  return cache1->frame == cache2->frame;
}

/* Create a new btrace frame cache.  */

static struct btrace_frame_cache *
bfcache_new (struct frame_info *frame)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
  cache->frame = frame;

  slot = htab_find_slot (bfcache, cache, INSERT);
  gdb_assert (*slot == NULL);
  *slot = cache;

  return cache;
}

/* Extract the branch trace function from a branch trace frame.  */

static const struct btrace_function *
btrace_get_frame_function (struct frame_info *frame)
{
  const struct btrace_frame_cache *cache;
  struct btrace_frame_cache pattern;
  void **slot;

  pattern.frame = frame;

  slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
  if (slot == NULL)
    return NULL;

  cache = (const struct btrace_frame_cache *) *slot;
  return cache->bfun;
}

/* Implement stop_reason method for record_btrace_frame_unwind.  */

static enum unwind_stop_reason
record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
                                        void **this_cache)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;

  cache = (const struct btrace_frame_cache *) *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  if (bfun->up == 0)
    return UNWIND_UNAVAILABLE;

  return UNWIND_NO_REASON;
}

/* Implement this_id method for record_btrace_frame_unwind.  */

static void
record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
                             struct frame_id *this_id)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;
  struct btrace_call_iterator it;
  CORE_ADDR code, special;

  cache = (const struct btrace_frame_cache *) *this_cache;

  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  while (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->prev) != 0)
    bfun = btrace_call_get (&it);

  code = get_frame_func (this_frame);
  special = bfun->number;

  *this_id = frame_id_build_unavailable_stack_special (code, special);

  DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
         btrace_get_bfun_name (cache->bfun),
         core_addr_to_string_nz (this_id->code_addr),
         core_addr_to_string_nz (this_id->special_addr));
}

/* Implement prev_register method for record_btrace_frame_unwind.  */

static struct value *
record_btrace_frame_prev_register (struct frame_info *this_frame,
                                   void **this_cache,
                                   int regnum)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun, *caller;
  struct btrace_call_iterator it;
  struct gdbarch *gdbarch;
  CORE_ADDR pc;
  int pcreg;

  gdbarch = get_frame_arch (this_frame);
  pcreg = gdbarch_pc_regnum (gdbarch);
  if (pcreg < 0 || regnum != pcreg)
    throw_error (NOT_AVAILABLE_ERROR,
                 _("Registers are not available in btrace record history"));

  cache = (const struct btrace_frame_cache *) *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  if (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->up) == 0)
    throw_error (NOT_AVAILABLE_ERROR,
                 _("No caller in btrace record history"));

  caller = btrace_call_get (&it);

  if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
    pc = caller->insn.front ().pc;
  else
    {
      pc = caller->insn.back ().pc;
      pc += gdb_insn_length (gdbarch, pc);
    }

  DEBUG ("[frame] unwound PC in %s on level %d: %s",
         btrace_get_bfun_name (bfun), bfun->level,
         core_addr_to_string_nz (pc));

  return frame_unwind_got_address (this_frame, regnum, pc);
}

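/* A worked example for the unwound PC above (addresses are made up): if
   the caller segment's last instruction is a 5-byte call at 0x400500,
   the PC unwound for the caller frame is 0x400505, the instruction
   after the call.  If the up link marks a return instead, the caller
   segment's first instruction provides the PC.  */
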
/* Implement sniffer method for record_btrace_frame_unwind.  */

static int
record_btrace_frame_sniffer (const struct frame_unwind *self,
                             struct frame_info *this_frame,
                             void **this_cache)
{
  const struct btrace_function *bfun;
  struct btrace_frame_cache *cache;
  struct thread_info *tp;
  struct frame_info *next;

  /* THIS_FRAME does not contain a reference to its thread.  */
  tp = inferior_thread ();

  bfun = NULL;
  next = get_next_frame (this_frame);
  if (next == NULL)
    {
      const struct btrace_insn_iterator *replay;

      replay = tp->btrace.replay;
      if (replay != NULL)
        bfun = &replay->btinfo->functions[replay->call_index];
    }
  else
    {
      const struct btrace_function *callee;
      struct btrace_call_iterator it;

      callee = btrace_get_frame_function (next);
      if (callee == NULL || (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
        return 0;

      if (btrace_find_call_by_number (&it, &tp->btrace, callee->up) == 0)
        return 0;

      bfun = btrace_call_get (&it);
    }

  if (bfun == NULL)
    return 0;

  DEBUG ("[frame] sniffed frame for %s on level %d",
         btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = tp;
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}

/* Implement sniffer method for record_btrace_tailcall_frame_unwind.  */

static int
record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
                                      struct frame_info *this_frame,
                                      void **this_cache)
{
  const struct btrace_function *bfun, *callee;
  struct btrace_frame_cache *cache;
  struct btrace_call_iterator it;
  struct frame_info *next;
  struct thread_info *tinfo;

  next = get_next_frame (this_frame);
  if (next == NULL)
    return 0;

  callee = btrace_get_frame_function (next);
  if (callee == NULL)
    return 0;

  if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
    return 0;

  tinfo = inferior_thread ();
  if (btrace_find_call_by_number (&it, &tinfo->btrace, callee->up) == 0)
    return 0;

  bfun = btrace_call_get (&it);

  DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
         btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = tinfo;
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}

static void
record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = (struct btrace_frame_cache *) this_cache;

  slot = htab_find_slot (bfcache, cache, NO_INSERT);
  gdb_assert (slot != NULL);

  htab_remove_elt (bfcache, cache);
}

/* btrace recording does not store previous memory content, nor the contents
   of the stack frames.  Any unwinding would return erroneous results as the
   stack contents no longer match the changed PC value restored from history.
   Therefore this unwinder reports any possibly unwound registers as
   <unavailable>.  */

const struct frame_unwind record_btrace_frame_unwind =
{
  NORMAL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_frame_sniffer,
  record_btrace_frame_dealloc_cache
};

const struct frame_unwind record_btrace_tailcall_frame_unwind =
{
  TAILCALL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_tailcall_frame_sniffer,
  record_btrace_frame_dealloc_cache
};

/* Implement the get_unwinder method.  */

const struct frame_unwind *
record_btrace_target::get_unwinder ()
{
  return &record_btrace_frame_unwind;
}

/* Implement the get_tailcall_unwinder method.  */

const struct frame_unwind *
record_btrace_target::get_tailcall_unwinder ()
{
  return &record_btrace_tailcall_frame_unwind;
}

/* Return a human-readable string for FLAG.  */

static const char *
btrace_thread_flag_to_str (enum btrace_thread_flag flag)
{
  switch (flag)
    {
    case BTHR_STEP:
      return "step";

    case BTHR_RSTEP:
      return "reverse-step";

    case BTHR_CONT:
      return "cont";

    case BTHR_RCONT:
      return "reverse-cont";

    case BTHR_STOP:
      return "stop";
    }

  return "<invalid>";
}

/* Indicate that TP should be resumed according to FLAG.  */

static void
record_btrace_resume_thread (struct thread_info *tp,
                             enum btrace_thread_flag flag)
{
  struct btrace_thread_info *btinfo;

  DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp),
         target_pid_to_str (tp->ptid).c_str (), flag,
         btrace_thread_flag_to_str (flag));

  btinfo = &tp->btrace;

  /* Fetch the latest branch trace.  */
  btrace_fetch (tp, record_btrace_get_cpu ());

  /* A resume request overwrites a preceding resume or stop request.  */
  btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
  btinfo->flags |= flag;
}

/* Get the current frame for TP.  */

static struct frame_id
get_thread_current_frame_id (struct thread_info *tp)
{
  struct frame_id id;
  int executing;

  /* Set current thread, which is implicitly used by
     get_current_frame.  */
  scoped_restore_current_thread restore_thread;

  switch_to_thread (tp);

  /* Clear the executing flag to allow changes to the current frame.
     We are not actually running, yet.  We just started a reverse execution
     command or a record goto command.
     For the latter, EXECUTING is false and this has no effect.
     For the former, EXECUTING is true and we're in wait, about to
     move the thread.  Since we need to recompute the stack, we temporarily
     set EXECUTING to false.  */
  executing = tp->executing;
  set_executing (inferior_ptid, false);

  id = null_frame_id;
  try
    {
      id = get_frame_id (get_current_frame ());
    }
  catch (const gdb_exception &except)
    {
      /* Restore the previous execution state.  */
      set_executing (inferior_ptid, executing);

      throw;
    }

  /* Restore the previous execution state.  */
  set_executing (inferior_ptid, executing);

  return id;
}

/* Start replaying a thread.  */

static struct btrace_insn_iterator *
record_btrace_start_replaying (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;
  replay = NULL;

  /* We can't start replaying without trace.  */
  if (btinfo->functions.empty ())
    return NULL;

  /* GDB stores the current frame_id when stepping in order to detect steps
     into subroutines.
     Since frames are computed differently when we're replaying, we need to
     recompute those stored frames and fix them up so we can still detect
     subroutines after we started replaying.  */
2018 try
2019 {
2020 struct frame_id frame_id;
2021 int upd_step_frame_id, upd_step_stack_frame_id;
2022
2023 /* The current frame without replaying - computed via normal unwind. */
2024 frame_id = get_thread_current_frame_id (tp);
2025
2026 /* Check if we need to update any stepping-related frame id's. */
2027 upd_step_frame_id = frame_id_eq (frame_id,
2028 tp->control.step_frame_id);
2029 upd_step_stack_frame_id = frame_id_eq (frame_id,
2030 tp->control.step_stack_frame_id);
2031
2032 /* We start replaying at the end of the branch trace. This corresponds
2033 to the current instruction. */
2034 replay = XNEW (struct btrace_insn_iterator);
2035 btrace_insn_end (replay, btinfo);
2036
2037 /* Skip gaps at the end of the trace. */
2038 while (btrace_insn_get (replay) == NULL)
2039 {
2040 unsigned int steps;
2041
2042 steps = btrace_insn_prev (replay, 1);
2043 if (steps == 0)
2044 error (_("No trace."));
2045 }
2046
2047 /* We're not replaying, yet. */
2048 gdb_assert (btinfo->replay == NULL);
2049 btinfo->replay = replay;
2050
2051 /* Make sure we're not using any stale registers. */
2052 registers_changed_thread (tp);
2053
2054 /* The current frame with replaying - computed via btrace unwind. */
2055 frame_id = get_thread_current_frame_id (tp);
2056
2057 /* Replace stepping related frames where necessary. */
2058 if (upd_step_frame_id)
2059 tp->control.step_frame_id = frame_id;
2060 if (upd_step_stack_frame_id)
2061 tp->control.step_stack_frame_id = frame_id;
2062 }
2063 catch (const gdb_exception &except)
2064 {
2065 xfree (btinfo->replay);
2066 btinfo->replay = NULL;
2067
2068 registers_changed_thread (tp);
2069
2070 throw;
2071 }
2072
2073 return replay;
2074 }
2075
2076 /* Stop replaying a thread. */
2077
2078 static void
2079 record_btrace_stop_replaying (struct thread_info *tp)
2080 {
2081 struct btrace_thread_info *btinfo;
2082
2083 btinfo = &tp->btrace;
2084
2085 xfree (btinfo->replay);
2086 btinfo->replay = NULL;
2087
2088 /* Make sure we're not leaving any stale registers. */
2089 registers_changed_thread (tp);
2090 }
2091
2092 /* Stop replaying TP if it is at the end of its execution history. */
2093
2094 static void
2095 record_btrace_stop_replaying_at_end (struct thread_info *tp)
2096 {
2097 struct btrace_insn_iterator *replay, end;
2098 struct btrace_thread_info *btinfo;
2099
2100 btinfo = &tp->btrace;
2101 replay = btinfo->replay;
2102
2103 if (replay == NULL)
2104 return;
2105
2106 btrace_insn_end (&end, btinfo);
2107
2108 if (btrace_insn_cmp (replay, &end) == 0)
2109 record_btrace_stop_replaying (tp);
2110 }
2111
2112 /* The resume method of target record-btrace. */
2113
2114 void
2115 record_btrace_target::resume (ptid_t ptid, int step, enum gdb_signal signal)
2116 {
2117 enum btrace_thread_flag flag, cflag;
2118
2119 DEBUG ("resume %s: %s%s", target_pid_to_str (ptid).c_str (),
2120 ::execution_direction == EXEC_REVERSE ? "reverse-" : "",
2121 step ? "step" : "cont");
2122
2123 /* Store the execution direction of the last resume.
2124
2125 If there is more than one resume call, we have to rely on infrun
2126 to not change the execution direction in-between. */
2127 record_btrace_resume_exec_dir = ::execution_direction;
2128
2129 /* As long as we're not replaying, just forward the request.
2130
2131 For non-stop targets this means that no thread is replaying. In order to
2132 make progress, we may need to explicitly move replaying threads to the end
2133 of their execution history. */
2134 if ((::execution_direction != EXEC_REVERSE)
2135 && !record_is_replaying (minus_one_ptid))
2136 {
2137 this->beneath ()->resume (ptid, step, signal);
2138 return;
2139 }
2140
2141 /* Compute the btrace thread flag for the requested move. */
2142 if (::execution_direction == EXEC_REVERSE)
2143 {
2144 flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;
2145 cflag = BTHR_RCONT;
2146 }
2147 else
2148 {
2149 flag = step == 0 ? BTHR_CONT : BTHR_STEP;
2150 cflag = BTHR_CONT;
2151 }
2152
2153 /* We just indicate the resume intent here. The actual stepping happens in
2154 record_btrace_wait below.
2155
2156 For all-stop targets, we only step INFERIOR_PTID and continue others. */
2157 if (!target_is_non_stop_p ())
2158 {
2159 gdb_assert (inferior_ptid.matches (ptid));
2160
2161 for (thread_info *tp : all_non_exited_threads (ptid))
2162 {
2163 if (tp->ptid.matches (inferior_ptid))
2164 record_btrace_resume_thread (tp, flag);
2165 else
2166 record_btrace_resume_thread (tp, cflag);
2167 }
2168 }
2169 else
2170 {
2171 for (thread_info *tp : all_non_exited_threads (ptid))
2172 record_btrace_resume_thread (tp, flag);
2173 }
2174
2175 /* Async support. */
2176 if (target_can_async_p ())
2177 {
2178 target_async (1);
2179 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2180 }
2181 }
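
/* Illustration: with "set exec-direction reverse", a "stepi" on thread A on
   an all-stop target sets BTHR_RSTEP on A and BTHR_RCONT on the other
   resumed threads.  Nothing moves here; the actual stepping through the
   recorded trace happens in record_btrace_target::wait below.  */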
2182
2183 /* The commit_resume method of target record-btrace. */
2184
2185 void
2186 record_btrace_target::commit_resume ()
2187 {
2188 if ((::execution_direction != EXEC_REVERSE)
2189 && !record_is_replaying (minus_one_ptid))
2190 beneath ()->commit_resume ();
2191 }
2192
2193 /* Cancel resuming TP. */
2194
2195 static void
2196 record_btrace_cancel_resume (struct thread_info *tp)
2197 {
2198 enum btrace_thread_flag flags;
2199
2200 flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
2201 if (flags == 0)
2202 return;
2203
2204 DEBUG ("cancel resume thread %s (%s): %x (%s)",
2205 print_thread_id (tp),
2206 target_pid_to_str (tp->ptid).c_str (), flags,
2207 btrace_thread_flag_to_str (flags));
2208
2209 tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
2210 record_btrace_stop_replaying_at_end (tp);
2211 }
2212
2213 /* Return a target_waitstatus indicating that we ran out of history. */
2214
2215 static struct target_waitstatus
2216 btrace_step_no_history (void)
2217 {
2218 struct target_waitstatus status;
2219
2220 status.kind = TARGET_WAITKIND_NO_HISTORY;
2221
2222 return status;
2223 }
2224
2225 /* Return a target_waitstatus indicating that a step finished. */
2226
2227 static struct target_waitstatus
2228 btrace_step_stopped (void)
2229 {
2230 struct target_waitstatus status;
2231
2232 status.kind = TARGET_WAITKIND_STOPPED;
2233 status.value.sig = GDB_SIGNAL_TRAP;
2234
2235 return status;
2236 }
2237
2238 /* Return a target_waitstatus indicating that a thread was stopped as
2239 requested. */
2240
2241 static struct target_waitstatus
2242 btrace_step_stopped_on_request (void)
2243 {
2244 struct target_waitstatus status;
2245
2246 status.kind = TARGET_WAITKIND_STOPPED;
2247 status.value.sig = GDB_SIGNAL_0;
2248
2249 return status;
2250 }
2251
2252 /* Return a target_waitstatus indicating a spurious stop. */
2253
2254 static struct target_waitstatus
2255 btrace_step_spurious (void)
2256 {
2257 struct target_waitstatus status;
2258
2259 status.kind = TARGET_WAITKIND_SPURIOUS;
2260
2261 return status;
2262 }
2263
2264 /* Return a target_waitstatus indicating that the thread was not resumed. */
2265
2266 static struct target_waitstatus
2267 btrace_step_no_resumed (void)
2268 {
2269 struct target_waitstatus status;
2270
2271 status.kind = TARGET_WAITKIND_NO_RESUMED;
2272
2273 return status;
2274 }
2275
2276 /* Return a target_waitstatus indicating that we should wait again. */
2277
2278 static struct target_waitstatus
2279 btrace_step_again (void)
2280 {
2281 struct target_waitstatus status;
2282
2283 status.kind = TARGET_WAITKIND_IGNORE;
2284
2285 return status;
2286 }
2287
2288 /* Clear the record histories. */
2289
2290 static void
2291 record_btrace_clear_histories (struct btrace_thread_info *btinfo)
2292 {
2293 xfree (btinfo->insn_history);
2294 xfree (btinfo->call_history);
2295
2296 btinfo->insn_history = NULL;
2297 btinfo->call_history = NULL;
2298 }
2299
2300 /* Check whether TP's current replay position is at a breakpoint. */
2301
2302 static int
2303 record_btrace_replay_at_breakpoint (struct thread_info *tp)
2304 {
2305 struct btrace_insn_iterator *replay;
2306 struct btrace_thread_info *btinfo;
2307 const struct btrace_insn *insn;
2308
2309 btinfo = &tp->btrace;
2310 replay = btinfo->replay;
2311
2312 if (replay == NULL)
2313 return 0;
2314
2315 insn = btrace_insn_get (replay);
2316 if (insn == NULL)
2317 return 0;
2318
2319 return record_check_stopped_by_breakpoint (tp->inf->aspace, insn->pc,
2320 &btinfo->stop_reason);
2321 }
2322
2323 /* Step one instruction in forward direction. */
2324
2325 static struct target_waitstatus
2326 record_btrace_single_step_forward (struct thread_info *tp)
2327 {
2328 struct btrace_insn_iterator *replay, end, start;
2329 struct btrace_thread_info *btinfo;
2330
2331 btinfo = &tp->btrace;
2332 replay = btinfo->replay;
2333
2334 /* We're done if we're not replaying. */
2335 if (replay == NULL)
2336 return btrace_step_no_history ();
2337
2338 /* Check if we're stepping a breakpoint. */
2339 if (record_btrace_replay_at_breakpoint (tp))
2340 return btrace_step_stopped ();
2341
2342 /* Skip gaps during replay. If we end up at a gap (at the end of the trace),
2343 jump back to the instruction at which we started. */
2344 start = *replay;
2345 do
2346 {
2347 unsigned int steps;
2348
2349 /* We will bail out here if we continue stepping after reaching the end
2350 of the execution history. */
2351 steps = btrace_insn_next (replay, 1);
2352 if (steps == 0)
2353 {
2354 *replay = start;
2355 return btrace_step_no_history ();
2356 }
2357 }
2358 while (btrace_insn_get (replay) == NULL);
2359
2360 /* Determine the end of the instruction trace. */
2361 btrace_insn_end (&end, btinfo);
2362
2363 /* The execution trace contains (and ends with) the current instruction.
2364 This instruction has not been executed, yet, so the trace really ends
2365 one instruction earlier. */
2366 if (btrace_insn_cmp (replay, &end) == 0)
2367 return btrace_step_no_history ();
2368
2369 return btrace_step_spurious ();
2370 }
2371
2372 /* Step one instruction in backward direction. */
2373
2374 static struct target_waitstatus
2375 record_btrace_single_step_backward (struct thread_info *tp)
2376 {
2377 struct btrace_insn_iterator *replay, start;
2378 struct btrace_thread_info *btinfo;
2379
2380 btinfo = &tp->btrace;
2381 replay = btinfo->replay;
2382
2383 /* Start replaying if we're not already doing so. */
2384 if (replay == NULL)
2385 replay = record_btrace_start_replaying (tp);
2386
2387 /* If we can't step any further, we reached the end of the history.
2388 Skip gaps during replay. If we end up at a gap (at the beginning of
2389 the trace), jump back to the instruction at which we started. */
2390 start = *replay;
2391 do
2392 {
2393 unsigned int steps;
2394
2395 steps = btrace_insn_prev (replay, 1);
2396 if (steps == 0)
2397 {
2398 *replay = start;
2399 return btrace_step_no_history ();
2400 }
2401 }
2402 while (btrace_insn_get (replay) == NULL);
2403
2404 /* Check if we're stepping a breakpoint.
2405
2406 For reverse-stepping, this check is after the step. There is logic in
2407 infrun.c that handles reverse-stepping separately. See, for example,
2408 proceed and adjust_pc_after_break.
2409
2410 This code assumes that for reverse-stepping, PC points to the last
2411 de-executed instruction, whereas for forward-stepping PC points to the
2412 next to-be-executed instruction. */
2413 if (record_btrace_replay_at_breakpoint (tp))
2414 return btrace_step_stopped ();
2415
2416 return btrace_step_spurious ();
2417 }
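
/* Concrete example of the PC convention above: when reverse-stepping onto a
   breakpoint, the replay iterator already points at the breakpoint
   instruction (the last de-executed one), so the check must follow the
   step.  Forward stepping checks before moving; see
   record_btrace_single_step_forward above.  */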
2418
2419 /* Step a single thread. */
2420
2421 static struct target_waitstatus
2422 record_btrace_step_thread (struct thread_info *tp)
2423 {
2424 struct btrace_thread_info *btinfo;
2425 struct target_waitstatus status;
2426 enum btrace_thread_flag flags;
2427
2428 btinfo = &tp->btrace;
2429
2430 flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
2431 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
2432
2433 DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp),
2434 target_pid_to_str (tp->ptid).c_str (), flags,
2435 btrace_thread_flag_to_str (flags));
2436
2437 /* We can't step without an execution history. */
2438 if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
2439 return btrace_step_no_history ();
2440
2441 switch (flags)
2442 {
2443 default:
2444 internal_error (__FILE__, __LINE__, _("invalid stepping type."));
2445
2446 case BTHR_STOP:
2447 return btrace_step_stopped_on_request ();
2448
2449 case BTHR_STEP:
2450 status = record_btrace_single_step_forward (tp);
2451 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2452 break;
2453
2454 return btrace_step_stopped ();
2455
2456 case BTHR_RSTEP:
2457 status = record_btrace_single_step_backward (tp);
2458 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2459 break;
2460
2461 return btrace_step_stopped ();
2462
2463 case BTHR_CONT:
2464 status = record_btrace_single_step_forward (tp);
2465 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2466 break;
2467
2468 btinfo->flags |= flags;
2469 return btrace_step_again ();
2470
2471 case BTHR_RCONT:
2472 status = record_btrace_single_step_backward (tp);
2473 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2474 break;
2475
2476 btinfo->flags |= flags;
2477 return btrace_step_again ();
2478 }
2479
2480 /* We keep threads moving at the end of their execution history. The wait
2481 method will stop the thread for which the event is reported. */
2482 if (status.kind == TARGET_WAITKIND_NO_HISTORY)
2483 btinfo->flags |= flags;
2484
2485 return status;
2486 }
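
/* Illustration of the continue cases above: BTHR_CONT and BTHR_RCONT move a
   thread by at most one instruction per call.  A spurious step re-arms the
   flag and returns btrace_step_again (), asking the caller to step the
   thread again later; the loop ends with btrace_step_stopped () at a
   breakpoint or btrace_step_no_history () at either end of the trace.  */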
2487
2488 /* Announce further events if necessary. */
2489
2490 static void
2491 record_btrace_maybe_mark_async_event
2492 (const std::vector<thread_info *> &moving,
2493 const std::vector<thread_info *> &no_history)
2494 {
2495 bool more_moving = !moving.empty ();
2496 bool more_no_history = !no_history.empty ();
2497
2498 if (!more_moving && !more_no_history)
2499 return;
2500
2501 if (more_moving)
2502 DEBUG ("movers pending");
2503
2504 if (more_no_history)
2505 DEBUG ("no-history pending");
2506
2507 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2508 }
2509
2510 /* The wait method of target record-btrace. */
2511
2512 ptid_t
2513 record_btrace_target::wait (ptid_t ptid, struct target_waitstatus *status,
2514 int options)
2515 {
2516 std::vector<thread_info *> moving;
2517 std::vector<thread_info *> no_history;
2518
2519 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid).c_str (), options);
2520
2521 /* As long as we're not replaying, just forward the request. */
2522 if ((::execution_direction != EXEC_REVERSE)
2523 && !record_is_replaying (minus_one_ptid))
2524 {
2525 return this->beneath ()->wait (ptid, status, options);
2526 }
2527
2528 /* Keep a work list of moving threads. */
2529 for (thread_info *tp : all_non_exited_threads (ptid))
2530 if ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0)
2531 moving.push_back (tp);
2532
2533 if (moving.empty ())
2534 {
2535 *status = btrace_step_no_resumed ();
2536
2537 DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid).c_str (),
2538 target_waitstatus_to_string (status).c_str ());
2539
2540 return null_ptid;
2541 }
2542
2543 /* Step moving threads one by one, one step each, until either one thread
2544 reports an event or we run out of threads to step.
2545
2546 When stepping more than one thread, chances are that some threads reach
2547 the end of their execution history earlier than others. If we reported
2548 this immediately, all-stop on top of non-stop would stop all threads and
2549 resume the same threads next time. And we would report the same thread
2550 having reached the end of its execution history again.
2551
2552 In the worst case, this would starve the other threads. But even if the
2553 other threads were allowed to make progress, this would result in far too
2554 many intermediate stops.
2555
2556 We therefore delay the reporting of "no execution history" until we have
2557 nothing else to report. By this time, all threads should have moved to
2558 either the beginning or the end of their execution history. There will
2559 be a single user-visible stop. */
2560 struct thread_info *eventing = NULL;
2561 while ((eventing == NULL) && !moving.empty ())
2562 {
2563 for (unsigned int ix = 0; eventing == NULL && ix < moving.size ();)
2564 {
2565 thread_info *tp = moving[ix];
2566
2567 *status = record_btrace_step_thread (tp);
2568
2569 switch (status->kind)
2570 {
2571 case TARGET_WAITKIND_IGNORE:
2572 ix++;
2573 break;
2574
2575 case TARGET_WAITKIND_NO_HISTORY:
2576 no_history.push_back (ordered_remove (moving, ix));
2577 break;
2578
2579 default:
2580 eventing = unordered_remove (moving, ix);
2581 break;
2582 }
2583 }
2584 }
2585
2586 if (eventing == NULL)
2587 {
2588 /* We started with at least one moving thread. This thread must have
2589 either stopped or reached the end of its execution history.
2590
2591 In the former case, EVENTING must not be NULL.
2592 In the latter case, NO_HISTORY must not be empty. */
2593 gdb_assert (!no_history.empty ());
2594
2595 /* We kept threads moving at the end of their execution history. Stop
2596 EVENTING now that we are going to report its stop. */
2597 eventing = unordered_remove (no_history, 0);
2598 eventing->btrace.flags &= ~BTHR_MOVE;
2599
2600 *status = btrace_step_no_history ();
2601 }
2602
2603 gdb_assert (eventing != NULL);
2604
2605 /* We kept threads replaying at the end of their execution history. Stop
2606 replaying EVENTING now that we are going to report its stop. */
2607 record_btrace_stop_replaying_at_end (eventing);
2608
2609 /* Stop all other threads. */
2610 if (!target_is_non_stop_p ())
2611 {
2612 for (thread_info *tp : all_non_exited_threads ())
2613 record_btrace_cancel_resume (tp);
2614 }
2615
2616 /* In async mode, we need to announce further events. */
2617 if (target_is_async_p ())
2618 record_btrace_maybe_mark_async_event (moving, no_history);
2619
2620 /* Start record histories anew from the current position. */
2621 record_btrace_clear_histories (&eventing->btrace);
2622
2623 /* We moved the replay position but did not update registers. */
2624 registers_changed_thread (eventing);
2625
2626 DEBUG ("wait ended by thread %s (%s): %s",
2627 print_thread_id (eventing),
2628 target_pid_to_str (eventing->ptid).c_str (),
2629 target_waitstatus_to_string (status).c_str ());
2630
2631 return eventing->ptid;
2632 }
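
/* Worked example (a sketch): two replaying threads T1 and T2 are being
   reverse-continued.  T1 reaches the beginning of its trace first and is
   moved to NO_HISTORY; T2 then hits a breakpoint and becomes EVENTING.
   T2's stop is reported now, T1's "no history" stop is deferred, and
   record_btrace_maybe_mark_async_event arranges for a later wait call to
   report it.  */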
2633
2634 /* The stop method of target record-btrace. */
2635
2636 void
2637 record_btrace_target::stop (ptid_t ptid)
2638 {
2639 DEBUG ("stop %s", target_pid_to_str (ptid).c_str ());
2640
2641 /* As long as we're not replaying, just forward the request. */
2642 if ((::execution_direction != EXEC_REVERSE)
2643 && !record_is_replaying (minus_one_ptid))
2644 {
2645 this->beneath ()->stop (ptid);
2646 }
2647 else
2648 {
2649 for (thread_info *tp : all_non_exited_threads (ptid))
2650 {
2651 tp->btrace.flags &= ~BTHR_MOVE;
2652 tp->btrace.flags |= BTHR_STOP;
2653 }
2654 }
2655 }
2656
2657 /* The can_execute_reverse method of target record-btrace. */
2658
2659 bool
2660 record_btrace_target::can_execute_reverse ()
2661 {
2662 return true;
2663 }
2664
2665 /* The stopped_by_sw_breakpoint method of target record-btrace. */
2666
2667 bool
2668 record_btrace_target::stopped_by_sw_breakpoint ()
2669 {
2670 if (record_is_replaying (minus_one_ptid))
2671 {
2672 struct thread_info *tp = inferior_thread ();
2673
2674 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2675 }
2676
2677 return this->beneath ()->stopped_by_sw_breakpoint ();
2678 }
2679
2680 /* The supports_stopped_by_sw_breakpoint method of target
2681 record-btrace. */
2682
2683 bool
2684 record_btrace_target::supports_stopped_by_sw_breakpoint ()
2685 {
2686 if (record_is_replaying (minus_one_ptid))
2687 return true;
2688
2689 return this->beneath ()->supports_stopped_by_sw_breakpoint ();
2690 }
2691
2692 /* The stopped_by_hw_breakpoint method of target record-btrace. */
2693
2694 bool
2695 record_btrace_target::stopped_by_hw_breakpoint ()
2696 {
2697 if (record_is_replaying (minus_one_ptid))
2698 {
2699 struct thread_info *tp = inferior_thread ();
2700
2701 return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2702 }
2703
2704 return this->beneath ()->stopped_by_hw_breakpoint ();
2705 }
2706
2707 /* The supports_stopped_by_hw_breakpoint method of target
2708 record-btrace. */
2709
2710 bool
2711 record_btrace_target::supports_stopped_by_hw_breakpoint ()
2712 {
2713 if (record_is_replaying (minus_one_ptid))
2714 return true;
2715
2716 return this->beneath ()->supports_stopped_by_hw_breakpoint ();
2717 }
2718
2719 /* The update_thread_list method of target record-btrace. */
2720
2721 void
2722 record_btrace_target::update_thread_list ()
2723 {
2724 /* We don't add or remove threads during replay. */
2725 if (record_is_replaying (minus_one_ptid))
2726 return;
2727
2728 /* Forward the request. */
2729 this->beneath ()->update_thread_list ();
2730 }
2731
2732 /* The thread_alive method of target record-btrace. */
2733
2734 bool
2735 record_btrace_target::thread_alive (ptid_t ptid)
2736 {
2737 /* We don't add or remove threads during replay. */
2738 if (record_is_replaying (minus_one_ptid))
2739 return true;
2740
2741 /* Forward the request. */
2742 return this->beneath ()->thread_alive (ptid);
2743 }
2744
2745 /* Set the replay branch trace instruction iterator. If IT is NULL, replay
2746 is stopped. */
2747
2748 static void
2749 record_btrace_set_replay (struct thread_info *tp,
2750 const struct btrace_insn_iterator *it)
2751 {
2752 struct btrace_thread_info *btinfo;
2753
2754 btinfo = &tp->btrace;
2755
2756 if (it == NULL)
2757 record_btrace_stop_replaying (tp);
2758 else
2759 {
2760 if (btinfo->replay == NULL)
2761 record_btrace_start_replaying (tp);
2762 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
2763 return;
2764
2765 *btinfo->replay = *it;
2766 registers_changed_thread (tp);
2767 }
2768
2769 /* Start anew from the new replay position. */
2770 record_btrace_clear_histories (btinfo);
2771
2772 inferior_thread ()->suspend.stop_pc
2773 = regcache_read_pc (get_current_regcache ());
2774 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
2775 }
2776
2777 /* The goto_record_begin method of target record-btrace. */
2778
2779 void
2780 record_btrace_target::goto_record_begin ()
2781 {
2782 struct thread_info *tp;
2783 struct btrace_insn_iterator begin;
2784
2785 tp = require_btrace_thread ();
2786
2787 btrace_insn_begin (&begin, &tp->btrace);
2788
2789 /* Skip gaps at the beginning of the trace. */
2790 while (btrace_insn_get (&begin) == NULL)
2791 {
2792 unsigned int steps;
2793
2794 steps = btrace_insn_next (&begin, 1);
2795 if (steps == 0)
2796 error (_("No trace."));
2797 }
2798
2799 record_btrace_set_replay (tp, &begin);
2800 }
2801
2802 /* The goto_record_end method of target record-btrace. */
2803
2804 void
2805 record_btrace_target::goto_record_end ()
2806 {
2807 struct thread_info *tp;
2808
2809 tp = require_btrace_thread ();
2810
2811 record_btrace_set_replay (tp, NULL);
2812 }
2813
2814 /* The goto_record method of target record-btrace. */
2815
2816 void
2817 record_btrace_target::goto_record (ULONGEST insn)
2818 {
2819 struct thread_info *tp;
2820 struct btrace_insn_iterator it;
2821 unsigned int number;
2822 int found;
2823
2824 number = insn;
2825
2826 /* Check for wrap-arounds. */
2827 if (number != insn)
2828 error (_("Instruction number out of range."));
2829
2830 tp = require_btrace_thread ();
2831
2832 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
2833
2834 /* Check if the instruction could not be found or is a gap. */
2835 if (found == 0 || btrace_insn_get (&it) == NULL)
2836 error (_("No such instruction."));
2837
2838 record_btrace_set_replay (tp, &it);
2839 }
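
/* The three goto methods above back the "record goto" command, e.g.
   (illustration):

     (gdb) record goto begin
     (gdb) record goto 23
     (gdb) record goto end  */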
2840
2841 /* The record_stop_replaying method of target record-btrace. */
2842
2843 void
2844 record_btrace_target::record_stop_replaying ()
2845 {
2846 for (thread_info *tp : all_non_exited_threads ())
2847 record_btrace_stop_replaying (tp);
2848 }
2849
2850 /* The execution_direction target method. */
2851
2852 enum exec_direction_kind
2853 record_btrace_target::execution_direction ()
2854 {
2855 return record_btrace_resume_exec_dir;
2856 }
2857
2858 /* The prepare_to_generate_core target method. */
2859
2860 void
2861 record_btrace_target::prepare_to_generate_core ()
2862 {
2863 record_btrace_generating_corefile = 1;
2864 }
2865
2866 /* The done_generating_core target method. */
2867
2868 void
2869 record_btrace_target::done_generating_core ()
2870 {
2871 record_btrace_generating_corefile = 0;
2872 }
2873
2874 /* Start recording in BTS format. */
2875
2876 static void
2877 cmd_record_btrace_bts_start (const char *args, int from_tty)
2878 {
2879 if (args != NULL && *args != 0)
2880 error (_("Invalid argument."));
2881
2882 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2883
2884 try
2885 {
2886 execute_command ("target record-btrace", from_tty);
2887 }
2888 catch (const gdb_exception &exception)
2889 {
2890 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2891 throw;
2892 }
2893 }
2894
2895 /* Start recording in Intel Processor Trace format. */
2896
2897 static void
2898 cmd_record_btrace_pt_start (const char *args, int from_tty)
2899 {
2900 if (args != NULL && *args != 0)
2901 error (_("Invalid argument."));
2902
2903 record_btrace_conf.format = BTRACE_FORMAT_PT;
2904
2905 try
2906 {
2907 execute_command ("target record-btrace", from_tty);
2908 }
2909 catch (const gdb_exception &exception)
2910 {
2911 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2912 throw;
2913 }
2914 }
2915
2916 /* The "record btrace" command. Try Intel PT first; fall back to BTS. */
2917
2918 static void
2919 cmd_record_btrace_start (const char *args, int from_tty)
2920 {
2921 if (args != NULL && *args != 0)
2922 error (_("Invalid argument."));
2923
2924 record_btrace_conf.format = BTRACE_FORMAT_PT;
2925
2926 try
2927 {
2928 execute_command ("target record-btrace", from_tty);
2929 }
2930 catch (const gdb_exception &exception)
2931 {
2932 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2933
2934 try
2935 {
2936 execute_command ("target record-btrace", from_tty);
2937 }
2938 catch (const gdb_exception &ex)
2939 {
2940 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2941 throw;
2942 }
2943 }
2944 }
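
/* Illustration of the fallback above:

     (gdb) record btrace        # try Intel PT, fall back to BTS
     (gdb) record btrace pt     # Intel PT only
     (gdb) record btrace bts    # BTS only

   If both formats fail, record_btrace_conf.format is reset to
   BTRACE_FORMAT_NONE and the last error is rethrown.  */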
2945
2946 /* The "set record btrace" command. */
2947
2948 static void
2949 cmd_set_record_btrace (const char *args, int from_tty)
2950 {
2951 printf_unfiltered (_("\"set record btrace\" must be followed "
2952 "by an appropriate subcommand.\n"));
2953 help_list (set_record_btrace_cmdlist, "set record btrace ",
2954 all_commands, gdb_stdout);
2955 }
2956
2957 /* The "show record btrace" command. */
2958
2959 static void
2960 cmd_show_record_btrace (const char *args, int from_tty)
2961 {
2962 cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
2963 }
2964
2965 /* The "show record btrace replay-memory-access" command. */
2966
2967 static void
2968 cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2969 struct cmd_list_element *c, const char *value)
2970 {
2971 fprintf_filtered (file, _("Replay memory access is %s.\n"),
2972 replay_memory_access);
2973 }
2974
2975 /* The "set record btrace cpu none" command. */
2976
2977 static void
2978 cmd_set_record_btrace_cpu_none (const char *args, int from_tty)
2979 {
2980 if (args != nullptr && *args != 0)
2981 error (_("Trailing junk: '%s'."), args);
2982
2983 record_btrace_cpu_state = CS_NONE;
2984 }
2985
2986 /* The "set record btrace cpu auto" command. */
2987
2988 static void
2989 cmd_set_record_btrace_cpu_auto (const char *args, int from_tty)
2990 {
2991 if (args != nullptr && *args != 0)
2992 error (_("Trailing junk: '%s'."), args);
2993
2994 record_btrace_cpu_state = CS_AUTO;
2995 }
2996
2997 /* The "set record btrace cpu" command. */
2998
2999 static void
3000 cmd_set_record_btrace_cpu (const char *args, int from_tty)
3001 {
3002 if (args == nullptr)
3003 args = "";
3004
3005 /* We use a hard-coded vendor string for now. */
3006 unsigned int family, model, stepping;
3007 int l1, l2, matches = sscanf (args, "intel: %u/%u%n/%u%n", &family,
3008 &model, &l1, &stepping, &l2);
3009 if (matches == 3)
3010 {
3011 if (strlen (args) != l2)
3012 error (_("Trailing junk: '%s'."), args + l2);
3013 }
3014 else if (matches == 2)
3015 {
3016 if (strlen (args) != l1)
3017 error (_("Trailing junk: '%s'."), args + l1);
3018
3019 stepping = 0;
3020 }
3021 else
3022 error (_("Bad format. See \"help set record btrace cpu\"."));
3023
3024 if (USHRT_MAX < family)
3025 error (_("Cpu family too big."));
3026
3027 if (UCHAR_MAX < model)
3028 error (_("Cpu model too big."));
3029
3030 if (UCHAR_MAX < stepping)
3031 error (_("Cpu stepping too big."));
3032
3033 record_btrace_cpu.vendor = CV_INTEL;
3034 record_btrace_cpu.family = family;
3035 record_btrace_cpu.model = model;
3036 record_btrace_cpu.stepping = stepping;
3037
3038 record_btrace_cpu_state = CS_CPU;
3039 }
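
/* Example inputs accepted above (illustration):

     (gdb) set record btrace cpu intel: 6/158
     (gdb) set record btrace cpu intel: 6/158/9

   Anything not handled by the "auto" and "none" subcommands must match
   "intel: FAMILY/MODEL[/STEPPING]"; a missing STEPPING defaults to 0.  */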
3040
3041 /* The "show record btrace cpu" command. */
3042
3043 static void
3044 cmd_show_record_btrace_cpu (const char *args, int from_tty)
3045 {
3046 if (args != nullptr && *args != 0)
3047 error (_("Trailing junk: '%s'."), args);
3048
3049 switch (record_btrace_cpu_state)
3050 {
3051 case CS_AUTO:
3052 printf_unfiltered (_("btrace cpu is 'auto'.\n"));
3053 return;
3054
3055 case CS_NONE:
3056 printf_unfiltered (_("btrace cpu is 'none'.\n"));
3057 return;
3058
3059 case CS_CPU:
3060 switch (record_btrace_cpu.vendor)
3061 {
3062 case CV_INTEL:
3063 if (record_btrace_cpu.stepping == 0)
3064 printf_unfiltered (_("btrace cpu is 'intel: %u/%u'.\n"),
3065 record_btrace_cpu.family,
3066 record_btrace_cpu.model);
3067 else
3068 printf_unfiltered (_("btrace cpu is 'intel: %u/%u/%u'.\n"),
3069 record_btrace_cpu.family,
3070 record_btrace_cpu.model,
3071 record_btrace_cpu.stepping);
3072 return;
3073 }
3074 }
3075
3076 error (_("Internal error: bad cpu state."));
3077 }
3078
3079 /* The "s record btrace bts" command. */
3080
3081 static void
3082 cmd_set_record_btrace_bts (const char *args, int from_tty)
3083 {
3084 printf_unfiltered (_("\"set record btrace bts\" must be followed "
3085 "by an appropriate subcommand.\n"));
3086 help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
3087 all_commands, gdb_stdout);
3088 }
3089
3090 /* The "show record btrace bts" command. */
3091
3092 static void
3093 cmd_show_record_btrace_bts (const char *args, int from_tty)
3094 {
3095 cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
3096 }
3097
3098 /* The "set record btrace pt" command. */
3099
3100 static void
3101 cmd_set_record_btrace_pt (const char *args, int from_tty)
3102 {
3103 printf_unfiltered (_("\"set record btrace pt\" must be followed "
3104 "by an appropriate subcommand.\n"));
3105 help_list (set_record_btrace_pt_cmdlist, "set record btrace pt ",
3106 all_commands, gdb_stdout);
3107 }
3108
3109 /* The "show record btrace pt" command. */
3110
3111 static void
3112 cmd_show_record_btrace_pt (const char *args, int from_tty)
3113 {
3114 cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, "");
3115 }
3116
3117 /* The "record bts buffer-size" show value function. */
3118
3119 static void
3120 show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
3121 struct cmd_list_element *c,
3122 const char *value)
3123 {
3124 fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
3125 value);
3126 }
3127
3128 /* The "record pt buffer-size" show value function. */
3129
3130 static void
3131 show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
3132 struct cmd_list_element *c,
3133 const char *value)
3134 {
3135 fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
3136 value);
3137 }
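
/* Illustration (assuming the default unit-less uinteger syntax):

     (gdb) set record btrace bts buffer-size 131072
     (gdb) show record btrace bts buffer-size
     The record/replay bts buffer size is 131072.

   The defaults set at the end of _initialize_record_btrace below are
   64 KiB for BTS and 16 KiB for Intel PT.  */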
3138
3139 /* Initialize btrace commands. */
3140
3141 void
3142 _initialize_record_btrace (void)
3143 {
3144 add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
3145 _("Start branch trace recording."), &record_btrace_cmdlist,
3146 "record btrace ", 0, &record_cmdlist);
3147 add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
3148
3149 add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
3150 _("\
3151 Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
3152 The processor stores a from/to record for each branch into a cyclic buffer.\n\
3153 This format may not be available on all processors."),
3154 &record_btrace_cmdlist);
3155 add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);
3156
3157 add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
3158 _("\
3159 Start branch trace recording in Intel Processor Trace format.\n\n\
3160 This format may not be available on all processors."),
3161 &record_btrace_cmdlist);
3162 add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);
3163
3164 add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
3165 _("Set record options."), &set_record_btrace_cmdlist,
3166 "set record btrace ", 0, &set_record_cmdlist);
3167
3168 add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
3169 _("Show record options."), &show_record_btrace_cmdlist,
3170 "show record btrace ", 0, &show_record_cmdlist);
3171
3172 add_setshow_enum_cmd ("replay-memory-access", no_class,
3173 replay_memory_access_types, &replay_memory_access, _("\
3174 Set what memory accesses are allowed during replay."), _("\
3175 Show what memory accesses are allowed during replay."),
3176 _("Default is READ-ONLY.\n\n\
3177 The btrace record target does not trace data.\n\
3178 The memory therefore corresponds to the live target and not \
3179 to the current replay position.\n\n\
3180 When READ-ONLY, allow accesses to read-only memory during replay.\n\
3181 When READ-WRITE, allow accesses to read-only and read-write memory during \
3182 replay."),
3183 NULL, cmd_show_replay_memory_access,
3184 &set_record_btrace_cmdlist,
3185 &show_record_btrace_cmdlist);
3186
3187 add_prefix_cmd ("cpu", class_support, cmd_set_record_btrace_cpu,
3188 _("\
3189 Set the cpu to be used for trace decode.\n\n\
3190 The format is \"VENDOR:IDENTIFIER\" or \"none\" or \"auto\" (default).\n\
3191 For vendor \"intel\" the format is \"FAMILY/MODEL[/STEPPING]\".\n\n\
3192 When decoding branch trace, enable errata workarounds for the specified cpu.\n\
3193 The default is \"auto\", which uses the cpu on which the trace was recorded.\n\
3194 When GDB does not support that cpu, this option can be used to enable\n\
3195 workarounds for a similar cpu that GDB supports.\n\n\
3196 When set to \"none\", errata workarounds are disabled."),
3197 &set_record_btrace_cpu_cmdlist,
3198 "set record btrace cpu ", 1,
3199 &set_record_btrace_cmdlist);
3200
3201 add_cmd ("auto", class_support, cmd_set_record_btrace_cpu_auto, _("\
3202 Automatically determine the cpu to be used for trace decode."),
3203 &set_record_btrace_cpu_cmdlist);
3204
3205 add_cmd ("none", class_support, cmd_set_record_btrace_cpu_none, _("\
3206 Do not enable errata workarounds for trace decode."),
3207 &set_record_btrace_cpu_cmdlist);
3208
3209 add_cmd ("cpu", class_support, cmd_show_record_btrace_cpu, _("\
3210 Show the cpu to be used for trace decode."),
3211 &show_record_btrace_cmdlist);
3212
3213 add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
3214 _("Set record btrace bts options."),
3215 &set_record_btrace_bts_cmdlist,
3216 "set record btrace bts ", 0, &set_record_btrace_cmdlist);
3217
3218 add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
3219 _("Show record btrace bts options."),
3220 &show_record_btrace_bts_cmdlist,
3221 "show record btrace bts ", 0, &show_record_btrace_cmdlist);
3222
3223 add_setshow_uinteger_cmd ("buffer-size", no_class,
3224 &record_btrace_conf.bts.size,
3225 _("Set the record/replay bts buffer size."),
3226 _("Show the record/replay bts buffer size."), _("\
3227 When starting recording, request a trace buffer of this size. \
3228 The actual buffer size may differ from the requested size. \
3229 Use \"info record\" to see the actual buffer size.\n\n\
3230 Bigger buffers allow longer recording but also take more time to process \
3231 the recorded execution trace.\n\n\
3232 The trace buffer size may not be changed while recording."), NULL,
3233 show_record_bts_buffer_size_value,
3234 &set_record_btrace_bts_cmdlist,
3235 &show_record_btrace_bts_cmdlist);
3236
3237 add_prefix_cmd ("pt", class_support, cmd_set_record_btrace_pt,
3238 _("Set record btrace pt options."),
3239 &set_record_btrace_pt_cmdlist,
3240 "set record btrace pt ", 0, &set_record_btrace_cmdlist);
3241
3242 add_prefix_cmd ("pt", class_support, cmd_show_record_btrace_pt,
3243 _("Show record btrace pt options."),
3244 &show_record_btrace_pt_cmdlist,
3245 "show record btrace pt ", 0, &show_record_btrace_cmdlist);
3246
3247 add_setshow_uinteger_cmd ("buffer-size", no_class,
3248 &record_btrace_conf.pt.size,
3249 _("Set the record/replay pt buffer size."),
3250 _("Show the record/replay pt buffer size."), _("\
3251 Bigger buffers allow longer recording but also take more time to process \
3252 the recorded execution.\n\
3253 The actual buffer size may differ from the requested size. Use \"info record\" \
3254 to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
3255 &set_record_btrace_pt_cmdlist,
3256 &show_record_btrace_pt_cmdlist);
3257
3258 add_target (record_btrace_target_info, record_btrace_target_open);
3259
3260 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
3261 xcalloc, xfree);
3262
3263 record_btrace_conf.bts.size = 64 * 1024;
3264 record_btrace_conf.pt.size = 16 * 1024;
3265 }