Simplify exception handling
deliverable/binutils-gdb.git: gdb/record-btrace.c
1 /* Branch trace support for GDB, the GNU debugger.
2
3 Copyright (C) 2013-2019 Free Software Foundation, Inc.
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include "record.h"
24 #include "record-btrace.h"
25 #include "gdbthread.h"
26 #include "target.h"
27 #include "gdbcmd.h"
28 #include "disasm.h"
29 #include "observable.h"
30 #include "cli/cli-utils.h"
31 #include "source.h"
32 #include "ui-out.h"
33 #include "symtab.h"
34 #include "filenames.h"
35 #include "regcache.h"
36 #include "frame-unwind.h"
37 #include "hashtab.h"
38 #include "infrun.h"
39 #include "event-loop.h"
40 #include "inf-loop.h"
41 #include "common/vec.h"
42 #include "inferior.h"
43 #include <algorithm>
44
45 static const target_info record_btrace_target_info = {
46 "record-btrace",
47 N_("Branch tracing target"),
48 N_("Collect control-flow trace and provide the execution history.")
49 };
50
51 /* The target_ops of record-btrace. */
52
53 class record_btrace_target final : public target_ops
54 {
55 public:
56 const target_info &info () const override
57 { return record_btrace_target_info; }
58
59 strata stratum () const override { return record_stratum; }
60
61 void close () override;
62 void async (int) override;
63
64 void detach (inferior *inf, int from_tty) override
65 { record_detach (this, inf, from_tty); }
66
67 void disconnect (const char *, int) override;
68
69 void mourn_inferior () override
70 { record_mourn_inferior (this); }
71
72 void kill () override
73 { record_kill (this); }
74
75 enum record_method record_method (ptid_t ptid) override;
76
77 void stop_recording () override;
78 void info_record () override;
79
80 void insn_history (int size, gdb_disassembly_flags flags) override;
81 void insn_history_from (ULONGEST from, int size,
82 gdb_disassembly_flags flags) override;
83 void insn_history_range (ULONGEST begin, ULONGEST end,
84 gdb_disassembly_flags flags) override;
85 void call_history (int size, record_print_flags flags) override;
86 void call_history_from (ULONGEST begin, int size, record_print_flags flags)
87 override;
88 void call_history_range (ULONGEST begin, ULONGEST end, record_print_flags flags)
89 override;
90
91 bool record_is_replaying (ptid_t ptid) override;
92 bool record_will_replay (ptid_t ptid, int dir) override;
93 void record_stop_replaying () override;
94
95 enum target_xfer_status xfer_partial (enum target_object object,
96 const char *annex,
97 gdb_byte *readbuf,
98 const gdb_byte *writebuf,
99 ULONGEST offset, ULONGEST len,
100 ULONGEST *xfered_len) override;
101
102 int insert_breakpoint (struct gdbarch *,
103 struct bp_target_info *) override;
104 int remove_breakpoint (struct gdbarch *, struct bp_target_info *,
105 enum remove_bp_reason) override;
106
107 void fetch_registers (struct regcache *, int) override;
108
109 void store_registers (struct regcache *, int) override;
110 void prepare_to_store (struct regcache *) override;
111
112 const struct frame_unwind *get_unwinder () override;
113
114 const struct frame_unwind *get_tailcall_unwinder () override;
115
116 void commit_resume () override;
117 void resume (ptid_t, int, enum gdb_signal) override;
118 ptid_t wait (ptid_t, struct target_waitstatus *, int) override;
119
120 void stop (ptid_t) override;
121 void update_thread_list () override;
122 bool thread_alive (ptid_t ptid) override;
123 void goto_record_begin () override;
124 void goto_record_end () override;
125 void goto_record (ULONGEST insn) override;
126
127 bool can_execute_reverse () override;
128
129 bool stopped_by_sw_breakpoint () override;
130 bool supports_stopped_by_sw_breakpoint () override;
131
132 bool stopped_by_hw_breakpoint () override;
133 bool supports_stopped_by_hw_breakpoint () override;
134
135 enum exec_direction_kind execution_direction () override;
136 void prepare_to_generate_core () override;
137 void done_generating_core () override;
138 };
139
140 static record_btrace_target record_btrace_ops;
141
144 /* Token associated with a new-thread observer enabling branch tracing
145 for the new thread. */
146 static const gdb::observers::token record_btrace_thread_observer_token {};
147
148 /* Memory access types used in set/show record btrace replay-memory-access. */
149 static const char replay_memory_access_read_only[] = "read-only";
150 static const char replay_memory_access_read_write[] = "read-write";
151 static const char *const replay_memory_access_types[] =
152 {
153 replay_memory_access_read_only,
154 replay_memory_access_read_write,
155 NULL
156 };
157
158 /* The currently allowed replay memory access type. */
159 static const char *replay_memory_access = replay_memory_access_read_only;
160
161 /* The cpu state kinds. */
162 enum record_btrace_cpu_state_kind
163 {
164 CS_AUTO,
165 CS_NONE,
166 CS_CPU
167 };
168
169 /* The current cpu state. */
170 static enum record_btrace_cpu_state_kind record_btrace_cpu_state = CS_AUTO;
171
172 /* The current cpu for trace decode. */
173 static struct btrace_cpu record_btrace_cpu;
174
175 /* Command lists for "set/show record btrace". */
176 static struct cmd_list_element *set_record_btrace_cmdlist;
177 static struct cmd_list_element *show_record_btrace_cmdlist;
178
179 /* The execution direction of the last resume we got. See record-full.c. */
180 static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;
181
182 /* The async event handler for reverse/replay execution. */
183 static struct async_event_handler *record_btrace_async_inferior_event_handler;
184
185 /* A flag indicating that we are currently generating a core file. */
186 static int record_btrace_generating_corefile;
187
188 /* The current branch trace configuration. */
189 static struct btrace_config record_btrace_conf;
190
191 /* Command list for "record btrace". */
192 static struct cmd_list_element *record_btrace_cmdlist;
193
194 /* Command lists for "set/show record btrace bts". */
195 static struct cmd_list_element *set_record_btrace_bts_cmdlist;
196 static struct cmd_list_element *show_record_btrace_bts_cmdlist;
197
198 /* Command lists for "set/show record btrace pt". */
199 static struct cmd_list_element *set_record_btrace_pt_cmdlist;
200 static struct cmd_list_element *show_record_btrace_pt_cmdlist;
201
202 /* Command list for "set record btrace cpu". */
203 static struct cmd_list_element *set_record_btrace_cpu_cmdlist;
204
205 /* Print a record-btrace debug message. Use do ... while (0) to avoid
206 ambiguities when used in if statements. */
207
208 #define DEBUG(msg, args...) \
209 do \
210 { \
211 if (record_debug != 0) \
212 fprintf_unfiltered (gdb_stdlog, \
213 "[record-btrace] " msg "\n", ##args); \
214 } \
215 while (0)
216
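/* Editor's illustration (not in the original source): the do ... while (0)
   wrapper is what lets DEBUG behave like a single statement.  A minimal
   sketch, assuming a hypothetical FROM_TTY flag:

     if (from_tty)
       DEBUG ("opened from a terminal");   // expands to do { ... } while (0);
     else
       DEBUG ("opened from a script");     // 'else' still binds correctly

   Had the macro expanded to a bare { ... } block, the ';' after the first
   DEBUG would terminate the 'if' and the 'else' would fail to compile.  */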
217
218 /* Return the cpu configured by the user. Returns NULL if the cpu was
219 configured as auto. */
220 const struct btrace_cpu *
221 record_btrace_get_cpu (void)
222 {
223 switch (record_btrace_cpu_state)
224 {
225 case CS_AUTO:
226 return nullptr;
227
228 case CS_NONE:
229 record_btrace_cpu.vendor = CV_UNKNOWN;
230 /* Fall through. */
231 case CS_CPU:
232 return &record_btrace_cpu;
233 }
234
235 error (_("Internal error: bad record btrace cpu state."));
236 }
237
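/* Editor's illustration (not in the original source): how the three states
   map to "set record btrace cpu" and to what trace decode sees:

     const struct btrace_cpu *cpu = record_btrace_get_cpu ();

     if (cpu == nullptr)
       ;  // CS_AUTO ("auto"): decode for the CPU the trace was recorded on.
     else if (cpu->vendor == CV_UNKNOWN)
       ;  // CS_NONE ("none"): disable CPU-specific errata workarounds.
     else
       ;  // CS_CPU: decode for the CPU the user specified.  */
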
238 /* Update the branch trace for the current thread and return a pointer to its
239 thread_info.
240
241 Throws an error if there is no thread or no trace. This function never
242 returns NULL. */
243
244 static struct thread_info *
245 require_btrace_thread (void)
246 {
247 DEBUG ("require");
248
249 if (inferior_ptid == null_ptid)
250 error (_("No thread."));
251
252 thread_info *tp = inferior_thread ();
253
254 validate_registers_access ();
255
256 btrace_fetch (tp, record_btrace_get_cpu ());
257
258 if (btrace_is_empty (tp))
259 error (_("No trace."));
260
261 return tp;
262 }
263
264 /* Update the branch trace for the current thread and return a pointer to its
265 branch trace information struct.
266
267 Throws an error if there is no thread or no trace. This function never
268 returns NULL. */
269
270 static struct btrace_thread_info *
271 require_btrace (void)
272 {
273 struct thread_info *tp;
274
275 tp = require_btrace_thread ();
276
277 return &tp->btrace;
278 }
279
280 /* Enable branch tracing for one thread. Warn on errors. */
281
282 static void
283 record_btrace_enable_warn (struct thread_info *tp)
284 {
285 try
286 {
287 btrace_enable (tp, &record_btrace_conf);
288 }
289 catch (const gdb_exception_error &error)
290 {
291 warning ("%s", error.what ());
292 }
294 }
295
296 /* Enable automatic tracing of new threads. */
297
298 static void
299 record_btrace_auto_enable (void)
300 {
301 DEBUG ("attach thread observer");
302
303 gdb::observers::new_thread.attach (record_btrace_enable_warn,
304 record_btrace_thread_observer_token);
305 }
306
307 /* Disable automatic tracing of new threads. */
308
309 static void
310 record_btrace_auto_disable (void)
311 {
312 DEBUG ("detach thread observer");
313
314 gdb::observers::new_thread.detach (record_btrace_thread_observer_token);
315 }
316
317 /* The record-btrace async event handler function. */
318
319 static void
320 record_btrace_handle_async_inferior_event (gdb_client_data data)
321 {
322 inferior_event_handler (INF_REG_EVENT, NULL);
323 }
324
325 /* See record-btrace.h. */
326
327 void
328 record_btrace_push_target (void)
329 {
330 const char *format;
331
332 record_btrace_auto_enable ();
333
334 push_target (&record_btrace_ops);
335
336 record_btrace_async_inferior_event_handler
337 = create_async_event_handler (record_btrace_handle_async_inferior_event,
338 NULL);
339 record_btrace_generating_corefile = 0;
340
341 format = btrace_format_short_string (record_btrace_conf.format);
342 gdb::observers::record_changed.notify (current_inferior (), 1, "btrace", format);
343 }
344
345 /* Disable btrace on a set of threads on scope exit. */
346
347 struct scoped_btrace_disable
348 {
349 scoped_btrace_disable () = default;
350
351 DISABLE_COPY_AND_ASSIGN (scoped_btrace_disable);
352
353 ~scoped_btrace_disable ()
354 {
355 for (thread_info *tp : m_threads)
356 btrace_disable (tp);
357 }
358
359 void add_thread (thread_info *thread)
360 {
361 m_threads.push_front (thread);
362 }
363
364 void discard ()
365 {
366 m_threads.clear ();
367 }
368
369 private:
370 std::forward_list<thread_info *> m_threads;
371 };
372
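/* Editor's illustration (not in the original source): the commit/rollback
   pattern this RAII type enables, as used by record_btrace_target_open
   below:

     {
       scoped_btrace_disable btrace_disable;

       for (thread_info *tp : all_non_exited_threads ())
         {
           btrace_enable (tp, &record_btrace_conf);  // may throw
           btrace_disable.add_thread (tp);           // registered for rollback
         }

       btrace_disable.discard ();  // success: keep tracing enabled
     }  // on an exception, the destructor disables the registered threads  */
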
373 /* Open target record-btrace. */
374
375 static void
376 record_btrace_target_open (const char *args, int from_tty)
377 {
378 /* If we fail to enable btrace for one thread, disable it for the threads for
379 which it was successfully enabled. */
380 scoped_btrace_disable btrace_disable;
381
382 DEBUG ("open");
383
384 record_preopen ();
385
386 if (!target_has_execution)
387 error (_("The program is not being run."));
388
389 for (thread_info *tp : all_non_exited_threads ())
390 if (args == NULL || *args == 0 || number_is_in_list (args, tp->global_num))
391 {
392 btrace_enable (tp, &record_btrace_conf);
393
394 btrace_disable.add_thread (tp);
395 }
396
397 record_btrace_push_target ();
398
399 btrace_disable.discard ();
400 }
401
402 /* The stop_recording method of target record-btrace. */
403
404 void
405 record_btrace_target::stop_recording ()
406 {
407 DEBUG ("stop recording");
408
409 record_btrace_auto_disable ();
410
411 for (thread_info *tp : all_non_exited_threads ())
412 if (tp->btrace.target != NULL)
413 btrace_disable (tp);
414 }
415
416 /* The disconnect method of target record-btrace. */
417
418 void
419 record_btrace_target::disconnect (const char *args,
420 int from_tty)
421 {
422 struct target_ops *beneath = this->beneath ();
423
424 /* Do not stop recording, just clean up GDB side. */
425 unpush_target (this);
426
427 /* Forward disconnect. */
428 beneath->disconnect (args, from_tty);
429 }
430
431 /* The close method of target record-btrace. */
432
433 void
434 record_btrace_target::close ()
435 {
436 if (record_btrace_async_inferior_event_handler != NULL)
437 delete_async_event_handler (&record_btrace_async_inferior_event_handler);
438
439 /* Make sure automatic recording gets disabled even if we did not stop
440 recording before closing the record-btrace target. */
441 record_btrace_auto_disable ();
442
443 /* We should have already stopped recording.
444 Tear down btrace in case we have not. */
445 for (thread_info *tp : all_non_exited_threads ())
446 btrace_teardown (tp);
447 }
448
449 /* The async method of target record-btrace. */
450
451 void
452 record_btrace_target::async (int enable)
453 {
454 if (enable)
455 mark_async_event_handler (record_btrace_async_inferior_event_handler);
456 else
457 clear_async_event_handler (record_btrace_async_inferior_event_handler);
458
459 this->beneath ()->async (enable);
460 }
461
462 /* Scale *SIZE to the largest unit that divides it evenly and return the corresponding human-readable size suffix. */
463
464 static const char *
465 record_btrace_adjust_size (unsigned int *size)
466 {
467 unsigned int sz;
468
469 sz = *size;
470
471 if ((sz & ((1u << 30) - 1)) == 0)
472 {
473 *size = sz >> 30;
474 return "GB";
475 }
476 else if ((sz & ((1u << 20) - 1)) == 0)
477 {
478 *size = sz >> 20;
479 return "MB";
480 }
481 else if ((sz & ((1u << 10) - 1)) == 0)
482 {
483 *size = sz >> 10;
484 return "kB";
485 }
486 else
487 return "";
488 }
489
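/* Editor's illustration (not in the original source): the masks above test
   divisibility by a power of two, not magnitude, so a suffix is only used
   when the size is an exact multiple of that unit:

     unsigned int size = 2u << 20;                            // 2 MiB
     const char *suffix = record_btrace_adjust_size (&size);  // size == 2, "MB"

     size = (1u << 20) + 1024;                                // not MiB-aligned
     suffix = record_btrace_adjust_size (&size);              // size == 1025, "kB"  */
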
490 /* Print a BTS configuration. */
491
492 static void
493 record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
494 {
495 const char *suffix;
496 unsigned int size;
497
498 size = conf->size;
499 if (size > 0)
500 {
501 suffix = record_btrace_adjust_size (&size);
502 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
503 }
504 }
505
506 /* Print an Intel Processor Trace configuration. */
507
508 static void
509 record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
510 {
511 const char *suffix;
512 unsigned int size;
513
514 size = conf->size;
515 if (size > 0)
516 {
517 suffix = record_btrace_adjust_size (&size);
518 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
519 }
520 }
521
522 /* Print a branch tracing configuration. */
523
524 static void
525 record_btrace_print_conf (const struct btrace_config *conf)
526 {
527 printf_unfiltered (_("Recording format: %s.\n"),
528 btrace_format_string (conf->format));
529
530 switch (conf->format)
531 {
532 case BTRACE_FORMAT_NONE:
533 return;
534
535 case BTRACE_FORMAT_BTS:
536 record_btrace_print_bts_conf (&conf->bts);
537 return;
538
539 case BTRACE_FORMAT_PT:
540 record_btrace_print_pt_conf (&conf->pt);
541 return;
542 }
543
544 internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
545 }
546
547 /* The info_record method of target record-btrace. */
548
549 void
550 record_btrace_target::info_record ()
551 {
552 struct btrace_thread_info *btinfo;
553 const struct btrace_config *conf;
554 struct thread_info *tp;
555 unsigned int insns, calls, gaps;
556
557 DEBUG ("info");
558
559 tp = find_thread_ptid (inferior_ptid);
560 if (tp == NULL)
561 error (_("No thread."));
562
563 validate_registers_access ();
564
565 btinfo = &tp->btrace;
566
567 conf = ::btrace_conf (btinfo);
568 if (conf != NULL)
569 record_btrace_print_conf (conf);
570
571 btrace_fetch (tp, record_btrace_get_cpu ());
572
573 insns = 0;
574 calls = 0;
575 gaps = 0;
576
577 if (!btrace_is_empty (tp))
578 {
579 struct btrace_call_iterator call;
580 struct btrace_insn_iterator insn;
581
582 btrace_call_end (&call, btinfo);
583 btrace_call_prev (&call, 1);
584 calls = btrace_call_number (&call);
585
586 btrace_insn_end (&insn, btinfo);
587 insns = btrace_insn_number (&insn);
588
589 /* If the last instruction is not a gap, it is the current instruction
590 that is not actually part of the record. */
591 if (btrace_insn_get (&insn) != NULL)
592 insns -= 1;
593
594 gaps = btinfo->ngaps;
595 }
596
597 printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
598 "for thread %s (%s).\n"), insns, calls, gaps,
599 print_thread_id (tp),
600 target_pid_to_str (tp->ptid).c_str ());
601
602 if (btrace_is_replaying (tp))
603 printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
604 btrace_insn_number (btinfo->replay));
605 }
606
607 /* Print a decode error. */
608
609 static void
610 btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
611 enum btrace_format format)
612 {
613 const char *errstr = btrace_decode_error (format, errcode);
614
615 uiout->text (_("["));
616 /* ERRCODE > 0 indicates notifications on BTRACE_FORMAT_PT. */
617 if (!(format == BTRACE_FORMAT_PT && errcode > 0))
618 {
619 uiout->text (_("decode error ("));
620 uiout->field_int ("errcode", errcode);
621 uiout->text (_("): "));
622 }
623 uiout->text (errstr);
624 uiout->text (_("]\n"));
625 }
626
627 /* Print an unsigned int. */
628
629 static void
630 ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
631 {
632 uiout->field_fmt (fld, "%u", val);
633 }
634
635 /* A range of source lines. */
636
637 struct btrace_line_range
638 {
639 /* The symtab this line is from. */
640 struct symtab *symtab;
641
642 /* The first line (inclusive). */
643 int begin;
644
645 /* The last line (exclusive). */
646 int end;
647 };
648
649 /* Construct a line range. */
650
651 static struct btrace_line_range
652 btrace_mk_line_range (struct symtab *symtab, int begin, int end)
653 {
654 struct btrace_line_range range;
655
656 range.symtab = symtab;
657 range.begin = begin;
658 range.end = end;
659
660 return range;
661 }
662
663 /* Add a line to a line range. */
664
665 static struct btrace_line_range
666 btrace_line_range_add (struct btrace_line_range range, int line)
667 {
668 if (range.end <= range.begin)
669 {
670 /* This is the first entry. */
671 range.begin = line;
672 range.end = line + 1;
673 }
674 else if (line < range.begin)
675 range.begin = line;
676 else if (range.end <= line)
677 range.end = line + 1;
678
679 return range;
680 }
681
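/* Editor's illustration (not in the original source): END is exclusive,
   matching the 'for (line = begin; line < end; ++line)' loop in
   btrace_print_lines below:

     struct btrace_line_range r = btrace_mk_line_range (symtab, 0, 0);
     r = btrace_line_range_add (r, 5);   // [5, 6): line 5 only
     r = btrace_line_range_add (r, 7);   // [5, 8): lines 5..7
     r = btrace_line_range_add (r, 3);   // [3, 8): lines 3..7  */
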
682 /* Return non-zero if RANGE is empty, zero otherwise. */
683
684 static int
685 btrace_line_range_is_empty (struct btrace_line_range range)
686 {
687 return range.end <= range.begin;
688 }
689
690 /* Return non-zero if LHS contains RHS, zero otherwise. */
691
692 static int
693 btrace_line_range_contains_range (struct btrace_line_range lhs,
694 struct btrace_line_range rhs)
695 {
696 return ((lhs.symtab == rhs.symtab)
697 && (lhs.begin <= rhs.begin)
698 && (rhs.end <= lhs.end));
699 }
700
701 /* Find the line range associated with PC. */
702
703 static struct btrace_line_range
704 btrace_find_line_range (CORE_ADDR pc)
705 {
706 struct btrace_line_range range;
707 struct linetable_entry *lines;
708 struct linetable *ltable;
709 struct symtab *symtab;
710 int nlines, i;
711
712 symtab = find_pc_line_symtab (pc);
713 if (symtab == NULL)
714 return btrace_mk_line_range (NULL, 0, 0);
715
716 ltable = SYMTAB_LINETABLE (symtab);
717 if (ltable == NULL)
718 return btrace_mk_line_range (symtab, 0, 0);
719
720 nlines = ltable->nitems;
721 lines = ltable->item;
722 if (nlines <= 0)
723 return btrace_mk_line_range (symtab, 0, 0);
724
725 range = btrace_mk_line_range (symtab, 0, 0);
726 for (i = 0; i < nlines - 1; i++)
727 {
728 if ((lines[i].pc == pc) && (lines[i].line != 0))
729 range = btrace_line_range_add (range, lines[i].line);
730 }
731
732 return range;
733 }
734
735 /* Print source lines in LINES to UIOUT.
736
737 SRC_AND_ASM_TUPLE and ASM_LIST hold the emitters for the last printed
738 source line and the instructions corresponding to that source line. When
739 printing a new source line, we close the open emitters and open new ones
740 for the new source line. If the source line range in LINES is not empty,
741 this function leaves the emitters for the last printed source line open
742 so instructions can be added to them. */
743
744 static void
745 btrace_print_lines (struct btrace_line_range lines, struct ui_out *uiout,
746 gdb::optional<ui_out_emit_tuple> *src_and_asm_tuple,
747 gdb::optional<ui_out_emit_list> *asm_list,
748 gdb_disassembly_flags flags)
749 {
750 print_source_lines_flags psl_flags;
751
752 if (flags & DISASSEMBLY_FILENAME)
753 psl_flags |= PRINT_SOURCE_LINES_FILENAME;
754
755 for (int line = lines.begin; line < lines.end; ++line)
756 {
757 asm_list->reset ();
758
759 src_and_asm_tuple->emplace (uiout, "src_and_asm_line");
760
761 print_source_lines (lines.symtab, line, line + 1, psl_flags);
762
763 asm_list->emplace (uiout, "line_asm_insn");
764 }
765 }
766
767 /* Disassemble a section of the recorded instruction trace. */
768
769 static void
770 btrace_insn_history (struct ui_out *uiout,
771 const struct btrace_thread_info *btinfo,
772 const struct btrace_insn_iterator *begin,
773 const struct btrace_insn_iterator *end,
774 gdb_disassembly_flags flags)
775 {
776 DEBUG ("itrace (0x%x): [%u; %u)", (unsigned) flags,
777 btrace_insn_number (begin), btrace_insn_number (end));
778
779 flags |= DISASSEMBLY_SPECULATIVE;
780
781 struct gdbarch *gdbarch = target_gdbarch ();
782 btrace_line_range last_lines = btrace_mk_line_range (NULL, 0, 0);
783
784 ui_out_emit_list list_emitter (uiout, "asm_insns");
785
786 gdb::optional<ui_out_emit_tuple> src_and_asm_tuple;
787 gdb::optional<ui_out_emit_list> asm_list;
788
789 gdb_pretty_print_disassembler disasm (gdbarch);
790
791 for (btrace_insn_iterator it = *begin; btrace_insn_cmp (&it, end) != 0;
792 btrace_insn_next (&it, 1))
793 {
794 const struct btrace_insn *insn;
795
796 insn = btrace_insn_get (&it);
797
798 /* A NULL instruction indicates a gap in the trace. */
799 if (insn == NULL)
800 {
801 const struct btrace_config *conf;
802
803 conf = btrace_conf (btinfo);
804
805 /* We have trace so we must have a configuration. */
806 gdb_assert (conf != NULL);
807
808 uiout->field_fmt ("insn-number", "%u",
809 btrace_insn_number (&it));
810 uiout->text ("\t");
811
812 btrace_ui_out_decode_error (uiout, btrace_insn_get_error (&it),
813 conf->format);
814 }
815 else
816 {
817 struct disasm_insn dinsn;
818
819 if ((flags & DISASSEMBLY_SOURCE) != 0)
820 {
821 struct btrace_line_range lines;
822
823 lines = btrace_find_line_range (insn->pc);
824 if (!btrace_line_range_is_empty (lines)
825 && !btrace_line_range_contains_range (last_lines, lines))
826 {
827 btrace_print_lines (lines, uiout, &src_and_asm_tuple, &asm_list,
828 flags);
829 last_lines = lines;
830 }
831 else if (!src_and_asm_tuple.has_value ())
832 {
833 gdb_assert (!asm_list.has_value ());
834
835 src_and_asm_tuple.emplace (uiout, "src_and_asm_line");
836
837 /* No source information. */
838 asm_list.emplace (uiout, "line_asm_insn");
839 }
840
841 gdb_assert (src_and_asm_tuple.has_value ());
842 gdb_assert (asm_list.has_value ());
843 }
844
845 memset (&dinsn, 0, sizeof (dinsn));
846 dinsn.number = btrace_insn_number (&it);
847 dinsn.addr = insn->pc;
848
849 if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
850 dinsn.is_speculative = 1;
851
852 disasm.pretty_print_insn (uiout, &dinsn, flags);
853 }
854 }
855 }
856
857 /* The insn_history method of target record-btrace. */
858
859 void
860 record_btrace_target::insn_history (int size, gdb_disassembly_flags flags)
861 {
862 struct btrace_thread_info *btinfo;
863 struct btrace_insn_history *history;
864 struct btrace_insn_iterator begin, end;
865 struct ui_out *uiout;
866 unsigned int context, covered;
867
868 uiout = current_uiout;
869 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
870 context = abs (size);
871 if (context == 0)
872 error (_("Bad record instruction-history-size."));
873
874 btinfo = require_btrace ();
875 history = btinfo->insn_history;
876 if (history == NULL)
877 {
878 struct btrace_insn_iterator *replay;
879
880 DEBUG ("insn-history (0x%x): %d", (unsigned) flags, size);
881
882 /* If we're replaying, we start at the replay position. Otherwise, we
883 start at the tail of the trace. */
884 replay = btinfo->replay;
885 if (replay != NULL)
886 begin = *replay;
887 else
888 btrace_insn_end (&begin, btinfo);
889
890 /* We start from here and expand in the requested direction. Then we
891 expand in the other direction, as well, to fill up any remaining
892 context. */
893 end = begin;
894 if (size < 0)
895 {
896 /* We want the current position covered, as well. */
897 covered = btrace_insn_next (&end, 1);
898 covered += btrace_insn_prev (&begin, context - covered);
899 covered += btrace_insn_next (&end, context - covered);
900 }
901 else
902 {
903 covered = btrace_insn_next (&end, context);
904 covered += btrace_insn_prev (&begin, context - covered);
905 }
906 }
907 else
908 {
909 begin = history->begin;
910 end = history->end;
911
912 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", (unsigned) flags, size,
913 btrace_insn_number (&begin), btrace_insn_number (&end));
914
915 if (size < 0)
916 {
917 end = begin;
918 covered = btrace_insn_prev (&begin, context);
919 }
920 else
921 {
922 begin = end;
923 covered = btrace_insn_next (&end, context);
924 }
925 }
926
927 if (covered > 0)
928 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
929 else
930 {
931 if (size < 0)
932 printf_unfiltered (_("At the start of the branch trace record.\n"));
933 else
934 printf_unfiltered (_("At the end of the branch trace record.\n"));
935 }
936
937 btrace_set_insn_history (btinfo, &begin, &end);
938 }
939
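/* Editor's illustration (not in the original source): the bidirectional
   expansion above.  For "record instruction-history" with a context of 10
   when only 3 instructions follow the start position, the remaining 7 are
   taken from before it:

     covered  = btrace_insn_next (&end, 10);        // stops early, e.g. 3
     covered += btrace_insn_prev (&begin, 10 - 3);  // fills the rest backwards  */
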
940 /* The insn_history_range method of target record-btrace. */
941
942 void
943 record_btrace_target::insn_history_range (ULONGEST from, ULONGEST to,
944 gdb_disassembly_flags flags)
945 {
946 struct btrace_thread_info *btinfo;
947 struct btrace_insn_iterator begin, end;
948 struct ui_out *uiout;
949 unsigned int low, high;
950 int found;
951
952 uiout = current_uiout;
953 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
954 low = from;
955 high = to;
956
957 DEBUG ("insn-history (0x%x): [%u; %u)", (unsigned) flags, low, high);
958
959 /* Check for wrap-arounds. */
960 if (low != from || high != to)
961 error (_("Bad range."));
962
963 if (high < low)
964 error (_("Bad range."));
965
966 btinfo = require_btrace ();
967
968 found = btrace_find_insn_by_number (&begin, btinfo, low);
969 if (found == 0)
970 error (_("Range out of bounds."));
971
972 found = btrace_find_insn_by_number (&end, btinfo, high);
973 if (found == 0)
974 {
975 /* Silently truncate the range. */
976 btrace_insn_end (&end, btinfo);
977 }
978 else
979 {
980 /* We want both begin and end to be inclusive. */
981 btrace_insn_next (&end, 1);
982 }
983
984 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
985 btrace_set_insn_history (btinfo, &begin, &end);
986 }
987
988 /* The insn_history_from method of target record-btrace. */
989
990 void
991 record_btrace_target::insn_history_from (ULONGEST from, int size,
992 gdb_disassembly_flags flags)
993 {
994 ULONGEST begin, end, context;
995
996 context = abs (size);
997 if (context == 0)
998 error (_("Bad record instruction-history-size."));
999
1000 if (size < 0)
1001 {
1002 end = from;
1003
1004 if (from < context)
1005 begin = 0;
1006 else
1007 begin = from - context + 1;
1008 }
1009 else
1010 {
1011 begin = from;
1012 end = from + context - 1;
1013
1014 /* Check for wrap-around. */
1015 if (end < begin)
1016 end = ULONGEST_MAX;
1017 }
1018
1019 insn_history_range (begin, end, flags);
1020 }
1021
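/* Editor's illustration (not in the original source): how FROM and SIZE map
   to the inclusive range handed to insn_history_range:

     insn_history_from (100, -10, flags);  // begin = 100 - 10 + 1 = 91, end = 100
     insn_history_from (100, +10, flags);  // begin = 100, end = 100 + 10 - 1 = 109  */
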
1022 /* Print the instruction number range for a function call history line. */
1023
1024 static void
1025 btrace_call_history_insn_range (struct ui_out *uiout,
1026 const struct btrace_function *bfun)
1027 {
1028 unsigned int begin, end, size;
1029
1030 size = bfun->insn.size ();
1031 gdb_assert (size > 0);
1032
1033 begin = bfun->insn_offset;
1034 end = begin + size - 1;
1035
1036 ui_out_field_uint (uiout, "insn begin", begin);
1037 uiout->text (",");
1038 ui_out_field_uint (uiout, "insn end", end);
1039 }
1040
1041 /* Compute the lowest and highest source line for the instructions in BFUN
1042 and return them in PBEGIN and PEND.
1043 Ignore instructions that can't be mapped to BFUN, e.g. instructions that
1044 result from inlining or macro expansion. */
1045
1046 static void
1047 btrace_compute_src_line_range (const struct btrace_function *bfun,
1048 int *pbegin, int *pend)
1049 {
1050 struct symtab *symtab;
1051 struct symbol *sym;
1052 int begin, end;
1053
1054 begin = INT_MAX;
1055 end = INT_MIN;
1056
1057 sym = bfun->sym;
1058 if (sym == NULL)
1059 goto out;
1060
1061 symtab = symbol_symtab (sym);
1062
1063 for (const btrace_insn &insn : bfun->insn)
1064 {
1065 struct symtab_and_line sal;
1066
1067 sal = find_pc_line (insn.pc, 0);
1068 if (sal.symtab != symtab || sal.line == 0)
1069 continue;
1070
1071 begin = std::min (begin, sal.line);
1072 end = std::max (end, sal.line);
1073 }
1074
1075 out:
1076 *pbegin = begin;
1077 *pend = end;
1078 }
1079
1080 /* Print the source line information for a function call history line. */
1081
1082 static void
1083 btrace_call_history_src_line (struct ui_out *uiout,
1084 const struct btrace_function *bfun)
1085 {
1086 struct symbol *sym;
1087 int begin, end;
1088
1089 sym = bfun->sym;
1090 if (sym == NULL)
1091 return;
1092
1093 uiout->field_string ("file",
1094 symtab_to_filename_for_display (symbol_symtab (sym)),
1095 ui_out_style_kind::FILE);
1096
1097 btrace_compute_src_line_range (bfun, &begin, &end);
1098 if (end < begin)
1099 return;
1100
1101 uiout->text (":");
1102 uiout->field_int ("min line", begin);
1103
1104 if (end == begin)
1105 return;
1106
1107 uiout->text (",");
1108 uiout->field_int ("max line", end);
1109 }
1110
1111 /* Get the name of a branch trace function. */
1112
1113 static const char *
1114 btrace_get_bfun_name (const struct btrace_function *bfun)
1115 {
1116 struct minimal_symbol *msym;
1117 struct symbol *sym;
1118
1119 if (bfun == NULL)
1120 return "??";
1121
1122 msym = bfun->msym;
1123 sym = bfun->sym;
1124
1125 if (sym != NULL)
1126 return SYMBOL_PRINT_NAME (sym);
1127 else if (msym != NULL)
1128 return MSYMBOL_PRINT_NAME (msym);
1129 else
1130 return "??";
1131 }
1132
1133 /* Disassemble a section of the recorded function trace. */
1134
1135 static void
1136 btrace_call_history (struct ui_out *uiout,
1137 const struct btrace_thread_info *btinfo,
1138 const struct btrace_call_iterator *begin,
1139 const struct btrace_call_iterator *end,
1140 int int_flags)
1141 {
1142 struct btrace_call_iterator it;
1143 record_print_flags flags = (enum record_print_flag) int_flags;
1144
1145 DEBUG ("ftrace (0x%x): [%u; %u)", int_flags, btrace_call_number (begin),
1146 btrace_call_number (end));
1147
1148 for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
1149 {
1150 const struct btrace_function *bfun;
1151 struct minimal_symbol *msym;
1152 struct symbol *sym;
1153
1154 bfun = btrace_call_get (&it);
1155 sym = bfun->sym;
1156 msym = bfun->msym;
1157
1158 /* Print the function index. */
1159 ui_out_field_uint (uiout, "index", bfun->number);
1160 uiout->text ("\t");
1161
1162 /* Indicate gaps in the trace. */
1163 if (bfun->errcode != 0)
1164 {
1165 const struct btrace_config *conf;
1166
1167 conf = btrace_conf (btinfo);
1168
1169 /* We have trace so we must have a configuration. */
1170 gdb_assert (conf != NULL);
1171
1172 btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);
1173
1174 continue;
1175 }
1176
1177 if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
1178 {
1179 int level = bfun->level + btinfo->level, i;
1180
1181 for (i = 0; i < level; ++i)
1182 uiout->text (" ");
1183 }
1184
1185 if (sym != NULL)
1186 uiout->field_string ("function", SYMBOL_PRINT_NAME (sym),
1187 ui_out_style_kind::FUNCTION);
1188 else if (msym != NULL)
1189 uiout->field_string ("function", MSYMBOL_PRINT_NAME (msym),
1190 ui_out_style_kind::FUNCTION);
1191 else if (!uiout->is_mi_like_p ())
1192 uiout->field_string ("function", "??",
1193 ui_out_style_kind::FUNCTION);
1194
1195 if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
1196 {
1197 uiout->text (_("\tinst "));
1198 btrace_call_history_insn_range (uiout, bfun);
1199 }
1200
1201 if ((flags & RECORD_PRINT_SRC_LINE) != 0)
1202 {
1203 uiout->text (_("\tat "));
1204 btrace_call_history_src_line (uiout, bfun);
1205 }
1206
1207 uiout->text ("\n");
1208 }
1209 }
1210
1211 /* The call_history method of target record-btrace. */
1212
1213 void
1214 record_btrace_target::call_history (int size, record_print_flags flags)
1215 {
1216 struct btrace_thread_info *btinfo;
1217 struct btrace_call_history *history;
1218 struct btrace_call_iterator begin, end;
1219 struct ui_out *uiout;
1220 unsigned int context, covered;
1221
1222 uiout = current_uiout;
1223 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
1224 context = abs (size);
1225 if (context == 0)
1226 error (_("Bad record function-call-history-size."));
1227
1228 btinfo = require_btrace ();
1229 history = btinfo->call_history;
1230 if (history == NULL)
1231 {
1232 struct btrace_insn_iterator *replay;
1233
1234 DEBUG ("call-history (0x%x): %d", (int) flags, size);
1235
1236 /* If we're replaying, we start at the replay position. Otherwise, we
1237 start at the tail of the trace. */
1238 replay = btinfo->replay;
1239 if (replay != NULL)
1240 {
1241 begin.btinfo = btinfo;
1242 begin.index = replay->call_index;
1243 }
1244 else
1245 btrace_call_end (&begin, btinfo);
1246
1247 /* We start from here and expand in the requested direction. Then we
1248 expand in the other direction, as well, to fill up any remaining
1249 context. */
1250 end = begin;
1251 if (size < 0)
1252 {
1253 /* We want the current position covered, as well. */
1254 covered = btrace_call_next (&end, 1);
1255 covered += btrace_call_prev (&begin, context - covered);
1256 covered += btrace_call_next (&end, context - covered);
1257 }
1258 else
1259 {
1260 covered = btrace_call_next (&end, context);
1261 covered += btrace_call_prev (&begin, context - covered);
1262 }
1263 }
1264 else
1265 {
1266 begin = history->begin;
1267 end = history->end;
1268
1269 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", (int) flags, size,
1270 btrace_call_number (&begin), btrace_call_number (&end));
1271
1272 if (size < 0)
1273 {
1274 end = begin;
1275 covered = btrace_call_prev (&begin, context);
1276 }
1277 else
1278 {
1279 begin = end;
1280 covered = btrace_call_next (&end, context);
1281 }
1282 }
1283
1284 if (covered > 0)
1285 btrace_call_history (uiout, btinfo, &begin, &end, flags);
1286 else
1287 {
1288 if (size < 0)
1289 printf_unfiltered (_("At the start of the branch trace record.\n"));
1290 else
1291 printf_unfiltered (_("At the end of the branch trace record.\n"));
1292 }
1293
1294 btrace_set_call_history (btinfo, &begin, &end);
1295 }
1296
1297 /* The call_history_range method of target record-btrace. */
1298
1299 void
1300 record_btrace_target::call_history_range (ULONGEST from, ULONGEST to,
1301 record_print_flags flags)
1302 {
1303 struct btrace_thread_info *btinfo;
1304 struct btrace_call_iterator begin, end;
1305 struct ui_out *uiout;
1306 unsigned int low, high;
1307 int found;
1308
1309 uiout = current_uiout;
1310 ui_out_emit_tuple tuple_emitter (uiout, "func history");
1311 low = from;
1312 high = to;
1313
1314 DEBUG ("call-history (0x%x): [%u; %u)", (int) flags, low, high);
1315
1316 /* Check for wrap-arounds. */
1317 if (low != from || high != to)
1318 error (_("Bad range."));
1319
1320 if (high < low)
1321 error (_("Bad range."));
1322
1323 btinfo = require_btrace ();
1324
1325 found = btrace_find_call_by_number (&begin, btinfo, low);
1326 if (found == 0)
1327 error (_("Range out of bounds."));
1328
1329 found = btrace_find_call_by_number (&end, btinfo, high);
1330 if (found == 0)
1331 {
1332 /* Silently truncate the range. */
1333 btrace_call_end (&end, btinfo);
1334 }
1335 else
1336 {
1337 /* We want both begin and end to be inclusive. */
1338 btrace_call_next (&end, 1);
1339 }
1340
1341 btrace_call_history (uiout, btinfo, &begin, &end, flags);
1342 btrace_set_call_history (btinfo, &begin, &end);
1343 }
1344
1345 /* The call_history_from method of target record-btrace. */
1346
1347 void
1348 record_btrace_target::call_history_from (ULONGEST from, int size,
1349 record_print_flags flags)
1350 {
1351 ULONGEST begin, end, context;
1352
1353 context = abs (size);
1354 if (context == 0)
1355 error (_("Bad record function-call-history-size."));
1356
1357 if (size < 0)
1358 {
1359 end = from;
1360
1361 if (from < context)
1362 begin = 0;
1363 else
1364 begin = from - context + 1;
1365 }
1366 else
1367 {
1368 begin = from;
1369 end = from + context - 1;
1370
1371 /* Check for wrap-around. */
1372 if (end < begin)
1373 end = ULONGEST_MAX;
1374 }
1375
1376 call_history_range (begin, end, flags);
1377 }
1378
1379 /* The record_method method of target record-btrace. */
1380
1381 enum record_method
1382 record_btrace_target::record_method (ptid_t ptid)
1383 {
1384 struct thread_info * const tp = find_thread_ptid (ptid);
1385
1386 if (tp == NULL)
1387 error (_("No thread."));
1388
1389 if (tp->btrace.target == NULL)
1390 return RECORD_METHOD_NONE;
1391
1392 return RECORD_METHOD_BTRACE;
1393 }
1394
1395 /* The record_is_replaying method of target record-btrace. */
1396
1397 bool
1398 record_btrace_target::record_is_replaying (ptid_t ptid)
1399 {
1400 for (thread_info *tp : all_non_exited_threads (ptid))
1401 if (btrace_is_replaying (tp))
1402 return true;
1403
1404 return false;
1405 }
1406
1407 /* The record_will_replay method of target record-btrace. */
1408
1409 bool
1410 record_btrace_target::record_will_replay (ptid_t ptid, int dir)
1411 {
1412 return dir == EXEC_REVERSE || record_is_replaying (ptid);
1413 }
1414
1415 /* The xfer_partial method of target record-btrace. */
1416
1417 enum target_xfer_status
1418 record_btrace_target::xfer_partial (enum target_object object,
1419 const char *annex, gdb_byte *readbuf,
1420 const gdb_byte *writebuf, ULONGEST offset,
1421 ULONGEST len, ULONGEST *xfered_len)
1422 {
1423 /* Filter out requests that don't make sense during replay. */
1424 if (replay_memory_access == replay_memory_access_read_only
1425 && !record_btrace_generating_corefile
1426 && record_is_replaying (inferior_ptid))
1427 {
1428 switch (object)
1429 {
1430 case TARGET_OBJECT_MEMORY:
1431 {
1432 struct target_section *section;
1433
1434 /* We do not allow writing memory in general. */
1435 if (writebuf != NULL)
1436 {
1437 *xfered_len = len;
1438 return TARGET_XFER_UNAVAILABLE;
1439 }
1440
1441 /* We allow reading readonly memory. */
1442 section = target_section_by_addr (this, offset);
1443 if (section != NULL)
1444 {
1445 /* Check if the section we found is readonly. */
1446 if ((bfd_get_section_flags (section->the_bfd_section->owner,
1447 section->the_bfd_section)
1448 & SEC_READONLY) != 0)
1449 {
1450 /* Truncate the request to fit into this section. */
1451 len = std::min (len, section->endaddr - offset);
1452 break;
1453 }
1454 }
1455
1456 *xfered_len = len;
1457 return TARGET_XFER_UNAVAILABLE;
1458 }
1459 }
1460 }
1461
1462 /* Forward the request. */
1463 return this->beneath ()->xfer_partial (object, annex, readbuf, writebuf,
1464 offset, len, xfered_len);
1465 }
1466
1467 /* The insert_breakpoint method of target record-btrace. */
1468
1469 int
1470 record_btrace_target::insert_breakpoint (struct gdbarch *gdbarch,
1471 struct bp_target_info *bp_tgt)
1472 {
1473 const char *old;
1474 int ret;
1475
1476 /* Inserting breakpoints requires accessing memory. Allow it for the
1477 duration of this function. */
1478 old = replay_memory_access;
1479 replay_memory_access = replay_memory_access_read_write;
1480
1481 ret = 0;
1482 try
1483 {
1484 ret = this->beneath ()->insert_breakpoint (gdbarch, bp_tgt);
1485 }
1486 catch (const gdb_exception &except)
1487 {
1488 replay_memory_access = old;
1489 throw;
1490 }
1492 replay_memory_access = old;
1493
1494 return ret;
1495 }
1496
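/* Editor's note (not in the original source): the save/restore around a
   throwing call above could equivalently use gdb's scoped_restore; a minimal
   sketch:

     scoped_restore restore_access
       = make_scoped_restore (&replay_memory_access,
                              replay_memory_access_read_write);

     return this->beneath ()->insert_breakpoint (gdbarch, bp_tgt);  */
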
1497 /* The remove_breakpoint method of target record-btrace. */
1498
1499 int
1500 record_btrace_target::remove_breakpoint (struct gdbarch *gdbarch,
1501 struct bp_target_info *bp_tgt,
1502 enum remove_bp_reason reason)
1503 {
1504 const char *old;
1505 int ret;
1506
1507 /* Removing breakpoints requires accessing memory. Allow it for the
1508 duration of this function. */
1509 old = replay_memory_access;
1510 replay_memory_access = replay_memory_access_read_write;
1511
1512 ret = 0;
1513 try
1514 {
1515 ret = this->beneath ()->remove_breakpoint (gdbarch, bp_tgt, reason);
1516 }
1517 catch (const gdb_exception &except)
1518 {
1519 replay_memory_access = old;
1520 throw;
1521 }
1523 replay_memory_access = old;
1524
1525 return ret;
1526 }
1527
1528 /* The fetch_registers method of target record-btrace. */
1529
1530 void
1531 record_btrace_target::fetch_registers (struct regcache *regcache, int regno)
1532 {
1533 struct btrace_insn_iterator *replay;
1534 struct thread_info *tp;
1535
1536 tp = find_thread_ptid (regcache->ptid ());
1537 gdb_assert (tp != NULL);
1538
1539 replay = tp->btrace.replay;
1540 if (replay != NULL && !record_btrace_generating_corefile)
1541 {
1542 const struct btrace_insn *insn;
1543 struct gdbarch *gdbarch;
1544 int pcreg;
1545
1546 gdbarch = regcache->arch ();
1547 pcreg = gdbarch_pc_regnum (gdbarch);
1548 if (pcreg < 0)
1549 return;
1550
1551 /* We can only provide the PC register. */
1552 if (regno >= 0 && regno != pcreg)
1553 return;
1554
1555 insn = btrace_insn_get (replay);
1556 gdb_assert (insn != NULL);
1557
1558 regcache->raw_supply (regno, &insn->pc);
1559 }
1560 else
1561 this->beneath ()->fetch_registers (regcache, regno);
1562 }
1563
1564 /* The store_registers method of target record-btrace. */
1565
1566 void
1567 record_btrace_target::store_registers (struct regcache *regcache, int regno)
1568 {
1569 if (!record_btrace_generating_corefile
1570 && record_is_replaying (regcache->ptid ()))
1571 error (_("Cannot write registers while replaying."));
1572
1573 gdb_assert (may_write_registers != 0);
1574
1575 this->beneath ()->store_registers (regcache, regno);
1576 }
1577
1578 /* The prepare_to_store method of target record-btrace. */
1579
1580 void
1581 record_btrace_target::prepare_to_store (struct regcache *regcache)
1582 {
1583 if (!record_btrace_generating_corefile
1584 && record_is_replaying (regcache->ptid ()))
1585 return;
1586
1587 this->beneath ()->prepare_to_store (regcache);
1588 }
1589
1590 /* The branch trace frame cache. */
1591
1592 struct btrace_frame_cache
1593 {
1594 /* The thread. */
1595 struct thread_info *tp;
1596
1597 /* The frame info. */
1598 struct frame_info *frame;
1599
1600 /* The branch trace function segment. */
1601 const struct btrace_function *bfun;
1602 };
1603
1604 /* A struct btrace_frame_cache hash table indexed by NEXT. */
1605
1606 static htab_t bfcache;
1607
1608 /* hash_f for htab_create_alloc of bfcache. */
1609
1610 static hashval_t
1611 bfcache_hash (const void *arg)
1612 {
1613 const struct btrace_frame_cache *cache
1614 = (const struct btrace_frame_cache *) arg;
1615
1616 return htab_hash_pointer (cache->frame);
1617 }
1618
1619 /* eq_f for htab_create_alloc of bfcache. */
1620
1621 static int
1622 bfcache_eq (const void *arg1, const void *arg2)
1623 {
1624 const struct btrace_frame_cache *cache1
1625 = (const struct btrace_frame_cache *) arg1;
1626 const struct btrace_frame_cache *cache2
1627 = (const struct btrace_frame_cache *) arg2;
1628
1629 return cache1->frame == cache2->frame;
1630 }
1631
1632 /* Create a new btrace frame cache. */
1633
1634 static struct btrace_frame_cache *
1635 bfcache_new (struct frame_info *frame)
1636 {
1637 struct btrace_frame_cache *cache;
1638 void **slot;
1639
1640 cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
1641 cache->frame = frame;
1642
1643 slot = htab_find_slot (bfcache, cache, INSERT);
1644 gdb_assert (*slot == NULL);
1645 *slot = cache;
1646
1647 return cache;
1648 }
1649
1650 /* Extract the branch trace function from a branch trace frame. */
1651
1652 static const struct btrace_function *
1653 btrace_get_frame_function (struct frame_info *frame)
1654 {
1655 const struct btrace_frame_cache *cache;
1656 struct btrace_frame_cache pattern;
1657 void **slot;
1658
1659 pattern.frame = frame;
1660
1661 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1662 if (slot == NULL)
1663 return NULL;
1664
1665 cache = (const struct btrace_frame_cache *) *slot;
1666 return cache->bfun;
1667 }
1668
1669 /* Implement stop_reason method for record_btrace_frame_unwind. */
1670
1671 static enum unwind_stop_reason
1672 record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1673 void **this_cache)
1674 {
1675 const struct btrace_frame_cache *cache;
1676 const struct btrace_function *bfun;
1677
1678 cache = (const struct btrace_frame_cache *) *this_cache;
1679 bfun = cache->bfun;
1680 gdb_assert (bfun != NULL);
1681
1682 if (bfun->up == 0)
1683 return UNWIND_UNAVAILABLE;
1684
1685 return UNWIND_NO_REASON;
1686 }
1687
1688 /* Implement this_id method for record_btrace_frame_unwind. */
1689
1690 static void
1691 record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
1692 struct frame_id *this_id)
1693 {
1694 const struct btrace_frame_cache *cache;
1695 const struct btrace_function *bfun;
1696 struct btrace_call_iterator it;
1697 CORE_ADDR code, special;
1698
1699 cache = (const struct btrace_frame_cache *) *this_cache;
1700
1701 bfun = cache->bfun;
1702 gdb_assert (bfun != NULL);
1703
1704 while (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->prev) != 0)
1705 bfun = btrace_call_get (&it);
1706
1707 code = get_frame_func (this_frame);
1708 special = bfun->number;
1709
1710 *this_id = frame_id_build_unavailable_stack_special (code, special);
1711
1712 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1713 btrace_get_bfun_name (cache->bfun),
1714 core_addr_to_string_nz (this_id->code_addr),
1715 core_addr_to_string_nz (this_id->special_addr));
1716 }
1717
1718 /* Implement prev_register method for record_btrace_frame_unwind. */
1719
1720 static struct value *
1721 record_btrace_frame_prev_register (struct frame_info *this_frame,
1722 void **this_cache,
1723 int regnum)
1724 {
1725 const struct btrace_frame_cache *cache;
1726 const struct btrace_function *bfun, *caller;
1727 struct btrace_call_iterator it;
1728 struct gdbarch *gdbarch;
1729 CORE_ADDR pc;
1730 int pcreg;
1731
1732 gdbarch = get_frame_arch (this_frame);
1733 pcreg = gdbarch_pc_regnum (gdbarch);
1734 if (pcreg < 0 || regnum != pcreg)
1735 throw_error (NOT_AVAILABLE_ERROR,
1736 _("Registers are not available in btrace record history"));
1737
1738 cache = (const struct btrace_frame_cache *) *this_cache;
1739 bfun = cache->bfun;
1740 gdb_assert (bfun != NULL);
1741
1742 if (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->up) == 0)
1743 throw_error (NOT_AVAILABLE_ERROR,
1744 _("No caller in btrace record history"));
1745
1746 caller = btrace_call_get (&it);
1747
1748 if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
1749 pc = caller->insn.front ().pc;
1750 else
1751 {
1752 pc = caller->insn.back ().pc;
1753 pc += gdb_insn_length (gdbarch, pc);
1754 }
1755
1756 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1757 btrace_get_bfun_name (bfun), bfun->level,
1758 core_addr_to_string_nz (pc));
1759
1760 return frame_unwind_got_address (this_frame, regnum, pc);
1761 }
1762
1763 /* Implement sniffer method for record_btrace_frame_unwind. */
1764
1765 static int
1766 record_btrace_frame_sniffer (const struct frame_unwind *self,
1767 struct frame_info *this_frame,
1768 void **this_cache)
1769 {
1770 const struct btrace_function *bfun;
1771 struct btrace_frame_cache *cache;
1772 struct thread_info *tp;
1773 struct frame_info *next;
1774
1775 /* THIS_FRAME does not contain a reference to its thread. */
1776 tp = inferior_thread ();
1777
1778 bfun = NULL;
1779 next = get_next_frame (this_frame);
1780 if (next == NULL)
1781 {
1782 const struct btrace_insn_iterator *replay;
1783
1784 replay = tp->btrace.replay;
1785 if (replay != NULL)
1786 bfun = &replay->btinfo->functions[replay->call_index];
1787 }
1788 else
1789 {
1790 const struct btrace_function *callee;
1791 struct btrace_call_iterator it;
1792
1793 callee = btrace_get_frame_function (next);
1794 if (callee == NULL || (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
1795 return 0;
1796
1797 if (btrace_find_call_by_number (&it, &tp->btrace, callee->up) == 0)
1798 return 0;
1799
1800 bfun = btrace_call_get (&it);
1801 }
1802
1803 if (bfun == NULL)
1804 return 0;
1805
1806 DEBUG ("[frame] sniffed frame for %s on level %d",
1807 btrace_get_bfun_name (bfun), bfun->level);
1808
1809 /* This is our frame. Initialize the frame cache. */
1810 cache = bfcache_new (this_frame);
1811 cache->tp = tp;
1812 cache->bfun = bfun;
1813
1814 *this_cache = cache;
1815 return 1;
1816 }
1817
1818 /* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1819
1820 static int
1821 record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
1822 struct frame_info *this_frame,
1823 void **this_cache)
1824 {
1825 const struct btrace_function *bfun, *callee;
1826 struct btrace_frame_cache *cache;
1827 struct btrace_call_iterator it;
1828 struct frame_info *next;
1829 struct thread_info *tinfo;
1830
1831 next = get_next_frame (this_frame);
1832 if (next == NULL)
1833 return 0;
1834
1835 callee = btrace_get_frame_function (next);
1836 if (callee == NULL)
1837 return 0;
1838
1839 if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1840 return 0;
1841
1842 tinfo = inferior_thread ();
1843 if (btrace_find_call_by_number (&it, &tinfo->btrace, callee->up) == 0)
1844 return 0;
1845
1846 bfun = btrace_call_get (&it);
1847
1848 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1849 btrace_get_bfun_name (bfun), bfun->level);
1850
1851 /* This is our frame. Initialize the frame cache. */
1852 cache = bfcache_new (this_frame);
1853 cache->tp = tinfo;
1854 cache->bfun = bfun;
1855
1856 *this_cache = cache;
1857 return 1;
1858 }
1859
1860 static void
1861 record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
1862 {
1863 struct btrace_frame_cache *cache;
1864 void **slot;
1865
1866 cache = (struct btrace_frame_cache *) this_cache;
1867
1868 slot = htab_find_slot (bfcache, cache, NO_INSERT);
1869 gdb_assert (slot != NULL);
1870
1871 htab_remove_elt (bfcache, cache);
1872 }
1873
1874 /* btrace recording does not store previous memory content, nor the contents
1875 of the stack frames. Any unwinding would return erroneous results as the
1876 stack contents no longer match the changed PC value restored from history.
1877 Therefore this unwinder reports any possibly unwound registers as
1878 <unavailable>. */
1879
1880 const struct frame_unwind record_btrace_frame_unwind =
1881 {
1882 NORMAL_FRAME,
1883 record_btrace_frame_unwind_stop_reason,
1884 record_btrace_frame_this_id,
1885 record_btrace_frame_prev_register,
1886 NULL,
1887 record_btrace_frame_sniffer,
1888 record_btrace_frame_dealloc_cache
1889 };
1890
1891 const struct frame_unwind record_btrace_tailcall_frame_unwind =
1892 {
1893 TAILCALL_FRAME,
1894 record_btrace_frame_unwind_stop_reason,
1895 record_btrace_frame_this_id,
1896 record_btrace_frame_prev_register,
1897 NULL,
1898 record_btrace_tailcall_frame_sniffer,
1899 record_btrace_frame_dealloc_cache
1900 };
1901
1902 /* Implement the get_unwinder method. */
1903
1904 const struct frame_unwind *
1905 record_btrace_target::get_unwinder ()
1906 {
1907 return &record_btrace_frame_unwind;
1908 }
1909
1910 /* Implement the get_tailcall_unwinder method. */
1911
1912 const struct frame_unwind *
1913 record_btrace_target::get_tailcall_unwinder ()
1914 {
1915 return &record_btrace_tailcall_frame_unwind;
1916 }
1917
1918 /* Return a human-readable string for FLAG. */
1919
1920 static const char *
1921 btrace_thread_flag_to_str (enum btrace_thread_flag flag)
1922 {
1923 switch (flag)
1924 {
1925 case BTHR_STEP:
1926 return "step";
1927
1928 case BTHR_RSTEP:
1929 return "reverse-step";
1930
1931 case BTHR_CONT:
1932 return "cont";
1933
1934 case BTHR_RCONT:
1935 return "reverse-cont";
1936
1937 case BTHR_STOP:
1938 return "stop";
1939 }
1940
1941 return "<invalid>";
1942 }
1943
1944 /* Indicate that TP should be resumed according to FLAG. */
1945
1946 static void
1947 record_btrace_resume_thread (struct thread_info *tp,
1948 enum btrace_thread_flag flag)
1949 {
1950 struct btrace_thread_info *btinfo;
1951
1952 DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp),
1953 target_pid_to_str (tp->ptid).c_str (), flag,
1954 btrace_thread_flag_to_str (flag));
1955
1956 btinfo = &tp->btrace;
1957
1958 /* Fetch the latest branch trace. */
1959 btrace_fetch (tp, record_btrace_get_cpu ());
1960
1961 /* A resume request overwrites a preceding resume or stop request. */
1962 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
1963 btinfo->flags |= flag;
1964 }
1965
1966 /* Get the current frame for TP. */
1967
1968 static struct frame_id
1969 get_thread_current_frame_id (struct thread_info *tp)
1970 {
1971 struct frame_id id;
1972 int executing;
1973
1974 /* Set current thread, which is implicitly used by
1975 get_current_frame. */
1976 scoped_restore_current_thread restore_thread;
1977
1978 switch_to_thread (tp);
1979
1980 /* Clear the executing flag to allow changes to the current frame.
1981 We are not actually running, yet. We just started a reverse execution
1982 command or a record goto command.
1983 For the latter, EXECUTING is false and this has no effect.
1984 For the former, EXECUTING is true and we're in wait, about to
1985 move the thread. Since we need to recompute the stack, we temporarily
1986 set EXECUTING to false. */
1987 executing = tp->executing;
1988 set_executing (inferior_ptid, false);
1989
1990 id = null_frame_id;
1991 try
1992 {
1993 id = get_frame_id (get_current_frame ());
1994 }
1995 catch (const gdb_exception &except)
1996 {
1997 /* Restore the previous execution state. */
1998 set_executing (inferior_ptid, executing);
1999
2000 throw;
2001 }
2003
2004 /* Restore the previous execution state. */
2005 set_executing (inferior_ptid, executing);
2006
2007 return id;
2008 }
2009
2010 /* Start replaying a thread. */
2011
2012 static struct btrace_insn_iterator *
2013 record_btrace_start_replaying (struct thread_info *tp)
2014 {
2015 struct btrace_insn_iterator *replay;
2016 struct btrace_thread_info *btinfo;
2017
2018 btinfo = &tp->btrace;
2019 replay = NULL;
2020
2021 /* We can't start replaying without trace. */
2022 if (btinfo->functions.empty ())
2023 return NULL;
2024
2025 /* GDB stores the current frame_id when stepping in order to detect steps
2026 into subroutines.
2027 Since frames are computed differently when we're replaying, we need to
2028 recompute those stored frames and fix them up so we can still detect
2029 subroutines after we started replaying. */
2030 try
2031 {
2032 struct frame_id frame_id;
2033 int upd_step_frame_id, upd_step_stack_frame_id;
2034
2035 /* The current frame without replaying - computed via normal unwind. */
2036 frame_id = get_thread_current_frame_id (tp);
2037
2038 /* Check if we need to update any stepping-related frame id's. */
2039 upd_step_frame_id = frame_id_eq (frame_id,
2040 tp->control.step_frame_id);
2041 upd_step_stack_frame_id = frame_id_eq (frame_id,
2042 tp->control.step_stack_frame_id);
2043
2044 /* We start replaying at the end of the branch trace. This corresponds
2045 to the current instruction. */
2046 replay = XNEW (struct btrace_insn_iterator);
2047 btrace_insn_end (replay, btinfo);
2048
2049 /* Skip gaps at the end of the trace. */
2050 while (btrace_insn_get (replay) == NULL)
2051 {
2052 unsigned int steps;
2053
2054 steps = btrace_insn_prev (replay, 1);
2055 if (steps == 0)
2056 error (_("No trace."));
2057 }
2058
2059 /* We're not replaying, yet. */
2060 gdb_assert (btinfo->replay == NULL);
2061 btinfo->replay = replay;
2062
2063 /* Make sure we're not using any stale registers. */
2064 registers_changed_thread (tp);
2065
2066 /* The current frame with replaying - computed via btrace unwind. */
2067 frame_id = get_thread_current_frame_id (tp);
2068
2069 /* Replace stepping related frames where necessary. */
2070 if (upd_step_frame_id)
2071 tp->control.step_frame_id = frame_id;
2072 if (upd_step_stack_frame_id)
2073 tp->control.step_stack_frame_id = frame_id;
2074 }
2075 catch (const gdb_exception &except)
2076 {
2077 xfree (btinfo->replay);
2078 btinfo->replay = NULL;
2079
2080 registers_changed_thread (tp);
2081
2082 throw_exception (except);
2083 }
2084 END_CATCH
2085
2086 return replay;
2087 }
2088
2089 /* Stop replaying a thread. */
2090
2091 static void
2092 record_btrace_stop_replaying (struct thread_info *tp)
2093 {
2094 struct btrace_thread_info *btinfo;
2095
2096 btinfo = &tp->btrace;
2097
2098 xfree (btinfo->replay);
2099 btinfo->replay = NULL;
2100
2101 /* Make sure we're not leaving any stale registers. */
2102 registers_changed_thread (tp);
2103 }
2104
2105 /* Stop replaying TP if it is at the end of its execution history. */
2106
2107 static void
2108 record_btrace_stop_replaying_at_end (struct thread_info *tp)
2109 {
2110 struct btrace_insn_iterator *replay, end;
2111 struct btrace_thread_info *btinfo;
2112
2113 btinfo = &tp->btrace;
2114 replay = btinfo->replay;
2115
2116 if (replay == NULL)
2117 return;
2118
2119 btrace_insn_end (&end, btinfo);
2120
2121 if (btrace_insn_cmp (replay, &end) == 0)
2122 record_btrace_stop_replaying (tp);
2123 }
2124
2125 /* The resume method of target record-btrace. */
2126
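/* For example, "reverse-stepi" in an all-stop session arrives here with
   STEP == 1 and ::execution_direction == EXEC_REVERSE; the current
   thread is marked BTHR_RSTEP, other resumed threads BTHR_RCONT, and
   the actual move happens in record_btrace_target::wait below.  */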
2127 void
2128 record_btrace_target::resume (ptid_t ptid, int step, enum gdb_signal signal)
2129 {
2130 enum btrace_thread_flag flag, cflag;
2131
2132 DEBUG ("resume %s: %s%s", target_pid_to_str (ptid).c_str (),
2133 ::execution_direction == EXEC_REVERSE ? "reverse-" : "",
2134 step ? "step" : "cont");
2135
2136 /* Store the execution direction of the last resume.
2137
2138 If there is more than one resume call, we have to rely on infrun
2139 to not change the execution direction in-between. */
2140 record_btrace_resume_exec_dir = ::execution_direction;
2141
2142 /* As long as we're not replaying, just forward the request.
2143
2144 For non-stop targets this means that no thread is replaying. In order to
2145 make progress, we may need to explicitly move replaying threads to the end
2146 of their execution history. */
2147 if ((::execution_direction != EXEC_REVERSE)
2148 && !record_is_replaying (minus_one_ptid))
2149 {
2150 this->beneath ()->resume (ptid, step, signal);
2151 return;
2152 }
2153
2154 /* Compute the btrace thread flag for the requested move. */
2155 if (::execution_direction == EXEC_REVERSE)
2156 {
2157 flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;
2158 cflag = BTHR_RCONT;
2159 }
2160 else
2161 {
2162 flag = step == 0 ? BTHR_CONT : BTHR_STEP;
2163 cflag = BTHR_CONT;
2164 }
2165
2166 /* We just indicate the resume intent here. The actual stepping happens in
2167 record_btrace_wait below.
2168
2169 For all-stop targets, we only step INFERIOR_PTID and continue others. */
2170 if (!target_is_non_stop_p ())
2171 {
2172 gdb_assert (inferior_ptid.matches (ptid));
2173
2174 for (thread_info *tp : all_non_exited_threads (ptid))
2175 {
2176 if (tp->ptid.matches (inferior_ptid))
2177 record_btrace_resume_thread (tp, flag);
2178 else
2179 record_btrace_resume_thread (tp, cflag);
2180 }
2181 }
2182 else
2183 {
2184 for (thread_info *tp : all_non_exited_threads (ptid))
2185 record_btrace_resume_thread (tp, flag);
2186 }
2187
2188 /* Async support. */
2189 if (target_can_async_p ())
2190 {
2191 target_async (1);
2192 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2193 }
2194 }
2195
2196 /* The commit_resume method of target record-btrace. */
2197
2198 void
2199 record_btrace_target::commit_resume ()
2200 {
2201 if ((::execution_direction != EXEC_REVERSE)
2202 && !record_is_replaying (minus_one_ptid))
2203 beneath ()->commit_resume ();
2204 }
2205
2206 /* Cancel resuming TP. */
2207
2208 static void
2209 record_btrace_cancel_resume (struct thread_info *tp)
2210 {
2211 enum btrace_thread_flag flags;
2212
2213 flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
2214 if (flags == 0)
2215 return;
2216
2217 DEBUG ("cancel resume thread %s (%s): %x (%s)",
2218 print_thread_id (tp),
2219 target_pid_to_str (tp->ptid).c_str (), flags,
2220 btrace_thread_flag_to_str (flags));
2221
2222 tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
2223 record_btrace_stop_replaying_at_end (tp);
2224 }
2225
2226 /* Return a target_waitstatus indicating that we ran out of history. */
2227
2228 static struct target_waitstatus
2229 btrace_step_no_history (void)
2230 {
2231 struct target_waitstatus status;
2232
2233 status.kind = TARGET_WAITKIND_NO_HISTORY;
2234
2235 return status;
2236 }
2237
2238 /* Return a target_waitstatus indicating that a step finished. */
2239
2240 static struct target_waitstatus
2241 btrace_step_stopped (void)
2242 {
2243 struct target_waitstatus status;
2244
2245 status.kind = TARGET_WAITKIND_STOPPED;
2246 status.value.sig = GDB_SIGNAL_TRAP;
2247
2248 return status;
2249 }
2250
2251 /* Return a target_waitstatus indicating that a thread was stopped as
2252 requested. */
2253
2254 static struct target_waitstatus
2255 btrace_step_stopped_on_request (void)
2256 {
2257 struct target_waitstatus status;
2258
2259 status.kind = TARGET_WAITKIND_STOPPED;
2260 status.value.sig = GDB_SIGNAL_0;
2261
2262 return status;
2263 }
2264
2265 /* Return a target_waitstatus indicating a spurious stop. */
2266
2267 static struct target_waitstatus
2268 btrace_step_spurious (void)
2269 {
2270 struct target_waitstatus status;
2271
2272 status.kind = TARGET_WAITKIND_SPURIOUS;
2273
2274 return status;
2275 }
2276
2277 /* Return a target_waitstatus indicating that the thread was not resumed. */
2278
2279 static struct target_waitstatus
2280 btrace_step_no_resumed (void)
2281 {
2282 struct target_waitstatus status;
2283
2284 status.kind = TARGET_WAITKIND_NO_RESUMED;
2285
2286 return status;
2287 }
2288
2289 /* Return a target_waitstatus indicating that we should wait again. */
2290
2291 static struct target_waitstatus
2292 btrace_step_again (void)
2293 {
2294 struct target_waitstatus status;
2295
2296 status.kind = TARGET_WAITKIND_IGNORE;
2297
2298 return status;
2299 }
2300
2301 /* Clear the record histories. */
2302
2303 static void
2304 record_btrace_clear_histories (struct btrace_thread_info *btinfo)
2305 {
2306 xfree (btinfo->insn_history);
2307 xfree (btinfo->call_history);
2308
2309 btinfo->insn_history = NULL;
2310 btinfo->call_history = NULL;
2311 }
2312
2313 /* Check whether TP's current replay position is at a breakpoint. */
2314
2315 static int
2316 record_btrace_replay_at_breakpoint (struct thread_info *tp)
2317 {
2318 struct btrace_insn_iterator *replay;
2319 struct btrace_thread_info *btinfo;
2320 const struct btrace_insn *insn;
2321
2322 btinfo = &tp->btrace;
2323 replay = btinfo->replay;
2324
2325 if (replay == NULL)
2326 return 0;
2327
2328 insn = btrace_insn_get (replay);
2329 if (insn == NULL)
2330 return 0;
2331
2332 return record_check_stopped_by_breakpoint (tp->inf->aspace, insn->pc,
2333 &btinfo->stop_reason);
2334 }
2335
2336 /* Step one instruction in forward direction. */
2337
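/* For example, when replaying at the last executed instruction, a
   further forward step reports TARGET_WAITKIND_NO_HISTORY instead of
   moving past the end of the recorded trace.  */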
2338 static struct target_waitstatus
2339 record_btrace_single_step_forward (struct thread_info *tp)
2340 {
2341 struct btrace_insn_iterator *replay, end, start;
2342 struct btrace_thread_info *btinfo;
2343
2344 btinfo = &tp->btrace;
2345 replay = btinfo->replay;
2346
2347 /* We're done if we're not replaying. */
2348 if (replay == NULL)
2349 return btrace_step_no_history ();
2350
2351 /* Check if we're stepping a breakpoint. */
2352 if (record_btrace_replay_at_breakpoint (tp))
2353 return btrace_step_stopped ();
2354
2355 /* Skip gaps during replay. If we end up at a gap (at the end of the trace),
2356 jump back to the instruction at which we started. */
2357 start = *replay;
2358 do
2359 {
2360 unsigned int steps;
2361
2362 /* We will bail out here if we continue stepping after reaching the end
2363 of the execution history. */
2364 steps = btrace_insn_next (replay, 1);
2365 if (steps == 0)
2366 {
2367 *replay = start;
2368 return btrace_step_no_history ();
2369 }
2370 }
2371 while (btrace_insn_get (replay) == NULL);
2372
2373 /* Determine the end of the instruction trace. */
2374 btrace_insn_end (&end, btinfo);
2375
2376 /* The execution trace contains (and ends with) the current instruction.
2377 This instruction has not been executed, yet, so the trace really ends
2378 one instruction earlier. */
2379 if (btrace_insn_cmp (replay, &end) == 0)
2380 return btrace_step_no_history ();
2381
2382 return btrace_step_spurious ();
2383 }
2384
2385 /* Step one instruction in backward direction. */
2386
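/* For example, the first "reverse-stepi" on a thread that is not yet
   replaying starts replay at the end of the trace and moves one
   instruction back from there.  */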
2387 static struct target_waitstatus
2388 record_btrace_single_step_backward (struct thread_info *tp)
2389 {
2390 struct btrace_insn_iterator *replay, start;
2391 struct btrace_thread_info *btinfo;
2392
2393 btinfo = &tp->btrace;
2394 replay = btinfo->replay;
2395
2396 /* Start replaying if we're not already doing so. */
2397 if (replay == NULL)
2398 replay = record_btrace_start_replaying (tp);
2399
2400 /* If we can't step any further, we reached the end of the history.
2401 Skip gaps during replay. If we end up at a gap (at the beginning of
2402 the trace), jump back to the instruction at which we started. */
2403 start = *replay;
2404 do
2405 {
2406 unsigned int steps;
2407
2408 steps = btrace_insn_prev (replay, 1);
2409 if (steps == 0)
2410 {
2411 *replay = start;
2412 return btrace_step_no_history ();
2413 }
2414 }
2415 while (btrace_insn_get (replay) == NULL);
2416
2417 /* Check if we're stepping a breakpoint.
2418
2419 For reverse-stepping, this check is after the step. There is logic in
2420 infrun.c that handles reverse-stepping separately. See, for example,
2421 proceed and adjust_pc_after_break.
2422
2423 This code assumes that for reverse-stepping, PC points to the last
2424 de-executed instruction, whereas for forward-stepping PC points to the
2425 next to-be-executed instruction. */
2426 if (record_btrace_replay_at_breakpoint (tp))
2427 return btrace_step_stopped ();
2428
2429 return btrace_step_spurious ();
2430 }
2431
2432 /* Step a single thread. */
2433
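/* For example, a thread resumed with BTHR_RCONT keeps returning
   btrace_step_again (TARGET_WAITKIND_IGNORE) until it reaches a
   breakpoint or the beginning of its execution history.  */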
2434 static struct target_waitstatus
2435 record_btrace_step_thread (struct thread_info *tp)
2436 {
2437 struct btrace_thread_info *btinfo;
2438 struct target_waitstatus status;
2439 enum btrace_thread_flag flags;
2440
2441 btinfo = &tp->btrace;
2442
2443 flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
2444 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
2445
2446 DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp),
2447 target_pid_to_str (tp->ptid).c_str (), flags,
2448 btrace_thread_flag_to_str (flags));
2449
2450 /* We can't step without an execution history. */
2451 if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
2452 return btrace_step_no_history ();
2453
2454 switch (flags)
2455 {
2456 default:
2457 internal_error (__FILE__, __LINE__, _("invalid stepping type."));
2458
2459 case BTHR_STOP:
2460 return btrace_step_stopped_on_request ();
2461
2462 case BTHR_STEP:
2463 status = record_btrace_single_step_forward (tp);
2464 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2465 break;
2466
2467 return btrace_step_stopped ();
2468
2469 case BTHR_RSTEP:
2470 status = record_btrace_single_step_backward (tp);
2471 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2472 break;
2473
2474 return btrace_step_stopped ();
2475
2476 case BTHR_CONT:
2477 status = record_btrace_single_step_forward (tp);
2478 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2479 break;
2480
2481 btinfo->flags |= flags;
2482 return btrace_step_again ();
2483
2484 case BTHR_RCONT:
2485 status = record_btrace_single_step_backward (tp);
2486 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2487 break;
2488
2489 btinfo->flags |= flags;
2490 return btrace_step_again ();
2491 }
2492
2493 /* We keep threads moving at the end of their execution history. The wait
2494 method will stop the thread for whom the event is reported. */
2495 if (status.kind == TARGET_WAITKIND_NO_HISTORY)
2496 btinfo->flags |= flags;
2497
2498 return status;
2499 }
2500
2501 /* Announce further events if necessary. */
2502
2503 static void
2504 record_btrace_maybe_mark_async_event
2505 (const std::vector<thread_info *> &moving,
2506 const std::vector<thread_info *> &no_history)
2507 {
2508 bool more_moving = !moving.empty ();
2509 bool more_no_history = !no_history.empty ();
2510
2511 if (!more_moving && !more_no_history)
2512 return;
2513
2514 if (more_moving)
2515 DEBUG ("movers pending");
2516
2517 if (more_no_history)
2518 DEBUG ("no-history pending");
2519
2520 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2521 }
2522
2523 /* The wait method of target record-btrace. */
2524
2525 ptid_t
2526 record_btrace_target::wait (ptid_t ptid, struct target_waitstatus *status,
2527 int options)
2528 {
2529 std::vector<thread_info *> moving;
2530 std::vector<thread_info *> no_history;
2531
2532 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid).c_str (), options);
2533
2534 /* As long as we're not replaying, just forward the request. */
2535 if ((::execution_direction != EXEC_REVERSE)
2536 && !record_is_replaying (minus_one_ptid))
2537 {
2538 return this->beneath ()->wait (ptid, status, options);
2539 }
2540
2541 /* Keep a work list of moving threads. */
2542 for (thread_info *tp : all_non_exited_threads (ptid))
2543 if ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0)
2544 moving.push_back (tp);
2545
2546 if (moving.empty ())
2547 {
2548 *status = btrace_step_no_resumed ();
2549
2550 DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid).c_str (),
2551 target_waitstatus_to_string (status).c_str ());
2552
2553 return null_ptid;
2554 }
2555
2556 /* Step moving threads one by one, one step each, until either one thread
2557 reports an event or we run out of threads to step.
2558
2559 When stepping more than one thread, chances are that some threads reach
2560 the end of their execution history earlier than others. If we reported
2561 this immediately, all-stop on top of non-stop would stop all threads and
2562 resume the same threads next time. And we would report the same thread
2563 having reached the end of its execution history again.
2564
2565 In the worst case, this would starve the other threads. But even if other
2566 threads would be allowed to make progress, this would result in far too
2567 many intermediate stops.
2568
2569 We therefore delay the reporting of "no execution history" until we have
2570 nothing else to report. By this time, all threads should have moved to
2571 either the beginning or the end of their execution history. There will
2572 be a single user-visible stop. */
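/* For example, with two replaying threads where one runs out of history
   on its first step, that thread is parked in NO_HISTORY while the
   other keeps stepping; the user then sees a single stop rather than
   repeated no-history reports.  */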
2573 struct thread_info *eventing = NULL;
2574 while ((eventing == NULL) && !moving.empty ())
2575 {
2576 for (unsigned int ix = 0; eventing == NULL && ix < moving.size ();)
2577 {
2578 thread_info *tp = moving[ix];
2579
2580 *status = record_btrace_step_thread (tp);
2581
2582 switch (status->kind)
2583 {
2584 case TARGET_WAITKIND_IGNORE:
2585 ix++;
2586 break;
2587
2588 case TARGET_WAITKIND_NO_HISTORY:
2589 no_history.push_back (ordered_remove (moving, ix));
2590 break;
2591
2592 default:
2593 eventing = unordered_remove (moving, ix);
2594 break;
2595 }
2596 }
2597 }
2598
2599 if (eventing == NULL)
2600 {
2601 /* We started with at least one moving thread. This thread must have
2602 either stopped or reached the end of its execution history.
2603
2604 In the former case, EVENTING must not be NULL.
2605 In the latter case, NO_HISTORY must not be empty. */
2606 gdb_assert (!no_history.empty ());
2607
2608 /* We kept threads moving at the end of their execution history. Stop
2609 EVENTING now that we are going to report its stop. */
2610 eventing = unordered_remove (no_history, 0);
2611 eventing->btrace.flags &= ~BTHR_MOVE;
2612
2613 *status = btrace_step_no_history ();
2614 }
2615
2616 gdb_assert (eventing != NULL);
2617
2618 /* We kept threads replaying at the end of their execution history. Stop
2619 replaying EVENTING now that we are going to report its stop. */
2620 record_btrace_stop_replaying_at_end (eventing);
2621
2622 /* Stop all other threads. */
2623 if (!target_is_non_stop_p ())
2624 {
2625 for (thread_info *tp : all_non_exited_threads ())
2626 record_btrace_cancel_resume (tp);
2627 }
2628
2629 /* In async mode, we need to announce further events. */
2630 if (target_is_async_p ())
2631 record_btrace_maybe_mark_async_event (moving, no_history);
2632
2633 /* Start record histories anew from the current position. */
2634 record_btrace_clear_histories (&eventing->btrace);
2635
2636 /* We moved the replay position but did not update registers. */
2637 registers_changed_thread (eventing);
2638
2639 DEBUG ("wait ended by thread %s (%s): %s",
2640 print_thread_id (eventing),
2641 target_pid_to_str (eventing->ptid).c_str (),
2642 target_waitstatus_to_string (status).c_str ());
2643
2644 return eventing->ptid;
2645 }
2646
2647 /* The stop method of target record-btrace. */
2648
2649 void
2650 record_btrace_target::stop (ptid_t ptid)
2651 {
2652 DEBUG ("stop %s", target_pid_to_str (ptid).c_str ());
2653
2654 /* As long as we're not replaying, just forward the request. */
2655 if ((::execution_direction != EXEC_REVERSE)
2656 && !record_is_replaying (minus_one_ptid))
2657 {
2658 this->beneath ()->stop (ptid);
2659 }
2660 else
2661 {
2662 for (thread_info *tp : all_non_exited_threads (ptid))
2663 {
2664 tp->btrace.flags &= ~BTHR_MOVE;
2665 tp->btrace.flags |= BTHR_STOP;
2666 }
2667 }
2668 }
2669
2670 /* The can_execute_reverse method of target record-btrace. */
2671
2672 bool
2673 record_btrace_target::can_execute_reverse ()
2674 {
2675 return true;
2676 }
2677
2678 /* The stopped_by_sw_breakpoint method of target record-btrace. */
2679
2680 bool
2681 record_btrace_target::stopped_by_sw_breakpoint ()
2682 {
2683 if (record_is_replaying (minus_one_ptid))
2684 {
2685 struct thread_info *tp = inferior_thread ();
2686
2687 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2688 }
2689
2690 return this->beneath ()->stopped_by_sw_breakpoint ();
2691 }
2692
2693 /* The supports_stopped_by_sw_breakpoint method of target
2694 record-btrace. */
2695
2696 bool
2697 record_btrace_target::supports_stopped_by_sw_breakpoint ()
2698 {
2699 if (record_is_replaying (minus_one_ptid))
2700 return true;
2701
2702 return this->beneath ()->supports_stopped_by_sw_breakpoint ();
2703 }
2704
2705 /* The stopped_by_hw_breakpoint method of target record-btrace. */
2706
2707 bool
2708 record_btrace_target::stopped_by_hw_breakpoint ()
2709 {
2710 if (record_is_replaying (minus_one_ptid))
2711 {
2712 struct thread_info *tp = inferior_thread ();
2713
2714 return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2715 }
2716
2717 return this->beneath ()->stopped_by_hw_breakpoint ();
2718 }
2719
2720 /* The supports_stopped_by_hw_breakpoint method of target
2721 record-btrace. */
2722
2723 bool
2724 record_btrace_target::supports_stopped_by_hw_breakpoint ()
2725 {
2726 if (record_is_replaying (minus_one_ptid))
2727 return true;
2728
2729 return this->beneath ()->supports_stopped_by_hw_breakpoint ();
2730 }
2731
2732 /* The update_thread_list method of target record-btrace. */
2733
2734 void
2735 record_btrace_target::update_thread_list ()
2736 {
2737 /* We don't add or remove threads during replay. */
2738 if (record_is_replaying (minus_one_ptid))
2739 return;
2740
2741 /* Forward the request. */
2742 this->beneath ()->update_thread_list ();
2743 }
2744
2745 /* The thread_alive method of target record-btrace. */
2746
2747 bool
2748 record_btrace_target::thread_alive (ptid_t ptid)
2749 {
2750 /* We don't add or remove threads during replay. */
2751 if (record_is_replaying (minus_one_ptid))
2752 return true;
2753
2754 /* Forward the request. */
2755 return this->beneath ()->thread_alive (ptid);
2756 }
2757
2758 /* Set the replay branch trace instruction iterator. If IT is NULL, replay
2759 is stopped. */
2760
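/* This is the workhorse of the "record goto" commands below; e.g.
   "record goto end" calls it with IT == NULL to stop replaying and
   return to the current position.  */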
2761 static void
2762 record_btrace_set_replay (struct thread_info *tp,
2763 const struct btrace_insn_iterator *it)
2764 {
2765 struct btrace_thread_info *btinfo;
2766
2767 btinfo = &tp->btrace;
2768
2769 if (it == NULL)
2770 record_btrace_stop_replaying (tp);
2771 else
2772 {
2773 if (btinfo->replay == NULL)
2774 record_btrace_start_replaying (tp);
2775 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
2776 return;
2777
2778 *btinfo->replay = *it;
2779 registers_changed_thread (tp);
2780 }
2781
2782 /* Start anew from the new replay position. */
2783 record_btrace_clear_histories (btinfo);
2784
2785 inferior_thread ()->suspend.stop_pc
2786 = regcache_read_pc (get_current_regcache ());
2787 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
2788 }
2789
2790 /* The goto_record_begin method of target record-btrace. */
2791
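/* Reached via the "record goto begin" command; positions the replay
   iterator at the first recorded (non-gap) instruction.  */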
2792 void
2793 record_btrace_target::goto_record_begin ()
2794 {
2795 struct thread_info *tp;
2796 struct btrace_insn_iterator begin;
2797
2798 tp = require_btrace_thread ();
2799
2800 btrace_insn_begin (&begin, &tp->btrace);
2801
2802 /* Skip gaps at the beginning of the trace. */
2803 while (btrace_insn_get (&begin) == NULL)
2804 {
2805 unsigned int steps;
2806
2807 steps = btrace_insn_next (&begin, 1);
2808 if (steps == 0)
2809 error (_("No trace."));
2810 }
2811
2812 record_btrace_set_replay (tp, &begin);
2813 }
2814
2815 /* The goto_record_end method of target record-btrace. */
2816
2817 void
2818 record_btrace_target::goto_record_end ()
2819 {
2820 struct thread_info *tp;
2821
2822 tp = require_btrace_thread ();
2823
2824 record_btrace_set_replay (tp, NULL);
2825 }
2826
2827 /* The goto_record method of target record-btrace. */
2828
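/* Reached via "record goto N", with N an instruction number as printed
   by "record instruction-history".  */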
2829 void
2830 record_btrace_target::goto_record (ULONGEST insn)
2831 {
2832 struct thread_info *tp;
2833 struct btrace_insn_iterator it;
2834 unsigned int number;
2835 int found;
2836
2837 number = insn;
2838
2839 /* Check for wrap-arounds. */
2840 if (number != insn)
2841 error (_("Instruction number out of range."));
2842
2843 tp = require_btrace_thread ();
2844
2845 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
2846
2847 /* Check if the instruction could not be found or is a gap. */
2848 if (found == 0 || btrace_insn_get (&it) == NULL)
2849 error (_("No such instruction."));
2850
2851 record_btrace_set_replay (tp, &it);
2852 }
2853
2854 /* The record_stop_replaying method of target record-btrace. */
2855
2856 void
2857 record_btrace_target::record_stop_replaying ()
2858 {
2859 for (thread_info *tp : all_non_exited_threads ())
2860 record_btrace_stop_replaying (tp);
2861 }
2862
2863 /* The execution_direction target method. */
2864
2865 enum exec_direction_kind
2866 record_btrace_target::execution_direction ()
2867 {
2868 return record_btrace_resume_exec_dir;
2869 }
2870
2871 /* The prepare_to_generate_core target method. */
2872
2873 void
2874 record_btrace_target::prepare_to_generate_core ()
2875 {
2876 record_btrace_generating_corefile = 1;
2877 }
2878
2879 /* The done_generating_core target method. */
2880
2881 void
2882 record_btrace_target::done_generating_core ()
2883 {
2884 record_btrace_generating_corefile = 0;
2885 }
2886
2887 /* Start recording in BTS format. */
2888
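/* Implements "record btrace bts".  On failure, the configured format is
   reset so that a later attempt starts from a clean state.  */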
2889 static void
2890 cmd_record_btrace_bts_start (const char *args, int from_tty)
2891 {
2892 if (args != NULL && *args != 0)
2893 error (_("Invalid argument."));
2894
2895 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2896
2897 try
2898 {
2899 execute_command ("target record-btrace", from_tty);
2900 }
2901 catch (const gdb_exception &exception)
2902 {
2903 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2904 throw;
2905 }
2907 }
2908
2909 /* Start recording in Intel Processor Trace format. */
2910
2911 static void
2912 cmd_record_btrace_pt_start (const char *args, int from_tty)
2913 {
2914 if (args != NULL && *args != 0)
2915 error (_("Invalid argument."));
2916
2917 record_btrace_conf.format = BTRACE_FORMAT_PT;
2918
2919 try
2920 {
2921 execute_command ("target record-btrace", from_tty);
2922 }
2923 catch (const gdb_exception &exception)
2924 {
2925 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2926 throw;
2927 }
2929 }
2930
2931 /* The "record btrace" command: try Intel Processor Trace and fall back to BTS. */
2932
2933 static void
2934 cmd_record_btrace_start (const char *args, int from_tty)
2935 {
2936 if (args != NULL && *args != 0)
2937 error (_("Invalid argument."));
2938
2939 record_btrace_conf.format = BTRACE_FORMAT_PT;
2940
2941 try
2942 {
2943 execute_command ("target record-btrace", from_tty);
2944 }
2945 catch (const gdb_exception &exception)
2946 {
2947 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2948
2949 try
2950 {
2951 execute_command ("target record-btrace", from_tty);
2952 }
2953 catch (const gdb_exception &ex)
2954 {
2955 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2956 throw;
2957 }
2959 }
2961 }
2962
2963 /* The "set record btrace" command. */
2964
2965 static void
2966 cmd_set_record_btrace (const char *args, int from_tty)
2967 {
2968 printf_unfiltered (_("\"set record btrace\" must be followed "
2969 "by an appropriate subcommand.\n"));
2970 help_list (set_record_btrace_cmdlist, "set record btrace ",
2971 all_commands, gdb_stdout);
2972 }
2973
2974 /* The "show record btrace" command. */
2975
2976 static void
2977 cmd_show_record_btrace (const char *args, int from_tty)
2978 {
2979 cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
2980 }
2981
2982 /* The "show record btrace replay-memory-access" command. */
2983
2984 static void
2985 cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2986 struct cmd_list_element *c, const char *value)
2987 {
2988 fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
2989 replay_memory_access);
2990 }
2991
2992 /* The "set record btrace cpu none" command. */
2993
2994 static void
2995 cmd_set_record_btrace_cpu_none (const char *args, int from_tty)
2996 {
2997 if (args != nullptr && *args != 0)
2998 error (_("Trailing junk: '%s'."), args);
2999
3000 record_btrace_cpu_state = CS_NONE;
3001 }
3002
3003 /* The "set record btrace cpu auto" command. */
3004
3005 static void
3006 cmd_set_record_btrace_cpu_auto (const char *args, int from_tty)
3007 {
3008 if (args != nullptr && *args != 0)
3009 error (_("Trailing junk: '%s'."), args);
3010
3011 record_btrace_cpu_state = CS_AUTO;
3012 }
3013
3014 /* The "set record btrace cpu" command. */
3015
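/* Accepts, e.g., "intel: 6/62" (stepping defaults to 0) or
   "intel: 6/62/4"; the "auto" and "none" variants are handled by the
   subcommands defined above.  */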
3016 static void
3017 cmd_set_record_btrace_cpu (const char *args, int from_tty)
3018 {
3019 if (args == nullptr)
3020 args = "";
3021
3022 /* We use a hard-coded vendor string for now. */
3023 unsigned int family, model, stepping;
3024 int l1, l2, matches = sscanf (args, "intel: %u/%u%n/%u%n", &family,
3025 &model, &l1, &stepping, &l2);
3026 if (matches == 3)
3027 {
3028 if (strlen (args) != l2)
3029 error (_("Trailing junk: '%s'."), args + l2);
3030 }
3031 else if (matches == 2)
3032 {
3033 if (strlen (args) != l1)
3034 error (_("Trailing junk: '%s'."), args + l1);
3035
3036 stepping = 0;
3037 }
3038 else
3039 error (_("Bad format. See \"help set record btrace cpu\"."));
3040
3041 if (USHRT_MAX < family)
3042 error (_("Cpu family too big."));
3043
3044 if (UCHAR_MAX < model)
3045 error (_("Cpu model too big."));
3046
3047 if (UCHAR_MAX < stepping)
3048 error (_("Cpu stepping too big."));
3049
3050 record_btrace_cpu.vendor = CV_INTEL;
3051 record_btrace_cpu.family = family;
3052 record_btrace_cpu.model = model;
3053 record_btrace_cpu.stepping = stepping;
3054
3055 record_btrace_cpu_state = CS_CPU;
3056 }
3057
3058 /* The "show record btrace cpu" command. */
3059
3060 static void
3061 cmd_show_record_btrace_cpu (const char *args, int from_tty)
3062 {
3063 if (args != nullptr && *args != 0)
3064 error (_("Trailing junk: '%s'."), args);
3065
3066 switch (record_btrace_cpu_state)
3067 {
3068 case CS_AUTO:
3069 printf_unfiltered (_("btrace cpu is 'auto'.\n"));
3070 return;
3071
3072 case CS_NONE:
3073 printf_unfiltered (_("btrace cpu is 'none'.\n"));
3074 return;
3075
3076 case CS_CPU:
3077 switch (record_btrace_cpu.vendor)
3078 {
3079 case CV_INTEL:
3080 if (record_btrace_cpu.stepping == 0)
3081 printf_unfiltered (_("btrace cpu is 'intel: %u/%u'.\n"),
3082 record_btrace_cpu.family,
3083 record_btrace_cpu.model);
3084 else
3085 printf_unfiltered (_("btrace cpu is 'intel: %u/%u/%u'.\n"),
3086 record_btrace_cpu.family,
3087 record_btrace_cpu.model,
3088 record_btrace_cpu.stepping);
3089 return;
3090 }
3091 }
3092
3093 error (_("Internal error: bad cpu state."));
3094 }
3095
3096 /* The "s record btrace bts" command. */
3097
3098 static void
3099 cmd_set_record_btrace_bts (const char *args, int from_tty)
3100 {
3101 printf_unfiltered (_("\"set record btrace bts\" must be followed "
3102 "by an appropriate subcommand.\n"));
3103 help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
3104 all_commands, gdb_stdout);
3105 }
3106
3107 /* The "show record btrace bts" command. */
3108
3109 static void
3110 cmd_show_record_btrace_bts (const char *args, int from_tty)
3111 {
3112 cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
3113 }
3114
3115 /* The "set record btrace pt" command. */
3116
3117 static void
3118 cmd_set_record_btrace_pt (const char *args, int from_tty)
3119 {
3120 printf_unfiltered (_("\"set record btrace pt\" must be followed "
3121 "by an appropriate subcommand.\n"));
3122 help_list (set_record_btrace_pt_cmdlist, "set record btrace pt ",
3123 all_commands, gdb_stdout);
3124 }
3125
3126 /* The "show record btrace pt" command. */
3127
3128 static void
3129 cmd_show_record_btrace_pt (const char *args, int from_tty)
3130 {
3131 cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, "");
3132 }
3133
3134 /* The "record bts buffer-size" show value function. */
3135
3136 static void
3137 show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
3138 struct cmd_list_element *c,
3139 const char *value)
3140 {
3141 fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
3142 value);
3143 }
3144
3145 /* The "record pt buffer-size" show value function. */
3146
3147 static void
3148 show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
3149 struct cmd_list_element *c,
3150 const char *value)
3151 {
3152 fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
3153 value);
3154 }
3155
3156 /* Initialize btrace commands. */
3157
3158 void
3159 _initialize_record_btrace (void)
3160 {
3161 add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
3162 _("Start branch trace recording."), &record_btrace_cmdlist,
3163 "record btrace ", 0, &record_cmdlist);
3164 add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
3165
3166 add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
3167 _("\
3168 Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
3169 The processor stores a from/to record for each branch into a cyclic buffer.\n\
3170 This format may not be available on all processors."),
3171 &record_btrace_cmdlist);
3172 add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);
3173
3174 add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
3175 _("\
3176 Start branch trace recording in Intel Processor Trace format.\n\n\
3177 This format may not be available on all processors."),
3178 &record_btrace_cmdlist);
3179 add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);
3180
3181 add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
3182 _("Set record options"), &set_record_btrace_cmdlist,
3183 "set record btrace ", 0, &set_record_cmdlist);
3184
3185 add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
3186 _("Show record options"), &show_record_btrace_cmdlist,
3187 "show record btrace ", 0, &show_record_cmdlist);
3188
3189 add_setshow_enum_cmd ("replay-memory-access", no_class,
3190 replay_memory_access_types, &replay_memory_access, _("\
3191 Set what memory accesses are allowed during replay."), _("\
3192 Show what memory accesses are allowed during replay."),
3193 _("Default is READ-ONLY.\n\n\
3194 The btrace record target does not trace data.\n\
3195 The memory therefore corresponds to the live target and not \
3196 to the current replay position.\n\n\
3197 When READ-ONLY, allow accesses to read-only memory during replay.\n\
3198 When READ-WRITE, allow accesses to read-only and read-write memory during \
3199 replay."),
3200 NULL, cmd_show_replay_memory_access,
3201 &set_record_btrace_cmdlist,
3202 &show_record_btrace_cmdlist);
3203
3204 add_prefix_cmd ("cpu", class_support, cmd_set_record_btrace_cpu,
3205 _("\
3206 Set the cpu to be used for trace decode.\n\n\
3207 The format is \"VENDOR:IDENTIFIER\" or \"none\" or \"auto\" (default).\n\
3208 For vendor \"intel\" the format is \"FAMILY/MODEL[/STEPPING]\".\n\n\
3209 When decoding branch trace, enable errata workarounds for the specified cpu.\n\
3210 The default is \"auto\", which uses the cpu on which the trace was recorded.\n\
3211 When GDB does not support that cpu, this option can be used to enable\n\
3212 workarounds for a similar cpu that GDB supports.\n\n\
3213 When set to \"none\", errata workarounds are disabled."),
3214 &set_record_btrace_cpu_cmdlist,
3215 _("set record btrace cpu "), 1,
3216 &set_record_btrace_cmdlist);
3217
3218 add_cmd ("auto", class_support, cmd_set_record_btrace_cpu_auto, _("\
3219 Automatically determine the cpu to be used for trace decode."),
3220 &set_record_btrace_cpu_cmdlist);
3221
3222 add_cmd ("none", class_support, cmd_set_record_btrace_cpu_none, _("\
3223 Do not enable errata workarounds for trace decode."),
3224 &set_record_btrace_cpu_cmdlist);
3225
3226 add_cmd ("cpu", class_support, cmd_show_record_btrace_cpu, _("\
3227 Show the cpu to be used for trace decode."),
3228 &show_record_btrace_cmdlist);
3229
3230 add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
3231 _("Set record btrace bts options"),
3232 &set_record_btrace_bts_cmdlist,
3233 "set record btrace bts ", 0, &set_record_btrace_cmdlist);
3234
3235 add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
3236 _("Show record btrace bts options"),
3237 &show_record_btrace_bts_cmdlist,
3238 "show record btrace bts ", 0, &show_record_btrace_cmdlist);
3239
3240 add_setshow_uinteger_cmd ("buffer-size", no_class,
3241 &record_btrace_conf.bts.size,
3242 _("Set the record/replay bts buffer size."),
3243 _("Show the record/replay bts buffer size."), _("\
3244 When starting recording request a trace buffer of this size. \
3245 The actual buffer size may differ from the requested size. \
3246 Use \"info record\" to see the actual buffer size.\n\n\
3247 Bigger buffers allow longer recording but also take more time to process \
3248 the recorded execution trace.\n\n\
3249 The trace buffer size may not be changed while recording."), NULL,
3250 show_record_bts_buffer_size_value,
3251 &set_record_btrace_bts_cmdlist,
3252 &show_record_btrace_bts_cmdlist);
3253
3254 add_prefix_cmd ("pt", class_support, cmd_set_record_btrace_pt,
3255 _("Set record btrace pt options"),
3256 &set_record_btrace_pt_cmdlist,
3257 "set record btrace pt ", 0, &set_record_btrace_cmdlist);
3258
3259 add_prefix_cmd ("pt", class_support, cmd_show_record_btrace_pt,
3260 _("Show record btrace pt options"),
3261 &show_record_btrace_pt_cmdlist,
3262 "show record btrace pt ", 0, &show_record_btrace_cmdlist);
3263
3264 add_setshow_uinteger_cmd ("buffer-size", no_class,
3265 &record_btrace_conf.pt.size,
3266 _("Set the record/replay pt buffer size."),
3267 _("Show the record/replay pt buffer size."), _("\
3268 Bigger buffers allow longer recording but also take more time to process \
3269 the recorded execution.\n\
3270 The actual buffer size may differ from the requested size. Use \"info record\" \
3271 to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
3272 &set_record_btrace_pt_cmdlist,
3273 &show_record_btrace_pt_cmdlist);
3274
3275 add_target (record_btrace_target_info, record_btrace_target_open);
3276
3277 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
3278 xcalloc, xfree);
3279
3280 record_btrace_conf.bts.size = 64 * 1024;
3281 record_btrace_conf.pt.size = 16 * 1024;
3282 }