1 /* Branch trace support for GDB, the GNU debugger.
2
3 Copyright (C) 2013-2018 Free Software Foundation, Inc.
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include "record.h"
24 #include "record-btrace.h"
25 #include "gdbthread.h"
26 #include "target.h"
27 #include "gdbcmd.h"
28 #include "disasm.h"
29 #include "observable.h"
30 #include "cli/cli-utils.h"
31 #include "source.h"
32 #include "ui-out.h"
33 #include "symtab.h"
34 #include "filenames.h"
35 #include "regcache.h"
36 #include "frame-unwind.h"
37 #include "hashtab.h"
38 #include "infrun.h"
39 #include "event-loop.h"
40 #include "inf-loop.h"
41 #include "vec.h"
42 #include "inferior.h"
43 #include <algorithm>
44
45 static const target_info record_btrace_target_info = {
46 "record-btrace",
47 N_("Branch tracing target"),
48 N_("Collect control-flow trace and provide the execution history.")
49 };
50
51 /* The target_ops of record-btrace. */
52
53 class record_btrace_target final : public target_ops
54 {
55 public:
56 record_btrace_target ()
57 { to_stratum = record_stratum; }
58
59 const target_info &info () const override
60 { return record_btrace_target_info; }
61
62 void close () override;
63 void async (int) override;
64
65 void detach (inferior *inf, int from_tty) override
66 { record_detach (this, inf, from_tty); }
67
68 void disconnect (const char *, int) override;
69
70 void mourn_inferior () override
71 { record_mourn_inferior (this); }
72
73 void kill () override
74 { record_kill (this); }
75
76 enum record_method record_method (ptid_t ptid) override;
77
78 void stop_recording () override;
79 void info_record () override;
80
81 void insn_history (int size, gdb_disassembly_flags flags) override;
82 void insn_history_from (ULONGEST from, int size,
83 gdb_disassembly_flags flags) override;
84 void insn_history_range (ULONGEST begin, ULONGEST end,
85 gdb_disassembly_flags flags) override;
86 void call_history (int size, record_print_flags flags) override;
87 void call_history_from (ULONGEST begin, int size, record_print_flags flags)
88 override;
89 void call_history_range (ULONGEST begin, ULONGEST end, record_print_flags flags)
90 override;
91
92 bool record_is_replaying (ptid_t ptid) override;
93 bool record_will_replay (ptid_t ptid, int dir) override;
94 void record_stop_replaying () override;
95
96 enum target_xfer_status xfer_partial (enum target_object object,
97 const char *annex,
98 gdb_byte *readbuf,
99 const gdb_byte *writebuf,
100 ULONGEST offset, ULONGEST len,
101 ULONGEST *xfered_len) override;
102
103 int insert_breakpoint (struct gdbarch *,
104 struct bp_target_info *) override;
105 int remove_breakpoint (struct gdbarch *, struct bp_target_info *,
106 enum remove_bp_reason) override;
107
108 void fetch_registers (struct regcache *, int) override;
109
110 void store_registers (struct regcache *, int) override;
111 void prepare_to_store (struct regcache *) override;
112
113 const struct frame_unwind *get_unwinder () override;
114
115 const struct frame_unwind *get_tailcall_unwinder () override;
116
117 void commit_resume () override;
118 void resume (ptid_t, int, enum gdb_signal) override;
119 ptid_t wait (ptid_t, struct target_waitstatus *, int) override;
120
121 void stop (ptid_t) override;
122 void update_thread_list () override;
123 bool thread_alive (ptid_t ptid) override;
124 void goto_record_begin () override;
125 void goto_record_end () override;
126 void goto_record (ULONGEST insn) override;
127
128 bool can_execute_reverse () override;
129
130 bool stopped_by_sw_breakpoint () override;
131 bool supports_stopped_by_sw_breakpoint () override;
132
133 bool stopped_by_hw_breakpoint () override;
134 bool supports_stopped_by_hw_breakpoint () override;
135
136 enum exec_direction_kind execution_direction () override;
137 void prepare_to_generate_core () override;
138 void done_generating_core () override;
139 };
140
141 /* The single instance of the record-btrace target. */
142 static record_btrace_target record_btrace_ops;
143
144
145 /* Token associated with a new-thread observer enabling branch tracing
146 for the new thread. */
147 static const gdb::observers::token record_btrace_thread_observer_token;
148
149 /* Memory access types used in set/show record btrace replay-memory-access. */
150 static const char replay_memory_access_read_only[] = "read-only";
151 static const char replay_memory_access_read_write[] = "read-write";
152 static const char *const replay_memory_access_types[] =
153 {
154 replay_memory_access_read_only,
155 replay_memory_access_read_write,
156 NULL
157 };
158
159 /* The currently allowed replay memory access type. */
160 static const char *replay_memory_access = replay_memory_access_read_only;
161
162 /* The cpu state kinds. */
163 enum record_btrace_cpu_state_kind
164 {
165 CS_AUTO,
166 CS_NONE,
167 CS_CPU
168 };
169
170 /* The current cpu state. */
171 static enum record_btrace_cpu_state_kind record_btrace_cpu_state = CS_AUTO;
172
173 /* The current cpu for trace decode. */
174 static struct btrace_cpu record_btrace_cpu;
175
176 /* Command lists for "set/show record btrace". */
177 static struct cmd_list_element *set_record_btrace_cmdlist;
178 static struct cmd_list_element *show_record_btrace_cmdlist;
179
180 /* The execution direction of the last resume we got. See record-full.c. */
181 static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;
182
183 /* The async event handler for reverse/replay execution. */
184 static struct async_event_handler *record_btrace_async_inferior_event_handler;
185
186 /* A flag indicating that we are currently generating a core file. */
187 static int record_btrace_generating_corefile;
188
189 /* The current branch trace configuration. */
190 static struct btrace_config record_btrace_conf;
191
192 /* Command list for "record btrace". */
193 static struct cmd_list_element *record_btrace_cmdlist;
194
195 /* Command lists for "set/show record btrace bts". */
196 static struct cmd_list_element *set_record_btrace_bts_cmdlist;
197 static struct cmd_list_element *show_record_btrace_bts_cmdlist;
198
199 /* Command lists for "set/show record btrace pt". */
200 static struct cmd_list_element *set_record_btrace_pt_cmdlist;
201 static struct cmd_list_element *show_record_btrace_pt_cmdlist;
202
203 /* Command list for "set record btrace cpu". */
204 static struct cmd_list_element *set_record_btrace_cpu_cmdlist;
205
206 /* Print a record-btrace debug message. Use do ... while (0) to avoid
207 ambiguities when used in if statements. */
208
209 #define DEBUG(msg, args...) \
210 do \
211 { \
212 if (record_debug != 0) \
213 fprintf_unfiltered (gdb_stdlog, \
214 "[record-btrace] " msg "\n", ##args); \
215 } \
216 while (0)
217
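/* Illustrative usage (not part of the original file): thanks to the
   do ... while (0) wrapper, DEBUG expands to a single statement and can
   be used safely in an unbraced if/else:

     if (tp != NULL)
       DEBUG ("thread %s", print_thread_id (tp));
     else
       DEBUG ("no thread");

   With a plain braced block instead of the wrapper, the semicolon after
   DEBUG (...) would terminate the if statement and the else would not
   compile. */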
218
219 /* Return the cpu configured by the user. Returns NULL if the cpu was
220 configured as auto. */
221 const struct btrace_cpu *
222 record_btrace_get_cpu (void)
223 {
224 switch (record_btrace_cpu_state)
225 {
226 case CS_AUTO:
227 return nullptr;
228
229 case CS_NONE:
230 record_btrace_cpu.vendor = CV_UNKNOWN;
231 /* Fall through. */
232 case CS_CPU:
233 return &record_btrace_cpu;
234 }
235
236 error (_("Internal error: bad record btrace cpu state."));
237 }
238
239 /* Update the branch trace for the current thread and return a pointer to its
240 thread_info.
241
242 Throws an error if there is no thread or no trace. This function never
243 returns NULL. */
244
245 static struct thread_info *
246 require_btrace_thread (void)
247 {
248 DEBUG ("require");
249
250 if (inferior_ptid == null_ptid)
251 error (_("No thread."));
252
253 thread_info *tp = inferior_thread ();
254
255 validate_registers_access ();
256
257 btrace_fetch (tp, record_btrace_get_cpu ());
258
259 if (btrace_is_empty (tp))
260 error (_("No trace."));
261
262 return tp;
263 }
264
265 /* Update the branch trace for the current thread and return a pointer to its
266 branch trace information struct.
267
268 Throws an error if there is no thread or no trace. This function never
269 returns NULL. */
270
271 static struct btrace_thread_info *
272 require_btrace (void)
273 {
274 struct thread_info *tp;
275
276 tp = require_btrace_thread ();
277
278 return &tp->btrace;
279 }
280
281 /* Enable branch tracing for one thread. Warn on errors. */
282
283 static void
284 record_btrace_enable_warn (struct thread_info *tp)
285 {
286 TRY
287 {
288 btrace_enable (tp, &record_btrace_conf);
289 }
290 CATCH (error, RETURN_MASK_ERROR)
291 {
292 warning ("%s", error.message);
293 }
294 END_CATCH
295 }
296
297 /* Enable automatic tracing of new threads. */
298
299 static void
300 record_btrace_auto_enable (void)
301 {
302 DEBUG ("attach thread observer");
303
304 gdb::observers::new_thread.attach (record_btrace_enable_warn,
305 record_btrace_thread_observer_token);
306 }
307
308 /* Disable automatic tracing of new threads. */
309
310 static void
311 record_btrace_auto_disable (void)
312 {
313 DEBUG ("detach thread observer");
314
315 gdb::observers::new_thread.detach (record_btrace_thread_observer_token);
316 }
317
318 /* The record-btrace async event handler function. */
319
320 static void
321 record_btrace_handle_async_inferior_event (gdb_client_data data)
322 {
323 inferior_event_handler (INF_REG_EVENT, NULL);
324 }
325
326 /* See record-btrace.h. */
327
328 void
329 record_btrace_push_target (void)
330 {
331 const char *format;
332
333 record_btrace_auto_enable ();
334
335 push_target (&record_btrace_ops);
336
337 record_btrace_async_inferior_event_handler
338 = create_async_event_handler (record_btrace_handle_async_inferior_event,
339 NULL);
340 record_btrace_generating_corefile = 0;
341
342 format = btrace_format_short_string (record_btrace_conf.format);
343 gdb::observers::record_changed.notify (current_inferior (), 1, "btrace", format);
344 }
345
346 /* Disable btrace on a set of threads on scope exit. */
347
348 struct scoped_btrace_disable
349 {
350 scoped_btrace_disable () = default;
351
352 DISABLE_COPY_AND_ASSIGN (scoped_btrace_disable);
353
354 ~scoped_btrace_disable ()
355 {
356 for (thread_info *tp : m_threads)
357 btrace_disable (tp);
358 }
359
360 void add_thread (thread_info *thread)
361 {
362 m_threads.push_front (thread);
363 }
364
365 void discard ()
366 {
367 m_threads.clear ();
368 }
369
370 private:
371 std::forward_list<thread_info *> m_threads;
372 };
373
374 /* Open target record-btrace. */
375
376 static void
377 record_btrace_target_open (const char *args, int from_tty)
378 {
379 /* If we fail to enable btrace for one thread, disable it for the threads for
380 which it was successfully enabled. */
381 scoped_btrace_disable btrace_disable;
382
383 DEBUG ("open");
384
385 record_preopen ();
386
387 if (!target_has_execution)
388 error (_("The program is not being run."));
389
390 for (thread_info *tp : all_non_exited_threads ())
391 if (args == NULL || *args == 0 || number_is_in_list (args, tp->global_num))
392 {
393 btrace_enable (tp, &record_btrace_conf);
394
395 btrace_disable.add_thread (tp);
396 }
397
398 record_btrace_push_target ();
399
400 btrace_disable.discard ();
401 }
402
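/* Illustrative usage, inferred from the args handling above: with no
   arguments, "target record-btrace" enables tracing for all non-exited
   threads; with a number list such as "1,3-5", only the threads whose
   global number is in the list get tracing enabled. */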
403 /* The stop_recording method of target record-btrace. */
404
405 void
406 record_btrace_target::stop_recording ()
407 {
408 DEBUG ("stop recording");
409
410 record_btrace_auto_disable ();
411
412 for (thread_info *tp : all_non_exited_threads ())
413 if (tp->btrace.target != NULL)
414 btrace_disable (tp);
415 }
416
417 /* The disconnect method of target record-btrace. */
418
419 void
420 record_btrace_target::disconnect (const char *args,
421 int from_tty)
422 {
423 struct target_ops *beneath = this->beneath ();
424
425 /* Do not stop recording, just clean up GDB side. */
426 unpush_target (this);
427
428 /* Forward disconnect. */
429 beneath->disconnect (args, from_tty);
430 }
431
432 /* The close method of target record-btrace. */
433
434 void
435 record_btrace_target::close ()
436 {
437 if (record_btrace_async_inferior_event_handler != NULL)
438 delete_async_event_handler (&record_btrace_async_inferior_event_handler);
439
440 /* Make sure automatic recording gets disabled even if we did not stop
441 recording before closing the record-btrace target. */
442 record_btrace_auto_disable ();
443
444 /* We should have already stopped recording.
445 Tear down btrace in case we have not. */
446 for (thread_info *tp : all_non_exited_threads ())
447 btrace_teardown (tp);
448 }
449
450 /* The async method of target record-btrace. */
451
452 void
453 record_btrace_target::async (int enable)
454 {
455 if (enable)
456 mark_async_event_handler (record_btrace_async_inferior_event_handler);
457 else
458 clear_async_event_handler (record_btrace_async_inferior_event_handler);
459
460 this->beneath ()->async (enable);
461 }
462
463 /* Adjust *SIZE and return a human-readable size suffix. */
464
465 static const char *
466 record_btrace_adjust_size (unsigned int *size)
467 {
468 unsigned int sz;
469
470 sz = *size;
471
472 if ((sz & ((1u << 30) - 1)) == 0)
473 {
474 *size = sz >> 30;
475 return "GB";
476 }
477 else if ((sz & ((1u << 20) - 1)) == 0)
478 {
479 *size = sz >> 20;
480 return "MB";
481 }
482 else if ((sz & ((1u << 10) - 1)) == 0)
483 {
484 *size = sz >> 10;
485 return "kB";
486 }
487 else
488 return "";
489 }
490
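/* Worked example (illustrative): the bit masks above make the function
   rescale only when the size is an exact multiple of the unit.

     unsigned int size = 4u << 20;                  4194304
     record_btrace_adjust_size (&size);             size == 4, "MB"

     size = 3072;                                   3 * 1024
     record_btrace_adjust_size (&size);             size == 3, "kB"

     size = 1536;                                   1.5 * 1024
     record_btrace_adjust_size (&size);             size == 1536, "" */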
491 /* Print a BTS configuration. */
492
493 static void
494 record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
495 {
496 const char *suffix;
497 unsigned int size;
498
499 size = conf->size;
500 if (size > 0)
501 {
502 suffix = record_btrace_adjust_size (&size);
503 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
504 }
505 }
506
507 /* Print an Intel Processor Trace configuration. */
508
509 static void
510 record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
511 {
512 const char *suffix;
513 unsigned int size;
514
515 size = conf->size;
516 if (size > 0)
517 {
518 suffix = record_btrace_adjust_size (&size);
519 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
520 }
521 }
522
523 /* Print a branch tracing configuration. */
524
525 static void
526 record_btrace_print_conf (const struct btrace_config *conf)
527 {
528 printf_unfiltered (_("Recording format: %s.\n"),
529 btrace_format_string (conf->format));
530
531 switch (conf->format)
532 {
533 case BTRACE_FORMAT_NONE:
534 return;
535
536 case BTRACE_FORMAT_BTS:
537 record_btrace_print_bts_conf (&conf->bts);
538 return;
539
540 case BTRACE_FORMAT_PT:
541 record_btrace_print_pt_conf (&conf->pt);
542 return;
543 }
544
545 internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
546 }
547
548 /* The info_record method of target record-btrace. */
549
550 void
551 record_btrace_target::info_record ()
552 {
553 struct btrace_thread_info *btinfo;
554 const struct btrace_config *conf;
555 struct thread_info *tp;
556 unsigned int insns, calls, gaps;
557
558 DEBUG ("info");
559
560 tp = find_thread_ptid (inferior_ptid);
561 if (tp == NULL)
562 error (_("No thread."));
563
564 validate_registers_access ();
565
566 btinfo = &tp->btrace;
567
568 conf = ::btrace_conf (btinfo);
569 if (conf != NULL)
570 record_btrace_print_conf (conf);
571
572 btrace_fetch (tp, record_btrace_get_cpu ());
573
574 insns = 0;
575 calls = 0;
576 gaps = 0;
577
578 if (!btrace_is_empty (tp))
579 {
580 struct btrace_call_iterator call;
581 struct btrace_insn_iterator insn;
582
583 btrace_call_end (&call, btinfo);
584 btrace_call_prev (&call, 1);
585 calls = btrace_call_number (&call);
586
587 btrace_insn_end (&insn, btinfo);
588 insns = btrace_insn_number (&insn);
589
590 /* If the last instruction is not a gap, it is the current instruction
591 that is not actually part of the record. */
592 if (btrace_insn_get (&insn) != NULL)
593 insns -= 1;
594
595 gaps = btinfo->ngaps;
596 }
597
598 printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
599 "for thread %s (%s).\n"), insns, calls, gaps,
600 print_thread_id (tp), target_pid_to_str (tp->ptid));
601
602 if (btrace_is_replaying (tp))
603 printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
604 btrace_insn_number (btinfo->replay));
605 }
606
607 /* Print a decode error. */
608
609 static void
610 btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
611 enum btrace_format format)
612 {
613 const char *errstr = btrace_decode_error (format, errcode);
614
615 uiout->text (_("["));
616 /* ERRCODE > 0 indicates notifications on BTRACE_FORMAT_PT. */
617 if (!(format == BTRACE_FORMAT_PT && errcode > 0))
618 {
619 uiout->text (_("decode error ("));
620 uiout->field_int ("errcode", errcode);
621 uiout->text (_("): "));
622 }
623 uiout->text (errstr);
624 uiout->text (_("]\n"));
625 }
626
627 /* Print an unsigned int. */
628
629 static void
630 ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
631 {
632 uiout->field_fmt (fld, "%u", val);
633 }
634
635 /* A range of source lines. */
636
637 struct btrace_line_range
638 {
639 /* The symtab this line is from. */
640 struct symtab *symtab;
641
642 /* The first line (inclusive). */
643 int begin;
644
645 /* The last line (exclusive). */
646 int end;
647 };
648
649 /* Construct a line range. */
650
651 static struct btrace_line_range
652 btrace_mk_line_range (struct symtab *symtab, int begin, int end)
653 {
654 struct btrace_line_range range;
655
656 range.symtab = symtab;
657 range.begin = begin;
658 range.end = end;
659
660 return range;
661 }
662
663 /* Add a line to a line range. */
664
665 static struct btrace_line_range
666 btrace_line_range_add (struct btrace_line_range range, int line)
667 {
668 if (range.end <= range.begin)
669 {
670 /* This is the first entry. */
671 range.begin = line;
672 range.end = line + 1;
673 }
674 else if (line < range.begin)
675 range.begin = line;
676 else if (range.end <= line)
677 range.end = line + 1;
678
679 return range;
680 }
681
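/* Worked example (illustrative), using the exclusive upper bound:

     range = btrace_mk_line_range (symtab, 0, 0);    empty: [0; 0)
     range = btrace_line_range_add (range, 10);      [10; 11)
     range = btrace_line_range_add (range, 12);      [10; 13)
     range = btrace_line_range_add (range, 9);       [9; 13) */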
682 /* Return non-zero if RANGE is empty, zero otherwise. */
683
684 static int
685 btrace_line_range_is_empty (struct btrace_line_range range)
686 {
687 return range.end <= range.begin;
688 }
689
690 /* Return non-zero if LHS contains RHS, zero otherwise. */
691
692 static int
693 btrace_line_range_contains_range (struct btrace_line_range lhs,
694 struct btrace_line_range rhs)
695 {
696 return ((lhs.symtab == rhs.symtab)
697 && (lhs.begin <= rhs.begin)
698 && (rhs.end <= lhs.end));
699 }
700
701 /* Find the line range associated with PC. */
702
703 static struct btrace_line_range
704 btrace_find_line_range (CORE_ADDR pc)
705 {
706 struct btrace_line_range range;
707 struct linetable_entry *lines;
708 struct linetable *ltable;
709 struct symtab *symtab;
710 int nlines, i;
711
712 symtab = find_pc_line_symtab (pc);
713 if (symtab == NULL)
714 return btrace_mk_line_range (NULL, 0, 0);
715
716 ltable = SYMTAB_LINETABLE (symtab);
717 if (ltable == NULL)
718 return btrace_mk_line_range (symtab, 0, 0);
719
720 nlines = ltable->nitems;
721 lines = ltable->item;
722 if (nlines <= 0)
723 return btrace_mk_line_range (symtab, 0, 0);
724
725 range = btrace_mk_line_range (symtab, 0, 0);
726 for (i = 0; i < nlines - 1; i++)
727 {
728 if ((lines[i].pc == pc) && (lines[i].line != 0))
729 range = btrace_line_range_add (range, lines[i].line);
730 }
731
732 return range;
733 }
734
735 /* Print source lines in LINES to UIOUT.
736
737 SRC_AND_ASM_TUPLE and ASM_LIST are emitters for the last source line and
738 the instructions corresponding to that source line. When printing a new
739 source line, we reset the open emitters and open new ones for the new
740 source line. If the source line range in LINES is not empty, this function
741 will leave the emitters for the last printed source line open so
742 instructions can be added to them. */
743
744 static void
745 btrace_print_lines (struct btrace_line_range lines, struct ui_out *uiout,
746 gdb::optional<ui_out_emit_tuple> *src_and_asm_tuple,
747 gdb::optional<ui_out_emit_list> *asm_list,
748 gdb_disassembly_flags flags)
749 {
750 print_source_lines_flags psl_flags;
751
752 if (flags & DISASSEMBLY_FILENAME)
753 psl_flags |= PRINT_SOURCE_LINES_FILENAME;
754
755 for (int line = lines.begin; line < lines.end; ++line)
756 {
757 asm_list->reset ();
758
759 src_and_asm_tuple->emplace (uiout, "src_and_asm_line");
760
761 print_source_lines (lines.symtab, line, line + 1, psl_flags);
762
763 asm_list->emplace (uiout, "line_asm_insn");
764 }
765 }
766
767 /* Disassemble a section of the recorded instruction trace. */
768
769 static void
770 btrace_insn_history (struct ui_out *uiout,
771 const struct btrace_thread_info *btinfo,
772 const struct btrace_insn_iterator *begin,
773 const struct btrace_insn_iterator *end,
774 gdb_disassembly_flags flags)
775 {
776 DEBUG ("itrace (0x%x): [%u; %u)", (unsigned) flags,
777 btrace_insn_number (begin), btrace_insn_number (end));
778
779 flags |= DISASSEMBLY_SPECULATIVE;
780
781 struct gdbarch *gdbarch = target_gdbarch ();
782 btrace_line_range last_lines = btrace_mk_line_range (NULL, 0, 0);
783
784 ui_out_emit_list list_emitter (uiout, "asm_insns");
785
786 gdb::optional<ui_out_emit_tuple> src_and_asm_tuple;
787 gdb::optional<ui_out_emit_list> asm_list;
788
789 gdb_pretty_print_disassembler disasm (gdbarch);
790
791 for (btrace_insn_iterator it = *begin; btrace_insn_cmp (&it, end) != 0;
792 btrace_insn_next (&it, 1))
793 {
794 const struct btrace_insn *insn;
795
796 insn = btrace_insn_get (&it);
797
798 /* A NULL instruction indicates a gap in the trace. */
799 if (insn == NULL)
800 {
801 const struct btrace_config *conf;
802
803 conf = btrace_conf (btinfo);
804
805 /* We have trace so we must have a configuration. */
806 gdb_assert (conf != NULL);
807
808 uiout->field_fmt ("insn-number", "%u",
809 btrace_insn_number (&it));
810 uiout->text ("\t");
811
812 btrace_ui_out_decode_error (uiout, btrace_insn_get_error (&it),
813 conf->format);
814 }
815 else
816 {
817 struct disasm_insn dinsn;
818
819 if ((flags & DISASSEMBLY_SOURCE) != 0)
820 {
821 struct btrace_line_range lines;
822
823 lines = btrace_find_line_range (insn->pc);
824 if (!btrace_line_range_is_empty (lines)
825 && !btrace_line_range_contains_range (last_lines, lines))
826 {
827 btrace_print_lines (lines, uiout, &src_and_asm_tuple, &asm_list,
828 flags);
829 last_lines = lines;
830 }
831 else if (!src_and_asm_tuple.has_value ())
832 {
833 gdb_assert (!asm_list.has_value ());
834
835 src_and_asm_tuple.emplace (uiout, "src_and_asm_line");
836
837 /* No source information. */
838 asm_list.emplace (uiout, "line_asm_insn");
839 }
840
841 gdb_assert (src_and_asm_tuple.has_value ());
842 gdb_assert (asm_list.has_value ());
843 }
844
845 memset (&dinsn, 0, sizeof (dinsn));
846 dinsn.number = btrace_insn_number (&it);
847 dinsn.addr = insn->pc;
848
849 if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
850 dinsn.is_speculative = 1;
851
852 disasm.pretty_print_insn (uiout, &dinsn, flags);
853 }
854 }
855 }
856
857 /* The insn_history method of target record-btrace. */
858
859 void
860 record_btrace_target::insn_history (int size, gdb_disassembly_flags flags)
861 {
862 struct btrace_thread_info *btinfo;
863 struct btrace_insn_history *history;
864 struct btrace_insn_iterator begin, end;
865 struct ui_out *uiout;
866 unsigned int context, covered;
867
868 uiout = current_uiout;
869 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
870 context = abs (size);
871 if (context == 0)
872 error (_("Bad record instruction-history-size."));
873
874 btinfo = require_btrace ();
875 history = btinfo->insn_history;
876 if (history == NULL)
877 {
878 struct btrace_insn_iterator *replay;
879
880 DEBUG ("insn-history (0x%x): %d", (unsigned) flags, size);
881
882 /* If we're replaying, we start at the replay position. Otherwise, we
883 start at the tail of the trace. */
884 replay = btinfo->replay;
885 if (replay != NULL)
886 begin = *replay;
887 else
888 btrace_insn_end (&begin, btinfo);
889
890 /* We start from here and expand in the requested direction. Then we
891 expand in the other direction, as well, to fill up any remaining
892 context. */
893 end = begin;
894 if (size < 0)
895 {
896 /* We want the current position covered, as well. */
897 covered = btrace_insn_next (&end, 1);
898 covered += btrace_insn_prev (&begin, context - covered);
899 covered += btrace_insn_next (&end, context - covered);
900 }
901 else
902 {
903 covered = btrace_insn_next (&end, context);
904 covered += btrace_insn_prev (&begin, context - covered);
905 }
906 }
907 else
908 {
909 begin = history->begin;
910 end = history->end;
911
912 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", (unsigned) flags, size,
913 btrace_insn_number (&begin), btrace_insn_number (&end));
914
915 if (size < 0)
916 {
917 end = begin;
918 covered = btrace_insn_prev (&begin, context);
919 }
920 else
921 {
922 begin = end;
923 covered = btrace_insn_next (&end, context);
924 }
925 }
926
927 if (covered > 0)
928 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
929 else
930 {
931 if (size < 0)
932 printf_unfiltered (_("At the start of the branch trace record.\n"));
933 else
934 printf_unfiltered (_("At the end of the branch trace record.\n"));
935 }
936
937 btrace_set_insn_history (btinfo, &begin, &end);
938 }
939
940 /* The insn_history_range method of target record-btrace. */
941
942 void
943 record_btrace_target::insn_history_range (ULONGEST from, ULONGEST to,
944 gdb_disassembly_flags flags)
945 {
946 struct btrace_thread_info *btinfo;
947 struct btrace_insn_iterator begin, end;
948 struct ui_out *uiout;
949 unsigned int low, high;
950 int found;
951
952 uiout = current_uiout;
953 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
954 low = from;
955 high = to;
956
957 DEBUG ("insn-history (0x%x): [%u; %u)", (unsigned) flags, low, high);
958
959 /* Check for wrap-arounds. */
960 if (low != from || high != to)
961 error (_("Bad range."));
962
963 if (high < low)
964 error (_("Bad range."));
965
966 btinfo = require_btrace ();
967
968 found = btrace_find_insn_by_number (&begin, btinfo, low);
969 if (found == 0)
970 error (_("Range out of bounds."));
971
972 found = btrace_find_insn_by_number (&end, btinfo, high);
973 if (found == 0)
974 {
975 /* Silently truncate the range. */
976 btrace_insn_end (&end, btinfo);
977 }
978 else
979 {
980 /* We want both begin and end to be inclusive. */
981 btrace_insn_next (&end, 1);
982 }
983
984 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
985 btrace_set_insn_history (btinfo, &begin, &end);
986 }
987
988 /* The insn_history_from method of target record-btrace. */
989
990 void
991 record_btrace_target::insn_history_from (ULONGEST from, int size,
992 gdb_disassembly_flags flags)
993 {
994 ULONGEST begin, end, context;
995
996 context = abs (size);
997 if (context == 0)
998 error (_("Bad record instruction-history-size."));
999
1000 if (size < 0)
1001 {
1002 end = from;
1003
1004 if (from < context)
1005 begin = 0;
1006 else
1007 begin = from - context + 1;
1008 }
1009 else
1010 {
1011 begin = from;
1012 end = from + context - 1;
1013
1014 /* Check for wrap-around. */
1015 if (end < begin)
1016 end = ULONGEST_MAX;
1017 }
1018
1019 insn_history_range (begin, end, flags);
1020 }
1021
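/* Worked example (illustrative): with SIZE = -10 and FROM = 25, the
   computed range is [16; 25]; with SIZE = 10 and FROM = 25, it is
   [25; 34]. Both bounds are inclusive here; insn_history_range takes
   care of making its END bound inclusive. */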
1022 /* Print the instruction number range for a function call history line. */
1023
1024 static void
1025 btrace_call_history_insn_range (struct ui_out *uiout,
1026 const struct btrace_function *bfun)
1027 {
1028 unsigned int begin, end, size;
1029
1030 size = bfun->insn.size ();
1031 gdb_assert (size > 0);
1032
1033 begin = bfun->insn_offset;
1034 end = begin + size - 1;
1035
1036 ui_out_field_uint (uiout, "insn begin", begin);
1037 uiout->text (",");
1038 ui_out_field_uint (uiout, "insn end", end);
1039 }
1040
1041 /* Compute the lowest and highest source line for the instructions in BFUN
1042 and return them in PBEGIN and PEND.
1043 Ignore instructions that can't be mapped to BFUN, e.g. instructions that
1044 result from inlining or macro expansion. */
1045
1046 static void
1047 btrace_compute_src_line_range (const struct btrace_function *bfun,
1048 int *pbegin, int *pend)
1049 {
1050 struct symtab *symtab;
1051 struct symbol *sym;
1052 int begin, end;
1053
1054 begin = INT_MAX;
1055 end = INT_MIN;
1056
1057 sym = bfun->sym;
1058 if (sym == NULL)
1059 goto out;
1060
1061 symtab = symbol_symtab (sym);
1062
1063 for (const btrace_insn &insn : bfun->insn)
1064 {
1065 struct symtab_and_line sal;
1066
1067 sal = find_pc_line (insn.pc, 0);
1068 if (sal.symtab != symtab || sal.line == 0)
1069 continue;
1070
1071 begin = std::min (begin, sal.line);
1072 end = std::max (end, sal.line);
1073 }
1074
1075 out:
1076 *pbegin = begin;
1077 *pend = end;
1078 }
1079
1080 /* Print the source line information for a function call history line. */
1081
1082 static void
1083 btrace_call_history_src_line (struct ui_out *uiout,
1084 const struct btrace_function *bfun)
1085 {
1086 struct symbol *sym;
1087 int begin, end;
1088
1089 sym = bfun->sym;
1090 if (sym == NULL)
1091 return;
1092
1093 uiout->field_string ("file",
1094 symtab_to_filename_for_display (symbol_symtab (sym)));
1095
1096 btrace_compute_src_line_range (bfun, &begin, &end);
1097 if (end < begin)
1098 return;
1099
1100 uiout->text (":");
1101 uiout->field_int ("min line", begin);
1102
1103 if (end == begin)
1104 return;
1105
1106 uiout->text (",");
1107 uiout->field_int ("max line", end);
1108 }
1109
1110 /* Get the name of a branch trace function. */
1111
1112 static const char *
1113 btrace_get_bfun_name (const struct btrace_function *bfun)
1114 {
1115 struct minimal_symbol *msym;
1116 struct symbol *sym;
1117
1118 if (bfun == NULL)
1119 return "??";
1120
1121 msym = bfun->msym;
1122 sym = bfun->sym;
1123
1124 if (sym != NULL)
1125 return SYMBOL_PRINT_NAME (sym);
1126 else if (msym != NULL)
1127 return MSYMBOL_PRINT_NAME (msym);
1128 else
1129 return "??";
1130 }
1131
1132 /* Disassemble a section of the recorded function trace. */
1133
1134 static void
1135 btrace_call_history (struct ui_out *uiout,
1136 const struct btrace_thread_info *btinfo,
1137 const struct btrace_call_iterator *begin,
1138 const struct btrace_call_iterator *end,
1139 int int_flags)
1140 {
1141 struct btrace_call_iterator it;
1142 record_print_flags flags = (enum record_print_flag) int_flags;
1143
1144 DEBUG ("ftrace (0x%x): [%u; %u)", int_flags, btrace_call_number (begin),
1145 btrace_call_number (end));
1146
1147 for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
1148 {
1149 const struct btrace_function *bfun;
1150 struct minimal_symbol *msym;
1151 struct symbol *sym;
1152
1153 bfun = btrace_call_get (&it);
1154 sym = bfun->sym;
1155 msym = bfun->msym;
1156
1157 /* Print the function index. */
1158 ui_out_field_uint (uiout, "index", bfun->number);
1159 uiout->text ("\t");
1160
1161 /* Indicate gaps in the trace. */
1162 if (bfun->errcode != 0)
1163 {
1164 const struct btrace_config *conf;
1165
1166 conf = btrace_conf (btinfo);
1167
1168 /* We have trace so we must have a configuration. */
1169 gdb_assert (conf != NULL);
1170
1171 btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);
1172
1173 continue;
1174 }
1175
1176 if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
1177 {
1178 int level = bfun->level + btinfo->level, i;
1179
1180 for (i = 0; i < level; ++i)
1181 uiout->text (" ");
1182 }
1183
1184 if (sym != NULL)
1185 uiout->field_string ("function", SYMBOL_PRINT_NAME (sym));
1186 else if (msym != NULL)
1187 uiout->field_string ("function", MSYMBOL_PRINT_NAME (msym));
1188 else if (!uiout->is_mi_like_p ())
1189 uiout->field_string ("function", "??");
1190
1191 if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
1192 {
1193 uiout->text (_("\tinst "));
1194 btrace_call_history_insn_range (uiout, bfun);
1195 }
1196
1197 if ((flags & RECORD_PRINT_SRC_LINE) != 0)
1198 {
1199 uiout->text (_("\tat "));
1200 btrace_call_history_src_line (uiout, bfun);
1201 }
1202
1203 uiout->text ("\n");
1204 }
1205 }
1206
1207 /* The call_history method of target record-btrace. */
1208
1209 void
1210 record_btrace_target::call_history (int size, record_print_flags flags)
1211 {
1212 struct btrace_thread_info *btinfo;
1213 struct btrace_call_history *history;
1214 struct btrace_call_iterator begin, end;
1215 struct ui_out *uiout;
1216 unsigned int context, covered;
1217
1218 uiout = current_uiout;
1219 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
1220 context = abs (size);
1221 if (context == 0)
1222 error (_("Bad record function-call-history-size."));
1223
1224 btinfo = require_btrace ();
1225 history = btinfo->call_history;
1226 if (history == NULL)
1227 {
1228 struct btrace_insn_iterator *replay;
1229
1230 DEBUG ("call-history (0x%x): %d", (int) flags, size);
1231
1232 /* If we're replaying, we start at the replay position. Otherwise, we
1233 start at the tail of the trace. */
1234 replay = btinfo->replay;
1235 if (replay != NULL)
1236 {
1237 begin.btinfo = btinfo;
1238 begin.index = replay->call_index;
1239 }
1240 else
1241 btrace_call_end (&begin, btinfo);
1242
1243 /* We start from here and expand in the requested direction. Then we
1244 expand in the other direction, as well, to fill up any remaining
1245 context. */
1246 end = begin;
1247 if (size < 0)
1248 {
1249 /* We want the current position covered, as well. */
1250 covered = btrace_call_next (&end, 1);
1251 covered += btrace_call_prev (&begin, context - covered);
1252 covered += btrace_call_next (&end, context - covered);
1253 }
1254 else
1255 {
1256 covered = btrace_call_next (&end, context);
1257 covered += btrace_call_prev (&begin, context - covered);
1258 }
1259 }
1260 else
1261 {
1262 begin = history->begin;
1263 end = history->end;
1264
1265 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", (int) flags, size,
1266 btrace_call_number (&begin), btrace_call_number (&end));
1267
1268 if (size < 0)
1269 {
1270 end = begin;
1271 covered = btrace_call_prev (&begin, context);
1272 }
1273 else
1274 {
1275 begin = end;
1276 covered = btrace_call_next (&end, context);
1277 }
1278 }
1279
1280 if (covered > 0)
1281 btrace_call_history (uiout, btinfo, &begin, &end, flags);
1282 else
1283 {
1284 if (size < 0)
1285 printf_unfiltered (_("At the start of the branch trace record.\n"));
1286 else
1287 printf_unfiltered (_("At the end of the branch trace record.\n"));
1288 }
1289
1290 btrace_set_call_history (btinfo, &begin, &end);
1291 }
1292
1293 /* The call_history_range method of target record-btrace. */
1294
1295 void
1296 record_btrace_target::call_history_range (ULONGEST from, ULONGEST to,
1297 record_print_flags flags)
1298 {
1299 struct btrace_thread_info *btinfo;
1300 struct btrace_call_iterator begin, end;
1301 struct ui_out *uiout;
1302 unsigned int low, high;
1303 int found;
1304
1305 uiout = current_uiout;
1306 ui_out_emit_tuple tuple_emitter (uiout, "func history");
1307 low = from;
1308 high = to;
1309
1310 DEBUG ("call-history (0x%x): [%u; %u)", (int) flags, low, high);
1311
1312 /* Check for wrap-arounds. */
1313 if (low != from || high != to)
1314 error (_("Bad range."));
1315
1316 if (high < low)
1317 error (_("Bad range."));
1318
1319 btinfo = require_btrace ();
1320
1321 found = btrace_find_call_by_number (&begin, btinfo, low);
1322 if (found == 0)
1323 error (_("Range out of bounds."));
1324
1325 found = btrace_find_call_by_number (&end, btinfo, high);
1326 if (found == 0)
1327 {
1328 /* Silently truncate the range. */
1329 btrace_call_end (&end, btinfo);
1330 }
1331 else
1332 {
1333 /* We want both begin and end to be inclusive. */
1334 btrace_call_next (&end, 1);
1335 }
1336
1337 btrace_call_history (uiout, btinfo, &begin, &end, flags);
1338 btrace_set_call_history (btinfo, &begin, &end);
1339 }
1340
1341 /* The call_history_from method of target record-btrace. */
1342
1343 void
1344 record_btrace_target::call_history_from (ULONGEST from, int size,
1345 record_print_flags flags)
1346 {
1347 ULONGEST begin, end, context;
1348
1349 context = abs (size);
1350 if (context == 0)
1351 error (_("Bad record function-call-history-size."));
1352
1353 if (size < 0)
1354 {
1355 end = from;
1356
1357 if (from < context)
1358 begin = 0;
1359 else
1360 begin = from - context + 1;
1361 }
1362 else
1363 {
1364 begin = from;
1365 end = from + context - 1;
1366
1367 /* Check for wrap-around. */
1368 if (end < begin)
1369 end = ULONGEST_MAX;
1370 }
1371
1372 call_history_range (begin, end, flags);
1373 }
1374
1375 /* The record_method method of target record-btrace. */
1376
1377 enum record_method
1378 record_btrace_target::record_method (ptid_t ptid)
1379 {
1380 struct thread_info * const tp = find_thread_ptid (ptid);
1381
1382 if (tp == NULL)
1383 error (_("No thread."));
1384
1385 if (tp->btrace.target == NULL)
1386 return RECORD_METHOD_NONE;
1387
1388 return RECORD_METHOD_BTRACE;
1389 }
1390
1391 /* The record_is_replaying method of target record-btrace. */
1392
1393 bool
1394 record_btrace_target::record_is_replaying (ptid_t ptid)
1395 {
1396 for (thread_info *tp : all_non_exited_threads (ptid))
1397 if (btrace_is_replaying (tp))
1398 return true;
1399
1400 return false;
1401 }
1402
1403 /* The record_will_replay method of target record-btrace. */
1404
1405 bool
1406 record_btrace_target::record_will_replay (ptid_t ptid, int dir)
1407 {
1408 return dir == EXEC_REVERSE || record_is_replaying (ptid);
1409 }
1410
1411 /* The xfer_partial method of target record-btrace. */
1412
1413 enum target_xfer_status
1414 record_btrace_target::xfer_partial (enum target_object object,
1415 const char *annex, gdb_byte *readbuf,
1416 const gdb_byte *writebuf, ULONGEST offset,
1417 ULONGEST len, ULONGEST *xfered_len)
1418 {
1419 /* Filter out requests that don't make sense during replay. */
1420 if (replay_memory_access == replay_memory_access_read_only
1421 && !record_btrace_generating_corefile
1422 && record_is_replaying (inferior_ptid))
1423 {
1424 switch (object)
1425 {
1426 case TARGET_OBJECT_MEMORY:
1427 {
1428 struct target_section *section;
1429
1430 /* We do not allow writing memory in general. */
1431 if (writebuf != NULL)
1432 {
1433 *xfered_len = len;
1434 return TARGET_XFER_UNAVAILABLE;
1435 }
1436
1437 /* We allow reading readonly memory. */
1438 section = target_section_by_addr (this, offset);
1439 if (section != NULL)
1440 {
1441 /* Check if the section we found is readonly. */
1442 if ((bfd_get_section_flags (section->the_bfd_section->owner,
1443 section->the_bfd_section)
1444 & SEC_READONLY) != 0)
1445 {
1446 /* Truncate the request to fit into this section. */
1447 len = std::min (len, section->endaddr - offset);
1448 break;
1449 }
1450 }
1451
1452 *xfered_len = len;
1453 return TARGET_XFER_UNAVAILABLE;
1454 }
1455 }
1456 }
1457
1458 /* Forward the request. */
1459 return this->beneath ()->xfer_partial (object, annex, readbuf, writebuf,
1460 offset, len, xfered_len);
1461 }
1462
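/* Illustrative scenario (not part of the original file): while replaying
   with "set record btrace replay-memory-access read-only" (the default),
   writes to inferior memory fail with TARGET_XFER_UNAVAILABLE and reads
   succeed only for addresses within SEC_READONLY sections, e.g. .text or
   .rodata. With replay-memory-access set to read-write, the filter above
   is skipped and all requests are forwarded. */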
1463 /* The insert_breakpoint method of target record-btrace. */
1464
1465 int
1466 record_btrace_target::insert_breakpoint (struct gdbarch *gdbarch,
1467 struct bp_target_info *bp_tgt)
1468 {
1469 const char *old;
1470 int ret;
1471
1472 /* Inserting breakpoints requires accessing memory. Allow it for the
1473 duration of this function. */
1474 old = replay_memory_access;
1475 replay_memory_access = replay_memory_access_read_write;
1476
1477 ret = 0;
1478 TRY
1479 {
1480 ret = this->beneath ()->insert_breakpoint (gdbarch, bp_tgt);
1481 }
1482 CATCH (except, RETURN_MASK_ALL)
1483 {
1484 replay_memory_access = old;
1485 throw_exception (except);
1486 }
1487 END_CATCH
1488 replay_memory_access = old;
1489
1490 return ret;
1491 }
1492
1493 /* The remove_breakpoint method of target record-btrace. */
1494
1495 int
1496 record_btrace_target::remove_breakpoint (struct gdbarch *gdbarch,
1497 struct bp_target_info *bp_tgt,
1498 enum remove_bp_reason reason)
1499 {
1500 const char *old;
1501 int ret;
1502
1503 /* Removing breakpoints requires accessing memory. Allow it for the
1504 duration of this function. */
1505 old = replay_memory_access;
1506 replay_memory_access = replay_memory_access_read_write;
1507
1508 ret = 0;
1509 TRY
1510 {
1511 ret = this->beneath ()->remove_breakpoint (gdbarch, bp_tgt, reason);
1512 }
1513 CATCH (except, RETURN_MASK_ALL)
1514 {
1515 replay_memory_access = old;
1516 throw_exception (except);
1517 }
1518 END_CATCH
1519 replay_memory_access = old;
1520
1521 return ret;
1522 }
1523
1524 /* The fetch_registers method of target record-btrace. */
1525
1526 void
1527 record_btrace_target::fetch_registers (struct regcache *regcache, int regno)
1528 {
1529 struct btrace_insn_iterator *replay;
1530 struct thread_info *tp;
1531
1532 tp = find_thread_ptid (regcache->ptid ());
1533 gdb_assert (tp != NULL);
1534
1535 replay = tp->btrace.replay;
1536 if (replay != NULL && !record_btrace_generating_corefile)
1537 {
1538 const struct btrace_insn *insn;
1539 struct gdbarch *gdbarch;
1540 int pcreg;
1541
1542 gdbarch = regcache->arch ();
1543 pcreg = gdbarch_pc_regnum (gdbarch);
1544 if (pcreg < 0)
1545 return;
1546
1547 /* We can only provide the PC register. */
1548 if (regno >= 0 && regno != pcreg)
1549 return;
1550
1551 insn = btrace_insn_get (replay);
1552 gdb_assert (insn != NULL);
1553
1554 regcache->raw_supply (regno, &insn->pc);
1555 }
1556 else
1557 this->beneath ()->fetch_registers (regcache, regno);
1558 }
1559
1560 /* The store_registers method of target record-btrace. */
1561
1562 void
1563 record_btrace_target::store_registers (struct regcache *regcache, int regno)
1564 {
1565 if (!record_btrace_generating_corefile
1566 && record_is_replaying (regcache->ptid ()))
1567 error (_("Cannot write registers while replaying."));
1568
1569 gdb_assert (may_write_registers != 0);
1570
1571 this->beneath ()->store_registers (regcache, regno);
1572 }
1573
1574 /* The prepare_to_store method of target record-btrace. */
1575
1576 void
1577 record_btrace_target::prepare_to_store (struct regcache *regcache)
1578 {
1579 if (!record_btrace_generating_corefile
1580 && record_is_replaying (regcache->ptid ()))
1581 return;
1582
1583 this->beneath ()->prepare_to_store (regcache);
1584 }
1585
1586 /* The branch trace frame cache. */
1587
1588 struct btrace_frame_cache
1589 {
1590 /* The thread. */
1591 struct thread_info *tp;
1592
1593 /* The frame info. */
1594 struct frame_info *frame;
1595
1596 /* The branch trace function segment. */
1597 const struct btrace_function *bfun;
1598 };
1599
1600 /* A struct btrace_frame_cache hash table indexed by NEXT. */
1601
1602 static htab_t bfcache;
1603
1604 /* hash_f for htab_create_alloc of bfcache. */
1605
1606 static hashval_t
1607 bfcache_hash (const void *arg)
1608 {
1609 const struct btrace_frame_cache *cache
1610 = (const struct btrace_frame_cache *) arg;
1611
1612 return htab_hash_pointer (cache->frame);
1613 }
1614
1615 /* eq_f for htab_create_alloc of bfcache. */
1616
1617 static int
1618 bfcache_eq (const void *arg1, const void *arg2)
1619 {
1620 const struct btrace_frame_cache *cache1
1621 = (const struct btrace_frame_cache *) arg1;
1622 const struct btrace_frame_cache *cache2
1623 = (const struct btrace_frame_cache *) arg2;
1624
1625 return cache1->frame == cache2->frame;
1626 }
1627
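/* A sketch of how the cache table is expected to be created (the actual
   htab_create_alloc call lives in this file's initialization code, outside
   this excerpt):

     bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq,
                                  NULL, xcalloc, xfree);

   The table is keyed on the frame_info pointer: bfcache_hash hashes the
   pointer and bfcache_eq compares pointers for identity. */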
1628 /* Create a new btrace frame cache. */
1629
1630 static struct btrace_frame_cache *
1631 bfcache_new (struct frame_info *frame)
1632 {
1633 struct btrace_frame_cache *cache;
1634 void **slot;
1635
1636 cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
1637 cache->frame = frame;
1638
1639 slot = htab_find_slot (bfcache, cache, INSERT);
1640 gdb_assert (*slot == NULL);
1641 *slot = cache;
1642
1643 return cache;
1644 }
1645
1646 /* Extract the branch trace function from a branch trace frame. */
1647
1648 static const struct btrace_function *
1649 btrace_get_frame_function (struct frame_info *frame)
1650 {
1651 const struct btrace_frame_cache *cache;
1652 struct btrace_frame_cache pattern;
1653 void **slot;
1654
1655 pattern.frame = frame;
1656
1657 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1658 if (slot == NULL)
1659 return NULL;
1660
1661 cache = (const struct btrace_frame_cache *) *slot;
1662 return cache->bfun;
1663 }
1664
1665 /* Implement stop_reason method for record_btrace_frame_unwind. */
1666
1667 static enum unwind_stop_reason
1668 record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1669 void **this_cache)
1670 {
1671 const struct btrace_frame_cache *cache;
1672 const struct btrace_function *bfun;
1673
1674 cache = (const struct btrace_frame_cache *) *this_cache;
1675 bfun = cache->bfun;
1676 gdb_assert (bfun != NULL);
1677
1678 if (bfun->up == 0)
1679 return UNWIND_UNAVAILABLE;
1680
1681 return UNWIND_NO_REASON;
1682 }
1683
1684 /* Implement this_id method for record_btrace_frame_unwind. */
1685
1686 static void
1687 record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
1688 struct frame_id *this_id)
1689 {
1690 const struct btrace_frame_cache *cache;
1691 const struct btrace_function *bfun;
1692 struct btrace_call_iterator it;
1693 CORE_ADDR code, special;
1694
1695 cache = (const struct btrace_frame_cache *) *this_cache;
1696
1697 bfun = cache->bfun;
1698 gdb_assert (bfun != NULL);
1699
1700 while (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->prev) != 0)
1701 bfun = btrace_call_get (&it);
1702
1703 code = get_frame_func (this_frame);
1704 special = bfun->number;
1705
1706 *this_id = frame_id_build_unavailable_stack_special (code, special);
1707
1708 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1709 btrace_get_bfun_name (cache->bfun),
1710 core_addr_to_string_nz (this_id->code_addr),
1711 core_addr_to_string_nz (this_id->special_addr));
1712 }
1713
1714 /* Implement prev_register method for record_btrace_frame_unwind. */
1715
1716 static struct value *
1717 record_btrace_frame_prev_register (struct frame_info *this_frame,
1718 void **this_cache,
1719 int regnum)
1720 {
1721 const struct btrace_frame_cache *cache;
1722 const struct btrace_function *bfun, *caller;
1723 struct btrace_call_iterator it;
1724 struct gdbarch *gdbarch;
1725 CORE_ADDR pc;
1726 int pcreg;
1727
1728 gdbarch = get_frame_arch (this_frame);
1729 pcreg = gdbarch_pc_regnum (gdbarch);
1730 if (pcreg < 0 || regnum != pcreg)
1731 throw_error (NOT_AVAILABLE_ERROR,
1732 _("Registers are not available in btrace record history"));
1733
1734 cache = (const struct btrace_frame_cache *) *this_cache;
1735 bfun = cache->bfun;
1736 gdb_assert (bfun != NULL);
1737
1738 if (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->up) == 0)
1739 throw_error (NOT_AVAILABLE_ERROR,
1740 _("No caller in btrace record history"));
1741
1742 caller = btrace_call_get (&it);
1743
1744 if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
1745 pc = caller->insn.front ().pc;
1746 else
1747 {
1748 pc = caller->insn.back ().pc;
1749 pc += gdb_insn_length (gdbarch, pc);
1750 }
1751
1752 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1753 btrace_get_bfun_name (bfun), bfun->level,
1754 core_addr_to_string_nz (pc));
1755
1756 return frame_unwind_got_address (this_frame, regnum, pc);
1757 }
1758
1759 /* Implement sniffer method for record_btrace_frame_unwind. */
1760
1761 static int
1762 record_btrace_frame_sniffer (const struct frame_unwind *self,
1763 struct frame_info *this_frame,
1764 void **this_cache)
1765 {
1766 const struct btrace_function *bfun;
1767 struct btrace_frame_cache *cache;
1768 struct thread_info *tp;
1769 struct frame_info *next;
1770
1771 /* THIS_FRAME does not contain a reference to its thread. */
1772 tp = inferior_thread ();
1773
1774 bfun = NULL;
1775 next = get_next_frame (this_frame);
1776 if (next == NULL)
1777 {
1778 const struct btrace_insn_iterator *replay;
1779
1780 replay = tp->btrace.replay;
1781 if (replay != NULL)
1782 bfun = &replay->btinfo->functions[replay->call_index];
1783 }
1784 else
1785 {
1786 const struct btrace_function *callee;
1787 struct btrace_call_iterator it;
1788
1789 callee = btrace_get_frame_function (next);
1790 if (callee == NULL || (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
1791 return 0;
1792
1793 if (btrace_find_call_by_number (&it, &tp->btrace, callee->up) == 0)
1794 return 0;
1795
1796 bfun = btrace_call_get (&it);
1797 }
1798
1799 if (bfun == NULL)
1800 return 0;
1801
1802 DEBUG ("[frame] sniffed frame for %s on level %d",
1803 btrace_get_bfun_name (bfun), bfun->level);
1804
1805 /* This is our frame. Initialize the frame cache. */
1806 cache = bfcache_new (this_frame);
1807 cache->tp = tp;
1808 cache->bfun = bfun;
1809
1810 *this_cache = cache;
1811 return 1;
1812 }
1813
1814 /* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1815
1816 static int
1817 record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
1818 struct frame_info *this_frame,
1819 void **this_cache)
1820 {
1821 const struct btrace_function *bfun, *callee;
1822 struct btrace_frame_cache *cache;
1823 struct btrace_call_iterator it;
1824 struct frame_info *next;
1825 struct thread_info *tinfo;
1826
1827 next = get_next_frame (this_frame);
1828 if (next == NULL)
1829 return 0;
1830
1831 callee = btrace_get_frame_function (next);
1832 if (callee == NULL)
1833 return 0;
1834
1835 if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1836 return 0;
1837
1838 tinfo = inferior_thread ();
1839 if (btrace_find_call_by_number (&it, &tinfo->btrace, callee->up) == 0)
1840 return 0;
1841
1842 bfun = btrace_call_get (&it);
1843
1844 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1845 btrace_get_bfun_name (bfun), bfun->level);
1846
1847 /* This is our frame. Initialize the frame cache. */
1848 cache = bfcache_new (this_frame);
1849 cache->tp = tinfo;
1850 cache->bfun = bfun;
1851
1852 *this_cache = cache;
1853 return 1;
1854 }
1855
1856 static void
1857 record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
1858 {
1859 struct btrace_frame_cache *cache;
1860 void **slot;
1861
1862 cache = (struct btrace_frame_cache *) this_cache;
1863
1864 slot = htab_find_slot (bfcache, cache, NO_INSERT);
1865 gdb_assert (slot != NULL);
1866
1867 htab_remove_elt (bfcache, cache);
1868 }
1869
1870 /* btrace recording does not store previous memory content, nor the content
1871 of the stack frames. Any unwinding would return erroneous results as the
1872 stack contents no longer match the changed PC value restored from history.
1873 Therefore this unwinder reports any possibly unwound registers as
1874 <unavailable>. */
1875
1876 const struct frame_unwind record_btrace_frame_unwind =
1877 {
1878 NORMAL_FRAME,
1879 record_btrace_frame_unwind_stop_reason,
1880 record_btrace_frame_this_id,
1881 record_btrace_frame_prev_register,
1882 NULL,
1883 record_btrace_frame_sniffer,
1884 record_btrace_frame_dealloc_cache
1885 };
1886
1887 const struct frame_unwind record_btrace_tailcall_frame_unwind =
1888 {
1889 TAILCALL_FRAME,
1890 record_btrace_frame_unwind_stop_reason,
1891 record_btrace_frame_this_id,
1892 record_btrace_frame_prev_register,
1893 NULL,
1894 record_btrace_tailcall_frame_sniffer,
1895 record_btrace_frame_dealloc_cache
1896 };
1897
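/* In the two initializers above, the fields are, in positional order: the
   frame type, the stop_reason, this_id, and prev_register callbacks, the
   unwind_data pointer (unused here, hence NULL), the sniffer, and the
   dealloc_cache callback; this is assumed to match the field order of
   struct frame_unwind in this GDB version. */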
1898 /* Implement the get_unwinder method. */
1899
1900 const struct frame_unwind *
1901 record_btrace_target::get_unwinder ()
1902 {
1903 return &record_btrace_frame_unwind;
1904 }
1905
1906 /* Implement the get_tailcall_unwinder method. */
1907
1908 const struct frame_unwind *
1909 record_btrace_target::get_tailcall_unwinder ()
1910 {
1911 return &record_btrace_tailcall_frame_unwind;
1912 }
1913
1914 /* Return a human-readable string for FLAG. */
1915
1916 static const char *
1917 btrace_thread_flag_to_str (enum btrace_thread_flag flag)
1918 {
1919 switch (flag)
1920 {
1921 case BTHR_STEP:
1922 return "step";
1923
1924 case BTHR_RSTEP:
1925 return "reverse-step";
1926
1927 case BTHR_CONT:
1928 return "cont";
1929
1930 case BTHR_RCONT:
1931 return "reverse-cont";
1932
1933 case BTHR_STOP:
1934 return "stop";
1935 }
1936
1937 return "<invalid>";
1938 }
1939
1940 /* Indicate that TP should be resumed according to FLAG. */
1941
1942 static void
1943 record_btrace_resume_thread (struct thread_info *tp,
1944 enum btrace_thread_flag flag)
1945 {
1946 struct btrace_thread_info *btinfo;
1947
1948 DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp),
1949 target_pid_to_str (tp->ptid), flag, btrace_thread_flag_to_str (flag));
1950
1951 btinfo = &tp->btrace;
1952
1953 /* Fetch the latest branch trace. */
1954 btrace_fetch (tp, record_btrace_get_cpu ());
1955
1956 /* A resume request overwrites a preceding resume or stop request. */
1957 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
1958 btinfo->flags |= flag;
1959 }
1960
1961 /* Get the current frame for TP. */
1962
1963 static struct frame_id
1964 get_thread_current_frame_id (struct thread_info *tp)
1965 {
1966 struct frame_id id;
1967 int executing;
1968
1969 /* Set current thread, which is implicitly used by
1970 get_current_frame. */
1971 scoped_restore_current_thread restore_thread;
1972
1973 switch_to_thread (tp);
1974
1975 /* Clear the executing flag to allow changes to the current frame.
1976 We are not actually running, yet. We just started a reverse execution
1977 command or a record goto command.
1978 For the latter, EXECUTING is false and this has no effect.
1979 For the former, EXECUTING is true and we're in wait, about to
1980 move the thread. Since we need to recompute the stack, we temporarily
1981 set EXECUTING to false. */
1982 executing = tp->executing;
1983 set_executing (inferior_ptid, false);
1984
1985 id = null_frame_id;
1986 TRY
1987 {
1988 id = get_frame_id (get_current_frame ());
1989 }
1990 CATCH (except, RETURN_MASK_ALL)
1991 {
1992 /* Restore the previous execution state. */
1993 set_executing (inferior_ptid, executing);
1994
1995 throw_exception (except);
1996 }
1997 END_CATCH
1998
1999 /* Restore the previous execution state. */
2000 set_executing (inferior_ptid, executing);
2001
2002 return id;
2003 }
2004
2005 /* Start replaying a thread. */
2006
2007 static struct btrace_insn_iterator *
2008 record_btrace_start_replaying (struct thread_info *tp)
2009 {
2010 struct btrace_insn_iterator *replay;
2011 struct btrace_thread_info *btinfo;
2012
2013 btinfo = &tp->btrace;
2014 replay = NULL;
2015
2016 /* We can't start replaying without trace. */
2017 if (btinfo->functions.empty ())
2018 return NULL;
2019
2020 /* GDB stores the current frame_id when stepping in order to detect steps
2021 into subroutines.
2022 Since frames are computed differently when we're replaying, we need to
2023 recompute those stored frames and fix them up so we can still detect
2024 subroutines after we started replaying. */
2025 TRY
2026 {
2027 struct frame_id frame_id;
2028 int upd_step_frame_id, upd_step_stack_frame_id;
2029
2030 /* The current frame without replaying - computed via normal unwind. */
2031 frame_id = get_thread_current_frame_id (tp);
2032
2033 /* Check if we need to update any stepping-related frame id's. */
2034 upd_step_frame_id = frame_id_eq (frame_id,
2035 tp->control.step_frame_id);
2036 upd_step_stack_frame_id = frame_id_eq (frame_id,
2037 tp->control.step_stack_frame_id);
2038
2039 /* We start replaying at the end of the branch trace. This corresponds
2040 to the current instruction. */
2041 replay = XNEW (struct btrace_insn_iterator);
2042 btrace_insn_end (replay, btinfo);
2043
2044 /* Skip gaps at the end of the trace. */
2045 while (btrace_insn_get (replay) == NULL)
2046 {
2047 unsigned int steps;
2048
2049 steps = btrace_insn_prev (replay, 1);
2050 if (steps == 0)
2051 error (_("No trace."));
2052 }
2053
2054 /* We're not replaying, yet. */
2055 gdb_assert (btinfo->replay == NULL);
2056 btinfo->replay = replay;
2057
2058 /* Make sure we're not using any stale registers. */
2059 registers_changed_thread (tp);
2060
2061 /* The current frame with replaying - computed via btrace unwind. */
2062 frame_id = get_thread_current_frame_id (tp);
2063
2064 /* Replace stepping related frames where necessary. */
2065 if (upd_step_frame_id)
2066 tp->control.step_frame_id = frame_id;
2067 if (upd_step_stack_frame_id)
2068 tp->control.step_stack_frame_id = frame_id;
2069 }
2070 CATCH (except, RETURN_MASK_ALL)
2071 {
2072 xfree (btinfo->replay);
2073 btinfo->replay = NULL;
2074
2075 registers_changed_thread (tp);
2076
2077 throw_exception (except);
2078 }
2079 END_CATCH
2080
2081 return replay;
2082 }
2083
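/* [Editorial note] A "gap" is a stretch of the branch trace that could
   not be decoded (for example after a trace buffer overflow).  For an
   iterator positioned in a gap, btrace_insn_get returns NULL, which is
   why the loop above and its siblings below skip until they reach a
   real instruction.  */
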
2084 /* Stop replaying a thread. */
2085
2086 static void
2087 record_btrace_stop_replaying (struct thread_info *tp)
2088 {
2089 struct btrace_thread_info *btinfo;
2090
2091 btinfo = &tp->btrace;
2092
2093 xfree (btinfo->replay);
2094 btinfo->replay = NULL;
2095
2096 /* Make sure we're not leaving any stale registers. */
2097 registers_changed_thread (tp);
2098 }
2099
2100 /* Stop replaying TP if it is at the end of its execution history. */
2101
2102 static void
2103 record_btrace_stop_replaying_at_end (struct thread_info *tp)
2104 {
2105 struct btrace_insn_iterator *replay, end;
2106 struct btrace_thread_info *btinfo;
2107
2108 btinfo = &tp->btrace;
2109 replay = btinfo->replay;
2110
2111 if (replay == NULL)
2112 return;
2113
2114 btrace_insn_end (&end, btinfo);
2115
2116 if (btrace_insn_cmp (replay, &end) == 0)
2117 record_btrace_stop_replaying (tp);
2118 }
2119
2120 /* The resume method of target record-btrace. */
2121
2122 void
2123 record_btrace_target::resume (ptid_t ptid, int step, enum gdb_signal signal)
2124 {
2125 enum btrace_thread_flag flag, cflag;
2126
2127 DEBUG ("resume %s: %s%s", target_pid_to_str (ptid),
2128 ::execution_direction == EXEC_REVERSE ? "reverse-" : "",
2129 step ? "step" : "cont");
2130
2131 /* Store the execution direction of the last resume.
2132
2133 If there is more than one resume call, we have to rely on infrun
2134 to not change the execution direction in-between. */
2135 record_btrace_resume_exec_dir = ::execution_direction;
2136
2137 /* As long as we're not replaying, just forward the request.
2138
2139 For non-stop targets this means that no thread is replaying. In order to
2140 make progress, we may need to explicitly move replaying threads to the end
2141 of their execution history. */
2142 if ((::execution_direction != EXEC_REVERSE)
2143 && !record_is_replaying (minus_one_ptid))
2144 {
2145 this->beneath ()->resume (ptid, step, signal);
2146 return;
2147 }
2148
2149 /* Compute the btrace thread flag for the requested move. */
2150 if (::execution_direction == EXEC_REVERSE)
2151 {
2152 flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;
2153 cflag = BTHR_RCONT;
2154 }
2155 else
2156 {
2157 flag = step == 0 ? BTHR_CONT : BTHR_STEP;
2158 cflag = BTHR_CONT;
2159 }
2160
2161 /* We just indicate the resume intent here. The actual stepping happens in
2162 record_btrace_wait below.
2163
2164 For all-stop targets, we only step INFERIOR_PTID and continue others. */
2165 if (!target_is_non_stop_p ())
2166 {
2167 gdb_assert (inferior_ptid.matches (ptid));
2168
2169 for (thread_info *tp : all_non_exited_threads (ptid))
2170 {
2171 if (tp->ptid.matches (inferior_ptid))
2172 record_btrace_resume_thread (tp, flag);
2173 else
2174 record_btrace_resume_thread (tp, cflag);
2175 }
2176 }
2177 else
2178 {
2179 for (thread_info *tp : all_non_exited_threads (ptid))
2180 record_btrace_resume_thread (tp, flag);
2181 }
2182
2183 /* Async support. */
2184 if (target_can_async_p ())
2185 {
2186 target_async (1);
2187 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2188 }
2189 }
2190
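/* [Editorial note] Summary of the request-to-flag mapping implemented
   above (illustrative):

     forward step -> BTHR_STEP    forward continue -> BTHR_CONT
     reverse step -> BTHR_RSTEP   reverse continue -> BTHR_RCONT

   On all-stop targets only INFERIOR_PTID receives the step variant
   (FLAG); all other resumed threads receive the continue variant
   (CFLAG).  */
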
2191 /* The commit_resume method of target record-btrace. */
2192
2193 void
2194 record_btrace_target::commit_resume ()
2195 {
2196 if ((::execution_direction != EXEC_REVERSE)
2197 && !record_is_replaying (minus_one_ptid))
2198 beneath ()->commit_resume ();
2199 }
2200
2201 /* Cancel resuming TP. */
2202
2203 static void
2204 record_btrace_cancel_resume (struct thread_info *tp)
2205 {
2206 enum btrace_thread_flag flags;
2207
2208 flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
2209 if (flags == 0)
2210 return;
2211
2212 DEBUG ("cancel resume thread %s (%s): %x (%s)",
2213 print_thread_id (tp),
2214 target_pid_to_str (tp->ptid), flags,
2215 btrace_thread_flag_to_str (flags));
2216
2217 tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
2218 record_btrace_stop_replaying_at_end (tp);
2219 }
2220
2221 /* Return a target_waitstatus indicating that we ran out of history. */
2222
2223 static struct target_waitstatus
2224 btrace_step_no_history (void)
2225 {
2226 struct target_waitstatus status;
2227
2228 status.kind = TARGET_WAITKIND_NO_HISTORY;
2229
2230 return status;
2231 }
2232
2233 /* Return a target_waitstatus indicating that a step finished. */
2234
2235 static struct target_waitstatus
2236 btrace_step_stopped (void)
2237 {
2238 struct target_waitstatus status;
2239
2240 status.kind = TARGET_WAITKIND_STOPPED;
2241 status.value.sig = GDB_SIGNAL_TRAP;
2242
2243 return status;
2244 }
2245
2246 /* Return a target_waitstatus indicating that a thread was stopped as
2247 requested. */
2248
2249 static struct target_waitstatus
2250 btrace_step_stopped_on_request (void)
2251 {
2252 struct target_waitstatus status;
2253
2254 status.kind = TARGET_WAITKIND_STOPPED;
2255 status.value.sig = GDB_SIGNAL_0;
2256
2257 return status;
2258 }
2259
2260 /* Return a target_waitstatus indicating a spurious stop. */
2261
2262 static struct target_waitstatus
2263 btrace_step_spurious (void)
2264 {
2265 struct target_waitstatus status;
2266
2267 status.kind = TARGET_WAITKIND_SPURIOUS;
2268
2269 return status;
2270 }
2271
2272 /* Return a target_waitstatus indicating that the thread was not resumed. */
2273
2274 static struct target_waitstatus
2275 btrace_step_no_resumed (void)
2276 {
2277 struct target_waitstatus status;
2278
2279 status.kind = TARGET_WAITKIND_NO_RESUMED;
2280
2281 return status;
2282 }
2283
2284 /* Return a target_waitstatus indicating that we should wait again. */
2285
2286 static struct target_waitstatus
2287 btrace_step_again (void)
2288 {
2289 struct target_waitstatus status;
2290
2291 status.kind = TARGET_WAITKIND_IGNORE;
2292
2293 return status;
2294 }
2295
2296 /* Clear the record histories. */
2297
2298 static void
2299 record_btrace_clear_histories (struct btrace_thread_info *btinfo)
2300 {
2301 xfree (btinfo->insn_history);
2302 xfree (btinfo->call_history);
2303
2304 btinfo->insn_history = NULL;
2305 btinfo->call_history = NULL;
2306 }
2307
2308 /* Check whether TP's current replay position is at a breakpoint. */
2309
2310 static int
2311 record_btrace_replay_at_breakpoint (struct thread_info *tp)
2312 {
2313 struct btrace_insn_iterator *replay;
2314 struct btrace_thread_info *btinfo;
2315 const struct btrace_insn *insn;
2316
2317 btinfo = &tp->btrace;
2318 replay = btinfo->replay;
2319
2320 if (replay == NULL)
2321 return 0;
2322
2323 insn = btrace_insn_get (replay);
2324 if (insn == NULL)
2325 return 0;
2326
2327 return record_check_stopped_by_breakpoint (tp->inf->aspace, insn->pc,
2328 &btinfo->stop_reason);
2329 }
2330
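/* [Editorial note] record_check_stopped_by_breakpoint (record.c) checks
   whether a breakpoint is installed at INSN->pc in the thread's address
   space and caches the answer in BTINFO->stop_reason, which the
   stopped_by_*_breakpoint methods further below then report.  */
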
2331 /* Step one instruction in forward direction. */
2332
2333 static struct target_waitstatus
2334 record_btrace_single_step_forward (struct thread_info *tp)
2335 {
2336 struct btrace_insn_iterator *replay, end, start;
2337 struct btrace_thread_info *btinfo;
2338
2339 btinfo = &tp->btrace;
2340 replay = btinfo->replay;
2341
2342 /* We're done if we're not replaying. */
2343 if (replay == NULL)
2344 return btrace_step_no_history ();
2345
2346 /* Check if we're stepping a breakpoint. */
2347 if (record_btrace_replay_at_breakpoint (tp))
2348 return btrace_step_stopped ();
2349
2350 /* Skip gaps during replay. If we end up at a gap (at the end of the trace),
2351 jump back to the instruction at which we started. */
2352 start = *replay;
2353 do
2354 {
2355 unsigned int steps;
2356
2357 /* We will bail out here if we continue stepping after reaching the end
2358 of the execution history. */
2359 steps = btrace_insn_next (replay, 1);
2360 if (steps == 0)
2361 {
2362 *replay = start;
2363 return btrace_step_no_history ();
2364 }
2365 }
2366 while (btrace_insn_get (replay) == NULL);
2367
2368 /* Determine the end of the instruction trace. */
2369 btrace_insn_end (&end, btinfo);
2370
2371 /* The execution trace contains (and ends with) the current instruction.
2372 This instruction has not been executed, yet, so the trace really ends
2373 one instruction earlier. */
2374 if (btrace_insn_cmp (replay, &end) == 0)
2375 return btrace_step_no_history ();
2376
2377 return btrace_step_spurious ();
2378 }
2379
2380 /* Step one instruction in backward direction. */
2381
2382 static struct target_waitstatus
2383 record_btrace_single_step_backward (struct thread_info *tp)
2384 {
2385 struct btrace_insn_iterator *replay, start;
2386 struct btrace_thread_info *btinfo;
2387
2388 btinfo = &tp->btrace;
2389 replay = btinfo->replay;
2390
2391 /* Start replaying if we're not already doing so. */
2392 if (replay == NULL)
2393 replay = record_btrace_start_replaying (tp);
2394
2395 /* If we can't step any further, we reached the end of the history.
2396 Skip gaps during replay. If we end up at a gap (at the beginning of
2397 the trace), jump back to the instruction at which we started. */
2398 start = *replay;
2399 do
2400 {
2401 unsigned int steps;
2402
2403 steps = btrace_insn_prev (replay, 1);
2404 if (steps == 0)
2405 {
2406 *replay = start;
2407 return btrace_step_no_history ();
2408 }
2409 }
2410 while (btrace_insn_get (replay) == NULL);
2411
2412 /* Check if we're stepping a breakpoint.
2413
2414 For reverse-stepping, this check is after the step. There is logic in
2415 infrun.c that handles reverse-stepping separately. See, for example,
2416 proceed and adjust_pc_after_break.
2417
2418 This code assumes that for reverse-stepping, PC points to the last
2419 de-executed instruction, whereas for forward-stepping PC points to the
2420 next to-be-executed instruction. */
2421 if (record_btrace_replay_at_breakpoint (tp))
2422 return btrace_step_stopped ();
2423
2424 return btrace_step_spurious ();
2425 }
2426
2427 /* Step a single thread. */
2428
2429 static struct target_waitstatus
2430 record_btrace_step_thread (struct thread_info *tp)
2431 {
2432 struct btrace_thread_info *btinfo;
2433 struct target_waitstatus status;
2434 enum btrace_thread_flag flags;
2435
2436 btinfo = &tp->btrace;
2437
2438 flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
2439 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
2440
2441 DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp),
2442 target_pid_to_str (tp->ptid), flags,
2443 btrace_thread_flag_to_str (flags));
2444
2445 /* We can't step without an execution history. */
2446 if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
2447 return btrace_step_no_history ();
2448
2449 switch (flags)
2450 {
2451 default:
2452 internal_error (__FILE__, __LINE__, _("invalid stepping type."));
2453
2454 case BTHR_STOP:
2455 return btrace_step_stopped_on_request ();
2456
2457 case BTHR_STEP:
2458 status = record_btrace_single_step_forward (tp);
2459 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2460 break;
2461
2462 return btrace_step_stopped ();
2463
2464 case BTHR_RSTEP:
2465 status = record_btrace_single_step_backward (tp);
2466 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2467 break;
2468
2469 return btrace_step_stopped ();
2470
2471 case BTHR_CONT:
2472 status = record_btrace_single_step_forward (tp);
2473 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2474 break;
2475
2476 btinfo->flags |= flags;
2477 return btrace_step_again ();
2478
2479 case BTHR_RCONT:
2480 status = record_btrace_single_step_backward (tp);
2481 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2482 break;
2483
2484 btinfo->flags |= flags;
2485 return btrace_step_again ();
2486 }
2487
2488 /* We keep threads moving at the end of their execution history. The wait
2489 method will stop the thread for which the event is reported. */
2490 if (status.kind == TARGET_WAITKIND_NO_HISTORY)
2491 btinfo->flags |= flags;
2492
2493 return status;
2494 }
2495
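/* [Editorial note] Illustrative driver loop, assuming a single moving
   thread (the real loop lives in record_btrace_target::wait below and
   also rotates between threads):

     do
       status = record_btrace_step_thread (tp);
     while (status.kind == TARGET_WAITKIND_IGNORE);

   The BTHR_CONT/BTHR_RCONT cases re-arm themselves via
   btrace_step_again until they hit a breakpoint or run out of
   history.  */
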
2496 /* Announce further events if necessary. */
2497
2498 static void
2499 record_btrace_maybe_mark_async_event
2500 (const std::vector<thread_info *> &moving,
2501 const std::vector<thread_info *> &no_history)
2502 {
2503 bool more_moving = !moving.empty ();
2504 bool more_no_history = !no_history.empty ();
2505
2506 if (!more_moving && !more_no_history)
2507 return;
2508
2509 if (more_moving)
2510 DEBUG ("movers pending");
2511
2512 if (more_no_history)
2513 DEBUG ("no-history pending");
2514
2515 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2516 }
2517
2518 /* The wait method of target record-btrace. */
2519
2520 ptid_t
2521 record_btrace_target::wait (ptid_t ptid, struct target_waitstatus *status,
2522 int options)
2523 {
2524 std::vector<thread_info *> moving;
2525 std::vector<thread_info *> no_history;
2526
2527 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);
2528
2529 /* As long as we're not replaying, just forward the request. */
2530 if ((::execution_direction != EXEC_REVERSE)
2531 && !record_is_replaying (minus_one_ptid))
2532 {
2533 return this->beneath ()->wait (ptid, status, options);
2534 }
2535
2536 /* Keep a work list of moving threads. */
2537 for (thread_info *tp : all_non_exited_threads (ptid))
2538 if ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0)
2539 moving.push_back (tp);
2540
2541 if (moving.empty ())
2542 {
2543 *status = btrace_step_no_resumed ();
2544
2545 DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid),
2546 target_waitstatus_to_string (status).c_str ());
2547
2548 return null_ptid;
2549 }
2550
2551 /* Step moving threads one by one, one step each, until either one thread
2552 reports an event or we run out of threads to step.
2553
2554 When stepping more than one thread, chances are that some threads reach
2555 the end of their execution history earlier than others. If we reported
2556 this immediately, all-stop on top of non-stop would stop all threads and
2557 resume the same threads next time. And we would report the same thread
2558 having reached the end of its execution history again.
2559
2560 In the worst case, this would starve the other threads. But even if other
2561 threads would be allowed to make progress, this would result in far too
2562 many intermediate stops.
2563
2564 We therefore delay the reporting of "no execution history" until we have
2565 nothing else to report. By this time, all threads should have moved to
2566 either the beginning or the end of their execution history. There will
2567 be a single user-visible stop. */
2568 struct thread_info *eventing = NULL;
2569 while ((eventing == NULL) && !moving.empty ())
2570 {
2571 for (unsigned int ix = 0; eventing == NULL && ix < moving.size ();)
2572 {
2573 thread_info *tp = moving[ix];
2574
2575 *status = record_btrace_step_thread (tp);
2576
2577 switch (status->kind)
2578 {
2579 case TARGET_WAITKIND_IGNORE:
2580 ix++;
2581 break;
2582
2583 case TARGET_WAITKIND_NO_HISTORY:
2584 no_history.push_back (ordered_remove (moving, ix));
2585 break;
2586
2587 default:
2588 eventing = unordered_remove (moving, ix);
2589 break;
2590 }
2591 }
2592 }
2593
2594 if (eventing == NULL)
2595 {
2596 /* We started with at least one moving thread. This thread must have
2597 either stopped or reached the end of its execution history.
2598
2599 In the former case, EVENTING must not be NULL.
2600 In the latter case, NO_HISTORY must not be empty. */
2601 gdb_assert (!no_history.empty ());
2602
2603 /* We kept threads moving at the end of their execution history. Stop
2604 EVENTING now that we are going to report its stop. */
2605 eventing = unordered_remove (no_history, 0);
2606 eventing->btrace.flags &= ~BTHR_MOVE;
2607
2608 *status = btrace_step_no_history ();
2609 }
2610
2611 gdb_assert (eventing != NULL);
2612
2613 /* We kept threads replaying at the end of their execution history. Stop
2614 replaying EVENTING now that we are going to report its stop. */
2615 record_btrace_stop_replaying_at_end (eventing);
2616
2617 /* Stop all other threads. */
2618 if (!target_is_non_stop_p ())
2619 {
2620 for (thread_info *tp : all_non_exited_threads ())
2621 record_btrace_cancel_resume (tp);
2622 }
2623
2624 /* In async mode, we need to announce further events. */
2625 if (target_is_async_p ())
2626 record_btrace_maybe_mark_async_event (moving, no_history);
2627
2628 /* Start record histories anew from the current position. */
2629 record_btrace_clear_histories (&eventing->btrace);
2630
2631 /* We moved the replay position but did not update registers. */
2632 registers_changed_thread (eventing);
2633
2634 DEBUG ("wait ended by thread %s (%s): %s",
2635 print_thread_id (eventing),
2636 target_pid_to_str (eventing->ptid),
2637 target_waitstatus_to_string (status).c_str ());
2638
2639 return eventing->ptid;
2640 }
2641
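/* [Editorial note] On return, exactly one thread (EVENTING) has been
   stopped and is reported; remaining movers keep their BTHR_ flags, and
   record_btrace_maybe_mark_async_event above arranges for another wait
   in async mode.  */
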
2642 /* The stop method of target record-btrace. */
2643
2644 void
2645 record_btrace_target::stop (ptid_t ptid)
2646 {
2647 DEBUG ("stop %s", target_pid_to_str (ptid));
2648
2649 /* As long as we're not replaying, just forward the request. */
2650 if ((::execution_direction != EXEC_REVERSE)
2651 && !record_is_replaying (minus_one_ptid))
2652 {
2653 this->beneath ()->stop (ptid);
2654 }
2655 else
2656 {
2657 for (thread_info *tp : all_non_exited_threads (ptid))
2658 {
2659 tp->btrace.flags &= ~BTHR_MOVE;
2660 tp->btrace.flags |= BTHR_STOP;
2661 }
2662 }
2663 }
2664
2665 /* The can_execute_reverse method of target record-btrace. */
2666
2667 bool
2668 record_btrace_target::can_execute_reverse ()
2669 {
2670 return true;
2671 }
2672
2673 /* The stopped_by_sw_breakpoint method of target record-btrace. */
2674
2675 bool
2676 record_btrace_target::stopped_by_sw_breakpoint ()
2677 {
2678 if (record_is_replaying (minus_one_ptid))
2679 {
2680 struct thread_info *tp = inferior_thread ();
2681
2682 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2683 }
2684
2685 return this->beneath ()->stopped_by_sw_breakpoint ();
2686 }
2687
2688 /* The supports_stopped_by_sw_breakpoint method of target
2689 record-btrace. */
2690
2691 bool
2692 record_btrace_target::supports_stopped_by_sw_breakpoint ()
2693 {
2694 if (record_is_replaying (minus_one_ptid))
2695 return true;
2696
2697 return this->beneath ()->supports_stopped_by_sw_breakpoint ();
2698 }
2699
2700 /* The stopped_by_hw_breakpoint method of target record-btrace. */
2701
2702 bool
2703 record_btrace_target::stopped_by_hw_breakpoint ()
2704 {
2705 if (record_is_replaying (minus_one_ptid))
2706 {
2707 struct thread_info *tp = inferior_thread ();
2708
2709 return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2710 }
2711
2712 return this->beneath ()->stopped_by_hw_breakpoint ();
2713 }
2714
2715 /* The supports_stopped_by_hw_breakpoint method of target
2716 record-btrace. */
2717
2718 bool
2719 record_btrace_target::supports_stopped_by_hw_breakpoint ()
2720 {
2721 if (record_is_replaying (minus_one_ptid))
2722 return true;
2723
2724 return this->beneath ()->supports_stopped_by_hw_breakpoint ();
2725 }
2726
2727 /* The update_thread_list method of target record-btrace. */
2728
2729 void
2730 record_btrace_target::update_thread_list ()
2731 {
2732 /* We don't add or remove threads during replay. */
2733 if (record_is_replaying (minus_one_ptid))
2734 return;
2735
2736 /* Forward the request. */
2737 this->beneath ()->update_thread_list ();
2738 }
2739
2740 /* The thread_alive method of target record-btrace. */
2741
2742 bool
2743 record_btrace_target::thread_alive (ptid_t ptid)
2744 {
2745 /* We don't add or remove threads during replay. */
2746 if (record_is_replaying (minus_one_ptid))
2747 return true;
2748
2749 /* Forward the request. */
2750 return this->beneath ()->thread_alive (ptid);
2751 }
2752
2753 /* Set the replay branch trace instruction iterator. If IT is NULL, replay
2754 is stopped. */
2755
2756 static void
2757 record_btrace_set_replay (struct thread_info *tp,
2758 const struct btrace_insn_iterator *it)
2759 {
2760 struct btrace_thread_info *btinfo;
2761
2762 btinfo = &tp->btrace;
2763
2764 if (it == NULL)
2765 record_btrace_stop_replaying (tp);
2766 else
2767 {
2768 if (btinfo->replay == NULL)
2769 record_btrace_start_replaying (tp);
2770 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
2771 return;
2772
2773 *btinfo->replay = *it;
2774 registers_changed_thread (tp);
2775 }
2776
2777 /* Start anew from the new replay position. */
2778 record_btrace_clear_histories (btinfo);
2779
2780 inferior_thread ()->suspend.stop_pc
2781 = regcache_read_pc (get_current_regcache ());
2782 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
2783 }
2784
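/* [Editorial note] The three "record goto" methods below all funnel
   into record_btrace_set_replay: goto_record_begin and goto_record_end
   position the iterator at the trace boundaries, goto_record looks up a
   specific instruction number, and passing NULL leaves replay mode
   entirely.  */
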
2785 /* The goto_record_begin method of target record-btrace. */
2786
2787 void
2788 record_btrace_target::goto_record_begin ()
2789 {
2790 struct thread_info *tp;
2791 struct btrace_insn_iterator begin;
2792
2793 tp = require_btrace_thread ();
2794
2795 btrace_insn_begin (&begin, &tp->btrace);
2796
2797 /* Skip gaps at the beginning of the trace. */
2798 while (btrace_insn_get (&begin) == NULL)
2799 {
2800 unsigned int steps;
2801
2802 steps = btrace_insn_next (&begin, 1);
2803 if (steps == 0)
2804 error (_("No trace."));
2805 }
2806
2807 record_btrace_set_replay (tp, &begin);
2808 }
2809
2810 /* The goto_record_end method of target record-btrace. */
2811
2812 void
2813 record_btrace_target::goto_record_end ()
2814 {
2815 struct thread_info *tp;
2816
2817 tp = require_btrace_thread ();
2818
2819 record_btrace_set_replay (tp, NULL);
2820 }
2821
2822 /* The goto_record method of target record-btrace. */
2823
2824 void
2825 record_btrace_target::goto_record (ULONGEST insn)
2826 {
2827 struct thread_info *tp;
2828 struct btrace_insn_iterator it;
2829 unsigned int number;
2830 int found;
2831
2832 number = insn;
2833
2834 /* Check for wrap-arounds. */
2835 if (number != insn)
2836 error (_("Instruction number out of range."));
2837
2838 tp = require_btrace_thread ();
2839
2840 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
2841
2842 /* Check if the instruction could not be found or is a gap. */
2843 if (found == 0 || btrace_insn_get (&it) == NULL)
2844 error (_("No such instruction."));
2845
2846 record_btrace_set_replay (tp, &it);
2847 }
2848
2849 /* The record_stop_replaying method of target record-btrace. */
2850
2851 void
2852 record_btrace_target::record_stop_replaying ()
2853 {
2854 for (thread_info *tp : all_non_exited_threads ())
2855 record_btrace_stop_replaying (tp);
2856 }
2857
2858 /* The execution_direction target method. */
2859
2860 enum exec_direction_kind
2861 record_btrace_target::execution_direction ()
2862 {
2863 return record_btrace_resume_exec_dir;
2864 }
2865
2866 /* The prepare_to_generate_core target method. */
2867
2868 void
2869 record_btrace_target::prepare_to_generate_core ()
2870 {
2871 record_btrace_generating_corefile = 1;
2872 }
2873
2874 /* The done_generating_core target method. */
2875
2876 void
2877 record_btrace_target::done_generating_core ()
2878 {
2879 record_btrace_generating_corefile = 0;
2880 }
2881
2882 /* Start recording in BTS format. */
2883
2884 static void
2885 cmd_record_btrace_bts_start (const char *args, int from_tty)
2886 {
2887 if (args != NULL && *args != 0)
2888 error (_("Invalid argument."));
2889
2890 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2891
2892 TRY
2893 {
2894 execute_command ("target record-btrace", from_tty);
2895 }
2896 CATCH (exception, RETURN_MASK_ALL)
2897 {
2898 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2899 throw_exception (exception);
2900 }
2901 END_CATCH
2902 }
2903
2904 /* Start recording in Intel Processor Trace format. */
2905
2906 static void
2907 cmd_record_btrace_pt_start (const char *args, int from_tty)
2908 {
2909 if (args != NULL && *args != 0)
2910 error (_("Invalid argument."));
2911
2912 record_btrace_conf.format = BTRACE_FORMAT_PT;
2913
2914 TRY
2915 {
2916 execute_command ("target record-btrace", from_tty);
2917 }
2918 CATCH (exception, RETURN_MASK_ALL)
2919 {
2920 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2921 throw_exception (exception);
2922 }
2923 END_CATCH
2924 }
2925
2926 /* Alias for "target record". */
2927
2928 static void
2929 cmd_record_btrace_start (const char *args, int from_tty)
2930 {
2931 if (args != NULL && *args != 0)
2932 error (_("Invalid argument."));
2933
2934 record_btrace_conf.format = BTRACE_FORMAT_PT;
2935
2936 TRY
2937 {
2938 execute_command ("target record-btrace", from_tty);
2939 }
2940 CATCH (exception, RETURN_MASK_ALL)
2941 {
2942 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2943
2944 TRY
2945 {
2946 execute_command ("target record-btrace", from_tty);
2947 }
2948 CATCH (ex, RETURN_MASK_ALL)
2949 {
2950 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2951 throw_exception (ex);
2952 }
2953 END_CATCH
2954 }
2955 END_CATCH
2956 }
2957
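/* [Editorial note] That is, plain "record btrace" attempts Intel PT
   first and silently falls back to BTS; only if both fail does an error
   (the one from the BTS attempt) reach the user.  */
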
2958 /* The "set record btrace" command. */
2959
2960 static void
2961 cmd_set_record_btrace (const char *args, int from_tty)
2962 {
2963 printf_unfiltered (_("\"set record btrace\" must be followed "
2964 "by an appropriate subcommand.\n"));
2965 help_list (set_record_btrace_cmdlist, "set record btrace ",
2966 all_commands, gdb_stdout);
2967 }
2968
2969 /* The "show record btrace" command. */
2970
2971 static void
2972 cmd_show_record_btrace (const char *args, int from_tty)
2973 {
2974 cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
2975 }
2976
2977 /* The "show record btrace replay-memory-access" command. */
2978
2979 static void
2980 cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2981 struct cmd_list_element *c, const char *value)
2982 {
2983 fprintf_filtered (file, _("Replay memory access is %s.\n"),
2984 replay_memory_access);
2985 }
2986
2987 /* The "set record btrace cpu none" command. */
2988
2989 static void
2990 cmd_set_record_btrace_cpu_none (const char *args, int from_tty)
2991 {
2992 if (args != nullptr && *args != 0)
2993 error (_("Trailing junk: '%s'."), args);
2994
2995 record_btrace_cpu_state = CS_NONE;
2996 }
2997
2998 /* The "set record btrace cpu auto" command. */
2999
3000 static void
3001 cmd_set_record_btrace_cpu_auto (const char *args, int from_tty)
3002 {
3003 if (args != nullptr && *args != 0)
3004 error (_("Trailing junk: '%s'."), args);
3005
3006 record_btrace_cpu_state = CS_AUTO;
3007 }
3008
3009 /* The "set record btrace cpu" command. */
3010
3011 static void
3012 cmd_set_record_btrace_cpu (const char *args, int from_tty)
3013 {
3014 if (args == nullptr)
3015 args = "";
3016
3017 /* We use a hard-coded vendor string for now. */
3018 unsigned int family, model, stepping;
3019 int l1, l2, matches = sscanf (args, "intel: %u/%u%n/%u%n", &family,
3020 &model, &l1, &stepping, &l2);
3021 if (matches == 3)
3022 {
3023 if (strlen (args) != l2)
3024 error (_("Trailing junk: '%s'."), args + l2);
3025 }
3026 else if (matches == 2)
3027 {
3028 if (strlen (args) != l1)
3029 error (_("Trailing junk: '%s'."), args + l1);
3030
3031 stepping = 0;
3032 }
3033 else
3034 error (_("Bad format. See \"help set record btrace cpu\"."));
3035
3036 if (USHRT_MAX < family)
3037 error (_("Cpu family too big."));
3038
3039 if (UCHAR_MAX < model)
3040 error (_("Cpu model too big."));
3041
3042 if (UCHAR_MAX < stepping)
3043 error (_("Cpu stepping too big."));
3044
3045 record_btrace_cpu.vendor = CV_INTEL;
3046 record_btrace_cpu.family = family;
3047 record_btrace_cpu.model = model;
3048 record_btrace_cpu.stepping = stepping;
3049
3050 record_btrace_cpu_state = CS_CPU;
3051 }
3052
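/* [Editorial note] Example inputs accepted by the parser above
   (illustrative): "intel: 6/62" selects family 6, model 62, stepping 0;
   "intel: 6/62/4" additionally selects stepping 4.  Anything beyond the
   parsed fields is rejected as trailing junk.  */
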
3053 /* The "show record btrace cpu" command. */
3054
3055 static void
3056 cmd_show_record_btrace_cpu (const char *args, int from_tty)
3057 {
3058 if (args != nullptr && *args != 0)
3059 error (_("Trailing junk: '%s'."), args);
3060
3061 switch (record_btrace_cpu_state)
3062 {
3063 case CS_AUTO:
3064 printf_unfiltered (_("btrace cpu is 'auto'.\n"));
3065 return;
3066
3067 case CS_NONE:
3068 printf_unfiltered (_("btrace cpu is 'none'.\n"));
3069 return;
3070
3071 case CS_CPU:
3072 switch (record_btrace_cpu.vendor)
3073 {
3074 case CV_INTEL:
3075 if (record_btrace_cpu.stepping == 0)
3076 printf_unfiltered (_("btrace cpu is 'intel: %u/%u'.\n"),
3077 record_btrace_cpu.family,
3078 record_btrace_cpu.model);
3079 else
3080 printf_unfiltered (_("btrace cpu is 'intel: %u/%u/%u'.\n"),
3081 record_btrace_cpu.family,
3082 record_btrace_cpu.model,
3083 record_btrace_cpu.stepping);
3084 return;
3085 }
3086 }
3087
3088 error (_("Internal error: bad cpu state."));
3089 }
3090
3091 /* The "s record btrace bts" command. */
3092
3093 static void
3094 cmd_set_record_btrace_bts (const char *args, int from_tty)
3095 {
3096 printf_unfiltered (_("\"set record btrace bts\" must be followed "
3097 "by an appropriate subcommand.\n"));
3098 help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
3099 all_commands, gdb_stdout);
3100 }
3101
3102 /* The "show record btrace bts" command. */
3103
3104 static void
3105 cmd_show_record_btrace_bts (const char *args, int from_tty)
3106 {
3107 cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
3108 }
3109
3110 /* The "set record btrace pt" command. */
3111
3112 static void
3113 cmd_set_record_btrace_pt (const char *args, int from_tty)
3114 {
3115 printf_unfiltered (_("\"set record btrace pt\" must be followed "
3116 "by an appropriate subcommand.\n"));
3117 help_list (set_record_btrace_pt_cmdlist, "set record btrace pt ",
3118 all_commands, gdb_stdout);
3119 }
3120
3121 /* The "show record btrace pt" command. */
3122
3123 static void
3124 cmd_show_record_btrace_pt (const char *args, int from_tty)
3125 {
3126 cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, "");
3127 }
3128
3129 /* The "record bts buffer-size" show value function. */
3130
3131 static void
3132 show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
3133 struct cmd_list_element *c,
3134 const char *value)
3135 {
3136 fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
3137 value);
3138 }
3139
3140 /* The "record pt buffer-size" show value function. */
3141
3142 static void
3143 show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
3144 struct cmd_list_element *c,
3145 const char *value)
3146 {
3147 fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
3148 value);
3149 }
3150
3151 /* Initialize btrace commands. */
3152
3153 void
3154 _initialize_record_btrace (void)
3155 {
3156 add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
3157 _("Start branch trace recording."), &record_btrace_cmdlist,
3158 "record btrace ", 0, &record_cmdlist);
3159 add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
3160
3161 add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
3162 _("\
3163 Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
3164 The processor stores a from/to record for each branch into a cyclic buffer.\n\
3165 This format may not be available on all processors."),
3166 &record_btrace_cmdlist);
3167 add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);
3168
3169 add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
3170 _("\
3171 Start branch trace recording in Intel Processor Trace format.\n\n\
3172 This format may not be available on all processors."),
3173 &record_btrace_cmdlist);
3174 add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);
3175
3176 add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
3177 _("Set record options"), &set_record_btrace_cmdlist,
3178 "set record btrace ", 0, &set_record_cmdlist);
3179
3180 add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
3181 _("Show record options"), &show_record_btrace_cmdlist,
3182 "show record btrace ", 0, &show_record_cmdlist);
3183
3184 add_setshow_enum_cmd ("replay-memory-access", no_class,
3185 replay_memory_access_types, &replay_memory_access, _("\
3186 Set what memory accesses are allowed during replay."), _("\
3187 Show what memory accesses are allowed during replay."),
3188 _("Default is READ-ONLY.\n\n\
3189 The btrace record target does not trace data.\n\
3190 The memory therefore corresponds to the live target and not \
3191 to the current replay position.\n\n\
3192 When READ-ONLY, allow accesses to read-only memory during replay.\n\
3193 When READ-WRITE, allow accesses to read-only and read-write memory during \
3194 replay."),
3195 NULL, cmd_show_replay_memory_access,
3196 &set_record_btrace_cmdlist,
3197 &show_record_btrace_cmdlist);
3198
3199 add_prefix_cmd ("cpu", class_support, cmd_set_record_btrace_cpu,
3200 _("\
3201 Set the cpu to be used for trace decode.\n\n\
3202 The format is \"VENDOR:IDENTIFIER\" or \"none\" or \"auto\" (default).\n\
3203 For vendor \"intel\" the format is \"FAMILY/MODEL[/STEPPING]\".\n\n\
3204 When decoding branch trace, enable errata workarounds for the specified cpu.\n\
3205 The default is \"auto\", which uses the cpu on which the trace was recorded.\n\
3206 When GDB does not support that cpu, this option can be used to enable\n\
3207 workarounds for a similar cpu that GDB supports.\n\n\
3208 When set to \"none\", errata workarounds are disabled."),
3209 &set_record_btrace_cpu_cmdlist,
3210 "set record btrace cpu ", 1,
3211 &set_record_btrace_cmdlist);
3212
3213 add_cmd ("auto", class_support, cmd_set_record_btrace_cpu_auto, _("\
3214 Automatically determine the cpu to be used for trace decode."),
3215 &set_record_btrace_cpu_cmdlist);
3216
3217 add_cmd ("none", class_support, cmd_set_record_btrace_cpu_none, _("\
3218 Do not enable errata workarounds for trace decode."),
3219 &set_record_btrace_cpu_cmdlist);
3220
3221 add_cmd ("cpu", class_support, cmd_show_record_btrace_cpu, _("\
3222 Show the cpu to be used for trace decode."),
3223 &show_record_btrace_cmdlist);
3224
3225 add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
3226 _("Set record btrace bts options"),
3227 &set_record_btrace_bts_cmdlist,
3228 "set record btrace bts ", 0, &set_record_btrace_cmdlist);
3229
3230 add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
3231 _("Show record btrace bts options"),
3232 &show_record_btrace_bts_cmdlist,
3233 "show record btrace bts ", 0, &show_record_btrace_cmdlist);
3234
3235 add_setshow_uinteger_cmd ("buffer-size", no_class,
3236 &record_btrace_conf.bts.size,
3237 _("Set the record/replay bts buffer size."),
3238 _("Show the record/replay bts buffer size."), _("\
3239 When starting recording request a trace buffer of this size. \
3240 The actual buffer size may differ from the requested size. \
3241 Use \"info record\" to see the actual buffer size.\n\n\
3242 Bigger buffers allow longer recording but also take more time to process \
3243 the recorded execution trace.\n\n\
3244 The trace buffer size may not be changed while recording."), NULL,
3245 show_record_bts_buffer_size_value,
3246 &set_record_btrace_bts_cmdlist,
3247 &show_record_btrace_bts_cmdlist);
3248
3249 add_prefix_cmd ("pt", class_support, cmd_set_record_btrace_pt,
3250 _("Set record btrace pt options"),
3251 &set_record_btrace_pt_cmdlist,
3252 "set record btrace pt ", 0, &set_record_btrace_cmdlist);
3253
3254 add_prefix_cmd ("pt", class_support, cmd_show_record_btrace_pt,
3255 _("Show record btrace pt options"),
3256 &show_record_btrace_pt_cmdlist,
3257 "show record btrace pt ", 0, &show_record_btrace_cmdlist);
3258
3259 add_setshow_uinteger_cmd ("buffer-size", no_class,
3260 &record_btrace_conf.pt.size,
3261 _("Set the record/replay pt buffer size."),
3262 _("Show the record/replay pt buffer size."), _("\
3263 Bigger buffers allow longer recording but also take more time to process \
3264 the recorded execution.\n\
3265 The actual buffer size may differ from the requested size. Use \"info record\" \
3266 to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
3267 &set_record_btrace_pt_cmdlist,
3268 &show_record_btrace_pt_cmdlist);
3269
3270 add_target (record_btrace_target_info, record_btrace_target_open);
3271
3272 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
3273 xcalloc, xfree);
3274
3275 record_btrace_conf.bts.size = 64 * 1024;
3276 record_btrace_conf.pt.size = 16 * 1024;
3277 }
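
/* [Editorial note] Illustrative CLI session (sketch).  "record btrace
   pt" and the buffer-size setting are registered above; "reverse-stepi"
   and "record goto" are core GDB commands that end up in the target
   methods implemented in this file:

     (gdb) set record btrace pt buffer-size 1048576
     (gdb) record btrace pt
     (gdb) reverse-stepi
     (gdb) record goto end
     (gdb) info record  */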