/* Branch trace support for GDB, the GNU debugger.

   Copyright (C) 2013-2020 Free Software Foundation, Inc.

   Contributed by Intel Corp. <markus.t.metzger@intel.com>

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "record.h"
#include "record-btrace.h"
#include "gdbthread.h"
#include "target.h"
#include "gdbcmd.h"
#include "disasm.h"
#include "observable.h"
#include "cli/cli-utils.h"
#include "source.h"
#include "ui-out.h"
#include "symtab.h"
#include "filenames.h"
#include "regcache.h"
#include "frame-unwind.h"
#include "hashtab.h"
#include "infrun.h"
#include "gdbsupport/event-loop.h"
#include "inf-loop.h"
#include "inferior.h"
#include <algorithm>
#include "gdbarch.h"
#include "cli/cli-style.h"
#include "async-event.h"

static const target_info record_btrace_target_info = {
  "record-btrace",
  N_("Branch tracing target"),
  N_("Collect control-flow trace and provide the execution history.")
};

/* The target_ops of record-btrace.  */

class record_btrace_target final : public target_ops
{
public:
  const target_info &info () const override
  { return record_btrace_target_info; }

  strata stratum () const override { return record_stratum; }

  void close () override;
  void async (int) override;

  void detach (inferior *inf, int from_tty) override
  { record_detach (this, inf, from_tty); }

  void disconnect (const char *, int) override;

  void mourn_inferior () override
  { record_mourn_inferior (this); }

  void kill () override
  { record_kill (this); }

  enum record_method record_method (ptid_t ptid) override;

  void stop_recording () override;
  void info_record () override;

  void insn_history (int size, gdb_disassembly_flags flags) override;
  void insn_history_from (ULONGEST from, int size,
                          gdb_disassembly_flags flags) override;
  void insn_history_range (ULONGEST begin, ULONGEST end,
                           gdb_disassembly_flags flags) override;
  void call_history (int size, record_print_flags flags) override;
  void call_history_from (ULONGEST begin, int size, record_print_flags flags)
    override;
  void call_history_range (ULONGEST begin, ULONGEST end, record_print_flags flags)
    override;

  bool record_is_replaying (ptid_t ptid) override;
  bool record_will_replay (ptid_t ptid, int dir) override;
  void record_stop_replaying () override;

  enum target_xfer_status xfer_partial (enum target_object object,
                                        const char *annex,
                                        gdb_byte *readbuf,
                                        const gdb_byte *writebuf,
                                        ULONGEST offset, ULONGEST len,
                                        ULONGEST *xfered_len) override;

  int insert_breakpoint (struct gdbarch *,
                         struct bp_target_info *) override;
  int remove_breakpoint (struct gdbarch *, struct bp_target_info *,
                         enum remove_bp_reason) override;

  void fetch_registers (struct regcache *, int) override;

  void store_registers (struct regcache *, int) override;
  void prepare_to_store (struct regcache *) override;

  const struct frame_unwind *get_unwinder () override;

  const struct frame_unwind *get_tailcall_unwinder () override;

  void commit_resume () override;
  void resume (ptid_t, int, enum gdb_signal) override;
  ptid_t wait (ptid_t, struct target_waitstatus *, int) override;

  void stop (ptid_t) override;
  void update_thread_list () override;
  bool thread_alive (ptid_t ptid) override;
  void goto_record_begin () override;
  void goto_record_end () override;
  void goto_record (ULONGEST insn) override;

  bool can_execute_reverse () override;

  bool stopped_by_sw_breakpoint () override;
  bool supports_stopped_by_sw_breakpoint () override;

  bool stopped_by_hw_breakpoint () override;
  bool supports_stopped_by_hw_breakpoint () override;

  enum exec_direction_kind execution_direction () override;
  void prepare_to_generate_core () override;
  void done_generating_core () override;
};

static record_btrace_target record_btrace_ops;

/* Initialize the record-btrace target ops.  */

/* Token associated with a new-thread observer enabling branch tracing
   for the new thread.  */
static const gdb::observers::token record_btrace_thread_observer_token {};

/* Memory access types used in set/show record btrace replay-memory-access.  */
static const char replay_memory_access_read_only[] = "read-only";
static const char replay_memory_access_read_write[] = "read-write";
static const char *const replay_memory_access_types[] =
{
  replay_memory_access_read_only,
  replay_memory_access_read_write,
  NULL
};

/* The currently allowed replay memory access type.  */
static const char *replay_memory_access = replay_memory_access_read_only;

/* The cpu state kinds.  */
enum record_btrace_cpu_state_kind
{
  CS_AUTO,
  CS_NONE,
  CS_CPU
};

/* The current cpu state.  */
static enum record_btrace_cpu_state_kind record_btrace_cpu_state = CS_AUTO;

/* The current cpu for trace decode.  */
static struct btrace_cpu record_btrace_cpu;

/* Command lists for "set/show record btrace".  */
static struct cmd_list_element *set_record_btrace_cmdlist;
static struct cmd_list_element *show_record_btrace_cmdlist;

/* The execution direction of the last resume we got.  See record-full.c.  */
static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;

/* The async event handler for reverse/replay execution.  */
static struct async_event_handler *record_btrace_async_inferior_event_handler;

/* A flag indicating that we are currently generating a core file.  */
static int record_btrace_generating_corefile;

/* The current branch trace configuration.  */
static struct btrace_config record_btrace_conf;

/* Command list for "record btrace".  */
static struct cmd_list_element *record_btrace_cmdlist;

/* Command lists for "set/show record btrace bts".  */
static struct cmd_list_element *set_record_btrace_bts_cmdlist;
static struct cmd_list_element *show_record_btrace_bts_cmdlist;

/* Command lists for "set/show record btrace pt".  */
static struct cmd_list_element *set_record_btrace_pt_cmdlist;
static struct cmd_list_element *show_record_btrace_pt_cmdlist;

/* Command list for "set record btrace cpu".  */
static struct cmd_list_element *set_record_btrace_cpu_cmdlist;

/* Print a record-btrace debug message.  Use do ... while (0) to avoid
   ambiguities when used in if statements.  */

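/* For example, without the do/while wrapper the expansion of

     if (cond)
       DEBUG ("msg");
     else
       other ();

   would attach the `else' to the macro's internal `if (record_debug != 0)'
   rather than to the outer `if (cond)'.  */
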
#define DEBUG(msg, args...) \
  do \
    { \
      if (record_debug != 0) \
        fprintf_unfiltered (gdb_stdlog, \
                            "[record-btrace] " msg "\n", ##args); \
    } \
  while (0)


/* Return the cpu configured by the user.  Returns NULL if the cpu was
   configured as auto.  */
const struct btrace_cpu *
record_btrace_get_cpu (void)
{
  switch (record_btrace_cpu_state)
    {
    case CS_AUTO:
      return nullptr;

    case CS_NONE:
      record_btrace_cpu.vendor = CV_UNKNOWN;
      /* Fall through.  */
    case CS_CPU:
      return &record_btrace_cpu;
    }

  error (_("Internal error: bad record btrace cpu state."));
}

/* Update the branch trace for the current thread and return a pointer to its
   thread_info.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct thread_info *
require_btrace_thread (void)
{
  DEBUG ("require");

  if (inferior_ptid == null_ptid)
    error (_("No thread."));

  thread_info *tp = inferior_thread ();

  validate_registers_access ();

  btrace_fetch (tp, record_btrace_get_cpu ());

  if (btrace_is_empty (tp))
    error (_("No trace."));

  return tp;
}

/* Update the branch trace for the current thread and return a pointer to its
   branch trace information struct.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct btrace_thread_info *
require_btrace (void)
{
  struct thread_info *tp;

  tp = require_btrace_thread ();

  return &tp->btrace;
}

/* Enable branch tracing for one thread.  Warn on errors.  */

static void
record_btrace_enable_warn (struct thread_info *tp)
{
  /* Ignore this thread if its inferior is not recorded by us.  */
  target_ops *rec = tp->inf->target_at (record_stratum);
  if (rec != &record_btrace_ops)
    return;

  try
    {
      btrace_enable (tp, &record_btrace_conf);
    }
  catch (const gdb_exception_error &error)
    {
      warning ("%s", error.what ());
    }
}

/* Enable automatic tracing of new threads.  */

static void
record_btrace_auto_enable (void)
{
  DEBUG ("attach thread observer");

  gdb::observers::new_thread.attach (record_btrace_enable_warn,
                                     record_btrace_thread_observer_token);
}

/* Disable automatic tracing of new threads.  */

static void
record_btrace_auto_disable (void)
{
  DEBUG ("detach thread observer");

  gdb::observers::new_thread.detach (record_btrace_thread_observer_token);
}

/* The record-btrace async event handler function.  */

static void
record_btrace_handle_async_inferior_event (gdb_client_data data)
{
  inferior_event_handler (INF_REG_EVENT, NULL);
}

/* See record-btrace.h.  */

void
record_btrace_push_target (void)
{
  const char *format;

  record_btrace_auto_enable ();

  push_target (&record_btrace_ops);

  record_btrace_async_inferior_event_handler
    = create_async_event_handler (record_btrace_handle_async_inferior_event,
                                  NULL);
  record_btrace_generating_corefile = 0;

  format = btrace_format_short_string (record_btrace_conf.format);
  gdb::observers::record_changed.notify (current_inferior (), 1, "btrace",
                                         format);
}

/* Disable btrace on a set of threads on scope exit.  */

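/* Typical use (see record_btrace_target_open below): enable tracing one
   thread at a time, registering each thread with add_thread (); once every
   enable succeeded, call discard ().  If an enable throws, the destructor
   disables exactly the threads enabled so far.  */
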
struct scoped_btrace_disable
{
  scoped_btrace_disable () = default;

  DISABLE_COPY_AND_ASSIGN (scoped_btrace_disable);

  ~scoped_btrace_disable ()
  {
    for (thread_info *tp : m_threads)
      btrace_disable (tp);
  }

  void add_thread (thread_info *thread)
  {
    m_threads.push_front (thread);
  }

  void discard ()
  {
    m_threads.clear ();
  }

private:
  std::forward_list<thread_info *> m_threads;
};

/* Open target record-btrace.  */

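/* ARGS, if non-empty, is a number list selecting the threads to record; it
   is matched against each thread's global number.  Without ARGS, all
   non-exited threads of the current inferior are recorded.  */
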
static void
record_btrace_target_open (const char *args, int from_tty)
{
  /* If we fail to enable btrace for one thread, disable it for the threads for
     which it was successfully enabled.  */
  scoped_btrace_disable btrace_disable;

  DEBUG ("open");

  record_preopen ();

  if (!target_has_execution)
    error (_("The program is not being run."));

  for (thread_info *tp : current_inferior ()->non_exited_threads ())
    if (args == NULL || *args == 0 || number_is_in_list (args, tp->global_num))
      {
        btrace_enable (tp, &record_btrace_conf);

        btrace_disable.add_thread (tp);
      }

  record_btrace_push_target ();

  btrace_disable.discard ();
}

/* The stop_recording method of target record-btrace.  */

void
record_btrace_target::stop_recording ()
{
  DEBUG ("stop recording");

  record_btrace_auto_disable ();

  for (thread_info *tp : current_inferior ()->non_exited_threads ())
    if (tp->btrace.target != NULL)
      btrace_disable (tp);
}

/* The disconnect method of target record-btrace.  */

void
record_btrace_target::disconnect (const char *args,
                                  int from_tty)
{
  struct target_ops *beneath = this->beneath ();

  /* Do not stop recording, just clean up GDB side.  */
  unpush_target (this);

  /* Forward disconnect.  */
  beneath->disconnect (args, from_tty);
}

/* The close method of target record-btrace.  */

void
record_btrace_target::close ()
{
  if (record_btrace_async_inferior_event_handler != NULL)
    delete_async_event_handler (&record_btrace_async_inferior_event_handler);

  /* Make sure automatic recording gets disabled even if we did not stop
     recording before closing the record-btrace target.  */
  record_btrace_auto_disable ();

  /* We should have already stopped recording.
     Tear down btrace in case we have not.  */
  for (thread_info *tp : current_inferior ()->non_exited_threads ())
    btrace_teardown (tp);
}

/* The async method of target record-btrace.  */

void
record_btrace_target::async (int enable)
{
  if (enable)
    mark_async_event_handler (record_btrace_async_inferior_event_handler);
  else
    clear_async_event_handler (record_btrace_async_inferior_event_handler);

  this->beneath ()->async (enable);
}

/* Adjusts the size and returns a human readable size suffix.  */

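/* Only exact multiples are scaled; e.g. a size of 4194304 (4 << 20) becomes
   4 with suffix "MB", while 4194305 is returned unchanged with an empty
   suffix.  */
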
static const char *
record_btrace_adjust_size (unsigned int *size)
{
  unsigned int sz;

  sz = *size;

  if ((sz & ((1u << 30) - 1)) == 0)
    {
      *size = sz >> 30;
      return "GB";
    }
  else if ((sz & ((1u << 20) - 1)) == 0)
    {
      *size = sz >> 20;
      return "MB";
    }
  else if ((sz & ((1u << 10) - 1)) == 0)
    {
      *size = sz >> 10;
      return "kB";
    }
  else
    return "";
}

/* Print a BTS configuration.  */

static void
record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
{
  const char *suffix;
  unsigned int size;

  size = conf->size;
  if (size > 0)
    {
      suffix = record_btrace_adjust_size (&size);
      printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
    }
}

/* Print an Intel Processor Trace configuration.  */

static void
record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
{
  const char *suffix;
  unsigned int size;

  size = conf->size;
  if (size > 0)
    {
      suffix = record_btrace_adjust_size (&size);
      printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
    }
}

/* Print a branch tracing configuration.  */

static void
record_btrace_print_conf (const struct btrace_config *conf)
{
  printf_unfiltered (_("Recording format: %s.\n"),
                     btrace_format_string (conf->format));

  switch (conf->format)
    {
    case BTRACE_FORMAT_NONE:
      return;

    case BTRACE_FORMAT_BTS:
      record_btrace_print_bts_conf (&conf->bts);
      return;

    case BTRACE_FORMAT_PT:
      record_btrace_print_pt_conf (&conf->pt);
      return;
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
}

/* The info_record method of target record-btrace.  */

void
record_btrace_target::info_record ()
{
  struct btrace_thread_info *btinfo;
  const struct btrace_config *conf;
  struct thread_info *tp;
  unsigned int insns, calls, gaps;

  DEBUG ("info");

  if (inferior_ptid == null_ptid)
    error (_("No thread."));

  tp = inferior_thread ();

  validate_registers_access ();

  btinfo = &tp->btrace;

  conf = ::btrace_conf (btinfo);
  if (conf != NULL)
    record_btrace_print_conf (conf);

  btrace_fetch (tp, record_btrace_get_cpu ());

  insns = 0;
  calls = 0;
  gaps = 0;

  if (!btrace_is_empty (tp))
    {
      struct btrace_call_iterator call;
      struct btrace_insn_iterator insn;

      btrace_call_end (&call, btinfo);
      btrace_call_prev (&call, 1);
      calls = btrace_call_number (&call);

      btrace_insn_end (&insn, btinfo);
      insns = btrace_insn_number (&insn);

      /* If the last instruction is not a gap, it is the current instruction
         that is not actually part of the record.  */
      if (btrace_insn_get (&insn) != NULL)
        insns -= 1;

      gaps = btinfo->ngaps;
    }

  printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
                       "for thread %s (%s).\n"), insns, calls, gaps,
                     print_thread_id (tp),
                     target_pid_to_str (tp->ptid).c_str ());

  if (btrace_is_replaying (tp))
    printf_unfiltered (_("Replay in progress.  At instruction %u.\n"),
                       btrace_insn_number (btinfo->replay));
}

/* Print a decode error.  */

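/* The result looks like "[decode error (<errcode>): <errstr>]", or just
   "[<errstr>]" for Intel PT notifications, which use ERRCODE > 0.  */
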
static void
btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
                            enum btrace_format format)
{
  const char *errstr = btrace_decode_error (format, errcode);

  uiout->text (_("["));
  /* ERRCODE > 0 indicates notifications on BTRACE_FORMAT_PT.  */
  if (!(format == BTRACE_FORMAT_PT && errcode > 0))
    {
      uiout->text (_("decode error ("));
      uiout->field_signed ("errcode", errcode);
      uiout->text (_("): "));
    }
  uiout->text (errstr);
  uiout->text (_("]\n"));
}

/* A range of source lines.  */

struct btrace_line_range
{
  /* The symtab this line is from.  */
  struct symtab *symtab;

  /* The first line (inclusive).  */
  int begin;

  /* The last line (exclusive).  */
  int end;
};

/* Construct a line range.  */

static struct btrace_line_range
btrace_mk_line_range (struct symtab *symtab, int begin, int end)
{
  struct btrace_line_range range;

  range.symtab = symtab;
  range.begin = begin;
  range.end = end;

  return range;
}

/* Add a line to a line range.  */

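/* The range is kept as a single interval, not a set; e.g. adding line 7 to
   an empty range yields [7, 8), and adding line 3 afterwards widens it to
   [3, 8) even though lines 4 to 6 were never added explicitly.  */
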
static struct btrace_line_range
btrace_line_range_add (struct btrace_line_range range, int line)
{
  if (range.end <= range.begin)
    {
      /* This is the first entry.  */
      range.begin = line;
      range.end = line + 1;
    }
  else if (line < range.begin)
    range.begin = line;
  else if (range.end < line)
    range.end = line;

  return range;
}

/* Return non-zero if RANGE is empty, zero otherwise.  */

static int
btrace_line_range_is_empty (struct btrace_line_range range)
{
  return range.end <= range.begin;
}

/* Return non-zero if LHS contains RHS, zero otherwise.  */

static int
btrace_line_range_contains_range (struct btrace_line_range lhs,
                                  struct btrace_line_range rhs)
{
  return ((lhs.symtab == rhs.symtab)
          && (lhs.begin <= rhs.begin)
          && (rhs.end <= lhs.end));
}

/* Find the line range associated with PC.  */

static struct btrace_line_range
btrace_find_line_range (CORE_ADDR pc)
{
  struct btrace_line_range range;
  struct linetable_entry *lines;
  struct linetable *ltable;
  struct symtab *symtab;
  int nlines, i;

  symtab = find_pc_line_symtab (pc);
  if (symtab == NULL)
    return btrace_mk_line_range (NULL, 0, 0);

  ltable = SYMTAB_LINETABLE (symtab);
  if (ltable == NULL)
    return btrace_mk_line_range (symtab, 0, 0);

  nlines = ltable->nitems;
  lines = ltable->item;
  if (nlines <= 0)
    return btrace_mk_line_range (symtab, 0, 0);

  range = btrace_mk_line_range (symtab, 0, 0);
  for (i = 0; i < nlines - 1; i++)
    {
      /* The test of is_stmt here was added when the is_stmt field was
         introduced to the 'struct linetable_entry' structure.  This
         ensured that this loop maintained the same behaviour as before we
         introduced is_stmt.  That said, it might be that we would be
         better off not checking is_stmt here; this would lead to us
         possibly adding more line numbers to the range.  At the time this
         change was made I was unsure how to test this so chose to go with
         maintaining the existing experience.  */
      if ((lines[i].pc == pc) && (lines[i].line != 0)
          && (lines[i].is_stmt == 1))
        range = btrace_line_range_add (range, lines[i].line);
    }

  return range;
}

/* Print source lines in LINES to UIOUT.

   UI_ITEM_CHAIN is a cleanup chain for the last source line and the
   instructions corresponding to that source line.  When printing a new source
   line, we do the cleanups for the open chain and open a new cleanup chain for
   the new source line.  If the source line range in LINES is not empty, this
   function will leave the cleanup chain for the last printed source line open
   so instructions can be added to it.  */

static void
btrace_print_lines (struct btrace_line_range lines, struct ui_out *uiout,
                    gdb::optional<ui_out_emit_tuple> *src_and_asm_tuple,
                    gdb::optional<ui_out_emit_list> *asm_list,
                    gdb_disassembly_flags flags)
{
  print_source_lines_flags psl_flags;

  if (flags & DISASSEMBLY_FILENAME)
    psl_flags |= PRINT_SOURCE_LINES_FILENAME;

  for (int line = lines.begin; line < lines.end; ++line)
    {
      asm_list->reset ();

      src_and_asm_tuple->emplace (uiout, "src_and_asm_line");

      print_source_lines (lines.symtab, line, line + 1, psl_flags);

      asm_list->emplace (uiout, "line_asm_insn");
    }
}

/* Disassemble a section of the recorded instruction trace.  */

static void
btrace_insn_history (struct ui_out *uiout,
                     const struct btrace_thread_info *btinfo,
                     const struct btrace_insn_iterator *begin,
                     const struct btrace_insn_iterator *end,
                     gdb_disassembly_flags flags)
{
  DEBUG ("itrace (0x%x): [%u; %u)", (unsigned) flags,
         btrace_insn_number (begin), btrace_insn_number (end));

  flags |= DISASSEMBLY_SPECULATIVE;

  struct gdbarch *gdbarch = target_gdbarch ();
  btrace_line_range last_lines = btrace_mk_line_range (NULL, 0, 0);

  ui_out_emit_list list_emitter (uiout, "asm_insns");

  gdb::optional<ui_out_emit_tuple> src_and_asm_tuple;
  gdb::optional<ui_out_emit_list> asm_list;

  gdb_pretty_print_disassembler disasm (gdbarch, uiout);

  for (btrace_insn_iterator it = *begin; btrace_insn_cmp (&it, end) != 0;
       btrace_insn_next (&it, 1))
    {
      const struct btrace_insn *insn;

      insn = btrace_insn_get (&it);

      /* A NULL instruction indicates a gap in the trace.  */
      if (insn == NULL)
        {
          const struct btrace_config *conf;

          conf = btrace_conf (btinfo);

          /* We have trace so we must have a configuration.  */
          gdb_assert (conf != NULL);

          uiout->field_fmt ("insn-number", "%u",
                            btrace_insn_number (&it));
          uiout->text ("\t");

          btrace_ui_out_decode_error (uiout, btrace_insn_get_error (&it),
                                      conf->format);
        }
      else
        {
          struct disasm_insn dinsn;

          if ((flags & DISASSEMBLY_SOURCE) != 0)
            {
              struct btrace_line_range lines;

              lines = btrace_find_line_range (insn->pc);
              if (!btrace_line_range_is_empty (lines)
                  && !btrace_line_range_contains_range (last_lines, lines))
                {
                  btrace_print_lines (lines, uiout, &src_and_asm_tuple,
                                      &asm_list, flags);
                  last_lines = lines;
                }
              else if (!src_and_asm_tuple.has_value ())
                {
                  gdb_assert (!asm_list.has_value ());

                  src_and_asm_tuple.emplace (uiout, "src_and_asm_line");

                  /* No source information.  */
                  asm_list.emplace (uiout, "line_asm_insn");
                }

              gdb_assert (src_and_asm_tuple.has_value ());
              gdb_assert (asm_list.has_value ());
            }

          memset (&dinsn, 0, sizeof (dinsn));
          dinsn.number = btrace_insn_number (&it);
          dinsn.addr = insn->pc;

          if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
            dinsn.is_speculative = 1;

          disasm.pretty_print_insn (&dinsn, flags);
        }
    }
}

/* The insn_history method of target record-btrace.  */

void
record_btrace_target::insn_history (int size, gdb_disassembly_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  btinfo = require_btrace ();
  history = btinfo->insn_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("insn-history (0x%x): %d", (unsigned) flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
         start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
        begin = *replay;
      else
        btrace_insn_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
         expand in the other direction, as well, to fill up any remaining
         context.  */
      end = begin;
      if (size < 0)
        {
          /* We want the current position covered, as well.  */
          covered = btrace_insn_next (&end, 1);
          covered += btrace_insn_prev (&begin, context - covered);
          covered += btrace_insn_next (&end, context - covered);
        }
      else
        {
          covered = btrace_insn_next (&end, context);
          covered += btrace_insn_prev (&begin, context - covered);
        }
    }
  else
    {
      begin = history->begin;
      end = history->end;

      DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", (unsigned) flags, size,
             btrace_insn_number (&begin), btrace_insn_number (&end));

      if (size < 0)
        {
          end = begin;
          covered = btrace_insn_prev (&begin, context);
        }
      else
        {
          begin = end;
          covered = btrace_insn_next (&end, context);
        }
    }

  if (covered > 0)
    btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
        printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
        printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  btrace_set_insn_history (btinfo, &begin, &end);
}

/* The insn_history_range method of target record-btrace.  */

void
record_btrace_target::insn_history_range (ULONGEST from, ULONGEST to,
                                          gdb_disassembly_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_iterator begin, end;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "insn history");
  low = from;
  high = to;

  DEBUG ("insn-history (0x%x): [%u; %u)", (unsigned) flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_insn_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_insn_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_insn_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_insn_next (&end, 1);
    }

  btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_insn_history (btinfo, &begin, &end);
}

/* The insn_history_from method of target record-btrace.  */

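/* For example, FROM == 10 with SIZE == -5 requests the five instructions
   ending at instruction 10: BEGIN == 6 and END == 10, both inclusive.  */
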
void
record_btrace_target::insn_history_from (ULONGEST from, int size,
                                         gdb_disassembly_flags flags)
{
  ULONGEST begin, end, context;

  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  if (size < 0)
    {
      end = from;

      if (from < context)
        begin = 0;
      else
        begin = from - context + 1;
    }
  else
    {
      begin = from;
      end = from + context - 1;

      /* Check for wrap-around.  */
      if (end < begin)
        end = ULONGEST_MAX;
    }

  insn_history_range (begin, end, flags);
}

/* Print the instruction number range for a function call history line.  */

static void
btrace_call_history_insn_range (struct ui_out *uiout,
                                const struct btrace_function *bfun)
{
  unsigned int begin, end, size;

  size = bfun->insn.size ();
  gdb_assert (size > 0);

  begin = bfun->insn_offset;
  end = begin + size - 1;

  uiout->field_unsigned ("insn begin", begin);
  uiout->text (",");
  uiout->field_unsigned ("insn end", end);
}

/* Compute the lowest and highest source line for the instructions in BFUN
   and return them in PBEGIN and PEND.
   Ignore instructions that can't be mapped to BFUN, e.g. instructions that
   result from inlining or macro expansion.  */

static void
btrace_compute_src_line_range (const struct btrace_function *bfun,
                               int *pbegin, int *pend)
{
  struct symtab *symtab;
  struct symbol *sym;
  int begin, end;

  begin = INT_MAX;
  end = INT_MIN;

  sym = bfun->sym;
  if (sym == NULL)
    goto out;

  symtab = symbol_symtab (sym);

  for (const btrace_insn &insn : bfun->insn)
    {
      struct symtab_and_line sal;

      sal = find_pc_line (insn.pc, 0);
      if (sal.symtab != symtab || sal.line == 0)
        continue;

      begin = std::min (begin, sal.line);
      end = std::max (end, sal.line);
    }

 out:
  *pbegin = begin;
  *pend = end;
}

/* Print the source line information for a function call history line.  */

static void
btrace_call_history_src_line (struct ui_out *uiout,
                              const struct btrace_function *bfun)
{
  struct symbol *sym;
  int begin, end;

  sym = bfun->sym;
  if (sym == NULL)
    return;

  uiout->field_string ("file",
                       symtab_to_filename_for_display (symbol_symtab (sym)),
                       file_name_style.style ());

  btrace_compute_src_line_range (bfun, &begin, &end);
  if (end < begin)
    return;

  uiout->text (":");
  uiout->field_signed ("min line", begin);

  if (end == begin)
    return;

  uiout->text (",");
  uiout->field_signed ("max line", end);
}

/* Get the name of a branch trace function.  */

static const char *
btrace_get_bfun_name (const struct btrace_function *bfun)
{
  struct minimal_symbol *msym;
  struct symbol *sym;

  if (bfun == NULL)
    return "??";

  msym = bfun->msym;
  sym = bfun->sym;

  if (sym != NULL)
    return sym->print_name ();
  else if (msym != NULL)
    return msym->print_name ();
  else
    return "??";
}

/* Disassemble a section of the recorded function trace.  */

static void
btrace_call_history (struct ui_out *uiout,
                     const struct btrace_thread_info *btinfo,
                     const struct btrace_call_iterator *begin,
                     const struct btrace_call_iterator *end,
                     int int_flags)
{
  struct btrace_call_iterator it;
  record_print_flags flags = (enum record_print_flag) int_flags;

  DEBUG ("ftrace (0x%x): [%u; %u)", int_flags, btrace_call_number (begin),
         btrace_call_number (end));

  for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
    {
      const struct btrace_function *bfun;
      struct minimal_symbol *msym;
      struct symbol *sym;

      bfun = btrace_call_get (&it);
      sym = bfun->sym;
      msym = bfun->msym;

      /* Print the function index.  */
      uiout->field_unsigned ("index", bfun->number);
      uiout->text ("\t");

      /* Indicate gaps in the trace.  */
      if (bfun->errcode != 0)
        {
          const struct btrace_config *conf;

          conf = btrace_conf (btinfo);

          /* We have trace so we must have a configuration.  */
          gdb_assert (conf != NULL);

          btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);

          continue;
        }

      if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
        {
          int level = bfun->level + btinfo->level, i;

          for (i = 0; i < level; ++i)
            uiout->text ("  ");
        }

      if (sym != NULL)
        uiout->field_string ("function", sym->print_name (),
                             function_name_style.style ());
      else if (msym != NULL)
        uiout->field_string ("function", msym->print_name (),
                             function_name_style.style ());
      else if (!uiout->is_mi_like_p ())
        uiout->field_string ("function", "??",
                             function_name_style.style ());

      if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
        {
          uiout->text (_("\tinst "));
          btrace_call_history_insn_range (uiout, bfun);
        }

      if ((flags & RECORD_PRINT_SRC_LINE) != 0)
        {
          uiout->text (_("\tat "));
          btrace_call_history_src_line (uiout, bfun);
        }

      uiout->text ("\n");
    }
}

/* The call_history method of target record-btrace.  */

void
record_btrace_target::call_history (int size, record_print_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  btinfo = require_btrace ();
  history = btinfo->call_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("call-history (0x%x): %d", (int) flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
         start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
        {
          begin.btinfo = btinfo;
          begin.index = replay->call_index;
        }
      else
        btrace_call_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
         expand in the other direction, as well, to fill up any remaining
         context.  */
      end = begin;
      if (size < 0)
        {
          /* We want the current position covered, as well.  */
          covered = btrace_call_next (&end, 1);
          covered += btrace_call_prev (&begin, context - covered);
          covered += btrace_call_next (&end, context - covered);
        }
      else
        {
          covered = btrace_call_next (&end, context);
          covered += btrace_call_prev (&begin, context - covered);
        }
    }
  else
    {
      begin = history->begin;
      end = history->end;

      DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", (int) flags, size,
             btrace_call_number (&begin), btrace_call_number (&end));

      if (size < 0)
        {
          end = begin;
          covered = btrace_call_prev (&begin, context);
        }
      else
        {
          begin = end;
          covered = btrace_call_next (&end, context);
        }
    }

  if (covered > 0)
    btrace_call_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
        printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
        printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  btrace_set_call_history (btinfo, &begin, &end);
}

/* The call_history_range method of target record-btrace.  */

void
record_btrace_target::call_history_range (ULONGEST from, ULONGEST to,
                                          record_print_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_iterator begin, end;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "func history");
  low = from;
  high = to;

  DEBUG ("call-history (0x%x): [%u; %u)", (int) flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_call_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_call_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_call_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_call_next (&end, 1);
    }

  btrace_call_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_call_history (btinfo, &begin, &end);
}

/* The call_history_from method of target record-btrace.  */

void
record_btrace_target::call_history_from (ULONGEST from, int size,
                                         record_print_flags flags)
{
  ULONGEST begin, end, context;

  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  if (size < 0)
    {
      end = from;

      if (from < context)
        begin = 0;
      else
        begin = from - context + 1;
    }
  else
    {
      begin = from;
      end = from + context - 1;

      /* Check for wrap-around.  */
      if (end < begin)
        end = ULONGEST_MAX;
    }

  call_history_range (begin, end, flags);
}

/* The record_method method of target record-btrace.  */

enum record_method
record_btrace_target::record_method (ptid_t ptid)
{
  process_stratum_target *proc_target = current_inferior ()->process_target ();
  thread_info *const tp = find_thread_ptid (proc_target, ptid);

  if (tp == NULL)
    error (_("No thread."));

  if (tp->btrace.target == NULL)
    return RECORD_METHOD_NONE;

  return RECORD_METHOD_BTRACE;
}

/* The record_is_replaying method of target record-btrace.  */

bool
record_btrace_target::record_is_replaying (ptid_t ptid)
{
  process_stratum_target *proc_target = current_inferior ()->process_target ();
  for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
    if (btrace_is_replaying (tp))
      return true;

  return false;
}

/* The record_will_replay method of target record-btrace.  */

bool
record_btrace_target::record_will_replay (ptid_t ptid, int dir)
{
  return dir == EXEC_REVERSE || record_is_replaying (ptid);
}

/* The xfer_partial method of target record-btrace.  */

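/* While replaying with replay-memory-access set to "read-only", memory
   writes are rejected and reads are restricted to read-only sections;
   anything else reports TARGET_XFER_UNAVAILABLE.  Outside of replay, the
   request is forwarded to the target beneath unmodified.  */
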
enum target_xfer_status
record_btrace_target::xfer_partial (enum target_object object,
                                    const char *annex, gdb_byte *readbuf,
                                    const gdb_byte *writebuf, ULONGEST offset,
                                    ULONGEST len, ULONGEST *xfered_len)
{
  /* Filter out requests that don't make sense during replay.  */
  if (replay_memory_access == replay_memory_access_read_only
      && !record_btrace_generating_corefile
      && record_is_replaying (inferior_ptid))
    {
      switch (object)
        {
        case TARGET_OBJECT_MEMORY:
          {
            struct target_section *section;

            /* We do not allow writing memory in general.  */
            if (writebuf != NULL)
              {
                *xfered_len = len;
                return TARGET_XFER_UNAVAILABLE;
              }

            /* We allow reading readonly memory.  */
            section = target_section_by_addr (this, offset);
            if (section != NULL)
              {
                /* Check if the section we found is readonly.  */
                if ((bfd_section_flags (section->the_bfd_section)
                     & SEC_READONLY) != 0)
                  {
                    /* Truncate the request to fit into this section.  */
                    len = std::min (len, section->endaddr - offset);
                    break;
                  }
              }

            *xfered_len = len;
            return TARGET_XFER_UNAVAILABLE;
          }
        }
    }

  /* Forward the request.  */
  return this->beneath ()->xfer_partial (object, annex, readbuf, writebuf,
                                         offset, len, xfered_len);
}

/* The insert_breakpoint method of target record-btrace.  */

int
record_btrace_target::insert_breakpoint (struct gdbarch *gdbarch,
                                         struct bp_target_info *bp_tgt)
{
  const char *old;
  int ret;

  /* Inserting breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  try
    {
      ret = this->beneath ()->insert_breakpoint (gdbarch, bp_tgt);
    }
  catch (const gdb_exception &except)
    {
      replay_memory_access = old;
      throw;
    }
  replay_memory_access = old;

  return ret;
}

/* The remove_breakpoint method of target record-btrace.  */

int
record_btrace_target::remove_breakpoint (struct gdbarch *gdbarch,
                                         struct bp_target_info *bp_tgt,
                                         enum remove_bp_reason reason)
{
  const char *old;
  int ret;

  /* Removing breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  try
    {
      ret = this->beneath ()->remove_breakpoint (gdbarch, bp_tgt, reason);
    }
  catch (const gdb_exception &except)
    {
      replay_memory_access = old;
      throw;
    }
  replay_memory_access = old;

  return ret;
}

/* The fetch_registers method of target record-btrace.  */

void
record_btrace_target::fetch_registers (struct regcache *regcache, int regno)
{
  btrace_insn_iterator *replay = nullptr;

  /* Thread-db may ask for a thread's registers before GDB knows about the
     thread.  We forward the request to the target beneath in this
     case.  */
  thread_info *tp = find_thread_ptid (regcache->target (), regcache->ptid ());
  if (tp != nullptr)
    replay = tp->btrace.replay;

  if (replay != nullptr && !record_btrace_generating_corefile)
    {
      const struct btrace_insn *insn;
      struct gdbarch *gdbarch;
      int pcreg;

      gdbarch = regcache->arch ();
      pcreg = gdbarch_pc_regnum (gdbarch);
      if (pcreg < 0)
        return;

      /* We can only provide the PC register.  */
      if (regno >= 0 && regno != pcreg)
        return;

      insn = btrace_insn_get (replay);
      gdb_assert (insn != NULL);

      regcache->raw_supply (regno, &insn->pc);
    }
  else
    this->beneath ()->fetch_registers (regcache, regno);
}

/* The store_registers method of target record-btrace.  */

void
record_btrace_target::store_registers (struct regcache *regcache, int regno)
{
  if (!record_btrace_generating_corefile
      && record_is_replaying (regcache->ptid ()))
    error (_("Cannot write registers while replaying."));

  gdb_assert (may_write_registers);

  this->beneath ()->store_registers (regcache, regno);
}

/* The prepare_to_store method of target record-btrace.  */

void
record_btrace_target::prepare_to_store (struct regcache *regcache)
{
  if (!record_btrace_generating_corefile
      && record_is_replaying (regcache->ptid ()))
    return;

  this->beneath ()->prepare_to_store (regcache);
}

/* The branch trace frame cache.  */

struct btrace_frame_cache
{
  /* The thread.  */
  struct thread_info *tp;

  /* The frame info.  */
  struct frame_info *frame;

  /* The branch trace function segment.  */
  const struct btrace_function *bfun;
};

/* A struct btrace_frame_cache hash table indexed by NEXT.  */

static htab_t bfcache;

/* hash_f for htab_create_alloc of bfcache.  */

static hashval_t
bfcache_hash (const void *arg)
{
  const struct btrace_frame_cache *cache
    = (const struct btrace_frame_cache *) arg;

  return htab_hash_pointer (cache->frame);
}

/* eq_f for htab_create_alloc of bfcache.  */

static int
bfcache_eq (const void *arg1, const void *arg2)
{
  const struct btrace_frame_cache *cache1
    = (const struct btrace_frame_cache *) arg1;
  const struct btrace_frame_cache *cache2
    = (const struct btrace_frame_cache *) arg2;

  return cache1->frame == cache2->frame;
}

/* Create a new btrace frame cache.  */

static struct btrace_frame_cache *
bfcache_new (struct frame_info *frame)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
  cache->frame = frame;

  slot = htab_find_slot (bfcache, cache, INSERT);
  gdb_assert (*slot == NULL);
  *slot = cache;

  return cache;
}

/* Extract the branch trace function from a branch trace frame.  */

static const struct btrace_function *
btrace_get_frame_function (struct frame_info *frame)
{
  const struct btrace_frame_cache *cache;
  struct btrace_frame_cache pattern;
  void **slot;

  pattern.frame = frame;

  slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
  if (slot == NULL)
    return NULL;

  cache = (const struct btrace_frame_cache *) *slot;
  return cache->bfun;
}

/* Implement stop_reason method for record_btrace_frame_unwind.  */

static enum unwind_stop_reason
record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
                                        void **this_cache)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;

  cache = (const struct btrace_frame_cache *) *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  if (bfun->up == 0)
    return UNWIND_UNAVAILABLE;

  return UNWIND_NO_REASON;
}

/* Implement this_id method for record_btrace_frame_unwind.  */

static void
record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
                             struct frame_id *this_id)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;
  struct btrace_call_iterator it;
  CORE_ADDR code, special;

  cache = (const struct btrace_frame_cache *) *this_cache;

  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  while (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->prev) != 0)
    bfun = btrace_call_get (&it);

  code = get_frame_func (this_frame);
  special = bfun->number;

  *this_id = frame_id_build_unavailable_stack_special (code, special);

  DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
         btrace_get_bfun_name (cache->bfun),
         core_addr_to_string_nz (this_id->code_addr),
         core_addr_to_string_nz (this_id->special_addr));
}

/* Implement prev_register method for record_btrace_frame_unwind.  */

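/* Only the PC can be unwound.  If the up link carries
   BFUN_UP_LINKS_TO_RET, we use the address of the caller's first
   instruction; otherwise the caller resumes after the call site, so we use
   the address just past the caller's last recorded instruction.  */
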
1729 static struct value *
1730 record_btrace_frame_prev_register (struct frame_info *this_frame,
1731 void **this_cache,
1732 int regnum)
1733 {
1734 const struct btrace_frame_cache *cache;
1735 const struct btrace_function *bfun, *caller;
1736 struct btrace_call_iterator it;
1737 struct gdbarch *gdbarch;
1738 CORE_ADDR pc;
1739 int pcreg;
1740
1741 gdbarch = get_frame_arch (this_frame);
1742 pcreg = gdbarch_pc_regnum (gdbarch);
1743 if (pcreg < 0 || regnum != pcreg)
1744 throw_error (NOT_AVAILABLE_ERROR,
1745 _("Registers are not available in btrace record history"));
1746
1747 cache = (const struct btrace_frame_cache *) *this_cache;
1748 bfun = cache->bfun;
1749 gdb_assert (bfun != NULL);
1750
1751 if (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->up) == 0)
1752 throw_error (NOT_AVAILABLE_ERROR,
1753 _("No caller in btrace record history"));
1754
1755 caller = btrace_call_get (&it);
1756
1757 if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
1758 pc = caller->insn.front ().pc;
1759 else
1760 {
1761 pc = caller->insn.back ().pc;
1762 pc += gdb_insn_length (gdbarch, pc);
1763 }
1764
1765 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1766 btrace_get_bfun_name (bfun), bfun->level,
1767 core_addr_to_string_nz (pc));
1768
1769 return frame_unwind_got_address (this_frame, regnum, pc);
1770 }
1771
1772 /* Implement sniffer method for record_btrace_frame_unwind. */
1773
1774 static int
1775 record_btrace_frame_sniffer (const struct frame_unwind *self,
1776 struct frame_info *this_frame,
1777 void **this_cache)
1778 {
1779 const struct btrace_function *bfun;
1780 struct btrace_frame_cache *cache;
1781 struct thread_info *tp;
1782 struct frame_info *next;
1783
1784 /* THIS_FRAME does not contain a reference to its thread. */
1785 tp = inferior_thread ();
1786
1787 bfun = NULL;
1788 next = get_next_frame (this_frame);
1789 if (next == NULL)
1790 {
1791 const struct btrace_insn_iterator *replay;
1792
1793 replay = tp->btrace.replay;
1794 if (replay != NULL)
1795 bfun = &replay->btinfo->functions[replay->call_index];
1796 }
1797 else
1798 {
1799 const struct btrace_function *callee;
1800 struct btrace_call_iterator it;
1801
1802 callee = btrace_get_frame_function (next);
1803 if (callee == NULL || (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
1804 return 0;
1805
1806 if (btrace_find_call_by_number (&it, &tp->btrace, callee->up) == 0)
1807 return 0;
1808
1809 bfun = btrace_call_get (&it);
1810 }
1811
1812 if (bfun == NULL)
1813 return 0;
1814
1815 DEBUG ("[frame] sniffed frame for %s on level %d",
1816 btrace_get_bfun_name (bfun), bfun->level);
1817
1818 /* This is our frame. Initialize the frame cache. */
1819 cache = bfcache_new (this_frame);
1820 cache->tp = tp;
1821 cache->bfun = bfun;
1822
1823 *this_cache = cache;
1824 return 1;
1825 }
1826
1827 /* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1828
1829 static int
1830 record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
1831 struct frame_info *this_frame,
1832 void **this_cache)
1833 {
1834 const struct btrace_function *bfun, *callee;
1835 struct btrace_frame_cache *cache;
1836 struct btrace_call_iterator it;
1837 struct frame_info *next;
1838 struct thread_info *tinfo;
1839
1840 next = get_next_frame (this_frame);
1841 if (next == NULL)
1842 return 0;
1843
1844 callee = btrace_get_frame_function (next);
1845 if (callee == NULL)
1846 return 0;
1847
1848 if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1849 return 0;
1850
1851 tinfo = inferior_thread ();
1852 if (btrace_find_call_by_number (&it, &tinfo->btrace, callee->up) == 0)
1853 return 0;
1854
1855 bfun = btrace_call_get (&it);
1856
1857 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1858 btrace_get_bfun_name (bfun), bfun->level);
1859
1860 /* This is our frame. Initialize the frame cache. */
1861 cache = bfcache_new (this_frame);
1862 cache->tp = tinfo;
1863 cache->bfun = bfun;
1864
1865 *this_cache = cache;
1866 return 1;
1867 }
1868
1869 static void
1870 record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
1871 {
1872 struct btrace_frame_cache *cache;
1873 void **slot;
1874
1875 cache = (struct btrace_frame_cache *) this_cache;
1876
1877 slot = htab_find_slot (bfcache, cache, NO_INSERT);
1878 gdb_assert (slot != NULL);
1879
1880 htab_remove_elt (bfcache, cache);
1881 }
1882
1883 /* btrace recording does not store previous memory content, neither the stack
1884 frames content. Any unwinding would return erroneous results as the stack
1885 contents no longer matches the changed PC value restored from history.
1886 Therefore this unwinder reports any possibly unwound registers as
1887 <unavailable>. */
1888
1889 const struct frame_unwind record_btrace_frame_unwind =
1890 {
1891 NORMAL_FRAME,
1892 record_btrace_frame_unwind_stop_reason,
1893 record_btrace_frame_this_id,
1894 record_btrace_frame_prev_register,
1895 NULL,
1896 record_btrace_frame_sniffer,
1897 record_btrace_frame_dealloc_cache
1898 };
1899
1900 const struct frame_unwind record_btrace_tailcall_frame_unwind =
1901 {
1902 TAILCALL_FRAME,
1903 record_btrace_frame_unwind_stop_reason,
1904 record_btrace_frame_this_id,
1905 record_btrace_frame_prev_register,
1906 NULL,
1907 record_btrace_tailcall_frame_sniffer,
1908 record_btrace_frame_dealloc_cache
1909 };
1910
1911 /* Implement the get_unwinder method. */
1912
1913 const struct frame_unwind *
1914 record_btrace_target::get_unwinder ()
1915 {
1916 return &record_btrace_frame_unwind;
1917 }
1918
1919 /* Implement the get_tailcall_unwinder method. */
1920
1921 const struct frame_unwind *
1922 record_btrace_target::get_tailcall_unwinder ()
1923 {
1924 return &record_btrace_tailcall_frame_unwind;
1925 }
1926
1927 /* Return a human-readable string for FLAG. */
1928
1929 static const char *
1930 btrace_thread_flag_to_str (enum btrace_thread_flag flag)
1931 {
1932 switch (flag)
1933 {
1934 case BTHR_STEP:
1935 return "step";
1936
1937 case BTHR_RSTEP:
1938 return "reverse-step";
1939
1940 case BTHR_CONT:
1941 return "cont";
1942
1943 case BTHR_RCONT:
1944 return "reverse-cont";
1945
1946 case BTHR_STOP:
1947 return "stop";
1948 }
1949
1950 return "<invalid>";
1951 }
1952
1953 /* Indicate that TP should be resumed according to FLAG. */
1954
1955 static void
1956 record_btrace_resume_thread (struct thread_info *tp,
1957 enum btrace_thread_flag flag)
1958 {
1959 struct btrace_thread_info *btinfo;
1960
1961 DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp),
1962 target_pid_to_str (tp->ptid).c_str (), flag,
1963 btrace_thread_flag_to_str (flag));
1964
1965 btinfo = &tp->btrace;
1966
1967 /* Fetch the latest branch trace. */
1968 btrace_fetch (tp, record_btrace_get_cpu ());
1969
1970 /* A resume request overwrites a preceding resume or stop request. */
1971 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
1972 btinfo->flags |= flag;
1973 }
1974
1975 /* Get the current frame for TP. */
1976
1977 static struct frame_id
1978 get_thread_current_frame_id (struct thread_info *tp)
1979 {
1980 struct frame_id id;
1981 bool executing;
1982
1983 /* Set current thread, which is implicitly used by
1984 get_current_frame. */
1985 scoped_restore_current_thread restore_thread;
1986
1987 switch_to_thread (tp);
1988
1989 process_stratum_target *proc_target = tp->inf->process_target ();
1990
1991 /* Clear the executing flag to allow changes to the current frame.
1992 We are not actually running, yet. We just started a reverse execution
1993 command or a record goto command.
1994 For the latter, EXECUTING is false and this has no effect.
1995 For the former, EXECUTING is true and we're in wait, about to
1996 move the thread. Since we need to recompute the stack, we temporarily
1997 set EXECUTING to false. */
1998 executing = tp->executing;
1999 set_executing (proc_target, inferior_ptid, false);
2000
2001 id = null_frame_id;
2002 try
2003 {
2004 id = get_frame_id (get_current_frame ());
2005 }
2006 catch (const gdb_exception &except)
2007 {
2008 /* Restore the previous execution state. */
2009 set_executing (proc_target, inferior_ptid, executing);
2010
2011 throw;
2012 }
2013
2014 /* Restore the previous execution state. */
2015 set_executing (proc_target, inferior_ptid, executing);
2016
2017 return id;
2018 }
2019
2020 /* Start replaying a thread. */
2021
2022 static struct btrace_insn_iterator *
2023 record_btrace_start_replaying (struct thread_info *tp)
2024 {
2025 struct btrace_insn_iterator *replay;
2026 struct btrace_thread_info *btinfo;
2027
2028 btinfo = &tp->btrace;
2029 replay = NULL;
2030
2031 /* We can't start replaying without trace. */
2032 if (btinfo->functions.empty ())
2033 return NULL;
2034
2035 /* GDB stores the current frame_id when stepping in order to detects steps
2036 into subroutines.
2037 Since frames are computed differently when we're replaying, we need to
2038 recompute those stored frames and fix them up so we can still detect
2039 subroutines after we started replaying. */
2040 try
2041 {
2042 struct frame_id frame_id;
2043 int upd_step_frame_id, upd_step_stack_frame_id;
2044
2045 /* The current frame without replaying - computed via normal unwind. */
2046 frame_id = get_thread_current_frame_id (tp);
2047
2048 /* Check if we need to update any stepping-related frame id's. */
2049 upd_step_frame_id = frame_id_eq (frame_id,
2050 tp->control.step_frame_id);
2051 upd_step_stack_frame_id = frame_id_eq (frame_id,
2052 tp->control.step_stack_frame_id);
2053
2054 /* We start replaying at the end of the branch trace. This corresponds
2055 to the current instruction. */
2056 replay = XNEW (struct btrace_insn_iterator);
2057 btrace_insn_end (replay, btinfo);
2058
2059 /* Skip gaps at the end of the trace. */
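/* (A gap marks a stretch of trace that could not be decoded, for
example after a trace-buffer overflow; btrace_insn_get returns
NULL for it.) */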
2060 while (btrace_insn_get (replay) == NULL)
2061 {
2062 unsigned int steps;
2063
2064 steps = btrace_insn_prev (replay, 1);
2065 if (steps == 0)
2066 error (_("No trace."));
2067 }
2068
2069 /* We're not replaying, yet. */
2070 gdb_assert (btinfo->replay == NULL);
2071 btinfo->replay = replay;
2072
2073 /* Make sure we're not using any stale registers. */
2074 registers_changed_thread (tp);
2075
2076 /* The current frame with replaying - computed via btrace unwind. */
2077 frame_id = get_thread_current_frame_id (tp);
2078
2079 /* Replace stepping-related frame ids where necessary. */
2080 if (upd_step_frame_id)
2081 tp->control.step_frame_id = frame_id;
2082 if (upd_step_stack_frame_id)
2083 tp->control.step_stack_frame_id = frame_id;
2084 }
2085 catch (const gdb_exception &except)
2086 {
2087 xfree (btinfo->replay);
2088 btinfo->replay = NULL;
2089
2090 registers_changed_thread (tp);
2091
2092 throw;
2093 }
2094
2095 return replay;
2096 }
2097
2098 /* Stop replaying a thread. */
2099
2100 static void
2101 record_btrace_stop_replaying (struct thread_info *tp)
2102 {
2103 struct btrace_thread_info *btinfo;
2104
2105 btinfo = &tp->btrace;
2106
2107 xfree (btinfo->replay);
2108 btinfo->replay = NULL;
2109
2110 /* Make sure we're not leaving any stale registers. */
2111 registers_changed_thread (tp);
2112 }
2113
2114 /* Stop replaying TP if it is at the end of its execution history. */
2115
2116 static void
2117 record_btrace_stop_replaying_at_end (struct thread_info *tp)
2118 {
2119 struct btrace_insn_iterator *replay, end;
2120 struct btrace_thread_info *btinfo;
2121
2122 btinfo = &tp->btrace;
2123 replay = btinfo->replay;
2124
2125 if (replay == NULL)
2126 return;
2127
2128 btrace_insn_end (&end, btinfo);
2129
2130 if (btrace_insn_cmp (replay, &end) == 0)
2131 record_btrace_stop_replaying (tp);
2132 }
2133
2134 /* The resume method of target record-btrace. */
2135
2136 void
2137 record_btrace_target::resume (ptid_t ptid, int step, enum gdb_signal signal)
2138 {
2139 enum btrace_thread_flag flag, cflag;
2140
2141 DEBUG ("resume %s: %s%s", target_pid_to_str (ptid).c_str (),
2142 ::execution_direction == EXEC_REVERSE ? "reverse-" : "",
2143 step ? "step" : "cont");
2144
2145 /* Store the execution direction of the last resume.
2146
2147 If there is more than one resume call, we have to rely on infrun
2148 to not change the execution direction in-between. */
2149 record_btrace_resume_exec_dir = ::execution_direction;
2150
2151 /* As long as we're not replaying, just forward the request.
2152
2153 For non-stop targets this means that no thread is replaying. In order to
2154 make progress, we may need to explicitly move replaying threads to the end
2155 of their execution history. */
2156 if ((::execution_direction != EXEC_REVERSE)
2157 && !record_is_replaying (minus_one_ptid))
2158 {
2159 this->beneath ()->resume (ptid, step, signal);
2160 return;
2161 }
2162
2163 /* Compute the btrace thread flag for the requested move. */
2164 if (::execution_direction == EXEC_REVERSE)
2165 {
2166 flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;
2167 cflag = BTHR_RCONT;
2168 }
2169 else
2170 {
2171 flag = step == 0 ? BTHR_CONT : BTHR_STEP;
2172 cflag = BTHR_CONT;
2173 }
2174
2175 /* We just indicate the resume intent here. The actual stepping happens in
2176 record_btrace_wait below.
2177
2178 For all-stop targets, we only step INFERIOR_PTID and continue others. */
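/* For example, a "reverse-step" in all-stop mode marks INFERIOR_PTID
with BTHR_RSTEP and every other matching thread with BTHR_RCONT;
record_btrace_wait below then carries out the combined motion one
instruction at a time. */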
2179
2180 process_stratum_target *proc_target = current_inferior ()->process_target ();
2181
2182 if (!target_is_non_stop_p ())
2183 {
2184 gdb_assert (inferior_ptid.matches (ptid));
2185
2186 for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
2187 {
2188 if (tp->ptid.matches (inferior_ptid))
2189 record_btrace_resume_thread (tp, flag);
2190 else
2191 record_btrace_resume_thread (tp, cflag);
2192 }
2193 }
2194 else
2195 {
2196 for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
2197 record_btrace_resume_thread (tp, flag);
2198 }
2199
2200 /* Async support. */
2201 if (target_can_async_p ())
2202 {
2203 target_async (1);
2204 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2205 }
2206 }
2207
2208 /* The commit_resume method of target record-btrace. */
2209
2210 void
2211 record_btrace_target::commit_resume ()
2212 {
2213 if ((::execution_direction != EXEC_REVERSE)
2214 && !record_is_replaying (minus_one_ptid))
2215 beneath ()->commit_resume ();
2216 }
2217
2218 /* Cancel resuming TP. */
2219
2220 static void
2221 record_btrace_cancel_resume (struct thread_info *tp)
2222 {
2223 enum btrace_thread_flag flags;
2224
2225 flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
2226 if (flags == 0)
2227 return;
2228
2229 DEBUG ("cancel resume thread %s (%s): %x (%s)",
2230 print_thread_id (tp),
2231 target_pid_to_str (tp->ptid).c_str (), flags,
2232 btrace_thread_flag_to_str (flags));
2233
2234 tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
2235 record_btrace_stop_replaying_at_end (tp);
2236 }
2237
2238 /* Return a target_waitstatus indicating that we ran out of history. */
2239
2240 static struct target_waitstatus
2241 btrace_step_no_history (void)
2242 {
2243 struct target_waitstatus status;
2244
2245 status.kind = TARGET_WAITKIND_NO_HISTORY;
2246
2247 return status;
2248 }
2249
2250 /* Return a target_waitstatus indicating that a step finished. */
2251
2252 static struct target_waitstatus
2253 btrace_step_stopped (void)
2254 {
2255 struct target_waitstatus status;
2256
2257 status.kind = TARGET_WAITKIND_STOPPED;
2258 status.value.sig = GDB_SIGNAL_TRAP;
2259
2260 return status;
2261 }
2262
2263 /* Return a target_waitstatus indicating that a thread was stopped as
2264 requested. */
2265
2266 static struct target_waitstatus
2267 btrace_step_stopped_on_request (void)
2268 {
2269 struct target_waitstatus status;
2270
2271 status.kind = TARGET_WAITKIND_STOPPED;
2272 status.value.sig = GDB_SIGNAL_0;
2273
2274 return status;
2275 }
2276
2277 /* Return a target_waitstatus indicating a spurious stop. */
2278
2279 static struct target_waitstatus
2280 btrace_step_spurious (void)
2281 {
2282 struct target_waitstatus status;
2283
2284 status.kind = TARGET_WAITKIND_SPURIOUS;
2285
2286 return status;
2287 }
2288
2289 /* Return a target_waitstatus indicating that the thread was not resumed. */
2290
2291 static struct target_waitstatus
2292 btrace_step_no_resumed (void)
2293 {
2294 struct target_waitstatus status;
2295
2296 status.kind = TARGET_WAITKIND_NO_RESUMED;
2297
2298 return status;
2299 }
2300
2301 /* Return a target_waitstatus indicating that we should wait again. */
2302
2303 static struct target_waitstatus
2304 btrace_step_again (void)
2305 {
2306 struct target_waitstatus status;
2307
2308 status.kind = TARGET_WAITKIND_IGNORE;
2309
2310 return status;
2311 }
2312
2313 /* Clear the record histories. */
2314
2315 static void
2316 record_btrace_clear_histories (struct btrace_thread_info *btinfo)
2317 {
2318 xfree (btinfo->insn_history);
2319 xfree (btinfo->call_history);
2320
2321 btinfo->insn_history = NULL;
2322 btinfo->call_history = NULL;
2323 }
2324
2325 /* Check whether TP's current replay position is at a breakpoint. */
2326
2327 static int
2328 record_btrace_replay_at_breakpoint (struct thread_info *tp)
2329 {
2330 struct btrace_insn_iterator *replay;
2331 struct btrace_thread_info *btinfo;
2332 const struct btrace_insn *insn;
2333
2334 btinfo = &tp->btrace;
2335 replay = btinfo->replay;
2336
2337 if (replay == NULL)
2338 return 0;
2339
2340 insn = btrace_insn_get (replay);
2341 if (insn == NULL)
2342 return 0;
2343
2344 return record_check_stopped_by_breakpoint (tp->inf->aspace, insn->pc,
2345 &btinfo->stop_reason);
2346 }
2347
2348 /* Step one instruction in forward direction. */
2349
2350 static struct target_waitstatus
2351 record_btrace_single_step_forward (struct thread_info *tp)
2352 {
2353 struct btrace_insn_iterator *replay, end, start;
2354 struct btrace_thread_info *btinfo;
2355
2356 btinfo = &tp->btrace;
2357 replay = btinfo->replay;
2358
2359 /* We're done if we're not replaying. */
2360 if (replay == NULL)
2361 return btrace_step_no_history ();
2362
2363 /* Check if we're stepping a breakpoint. */
2364 if (record_btrace_replay_at_breakpoint (tp))
2365 return btrace_step_stopped ();
2366
2367 /* Skip gaps during replay. If we end up at a gap (at the end of the trace),
2368 jump back to the instruction at which we started. */
2369 start = *replay;
2370 do
2371 {
2372 unsigned int steps;
2373
2374 /* We will bail out here if we continue stepping after reaching the end
2375 of the execution history. */
2376 steps = btrace_insn_next (replay, 1);
2377 if (steps == 0)
2378 {
2379 *replay = start;
2380 return btrace_step_no_history ();
2381 }
2382 }
2383 while (btrace_insn_get (replay) == NULL);
2384
2385 /* Determine the end of the instruction trace. */
2386 btrace_insn_end (&end, btinfo);
2387
2388 /* The execution trace contains (and ends with) the current instruction.
2389 This instruction has not been executed, yet, so the trace really ends
2390 one instruction earlier. */
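/* (For example, with a trace of insn1 .. insnN, insnN is the
instruction the inferior is currently stopped at; stepping the
replay iterator onto insnN means we have caught up with the live
target, so this is reported as the end of the history.) */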
2391 if (btrace_insn_cmp (replay, &end) == 0)
2392 return btrace_step_no_history ();
2393
2394 return btrace_step_spurious ();
2395 }
2396
2397 /* Step one instruction in backward direction. */
2398
2399 static struct target_waitstatus
2400 record_btrace_single_step_backward (struct thread_info *tp)
2401 {
2402 struct btrace_insn_iterator *replay, start;
2403 struct btrace_thread_info *btinfo;
2404
2405 btinfo = &tp->btrace;
2406 replay = btinfo->replay;
2407
2408 /* Start replaying if we're not already doing so. */
2409 if (replay == NULL)
2410 replay = record_btrace_start_replaying (tp);
2411
2412 /* If we can't step any further, we reached the end of the history.
2413 Skip gaps during replay. If we end up at a gap (at the beginning of
2414 the trace), jump back to the instruction at which we started. */
2415 start = *replay;
2416 do
2417 {
2418 unsigned int steps;
2419
2420 steps = btrace_insn_prev (replay, 1);
2421 if (steps == 0)
2422 {
2423 *replay = start;
2424 return btrace_step_no_history ();
2425 }
2426 }
2427 while (btrace_insn_get (replay) == NULL);
2428
2429 /* Check if we're stepping a breakpoint.
2430
2431 For reverse-stepping, this check is after the step. There is logic in
2432 infrun.c that handles reverse-stepping separately. See, for example,
2433 proceed and adjust_pc_after_break.
2434
2435 This code assumes that for reverse-stepping, PC points to the last
2436 de-executed instruction, whereas for forward-stepping PC points to the
2437 next to-be-executed instruction. */
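/* Illustration: with a recorded sequence insn1 -> insn2 -> insn3,
reverse-stepping from insn3 leaves the replay position at insn2,
the instruction that was just de-executed; a breakpoint at insn2's
PC is therefore checked only after the step. */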
2438 if (record_btrace_replay_at_breakpoint (tp))
2439 return btrace_step_stopped ();
2440
2441 return btrace_step_spurious ();
2442 }
2443
2444 /* Step a single thread. */
2445
2446 static struct target_waitstatus
2447 record_btrace_step_thread (struct thread_info *tp)
2448 {
2449 struct btrace_thread_info *btinfo;
2450 struct target_waitstatus status;
2451 enum btrace_thread_flag flags;
2452
2453 btinfo = &tp->btrace;
2454
2455 flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
2456 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
2457
2458 DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp),
2459 target_pid_to_str (tp->ptid).c_str (), flags,
2460 btrace_thread_flag_to_str (flags));
2461
2462 /* We can't step without an execution history. */
2463 if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
2464 return btrace_step_no_history ();
2465
2466 switch (flags)
2467 {
2468 default:
2469 internal_error (__FILE__, __LINE__, _("invalid stepping type."));
2470
2471 case BTHR_STOP:
2472 return btrace_step_stopped_on_request ();
2473
2474 case BTHR_STEP:
2475 status = record_btrace_single_step_forward (tp);
2476 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2477 break;
2478
2479 return btrace_step_stopped ();
2480
2481 case BTHR_RSTEP:
2482 status = record_btrace_single_step_backward (tp);
2483 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2484 break;
2485
2486 return btrace_step_stopped ();
2487
2488 case BTHR_CONT:
2489 status = record_btrace_single_step_forward (tp);
2490 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2491 break;
2492
2493 btinfo->flags |= flags;
2494 return btrace_step_again ();
2495
2496 case BTHR_RCONT:
2497 status = record_btrace_single_step_backward (tp);
2498 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2499 break;
2500
2501 btinfo->flags |= flags;
2502 return btrace_step_again ();
2503 }
2504
2505 /* We keep threads moving at the end of their execution history. The wait
2506 method will stop the thread for which the event is reported. */
2507 if (status.kind == TARGET_WAITKIND_NO_HISTORY)
2508 btinfo->flags |= flags;
2509
2510 return status;
2511 }
2512
2513 /* Announce further events if necessary. */
2514
2515 static void
2516 record_btrace_maybe_mark_async_event
2517 (const std::vector<thread_info *> &moving,
2518 const std::vector<thread_info *> &no_history)
2519 {
2520 bool more_moving = !moving.empty ();
2521 bool more_no_history = !no_history.empty ();
2522
2523 if (!more_moving && !more_no_history)
2524 return;
2525
2526 if (more_moving)
2527 DEBUG ("movers pending");
2528
2529 if (more_no_history)
2530 DEBUG ("no-history pending");
2531
2532 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2533 }
2534
2535 /* The wait method of target record-btrace. */
2536
2537 ptid_t
2538 record_btrace_target::wait (ptid_t ptid, struct target_waitstatus *status,
2539 int options)
2540 {
2541 std::vector<thread_info *> moving;
2542 std::vector<thread_info *> no_history;
2543
2544 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid).c_str (), options);
2545
2546 /* As long as we're not replaying, just forward the request. */
2547 if ((::execution_direction != EXEC_REVERSE)
2548 && !record_is_replaying (minus_one_ptid))
2549 {
2550 return this->beneath ()->wait (ptid, status, options);
2551 }
2552
2553 /* Keep a work list of moving threads. */
2554 process_stratum_target *proc_target = current_inferior ()->process_target ();
2555 for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
2556 if ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0)
2557 moving.push_back (tp);
2558
2559 if (moving.empty ())
2560 {
2561 *status = btrace_step_no_resumed ();
2562
2563 DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid).c_str (),
2564 target_waitstatus_to_string (status).c_str ());
2565
2566 return null_ptid;
2567 }
2568
2569 /* Step moving threads one by one, one step each, until either one thread
2570 reports an event or we run out of threads to step.
2571
2572 When stepping more than one thread, chances are that some threads reach
2573 the end of their execution history earlier than others. If we reported
2574 this immediately, all-stop on top of non-stop would stop all threads and
2575 resume the same threads next time. And we would report the same thread
2576 having reached the end of its execution history again.
2577
2578 In the worst case, this would starve the other threads. But even if the
2579 other threads were allowed to make progress, this would result in far too
2580 many intermediate stops.
2581
2582 We therefore delay the reporting of "no execution history" until we have
2583 nothing else to report. By this time, all threads should have moved to
2584 either the beginning or the end of their execution history. There will
2585 be a single user-visible stop. */
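/* For example (hypothetical scenario): threads T1 and T2 are both
reverse-continuing and T1 runs out of history first. T1 is parked in
NO_HISTORY while T2 keeps moving; only when T2 stops or also runs out
of history do we report TARGET_WAITKIND_NO_HISTORY, giving a single
user-visible stop instead of one stop per thread. */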
2586 struct thread_info *eventing = NULL;
2587 while ((eventing == NULL) && !moving.empty ())
2588 {
2589 for (unsigned int ix = 0; eventing == NULL && ix < moving.size ();)
2590 {
2591 thread_info *tp = moving[ix];
2592
2593 *status = record_btrace_step_thread (tp);
2594
2595 switch (status->kind)
2596 {
2597 case TARGET_WAITKIND_IGNORE:
2598 ix++;
2599 break;
2600
2601 case TARGET_WAITKIND_NO_HISTORY:
2602 no_history.push_back (ordered_remove (moving, ix));
2603 break;
2604
2605 default:
2606 eventing = unordered_remove (moving, ix);
2607 break;
2608 }
2609 }
2610 }
2611
2612 if (eventing == NULL)
2613 {
2614 /* We started with at least one moving thread. This thread must have
2615 either stopped or reached the end of its execution history.
2616
2617 In the former case, EVENTING must not be NULL.
2618 In the latter case, NO_HISTORY must not be empty. */
2619 gdb_assert (!no_history.empty ());
2620
2621 /* We kept threads moving at the end of their execution history. Stop
2622 EVENTING now that we are going to report its stop. */
2623 eventing = unordered_remove (no_history, 0);
2624 eventing->btrace.flags &= ~BTHR_MOVE;
2625
2626 *status = btrace_step_no_history ();
2627 }
2628
2629 gdb_assert (eventing != NULL);
2630
2631 /* We kept threads replaying at the end of their execution history. Stop
2632 replaying EVENTING now that we are going to report its stop. */
2633 record_btrace_stop_replaying_at_end (eventing);
2634
2635 /* Stop all other threads. */
2636 if (!target_is_non_stop_p ())
2637 {
2638 for (thread_info *tp : current_inferior ()->non_exited_threads ())
2639 record_btrace_cancel_resume (tp);
2640 }
2641
2642 /* In async mode, we need to announce further events. */
2643 if (target_is_async_p ())
2644 record_btrace_maybe_mark_async_event (moving, no_history);
2645
2646 /* Start record histories anew from the current position. */
2647 record_btrace_clear_histories (&eventing->btrace);
2648
2649 /* We moved the replay position but did not update registers. */
2650 registers_changed_thread (eventing);
2651
2652 DEBUG ("wait ended by thread %s (%s): %s",
2653 print_thread_id (eventing),
2654 target_pid_to_str (eventing->ptid).c_str (),
2655 target_waitstatus_to_string (status).c_str ());
2656
2657 return eventing->ptid;
2658 }
2659
2660 /* The stop method of target record-btrace. */
2661
2662 void
2663 record_btrace_target::stop (ptid_t ptid)
2664 {
2665 DEBUG ("stop %s", target_pid_to_str (ptid).c_str ());
2666
2667 /* As long as we're not replaying, just forward the request. */
2668 if ((::execution_direction != EXEC_REVERSE)
2669 && !record_is_replaying (minus_one_ptid))
2670 {
2671 this->beneath ()->stop (ptid);
2672 }
2673 else
2674 {
2675 process_stratum_target *proc_target
2676 = current_inferior ()->process_target ();
2677
2678 for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
2679 {
2680 tp->btrace.flags &= ~BTHR_MOVE;
2681 tp->btrace.flags |= BTHR_STOP;
2682 }
2683 }
2684 }
2685
2686 /* The can_execute_reverse method of target record-btrace. */
2687
2688 bool
2689 record_btrace_target::can_execute_reverse ()
2690 {
2691 return true;
2692 }
2693
2694 /* The stopped_by_sw_breakpoint method of target record-btrace. */
2695
2696 bool
2697 record_btrace_target::stopped_by_sw_breakpoint ()
2698 {
2699 if (record_is_replaying (minus_one_ptid))
2700 {
2701 struct thread_info *tp = inferior_thread ();
2702
2703 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2704 }
2705
2706 return this->beneath ()->stopped_by_sw_breakpoint ();
2707 }
2708
2709 /* The supports_stopped_by_sw_breakpoint method of target
2710 record-btrace. */
2711
2712 bool
2713 record_btrace_target::supports_stopped_by_sw_breakpoint ()
2714 {
2715 if (record_is_replaying (minus_one_ptid))
2716 return true;
2717
2718 return this->beneath ()->supports_stopped_by_sw_breakpoint ();
2719 }
2720
2721 /* The stopped_by_hw_breakpoint method of target record-btrace. */
2722
2723 bool
2724 record_btrace_target::stopped_by_hw_breakpoint ()
2725 {
2726 if (record_is_replaying (minus_one_ptid))
2727 {
2728 struct thread_info *tp = inferior_thread ();
2729
2730 return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2731 }
2732
2733 return this->beneath ()->stopped_by_hw_breakpoint ();
2734 }
2735
2736 /* The supports_stopped_by_hw_breakpoint method of target
2737 record-btrace. */
2738
2739 bool
2740 record_btrace_target::supports_stopped_by_hw_breakpoint ()
2741 {
2742 if (record_is_replaying (minus_one_ptid))
2743 return true;
2744
2745 return this->beneath ()->supports_stopped_by_hw_breakpoint ();
2746 }
2747
2748 /* The update_thread_list method of target record-btrace. */
2749
2750 void
2751 record_btrace_target::update_thread_list ()
2752 {
2753 /* We don't add or remove threads during replay. */
2754 if (record_is_replaying (minus_one_ptid))
2755 return;
2756
2757 /* Forward the request. */
2758 this->beneath ()->update_thread_list ();
2759 }
2760
2761 /* The thread_alive method of target record-btrace. */
2762
2763 bool
2764 record_btrace_target::thread_alive (ptid_t ptid)
2765 {
2766 /* We don't add or remove threads during replay. */
2767 if (record_is_replaying (minus_one_ptid))
2768 return true;
2769
2770 /* Forward the request. */
2771 return this->beneath ()->thread_alive (ptid);
2772 }
2773
2774 /* Set the replay branch trace instruction iterator. If IT is NULL, replay
2775 is stopped. */
2776
2777 static void
2778 record_btrace_set_replay (struct thread_info *tp,
2779 const struct btrace_insn_iterator *it)
2780 {
2781 struct btrace_thread_info *btinfo;
2782
2783 btinfo = &tp->btrace;
2784
2785 if (it == NULL)
2786 record_btrace_stop_replaying (tp);
2787 else
2788 {
2789 if (btinfo->replay == NULL)
2790 record_btrace_start_replaying (tp);
2791 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
2792 return;
2793
2794 *btinfo->replay = *it;
2795 registers_changed_thread (tp);
2796 }
2797
2798 /* Start anew from the new replay position. */
2799 record_btrace_clear_histories (btinfo);
2800
2801 inferior_thread ()->suspend.stop_pc
2802 = regcache_read_pc (get_current_regcache ());
2803 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
2804 }
2805
2806 /* The goto_record_begin method of target record-btrace. */
2807
2808 void
2809 record_btrace_target::goto_record_begin ()
2810 {
2811 struct thread_info *tp;
2812 struct btrace_insn_iterator begin;
2813
2814 tp = require_btrace_thread ();
2815
2816 btrace_insn_begin (&begin, &tp->btrace);
2817
2818 /* Skip gaps at the beginning of the trace. */
2819 while (btrace_insn_get (&begin) == NULL)
2820 {
2821 unsigned int steps;
2822
2823 steps = btrace_insn_next (&begin, 1);
2824 if (steps == 0)
2825 error (_("No trace."));
2826 }
2827
2828 record_btrace_set_replay (tp, &begin);
2829 }
2830
2831 /* The goto_record_end method of target record-btrace. */
2832
2833 void
2834 record_btrace_target::goto_record_end ()
2835 {
2836 struct thread_info *tp;
2837
2838 tp = require_btrace_thread ();
2839
2840 record_btrace_set_replay (tp, NULL);
2841 }
2842
2843 /* The goto_record method of target record-btrace. */
2844
2845 void
2846 record_btrace_target::goto_record (ULONGEST insn)
2847 {
2848 struct thread_info *tp;
2849 struct btrace_insn_iterator it;
2850 unsigned int number;
2851 int found;
2852
2853 number = insn;
2854
2855 /* Check for wrap-arounds in the ULONGEST -> unsigned int conversion above. */
2856 if (number != insn)
2857 error (_("Instruction number out of range."));
2858
2859 tp = require_btrace_thread ();
2860
2861 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
2862
2863 /* Check if the instruction could not be found or is a gap. */
2864 if (found == 0 || btrace_insn_get (&it) == NULL)
2865 error (_("No such instruction."));
2866
2867 record_btrace_set_replay (tp, &it);
2868 }
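/* The corresponding user commands are "record goto begin", "record goto
end" and "record goto N", where N is an instruction number as shown by
"record instruction-history". */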
2869
2870 /* The record_stop_replaying method of target record-btrace. */
2871
2872 void
2873 record_btrace_target::record_stop_replaying ()
2874 {
2875 for (thread_info *tp : current_inferior ()->non_exited_threads ())
2876 record_btrace_stop_replaying (tp);
2877 }
2878
2879 /* The execution_direction target method. */
2880
2881 enum exec_direction_kind
2882 record_btrace_target::execution_direction ()
2883 {
2884 return record_btrace_resume_exec_dir;
2885 }
2886
2887 /* The prepare_to_generate_core target method. */
2888
2889 void
2890 record_btrace_target::prepare_to_generate_core ()
2891 {
2892 record_btrace_generating_corefile = 1;
2893 }
2894
2895 /* The done_generating_core target method. */
2896
2897 void
2898 record_btrace_target::done_generating_core ()
2899 {
2900 record_btrace_generating_corefile = 0;
2901 }
2902
2903 /* Start recording in BTS format. */
2904
2905 static void
2906 cmd_record_btrace_bts_start (const char *args, int from_tty)
2907 {
2908 if (args != NULL && *args != 0)
2909 error (_("Invalid argument."));
2910
2911 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2912
2913 try
2914 {
2915 execute_command ("target record-btrace", from_tty);
2916 }
2917 catch (const gdb_exception &exception)
2918 {
2919 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2920 throw;
2921 }
2922 }
2923
2924 /* Start recording in Intel Processor Trace format. */
2925
2926 static void
2927 cmd_record_btrace_pt_start (const char *args, int from_tty)
2928 {
2929 if (args != NULL && *args != 0)
2930 error (_("Invalid argument."));
2931
2932 record_btrace_conf.format = BTRACE_FORMAT_PT;
2933
2934 try
2935 {
2936 execute_command ("target record-btrace", from_tty);
2937 }
2938 catch (const gdb_exception &exception)
2939 {
2940 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2941 throw;
2942 }
2943 }
2944
2945 /* Start recording, trying the Intel PT format first and falling back to BTS. */
2946
2947 static void
2948 cmd_record_btrace_start (const char *args, int from_tty)
2949 {
2950 if (args != NULL && *args != 0)
2951 error (_("Invalid argument."));
2952
2953 record_btrace_conf.format = BTRACE_FORMAT_PT;
2954
2955 try
2956 {
2957 execute_command ("target record-btrace", from_tty);
2958 }
2959 catch (const gdb_exception &exception)
2960 {
2961 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2962
2963 try
2964 {
2965 execute_command ("target record-btrace", from_tty);
2966 }
2967 catch (const gdb_exception &ex)
2968 {
2969 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2970 throw;
2971 }
2972 }
2973 }
2974
2975 /* The "show record btrace replay-memory-access" command. */
2976
2977 static void
2978 cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2979 struct cmd_list_element *c, const char *value)
2980 {
2981 fprintf_filtered (file, _("Replay memory access is %s.\n"),
2982 replay_memory_access);
2983 }
2984
2985 /* The "set record btrace cpu none" command. */
2986
2987 static void
2988 cmd_set_record_btrace_cpu_none (const char *args, int from_tty)
2989 {
2990 if (args != nullptr && *args != 0)
2991 error (_("Trailing junk: '%s'."), args);
2992
2993 record_btrace_cpu_state = CS_NONE;
2994 }
2995
2996 /* The "set record btrace cpu auto" command. */
2997
2998 static void
2999 cmd_set_record_btrace_cpu_auto (const char *args, int from_tty)
3000 {
3001 if (args != nullptr && *args != 0)
3002 error (_("Trailing junk: '%s'."), args);
3003
3004 record_btrace_cpu_state = CS_AUTO;
3005 }
3006
3007 /* The "set record btrace cpu" command. */
3008
3009 static void
3010 cmd_set_record_btrace_cpu (const char *args, int from_tty)
3011 {
3012 if (args == nullptr)
3013 args = "";
3014
3015 /* We use a hard-coded vendor string for now. */
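/* For example (hypothetical cpu values): "intel: 6/158" yields
family 6, model 158 and a default stepping of 0, while
"intel: 6/158/9" also sets stepping 9. The %n conversions record
how many characters were consumed so trailing junk can be
detected below. */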
3016 unsigned int family, model, stepping;
3017 int l1, l2, matches = sscanf (args, "intel: %u/%u%n/%u%n", &family,
3018 &model, &l1, &stepping, &l2);
3019 if (matches == 3)
3020 {
3021 if (strlen (args) != l2)
3022 error (_("Trailing junk: '%s'."), args + l2);
3023 }
3024 else if (matches == 2)
3025 {
3026 if (strlen (args) != l1)
3027 error (_("Trailing junk: '%s'."), args + l1);
3028
3029 stepping = 0;
3030 }
3031 else
3032 error (_("Bad format. See \"help set record btrace cpu\"."));
3033
3034 if (USHRT_MAX < family)
3035 error (_("Cpu family too big."));
3036
3037 if (UCHAR_MAX < model)
3038 error (_("Cpu model too big."));
3039
3040 if (UCHAR_MAX < stepping)
3041 error (_("Cpu stepping too big."));
3042
3043 record_btrace_cpu.vendor = CV_INTEL;
3044 record_btrace_cpu.family = family;
3045 record_btrace_cpu.model = model;
3046 record_btrace_cpu.stepping = stepping;
3047
3048 record_btrace_cpu_state = CS_CPU;
3049 }
3050
3051 /* The "show record btrace cpu" command. */
3052
3053 static void
3054 cmd_show_record_btrace_cpu (const char *args, int from_tty)
3055 {
3056 if (args != nullptr && *args != 0)
3057 error (_("Trailing junk: '%s'."), args);
3058
3059 switch (record_btrace_cpu_state)
3060 {
3061 case CS_AUTO:
3062 printf_unfiltered (_("btrace cpu is 'auto'.\n"));
3063 return;
3064
3065 case CS_NONE:
3066 printf_unfiltered (_("btrace cpu is 'none'.\n"));
3067 return;
3068
3069 case CS_CPU:
3070 switch (record_btrace_cpu.vendor)
3071 {
3072 case CV_INTEL:
3073 if (record_btrace_cpu.stepping == 0)
3074 printf_unfiltered (_("btrace cpu is 'intel: %u/%u'.\n"),
3075 record_btrace_cpu.family,
3076 record_btrace_cpu.model);
3077 else
3078 printf_unfiltered (_("btrace cpu is 'intel: %u/%u/%u'.\n"),
3079 record_btrace_cpu.family,
3080 record_btrace_cpu.model,
3081 record_btrace_cpu.stepping);
3082 return;
3083 }
3084 }
3085
3086 error (_("Internal error: bad cpu state."));
3087 }
3088
3089 /* The "record bts buffer-size" show value function. */
3090
3091 static void
3092 show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
3093 struct cmd_list_element *c,
3094 const char *value)
3095 {
3096 fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
3097 value);
3098 }
3099
3100 /* The "record pt buffer-size" show value function. */
3101
3102 static void
3103 show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
3104 struct cmd_list_element *c,
3105 const char *value)
3106 {
3107 fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
3108 value);
3109 }
3110
3111 /* Initialize btrace commands. */
3112
3113 void _initialize_record_btrace ();
3114 void
3115 _initialize_record_btrace ()
3116 {
3117 add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
3118 _("Start branch trace recording."), &record_btrace_cmdlist,
3119 "record btrace ", 0, &record_cmdlist);
3120 add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
3121
3122 add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
3123 _("\
3124 Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
3125 The processor stores a from/to record for each branch into a cyclic buffer.\n\
3126 This format may not be available on all processors."),
3127 &record_btrace_cmdlist);
3128 add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);
3129
3130 add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
3131 _("\
3132 Start branch trace recording in Intel Processor Trace format.\n\n\
3133 This format may not be available on all processors."),
3134 &record_btrace_cmdlist);
3135 add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);
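/* Example session:
(gdb) record btrace pt
(gdb) info record
"record btrace" alone picks a format automatically; see
cmd_record_btrace_start above. */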
3136
3137 add_basic_prefix_cmd ("btrace", class_support,
3138 _("Set record options."), &set_record_btrace_cmdlist,
3139 "set record btrace ", 0, &set_record_cmdlist);
3140
3141 add_show_prefix_cmd ("btrace", class_support,
3142 _("Show record options."), &show_record_btrace_cmdlist,
3143 "show record btrace ", 0, &show_record_cmdlist);
3144
3145 add_setshow_enum_cmd ("replay-memory-access", no_class,
3146 replay_memory_access_types, &replay_memory_access, _("\
3147 Set what memory accesses are allowed during replay."), _("\
3148 Show what memory accesses are allowed during replay."),
3149 _("Default is READ-ONLY.\n\n\
3150 The btrace record target does not trace data.\n\
3151 The memory therefore corresponds to the live target and not \
3152 to the current replay position.\n\n\
3153 When READ-ONLY, allow accesses to read-only memory during replay.\n\
3154 When READ-WRITE, allow accesses to read-only and read-write memory during \
3155 replay."),
3156 NULL, cmd_show_replay_memory_access,
3157 &set_record_btrace_cmdlist,
3158 &show_record_btrace_cmdlist);
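/* For example, "set record btrace replay-memory-access read-write"
also permits accesses to read-write memory while replaying. */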
3159
3160 add_prefix_cmd ("cpu", class_support, cmd_set_record_btrace_cpu,
3161 _("\
3162 Set the cpu to be used for trace decode.\n\n\
3163 The format is \"VENDOR:IDENTIFIER\" or \"none\" or \"auto\" (default).\n\
3164 For vendor \"intel\" the format is \"FAMILY/MODEL[/STEPPING]\".\n\n\
3165 When decoding branch trace, enable errata workarounds for the specified cpu.\n\
3166 The default is \"auto\", which uses the cpu on which the trace was recorded.\n\
3167 When GDB does not support that cpu, this option can be used to enable\n\
3168 workarounds for a similar cpu that GDB supports.\n\n\
3169 When set to \"none\", errata workarounds are disabled."),
3170 &set_record_btrace_cpu_cmdlist,
3171 "set record btrace cpu ", 1,
3172 &set_record_btrace_cmdlist);
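/* For example (hypothetical identifiers):
(gdb) set record btrace cpu intel: 6/158/9
(gdb) set record btrace cpu auto */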
3173
3174 add_cmd ("auto", class_support, cmd_set_record_btrace_cpu_auto, _("\
3175 Automatically determine the cpu to be used for trace decode."),
3176 &set_record_btrace_cpu_cmdlist);
3177
3178 add_cmd ("none", class_support, cmd_set_record_btrace_cpu_none, _("\
3179 Do not enable errata workarounds for trace decode."),
3180 &set_record_btrace_cpu_cmdlist);
3181
3182 add_cmd ("cpu", class_support, cmd_show_record_btrace_cpu, _("\
3183 Show the cpu to be used for trace decode."),
3184 &show_record_btrace_cmdlist);
3185
3186 add_basic_prefix_cmd ("bts", class_support,
3187 _("Set record btrace bts options."),
3188 &set_record_btrace_bts_cmdlist,
3189 "set record btrace bts ", 0,
3190 &set_record_btrace_cmdlist);
3191
3192 add_show_prefix_cmd ("bts", class_support,
3193 _("Show record btrace bts options."),
3194 &show_record_btrace_bts_cmdlist,
3195 "show record btrace bts ", 0,
3196 &show_record_btrace_cmdlist);
3197
3198 add_setshow_uinteger_cmd ("buffer-size", no_class,
3199 &record_btrace_conf.bts.size,
3200 _("Set the record/replay bts buffer size."),
3201 _("Show the record/replay bts buffer size."), _("\
3202 When starting recording request a trace buffer of this size. \
3203 The actual buffer size may differ from the requested size. \
3204 Use \"info record\" to see the actual buffer size.\n\n\
3205 Bigger buffers allow longer recording but also take more time to process \
3206 the recorded execution trace.\n\n\
3207 The trace buffer size may not be changed while recording."), NULL,
3208 show_record_bts_buffer_size_value,
3209 &set_record_btrace_bts_cmdlist,
3210 &show_record_btrace_bts_cmdlist);
3211
3212 add_basic_prefix_cmd ("pt", class_support,
3213 _("Set record btrace pt options."),
3214 &set_record_btrace_pt_cmdlist,
3215 "set record btrace pt ", 0,
3216 &set_record_btrace_cmdlist);
3217
3218 add_show_prefix_cmd ("pt", class_support,
3219 _("Show record btrace pt options."),
3220 &show_record_btrace_pt_cmdlist,
3221 "show record btrace pt ", 0,
3222 &show_record_btrace_cmdlist);
3223
3224 add_setshow_uinteger_cmd ("buffer-size", no_class,
3225 &record_btrace_conf.pt.size,
3226 _("Set the record/replay pt buffer size."),
3227 _("Show the record/replay pt buffer size."), _("\
3228 Bigger buffers allow longer recording but also take more time to process \
3229 the recorded execution.\n\
3230 The actual buffer size may differ from the requested size. Use \"info record\" \
3231 to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
3232 &set_record_btrace_pt_cmdlist,
3233 &show_record_btrace_pt_cmdlist);
3234
3235 add_target (record_btrace_target_info, record_btrace_target_open);
3236
3237 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
3238 xcalloc, xfree);
3239
3240 record_btrace_conf.bts.size = 64 * 1024;
3241 record_btrace_conf.pt.size = 16 * 1024;
3242 }