1 /* Branch trace support for GDB, the GNU debugger.
2
3 Copyright (C) 2013-2018 Free Software Foundation, Inc.
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include "record.h"
24 #include "record-btrace.h"
25 #include "gdbthread.h"
26 #include "target.h"
27 #include "gdbcmd.h"
28 #include "disasm.h"
29 #include "observable.h"
30 #include "cli/cli-utils.h"
31 #include "source.h"
32 #include "ui-out.h"
33 #include "symtab.h"
34 #include "filenames.h"
35 #include "regcache.h"
36 #include "frame-unwind.h"
37 #include "hashtab.h"
38 #include "infrun.h"
39 #include "event-loop.h"
40 #include "inf-loop.h"
41 #include "vec.h"
42 #include "inferior.h"
43 #include <algorithm>
44
45 static const target_info record_btrace_target_info = {
46 "record-btrace",
47 N_("Branch tracing target"),
48 N_("Collect control-flow trace and provide the execution history.")
49 };
50
51 /* The target_ops of record-btrace. */
52
53 class record_btrace_target final : public target_ops
54 {
55 public:
56 record_btrace_target ()
57 { to_stratum = record_stratum; }
58
59 const target_info &info () const override
60 { return record_btrace_target_info; }
61
62 void close () override;
63 void async (int) override;
64
65 void detach (inferior *inf, int from_tty) override
66 { record_detach (this, inf, from_tty); }
67
68 void disconnect (const char *, int) override;
69
70 void mourn_inferior () override
71 { record_mourn_inferior (this); }
72
73 void kill () override
74 { record_kill (this); }
75
76 enum record_method record_method (ptid_t ptid) override;
77
78 void stop_recording () override;
79 void info_record () override;
80
81 void insn_history (int size, gdb_disassembly_flags flags) override;
82 void insn_history_from (ULONGEST from, int size,
83 gdb_disassembly_flags flags) override;
84 void insn_history_range (ULONGEST begin, ULONGEST end,
85 gdb_disassembly_flags flags) override;
86 void call_history (int size, record_print_flags flags) override;
87 void call_history_from (ULONGEST begin, int size, record_print_flags flags)
88 override;
89 void call_history_range (ULONGEST begin, ULONGEST end, record_print_flags flags)
90 override;
91
92 bool record_is_replaying (ptid_t ptid) override;
93 bool record_will_replay (ptid_t ptid, int dir) override;
94 void record_stop_replaying () override;
95
96 enum target_xfer_status xfer_partial (enum target_object object,
97 const char *annex,
98 gdb_byte *readbuf,
99 const gdb_byte *writebuf,
100 ULONGEST offset, ULONGEST len,
101 ULONGEST *xfered_len) override;
102
103 int insert_breakpoint (struct gdbarch *,
104 struct bp_target_info *) override;
105 int remove_breakpoint (struct gdbarch *, struct bp_target_info *,
106 enum remove_bp_reason) override;
107
108 void fetch_registers (struct regcache *, int) override;
109
110 void store_registers (struct regcache *, int) override;
111 void prepare_to_store (struct regcache *) override;
112
113 const struct frame_unwind *get_unwinder () override;
114
115 const struct frame_unwind *get_tailcall_unwinder () override;
116
117 void commit_resume () override;
118 void resume (ptid_t, int, enum gdb_signal) override;
119 ptid_t wait (ptid_t, struct target_waitstatus *, int) override;
120
121 void stop (ptid_t) override;
122 void update_thread_list () override;
123 bool thread_alive (ptid_t ptid) override;
124 void goto_record_begin () override;
125 void goto_record_end () override;
126 void goto_record (ULONGEST insn) override;
127
128 bool can_execute_reverse () override;
129
130 bool stopped_by_sw_breakpoint () override;
131 bool supports_stopped_by_sw_breakpoint () override;
132
133 bool stopped_by_hw_breakpoint () override;
134 bool supports_stopped_by_hw_breakpoint () override;
135
136 enum exec_direction_kind execution_direction () override;
137 void prepare_to_generate_core () override;
138 void done_generating_core () override;
139 };
140
141 static record_btrace_target record_btrace_ops;
142
143 /* Initialize the record-btrace target ops. */
144
145 /* Token associated with a new-thread observer enabling branch tracing
146 for the new thread. */
147 static const gdb::observers::token record_btrace_thread_observer_token;
148
149 /* Memory access types used in set/show record btrace replay-memory-access. */
150 static const char replay_memory_access_read_only[] = "read-only";
151 static const char replay_memory_access_read_write[] = "read-write";
152 static const char *const replay_memory_access_types[] =
153 {
154 replay_memory_access_read_only,
155 replay_memory_access_read_write,
156 NULL
157 };
158
159 /* The currently allowed replay memory access type. */
160 static const char *replay_memory_access = replay_memory_access_read_only;
161
162 /* The cpu state kinds. */
163 enum record_btrace_cpu_state_kind
164 {
165 CS_AUTO,
166 CS_NONE,
167 CS_CPU
168 };
169
170 /* The current cpu state. */
171 static enum record_btrace_cpu_state_kind record_btrace_cpu_state = CS_AUTO;
172
173 /* The current cpu for trace decode. */
174 static struct btrace_cpu record_btrace_cpu;
175
176 /* Command lists for "set/show record btrace". */
177 static struct cmd_list_element *set_record_btrace_cmdlist;
178 static struct cmd_list_element *show_record_btrace_cmdlist;
179
180 /* The execution direction of the last resume we got. See record-full.c. */
181 static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;
182
183 /* The async event handler for reverse/replay execution. */
184 static struct async_event_handler *record_btrace_async_inferior_event_handler;
185
186 /* A flag indicating that we are currently generating a core file. */
187 static int record_btrace_generating_corefile;
188
189 /* The current branch trace configuration. */
190 static struct btrace_config record_btrace_conf;
191
192 /* Command list for "record btrace". */
193 static struct cmd_list_element *record_btrace_cmdlist;
194
195 /* Command lists for "set/show record btrace bts". */
196 static struct cmd_list_element *set_record_btrace_bts_cmdlist;
197 static struct cmd_list_element *show_record_btrace_bts_cmdlist;
198
199 /* Command lists for "set/show record btrace pt". */
200 static struct cmd_list_element *set_record_btrace_pt_cmdlist;
201 static struct cmd_list_element *show_record_btrace_pt_cmdlist;
202
203 /* Command list for "set record btrace cpu". */
204 static struct cmd_list_element *set_record_btrace_cpu_cmdlist;
205
206 /* Print a record-btrace debug message. Use do ... while (0) to avoid
207 ambiguities when used in if statements. */
208
209 #define DEBUG(msg, args...) \
210 do \
211 { \
212 if (record_debug != 0) \
213 fprintf_unfiltered (gdb_stdlog, \
214 "[record-btrace] " msg "\n", ##args); \
215 } \
216 while (0)
217
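/* For example, the do/while (0) wrapper lets DEBUG act as a single
   statement in an unbraced if/else (an illustrative sketch, not code
   from this file; SOME_CONDITION stands for any expression):

     if (some_condition)
       DEBUG ("condition holds");
     else
       DEBUG ("condition does not hold");

   With a bare { ... } block instead, the semicolon after the first
   DEBUG would end the if statement and orphan the else.  */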
218
219 /* Return the cpu configured by the user. Returns NULL if the cpu was
220 configured as auto. */
221 const struct btrace_cpu *
222 record_btrace_get_cpu (void)
223 {
224 switch (record_btrace_cpu_state)
225 {
226 case CS_AUTO:
227 return nullptr;
228
229 case CS_NONE:
230 record_btrace_cpu.vendor = CV_UNKNOWN;
231 /* Fall through. */
232 case CS_CPU:
233 return &record_btrace_cpu;
234 }
235
236 error (_("Internal error: bad record btrace cpu state."));
237 }
238
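/* Illustrative summary of the switch above: with the default "auto"
   cpu setting this returns nullptr and the trace decoder identifies
   the CPU itself; a cpu setting of "none" yields CS_NONE, so the
   returned btrace_cpu has vendor CV_UNKNOWN and the decoder applies no
   vendor-specific errata workarounds; an explicit CPU (CS_CPU) is
   returned as configured via "set record btrace cpu".  */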
239 /* Update the branch trace for the current thread and return a pointer to its
240 thread_info.
241
242 Throws an error if there is no thread or no trace. This function never
243 returns NULL. */
244
245 static struct thread_info *
246 require_btrace_thread (void)
247 {
248 DEBUG ("require");
249
250 if (inferior_ptid == null_ptid)
251 error (_("No thread."));
252
253 thread_info *tp = inferior_thread ();
254
255 validate_registers_access ();
256
257 btrace_fetch (tp, record_btrace_get_cpu ());
258
259 if (btrace_is_empty (tp))
260 error (_("No trace."));
261
262 return tp;
263 }
264
265 /* Update the branch trace for the current thread and return a pointer to its
266 branch trace information struct.
267
268 Throws an error if there is no thread or no trace. This function never
269 returns NULL. */
270
271 static struct btrace_thread_info *
272 require_btrace (void)
273 {
274 struct thread_info *tp;
275
276 tp = require_btrace_thread ();
277
278 return &tp->btrace;
279 }
280
281 /* Enable branch tracing for one thread. Warn on errors. */
282
283 static void
284 record_btrace_enable_warn (struct thread_info *tp)
285 {
286 TRY
287 {
288 btrace_enable (tp, &record_btrace_conf);
289 }
290 CATCH (error, RETURN_MASK_ERROR)
291 {
292 warning ("%s", error.message);
293 }
294 END_CATCH
295 }
296
297 /* Enable automatic tracing of new threads. */
298
299 static void
300 record_btrace_auto_enable (void)
301 {
302 DEBUG ("attach thread observer");
303
304 gdb::observers::new_thread.attach (record_btrace_enable_warn,
305 record_btrace_thread_observer_token);
306 }
307
308 /* Disable automatic tracing of new threads. */
309
310 static void
311 record_btrace_auto_disable (void)
312 {
313 DEBUG ("detach thread observer");
314
315 gdb::observers::new_thread.detach (record_btrace_thread_observer_token);
316 }
317
318 /* The record-btrace async event handler function. */
319
320 static void
321 record_btrace_handle_async_inferior_event (gdb_client_data data)
322 {
323 inferior_event_handler (INF_REG_EVENT, NULL);
324 }
325
326 /* See record-btrace.h. */
327
328 void
329 record_btrace_push_target (void)
330 {
331 const char *format;
332
333 record_btrace_auto_enable ();
334
335 push_target (&record_btrace_ops);
336
337 record_btrace_async_inferior_event_handler
338 = create_async_event_handler (record_btrace_handle_async_inferior_event,
339 NULL);
340 record_btrace_generating_corefile = 0;
341
342 format = btrace_format_short_string (record_btrace_conf.format);
343 gdb::observers::record_changed.notify (current_inferior (), 1, "btrace", format);
344 }
345
346 /* Disable btrace on a set of threads on scope exit. */
347
348 struct scoped_btrace_disable
349 {
350 scoped_btrace_disable () = default;
351
352 DISABLE_COPY_AND_ASSIGN (scoped_btrace_disable);
353
354 ~scoped_btrace_disable ()
355 {
356 for (thread_info *tp : m_threads)
357 btrace_disable (tp);
358 }
359
360 void add_thread (thread_info *thread)
361 {
362 m_threads.push_front (thread);
363 }
364
365 void discard ()
366 {
367 m_threads.clear ();
368 }
369
370 private:
371 std::forward_list<thread_info *> m_threads;
372 };
373
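/* A minimal usage sketch of the RAII type above, mirroring
   record_btrace_target_open below:

     {
       scoped_btrace_disable btrace_disable;

       btrace_enable (tp, &record_btrace_conf);
       btrace_disable.add_thread (tp);	// register TP for rollback

       btrace_disable.discard ();	// success: keep tracing enabled
     }

   If btrace_enable throws for a later thread, the destructor disables
   btrace for every thread added so far; discard clears the list so a
   fully successful run disables nothing.  */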
374 /* Open target record-btrace. */
375
376 static void
377 record_btrace_target_open (const char *args, int from_tty)
378 {
379 /* If we fail to enable btrace for one thread, disable it for the threads for
380 which it was successfully enabled. */
381 scoped_btrace_disable btrace_disable;
382 struct thread_info *tp;
383
384 DEBUG ("open");
385
386 record_preopen ();
387
388 if (!target_has_execution)
389 error (_("The program is not being run."));
390
391 ALL_NON_EXITED_THREADS (tp)
392 if (args == NULL || *args == 0 || number_is_in_list (args, tp->global_num))
393 {
394 btrace_enable (tp, &record_btrace_conf);
395
396 btrace_disable.add_thread (tp);
397 }
398
399 record_btrace_push_target ();
400
401 btrace_disable.discard ();
402 }
403
404 /* The stop_recording method of target record-btrace. */
405
406 void
407 record_btrace_target::stop_recording ()
408 {
409 struct thread_info *tp;
410
411 DEBUG ("stop recording");
412
413 record_btrace_auto_disable ();
414
415 ALL_NON_EXITED_THREADS (tp)
416 if (tp->btrace.target != NULL)
417 btrace_disable (tp);
418 }
419
420 /* The disconnect method of target record-btrace. */
421
422 void
423 record_btrace_target::disconnect (const char *args,
424 int from_tty)
425 {
426 struct target_ops *beneath = this->beneath ();
427
428 /* Do not stop recording, just clean up GDB side. */
429 unpush_target (this);
430
431 /* Forward disconnect. */
432 beneath->disconnect (args, from_tty);
433 }
434
435 /* The close method of target record-btrace. */
436
437 void
438 record_btrace_target::close ()
439 {
440 struct thread_info *tp;
441
442 if (record_btrace_async_inferior_event_handler != NULL)
443 delete_async_event_handler (&record_btrace_async_inferior_event_handler);
444
445 /* Make sure automatic recording gets disabled even if we did not stop
446 recording before closing the record-btrace target. */
447 record_btrace_auto_disable ();
448
449 /* We should have already stopped recording.
450 Tear down btrace in case we have not. */
451 ALL_NON_EXITED_THREADS (tp)
452 btrace_teardown (tp);
453 }
454
455 /* The async method of target record-btrace. */
456
457 void
458 record_btrace_target::async (int enable)
459 {
460 if (enable)
461 mark_async_event_handler (record_btrace_async_inferior_event_handler);
462 else
463 clear_async_event_handler (record_btrace_async_inferior_event_handler);
464
465 this->beneath ()->async (enable);
466 }
467
468 /* Adjusts the size and returns a human-readable size suffix. */
469
470 static const char *
471 record_btrace_adjust_size (unsigned int *size)
472 {
473 unsigned int sz;
474
475 sz = *size;
476
477 if ((sz & ((1u << 30) - 1)) == 0)
478 {
479 *size = sz >> 30;
480 return "GB";
481 }
482 else if ((sz & ((1u << 20) - 1)) == 0)
483 {
484 *size = sz >> 20;
485 return "MB";
486 }
487 else if ((sz & ((1u << 10) - 1)) == 0)
488 {
489 *size = sz >> 10;
490 return "kB";
491 }
492 else
493 return "";
494 }
495
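/* Worked examples for record_btrace_adjust_size, with input values
   assumed for illustration:

     *size = 1u << 30   ->  *size = 1,    returns "GB"
     *size = 64 * 1024  ->  *size = 64,   returns "kB"
     *size = 1000       ->  *size = 1000, returns ""

   Only exact multiples of 1024, 1024^2 or 1024^3 are scaled.  Callers
   check for a non-zero size first; zero would otherwise be printed as
   "0GB".  */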
496 /* Print a BTS configuration. */
497
498 static void
499 record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
500 {
501 const char *suffix;
502 unsigned int size;
503
504 size = conf->size;
505 if (size > 0)
506 {
507 suffix = record_btrace_adjust_size (&size);
508 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
509 }
510 }
511
512 /* Print an Intel Processor Trace configuration. */
513
514 static void
515 record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
516 {
517 const char *suffix;
518 unsigned int size;
519
520 size = conf->size;
521 if (size > 0)
522 {
523 suffix = record_btrace_adjust_size (&size);
524 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
525 }
526 }
527
528 /* Print a branch tracing configuration. */
529
530 static void
531 record_btrace_print_conf (const struct btrace_config *conf)
532 {
533 printf_unfiltered (_("Recording format: %s.\n"),
534 btrace_format_string (conf->format));
535
536 switch (conf->format)
537 {
538 case BTRACE_FORMAT_NONE:
539 return;
540
541 case BTRACE_FORMAT_BTS:
542 record_btrace_print_bts_conf (&conf->bts);
543 return;
544
545 case BTRACE_FORMAT_PT:
546 record_btrace_print_pt_conf (&conf->pt);
547 return;
548 }
549
550 internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
551 }
552
553 /* The info_record method of target record-btrace. */
554
555 void
556 record_btrace_target::info_record ()
557 {
558 struct btrace_thread_info *btinfo;
559 const struct btrace_config *conf;
560 struct thread_info *tp;
561 unsigned int insns, calls, gaps;
562
563 DEBUG ("info");
564
565 tp = find_thread_ptid (inferior_ptid);
566 if (tp == NULL)
567 error (_("No thread."));
568
569 validate_registers_access ();
570
571 btinfo = &tp->btrace;
572
573 conf = ::btrace_conf (btinfo);
574 if (conf != NULL)
575 record_btrace_print_conf (conf);
576
577 btrace_fetch (tp, record_btrace_get_cpu ());
578
579 insns = 0;
580 calls = 0;
581 gaps = 0;
582
583 if (!btrace_is_empty (tp))
584 {
585 struct btrace_call_iterator call;
586 struct btrace_insn_iterator insn;
587
588 btrace_call_end (&call, btinfo);
589 btrace_call_prev (&call, 1);
590 calls = btrace_call_number (&call);
591
592 btrace_insn_end (&insn, btinfo);
593 insns = btrace_insn_number (&insn);
594
595 /* If the last instruction is not a gap, it is the current instruction
596 that is not actually part of the record. */
597 if (btrace_insn_get (&insn) != NULL)
598 insns -= 1;
599
600 gaps = btinfo->ngaps;
601 }
602
603 printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
604 "for thread %s (%s).\n"), insns, calls, gaps,
605 print_thread_id (tp), target_pid_to_str (tp->ptid));
606
607 if (btrace_is_replaying (tp))
608 printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
609 btrace_insn_number (btinfo->replay));
610 }
611
612 /* Print a decode error. */
613
614 static void
615 btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
616 enum btrace_format format)
617 {
618 const char *errstr = btrace_decode_error (format, errcode);
619
620 uiout->text (_("["));
621 /* ERRCODE > 0 indicates notifications on BTRACE_FORMAT_PT. */
622 if (!(format == BTRACE_FORMAT_PT && errcode > 0))
623 {
624 uiout->text (_("decode error ("));
625 uiout->field_int ("errcode", errcode);
626 uiout->text (_("): "));
627 }
628 uiout->text (errstr);
629 uiout->text (_("]\n"));
630 }
631
632 /* Print an unsigned int. */
633
634 static void
635 ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
636 {
637 uiout->field_fmt (fld, "%u", val);
638 }
639
640 /* A range of source lines. */
641
642 struct btrace_line_range
643 {
644 /* The symtab this line is from. */
645 struct symtab *symtab;
646
647 /* The first line (inclusive). */
648 int begin;
649
650 /* The last line (exclusive). */
651 int end;
652 };
653
654 /* Construct a line range. */
655
656 static struct btrace_line_range
657 btrace_mk_line_range (struct symtab *symtab, int begin, int end)
658 {
659 struct btrace_line_range range;
660
661 range.symtab = symtab;
662 range.begin = begin;
663 range.end = end;
664
665 return range;
666 }
667
668 /* Add a line to a line range. */
669
670 static struct btrace_line_range
671 btrace_line_range_add (struct btrace_line_range range, int line)
672 {
673 if (range.end <= range.begin)
674 {
675 /* This is the first entry. */
676 range.begin = line;
677 range.end = line + 1;
678 }
679 else if (line < range.begin)
680 range.begin = line;
681 else if (range.end < line)
682 range.end = line;
683
684 return range;
685 }
686
687 /* Return non-zero if RANGE is empty, zero otherwise. */
688
689 static int
690 btrace_line_range_is_empty (struct btrace_line_range range)
691 {
692 return range.end <= range.begin;
693 }
694
695 /* Return non-zero if LHS contains RHS, zero otherwise. */
696
697 static int
698 btrace_line_range_contains_range (struct btrace_line_range lhs,
699 struct btrace_line_range rhs)
700 {
701 return ((lhs.symtab == rhs.symtab)
702 && (lhs.begin <= rhs.begin)
703 && (rhs.end <= lhs.end));
704 }
705
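/* Example of the half-open [begin; end) semantics above: starting from
   the empty range [0; 0), adding line 42 yields [42; 43); adding line
   40 then yields [40; 43).  Within one symtab, [40; 43) contains
   [41; 42) but not [41; 44), and a range is empty exactly when
   end <= begin.  */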
706 /* Find the line range associated with PC. */
707
708 static struct btrace_line_range
709 btrace_find_line_range (CORE_ADDR pc)
710 {
711 struct btrace_line_range range;
712 struct linetable_entry *lines;
713 struct linetable *ltable;
714 struct symtab *symtab;
715 int nlines, i;
716
717 symtab = find_pc_line_symtab (pc);
718 if (symtab == NULL)
719 return btrace_mk_line_range (NULL, 0, 0);
720
721 ltable = SYMTAB_LINETABLE (symtab);
722 if (ltable == NULL)
723 return btrace_mk_line_range (symtab, 0, 0);
724
725 nlines = ltable->nitems;
726 lines = ltable->item;
727 if (nlines <= 0)
728 return btrace_mk_line_range (symtab, 0, 0);
729
730 range = btrace_mk_line_range (symtab, 0, 0);
731 for (i = 0; i < nlines - 1; i++)
732 {
733 if ((lines[i].pc == pc) && (lines[i].line != 0))
734 range = btrace_line_range_add (range, lines[i].line);
735 }
736
737 return range;
738 }
739
740 /* Print source lines in LINES to UIOUT.
741
742 UI_ITEM_CHAIN is a cleanup chain for the last source line and the
743 instructions corresponding to that source line. When printing a new source
744 line, we do the cleanups for the open chain and open a new cleanup chain for
745 the new source line. If the source line range in LINES is not empty, this
746 function will leave the cleanup chain for the last printed source line open
747 so instructions can be added to it. */
748
749 static void
750 btrace_print_lines (struct btrace_line_range lines, struct ui_out *uiout,
751 gdb::optional<ui_out_emit_tuple> *src_and_asm_tuple,
752 gdb::optional<ui_out_emit_list> *asm_list,
753 gdb_disassembly_flags flags)
754 {
755 print_source_lines_flags psl_flags;
756
757 if (flags & DISASSEMBLY_FILENAME)
758 psl_flags |= PRINT_SOURCE_LINES_FILENAME;
759
760 for (int line = lines.begin; line < lines.end; ++line)
761 {
762 asm_list->reset ();
763
764 src_and_asm_tuple->emplace (uiout, "src_and_asm_line");
765
766 print_source_lines (lines.symtab, line, line + 1, psl_flags);
767
768 asm_list->emplace (uiout, "line_asm_insn");
769 }
770 }
771
772 /* Disassemble a section of the recorded instruction trace. */
773
774 static void
775 btrace_insn_history (struct ui_out *uiout,
776 const struct btrace_thread_info *btinfo,
777 const struct btrace_insn_iterator *begin,
778 const struct btrace_insn_iterator *end,
779 gdb_disassembly_flags flags)
780 {
781 DEBUG ("itrace (0x%x): [%u; %u)", (unsigned) flags,
782 btrace_insn_number (begin), btrace_insn_number (end));
783
784 flags |= DISASSEMBLY_SPECULATIVE;
785
786 struct gdbarch *gdbarch = target_gdbarch ();
787 btrace_line_range last_lines = btrace_mk_line_range (NULL, 0, 0);
788
789 ui_out_emit_list list_emitter (uiout, "asm_insns");
790
791 gdb::optional<ui_out_emit_tuple> src_and_asm_tuple;
792 gdb::optional<ui_out_emit_list> asm_list;
793
794 gdb_pretty_print_disassembler disasm (gdbarch);
795
796 for (btrace_insn_iterator it = *begin; btrace_insn_cmp (&it, end) != 0;
797 btrace_insn_next (&it, 1))
798 {
799 const struct btrace_insn *insn;
800
801 insn = btrace_insn_get (&it);
802
803 /* A NULL instruction indicates a gap in the trace. */
804 if (insn == NULL)
805 {
806 const struct btrace_config *conf;
807
808 conf = btrace_conf (btinfo);
809
810 /* We have trace so we must have a configuration. */
811 gdb_assert (conf != NULL);
812
813 uiout->field_fmt ("insn-number", "%u",
814 btrace_insn_number (&it));
815 uiout->text ("\t");
816
817 btrace_ui_out_decode_error (uiout, btrace_insn_get_error (&it),
818 conf->format);
819 }
820 else
821 {
822 struct disasm_insn dinsn;
823
824 if ((flags & DISASSEMBLY_SOURCE) != 0)
825 {
826 struct btrace_line_range lines;
827
828 lines = btrace_find_line_range (insn->pc);
829 if (!btrace_line_range_is_empty (lines)
830 && !btrace_line_range_contains_range (last_lines, lines))
831 {
832 btrace_print_lines (lines, uiout, &src_and_asm_tuple, &asm_list,
833 flags);
834 last_lines = lines;
835 }
836 else if (!src_and_asm_tuple.has_value ())
837 {
838 gdb_assert (!asm_list.has_value ());
839
840 src_and_asm_tuple.emplace (uiout, "src_and_asm_line");
841
842 /* No source information. */
843 asm_list.emplace (uiout, "line_asm_insn");
844 }
845
846 gdb_assert (src_and_asm_tuple.has_value ());
847 gdb_assert (asm_list.has_value ());
848 }
849
850 memset (&dinsn, 0, sizeof (dinsn));
851 dinsn.number = btrace_insn_number (&it);
852 dinsn.addr = insn->pc;
853
854 if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
855 dinsn.is_speculative = 1;
856
857 disasm.pretty_print_insn (uiout, &dinsn, flags);
858 }
859 }
860 }
861
862 /* The insn_history method of target record-btrace. */
863
864 void
865 record_btrace_target::insn_history (int size, gdb_disassembly_flags flags)
866 {
867 struct btrace_thread_info *btinfo;
868 struct btrace_insn_history *history;
869 struct btrace_insn_iterator begin, end;
870 struct ui_out *uiout;
871 unsigned int context, covered;
872
873 uiout = current_uiout;
874 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
875 context = abs (size);
876 if (context == 0)
877 error (_("Bad record instruction-history-size."));
878
879 btinfo = require_btrace ();
880 history = btinfo->insn_history;
881 if (history == NULL)
882 {
883 struct btrace_insn_iterator *replay;
884
885 DEBUG ("insn-history (0x%x): %d", (unsigned) flags, size);
886
887 /* If we're replaying, we start at the replay position. Otherwise, we
888 start at the tail of the trace. */
889 replay = btinfo->replay;
890 if (replay != NULL)
891 begin = *replay;
892 else
893 btrace_insn_end (&begin, btinfo);
894
895 /* We start from here and expand in the requested direction. Then we
896 expand in the other direction, as well, to fill up any remaining
897 context. */
898 end = begin;
899 if (size < 0)
900 {
901 /* We want the current position covered, as well. */
902 covered = btrace_insn_next (&end, 1);
903 covered += btrace_insn_prev (&begin, context - covered);
904 covered += btrace_insn_next (&end, context - covered);
905 }
906 else
907 {
908 covered = btrace_insn_next (&end, context);
909 covered += btrace_insn_prev (&begin, context - covered);
910 }
911 }
912 else
913 {
914 begin = history->begin;
915 end = history->end;
916
917 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", (unsigned) flags, size,
918 btrace_insn_number (&begin), btrace_insn_number (&end));
919
920 if (size < 0)
921 {
922 end = begin;
923 covered = btrace_insn_prev (&begin, context);
924 }
925 else
926 {
927 begin = end;
928 covered = btrace_insn_next (&end, context);
929 }
930 }
931
932 if (covered > 0)
933 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
934 else
935 {
936 if (size < 0)
937 printf_unfiltered (_("At the start of the branch trace record.\n"));
938 else
939 printf_unfiltered (_("At the end of the branch trace record.\n"));
940 }
941
942 btrace_set_insn_history (btinfo, &begin, &end);
943 }
944
945 /* The insn_history_range method of target record-btrace. */
946
947 void
948 record_btrace_target::insn_history_range (ULONGEST from, ULONGEST to,
949 gdb_disassembly_flags flags)
950 {
951 struct btrace_thread_info *btinfo;
952 struct btrace_insn_iterator begin, end;
953 struct ui_out *uiout;
954 unsigned int low, high;
955 int found;
956
957 uiout = current_uiout;
958 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
959 low = from;
960 high = to;
961
962 DEBUG ("insn-history (0x%x): [%u; %u)", (unsigned) flags, low, high);
963
964 /* Check for wrap-arounds. */
965 if (low != from || high != to)
966 error (_("Bad range."));
967
968 if (high < low)
969 error (_("Bad range."));
970
971 btinfo = require_btrace ();
972
973 found = btrace_find_insn_by_number (&begin, btinfo, low);
974 if (found == 0)
975 error (_("Range out of bounds."));
976
977 found = btrace_find_insn_by_number (&end, btinfo, high);
978 if (found == 0)
979 {
980 /* Silently truncate the range. */
981 btrace_insn_end (&end, btinfo);
982 }
983 else
984 {
985 /* We want both begin and end to be inclusive. */
986 btrace_insn_next (&end, 1);
987 }
988
989 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
990 btrace_set_insn_history (btinfo, &begin, &end);
991 }
992
993 /* The insn_history_from method of target record-btrace. */
994
995 void
996 record_btrace_target::insn_history_from (ULONGEST from, int size,
997 gdb_disassembly_flags flags)
998 {
999 ULONGEST begin, end, context;
1000
1001 context = abs (size);
1002 if (context == 0)
1003 error (_("Bad record instruction-history-size."));
1004
1005 if (size < 0)
1006 {
1007 end = from;
1008
1009 if (from < context)
1010 begin = 0;
1011 else
1012 begin = from - context + 1;
1013 }
1014 else
1015 {
1016 begin = from;
1017 end = from + context - 1;
1018
1019 /* Check for wrap-around. */
1020 if (end < begin)
1021 end = ULONGEST_MAX;
1022 }
1023
1024 insn_history_range (begin, end, flags);
1025 }
1026
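/* A worked example of the arithmetic above, with values assumed for
   illustration: for FROM = 10 and SIZE = -3 we get context = 3,
   end = 10 and begin = 10 - 3 + 1 = 8, i.e. the inclusive range
   [8; 10] covering exactly three instructions that end at FROM.  For
   FROM = 1 and SIZE = -3, FROM < context clamps begin to 0.  */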
1027 /* Print the instruction number range for a function call history line. */
1028
1029 static void
1030 btrace_call_history_insn_range (struct ui_out *uiout,
1031 const struct btrace_function *bfun)
1032 {
1033 unsigned int begin, end, size;
1034
1035 size = bfun->insn.size ();
1036 gdb_assert (size > 0);
1037
1038 begin = bfun->insn_offset;
1039 end = begin + size - 1;
1040
1041 ui_out_field_uint (uiout, "insn begin", begin);
1042 uiout->text (",");
1043 ui_out_field_uint (uiout, "insn end", end);
1044 }
1045
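/* For example, a function segment with insn_offset 10 containing three
   instructions prints "10,12": begin = 10 and end = 10 + 3 - 1.  Both
   bounds are inclusive.  */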
1046 /* Compute the lowest and highest source line for the instructions in BFUN
1047 and return them in PBEGIN and PEND.
1048 Ignore instructions that can't be mapped to BFUN, e.g. instructions that
1049 result from inlining or macro expansion. */
1050
1051 static void
1052 btrace_compute_src_line_range (const struct btrace_function *bfun,
1053 int *pbegin, int *pend)
1054 {
1055 struct symtab *symtab;
1056 struct symbol *sym;
1057 int begin, end;
1058
1059 begin = INT_MAX;
1060 end = INT_MIN;
1061
1062 sym = bfun->sym;
1063 if (sym == NULL)
1064 goto out;
1065
1066 symtab = symbol_symtab (sym);
1067
1068 for (const btrace_insn &insn : bfun->insn)
1069 {
1070 struct symtab_and_line sal;
1071
1072 sal = find_pc_line (insn.pc, 0);
1073 if (sal.symtab != symtab || sal.line == 0)
1074 continue;
1075
1076 begin = std::min (begin, sal.line);
1077 end = std::max (end, sal.line);
1078 }
1079
1080 out:
1081 *pbegin = begin;
1082 *pend = end;
1083 }
1084
1085 /* Print the source line information for a function call history line. */
1086
1087 static void
1088 btrace_call_history_src_line (struct ui_out *uiout,
1089 const struct btrace_function *bfun)
1090 {
1091 struct symbol *sym;
1092 int begin, end;
1093
1094 sym = bfun->sym;
1095 if (sym == NULL)
1096 return;
1097
1098 uiout->field_string ("file",
1099 symtab_to_filename_for_display (symbol_symtab (sym)));
1100
1101 btrace_compute_src_line_range (bfun, &begin, &end);
1102 if (end < begin)
1103 return;
1104
1105 uiout->text (":");
1106 uiout->field_int ("min line", begin);
1107
1108 if (end == begin)
1109 return;
1110
1111 uiout->text (",");
1112 uiout->field_int ("max line", end);
1113 }
1114
1115 /* Get the name of a branch trace function. */
1116
1117 static const char *
1118 btrace_get_bfun_name (const struct btrace_function *bfun)
1119 {
1120 struct minimal_symbol *msym;
1121 struct symbol *sym;
1122
1123 if (bfun == NULL)
1124 return "??";
1125
1126 msym = bfun->msym;
1127 sym = bfun->sym;
1128
1129 if (sym != NULL)
1130 return SYMBOL_PRINT_NAME (sym);
1131 else if (msym != NULL)
1132 return MSYMBOL_PRINT_NAME (msym);
1133 else
1134 return "??";
1135 }
1136
1137 /* Disassemble a section of the recorded function trace. */
1138
1139 static void
1140 btrace_call_history (struct ui_out *uiout,
1141 const struct btrace_thread_info *btinfo,
1142 const struct btrace_call_iterator *begin,
1143 const struct btrace_call_iterator *end,
1144 int int_flags)
1145 {
1146 struct btrace_call_iterator it;
1147 record_print_flags flags = (enum record_print_flag) int_flags;
1148
1149 DEBUG ("ftrace (0x%x): [%u; %u)", int_flags, btrace_call_number (begin),
1150 btrace_call_number (end));
1151
1152 for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
1153 {
1154 const struct btrace_function *bfun;
1155 struct minimal_symbol *msym;
1156 struct symbol *sym;
1157
1158 bfun = btrace_call_get (&it);
1159 sym = bfun->sym;
1160 msym = bfun->msym;
1161
1162 /* Print the function index. */
1163 ui_out_field_uint (uiout, "index", bfun->number);
1164 uiout->text ("\t");
1165
1166 /* Indicate gaps in the trace. */
1167 if (bfun->errcode != 0)
1168 {
1169 const struct btrace_config *conf;
1170
1171 conf = btrace_conf (btinfo);
1172
1173 /* We have trace so we must have a configuration. */
1174 gdb_assert (conf != NULL);
1175
1176 btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);
1177
1178 continue;
1179 }
1180
1181 if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
1182 {
1183 int level = bfun->level + btinfo->level, i;
1184
1185 for (i = 0; i < level; ++i)
1186 uiout->text (" ");
1187 }
1188
1189 if (sym != NULL)
1190 uiout->field_string ("function", SYMBOL_PRINT_NAME (sym));
1191 else if (msym != NULL)
1192 uiout->field_string ("function", MSYMBOL_PRINT_NAME (msym));
1193 else if (!uiout->is_mi_like_p ())
1194 uiout->field_string ("function", "??");
1195
1196 if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
1197 {
1198 uiout->text (_("\tinst "));
1199 btrace_call_history_insn_range (uiout, bfun);
1200 }
1201
1202 if ((flags & RECORD_PRINT_SRC_LINE) != 0)
1203 {
1204 uiout->text (_("\tat "));
1205 btrace_call_history_src_line (uiout, bfun);
1206 }
1207
1208 uiout->text ("\n");
1209 }
1210 }
1211
1212 /* The call_history method of target record-btrace. */
1213
1214 void
1215 record_btrace_target::call_history (int size, record_print_flags flags)
1216 {
1217 struct btrace_thread_info *btinfo;
1218 struct btrace_call_history *history;
1219 struct btrace_call_iterator begin, end;
1220 struct ui_out *uiout;
1221 unsigned int context, covered;
1222
1223 uiout = current_uiout;
1224 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
1225 context = abs (size);
1226 if (context == 0)
1227 error (_("Bad record function-call-history-size."));
1228
1229 btinfo = require_btrace ();
1230 history = btinfo->call_history;
1231 if (history == NULL)
1232 {
1233 struct btrace_insn_iterator *replay;
1234
1235 DEBUG ("call-history (0x%x): %d", (int) flags, size);
1236
1237 /* If we're replaying, we start at the replay position. Otherwise, we
1238 start at the tail of the trace. */
1239 replay = btinfo->replay;
1240 if (replay != NULL)
1241 {
1242 begin.btinfo = btinfo;
1243 begin.index = replay->call_index;
1244 }
1245 else
1246 btrace_call_end (&begin, btinfo);
1247
1248 /* We start from here and expand in the requested direction. Then we
1249 expand in the other direction, as well, to fill up any remaining
1250 context. */
1251 end = begin;
1252 if (size < 0)
1253 {
1254 /* We want the current position covered, as well. */
1255 covered = btrace_call_next (&end, 1);
1256 covered += btrace_call_prev (&begin, context - covered);
1257 covered += btrace_call_next (&end, context - covered);
1258 }
1259 else
1260 {
1261 covered = btrace_call_next (&end, context);
1262 covered += btrace_call_prev (&begin, context - covered);
1263 }
1264 }
1265 else
1266 {
1267 begin = history->begin;
1268 end = history->end;
1269
1270 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", (int) flags, size,
1271 btrace_call_number (&begin), btrace_call_number (&end));
1272
1273 if (size < 0)
1274 {
1275 end = begin;
1276 covered = btrace_call_prev (&begin, context);
1277 }
1278 else
1279 {
1280 begin = end;
1281 covered = btrace_call_next (&end, context);
1282 }
1283 }
1284
1285 if (covered > 0)
1286 btrace_call_history (uiout, btinfo, &begin, &end, flags);
1287 else
1288 {
1289 if (size < 0)
1290 printf_unfiltered (_("At the start of the branch trace record.\n"));
1291 else
1292 printf_unfiltered (_("At the end of the branch trace record.\n"));
1293 }
1294
1295 btrace_set_call_history (btinfo, &begin, &end);
1296 }
1297
1298 /* The call_history_range method of target record-btrace. */
1299
1300 void
1301 record_btrace_target::call_history_range (ULONGEST from, ULONGEST to,
1302 record_print_flags flags)
1303 {
1304 struct btrace_thread_info *btinfo;
1305 struct btrace_call_iterator begin, end;
1306 struct ui_out *uiout;
1307 unsigned int low, high;
1308 int found;
1309
1310 uiout = current_uiout;
1311 ui_out_emit_tuple tuple_emitter (uiout, "func history");
1312 low = from;
1313 high = to;
1314
1315 DEBUG ("call-history (0x%x): [%u; %u)", (int) flags, low, high);
1316
1317 /* Check for wrap-arounds. */
1318 if (low != from || high != to)
1319 error (_("Bad range."));
1320
1321 if (high < low)
1322 error (_("Bad range."));
1323
1324 btinfo = require_btrace ();
1325
1326 found = btrace_find_call_by_number (&begin, btinfo, low);
1327 if (found == 0)
1328 error (_("Range out of bounds."));
1329
1330 found = btrace_find_call_by_number (&end, btinfo, high);
1331 if (found == 0)
1332 {
1333 /* Silently truncate the range. */
1334 btrace_call_end (&end, btinfo);
1335 }
1336 else
1337 {
1338 /* We want both begin and end to be inclusive. */
1339 btrace_call_next (&end, 1);
1340 }
1341
1342 btrace_call_history (uiout, btinfo, &begin, &end, flags);
1343 btrace_set_call_history (btinfo, &begin, &end);
1344 }
1345
1346 /* The call_history_from method of target record-btrace. */
1347
1348 void
1349 record_btrace_target::call_history_from (ULONGEST from, int size,
1350 record_print_flags flags)
1351 {
1352 ULONGEST begin, end, context;
1353
1354 context = abs (size);
1355 if (context == 0)
1356 error (_("Bad record function-call-history-size."));
1357
1358 if (size < 0)
1359 {
1360 end = from;
1361
1362 if (from < context)
1363 begin = 0;
1364 else
1365 begin = from - context + 1;
1366 }
1367 else
1368 {
1369 begin = from;
1370 end = from + context - 1;
1371
1372 /* Check for wrap-around. */
1373 if (end < begin)
1374 end = ULONGEST_MAX;
1375 }
1376
1377 call_history_range (begin, end, flags);
1378 }
1379
1380 /* The record_method method of target record-btrace. */
1381
1382 enum record_method
1383 record_btrace_target::record_method (ptid_t ptid)
1384 {
1385 struct thread_info * const tp = find_thread_ptid (ptid);
1386
1387 if (tp == NULL)
1388 error (_("No thread."));
1389
1390 if (tp->btrace.target == NULL)
1391 return RECORD_METHOD_NONE;
1392
1393 return RECORD_METHOD_BTRACE;
1394 }
1395
1396 /* The record_is_replaying method of target record-btrace. */
1397
1398 bool
1399 record_btrace_target::record_is_replaying (ptid_t ptid)
1400 {
1401 struct thread_info *tp;
1402
1403 ALL_NON_EXITED_THREADS (tp)
1404 if (tp->ptid.matches (ptid) && btrace_is_replaying (tp))
1405 return true;
1406
1407 return false;
1408 }
1409
1410 /* The record_will_replay method of target record-btrace. */
1411
1412 bool
1413 record_btrace_target::record_will_replay (ptid_t ptid, int dir)
1414 {
1415 return dir == EXEC_REVERSE || record_is_replaying (ptid);
1416 }
1417
1418 /* The xfer_partial method of target record-btrace. */
1419
1420 enum target_xfer_status
1421 record_btrace_target::xfer_partial (enum target_object object,
1422 const char *annex, gdb_byte *readbuf,
1423 const gdb_byte *writebuf, ULONGEST offset,
1424 ULONGEST len, ULONGEST *xfered_len)
1425 {
1426 /* Filter out requests that don't make sense during replay. */
1427 if (replay_memory_access == replay_memory_access_read_only
1428 && !record_btrace_generating_corefile
1429 && record_is_replaying (inferior_ptid))
1430 {
1431 switch (object)
1432 {
1433 case TARGET_OBJECT_MEMORY:
1434 {
1435 struct target_section *section;
1436
1437 /* We do not allow writing memory in general. */
1438 if (writebuf != NULL)
1439 {
1440 *xfered_len = len;
1441 return TARGET_XFER_UNAVAILABLE;
1442 }
1443
1444 /* We allow reading readonly memory. */
1445 section = target_section_by_addr (this, offset);
1446 if (section != NULL)
1447 {
1448 /* Check if the section we found is readonly. */
1449 if ((bfd_get_section_flags (section->the_bfd_section->owner,
1450 section->the_bfd_section)
1451 & SEC_READONLY) != 0)
1452 {
1453 /* Truncate the request to fit into this section. */
1454 len = std::min (len, section->endaddr - offset);
1455 break;
1456 }
1457 }
1458
1459 *xfered_len = len;
1460 return TARGET_XFER_UNAVAILABLE;
1461 }
1462 }
1463 }
1464
1465 /* Forward the request. */
1466 return this->beneath ()->xfer_partial (object, annex, readbuf, writebuf,
1467 offset, len, xfered_len);
1468 }
1469
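/* For example, while replaying under the default "read-only" setting
   of "set record btrace replay-memory-access", a write to
   TARGET_OBJECT_MEMORY reports TARGET_XFER_UNAVAILABLE, and a read is
   forwarded only if it falls into an SEC_READONLY section (truncated
   to that section's end).  All other requests, including everything in
   the "read-write" setting, pass straight to the target beneath.  */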
1470 /* The insert_breakpoint method of target record-btrace. */
1471
1472 int
1473 record_btrace_target::insert_breakpoint (struct gdbarch *gdbarch,
1474 struct bp_target_info *bp_tgt)
1475 {
1476 const char *old;
1477 int ret;
1478
1479 /* Inserting breakpoints requires accessing memory. Allow it for the
1480 duration of this function. */
1481 old = replay_memory_access;
1482 replay_memory_access = replay_memory_access_read_write;
1483
1484 ret = 0;
1485 TRY
1486 {
1487 ret = this->beneath ()->insert_breakpoint (gdbarch, bp_tgt);
1488 }
1489 CATCH (except, RETURN_MASK_ALL)
1490 {
1491 replay_memory_access = old;
1492 throw_exception (except);
1493 }
1494 END_CATCH
1495 replay_memory_access = old;
1496
1497 return ret;
1498 }
1499
1500 /* The remove_breakpoint method of target record-btrace. */
1501
1502 int
1503 record_btrace_target::remove_breakpoint (struct gdbarch *gdbarch,
1504 struct bp_target_info *bp_tgt,
1505 enum remove_bp_reason reason)
1506 {
1507 const char *old;
1508 int ret;
1509
1510 /* Removing breakpoints requires accessing memory. Allow it for the
1511 duration of this function. */
1512 old = replay_memory_access;
1513 replay_memory_access = replay_memory_access_read_write;
1514
1515 ret = 0;
1516 TRY
1517 {
1518 ret = this->beneath ()->remove_breakpoint (gdbarch, bp_tgt, reason);
1519 }
1520 CATCH (except, RETURN_MASK_ALL)
1521 {
1522 replay_memory_access = old;
1523 throw_exception (except);
1524 }
1525 END_CATCH
1526 replay_memory_access = old;
1527
1528 return ret;
1529 }
1530
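/* The TRY/CATCH save-and-restore of replay_memory_access in the two
   methods above could also be expressed with GDB's scoped_restore; a
   sketch, assuming the common/scoped_restore.h semantics:

     scoped_restore restore_memory_access
       = make_scoped_restore (&replay_memory_access,
			      replay_memory_access_read_write);

     return this->beneath ()->insert_breakpoint (gdbarch, bp_tgt);

   The destructor restores the previous value on normal return and when
   an exception propagates, which is what the explicit CATCH blocks do
   by hand.  */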
1531 /* The fetch_registers method of target record-btrace. */
1532
1533 void
1534 record_btrace_target::fetch_registers (struct regcache *regcache, int regno)
1535 {
1536 struct btrace_insn_iterator *replay;
1537 struct thread_info *tp;
1538
1539 tp = find_thread_ptid (regcache->ptid ());
1540 gdb_assert (tp != NULL);
1541
1542 replay = tp->btrace.replay;
1543 if (replay != NULL && !record_btrace_generating_corefile)
1544 {
1545 const struct btrace_insn *insn;
1546 struct gdbarch *gdbarch;
1547 int pcreg;
1548
1549 gdbarch = regcache->arch ();
1550 pcreg = gdbarch_pc_regnum (gdbarch);
1551 if (pcreg < 0)
1552 return;
1553
1554 /* We can only provide the PC register. */
1555 if (regno >= 0 && regno != pcreg)
1556 return;
1557
1558 insn = btrace_insn_get (replay);
1559 gdb_assert (insn != NULL);
1560
1561 regcache->raw_supply (regno, &insn->pc);
1562 }
1563 else
1564 this->beneath ()->fetch_registers (regcache, regno);
1565 }
1566
1567 /* The store_registers method of target record-btrace. */
1568
1569 void
1570 record_btrace_target::store_registers (struct regcache *regcache, int regno)
1571 {
1572 if (!record_btrace_generating_corefile
1573 && record_is_replaying (regcache->ptid ()))
1574 error (_("Cannot write registers while replaying."));
1575
1576 gdb_assert (may_write_registers != 0);
1577
1578 this->beneath ()->store_registers (regcache, regno);
1579 }
1580
1581 /* The prepare_to_store method of target record-btrace. */
1582
1583 void
1584 record_btrace_target::prepare_to_store (struct regcache *regcache)
1585 {
1586 if (!record_btrace_generating_corefile
1587 && record_is_replaying (regcache->ptid ()))
1588 return;
1589
1590 this->beneath ()->prepare_to_store (regcache);
1591 }
1592
1593 /* The branch trace frame cache. */
1594
1595 struct btrace_frame_cache
1596 {
1597 /* The thread. */
1598 struct thread_info *tp;
1599
1600 /* The frame info. */
1601 struct frame_info *frame;
1602
1603 /* The branch trace function segment. */
1604 const struct btrace_function *bfun;
1605 };
1606
1607 /* A struct btrace_frame_cache hash table indexed by NEXT. */
1608
1609 static htab_t bfcache;
1610
1611 /* hash_f for htab_create_alloc of bfcache. */
1612
1613 static hashval_t
1614 bfcache_hash (const void *arg)
1615 {
1616 const struct btrace_frame_cache *cache
1617 = (const struct btrace_frame_cache *) arg;
1618
1619 return htab_hash_pointer (cache->frame);
1620 }
1621
1622 /* eq_f for htab_create_alloc of bfcache. */
1623
1624 static int
1625 bfcache_eq (const void *arg1, const void *arg2)
1626 {
1627 const struct btrace_frame_cache *cache1
1628 = (const struct btrace_frame_cache *) arg1;
1629 const struct btrace_frame_cache *cache2
1630 = (const struct btrace_frame_cache *) arg2;
1631
1632 return cache1->frame == cache2->frame;
1633 }
1634
1635 /* Create a new btrace frame cache. */
1636
1637 static struct btrace_frame_cache *
1638 bfcache_new (struct frame_info *frame)
1639 {
1640 struct btrace_frame_cache *cache;
1641 void **slot;
1642
1643 cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
1644 cache->frame = frame;
1645
1646 slot = htab_find_slot (bfcache, cache, INSERT);
1647 gdb_assert (*slot == NULL);
1648 *slot = cache;
1649
1650 return cache;
1651 }
1652
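/* Lifecycle sketch for the cache above: the frame sniffers below call
   bfcache_new (this_frame) and fill in the tp and bfun members;
   btrace_get_frame_function then finds the entry by frame pointer via
   bfcache_hash and bfcache_eq; record_btrace_frame_dealloc_cache
   removes the entry when the frame is discarded.  The cache memory
   itself lives on the frame obstack, so only the hash table slot needs
   explicit removal.  */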
1653 /* Extract the branch trace function from a branch trace frame. */
1654
1655 static const struct btrace_function *
1656 btrace_get_frame_function (struct frame_info *frame)
1657 {
1658 const struct btrace_frame_cache *cache;
1659 struct btrace_frame_cache pattern;
1660 void **slot;
1661
1662 pattern.frame = frame;
1663
1664 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1665 if (slot == NULL)
1666 return NULL;
1667
1668 cache = (const struct btrace_frame_cache *) *slot;
1669 return cache->bfun;
1670 }
1671
1672 /* Implement stop_reason method for record_btrace_frame_unwind. */
1673
1674 static enum unwind_stop_reason
1675 record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1676 void **this_cache)
1677 {
1678 const struct btrace_frame_cache *cache;
1679 const struct btrace_function *bfun;
1680
1681 cache = (const struct btrace_frame_cache *) *this_cache;
1682 bfun = cache->bfun;
1683 gdb_assert (bfun != NULL);
1684
1685 if (bfun->up == 0)
1686 return UNWIND_UNAVAILABLE;
1687
1688 return UNWIND_NO_REASON;
1689 }
1690
1691 /* Implement this_id method for record_btrace_frame_unwind. */
1692
1693 static void
1694 record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
1695 struct frame_id *this_id)
1696 {
1697 const struct btrace_frame_cache *cache;
1698 const struct btrace_function *bfun;
1699 struct btrace_call_iterator it;
1700 CORE_ADDR code, special;
1701
1702 cache = (const struct btrace_frame_cache *) *this_cache;
1703
1704 bfun = cache->bfun;
1705 gdb_assert (bfun != NULL);
1706
1707 while (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->prev) != 0)
1708 bfun = btrace_call_get (&it);
1709
1710 code = get_frame_func (this_frame);
1711 special = bfun->number;
1712
1713 *this_id = frame_id_build_unavailable_stack_special (code, special);
1714
1715 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1716 btrace_get_bfun_name (cache->bfun),
1717 core_addr_to_string_nz (this_id->code_addr),
1718 core_addr_to_string_nz (this_id->special_addr));
1719 }
1720
1721 /* Implement prev_register method for record_btrace_frame_unwind. */
1722
1723 static struct value *
1724 record_btrace_frame_prev_register (struct frame_info *this_frame,
1725 void **this_cache,
1726 int regnum)
1727 {
1728 const struct btrace_frame_cache *cache;
1729 const struct btrace_function *bfun, *caller;
1730 struct btrace_call_iterator it;
1731 struct gdbarch *gdbarch;
1732 CORE_ADDR pc;
1733 int pcreg;
1734
1735 gdbarch = get_frame_arch (this_frame);
1736 pcreg = gdbarch_pc_regnum (gdbarch);
1737 if (pcreg < 0 || regnum != pcreg)
1738 throw_error (NOT_AVAILABLE_ERROR,
1739 _("Registers are not available in btrace record history"));
1740
1741 cache = (const struct btrace_frame_cache *) *this_cache;
1742 bfun = cache->bfun;
1743 gdb_assert (bfun != NULL);
1744
1745 if (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->up) == 0)
1746 throw_error (NOT_AVAILABLE_ERROR,
1747 _("No caller in btrace record history"));
1748
1749 caller = btrace_call_get (&it);
1750
1751 if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
1752 pc = caller->insn.front ().pc;
1753 else
1754 {
1755 pc = caller->insn.back ().pc;
1756 pc += gdb_insn_length (gdbarch, pc);
1757 }
1758
1759 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1760 btrace_get_bfun_name (bfun), bfun->level,
1761 core_addr_to_string_nz (pc));
1762
1763 return frame_unwind_got_address (this_frame, regnum, pc);
1764 }
1765
1766 /* Implement sniffer method for record_btrace_frame_unwind. */
1767
1768 static int
1769 record_btrace_frame_sniffer (const struct frame_unwind *self,
1770 struct frame_info *this_frame,
1771 void **this_cache)
1772 {
1773 const struct btrace_function *bfun;
1774 struct btrace_frame_cache *cache;
1775 struct thread_info *tp;
1776 struct frame_info *next;
1777
1778 /* THIS_FRAME does not contain a reference to its thread. */
1779 tp = inferior_thread ();
1780
1781 bfun = NULL;
1782 next = get_next_frame (this_frame);
1783 if (next == NULL)
1784 {
1785 const struct btrace_insn_iterator *replay;
1786
1787 replay = tp->btrace.replay;
1788 if (replay != NULL)
1789 bfun = &replay->btinfo->functions[replay->call_index];
1790 }
1791 else
1792 {
1793 const struct btrace_function *callee;
1794 struct btrace_call_iterator it;
1795
1796 callee = btrace_get_frame_function (next);
1797 if (callee == NULL || (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
1798 return 0;
1799
1800 if (btrace_find_call_by_number (&it, &tp->btrace, callee->up) == 0)
1801 return 0;
1802
1803 bfun = btrace_call_get (&it);
1804 }
1805
1806 if (bfun == NULL)
1807 return 0;
1808
1809 DEBUG ("[frame] sniffed frame for %s on level %d",
1810 btrace_get_bfun_name (bfun), bfun->level);
1811
1812 /* This is our frame. Initialize the frame cache. */
1813 cache = bfcache_new (this_frame);
1814 cache->tp = tp;
1815 cache->bfun = bfun;
1816
1817 *this_cache = cache;
1818 return 1;
1819 }
1820
1821 /* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1822
1823 static int
1824 record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
1825 struct frame_info *this_frame,
1826 void **this_cache)
1827 {
1828 const struct btrace_function *bfun, *callee;
1829 struct btrace_frame_cache *cache;
1830 struct btrace_call_iterator it;
1831 struct frame_info *next;
1832 struct thread_info *tinfo;
1833
1834 next = get_next_frame (this_frame);
1835 if (next == NULL)
1836 return 0;
1837
1838 callee = btrace_get_frame_function (next);
1839 if (callee == NULL)
1840 return 0;
1841
1842 if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1843 return 0;
1844
1845 tinfo = inferior_thread ();
1846 if (btrace_find_call_by_number (&it, &tinfo->btrace, callee->up) == 0)
1847 return 0;
1848
1849 bfun = btrace_call_get (&it);
1850
1851 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1852 btrace_get_bfun_name (bfun), bfun->level);
1853
1854 /* This is our frame. Initialize the frame cache. */
1855 cache = bfcache_new (this_frame);
1856 cache->tp = tinfo;
1857 cache->bfun = bfun;
1858
1859 *this_cache = cache;
1860 return 1;
1861 }
1862
1863 static void
1864 record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
1865 {
1866 struct btrace_frame_cache *cache;
1867 void **slot;
1868
1869 cache = (struct btrace_frame_cache *) this_cache;
1870
1871 slot = htab_find_slot (bfcache, cache, NO_INSERT);
1872 gdb_assert (slot != NULL);
1873
1874 htab_remove_elt (bfcache, cache);
1875 }
1876
1877 /* btrace recording stores neither previous memory content nor the contents
1878 of the stack frames. Any unwinding would return erroneous results as the
1879 stack contents no longer match the changed PC value restored from history.
1880 Therefore this unwinder reports any possibly unwound registers as
1881 <unavailable>. */
1882
1883 const struct frame_unwind record_btrace_frame_unwind =
1884 {
1885 NORMAL_FRAME,
1886 record_btrace_frame_unwind_stop_reason,
1887 record_btrace_frame_this_id,
1888 record_btrace_frame_prev_register,
1889 NULL,
1890 record_btrace_frame_sniffer,
1891 record_btrace_frame_dealloc_cache
1892 };
1893
1894 const struct frame_unwind record_btrace_tailcall_frame_unwind =
1895 {
1896 TAILCALL_FRAME,
1897 record_btrace_frame_unwind_stop_reason,
1898 record_btrace_frame_this_id,
1899 record_btrace_frame_prev_register,
1900 NULL,
1901 record_btrace_tailcall_frame_sniffer,
1902 record_btrace_frame_dealloc_cache
1903 };
1904
1905 /* Implement the get_unwinder method. */
1906
1907 const struct frame_unwind *
1908 record_btrace_target::get_unwinder ()
1909 {
1910 return &record_btrace_frame_unwind;
1911 }
1912
1913 /* Implement the get_tailcall_unwinder method. */
1914
1915 const struct frame_unwind *
1916 record_btrace_target::get_tailcall_unwinder ()
1917 {
1918 return &record_btrace_tailcall_frame_unwind;
1919 }
1920
1921 /* Return a human-readable string for FLAG. */
1922
1923 static const char *
1924 btrace_thread_flag_to_str (enum btrace_thread_flag flag)
1925 {
1926 switch (flag)
1927 {
1928 case BTHR_STEP:
1929 return "step";
1930
1931 case BTHR_RSTEP:
1932 return "reverse-step";
1933
1934 case BTHR_CONT:
1935 return "cont";
1936
1937 case BTHR_RCONT:
1938 return "reverse-cont";
1939
1940 case BTHR_STOP:
1941 return "stop";
1942 }
1943
1944 return "<invalid>";
1945 }
1946
1947 /* Indicate that TP should be resumed according to FLAG. */
1948
1949 static void
1950 record_btrace_resume_thread (struct thread_info *tp,
1951 enum btrace_thread_flag flag)
1952 {
1953 struct btrace_thread_info *btinfo;
1954
1955 DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp),
1956 target_pid_to_str (tp->ptid), flag, btrace_thread_flag_to_str (flag));
1957
1958 btinfo = &tp->btrace;
1959
1960 /* Fetch the latest branch trace. */
1961 btrace_fetch (tp, record_btrace_get_cpu ());
1962
1963 /* A resume request overwrites a preceding resume or stop request. */
1964 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
1965 btinfo->flags |= flag;
1966 }
1967
1968 /* Get the current frame for TP. */
1969
1970 static struct frame_info *
1971 get_thread_current_frame (struct thread_info *tp)
1972 {
1973 struct frame_info *frame;
1974 int executing;
1975
1976 /* Set current thread, which is implicitly used by
1977 get_current_frame. */
1978 scoped_restore_current_thread restore_thread;
1979
1980 switch_to_thread (tp);
1981
1982 /* Clear the executing flag to allow changes to the current frame.
1983 We are not actually running, yet. We just started a reverse execution
1984 command or a record goto command.
1985 For the latter, EXECUTING is false and this has no effect.
1986 For the former, EXECUTING is true and we're in wait, about to
1987 move the thread. Since we need to recompute the stack, we temporarily
1988 set EXECUTING to false. */
1989 executing = tp->executing;
1990 set_executing (inferior_ptid, false);
1991
1992 frame = NULL;
1993 TRY
1994 {
1995 frame = get_current_frame ();
1996 }
1997 CATCH (except, RETURN_MASK_ALL)
1998 {
1999 /* Restore the previous execution state. */
2000 set_executing (inferior_ptid, executing);
2001
2002 throw_exception (except);
2003 }
2004 END_CATCH
2005
2006 /* Restore the previous execution state. */
2007 set_executing (inferior_ptid, executing);
2008
2009 return frame;
2010 }
2011
2012 /* Start replaying a thread. */
2013
2014 static struct btrace_insn_iterator *
2015 record_btrace_start_replaying (struct thread_info *tp)
2016 {
2017 struct btrace_insn_iterator *replay;
2018 struct btrace_thread_info *btinfo;
2019
2020 btinfo = &tp->btrace;
2021 replay = NULL;
2022
2023 /* We can't start replaying without trace. */
2024 if (btinfo->functions.empty ())
2025 return NULL;
2026
2027 /* GDB stores the current frame_id when stepping in order to detect steps
2028 into subroutines.
2029 Since frames are computed differently when we're replaying, we need to
2030 recompute those stored frames and fix them up so we can still detect
2031 subroutines after we started replaying. */
2032 TRY
2033 {
2034 struct frame_info *frame;
2035 struct frame_id frame_id;
2036 int upd_step_frame_id, upd_step_stack_frame_id;
2037
2038 /* The current frame without replaying - computed via normal unwind. */
2039 frame = get_thread_current_frame (tp);
2040 frame_id = get_frame_id (frame);
2041
2042 /* Check if we need to update any stepping-related frame id's. */
2043 upd_step_frame_id = frame_id_eq (frame_id,
2044 tp->control.step_frame_id);
2045 upd_step_stack_frame_id = frame_id_eq (frame_id,
2046 tp->control.step_stack_frame_id);
2047
2048 /* We start replaying at the end of the branch trace. This corresponds
2049 to the current instruction. */
2050 replay = XNEW (struct btrace_insn_iterator);
2051 btrace_insn_end (replay, btinfo);
2052
2053 /* Skip gaps at the end of the trace. */
2054 while (btrace_insn_get (replay) == NULL)
2055 {
2056 unsigned int steps;
2057
2058 steps = btrace_insn_prev (replay, 1);
2059 if (steps == 0)
2060 error (_("No trace."));
2061 }
2062
2063 /* We're not replaying, yet. */
2064 gdb_assert (btinfo->replay == NULL);
2065 btinfo->replay = replay;
2066
2067 /* Make sure we're not using any stale registers. */
2068 registers_changed_thread (tp);
2069
2070 /* The current frame with replaying - computed via btrace unwind. */
2071 frame = get_thread_current_frame (tp);
2072 frame_id = get_frame_id (frame);
2073
2074 /* Replace stepping related frames where necessary. */
2075 if (upd_step_frame_id)
2076 tp->control.step_frame_id = frame_id;
2077 if (upd_step_stack_frame_id)
2078 tp->control.step_stack_frame_id = frame_id;
2079 }
2080 CATCH (except, RETURN_MASK_ALL)
2081 {
2082 xfree (btinfo->replay);
2083 btinfo->replay = NULL;
2084
2085 registers_changed_thread (tp);
2086
2087 throw_exception (except);
2088 }
2089 END_CATCH
2090
2091 return replay;
2092 }
2093
2094 /* Stop replaying a thread. */
2095
2096 static void
2097 record_btrace_stop_replaying (struct thread_info *tp)
2098 {
2099 struct btrace_thread_info *btinfo;
2100
2101 btinfo = &tp->btrace;
2102
2103 xfree (btinfo->replay);
2104 btinfo->replay = NULL;
2105
2106 /* Make sure we're not leaving any stale registers. */
2107 registers_changed_thread (tp);
2108 }
2109
2110 /* Stop replaying TP if it is at the end of its execution history. */
2111
2112 static void
2113 record_btrace_stop_replaying_at_end (struct thread_info *tp)
2114 {
2115 struct btrace_insn_iterator *replay, end;
2116 struct btrace_thread_info *btinfo;
2117
2118 btinfo = &tp->btrace;
2119 replay = btinfo->replay;
2120
2121 if (replay == NULL)
2122 return;
2123
2124 btrace_insn_end (&end, btinfo);
2125
2126 if (btrace_insn_cmp (replay, &end) == 0)
2127 record_btrace_stop_replaying (tp);
2128 }
2129
2130 /* The resume method of target record-btrace. */
2131
2132 void
2133 record_btrace_target::resume (ptid_t ptid, int step, enum gdb_signal signal)
2134 {
2135 struct thread_info *tp;
2136 enum btrace_thread_flag flag, cflag;
2137
2138 DEBUG ("resume %s: %s%s", target_pid_to_str (ptid),
2139 ::execution_direction == EXEC_REVERSE ? "reverse-" : "",
2140 step ? "step" : "cont");
2141
2142 /* Store the execution direction of the last resume.
2143
2144 If there is more than one resume call, we have to rely on infrun
2145 to not change the execution direction in-between. */
2146 record_btrace_resume_exec_dir = ::execution_direction;
2147
2148 /* As long as we're not replaying, just forward the request.
2149
2150 For non-stop targets this means that no thread is replaying. In order to
2151 make progress, we may need to explicitly move replaying threads to the end
2152 of their execution history. */
2153 if ((::execution_direction != EXEC_REVERSE)
2154 && !record_is_replaying (minus_one_ptid))
2155 {
2156 this->beneath ()->resume (ptid, step, signal);
2157 return;
2158 }
2159
2160 /* Compute the btrace thread flag for the requested move. */
2161 if (::execution_direction == EXEC_REVERSE)
2162 {
2163 flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;
2164 cflag = BTHR_RCONT;
2165 }
2166 else
2167 {
2168 flag = step == 0 ? BTHR_CONT : BTHR_STEP;
2169 cflag = BTHR_CONT;
2170 }
2171
2172 /* We just indicate the resume intent here. The actual stepping happens in
2173 record_btrace_wait below.
2174
2175 For all-stop targets, we only step INFERIOR_PTID and continue others. */
2176 if (!target_is_non_stop_p ())
2177 {
2178 gdb_assert (inferior_ptid.matches (ptid));
2179
2180 ALL_NON_EXITED_THREADS (tp)
2181 if (tp->ptid.matches (ptid))
2182 {
2183 if (tp->ptid.matches (inferior_ptid))
2184 record_btrace_resume_thread (tp, flag);
2185 else
2186 record_btrace_resume_thread (tp, cflag);
2187 }
2188 }
2189 else
2190 {
2191 ALL_NON_EXITED_THREADS (tp)
2192 if (tp->ptid.matches (ptid))
2193 record_btrace_resume_thread (tp, flag);
2194 }
2195
2196 /* Async support. */
2197 if (target_can_async_p ())
2198 {
2199 target_async (1);
2200 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2201 }
2202 }
2203
2204 /* The commit_resume method of target record-btrace. */
2205
2206 void
2207 record_btrace_target::commit_resume ()
2208 {
2209 if ((::execution_direction != EXEC_REVERSE)
2210 && !record_is_replaying (minus_one_ptid))
2211 beneath ()->commit_resume ();
2212 }
2213
2214 /* Cancel resuming TP. */
2215
2216 static void
2217 record_btrace_cancel_resume (struct thread_info *tp)
2218 {
2219 enum btrace_thread_flag flags;
2220
2221 flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
2222 if (flags == 0)
2223 return;
2224
2225 DEBUG ("cancel resume thread %s (%s): %x (%s)",
2226 print_thread_id (tp),
2227 target_pid_to_str (tp->ptid), flags,
2228 btrace_thread_flag_to_str (flags));
2229
2230 tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
2231 record_btrace_stop_replaying_at_end (tp);
2232 }
2233
2234 /* Return a target_waitstatus indicating that we ran out of history. */
2235
2236 static struct target_waitstatus
2237 btrace_step_no_history (void)
2238 {
2239 struct target_waitstatus status;
2240
2241 status.kind = TARGET_WAITKIND_NO_HISTORY;
2242
2243 return status;
2244 }
2245
2246 /* Return a target_waitstatus indicating that a step finished. */
2247
2248 static struct target_waitstatus
2249 btrace_step_stopped (void)
2250 {
2251 struct target_waitstatus status;
2252
2253 status.kind = TARGET_WAITKIND_STOPPED;
2254 status.value.sig = GDB_SIGNAL_TRAP;
2255
2256 return status;
2257 }
2258
2259 /* Return a target_waitstatus indicating that a thread was stopped as
2260 requested. */
2261
2262 static struct target_waitstatus
2263 btrace_step_stopped_on_request (void)
2264 {
2265 struct target_waitstatus status;
2266
2267 status.kind = TARGET_WAITKIND_STOPPED;
2268 status.value.sig = GDB_SIGNAL_0;
2269
2270 return status;
2271 }
2272
2273 /* Return a target_waitstatus indicating a spurious stop. */
2274
2275 static struct target_waitstatus
2276 btrace_step_spurious (void)
2277 {
2278 struct target_waitstatus status;
2279
2280 status.kind = TARGET_WAITKIND_SPURIOUS;
2281
2282 return status;
2283 }
2284
2285 /* Return a target_waitstatus indicating that the thread was not resumed. */
2286
2287 static struct target_waitstatus
2288 btrace_step_no_resumed (void)
2289 {
2290 struct target_waitstatus status;
2291
2292 status.kind = TARGET_WAITKIND_NO_RESUMED;
2293
2294 return status;
2295 }
2296
2297 /* Return a target_waitstatus indicating that we should wait again. */
2298
2299 static struct target_waitstatus
2300 btrace_step_again (void)
2301 {
2302 struct target_waitstatus status;
2303
2304 status.kind = TARGET_WAITKIND_IGNORE;
2305
2306 return status;
2307 }
2308
2309 /* Clear the record histories. */
2310
2311 static void
2312 record_btrace_clear_histories (struct btrace_thread_info *btinfo)
2313 {
2314 xfree (btinfo->insn_history);
2315 xfree (btinfo->call_history);
2316
2317 btinfo->insn_history = NULL;
2318 btinfo->call_history = NULL;
2319 }
2320
2321 /* Check whether TP's current replay position is at a breakpoint. */
2322
2323 static int
2324 record_btrace_replay_at_breakpoint (struct thread_info *tp)
2325 {
2326 struct btrace_insn_iterator *replay;
2327 struct btrace_thread_info *btinfo;
2328 const struct btrace_insn *insn;
2329
2330 btinfo = &tp->btrace;
2331 replay = btinfo->replay;
2332
2333 if (replay == NULL)
2334 return 0;
2335
2336 insn = btrace_insn_get (replay);
2337 if (insn == NULL)
2338 return 0;
2339
2340 return record_check_stopped_by_breakpoint (tp->inf->aspace, insn->pc,
2341 &btinfo->stop_reason);
2342 }
2343
2344 /* Step one instruction in forward direction. */
2345
2346 static struct target_waitstatus
2347 record_btrace_single_step_forward (struct thread_info *tp)
2348 {
2349 struct btrace_insn_iterator *replay, end, start;
2350 struct btrace_thread_info *btinfo;
2351
2352 btinfo = &tp->btrace;
2353 replay = btinfo->replay;
2354
2355 /* We're done if we're not replaying. */
2356 if (replay == NULL)
2357 return btrace_step_no_history ();
2358
2359 /* Check if we're stepping a breakpoint. */
2360 if (record_btrace_replay_at_breakpoint (tp))
2361 return btrace_step_stopped ();
2362
2363 /* Skip gaps during replay. If we end up at a gap (at the end of the trace),
2364 jump back to the instruction at which we started. */
2365 start = *replay;
2366 do
2367 {
2368 unsigned int steps;
2369
2370 /* We will bail out here if we continue stepping after reaching the end
2371 of the execution history. */
2372 steps = btrace_insn_next (replay, 1);
2373 if (steps == 0)
2374 {
2375 *replay = start;
2376 return btrace_step_no_history ();
2377 }
2378 }
2379 while (btrace_insn_get (replay) == NULL);
2380
2381 /* Determine the end of the instruction trace. */
2382 btrace_insn_end (&end, btinfo);
2383
2384 /* The execution trace contains (and ends with) the current instruction.
2385 This instruction has not been executed, yet, so the trace really ends
2386 one instruction earlier. */
2387 if (btrace_insn_cmp (replay, &end) == 0)
2388 return btrace_step_no_history ();
2389
2390 return btrace_step_spurious ();
2391 }
2392
2393 /* Step one instruction in backward direction. */
2394
2395 static struct target_waitstatus
2396 record_btrace_single_step_backward (struct thread_info *tp)
2397 {
2398 struct btrace_insn_iterator *replay, start;
2399 struct btrace_thread_info *btinfo;
2400
2401 btinfo = &tp->btrace;
2402 replay = btinfo->replay;
2403
2404 /* Start replaying if we're not already doing so. */
2405 if (replay == NULL)
2406 replay = record_btrace_start_replaying (tp);
2407
2408 /* If we can't step any further, we reached the end of the history.
2409 Skip gaps during replay. If we end up at a gap (at the beginning of
2410 the trace), jump back to the instruction at which we started. */
2411 start = *replay;
2412 do
2413 {
2414 unsigned int steps;
2415
2416 steps = btrace_insn_prev (replay, 1);
2417 if (steps == 0)
2418 {
2419 *replay = start;
2420 return btrace_step_no_history ();
2421 }
2422 }
2423 while (btrace_insn_get (replay) == NULL);
2424
2425 /* Check if we're stepping a breakpoint.
2426
2427 For reverse-stepping, this check is after the step. There is logic in
2428 infrun.c that handles reverse-stepping separately. See, for example,
2429 proceed and adjust_pc_after_break.
2430
2431 This code assumes that for reverse-stepping, PC points to the last
2432 de-executed instruction, whereas for forward-stepping PC points to the
2433 next to-be-executed instruction. */
2434 if (record_btrace_replay_at_breakpoint (tp))
2435 return btrace_step_stopped ();
2436
2437 return btrace_step_spurious ();
2438 }
2439
2440 /* Step a single thread. */
2441
2442 static struct target_waitstatus
2443 record_btrace_step_thread (struct thread_info *tp)
2444 {
2445 struct btrace_thread_info *btinfo;
2446 struct target_waitstatus status;
2447 enum btrace_thread_flag flags;
2448
2449 btinfo = &tp->btrace;
2450
2451 flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
2452 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
2453
2454 DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp),
2455 target_pid_to_str (tp->ptid), flags,
2456 btrace_thread_flag_to_str (flags));
2457
2458 /* We can't step without an execution history. */
2459 if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
2460 return btrace_step_no_history ();
2461
2462 switch (flags)
2463 {
2464 default:
2465 internal_error (__FILE__, __LINE__, _("invalid stepping type."));
2466
2467 case BTHR_STOP:
2468 return btrace_step_stopped_on_request ();
2469
2470 case BTHR_STEP:
2471 status = record_btrace_single_step_forward (tp);
2472 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2473 break;
2474
2475 return btrace_step_stopped ();
2476
2477 case BTHR_RSTEP:
2478 status = record_btrace_single_step_backward (tp);
2479 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2480 break;
2481
2482 return btrace_step_stopped ();
2483
2484 case BTHR_CONT:
2485 status = record_btrace_single_step_forward (tp);
2486 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2487 break;
2488
2489 btinfo->flags |= flags;
2490 return btrace_step_again ();
2491
2492 case BTHR_RCONT:
2493 status = record_btrace_single_step_backward (tp);
2494 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2495 break;
2496
2497 btinfo->flags |= flags;
2498 return btrace_step_again ();
2499 }
2500
2501 /* We keep threads moving at the end of their execution history. The wait
2502 method will stop the thread for which the event is reported. */
2503 if (status.kind == TARGET_WAITKIND_NO_HISTORY)
2504 btinfo->flags |= flags;
2505
2506 return status;
2507 }
2508
2509 /* Announce further events if necessary. */
2510
2511 static void
2512 record_btrace_maybe_mark_async_event
2513 (const std::vector<thread_info *> &moving,
2514 const std::vector<thread_info *> &no_history)
2515 {
2516 bool more_moving = !moving.empty ();
2517 bool more_no_history = !no_history.empty ();
2518
2519 if (!more_moving && !more_no_history)
2520 return;
2521
2522 if (more_moving)
2523 DEBUG ("movers pending");
2524
2525 if (more_no_history)
2526 DEBUG ("no-history pending");
2527
2528 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2529 }
2530
2531 /* The wait method of target record-btrace. */
2532
2533 ptid_t
2534 record_btrace_target::wait (ptid_t ptid, struct target_waitstatus *status,
2535 int options)
2536 {
2537 std::vector<thread_info *> moving;
2538 std::vector<thread_info *> no_history;
2539
2540 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);
2541
2542 /* As long as we're not replaying, just forward the request. */
2543 if ((::execution_direction != EXEC_REVERSE)
2544 && !record_is_replaying (minus_one_ptid))
2545 {
2546 return this->beneath ()->wait (ptid, status, options);
2547 }
2548
2549 /* Keep a work list of moving threads. */
2550 {
2551 thread_info *tp;
2552
2553 ALL_NON_EXITED_THREADS (tp)
2554 {
2555 if (tp->ptid.matches (ptid)
2556 && ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0))
2557 moving.push_back (tp);
2558 }
2559 }
2560
2561 if (moving.empty ())
2562 {
2563 *status = btrace_step_no_resumed ();
2564
2565 DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid),
2566 target_waitstatus_to_string (status).c_str ());
2567
2568 return null_ptid;
2569 }
2570
2571 /* Step moving threads one by one, one step each, until either one thread
2572 reports an event or we run out of threads to step.
2573
2574 When stepping more than one thread, chances are that some threads reach
2575 the end of their execution history earlier than others. If we reported
2576 this immediately, all-stop on top of non-stop would stop all threads and
2577 resume the same threads next time. And we would report the same thread
2578 having reached the end of its execution history again.
2579
2580 In the worst case, this would starve the other threads. But even if other
2581 threads would be allowed to make progress, this would result in far too
2582 many intermediate stops.
2583
2584 We therefore delay the reporting of "no execution history" until we have
2585 nothing else to report. By this time, all threads should have moved to
2586 either the beginning or the end of their execution history. There will
2587 be a single user-visible stop. */
2588 struct thread_info *eventing = NULL;
2589 while ((eventing == NULL) && !moving.empty ())
2590 {
2591 for (unsigned int ix = 0; eventing == NULL && ix < moving.size ();)
2592 {
2593 thread_info *tp = moving[ix];
2594
2595 *status = record_btrace_step_thread (tp);
2596
2597 switch (status->kind)
2598 {
2599 case TARGET_WAITKIND_IGNORE:
2600 ix++;
2601 break;
2602
2603 case TARGET_WAITKIND_NO_HISTORY:
2604 no_history.push_back (ordered_remove (moving, ix));
2605 break;
2606
2607 default:
2608 eventing = unordered_remove (moving, ix);
2609 break;
2610 }
2611 }
2612 }
2613
2614 if (eventing == NULL)
2615 {
2616 /* We started with at least one moving thread. This thread must have
2617 either stopped or reached the end of its execution history.
2618
2619 In the former case, EVENTING must not be NULL.
2620 In the latter case, NO_HISTORY must not be empty. */
2621 gdb_assert (!no_history.empty ());
2622
2623 /* We kept threads moving at the end of their execution history. Stop
2624 EVENTING now that we are going to report its stop. */
2625 eventing = unordered_remove (no_history, 0);
2626 eventing->btrace.flags &= ~BTHR_MOVE;
2627
2628 *status = btrace_step_no_history ();
2629 }
2630
2631 gdb_assert (eventing != NULL);
2632
2633 /* We kept threads replaying at the end of their execution history. Stop
2634 replaying EVENTING now that we are going to report its stop. */
2635 record_btrace_stop_replaying_at_end (eventing);
2636
2637 /* Stop all other threads. */
2638 if (!target_is_non_stop_p ())
2639 {
2640 thread_info *tp;
2641
2642 ALL_NON_EXITED_THREADS (tp)
2643 record_btrace_cancel_resume (tp);
2644 }
2645
2646 /* In async mode, we need to announce further events. */
2647 if (target_is_async_p ())
2648 record_btrace_maybe_mark_async_event (moving, no_history);
2649
2650 /* Start record histories anew from the current position. */
2651 record_btrace_clear_histories (&eventing->btrace);
2652
2653 /* We moved the replay position but did not update registers. */
2654 registers_changed_thread (eventing);
2655
2656 DEBUG ("wait ended by thread %s (%s): %s",
2657 print_thread_id (eventing),
2658 target_pid_to_str (eventing->ptid),
2659 target_waitstatus_to_string (status).c_str ());
2660
2661 return eventing->ptid;
2662 }
2663
2664 /* The stop method of target record-btrace. */
2665
2666 void
2667 record_btrace_target::stop (ptid_t ptid)
2668 {
2669 DEBUG ("stop %s", target_pid_to_str (ptid));
2670
2671 /* As long as we're not replaying, just forward the request. */
2672 if ((::execution_direction != EXEC_REVERSE)
2673 && !record_is_replaying (minus_one_ptid))
2674 {
2675 this->beneath ()->stop (ptid);
2676 }
2677 else
2678 {
2679 struct thread_info *tp;
2680
2681 ALL_NON_EXITED_THREADS (tp)
2682 if (tp->ptid.matches (ptid))
2683 {
2684 tp->btrace.flags &= ~BTHR_MOVE;
2685 tp->btrace.flags |= BTHR_STOP;
2686 }
2687 }
2688 }
2689
2690 /* The can_execute_reverse method of target record-btrace. */
2691
2692 bool
2693 record_btrace_target::can_execute_reverse ()
2694 {
2695 return true;
2696 }
2697
2698 /* The stopped_by_sw_breakpoint method of target record-btrace. */
2699
2700 bool
2701 record_btrace_target::stopped_by_sw_breakpoint ()
2702 {
2703 if (record_is_replaying (minus_one_ptid))
2704 {
2705 struct thread_info *tp = inferior_thread ();
2706
2707 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2708 }
2709
2710 return this->beneath ()->stopped_by_sw_breakpoint ();
2711 }
2712
2713 /* The supports_stopped_by_sw_breakpoint method of target
2714 record-btrace. */
2715
2716 bool
2717 record_btrace_target::supports_stopped_by_sw_breakpoint ()
2718 {
2719 if (record_is_replaying (minus_one_ptid))
2720 return true;
2721
2722 return this->beneath ()->supports_stopped_by_sw_breakpoint ();
2723 }
2724
2725 /* The stopped_by_hw_breakpoint method of target record-btrace. */
2726
2727 bool
2728 record_btrace_target::stopped_by_hw_breakpoint ()
2729 {
2730 if (record_is_replaying (minus_one_ptid))
2731 {
2732 struct thread_info *tp = inferior_thread ();
2733
2734 return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2735 }
2736
2737 return this->beneath ()->stopped_by_hw_breakpoint ();
2738 }
2739
2740 /* The supports_stopped_by_hw_breakpoint method of target
2741 record-btrace. */
2742
2743 bool
2744 record_btrace_target::supports_stopped_by_hw_breakpoint ()
2745 {
2746 if (record_is_replaying (minus_one_ptid))
2747 return true;
2748
2749 return this->beneath ()->supports_stopped_by_hw_breakpoint ();
2750 }
2751
2752 /* The update_thread_list method of target record-btrace. */
2753
2754 void
2755 record_btrace_target::update_thread_list ()
2756 {
2757 /* We don't add or remove threads during replay. */
2758 if (record_is_replaying (minus_one_ptid))
2759 return;
2760
2761 /* Forward the request. */
2762 this->beneath ()->update_thread_list ();
2763 }
2764
2765 /* The thread_alive method of target record-btrace. */
2766
2767 bool
2768 record_btrace_target::thread_alive (ptid_t ptid)
2769 {
2770 /* We don't add or remove threads during replay. */
2771 if (record_is_replaying (minus_one_ptid))
2772 return true;
2773
2774 /* Forward the request. */
2775 return this->beneath ()->thread_alive (ptid);
2776 }
2777
2778 /* Set the replay branch trace instruction iterator. If IT is NULL, replay
2779 is stopped. */
2780
2781 static void
2782 record_btrace_set_replay (struct thread_info *tp,
2783 const struct btrace_insn_iterator *it)
2784 {
2785 struct btrace_thread_info *btinfo;
2786
2787 btinfo = &tp->btrace;
2788
2789 if (it == NULL)
2790 record_btrace_stop_replaying (tp);
2791 else
2792 {
2793 if (btinfo->replay == NULL)
2794 record_btrace_start_replaying (tp);
2795 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
2796 return;
2797
2798 *btinfo->replay = *it;
2799 registers_changed_thread (tp);
2800 }
2801
2802 /* Start anew from the new replay position. */
2803 record_btrace_clear_histories (btinfo);
2804
2805 inferior_thread ()->suspend.stop_pc
2806 = regcache_read_pc (get_current_regcache ());
2807 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
2808 }
2809
2810 /* The goto_record_begin method of target record-btrace. */
2811
2812 void
2813 record_btrace_target::goto_record_begin ()
2814 {
2815 struct thread_info *tp;
2816 struct btrace_insn_iterator begin;
2817
2818 tp = require_btrace_thread ();
2819
2820 btrace_insn_begin (&begin, &tp->btrace);
2821
2822 /* Skip gaps at the beginning of the trace. */
2823 while (btrace_insn_get (&begin) == NULL)
2824 {
2825 unsigned int steps;
2826
2827 steps = btrace_insn_next (&begin, 1);
2828 if (steps == 0)
2829 error (_("No trace."));
2830 }
2831
2832 record_btrace_set_replay (tp, &begin);
2833 }
2834
2835 /* The goto_record_end method of target record-btrace. */
2836
2837 void
2838 record_btrace_target::goto_record_end ()
2839 {
2840 struct thread_info *tp;
2841
2842 tp = require_btrace_thread ();
2843
2844 record_btrace_set_replay (tp, NULL);
2845 }
2846
2847 /* The goto_record method of target record-btrace. */
2848
2849 void
2850 record_btrace_target::goto_record (ULONGEST insn)
2851 {
2852 struct thread_info *tp;
2853 struct btrace_insn_iterator it;
2854 unsigned int number;
2855 int found;
2856
2857 number = insn;
2858
2859 /* Check for wrap-arounds. */
2860 if (number != insn)
2861 error (_("Instruction number out of range."));
2862
2863 tp = require_btrace_thread ();
2864
2865 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
2866
2867 /* Check if the instruction could not be found or is a gap. */
2868 if (found == 0 || btrace_insn_get (&it) == NULL)
2869 error (_("No such instruction."));
2870
2871 record_btrace_set_replay (tp, &it);
2872 }
2873
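/* Taken together, the three methods above implement the "record goto"
   command family, e.g.:

     (gdb) record goto begin   - replay from the start of the trace
     (gdb) record goto end     - stop replaying
     (gdb) record goto 42      - jump to instruction number 42  */
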
2874 /* The record_stop_replaying method of target record-btrace. */
2875
2876 void
2877 record_btrace_target::record_stop_replaying ()
2878 {
2879 struct thread_info *tp;
2880
2881 ALL_NON_EXITED_THREADS (tp)
2882 record_btrace_stop_replaying (tp);
2883 }
2884
2885 /* The execution_direction target method. */
2886
2887 enum exec_direction_kind
2888 record_btrace_target::execution_direction ()
2889 {
2890 return record_btrace_resume_exec_dir;
2891 }
2892
2893 /* The prepare_to_generate_core target method. */
2894
2895 void
2896 record_btrace_target::prepare_to_generate_core ()
2897 {
2898 record_btrace_generating_corefile = 1;
2899 }
2900
2901 /* The done_generating_core target method. */
2902
2903 void
2904 record_btrace_target::done_generating_core ()
2905 {
2906 record_btrace_generating_corefile = 0;
2907 }
2908
2909 /* Start recording in BTS format. */
2910
2911 static void
2912 cmd_record_btrace_bts_start (const char *args, int from_tty)
2913 {
2914 if (args != NULL && *args != 0)
2915 error (_("Invalid argument."));
2916
2917 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2918
2919 TRY
2920 {
2921 execute_command ("target record-btrace", from_tty);
2922 }
2923 CATCH (exception, RETURN_MASK_ALL)
2924 {
2925 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2926 throw_exception (exception);
2927 }
2928 END_CATCH
2929 }
2930
2931 /* Start recording in Intel Processor Trace format. */
2932
2933 static void
2934 cmd_record_btrace_pt_start (const char *args, int from_tty)
2935 {
2936 if (args != NULL && *args != 0)
2937 error (_("Invalid argument."));
2938
2939 record_btrace_conf.format = BTRACE_FORMAT_PT;
2940
2941 TRY
2942 {
2943 execute_command ("target record-btrace", from_tty);
2944 }
2945 CATCH (exception, RETURN_MASK_ALL)
2946 {
2947 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2948 throw_exception (exception);
2949 }
2950 END_CATCH
2951 }
2952
2953 /* Alias for "target record"; tries Intel PT first, then falls back to BTS. */
2954
2955 static void
2956 cmd_record_btrace_start (const char *args, int from_tty)
2957 {
2958 if (args != NULL && *args != 0)
2959 error (_("Invalid argument."));
2960
2961 record_btrace_conf.format = BTRACE_FORMAT_PT;
2962
2963 TRY
2964 {
2965 execute_command ("target record-btrace", from_tty);
2966 }
2967 CATCH (exception, RETURN_MASK_ALL)
2968 {
2969 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2970
2971 TRY
2972 {
2973 execute_command ("target record-btrace", from_tty);
2974 }
2975 CATCH (ex, RETURN_MASK_ALL)
2976 {
2977 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2978 throw_exception (ex);
2979 }
2980 END_CATCH
2981 }
2982 END_CATCH
2983 }
2984
2985 /* The "set record btrace" command. */
2986
2987 static void
2988 cmd_set_record_btrace (const char *args, int from_tty)
2989 {
2990 printf_unfiltered (_("\"set record btrace\" must be followed "
2991 "by an appropriate subcommand.\n"));
2992 help_list (set_record_btrace_cmdlist, "set record btrace ",
2993 all_commands, gdb_stdout);
2994 }
2995
2996 /* The "show record btrace" command. */
2997
2998 static void
2999 cmd_show_record_btrace (const char *args, int from_tty)
3000 {
3001 cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
3002 }
3003
3004 /* The "show record btrace replay-memory-access" command. */
3005
3006 static void
3007 cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
3008 struct cmd_list_element *c, const char *value)
3009 {
3010 fprintf_filtered (file, _("Replay memory access is %s.\n"),
3011 replay_memory_access);
3012 }
3013
3014 /* The "set record btrace cpu none" command. */
3015
3016 static void
3017 cmd_set_record_btrace_cpu_none (const char *args, int from_tty)
3018 {
3019 if (args != nullptr && *args != 0)
3020 error (_("Trailing junk: '%s'."), args);
3021
3022 record_btrace_cpu_state = CS_NONE;
3023 }
3024
3025 /* The "set record btrace cpu auto" command. */
3026
3027 static void
3028 cmd_set_record_btrace_cpu_auto (const char *args, int from_tty)
3029 {
3030 if (args != nullptr && *args != 0)
3031 error (_("Trailing junk: '%s'."), args);
3032
3033 record_btrace_cpu_state = CS_AUTO;
3034 }
3035
3036 /* The "set record btrace cpu" command. */
3037
3038 static void
3039 cmd_set_record_btrace_cpu (const char *args, int from_tty)
3040 {
3041 if (args == nullptr)
3042 args = "";
3043
3044 /* We use a hard-coded vendor string for now. */
3045 unsigned int family, model, stepping;
3046 int l1, l2, matches = sscanf (args, "intel: %u/%u%n/%u%n", &family,
3047 &model, &l1, &stepping, &l2);
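  /* For example, "intel: 6/62" parses family and model (matches == 2),
     while "intel: 6/62/4" also parses the stepping (matches == 3).  */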
3048 if (matches == 3)
3049 {
3050 if (strlen (args) != l2)
3051 error (_("Trailing junk: '%s'."), args + l2);
3052 }
3053 else if (matches == 2)
3054 {
3055 if (strlen (args) != l1)
3056 error (_("Trailing junk: '%s'."), args + l1);
3057
3058 stepping = 0;
3059 }
3060 else
3061 error (_("Bad format. See \"help set record btrace cpu\"."));
3062
3063 if (USHRT_MAX < family)
3064 error (_("Cpu family too big."));
3065
3066 if (UCHAR_MAX < model)
3067 error (_("Cpu model too big."));
3068
3069 if (UCHAR_MAX < stepping)
3070 error (_("Cpu stepping too big."));
3071
3072 record_btrace_cpu.vendor = CV_INTEL;
3073 record_btrace_cpu.family = family;
3074 record_btrace_cpu.model = model;
3075 record_btrace_cpu.stepping = stepping;
3076
3077 record_btrace_cpu_state = CS_CPU;
3078 }
3079
3080 /* The "show record btrace cpu" command. */
3081
3082 static void
3083 cmd_show_record_btrace_cpu (const char *args, int from_tty)
3084 {
3085 if (args != nullptr && *args != 0)
3086 error (_("Trailing junk: '%s'."), args);
3087
3088 switch (record_btrace_cpu_state)
3089 {
3090 case CS_AUTO:
3091 printf_unfiltered (_("btrace cpu is 'auto'.\n"));
3092 return;
3093
3094 case CS_NONE:
3095 printf_unfiltered (_("btrace cpu is 'none'.\n"));
3096 return;
3097
3098 case CS_CPU:
3099 switch (record_btrace_cpu.vendor)
3100 {
3101 case CV_INTEL:
3102 if (record_btrace_cpu.stepping == 0)
3103 printf_unfiltered (_("btrace cpu is 'intel: %u/%u'.\n"),
3104 record_btrace_cpu.family,
3105 record_btrace_cpu.model);
3106 else
3107 printf_unfiltered (_("btrace cpu is 'intel: %u/%u/%u'.\n"),
3108 record_btrace_cpu.family,
3109 record_btrace_cpu.model,
3110 record_btrace_cpu.stepping);
3111 return;
3112 }
3113 }
3114
3115 error (_("Internal error: bad cpu state."));
3116 }
3117
3118 /* The "s record btrace bts" command. */
3119
3120 static void
3121 cmd_set_record_btrace_bts (const char *args, int from_tty)
3122 {
3123 printf_unfiltered (_("\"set record btrace bts\" must be followed "
3124 "by an appropriate subcommand.\n"));
3125 help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
3126 all_commands, gdb_stdout);
3127 }
3128
3129 /* The "show record btrace bts" command. */
3130
3131 static void
3132 cmd_show_record_btrace_bts (const char *args, int from_tty)
3133 {
3134 cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
3135 }
3136
3137 /* The "set record btrace pt" command. */
3138
3139 static void
3140 cmd_set_record_btrace_pt (const char *args, int from_tty)
3141 {
3142 printf_unfiltered (_("\"set record btrace pt\" must be followed "
3143 "by an appropriate subcommand.\n"));
3144 help_list (set_record_btrace_pt_cmdlist, "set record btrace pt ",
3145 all_commands, gdb_stdout);
3146 }
3147
3148 /* The "show record btrace pt" command. */
3149
3150 static void
3151 cmd_show_record_btrace_pt (const char *args, int from_tty)
3152 {
3153 cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, "");
3154 }
3155
3156 /* The "record bts buffer-size" show value function. */
3157
3158 static void
3159 show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
3160 struct cmd_list_element *c,
3161 const char *value)
3162 {
3163 fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
3164 value);
3165 }
3166
3167 /* The "record pt buffer-size" show value function. */
3168
3169 static void
3170 show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
3171 struct cmd_list_element *c,
3172 const char *value)
3173 {
3174 fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
3175 value);
3176 }
3177
3178 /* Initialize btrace commands. */
3179
3180 void
3181 _initialize_record_btrace (void)
3182 {
3183 add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
3184 _("Start branch trace recording."), &record_btrace_cmdlist,
3185 "record btrace ", 0, &record_cmdlist);
3186 add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
3187
3188 add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
3189 _("\
3190 Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
3191 The processor stores a from/to record for each branch into a cyclic buffer.\n\
3192 This format may not be available on all processors."),
3193 &record_btrace_cmdlist);
3194 add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);
3195
3196 add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
3197 _("\
3198 Start branch trace recording in Intel Processor Trace format.\n\n\
3199 This format may not be available on all processors."),
3200 &record_btrace_cmdlist);
3201 add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);
3202
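  /* The registrations above give several ways to start recording, e.g.:

       (gdb) record btrace       - prefer Intel PT, fall back to BTS
       (gdb) record btrace bts   - force the BTS format
       (gdb) record btrace pt    - force the Intel PT format  */
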
3203 add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
3204 _("Set record options"), &set_record_btrace_cmdlist,
3205 "set record btrace ", 0, &set_record_cmdlist);
3206
3207 add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
3208 _("Show record options"), &show_record_btrace_cmdlist,
3209 "show record btrace ", 0, &show_record_cmdlist);
3210
3211 add_setshow_enum_cmd ("replay-memory-access", no_class,
3212 replay_memory_access_types, &replay_memory_access, _("\
3213 Set what memory accesses are allowed during replay."), _("\
3214 Show what memory accesses are allowed during replay."),
3215 _("Default is READ-ONLY.\n\n\
3216 The btrace record target does not trace data.\n\
3217 The memory therefore corresponds to the live target and not \
3218 to the current replay position.\n\n\
3219 When READ-ONLY, allow accesses to read-only memory during replay.\n\
3220 When READ-WRITE, allow accesses to read-only and read-write memory during \
3221 replay."),
3222 NULL, cmd_show_replay_memory_access,
3223 &set_record_btrace_cmdlist,
3224 &show_record_btrace_cmdlist);
3225
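  /* Usage sketch (value spellings per replay_memory_access_types):
       (gdb) set record btrace replay-memory-access read-write  */
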
3226 add_prefix_cmd ("cpu", class_support, cmd_set_record_btrace_cpu,
3227 _("\
3228 Set the cpu to be used for trace decode.\n\n\
3229 The format is \"VENDOR:IDENTIFIER\" or \"none\" or \"auto\" (default).\n\
3230 For vendor \"intel\" the format is \"FAMILY/MODEL[/STEPPING]\".\n\n\
3231 When decoding branch trace, enable errata workarounds for the specified cpu.\n\
3232 The default is \"auto\", which uses the cpu on which the trace was recorded.\n\
3233 When GDB does not support that cpu, this option can be used to enable\n\
3234 workarounds for a similar cpu that GDB supports.\n\n\
3235 When set to \"none\", errata workarounds are disabled."),
3236 &set_record_btrace_cpu_cmdlist,
3237 _("set record btrace cpu "), 1,
3238 &set_record_btrace_cmdlist);
3239
3240 add_cmd ("auto", class_support, cmd_set_record_btrace_cpu_auto, _("\
3241 Automatically determine the cpu to be used for trace decode."),
3242 &set_record_btrace_cpu_cmdlist);
3243
3244 add_cmd ("none", class_support, cmd_set_record_btrace_cpu_none, _("\
3245 Do not enable errata workarounds for trace decode."),
3246 &set_record_btrace_cpu_cmdlist);
3247
3248 add_cmd ("cpu", class_support, cmd_show_record_btrace_cpu, _("\
3249 Show the cpu to be used for trace decode."),
3250 &show_record_btrace_cmdlist);
3251
3252 add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
3253 _("Set record btrace bts options"),
3254 &set_record_btrace_bts_cmdlist,
3255 "set record btrace bts ", 0, &set_record_btrace_cmdlist);
3256
3257 add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
3258 _("Show record btrace bts options"),
3259 &show_record_btrace_bts_cmdlist,
3260 "show record btrace bts ", 0, &show_record_btrace_cmdlist);
3261
3262 add_setshow_uinteger_cmd ("buffer-size", no_class,
3263 &record_btrace_conf.bts.size,
3264 _("Set the record/replay bts buffer size."),
3265 _("Show the record/replay bts buffer size."), _("\
3266 When starting recording, request a trace buffer of this size. \
3267 The actual buffer size may differ from the requested size. \
3268 Use \"info record\" to see the actual buffer size.\n\n\
3269 Bigger buffers allow longer recording but also take more time to process \
3270 the recorded execution trace.\n\n\
3271 The trace buffer size may not be changed while recording."), NULL,
3272 show_record_bts_buffer_size_value,
3273 &set_record_btrace_bts_cmdlist,
3274 &show_record_btrace_bts_cmdlist);
3275
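  /* For example, "set record btrace bts buffer-size 131072" requests a
     128 KiB buffer; "info record" shows the size actually granted.  */
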
3276 add_prefix_cmd ("pt", class_support, cmd_set_record_btrace_pt,
3277 _("Set record btrace pt options"),
3278 &set_record_btrace_pt_cmdlist,
3279 "set record btrace pt ", 0, &set_record_btrace_cmdlist);
3280
3281 add_prefix_cmd ("pt", class_support, cmd_show_record_btrace_pt,
3282 _("Show record btrace pt options"),
3283 &show_record_btrace_pt_cmdlist,
3284 "show record btrace pt ", 0, &show_record_btrace_cmdlist);
3285
3286 add_setshow_uinteger_cmd ("buffer-size", no_class,
3287 &record_btrace_conf.pt.size,
3288 _("Set the record/replay pt buffer size."),
3289 _("Show the record/replay pt buffer size."), _("\
3290 Bigger buffers allow longer recording but also take more time to process \
3291 the recorded execution.\n\
3292 The actual buffer size may differ from the requested size. Use \"info record\" \
3293 to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
3294 &set_record_btrace_pt_cmdlist,
3295 &show_record_btrace_pt_cmdlist);
3296
3297 add_target (record_btrace_target_info, record_btrace_target_open);
3298
3299 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
3300 xcalloc, xfree);
3301
3302 record_btrace_conf.bts.size = 64 * 1024;
3303 record_btrace_conf.pt.size = 16 * 1024;
3304 }