Convert struct target_ops to C++
gdb/record-btrace.c
1 /* Branch trace support for GDB, the GNU debugger.
2
3 Copyright (C) 2013-2018 Free Software Foundation, Inc.
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include "record.h"
24 #include "record-btrace.h"
25 #include "gdbthread.h"
26 #include "target.h"
27 #include "gdbcmd.h"
28 #include "disasm.h"
29 #include "observable.h"
30 #include "cli/cli-utils.h"
31 #include "source.h"
32 #include "ui-out.h"
33 #include "symtab.h"
34 #include "filenames.h"
35 #include "regcache.h"
36 #include "frame-unwind.h"
37 #include "hashtab.h"
38 #include "infrun.h"
39 #include "event-loop.h"
40 #include "inf-loop.h"
41 #include "vec.h"
42 #include <algorithm>
43
44 /* The target_ops of record-btrace. */
45
46 class record_btrace_target final : public target_ops
47 {
48 public:
49 record_btrace_target ()
50 { to_stratum = record_stratum; }
51
52 const char *shortname () override
53 { return "record-btrace"; }
54
55 const char *longname () override
56 { return _("Branch tracing target"); }
57
58 const char *doc () override
59 { return _("Collect control-flow trace and provide the execution history."); }
60
61 void open (const char *, int) override;
62 void close () override;
63 void async (int) override;
64
65 void detach (inferior *inf, int from_tty) override
66 { record_detach (this, inf, from_tty); }
67
68 void disconnect (const char *, int) override;
69
70 void mourn_inferior () override
71 { record_mourn_inferior (this); }
72
73 void kill () override
74 { record_kill (this); }
75
76 enum record_method record_method (ptid_t ptid) override;
77
78 void stop_recording () override;
79 void info_record () override;
80
81 void insn_history (int size, gdb_disassembly_flags flags) override;
82 void insn_history_from (ULONGEST from, int size,
83 gdb_disassembly_flags flags) override;
84 void insn_history_range (ULONGEST begin, ULONGEST end,
85 gdb_disassembly_flags flags) override;
86 void call_history (int size, record_print_flags flags) override;
87 void call_history_from (ULONGEST begin, int size, record_print_flags flags)
88 override;
89 void call_history_range (ULONGEST begin, ULONGEST end, record_print_flags flags)
90 override;
91
92 int record_is_replaying (ptid_t ptid) override;
93 int record_will_replay (ptid_t ptid, int dir) override;
94 void record_stop_replaying () override;
95
96 enum target_xfer_status xfer_partial (enum target_object object,
97 const char *annex,
98 gdb_byte *readbuf,
99 const gdb_byte *writebuf,
100 ULONGEST offset, ULONGEST len,
101 ULONGEST *xfered_len) override;
102
103 int insert_breakpoint (struct gdbarch *,
104 struct bp_target_info *) override;
105 int remove_breakpoint (struct gdbarch *, struct bp_target_info *,
106 enum remove_bp_reason) override;
107
108 void fetch_registers (struct regcache *, int) override;
109
110 void store_registers (struct regcache *, int) override;
111 void prepare_to_store (struct regcache *) override;
112
113 const struct frame_unwind *get_unwinder () override;
114
115 const struct frame_unwind *get_tailcall_unwinder () override;
116
117 void commit_resume () override;
118 void resume (ptid_t, int, enum gdb_signal) override;
119 ptid_t wait (ptid_t, struct target_waitstatus *, int) override;
120
121 void stop (ptid_t) override;
122 void update_thread_list () override;
123 int thread_alive (ptid_t ptid) override;
124 void goto_record_begin () override;
125 void goto_record_end () override;
126 void goto_record (ULONGEST insn) override;
127
128 int can_execute_reverse () override;
129
130 int stopped_by_sw_breakpoint () override;
131 int supports_stopped_by_sw_breakpoint () override;
132
133 int stopped_by_hw_breakpoint () override;
134 int supports_stopped_by_hw_breakpoint () override;
135
136 enum exec_direction_kind execution_direction () override;
137 void prepare_to_generate_core () override;
138 void done_generating_core () override;
139 };
140
141 static record_btrace_target record_btrace_ops;
142
145 /* Token associated with a new-thread observer enabling branch tracing
146 for the new thread. */
147 static const gdb::observers::token record_btrace_thread_observer_token;
148
149 /* Memory access types used in set/show record btrace replay-memory-access. */
150 static const char replay_memory_access_read_only[] = "read-only";
151 static const char replay_memory_access_read_write[] = "read-write";
152 static const char *const replay_memory_access_types[] =
153 {
154 replay_memory_access_read_only,
155 replay_memory_access_read_write,
156 NULL
157 };
158
159 /* The currently allowed replay memory access type. */
160 static const char *replay_memory_access = replay_memory_access_read_only;
161
162 /* The cpu state kinds. */
163 enum record_btrace_cpu_state_kind
164 {
165 CS_AUTO,
166 CS_NONE,
167 CS_CPU
168 };
169
170 /* The current cpu state. */
171 static enum record_btrace_cpu_state_kind record_btrace_cpu_state = CS_AUTO;
172
173 /* The current cpu for trace decode. */
174 static struct btrace_cpu record_btrace_cpu;
175
176 /* Command lists for "set/show record btrace". */
177 static struct cmd_list_element *set_record_btrace_cmdlist;
178 static struct cmd_list_element *show_record_btrace_cmdlist;
179
180 /* The execution direction of the last resume we got. See record-full.c. */
181 static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;
182
183 /* The async event handler for reverse/replay execution. */
184 static struct async_event_handler *record_btrace_async_inferior_event_handler;
185
186 /* A flag indicating that we are currently generating a core file. */
187 static int record_btrace_generating_corefile;
188
189 /* The current branch trace configuration. */
190 static struct btrace_config record_btrace_conf;
191
192 /* Command list for "record btrace". */
193 static struct cmd_list_element *record_btrace_cmdlist;
194
195 /* Command lists for "set/show record btrace bts". */
196 static struct cmd_list_element *set_record_btrace_bts_cmdlist;
197 static struct cmd_list_element *show_record_btrace_bts_cmdlist;
198
199 /* Command lists for "set/show record btrace pt". */
200 static struct cmd_list_element *set_record_btrace_pt_cmdlist;
201 static struct cmd_list_element *show_record_btrace_pt_cmdlist;
202
203 /* Command list for "set record btrace cpu". */
204 static struct cmd_list_element *set_record_btrace_cpu_cmdlist;
205
206 /* Print a record-btrace debug message. Use do ... while (0) to avoid
207 ambiguities when used in if statements. */
208
209 #define DEBUG(msg, args...) \
210 do \
211 { \
212 if (record_debug != 0) \
213 fprintf_unfiltered (gdb_stdlog, \
214 "[record-btrace] " msg "\n", ##args); \
215 } \
216 while (0)
217
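/* Editorial sketch (standalone, not part of GDB): why the do ... while (0)
   wrapper in DEBUG above matters.  A brace-only block leaves a trailing ';'
   that terminates an enclosing if, orphaning its else; the do/while form
   swallows exactly one semicolon and behaves like a single statement.  */

#include <stdio.h>

#define LOG_BRACES(msg) { printf ("log: "); printf ("%s\n", msg); }
#define LOG_DOWHILE(msg) \
  do { printf ("log: "); printf ("%s\n", msg); } while (0)

static void
demo (int ok)
{
  if (ok)
    LOG_DOWHILE ("ok");        /* Expands to a single statement.  */
  else
    LOG_DOWHILE ("not ok");    /* With LOG_BRACES, the ';' after the block
                                  would already have ended the if, so this
                                  else would not compile.  */
}

int
main (void)
{
  demo (1);
  return 0;
}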
218
219 /* Return the cpu configured by the user. Returns NULL if the cpu was
220 configured as auto. */
221 const struct btrace_cpu *
222 record_btrace_get_cpu (void)
223 {
224 switch (record_btrace_cpu_state)
225 {
226 case CS_AUTO:
227 return nullptr;
228
229 case CS_NONE:
230 record_btrace_cpu.vendor = CV_UNKNOWN;
231 /* Fall through. */
232 case CS_CPU:
233 return &record_btrace_cpu;
234 }
235
236 error (_("Internal error: bad record btrace cpu state."));
237 }
238
239 /* Update the branch trace for the current thread and return a pointer to its
240 thread_info.
241
242 Throws an error if there is no thread or no trace. This function never
243 returns NULL. */
244
245 static struct thread_info *
246 require_btrace_thread (void)
247 {
248 struct thread_info *tp;
249
250 DEBUG ("require");
251
252 tp = find_thread_ptid (inferior_ptid);
253 if (tp == NULL)
254 error (_("No thread."));
255
256 validate_registers_access ();
257
258 btrace_fetch (tp, record_btrace_get_cpu ());
259
260 if (btrace_is_empty (tp))
261 error (_("No trace."));
262
263 return tp;
264 }
265
266 /* Update the branch trace for the current thread and return a pointer to its
267 branch trace information struct.
268
269 Throws an error if there is no thread or no trace. This function never
270 returns NULL. */
271
272 static struct btrace_thread_info *
273 require_btrace (void)
274 {
275 struct thread_info *tp;
276
277 tp = require_btrace_thread ();
278
279 return &tp->btrace;
280 }
281
282 /* Enable branch tracing for one thread. Warn on errors. */
283
284 static void
285 record_btrace_enable_warn (struct thread_info *tp)
286 {
287 TRY
288 {
289 btrace_enable (tp, &record_btrace_conf);
290 }
291 CATCH (error, RETURN_MASK_ERROR)
292 {
293 warning ("%s", error.message);
294 }
295 END_CATCH
296 }
297
298 /* Enable automatic tracing of new threads. */
299
300 static void
301 record_btrace_auto_enable (void)
302 {
303 DEBUG ("attach thread observer");
304
305 gdb::observers::new_thread.attach (record_btrace_enable_warn,
306 record_btrace_thread_observer_token);
307 }
308
309 /* Disable automatic tracing of new threads. */
310
311 static void
312 record_btrace_auto_disable (void)
313 {
314 DEBUG ("detach thread observer");
315
316 gdb::observers::new_thread.detach (record_btrace_thread_observer_token);
317 }
318
319 /* The record-btrace async event handler function. */
320
321 static void
322 record_btrace_handle_async_inferior_event (gdb_client_data data)
323 {
324 inferior_event_handler (INF_REG_EVENT, NULL);
325 }
326
327 /* See record-btrace.h. */
328
329 void
330 record_btrace_push_target (void)
331 {
332 const char *format;
333
334 record_btrace_auto_enable ();
335
336 push_target (&record_btrace_ops);
337
338 record_btrace_async_inferior_event_handler
339 = create_async_event_handler (record_btrace_handle_async_inferior_event,
340 NULL);
341 record_btrace_generating_corefile = 0;
342
343 format = btrace_format_short_string (record_btrace_conf.format);
344 gdb::observers::record_changed.notify (current_inferior (), 1, "btrace", format);
345 }
346
347 /* Disable btrace on a set of threads on scope exit. */
348
349 struct scoped_btrace_disable
350 {
351 scoped_btrace_disable () = default;
352
353 DISABLE_COPY_AND_ASSIGN (scoped_btrace_disable);
354
355 ~scoped_btrace_disable ()
356 {
357 for (thread_info *tp : m_threads)
358 btrace_disable (tp);
359 }
360
361 void add_thread (thread_info *thread)
362 {
363 m_threads.push_front (thread);
364 }
365
366 void discard ()
367 {
368 m_threads.clear ();
369 }
370
371 private:
372 std::forward_list<thread_info *> m_threads;
373 };
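
/* Editorial sketch (standalone, hypothetical names): the commit/rollback
   idiom behind scoped_btrace_disable, reduced to its core.  Undo actions
   run on every exit path - including exceptions - unless discard () is
   called once the whole sequence has succeeded.  */

#include <forward_list>
#include <functional>
#include <utility>

class scoped_rollback
{
public:
  scoped_rollback () = default;
  scoped_rollback (const scoped_rollback &) = delete;
  scoped_rollback &operator= (const scoped_rollback &) = delete;

  ~scoped_rollback ()
  {
    /* push_front means undo actions run in reverse order of add ().  */
    for (auto &undo : m_undo)
      undo ();
  }

  void add (std::function<void ()> undo)
  { m_undo.push_front (std::move (undo)); }

  void discard ()
  { m_undo.clear (); }

private:
  std::forward_list<std::function<void ()>> m_undo;
};

/* Usage mirrors record_btrace_target::open below: enable tracing thread by
   thread, register an undo action for each success, and discard the undo
   list only after every enable succeeded.  */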
374
375 /* The open method of target record-btrace. */
376
377 void
378 record_btrace_target::open (const char *args, int from_tty)
379 {
380 /* If we fail to enable btrace for one thread, disable it for the threads for
381 which it was successfully enabled. */
382 scoped_btrace_disable btrace_disable;
383 struct thread_info *tp;
384
385 DEBUG ("open");
386
387 record_preopen ();
388
389 if (!target_has_execution)
390 error (_("The program is not being run."));
391
392 ALL_NON_EXITED_THREADS (tp)
393 if (args == NULL || *args == 0 || number_is_in_list (args, tp->global_num))
394 {
395 btrace_enable (tp, &record_btrace_conf);
396
397 btrace_disable.add_thread (tp);
398 }
399
400 record_btrace_push_target ();
401
402 btrace_disable.discard ();
403 }
404
405 /* The stop_recording method of target record-btrace. */
406
407 void
408 record_btrace_target::stop_recording ()
409 {
410 struct thread_info *tp;
411
412 DEBUG ("stop recording");
413
414 record_btrace_auto_disable ();
415
416 ALL_NON_EXITED_THREADS (tp)
417 if (tp->btrace.target != NULL)
418 btrace_disable (tp);
419 }
420
421 /* The disconnect method of target record-btrace. */
422
423 void
424 record_btrace_target::disconnect (const char *args,
425 int from_tty)
426 {
427 struct target_ops *beneath = this->beneath;
428
429 /* Do not stop recording, just clean up GDB side. */
430 unpush_target (this);
431
432 /* Forward disconnect. */
433 beneath->disconnect (args, from_tty);
434 }
435
436 /* The close method of target record-btrace. */
437
438 void
439 record_btrace_target::close ()
440 {
441 struct thread_info *tp;
442
443 if (record_btrace_async_inferior_event_handler != NULL)
444 delete_async_event_handler (&record_btrace_async_inferior_event_handler);
445
446 /* Make sure automatic recording gets disabled even if we did not stop
447 recording before closing the record-btrace target. */
448 record_btrace_auto_disable ();
449
450 /* We should have already stopped recording.
451 Tear down btrace in case we have not. */
452 ALL_NON_EXITED_THREADS (tp)
453 btrace_teardown (tp);
454 }
455
456 /* The async method of target record-btrace. */
457
458 void
459 record_btrace_target::async (int enable)
460 {
461 if (enable)
462 mark_async_event_handler (record_btrace_async_inferior_event_handler);
463 else
464 clear_async_event_handler (record_btrace_async_inferior_event_handler);
465
466 this->beneath->async (enable);
467 }
468
469 /* Adjusts the size and returns a human-readable size suffix. */
470
471 static const char *
472 record_btrace_adjust_size (unsigned int *size)
473 {
474 unsigned int sz;
475
476 sz = *size;
477
478 if ((sz & ((1u << 30) - 1)) == 0)
479 {
480 *size = sz >> 30;
481 return "GB";
482 }
483 else if ((sz & ((1u << 20) - 1)) == 0)
484 {
485 *size = sz >> 20;
486 return "MB";
487 }
488 else if ((sz & ((1u << 10) - 1)) == 0)
489 {
490 *size = sz >> 10;
491 return "kB";
492 }
493 else
494 return "";
495 }
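
/* Editorial sketch: sample values for the helper above.  A suffix is only
   produced when SIZE is an exact multiple of the unit, so the printed
   number never loses precision.  This demo function is illustrative and
   not called anywhere.  */

static void
record_btrace_adjust_size_demo (void)
{
  unsigned int size;
  const char *suffix;

  size = 2u << 20;                             /* 2097152 */
  suffix = record_btrace_adjust_size (&size);  /* size == 2, suffix == "MB" */

  size = 1024;
  suffix = record_btrace_adjust_size (&size);  /* size == 1, suffix == "kB" */

  size = 1536;
  suffix = record_btrace_adjust_size (&size);  /* size == 1536 (unchanged),
                                                  suffix == "": 1536 is not
                                                  an exact multiple of
                                                  1024.  */
  (void) suffix;
}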
496
497 /* Print a BTS configuration. */
498
499 static void
500 record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
501 {
502 const char *suffix;
503 unsigned int size;
504
505 size = conf->size;
506 if (size > 0)
507 {
508 suffix = record_btrace_adjust_size (&size);
509 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
510 }
511 }
512
513 /* Print an Intel Processor Trace configuration. */
514
515 static void
516 record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
517 {
518 const char *suffix;
519 unsigned int size;
520
521 size = conf->size;
522 if (size > 0)
523 {
524 suffix = record_btrace_adjust_size (&size);
525 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
526 }
527 }
528
529 /* Print a branch tracing configuration. */
530
531 static void
532 record_btrace_print_conf (const struct btrace_config *conf)
533 {
534 printf_unfiltered (_("Recording format: %s.\n"),
535 btrace_format_string (conf->format));
536
537 switch (conf->format)
538 {
539 case BTRACE_FORMAT_NONE:
540 return;
541
542 case BTRACE_FORMAT_BTS:
543 record_btrace_print_bts_conf (&conf->bts);
544 return;
545
546 case BTRACE_FORMAT_PT:
547 record_btrace_print_pt_conf (&conf->pt);
548 return;
549 }
550
551 internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
552 }
553
554 /* The info_record method of target record-btrace. */
555
556 void
557 record_btrace_target::info_record ()
558 {
559 struct btrace_thread_info *btinfo;
560 const struct btrace_config *conf;
561 struct thread_info *tp;
562 unsigned int insns, calls, gaps;
563
564 DEBUG ("info");
565
566 tp = find_thread_ptid (inferior_ptid);
567 if (tp == NULL)
568 error (_("No thread."));
569
570 validate_registers_access ();
571
572 btinfo = &tp->btrace;
573
574 conf = ::btrace_conf (btinfo);
575 if (conf != NULL)
576 record_btrace_print_conf (conf);
577
578 btrace_fetch (tp, record_btrace_get_cpu ());
579
580 insns = 0;
581 calls = 0;
582 gaps = 0;
583
584 if (!btrace_is_empty (tp))
585 {
586 struct btrace_call_iterator call;
587 struct btrace_insn_iterator insn;
588
589 btrace_call_end (&call, btinfo);
590 btrace_call_prev (&call, 1);
591 calls = btrace_call_number (&call);
592
593 btrace_insn_end (&insn, btinfo);
594 insns = btrace_insn_number (&insn);
595
596 /* If the last instruction is not a gap, it is the current instruction
597 that is not actually part of the record. */
598 if (btrace_insn_get (&insn) != NULL)
599 insns -= 1;
600
601 gaps = btinfo->ngaps;
602 }
603
604 printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
605 "for thread %s (%s).\n"), insns, calls, gaps,
606 print_thread_id (tp), target_pid_to_str (tp->ptid));
607
608 if (btrace_is_replaying (tp))
609 printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
610 btrace_insn_number (btinfo->replay));
611 }
612
613 /* Print a decode error. */
614
615 static void
616 btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
617 enum btrace_format format)
618 {
619 const char *errstr = btrace_decode_error (format, errcode);
620
621 uiout->text (_("["));
622 /* ERRCODE > 0 indicates notifications on BTRACE_FORMAT_PT. */
623 if (!(format == BTRACE_FORMAT_PT && errcode > 0))
624 {
625 uiout->text (_("decode error ("));
626 uiout->field_int ("errcode", errcode);
627 uiout->text (_("): "));
628 }
629 uiout->text (errstr);
630 uiout->text (_("]\n"));
631 }
632
633 /* Print an unsigned int. */
634
635 static void
636 ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
637 {
638 uiout->field_fmt (fld, "%u", val);
639 }
640
641 /* A range of source lines. */
642
643 struct btrace_line_range
644 {
645 /* The symtab this line is from. */
646 struct symtab *symtab;
647
648 /* The first line (inclusive). */
649 int begin;
650
651 /* The last line (exclusive). */
652 int end;
653 };
654
655 /* Construct a line range. */
656
657 static struct btrace_line_range
658 btrace_mk_line_range (struct symtab *symtab, int begin, int end)
659 {
660 struct btrace_line_range range;
661
662 range.symtab = symtab;
663 range.begin = begin;
664 range.end = end;
665
666 return range;
667 }
668
669 /* Add a line to a line range. */
670
671 static struct btrace_line_range
672 btrace_line_range_add (struct btrace_line_range range, int line)
673 {
674 if (range.end <= range.begin)
675 {
676 /* This is the first entry. */
677 range.begin = line;
678 range.end = line + 1;
679 }
680 else if (line < range.begin)
681 range.begin = line;
682 else if (range.end <= line)
683 range.end = line + 1;
684
685 return range;
686 }
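
/* Editorial sketch: BEGIN is inclusive and END is exclusive, so a range
   holding only line 7 is [7, 8).  Growing the range upwards must preserve
   that invariant, which is why END becomes LINE + 1 above.  This demo
   function is illustrative and not called anywhere.  */

static void
btrace_line_range_demo (void)
{
  struct btrace_line_range range = btrace_mk_line_range (NULL, 0, 0);

  range = btrace_line_range_add (range, 7);   /* [7, 8)  */
  range = btrace_line_range_add (range, 5);   /* [5, 8)  */
  range = btrace_line_range_add (range, 9);   /* [5, 10): line 9 is included
                                                 because END is exclusive.  */
  (void) range;
}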
687
688 /* Return non-zero if RANGE is empty, zero otherwise. */
689
690 static int
691 btrace_line_range_is_empty (struct btrace_line_range range)
692 {
693 return range.end <= range.begin;
694 }
695
696 /* Return non-zero if LHS contains RHS, zero otherwise. */
697
698 static int
699 btrace_line_range_contains_range (struct btrace_line_range lhs,
700 struct btrace_line_range rhs)
701 {
702 return ((lhs.symtab == rhs.symtab)
703 && (lhs.begin <= rhs.begin)
704 && (rhs.end <= lhs.end));
705 }
706
707 /* Find the line range associated with PC. */
708
709 static struct btrace_line_range
710 btrace_find_line_range (CORE_ADDR pc)
711 {
712 struct btrace_line_range range;
713 struct linetable_entry *lines;
714 struct linetable *ltable;
715 struct symtab *symtab;
716 int nlines, i;
717
718 symtab = find_pc_line_symtab (pc);
719 if (symtab == NULL)
720 return btrace_mk_line_range (NULL, 0, 0);
721
722 ltable = SYMTAB_LINETABLE (symtab);
723 if (ltable == NULL)
724 return btrace_mk_line_range (symtab, 0, 0);
725
726 nlines = ltable->nitems;
727 lines = ltable->item;
728 if (nlines <= 0)
729 return btrace_mk_line_range (symtab, 0, 0);
730
731 range = btrace_mk_line_range (symtab, 0, 0);
732 for (i = 0; i < nlines; i++)
733 {
734 if ((lines[i].pc == pc) && (lines[i].line != 0))
735 range = btrace_line_range_add (range, lines[i].line);
736 }
737
738 return range;
739 }
740
741 /* Print source lines in LINES to UIOUT.
742
743 UI_ITEM_CHAIN is a cleanup chain for the last source line and the
744 instructions corresponding to that source line. When printing a new source
745 line, we do the cleanups for the open chain and open a new cleanup chain for
746 the new source line. If the source line range in LINES is not empty, this
747 function will leave the cleanup chain for the last printed source line open
748 so instructions can be added to it. */
749
750 static void
751 btrace_print_lines (struct btrace_line_range lines, struct ui_out *uiout,
752 gdb::optional<ui_out_emit_tuple> *src_and_asm_tuple,
753 gdb::optional<ui_out_emit_list> *asm_list,
754 gdb_disassembly_flags flags)
755 {
756 print_source_lines_flags psl_flags;
757
758 if (flags & DISASSEMBLY_FILENAME)
759 psl_flags |= PRINT_SOURCE_LINES_FILENAME;
760
761 for (int line = lines.begin; line < lines.end; ++line)
762 {
763 asm_list->reset ();
764
765 src_and_asm_tuple->emplace (uiout, "src_and_asm_line");
766
767 print_source_lines (lines.symtab, line, line + 1, psl_flags);
768
769 asm_list->emplace (uiout, "line_asm_insn");
770 }
771 }
772
773 /* Disassemble a section of the recorded instruction trace. */
774
775 static void
776 btrace_insn_history (struct ui_out *uiout,
777 const struct btrace_thread_info *btinfo,
778 const struct btrace_insn_iterator *begin,
779 const struct btrace_insn_iterator *end,
780 gdb_disassembly_flags flags)
781 {
782 DEBUG ("itrace (0x%x): [%u; %u)", (unsigned) flags,
783 btrace_insn_number (begin), btrace_insn_number (end));
784
785 flags |= DISASSEMBLY_SPECULATIVE;
786
787 struct gdbarch *gdbarch = target_gdbarch ();
788 btrace_line_range last_lines = btrace_mk_line_range (NULL, 0, 0);
789
790 ui_out_emit_list list_emitter (uiout, "asm_insns");
791
792 gdb::optional<ui_out_emit_tuple> src_and_asm_tuple;
793 gdb::optional<ui_out_emit_list> asm_list;
794
795 gdb_pretty_print_disassembler disasm (gdbarch);
796
797 for (btrace_insn_iterator it = *begin; btrace_insn_cmp (&it, end) != 0;
798 btrace_insn_next (&it, 1))
799 {
800 const struct btrace_insn *insn;
801
802 insn = btrace_insn_get (&it);
803
804 /* A NULL instruction indicates a gap in the trace. */
805 if (insn == NULL)
806 {
807 const struct btrace_config *conf;
808
809 conf = btrace_conf (btinfo);
810
811 /* We have trace so we must have a configuration. */
812 gdb_assert (conf != NULL);
813
814 uiout->field_fmt ("insn-number", "%u",
815 btrace_insn_number (&it));
816 uiout->text ("\t");
817
818 btrace_ui_out_decode_error (uiout, btrace_insn_get_error (&it),
819 conf->format);
820 }
821 else
822 {
823 struct disasm_insn dinsn;
824
825 if ((flags & DISASSEMBLY_SOURCE) != 0)
826 {
827 struct btrace_line_range lines;
828
829 lines = btrace_find_line_range (insn->pc);
830 if (!btrace_line_range_is_empty (lines)
831 && !btrace_line_range_contains_range (last_lines, lines))
832 {
833 btrace_print_lines (lines, uiout, &src_and_asm_tuple, &asm_list,
834 flags);
835 last_lines = lines;
836 }
837 else if (!src_and_asm_tuple.has_value ())
838 {
839 gdb_assert (!asm_list.has_value ());
840
841 src_and_asm_tuple.emplace (uiout, "src_and_asm_line");
842
843 /* No source information. */
844 asm_list.emplace (uiout, "line_asm_insn");
845 }
846
847 gdb_assert (src_and_asm_tuple.has_value ());
848 gdb_assert (asm_list.has_value ());
849 }
850
851 memset (&dinsn, 0, sizeof (dinsn));
852 dinsn.number = btrace_insn_number (&it);
853 dinsn.addr = insn->pc;
854
855 if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
856 dinsn.is_speculative = 1;
857
858 disasm.pretty_print_insn (uiout, &dinsn, flags);
859 }
860 }
861 }
862
863 /* The insn_history method of target record-btrace. */
864
865 void
866 record_btrace_target::insn_history (int size, gdb_disassembly_flags flags)
867 {
868 struct btrace_thread_info *btinfo;
869 struct btrace_insn_history *history;
870 struct btrace_insn_iterator begin, end;
871 struct ui_out *uiout;
872 unsigned int context, covered;
873
874 uiout = current_uiout;
875 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
876 context = abs (size);
877 if (context == 0)
878 error (_("Bad record instruction-history-size."));
879
880 btinfo = require_btrace ();
881 history = btinfo->insn_history;
882 if (history == NULL)
883 {
884 struct btrace_insn_iterator *replay;
885
886 DEBUG ("insn-history (0x%x): %d", (unsigned) flags, size);
887
888 /* If we're replaying, we start at the replay position. Otherwise, we
889 start at the tail of the trace. */
890 replay = btinfo->replay;
891 if (replay != NULL)
892 begin = *replay;
893 else
894 btrace_insn_end (&begin, btinfo);
895
896 /* We start from here and expand in the requested direction. Then we
897 expand in the other direction, as well, to fill up any remaining
898 context. */
899 end = begin;
900 if (size < 0)
901 {
902 /* We want the current position covered, as well. */
903 covered = btrace_insn_next (&end, 1);
904 covered += btrace_insn_prev (&begin, context - covered);
905 covered += btrace_insn_next (&end, context - covered);
906 }
907 else
908 {
909 covered = btrace_insn_next (&end, context);
910 covered += btrace_insn_prev (&begin, context - covered);
911 }
912 }
913 else
914 {
915 begin = history->begin;
916 end = history->end;
917
918 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", (unsigned) flags, size,
919 btrace_insn_number (&begin), btrace_insn_number (&end));
920
921 if (size < 0)
922 {
923 end = begin;
924 covered = btrace_insn_prev (&begin, context);
925 }
926 else
927 {
928 begin = end;
929 covered = btrace_insn_next (&end, context);
930 }
931 }
932
933 if (covered > 0)
934 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
935 else
936 {
937 if (size < 0)
938 printf_unfiltered (_("At the start of the branch trace record.\n"));
939 else
940 printf_unfiltered (_("At the end of the branch trace record.\n"));
941 }
942
943 btrace_set_insn_history (btinfo, &begin, &end);
944 }
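
/* Editorial sketch (standalone): the "expand, then backfill" window logic
   above, modeled on a plain index range [0, 100).  step_fwd and step_bwd
   have the same contract as btrace_insn_next and btrace_insn_prev: move by
   at most N steps and return the number of steps actually taken.  */

#include <stdio.h>

static unsigned int
step_fwd (unsigned int *it, unsigned int n, unsigned int limit)
{
  if (n > limit - *it)
    n = limit - *it;
  *it += n;
  return n;
}

static unsigned int
step_bwd (unsigned int *it, unsigned int n)
{
  if (n > *it)
    n = *it;
  *it -= n;
  return n;
}

int
main (void)
{
  /* 100 instructions, current position 3, "record instruction-history -10":
     near the start of the trace the backwards expansion comes up short, so
     the remainder is backfilled in the forward direction.  */
  unsigned int begin = 3, end = 3, context = 10, covered;

  covered = step_fwd (&end, 1, 100);                   /* end = 4, +1 */
  covered += step_bwd (&begin, context - covered);     /* begin = 0, +3 */
  covered += step_fwd (&end, context - covered, 100);  /* end = 10, +6 */

  printf ("[%u, %u) covered %u\n", begin, end, covered);  /* [0, 10) 10 */
  return 0;
}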
945
946 /* The insn_history_range method of target record-btrace. */
947
948 void
949 record_btrace_target::insn_history_range (ULONGEST from, ULONGEST to,
950 gdb_disassembly_flags flags)
951 {
952 struct btrace_thread_info *btinfo;
953 struct btrace_insn_iterator begin, end;
954 struct ui_out *uiout;
955 unsigned int low, high;
956 int found;
957
958 uiout = current_uiout;
959 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
960 low = from;
961 high = to;
962
963 DEBUG ("insn-history (0x%x): [%u; %u)", (unsigned) flags, low, high);
964
965 /* Check for wrap-arounds. */
966 if (low != from || high != to)
967 error (_("Bad range."));
968
969 if (high < low)
970 error (_("Bad range."));
971
972 btinfo = require_btrace ();
973
974 found = btrace_find_insn_by_number (&begin, btinfo, low);
975 if (found == 0)
976 error (_("Range out of bounds."));
977
978 found = btrace_find_insn_by_number (&end, btinfo, high);
979 if (found == 0)
980 {
981 /* Silently truncate the range. */
982 btrace_insn_end (&end, btinfo);
983 }
984 else
985 {
986 /* We want both begin and end to be inclusive. */
987 btrace_insn_next (&end, 1);
988 }
989
990 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
991 btrace_set_insn_history (btinfo, &begin, &end);
992 }
993
994 /* The insn_history_from method of target record-btrace. */
995
996 void
997 record_btrace_target::insn_history_from (ULONGEST from, int size,
998 gdb_disassembly_flags flags)
999 {
1000 ULONGEST begin, end, context;
1001
1002 context = abs (size);
1003 if (context == 0)
1004 error (_("Bad record instruction-history-size."));
1005
1006 if (size < 0)
1007 {
1008 end = from;
1009
1010 if (from < context)
1011 begin = 0;
1012 else
1013 begin = from - context + 1;
1014 }
1015 else
1016 {
1017 begin = from;
1018 end = from + context - 1;
1019
1020 /* Check for wrap-around. */
1021 if (end < begin)
1022 end = ULONGEST_MAX;
1023 }
1024
1025 insn_history_range (begin, end, flags);
1026 }
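
/* Editorial sketch (standalone): the clamped window computed above, worked
   through on small numbers.  For SIZE < 0 the window ends at FROM and
   extends backwards; for SIZE > 0 it starts at FROM.  Both directions clamp
   instead of wrapping.  */

#include <stdio.h>

typedef unsigned long long ull;

static void
window (ull from, long long size, ull *begin, ull *end)
{
  ull context = size < 0 ? -size : size;

  if (size < 0)
    {
      *end = from;
      *begin = from < context ? 0 : from - context + 1;
    }
  else
    {
      *begin = from;
      *end = from + context - 1;
      if (*end < *begin)    /* Wrapped around?  Clamp to the maximum.  */
        *end = ~0ull;
    }
}

int
main (void)
{
  ull begin, end;

  window (10, -4, &begin, &end);   /* [7, 10]: four insns ending at 10.  */
  printf ("[%llu, %llu]\n", begin, end);

  window (2, -4, &begin, &end);    /* [0, 2]: clamped at the start.  */
  printf ("[%llu, %llu]\n", begin, end);
  return 0;
}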
1027
1028 /* Print the instruction number range for a function call history line. */
1029
1030 static void
1031 btrace_call_history_insn_range (struct ui_out *uiout,
1032 const struct btrace_function *bfun)
1033 {
1034 unsigned int begin, end, size;
1035
1036 size = bfun->insn.size ();
1037 gdb_assert (size > 0);
1038
1039 begin = bfun->insn_offset;
1040 end = begin + size - 1;
1041
1042 ui_out_field_uint (uiout, "insn begin", begin);
1043 uiout->text (",");
1044 ui_out_field_uint (uiout, "insn end", end);
1045 }
1046
1047 /* Compute the lowest and highest source line for the instructions in BFUN
1048 and return them in PBEGIN and PEND.
1049 Ignore instructions that can't be mapped to BFUN, e.g. instructions that
1050 result from inlining or macro expansion. */
1051
1052 static void
1053 btrace_compute_src_line_range (const struct btrace_function *bfun,
1054 int *pbegin, int *pend)
1055 {
1056 struct symtab *symtab;
1057 struct symbol *sym;
1058 int begin, end;
1059
1060 begin = INT_MAX;
1061 end = INT_MIN;
1062
1063 sym = bfun->sym;
1064 if (sym == NULL)
1065 goto out;
1066
1067 symtab = symbol_symtab (sym);
1068
1069 for (const btrace_insn &insn : bfun->insn)
1070 {
1071 struct symtab_and_line sal;
1072
1073 sal = find_pc_line (insn.pc, 0);
1074 if (sal.symtab != symtab || sal.line == 0)
1075 continue;
1076
1077 begin = std::min (begin, sal.line);
1078 end = std::max (end, sal.line);
1079 }
1080
1081 out:
1082 *pbegin = begin;
1083 *pend = end;
1084 }
1085
1086 /* Print the source line information for a function call history line. */
1087
1088 static void
1089 btrace_call_history_src_line (struct ui_out *uiout,
1090 const struct btrace_function *bfun)
1091 {
1092 struct symbol *sym;
1093 int begin, end;
1094
1095 sym = bfun->sym;
1096 if (sym == NULL)
1097 return;
1098
1099 uiout->field_string ("file",
1100 symtab_to_filename_for_display (symbol_symtab (sym)));
1101
1102 btrace_compute_src_line_range (bfun, &begin, &end);
1103 if (end < begin)
1104 return;
1105
1106 uiout->text (":");
1107 uiout->field_int ("min line", begin);
1108
1109 if (end == begin)
1110 return;
1111
1112 uiout->text (",");
1113 uiout->field_int ("max line", end);
1114 }
1115
1116 /* Get the name of a branch trace function. */
1117
1118 static const char *
1119 btrace_get_bfun_name (const struct btrace_function *bfun)
1120 {
1121 struct minimal_symbol *msym;
1122 struct symbol *sym;
1123
1124 if (bfun == NULL)
1125 return "??";
1126
1127 msym = bfun->msym;
1128 sym = bfun->sym;
1129
1130 if (sym != NULL)
1131 return SYMBOL_PRINT_NAME (sym);
1132 else if (msym != NULL)
1133 return MSYMBOL_PRINT_NAME (msym);
1134 else
1135 return "??";
1136 }
1137
1138 /* Disassemble a section of the recorded function trace. */
1139
1140 static void
1141 btrace_call_history (struct ui_out *uiout,
1142 const struct btrace_thread_info *btinfo,
1143 const struct btrace_call_iterator *begin,
1144 const struct btrace_call_iterator *end,
1145 int int_flags)
1146 {
1147 struct btrace_call_iterator it;
1148 record_print_flags flags = (enum record_print_flag) int_flags;
1149
1150 DEBUG ("ftrace (0x%x): [%u; %u)", int_flags, btrace_call_number (begin),
1151 btrace_call_number (end));
1152
1153 for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
1154 {
1155 const struct btrace_function *bfun;
1156 struct minimal_symbol *msym;
1157 struct symbol *sym;
1158
1159 bfun = btrace_call_get (&it);
1160 sym = bfun->sym;
1161 msym = bfun->msym;
1162
1163 /* Print the function index. */
1164 ui_out_field_uint (uiout, "index", bfun->number);
1165 uiout->text ("\t");
1166
1167 /* Indicate gaps in the trace. */
1168 if (bfun->errcode != 0)
1169 {
1170 const struct btrace_config *conf;
1171
1172 conf = btrace_conf (btinfo);
1173
1174 /* We have trace so we must have a configuration. */
1175 gdb_assert (conf != NULL);
1176
1177 btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);
1178
1179 continue;
1180 }
1181
1182 if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
1183 {
1184 int level = bfun->level + btinfo->level, i;
1185
1186 for (i = 0; i < level; ++i)
1187 uiout->text (" ");
1188 }
1189
1190 if (sym != NULL)
1191 uiout->field_string ("function", SYMBOL_PRINT_NAME (sym));
1192 else if (msym != NULL)
1193 uiout->field_string ("function", MSYMBOL_PRINT_NAME (msym));
1194 else if (!uiout->is_mi_like_p ())
1195 uiout->field_string ("function", "??");
1196
1197 if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
1198 {
1199 uiout->text (_("\tinst "));
1200 btrace_call_history_insn_range (uiout, bfun);
1201 }
1202
1203 if ((flags & RECORD_PRINT_SRC_LINE) != 0)
1204 {
1205 uiout->text (_("\tat "));
1206 btrace_call_history_src_line (uiout, bfun);
1207 }
1208
1209 uiout->text ("\n");
1210 }
1211 }
1212
1213 /* The call_history method of target record-btrace. */
1214
1215 void
1216 record_btrace_target::call_history (int size, record_print_flags flags)
1217 {
1218 struct btrace_thread_info *btinfo;
1219 struct btrace_call_history *history;
1220 struct btrace_call_iterator begin, end;
1221 struct ui_out *uiout;
1222 unsigned int context, covered;
1223
1224 uiout = current_uiout;
1225 ui_out_emit_tuple tuple_emitter (uiout, "func history");
1226 context = abs (size);
1227 if (context == 0)
1228 error (_("Bad record function-call-history-size."));
1229
1230 btinfo = require_btrace ();
1231 history = btinfo->call_history;
1232 if (history == NULL)
1233 {
1234 struct btrace_insn_iterator *replay;
1235
1236 DEBUG ("call-history (0x%x): %d", (int) flags, size);
1237
1238 /* If we're replaying, we start at the replay position. Otherwise, we
1239 start at the tail of the trace. */
1240 replay = btinfo->replay;
1241 if (replay != NULL)
1242 {
1243 begin.btinfo = btinfo;
1244 begin.index = replay->call_index;
1245 }
1246 else
1247 btrace_call_end (&begin, btinfo);
1248
1249 /* We start from here and expand in the requested direction. Then we
1250 expand in the other direction, as well, to fill up any remaining
1251 context. */
1252 end = begin;
1253 if (size < 0)
1254 {
1255 /* We want the current position covered, as well. */
1256 covered = btrace_call_next (&end, 1);
1257 covered += btrace_call_prev (&begin, context - covered);
1258 covered += btrace_call_next (&end, context - covered);
1259 }
1260 else
1261 {
1262 covered = btrace_call_next (&end, context);
1263 covered += btrace_call_prev (&begin, context - covered);
1264 }
1265 }
1266 else
1267 {
1268 begin = history->begin;
1269 end = history->end;
1270
1271 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", (int) flags, size,
1272 btrace_call_number (&begin), btrace_call_number (&end));
1273
1274 if (size < 0)
1275 {
1276 end = begin;
1277 covered = btrace_call_prev (&begin, context);
1278 }
1279 else
1280 {
1281 begin = end;
1282 covered = btrace_call_next (&end, context);
1283 }
1284 }
1285
1286 if (covered > 0)
1287 btrace_call_history (uiout, btinfo, &begin, &end, flags);
1288 else
1289 {
1290 if (size < 0)
1291 printf_unfiltered (_("At the start of the branch trace record.\n"));
1292 else
1293 printf_unfiltered (_("At the end of the branch trace record.\n"));
1294 }
1295
1296 btrace_set_call_history (btinfo, &begin, &end);
1297 }
1298
1299 /* The call_history_range method of target record-btrace. */
1300
1301 void
1302 record_btrace_target::call_history_range (ULONGEST from, ULONGEST to,
1303 record_print_flags flags)
1304 {
1305 struct btrace_thread_info *btinfo;
1306 struct btrace_call_iterator begin, end;
1307 struct ui_out *uiout;
1308 unsigned int low, high;
1309 int found;
1310
1311 uiout = current_uiout;
1312 ui_out_emit_tuple tuple_emitter (uiout, "func history");
1313 low = from;
1314 high = to;
1315
1316 DEBUG ("call-history (0x%x): [%u; %u)", (int) flags, low, high);
1317
1318 /* Check for wrap-arounds. */
1319 if (low != from || high != to)
1320 error (_("Bad range."));
1321
1322 if (high < low)
1323 error (_("Bad range."));
1324
1325 btinfo = require_btrace ();
1326
1327 found = btrace_find_call_by_number (&begin, btinfo, low);
1328 if (found == 0)
1329 error (_("Range out of bounds."));
1330
1331 found = btrace_find_call_by_number (&end, btinfo, high);
1332 if (found == 0)
1333 {
1334 /* Silently truncate the range. */
1335 btrace_call_end (&end, btinfo);
1336 }
1337 else
1338 {
1339 /* We want both begin and end to be inclusive. */
1340 btrace_call_next (&end, 1);
1341 }
1342
1343 btrace_call_history (uiout, btinfo, &begin, &end, flags);
1344 btrace_set_call_history (btinfo, &begin, &end);
1345 }
1346
1347 /* The call_history_from method of target record-btrace. */
1348
1349 void
1350 record_btrace_target::call_history_from (ULONGEST from, int size,
1351 record_print_flags flags)
1352 {
1353 ULONGEST begin, end, context;
1354
1355 context = abs (size);
1356 if (context == 0)
1357 error (_("Bad record function-call-history-size."));
1358
1359 if (size < 0)
1360 {
1361 end = from;
1362
1363 if (from < context)
1364 begin = 0;
1365 else
1366 begin = from - context + 1;
1367 }
1368 else
1369 {
1370 begin = from;
1371 end = from + context - 1;
1372
1373 /* Check for wrap-around. */
1374 if (end < begin)
1375 end = ULONGEST_MAX;
1376 }
1377
1378 call_history_range (begin, end, flags);
1379 }
1380
1381 /* The record_method method of target record-btrace. */
1382
1383 enum record_method
1384 record_btrace_target::record_method (ptid_t ptid)
1385 {
1386 struct thread_info * const tp = find_thread_ptid (ptid);
1387
1388 if (tp == NULL)
1389 error (_("No thread."));
1390
1391 if (tp->btrace.target == NULL)
1392 return RECORD_METHOD_NONE;
1393
1394 return RECORD_METHOD_BTRACE;
1395 }
1396
1397 /* The record_is_replaying method of target record-btrace. */
1398
1399 int
1400 record_btrace_target::record_is_replaying (ptid_t ptid)
1401 {
1402 struct thread_info *tp;
1403
1404 ALL_NON_EXITED_THREADS (tp)
1405 if (ptid_match (tp->ptid, ptid) && btrace_is_replaying (tp))
1406 return 1;
1407
1408 return 0;
1409 }
1410
1411 /* The record_will_replay method of target record-btrace. */
1412
1413 int
1414 record_btrace_target::record_will_replay (ptid_t ptid, int dir)
1415 {
1416 return dir == EXEC_REVERSE || record_is_replaying (ptid);
1417 }
1418
1419 /* The xfer_partial method of target record-btrace. */
1420
1421 enum target_xfer_status
1422 record_btrace_target::xfer_partial (enum target_object object,
1423 const char *annex, gdb_byte *readbuf,
1424 const gdb_byte *writebuf, ULONGEST offset,
1425 ULONGEST len, ULONGEST *xfered_len)
1426 {
1427 /* Filter out requests that don't make sense during replay. */
1428 if (replay_memory_access == replay_memory_access_read_only
1429 && !record_btrace_generating_corefile
1430 && record_is_replaying (inferior_ptid))
1431 {
1432 switch (object)
1433 {
1434 case TARGET_OBJECT_MEMORY:
1435 {
1436 struct target_section *section;
1437
1438 /* We do not allow writing memory in general. */
1439 if (writebuf != NULL)
1440 {
1441 *xfered_len = len;
1442 return TARGET_XFER_UNAVAILABLE;
1443 }
1444
1445 /* We allow reading readonly memory. */
1446 section = target_section_by_addr (this, offset);
1447 if (section != NULL)
1448 {
1449 /* Check if the section we found is readonly. */
1450 if ((bfd_get_section_flags (section->the_bfd_section->owner,
1451 section->the_bfd_section)
1452 & SEC_READONLY) != 0)
1453 {
1454 /* Truncate the request to fit into this section. */
1455 len = std::min (len, section->endaddr - offset);
1456 break;
1457 }
1458 }
1459
1460 *xfered_len = len;
1461 return TARGET_XFER_UNAVAILABLE;
1462 }
1463 }
1464 }
1465
1466 /* Forward the request. */
1467 return this->beneath->xfer_partial (object, annex, readbuf, writebuf,
1468 offset, len, xfered_len);
1469 }
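
/* Editorial sketch (standalone, hypothetical names): the replay-time memory
   policy above as a pure decision function.  This simplifies the real logic
   - it ignores the replay-memory-access setting and corefile generation -
   to show the core rule: refuse writes during replay, and allow reads only
   from read-only sections, since read-write memory may have changed since
   the trace was recorded.  */

enum access_verdict { ACCESS_FORWARD, ACCESS_UNAVAILABLE };

static enum access_verdict
replay_memory_policy (int replaying, int writing, int in_readonly_section)
{
  if (!replaying)
    return ACCESS_FORWARD;        /* Live target: no filtering.  */

  if (writing)
    return ACCESS_UNAVAILABLE;    /* Never write during replay.  */

  return in_readonly_section ? ACCESS_FORWARD : ACCESS_UNAVAILABLE;
}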
1470
1471 /* The insert_breakpoint method of target record-btrace. */
1472
1473 int
1474 record_btrace_target::insert_breakpoint (struct gdbarch *gdbarch,
1475 struct bp_target_info *bp_tgt)
1476 {
1477 const char *old;
1478 int ret;
1479
1480 /* Inserting breakpoints requires accessing memory. Allow it for the
1481 duration of this function. */
1482 old = replay_memory_access;
1483 replay_memory_access = replay_memory_access_read_write;
1484
1485 ret = 0;
1486 TRY
1487 {
1488 ret = this->beneath->insert_breakpoint (gdbarch, bp_tgt);
1489 }
1490 CATCH (except, RETURN_MASK_ALL)
1491 {
1492 replay_memory_access = old;
1493 throw_exception (except);
1494 }
1495 END_CATCH
1496 replay_memory_access = old;
1497
1498 return ret;
1499 }
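
/* Editorial sketch: the save/override/restore-on-all-paths pattern used
   above (and again in remove_breakpoint below) is equivalent to a small
   RAII guard.  Shown standalone; GDB has similar utilities, but this
   generic version makes the equivalence explicit.  */

template<typename T>
class scoped_value_restore
{
public:
  scoped_value_restore (T *var, T value)
    : m_var (var), m_saved (*var)
  { *var = value; }

  ~scoped_value_restore ()
  { *m_var = m_saved; }   /* Runs on return and on thrown exceptions.  */

  scoped_value_restore (const scoped_value_restore &) = delete;
  scoped_value_restore &operator= (const scoped_value_restore &) = delete;

private:
  T *m_var;
  T m_saved;
};

/* With such a guard, the TRY/CATCH above could shrink to:

     scoped_value_restore<const char *> restore
       (&replay_memory_access, replay_memory_access_read_write);
     return this->beneath->insert_breakpoint (gdbarch, bp_tgt);
*/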
1500
1501 /* The remove_breakpoint method of target record-btrace. */
1502
1503 int
1504 record_btrace_target::remove_breakpoint (struct gdbarch *gdbarch,
1505 struct bp_target_info *bp_tgt,
1506 enum remove_bp_reason reason)
1507 {
1508 const char *old;
1509 int ret;
1510
1511 /* Removing breakpoints requires accessing memory. Allow it for the
1512 duration of this function. */
1513 old = replay_memory_access;
1514 replay_memory_access = replay_memory_access_read_write;
1515
1516 ret = 0;
1517 TRY
1518 {
1519 ret = this->beneath->remove_breakpoint (gdbarch, bp_tgt, reason);
1520 }
1521 CATCH (except, RETURN_MASK_ALL)
1522 {
1523 replay_memory_access = old;
1524 throw_exception (except);
1525 }
1526 END_CATCH
1527 replay_memory_access = old;
1528
1529 return ret;
1530 }
1531
1532 /* The fetch_registers method of target record-btrace. */
1533
1534 void
1535 record_btrace_target::fetch_registers (struct regcache *regcache, int regno)
1536 {
1537 struct btrace_insn_iterator *replay;
1538 struct thread_info *tp;
1539
1540 tp = find_thread_ptid (regcache_get_ptid (regcache));
1541 gdb_assert (tp != NULL);
1542
1543 replay = tp->btrace.replay;
1544 if (replay != NULL && !record_btrace_generating_corefile)
1545 {
1546 const struct btrace_insn *insn;
1547 struct gdbarch *gdbarch;
1548 int pcreg;
1549
1550 gdbarch = regcache->arch ();
1551 pcreg = gdbarch_pc_regnum (gdbarch);
1552 if (pcreg < 0)
1553 return;
1554
1555 /* We can only provide the PC register. */
1556 if (regno >= 0 && regno != pcreg)
1557 return;
1558
1559 insn = btrace_insn_get (replay);
1560 gdb_assert (insn != NULL);
1561
1562 regcache_raw_supply (regcache, regno, &insn->pc);
1563 }
1564 else
1565 this->beneath->fetch_registers (regcache, regno);
1566 }
1567
1568 /* The store_registers method of target record-btrace. */
1569
1570 void
1571 record_btrace_target::store_registers (struct regcache *regcache, int regno)
1572 {
1573 struct target_ops *t;
1574
1575 if (!record_btrace_generating_corefile
1576 && record_is_replaying (regcache_get_ptid (regcache)))
1577 error (_("Cannot write registers while replaying."));
1578
1579 gdb_assert (may_write_registers != 0);
1580
1581 this->beneath->store_registers (regcache, regno);
1582 }
1583
1584 /* The prepare_to_store method of target record-btrace. */
1585
1586 void
1587 record_btrace_target::prepare_to_store (struct regcache *regcache)
1588 {
1589 if (!record_btrace_generating_corefile
1590 && record_is_replaying (regcache_get_ptid (regcache)))
1591 return;
1592
1593 this->beneath->prepare_to_store (regcache);
1594 }
1595
1596 /* The branch trace frame cache. */
1597
1598 struct btrace_frame_cache
1599 {
1600 /* The thread. */
1601 struct thread_info *tp;
1602
1603 /* The frame info. */
1604 struct frame_info *frame;
1605
1606 /* The branch trace function segment. */
1607 const struct btrace_function *bfun;
1608 };
1609
1610 /* A struct btrace_frame_cache hash table indexed by NEXT. */
1611
1612 static htab_t bfcache;
1613
1614 /* hash_f for htab_create_alloc of bfcache. */
1615
1616 static hashval_t
1617 bfcache_hash (const void *arg)
1618 {
1619 const struct btrace_frame_cache *cache
1620 = (const struct btrace_frame_cache *) arg;
1621
1622 return htab_hash_pointer (cache->frame);
1623 }
1624
1625 /* eq_f for htab_create_alloc of bfcache. */
1626
1627 static int
1628 bfcache_eq (const void *arg1, const void *arg2)
1629 {
1630 const struct btrace_frame_cache *cache1
1631 = (const struct btrace_frame_cache *) arg1;
1632 const struct btrace_frame_cache *cache2
1633 = (const struct btrace_frame_cache *) arg2;
1634
1635 return cache1->frame == cache2->frame;
1636 }
1637
1638 /* Create a new btrace frame cache. */
1639
1640 static struct btrace_frame_cache *
1641 bfcache_new (struct frame_info *frame)
1642 {
1643 struct btrace_frame_cache *cache;
1644 void **slot;
1645
1646 cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
1647 cache->frame = frame;
1648
1649 slot = htab_find_slot (bfcache, cache, INSERT);
1650 gdb_assert (*slot == NULL);
1651 *slot = cache;
1652
1653 return cache;
1654 }
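
/* Editorial sketch (standalone, hypothetical types): the bfcache pattern -
   a cache keyed by the frame pointer itself - expressed with
   std::unordered_map.  The libiberty htab above does the same job with
   explicit hash and equality callbacks.  Node-based maps keep value
   addresses stable across rehashing, so handing out a pointer to the
   mapped value is safe.  */

#include <unordered_map>
#include <cassert>

struct demo_frame;                     /* Stand-in for struct frame_info.  */
struct demo_cache { const void *bfun; };

static std::unordered_map<const demo_frame *, demo_cache> demo_cache_map;

static demo_cache *
demo_cache_new (const demo_frame *frame)
{
  auto result = demo_cache_map.emplace (frame, demo_cache {});
  assert (result.second);              /* No entry existed for FRAME yet.  */
  return &result.first->second;
}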
1655
1656 /* Extract the branch trace function from a branch trace frame. */
1657
1658 static const struct btrace_function *
1659 btrace_get_frame_function (struct frame_info *frame)
1660 {
1661 const struct btrace_frame_cache *cache;
1662 struct btrace_frame_cache pattern;
1663 void **slot;
1664
1665 pattern.frame = frame;
1666
1667 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1668 if (slot == NULL)
1669 return NULL;
1670
1671 cache = (const struct btrace_frame_cache *) *slot;
1672 return cache->bfun;
1673 }
1674
1675 /* Implement stop_reason method for record_btrace_frame_unwind. */
1676
1677 static enum unwind_stop_reason
1678 record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1679 void **this_cache)
1680 {
1681 const struct btrace_frame_cache *cache;
1682 const struct btrace_function *bfun;
1683
1684 cache = (const struct btrace_frame_cache *) *this_cache;
1685 bfun = cache->bfun;
1686 gdb_assert (bfun != NULL);
1687
1688 if (bfun->up == 0)
1689 return UNWIND_UNAVAILABLE;
1690
1691 return UNWIND_NO_REASON;
1692 }
1693
1694 /* Implement this_id method for record_btrace_frame_unwind. */
1695
1696 static void
1697 record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
1698 struct frame_id *this_id)
1699 {
1700 const struct btrace_frame_cache *cache;
1701 const struct btrace_function *bfun;
1702 struct btrace_call_iterator it;
1703 CORE_ADDR code, special;
1704
1705 cache = (const struct btrace_frame_cache *) *this_cache;
1706
1707 bfun = cache->bfun;
1708 gdb_assert (bfun != NULL);
1709
1710 while (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->prev) != 0)
1711 bfun = btrace_call_get (&it);
1712
1713 code = get_frame_func (this_frame);
1714 special = bfun->number;
1715
1716 *this_id = frame_id_build_unavailable_stack_special (code, special);
1717
1718 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1719 btrace_get_bfun_name (cache->bfun),
1720 core_addr_to_string_nz (this_id->code_addr),
1721 core_addr_to_string_nz (this_id->special_addr));
1722 }
1723
1724 /* Implement prev_register method for record_btrace_frame_unwind. */
1725
1726 static struct value *
1727 record_btrace_frame_prev_register (struct frame_info *this_frame,
1728 void **this_cache,
1729 int regnum)
1730 {
1731 const struct btrace_frame_cache *cache;
1732 const struct btrace_function *bfun, *caller;
1733 struct btrace_call_iterator it;
1734 struct gdbarch *gdbarch;
1735 CORE_ADDR pc;
1736 int pcreg;
1737
1738 gdbarch = get_frame_arch (this_frame);
1739 pcreg = gdbarch_pc_regnum (gdbarch);
1740 if (pcreg < 0 || regnum != pcreg)
1741 throw_error (NOT_AVAILABLE_ERROR,
1742 _("Registers are not available in btrace record history"));
1743
1744 cache = (const struct btrace_frame_cache *) *this_cache;
1745 bfun = cache->bfun;
1746 gdb_assert (bfun != NULL);
1747
1748 if (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->up) == 0)
1749 throw_error (NOT_AVAILABLE_ERROR,
1750 _("No caller in btrace record history"));
1751
1752 caller = btrace_call_get (&it);
1753
1754 if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
1755 pc = caller->insn.front ().pc;
1756 else
1757 {
1758 pc = caller->insn.back ().pc;
1759 pc += gdb_insn_length (gdbarch, pc);
1760 }
1761
1762 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1763 btrace_get_bfun_name (bfun), bfun->level,
1764 core_addr_to_string_nz (pc));
1765
1766 return frame_unwind_got_address (this_frame, regnum, pc);
1767 }
1768
1769 /* Implement sniffer method for record_btrace_frame_unwind. */
1770
1771 static int
1772 record_btrace_frame_sniffer (const struct frame_unwind *self,
1773 struct frame_info *this_frame,
1774 void **this_cache)
1775 {
1776 const struct btrace_function *bfun;
1777 struct btrace_frame_cache *cache;
1778 struct thread_info *tp;
1779 struct frame_info *next;
1780
1781 /* THIS_FRAME does not contain a reference to its thread. */
1782 tp = find_thread_ptid (inferior_ptid);
1783 gdb_assert (tp != NULL);
1784
1785 bfun = NULL;
1786 next = get_next_frame (this_frame);
1787 if (next == NULL)
1788 {
1789 const struct btrace_insn_iterator *replay;
1790
1791 replay = tp->btrace.replay;
1792 if (replay != NULL)
1793 bfun = &replay->btinfo->functions[replay->call_index];
1794 }
1795 else
1796 {
1797 const struct btrace_function *callee;
1798 struct btrace_call_iterator it;
1799
1800 callee = btrace_get_frame_function (next);
1801 if (callee == NULL || (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
1802 return 0;
1803
1804 if (btrace_find_call_by_number (&it, &tp->btrace, callee->up) == 0)
1805 return 0;
1806
1807 bfun = btrace_call_get (&it);
1808 }
1809
1810 if (bfun == NULL)
1811 return 0;
1812
1813 DEBUG ("[frame] sniffed frame for %s on level %d",
1814 btrace_get_bfun_name (bfun), bfun->level);
1815
1816 /* This is our frame. Initialize the frame cache. */
1817 cache = bfcache_new (this_frame);
1818 cache->tp = tp;
1819 cache->bfun = bfun;
1820
1821 *this_cache = cache;
1822 return 1;
1823 }
1824
1825 /* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1826
1827 static int
1828 record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
1829 struct frame_info *this_frame,
1830 void **this_cache)
1831 {
1832 const struct btrace_function *bfun, *callee;
1833 struct btrace_frame_cache *cache;
1834 struct btrace_call_iterator it;
1835 struct frame_info *next;
1836 struct thread_info *tinfo;
1837
1838 next = get_next_frame (this_frame);
1839 if (next == NULL)
1840 return 0;
1841
1842 callee = btrace_get_frame_function (next);
1843 if (callee == NULL)
1844 return 0;
1845
1846 if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1847 return 0;
1848
1849 tinfo = find_thread_ptid (inferior_ptid);
1850 if (btrace_find_call_by_number (&it, &tinfo->btrace, callee->up) == 0)
1851 return 0;
1852
1853 bfun = btrace_call_get (&it);
1854
1855 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1856 btrace_get_bfun_name (bfun), bfun->level);
1857
1858 /* This is our frame. Initialize the frame cache. */
1859 cache = bfcache_new (this_frame);
1860 cache->tp = tinfo;
1861 cache->bfun = bfun;
1862
1863 *this_cache = cache;
1864 return 1;
1865 }
1866
1867 static void
1868 record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
1869 {
1870 struct btrace_frame_cache *cache;
1871 void **slot;
1872
1873 cache = (struct btrace_frame_cache *) this_cache;
1874
1875 slot = htab_find_slot (bfcache, cache, NO_INSERT);
1876 gdb_assert (slot != NULL);
1877
1878 htab_remove_elt (bfcache, cache);
1879 }
1880
1881 /* btrace recording does not store previous memory content, nor previous
1882 stack frame content. Any unwinding would return erroneous results as the
1883 stack contents no longer match the changed PC value restored from history.
1884 Therefore this unwinder reports any possibly unwound registers as
1885 <unavailable>. */
1886
1887 const struct frame_unwind record_btrace_frame_unwind =
1888 {
1889 NORMAL_FRAME,
1890 record_btrace_frame_unwind_stop_reason,
1891 record_btrace_frame_this_id,
1892 record_btrace_frame_prev_register,
1893 NULL,
1894 record_btrace_frame_sniffer,
1895 record_btrace_frame_dealloc_cache
1896 };
1897
1898 const struct frame_unwind record_btrace_tailcall_frame_unwind =
1899 {
1900 TAILCALL_FRAME,
1901 record_btrace_frame_unwind_stop_reason,
1902 record_btrace_frame_this_id,
1903 record_btrace_frame_prev_register,
1904 NULL,
1905 record_btrace_tailcall_frame_sniffer,
1906 record_btrace_frame_dealloc_cache
1907 };
1908
1909 /* Implement the get_unwinder method. */
1910
1911 const struct frame_unwind *
1912 record_btrace_target::get_unwinder ()
1913 {
1914 return &record_btrace_frame_unwind;
1915 }
1916
1917 /* Implement the get_tailcall_unwinder method. */
1918
1919 const struct frame_unwind *
1920 record_btrace_target::get_tailcall_unwinder ()
1921 {
1922 return &record_btrace_tailcall_frame_unwind;
1923 }
1924
1925 /* Return a human-readable string for FLAG. */
1926
1927 static const char *
1928 btrace_thread_flag_to_str (enum btrace_thread_flag flag)
1929 {
1930 switch (flag)
1931 {
1932 case BTHR_STEP:
1933 return "step";
1934
1935 case BTHR_RSTEP:
1936 return "reverse-step";
1937
1938 case BTHR_CONT:
1939 return "cont";
1940
1941 case BTHR_RCONT:
1942 return "reverse-cont";
1943
1944 case BTHR_STOP:
1945 return "stop";
1946 }
1947
1948 return "<invalid>";
1949 }
1950
1951 /* Indicate that TP should be resumed according to FLAG. */
1952
1953 static void
1954 record_btrace_resume_thread (struct thread_info *tp,
1955 enum btrace_thread_flag flag)
1956 {
1957 struct btrace_thread_info *btinfo;
1958
1959 DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp),
1960 target_pid_to_str (tp->ptid), flag, btrace_thread_flag_to_str (flag));
1961
1962 btinfo = &tp->btrace;
1963
1964 /* Fetch the latest branch trace. */
1965 btrace_fetch (tp, record_btrace_get_cpu ());
1966
1967 /* A resume request overwrites a preceding resume or stop request. */
1968 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
1969 btinfo->flags |= flag;
1970 }
1971
1972 /* Get the current frame for TP. */
1973
1974 static struct frame_info *
1975 get_thread_current_frame (struct thread_info *tp)
1976 {
1977 struct frame_info *frame;
1978 ptid_t old_inferior_ptid;
1979 int executing;
1980
1981 /* Set INFERIOR_PTID, which is implicitly used by get_current_frame. */
1982 old_inferior_ptid = inferior_ptid;
1983 inferior_ptid = tp->ptid;
1984
1985 /* Clear the executing flag to allow changes to the current frame.
1986 We are not actually running, yet. We just started a reverse execution
1987 command or a record goto command.
1988 For the latter, EXECUTING is false and this has no effect.
1989 For the former, EXECUTING is true and we're in wait, about to
1990 move the thread. Since we need to recompute the stack, we temporarily
1991 set EXECUTING to false. */
1992 executing = is_executing (inferior_ptid);
1993 set_executing (inferior_ptid, 0);
1994
1995 frame = NULL;
1996 TRY
1997 {
1998 frame = get_current_frame ();
1999 }
2000 CATCH (except, RETURN_MASK_ALL)
2001 {
2002 /* Restore the previous execution state. */
2003 set_executing (inferior_ptid, executing);
2004
2005 /* Restore the previous inferior_ptid. */
2006 inferior_ptid = old_inferior_ptid;
2007
2008 throw_exception (except);
2009 }
2010 END_CATCH
2011
2012 /* Restore the previous execution state. */
2013 set_executing (inferior_ptid, executing);
2014
2015 /* Restore the previous inferior_ptid. */
2016 inferior_ptid = old_inferior_ptid;
2017
2018 return frame;
2019 }
2020
2021 /* Start replaying a thread. */
2022
2023 static struct btrace_insn_iterator *
2024 record_btrace_start_replaying (struct thread_info *tp)
2025 {
2026 struct btrace_insn_iterator *replay;
2027 struct btrace_thread_info *btinfo;
2028
2029 btinfo = &tp->btrace;
2030 replay = NULL;
2031
2032 /* We can't start replaying without trace. */
2033 if (btinfo->functions.empty ())
2034 return NULL;
2035
2036 /* GDB stores the current frame_id when stepping in order to detect steps
2037 into subroutines.
2038 Since frames are computed differently when we're replaying, we need to
2039 recompute those stored frames and fix them up so we can still detect
2040 subroutines after we started replaying. */
2041 TRY
2042 {
2043 struct frame_info *frame;
2044 struct frame_id frame_id;
2045 int upd_step_frame_id, upd_step_stack_frame_id;
2046
2047 /* The current frame without replaying - computed via normal unwind. */
2048 frame = get_thread_current_frame (tp);
2049 frame_id = get_frame_id (frame);
2050
2051 /* Check if we need to update any stepping-related frame id's. */
2052 upd_step_frame_id = frame_id_eq (frame_id,
2053 tp->control.step_frame_id);
2054 upd_step_stack_frame_id = frame_id_eq (frame_id,
2055 tp->control.step_stack_frame_id);
2056
2057 /* We start replaying at the end of the branch trace. This corresponds
2058 to the current instruction. */
2059 replay = XNEW (struct btrace_insn_iterator);
2060 btrace_insn_end (replay, btinfo);
2061
2062 /* Skip gaps at the end of the trace. */
2063 while (btrace_insn_get (replay) == NULL)
2064 {
2065 unsigned int steps;
2066
2067 steps = btrace_insn_prev (replay, 1);
2068 if (steps == 0)
2069 error (_("No trace."));
2070 }
2071
2072 /* We're not replaying, yet. */
2073 gdb_assert (btinfo->replay == NULL);
2074 btinfo->replay = replay;
2075
2076 /* Make sure we're not using any stale registers. */
2077 registers_changed_ptid (tp->ptid);
2078
2079 /* The current frame with replaying - computed via btrace unwind. */
2080 frame = get_thread_current_frame (tp);
2081 frame_id = get_frame_id (frame);
2082
2083 /* Replace stepping related frames where necessary. */
2084 if (upd_step_frame_id)
2085 tp->control.step_frame_id = frame_id;
2086 if (upd_step_stack_frame_id)
2087 tp->control.step_stack_frame_id = frame_id;
2088 }
2089 CATCH (except, RETURN_MASK_ALL)
2090 {
2091 xfree (btinfo->replay);
2092 btinfo->replay = NULL;
2093
2094 registers_changed_ptid (tp->ptid);
2095
2096 throw_exception (except);
2097 }
2098 END_CATCH
2099
2100 return replay;
2101 }
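
/* A usage sketch (hypothetical session): replaying typically starts when a
   reverse execution command or a "record goto" command is issued while
   recording:

     (gdb) record btrace
     (gdb) next
     (gdb) reverse-stepi        # starts replaying via the function above
     (gdb) record goto begin    # likewise replays, from the trace start  */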
2102
2103 /* Stop replaying a thread. */
2104
2105 static void
2106 record_btrace_stop_replaying (struct thread_info *tp)
2107 {
2108 struct btrace_thread_info *btinfo;
2109
2110 btinfo = &tp->btrace;
2111
2112 xfree (btinfo->replay);
2113 btinfo->replay = NULL;
2114
2115 /* Make sure we're not leaving any stale registers. */
2116 registers_changed_ptid (tp->ptid);
2117 }
2118
2119 /* Stop replaying TP if it is at the end of its execution history. */
2120
2121 static void
2122 record_btrace_stop_replaying_at_end (struct thread_info *tp)
2123 {
2124 struct btrace_insn_iterator *replay, end;
2125 struct btrace_thread_info *btinfo;
2126
2127 btinfo = &tp->btrace;
2128 replay = btinfo->replay;
2129
2130 if (replay == NULL)
2131 return;
2132
2133 btrace_insn_end (&end, btinfo);
2134
2135 if (btrace_insn_cmp (replay, &end) == 0)
2136 record_btrace_stop_replaying (tp);
2137 }
2138
2139 /* The resume method of target record-btrace. */
2140
2141 void
2142 record_btrace_target::resume (ptid_t ptid, int step, enum gdb_signal signal)
2143 {
2144 struct thread_info *tp;
2145 enum btrace_thread_flag flag, cflag;
2146
2147 DEBUG ("resume %s: %s%s", target_pid_to_str (ptid),
2148 ::execution_direction == EXEC_REVERSE ? "reverse-" : "",
2149 step ? "step" : "cont");
2150
2151 /* Store the execution direction of the last resume.
2152
2153 If there is more than one resume call, we have to rely on infrun
2154 to not change the execution direction in-between. */
2155 record_btrace_resume_exec_dir = ::execution_direction;
2156
2157 /* As long as we're not replaying, just forward the request.
2158
2159 For non-stop targets this means that no thread is replaying. In order to
2160 make progress, we may need to explicitly move replaying threads to the end
2161 of their execution history. */
2162 if ((::execution_direction != EXEC_REVERSE)
2163 && !record_is_replaying (minus_one_ptid))
2164 {
2165 this->beneath->resume (ptid, step, signal);
2166 return;
2167 }
2168
2169 /* Compute the btrace thread flag for the requested move. */
2170 if (::execution_direction == EXEC_REVERSE)
2171 {
2172 flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;
2173 cflag = BTHR_RCONT;
2174 }
2175 else
2176 {
2177 flag = step == 0 ? BTHR_CONT : BTHR_STEP;
2178 cflag = BTHR_CONT;
2179 }
2180
2181 /* We just indicate the resume intent here. The actual stepping happens in
2182 the wait method below.
2183
2184 For all-stop targets, we only step INFERIOR_PTID and continue others. */
2185 if (!target_is_non_stop_p ())
2186 {
2187 gdb_assert (ptid_match (inferior_ptid, ptid));
2188
2189 ALL_NON_EXITED_THREADS (tp)
2190 if (ptid_match (tp->ptid, ptid))
2191 {
2192 if (ptid_match (tp->ptid, inferior_ptid))
2193 record_btrace_resume_thread (tp, flag);
2194 else
2195 record_btrace_resume_thread (tp, cflag);
2196 }
2197 }
2198 else
2199 {
2200 ALL_NON_EXITED_THREADS (tp)
2201 if (ptid_match (tp->ptid, ptid))
2202 record_btrace_resume_thread (tp, flag);
2203 }
2204
2205 /* Async support. */
2206 if (target_can_async_p ())
2207 {
2208 target_async (1);
2209 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2210 }
2211 }
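
/* For reference, the flag computation above maps resume requests to thread
   flags as follows (focused thread vs. other resumed threads in all-stop
   mode):

     forward step      -> BTHR_STEP,  others BTHR_CONT
     forward continue  -> BTHR_CONT,  others BTHR_CONT
     reverse step      -> BTHR_RSTEP, others BTHR_RCONT
     reverse continue  -> BTHR_RCONT, others BTHR_RCONT  */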
2212
2213 /* The commit_resume method of target record-btrace. */
2214
2215 void
2216 record_btrace_target::commit_resume ()
2217 {
2218 if ((::execution_direction != EXEC_REVERSE)
2219 && !record_is_replaying (minus_one_ptid))
2220 beneath->commit_resume ();
2221 }
2222
2223 /* Cancel resuming TP. */
2224
2225 static void
2226 record_btrace_cancel_resume (struct thread_info *tp)
2227 {
2228 enum btrace_thread_flag flags;
2229
2230 flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
2231 if (flags == 0)
2232 return;
2233
2234 DEBUG ("cancel resume thread %s (%s): %x (%s)",
2235 print_thread_id (tp),
2236 target_pid_to_str (tp->ptid), flags,
2237 btrace_thread_flag_to_str (flags));
2238
2239 tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
2240 record_btrace_stop_replaying_at_end (tp);
2241 }
2242
2243 /* Return a target_waitstatus indicating that we ran out of history. */
2244
2245 static struct target_waitstatus
2246 btrace_step_no_history (void)
2247 {
2248 struct target_waitstatus status;
2249
2250 status.kind = TARGET_WAITKIND_NO_HISTORY;
2251
2252 return status;
2253 }
2254
2255 /* Return a target_waitstatus indicating that a step finished. */
2256
2257 static struct target_waitstatus
2258 btrace_step_stopped (void)
2259 {
2260 struct target_waitstatus status;
2261
2262 status.kind = TARGET_WAITKIND_STOPPED;
2263 status.value.sig = GDB_SIGNAL_TRAP;
2264
2265 return status;
2266 }
2267
2268 /* Return a target_waitstatus indicating that a thread was stopped as
2269 requested. */
2270
2271 static struct target_waitstatus
2272 btrace_step_stopped_on_request (void)
2273 {
2274 struct target_waitstatus status;
2275
2276 status.kind = TARGET_WAITKIND_STOPPED;
2277 status.value.sig = GDB_SIGNAL_0;
2278
2279 return status;
2280 }
2281
2282 /* Return a target_waitstatus indicating a spurious stop. */
2283
2284 static struct target_waitstatus
2285 btrace_step_spurious (void)
2286 {
2287 struct target_waitstatus status;
2288
2289 status.kind = TARGET_WAITKIND_SPURIOUS;
2290
2291 return status;
2292 }
2293
2294 /* Return a target_waitstatus indicating that the thread was not resumed. */
2295
2296 static struct target_waitstatus
2297 btrace_step_no_resumed (void)
2298 {
2299 struct target_waitstatus status;
2300
2301 status.kind = TARGET_WAITKIND_NO_RESUMED;
2302
2303 return status;
2304 }
2305
2306 /* Return a target_waitstatus indicating that we should wait again. */
2307
2308 static struct target_waitstatus
2309 btrace_step_again (void)
2310 {
2311 struct target_waitstatus status;
2312
2313 status.kind = TARGET_WAITKIND_IGNORE;
2314
2315 return status;
2316 }
2317
2318 /* Clear the record histories. */
2319
2320 static void
2321 record_btrace_clear_histories (struct btrace_thread_info *btinfo)
2322 {
2323 xfree (btinfo->insn_history);
2324 xfree (btinfo->call_history);
2325
2326 btinfo->insn_history = NULL;
2327 btinfo->call_history = NULL;
2328 }
2329
2330 /* Check whether TP's current replay position is at a breakpoint. */
2331
2332 static int
2333 record_btrace_replay_at_breakpoint (struct thread_info *tp)
2334 {
2335 struct btrace_insn_iterator *replay;
2336 struct btrace_thread_info *btinfo;
2337 const struct btrace_insn *insn;
2338 struct inferior *inf;
2339
2340 btinfo = &tp->btrace;
2341 replay = btinfo->replay;
2342
2343 if (replay == NULL)
2344 return 0;
2345
2346 insn = btrace_insn_get (replay);
2347 if (insn == NULL)
2348 return 0;
2349
2350 inf = find_inferior_ptid (tp->ptid);
2351 if (inf == NULL)
2352 return 0;
2353
2354 return record_check_stopped_by_breakpoint (inf->aspace, insn->pc,
2355 &btinfo->stop_reason);
2356 }
2357
2358 /* Step one instruction in forward direction. */
2359
2360 static struct target_waitstatus
2361 record_btrace_single_step_forward (struct thread_info *tp)
2362 {
2363 struct btrace_insn_iterator *replay, end, start;
2364 struct btrace_thread_info *btinfo;
2365
2366 btinfo = &tp->btrace;
2367 replay = btinfo->replay;
2368
2369 /* We're done if we're not replaying. */
2370 if (replay == NULL)
2371 return btrace_step_no_history ();
2372
2373 /* Check if we're stepping a breakpoint. */
2374 if (record_btrace_replay_at_breakpoint (tp))
2375 return btrace_step_stopped ();
2376
2377 /* Skip gaps during replay. If we end up at a gap (at the end of the trace),
2378 jump back to the instruction at which we started. */
2379 start = *replay;
2380 do
2381 {
2382 unsigned int steps;
2383
2384 /* We will bail out here if we continue stepping after reaching the end
2385 of the execution history. */
2386 steps = btrace_insn_next (replay, 1);
2387 if (steps == 0)
2388 {
2389 *replay = start;
2390 return btrace_step_no_history ();
2391 }
2392 }
2393 while (btrace_insn_get (replay) == NULL);
2394
2395 /* Determine the end of the instruction trace. */
2396 btrace_insn_end (&end, btinfo);
2397
2398 /* The execution trace contains (and ends with) the current instruction.
2399 This instruction has not been executed, yet, so the trace really ends
2400 one instruction earlier. */
2401 if (btrace_insn_cmp (replay, &end) == 0)
2402 return btrace_step_no_history ();
2403
2404 return btrace_step_spurious ();
2405 }
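
/* For example (hypothetical trace): if the recorded instructions are
   I1 .. I5, the trace ends with the current instruction I5, which has not
   been executed yet.  Forward stepping from I4 therefore lands on I5 and
   reports "no history" rather than a successful step.  */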
2406
2407 /* Step one instruction in backward direction. */
2408
2409 static struct target_waitstatus
2410 record_btrace_single_step_backward (struct thread_info *tp)
2411 {
2412 struct btrace_insn_iterator *replay, start;
2413 struct btrace_thread_info *btinfo;
2414
2415 btinfo = &tp->btrace;
2416 replay = btinfo->replay;
2417
2418 /* Start replaying if we're not already doing so. */
2419 if (replay == NULL)
2420 replay = record_btrace_start_replaying (tp);
2421
2422 /* If we can't step any further, we reached the end of the history.
2423 Skip gaps during replay. If we end up at a gap (at the beginning of
2424 the trace), jump back to the instruction at which we started. */
2425 start = *replay;
2426 do
2427 {
2428 unsigned int steps;
2429
2430 steps = btrace_insn_prev (replay, 1);
2431 if (steps == 0)
2432 {
2433 *replay = start;
2434 return btrace_step_no_history ();
2435 }
2436 }
2437 while (btrace_insn_get (replay) == NULL);
2438
2439 /* Check if we're stepping a breakpoint.
2440
2441 For reverse-stepping, this check is after the step. There is logic in
2442 infrun.c that handles reverse-stepping separately. See, for example,
2443 proceed and adjust_pc_after_break.
2444
2445 This code assumes that for reverse-stepping, PC points to the last
2446 de-executed instruction, whereas for forward-stepping PC points to the
2447 next to-be-executed instruction. */
2448 if (record_btrace_replay_at_breakpoint (tp))
2449 return btrace_step_stopped ();
2450
2451 return btrace_step_spurious ();
2452 }
2453
2454 /* Step a single thread. */
2455
2456 static struct target_waitstatus
2457 record_btrace_step_thread (struct thread_info *tp)
2458 {
2459 struct btrace_thread_info *btinfo;
2460 struct target_waitstatus status;
2461 enum btrace_thread_flag flags;
2462
2463 btinfo = &tp->btrace;
2464
2465 flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
2466 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
2467
2468 DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp),
2469 target_pid_to_str (tp->ptid), flags,
2470 btrace_thread_flag_to_str (flags));
2471
2472 /* We can't step without an execution history. */
2473 if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
2474 return btrace_step_no_history ();
2475
2476 switch (flags)
2477 {
2478 default:
2479 internal_error (__FILE__, __LINE__, _("invalid stepping type."));
2480
2481 case BTHR_STOP:
2482 return btrace_step_stopped_on_request ();
2483
2484 case BTHR_STEP:
2485 status = record_btrace_single_step_forward (tp);
2486 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2487 break;
2488
2489 return btrace_step_stopped ();
2490
2491 case BTHR_RSTEP:
2492 status = record_btrace_single_step_backward (tp);
2493 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2494 break;
2495
2496 return btrace_step_stopped ();
2497
2498 case BTHR_CONT:
2499 status = record_btrace_single_step_forward (tp);
2500 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2501 break;
2502
2503 btinfo->flags |= flags;
2504 return btrace_step_again ();
2505
2506 case BTHR_RCONT:
2507 status = record_btrace_single_step_backward (tp);
2508 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2509 break;
2510
2511 btinfo->flags |= flags;
2512 return btrace_step_again ();
2513 }
2514
2515 /* We keep threads moving at the end of their execution history. The wait
2516 method will stop the thread for whom the event is reported. */
2517 if (status.kind == TARGET_WAITKIND_NO_HISTORY)
2518 btinfo->flags |= flags;
2519
2520 return status;
2521 }
2522
2523 /* A vector of threads. */
2524
2525 typedef struct thread_info * tp_t;
2526 DEF_VEC_P (tp_t);
2527
2528 /* Announce further events if necessary. */
2529
2530 static void
2531 record_btrace_maybe_mark_async_event
2532 (const std::vector<thread_info *> &moving,
2533 const std::vector<thread_info *> &no_history)
2534 {
2535 bool more_moving = !moving.empty ();
2536 bool more_no_history = !no_history.empty ();
2537
2538 if (!more_moving && !more_no_history)
2539 return;
2540
2541 if (more_moving)
2542 DEBUG ("movers pending");
2543
2544 if (more_no_history)
2545 DEBUG ("no-history pending");
2546
2547 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2548 }
2549
2550 /* The wait method of target record-btrace. */
2551
2552 ptid_t
2553 record_btrace_target::wait (ptid_t ptid, struct target_waitstatus *status,
2554 int options)
2555 {
2556 std::vector<thread_info *> moving;
2557 std::vector<thread_info *> no_history;
2558
2559 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);
2560
2561 /* As long as we're not replaying, just forward the request. */
2562 if ((::execution_direction != EXEC_REVERSE)
2563 && !record_is_replaying (minus_one_ptid))
2564 {
2565 return this->beneath->wait (ptid, status, options);
2566 }
2567
2568 /* Keep a work list of moving threads. */
2569 {
2570 thread_info *tp;
2571
2572 ALL_NON_EXITED_THREADS (tp)
2573 {
2574 if (ptid_match (tp->ptid, ptid)
2575 && ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0))
2576 moving.push_back (tp);
2577 }
2578 }
2579
2580 if (moving.empty ())
2581 {
2582 *status = btrace_step_no_resumed ();
2583
2584 DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid),
2585 target_waitstatus_to_string (status).c_str ());
2586
2587 return null_ptid;
2588 }
2589
2590 /* Step moving threads one by one, one step each, until either one thread
2591 reports an event or we run out of threads to step.
2592
2593 When stepping more than one thread, chances are that some threads reach
2594 the end of their execution history earlier than others. If we reported
2595 this immediately, all-stop on top of non-stop would stop all threads and
2596 resume the same threads next time. And we would report the same thread
2597 having reached the end of its execution history again.
2598
2599 In the worst case, this would starve the other threads. But even if other
2600 threads would be allowed to make progress, this would result in far too
2601 many intermediate stops.
2602
2603 We therefore delay the reporting of "no execution history" until we have
2604 nothing else to report. By this time, all threads should have moved to
2605 either the beginning or the end of their execution history. There will
2606 be a single user-visible stop. */
2607 struct thread_info *eventing = NULL;
2608 while ((eventing == NULL) && !moving.empty ())
2609 {
2610 for (unsigned int ix = 0; eventing == NULL && ix < moving.size ();)
2611 {
2612 thread_info *tp = moving[ix];
2613
2614 *status = record_btrace_step_thread (tp);
2615
2616 switch (status->kind)
2617 {
2618 case TARGET_WAITKIND_IGNORE:
2619 ix++;
2620 break;
2621
2622 case TARGET_WAITKIND_NO_HISTORY:
2623 no_history.push_back (ordered_remove (moving, ix));
2624 break;
2625
2626 default:
2627 eventing = unordered_remove (moving, ix);
2628 break;
2629 }
2630 }
2631 }
2632
2633 if (eventing == NULL)
2634 {
2635 /* We started with at least one moving thread. This thread must have
2636 either stopped or reached the end of its execution history.
2637
2638 In the former case, EVENTING must not be NULL.
2639 In the latter case, NO_HISTORY must not be empty. */
2640 gdb_assert (!no_history.empty ());
2641
2642 /* We kept threads moving at the end of their execution history. Stop
2643 EVENTING now that we are going to report its stop. */
2644 eventing = unordered_remove (no_history, 0);
2645 eventing->btrace.flags &= ~BTHR_MOVE;
2646
2647 *status = btrace_step_no_history ();
2648 }
2649
2650 gdb_assert (eventing != NULL);
2651
2652 /* We kept threads replaying at the end of their execution history. Stop
2653 replaying EVENTING now that we are going to report its stop. */
2654 record_btrace_stop_replaying_at_end (eventing);
2655
2656 /* Stop all other threads. */
2657 if (!target_is_non_stop_p ())
2658 {
2659 thread_info *tp;
2660
2661 ALL_NON_EXITED_THREADS (tp)
2662 record_btrace_cancel_resume (tp);
2663 }
2664
2665 /* In async mode, we need to announce further events. */
2666 if (target_is_async_p ())
2667 record_btrace_maybe_mark_async_event (moving, no_history);
2668
2669 /* Start record histories anew from the current position. */
2670 record_btrace_clear_histories (&eventing->btrace);
2671
2672 /* We moved the replay position but did not update registers. */
2673 registers_changed_ptid (eventing->ptid);
2674
2675 DEBUG ("wait ended by thread %s (%s): %s",
2676 print_thread_id (eventing),
2677 target_pid_to_str (eventing->ptid),
2678 target_waitstatus_to_string (status).c_str ());
2679
2680 return eventing->ptid;
2681 }
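
/* To illustrate the deferred reporting above with two replaying threads A
   and B (a hypothetical scenario): if A reaches the end of its execution
   history first, it is parked in NO_HISTORY while B keeps stepping.  Only
   when B also runs out of history, or reports a proper event, does the loop
   end, so the user sees a single stop instead of one stop per thread.  */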
2682
2683 /* The stop method of target record-btrace. */
2684
2685 void
2686 record_btrace_target::stop (ptid_t ptid)
2687 {
2688 DEBUG ("stop %s", target_pid_to_str (ptid));
2689
2690 /* As long as we're not replaying, just forward the request. */
2691 if ((::execution_direction != EXEC_REVERSE)
2692 && !record_is_replaying (minus_one_ptid))
2693 {
2694 this->beneath->stop (ptid);
2695 }
2696 else
2697 {
2698 struct thread_info *tp;
2699
2700 ALL_NON_EXITED_THREADS (tp)
2701 if (ptid_match (tp->ptid, ptid))
2702 {
2703 tp->btrace.flags &= ~BTHR_MOVE;
2704 tp->btrace.flags |= BTHR_STOP;
2705 }
2706 }
2707 }
2708
2709 /* The can_execute_reverse method of target record-btrace. */
2710
2711 int
2712 record_btrace_target::can_execute_reverse ()
2713 {
2714 return 1;
2715 }
2716
2717 /* The stopped_by_sw_breakpoint method of target record-btrace. */
2718
2719 int
2720 record_btrace_target::stopped_by_sw_breakpoint ()
2721 {
2722 if (record_is_replaying (minus_one_ptid))
2723 {
2724 struct thread_info *tp = inferior_thread ();
2725
2726 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2727 }
2728
2729 return this->beneath->stopped_by_sw_breakpoint ();
2730 }
2731
2732 /* The supports_stopped_by_sw_breakpoint method of target
2733 record-btrace. */
2734
2735 int
2736 record_btrace_target::supports_stopped_by_sw_breakpoint ()
2737 {
2738 if (record_is_replaying (minus_one_ptid))
2739 return 1;
2740
2741 return this->beneath->supports_stopped_by_sw_breakpoint ();
2742 }
2743
2744 /* The stopped_by_hw_breakpoint method of target record-btrace. */
2745
2746 int
2747 record_btrace_target::stopped_by_hw_breakpoint ()
2748 {
2749 if (record_is_replaying (minus_one_ptid))
2750 {
2751 struct thread_info *tp = inferior_thread ();
2752
2753 return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2754 }
2755
2756 return this->beneath->stopped_by_hw_breakpoint ();
2757 }
2758
2759 /* The supports_stopped_by_hw_breakpoint method of target
2760 record-btrace. */
2761
2762 int
2763 record_btrace_target::supports_stopped_by_hw_breakpoint ()
2764 {
2765 if (record_is_replaying (minus_one_ptid))
2766 return 1;
2767
2768 return this->beneath->supports_stopped_by_hw_breakpoint ();
2769 }
2770
2771 /* The update_thread_list method of target record-btrace. */
2772
2773 void
2774 record_btrace_target::update_thread_list ()
2775 {
2776 /* We don't add or remove threads during replay. */
2777 if (record_is_replaying (minus_one_ptid))
2778 return;
2779
2780 /* Forward the request. */
2781 this->beneath->update_thread_list ();
2782 }
2783
2784 /* The thread_alive method of target record-btrace. */
2785
2786 int
2787 record_btrace_target::thread_alive (ptid_t ptid)
2788 {
2789 /* We don't add or remove threads during replay. */
2790 if (record_is_replaying (minus_one_ptid))
2791 return find_thread_ptid (ptid) != NULL;
2792
2793 /* Forward the request. */
2794 return this->beneath->thread_alive (ptid);
2795 }
2796
2797 /* Set the replay branch trace instruction iterator. If IT is NULL, replay
2798 is stopped. */
2799
2800 static void
2801 record_btrace_set_replay (struct thread_info *tp,
2802 const struct btrace_insn_iterator *it)
2803 {
2804 struct btrace_thread_info *btinfo;
2805
2806 btinfo = &tp->btrace;
2807
2808 if (it == NULL)
2809 record_btrace_stop_replaying (tp);
2810 else
2811 {
2812 if (btinfo->replay == NULL)
2813 record_btrace_start_replaying (tp);
2814 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
2815 return;
2816
2817 *btinfo->replay = *it;
2818 registers_changed_ptid (tp->ptid);
2819 }
2820
2821 /* Start anew from the new replay position. */
2822 record_btrace_clear_histories (btinfo);
2823
2824 stop_pc = regcache_read_pc (get_current_regcache ());
2825 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
2826 }
2827
2828 /* The goto_record_begin method of target record-btrace. */
2829
2830 void
2831 record_btrace_target::goto_record_begin ()
2832 {
2833 struct thread_info *tp;
2834 struct btrace_insn_iterator begin;
2835
2836 tp = require_btrace_thread ();
2837
2838 btrace_insn_begin (&begin, &tp->btrace);
2839
2840 /* Skip gaps at the beginning of the trace. */
2841 while (btrace_insn_get (&begin) == NULL)
2842 {
2843 unsigned int steps;
2844
2845 steps = btrace_insn_next (&begin, 1);
2846 if (steps == 0)
2847 error (_("No trace."));
2848 }
2849
2850 record_btrace_set_replay (tp, &begin);
2851 }
2852
2853 /* The goto_record_end method of target record-btrace. */
2854
2855 void
2856 record_btrace_target::goto_record_end ()
2857 {
2858 struct thread_info *tp;
2859
2860 tp = require_btrace_thread ();
2861
2862 record_btrace_set_replay (tp, NULL);
2863 }
2864
2865 /* The goto_record method of target record-btrace. */
2866
2867 void
2868 record_btrace_target::goto_record (ULONGEST insn)
2869 {
2870 struct thread_info *tp;
2871 struct btrace_insn_iterator it;
2872 unsigned int number;
2873 int found;
2874
2875 number = insn;
2876
2877 /* Check for wrap-arounds when narrowing the 64-bit INSN to unsigned int. */
2878 if (number != insn)
2879 error (_("Instruction number out of range."));
2880
2881 tp = require_btrace_thread ();
2882
2883 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
2884
2885 /* Check if the instruction could not be found or is a gap. */
2886 if (found == 0 || btrace_insn_get (&it) == NULL)
2887 error (_("No such instruction."));
2888
2889 record_btrace_set_replay (tp, &it);
2890 }
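
/* A usage sketch (hypothetical instruction number):

     (gdb) record goto 42

   This resolves instruction number 42 in the current thread's branch trace
   and moves the replay position there, erroring out for gaps and for
   numbers outside the recorded range.  */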
2891
2892 /* The record_stop_replaying method of target record-btrace. */
2893
2894 void
2895 record_btrace_target::record_stop_replaying ()
2896 {
2897 struct thread_info *tp;
2898
2899 ALL_NON_EXITED_THREADS (tp)
2900 record_btrace_stop_replaying (tp);
2901 }
2902
2903 /* The execution_direction target method. */
2904
2905 enum exec_direction_kind
2906 record_btrace_target::execution_direction ()
2907 {
2908 return record_btrace_resume_exec_dir;
2909 }
2910
2911 /* The prepare_to_generate_core target method. */
2912
2913 void
2914 record_btrace_target::prepare_to_generate_core ()
2915 {
2916 record_btrace_generating_corefile = 1;
2917 }
2918
2919 /* The done_generating_core target method. */
2920
2921 void
2922 record_btrace_target::done_generating_core ()
2923 {
2924 record_btrace_generating_corefile = 0;
2925 }
2926
2927 /* Start recording in BTS format. */
2928
2929 static void
2930 cmd_record_btrace_bts_start (const char *args, int from_tty)
2931 {
2932 if (args != NULL && *args != 0)
2933 error (_("Invalid argument."));
2934
2935 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2936
2937 TRY
2938 {
2939 execute_command ("target record-btrace", from_tty);
2940 }
2941 CATCH (exception, RETURN_MASK_ALL)
2942 {
2943 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2944 throw_exception (exception);
2945 }
2946 END_CATCH
2947 }
2948
2949 /* Start recording in Intel Processor Trace format. */
2950
2951 static void
2952 cmd_record_btrace_pt_start (const char *args, int from_tty)
2953 {
2954 if (args != NULL && *args != 0)
2955 error (_("Invalid argument."));
2956
2957 record_btrace_conf.format = BTRACE_FORMAT_PT;
2958
2959 TRY
2960 {
2961 execute_command ("target record-btrace", from_tty);
2962 }
2963 CATCH (exception, RETURN_MASK_ALL)
2964 {
2965 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2966 throw_exception (exception);
2967 }
2968 END_CATCH
2969 }
2970
2971 /* Alias for "target record". */
2972
2973 static void
2974 cmd_record_btrace_start (const char *args, int from_tty)
2975 {
2976 if (args != NULL && *args != 0)
2977 error (_("Invalid argument."));
2978
2979 record_btrace_conf.format = BTRACE_FORMAT_PT;
2980
2981 TRY
2982 {
2983 execute_command ("target record-btrace", from_tty);
2984 }
2985 CATCH (exception, RETURN_MASK_ALL)
2986 {
2987 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2988
2989 TRY
2990 {
2991 execute_command ("target record-btrace", from_tty);
2992 }
2993 CATCH (exception, RETURN_MASK_ALL)
2994 {
2995 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2996 throw_exception (exception);
2997 }
2998 END_CATCH
2999 }
3000 END_CATCH
3001 }
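
/* "record btrace" without a format thus prefers Intel Processor Trace and
   falls back to BTS if starting in PT format fails.  A usage sketch:

     (gdb) record btrace        # tries PT first, then BTS
     (gdb) record btrace pt     # forces PT; fails if unsupported
     (gdb) record btrace bts    # forces BTS  */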
3002
3003 /* The "set record btrace" command. */
3004
3005 static void
3006 cmd_set_record_btrace (const char *args, int from_tty)
3007 {
3008 printf_unfiltered (_("\"set record btrace\" must be followed "
3009 "by an appropriate subcommand.\n"));
3010 help_list (set_record_btrace_cmdlist, "set record btrace ",
3011 all_commands, gdb_stdout);
3012 }
3013
3014 /* The "show record btrace" command. */
3015
3016 static void
3017 cmd_show_record_btrace (const char *args, int from_tty)
3018 {
3019 cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
3020 }
3021
3022 /* The "show record btrace replay-memory-access" command. */
3023
3024 static void
3025 cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
3026 struct cmd_list_element *c, const char *value)
3027 {
3028 fprintf_filtered (file, _("Replay memory access is %s.\n"),
3029 replay_memory_access);
3030 }
3031
3032 /* The "set record btrace cpu none" command. */
3033
3034 static void
3035 cmd_set_record_btrace_cpu_none (const char *args, int from_tty)
3036 {
3037 if (args != nullptr && *args != 0)
3038 error (_("Trailing junk: '%s'."), args);
3039
3040 record_btrace_cpu_state = CS_NONE;
3041 }
3042
3043 /* The "set record btrace cpu auto" command. */
3044
3045 static void
3046 cmd_set_record_btrace_cpu_auto (const char *args, int from_tty)
3047 {
3048 if (args != nullptr && *args != 0)
3049 error (_("Trailing junk: '%s'."), args);
3050
3051 record_btrace_cpu_state = CS_AUTO;
3052 }
3053
3054 /* The "set record btrace cpu" command. */
3055
3056 static void
3057 cmd_set_record_btrace_cpu (const char *args, int from_tty)
3058 {
3059 if (args == nullptr)
3060 args = "";
3061
3062 /* We use a hard-coded vendor string for now. */
3063 unsigned int family, model, stepping;
3064 int l1, l2, matches = sscanf (args, "intel: %u/%u%n/%u%n", &family,
3065 &model, &l1, &stepping, &l2);
3066 if (matches == 3)
3067 {
3068 if (strlen (args) != l2)
3069 error (_("Trailing junk: '%s'."), args + l2);
3070 }
3071 else if (matches == 2)
3072 {
3073 if (strlen (args) != l1)
3074 error (_("Trailing junk: '%s'."), args + l1);
3075
3076 stepping = 0;
3077 }
3078 else
3079 error (_("Bad format. See \"help set record btrace cpu\"."));
3080
3081 if (USHRT_MAX < family)
3082 error (_("Cpu family too big."));
3083
3084 if (UCHAR_MAX < model)
3085 error (_("Cpu model too big."));
3086
3087 if (UCHAR_MAX < stepping)
3088 error (_("Cpu stepping too big."));
3089
3090 record_btrace_cpu.vendor = CV_INTEL;
3091 record_btrace_cpu.family = family;
3092 record_btrace_cpu.model = model;
3093 record_btrace_cpu.stepping = stepping;
3094
3095 record_btrace_cpu_state = CS_CPU;
3096 }
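
/* A usage sketch (hypothetical family/model/stepping values):

     (gdb) set record btrace cpu intel: 6/158/9
     (gdb) set record btrace cpu intel: 6/158
     (gdb) set record btrace cpu auto

   The stepping component is optional and defaults to 0, matching the
   two-conversion case in the sscanf above.  */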
3097
3098 /* The "show record btrace cpu" command. */
3099
3100 static void
3101 cmd_show_record_btrace_cpu (const char *args, int from_tty)
3102 {
3103 const char *cpu;
3104
3105 if (args != nullptr && *args != 0)
3106 error (_("Trailing junk: '%s'."), args);
3107
3108 switch (record_btrace_cpu_state)
3109 {
3110 case CS_AUTO:
3111 printf_unfiltered (_("btrace cpu is 'auto'.\n"));
3112 return;
3113
3114 case CS_NONE:
3115 printf_unfiltered (_("btrace cpu is 'none'.\n"));
3116 return;
3117
3118 case CS_CPU:
3119 switch (record_btrace_cpu.vendor)
3120 {
3121 case CV_INTEL:
3122 if (record_btrace_cpu.stepping == 0)
3123 printf_unfiltered (_("btrace cpu is 'intel: %u/%u'.\n"),
3124 record_btrace_cpu.family,
3125 record_btrace_cpu.model);
3126 else
3127 printf_unfiltered (_("btrace cpu is 'intel: %u/%u/%u'.\n"),
3128 record_btrace_cpu.family,
3129 record_btrace_cpu.model,
3130 record_btrace_cpu.stepping);
3131 return;
3132 }
3133 }
3134
3135 error (_("Internal error: bad cpu state."));
3136 }
3137
3138 /* The "s record btrace bts" command. */
3139
3140 static void
3141 cmd_set_record_btrace_bts (const char *args, int from_tty)
3142 {
3143 printf_unfiltered (_("\"set record btrace bts\" must be followed "
3144 "by an appropriate subcommand.\n"));
3145 help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
3146 all_commands, gdb_stdout);
3147 }
3148
3149 /* The "show record btrace bts" command. */
3150
3151 static void
3152 cmd_show_record_btrace_bts (const char *args, int from_tty)
3153 {
3154 cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
3155 }
3156
3157 /* The "set record btrace pt" command. */
3158
3159 static void
3160 cmd_set_record_btrace_pt (const char *args, int from_tty)
3161 {
3162 printf_unfiltered (_("\"set record btrace pt\" must be followed "
3163 "by an appropriate subcommand.\n"));
3164 help_list (set_record_btrace_pt_cmdlist, "set record btrace pt ",
3165 all_commands, gdb_stdout);
3166 }
3167
3168 /* The "show record btrace pt" command. */
3169
3170 static void
3171 cmd_show_record_btrace_pt (const char *args, int from_tty)
3172 {
3173 cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, "");
3174 }
3175
3176 /* The "record bts buffer-size" show value function. */
3177
3178 static void
3179 show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
3180 struct cmd_list_element *c,
3181 const char *value)
3182 {
3183 fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
3184 value);
3185 }
3186
3187 /* The "record pt buffer-size" show value function. */
3188
3189 static void
3190 show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
3191 struct cmd_list_element *c,
3192 const char *value)
3193 {
3194 fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
3195 value);
3196 }
3197
3198 /* Initialize btrace commands. */
3199
3200 void
3201 _initialize_record_btrace (void)
3202 {
3203 add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
3204 _("Start branch trace recording."), &record_btrace_cmdlist,
3205 "record btrace ", 0, &record_cmdlist);
3206 add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
3207
3208 add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
3209 _("\
3210 Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
3211 The processor stores a from/to record for each branch into a cyclic buffer.\n\
3212 This format may not be available on all processors."),
3213 &record_btrace_cmdlist);
3214 add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);
3215
3216 add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
3217 _("\
3218 Start branch trace recording in Intel Processor Trace format.\n\n\
3219 This format may not be available on all processors."),
3220 &record_btrace_cmdlist);
3221 add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);
3222
3223 add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
3224 _("Set record options"), &set_record_btrace_cmdlist,
3225 "set record btrace ", 0, &set_record_cmdlist);
3226
3227 add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
3228 _("Show record options"), &show_record_btrace_cmdlist,
3229 "show record btrace ", 0, &show_record_cmdlist);
3230
3231 add_setshow_enum_cmd ("replay-memory-access", no_class,
3232 replay_memory_access_types, &replay_memory_access, _("\
3233 Set what memory accesses are allowed during replay."), _("\
3234 Show what memory accesses are allowed during replay."),
3235 _("Default is READ-ONLY.\n\n\
3236 The btrace record target does not trace data.\n\
3237 The memory therefore corresponds to the live target and not \
3238 to the current replay position.\n\n\
3239 When READ-ONLY, allow accesses to read-only memory during replay.\n\
3240 When READ-WRITE, allow accesses to read-only and read-write memory during \
3241 replay."),
3242 NULL, cmd_show_replay_memory_access,
3243 &set_record_btrace_cmdlist,
3244 &show_record_btrace_cmdlist);
3245
3246 add_prefix_cmd ("cpu", class_support, cmd_set_record_btrace_cpu,
3247 _("\
3248 Set the cpu to be used for trace decode.\n\n\
3249 The format is \"<vendor>:<identifier>\" or \"none\" or \"auto\" (default).\n\
3250 For vendor \"intel\" the format is \"<family>/<model>[/<stepping>]\".\n\n\
3251 When decoding branch trace, enable errata workarounds for the specified cpu.\n\
3252 The default is \"auto\", which uses the cpu on which the trace was recorded.\n\
3253 When GDB does not support that cpu, this option can be used to enable\n\
3254 workarounds for a similar cpu that GDB supports.\n\n\
3255 When set to \"none\", errata workarounds are disabled."),
3256 &set_record_btrace_cpu_cmdlist,
3257 _("set record btrace cpu "), 1,
3258 &set_record_btrace_cmdlist);
3259
3260 add_cmd ("auto", class_support, cmd_set_record_btrace_cpu_auto, _("\
3261 Automatically determine the cpu to be used for trace decode."),
3262 &set_record_btrace_cpu_cmdlist);
3263
3264 add_cmd ("none", class_support, cmd_set_record_btrace_cpu_none, _("\
3265 Do not enable errata workarounds for trace decode."),
3266 &set_record_btrace_cpu_cmdlist);
3267
3268 add_cmd ("cpu", class_support, cmd_show_record_btrace_cpu, _("\
3269 Show the cpu to be used for trace decode."),
3270 &show_record_btrace_cmdlist);
3271
3272 add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
3273 _("Set record btrace bts options"),
3274 &set_record_btrace_bts_cmdlist,
3275 "set record btrace bts ", 0, &set_record_btrace_cmdlist);
3276
3277 add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
3278 _("Show record btrace bts options"),
3279 &show_record_btrace_bts_cmdlist,
3280 "show record btrace bts ", 0, &show_record_btrace_cmdlist);
3281
3282 add_setshow_uinteger_cmd ("buffer-size", no_class,
3283 &record_btrace_conf.bts.size,
3284 _("Set the record/replay bts buffer size."),
3285 _("Show the record/replay bts buffer size."), _("\
3286 When starting recording request a trace buffer of this size. \
3287 The actual buffer size may differ from the requested size. \
3288 Use \"info record\" to see the actual buffer size.\n\n\
3289 Bigger buffers allow longer recording but also take more time to process \
3290 the recorded execution trace.\n\n\
3291 The trace buffer size may not be changed while recording."), NULL,
3292 show_record_bts_buffer_size_value,
3293 &set_record_btrace_bts_cmdlist,
3294 &show_record_btrace_bts_cmdlist);
3295
3296 add_prefix_cmd ("pt", class_support, cmd_set_record_btrace_pt,
3297 _("Set record btrace pt options"),
3298 &set_record_btrace_pt_cmdlist,
3299 "set record btrace pt ", 0, &set_record_btrace_cmdlist);
3300
3301 add_prefix_cmd ("pt", class_support, cmd_show_record_btrace_pt,
3302 _("Show record btrace pt options"),
3303 &show_record_btrace_pt_cmdlist,
3304 "show record btrace pt ", 0, &show_record_btrace_cmdlist);
3305
3306 add_setshow_uinteger_cmd ("buffer-size", no_class,
3307 &record_btrace_conf.pt.size,
3308 _("Set the record/replay pt buffer size."),
3309 _("Show the record/replay pt buffer size."), _("\
3310 Bigger buffers allow longer recording but also take more time to process \
3311 the recorded execution.\n\
3312 The actual buffer size may differ from the requested size. Use \"info record\" \
3313 to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
3314 &set_record_btrace_pt_cmdlist,
3315 &show_record_btrace_pt_cmdlist);
3316
3317 add_target (&record_btrace_ops);
3318
3319 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
3320 xcalloc, xfree);
3321
3322 record_btrace_conf.bts.size = 64 * 1024;
3323 record_btrace_conf.pt.size = 16 * 1024;
3324 }