1 /* Branch trace support for GDB, the GNU debugger.
2
3 Copyright (C) 2013-2018 Free Software Foundation, Inc.
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include "record.h"
24 #include "record-btrace.h"
25 #include "gdbthread.h"
26 #include "target.h"
27 #include "gdbcmd.h"
28 #include "disasm.h"
29 #include "observable.h"
30 #include "cli/cli-utils.h"
31 #include "source.h"
32 #include "ui-out.h"
33 #include "symtab.h"
34 #include "filenames.h"
35 #include "regcache.h"
36 #include "frame-unwind.h"
37 #include "hashtab.h"
38 #include "infrun.h"
39 #include "event-loop.h"
40 #include "inf-loop.h"
41 #include "vec.h"
42 #include "inferior.h"
43 #include <algorithm>
44
45 static const target_info record_btrace_target_info = {
46 "record-btrace",
47 N_("Branch tracing target"),
48 N_("Collect control-flow trace and provide the execution history.")
49 };
50
51 /* The target_ops of record-btrace. */
52
53 class record_btrace_target final : public target_ops
54 {
55 public:
56 record_btrace_target ()
57 { to_stratum = record_stratum; }
58
59 const target_info &info () const override
60 { return record_btrace_target_info; }
61
62 void close () override;
63 void async (int) override;
64
65 void detach (inferior *inf, int from_tty) override
66 { record_detach (this, inf, from_tty); }
67
68 void disconnect (const char *, int) override;
69
70 void mourn_inferior () override
71 { record_mourn_inferior (this); }
72
73 void kill () override
74 { record_kill (this); }
75
76 enum record_method record_method (ptid_t ptid) override;
77
78 void stop_recording () override;
79 void info_record () override;
80
81 void insn_history (int size, gdb_disassembly_flags flags) override;
82 void insn_history_from (ULONGEST from, int size,
83 gdb_disassembly_flags flags) override;
84 void insn_history_range (ULONGEST begin, ULONGEST end,
85 gdb_disassembly_flags flags) override;
86 void call_history (int size, record_print_flags flags) override;
87 void call_history_from (ULONGEST begin, int size, record_print_flags flags)
88 override;
89 void call_history_range (ULONGEST begin, ULONGEST end, record_print_flags flags)
90 override;
91
92 bool record_is_replaying (ptid_t ptid) override;
93 bool record_will_replay (ptid_t ptid, int dir) override;
94 void record_stop_replaying () override;
95
96 enum target_xfer_status xfer_partial (enum target_object object,
97 const char *annex,
98 gdb_byte *readbuf,
99 const gdb_byte *writebuf,
100 ULONGEST offset, ULONGEST len,
101 ULONGEST *xfered_len) override;
102
103 int insert_breakpoint (struct gdbarch *,
104 struct bp_target_info *) override;
105 int remove_breakpoint (struct gdbarch *, struct bp_target_info *,
106 enum remove_bp_reason) override;
107
108 void fetch_registers (struct regcache *, int) override;
109
110 void store_registers (struct regcache *, int) override;
111 void prepare_to_store (struct regcache *) override;
112
113 const struct frame_unwind *get_unwinder () override;
114
115 const struct frame_unwind *get_tailcall_unwinder () override;
116
117 void commit_resume () override;
118 void resume (ptid_t, int, enum gdb_signal) override;
119 ptid_t wait (ptid_t, struct target_waitstatus *, int) override;
120
121 void stop (ptid_t) override;
122 void update_thread_list () override;
123 bool thread_alive (ptid_t ptid) override;
124 void goto_record_begin () override;
125 void goto_record_end () override;
126 void goto_record (ULONGEST insn) override;
127
128 bool can_execute_reverse () override;
129
130 bool stopped_by_sw_breakpoint () override;
131 bool supports_stopped_by_sw_breakpoint () override;
132
133 bool stopped_by_hw_breakpoint () override;
134 bool supports_stopped_by_hw_breakpoint () override;
135
136 enum exec_direction_kind execution_direction () override;
137 void prepare_to_generate_core () override;
138 void done_generating_core () override;
139 };
140
141 static record_btrace_target record_btrace_ops;
142
145 /* Token associated with a new-thread observer enabling branch tracing
146 for the new thread. */
147 static const gdb::observers::token record_btrace_thread_observer_token;
148
149 /* Memory access types used in set/show record btrace replay-memory-access. */
150 static const char replay_memory_access_read_only[] = "read-only";
151 static const char replay_memory_access_read_write[] = "read-write";
152 static const char *const replay_memory_access_types[] =
153 {
154 replay_memory_access_read_only,
155 replay_memory_access_read_write,
156 NULL
157 };
158
159 /* The currently allowed replay memory access type. */
160 static const char *replay_memory_access = replay_memory_access_read_only;
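/* These strings back the "set record btrace replay-memory-access" enum
   command.  A registration sketch follows (the actual call lives in
   _initialize_record_btrace, outside this excerpt, so the argument
   details here are assumptions):

     add_setshow_enum_cmd ("replay-memory-access", no_class,
                           replay_memory_access_types,
                           &replay_memory_access,
                           _("Set what memory accesses are allowed during replay."),
                           _("Show what memory accesses are allowed during replay."),
                           ... help text and set/show hooks ...,
                           &set_record_btrace_cmdlist,
                           &show_record_btrace_cmdlist);  */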
161
162 /* The cpu state kinds. */
163 enum record_btrace_cpu_state_kind
164 {
165 CS_AUTO,
166 CS_NONE,
167 CS_CPU
168 };
169
170 /* The current cpu state. */
171 static enum record_btrace_cpu_state_kind record_btrace_cpu_state = CS_AUTO;
172
173 /* The current cpu for trace decode. */
174 static struct btrace_cpu record_btrace_cpu;
175
176 /* Command lists for "set/show record btrace". */
177 static struct cmd_list_element *set_record_btrace_cmdlist;
178 static struct cmd_list_element *show_record_btrace_cmdlist;
179
180 /* The execution direction of the last resume we got. See record-full.c. */
181 static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;
182
183 /* The async event handler for reverse/replay execution. */
184 static struct async_event_handler *record_btrace_async_inferior_event_handler;
185
186 /* A flag indicating that we are currently generating a core file. */
187 static int record_btrace_generating_corefile;
188
189 /* The current branch trace configuration. */
190 static struct btrace_config record_btrace_conf;
191
192 /* Command list for "record btrace". */
193 static struct cmd_list_element *record_btrace_cmdlist;
194
195 /* Command lists for "set/show record btrace bts". */
196 static struct cmd_list_element *set_record_btrace_bts_cmdlist;
197 static struct cmd_list_element *show_record_btrace_bts_cmdlist;
198
199 /* Command lists for "set/show record btrace pt". */
200 static struct cmd_list_element *set_record_btrace_pt_cmdlist;
201 static struct cmd_list_element *show_record_btrace_pt_cmdlist;
202
203 /* Command list for "set record btrace cpu". */
204 static struct cmd_list_element *set_record_btrace_cpu_cmdlist;
205
206 /* Print a record-btrace debug message. Use do ... while (0) to avoid
207 ambiguities when used in if statements. */
208
209 #define DEBUG(msg, args...) \
210 do \
211 { \
212 if (record_debug != 0) \
213 fprintf_unfiltered (gdb_stdlog, \
214 "[record-btrace] " msg "\n", ##args); \
215 } \
216 while (0)
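/* Why the do ... while (0): it turns the macro body into a single
   statement that consumes the trailing semicolon.  With a bare braced
   block instead, code such as

     if (record_debug)
       DEBUG ("resume");
     else
       ...

   would expand to "{ ... };" followed by "else", and the stray
   semicolon would detach the else from its if.  */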
217
218
219 /* Return the cpu configured by the user. Returns NULL if the cpu was
220 configured as auto. */
221 const struct btrace_cpu *
222 record_btrace_get_cpu (void)
223 {
224 switch (record_btrace_cpu_state)
225 {
226 case CS_AUTO:
227 return nullptr;
228
229 case CS_NONE:
230 record_btrace_cpu.vendor = CV_UNKNOWN;
231 /* Fall through. */
232 case CS_CPU:
233 return &record_btrace_cpu;
234 }
235
236 error (_("Internal error: bad record btrace cpu state."));
237 }
238
239 /* Update the branch trace for the current thread and return a pointer to its
240 thread_info.
241
242 Throws an error if there is no thread or no trace. This function never
243 returns NULL. */
244
245 static struct thread_info *
246 require_btrace_thread (void)
247 {
248 DEBUG ("require");
249
250 if (inferior_ptid == null_ptid)
251 error (_("No thread."));
252
253 thread_info *tp = inferior_thread ();
254
255 validate_registers_access ();
256
257 btrace_fetch (tp, record_btrace_get_cpu ());
258
259 if (btrace_is_empty (tp))
260 error (_("No trace."));
261
262 return tp;
263 }
264
265 /* Update the branch trace for the current thread and return a pointer to its
266 branch trace information struct.
267
268 Throws an error if there is no thread or no trace. This function never
269 returns NULL. */
270
271 static struct btrace_thread_info *
272 require_btrace (void)
273 {
274 struct thread_info *tp;
275
276 tp = require_btrace_thread ();
277
278 return &tp->btrace;
279 }
280
281 /* Enable branch tracing for one thread. Warn on errors. */
282
283 static void
284 record_btrace_enable_warn (struct thread_info *tp)
285 {
286 TRY
287 {
288 btrace_enable (tp, &record_btrace_conf);
289 }
290 CATCH (error, RETURN_MASK_ERROR)
291 {
292 warning ("%s", error.message);
293 }
294 END_CATCH
295 }
296
297 /* Enable automatic tracing of new threads. */
298
299 static void
300 record_btrace_auto_enable (void)
301 {
302 DEBUG ("attach thread observer");
303
304 gdb::observers::new_thread.attach (record_btrace_enable_warn,
305 record_btrace_thread_observer_token);
306 }
307
308 /* Disable automatic tracing of new threads. */
309
310 static void
311 record_btrace_auto_disable (void)
312 {
313 DEBUG ("detach thread observer");
314
315 gdb::observers::new_thread.detach (record_btrace_thread_observer_token);
316 }
317
318 /* The record-btrace async event handler function. */
319
320 static void
321 record_btrace_handle_async_inferior_event (gdb_client_data data)
322 {
323 inferior_event_handler (INF_REG_EVENT, NULL);
324 }
325
326 /* See record-btrace.h. */
327
328 void
329 record_btrace_push_target (void)
330 {
331 const char *format;
332
333 record_btrace_auto_enable ();
334
335 push_target (&record_btrace_ops);
336
337 record_btrace_async_inferior_event_handler
338 = create_async_event_handler (record_btrace_handle_async_inferior_event,
339 NULL);
340 record_btrace_generating_corefile = 0;
341
342 format = btrace_format_short_string (record_btrace_conf.format);
343 gdb::observers::record_changed.notify (current_inferior (), 1, "btrace", format);
344 }
345
346 /* Disable btrace on a set of threads on scope exit. */
347
348 struct scoped_btrace_disable
349 {
350 scoped_btrace_disable () = default;
351
352 DISABLE_COPY_AND_ASSIGN (scoped_btrace_disable);
353
354 ~scoped_btrace_disable ()
355 {
356 for (thread_info *tp : m_threads)
357 btrace_disable (tp);
358 }
359
360 void add_thread (thread_info *thread)
361 {
362 m_threads.push_front (thread);
363 }
364
365 void discard ()
366 {
367 m_threads.clear ();
368 }
369
370 private:
371 std::forward_list<thread_info *> m_threads;
372 };
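/* A minimal usage sketch, mirroring record_btrace_target_open below:

     {
       scoped_btrace_disable disable;

       for (...each selected thread TP...)
         {
           btrace_enable (tp, &record_btrace_conf);   // may throw
           disable.add_thread (tp);
         }

       disable.discard ();   // success: keep tracing enabled
     }

   If btrace_enable throws mid-loop, the destructor runs and disables
   exactly the threads that were already enabled.  */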
373
374 /* Open target record-btrace. */
375
376 static void
377 record_btrace_target_open (const char *args, int from_tty)
378 {
379 /* If we fail to enable btrace for one thread, disable it for the threads for
380 which it was successfully enabled. */
381 scoped_btrace_disable btrace_disable;
382 struct thread_info *tp;
383
384 DEBUG ("open");
385
386 record_preopen ();
387
388 if (!target_has_execution)
389 error (_("The program is not being run."));
390
391 ALL_NON_EXITED_THREADS (tp)
392 if (args == NULL || *args == 0 || number_is_in_list (args, tp->global_num))
393 {
394 btrace_enable (tp, &record_btrace_conf);
395
396 btrace_disable.add_thread (tp);
397 }
398
399 record_btrace_push_target ();
400
401 btrace_disable.discard ();
402 }
403
404 /* The stop_recording method of target record-btrace. */
405
406 void
407 record_btrace_target::stop_recording ()
408 {
409 struct thread_info *tp;
410
411 DEBUG ("stop recording");
412
413 record_btrace_auto_disable ();
414
415 ALL_NON_EXITED_THREADS (tp)
416 if (tp->btrace.target != NULL)
417 btrace_disable (tp);
418 }
419
420 /* The disconnect method of target record-btrace. */
421
422 void
423 record_btrace_target::disconnect (const char *args,
424 int from_tty)
425 {
426 struct target_ops *beneath = this->beneath ();
427
428 /* Do not stop recording, just clean up the GDB side. */
429 unpush_target (this);
430
431 /* Forward disconnect. */
432 beneath->disconnect (args, from_tty);
433 }
434
435 /* The close method of target record-btrace. */
436
437 void
438 record_btrace_target::close ()
439 {
440 struct thread_info *tp;
441
442 if (record_btrace_async_inferior_event_handler != NULL)
443 delete_async_event_handler (&record_btrace_async_inferior_event_handler);
444
445 /* Make sure automatic recording gets disabled even if we did not stop
446 recording before closing the record-btrace target. */
447 record_btrace_auto_disable ();
448
449 /* We should have already stopped recording.
450 Tear down btrace in case we have not. */
451 ALL_NON_EXITED_THREADS (tp)
452 btrace_teardown (tp);
453 }
454
455 /* The async method of target record-btrace. */
456
457 void
458 record_btrace_target::async (int enable)
459 {
460 if (enable)
461 mark_async_event_handler (record_btrace_async_inferior_event_handler);
462 else
463 clear_async_event_handler (record_btrace_async_inferior_event_handler);
464
465 this->beneath ()->async (enable);
466 }
467
468 /* Adjust *SIZE and return a human-readable size suffix. */
469
470 static const char *
471 record_btrace_adjust_size (unsigned int *size)
472 {
473 unsigned int sz;
474
475 sz = *size;
476
477 if ((sz & ((1u << 30) - 1)) == 0)
478 {
479 *size = sz >> 30;
480 return "GB";
481 }
482 else if ((sz & ((1u << 20) - 1)) == 0)
483 {
484 *size = sz >> 20;
485 return "MB";
486 }
487 else if ((sz & ((1u << 10) - 1)) == 0)
488 {
489 *size = sz >> 10;
490 return "kB";
491 }
492 else
493 return "";
494 }
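/* Worked example: the masks above test whether the low bits are zero,
   i.e. whether the size is an exact multiple of the unit.  A 2 MiB
   buffer (0x200000) prints as "2MB"; a 1.5 MiB buffer (0x180000) is not
   a multiple of 1 MiB but is a multiple of 1 KiB, so it prints as
   "1536kB"; any other size is printed without a suffix.  */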
495
496 /* Print a BTS configuration. */
497
498 static void
499 record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
500 {
501 const char *suffix;
502 unsigned int size;
503
504 size = conf->size;
505 if (size > 0)
506 {
507 suffix = record_btrace_adjust_size (&size);
508 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
509 }
510 }
511
512 /* Print an Intel Processor Trace configuration. */
513
514 static void
515 record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
516 {
517 const char *suffix;
518 unsigned int size;
519
520 size = conf->size;
521 if (size > 0)
522 {
523 suffix = record_btrace_adjust_size (&size);
524 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
525 }
526 }
527
528 /* Print a branch tracing configuration. */
529
530 static void
531 record_btrace_print_conf (const struct btrace_config *conf)
532 {
533 printf_unfiltered (_("Recording format: %s.\n"),
534 btrace_format_string (conf->format));
535
536 switch (conf->format)
537 {
538 case BTRACE_FORMAT_NONE:
539 return;
540
541 case BTRACE_FORMAT_BTS:
542 record_btrace_print_bts_conf (&conf->bts);
543 return;
544
545 case BTRACE_FORMAT_PT:
546 record_btrace_print_pt_conf (&conf->pt);
547 return;
548 }
549
550 internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
551 }
552
553 /* The info_record method of target record-btrace. */
554
555 void
556 record_btrace_target::info_record ()
557 {
558 struct btrace_thread_info *btinfo;
559 const struct btrace_config *conf;
560 struct thread_info *tp;
561 unsigned int insns, calls, gaps;
562
563 DEBUG ("info");
564
565 tp = find_thread_ptid (inferior_ptid);
566 if (tp == NULL)
567 error (_("No thread."));
568
569 validate_registers_access ();
570
571 btinfo = &tp->btrace;
572
573 conf = ::btrace_conf (btinfo);
574 if (conf != NULL)
575 record_btrace_print_conf (conf);
576
577 btrace_fetch (tp, record_btrace_get_cpu ());
578
579 insns = 0;
580 calls = 0;
581 gaps = 0;
582
583 if (!btrace_is_empty (tp))
584 {
585 struct btrace_call_iterator call;
586 struct btrace_insn_iterator insn;
587
588 btrace_call_end (&call, btinfo);
589 btrace_call_prev (&call, 1);
590 calls = btrace_call_number (&call);
591
592 btrace_insn_end (&insn, btinfo);
593 insns = btrace_insn_number (&insn);
594
595 /* If the last instruction is not a gap, it is the current instruction
596 that is not actually part of the record. */
597 if (btrace_insn_get (&insn) != NULL)
598 insns -= 1;
599
600 gaps = btinfo->ngaps;
601 }
602
603 printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
604 "for thread %s (%s).\n"), insns, calls, gaps,
605 print_thread_id (tp), target_pid_to_str (tp->ptid));
606
607 if (btrace_is_replaying (tp))
608 printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
609 btrace_insn_number (btinfo->replay));
610 }
611
612 /* Print a decode error. */
613
614 static void
615 btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
616 enum btrace_format format)
617 {
618 const char *errstr = btrace_decode_error (format, errcode);
619
620 uiout->text (_("["));
621 /* ERRCODE > 0 indicates notifications on BTRACE_FORMAT_PT. */
622 if (!(format == BTRACE_FORMAT_PT && errcode > 0))
623 {
624 uiout->text (_("decode error ("));
625 uiout->field_int ("errcode", errcode);
626 uiout->text (_("): "));
627 }
628 uiout->text (errstr);
629 uiout->text (_("]\n"));
630 }
631
632 /* Print an unsigned int. */
633
634 static void
635 ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
636 {
637 uiout->field_fmt (fld, "%u", val);
638 }
639
640 /* A range of source lines. */
641
642 struct btrace_line_range
643 {
644 /* The symtab this line is from. */
645 struct symtab *symtab;
646
647 /* The first line (inclusive). */
648 int begin;
649
650 /* The last line (exclusive). */
651 int end;
652 };
653
654 /* Construct a line range. */
655
656 static struct btrace_line_range
657 btrace_mk_line_range (struct symtab *symtab, int begin, int end)
658 {
659 struct btrace_line_range range;
660
661 range.symtab = symtab;
662 range.begin = begin;
663 range.end = end;
664
665 return range;
666 }
667
668 /* Add a line to a line range. */
669
670 static struct btrace_line_range
671 btrace_line_range_add (struct btrace_line_range range, int line)
672 {
673 if (range.end <= range.begin)
674 {
675 /* This is the first entry. */
676 range.begin = line;
677 range.end = line + 1;
678 }
679 else if (line < range.begin)
680 range.begin = line;
681 else if (range.end < line)
682 range.end = line + 1;
683
684 return range;
685 }
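/* For illustration, ranges are half-open: starting from the empty
   range [0; 0), adding line 42 yields [42; 43); adding 40 then widens
   it to [40; 43), and adding 45 to [40; 46).  */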
686
687 /* Return non-zero if RANGE is empty, zero otherwise. */
688
689 static int
690 btrace_line_range_is_empty (struct btrace_line_range range)
691 {
692 return range.end <= range.begin;
693 }
694
695 /* Return non-zero if LHS contains RHS, zero otherwise. */
696
697 static int
698 btrace_line_range_contains_range (struct btrace_line_range lhs,
699 struct btrace_line_range rhs)
700 {
701 return ((lhs.symtab == rhs.symtab)
702 && (lhs.begin <= rhs.begin)
703 && (rhs.end <= lhs.end));
704 }
705
706 /* Find the line range associated with PC. */
707
708 static struct btrace_line_range
709 btrace_find_line_range (CORE_ADDR pc)
710 {
711 struct btrace_line_range range;
712 struct linetable_entry *lines;
713 struct linetable *ltable;
714 struct symtab *symtab;
715 int nlines, i;
716
717 symtab = find_pc_line_symtab (pc);
718 if (symtab == NULL)
719 return btrace_mk_line_range (NULL, 0, 0);
720
721 ltable = SYMTAB_LINETABLE (symtab);
722 if (ltable == NULL)
723 return btrace_mk_line_range (symtab, 0, 0);
724
725 nlines = ltable->nitems;
726 lines = ltable->item;
727 if (nlines <= 0)
728 return btrace_mk_line_range (symtab, 0, 0);
729
730 range = btrace_mk_line_range (symtab, 0, 0);
731 for (i = 0; i < nlines - 1; i++)
732 {
733 if ((lines[i].pc == pc) && (lines[i].line != 0))
734 range = btrace_line_range_add (range, lines[i].line);
735 }
736
737 return range;
738 }
739
740 /* Print source lines in LINES to UIOUT.
741
742 SRC_AND_ASM_TUPLE and ASM_LIST hold the output emitters for the last
743 source line and the instructions corresponding to that source line. When
744 printing a new source line, we reset the open emitters and emplace new
745 ones for the new source line. If the source line range in LINES is not
746 empty, this function leaves the emitters for the last printed source line
747 open so instructions can be added to them. */
748
749 static void
750 btrace_print_lines (struct btrace_line_range lines, struct ui_out *uiout,
751 gdb::optional<ui_out_emit_tuple> *src_and_asm_tuple,
752 gdb::optional<ui_out_emit_list> *asm_list,
753 gdb_disassembly_flags flags)
754 {
755 print_source_lines_flags psl_flags;
756
757 if (flags & DISASSEMBLY_FILENAME)
758 psl_flags |= PRINT_SOURCE_LINES_FILENAME;
759
760 for (int line = lines.begin; line < lines.end; ++line)
761 {
762 asm_list->reset ();
763
764 src_and_asm_tuple->emplace (uiout, "src_and_asm_line");
765
766 print_source_lines (lines.symtab, line, line + 1, psl_flags);
767
768 asm_list->emplace (uiout, "line_asm_insn");
769 }
770 }
771
772 /* Disassemble a section of the recorded instruction trace. */
773
774 static void
775 btrace_insn_history (struct ui_out *uiout,
776 const struct btrace_thread_info *btinfo,
777 const struct btrace_insn_iterator *begin,
778 const struct btrace_insn_iterator *end,
779 gdb_disassembly_flags flags)
780 {
781 DEBUG ("itrace (0x%x): [%u; %u)", (unsigned) flags,
782 btrace_insn_number (begin), btrace_insn_number (end));
783
784 flags |= DISASSEMBLY_SPECULATIVE;
785
786 struct gdbarch *gdbarch = target_gdbarch ();
787 btrace_line_range last_lines = btrace_mk_line_range (NULL, 0, 0);
788
789 ui_out_emit_list list_emitter (uiout, "asm_insns");
790
791 gdb::optional<ui_out_emit_tuple> src_and_asm_tuple;
792 gdb::optional<ui_out_emit_list> asm_list;
793
794 gdb_pretty_print_disassembler disasm (gdbarch);
795
796 for (btrace_insn_iterator it = *begin; btrace_insn_cmp (&it, end) != 0;
797 btrace_insn_next (&it, 1))
798 {
799 const struct btrace_insn *insn;
800
801 insn = btrace_insn_get (&it);
802
803 /* A NULL instruction indicates a gap in the trace. */
804 if (insn == NULL)
805 {
806 const struct btrace_config *conf;
807
808 conf = btrace_conf (btinfo);
809
810 /* We have trace so we must have a configuration. */
811 gdb_assert (conf != NULL);
812
813 uiout->field_fmt ("insn-number", "%u",
814 btrace_insn_number (&it));
815 uiout->text ("\t");
816
817 btrace_ui_out_decode_error (uiout, btrace_insn_get_error (&it),
818 conf->format);
819 }
820 else
821 {
822 struct disasm_insn dinsn;
823
824 if ((flags & DISASSEMBLY_SOURCE) != 0)
825 {
826 struct btrace_line_range lines;
827
828 lines = btrace_find_line_range (insn->pc);
829 if (!btrace_line_range_is_empty (lines)
830 && !btrace_line_range_contains_range (last_lines, lines))
831 {
832 btrace_print_lines (lines, uiout, &src_and_asm_tuple, &asm_list,
833 flags);
834 last_lines = lines;
835 }
836 else if (!src_and_asm_tuple.has_value ())
837 {
838 gdb_assert (!asm_list.has_value ());
839
840 src_and_asm_tuple.emplace (uiout, "src_and_asm_line");
841
842 /* No source information. */
843 asm_list.emplace (uiout, "line_asm_insn");
844 }
845
846 gdb_assert (src_and_asm_tuple.has_value ());
847 gdb_assert (asm_list.has_value ());
848 }
849
850 memset (&dinsn, 0, sizeof (dinsn));
851 dinsn.number = btrace_insn_number (&it);
852 dinsn.addr = insn->pc;
853
854 if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
855 dinsn.is_speculative = 1;
856
857 disasm.pretty_print_insn (uiout, &dinsn, flags);
858 }
859 }
860 }
861
862 /* The insn_history method of target record-btrace. */
863
864 void
865 record_btrace_target::insn_history (int size, gdb_disassembly_flags flags)
866 {
867 struct btrace_thread_info *btinfo;
868 struct btrace_insn_history *history;
869 struct btrace_insn_iterator begin, end;
870 struct ui_out *uiout;
871 unsigned int context, covered;
872
873 uiout = current_uiout;
874 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
875 context = abs (size);
876 if (context == 0)
877 error (_("Bad record instruction-history-size."));
878
879 btinfo = require_btrace ();
880 history = btinfo->insn_history;
881 if (history == NULL)
882 {
883 struct btrace_insn_iterator *replay;
884
885 DEBUG ("insn-history (0x%x): %d", (unsigned) flags, size);
886
887 /* If we're replaying, we start at the replay position. Otherwise, we
888 start at the tail of the trace. */
889 replay = btinfo->replay;
890 if (replay != NULL)
891 begin = *replay;
892 else
893 btrace_insn_end (&begin, btinfo);
894
895 /* We start from here and expand in the requested direction. Then we
896 expand in the other direction, as well, to fill up any remaining
897 context. */
898 end = begin;
899 if (size < 0)
900 {
901 /* We want the current position covered, as well. */
902 covered = btrace_insn_next (&end, 1);
903 covered += btrace_insn_prev (&begin, context - covered);
904 covered += btrace_insn_next (&end, context - covered);
905 }
906 else
907 {
908 covered = btrace_insn_next (&end, context);
909 covered += btrace_insn_prev (&begin, context - covered);
910 }
911 }
912 else
913 {
914 begin = history->begin;
915 end = history->end;
916
917 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", (unsigned) flags, size,
918 btrace_insn_number (&begin), btrace_insn_number (&end));
919
920 if (size < 0)
921 {
922 end = begin;
923 covered = btrace_insn_prev (&begin, context);
924 }
925 else
926 {
927 begin = end;
928 covered = btrace_insn_next (&end, context);
929 }
930 }
931
932 if (covered > 0)
933 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
934 else
935 {
936 if (size < 0)
937 printf_unfiltered (_("At the start of the branch trace record.\n"));
938 else
939 printf_unfiltered (_("At the end of the branch trace record.\n"));
940 }
941
942 btrace_set_insn_history (btinfo, &begin, &end);
943 }
944
945 /* The insn_history_range method of target record-btrace. */
946
947 void
948 record_btrace_target::insn_history_range (ULONGEST from, ULONGEST to,
949 gdb_disassembly_flags flags)
950 {
951 struct btrace_thread_info *btinfo;
952 struct btrace_insn_iterator begin, end;
953 struct ui_out *uiout;
954 unsigned int low, high;
955 int found;
956
957 uiout = current_uiout;
958 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
959 low = from;
960 high = to;
961
962 DEBUG ("insn-history (0x%x): [%u; %u)", (unsigned) flags, low, high);
963
964 /* Check for wrap-arounds. */
965 if (low != from || high != to)
966 error (_("Bad range."));
967
968 if (high < low)
969 error (_("Bad range."));
970
971 btinfo = require_btrace ();
972
973 found = btrace_find_insn_by_number (&begin, btinfo, low);
974 if (found == 0)
975 error (_("Range out of bounds."));
976
977 found = btrace_find_insn_by_number (&end, btinfo, high);
978 if (found == 0)
979 {
980 /* Silently truncate the range. */
981 btrace_insn_end (&end, btinfo);
982 }
983 else
984 {
985 /* We want both begin and end to be inclusive. */
986 btrace_insn_next (&end, 1);
987 }
988
989 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
990 btrace_set_insn_history (btinfo, &begin, &end);
991 }
992
993 /* The insn_history_from method of target record-btrace. */
994
995 void
996 record_btrace_target::insn_history_from (ULONGEST from, int size,
997 gdb_disassembly_flags flags)
998 {
999 ULONGEST begin, end, context;
1000
1001 context = abs (size);
1002 if (context == 0)
1003 error (_("Bad record instruction-history-size."));
1004
1005 if (size < 0)
1006 {
1007 end = from;
1008
1009 if (from < context)
1010 begin = 0;
1011 else
1012 begin = from - context + 1;
1013 }
1014 else
1015 {
1016 begin = from;
1017 end = from + context - 1;
1018
1019 /* Check for wrap-around. */
1020 if (end < begin)
1021 end = ULONGEST_MAX;
1022 }
1023
1024 insn_history_range (begin, end, flags);
1025 }
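/* Worked example for the arithmetic above: with FROM = 100 and
   SIZE = 10, the requested range is [100; 109]; with SIZE = -10 it is
   [91; 100], clamped to begin at 0 if FROM were smaller than the
   context.  Both bounds are inclusive here; insn_history_range then
   converts the inclusive END into the exclusive form used internally.  */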
1026
1027 /* Print the instruction number range for a function call history line. */
1028
1029 static void
1030 btrace_call_history_insn_range (struct ui_out *uiout,
1031 const struct btrace_function *bfun)
1032 {
1033 unsigned int begin, end, size;
1034
1035 size = bfun->insn.size ();
1036 gdb_assert (size > 0);
1037
1038 begin = bfun->insn_offset;
1039 end = begin + size - 1;
1040
1041 ui_out_field_uint (uiout, "insn begin", begin);
1042 uiout->text (",");
1043 ui_out_field_uint (uiout, "insn end", end);
1044 }
1045
1046 /* Compute the lowest and highest source line for the instructions in BFUN
1047 and return them in PBEGIN and PEND.
1048 Ignore instructions that can't be mapped to BFUN, e.g. instructions that
1049 result from inlining or macro expansion. */
1050
1051 static void
1052 btrace_compute_src_line_range (const struct btrace_function *bfun,
1053 int *pbegin, int *pend)
1054 {
1055 struct symtab *symtab;
1056 struct symbol *sym;
1057 int begin, end;
1058
1059 begin = INT_MAX;
1060 end = INT_MIN;
1061
1062 sym = bfun->sym;
1063 if (sym == NULL)
1064 goto out;
1065
1066 symtab = symbol_symtab (sym);
1067
1068 for (const btrace_insn &insn : bfun->insn)
1069 {
1070 struct symtab_and_line sal;
1071
1072 sal = find_pc_line (insn.pc, 0);
1073 if (sal.symtab != symtab || sal.line == 0)
1074 continue;
1075
1076 begin = std::min (begin, sal.line);
1077 end = std::max (end, sal.line);
1078 }
1079
1080 out:
1081 *pbegin = begin;
1082 *pend = end;
1083 }
1084
1085 /* Print the source line information for a function call history line. */
1086
1087 static void
1088 btrace_call_history_src_line (struct ui_out *uiout,
1089 const struct btrace_function *bfun)
1090 {
1091 struct symbol *sym;
1092 int begin, end;
1093
1094 sym = bfun->sym;
1095 if (sym == NULL)
1096 return;
1097
1098 uiout->field_string ("file",
1099 symtab_to_filename_for_display (symbol_symtab (sym)));
1100
1101 btrace_compute_src_line_range (bfun, &begin, &end);
1102 if (end < begin)
1103 return;
1104
1105 uiout->text (":");
1106 uiout->field_int ("min line", begin);
1107
1108 if (end == begin)
1109 return;
1110
1111 uiout->text (",");
1112 uiout->field_int ("max line", end);
1113 }
1114
1115 /* Get the name of a branch trace function. */
1116
1117 static const char *
1118 btrace_get_bfun_name (const struct btrace_function *bfun)
1119 {
1120 struct minimal_symbol *msym;
1121 struct symbol *sym;
1122
1123 if (bfun == NULL)
1124 return "??";
1125
1126 msym = bfun->msym;
1127 sym = bfun->sym;
1128
1129 if (sym != NULL)
1130 return SYMBOL_PRINT_NAME (sym);
1131 else if (msym != NULL)
1132 return MSYMBOL_PRINT_NAME (msym);
1133 else
1134 return "??";
1135 }
1136
1137 /* Disassemble a section of the recorded function trace. */
1138
1139 static void
1140 btrace_call_history (struct ui_out *uiout,
1141 const struct btrace_thread_info *btinfo,
1142 const struct btrace_call_iterator *begin,
1143 const struct btrace_call_iterator *end,
1144 int int_flags)
1145 {
1146 struct btrace_call_iterator it;
1147 record_print_flags flags = (enum record_print_flag) int_flags;
1148
1149 DEBUG ("ftrace (0x%x): [%u; %u)", int_flags, btrace_call_number (begin),
1150 btrace_call_number (end));
1151
1152 for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
1153 {
1154 const struct btrace_function *bfun;
1155 struct minimal_symbol *msym;
1156 struct symbol *sym;
1157
1158 bfun = btrace_call_get (&it);
1159 sym = bfun->sym;
1160 msym = bfun->msym;
1161
1162 /* Print the function index. */
1163 ui_out_field_uint (uiout, "index", bfun->number);
1164 uiout->text ("\t");
1165
1166 /* Indicate gaps in the trace. */
1167 if (bfun->errcode != 0)
1168 {
1169 const struct btrace_config *conf;
1170
1171 conf = btrace_conf (btinfo);
1172
1173 /* We have trace so we must have a configuration. */
1174 gdb_assert (conf != NULL);
1175
1176 btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);
1177
1178 continue;
1179 }
1180
1181 if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
1182 {
1183 int level = bfun->level + btinfo->level, i;
1184
1185 for (i = 0; i < level; ++i)
1186 uiout->text (" ");
1187 }
1188
1189 if (sym != NULL)
1190 uiout->field_string ("function", SYMBOL_PRINT_NAME (sym));
1191 else if (msym != NULL)
1192 uiout->field_string ("function", MSYMBOL_PRINT_NAME (msym));
1193 else if (!uiout->is_mi_like_p ())
1194 uiout->field_string ("function", "??");
1195
1196 if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
1197 {
1198 uiout->text (_("\tinst "));
1199 btrace_call_history_insn_range (uiout, bfun);
1200 }
1201
1202 if ((flags & RECORD_PRINT_SRC_LINE) != 0)
1203 {
1204 uiout->text (_("\tat "));
1205 btrace_call_history_src_line (uiout, bfun);
1206 }
1207
1208 uiout->text ("\n");
1209 }
1210 }
1211
1212 /* The call_history method of target record-btrace. */
1213
1214 void
1215 record_btrace_target::call_history (int size, record_print_flags flags)
1216 {
1217 struct btrace_thread_info *btinfo;
1218 struct btrace_call_history *history;
1219 struct btrace_call_iterator begin, end;
1220 struct ui_out *uiout;
1221 unsigned int context, covered;
1222
1223 uiout = current_uiout;
1224 ui_out_emit_tuple tuple_emitter (uiout, "func history");
1225 context = abs (size);
1226 if (context == 0)
1227 error (_("Bad record function-call-history-size."));
1228
1229 btinfo = require_btrace ();
1230 history = btinfo->call_history;
1231 if (history == NULL)
1232 {
1233 struct btrace_insn_iterator *replay;
1234
1235 DEBUG ("call-history (0x%x): %d", (int) flags, size);
1236
1237 /* If we're replaying, we start at the replay position. Otherwise, we
1238 start at the tail of the trace. */
1239 replay = btinfo->replay;
1240 if (replay != NULL)
1241 {
1242 begin.btinfo = btinfo;
1243 begin.index = replay->call_index;
1244 }
1245 else
1246 btrace_call_end (&begin, btinfo);
1247
1248 /* We start from here and expand in the requested direction. Then we
1249 expand in the other direction, as well, to fill up any remaining
1250 context. */
1251 end = begin;
1252 if (size < 0)
1253 {
1254 /* We want the current position covered, as well. */
1255 covered = btrace_call_next (&end, 1);
1256 covered += btrace_call_prev (&begin, context - covered);
1257 covered += btrace_call_next (&end, context - covered);
1258 }
1259 else
1260 {
1261 covered = btrace_call_next (&end, context);
1262 covered += btrace_call_prev (&begin, context - covered);
1263 }
1264 }
1265 else
1266 {
1267 begin = history->begin;
1268 end = history->end;
1269
1270 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", (int) flags, size,
1271 btrace_call_number (&begin), btrace_call_number (&end));
1272
1273 if (size < 0)
1274 {
1275 end = begin;
1276 covered = btrace_call_prev (&begin, context);
1277 }
1278 else
1279 {
1280 begin = end;
1281 covered = btrace_call_next (&end, context);
1282 }
1283 }
1284
1285 if (covered > 0)
1286 btrace_call_history (uiout, btinfo, &begin, &end, flags);
1287 else
1288 {
1289 if (size < 0)
1290 printf_unfiltered (_("At the start of the branch trace record.\n"));
1291 else
1292 printf_unfiltered (_("At the end of the branch trace record.\n"));
1293 }
1294
1295 btrace_set_call_history (btinfo, &begin, &end);
1296 }
1297
1298 /* The call_history_range method of target record-btrace. */
1299
1300 void
1301 record_btrace_target::call_history_range (ULONGEST from, ULONGEST to,
1302 record_print_flags flags)
1303 {
1304 struct btrace_thread_info *btinfo;
1305 struct btrace_call_iterator begin, end;
1306 struct ui_out *uiout;
1307 unsigned int low, high;
1308 int found;
1309
1310 uiout = current_uiout;
1311 ui_out_emit_tuple tuple_emitter (uiout, "func history");
1312 low = from;
1313 high = to;
1314
1315 DEBUG ("call-history (0x%x): [%u; %u)", (int) flags, low, high);
1316
1317 /* Check for wrap-arounds. */
1318 if (low != from || high != to)
1319 error (_("Bad range."));
1320
1321 if (high < low)
1322 error (_("Bad range."));
1323
1324 btinfo = require_btrace ();
1325
1326 found = btrace_find_call_by_number (&begin, btinfo, low);
1327 if (found == 0)
1328 error (_("Range out of bounds."));
1329
1330 found = btrace_find_call_by_number (&end, btinfo, high);
1331 if (found == 0)
1332 {
1333 /* Silently truncate the range. */
1334 btrace_call_end (&end, btinfo);
1335 }
1336 else
1337 {
1338 /* We want both begin and end to be inclusive. */
1339 btrace_call_next (&end, 1);
1340 }
1341
1342 btrace_call_history (uiout, btinfo, &begin, &end, flags);
1343 btrace_set_call_history (btinfo, &begin, &end);
1344 }
1345
1346 /* The call_history_from method of target record-btrace. */
1347
1348 void
1349 record_btrace_target::call_history_from (ULONGEST from, int size,
1350 record_print_flags flags)
1351 {
1352 ULONGEST begin, end, context;
1353
1354 context = abs (size);
1355 if (context == 0)
1356 error (_("Bad record function-call-history-size."));
1357
1358 if (size < 0)
1359 {
1360 end = from;
1361
1362 if (from < context)
1363 begin = 0;
1364 else
1365 begin = from - context + 1;
1366 }
1367 else
1368 {
1369 begin = from;
1370 end = from + context - 1;
1371
1372 /* Check for wrap-around. */
1373 if (end < begin)
1374 end = ULONGEST_MAX;
1375 }
1376
1377 call_history_range (begin, end, flags);
1378 }
1379
1380 /* The record_method method of target record-btrace. */
1381
1382 enum record_method
1383 record_btrace_target::record_method (ptid_t ptid)
1384 {
1385 struct thread_info * const tp = find_thread_ptid (ptid);
1386
1387 if (tp == NULL)
1388 error (_("No thread."));
1389
1390 if (tp->btrace.target == NULL)
1391 return RECORD_METHOD_NONE;
1392
1393 return RECORD_METHOD_BTRACE;
1394 }
1395
1396 /* The record_is_replaying method of target record-btrace. */
1397
1398 bool
1399 record_btrace_target::record_is_replaying (ptid_t ptid)
1400 {
1401 struct thread_info *tp;
1402
1403 ALL_NON_EXITED_THREADS (tp)
1404 if (ptid_match (tp->ptid, ptid) && btrace_is_replaying (tp))
1405 return true;
1406
1407 return false;
1408 }
1409
1410 /* The record_will_replay method of target record-btrace. */
1411
1412 bool
1413 record_btrace_target::record_will_replay (ptid_t ptid, int dir)
1414 {
1415 return dir == EXEC_REVERSE || record_is_replaying (ptid);
1416 }
1417
1418 /* The xfer_partial method of target record-btrace. */
1419
1420 enum target_xfer_status
1421 record_btrace_target::xfer_partial (enum target_object object,
1422 const char *annex, gdb_byte *readbuf,
1423 const gdb_byte *writebuf, ULONGEST offset,
1424 ULONGEST len, ULONGEST *xfered_len)
1425 {
1426 /* Filter out requests that don't make sense during replay. */
1427 if (replay_memory_access == replay_memory_access_read_only
1428 && !record_btrace_generating_corefile
1429 && record_is_replaying (inferior_ptid))
1430 {
1431 switch (object)
1432 {
1433 case TARGET_OBJECT_MEMORY:
1434 {
1435 struct target_section *section;
1436
1437 /* We do not allow writing memory in general. */
1438 if (writebuf != NULL)
1439 {
1440 *xfered_len = len;
1441 return TARGET_XFER_UNAVAILABLE;
1442 }
1443
1444 /* We allow reading readonly memory. */
1445 section = target_section_by_addr (this, offset);
1446 if (section != NULL)
1447 {
1448 /* Check if the section we found is readonly. */
1449 if ((bfd_get_section_flags (section->the_bfd_section->owner,
1450 section->the_bfd_section)
1451 & SEC_READONLY) != 0)
1452 {
1453 /* Truncate the request to fit into this section. */
1454 len = std::min (len, section->endaddr - offset);
1455 break;
1456 }
1457 }
1458
1459 *xfered_len = len;
1460 return TARGET_XFER_UNAVAILABLE;
1461 }
1462 }
1463 }
1464
1465 /* Forward the request. */
1466 return this->beneath ()->xfer_partial (object, annex, readbuf, writebuf,
1467 offset, len, xfered_len);
1468 }
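/* The user-visible knob for the filtering above is

     (gdb) set record btrace replay-memory-access read-write

   (the command name is from the GDB manual).  With the default
   "read-only" setting, while replaying, writes and reads outside
   SEC_READONLY sections report TARGET_XFER_UNAVAILABLE; switching to
   "read-write" forwards all requests to the target beneath.  */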
1469
1470 /* The insert_breakpoint method of target record-btrace. */
1471
1472 int
1473 record_btrace_target::insert_breakpoint (struct gdbarch *gdbarch,
1474 struct bp_target_info *bp_tgt)
1475 {
1476 const char *old;
1477 int ret;
1478
1479 /* Inserting breakpoints requires accessing memory. Allow it for the
1480 duration of this function. */
1481 old = replay_memory_access;
1482 replay_memory_access = replay_memory_access_read_write;
1483
1484 ret = 0;
1485 TRY
1486 {
1487 ret = this->beneath ()->insert_breakpoint (gdbarch, bp_tgt);
1488 }
1489 CATCH (except, RETURN_MASK_ALL)
1490 {
1491 replay_memory_access = old;
1492 throw_exception (except);
1493 }
1494 END_CATCH
1495 replay_memory_access = old;
1496
1497 return ret;
1498 }
1499
1500 /* The remove_breakpoint method of target record-btrace. */
1501
1502 int
1503 record_btrace_target::remove_breakpoint (struct gdbarch *gdbarch,
1504 struct bp_target_info *bp_tgt,
1505 enum remove_bp_reason reason)
1506 {
1507 const char *old;
1508 int ret;
1509
1510 /* Removing breakpoints requires accessing memory. Allow it for the
1511 duration of this function. */
1512 old = replay_memory_access;
1513 replay_memory_access = replay_memory_access_read_write;
1514
1515 ret = 0;
1516 TRY
1517 {
1518 ret = this->beneath ()->remove_breakpoint (gdbarch, bp_tgt, reason);
1519 }
1520 CATCH (except, RETURN_MASK_ALL)
1521 {
1522 replay_memory_access = old;
1523 throw_exception (except);
1524 }
1525 END_CATCH
1526 replay_memory_access = old;
1527
1528 return ret;
1529 }
1530
1531 /* The fetch_registers method of target record-btrace. */
1532
1533 void
1534 record_btrace_target::fetch_registers (struct regcache *regcache, int regno)
1535 {
1536 struct btrace_insn_iterator *replay;
1537 struct thread_info *tp;
1538
1539 tp = find_thread_ptid (regcache->ptid ());
1540 gdb_assert (tp != NULL);
1541
1542 replay = tp->btrace.replay;
1543 if (replay != NULL && !record_btrace_generating_corefile)
1544 {
1545 const struct btrace_insn *insn;
1546 struct gdbarch *gdbarch;
1547 int pcreg;
1548
1549 gdbarch = regcache->arch ();
1550 pcreg = gdbarch_pc_regnum (gdbarch);
1551 if (pcreg < 0)
1552 return;
1553
1554 /* We can only provide the PC register. */
1555 if (regno >= 0 && regno != pcreg)
1556 return;
1557
1558 insn = btrace_insn_get (replay);
1559 gdb_assert (insn != NULL);
1560
1561 regcache->raw_supply (regno, &insn->pc);
1562 }
1563 else
1564 this->beneath ()->fetch_registers (regcache, regno);
1565 }
1566
1567 /* The store_registers method of target record-btrace. */
1568
1569 void
1570 record_btrace_target::store_registers (struct regcache *regcache, int regno)
1571 {
1572 struct target_ops *t;
1573
1574 if (!record_btrace_generating_corefile
1575 && record_is_replaying (regcache->ptid ()))
1576 error (_("Cannot write registers while replaying."));
1577
1578 gdb_assert (may_write_registers != 0);
1579
1580 this->beneath ()->store_registers (regcache, regno);
1581 }
1582
1583 /* The prepare_to_store method of target record-btrace. */
1584
1585 void
1586 record_btrace_target::prepare_to_store (struct regcache *regcache)
1587 {
1588 if (!record_btrace_generating_corefile
1589 && record_is_replaying (regcache->ptid ()))
1590 return;
1591
1592 this->beneath ()->prepare_to_store (regcache);
1593 }
1594
1595 /* The branch trace frame cache. */
1596
1597 struct btrace_frame_cache
1598 {
1599 /* The thread. */
1600 struct thread_info *tp;
1601
1602 /* The frame info. */
1603 struct frame_info *frame;
1604
1605 /* The branch trace function segment. */
1606 const struct btrace_function *bfun;
1607 };
1608
1609 /* A struct btrace_frame_cache hash table indexed by NEXT. */
1610
1611 static htab_t bfcache;
1612
1613 /* hash_f for htab_create_alloc of bfcache. */
1614
1615 static hashval_t
1616 bfcache_hash (const void *arg)
1617 {
1618 const struct btrace_frame_cache *cache
1619 = (const struct btrace_frame_cache *) arg;
1620
1621 return htab_hash_pointer (cache->frame);
1622 }
1623
1624 /* eq_f for htab_create_alloc of bfcache. */
1625
1626 static int
1627 bfcache_eq (const void *arg1, const void *arg2)
1628 {
1629 const struct btrace_frame_cache *cache1
1630 = (const struct btrace_frame_cache *) arg1;
1631 const struct btrace_frame_cache *cache2
1632 = (const struct btrace_frame_cache *) arg2;
1633
1634 return cache1->frame == cache2->frame;
1635 }
1636
1637 /* Create a new btrace frame cache. */
1638
1639 static struct btrace_frame_cache *
1640 bfcache_new (struct frame_info *frame)
1641 {
1642 struct btrace_frame_cache *cache;
1643 void **slot;
1644
1645 cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
1646 cache->frame = frame;
1647
1648 slot = htab_find_slot (bfcache, cache, INSERT);
1649 gdb_assert (*slot == NULL);
1650 *slot = cache;
1651
1652 return cache;
1653 }
1654
1655 /* Extract the branch trace function from a branch trace frame. */
1656
1657 static const struct btrace_function *
1658 btrace_get_frame_function (struct frame_info *frame)
1659 {
1660 const struct btrace_frame_cache *cache;
1661 struct btrace_frame_cache pattern;
1662 void **slot;
1663
1664 pattern.frame = frame;
1665
1666 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1667 if (slot == NULL)
1668 return NULL;
1669
1670 cache = (const struct btrace_frame_cache *) *slot;
1671 return cache->bfun;
1672 }
1673
1674 /* Implement stop_reason method for record_btrace_frame_unwind. */
1675
1676 static enum unwind_stop_reason
1677 record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1678 void **this_cache)
1679 {
1680 const struct btrace_frame_cache *cache;
1681 const struct btrace_function *bfun;
1682
1683 cache = (const struct btrace_frame_cache *) *this_cache;
1684 bfun = cache->bfun;
1685 gdb_assert (bfun != NULL);
1686
1687 if (bfun->up == 0)
1688 return UNWIND_UNAVAILABLE;
1689
1690 return UNWIND_NO_REASON;
1691 }
1692
1693 /* Implement this_id method for record_btrace_frame_unwind. */
1694
1695 static void
1696 record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
1697 struct frame_id *this_id)
1698 {
1699 const struct btrace_frame_cache *cache;
1700 const struct btrace_function *bfun;
1701 struct btrace_call_iterator it;
1702 CORE_ADDR code, special;
1703
1704 cache = (const struct btrace_frame_cache *) *this_cache;
1705
1706 bfun = cache->bfun;
1707 gdb_assert (bfun != NULL);
1708
1709 while (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->prev) != 0)
1710 bfun = btrace_call_get (&it);
1711
1712 code = get_frame_func (this_frame);
1713 special = bfun->number;
1714
1715 *this_id = frame_id_build_unavailable_stack_special (code, special);
1716
1717 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1718 btrace_get_bfun_name (cache->bfun),
1719 core_addr_to_string_nz (this_id->code_addr),
1720 core_addr_to_string_nz (this_id->special_addr));
1721 }
1722
1723 /* Implement prev_register method for record_btrace_frame_unwind. */
1724
1725 static struct value *
1726 record_btrace_frame_prev_register (struct frame_info *this_frame,
1727 void **this_cache,
1728 int regnum)
1729 {
1730 const struct btrace_frame_cache *cache;
1731 const struct btrace_function *bfun, *caller;
1732 struct btrace_call_iterator it;
1733 struct gdbarch *gdbarch;
1734 CORE_ADDR pc;
1735 int pcreg;
1736
1737 gdbarch = get_frame_arch (this_frame);
1738 pcreg = gdbarch_pc_regnum (gdbarch);
1739 if (pcreg < 0 || regnum != pcreg)
1740 throw_error (NOT_AVAILABLE_ERROR,
1741 _("Registers are not available in btrace record history"));
1742
1743 cache = (const struct btrace_frame_cache *) *this_cache;
1744 bfun = cache->bfun;
1745 gdb_assert (bfun != NULL);
1746
1747 if (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->up) == 0)
1748 throw_error (NOT_AVAILABLE_ERROR,
1749 _("No caller in btrace record history"));
1750
1751 caller = btrace_call_get (&it);
1752
1753 if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
1754 pc = caller->insn.front ().pc;
1755 else
1756 {
1757 pc = caller->insn.back ().pc;
1758 pc += gdb_insn_length (gdbarch, pc);
1759 }
1760
1761 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1762 btrace_get_bfun_name (bfun), bfun->level,
1763 core_addr_to_string_nz (pc));
1764
1765 return frame_unwind_got_address (this_frame, regnum, pc);
1766 }
1767
1768 /* Implement sniffer method for record_btrace_frame_unwind. */
1769
1770 static int
1771 record_btrace_frame_sniffer (const struct frame_unwind *self,
1772 struct frame_info *this_frame,
1773 void **this_cache)
1774 {
1775 const struct btrace_function *bfun;
1776 struct btrace_frame_cache *cache;
1777 struct thread_info *tp;
1778 struct frame_info *next;
1779
1780 /* THIS_FRAME does not contain a reference to its thread. */
1781 tp = inferior_thread ();
1782
1783 bfun = NULL;
1784 next = get_next_frame (this_frame);
1785 if (next == NULL)
1786 {
1787 const struct btrace_insn_iterator *replay;
1788
1789 replay = tp->btrace.replay;
1790 if (replay != NULL)
1791 bfun = &replay->btinfo->functions[replay->call_index];
1792 }
1793 else
1794 {
1795 const struct btrace_function *callee;
1796 struct btrace_call_iterator it;
1797
1798 callee = btrace_get_frame_function (next);
1799 if (callee == NULL || (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
1800 return 0;
1801
1802 if (btrace_find_call_by_number (&it, &tp->btrace, callee->up) == 0)
1803 return 0;
1804
1805 bfun = btrace_call_get (&it);
1806 }
1807
1808 if (bfun == NULL)
1809 return 0;
1810
1811 DEBUG ("[frame] sniffed frame for %s on level %d",
1812 btrace_get_bfun_name (bfun), bfun->level);
1813
1814 /* This is our frame. Initialize the frame cache. */
1815 cache = bfcache_new (this_frame);
1816 cache->tp = tp;
1817 cache->bfun = bfun;
1818
1819 *this_cache = cache;
1820 return 1;
1821 }
1822
1823 /* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1824
1825 static int
1826 record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
1827 struct frame_info *this_frame,
1828 void **this_cache)
1829 {
1830 const struct btrace_function *bfun, *callee;
1831 struct btrace_frame_cache *cache;
1832 struct btrace_call_iterator it;
1833 struct frame_info *next;
1834 struct thread_info *tinfo;
1835
1836 next = get_next_frame (this_frame);
1837 if (next == NULL)
1838 return 0;
1839
1840 callee = btrace_get_frame_function (next);
1841 if (callee == NULL)
1842 return 0;
1843
1844 if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1845 return 0;
1846
1847 tinfo = inferior_thread ();
1848 if (btrace_find_call_by_number (&it, &tinfo->btrace, callee->up) == 0)
1849 return 0;
1850
1851 bfun = btrace_call_get (&it);
1852
1853 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1854 btrace_get_bfun_name (bfun), bfun->level);
1855
1856 /* This is our frame. Initialize the frame cache. */
1857 cache = bfcache_new (this_frame);
1858 cache->tp = tinfo;
1859 cache->bfun = bfun;
1860
1861 *this_cache = cache;
1862 return 1;
1863 }
1864
1865 static void
1866 record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
1867 {
1868 struct btrace_frame_cache *cache;
1869 void **slot;
1870
1871 cache = (struct btrace_frame_cache *) this_cache;
1872
1873 slot = htab_find_slot (bfcache, cache, NO_INSERT);
1874 gdb_assert (slot != NULL);
1875
1876 htab_remove_elt (bfcache, cache);
1877 }
1878
1879 /* btrace recording does not store previous memory content, nor the stack
1880 frames' content. Any unwinding would return erroneous results as the stack
1881 contents no longer match the changed PC value restored from history.
1882 Therefore this unwinder reports any possibly unwound registers as
1883 <unavailable>. */
1884
1885 const struct frame_unwind record_btrace_frame_unwind =
1886 {
1887 NORMAL_FRAME,
1888 record_btrace_frame_unwind_stop_reason,
1889 record_btrace_frame_this_id,
1890 record_btrace_frame_prev_register,
1891 NULL,
1892 record_btrace_frame_sniffer,
1893 record_btrace_frame_dealloc_cache
1894 };
1895
1896 const struct frame_unwind record_btrace_tailcall_frame_unwind =
1897 {
1898 TAILCALL_FRAME,
1899 record_btrace_frame_unwind_stop_reason,
1900 record_btrace_frame_this_id,
1901 record_btrace_frame_prev_register,
1902 NULL,
1903 record_btrace_tailcall_frame_sniffer,
1904 record_btrace_frame_dealloc_cache
1905 };
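/* Concretely, only the PC can be unwound: asking
   record_btrace_frame_prev_register for any other register raises
   NOT_AVAILABLE_ERROR ("Registers are not available in btrace record
   history"), while backtraces can still be produced since they only
   need the chain of PCs.  */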
1906
1907 /* Implement the get_unwinder method. */
1908
1909 const struct frame_unwind *
1910 record_btrace_target::get_unwinder ()
1911 {
1912 return &record_btrace_frame_unwind;
1913 }
1914
1915 /* Implement the get_tailcall_unwinder method. */
1916
1917 const struct frame_unwind *
1918 record_btrace_target::get_tailcall_unwinder ()
1919 {
1920 return &record_btrace_tailcall_frame_unwind;
1921 }
1922
1923 /* Return a human-readable string for FLAG. */
1924
1925 static const char *
1926 btrace_thread_flag_to_str (enum btrace_thread_flag flag)
1927 {
1928 switch (flag)
1929 {
1930 case BTHR_STEP:
1931 return "step";
1932
1933 case BTHR_RSTEP:
1934 return "reverse-step";
1935
1936 case BTHR_CONT:
1937 return "cont";
1938
1939 case BTHR_RCONT:
1940 return "reverse-cont";
1941
1942 case BTHR_STOP:
1943 return "stop";
1944 }
1945
1946 return "<invalid>";
1947 }
1948
1949 /* Indicate that TP should be resumed according to FLAG. */
1950
1951 static void
1952 record_btrace_resume_thread (struct thread_info *tp,
1953 enum btrace_thread_flag flag)
1954 {
1955 struct btrace_thread_info *btinfo;
1956
1957 DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp),
1958 target_pid_to_str (tp->ptid), flag, btrace_thread_flag_to_str (flag));
1959
1960 btinfo = &tp->btrace;
1961
1962 /* Fetch the latest branch trace. */
1963 btrace_fetch (tp, record_btrace_get_cpu ());
1964
1965 /* A resume request overwrites a preceding resume or stop request. */
1966 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
1967 btinfo->flags |= flag;
1968 }
1969
1970 /* Get the current frame for TP. */
1971
1972 static struct frame_info *
1973 get_thread_current_frame (struct thread_info *tp)
1974 {
1975 struct frame_info *frame;
1976 ptid_t old_inferior_ptid;
1977 int executing;
1978
1979 /* Set current thread, which is implicitly used by
1980 get_current_frame. */
1981 scoped_restore_current_thread restore_thread;
1982
1983 switch_to_thread (tp);
1984
1985 /* Clear the executing flag to allow changes to the current frame.
1986 We are not actually running, yet. We just started a reverse execution
1987 command or a record goto command.
1988 For the latter, EXECUTING is false and this has no effect.
1989 For the former, EXECUTING is true and we're in wait, about to
1990 move the thread. Since we need to recompute the stack, we temporarily
1991 set EXECUTING to false. */
1992 executing = tp->executing;
1993 set_executing (inferior_ptid, false);
1994
1995 frame = NULL;
1996 TRY
1997 {
1998 frame = get_current_frame ();
1999 }
2000 CATCH (except, RETURN_MASK_ALL)
2001 {
2002 /* Restore the previous execution state. */
2003 set_executing (inferior_ptid, executing);
2004
2005 throw_exception (except);
2006 }
2007 END_CATCH
2008
2009 /* Restore the previous execution state. */
2010 set_executing (inferior_ptid, executing);
2011
2012 return frame;
2013 }
2014
2015 /* Start replaying a thread. */
2016
2017 static struct btrace_insn_iterator *
2018 record_btrace_start_replaying (struct thread_info *tp)
2019 {
2020 struct btrace_insn_iterator *replay;
2021 struct btrace_thread_info *btinfo;
2022
2023 btinfo = &tp->btrace;
2024 replay = NULL;
2025
2026 /* We can't start replaying without trace. */
2027 if (btinfo->functions.empty ())
2028 return NULL;
2029
2030 /* GDB stores the current frame_id when stepping in order to detect steps
2031 into subroutines.
2032 Since frames are computed differently when we're replaying, we need to
2033 recompute those stored frames and fix them up so we can still detect
2034 subroutines after we started replaying. */
2035 TRY
2036 {
2037 struct frame_info *frame;
2038 struct frame_id frame_id;
2039 int upd_step_frame_id, upd_step_stack_frame_id;
2040
2041 /* The current frame without replaying - computed via normal unwind. */
2042 frame = get_thread_current_frame (tp);
2043 frame_id = get_frame_id (frame);
2044
2045 /* Check if we need to update any stepping-related frame id's. */
2046 upd_step_frame_id = frame_id_eq (frame_id,
2047 tp->control.step_frame_id);
2048 upd_step_stack_frame_id = frame_id_eq (frame_id,
2049 tp->control.step_stack_frame_id);
2050
2051 /* We start replaying at the end of the branch trace. This corresponds
2052 to the current instruction. */
2053 replay = XNEW (struct btrace_insn_iterator);
2054 btrace_insn_end (replay, btinfo);
2055
2056 /* Skip gaps at the end of the trace. */
2057 while (btrace_insn_get (replay) == NULL)
2058 {
2059 unsigned int steps;
2060
2061 steps = btrace_insn_prev (replay, 1);
2062 if (steps == 0)
2063 error (_("No trace."));
2064 }
2065
2066 /* We're not replaying, yet. */
2067 gdb_assert (btinfo->replay == NULL);
2068 btinfo->replay = replay;
2069
2070 /* Make sure we're not using any stale registers. */
2071 registers_changed_thread (tp);
2072
2073 /* The current frame with replaying - computed via btrace unwind. */
2074 frame = get_thread_current_frame (tp);
2075 frame_id = get_frame_id (frame);
2076
2077 /* Replace stepping related frames where necessary. */
2078 if (upd_step_frame_id)
2079 tp->control.step_frame_id = frame_id;
2080 if (upd_step_stack_frame_id)
2081 tp->control.step_stack_frame_id = frame_id;
2082 }
2083 CATCH (except, RETURN_MASK_ALL)
2084 {
2085 xfree (btinfo->replay);
2086 btinfo->replay = NULL;
2087
2088 registers_changed_thread (tp);
2089
2090 throw_exception (except);
2091 }
2092 END_CATCH
2093
2094 return replay;
2095 }
2096
2097 /* Stop replaying a thread. */
2098
2099 static void
2100 record_btrace_stop_replaying (struct thread_info *tp)
2101 {
2102 struct btrace_thread_info *btinfo;
2103
2104 btinfo = &tp->btrace;
2105
2106 xfree (btinfo->replay);
2107 btinfo->replay = NULL;
2108
2109 /* Make sure we're not leaving any stale registers. */
2110 registers_changed_thread (tp);
2111 }
2112
2113 /* Stop replaying TP if it is at the end of its execution history. */
2114
2115 static void
2116 record_btrace_stop_replaying_at_end (struct thread_info *tp)
2117 {
2118 struct btrace_insn_iterator *replay, end;
2119 struct btrace_thread_info *btinfo;
2120
2121 btinfo = &tp->btrace;
2122 replay = btinfo->replay;
2123
2124 if (replay == NULL)
2125 return;
2126
2127 btrace_insn_end (&end, btinfo);
2128
2129 if (btrace_insn_cmp (replay, &end) == 0)
2130 record_btrace_stop_replaying (tp);
2131 }
2132
2133 /* The resume method of target record-btrace. */
2134
2135 void
2136 record_btrace_target::resume (ptid_t ptid, int step, enum gdb_signal signal)
2137 {
2138 struct thread_info *tp;
2139 enum btrace_thread_flag flag, cflag;
2140
2141 DEBUG ("resume %s: %s%s", target_pid_to_str (ptid),
2142 ::execution_direction == EXEC_REVERSE ? "reverse-" : "",
2143 step ? "step" : "cont");
2144
2145 /* Store the execution direction of the last resume.
2146
2147 If there is more than one resume call, we have to rely on infrun
2148 to not change the execution direction in-between. */
2149 record_btrace_resume_exec_dir = ::execution_direction;
2150
2151 /* As long as we're not replaying, just forward the request.
2152
2153 For non-stop targets this means that no thread is replaying. In order to
2154 make progress, we may need to explicitly move replaying threads to the end
2155 of their execution history. */
2156 if ((::execution_direction != EXEC_REVERSE)
2157 && !record_is_replaying (minus_one_ptid))
2158 {
2159 this->beneath ()->resume (ptid, step, signal);
2160 return;
2161 }
2162
2163 /* Compute the btrace thread flag for the requested move. */
2164 if (::execution_direction == EXEC_REVERSE)
2165 {
2166 flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;
2167 cflag = BTHR_RCONT;
2168 }
2169 else
2170 {
2171 flag = step == 0 ? BTHR_CONT : BTHR_STEP;
2172 cflag = BTHR_CONT;
2173 }
2174
2175 /* We just indicate the resume intent here. The actual stepping happens in
2176 record_btrace_wait below.
2177
2178 For all-stop targets, we only step INFERIOR_PTID and continue others. */
2179 if (!target_is_non_stop_p ())
2180 {
2181 gdb_assert (ptid_match (inferior_ptid, ptid));
2182
2183 ALL_NON_EXITED_THREADS (tp)
2184 if (ptid_match (tp->ptid, ptid))
2185 {
2186 if (ptid_match (tp->ptid, inferior_ptid))
2187 record_btrace_resume_thread (tp, flag);
2188 else
2189 record_btrace_resume_thread (tp, cflag);
2190 }
2191 }
2192 else
2193 {
2194 ALL_NON_EXITED_THREADS (tp)
2195 if (ptid_match (tp->ptid, ptid))
2196 record_btrace_resume_thread (tp, flag);
2197 }
2198
2199 /* Async support. */
2200 if (target_can_async_p ())
2201 {
2202 target_async (1);
2203 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2204 }
2205 }
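/* Illustration (hypothetical session): a "reverse-stepi" arrives here with
::execution_direction == EXEC_REVERSE, so the request is not forwarded to
the target beneath; the thread is flagged BTHR_RSTEP instead, and the
actual replayed step happens in the wait method below.  */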
2206
2207 /* The commit_resume method of target record-btrace. */
2208
2209 void
2210 record_btrace_target::commit_resume ()
2211 {
2212 if ((::execution_direction != EXEC_REVERSE)
2213 && !record_is_replaying (minus_one_ptid))
2214 beneath ()->commit_resume ();
2215 }
2216
2217 /* Cancel resuming TP. */
2218
2219 static void
2220 record_btrace_cancel_resume (struct thread_info *tp)
2221 {
2222 enum btrace_thread_flag flags;
2223
2224 flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
2225 if (flags == 0)
2226 return;
2227
2228 DEBUG ("cancel resume thread %s (%s): %x (%s)",
2229 print_thread_id (tp),
2230 target_pid_to_str (tp->ptid), flags,
2231 btrace_thread_flag_to_str (flags));
2232
2233 tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
2234 record_btrace_stop_replaying_at_end (tp);
2235 }
2236
2237 /* Return a target_waitstatus indicating that we ran out of history. */
2238
2239 static struct target_waitstatus
2240 btrace_step_no_history (void)
2241 {
2242 struct target_waitstatus status;
2243
2244 status.kind = TARGET_WAITKIND_NO_HISTORY;
2245
2246 return status;
2247 }
2248
2249 /* Return a target_waitstatus indicating that a step finished. */
2250
2251 static struct target_waitstatus
2252 btrace_step_stopped (void)
2253 {
2254 struct target_waitstatus status;
2255
2256 status.kind = TARGET_WAITKIND_STOPPED;
2257 status.value.sig = GDB_SIGNAL_TRAP;
2258
2259 return status;
2260 }
2261
2262 /* Return a target_waitstatus indicating that a thread was stopped as
2263 requested. */
2264
2265 static struct target_waitstatus
2266 btrace_step_stopped_on_request (void)
2267 {
2268 struct target_waitstatus status;
2269
2270 status.kind = TARGET_WAITKIND_STOPPED;
2271 status.value.sig = GDB_SIGNAL_0;
2272
2273 return status;
2274 }
2275
2276 /* Return a target_waitstatus indicating a spurious stop. */
2277
2278 static struct target_waitstatus
2279 btrace_step_spurious (void)
2280 {
2281 struct target_waitstatus status;
2282
2283 status.kind = TARGET_WAITKIND_SPURIOUS;
2284
2285 return status;
2286 }
2287
2288 /* Return a target_waitstatus indicating that the thread was not resumed. */
2289
2290 static struct target_waitstatus
2291 btrace_step_no_resumed (void)
2292 {
2293 struct target_waitstatus status;
2294
2295 status.kind = TARGET_WAITKIND_NO_RESUMED;
2296
2297 return status;
2298 }
2299
2300 /* Return a target_waitstatus indicating that we should wait again. */
2301
2302 static struct target_waitstatus
2303 btrace_step_again (void)
2304 {
2305 struct target_waitstatus status;
2306
2307 status.kind = TARGET_WAITKIND_IGNORE;
2308
2309 return status;
2310 }
2311
2312 /* Clear the stored instruction and function-call histories. */
2313
2314 static void
2315 record_btrace_clear_histories (struct btrace_thread_info *btinfo)
2316 {
2317 xfree (btinfo->insn_history);
2318 xfree (btinfo->call_history);
2319
2320 btinfo->insn_history = NULL;
2321 btinfo->call_history = NULL;
2322 }
2323
2324 /* Check whether TP's current replay position is at a breakpoint. */
2325
2326 static int
2327 record_btrace_replay_at_breakpoint (struct thread_info *tp)
2328 {
2329 struct btrace_insn_iterator *replay;
2330 struct btrace_thread_info *btinfo;
2331 const struct btrace_insn *insn;
2332
2333 btinfo = &tp->btrace;
2334 replay = btinfo->replay;
2335
2336 if (replay == NULL)
2337 return 0;
2338
2339 insn = btrace_insn_get (replay);
2340 if (insn == NULL)
2341 return 0;
2342
2343 return record_check_stopped_by_breakpoint (tp->inf->aspace, insn->pc,
2344 &btinfo->stop_reason);
2345 }
2346
2347 /* Step one instruction in forward direction. */
2348
2349 static struct target_waitstatus
2350 record_btrace_single_step_forward (struct thread_info *tp)
2351 {
2352 struct btrace_insn_iterator *replay, end, start;
2353 struct btrace_thread_info *btinfo;
2354
2355 btinfo = &tp->btrace;
2356 replay = btinfo->replay;
2357
2358 /* We're done if we're not replaying. */
2359 if (replay == NULL)
2360 return btrace_step_no_history ();
2361
2362 /* Check if we're stepping a breakpoint. */
2363 if (record_btrace_replay_at_breakpoint (tp))
2364 return btrace_step_stopped ();
2365
2366 /* Skip gaps during replay. If we end up at a gap (at the end of the trace),
2367 jump back to the instruction at which we started. */
2368 start = *replay;
2369 do
2370 {
2371 unsigned int steps;
2372
2373 /* We will bail out here if we continue stepping after reaching the end
2374 of the execution history. */
2375 steps = btrace_insn_next (replay, 1);
2376 if (steps == 0)
2377 {
2378 *replay = start;
2379 return btrace_step_no_history ();
2380 }
2381 }
2382 while (btrace_insn_get (replay) == NULL);
2383
2384 /* Determine the end of the instruction trace. */
2385 btrace_insn_end (&end, btinfo);
2386
2387 /* The execution trace contains (and ends with) the current instruction.
2388 This instruction has not been executed, yet, so the trace really ends
2389 one instruction earlier. */
2390 if (btrace_insn_cmp (replay, &end) == 0)
2391 return btrace_step_no_history ();
2392
2393 return btrace_step_spurious ();
2394 }
2395
2396 /* Step one instruction in backward direction. */
2397
2398 static struct target_waitstatus
2399 record_btrace_single_step_backward (struct thread_info *tp)
2400 {
2401 struct btrace_insn_iterator *replay, start;
2402 struct btrace_thread_info *btinfo;
2403
2404 btinfo = &tp->btrace;
2405 replay = btinfo->replay;
2406
2407 /* Start replaying if we're not already doing so. */
2408 if (replay == NULL)
2409 replay = record_btrace_start_replaying (tp);
2410
2411 /* If we can't step any further, we reached the end of the history.
2412 Skip gaps during replay. If we end up at a gap (at the beginning of
2413 the trace), jump back to the instruction at which we started. */
2414 start = *replay;
2415 do
2416 {
2417 unsigned int steps;
2418
2419 steps = btrace_insn_prev (replay, 1);
2420 if (steps == 0)
2421 {
2422 *replay = start;
2423 return btrace_step_no_history ();
2424 }
2425 }
2426 while (btrace_insn_get (replay) == NULL);
2427
2428 /* Check if we're stepping a breakpoint.
2429
2430 For reverse-stepping, this check is after the step. There is logic in
2431 infrun.c that handles reverse-stepping separately. See, for example,
2432 proceed and adjust_pc_after_break.
2433
2434 This code assumes that for reverse-stepping, PC points to the last
2435 de-executed instruction, whereas for forward-stepping PC points to the
2436 next to-be-executed instruction. */
2437 if (record_btrace_replay_at_breakpoint (tp))
2438 return btrace_step_stopped ();
2439
2440 return btrace_step_spurious ();
2441 }
2442
2443 /* Step a single thread. */
2444
2445 static struct target_waitstatus
2446 record_btrace_step_thread (struct thread_info *tp)
2447 {
2448 struct btrace_thread_info *btinfo;
2449 struct target_waitstatus status;
2450 enum btrace_thread_flag flags;
2451
2452 btinfo = &tp->btrace;
2453
2454 flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
2455 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
2456
2457 DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp),
2458 target_pid_to_str (tp->ptid), flags,
2459 btrace_thread_flag_to_str (flags));
2460
2461 /* We can't step without an execution history. */
2462 if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
2463 return btrace_step_no_history ();
2464
2465 switch (flags)
2466 {
2467 default:
2468 internal_error (__FILE__, __LINE__, _("invalid stepping type."));
2469
2470 case BTHR_STOP:
2471 return btrace_step_stopped_on_request ();
2472
2473 case BTHR_STEP:
2474 status = record_btrace_single_step_forward (tp);
2475 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2476 break;
2477
2478 return btrace_step_stopped ();
2479
2480 case BTHR_RSTEP:
2481 status = record_btrace_single_step_backward (tp);
2482 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2483 break;
2484
2485 return btrace_step_stopped ();
2486
2487 case BTHR_CONT:
2488 status = record_btrace_single_step_forward (tp);
2489 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2490 break;
2491
2492 btinfo->flags |= flags;
2493 return btrace_step_again ();
2494
2495 case BTHR_RCONT:
2496 status = record_btrace_single_step_backward (tp);
2497 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2498 break;
2499
2500 btinfo->flags |= flags;
2501 return btrace_step_again ();
2502 }
2503
2504 /* We keep threads moving at the end of their execution history. The wait
2505 method will stop the thread for whom the event is reported. */
2506 if (status.kind == TARGET_WAITKIND_NO_HISTORY)
2507 btinfo->flags |= flags;
2508
2509 return status;
2510 }
2511
2512 /* Announce further events if necessary. */
2513
2514 static void
2515 record_btrace_maybe_mark_async_event
2516 (const std::vector<thread_info *> &moving,
2517 const std::vector<thread_info *> &no_history)
2518 {
2519 bool more_moving = !moving.empty ();
2520 bool more_no_history = !no_history.empty ();
2521
2522 if (!more_moving && !more_no_history)
2523 return;
2524
2525 if (more_moving)
2526 DEBUG ("movers pending");
2527
2528 if (more_no_history)
2529 DEBUG ("no-history pending");
2530
2531 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2532 }
2533
2534 /* The wait method of target record-btrace. */
2535
2536 ptid_t
2537 record_btrace_target::wait (ptid_t ptid, struct target_waitstatus *status,
2538 int options)
2539 {
2540 std::vector<thread_info *> moving;
2541 std::vector<thread_info *> no_history;
2542
2543 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);
2544
2545 /* As long as we're not replaying, just forward the request. */
2546 if ((::execution_direction != EXEC_REVERSE)
2547 && !record_is_replaying (minus_one_ptid))
2548 {
2549 return this->beneath ()->wait (ptid, status, options);
2550 }
2551
2552 /* Keep a work list of moving threads. */
2553 {
2554 thread_info *tp;
2555
2556 ALL_NON_EXITED_THREADS (tp)
2557 {
2558 if (ptid_match (tp->ptid, ptid)
2559 && ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0))
2560 moving.push_back (tp);
2561 }
2562 }
2563
2564 if (moving.empty ())
2565 {
2566 *status = btrace_step_no_resumed ();
2567
2568 DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid),
2569 target_waitstatus_to_string (status).c_str ());
2570
2571 return null_ptid;
2572 }
2573
2574 /* Step moving threads one by one, one step each, until either one thread
2575 reports an event or we run out of threads to step.
2576
2577 When stepping more than one thread, chances are that some threads reach
2578 the end of their execution history earlier than others. If we reported
2579 this immediately, all-stop on top of non-stop would stop all threads and
2580 resume the same threads next time. And we would report the same thread
2581 having reached the end of its execution history again.
2582
2583 In the worst case, this would starve the other threads. But even if other
2584 threads would be allowed to make progress, this would result in far too
2585 many intermediate stops.
2586
2587 We therefore delay the reporting of "no execution history" until we have
2588 nothing else to report. By this time, all threads should have moved to
2589 either the beginning or the end of their execution history. There will
2590 be a single user-visible stop. */
2591 struct thread_info *eventing = NULL;
2592 while ((eventing == NULL) && !moving.empty ())
2593 {
2594 for (unsigned int ix = 0; eventing == NULL && ix < moving.size ();)
2595 {
2596 thread_info *tp = moving[ix];
2597
2598 *status = record_btrace_step_thread (tp);
2599
2600 switch (status->kind)
2601 {
2602 case TARGET_WAITKIND_IGNORE:
2603 ix++;
2604 break;
2605
2606 case TARGET_WAITKIND_NO_HISTORY:
2607 no_history.push_back (ordered_remove (moving, ix));
2608 break;
2609
2610 default:
2611 eventing = unordered_remove (moving, ix);
2612 break;
2613 }
2614 }
2615 }
2616
2617 if (eventing == NULL)
2618 {
2619 /* We started with at least one moving thread. This thread must have
2620 either stopped or reached the end of its execution history.
2621
2622 In the former case, EVENTING must not be NULL.
2623 In the latter case, NO_HISTORY must not be empty. */
2624 gdb_assert (!no_history.empty ());
2625
2626 /* We kept threads moving at the end of their execution history. Stop
2627 EVENTING now that we are going to report its stop. */
2628 eventing = unordered_remove (no_history, 0);
2629 eventing->btrace.flags &= ~BTHR_MOVE;
2630
2631 *status = btrace_step_no_history ();
2632 }
2633
2634 gdb_assert (eventing != NULL);
2635
2636 /* We kept threads replaying at the end of their execution history. Stop
2637 replaying EVENTING now that we are going to report its stop. */
2638 record_btrace_stop_replaying_at_end (eventing);
2639
2640 /* Stop all other threads. */
2641 if (!target_is_non_stop_p ())
2642 {
2643 thread_info *tp;
2644
2645 ALL_NON_EXITED_THREADS (tp)
2646 record_btrace_cancel_resume (tp);
2647 }
2648
2649 /* In async mode, we need to announce further events. */
2650 if (target_is_async_p ())
2651 record_btrace_maybe_mark_async_event (moving, no_history);
2652
2653 /* Start record histories anew from the current position. */
2654 record_btrace_clear_histories (&eventing->btrace);
2655
2656 /* We moved the replay position but did not update registers. */
2657 registers_changed_thread (eventing);
2658
2659 DEBUG ("wait ended by thread %s (%s): %s",
2660 print_thread_id (eventing),
2661 target_pid_to_str (eventing->ptid),
2662 target_waitstatus_to_string (status).c_str ());
2663
2664 return eventing->ptid;
2665 }
2666
2667 /* The stop method of target record-btrace. */
2668
2669 void
2670 record_btrace_target::stop (ptid_t ptid)
2671 {
2672 DEBUG ("stop %s", target_pid_to_str (ptid));
2673
2674 /* As long as we're not replaying, just forward the request. */
2675 if ((::execution_direction != EXEC_REVERSE)
2676 && !record_is_replaying (minus_one_ptid))
2677 {
2678 this->beneath ()->stop (ptid);
2679 }
2680 else
2681 {
2682 struct thread_info *tp;
2683
2684 ALL_NON_EXITED_THREADS (tp)
2685 if (ptid_match (tp->ptid, ptid))
2686 {
2687 tp->btrace.flags &= ~BTHR_MOVE;
2688 tp->btrace.flags |= BTHR_STOP;
2689 }
2690 }
2691 }
2692
2693 /* The can_execute_reverse method of target record-btrace. */
2694
2695 bool
2696 record_btrace_target::can_execute_reverse ()
2697 {
2698 return true;
2699 }
2700
2701 /* The stopped_by_sw_breakpoint method of target record-btrace. */
2702
2703 bool
2704 record_btrace_target::stopped_by_sw_breakpoint ()
2705 {
2706 if (record_is_replaying (minus_one_ptid))
2707 {
2708 struct thread_info *tp = inferior_thread ();
2709
2710 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2711 }
2712
2713 return this->beneath ()->stopped_by_sw_breakpoint ();
2714 }
2715
2716 /* The supports_stopped_by_sw_breakpoint method of target
2717 record-btrace. */
2718
2719 bool
2720 record_btrace_target::supports_stopped_by_sw_breakpoint ()
2721 {
2722 if (record_is_replaying (minus_one_ptid))
2723 return true;
2724
2725 return this->beneath ()->supports_stopped_by_sw_breakpoint ();
2726 }
2727
2728 /* The stopped_by_hw_breakpoint method of target record-btrace. */
2729
2730 bool
2731 record_btrace_target::stopped_by_hw_breakpoint ()
2732 {
2733 if (record_is_replaying (minus_one_ptid))
2734 {
2735 struct thread_info *tp = inferior_thread ();
2736
2737 return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2738 }
2739
2740 return this->beneath ()->stopped_by_hw_breakpoint ();
2741 }
2742
2743 /* The supports_stopped_by_hw_breakpoint method of target
2744 record-btrace. */
2745
2746 bool
2747 record_btrace_target::supports_stopped_by_hw_breakpoint ()
2748 {
2749 if (record_is_replaying (minus_one_ptid))
2750 return true;
2751
2752 return this->beneath ()->supports_stopped_by_hw_breakpoint ();
2753 }
2754
2755 /* The update_thread_list method of target record-btrace. */
2756
2757 void
2758 record_btrace_target::update_thread_list ()
2759 {
2760 /* We don't add or remove threads during replay. */
2761 if (record_is_replaying (minus_one_ptid))
2762 return;
2763
2764 /* Forward the request. */
2765 this->beneath ()->update_thread_list ();
2766 }
2767
2768 /* The thread_alive method of target record-btrace. */
2769
2770 bool
2771 record_btrace_target::thread_alive (ptid_t ptid)
2772 {
2773 /* We don't add or remove threads during replay. */
2774 if (record_is_replaying (minus_one_ptid))
2775 return true;
2776
2777 /* Forward the request. */
2778 return this->beneath ()->thread_alive (ptid);
2779 }
2780
2781 /* Set the replay branch trace instruction iterator. If IT is NULL, replay
2782 is stopped. */
2783
2784 static void
2785 record_btrace_set_replay (struct thread_info *tp,
2786 const struct btrace_insn_iterator *it)
2787 {
2788 struct btrace_thread_info *btinfo;
2789
2790 btinfo = &tp->btrace;
2791
2792 if (it == NULL)
2793 record_btrace_stop_replaying (tp);
2794 else
2795 {
2796 if (btinfo->replay == NULL)
2797 record_btrace_start_replaying (tp);
2798 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
2799 return;
2800
2801 *btinfo->replay = *it;
2802 registers_changed_thread (tp);
2803 }
2804
2805 /* Start anew from the new replay position. */
2806 record_btrace_clear_histories (btinfo);
2807
2808 inferior_thread ()->suspend.stop_pc
2809 = regcache_read_pc (get_current_regcache ());
2810 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
2811 }
2812
2813 /* The goto_record_begin method of target record-btrace. */
2814
2815 void
2816 record_btrace_target::goto_record_begin ()
2817 {
2818 struct thread_info *tp;
2819 struct btrace_insn_iterator begin;
2820
2821 tp = require_btrace_thread ();
2822
2823 btrace_insn_begin (&begin, &tp->btrace);
2824
2825 /* Skip gaps at the beginning of the trace. */
2826 while (btrace_insn_get (&begin) == NULL)
2827 {
2828 unsigned int steps;
2829
2830 steps = btrace_insn_next (&begin, 1);
2831 if (steps == 0)
2832 error (_("No trace."));
2833 }
2834
2835 record_btrace_set_replay (tp, &begin);
2836 }
2837
2838 /* The goto_record_end method of target record-btrace. */
2839
2840 void
2841 record_btrace_target::goto_record_end ()
2842 {
2843 struct thread_info *tp;
2844
2845 tp = require_btrace_thread ();
2846
2847 record_btrace_set_replay (tp, NULL);
2848 }
2849
2850 /* The goto_record method of target record-btrace. */
2851
2852 void
2853 record_btrace_target::goto_record (ULONGEST insn)
2854 {
2855 struct thread_info *tp;
2856 struct btrace_insn_iterator it;
2857 unsigned int number;
2858 int found;
2859
2860 number = insn;
2861
2862 /* Check for wrap-arounds. */
2863 if (number != insn)
2864 error (_("Instruction number out of range."));
2865
2866 tp = require_btrace_thread ();
2867
2868 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
2869
2870 /* Check if the instruction could not be found or is a gap. */
2871 if (found == 0 || btrace_insn_get (&it) == NULL)
2872 error (_("No such instruction."));
2873
2874 record_btrace_set_replay (tp, &it);
2875 }
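/* Example (hypothetical numbers): instruction numbers are the ones shown by
"record instruction-history"; "record goto 42" replays up to instruction
42 and errors out if that number is unknown or falls into a trace gap.  */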
2876
2877 /* The record_stop_replaying method of target record-btrace. */
2878
2879 void
2880 record_btrace_target::record_stop_replaying ()
2881 {
2882 struct thread_info *tp;
2883
2884 ALL_NON_EXITED_THREADS (tp)
2885 record_btrace_stop_replaying (tp);
2886 }
2887
2888 /* The execution_direction target method. */
2889
2890 enum exec_direction_kind
2891 record_btrace_target::execution_direction ()
2892 {
2893 return record_btrace_resume_exec_dir;
2894 }
2895
2896 /* The prepare_to_generate_core target method. */
2897
2898 void
2899 record_btrace_target::prepare_to_generate_core ()
2900 {
2901 record_btrace_generating_corefile = 1;
2902 }
2903
2904 /* The done_generating_core target method. */
2905
2906 void
2907 record_btrace_target::done_generating_core ()
2908 {
2909 record_btrace_generating_corefile = 0;
2910 }
2911
2912 /* Start recording in BTS format. */
2913
2914 static void
2915 cmd_record_btrace_bts_start (const char *args, int from_tty)
2916 {
2917 if (args != NULL && *args != 0)
2918 error (_("Invalid argument."));
2919
2920 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2921
2922 TRY
2923 {
2924 execute_command ("target record-btrace", from_tty);
2925 }
2926 CATCH (exception, RETURN_MASK_ALL)
2927 {
2928 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2929 throw_exception (exception);
2930 }
2931 END_CATCH
2932 }
2933
2934 /* Start recording in Intel Processor Trace format. */
2935
2936 static void
2937 cmd_record_btrace_pt_start (const char *args, int from_tty)
2938 {
2939 if (args != NULL && *args != 0)
2940 error (_("Invalid argument."));
2941
2942 record_btrace_conf.format = BTRACE_FORMAT_PT;
2943
2944 TRY
2945 {
2946 execute_command ("target record-btrace", from_tty);
2947 }
2948 CATCH (exception, RETURN_MASK_ALL)
2949 {
2950 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2951 throw_exception (exception);
2952 }
2953 END_CATCH
2954 }
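/* Usage sketch (hypothetical session) for the two commands above:

(gdb) record btrace bts    # from/to branch records in a cyclic buffer
(gdb) record btrace pt     # Intel Processor Trace format

Either command pushes the record-btrace target; on failure, the requested
format is reset to BTRACE_FORMAT_NONE as shown above.  */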
2955
2956 /* Alias for "target record"; tries Intel Processor Trace format first, then falls back to BTS. */
2957
2958 static void
2959 cmd_record_btrace_start (const char *args, int from_tty)
2960 {
2961 if (args != NULL && *args != 0)
2962 error (_("Invalid argument."));
2963
2964 record_btrace_conf.format = BTRACE_FORMAT_PT;
2965
2966 TRY
2967 {
2968 execute_command ("target record-btrace", from_tty);
2969 }
2970 CATCH (exception, RETURN_MASK_ALL)
2971 {
2972 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2973
2974 TRY
2975 {
2976 execute_command ("target record-btrace", from_tty);
2977 }
2978 CATCH (exception, RETURN_MASK_ALL)
2979 {
2980 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2981 throw_exception (exception);
2982 }
2983 END_CATCH
2984 }
2985 END_CATCH
2986 }
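/* Note: plain "record btrace" thus prefers Intel Processor Trace and only
falls back to BTS if pushing the target in PT format fails.  */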
2987
2988 /* The "set record btrace" command. */
2989
2990 static void
2991 cmd_set_record_btrace (const char *args, int from_tty)
2992 {
2993 printf_unfiltered (_("\"set record btrace\" must be followed "
2994 "by an appropriate subcommand.\n"));
2995 help_list (set_record_btrace_cmdlist, "set record btrace ",
2996 all_commands, gdb_stdout);
2997 }
2998
2999 /* The "show record btrace" command. */
3000
3001 static void
3002 cmd_show_record_btrace (const char *args, int from_tty)
3003 {
3004 cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
3005 }
3006
3007 /* The "show record btrace replay-memory-access" command. */
3008
3009 static void
3010 cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
3011 struct cmd_list_element *c, const char *value)
3012 {
3013 fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
3014 replay_memory_access);
3015 }
3016
3017 /* The "set record btrace cpu none" command. */
3018
3019 static void
3020 cmd_set_record_btrace_cpu_none (const char *args, int from_tty)
3021 {
3022 if (args != nullptr && *args != 0)
3023 error (_("Trailing junk: '%s'."), args);
3024
3025 record_btrace_cpu_state = CS_NONE;
3026 }
3027
3028 /* The "set record btrace cpu auto" command. */
3029
3030 static void
3031 cmd_set_record_btrace_cpu_auto (const char *args, int from_tty)
3032 {
3033 if (args != nullptr && *args != 0)
3034 error (_("Trailing junk: '%s'."), args);
3035
3036 record_btrace_cpu_state = CS_AUTO;
3037 }
3038
3039 /* The "set record btrace cpu" command. */
3040
3041 static void
3042 cmd_set_record_btrace_cpu (const char *args, int from_tty)
3043 {
3044 if (args == nullptr)
3045 args = "";
3046
3047 /* We use a hard-coded vendor string for now. */
3048 unsigned int family, model, stepping;
3049 int l1, l2, matches = sscanf (args, "intel: %u/%u%n/%u%n", &family,
3050 &model, &l1, &stepping, &l2);
3051 if (matches == 3)
3052 {
3053 if (strlen (args) != l2)
3054 error (_("Trailing junk: '%s'."), args + l2);
3055 }
3056 else if (matches == 2)
3057 {
3058 if (strlen (args) != l1)
3059 error (_("Trailing junk: '%s'."), args + l1);
3060
3061 stepping = 0;
3062 }
3063 else
3064 error (_("Bad format. See \"help set record btrace cpu\"."));
3065
3066 if (USHRT_MAX < family)
3067 error (_("Cpu family too big."));
3068
3069 if (UCHAR_MAX < model)
3070 error (_("Cpu model too big."));
3071
3072 if (UCHAR_MAX < stepping)
3073 error (_("Cpu stepping too big."));
3074
3075 record_btrace_cpu.vendor = CV_INTEL;
3076 record_btrace_cpu.family = family;
3077 record_btrace_cpu.model = model;
3078 record_btrace_cpu.stepping = stepping;
3079
3080 record_btrace_cpu_state = CS_CPU;
3081 }
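/* Examples accepted by the parser above (family/model/stepping values are
illustrative only):

(gdb) set record btrace cpu intel: 6/62      <- stepping defaults to 0
(gdb) set record btrace cpu intel: 6/62/4

A missing model, trailing text, or an unknown vendor string is rejected
with an error.  */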
3082
3083 /* The "show record btrace cpu" command. */
3084
3085 static void
3086 cmd_show_record_btrace_cpu (const char *args, int from_tty)
3087 {
3088 const char *cpu;
3089
3090 if (args != nullptr && *args != 0)
3091 error (_("Trailing junk: '%s'."), args);
3092
3093 switch (record_btrace_cpu_state)
3094 {
3095 case CS_AUTO:
3096 printf_unfiltered (_("btrace cpu is 'auto'.\n"));
3097 return;
3098
3099 case CS_NONE:
3100 printf_unfiltered (_("btrace cpu is 'none'.\n"));
3101 return;
3102
3103 case CS_CPU:
3104 switch (record_btrace_cpu.vendor)
3105 {
3106 case CV_INTEL:
3107 if (record_btrace_cpu.stepping == 0)
3108 printf_unfiltered (_("btrace cpu is 'intel: %u/%u'.\n"),
3109 record_btrace_cpu.family,
3110 record_btrace_cpu.model);
3111 else
3112 printf_unfiltered (_("btrace cpu is 'intel: %u/%u/%u'.\n"),
3113 record_btrace_cpu.family,
3114 record_btrace_cpu.model,
3115 record_btrace_cpu.stepping);
3116 return;
3117 }
3118 }
3119
3120 error (_("Internal error: bad cpu state."));
3121 }
3122
3123 /* The "s record btrace bts" command. */
3124
3125 static void
3126 cmd_set_record_btrace_bts (const char *args, int from_tty)
3127 {
3128 printf_unfiltered (_("\"set record btrace bts\" must be followed "
3129 "by an appropriate subcommand.\n"));
3130 help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
3131 all_commands, gdb_stdout);
3132 }
3133
3134 /* The "show record btrace bts" command. */
3135
3136 static void
3137 cmd_show_record_btrace_bts (const char *args, int from_tty)
3138 {
3139 cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
3140 }
3141
3142 /* The "set record btrace pt" command. */
3143
3144 static void
3145 cmd_set_record_btrace_pt (const char *args, int from_tty)
3146 {
3147 printf_unfiltered (_("\"set record btrace pt\" must be followed "
3148 "by an appropriate subcommand.\n"));
3149 help_list (set_record_btrace_pt_cmdlist, "set record btrace pt ",
3150 all_commands, gdb_stdout);
3151 }
3152
3153 /* The "show record btrace pt" command. */
3154
3155 static void
3156 cmd_show_record_btrace_pt (const char *args, int from_tty)
3157 {
3158 cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, "");
3159 }
3160
3161 /* The "record bts buffer-size" show value function. */
3162
3163 static void
3164 show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
3165 struct cmd_list_element *c,
3166 const char *value)
3167 {
3168 fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
3169 value);
3170 }
3171
3172 /* The "record pt buffer-size" show value function. */
3173
3174 static void
3175 show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
3176 struct cmd_list_element *c,
3177 const char *value)
3178 {
3179 fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
3180 value);
3181 }
3182
3183 /* Initialize btrace commands. */
3184
3185 void
3186 _initialize_record_btrace (void)
3187 {
3188 add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
3189 _("Start branch trace recording."), &record_btrace_cmdlist,
3190 "record btrace ", 0, &record_cmdlist);
3191 add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
3192
3193 add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
3194 _("\
3195 Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
3196 The processor stores a from/to record for each branch into a cyclic buffer.\n\
3197 This format may not be available on all processors."),
3198 &record_btrace_cmdlist);
3199 add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);
3200
3201 add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
3202 _("\
3203 Start branch trace recording in Intel Processor Trace format.\n\n\
3204 This format may not be available on all processors."),
3205 &record_btrace_cmdlist);
3206 add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);
3207
3208 add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
3209 _("Set record options"), &set_record_btrace_cmdlist,
3210 "set record btrace ", 0, &set_record_cmdlist);
3211
3212 add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
3213 _("Show record options"), &show_record_btrace_cmdlist,
3214 "show record btrace ", 0, &show_record_cmdlist);
3215
3216 add_setshow_enum_cmd ("replay-memory-access", no_class,
3217 replay_memory_access_types, &replay_memory_access, _("\
3218 Set what memory accesses are allowed during replay."), _("\
3219 Show what memory accesses are allowed during replay."),
3220 _("Default is READ-ONLY.\n\n\
3221 The btrace record target does not trace data.\n\
3222 The memory therefore corresponds to the live target and not \
3223 to the current replay position.\n\n\
3224 When READ-ONLY, allow accesses to read-only memory during replay.\n\
3225 When READ-WRITE, allow accesses to read-only and read-write memory during \
3226 replay."),
3227 NULL, cmd_show_replay_memory_access,
3228 &set_record_btrace_cmdlist,
3229 &show_record_btrace_cmdlist);
3230
3231 add_prefix_cmd ("cpu", class_support, cmd_set_record_btrace_cpu,
3232 _("\
3233 Set the cpu to be used for trace decode.\n\n\
3234 The format is \"VENDOR:IDENTIFIER\" or \"none\" or \"auto\" (default).\n\
3235 For vendor \"intel\" the format is \"FAMILY/MODEL[/STEPPING]\".\n\n\
3236 When decoding branch trace, enable errata workarounds for the specified cpu.\n\
3237 The default is \"auto\", which uses the cpu on which the trace was recorded.\n\
3238 When GDB does not support that cpu, this option can be used to enable\n\
3239 workarounds for a similar cpu that GDB supports.\n\n\
3240 When set to \"none\", errata workarounds are disabled."),
3241 &set_record_btrace_cpu_cmdlist,
3242 _("set record btrace cpu "), 1,
3243 &set_record_btrace_cmdlist);
3244
3245 add_cmd ("auto", class_support, cmd_set_record_btrace_cpu_auto, _("\
3246 Automatically determine the cpu to be used for trace decode."),
3247 &set_record_btrace_cpu_cmdlist);
3248
3249 add_cmd ("none", class_support, cmd_set_record_btrace_cpu_none, _("\
3250 Do not enable errata workarounds for trace decode."),
3251 &set_record_btrace_cpu_cmdlist);
3252
3253 add_cmd ("cpu", class_support, cmd_show_record_btrace_cpu, _("\
3254 Show the cpu to be used for trace decode."),
3255 &show_record_btrace_cmdlist);
3256
3257 add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
3258 _("Set record btrace bts options"),
3259 &set_record_btrace_bts_cmdlist,
3260 "set record btrace bts ", 0, &set_record_btrace_cmdlist);
3261
3262 add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
3263 _("Show record btrace bts options"),
3264 &show_record_btrace_bts_cmdlist,
3265 "show record btrace bts ", 0, &show_record_btrace_cmdlist);
3266
3267 add_setshow_uinteger_cmd ("buffer-size", no_class,
3268 &record_btrace_conf.bts.size,
3269 _("Set the record/replay bts buffer size."),
3270 _("Show the record/replay bts buffer size."), _("\
3271 When starting recording, request a trace buffer of this size. \
3272 The actual buffer size may differ from the requested size. \
3273 Use \"info record\" to see the actual buffer size.\n\n\
3274 Bigger buffers allow longer recording but also take more time to process \
3275 the recorded execution trace.\n\n\
3276 The trace buffer size may not be changed while recording."), NULL,
3277 show_record_bts_buffer_size_value,
3278 &set_record_btrace_bts_cmdlist,
3279 &show_record_btrace_bts_cmdlist);
3280
3281 add_prefix_cmd ("pt", class_support, cmd_set_record_btrace_pt,
3282 _("Set record btrace pt options"),
3283 &set_record_btrace_pt_cmdlist,
3284 "set record btrace pt ", 0, &set_record_btrace_cmdlist);
3285
3286 add_prefix_cmd ("pt", class_support, cmd_show_record_btrace_pt,
3287 _("Show record btrace pt options"),
3288 &show_record_btrace_pt_cmdlist,
3289 "show record btrace pt ", 0, &show_record_btrace_cmdlist);
3290
3291 add_setshow_uinteger_cmd ("buffer-size", no_class,
3292 &record_btrace_conf.pt.size,
3293 _("Set the record/replay pt buffer size."),
3294 _("Show the record/replay pt buffer size."), _("\
3295 Bigger buffers allow longer recording but also take more time to process \
3296 the recorded execution.\n\
3297 The actual buffer size may differ from the requested size. Use \"info record\" \
3298 to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
3299 &set_record_btrace_pt_cmdlist,
3300 &show_record_btrace_pt_cmdlist);
3301
3302 add_target (record_btrace_target_info, record_btrace_target_open);
3303
3304 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
3305 xcalloc, xfree);
3306
3307 record_btrace_conf.bts.size = 64 * 1024;
3308 record_btrace_conf.pt.size = 16 * 1024;
3309 }
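/* A minimal end-to-end session using the commands registered above
(hypothetical values):

(gdb) set record btrace bts buffer-size 131072
(gdb) record btrace bts
(gdb) reverse-stepi
(gdb) record goto end

"info record" shows the actual buffer size, which may differ from the
requested one.  */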