1 /* Branch trace support for GDB, the GNU debugger.
2
3 Copyright (C) 2013-2017 Free Software Foundation, Inc.
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include "record.h"
24 #include "record-btrace.h"
25 #include "gdbthread.h"
26 #include "target.h"
27 #include "gdbcmd.h"
28 #include "disasm.h"
29 #include "observer.h"
30 #include "cli/cli-utils.h"
31 #include "source.h"
32 #include "ui-out.h"
33 #include "symtab.h"
34 #include "filenames.h"
35 #include "regcache.h"
36 #include "frame-unwind.h"
37 #include "hashtab.h"
38 #include "infrun.h"
39 #include "event-loop.h"
40 #include "inf-loop.h"
41 #include "vec.h"
42 #include <algorithm>
43
44 /* The target_ops of record-btrace. */
45 static struct target_ops record_btrace_ops;
46
47 /* An observer that enables branch tracing for each new thread. */
48 static struct observer *record_btrace_thread_observer;
49
50 /* Memory access types used in set/show record btrace replay-memory-access. */
51 static const char replay_memory_access_read_only[] = "read-only";
52 static const char replay_memory_access_read_write[] = "read-write";
53 static const char *const replay_memory_access_types[] =
54 {
55 replay_memory_access_read_only,
56 replay_memory_access_read_write,
57 NULL
58 };
59
60 /* The currently allowed replay memory access type. */
61 static const char *replay_memory_access = replay_memory_access_read_only;
62
63 /* Command lists for "set/show record btrace". */
64 static struct cmd_list_element *set_record_btrace_cmdlist;
65 static struct cmd_list_element *show_record_btrace_cmdlist;
66
67 /* The execution direction of the last resume we got. See record-full.c. */
68 static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;
69
70 /* The async event handler for reverse/replay execution. */
71 static struct async_event_handler *record_btrace_async_inferior_event_handler;
72
73 /* A flag indicating that we are currently generating a core file. */
74 static int record_btrace_generating_corefile;
75
76 /* The current branch trace configuration. */
77 static struct btrace_config record_btrace_conf;
78
79 /* Command list for "record btrace". */
80 static struct cmd_list_element *record_btrace_cmdlist;
81
82 /* Command lists for "set/show record btrace bts". */
83 static struct cmd_list_element *set_record_btrace_bts_cmdlist;
84 static struct cmd_list_element *show_record_btrace_bts_cmdlist;
85
86 /* Command lists for "set/show record btrace pt". */
87 static struct cmd_list_element *set_record_btrace_pt_cmdlist;
88 static struct cmd_list_element *show_record_btrace_pt_cmdlist;
89
90 /* Print a record-btrace debug message. Use do ... while (0) to avoid
91 ambiguities when used in if statements. */
92
93 #define DEBUG(msg, args...) \
94 do \
95 { \
96 if (record_debug != 0) \
97 fprintf_unfiltered (gdb_stdlog, \
98 "[record-btrace] " msg "\n", ##args); \
99 } \
100 while (0)
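
/* Illustrative example: without the do .. while (0) wrapper, DEBUG would
   expand to a bare braced block, and the ";" written after a DEBUG (...)
   call would become an empty statement that detaches a following else:

     if (record_debug)
       DEBUG ("enabled");
     else                       <-- would no longer bind to the if
       do_something_else ();

   Wrapping the block in do .. while (0) makes the expansion a single
   statement, so the else binds as intended.  */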
101
102
103 /* Update the branch trace for the current thread and return a pointer to its
104 thread_info.
105
106 Throws an error if there is no thread or no trace. This function never
107 returns NULL. */
108
109 static struct thread_info *
110 require_btrace_thread (void)
111 {
112 struct thread_info *tp;
113
114 DEBUG ("require");
115
116 tp = find_thread_ptid (inferior_ptid);
117 if (tp == NULL)
118 error (_("No thread."));
119
120 validate_registers_access ();
121
122 btrace_fetch (tp);
123
124 if (btrace_is_empty (tp))
125 error (_("No trace."));
126
127 return tp;
128 }
129
130 /* Update the branch trace for the current thread and return a pointer to its
131 branch trace information struct.
132
133 Throws an error if there is no thread or no trace. This function never
134 returns NULL. */
135
136 static struct btrace_thread_info *
137 require_btrace (void)
138 {
139 struct thread_info *tp;
140
141 tp = require_btrace_thread ();
142
143 return &tp->btrace;
144 }
145
146 /* Enable branch tracing for one thread. Warn on errors. */
147
148 static void
149 record_btrace_enable_warn (struct thread_info *tp)
150 {
151 TRY
152 {
153 btrace_enable (tp, &record_btrace_conf);
154 }
155 CATCH (error, RETURN_MASK_ERROR)
156 {
157 warning ("%s", error.message);
158 }
159 END_CATCH
160 }
161
162 /* Callback function to disable branch tracing for one thread. */
163
164 static void
165 record_btrace_disable_callback (void *arg)
166 {
167 struct thread_info *tp = (struct thread_info *) arg;
168
169 btrace_disable (tp);
170 }
171
172 /* Enable automatic tracing of new threads. */
173
174 static void
175 record_btrace_auto_enable (void)
176 {
177 DEBUG ("attach thread observer");
178
179 record_btrace_thread_observer
180 = observer_attach_new_thread (record_btrace_enable_warn);
181 }
182
183 /* Disable automatic tracing of new threads. */
184
185 static void
186 record_btrace_auto_disable (void)
187 {
188 /* The observer may already have been detached. */
189 if (record_btrace_thread_observer == NULL)
190 return;
191
192 DEBUG ("detach thread observer");
193
194 observer_detach_new_thread (record_btrace_thread_observer);
195 record_btrace_thread_observer = NULL;
196 }
197
198 /* The record-btrace async event handler function. */
199
200 static void
201 record_btrace_handle_async_inferior_event (gdb_client_data data)
202 {
203 inferior_event_handler (INF_REG_EVENT, NULL);
204 }
205
206 /* See record-btrace.h. */
207
208 void
209 record_btrace_push_target (void)
210 {
211 const char *format;
212
213 record_btrace_auto_enable ();
214
215 push_target (&record_btrace_ops);
216
217 record_btrace_async_inferior_event_handler
218 = create_async_event_handler (record_btrace_handle_async_inferior_event,
219 NULL);
220 record_btrace_generating_corefile = 0;
221
222 format = btrace_format_short_string (record_btrace_conf.format);
223 observer_notify_record_changed (current_inferior (), 1, "btrace", format);
224 }
225
226 /* The to_open method of target record-btrace. */
227
228 static void
229 record_btrace_open (const char *args, int from_tty)
230 {
231 struct cleanup *disable_chain;
232 struct thread_info *tp;
233
234 DEBUG ("open");
235
236 record_preopen ();
237
238 if (!target_has_execution)
239 error (_("The program is not being run."));
240
241 gdb_assert (record_btrace_thread_observer == NULL);
242
243 disable_chain = make_cleanup (null_cleanup, NULL);
244 ALL_NON_EXITED_THREADS (tp)
245 if (args == NULL || *args == 0 || number_is_in_list (args, tp->global_num))
246 {
247 btrace_enable (tp, &record_btrace_conf);
248
249 make_cleanup (record_btrace_disable_callback, tp);
250 }
251
252 record_btrace_push_target ();
253
254 discard_cleanups (disable_chain);
255 }
256
257 /* The to_stop_recording method of target record-btrace. */
258
259 static void
260 record_btrace_stop_recording (struct target_ops *self)
261 {
262 struct thread_info *tp;
263
264 DEBUG ("stop recording");
265
266 record_btrace_auto_disable ();
267
268 ALL_NON_EXITED_THREADS (tp)
269 if (tp->btrace.target != NULL)
270 btrace_disable (tp);
271 }
272
273 /* The to_disconnect method of target record-btrace. */
274
275 static void
276 record_btrace_disconnect (struct target_ops *self, const char *args,
277 int from_tty)
278 {
279 struct target_ops *beneath = self->beneath;
280
281 /* Do not stop recording, just clean up GDB side. */
282 unpush_target (self);
283
284 /* Forward disconnect. */
285 beneath->to_disconnect (beneath, args, from_tty);
286 }
287
288 /* The to_close method of target record-btrace. */
289
290 static void
291 record_btrace_close (struct target_ops *self)
292 {
293 struct thread_info *tp;
294
295 if (record_btrace_async_inferior_event_handler != NULL)
296 delete_async_event_handler (&record_btrace_async_inferior_event_handler);
297
298 /* Make sure automatic recording gets disabled even if we did not stop
299 recording before closing the record-btrace target. */
300 record_btrace_auto_disable ();
301
302 /* We should have already stopped recording.
303 Tear down btrace in case we have not. */
304 ALL_NON_EXITED_THREADS (tp)
305 btrace_teardown (tp);
306 }
307
308 /* The to_async method of target record-btrace. */
309
310 static void
311 record_btrace_async (struct target_ops *ops, int enable)
312 {
313 if (enable)
314 mark_async_event_handler (record_btrace_async_inferior_event_handler);
315 else
316 clear_async_event_handler (record_btrace_async_inferior_event_handler);
317
318 ops->beneath->to_async (ops->beneath, enable);
319 }
320
321 /* Adjust the size and return a human-readable size suffix. */
322
323 static const char *
324 record_btrace_adjust_size (unsigned int *size)
325 {
326 unsigned int sz;
327
328 sz = *size;
329
330 if ((sz & ((1u << 30) - 1)) == 0)
331 {
332 *size = sz >> 30;
333 return "GB";
334 }
335 else if ((sz & ((1u << 20) - 1)) == 0)
336 {
337 *size = sz >> 20;
338 return "MB";
339 }
340 else if ((sz & ((1u << 10) - 1)) == 0)
341 {
342 *size = sz >> 10;
343 return "kB";
344 }
345 else
346 return "";
347 }
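
/* Worked example (illustrative): a size of 65536 is an exact multiple of
   1kB but not of 1MB, so it is reduced to 64 and "kB" is returned;
   3145728 (3 << 20) becomes 3 with "MB"; a size like 1000 is not a
   multiple of 1kB and is returned unchanged with "".  */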
348
349 /* Print a BTS configuration. */
350
351 static void
352 record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
353 {
354 const char *suffix;
355 unsigned int size;
356
357 size = conf->size;
358 if (size > 0)
359 {
360 suffix = record_btrace_adjust_size (&size);
361 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
362 }
363 }
364
365 /* Print an Intel Processor Trace configuration. */
366
367 static void
368 record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
369 {
370 const char *suffix;
371 unsigned int size;
372
373 size = conf->size;
374 if (size > 0)
375 {
376 suffix = record_btrace_adjust_size (&size);
377 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
378 }
379 }
380
381 /* Print a branch tracing configuration. */
382
383 static void
384 record_btrace_print_conf (const struct btrace_config *conf)
385 {
386 printf_unfiltered (_("Recording format: %s.\n"),
387 btrace_format_string (conf->format));
388
389 switch (conf->format)
390 {
391 case BTRACE_FORMAT_NONE:
392 return;
393
394 case BTRACE_FORMAT_BTS:
395 record_btrace_print_bts_conf (&conf->bts);
396 return;
397
398 case BTRACE_FORMAT_PT:
399 record_btrace_print_pt_conf (&conf->pt);
400 return;
401 }
402
403 internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
404 }
405
406 /* The to_info_record method of target record-btrace. */
407
408 static void
409 record_btrace_info (struct target_ops *self)
410 {
411 struct btrace_thread_info *btinfo;
412 const struct btrace_config *conf;
413 struct thread_info *tp;
414 unsigned int insns, calls, gaps;
415
416 DEBUG ("info");
417
418 tp = find_thread_ptid (inferior_ptid);
419 if (tp == NULL)
420 error (_("No thread."));
421
422 validate_registers_access ();
423
424 btinfo = &tp->btrace;
425
426 conf = btrace_conf (btinfo);
427 if (conf != NULL)
428 record_btrace_print_conf (conf);
429
430 btrace_fetch (tp);
431
432 insns = 0;
433 calls = 0;
434 gaps = 0;
435
436 if (!btrace_is_empty (tp))
437 {
438 struct btrace_call_iterator call;
439 struct btrace_insn_iterator insn;
440
441 btrace_call_end (&call, btinfo);
442 btrace_call_prev (&call, 1);
443 calls = btrace_call_number (&call);
444
445 btrace_insn_end (&insn, btinfo);
446 insns = btrace_insn_number (&insn);
447
448 /* If the last instruction is not a gap, it is the current instruction
449 that is not actually part of the record. */
450 if (btrace_insn_get (&insn) != NULL)
451 insns -= 1;
452
453 gaps = btinfo->ngaps;
454 }
455
456 printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
457 "for thread %s (%s).\n"), insns, calls, gaps,
458 print_thread_id (tp), target_pid_to_str (tp->ptid));
459
460 if (btrace_is_replaying (tp))
461 printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
462 btrace_insn_number (btinfo->replay));
463 }
464
465 /* Print a decode error. */
466
467 static void
468 btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
469 enum btrace_format format)
470 {
471 const char *errstr = btrace_decode_error (format, errcode);
472
473 uiout->text (_("["));
474 /* ERRCODE > 0 indicates notifications on BTRACE_FORMAT_PT. */
475 if (!(format == BTRACE_FORMAT_PT && errcode > 0))
476 {
477 uiout->text (_("decode error ("));
478 uiout->field_int ("errcode", errcode);
479 uiout->text (_("): "));
480 }
481 uiout->text (errstr);
482 uiout->text (_("]\n"));
483 }
484
485 /* Print an unsigned int. */
486
487 static void
488 ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
489 {
490 uiout->field_fmt (fld, "%u", val);
491 }
492
493 /* A range of source lines. */
494
495 struct btrace_line_range
496 {
497 /* The symtab this line is from. */
498 struct symtab *symtab;
499
500 /* The first line (inclusive). */
501 int begin;
502
503 /* The last line (exclusive). */
504 int end;
505 };
506
507 /* Construct a line range. */
508
509 static struct btrace_line_range
510 btrace_mk_line_range (struct symtab *symtab, int begin, int end)
511 {
512 struct btrace_line_range range;
513
514 range.symtab = symtab;
515 range.begin = begin;
516 range.end = end;
517
518 return range;
519 }
520
521 /* Add a line to a line range. */
522
523 static struct btrace_line_range
524 btrace_line_range_add (struct btrace_line_range range, int line)
525 {
526 if (range.end <= range.begin)
527 {
528 /* This is the first entry. */
529 range.begin = line;
530 range.end = line + 1;
531 }
532 else if (line < range.begin)
533 range.begin = line;
534 else if (range.end <= line)
535 range.end = line + 1;
536
537 return range;
538 }
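
/* Illustrative example, assuming the end-exclusive invariant documented
   above: starting from the empty range [0; 0), adding line 10 gives
   [10; 11); adding line 7 widens it to [7; 11); adding line 12 gives
   [7; 13).  */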
539
540 /* Return non-zero if RANGE is empty, zero otherwise. */
541
542 static int
543 btrace_line_range_is_empty (struct btrace_line_range range)
544 {
545 return range.end <= range.begin;
546 }
547
548 /* Return non-zero if LHS contains RHS, zero otherwise. */
549
550 static int
551 btrace_line_range_contains_range (struct btrace_line_range lhs,
552 struct btrace_line_range rhs)
553 {
554 return ((lhs.symtab == rhs.symtab)
555 && (lhs.begin <= rhs.begin)
556 && (rhs.end <= lhs.end));
557 }
558
559 /* Find the line range associated with PC. */
560
561 static struct btrace_line_range
562 btrace_find_line_range (CORE_ADDR pc)
563 {
564 struct btrace_line_range range;
565 struct linetable_entry *lines;
566 struct linetable *ltable;
567 struct symtab *symtab;
568 int nlines, i;
569
570 symtab = find_pc_line_symtab (pc);
571 if (symtab == NULL)
572 return btrace_mk_line_range (NULL, 0, 0);
573
574 ltable = SYMTAB_LINETABLE (symtab);
575 if (ltable == NULL)
576 return btrace_mk_line_range (symtab, 0, 0);
577
578 nlines = ltable->nitems;
579 lines = ltable->item;
580 if (nlines <= 0)
581 return btrace_mk_line_range (symtab, 0, 0);
582
583 range = btrace_mk_line_range (symtab, 0, 0);
584 for (i = 0; i < nlines; i++)
585 {
586 if ((lines[i].pc == pc) && (lines[i].line != 0))
587 range = btrace_line_range_add (range, lines[i].line);
588 }
589
590 return range;
591 }
592
593 /* Print source lines in LINES to UIOUT.
594
595 UI_ITEM_CHAIN is a cleanup chain for the last source line and the
596 instructions corresponding to that source line. When printing a new source
597 line, we do the cleanups for the open chain and open a new cleanup chain for
598 the new source line. If the source line range in LINES is not empty, this
599 function will leave the cleanup chain for the last printed source line open
600 so instructions can be added to it. */
601
602 static void
603 btrace_print_lines (struct btrace_line_range lines, struct ui_out *uiout,
604 struct cleanup **ui_item_chain, int flags)
605 {
606 print_source_lines_flags psl_flags;
607 int line;
608
609 psl_flags = 0;
610 if (flags & DISASSEMBLY_FILENAME)
611 psl_flags |= PRINT_SOURCE_LINES_FILENAME;
612
613 for (line = lines.begin; line < lines.end; ++line)
614 {
615 if (*ui_item_chain != NULL)
616 do_cleanups (*ui_item_chain);
617
618 *ui_item_chain
619 = make_cleanup_ui_out_tuple_begin_end (uiout, "src_and_asm_line");
620
621 print_source_lines (lines.symtab, line, line + 1, psl_flags);
622
623 make_cleanup_ui_out_list_begin_end (uiout, "line_asm_insn");
624 }
625 }
626
627 /* Disassemble a section of the recorded instruction trace. */
628
629 static void
630 btrace_insn_history (struct ui_out *uiout,
631 const struct btrace_thread_info *btinfo,
632 const struct btrace_insn_iterator *begin,
633 const struct btrace_insn_iterator *end, int flags)
634 {
635 struct cleanup *cleanups, *ui_item_chain;
636 struct gdbarch *gdbarch;
637 struct btrace_insn_iterator it;
638 struct btrace_line_range last_lines;
639
640 DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
641 btrace_insn_number (end));
642
643 flags |= DISASSEMBLY_SPECULATIVE;
644
645 gdbarch = target_gdbarch ();
646 last_lines = btrace_mk_line_range (NULL, 0, 0);
647
648 cleanups = make_cleanup_ui_out_list_begin_end (uiout, "asm_insns");
649
650 /* UI_ITEM_CHAIN is a cleanup chain for the last source line and the
651 instructions corresponding to that line. */
652 ui_item_chain = NULL;
653
654 gdb_pretty_print_disassembler disasm (gdbarch);
655
656 for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
657 {
658 const struct btrace_insn *insn;
659
660 insn = btrace_insn_get (&it);
661
662 /* A NULL instruction indicates a gap in the trace. */
663 if (insn == NULL)
664 {
665 const struct btrace_config *conf;
666
667 conf = btrace_conf (btinfo);
668
669 /* We have trace so we must have a configuration. */
670 gdb_assert (conf != NULL);
671
672 uiout->field_fmt ("insn-number", "%u",
673 btrace_insn_number (&it));
674 uiout->text ("\t");
675
676 btrace_ui_out_decode_error (uiout, btrace_insn_get_error (&it),
677 conf->format);
678 }
679 else
680 {
681 struct disasm_insn dinsn;
682
683 if ((flags & DISASSEMBLY_SOURCE) != 0)
684 {
685 struct btrace_line_range lines;
686
687 lines = btrace_find_line_range (insn->pc);
688 if (!btrace_line_range_is_empty (lines)
689 && !btrace_line_range_contains_range (last_lines, lines))
690 {
691 btrace_print_lines (lines, uiout, &ui_item_chain, flags);
692 last_lines = lines;
693 }
694 else if (ui_item_chain == NULL)
695 {
696 ui_item_chain
697 = make_cleanup_ui_out_tuple_begin_end (uiout,
698 "src_and_asm_line");
699 /* No source information. */
700 make_cleanup_ui_out_list_begin_end (uiout, "line_asm_insn");
701 }
702
703 gdb_assert (ui_item_chain != NULL);
704 }
705
706 memset (&dinsn, 0, sizeof (dinsn));
707 dinsn.number = btrace_insn_number (&it);
708 dinsn.addr = insn->pc;
709
710 if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
711 dinsn.is_speculative = 1;
712
713 disasm.pretty_print_insn (uiout, &dinsn, flags);
714 }
715 }
716
717 do_cleanups (cleanups);
718 }
719
720 /* The to_insn_history method of target record-btrace. */
721
722 static void
723 record_btrace_insn_history (struct target_ops *self, int size, int flags)
724 {
725 struct btrace_thread_info *btinfo;
726 struct btrace_insn_history *history;
727 struct btrace_insn_iterator begin, end;
728 struct ui_out *uiout;
729 unsigned int context, covered;
730
731 uiout = current_uiout;
732 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
733 context = abs (size);
734 if (context == 0)
735 error (_("Bad record instruction-history-size."));
736
737 btinfo = require_btrace ();
738 history = btinfo->insn_history;
739 if (history == NULL)
740 {
741 struct btrace_insn_iterator *replay;
742
743 DEBUG ("insn-history (0x%x): %d", flags, size);
744
745 /* If we're replaying, we start at the replay position. Otherwise, we
746 start at the tail of the trace. */
747 replay = btinfo->replay;
748 if (replay != NULL)
749 begin = *replay;
750 else
751 btrace_insn_end (&begin, btinfo);
752
753 /* We start from here and expand in the requested direction. Then we
754 expand in the other direction, as well, to fill up any remaining
755 context. */
756 end = begin;
757 if (size < 0)
758 {
759 /* We want the current position covered, as well. */
760 covered = btrace_insn_next (&end, 1);
761 covered += btrace_insn_prev (&begin, context - covered);
762 covered += btrace_insn_next (&end, context - covered);
763 }
764 else
765 {
766 covered = btrace_insn_next (&end, context);
767 covered += btrace_insn_prev (&begin, context - covered);
768 }
769 }
770 else
771 {
772 begin = history->begin;
773 end = history->end;
774
775 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
776 btrace_insn_number (&begin), btrace_insn_number (&end));
777
778 if (size < 0)
779 {
780 end = begin;
781 covered = btrace_insn_prev (&begin, context);
782 }
783 else
784 {
785 begin = end;
786 covered = btrace_insn_next (&end, context);
787 }
788 }
789
790 if (covered > 0)
791 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
792 else
793 {
794 if (size < 0)
795 printf_unfiltered (_("At the start of the branch trace record.\n"));
796 else
797 printf_unfiltered (_("At the end of the branch trace record.\n"));
798 }
799
800 btrace_set_insn_history (btinfo, &begin, &end);
801 }
802
803 /* The to_insn_history_range method of target record-btrace. */
804
805 static void
806 record_btrace_insn_history_range (struct target_ops *self,
807 ULONGEST from, ULONGEST to, int flags)
808 {
809 struct btrace_thread_info *btinfo;
810 struct btrace_insn_history *history;
811 struct btrace_insn_iterator begin, end;
812 struct ui_out *uiout;
813 unsigned int low, high;
814 int found;
815
816 uiout = current_uiout;
817 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
818 low = from;
819 high = to;
820
821 DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);
822
823 /* Check for wrap-arounds. */
824 if (low != from || high != to)
825 error (_("Bad range."));
826
827 if (high < low)
828 error (_("Bad range."));
829
830 btinfo = require_btrace ();
831
832 found = btrace_find_insn_by_number (&begin, btinfo, low);
833 if (found == 0)
834 error (_("Range out of bounds."));
835
836 found = btrace_find_insn_by_number (&end, btinfo, high);
837 if (found == 0)
838 {
839 /* Silently truncate the range. */
840 btrace_insn_end (&end, btinfo);
841 }
842 else
843 {
844 /* We want both begin and end to be inclusive. */
845 btrace_insn_next (&end, 1);
846 }
847
848 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
849 btrace_set_insn_history (btinfo, &begin, &end);
850 }
851
852 /* The to_insn_history_from method of target record-btrace. */
853
854 static void
855 record_btrace_insn_history_from (struct target_ops *self,
856 ULONGEST from, int size, int flags)
857 {
858 ULONGEST begin, end, context;
859
860 context = abs (size);
861 if (context == 0)
862 error (_("Bad record instruction-history-size."));
863
864 if (size < 0)
865 {
866 end = from;
867
868 if (from < context)
869 begin = 0;
870 else
871 begin = from - context + 1;
872 }
873 else
874 {
875 begin = from;
876 end = from + context - 1;
877
878 /* Check for wrap-around. */
879 if (end < begin)
880 end = ULONGEST_MAX;
881 }
882
883 record_btrace_insn_history_range (self, begin, end, flags);
884 }
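
/* Worked example (illustrative) for the function above: with FROM = 25
   and SIZE = -10, the computed range is [16; 25], i.e. ten instructions
   ending at FROM; with SIZE = 10, it is [25; 34], i.e. ten instructions
   starting at FROM.  */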
885
886 /* Print the instruction number range for a function call history line. */
887
888 static void
889 btrace_call_history_insn_range (struct ui_out *uiout,
890 const struct btrace_function *bfun)
891 {
892 unsigned int begin, end, size;
893
894 size = bfun->insn.size ();
895 gdb_assert (size > 0);
896
897 begin = bfun->insn_offset;
898 end = begin + size - 1;
899
900 ui_out_field_uint (uiout, "insn begin", begin);
901 uiout->text (",");
902 ui_out_field_uint (uiout, "insn end", end);
903 }
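
/* For example (illustrative): a function segment with insn_offset 100
   containing three instructions is printed as "100,102"; both bounds
   are inclusive.  */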
904
905 /* Compute the lowest and highest source line for the instructions in BFUN
906 and return them in PBEGIN and PEND.
907 Ignore instructions that can't be mapped to BFUN, e.g. instructions that
908 result from inlining or macro expansion. */
909
910 static void
911 btrace_compute_src_line_range (const struct btrace_function *bfun,
912 int *pbegin, int *pend)
913 {
914 struct symtab *symtab;
915 struct symbol *sym;
916 int begin, end;
917
918 begin = INT_MAX;
919 end = INT_MIN;
920
921 sym = bfun->sym;
922 if (sym == NULL)
923 goto out;
924
925 symtab = symbol_symtab (sym);
926
927 for (const btrace_insn &insn : bfun->insn)
928 {
929 struct symtab_and_line sal;
930
931 sal = find_pc_line (insn.pc, 0);
932 if (sal.symtab != symtab || sal.line == 0)
933 continue;
934
935 begin = std::min (begin, sal.line);
936 end = std::max (end, sal.line);
937 }
938
939 out:
940 *pbegin = begin;
941 *pend = end;
942 }
943
944 /* Print the source line information for a function call history line. */
945
946 static void
947 btrace_call_history_src_line (struct ui_out *uiout,
948 const struct btrace_function *bfun)
949 {
950 struct symbol *sym;
951 int begin, end;
952
953 sym = bfun->sym;
954 if (sym == NULL)
955 return;
956
957 uiout->field_string ("file",
958 symtab_to_filename_for_display (symbol_symtab (sym)));
959
960 btrace_compute_src_line_range (bfun, &begin, &end);
961 if (end < begin)
962 return;
963
964 uiout->text (":");
965 uiout->field_int ("min line", begin);
966
967 if (end == begin)
968 return;
969
970 uiout->text (",");
971 uiout->field_int ("max line", end);
972 }
973
974 /* Get the name of a branch trace function. */
975
976 static const char *
977 btrace_get_bfun_name (const struct btrace_function *bfun)
978 {
979 struct minimal_symbol *msym;
980 struct symbol *sym;
981
982 if (bfun == NULL)
983 return "??";
984
985 msym = bfun->msym;
986 sym = bfun->sym;
987
988 if (sym != NULL)
989 return SYMBOL_PRINT_NAME (sym);
990 else if (msym != NULL)
991 return MSYMBOL_PRINT_NAME (msym);
992 else
993 return "??";
994 }
995
996 /* Disassemble a section of the recorded function trace. */
997
998 static void
999 btrace_call_history (struct ui_out *uiout,
1000 const struct btrace_thread_info *btinfo,
1001 const struct btrace_call_iterator *begin,
1002 const struct btrace_call_iterator *end,
1003 int int_flags)
1004 {
1005 struct btrace_call_iterator it;
1006 record_print_flags flags = (enum record_print_flag) int_flags;
1007
1008 DEBUG ("ftrace (0x%x): [%u; %u)", int_flags, btrace_call_number (begin),
1009 btrace_call_number (end));
1010
1011 for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
1012 {
1013 const struct btrace_function *bfun;
1014 struct minimal_symbol *msym;
1015 struct symbol *sym;
1016
1017 bfun = btrace_call_get (&it);
1018 sym = bfun->sym;
1019 msym = bfun->msym;
1020
1021 /* Print the function index. */
1022 ui_out_field_uint (uiout, "index", bfun->number);
1023 uiout->text ("\t");
1024
1025 /* Indicate gaps in the trace. */
1026 if (bfun->errcode != 0)
1027 {
1028 const struct btrace_config *conf;
1029
1030 conf = btrace_conf (btinfo);
1031
1032 /* We have trace so we must have a configuration. */
1033 gdb_assert (conf != NULL);
1034
1035 btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);
1036
1037 continue;
1038 }
1039
1040 if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
1041 {
1042 int level = bfun->level + btinfo->level, i;
1043
1044 for (i = 0; i < level; ++i)
1045 uiout->text (" ");
1046 }
1047
1048 if (sym != NULL)
1049 uiout->field_string ("function", SYMBOL_PRINT_NAME (sym));
1050 else if (msym != NULL)
1051 uiout->field_string ("function", MSYMBOL_PRINT_NAME (msym));
1052 else if (!uiout->is_mi_like_p ())
1053 uiout->field_string ("function", "??");
1054
1055 if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
1056 {
1057 uiout->text (_("\tinst "));
1058 btrace_call_history_insn_range (uiout, bfun);
1059 }
1060
1061 if ((flags & RECORD_PRINT_SRC_LINE) != 0)
1062 {
1063 uiout->text (_("\tat "));
1064 btrace_call_history_src_line (uiout, bfun);
1065 }
1066
1067 uiout->text ("\n");
1068 }
1069 }
1070
1071 /* The to_call_history method of target record-btrace. */
1072
1073 static void
1074 record_btrace_call_history (struct target_ops *self, int size, int int_flags)
1075 {
1076 struct btrace_thread_info *btinfo;
1077 struct btrace_call_history *history;
1078 struct btrace_call_iterator begin, end;
1079 struct ui_out *uiout;
1080 unsigned int context, covered;
1081 record_print_flags flags = (enum record_print_flag) int_flags;
1082
1083 uiout = current_uiout;
1084 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
1085 context = abs (size);
1086 if (context == 0)
1087 error (_("Bad record function-call-history-size."));
1088
1089 btinfo = require_btrace ();
1090 history = btinfo->call_history;
1091 if (history == NULL)
1092 {
1093 struct btrace_insn_iterator *replay;
1094
1095 DEBUG ("call-history (0x%x): %d", int_flags, size);
1096
1097 /* If we're replaying, we start at the replay position. Otherwise, we
1098 start at the tail of the trace. */
1099 replay = btinfo->replay;
1100 if (replay != NULL)
1101 {
1102 begin.btinfo = btinfo;
1103 begin.index = replay->call_index;
1104 }
1105 else
1106 btrace_call_end (&begin, btinfo);
1107
1108 /* We start from here and expand in the requested direction. Then we
1109 expand in the other direction, as well, to fill up any remaining
1110 context. */
1111 end = begin;
1112 if (size < 0)
1113 {
1114 /* We want the current position covered, as well. */
1115 covered = btrace_call_next (&end, 1);
1116 covered += btrace_call_prev (&begin, context - covered);
1117 covered += btrace_call_next (&end, context - covered);
1118 }
1119 else
1120 {
1121 covered = btrace_call_next (&end, context);
1122 covered += btrace_call_prev (&begin, context - covered);
1123 }
1124 }
1125 else
1126 {
1127 begin = history->begin;
1128 end = history->end;
1129
1130 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", int_flags, size,
1131 btrace_call_number (&begin), btrace_call_number (&end));
1132
1133 if (size < 0)
1134 {
1135 end = begin;
1136 covered = btrace_call_prev (&begin, context);
1137 }
1138 else
1139 {
1140 begin = end;
1141 covered = btrace_call_next (&end, context);
1142 }
1143 }
1144
1145 if (covered > 0)
1146 btrace_call_history (uiout, btinfo, &begin, &end, flags);
1147 else
1148 {
1149 if (size < 0)
1150 printf_unfiltered (_("At the start of the branch trace record.\n"));
1151 else
1152 printf_unfiltered (_("At the end of the branch trace record.\n"));
1153 }
1154
1155 btrace_set_call_history (btinfo, &begin, &end);
1156 }
1157
1158 /* The to_call_history_range method of target record-btrace. */
1159
1160 static void
1161 record_btrace_call_history_range (struct target_ops *self,
1162 ULONGEST from, ULONGEST to,
1163 int int_flags)
1164 {
1165 struct btrace_thread_info *btinfo;
1166 struct btrace_call_history *history;
1167 struct btrace_call_iterator begin, end;
1168 struct ui_out *uiout;
1169 unsigned int low, high;
1170 int found;
1171 record_print_flags flags = (enum record_print_flag) int_flags;
1172
1173 uiout = current_uiout;
1174 ui_out_emit_tuple tuple_emitter (uiout, "func history");
1175 low = from;
1176 high = to;
1177
1178 DEBUG ("call-history (0x%x): [%u; %u)", int_flags, low, high);
1179
1180 /* Check for wrap-arounds. */
1181 if (low != from || high != to)
1182 error (_("Bad range."));
1183
1184 if (high < low)
1185 error (_("Bad range."));
1186
1187 btinfo = require_btrace ();
1188
1189 found = btrace_find_call_by_number (&begin, btinfo, low);
1190 if (found == 0)
1191 error (_("Range out of bounds."));
1192
1193 found = btrace_find_call_by_number (&end, btinfo, high);
1194 if (found == 0)
1195 {
1196 /* Silently truncate the range. */
1197 btrace_call_end (&end, btinfo);
1198 }
1199 else
1200 {
1201 /* We want both begin and end to be inclusive. */
1202 btrace_call_next (&end, 1);
1203 }
1204
1205 btrace_call_history (uiout, btinfo, &begin, &end, flags);
1206 btrace_set_call_history (btinfo, &begin, &end);
1207 }
1208
1209 /* The to_call_history_from method of target record-btrace. */
1210
1211 static void
1212 record_btrace_call_history_from (struct target_ops *self,
1213 ULONGEST from, int size,
1214 int int_flags)
1215 {
1216 ULONGEST begin, end, context;
1217 record_print_flags flags = (enum record_print_flag) int_flags;
1218
1219 context = abs (size);
1220 if (context == 0)
1221 error (_("Bad record function-call-history-size."));
1222
1223 if (size < 0)
1224 {
1225 end = from;
1226
1227 if (from < context)
1228 begin = 0;
1229 else
1230 begin = from - context + 1;
1231 }
1232 else
1233 {
1234 begin = from;
1235 end = from + context - 1;
1236
1237 /* Check for wrap-around. */
1238 if (end < begin)
1239 end = ULONGEST_MAX;
1240 }
1241
1242 record_btrace_call_history_range (self, begin, end, flags);
1243 }
1244
1245 /* The to_record_method method of target record-btrace. */
1246
1247 static enum record_method
1248 record_btrace_record_method (struct target_ops *self, ptid_t ptid)
1249 {
1251 struct thread_info * const tp = find_thread_ptid (ptid);
1252
1253 if (tp == NULL)
1254 error (_("No thread."));
1255
1256 if (tp->btrace.target == NULL)
1257 return RECORD_METHOD_NONE;
1258
1259 return RECORD_METHOD_BTRACE;
1260 }
1261
1262 /* The to_record_is_replaying method of target record-btrace. */
1263
1264 static int
1265 record_btrace_is_replaying (struct target_ops *self, ptid_t ptid)
1266 {
1267 struct thread_info *tp;
1268
1269 ALL_NON_EXITED_THREADS (tp)
1270 if (ptid_match (tp->ptid, ptid) && btrace_is_replaying (tp))
1271 return 1;
1272
1273 return 0;
1274 }
1275
1276 /* The to_record_will_replay method of target record-btrace. */
1277
1278 static int
1279 record_btrace_will_replay (struct target_ops *self, ptid_t ptid, int dir)
1280 {
1281 return dir == EXEC_REVERSE || record_btrace_is_replaying (self, ptid);
1282 }
1283
1284 /* The to_xfer_partial method of target record-btrace. */
1285
1286 static enum target_xfer_status
1287 record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
1288 const char *annex, gdb_byte *readbuf,
1289 const gdb_byte *writebuf, ULONGEST offset,
1290 ULONGEST len, ULONGEST *xfered_len)
1291 {
1293
1294 /* Filter out requests that don't make sense during replay. */
1295 if (replay_memory_access == replay_memory_access_read_only
1296 && !record_btrace_generating_corefile
1297 && record_btrace_is_replaying (ops, inferior_ptid))
1298 {
1299 switch (object)
1300 {
1301 case TARGET_OBJECT_MEMORY:
1302 {
1303 struct target_section *section;
1304
1305 /* We do not allow writing memory in general. */
1306 if (writebuf != NULL)
1307 {
1308 *xfered_len = len;
1309 return TARGET_XFER_UNAVAILABLE;
1310 }
1311
1312 /* We allow reading readonly memory. */
1313 section = target_section_by_addr (ops, offset);
1314 if (section != NULL)
1315 {
1316 /* Check if the section we found is readonly. */
1317 if ((bfd_get_section_flags (section->the_bfd_section->owner,
1318 section->the_bfd_section)
1319 & SEC_READONLY) != 0)
1320 {
1321 /* Truncate the request to fit into this section. */
1322 len = std::min (len, section->endaddr - offset);
1323 break;
1324 }
1325 }
1326
1327 *xfered_len = len;
1328 return TARGET_XFER_UNAVAILABLE;
1329 }
1330 }
1331 }
1332
1333 /* Forward the request. */
1334 ops = ops->beneath;
1335 return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
1336 offset, len, xfered_len);
1337 }
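
/* For example (illustrative): while replaying with the default
   "set record btrace replay-memory-access read-only" setting, write
   requests are answered with TARGET_XFER_UNAVAILABLE and reads succeed
   only for read-only sections such as .text; switching the setting to
   "read-write" forwards all memory requests to the target beneath.  */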
1338
1339 /* The to_insert_breakpoint method of target record-btrace. */
1340
1341 static int
1342 record_btrace_insert_breakpoint (struct target_ops *ops,
1343 struct gdbarch *gdbarch,
1344 struct bp_target_info *bp_tgt)
1345 {
1346 const char *old;
1347 int ret;
1348
1349 /* Inserting breakpoints requires accessing memory. Allow it for the
1350 duration of this function. */
1351 old = replay_memory_access;
1352 replay_memory_access = replay_memory_access_read_write;
1353
1354 ret = 0;
1355 TRY
1356 {
1357 ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);
1358 }
1359 CATCH (except, RETURN_MASK_ALL)
1360 {
1361 replay_memory_access = old;
1362 throw_exception (except);
1363 }
1364 END_CATCH
1365 replay_memory_access = old;
1366
1367 return ret;
1368 }
1369
1370 /* The to_remove_breakpoint method of target record-btrace. */
1371
1372 static int
1373 record_btrace_remove_breakpoint (struct target_ops *ops,
1374 struct gdbarch *gdbarch,
1375 struct bp_target_info *bp_tgt,
1376 enum remove_bp_reason reason)
1377 {
1378 const char *old;
1379 int ret;
1380
1381 /* Removing breakpoints requires accessing memory. Allow it for the
1382 duration of this function. */
1383 old = replay_memory_access;
1384 replay_memory_access = replay_memory_access_read_write;
1385
1386 ret = 0;
1387 TRY
1388 {
1389 ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt,
1390 reason);
1391 }
1392 CATCH (except, RETURN_MASK_ALL)
1393 {
1394 replay_memory_access = old;
1395 throw_exception (except);
1396 }
1397 END_CATCH
1398 replay_memory_access = old;
1399
1400 return ret;
1401 }
1402
1403 /* The to_fetch_registers method of target record-btrace. */
1404
1405 static void
1406 record_btrace_fetch_registers (struct target_ops *ops,
1407 struct regcache *regcache, int regno)
1408 {
1409 struct btrace_insn_iterator *replay;
1410 struct thread_info *tp;
1411
1412 tp = find_thread_ptid (regcache_get_ptid (regcache));
1413 gdb_assert (tp != NULL);
1414
1415 replay = tp->btrace.replay;
1416 if (replay != NULL && !record_btrace_generating_corefile)
1417 {
1418 const struct btrace_insn *insn;
1419 struct gdbarch *gdbarch;
1420 int pcreg;
1421
1422 gdbarch = get_regcache_arch (regcache);
1423 pcreg = gdbarch_pc_regnum (gdbarch);
1424 if (pcreg < 0)
1425 return;
1426
1427 /* We can only provide the PC register. */
1428 if (regno >= 0 && regno != pcreg)
1429 return;
1430
1431 insn = btrace_insn_get (replay);
1432 gdb_assert (insn != NULL);
1433
1434 regcache_raw_supply (regcache, regno, &insn->pc);
1435 }
1436 else
1437 {
1438 struct target_ops *t = ops->beneath;
1439
1440 t->to_fetch_registers (t, regcache, regno);
1441 }
1442 }
1443
1444 /* The to_store_registers method of target record-btrace. */
1445
1446 static void
1447 record_btrace_store_registers (struct target_ops *ops,
1448 struct regcache *regcache, int regno)
1449 {
1450 struct target_ops *t;
1451
1452 if (!record_btrace_generating_corefile
1453 && record_btrace_is_replaying (ops, regcache_get_ptid (regcache)))
1454 error (_("Cannot write registers while replaying."));
1455
1456 gdb_assert (may_write_registers != 0);
1457
1458 t = ops->beneath;
1459 t->to_store_registers (t, regcache, regno);
1460 }
1461
1462 /* The to_prepare_to_store method of target record-btrace. */
1463
1464 static void
1465 record_btrace_prepare_to_store (struct target_ops *ops,
1466 struct regcache *regcache)
1467 {
1468 struct target_ops *t;
1469
1470 if (!record_btrace_generating_corefile
1471 && record_btrace_is_replaying (ops, regcache_get_ptid (regcache)))
1472 return;
1473
1474 t = ops->beneath;
1475 t->to_prepare_to_store (t, regcache);
1476 }
1477
1478 /* The branch trace frame cache. */
1479
1480 struct btrace_frame_cache
1481 {
1482 /* The thread. */
1483 struct thread_info *tp;
1484
1485 /* The frame info. */
1486 struct frame_info *frame;
1487
1488 /* The branch trace function segment. */
1489 const struct btrace_function *bfun;
1490 };
1491
1492 /* A struct btrace_frame_cache hash table indexed by NEXT. */
1493
1494 static htab_t bfcache;
1495
1496 /* hash_f for htab_create_alloc of bfcache. */
1497
1498 static hashval_t
1499 bfcache_hash (const void *arg)
1500 {
1501 const struct btrace_frame_cache *cache
1502 = (const struct btrace_frame_cache *) arg;
1503
1504 return htab_hash_pointer (cache->frame);
1505 }
1506
1507 /* eq_f for htab_create_alloc of bfcache. */
1508
1509 static int
1510 bfcache_eq (const void *arg1, const void *arg2)
1511 {
1512 const struct btrace_frame_cache *cache1
1513 = (const struct btrace_frame_cache *) arg1;
1514 const struct btrace_frame_cache *cache2
1515 = (const struct btrace_frame_cache *) arg2;
1516
1517 return cache1->frame == cache2->frame;
1518 }
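
/* Usage sketch (illustrative): record_btrace_frame_sniffer creates an
   entry via bfcache_new (this_frame) and fills in its thread and
   function segment; btrace_get_frame_function looks entries up again,
   and record_btrace_frame_dealloc_cache removes them.  Since frames are
   compared by pointer identity, both the hash and the equality function
   use only the FRAME member.  */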
1519
1520 /* Create a new btrace frame cache. */
1521
1522 static struct btrace_frame_cache *
1523 bfcache_new (struct frame_info *frame)
1524 {
1525 struct btrace_frame_cache *cache;
1526 void **slot;
1527
1528 cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
1529 cache->frame = frame;
1530
1531 slot = htab_find_slot (bfcache, cache, INSERT);
1532 gdb_assert (*slot == NULL);
1533 *slot = cache;
1534
1535 return cache;
1536 }
1537
1538 /* Extract the branch trace function from a branch trace frame. */
1539
1540 static const struct btrace_function *
1541 btrace_get_frame_function (struct frame_info *frame)
1542 {
1543 const struct btrace_frame_cache *cache;
1544 const struct btrace_function *bfun;
1545 struct btrace_frame_cache pattern;
1546 void **slot;
1547
1548 pattern.frame = frame;
1549
1550 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1551 if (slot == NULL)
1552 return NULL;
1553
1554 cache = (const struct btrace_frame_cache *) *slot;
1555 return cache->bfun;
1556 }
1557
1558 /* Implement stop_reason method for record_btrace_frame_unwind. */
1559
1560 static enum unwind_stop_reason
1561 record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1562 void **this_cache)
1563 {
1564 const struct btrace_frame_cache *cache;
1565 const struct btrace_function *bfun;
1566
1567 cache = (const struct btrace_frame_cache *) *this_cache;
1568 bfun = cache->bfun;
1569 gdb_assert (bfun != NULL);
1570
1571 if (bfun->up == 0)
1572 return UNWIND_UNAVAILABLE;
1573
1574 return UNWIND_NO_REASON;
1575 }
1576
1577 /* Implement this_id method for record_btrace_frame_unwind. */
1578
1579 static void
1580 record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
1581 struct frame_id *this_id)
1582 {
1583 const struct btrace_frame_cache *cache;
1584 const struct btrace_function *bfun;
1585 struct btrace_call_iterator it;
1586 CORE_ADDR code, special;
1587
1588 cache = (const struct btrace_frame_cache *) *this_cache;
1589
1590 bfun = cache->bfun;
1591 gdb_assert (bfun != NULL);
1592
1593 while (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->prev) != 0)
1594 bfun = btrace_call_get (&it);
1595
1596 code = get_frame_func (this_frame);
1597 special = bfun->number;
1598
1599 *this_id = frame_id_build_unavailable_stack_special (code, special);
1600
1601 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1602 btrace_get_bfun_name (cache->bfun),
1603 core_addr_to_string_nz (this_id->code_addr),
1604 core_addr_to_string_nz (this_id->special_addr));
1605 }
1606
1607 /* Implement prev_register method for record_btrace_frame_unwind. */
1608
1609 static struct value *
1610 record_btrace_frame_prev_register (struct frame_info *this_frame,
1611 void **this_cache,
1612 int regnum)
1613 {
1614 const struct btrace_frame_cache *cache;
1615 const struct btrace_function *bfun, *caller;
1616 struct btrace_call_iterator it;
1617 struct gdbarch *gdbarch;
1618 CORE_ADDR pc;
1619 int pcreg;
1620
1621 gdbarch = get_frame_arch (this_frame);
1622 pcreg = gdbarch_pc_regnum (gdbarch);
1623 if (pcreg < 0 || regnum != pcreg)
1624 throw_error (NOT_AVAILABLE_ERROR,
1625 _("Registers are not available in btrace record history"));
1626
1627 cache = (const struct btrace_frame_cache *) *this_cache;
1628 bfun = cache->bfun;
1629 gdb_assert (bfun != NULL);
1630
1631 if (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->up) == 0)
1632 throw_error (NOT_AVAILABLE_ERROR,
1633 _("No caller in btrace record history"));
1634
1635 caller = btrace_call_get (&it);
1636
1637 if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
1638 pc = caller->insn.front ().pc;
1639 else
1640 {
1641 pc = caller->insn.back ().pc;
1642 pc += gdb_insn_length (gdbarch, pc);
1643 }
1644
1645 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1646 btrace_get_bfun_name (bfun), bfun->level,
1647 core_addr_to_string_nz (pc));
1648
1649 return frame_unwind_got_address (this_frame, regnum, pc);
1650 }
1651
1652 /* Implement sniffer method for record_btrace_frame_unwind. */
1653
1654 static int
1655 record_btrace_frame_sniffer (const struct frame_unwind *self,
1656 struct frame_info *this_frame,
1657 void **this_cache)
1658 {
1659 const struct btrace_function *bfun;
1660 struct btrace_frame_cache *cache;
1661 struct thread_info *tp;
1662 struct frame_info *next;
1663
1664 /* THIS_FRAME does not contain a reference to its thread. */
1665 tp = find_thread_ptid (inferior_ptid);
1666 gdb_assert (tp != NULL);
1667
1668 bfun = NULL;
1669 next = get_next_frame (this_frame);
1670 if (next == NULL)
1671 {
1672 const struct btrace_insn_iterator *replay;
1673
1674 replay = tp->btrace.replay;
1675 if (replay != NULL)
1676 bfun = &replay->btinfo->functions[replay->call_index];
1677 }
1678 else
1679 {
1680 const struct btrace_function *callee;
1681 struct btrace_call_iterator it;
1682
1683 callee = btrace_get_frame_function (next);
1684 if (callee == NULL || (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
1685 return 0;
1686
1687 if (btrace_find_call_by_number (&it, &tp->btrace, callee->up) == 0)
1688 return 0;
1689
1690 bfun = btrace_call_get (&it);
1691 }
1692
1693 if (bfun == NULL)
1694 return 0;
1695
1696 DEBUG ("[frame] sniffed frame for %s on level %d",
1697 btrace_get_bfun_name (bfun), bfun->level);
1698
1699 /* This is our frame. Initialize the frame cache. */
1700 cache = bfcache_new (this_frame);
1701 cache->tp = tp;
1702 cache->bfun = bfun;
1703
1704 *this_cache = cache;
1705 return 1;
1706 }
1707
1708 /* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1709
1710 static int
1711 record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
1712 struct frame_info *this_frame,
1713 void **this_cache)
1714 {
1715 const struct btrace_function *bfun, *callee;
1716 struct btrace_frame_cache *cache;
1717 struct btrace_call_iterator it;
1718 struct frame_info *next;
1719 struct thread_info *tinfo;
1720
1721 next = get_next_frame (this_frame);
1722 if (next == NULL)
1723 return 0;
1724
1725 callee = btrace_get_frame_function (next);
1726 if (callee == NULL)
1727 return 0;
1728
1729 if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1730 return 0;
1731
1732 tinfo = find_thread_ptid (inferior_ptid);
1733 if (btrace_find_call_by_number (&it, &tinfo->btrace, callee->up) == 0)
1734 return 0;
1735
1736 bfun = btrace_call_get (&it);
1737
1738 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1739 btrace_get_bfun_name (bfun), bfun->level);
1740
1741 /* This is our frame. Initialize the frame cache. */
1742 cache = bfcache_new (this_frame);
1743 cache->tp = tinfo;
1744 cache->bfun = bfun;
1745
1746 *this_cache = cache;
1747 return 1;
1748 }
1749
1750 static void
1751 record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
1752 {
1753 struct btrace_frame_cache *cache;
1754 void **slot;
1755
1756 cache = (struct btrace_frame_cache *) this_cache;
1757
1758 slot = htab_find_slot (bfcache, cache, NO_INSERT);
1759 gdb_assert (slot != NULL);
1760
1761 htab_remove_elt (bfcache, cache);
1762 }
1763
1764 /* btrace recording does not store previous memory content, nor previous stack
1765 frame content. Any unwinding would return erroneous results as the stack
1766 contents no longer match the changed PC value restored from history.
1767 Therefore this unwinder reports any possibly unwound registers as
1768 <unavailable>. */
1769
1770 const struct frame_unwind record_btrace_frame_unwind =
1771 {
1772 NORMAL_FRAME,
1773 record_btrace_frame_unwind_stop_reason,
1774 record_btrace_frame_this_id,
1775 record_btrace_frame_prev_register,
1776 NULL,
1777 record_btrace_frame_sniffer,
1778 record_btrace_frame_dealloc_cache
1779 };
1780
1781 const struct frame_unwind record_btrace_tailcall_frame_unwind =
1782 {
1783 TAILCALL_FRAME,
1784 record_btrace_frame_unwind_stop_reason,
1785 record_btrace_frame_this_id,
1786 record_btrace_frame_prev_register,
1787 NULL,
1788 record_btrace_tailcall_frame_sniffer,
1789 record_btrace_frame_dealloc_cache
1790 };
1791
1792 /* Implement the to_get_unwinder method. */
1793
1794 static const struct frame_unwind *
1795 record_btrace_to_get_unwinder (struct target_ops *self)
1796 {
1797 return &record_btrace_frame_unwind;
1798 }
1799
1800 /* Implement the to_get_tailcall_unwinder method. */
1801
1802 static const struct frame_unwind *
1803 record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
1804 {
1805 return &record_btrace_tailcall_frame_unwind;
1806 }
1807
1808 /* Return a human-readable string for FLAG. */
1809
1810 static const char *
1811 btrace_thread_flag_to_str (enum btrace_thread_flag flag)
1812 {
1813 switch (flag)
1814 {
1815 case BTHR_STEP:
1816 return "step";
1817
1818 case BTHR_RSTEP:
1819 return "reverse-step";
1820
1821 case BTHR_CONT:
1822 return "cont";
1823
1824 case BTHR_RCONT:
1825 return "reverse-cont";
1826
1827 case BTHR_STOP:
1828 return "stop";
1829 }
1830
1831 return "<invalid>";
1832 }
1833
1834 /* Indicate that TP should be resumed according to FLAG. */
1835
1836 static void
1837 record_btrace_resume_thread (struct thread_info *tp,
1838 enum btrace_thread_flag flag)
1839 {
1840 struct btrace_thread_info *btinfo;
1841
1842 DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp),
1843 target_pid_to_str (tp->ptid), flag, btrace_thread_flag_to_str (flag));
1844
1845 btinfo = &tp->btrace;
1846
1847 /* Fetch the latest branch trace. */
1848 btrace_fetch (tp);
1849
1850 /* A resume request overwrites a preceding resume or stop request. */
1851 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
1852 btinfo->flags |= flag;
1853 }
1854
1855 /* Get the current frame for TP. */
1856
1857 static struct frame_info *
1858 get_thread_current_frame (struct thread_info *tp)
1859 {
1860 struct frame_info *frame;
1861 ptid_t old_inferior_ptid;
1862 int executing;
1863
1864 /* Set INFERIOR_PTID, which is implicitly used by get_current_frame. */
1865 old_inferior_ptid = inferior_ptid;
1866 inferior_ptid = tp->ptid;
1867
1868 /* Clear the executing flag to allow changes to the current frame.
1869 We are not actually running, yet. We just started a reverse execution
1870 command or a record goto command.
1871 For the latter, EXECUTING is false and this has no effect.
1872 For the former, EXECUTING is true and we're in to_wait, about to
1873 move the thread. Since we need to recompute the stack, we temporarily
1874 set EXECUTING to false. */
1875 executing = is_executing (inferior_ptid);
1876 set_executing (inferior_ptid, 0);
1877
1878 frame = NULL;
1879 TRY
1880 {
1881 frame = get_current_frame ();
1882 }
1883 CATCH (except, RETURN_MASK_ALL)
1884 {
1885 /* Restore the previous execution state. */
1886 set_executing (inferior_ptid, executing);
1887
1888 /* Restore the previous inferior_ptid. */
1889 inferior_ptid = old_inferior_ptid;
1890
1891 throw_exception (except);
1892 }
1893 END_CATCH
1894
1895 /* Restore the previous execution state. */
1896 set_executing (inferior_ptid, executing);
1897
1898 /* Restore the previous inferior_ptid. */
1899 inferior_ptid = old_inferior_ptid;
1900
1901 return frame;
1902 }
1903
1904 /* Start replaying a thread. */
1905
1906 static struct btrace_insn_iterator *
1907 record_btrace_start_replaying (struct thread_info *tp)
1908 {
1909 struct btrace_insn_iterator *replay;
1910 struct btrace_thread_info *btinfo;
1911
1912 btinfo = &tp->btrace;
1913 replay = NULL;
1914
1915 /* We can't start replaying without trace. */
1916 if (btinfo->functions.empty ())
1917 return NULL;
1918
1919 /* GDB stores the current frame_id when stepping in order to detect steps
1920 into subroutines.
1921 Since frames are computed differently when we're replaying, we need to
1922 recompute those stored frames and fix them up so we can still detect
1923 subroutines after we started replaying. */
1924 TRY
1925 {
1926 struct frame_info *frame;
1927 struct frame_id frame_id;
1928 int upd_step_frame_id, upd_step_stack_frame_id;
1929
1930 /* The current frame without replaying - computed via normal unwind. */
1931 frame = get_thread_current_frame (tp);
1932 frame_id = get_frame_id (frame);
1933
1934 /* Check if we need to update any stepping-related frame id's. */
1935 upd_step_frame_id = frame_id_eq (frame_id,
1936 tp->control.step_frame_id);
1937 upd_step_stack_frame_id = frame_id_eq (frame_id,
1938 tp->control.step_stack_frame_id);
1939
1940 /* We start replaying at the end of the branch trace. This corresponds
1941 to the current instruction. */
1942 replay = XNEW (struct btrace_insn_iterator);
1943 btrace_insn_end (replay, btinfo);
1944
1945 /* Skip gaps at the end of the trace. */
1946 while (btrace_insn_get (replay) == NULL)
1947 {
1948 unsigned int steps;
1949
1950 steps = btrace_insn_prev (replay, 1);
1951 if (steps == 0)
1952 error (_("No trace."));
1953 }
1954
1955 /* We're not replaying, yet. */
1956 gdb_assert (btinfo->replay == NULL);
1957 btinfo->replay = replay;
1958
1959 /* Make sure we're not using any stale registers. */
1960 registers_changed_ptid (tp->ptid);
1961
1962 /* The current frame with replaying - computed via btrace unwind. */
1963 frame = get_thread_current_frame (tp);
1964 frame_id = get_frame_id (frame);
1965
1966 /* Replace stepping related frames where necessary. */
1967 if (upd_step_frame_id)
1968 tp->control.step_frame_id = frame_id;
1969 if (upd_step_stack_frame_id)
1970 tp->control.step_stack_frame_id = frame_id;
1971 }
1972 CATCH (except, RETURN_MASK_ALL)
1973 {
1974 xfree (btinfo->replay);
1975 btinfo->replay = NULL;
1976
1977 registers_changed_ptid (tp->ptid);
1978
1979 throw_exception (except);
1980 }
1981 END_CATCH
1982
1983 return replay;
1984 }
1985
1986 /* Stop replaying a thread. */
1987
1988 static void
1989 record_btrace_stop_replaying (struct thread_info *tp)
1990 {
1991 struct btrace_thread_info *btinfo;
1992
1993 btinfo = &tp->btrace;
1994
1995 xfree (btinfo->replay);
1996 btinfo->replay = NULL;
1997
1998 /* Make sure we're not leaving any stale registers. */
1999 registers_changed_ptid (tp->ptid);
2000 }
2001
2002 /* Stop replaying TP if it is at the end of its execution history. */
2003
2004 static void
2005 record_btrace_stop_replaying_at_end (struct thread_info *tp)
2006 {
2007 struct btrace_insn_iterator *replay, end;
2008 struct btrace_thread_info *btinfo;
2009
2010 btinfo = &tp->btrace;
2011 replay = btinfo->replay;
2012
2013 if (replay == NULL)
2014 return;
2015
2016 btrace_insn_end (&end, btinfo);
2017
2018 if (btrace_insn_cmp (replay, &end) == 0)
2019 record_btrace_stop_replaying (tp);
2020 }
2021
2022 /* The to_resume method of target record-btrace. */
2023
2024 static void
2025 record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
2026 enum gdb_signal signal)
2027 {
2028 struct thread_info *tp;
2029 enum btrace_thread_flag flag, cflag;
2030
2031 DEBUG ("resume %s: %s%s", target_pid_to_str (ptid),
2032 execution_direction == EXEC_REVERSE ? "reverse-" : "",
2033 step ? "step" : "cont");
2034
2035 /* Store the execution direction of the last resume.
2036
2037 If there is more than one to_resume call, we have to rely on infrun
2038 to not change the execution direction in-between. */
2039 record_btrace_resume_exec_dir = execution_direction;
2040
2041 /* As long as we're not replaying, just forward the request.
2042
2043 For non-stop targets this means that no thread is replaying. In order to
2044 make progress, we may need to explicitly move replaying threads to the end
2045 of their execution history. */
2046 if ((execution_direction != EXEC_REVERSE)
2047 && !record_btrace_is_replaying (ops, minus_one_ptid))
2048 {
2049 ops = ops->beneath;
2050 ops->to_resume (ops, ptid, step, signal);
2051 return;
2052 }
2053
2054 /* Compute the btrace thread flag for the requested move. */
2055 if (execution_direction == EXEC_REVERSE)
2056 {
2057 flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;
2058 cflag = BTHR_RCONT;
2059 }
2060 else
2061 {
2062 flag = step == 0 ? BTHR_CONT : BTHR_STEP;
2063 cflag = BTHR_CONT;
2064 }
2065
2066 /* We just indicate the resume intent here. The actual stepping happens in
2067 record_btrace_wait below.
2068
2069 For all-stop targets, we only step INFERIOR_PTID and continue others. */
2070 if (!target_is_non_stop_p ())
2071 {
2072 gdb_assert (ptid_match (inferior_ptid, ptid));
2073
2074 ALL_NON_EXITED_THREADS (tp)
2075 if (ptid_match (tp->ptid, ptid))
2076 {
2077 if (ptid_match (tp->ptid, inferior_ptid))
2078 record_btrace_resume_thread (tp, flag);
2079 else
2080 record_btrace_resume_thread (tp, cflag);
2081 }
2082 }
2083 else
2084 {
2085 ALL_NON_EXITED_THREADS (tp)
2086 if (ptid_match (tp->ptid, ptid))
2087 record_btrace_resume_thread (tp, flag);
2088 }
2089
2090 /* Async support. */
2091 if (target_can_async_p ())
2092 {
2093 target_async (1);
2094 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2095 }
2096 }
2097
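/* For illustration, a reverse-execution session that exercises this method
   (a hedged sketch; the commands are standard GDB commands documented in
   the manual):

     (gdb) record btrace
     (gdb) next
     (gdb) reverse-stepi      <- EXEC_REVERSE, step != 0 -> BTHR_RSTEP
     (gdb) reverse-continue   <- EXEC_REVERSE, step == 0 -> BTHR_RCONT

   Forward "stepi" and "continue" map to BTHR_STEP and BTHR_CONT in the
   same way.  */
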
2098 /* The to_commit_resume method of target record-btrace. */
2099
2100 static void
2101 record_btrace_commit_resume (struct target_ops *ops)
2102 {
2103 if ((execution_direction != EXEC_REVERSE)
2104 && !record_btrace_is_replaying (ops, minus_one_ptid))
2105 ops->beneath->to_commit_resume (ops->beneath);
2106 }
2107
2108 /* Cancel resuming TP. */
2109
2110 static void
2111 record_btrace_cancel_resume (struct thread_info *tp)
2112 {
2113 enum btrace_thread_flag flags;
2114
2115 flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
2116 if (flags == 0)
2117 return;
2118
2119 DEBUG ("cancel resume thread %s (%s): %x (%s)",
2120 print_thread_id (tp),
2121 target_pid_to_str (tp->ptid), flags,
2122 btrace_thread_flag_to_str (flags));
2123
2124 tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
2125 record_btrace_stop_replaying_at_end (tp);
2126 }
2127
2128 /* Return a target_waitstatus indicating that we ran out of history. */
2129
2130 static struct target_waitstatus
2131 btrace_step_no_history (void)
2132 {
2133 struct target_waitstatus status;
2134
2135 status.kind = TARGET_WAITKIND_NO_HISTORY;
2136
2137 return status;
2138 }
2139
2140 /* Return a target_waitstatus indicating that a step finished. */
2141
2142 static struct target_waitstatus
2143 btrace_step_stopped (void)
2144 {
2145 struct target_waitstatus status;
2146
2147 status.kind = TARGET_WAITKIND_STOPPED;
2148 status.value.sig = GDB_SIGNAL_TRAP;
2149
2150 return status;
2151 }
2152
2153 /* Return a target_waitstatus indicating that a thread was stopped as
2154 requested. */
2155
2156 static struct target_waitstatus
2157 btrace_step_stopped_on_request (void)
2158 {
2159 struct target_waitstatus status;
2160
2161 status.kind = TARGET_WAITKIND_STOPPED;
2162 status.value.sig = GDB_SIGNAL_0;
2163
2164 return status;
2165 }
2166
2167 /* Return a target_waitstatus indicating a spurious stop. */
2168
2169 static struct target_waitstatus
2170 btrace_step_spurious (void)
2171 {
2172 struct target_waitstatus status;
2173
2174 status.kind = TARGET_WAITKIND_SPURIOUS;
2175
2176 return status;
2177 }
2178
2179 /* Return a target_waitstatus indicating that the thread was not resumed. */
2180
2181 static struct target_waitstatus
2182 btrace_step_no_resumed (void)
2183 {
2184 struct target_waitstatus status;
2185
2186 status.kind = TARGET_WAITKIND_NO_RESUMED;
2187
2188 return status;
2189 }
2190
2191 /* Return a target_waitstatus indicating that we should wait again. */
2192
2193 static struct target_waitstatus
2194 btrace_step_again (void)
2195 {
2196 struct target_waitstatus status;
2197
2198 status.kind = TARGET_WAITKIND_IGNORE;
2199
2200 return status;
2201 }
2202
2203 /* Clear the record histories. */
2204
2205 static void
2206 record_btrace_clear_histories (struct btrace_thread_info *btinfo)
2207 {
2208 xfree (btinfo->insn_history);
2209 xfree (btinfo->call_history);
2210
2211 btinfo->insn_history = NULL;
2212 btinfo->call_history = NULL;
2213 }
2214
2215 /* Check whether TP's current replay position is at a breakpoint. */
2216
2217 static int
2218 record_btrace_replay_at_breakpoint (struct thread_info *tp)
2219 {
2220 struct btrace_insn_iterator *replay;
2221 struct btrace_thread_info *btinfo;
2222 const struct btrace_insn *insn;
2223 struct inferior *inf;
2224
2225 btinfo = &tp->btrace;
2226 replay = btinfo->replay;
2227
2228 if (replay == NULL)
2229 return 0;
2230
2231 insn = btrace_insn_get (replay);
2232 if (insn == NULL)
2233 return 0;
2234
2235 inf = find_inferior_ptid (tp->ptid);
2236 if (inf == NULL)
2237 return 0;
2238
2239 return record_check_stopped_by_breakpoint (inf->aspace, insn->pc,
2240 &btinfo->stop_reason);
2241 }
2242
2243 /* Step one instruction in forward direction. */
2244
2245 static struct target_waitstatus
2246 record_btrace_single_step_forward (struct thread_info *tp)
2247 {
2248 struct btrace_insn_iterator *replay, end, start;
2249 struct btrace_thread_info *btinfo;
2250
2251 btinfo = &tp->btrace;
2252 replay = btinfo->replay;
2253
2254 /* We're done if we're not replaying. */
2255 if (replay == NULL)
2256 return btrace_step_no_history ();
2257
2258 /* Check if we're stepping a breakpoint. */
2259 if (record_btrace_replay_at_breakpoint (tp))
2260 return btrace_step_stopped ();
2261
2262 /* Skip gaps during replay. If we end up at a gap (at the end of the trace),
2263 jump back to the instruction at which we started. */
2264 start = *replay;
2265 do
2266 {
2267 unsigned int steps;
2268
2269 /* We will bail out here if we continue stepping after reaching the end
2270 of the execution history. */
2271 steps = btrace_insn_next (replay, 1);
2272 if (steps == 0)
2273 {
2274 *replay = start;
2275 return btrace_step_no_history ();
2276 }
2277 }
2278 while (btrace_insn_get (replay) == NULL);
2279
2280 /* Determine the end of the instruction trace. */
2281 btrace_insn_end (&end, btinfo);
2282
2283 /* The execution trace contains (and ends with) the current instruction.
2284 This instruction has not been executed yet, so the trace really ends
2285 one instruction earlier. */
2286 if (btrace_insn_cmp (replay, &end) == 0)
2287 return btrace_step_no_history ();
2288
2289 return btrace_step_spurious ();
2290 }
2291
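/* For example, given the recorded sequence [insn A, gap, insn B] with the
   replay position at A, a single forward step skips the gap and lands on
   B.  If B is also the final entry of the trace, B has not been executed
   yet, so the step reports "no history" instead of stopping at B.  */
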
2292 /* Step one instruction in backward direction. */
2293
2294 static struct target_waitstatus
2295 record_btrace_single_step_backward (struct thread_info *tp)
2296 {
2297 struct btrace_insn_iterator *replay, start;
2298 struct btrace_thread_info *btinfo;
2299
2300 btinfo = &tp->btrace;
2301 replay = btinfo->replay;
2302
2303 /* Start replaying if we're not already doing so. */
2304 if (replay == NULL)
2305 replay = record_btrace_start_replaying (tp);
2306
2307 /* If we can't step any further, we reached the end of the history.
2308 Skip gaps during replay. If we end up at a gap (at the beginning of
2309 the trace), jump back to the instruction at which we started. */
2310 start = *replay;
2311 do
2312 {
2313 unsigned int steps;
2314
2315 steps = btrace_insn_prev (replay, 1);
2316 if (steps == 0)
2317 {
2318 *replay = start;
2319 return btrace_step_no_history ();
2320 }
2321 }
2322 while (btrace_insn_get (replay) == NULL);
2323
2324 /* Check if we're stepping a breakpoint.
2325
2326 For reverse-stepping, this check is after the step. There is logic in
2327 infrun.c that handles reverse-stepping separately. See, for example,
2328 proceed and adjust_pc_after_break.
2329
2330 This code assumes that for reverse-stepping, PC points to the last
2331 de-executed instruction, whereas for forward-stepping PC points to the
2332 next to-be-executed instruction. */
2333 if (record_btrace_replay_at_breakpoint (tp))
2334 return btrace_step_stopped ();
2335
2336 return btrace_step_spurious ();
2337 }
2338
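/* Example of the PC convention above: after reverse-stepping across
   instruction N, the replay PC is N itself (the last de-executed
   instruction), so a breakpoint at N is reported after the step.  When
   stepping forward, the same breakpoint is reported before N executes,
   which is why the forward variant checks before moving.  */
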
2339 /* Step a single thread. */
2340
2341 static struct target_waitstatus
2342 record_btrace_step_thread (struct thread_info *tp)
2343 {
2344 struct btrace_thread_info *btinfo;
2345 struct target_waitstatus status;
2346 enum btrace_thread_flag flags;
2347
2348 btinfo = &tp->btrace;
2349
2350 flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
2351 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
2352
2353 DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp),
2354 target_pid_to_str (tp->ptid), flags,
2355 btrace_thread_flag_to_str (flags));
2356
2357 /* We can't step without an execution history. */
2358 if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
2359 return btrace_step_no_history ();
2360
2361 switch (flags)
2362 {
2363 default:
2364 internal_error (__FILE__, __LINE__, _("invalid stepping type."));
2365
2366 case BTHR_STOP:
2367 return btrace_step_stopped_on_request ();
2368
2369 case BTHR_STEP:
2370 status = record_btrace_single_step_forward (tp);
2371 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2372 break;
2373
2374 return btrace_step_stopped ();
2375
2376 case BTHR_RSTEP:
2377 status = record_btrace_single_step_backward (tp);
2378 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2379 break;
2380
2381 return btrace_step_stopped ();
2382
2383 case BTHR_CONT:
2384 status = record_btrace_single_step_forward (tp);
2385 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2386 break;
2387
2388 btinfo->flags |= flags;
2389 return btrace_step_again ();
2390
2391 case BTHR_RCONT:
2392 status = record_btrace_single_step_backward (tp);
2393 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2394 break;
2395
2396 btinfo->flags |= flags;
2397 return btrace_step_again ();
2398 }
2399
2400 /* We keep threads moving at the end of their execution history. The to_wait
2401 method will stop the thread for which the event is reported. */
2402 if (status.kind == TARGET_WAITKIND_NO_HISTORY)
2403 btinfo->flags |= flags;
2404
2405 return status;
2406 }
2407
2408 /* A vector of threads. */
2409
2410 typedef struct thread_info * tp_t;
2411 DEF_VEC_P (tp_t);
2412
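/* A minimal usage sketch of the VEC API from vec.h as used below
   (assuming TP is a valid thread_info pointer):

     VEC (tp_t) *threads = NULL;
     VEC_safe_push (tp_t, threads, tp);            // append, growing storage
     struct thread_info *first = VEC_index (tp_t, threads, 0);
     VEC_free (tp_t, threads);                     // release the vector

   record_btrace_wait below relies on cleanups rather than calling
   VEC_free directly.  */
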
2413 /* Announce further events if necessary. */
2414
2415 static void
2416 record_btrace_maybe_mark_async_event (const VEC (tp_t) *moving,
2417 const VEC (tp_t) *no_history)
2418 {
2419 int more_moving, more_no_history;
2420
2421 more_moving = !VEC_empty (tp_t, moving);
2422 more_no_history = !VEC_empty (tp_t, no_history);
2423
2424 if (!more_moving && !more_no_history)
2425 return;
2426
2427 if (more_moving)
2428 DEBUG ("movers pending");
2429
2430 if (more_no_history)
2431 DEBUG ("no-history pending");
2432
2433 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2434 }
2435
2436 /* The to_wait method of target record-btrace. */
2437
2438 static ptid_t
2439 record_btrace_wait (struct target_ops *ops, ptid_t ptid,
2440 struct target_waitstatus *status, int options)
2441 {
2442 VEC (tp_t) *moving, *no_history;
2443 struct thread_info *tp, *eventing;
2444 struct cleanup *cleanups = make_cleanup (null_cleanup, NULL);
2445
2446 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);
2447
2448 /* As long as we're not replaying, just forward the request. */
2449 if ((execution_direction != EXEC_REVERSE)
2450 && !record_btrace_is_replaying (ops, minus_one_ptid))
2451 {
2452 ops = ops->beneath;
2453 return ops->to_wait (ops, ptid, status, options);
2454 }
2455
2456 moving = NULL;
2457 no_history = NULL;
2458
2459 make_cleanup (VEC_cleanup (tp_t), &moving);
2460 make_cleanup (VEC_cleanup (tp_t), &no_history);
2461
2462 /* Keep a work list of moving threads. */
2463 ALL_NON_EXITED_THREADS (tp)
2464 if (ptid_match (tp->ptid, ptid)
2465 && ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0))
2466 VEC_safe_push (tp_t, moving, tp);
2467
2468 if (VEC_empty (tp_t, moving))
2469 {
2470 *status = btrace_step_no_resumed ();
2471
2472 DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid),
2473 target_waitstatus_to_string (status).c_str ());
2474
2475 do_cleanups (cleanups);
2476 return null_ptid;
2477 }
2478
2479 /* Step moving threads one by one, one step each, until either one thread
2480 reports an event or we run out of threads to step.
2481
2482 When stepping more than one thread, chances are that some threads reach
2483 the end of their execution history earlier than others. If we reported
2484 this immediately, all-stop on top of non-stop would stop all threads and
2485 resume the same threads next time. And we would report the same thread
2486 having reached the end of its execution history again.
2487
2488 In the worst case, this would starve the other threads. But even if other
2489 threads would be allowed to make progress, this would result in far too
2490 many intermediate stops.
2491
2492 We therefore delay the reporting of "no execution history" until we have
2493 nothing else to report. By this time, all threads should have moved to
2494 either the beginning or the end of their execution history. There will
2495 be a single user-visible stop. */
2496 eventing = NULL;
2497 while ((eventing == NULL) && !VEC_empty (tp_t, moving))
2498 {
2499 unsigned int ix;
2500
2501 ix = 0;
2502 while ((eventing == NULL) && VEC_iterate (tp_t, moving, ix, tp))
2503 {
2504 *status = record_btrace_step_thread (tp);
2505
2506 switch (status->kind)
2507 {
2508 case TARGET_WAITKIND_IGNORE:
2509 ix++;
2510 break;
2511
2512 case TARGET_WAITKIND_NO_HISTORY:
2513 VEC_safe_push (tp_t, no_history,
2514 VEC_ordered_remove (tp_t, moving, ix));
2515 break;
2516
2517 default:
2518 eventing = VEC_unordered_remove (tp_t, moving, ix);
2519 break;
2520 }
2521 }
2522 }
2523
2524 if (eventing == NULL)
2525 {
2526 /* We started with at least one moving thread. This thread must have
2527 either stopped or reached the end of its execution history.
2528
2529 Had it stopped, EVENTING would not be NULL; since it is, the thread
2530 must have run out of history, so NO_HISTORY must not be empty. */
2531 gdb_assert (!VEC_empty (tp_t, no_history));
2532
2533 /* We kept threads moving at the end of their execution history. Stop
2534 EVENTING now that we are going to report its stop. */
2535 eventing = VEC_unordered_remove (tp_t, no_history, 0);
2536 eventing->btrace.flags &= ~BTHR_MOVE;
2537
2538 *status = btrace_step_no_history ();
2539 }
2540
2541 gdb_assert (eventing != NULL);
2542
2543 /* We kept threads replaying at the end of their execution history. Stop
2544 replaying EVENTING now that we are going to report its stop. */
2545 record_btrace_stop_replaying_at_end (eventing);
2546
2547 /* Stop all other threads. */
2548 if (!target_is_non_stop_p ())
2549 ALL_NON_EXITED_THREADS (tp)
2550 record_btrace_cancel_resume (tp);
2551
2552 /* In async mode, we need to announce further events. */
2553 if (target_is_async_p ())
2554 record_btrace_maybe_mark_async_event (moving, no_history);
2555
2556 /* Start record histories anew from the current position. */
2557 record_btrace_clear_histories (&eventing->btrace);
2558
2559 /* We moved the replay position but did not update registers. */
2560 registers_changed_ptid (eventing->ptid);
2561
2562 DEBUG ("wait ended by thread %s (%s): %s",
2563 print_thread_id (eventing),
2564 target_pid_to_str (eventing->ptid),
2565 target_waitstatus_to_string (status).c_str ());
2566
2567 do_cleanups (cleanups);
2568 return eventing->ptid;
2569 }
2570
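/* A worked example of the event selection above: suppose threads T1 and T2
   are both reverse-continuing and T1 reaches the beginning of its history
   first.  T1 is moved to NO_HISTORY while T2 keeps stepping.  If T2 later
   stops (e.g. at a breakpoint), that stop is reported and T1's "no
   history" is deferred; only if T2 also runs out of history do we report
   a single TARGET_WAITKIND_NO_HISTORY stop.  */
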
2571 /* The to_stop method of target record-btrace. */
2572
2573 static void
2574 record_btrace_stop (struct target_ops *ops, ptid_t ptid)
2575 {
2576 DEBUG ("stop %s", target_pid_to_str (ptid));
2577
2578 /* As long as we're not replaying, just forward the request. */
2579 if ((execution_direction != EXEC_REVERSE)
2580 && !record_btrace_is_replaying (ops, minus_one_ptid))
2581 {
2582 ops = ops->beneath;
2583 ops->to_stop (ops, ptid);
2584 }
2585 else
2586 {
2587 struct thread_info *tp;
2588
2589 ALL_NON_EXITED_THREADS (tp)
2590 if (ptid_match (tp->ptid, ptid))
2591 {
2592 tp->btrace.flags &= ~BTHR_MOVE;
2593 tp->btrace.flags |= BTHR_STOP;
2594 }
2595 }
2596 }
2597
2598 /* The to_can_execute_reverse method of target record-btrace. */
2599
2600 static int
2601 record_btrace_can_execute_reverse (struct target_ops *self)
2602 {
2603 return 1;
2604 }
2605
2606 /* The to_stopped_by_sw_breakpoint method of target record-btrace. */
2607
2608 static int
2609 record_btrace_stopped_by_sw_breakpoint (struct target_ops *ops)
2610 {
2611 if (record_btrace_is_replaying (ops, minus_one_ptid))
2612 {
2613 struct thread_info *tp = inferior_thread ();
2614
2615 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2616 }
2617
2618 return ops->beneath->to_stopped_by_sw_breakpoint (ops->beneath);
2619 }
2620
2621 /* The to_supports_stopped_by_sw_breakpoint method of target
2622 record-btrace. */
2623
2624 static int
2625 record_btrace_supports_stopped_by_sw_breakpoint (struct target_ops *ops)
2626 {
2627 if (record_btrace_is_replaying (ops, minus_one_ptid))
2628 return 1;
2629
2630 return ops->beneath->to_supports_stopped_by_sw_breakpoint (ops->beneath);
2631 }
2632
2633 /* The to_stopped_by_hw_breakpoint method of target record-btrace. */
2634
2635 static int
2636 record_btrace_stopped_by_hw_breakpoint (struct target_ops *ops)
2637 {
2638 if (record_btrace_is_replaying (ops, minus_one_ptid))
2639 {
2640 struct thread_info *tp = inferior_thread ();
2641
2642 return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2643 }
2644
2645 return ops->beneath->to_stopped_by_hw_breakpoint (ops->beneath);
2646 }
2647
2648 /* The to_supports_stopped_by_hw_breakpoint method of target
2649 record-btrace. */
2650
2651 static int
2652 record_btrace_supports_stopped_by_hw_breakpoint (struct target_ops *ops)
2653 {
2654 if (record_btrace_is_replaying (ops, minus_one_ptid))
2655 return 1;
2656
2657 return ops->beneath->to_supports_stopped_by_hw_breakpoint (ops->beneath);
2658 }
2659
2660 /* The to_update_thread_list method of target record-btrace. */
2661
2662 static void
2663 record_btrace_update_thread_list (struct target_ops *ops)
2664 {
2665 /* We don't add or remove threads during replay. */
2666 if (record_btrace_is_replaying (ops, minus_one_ptid))
2667 return;
2668
2669 /* Forward the request. */
2670 ops = ops->beneath;
2671 ops->to_update_thread_list (ops);
2672 }
2673
2674 /* The to_thread_alive method of target record-btrace. */
2675
2676 static int
2677 record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
2678 {
2679 /* We don't add or remove threads during replay. */
2680 if (record_btrace_is_replaying (ops, minus_one_ptid))
2681 return find_thread_ptid (ptid) != NULL;
2682
2683 /* Forward the request. */
2684 ops = ops->beneath;
2685 return ops->to_thread_alive (ops, ptid);
2686 }
2687
2688 /* Set the replay branch trace instruction iterator. If IT is NULL, replay
2689 is stopped. */
2690
2691 static void
2692 record_btrace_set_replay (struct thread_info *tp,
2693 const struct btrace_insn_iterator *it)
2694 {
2695 struct btrace_thread_info *btinfo;
2696
2697 btinfo = &tp->btrace;
2698
2699 if (it == NULL)
2700 record_btrace_stop_replaying (tp);
2701 else
2702 {
2703 if (btinfo->replay == NULL)
2704 record_btrace_start_replaying (tp);
2705 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
2706 return;
2707
2708 *btinfo->replay = *it;
2709 registers_changed_ptid (tp->ptid);
2710 }
2711
2712 /* Start anew from the new replay position. */
2713 record_btrace_clear_histories (btinfo);
2714
2715 stop_pc = regcache_read_pc (get_current_regcache ());
2716 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
2717 }
2718
2719 /* The to_goto_record_begin method of target record-btrace. */
2720
2721 static void
2722 record_btrace_goto_begin (struct target_ops *self)
2723 {
2724 struct thread_info *tp;
2725 struct btrace_insn_iterator begin;
2726
2727 tp = require_btrace_thread ();
2728
2729 btrace_insn_begin (&begin, &tp->btrace);
2730
2731 /* Skip gaps at the beginning of the trace. */
2732 while (btrace_insn_get (&begin) == NULL)
2733 {
2734 unsigned int steps;
2735
2736 steps = btrace_insn_next (&begin, 1);
2737 if (steps == 0)
2738 error (_("No trace."));
2739 }
2740
2741 record_btrace_set_replay (tp, &begin);
2742 }
2743
2744 /* The to_goto_record_end method of target record-btrace. */
2745
2746 static void
2747 record_btrace_goto_end (struct target_ops *ops)
2748 {
2749 struct thread_info *tp;
2750
2751 tp = require_btrace_thread ();
2752
2753 record_btrace_set_replay (tp, NULL);
2754 }
2755
2756 /* The to_goto_record method of target record-btrace. */
2757
2758 static void
2759 record_btrace_goto (struct target_ops *self, ULONGEST insn)
2760 {
2761 struct thread_info *tp;
2762 struct btrace_insn_iterator it;
2763 unsigned int number;
2764 int found;
2765
2766 number = insn;
2767
2768 /* Check for wrap-arounds when narrowing INSN to unsigned int. */
2769 if (number != insn)
2770 error (_("Instruction number out of range."));
2771
2772 tp = require_btrace_thread ();
2773
2774 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
2775
2776 /* Check if the instruction could not be found or is a gap. */
2777 if (found == 0 || btrace_insn_get (&it) == NULL)
2778 error (_("No such instruction."));
2779
2780 record_btrace_set_replay (tp, &it);
2781 }
2782
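/* Example session for the navigation methods above (hedged; "record goto"
   is documented in the GDB manual):

     (gdb) record goto begin   <- record_btrace_goto_begin
     (gdb) record goto 42      <- record_btrace_goto, instruction number 42
     (gdb) record goto end     <- record_btrace_goto_end, stops replaying
*/
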
2783 /* The to_record_stop_replaying method of target record-btrace. */
2784
2785 static void
2786 record_btrace_stop_replaying_all (struct target_ops *self)
2787 {
2788 struct thread_info *tp;
2789
2790 ALL_NON_EXITED_THREADS (tp)
2791 record_btrace_stop_replaying (tp);
2792 }
2793
2794 /* The to_execution_direction target method. */
2795
2796 static enum exec_direction_kind
2797 record_btrace_execution_direction (struct target_ops *self)
2798 {
2799 return record_btrace_resume_exec_dir;
2800 }
2801
2802 /* The to_prepare_to_generate_core target method. */
2803
2804 static void
2805 record_btrace_prepare_to_generate_core (struct target_ops *self)
2806 {
2807 record_btrace_generating_corefile = 1;
2808 }
2809
2810 /* The to_done_generating_core target method. */
2811
2812 static void
2813 record_btrace_done_generating_core (struct target_ops *self)
2814 {
2815 record_btrace_generating_corefile = 0;
2816 }
2817
2818 /* Initialize the record-btrace target ops. */
2819
2820 static void
2821 init_record_btrace_ops (void)
2822 {
2823 struct target_ops *ops;
2824
2825 ops = &record_btrace_ops;
2826 ops->to_shortname = "record-btrace";
2827 ops->to_longname = "Branch tracing target";
2828 ops->to_doc = "Collect control-flow trace and provide the execution history.";
2829 ops->to_open = record_btrace_open;
2830 ops->to_close = record_btrace_close;
2831 ops->to_async = record_btrace_async;
2832 ops->to_detach = record_detach;
2833 ops->to_disconnect = record_btrace_disconnect;
2834 ops->to_mourn_inferior = record_mourn_inferior;
2835 ops->to_kill = record_kill;
2836 ops->to_stop_recording = record_btrace_stop_recording;
2837 ops->to_info_record = record_btrace_info;
2838 ops->to_insn_history = record_btrace_insn_history;
2839 ops->to_insn_history_from = record_btrace_insn_history_from;
2840 ops->to_insn_history_range = record_btrace_insn_history_range;
2841 ops->to_call_history = record_btrace_call_history;
2842 ops->to_call_history_from = record_btrace_call_history_from;
2843 ops->to_call_history_range = record_btrace_call_history_range;
2844 ops->to_record_method = record_btrace_record_method;
2845 ops->to_record_is_replaying = record_btrace_is_replaying;
2846 ops->to_record_will_replay = record_btrace_will_replay;
2847 ops->to_record_stop_replaying = record_btrace_stop_replaying_all;
2848 ops->to_xfer_partial = record_btrace_xfer_partial;
2849 ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
2850 ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
2851 ops->to_fetch_registers = record_btrace_fetch_registers;
2852 ops->to_store_registers = record_btrace_store_registers;
2853 ops->to_prepare_to_store = record_btrace_prepare_to_store;
2854 ops->to_get_unwinder = &record_btrace_to_get_unwinder;
2855 ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;
2856 ops->to_resume = record_btrace_resume;
2857 ops->to_commit_resume = record_btrace_commit_resume;
2858 ops->to_wait = record_btrace_wait;
2859 ops->to_stop = record_btrace_stop;
2860 ops->to_update_thread_list = record_btrace_update_thread_list;
2861 ops->to_thread_alive = record_btrace_thread_alive;
2862 ops->to_goto_record_begin = record_btrace_goto_begin;
2863 ops->to_goto_record_end = record_btrace_goto_end;
2864 ops->to_goto_record = record_btrace_goto;
2865 ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
2866 ops->to_stopped_by_sw_breakpoint = record_btrace_stopped_by_sw_breakpoint;
2867 ops->to_supports_stopped_by_sw_breakpoint
2868 = record_btrace_supports_stopped_by_sw_breakpoint;
2869 ops->to_stopped_by_hw_breakpoint = record_btrace_stopped_by_hw_breakpoint;
2870 ops->to_supports_stopped_by_hw_breakpoint
2871 = record_btrace_supports_stopped_by_hw_breakpoint;
2872 ops->to_execution_direction = record_btrace_execution_direction;
2873 ops->to_prepare_to_generate_core = record_btrace_prepare_to_generate_core;
2874 ops->to_done_generating_core = record_btrace_done_generating_core;
2875 ops->to_stratum = record_stratum;
2876 ops->to_magic = OPS_MAGIC;
2877 }
2878
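/* Most methods installed above follow the same delegation pattern: handle
   the request while replaying, otherwise forward to the target beneath.
   A hedged sketch of that pattern (record_btrace_example and to_example
   are hypothetical, not real target_ops slots):

     static void
     record_btrace_example (struct target_ops *ops)
     {
       if (record_btrace_is_replaying (ops, minus_one_ptid))
         return;  // handle the request during replay

       ops = ops->beneath;
       ops->to_example (ops);  // hypothetical slot
     }
*/
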
2879 /* Start recording in BTS format. */
2880
2881 static void
2882 cmd_record_btrace_bts_start (char *args, int from_tty)
2883 {
2884 if (args != NULL && *args != 0)
2885 error (_("Invalid argument."));
2886
2887 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2888
2889 TRY
2890 {
2891 execute_command ((char *) "target record-btrace", from_tty);
2892 }
2893 CATCH (exception, RETURN_MASK_ALL)
2894 {
2895 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2896 throw_exception (exception);
2897 }
2898 END_CATCH
2899 }
2900
2901 /* Start recording in Intel Processor Trace format. */
2902
2903 static void
2904 cmd_record_btrace_pt_start (char *args, int from_tty)
2905 {
2906 if (args != NULL && *args != 0)
2907 error (_("Invalid argument."));
2908
2909 record_btrace_conf.format = BTRACE_FORMAT_PT;
2910
2911 TRY
2912 {
2913 execute_command ((char *) "target record-btrace", from_tty);
2914 }
2915 CATCH (exception, RETURN_MASK_ALL)
2916 {
2917 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2918 throw_exception (exception);
2919 }
2920 END_CATCH
2921 }
2922
2923 /* Alias for "target record": try Intel PT first, fall back to BTS. */
2924
2925 static void
2926 cmd_record_btrace_start (char *args, int from_tty)
2927 {
2928 if (args != NULL && *args != 0)
2929 error (_("Invalid argument."));
2930
2931 record_btrace_conf.format = BTRACE_FORMAT_PT;
2932
2933 TRY
2934 {
2935 execute_command ((char *) "target record-btrace", from_tty);
2936 }
2937 CATCH (exception, RETURN_MASK_ALL)
2938 {
2939 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2940
2941 TRY
2942 {
2943 execute_command ((char *) "target record-btrace", from_tty);
2944 }
2945 CATCH (exception, RETURN_MASK_ALL)
2946 {
2947 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2948 throw_exception (exception);
2949 }
2950 END_CATCH
2951 }
2952 END_CATCH
2953 }
2954
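/* Example (hedged): on a processor without Intel PT support, the fallback
   above makes "record btrace" behave like "record btrace bts"; "info
   record" then reports the format actually in use:

     (gdb) record btrace
     (gdb) info record

   The exact output varies between GDB versions.  */
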
2955 /* The "set record btrace" command. */
2956
2957 static void
2958 cmd_set_record_btrace (char *args, int from_tty)
2959 {
2960 cmd_show_list (set_record_btrace_cmdlist, from_tty, "");
2961 }
2962
2963 /* The "show record btrace" command. */
2964
2965 static void
2966 cmd_show_record_btrace (char *args, int from_tty)
2967 {
2968 cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
2969 }
2970
2971 /* The "show record btrace replay-memory-access" command. */
2972
2973 static void
2974 cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2975 struct cmd_list_element *c, const char *value)
2976 {
2977 fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
2978 replay_memory_access);
2979 }
2980
2981 /* The "set record btrace bts" command. */
2982
2983 static void
2984 cmd_set_record_btrace_bts (char *args, int from_tty)
2985 {
2986 printf_unfiltered (_("\"set record btrace bts\" must be followed "
2987 "by an appropriate subcommand.\n"));
2988 help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
2989 all_commands, gdb_stdout);
2990 }
2991
2992 /* The "show record btrace bts" command. */
2993
2994 static void
2995 cmd_show_record_btrace_bts (char *args, int from_tty)
2996 {
2997 cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
2998 }
2999
3000 /* The "set record btrace pt" command. */
3001
3002 static void
3003 cmd_set_record_btrace_pt (char *args, int from_tty)
3004 {
3005 printf_unfiltered (_("\"set record btrace pt\" must be followed "
3006 "by an appropriate subcommand.\n"));
3007 help_list (set_record_btrace_pt_cmdlist, "set record btrace pt ",
3008 all_commands, gdb_stdout);
3009 }
3010
3011 /* The "show record btrace pt" command. */
3012
3013 static void
3014 cmd_show_record_btrace_pt (char *args, int from_tty)
3015 {
3016 cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, "");
3017 }
3018
3019 /* The "record bts buffer-size" show value function. */
3020
3021 static void
3022 show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
3023 struct cmd_list_element *c,
3024 const char *value)
3025 {
3026 fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
3027 value);
3028 }
3029
3030 /* The "record pt buffer-size" show value function. */
3031
3032 static void
3033 show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
3034 struct cmd_list_element *c,
3035 const char *value)
3036 {
3037 fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
3038 value);
3039 }
3040
3041 void _initialize_record_btrace (void);
3042
3043 /* Initialize btrace commands. */
3044
3045 void
3046 _initialize_record_btrace (void)
3047 {
3048 add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
3049 _("Start branch trace recording."), &record_btrace_cmdlist,
3050 "record btrace ", 0, &record_cmdlist);
3051 add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
3052
3053 add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
3054 _("\
3055 Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
3056 The processor stores a from/to record for each branch into a cyclic buffer.\n\
3057 This format may not be available on all processors."),
3058 &record_btrace_cmdlist);
3059 add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);
3060
3061 add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
3062 _("\
3063 Start branch trace recording in Intel Processor Trace format.\n\n\
3064 This format may not be available on all processors."),
3065 &record_btrace_cmdlist);
3066 add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);
3067
3068 add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
3069 _("Set record options"), &set_record_btrace_cmdlist,
3070 "set record btrace ", 0, &set_record_cmdlist);
3071
3072 add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
3073 _("Show record options"), &show_record_btrace_cmdlist,
3074 "show record btrace ", 0, &show_record_cmdlist);
3075
3076 add_setshow_enum_cmd ("replay-memory-access", no_class,
3077 replay_memory_access_types, &replay_memory_access, _("\
3078 Set what memory accesses are allowed during replay."), _("\
3079 Show what memory accesses are allowed during replay."),
3080 _("Default is READ-ONLY.\n\n\
3081 The btrace record target does not trace data.\n\
3082 The memory therefore corresponds to the live target and not \
3083 to the current replay position.\n\n\
3084 When READ-ONLY, allow accesses to read-only memory during replay.\n\
3085 When READ-WRITE, allow accesses to read-only and read-write memory during \
3086 replay."),
3087 NULL, cmd_show_replay_memory_access,
3088 &set_record_btrace_cmdlist,
3089 &show_record_btrace_cmdlist);
3090
3091 add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
3092 _("Set record btrace bts options"),
3093 &set_record_btrace_bts_cmdlist,
3094 "set record btrace bts ", 0, &set_record_btrace_cmdlist);
3095
3096 add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
3097 _("Show record btrace bts options"),
3098 &show_record_btrace_bts_cmdlist,
3099 "show record btrace bts ", 0, &show_record_btrace_cmdlist);
3100
3101 add_setshow_uinteger_cmd ("buffer-size", no_class,
3102 &record_btrace_conf.bts.size,
3103 _("Set the record/replay bts buffer size."),
3104 _("Show the record/replay bts buffer size."), _("\
3105 When starting recording, request a trace buffer of this size. \
3106 The actual buffer size may differ from the requested size. \
3107 Use \"info record\" to see the actual buffer size.\n\n\
3108 Bigger buffers allow longer recording but also take more time to process \
3109 the recorded execution trace.\n\n\
3110 The trace buffer size may not be changed while recording."), NULL,
3111 show_record_bts_buffer_size_value,
3112 &set_record_btrace_bts_cmdlist,
3113 &show_record_btrace_bts_cmdlist);
3114
3115 add_prefix_cmd ("pt", class_support, cmd_set_record_btrace_pt,
3116 _("Set record btrace pt options"),
3117 &set_record_btrace_pt_cmdlist,
3118 "set record btrace pt ", 0, &set_record_btrace_cmdlist);
3119
3120 add_prefix_cmd ("pt", class_support, cmd_show_record_btrace_pt,
3121 _("Show record btrace pt options"),
3122 &show_record_btrace_pt_cmdlist,
3123 "show record btrace pt ", 0, &show_record_btrace_cmdlist);
3124
3125 add_setshow_uinteger_cmd ("buffer-size", no_class,
3126 &record_btrace_conf.pt.size,
3127 _("Set the record/replay pt buffer size."),
3128 _("Show the record/replay pt buffer size."), _("\
3129 Bigger buffers allow longer recording but also take more time to process \
3130 the recorded execution.\n\
3131 The actual buffer size may differ from the requested size. Use \"info record\" \
3132 to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
3133 &set_record_btrace_pt_cmdlist,
3134 &show_record_btrace_pt_cmdlist);
3135
3136 init_record_btrace_ops ();
3137 add_target (&record_btrace_ops);
3138
3139 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
3140 xcalloc, xfree);
3141
3142 record_btrace_conf.bts.size = 64 * 1024;
3143 record_btrace_conf.pt.size = 16 * 1024;
3144 }
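
/* Example configuration session using the commands registered above
   (hedged; the values are illustrative):

     (gdb) set record btrace bts buffer-size 128000
     (gdb) set record btrace replay-memory-access read-write
     (gdb) record btrace bts
     (gdb) info record

   Buffer sizes cannot be changed while recording is active.  */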