/* Branch trace support for GDB, the GNU debugger.

   Copyright (C) 2013-2018 Free Software Foundation, Inc.

   Contributed by Intel Corp. <markus.t.metzger@intel.com>

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "record.h"
#include "record-btrace.h"
#include "gdbthread.h"
#include "target.h"
#include "gdbcmd.h"
#include "disasm.h"
#include "observer.h"
#include "cli/cli-utils.h"
#include "source.h"
#include "ui-out.h"
#include "symtab.h"
#include "filenames.h"
#include "regcache.h"
#include "frame-unwind.h"
#include "hashtab.h"
#include "infrun.h"
#include "event-loop.h"
#include "inf-loop.h"
#include "vec.h"
#include <algorithm>
/* The target_ops of record-btrace.  */
static struct target_ops record_btrace_ops;

/* A new thread observer enabling branch tracing for the new thread.  */
static struct observer *record_btrace_thread_observer;

/* Memory access types used in set/show record btrace replay-memory-access.  */
static const char replay_memory_access_read_only[] = "read-only";
static const char replay_memory_access_read_write[] = "read-write";
static const char *const replay_memory_access_types[] =
{
  replay_memory_access_read_only,
  replay_memory_access_read_write,
  NULL
};

/* The currently allowed replay memory access type.  */
static const char *replay_memory_access = replay_memory_access_read_only;

/* Command lists for "set/show record btrace".  */
static struct cmd_list_element *set_record_btrace_cmdlist;
static struct cmd_list_element *show_record_btrace_cmdlist;

/* The execution direction of the last resume we got.  See record-full.c.  */
static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;

/* The async event handler for reverse/replay execution.  */
static struct async_event_handler *record_btrace_async_inferior_event_handler;

/* A flag indicating that we are currently generating a core file.  */
static int record_btrace_generating_corefile;

/* The current branch trace configuration.  */
static struct btrace_config record_btrace_conf;

/* Command list for "record btrace".  */
static struct cmd_list_element *record_btrace_cmdlist;

/* Command lists for "set/show record btrace bts".  */
static struct cmd_list_element *set_record_btrace_bts_cmdlist;
static struct cmd_list_element *show_record_btrace_bts_cmdlist;

/* Command lists for "set/show record btrace pt".  */
static struct cmd_list_element *set_record_btrace_pt_cmdlist;
static struct cmd_list_element *show_record_btrace_pt_cmdlist;

/* Print a record-btrace debug message.  Use do ... while (0) to avoid
   ambiguities when used in if statements.  */

#define DEBUG(msg, args...)                                             \
  do                                                                    \
    {                                                                   \
      if (record_debug != 0)                                            \
        fprintf_unfiltered (gdb_stdlog,                                 \
                            "[record-btrace] " msg "\n", ##args);       \
    }                                                                   \
  while (0)

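/* For example, with "set debug record 1" in effect, a call such as
   DEBUG ("resume %s", target_pid_to_str (ptid)) prints a line of the form
   "[record-btrace] resume ..." to gdb_stdlog.  */
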
/* Update the branch trace for the current thread and return a pointer to its
   thread_info.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct thread_info *
require_btrace_thread (void)
{
  struct thread_info *tp;

  DEBUG ("require");

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  validate_registers_access ();

  btrace_fetch (tp);

  if (btrace_is_empty (tp))
    error (_("No trace."));

  return tp;
}

/* Update the branch trace for the current thread and return a pointer to its
   branch trace information struct.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct btrace_thread_info *
require_btrace (void)
{
  struct thread_info *tp;

  tp = require_btrace_thread ();

  return &tp->btrace;
}

/* Enable branch tracing for one thread.  Warn on errors.  */

static void
record_btrace_enable_warn (struct thread_info *tp)
{
  TRY
    {
      btrace_enable (tp, &record_btrace_conf);
    }
  CATCH (error, RETURN_MASK_ERROR)
    {
      warning ("%s", error.message);
    }
  END_CATCH
}

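/* record_btrace_enable_warn is used as the new-thread observer callback
   below; downgrading the error to a warning keeps a single thread that
   cannot be traced from aborting the whole operation.  */
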
/* Callback function to disable branch tracing for one thread.  */

static void
record_btrace_disable_callback (void *arg)
{
  struct thread_info *tp = (struct thread_info *) arg;

  btrace_disable (tp);
}

/* Enable automatic tracing of new threads.  */

static void
record_btrace_auto_enable (void)
{
  DEBUG ("attach thread observer");

  record_btrace_thread_observer
    = observer_attach_new_thread (record_btrace_enable_warn);
}

/* Disable automatic tracing of new threads.  */

static void
record_btrace_auto_disable (void)
{
  /* The observer may already have been detached.  */
  if (record_btrace_thread_observer == NULL)
    return;

  DEBUG ("detach thread observer");

  observer_detach_new_thread (record_btrace_thread_observer);
  record_btrace_thread_observer = NULL;
}

/* The record-btrace async event handler function.  */

static void
record_btrace_handle_async_inferior_event (gdb_client_data data)
{
  inferior_event_handler (INF_REG_EVENT, NULL);
}

/* See record-btrace.h.  */

void
record_btrace_push_target (void)
{
  const char *format;

  record_btrace_auto_enable ();

  push_target (&record_btrace_ops);

  record_btrace_async_inferior_event_handler
    = create_async_event_handler (record_btrace_handle_async_inferior_event,
				  NULL);
  record_btrace_generating_corefile = 0;

  format = btrace_format_short_string (record_btrace_conf.format);
  observer_notify_record_changed (current_inferior (), 1, "btrace", format);
}

/* The to_open method of target record-btrace.  */

static void
record_btrace_open (const char *args, int from_tty)
{
  struct cleanup *disable_chain;
  struct thread_info *tp;

  DEBUG ("open");

  record_preopen ();

  if (!target_has_execution)
    error (_("The program is not being run."));

  gdb_assert (record_btrace_thread_observer == NULL);

  disable_chain = make_cleanup (null_cleanup, NULL);
  ALL_NON_EXITED_THREADS (tp)
    if (args == NULL || *args == 0 || number_is_in_list (args, tp->global_num))
      {
	btrace_enable (tp, &record_btrace_conf);

	make_cleanup (record_btrace_disable_callback, tp);
      }

  record_btrace_push_target ();

  discard_cleanups (disable_chain);
}

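/* Note on the cleanup handling in record_btrace_open above: each thread
   that was enabled successfully is pushed onto DISABLE_CHAIN, so tracing is
   turned off again if enabling a later thread or pushing the target throws.
   On success, the chain is discarded rather than run, leaving tracing
   enabled.  */
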
/* The to_stop_recording method of target record-btrace.  */

static void
record_btrace_stop_recording (struct target_ops *self)
{
  struct thread_info *tp;

  DEBUG ("stop recording");

  record_btrace_auto_disable ();

  ALL_NON_EXITED_THREADS (tp)
    if (tp->btrace.target != NULL)
      btrace_disable (tp);
}

/* The to_disconnect method of target record-btrace.  */

static void
record_btrace_disconnect (struct target_ops *self, const char *args,
			  int from_tty)
{
  struct target_ops *beneath = self->beneath;

  /* Do not stop recording, just clean up GDB side.  */
  unpush_target (self);

  /* Forward disconnect.  */
  beneath->to_disconnect (beneath, args, from_tty);
}

/* The to_close method of target record-btrace.  */

static void
record_btrace_close (struct target_ops *self)
{
  struct thread_info *tp;

  if (record_btrace_async_inferior_event_handler != NULL)
    delete_async_event_handler (&record_btrace_async_inferior_event_handler);

  /* Make sure automatic recording gets disabled even if we did not stop
     recording before closing the record-btrace target.  */
  record_btrace_auto_disable ();

  /* We should have already stopped recording.
     Tear down btrace in case we have not.  */
  ALL_NON_EXITED_THREADS (tp)
    btrace_teardown (tp);
}

/* The to_async method of target record-btrace.  */

static void
record_btrace_async (struct target_ops *ops, int enable)
{
  if (enable)
    mark_async_event_handler (record_btrace_async_inferior_event_handler);
  else
    clear_async_event_handler (record_btrace_async_inferior_event_handler);

  ops->beneath->to_async (ops->beneath, enable);
}

/* Adjusts the size and returns a human-readable size suffix.  */

static const char *
record_btrace_adjust_size (unsigned int *size)
{
  unsigned int sz;

  sz = *size;

  if ((sz & ((1u << 30) - 1)) == 0)
    {
      *size = sz >> 30;
      return "GB";
    }
  else if ((sz & ((1u << 20) - 1)) == 0)
    {
      *size = sz >> 20;
      return "MB";
    }
  else if ((sz & ((1u << 10) - 1)) == 0)
    {
      *size = sz >> 10;
      return "kB";
    }
  else
    return "";
}

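/* For example, record_btrace_adjust_size turns a size of 3145728 (3 << 20)
   into 3 with suffix "MB", while 3000000 is not an exact multiple of any
   power-of-two unit and is returned unchanged with an empty suffix.  */
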
/* Print a BTS configuration.  */

static void
record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
{
  const char *suffix;
  unsigned int size;

  size = conf->size;
  if (size > 0)
    {
      suffix = record_btrace_adjust_size (&size);
      printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
    }
}

/* Print an Intel Processor Trace configuration.  */

static void
record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
{
  const char *suffix;
  unsigned int size;

  size = conf->size;
  if (size > 0)
    {
      suffix = record_btrace_adjust_size (&size);
      printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
    }
}

/* Print a branch tracing configuration.  */

static void
record_btrace_print_conf (const struct btrace_config *conf)
{
  printf_unfiltered (_("Recording format: %s.\n"),
		     btrace_format_string (conf->format));

  switch (conf->format)
    {
    case BTRACE_FORMAT_NONE:
      return;

    case BTRACE_FORMAT_BTS:
      record_btrace_print_bts_conf (&conf->bts);
      return;

    case BTRACE_FORMAT_PT:
      record_btrace_print_pt_conf (&conf->pt);
      return;
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
}

/* The to_info_record method of target record-btrace.  */

static void
record_btrace_info (struct target_ops *self)
{
  struct btrace_thread_info *btinfo;
  const struct btrace_config *conf;
  struct thread_info *tp;
  unsigned int insns, calls, gaps;

  DEBUG ("info");

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  validate_registers_access ();

  btinfo = &tp->btrace;

  conf = btrace_conf (btinfo);
  if (conf != NULL)
    record_btrace_print_conf (conf);

  btrace_fetch (tp);

  insns = 0;
  calls = 0;
  gaps = 0;

  if (!btrace_is_empty (tp))
    {
      struct btrace_call_iterator call;
      struct btrace_insn_iterator insn;

      btrace_call_end (&call, btinfo);
      btrace_call_prev (&call, 1);
      calls = btrace_call_number (&call);

      btrace_insn_end (&insn, btinfo);
      insns = btrace_insn_number (&insn);

      /* If the last instruction is not a gap, it is the current instruction
	 that is not actually part of the record.  */
      if (btrace_insn_get (&insn) != NULL)
	insns -= 1;

      gaps = btinfo->ngaps;
    }

  printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
		       "for thread %s (%s).\n"), insns, calls, gaps,
		     print_thread_id (tp), target_pid_to_str (tp->ptid));

  if (btrace_is_replaying (tp))
    printf_unfiltered (_("Replay in progress.  At instruction %u.\n"),
		       btrace_insn_number (btinfo->replay));
}

/* Print a decode error.  */

static void
btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
			    enum btrace_format format)
{
  const char *errstr = btrace_decode_error (format, errcode);

  uiout->text (_("["));
  /* ERRCODE > 0 indicates notifications on BTRACE_FORMAT_PT.  */
  if (!(format == BTRACE_FORMAT_PT && errcode > 0))
    {
      uiout->text (_("decode error ("));
      uiout->field_int ("errcode", errcode);
      uiout->text (_("): "));
    }
  uiout->text (errstr);
  uiout->text (_("]\n"));
}

/* Print an unsigned int.  */

static void
ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
{
  uiout->field_fmt (fld, "%u", val);
}

/* A range of source lines.  */

struct btrace_line_range
{
  /* The symtab this line is from.  */
  struct symtab *symtab;

  /* The first line (inclusive).  */
  int begin;

  /* The last line (exclusive).  */
  int end;
};

/* Construct a line range.  */

static struct btrace_line_range
btrace_mk_line_range (struct symtab *symtab, int begin, int end)
{
  struct btrace_line_range range;

  range.symtab = symtab;
  range.begin = begin;
  range.end = end;

  return range;
}

/* Add a line to a line range.  */

static struct btrace_line_range
btrace_line_range_add (struct btrace_line_range range, int line)
{
  if (range.end <= range.begin)
    {
      /* This is the first entry.  */
      range.begin = line;
      range.end = line + 1;
    }
  else if (line < range.begin)
    range.begin = line;
  else if (range.end < line)
    range.end = line;

  return range;
}

/* Return non-zero if RANGE is empty, zero otherwise.  */

static int
btrace_line_range_is_empty (struct btrace_line_range range)
{
  return range.end <= range.begin;
}

/* Return non-zero if LHS contains RHS, zero otherwise.  */

static int
btrace_line_range_contains_range (struct btrace_line_range lhs,
				  struct btrace_line_range rhs)
{
  return ((lhs.symtab == rhs.symtab)
	  && (lhs.begin <= rhs.begin)
	  && (rhs.end <= lhs.end));
}

/* Find the line range associated with PC.  */

static struct btrace_line_range
btrace_find_line_range (CORE_ADDR pc)
{
  struct btrace_line_range range;
  struct linetable_entry *lines;
  struct linetable *ltable;
  struct symtab *symtab;
  int nlines, i;

  symtab = find_pc_line_symtab (pc);
  if (symtab == NULL)
    return btrace_mk_line_range (NULL, 0, 0);

  ltable = SYMTAB_LINETABLE (symtab);
  if (ltable == NULL)
    return btrace_mk_line_range (symtab, 0, 0);

  nlines = ltable->nitems;
  lines = ltable->item;
  if (nlines <= 0)
    return btrace_mk_line_range (symtab, 0, 0);

  range = btrace_mk_line_range (symtab, 0, 0);
  for (i = 0; i < nlines; i++)
    {
      if ((lines[i].pc == pc) && (lines[i].line != 0))
	range = btrace_line_range_add (range, lines[i].line);
    }

  return range;
}

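/* Note that btrace_find_line_range collects every line table entry whose
   address matches PC exactly; a single PC may have more than one such
   entry, e.g. for code expanded from a macro, hence the range result.  */
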
/* Print source lines in LINES to UIOUT.

   UI_ITEM_CHAIN is a cleanup chain for the last source line and the
   instructions corresponding to that source line.  When printing a new source
   line, we do the cleanups for the open chain and open a new cleanup chain for
   the new source line.  If the source line range in LINES is not empty, this
   function will leave the cleanup chain for the last printed source line open
   so instructions can be added to it.  */

static void
btrace_print_lines (struct btrace_line_range lines, struct ui_out *uiout,
		    struct cleanup **ui_item_chain, gdb_disassembly_flags flags)
{
  print_source_lines_flags psl_flags;
  int line;

  psl_flags = 0;
  if (flags & DISASSEMBLY_FILENAME)
    psl_flags |= PRINT_SOURCE_LINES_FILENAME;

  for (line = lines.begin; line < lines.end; ++line)
    {
      if (*ui_item_chain != NULL)
	do_cleanups (*ui_item_chain);

      *ui_item_chain
	= make_cleanup_ui_out_tuple_begin_end (uiout, "src_and_asm_line");

      print_source_lines (lines.symtab, line, line + 1, psl_flags);

      make_cleanup_ui_out_list_begin_end (uiout, "line_asm_insn");
    }
}

/* Disassemble a section of the recorded instruction trace.  */

static void
btrace_insn_history (struct ui_out *uiout,
		     const struct btrace_thread_info *btinfo,
		     const struct btrace_insn_iterator *begin,
		     const struct btrace_insn_iterator *end,
		     gdb_disassembly_flags flags)
{
  struct cleanup *cleanups, *ui_item_chain;
  struct gdbarch *gdbarch;
  struct btrace_insn_iterator it;
  struct btrace_line_range last_lines;

  DEBUG ("itrace (0x%x): [%u; %u)", (unsigned) flags,
	 btrace_insn_number (begin), btrace_insn_number (end));

  flags |= DISASSEMBLY_SPECULATIVE;

  gdbarch = target_gdbarch ();
  last_lines = btrace_mk_line_range (NULL, 0, 0);

  cleanups = make_cleanup_ui_out_list_begin_end (uiout, "asm_insns");

  /* UI_ITEM_CHAIN is a cleanup chain for the last source line and the
     instructions corresponding to that line.  */
  ui_item_chain = NULL;

  gdb_pretty_print_disassembler disasm (gdbarch);

  for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
    {
      const struct btrace_insn *insn;

      insn = btrace_insn_get (&it);

      /* A NULL instruction indicates a gap in the trace.  */
      if (insn == NULL)
	{
	  const struct btrace_config *conf;

	  conf = btrace_conf (btinfo);

	  /* We have trace so we must have a configuration.  */
	  gdb_assert (conf != NULL);

	  uiout->field_fmt ("insn-number", "%u",
			    btrace_insn_number (&it));
	  uiout->text ("\t");

	  btrace_ui_out_decode_error (uiout, btrace_insn_get_error (&it),
				      conf->format);
	}
      else
	{
	  struct disasm_insn dinsn;

	  if ((flags & DISASSEMBLY_SOURCE) != 0)
	    {
	      struct btrace_line_range lines;

	      lines = btrace_find_line_range (insn->pc);
	      if (!btrace_line_range_is_empty (lines)
		  && !btrace_line_range_contains_range (last_lines, lines))
		{
		  btrace_print_lines (lines, uiout, &ui_item_chain, flags);
		  last_lines = lines;
		}
	      else if (ui_item_chain == NULL)
		{
		  ui_item_chain
		    = make_cleanup_ui_out_tuple_begin_end (uiout,
							   "src_and_asm_line");
		  /* No source information.  */
		  make_cleanup_ui_out_list_begin_end (uiout, "line_asm_insn");
		}

	      gdb_assert (ui_item_chain != NULL);
	    }

	  memset (&dinsn, 0, sizeof (dinsn));
	  dinsn.number = btrace_insn_number (&it);
	  dinsn.addr = insn->pc;

	  if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
	    dinsn.is_speculative = 1;

	  disasm.pretty_print_insn (uiout, &dinsn, flags);
	}
    }

  do_cleanups (cleanups);
}

/* The to_insn_history method of target record-btrace.  */

static void
record_btrace_insn_history (struct target_ops *self, int size,
			    gdb_disassembly_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  btinfo = require_btrace ();
  history = btinfo->insn_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("insn-history (0x%x): %d", (unsigned) flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
	 start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
	begin = *replay;
      else
	btrace_insn_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
	 expand in the other direction, as well, to fill up any remaining
	 context.  */
      end = begin;
      if (size < 0)
	{
	  /* We want the current position covered, as well.  */
	  covered = btrace_insn_next (&end, 1);
	  covered += btrace_insn_prev (&begin, context - covered);
	  covered += btrace_insn_next (&end, context - covered);
	}
      else
	{
	  covered = btrace_insn_next (&end, context);
	  covered += btrace_insn_prev (&begin, context - covered);
	}
    }
  else
    {
      begin = history->begin;
      end = history->end;

      DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", (unsigned) flags, size,
	     btrace_insn_number (&begin), btrace_insn_number (&end));

      if (size < 0)
	{
	  end = begin;
	  covered = btrace_insn_prev (&begin, context);
	}
      else
	{
	  begin = end;
	  covered = btrace_insn_next (&end, context);
	}
    }

  if (covered > 0)
    btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
	printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
	printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  btrace_set_insn_history (btinfo, &begin, &end);
}

/* The to_insn_history_range method of target record-btrace.  */

static void
record_btrace_insn_history_range (struct target_ops *self,
				  ULONGEST from, ULONGEST to,
				  gdb_disassembly_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_iterator begin, end;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "insn history");
  low = from;
  high = to;

  DEBUG ("insn-history (0x%x): [%u; %u)", (unsigned) flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_insn_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_insn_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_insn_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_insn_next (&end, 1);
    }

  btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_insn_history (btinfo, &begin, &end);
}

/* The to_insn_history_from method of target record-btrace.  */

static void
record_btrace_insn_history_from (struct target_ops *self,
				 ULONGEST from, int size,
				 gdb_disassembly_flags flags)
{
  ULONGEST begin, end, context;

  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  if (size < 0)
    {
      end = from;

      if (from < context)
	begin = 0;
      else
	begin = from - context + 1;
    }
  else
    {
      begin = from;
      end = from + context - 1;

      /* Check for wrap-around.  */
      if (end < begin)
	end = ULONGEST_MAX;
    }

  record_btrace_insn_history_range (self, begin, end, flags);
}

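/* For example, with FROM = 10 and SIZE = -3, record_btrace_insn_history_from
   requests the range [8; 10]: FROM itself plus the two instructions
   preceding it.  With SIZE = +3, it requests [10; 12].  */
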
/* Print the instruction number range for a function call history line.  */

static void
btrace_call_history_insn_range (struct ui_out *uiout,
				const struct btrace_function *bfun)
{
  unsigned int begin, end, size;

  size = bfun->insn.size ();
  gdb_assert (size > 0);

  begin = bfun->insn_offset;
  end = begin + size - 1;

  ui_out_field_uint (uiout, "insn begin", begin);
  uiout->text (",");
  ui_out_field_uint (uiout, "insn end", end);
}

/* Compute the lowest and highest source line for the instructions in BFUN
   and return them in PBEGIN and PEND.
   Ignore instructions that can't be mapped to BFUN, e.g. instructions that
   result from inlining or macro expansion.  */

static void
btrace_compute_src_line_range (const struct btrace_function *bfun,
			       int *pbegin, int *pend)
{
  struct symtab *symtab;
  struct symbol *sym;
  int begin, end;

  begin = INT_MAX;
  end = INT_MIN;

  sym = bfun->sym;
  if (sym == NULL)
    goto out;

  symtab = symbol_symtab (sym);

  for (const btrace_insn &insn : bfun->insn)
    {
      struct symtab_and_line sal;

      sal = find_pc_line (insn.pc, 0);
      if (sal.symtab != symtab || sal.line == 0)
	continue;

      begin = std::min (begin, sal.line);
      end = std::max (end, sal.line);
    }

 out:
  *pbegin = begin;
  *pend = end;
}

/* Print the source line information for a function call history line.  */

static void
btrace_call_history_src_line (struct ui_out *uiout,
			      const struct btrace_function *bfun)
{
  struct symbol *sym;
  int begin, end;

  sym = bfun->sym;
  if (sym == NULL)
    return;

  uiout->field_string ("file",
		       symtab_to_filename_for_display (symbol_symtab (sym)));

  btrace_compute_src_line_range (bfun, &begin, &end);
  if (end < begin)
    return;

  uiout->text (":");
  uiout->field_int ("min line", begin);

  if (end == begin)
    return;

  uiout->text (",");
  uiout->field_int ("max line", end);
}

/* Get the name of a branch trace function.  */

static const char *
btrace_get_bfun_name (const struct btrace_function *bfun)
{
  struct minimal_symbol *msym;
  struct symbol *sym;

  if (bfun == NULL)
    return "??";

  msym = bfun->msym;
  sym = bfun->sym;

  if (sym != NULL)
    return SYMBOL_PRINT_NAME (sym);
  else if (msym != NULL)
    return MSYMBOL_PRINT_NAME (msym);
  else
    return "??";
}

/* Disassemble a section of the recorded function trace.  */

static void
btrace_call_history (struct ui_out *uiout,
		     const struct btrace_thread_info *btinfo,
		     const struct btrace_call_iterator *begin,
		     const struct btrace_call_iterator *end,
		     int int_flags)
{
  struct btrace_call_iterator it;
  record_print_flags flags = (enum record_print_flag) int_flags;

  DEBUG ("ftrace (0x%x): [%u; %u)", int_flags, btrace_call_number (begin),
	 btrace_call_number (end));

  for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
    {
      const struct btrace_function *bfun;
      struct minimal_symbol *msym;
      struct symbol *sym;

      bfun = btrace_call_get (&it);
      sym = bfun->sym;
      msym = bfun->msym;

      /* Print the function index.  */
      ui_out_field_uint (uiout, "index", bfun->number);
      uiout->text ("\t");

      /* Indicate gaps in the trace.  */
      if (bfun->errcode != 0)
	{
	  const struct btrace_config *conf;

	  conf = btrace_conf (btinfo);

	  /* We have trace so we must have a configuration.  */
	  gdb_assert (conf != NULL);

	  btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);

	  continue;
	}

      if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
	{
	  int level = bfun->level + btinfo->level, i;

	  for (i = 0; i < level; ++i)
	    uiout->text ("  ");
	}

      if (sym != NULL)
	uiout->field_string ("function", SYMBOL_PRINT_NAME (sym));
      else if (msym != NULL)
	uiout->field_string ("function", MSYMBOL_PRINT_NAME (msym));
      else if (!uiout->is_mi_like_p ())
	uiout->field_string ("function", "??");

      if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
	{
	  uiout->text (_("\tinst "));
	  btrace_call_history_insn_range (uiout, bfun);
	}

      if ((flags & RECORD_PRINT_SRC_LINE) != 0)
	{
	  uiout->text (_("\tat "));
	  btrace_call_history_src_line (uiout, bfun);
	}

      uiout->text ("\n");
    }
}

/* The to_call_history method of target record-btrace.  */

static void
record_btrace_call_history (struct target_ops *self, int size,
			    record_print_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  btinfo = require_btrace ();
  history = btinfo->call_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("call-history (0x%x): %d", (int) flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
	 start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
	{
	  begin.btinfo = btinfo;
	  begin.index = replay->call_index;
	}
      else
	btrace_call_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
	 expand in the other direction, as well, to fill up any remaining
	 context.  */
      end = begin;
      if (size < 0)
	{
	  /* We want the current position covered, as well.  */
	  covered = btrace_call_next (&end, 1);
	  covered += btrace_call_prev (&begin, context - covered);
	  covered += btrace_call_next (&end, context - covered);
	}
      else
	{
	  covered = btrace_call_next (&end, context);
	  covered += btrace_call_prev (&begin, context - covered);
	}
    }
  else
    {
      begin = history->begin;
      end = history->end;

      DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", (int) flags, size,
	     btrace_call_number (&begin), btrace_call_number (&end));

      if (size < 0)
	{
	  end = begin;
	  covered = btrace_call_prev (&begin, context);
	}
      else
	{
	  begin = end;
	  covered = btrace_call_next (&end, context);
	}
    }

  if (covered > 0)
    btrace_call_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
	printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
	printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  btrace_set_call_history (btinfo, &begin, &end);
}

/* The to_call_history_range method of target record-btrace.  */

static void
record_btrace_call_history_range (struct target_ops *self,
				  ULONGEST from, ULONGEST to,
				  record_print_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_iterator begin, end;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "func history");
  low = from;
  high = to;

  DEBUG ("call-history (0x%x): [%u; %u)", (int) flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_call_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_call_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_call_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_call_next (&end, 1);
    }

  btrace_call_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_call_history (btinfo, &begin, &end);
}

/* The to_call_history_from method of target record-btrace.  */

static void
record_btrace_call_history_from (struct target_ops *self,
				 ULONGEST from, int size,
				 record_print_flags flags)
{
  ULONGEST begin, end, context;

  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  if (size < 0)
    {
      end = from;

      if (from < context)
	begin = 0;
      else
	begin = from - context + 1;
    }
  else
    {
      begin = from;
      end = from + context - 1;

      /* Check for wrap-around.  */
      if (end < begin)
	end = ULONGEST_MAX;
    }

  record_btrace_call_history_range (self, begin, end, flags);
}

/* The to_record_method method of target record-btrace.  */

static enum record_method
record_btrace_record_method (struct target_ops *self, ptid_t ptid)
{
  struct thread_info * const tp = find_thread_ptid (ptid);

  if (tp == NULL)
    error (_("No thread."));

  if (tp->btrace.target == NULL)
    return RECORD_METHOD_NONE;

  return RECORD_METHOD_BTRACE;
}

/* The to_record_is_replaying method of target record-btrace.  */

static int
record_btrace_is_replaying (struct target_ops *self, ptid_t ptid)
{
  struct thread_info *tp;

  ALL_NON_EXITED_THREADS (tp)
    if (ptid_match (tp->ptid, ptid) && btrace_is_replaying (tp))
      return 1;

  return 0;
}

/* The to_record_will_replay method of target record-btrace.  */

static int
record_btrace_will_replay (struct target_ops *self, ptid_t ptid, int dir)
{
  return dir == EXEC_REVERSE || record_btrace_is_replaying (self, ptid);
}

/* The to_xfer_partial method of target record-btrace.  */

static enum target_xfer_status
record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
			    const char *annex, gdb_byte *readbuf,
			    const gdb_byte *writebuf, ULONGEST offset,
			    ULONGEST len, ULONGEST *xfered_len)
{
  /* Filter out requests that don't make sense during replay.  */
  if (replay_memory_access == replay_memory_access_read_only
      && !record_btrace_generating_corefile
      && record_btrace_is_replaying (ops, inferior_ptid))
    {
      switch (object)
	{
	case TARGET_OBJECT_MEMORY:
	  {
	    struct target_section *section;

	    /* We do not allow writing memory in general.  */
	    if (writebuf != NULL)
	      {
		*xfered_len = len;
		return TARGET_XFER_UNAVAILABLE;
	      }

	    /* We allow reading readonly memory.  */
	    section = target_section_by_addr (ops, offset);
	    if (section != NULL)
	      {
		/* Check if the section we found is readonly.  */
		if ((bfd_get_section_flags (section->the_bfd_section->owner,
					    section->the_bfd_section)
		     & SEC_READONLY) != 0)
		  {
		    /* Truncate the request to fit into this section.  */
		    len = std::min (len, section->endaddr - offset);
		    break;
		  }
	      }

	    *xfered_len = len;
	    return TARGET_XFER_UNAVAILABLE;
	  }
	}
    }

  /* Forward the request.  */
  ops = ops->beneath;
  return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
			       offset, len, xfered_len);
}

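/* Example of the replay memory-access policy implemented above: while
   replaying with "set record btrace replay-memory-access read-only" (the
   default), a read from a SEC_READONLY section such as .text is forwarded
   to the target beneath, whereas a write, or a read from writable memory,
   fails with TARGET_XFER_UNAVAILABLE.  */
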
/* The to_insert_breakpoint method of target record-btrace.  */

static int
record_btrace_insert_breakpoint (struct target_ops *ops,
				 struct gdbarch *gdbarch,
				 struct bp_target_info *bp_tgt)
{
  const char *old;
  int ret;

  /* Inserting breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  TRY
    {
      ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      replay_memory_access = old;
      throw_exception (except);
    }
  END_CATCH
  replay_memory_access = old;

  return ret;
}

/* The to_remove_breakpoint method of target record-btrace.  */

static int
record_btrace_remove_breakpoint (struct target_ops *ops,
				 struct gdbarch *gdbarch,
				 struct bp_target_info *bp_tgt,
				 enum remove_bp_reason reason)
{
  const char *old;
  int ret;

  /* Removing breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  TRY
    {
      ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt,
						reason);
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      replay_memory_access = old;
      throw_exception (except);
    }
  END_CATCH
  replay_memory_access = old;

  return ret;
}

/* The to_fetch_registers method of target record-btrace.  */

static void
record_btrace_fetch_registers (struct target_ops *ops,
			       struct regcache *regcache, int regno)
{
  struct btrace_insn_iterator *replay;
  struct thread_info *tp;

  tp = find_thread_ptid (regcache_get_ptid (regcache));
  gdb_assert (tp != NULL);

  replay = tp->btrace.replay;
  if (replay != NULL && !record_btrace_generating_corefile)
    {
      const struct btrace_insn *insn;
      struct gdbarch *gdbarch;
      int pcreg;

      gdbarch = regcache->arch ();
      pcreg = gdbarch_pc_regnum (gdbarch);
      if (pcreg < 0)
	return;

      /* We can only provide the PC register.  */
      if (regno >= 0 && regno != pcreg)
	return;

      insn = btrace_insn_get (replay);
      gdb_assert (insn != NULL);

      regcache_raw_supply (regcache, regno, &insn->pc);
    }
  else
    {
      struct target_ops *t = ops->beneath;

      t->to_fetch_registers (t, regcache, regno);
    }
}

/* The to_store_registers method of target record-btrace.  */

static void
record_btrace_store_registers (struct target_ops *ops,
			       struct regcache *regcache, int regno)
{
  struct target_ops *t;

  if (!record_btrace_generating_corefile
      && record_btrace_is_replaying (ops, regcache_get_ptid (regcache)))
    error (_("Cannot write registers while replaying."));

  gdb_assert (may_write_registers != 0);

  t = ops->beneath;
  t->to_store_registers (t, regcache, regno);
}

/* The to_prepare_to_store method of target record-btrace.  */

static void
record_btrace_prepare_to_store (struct target_ops *ops,
				struct regcache *regcache)
{
  struct target_ops *t;

  if (!record_btrace_generating_corefile
      && record_btrace_is_replaying (ops, regcache_get_ptid (regcache)))
    return;

  t = ops->beneath;
  t->to_prepare_to_store (t, regcache);
}

/* The branch trace frame cache.  */

struct btrace_frame_cache
{
  /* The thread.  */
  struct thread_info *tp;

  /* The frame info.  */
  struct frame_info *frame;

  /* The branch trace function segment.  */
  const struct btrace_function *bfun;
};

/* A struct btrace_frame_cache hash table, indexed by frame.  */

static htab_t bfcache;

/* hash_f for htab_create_alloc of bfcache.  */

static hashval_t
bfcache_hash (const void *arg)
{
  const struct btrace_frame_cache *cache
    = (const struct btrace_frame_cache *) arg;

  return htab_hash_pointer (cache->frame);
}

/* eq_f for htab_create_alloc of bfcache.  */

static int
bfcache_eq (const void *arg1, const void *arg2)
{
  const struct btrace_frame_cache *cache1
    = (const struct btrace_frame_cache *) arg1;
  const struct btrace_frame_cache *cache2
    = (const struct btrace_frame_cache *) arg2;

  return cache1->frame == cache2->frame;
}

/* Create a new btrace frame cache.  */

static struct btrace_frame_cache *
bfcache_new (struct frame_info *frame)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
  cache->frame = frame;

  slot = htab_find_slot (bfcache, cache, INSERT);
  gdb_assert (*slot == NULL);
  *slot = cache;

  return cache;
}

/* Extract the branch trace function from a branch trace frame.  */

static const struct btrace_function *
btrace_get_frame_function (struct frame_info *frame)
{
  const struct btrace_frame_cache *cache;
  struct btrace_frame_cache pattern;
  void **slot;

  pattern.frame = frame;

  slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
  if (slot == NULL)
    return NULL;

  cache = (const struct btrace_frame_cache *) *slot;
  return cache->bfun;
}

/* Implement stop_reason method for record_btrace_frame_unwind.  */

static enum unwind_stop_reason
record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
					void **this_cache)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;

  cache = (const struct btrace_frame_cache *) *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  if (bfun->up == 0)
    return UNWIND_UNAVAILABLE;

  return UNWIND_NO_REASON;
}

/* Implement this_id method for record_btrace_frame_unwind.  */

static void
record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
			     struct frame_id *this_id)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;
  struct btrace_call_iterator it;
  CORE_ADDR code, special;

  cache = (const struct btrace_frame_cache *) *this_cache;

  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  while (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->prev) != 0)
    bfun = btrace_call_get (&it);

  code = get_frame_func (this_frame);
  special = bfun->number;

  *this_id = frame_id_build_unavailable_stack_special (code, special);

  DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
	 btrace_get_bfun_name (cache->bfun),
	 core_addr_to_string_nz (this_id->code_addr),
	 core_addr_to_string_nz (this_id->special_addr));
}

/* Implement prev_register method for record_btrace_frame_unwind.  */

static struct value *
record_btrace_frame_prev_register (struct frame_info *this_frame,
				   void **this_cache,
				   int regnum)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun, *caller;
  struct btrace_call_iterator it;
  struct gdbarch *gdbarch;
  CORE_ADDR pc;
  int pcreg;

  gdbarch = get_frame_arch (this_frame);
  pcreg = gdbarch_pc_regnum (gdbarch);
  if (pcreg < 0 || regnum != pcreg)
    throw_error (NOT_AVAILABLE_ERROR,
		 _("Registers are not available in btrace record history"));

  cache = (const struct btrace_frame_cache *) *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  if (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->up) == 0)
    throw_error (NOT_AVAILABLE_ERROR,
		 _("No caller in btrace record history"));

  caller = btrace_call_get (&it);

  if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
    pc = caller->insn.front ().pc;
  else
    {
      pc = caller->insn.back ().pc;
      pc += gdb_insn_length (gdbarch, pc);
    }

  DEBUG ("[frame] unwound PC in %s on level %d: %s",
	 btrace_get_bfun_name (bfun), bfun->level,
	 core_addr_to_string_nz (pc));

  return frame_unwind_got_address (this_frame, regnum, pc);
}

/* Implement sniffer method for record_btrace_frame_unwind.  */

static int
record_btrace_frame_sniffer (const struct frame_unwind *self,
			     struct frame_info *this_frame,
			     void **this_cache)
{
  const struct btrace_function *bfun;
  struct btrace_frame_cache *cache;
  struct thread_info *tp;
  struct frame_info *next;

  /* THIS_FRAME does not contain a reference to its thread.  */
  tp = find_thread_ptid (inferior_ptid);
  gdb_assert (tp != NULL);

  bfun = NULL;
  next = get_next_frame (this_frame);
  if (next == NULL)
    {
      const struct btrace_insn_iterator *replay;

      replay = tp->btrace.replay;
      if (replay != NULL)
	bfun = &replay->btinfo->functions[replay->call_index];
    }
  else
    {
      const struct btrace_function *callee;
      struct btrace_call_iterator it;

      callee = btrace_get_frame_function (next);
      if (callee == NULL || (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
	return 0;

      if (btrace_find_call_by_number (&it, &tp->btrace, callee->up) == 0)
	return 0;

      bfun = btrace_call_get (&it);
    }

  if (bfun == NULL)
    return 0;

  DEBUG ("[frame] sniffed frame for %s on level %d",
	 btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = tp;
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}

/* Implement sniffer method for record_btrace_tailcall_frame_unwind.  */

static int
record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
				      struct frame_info *this_frame,
				      void **this_cache)
{
  const struct btrace_function *bfun, *callee;
  struct btrace_frame_cache *cache;
  struct btrace_call_iterator it;
  struct frame_info *next;
  struct thread_info *tinfo;

  next = get_next_frame (this_frame);
  if (next == NULL)
    return 0;

  callee = btrace_get_frame_function (next);
  if (callee == NULL)
    return 0;

  if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
    return 0;

  tinfo = find_thread_ptid (inferior_ptid);
  if (btrace_find_call_by_number (&it, &tinfo->btrace, callee->up) == 0)
    return 0;

  bfun = btrace_call_get (&it);

  DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
	 btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = tinfo;
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}

static void
record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = (struct btrace_frame_cache *) this_cache;

  slot = htab_find_slot (bfcache, cache, NO_INSERT);
  gdb_assert (slot != NULL);

  htab_remove_elt (bfcache, cache);
}

/* btrace recording does not store previous memory content, nor the stack
   frame contents.  Any unwinding would return erroneous results as the
   stack contents no longer match the changed PC value restored from
   history.  Therefore this unwinder reports any possibly unwound registers
   as <unavailable>.  */

const struct frame_unwind record_btrace_frame_unwind =
{
  NORMAL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_frame_sniffer,
  record_btrace_frame_dealloc_cache
};

const struct frame_unwind record_btrace_tailcall_frame_unwind =
{
  TAILCALL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_tailcall_frame_sniffer,
  record_btrace_frame_dealloc_cache
};

/* Implement the to_get_unwinder method.  */

static const struct frame_unwind *
record_btrace_to_get_unwinder (struct target_ops *self)
{
  return &record_btrace_frame_unwind;
}

/* Implement the to_get_tailcall_unwinder method.  */

static const struct frame_unwind *
record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
{
  return &record_btrace_tailcall_frame_unwind;
}

/* Return a human-readable string for FLAG.  */

static const char *
btrace_thread_flag_to_str (enum btrace_thread_flag flag)
{
  switch (flag)
    {
    case BTHR_STEP:
      return "step";

    case BTHR_RSTEP:
      return "reverse-step";

    case BTHR_CONT:
      return "cont";

    case BTHR_RCONT:
      return "reverse-cont";

    case BTHR_STOP:
      return "stop";
    }

  return "<invalid>";
}

/* Indicate that TP should be resumed according to FLAG.  */

static void
record_btrace_resume_thread (struct thread_info *tp,
			     enum btrace_thread_flag flag)
{
  struct btrace_thread_info *btinfo;

  DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp),
	 target_pid_to_str (tp->ptid), flag, btrace_thread_flag_to_str (flag));

  btinfo = &tp->btrace;

  /* Fetch the latest branch trace.  */
  btrace_fetch (tp);

  /* A resume request overwrites a preceding resume or stop request.  */
  btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
  btinfo->flags |= flag;
}

/* Get the current frame for TP.  */

static struct frame_info *
get_thread_current_frame (struct thread_info *tp)
{
  struct frame_info *frame;
  ptid_t old_inferior_ptid;
  int executing;

  /* Set INFERIOR_PTID, which is implicitly used by get_current_frame.  */
  old_inferior_ptid = inferior_ptid;
  inferior_ptid = tp->ptid;

  /* Clear the executing flag to allow changes to the current frame.
     We are not actually running, yet.  We just started a reverse execution
     command or a record goto command.
     For the latter, EXECUTING is false and this has no effect.
     For the former, EXECUTING is true and we're in to_wait, about to
     move the thread.  Since we need to recompute the stack, we temporarily
     set EXECUTING to false.  */
  executing = is_executing (inferior_ptid);
  set_executing (inferior_ptid, 0);

  frame = NULL;
  TRY
    {
      frame = get_current_frame ();
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      /* Restore the previous execution state.  */
      set_executing (inferior_ptid, executing);

      /* Restore the previous inferior_ptid.  */
      inferior_ptid = old_inferior_ptid;

      throw_exception (except);
    }
  END_CATCH

  /* Restore the previous execution state.  */
  set_executing (inferior_ptid, executing);

  /* Restore the previous inferior_ptid.  */
  inferior_ptid = old_inferior_ptid;

  return frame;
}

/* Start replaying a thread.  */

static struct btrace_insn_iterator *
record_btrace_start_replaying (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;
  replay = NULL;

  /* We can't start replaying without trace.  */
  if (btinfo->functions.empty ())
    return NULL;

  /* GDB stores the current frame_id when stepping in order to detect steps
     into subroutines.
     Since frames are computed differently when we're replaying, we need to
     recompute those stored frames and fix them up so we can still detect
     subroutines after we started replaying.  */
  TRY
    {
      struct frame_info *frame;
      struct frame_id frame_id;
      int upd_step_frame_id, upd_step_stack_frame_id;

      /* The current frame without replaying - computed via normal unwind.  */
      frame = get_thread_current_frame (tp);
      frame_id = get_frame_id (frame);

      /* Check if we need to update any stepping-related frame id's.  */
      upd_step_frame_id = frame_id_eq (frame_id,
				       tp->control.step_frame_id);
      upd_step_stack_frame_id = frame_id_eq (frame_id,
					     tp->control.step_stack_frame_id);

      /* We start replaying at the end of the branch trace.  This corresponds
	 to the current instruction.  */
      replay = XNEW (struct btrace_insn_iterator);
      btrace_insn_end (replay, btinfo);

      /* Skip gaps at the end of the trace.  */
      while (btrace_insn_get (replay) == NULL)
	{
	  unsigned int steps;

	  steps = btrace_insn_prev (replay, 1);
	  if (steps == 0)
	    error (_("No trace."));
	}

      /* We're not replaying, yet.  */
      gdb_assert (btinfo->replay == NULL);
      btinfo->replay = replay;

      /* Make sure we're not using any stale registers.  */
      registers_changed_ptid (tp->ptid);

      /* The current frame with replaying - computed via btrace unwind.  */
      frame = get_thread_current_frame (tp);
      frame_id = get_frame_id (frame);

      /* Replace stepping related frames where necessary.  */
      if (upd_step_frame_id)
	tp->control.step_frame_id = frame_id;
      if (upd_step_stack_frame_id)
	tp->control.step_stack_frame_id = frame_id;
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      xfree (btinfo->replay);
      btinfo->replay = NULL;

      registers_changed_ptid (tp->ptid);

      throw_exception (except);
    }
  END_CATCH

  return replay;
}

/* Stop replaying a thread.  */

static void
record_btrace_stop_replaying (struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;

  xfree (btinfo->replay);
  btinfo->replay = NULL;

  /* Make sure we're not leaving any stale registers.  */
  registers_changed_ptid (tp->ptid);
}

/* Stop replaying TP if it is at the end of its execution history.  */

static void
record_btrace_stop_replaying_at_end (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay, end;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;
  replay = btinfo->replay;

  if (replay == NULL)
    return;

  btrace_insn_end (&end, btinfo);

  if (btrace_insn_cmp (replay, &end) == 0)
    record_btrace_stop_replaying (tp);
}

2018 /* The to_resume method of target record-btrace. */
2019
2020 static void
2021 record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
2022 enum gdb_signal signal)
2023 {
2024 struct thread_info *tp;
2025 enum btrace_thread_flag flag, cflag;
2026
2027 DEBUG ("resume %s: %s%s", target_pid_to_str (ptid),
2028 execution_direction == EXEC_REVERSE ? "reverse-" : "",
2029 step ? "step" : "cont");
2030
2031 /* Store the execution direction of the last resume.
2032
2033 If there is more than one to_resume call, we have to rely on infrun
2034 to not change the execution direction in-between. */
2035 record_btrace_resume_exec_dir = execution_direction;
2036
2037 /* As long as we're not replaying, just forward the request.
2038
2039 For non-stop targets this means that no thread is replaying. In order to
2040 make progress, we may need to explicitly move replaying threads to the end
2041 of their execution history. */
2042 if ((execution_direction != EXEC_REVERSE)
2043 && !record_btrace_is_replaying (ops, minus_one_ptid))
2044 {
2045 ops = ops->beneath;
2046 ops->to_resume (ops, ptid, step, signal);
2047 return;
2048 }
2049
2050 /* Compute the btrace thread flag for the requested move. */
2051 if (execution_direction == EXEC_REVERSE)
2052 {
2053 flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;
2054 cflag = BTHR_RCONT;
2055 }
2056 else
2057 {
2058 flag = step == 0 ? BTHR_CONT : BTHR_STEP;
2059 cflag = BTHR_CONT;
2060 }
2061
2062 /* We just indicate the resume intent here. The actual stepping happens in
2063 record_btrace_wait below.
2064
2065 For all-stop targets, we only step INFERIOR_PTID and continue others. */
2066 if (!target_is_non_stop_p ())
2067 {
2068 gdb_assert (ptid_match (inferior_ptid, ptid));
2069
2070 ALL_NON_EXITED_THREADS (tp)
2071 if (ptid_match (tp->ptid, ptid))
2072 {
2073 if (ptid_match (tp->ptid, inferior_ptid))
2074 record_btrace_resume_thread (tp, flag);
2075 else
2076 record_btrace_resume_thread (tp, cflag);
2077 }
2078 }
2079 else
2080 {
2081 ALL_NON_EXITED_THREADS (tp)
2082 if (ptid_match (tp->ptid, ptid))
2083 record_btrace_resume_thread (tp, flag);
2084 }
2085
2086 /* Async support. */
2087 if (target_can_async_p ())
2088 {
2089 target_async (1);
2090 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2091 }
2092 }
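
/* To illustrate the flag computation above (a sketch, not exhaustive --
   infrun may also step internally, e.g. when stepping over breakpoints):

     request             direction  step  ->  flag        cflag
     "stepi"             forward    1         BTHR_STEP   BTHR_CONT
     "continue"          forward    0         BTHR_CONT   BTHR_CONT
     "reverse-stepi"     reverse    1         BTHR_RSTEP  BTHR_RCONT
     "reverse-continue"  reverse    0         BTHR_RCONT  BTHR_RCONT

   On all-stop targets, only INFERIOR_PTID is resumed with FLAG; all other
   matching threads get CFLAG, i.e. they are continued rather than
   stepped. */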
2093
2094 /* The to_commit_resume method of target record-btrace. */
2095
2096 static void
2097 record_btrace_commit_resume (struct target_ops *ops)
2098 {
2099 if ((execution_direction != EXEC_REVERSE)
2100 && !record_btrace_is_replaying (ops, minus_one_ptid))
2101 ops->beneath->to_commit_resume (ops->beneath);
2102 }
2103
2104 /* Cancel resuming TP. */
2105
2106 static void
2107 record_btrace_cancel_resume (struct thread_info *tp)
2108 {
2109 enum btrace_thread_flag flags;
2110
2111 flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
2112 if (flags == 0)
2113 return;
2114
2115 DEBUG ("cancel resume thread %s (%s): %x (%s)",
2116 print_thread_id (tp),
2117 target_pid_to_str (tp->ptid), flags,
2118 btrace_thread_flag_to_str (flags));
2119
2120 tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
2121 record_btrace_stop_replaying_at_end (tp);
2122 }
2123
2124 /* Return a target_waitstatus indicating that we ran out of history. */
2125
2126 static struct target_waitstatus
2127 btrace_step_no_history (void)
2128 {
2129 struct target_waitstatus status;
2130
2131 status.kind = TARGET_WAITKIND_NO_HISTORY;
2132
2133 return status;
2134 }
2135
2136 /* Return a target_waitstatus indicating that a step finished. */
2137
2138 static struct target_waitstatus
2139 btrace_step_stopped (void)
2140 {
2141 struct target_waitstatus status;
2142
2143 status.kind = TARGET_WAITKIND_STOPPED;
2144 status.value.sig = GDB_SIGNAL_TRAP;
2145
2146 return status;
2147 }
2148
2149 /* Return a target_waitstatus indicating that a thread was stopped as
2150 requested. */
2151
2152 static struct target_waitstatus
2153 btrace_step_stopped_on_request (void)
2154 {
2155 struct target_waitstatus status;
2156
2157 status.kind = TARGET_WAITKIND_STOPPED;
2158 status.value.sig = GDB_SIGNAL_0;
2159
2160 return status;
2161 }
2162
2163 /* Return a target_waitstatus indicating a spurious stop. */
2164
2165 static struct target_waitstatus
2166 btrace_step_spurious (void)
2167 {
2168 struct target_waitstatus status;
2169
2170 status.kind = TARGET_WAITKIND_SPURIOUS;
2171
2172 return status;
2173 }
2174
2175 /* Return a target_waitstatus indicating that the thread was not resumed. */
2176
2177 static struct target_waitstatus
2178 btrace_step_no_resumed (void)
2179 {
2180 struct target_waitstatus status;
2181
2182 status.kind = TARGET_WAITKIND_NO_RESUMED;
2183
2184 return status;
2185 }
2186
2187 /* Return a target_waitstatus indicating that we should wait again. */
2188
2189 static struct target_waitstatus
2190 btrace_step_again (void)
2191 {
2192 struct target_waitstatus status;
2193
2194 status.kind = TARGET_WAITKIND_IGNORE;
2195
2196 return status;
2197 }
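
/* For reference, the helpers above map onto target_waitstatus kinds as
   follows:

     btrace_step_no_history          TARGET_WAITKIND_NO_HISTORY
     btrace_step_stopped             TARGET_WAITKIND_STOPPED, GDB_SIGNAL_TRAP
     btrace_step_stopped_on_request  TARGET_WAITKIND_STOPPED, GDB_SIGNAL_0
     btrace_step_spurious            TARGET_WAITKIND_SPURIOUS
     btrace_step_no_resumed          TARGET_WAITKIND_NO_RESUMED
     btrace_step_again               TARGET_WAITKIND_IGNORE  */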
2198
2199 /* Clear the record histories. */
2200
2201 static void
2202 record_btrace_clear_histories (struct btrace_thread_info *btinfo)
2203 {
2204 xfree (btinfo->insn_history);
2205 xfree (btinfo->call_history);
2206
2207 btinfo->insn_history = NULL;
2208 btinfo->call_history = NULL;
2209 }
2210
2211 /* Check whether TP's current replay position is at a breakpoint. */
2212
2213 static int
2214 record_btrace_replay_at_breakpoint (struct thread_info *tp)
2215 {
2216 struct btrace_insn_iterator *replay;
2217 struct btrace_thread_info *btinfo;
2218 const struct btrace_insn *insn;
2219 struct inferior *inf;
2220
2221 btinfo = &tp->btrace;
2222 replay = btinfo->replay;
2223
2224 if (replay == NULL)
2225 return 0;
2226
2227 insn = btrace_insn_get (replay);
2228 if (insn == NULL)
2229 return 0;
2230
2231 inf = find_inferior_ptid (tp->ptid);
2232 if (inf == NULL)
2233 return 0;
2234
2235 return record_check_stopped_by_breakpoint (inf->aspace, insn->pc,
2236 &btinfo->stop_reason);
2237 }
2238
2239 /* Step one instruction in forward direction. */
2240
2241 static struct target_waitstatus
2242 record_btrace_single_step_forward (struct thread_info *tp)
2243 {
2244 struct btrace_insn_iterator *replay, end, start;
2245 struct btrace_thread_info *btinfo;
2246
2247 btinfo = &tp->btrace;
2248 replay = btinfo->replay;
2249
2250 /* We're done if we're not replaying. */
2251 if (replay == NULL)
2252 return btrace_step_no_history ();
2253
2254 /* Check if we're stepping a breakpoint. */
2255 if (record_btrace_replay_at_breakpoint (tp))
2256 return btrace_step_stopped ();
2257
2258 /* Skip gaps during replay. If we end up at a gap (at the end of the trace),
2259 jump back to the instruction at which we started. */
2260 start = *replay;
2261 do
2262 {
2263 unsigned int steps;
2264
2265 /* We will bail out here if we continue stepping after reaching the end
2266 of the execution history. */
2267 steps = btrace_insn_next (replay, 1);
2268 if (steps == 0)
2269 {
2270 *replay = start;
2271 return btrace_step_no_history ();
2272 }
2273 }
2274 while (btrace_insn_get (replay) == NULL);
2275
2276 /* Determine the end of the instruction trace. */
2277 btrace_insn_end (&end, btinfo);
2278
2279 /* The execution trace contains (and ends with) the current instruction.
2280 This instruction has not been executed yet, so the trace really ends
2281 one instruction earlier. */
2282 if (btrace_insn_cmp (replay, &end) == 0)
2283 return btrace_step_no_history ();
2284
2285 return btrace_step_spurious ();
2286 }
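
/* A sketch of the gap handling above: suppose the recorded stream is

     insn #10  0x4004f1  call
     gap       (decode error)
     insn #11  0x4004f6  ret

   then a forward step from #10 silently skips the gap and lands on #11.
   If only gaps remain up to the end of the trace, REPLAY is restored to
   START and TARGET_WAITKIND_NO_HISTORY is reported.  (Instruction
   numbers and addresses are made up for illustration.) */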
2287
2288 /* Step one instruction in backward direction. */
2289
2290 static struct target_waitstatus
2291 record_btrace_single_step_backward (struct thread_info *tp)
2292 {
2293 struct btrace_insn_iterator *replay, start;
2294 struct btrace_thread_info *btinfo;
2295
2296 btinfo = &tp->btrace;
2297 replay = btinfo->replay;
2298
2299 /* Start replaying if we're not already doing so. */
2300 if (replay == NULL)
2301 replay = record_btrace_start_replaying (tp);
2302
2303 /* If we can't step any further, we reached the end of the history.
2304 Skip gaps during replay. If we end up at a gap (at the beginning of
2305 the trace), jump back to the instruction at which we started. */
2306 start = *replay;
2307 do
2308 {
2309 unsigned int steps;
2310
2311 steps = btrace_insn_prev (replay, 1);
2312 if (steps == 0)
2313 {
2314 *replay = start;
2315 return btrace_step_no_history ();
2316 }
2317 }
2318 while (btrace_insn_get (replay) == NULL);
2319
2320 /* Check if we're stepping a breakpoint.
2321
2322 For reverse-stepping, this check is after the step. There is logic in
2323 infrun.c that handles reverse-stepping separately. See, for example,
2324 proceed and adjust_pc_after_break.
2325
2326 This code assumes that for reverse-stepping, PC points to the last
2327 de-executed instruction, whereas for forward-stepping PC points to the
2328 next to-be-executed instruction. */
2329 if (record_btrace_replay_at_breakpoint (tp))
2330 return btrace_step_stopped ();
2331
2332 return btrace_step_spurious ();
2333 }
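
/* For example, reverse-stepping from

     0x1000  mov ...
     0x1004  add ...   <- PC before the reverse step

   moves REPLAY back by one instruction, after which PC is 0x1000: the
   instruction that was just de-executed.  That is why the breakpoint
   check above runs after the step rather than before it.  (Addresses
   are made up for illustration.) */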
2334
2335 /* Step a single thread. */
2336
2337 static struct target_waitstatus
2338 record_btrace_step_thread (struct thread_info *tp)
2339 {
2340 struct btrace_thread_info *btinfo;
2341 struct target_waitstatus status;
2342 enum btrace_thread_flag flags;
2343
2344 btinfo = &tp->btrace;
2345
2346 flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
2347 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
2348
2349 DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp),
2350 target_pid_to_str (tp->ptid), flags,
2351 btrace_thread_flag_to_str (flags));
2352
2353 /* We can't step without an execution history. */
2354 if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
2355 return btrace_step_no_history ();
2356
2357 switch (flags)
2358 {
2359 default:
2360 internal_error (__FILE__, __LINE__, _("invalid stepping type."));
2361
2362 case BTHR_STOP:
2363 return btrace_step_stopped_on_request ();
2364
2365 case BTHR_STEP:
2366 status = record_btrace_single_step_forward (tp);
2367 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2368 break;
2369
2370 return btrace_step_stopped ();
2371
2372 case BTHR_RSTEP:
2373 status = record_btrace_single_step_backward (tp);
2374 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2375 break;
2376
2377 return btrace_step_stopped ();
2378
2379 case BTHR_CONT:
2380 status = record_btrace_single_step_forward (tp);
2381 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2382 break;
2383
2384 btinfo->flags |= flags;
2385 return btrace_step_again ();
2386
2387 case BTHR_RCONT:
2388 status = record_btrace_single_step_backward (tp);
2389 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2390 break;
2391
2392 btinfo->flags |= flags;
2393 return btrace_step_again ();
2394 }
2395
2396 /* We keep threads moving at the end of their execution history. The to_wait
2397 method will stop the thread for which the event is reported. */
2398 if (status.kind == TARGET_WAITKIND_NO_HISTORY)
2399 btinfo->flags |= flags;
2400
2401 return status;
2402 }
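
/* Example: a thread moved with BTHR_CONT is advanced one instruction per
   call.  While each single step is "spurious" (nothing of interest
   happened), the flag is re-armed and TARGET_WAITKIND_IGNORE tells the
   to_wait loop below to step the thread again.  The iteration ends when
   a step hits a breakpoint (STOPPED) or runs out of trace (NO_HISTORY). */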
2403
2404 /* A vector of threads. */
2405
2406 typedef struct thread_info * tp_t;
2407 DEF_VEC_P (tp_t);
2408
2409 /* Announce further events if necessary. */
2410
2411 static void
2412 record_btrace_maybe_mark_async_event (const VEC (tp_t) *moving,
2413 const VEC (tp_t) *no_history)
2414 {
2415 int more_moving, more_no_history;
2416
2417 more_moving = !VEC_empty (tp_t, moving);
2418 more_no_history = !VEC_empty (tp_t, no_history);
2419
2420 if (!more_moving && !more_no_history)
2421 return;
2422
2423 if (more_moving)
2424 DEBUG ("movers pending");
2425
2426 if (more_no_history)
2427 DEBUG ("no-history pending");
2428
2429 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2430 }
2431
2432 /* The to_wait method of target record-btrace. */
2433
2434 static ptid_t
2435 record_btrace_wait (struct target_ops *ops, ptid_t ptid,
2436 struct target_waitstatus *status, int options)
2437 {
2438 VEC (tp_t) *moving, *no_history;
2439 struct thread_info *tp, *eventing;
2440 struct cleanup *cleanups = make_cleanup (null_cleanup, NULL);
2441
2442 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);
2443
2444 /* As long as we're not replaying, just forward the request. */
2445 if ((execution_direction != EXEC_REVERSE)
2446 && !record_btrace_is_replaying (ops, minus_one_ptid))
2447 {
2448 ops = ops->beneath;
2449 return ops->to_wait (ops, ptid, status, options);
2450 }
2451
2452 moving = NULL;
2453 no_history = NULL;
2454
2455 make_cleanup (VEC_cleanup (tp_t), &moving);
2456 make_cleanup (VEC_cleanup (tp_t), &no_history);
2457
2458 /* Keep a work list of moving threads. */
2459 ALL_NON_EXITED_THREADS (tp)
2460 if (ptid_match (tp->ptid, ptid)
2461 && ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0))
2462 VEC_safe_push (tp_t, moving, tp);
2463
2464 if (VEC_empty (tp_t, moving))
2465 {
2466 *status = btrace_step_no_resumed ();
2467
2468 DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid),
2469 target_waitstatus_to_string (status).c_str ());
2470
2471 do_cleanups (cleanups);
2472 return null_ptid;
2473 }
2474
2475 /* Step moving threads one by one, one step each, until either one thread
2476 reports an event or we run out of threads to step.
2477
2478 When stepping more than one thread, chances are that some threads reach
2479 the end of their execution history earlier than others. If we reported
2480 this immediately, all-stop on top of non-stop would stop all threads and
2481 resume the same threads next time. And we would report the same thread
2482 having reached the end of its execution history again.
2483
2484 In the worst case, this would starve the other threads. But even if other
2485 threads would be allowed to make progress, this would result in far too
2486 many intermediate stops.
2487
2488 We therefore delay the reporting of "no execution history" until we have
2489 nothing else to report. By this time, all threads should have moved to
2490 either the beginning or the end of their execution history. There will
2491 be a single user-visible stop. */
2492 eventing = NULL;
2493 while ((eventing == NULL) && !VEC_empty (tp_t, moving))
2494 {
2495 unsigned int ix;
2496
2497 ix = 0;
2498 while ((eventing == NULL) && VEC_iterate (tp_t, moving, ix, tp))
2499 {
2500 *status = record_btrace_step_thread (tp);
2501
2502 switch (status->kind)
2503 {
2504 case TARGET_WAITKIND_IGNORE:
2505 ix++;
2506 break;
2507
2508 case TARGET_WAITKIND_NO_HISTORY:
2509 VEC_safe_push (tp_t, no_history,
2510 VEC_ordered_remove (tp_t, moving, ix));
2511 break;
2512
2513 default:
2514 eventing = VEC_unordered_remove (tp_t, moving, ix);
2515 break;
2516 }
2517 }
2518 }
2519
2520 if (eventing == NULL)
2521 {
2522 /* We started with at least one moving thread. This thread must have
2523 either stopped or reached the end of its execution history.
2524
2525 In the former case, EVENTING must not be NULL.
2526 In the latter case, NO_HISTORY must not be empty. */
2527 gdb_assert (!VEC_empty (tp_t, no_history));
2528
2529 /* We kept threads moving at the end of their execution history. Stop
2530 EVENTING now that we are going to report its stop. */
2531 eventing = VEC_unordered_remove (tp_t, no_history, 0);
2532 eventing->btrace.flags &= ~BTHR_MOVE;
2533
2534 *status = btrace_step_no_history ();
2535 }
2536
2537 gdb_assert (eventing != NULL);
2538
2539 /* We kept threads replaying at the end of their execution history. Stop
2540 replaying EVENTING now that we are going to report its stop. */
2541 record_btrace_stop_replaying_at_end (eventing);
2542
2543 /* Stop all other threads. */
2544 if (!target_is_non_stop_p ())
2545 ALL_NON_EXITED_THREADS (tp)
2546 record_btrace_cancel_resume (tp);
2547
2548 /* In async mode, we need to announce further events. */
2549 if (target_is_async_p ())
2550 record_btrace_maybe_mark_async_event (moving, no_history);
2551
2552 /* Start record histories anew from the current position. */
2553 record_btrace_clear_histories (&eventing->btrace);
2554
2555 /* We moved the replay position but did not update registers. */
2556 registers_changed_ptid (eventing->ptid);
2557
2558 DEBUG ("wait ended by thread %s (%s): %s",
2559 print_thread_id (eventing),
2560 target_pid_to_str (eventing->ptid),
2561 target_waitstatus_to_string (status).c_str ());
2562
2563 do_cleanups (cleanups);
2564 return eventing->ptid;
2565 }
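
/* As an illustration of the delayed no-history reporting above: with two
   replaying threads T1 and T2 both moved by BTHR_RCONT, T1 may reach the
   beginning of its trace first.  T1 is then parked on the NO_HISTORY list
   while T2 keeps moving.  If T2 also runs out of history, a single
   TARGET_WAITKIND_NO_HISTORY event is reported; if T2 instead stops for
   another reason (e.g. a breakpoint), that stop is reported first and the
   pending no-history threads are announced via the async event handler. */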
2566
2567 /* The to_stop method of target record-btrace. */
2568
2569 static void
2570 record_btrace_stop (struct target_ops *ops, ptid_t ptid)
2571 {
2572 DEBUG ("stop %s", target_pid_to_str (ptid));
2573
2574 /* As long as we're not replaying, just forward the request. */
2575 if ((execution_direction != EXEC_REVERSE)
2576 && !record_btrace_is_replaying (ops, minus_one_ptid))
2577 {
2578 ops = ops->beneath;
2579 ops->to_stop (ops, ptid);
2580 }
2581 else
2582 {
2583 struct thread_info *tp;
2584
2585 ALL_NON_EXITED_THREADS (tp)
2586 if (ptid_match (tp->ptid, ptid))
2587 {
2588 tp->btrace.flags &= ~BTHR_MOVE;
2589 tp->btrace.flags |= BTHR_STOP;
2590 }
2591 }
2592 }
2593
2594 /* The to_can_execute_reverse method of target record-btrace. */
2595
2596 static int
2597 record_btrace_can_execute_reverse (struct target_ops *self)
2598 {
2599 return 1;
2600 }
2601
2602 /* The to_stopped_by_sw_breakpoint method of target record-btrace. */
2603
2604 static int
2605 record_btrace_stopped_by_sw_breakpoint (struct target_ops *ops)
2606 {
2607 if (record_btrace_is_replaying (ops, minus_one_ptid))
2608 {
2609 struct thread_info *tp = inferior_thread ();
2610
2611 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2612 }
2613
2614 return ops->beneath->to_stopped_by_sw_breakpoint (ops->beneath);
2615 }
2616
2617 /* The to_supports_stopped_by_sw_breakpoint method of target
2618 record-btrace. */
2619
2620 static int
2621 record_btrace_supports_stopped_by_sw_breakpoint (struct target_ops *ops)
2622 {
2623 if (record_btrace_is_replaying (ops, minus_one_ptid))
2624 return 1;
2625
2626 return ops->beneath->to_supports_stopped_by_sw_breakpoint (ops->beneath);
2627 }
2628
2629 /* The to_stopped_by_hw_breakpoint method of target record-btrace. */
2630
2631 static int
2632 record_btrace_stopped_by_hw_breakpoint (struct target_ops *ops)
2633 {
2634 if (record_btrace_is_replaying (ops, minus_one_ptid))
2635 {
2636 struct thread_info *tp = inferior_thread ();
2637
2638 return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2639 }
2640
2641 return ops->beneath->to_stopped_by_hw_breakpoint (ops->beneath);
2642 }
2643
2644 /* The to_supports_stopped_by_hw_breakpoint method of target
2645 record-btrace. */
2646
2647 static int
2648 record_btrace_supports_stopped_by_hw_breakpoint (struct target_ops *ops)
2649 {
2650 if (record_btrace_is_replaying (ops, minus_one_ptid))
2651 return 1;
2652
2653 return ops->beneath->to_supports_stopped_by_hw_breakpoint (ops->beneath);
2654 }
2655
2656 /* The to_update_thread_list method of target record-btrace. */
2657
2658 static void
2659 record_btrace_update_thread_list (struct target_ops *ops)
2660 {
2661 /* We don't add or remove threads during replay. */
2662 if (record_btrace_is_replaying (ops, minus_one_ptid))
2663 return;
2664
2665 /* Forward the request. */
2666 ops = ops->beneath;
2667 ops->to_update_thread_list (ops);
2668 }
2669
2670 /* The to_thread_alive method of target record-btrace. */
2671
2672 static int
2673 record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
2674 {
2675 /* We don't add or remove threads during replay. */
2676 if (record_btrace_is_replaying (ops, minus_one_ptid))
2677 return find_thread_ptid (ptid) != NULL;
2678
2679 /* Forward the request. */
2680 ops = ops->beneath;
2681 return ops->to_thread_alive (ops, ptid);
2682 }
2683
2684 /* Set the replay branch trace instruction iterator. If IT is NULL, replay
2685 is stopped. */
2686
2687 static void
2688 record_btrace_set_replay (struct thread_info *tp,
2689 const struct btrace_insn_iterator *it)
2690 {
2691 struct btrace_thread_info *btinfo;
2692
2693 btinfo = &tp->btrace;
2694
2695 if (it == NULL)
2696 record_btrace_stop_replaying (tp);
2697 else
2698 {
2699 if (btinfo->replay == NULL)
2700 record_btrace_start_replaying (tp);
2701 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
2702 return;
2703
2704 *btinfo->replay = *it;
2705 registers_changed_ptid (tp->ptid);
2706 }
2707
2708 /* Start anew from the new replay position. */
2709 record_btrace_clear_histories (btinfo);
2710
2711 stop_pc = regcache_read_pc (get_current_regcache ());
2712 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
2713 }
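
/* This helper is the common back end of the "record goto" commands below:
   "record goto begin" and "record goto <n>" pass a concrete iterator,
   while "record goto end" passes NULL to stop replaying altogether. */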
2714
2715 /* The to_goto_record_begin method of target record-btrace. */
2716
2717 static void
2718 record_btrace_goto_begin (struct target_ops *self)
2719 {
2720 struct thread_info *tp;
2721 struct btrace_insn_iterator begin;
2722
2723 tp = require_btrace_thread ();
2724
2725 btrace_insn_begin (&begin, &tp->btrace);
2726
2727 /* Skip gaps at the beginning of the trace. */
2728 while (btrace_insn_get (&begin) == NULL)
2729 {
2730 unsigned int steps;
2731
2732 steps = btrace_insn_next (&begin, 1);
2733 if (steps == 0)
2734 error (_("No trace."));
2735 }
2736
2737 record_btrace_set_replay (tp, &begin);
2738 }
2739
2740 /* The to_goto_record_end method of target record-btrace. */
2741
2742 static void
2743 record_btrace_goto_end (struct target_ops *ops)
2744 {
2745 struct thread_info *tp;
2746
2747 tp = require_btrace_thread ();
2748
2749 record_btrace_set_replay (tp, NULL);
2750 }
2751
2752 /* The to_goto_record method of target record-btrace. */
2753
2754 static void
2755 record_btrace_goto (struct target_ops *self, ULONGEST insn)
2756 {
2757 struct thread_info *tp;
2758 struct btrace_insn_iterator it;
2759 unsigned int number;
2760 int found;
2761
2762 number = insn;
2763
2764 /* Check for wrap-arounds. */
2765 if (number != insn)
2766 error (_("Instruction number out of range."));
2767
2768 tp = require_btrace_thread ();
2769
2770 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
2771
2772 /* Check if the instruction could not be found or is a gap. */
2773 if (found == 0 || btrace_insn_get (&it) == NULL)
2774 error (_("No such instruction."));
2775
2776 record_btrace_set_replay (tp, &it);
2777 }
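
/* An illustrative session for the three methods above:

     (gdb) record goto begin   <- record_btrace_goto_begin
     (gdb) record goto 42      <- record_btrace_goto with INSN == 42
     (gdb) record goto end     <- record_btrace_goto_end

   Numbers that do not fit into an unsigned int or that refer to a gap
   are rejected with the errors above. */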
2778
2779 /* The to_record_stop_replaying method of target record-btrace. */
2780
2781 static void
2782 record_btrace_stop_replaying_all (struct target_ops *self)
2783 {
2784 struct thread_info *tp;
2785
2786 ALL_NON_EXITED_THREADS (tp)
2787 record_btrace_stop_replaying (tp);
2788 }
2789
2790 /* The to_execution_direction target method. */
2791
2792 static enum exec_direction_kind
2793 record_btrace_execution_direction (struct target_ops *self)
2794 {
2795 return record_btrace_resume_exec_dir;
2796 }
2797
2798 /* The to_prepare_to_generate_core target method. */
2799
2800 static void
2801 record_btrace_prepare_to_generate_core (struct target_ops *self)
2802 {
2803 record_btrace_generating_corefile = 1;
2804 }
2805
2806 /* The to_done_generating_core target method. */
2807
2808 static void
2809 record_btrace_done_generating_core (struct target_ops *self)
2810 {
2811 record_btrace_generating_corefile = 0;
2812 }
2813
2814 /* Initialize the record-btrace target ops. */
2815
2816 static void
2817 init_record_btrace_ops (void)
2818 {
2819 struct target_ops *ops;
2820
2821 ops = &record_btrace_ops;
2822 ops->to_shortname = "record-btrace";
2823 ops->to_longname = "Branch tracing target";
2824 ops->to_doc = "Collect control-flow trace and provide the execution history.";
2825 ops->to_open = record_btrace_open;
2826 ops->to_close = record_btrace_close;
2827 ops->to_async = record_btrace_async;
2828 ops->to_detach = record_detach;
2829 ops->to_disconnect = record_btrace_disconnect;
2830 ops->to_mourn_inferior = record_mourn_inferior;
2831 ops->to_kill = record_kill;
2832 ops->to_stop_recording = record_btrace_stop_recording;
2833 ops->to_info_record = record_btrace_info;
2834 ops->to_insn_history = record_btrace_insn_history;
2835 ops->to_insn_history_from = record_btrace_insn_history_from;
2836 ops->to_insn_history_range = record_btrace_insn_history_range;
2837 ops->to_call_history = record_btrace_call_history;
2838 ops->to_call_history_from = record_btrace_call_history_from;
2839 ops->to_call_history_range = record_btrace_call_history_range;
2840 ops->to_record_method = record_btrace_record_method;
2841 ops->to_record_is_replaying = record_btrace_is_replaying;
2842 ops->to_record_will_replay = record_btrace_will_replay;
2843 ops->to_record_stop_replaying = record_btrace_stop_replaying_all;
2844 ops->to_xfer_partial = record_btrace_xfer_partial;
2845 ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
2846 ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
2847 ops->to_fetch_registers = record_btrace_fetch_registers;
2848 ops->to_store_registers = record_btrace_store_registers;
2849 ops->to_prepare_to_store = record_btrace_prepare_to_store;
2850 ops->to_get_unwinder = &record_btrace_to_get_unwinder;
2851 ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;
2852 ops->to_resume = record_btrace_resume;
2853 ops->to_commit_resume = record_btrace_commit_resume;
2854 ops->to_wait = record_btrace_wait;
2855 ops->to_stop = record_btrace_stop;
2856 ops->to_update_thread_list = record_btrace_update_thread_list;
2857 ops->to_thread_alive = record_btrace_thread_alive;
2858 ops->to_goto_record_begin = record_btrace_goto_begin;
2859 ops->to_goto_record_end = record_btrace_goto_end;
2860 ops->to_goto_record = record_btrace_goto;
2861 ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
2862 ops->to_stopped_by_sw_breakpoint = record_btrace_stopped_by_sw_breakpoint;
2863 ops->to_supports_stopped_by_sw_breakpoint
2864 = record_btrace_supports_stopped_by_sw_breakpoint;
2865 ops->to_stopped_by_hw_breakpoint = record_btrace_stopped_by_hw_breakpoint;
2866 ops->to_supports_stopped_by_hw_breakpoint
2867 = record_btrace_supports_stopped_by_hw_breakpoint;
2868 ops->to_execution_direction = record_btrace_execution_direction;
2869 ops->to_prepare_to_generate_core = record_btrace_prepare_to_generate_core;
2870 ops->to_done_generating_core = record_btrace_done_generating_core;
2871 ops->to_stratum = record_stratum;
2872 ops->to_magic = OPS_MAGIC;
2873 }
2874
2875 /* Start recording in BTS format. */
2876
2877 static void
2878 cmd_record_btrace_bts_start (const char *args, int from_tty)
2879 {
2880 if (args != NULL && *args != 0)
2881 error (_("Invalid argument."));
2882
2883 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2884
2885 TRY
2886 {
2887 execute_command ("target record-btrace", from_tty);
2888 }
2889 CATCH (exception, RETURN_MASK_ALL)
2890 {
2891 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2892 throw_exception (exception);
2893 }
2894 END_CATCH
2895 }
2896
2897 /* Start recording in Intel Processor Trace format. */
2898
2899 static void
2900 cmd_record_btrace_pt_start (const char *args, int from_tty)
2901 {
2902 if (args != NULL && *args != 0)
2903 error (_("Invalid argument."));
2904
2905 record_btrace_conf.format = BTRACE_FORMAT_PT;
2906
2907 TRY
2908 {
2909 execute_command ("target record-btrace", from_tty);
2910 }
2911 CATCH (exception, RETURN_MASK_ALL)
2912 {
2913 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2914 throw_exception (exception);
2915 }
2916 END_CATCH
2917 }
2918
2919 /* Alias for "target record". */
2920
2921 static void
2922 cmd_record_btrace_start (const char *args, int from_tty)
2923 {
2924 if (args != NULL && *args != 0)
2925 error (_("Invalid argument."));
2926
2927 record_btrace_conf.format = BTRACE_FORMAT_PT;
2928
2929 TRY
2930 {
2931 execute_command ("target record-btrace", from_tty);
2932 }
2933 CATCH (exception, RETURN_MASK_ALL)
2934 {
2935 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2936
2937 TRY
2938 {
2939 execute_command ("target record-btrace", from_tty);
2940 }
2941 CATCH (exception, RETURN_MASK_ALL)
2942 {
2943 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2944 throw_exception (exception);
2945 }
2946 END_CATCH
2947 }
2948 END_CATCH
2949 }
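
/* In other words, a plain "record btrace" prefers Intel Processor Trace
   and transparently falls back to BTS; only if both "target record-btrace"
   attempts fail is the exception re-thrown, with the configured format
   reset to NONE. */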
2950
2951 /* The "set record btrace" command. */
2952
2953 static void
2954 cmd_set_record_btrace (const char *args, int from_tty)
2955 {
2956 cmd_show_list (set_record_btrace_cmdlist, from_tty, "");
2957 }
2958
2959 /* The "show record btrace" command. */
2960
2961 static void
2962 cmd_show_record_btrace (const char *args, int from_tty)
2963 {
2964 cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
2965 }
2966
2967 /* The "show record btrace replay-memory-access" command. */
2968
2969 static void
2970 cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2971 struct cmd_list_element *c, const char *value)
2972 {
2973 fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
2974 replay_memory_access);
2975 }
2976
2977 /* The "set record btrace bts" command. */
2978
2979 static void
2980 cmd_set_record_btrace_bts (const char *args, int from_tty)
2981 {
2982 printf_unfiltered (_("\"set record btrace bts\" must be followed "
2983 "by an appropriate subcommand.\n"));
2984 help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
2985 all_commands, gdb_stdout);
2986 }
2987
2988 /* The "show record btrace bts" command. */
2989
2990 static void
2991 cmd_show_record_btrace_bts (const char *args, int from_tty)
2992 {
2993 cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
2994 }
2995
2996 /* The "set record btrace pt" command. */
2997
2998 static void
2999 cmd_set_record_btrace_pt (const char *args, int from_tty)
3000 {
3001 printf_unfiltered (_("\"set record btrace pt\" must be followed "
3002 "by an appropriate subcommand.\n"));
3003 help_list (set_record_btrace_pt_cmdlist, "set record btrace pt ",
3004 all_commands, gdb_stdout);
3005 }
3006
3007 /* The "show record btrace pt" command. */
3008
3009 static void
3010 cmd_show_record_btrace_pt (const char *args, int from_tty)
3011 {
3012 cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, "");
3013 }
3014
3015 /* The "record bts buffer-size" show value function. */
3016
3017 static void
3018 show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
3019 struct cmd_list_element *c,
3020 const char *value)
3021 {
3022 fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
3023 value);
3024 }
3025
3026 /* The "record pt buffer-size" show value function. */
3027
3028 static void
3029 show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
3030 struct cmd_list_element *c,
3031 const char *value)
3032 {
3033 fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
3034 value);
3035 }
3036
3037 /* Initialize btrace commands. */
3038
3039 void
3040 _initialize_record_btrace (void)
3041 {
3042 add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
3043 _("Start branch trace recording."), &record_btrace_cmdlist,
3044 "record btrace ", 0, &record_cmdlist);
3045 add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
3046
3047 add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
3048 _("\
3049 Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
3050 The processor stores a from/to record for each branch into a cyclic buffer.\n\
3051 This format may not be available on all processors."),
3052 &record_btrace_cmdlist);
3053 add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);
3054
3055 add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
3056 _("\
3057 Start branch trace recording in Intel Processor Trace format.\n\n\
3058 This format may not be available on all processors."),
3059 &record_btrace_cmdlist);
3060 add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);
3061
3062 add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
3063 _("Set record options"), &set_record_btrace_cmdlist,
3064 "set record btrace ", 0, &set_record_cmdlist);
3065
3066 add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
3067 _("Show record options"), &show_record_btrace_cmdlist,
3068 "show record btrace ", 0, &show_record_cmdlist);
3069
3070 add_setshow_enum_cmd ("replay-memory-access", no_class,
3071 replay_memory_access_types, &replay_memory_access, _("\
3072 Set what memory accesses are allowed during replay."), _("\
3073 Show what memory accesses are allowed during replay."),
3074 _("Default is READ-ONLY.\n\n\
3075 The btrace record target does not trace data.\n\
3076 The memory therefore corresponds to the live target and not \
3077 to the current replay position.\n\n\
3078 When READ-ONLY, allow accesses to read-only memory during replay.\n\
3079 When READ-WRITE, allow accesses to read-only and read-write memory during \
3080 replay."),
3081 NULL, cmd_show_replay_memory_access,
3082 &set_record_btrace_cmdlist,
3083 &show_record_btrace_cmdlist);
3084
3085 add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
3086 _("Set record btrace bts options"),
3087 &set_record_btrace_bts_cmdlist,
3088 "set record btrace bts ", 0, &set_record_btrace_cmdlist);
3089
3090 add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
3091 _("Show record btrace bts options"),
3092 &show_record_btrace_bts_cmdlist,
3093 "show record btrace bts ", 0, &show_record_btrace_cmdlist);
3094
3095 add_setshow_uinteger_cmd ("buffer-size", no_class,
3096 &record_btrace_conf.bts.size,
3097 _("Set the record/replay bts buffer size."),
3098 _("Show the record/replay bts buffer size."), _("\
3099 When starting recording, request a trace buffer of this size. \
3100 The actual buffer size may differ from the requested size. \
3101 Use \"info record\" to see the actual buffer size.\n\n\
3102 Bigger buffers allow longer recording but also take more time to process \
3103 the recorded execution trace.\n\n\
3104 The trace buffer size may not be changed while recording."), NULL,
3105 show_record_bts_buffer_size_value,
3106 &set_record_btrace_bts_cmdlist,
3107 &show_record_btrace_bts_cmdlist);
3108
3109 add_prefix_cmd ("pt", class_support, cmd_set_record_btrace_pt,
3110 _("Set record btrace pt options"),
3111 &set_record_btrace_pt_cmdlist,
3112 "set record btrace pt ", 0, &set_record_btrace_cmdlist);
3113
3114 add_prefix_cmd ("pt", class_support, cmd_show_record_btrace_pt,
3115 _("Show record btrace pt options"),
3116 &show_record_btrace_pt_cmdlist,
3117 "show record btrace pt ", 0, &show_record_btrace_cmdlist);
3118
3119 add_setshow_uinteger_cmd ("buffer-size", no_class,
3120 &record_btrace_conf.pt.size,
3121 _("Set the record/replay pt buffer size."),
3122 _("Show the record/replay pt buffer size."), _("\
3123 Bigger buffers allow longer recording but also take more time to process \
3124 the recorded execution.\n\
3125 The actual buffer size may differ from the requested size. Use \"info record\" \
3126 to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
3127 &set_record_btrace_pt_cmdlist,
3128 &show_record_btrace_pt_cmdlist);
3129
3130 init_record_btrace_ops ();
3131 add_target (&record_btrace_ops);
3132
3133 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
3134 xcalloc, xfree);
3135
3136 record_btrace_conf.bts.size = 64 * 1024;
3137 record_btrace_conf.pt.size = 16 * 1024;
3138 }
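
/* An illustrative configuration session, using the defaults set above
   (64 KiB for the BTS buffer, 16 KiB for the PT buffer; the size actually
   granted may differ):

     (gdb) show record btrace pt buffer-size
     The record/replay pt buffer size is 16384.
     (gdb) set record btrace pt buffer-size 32768
     (gdb) record btrace pt
     (gdb) info record  */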