btrace: Store function segments as objects.
gdb/record-btrace.c (deliverable/binutils-gdb.git)
1 /* Branch trace support for GDB, the GNU debugger.
2
3 Copyright (C) 2013-2017 Free Software Foundation, Inc.
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include "record.h"
24 #include "record-btrace.h"
25 #include "gdbthread.h"
26 #include "target.h"
27 #include "gdbcmd.h"
28 #include "disasm.h"
29 #include "observer.h"
30 #include "cli/cli-utils.h"
31 #include "source.h"
32 #include "ui-out.h"
33 #include "symtab.h"
34 #include "filenames.h"
35 #include "regcache.h"
36 #include "frame-unwind.h"
37 #include "hashtab.h"
38 #include "infrun.h"
39 #include "event-loop.h"
40 #include "inf-loop.h"
41 #include "vec.h"
42 #include <algorithm>
43
44 /* The target_ops of record-btrace. */
45 static struct target_ops record_btrace_ops;
46
47 /* A new thread observer enabling branch tracing for the new thread. */
48 static struct observer *record_btrace_thread_observer;
49
50 /* Memory access types used in set/show record btrace replay-memory-access. */
51 static const char replay_memory_access_read_only[] = "read-only";
52 static const char replay_memory_access_read_write[] = "read-write";
53 static const char *const replay_memory_access_types[] =
54 {
55 replay_memory_access_read_only,
56 replay_memory_access_read_write,
57 NULL
58 };
59
60 /* The currently allowed replay memory access type. */
61 static const char *replay_memory_access = replay_memory_access_read_only;
62
63 /* Command lists for "set/show record btrace". */
64 static struct cmd_list_element *set_record_btrace_cmdlist;
65 static struct cmd_list_element *show_record_btrace_cmdlist;
66
67 /* The execution direction of the last resume we got. See record-full.c. */
68 static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;
69
70 /* The async event handler for reverse/replay execution. */
71 static struct async_event_handler *record_btrace_async_inferior_event_handler;
72
73 /* A flag indicating that we are currently generating a core file. */
74 static int record_btrace_generating_corefile;
75
76 /* The current branch trace configuration. */
77 static struct btrace_config record_btrace_conf;
78
79 /* Command list for "record btrace". */
80 static struct cmd_list_element *record_btrace_cmdlist;
81
82 /* Command lists for "set/show record btrace bts". */
83 static struct cmd_list_element *set_record_btrace_bts_cmdlist;
84 static struct cmd_list_element *show_record_btrace_bts_cmdlist;
85
86 /* Command lists for "set/show record btrace pt". */
87 static struct cmd_list_element *set_record_btrace_pt_cmdlist;
88 static struct cmd_list_element *show_record_btrace_pt_cmdlist;
89
90 /* Print a record-btrace debug message. Use do ... while (0) to avoid
91 ambiguities when used in if statements. */
92
93 #define DEBUG(msg, args...) \
94 do \
95 { \
96 if (record_debug != 0) \
97 fprintf_unfiltered (gdb_stdlog, \
98 "[record-btrace] " msg "\n", ##args); \
99 } \
100 while (0)
101
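/* A minimal usage sketch for the DEBUG macro above (illustrative only,
   not compiled; STEPPING and TP are hypothetical): the do { } while (0)
   wrapper keeps the macro safe in unbraced if/else statements. */
#if 0
if (stepping)
  DEBUG ("stepping thread %s", target_pid_to_str (tp->ptid));
else
  DEBUG ("continuing thread %s", target_pid_to_str (tp->ptid));
#endif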
102
103 /* Update the branch trace for the current thread and return a pointer to its
104 thread_info.
105
106 Throws an error if there is no thread or no trace. This function never
107 returns NULL. */
108
109 static struct thread_info *
110 require_btrace_thread (void)
111 {
112 struct thread_info *tp;
113
114 DEBUG ("require");
115
116 tp = find_thread_ptid (inferior_ptid);
117 if (tp == NULL)
118 error (_("No thread."));
119
120 validate_registers_access ();
121
122 btrace_fetch (tp);
123
124 if (btrace_is_empty (tp))
125 error (_("No trace."));
126
127 return tp;
128 }
129
130 /* Update the branch trace for the current thread and return a pointer to its
131 branch trace information struct.
132
133 Throws an error if there is no thread or no trace. This function never
134 returns NULL. */
135
136 static struct btrace_thread_info *
137 require_btrace (void)
138 {
139 struct thread_info *tp;
140
141 tp = require_btrace_thread ();
142
143 return &tp->btrace;
144 }
145
146 /* Enable branch tracing for one thread. Warn on errors. */
147
148 static void
149 record_btrace_enable_warn (struct thread_info *tp)
150 {
151 TRY
152 {
153 btrace_enable (tp, &record_btrace_conf);
154 }
155 CATCH (error, RETURN_MASK_ERROR)
156 {
157 warning ("%s", error.message);
158 }
159 END_CATCH
160 }
161
162 /* Callback function to disable branch tracing for one thread. */
163
164 static void
165 record_btrace_disable_callback (void *arg)
166 {
167 struct thread_info *tp = (struct thread_info *) arg;
168
169 btrace_disable (tp);
170 }
171
172 /* Enable automatic tracing of new threads. */
173
174 static void
175 record_btrace_auto_enable (void)
176 {
177 DEBUG ("attach thread observer");
178
179 record_btrace_thread_observer
180 = observer_attach_new_thread (record_btrace_enable_warn);
181 }
182
183 /* Disable automatic tracing of new threads. */
184
185 static void
186 record_btrace_auto_disable (void)
187 {
188   /* The observer may already have been detached.  */
189 if (record_btrace_thread_observer == NULL)
190 return;
191
192 DEBUG ("detach thread observer");
193
194 observer_detach_new_thread (record_btrace_thread_observer);
195 record_btrace_thread_observer = NULL;
196 }
197
198 /* The record-btrace async event handler function. */
199
200 static void
201 record_btrace_handle_async_inferior_event (gdb_client_data data)
202 {
203 inferior_event_handler (INF_REG_EVENT, NULL);
204 }
205
206 /* See record-btrace.h. */
207
208 void
209 record_btrace_push_target (void)
210 {
211 const char *format;
212
213 record_btrace_auto_enable ();
214
215 push_target (&record_btrace_ops);
216
217 record_btrace_async_inferior_event_handler
218 = create_async_event_handler (record_btrace_handle_async_inferior_event,
219 NULL);
220 record_btrace_generating_corefile = 0;
221
222 format = btrace_format_short_string (record_btrace_conf.format);
223 observer_notify_record_changed (current_inferior (), 1, "btrace", format);
224 }
225
226 /* The to_open method of target record-btrace. */
227
228 static void
229 record_btrace_open (const char *args, int from_tty)
230 {
231 struct cleanup *disable_chain;
232 struct thread_info *tp;
233
234 DEBUG ("open");
235
236 record_preopen ();
237
238 if (!target_has_execution)
239 error (_("The program is not being run."));
240
241 gdb_assert (record_btrace_thread_observer == NULL);
242
243 disable_chain = make_cleanup (null_cleanup, NULL);
244 ALL_NON_EXITED_THREADS (tp)
245 if (args == NULL || *args == 0 || number_is_in_list (args, tp->global_num))
246 {
247 btrace_enable (tp, &record_btrace_conf);
248
249 make_cleanup (record_btrace_disable_callback, tp);
250 }
251
252 record_btrace_push_target ();
253
254 discard_cleanups (disable_chain);
255 }
256
257 /* The to_stop_recording method of target record-btrace. */
258
259 static void
260 record_btrace_stop_recording (struct target_ops *self)
261 {
262 struct thread_info *tp;
263
264 DEBUG ("stop recording");
265
266 record_btrace_auto_disable ();
267
268 ALL_NON_EXITED_THREADS (tp)
269 if (tp->btrace.target != NULL)
270 btrace_disable (tp);
271 }
272
273 /* The to_disconnect method of target record-btrace. */
274
275 static void
276 record_btrace_disconnect (struct target_ops *self, const char *args,
277 int from_tty)
278 {
279 struct target_ops *beneath = self->beneath;
280
281 /* Do not stop recording, just clean up GDB side. */
282 unpush_target (self);
283
284 /* Forward disconnect. */
285 beneath->to_disconnect (beneath, args, from_tty);
286 }
287
288 /* The to_close method of target record-btrace. */
289
290 static void
291 record_btrace_close (struct target_ops *self)
292 {
293 struct thread_info *tp;
294
295 if (record_btrace_async_inferior_event_handler != NULL)
296 delete_async_event_handler (&record_btrace_async_inferior_event_handler);
297
298 /* Make sure automatic recording gets disabled even if we did not stop
299 recording before closing the record-btrace target. */
300 record_btrace_auto_disable ();
301
302 /* We should have already stopped recording.
303 Tear down btrace in case we have not. */
304 ALL_NON_EXITED_THREADS (tp)
305 btrace_teardown (tp);
306 }
307
308 /* The to_async method of target record-btrace. */
309
310 static void
311 record_btrace_async (struct target_ops *ops, int enable)
312 {
313 if (enable)
314 mark_async_event_handler (record_btrace_async_inferior_event_handler);
315 else
316 clear_async_event_handler (record_btrace_async_inferior_event_handler);
317
318 ops->beneath->to_async (ops->beneath, enable);
319 }
320
321 /* Adjust the size and return a human-readable size suffix. */
322
323 static const char *
324 record_btrace_adjust_size (unsigned int *size)
325 {
326 unsigned int sz;
327
328 sz = *size;
329
330 if ((sz & ((1u << 30) - 1)) == 0)
331 {
332 *size = sz >> 30;
333 return "GB";
334 }
335 else if ((sz & ((1u << 20) - 1)) == 0)
336 {
337 *size = sz >> 20;
338 return "MB";
339 }
340 else if ((sz & ((1u << 10) - 1)) == 0)
341 {
342 *size = sz >> 10;
343 return "kB";
344 }
345 else
346 return "";
347 }
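/* A worked example for record_btrace_adjust_size (illustrative only,
   not compiled): the suffix is only stripped when the size is an exact
   multiple of the corresponding power of two. */
#if 0
unsigned int size = 4u << 20;	/* 4194304 bytes. */
const char *suffix;

suffix = record_btrace_adjust_size (&size);	/* size == 4, suffix == "MB". */

size = 4096;
suffix = record_btrace_adjust_size (&size);	/* size == 4, suffix == "kB". */

size = 1000;
suffix = record_btrace_adjust_size (&size);	/* size == 1000, suffix == "". */
#endif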
348
349 /* Print a BTS configuration. */
350
351 static void
352 record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
353 {
354 const char *suffix;
355 unsigned int size;
356
357 size = conf->size;
358 if (size > 0)
359 {
360 suffix = record_btrace_adjust_size (&size);
361 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
362 }
363 }
364
365 /* Print an Intel Processor Trace configuration. */
366
367 static void
368 record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
369 {
370 const char *suffix;
371 unsigned int size;
372
373 size = conf->size;
374 if (size > 0)
375 {
376 suffix = record_btrace_adjust_size (&size);
377 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
378 }
379 }
380
381 /* Print a branch tracing configuration. */
382
383 static void
384 record_btrace_print_conf (const struct btrace_config *conf)
385 {
386 printf_unfiltered (_("Recording format: %s.\n"),
387 btrace_format_string (conf->format));
388
389 switch (conf->format)
390 {
391 case BTRACE_FORMAT_NONE:
392 return;
393
394 case BTRACE_FORMAT_BTS:
395 record_btrace_print_bts_conf (&conf->bts);
396 return;
397
398 case BTRACE_FORMAT_PT:
399 record_btrace_print_pt_conf (&conf->pt);
400 return;
401 }
402
403   internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
404 }
405
406 /* The to_info_record method of target record-btrace. */
407
408 static void
409 record_btrace_info (struct target_ops *self)
410 {
411 struct btrace_thread_info *btinfo;
412 const struct btrace_config *conf;
413 struct thread_info *tp;
414 unsigned int insns, calls, gaps;
415
416 DEBUG ("info");
417
418 tp = find_thread_ptid (inferior_ptid);
419 if (tp == NULL)
420 error (_("No thread."));
421
422 validate_registers_access ();
423
424 btinfo = &tp->btrace;
425
426 conf = btrace_conf (btinfo);
427 if (conf != NULL)
428 record_btrace_print_conf (conf);
429
430 btrace_fetch (tp);
431
432 insns = 0;
433 calls = 0;
434 gaps = 0;
435
436 if (!btrace_is_empty (tp))
437 {
438 struct btrace_call_iterator call;
439 struct btrace_insn_iterator insn;
440
441 btrace_call_end (&call, btinfo);
442 btrace_call_prev (&call, 1);
443 calls = btrace_call_number (&call);
444
445 btrace_insn_end (&insn, btinfo);
446 insns = btrace_insn_number (&insn);
447
448 /* If the last instruction is not a gap, it is the current instruction
449 that is not actually part of the record. */
450 if (btrace_insn_get (&insn) != NULL)
451 insns -= 1;
452
453 gaps = btinfo->ngaps;
454 }
455
456 printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
457 "for thread %s (%s).\n"), insns, calls, gaps,
458 print_thread_id (tp), target_pid_to_str (tp->ptid));
459
460 if (btrace_is_replaying (tp))
461 printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
462 btrace_insn_number (btinfo->replay));
463 }
464
465 /* Print a decode error. */
466
467 static void
468 btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
469 enum btrace_format format)
470 {
471 const char *errstr = btrace_decode_error (format, errcode);
472
473 uiout->text (_("["));
474 /* ERRCODE > 0 indicates notifications on BTRACE_FORMAT_PT. */
475 if (!(format == BTRACE_FORMAT_PT && errcode > 0))
476 {
477 uiout->text (_("decode error ("));
478 uiout->field_int ("errcode", errcode);
479 uiout->text (_("): "));
480 }
481 uiout->text (errstr);
482 uiout->text (_("]\n"));
483 }
484
485 /* Print an unsigned int. */
486
487 static void
488 ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
489 {
490 uiout->field_fmt (fld, "%u", val);
491 }
492
493 /* A range of source lines. */
494
495 struct btrace_line_range
496 {
497 /* The symtab this line is from. */
498 struct symtab *symtab;
499
500 /* The first line (inclusive). */
501 int begin;
502
503 /* The last line (exclusive). */
504 int end;
505 };
506
507 /* Construct a line range. */
508
509 static struct btrace_line_range
510 btrace_mk_line_range (struct symtab *symtab, int begin, int end)
511 {
512 struct btrace_line_range range;
513
514 range.symtab = symtab;
515 range.begin = begin;
516 range.end = end;
517
518 return range;
519 }
520
521 /* Add a line to a line range. */
522
523 static struct btrace_line_range
524 btrace_line_range_add (struct btrace_line_range range, int line)
525 {
526 if (range.end <= range.begin)
527 {
528 /* This is the first entry. */
529 range.begin = line;
530 range.end = line + 1;
531 }
532 else if (line < range.begin)
533 range.begin = line;
534   else if (range.end <= line)
535     range.end = line + 1;
536
537 return range;
538 }
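/* Illustrative example (not compiled): line ranges are half-open.
   Starting from an empty range, adding lines 42, 40, and 45 yields
   [40; 46), i.e. lines 40 through 45 inclusive. */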
539
540 /* Return non-zero if RANGE is empty, zero otherwise. */
541
542 static int
543 btrace_line_range_is_empty (struct btrace_line_range range)
544 {
545 return range.end <= range.begin;
546 }
547
548 /* Return non-zero if LHS contains RHS, zero otherwise. */
549
550 static int
551 btrace_line_range_contains_range (struct btrace_line_range lhs,
552 struct btrace_line_range rhs)
553 {
554 return ((lhs.symtab == rhs.symtab)
555 && (lhs.begin <= rhs.begin)
556 && (rhs.end <= lhs.end));
557 }
558
559 /* Find the line range associated with PC. */
560
561 static struct btrace_line_range
562 btrace_find_line_range (CORE_ADDR pc)
563 {
564 struct btrace_line_range range;
565 struct linetable_entry *lines;
566 struct linetable *ltable;
567 struct symtab *symtab;
568 int nlines, i;
569
570 symtab = find_pc_line_symtab (pc);
571 if (symtab == NULL)
572 return btrace_mk_line_range (NULL, 0, 0);
573
574 ltable = SYMTAB_LINETABLE (symtab);
575 if (ltable == NULL)
576 return btrace_mk_line_range (symtab, 0, 0);
577
578 nlines = ltable->nitems;
579 lines = ltable->item;
580 if (nlines <= 0)
581 return btrace_mk_line_range (symtab, 0, 0);
582
583 range = btrace_mk_line_range (symtab, 0, 0);
584 for (i = 0; i < nlines - 1; i++)
585 {
586 if ((lines[i].pc == pc) && (lines[i].line != 0))
587 range = btrace_line_range_add (range, lines[i].line);
588 }
589
590 return range;
591 }
592
593 /* Print source lines in LINES to UIOUT.
594
595 UI_ITEM_CHAIN is a cleanup chain for the last source line and the
596 instructions corresponding to that source line. When printing a new source
597 line, we do the cleanups for the open chain and open a new cleanup chain for
598 the new source line. If the source line range in LINES is not empty, this
599 function will leave the cleanup chain for the last printed source line open
600 so instructions can be added to it. */
601
602 static void
603 btrace_print_lines (struct btrace_line_range lines, struct ui_out *uiout,
604 struct cleanup **ui_item_chain, int flags)
605 {
606 print_source_lines_flags psl_flags;
607 int line;
608
609 psl_flags = 0;
610 if (flags & DISASSEMBLY_FILENAME)
611 psl_flags |= PRINT_SOURCE_LINES_FILENAME;
612
613 for (line = lines.begin; line < lines.end; ++line)
614 {
615 if (*ui_item_chain != NULL)
616 do_cleanups (*ui_item_chain);
617
618 *ui_item_chain
619 = make_cleanup_ui_out_tuple_begin_end (uiout, "src_and_asm_line");
620
621 print_source_lines (lines.symtab, line, line + 1, psl_flags);
622
623 make_cleanup_ui_out_list_begin_end (uiout, "line_asm_insn");
624 }
625 }
626
627 /* Disassemble a section of the recorded instruction trace. */
628
629 static void
630 btrace_insn_history (struct ui_out *uiout,
631 const struct btrace_thread_info *btinfo,
632 const struct btrace_insn_iterator *begin,
633 const struct btrace_insn_iterator *end, int flags)
634 {
635 struct cleanup *cleanups, *ui_item_chain;
636 struct gdbarch *gdbarch;
637 struct btrace_insn_iterator it;
638 struct btrace_line_range last_lines;
639
640 DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
641 btrace_insn_number (end));
642
643 flags |= DISASSEMBLY_SPECULATIVE;
644
645 gdbarch = target_gdbarch ();
646 last_lines = btrace_mk_line_range (NULL, 0, 0);
647
648 cleanups = make_cleanup_ui_out_list_begin_end (uiout, "asm_insns");
649
650 /* UI_ITEM_CHAIN is a cleanup chain for the last source line and the
651 instructions corresponding to that line. */
652 ui_item_chain = NULL;
653
654 gdb_pretty_print_disassembler disasm (gdbarch);
655
656 for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
657 {
658 const struct btrace_insn *insn;
659
660 insn = btrace_insn_get (&it);
661
662 /* A NULL instruction indicates a gap in the trace. */
663 if (insn == NULL)
664 {
665 const struct btrace_config *conf;
666
667 conf = btrace_conf (btinfo);
668
669 	  /* We have trace, so we must have a configuration.  */
670 gdb_assert (conf != NULL);
671
672 uiout->field_fmt ("insn-number", "%u",
673 btrace_insn_number (&it));
674 uiout->text ("\t");
675
676 btrace_ui_out_decode_error (uiout, btrace_insn_get_error (&it),
677 conf->format);
678 }
679 else
680 {
681 struct disasm_insn dinsn;
682
683 if ((flags & DISASSEMBLY_SOURCE) != 0)
684 {
685 struct btrace_line_range lines;
686
687 lines = btrace_find_line_range (insn->pc);
688 if (!btrace_line_range_is_empty (lines)
689 && !btrace_line_range_contains_range (last_lines, lines))
690 {
691 btrace_print_lines (lines, uiout, &ui_item_chain, flags);
692 last_lines = lines;
693 }
694 else if (ui_item_chain == NULL)
695 {
696 ui_item_chain
697 = make_cleanup_ui_out_tuple_begin_end (uiout,
698 "src_and_asm_line");
699 /* No source information. */
700 make_cleanup_ui_out_list_begin_end (uiout, "line_asm_insn");
701 }
702
703 gdb_assert (ui_item_chain != NULL);
704 }
705
706 memset (&dinsn, 0, sizeof (dinsn));
707 dinsn.number = btrace_insn_number (&it);
708 dinsn.addr = insn->pc;
709
710 if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
711 dinsn.is_speculative = 1;
712
713 disasm.pretty_print_insn (uiout, &dinsn, flags);
714 }
715 }
716
717 do_cleanups (cleanups);
718 }
719
720 /* The to_insn_history method of target record-btrace. */
721
722 static void
723 record_btrace_insn_history (struct target_ops *self, int size, int flags)
724 {
725 struct btrace_thread_info *btinfo;
726 struct btrace_insn_history *history;
727 struct btrace_insn_iterator begin, end;
728 struct ui_out *uiout;
729 unsigned int context, covered;
730
731 uiout = current_uiout;
732 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
733 context = abs (size);
734 if (context == 0)
735 error (_("Bad record instruction-history-size."));
736
737 btinfo = require_btrace ();
738 history = btinfo->insn_history;
739 if (history == NULL)
740 {
741 struct btrace_insn_iterator *replay;
742
743 DEBUG ("insn-history (0x%x): %d", flags, size);
744
745 /* If we're replaying, we start at the replay position. Otherwise, we
746 start at the tail of the trace. */
747 replay = btinfo->replay;
748 if (replay != NULL)
749 begin = *replay;
750 else
751 btrace_insn_end (&begin, btinfo);
752
753 /* We start from here and expand in the requested direction. Then we
754 expand in the other direction, as well, to fill up any remaining
755 context. */
756 end = begin;
757 if (size < 0)
758 {
759 /* We want the current position covered, as well. */
760 covered = btrace_insn_next (&end, 1);
761 covered += btrace_insn_prev (&begin, context - covered);
762 covered += btrace_insn_next (&end, context - covered);
763 }
764 else
765 {
766 covered = btrace_insn_next (&end, context);
767 covered += btrace_insn_prev (&begin, context - covered);
768 }
769 }
770 else
771 {
772 begin = history->begin;
773 end = history->end;
774
775 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
776 btrace_insn_number (&begin), btrace_insn_number (&end));
777
778 if (size < 0)
779 {
780 end = begin;
781 covered = btrace_insn_prev (&begin, context);
782 }
783 else
784 {
785 begin = end;
786 covered = btrace_insn_next (&end, context);
787 }
788 }
789
790 if (covered > 0)
791 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
792 else
793 {
794 if (size < 0)
795 printf_unfiltered (_("At the start of the branch trace record.\n"));
796 else
797 printf_unfiltered (_("At the end of the branch trace record.\n"));
798 }
799
800 btrace_set_insn_history (btinfo, &begin, &end);
801 }
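/* Illustrative example (not compiled): "record instruction-history -"
   with a context of 10 while replaying at instruction 50 first covers
   the current position as [50; 51), then expands backwards to
   [41; 51). Repeating the command continues from the stored history
   and prints [31; 41). */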
802
803 /* The to_insn_history_range method of target record-btrace. */
804
805 static void
806 record_btrace_insn_history_range (struct target_ops *self,
807 ULONGEST from, ULONGEST to, int flags)
808 {
809 struct btrace_thread_info *btinfo;
810 struct btrace_insn_history *history;
811 struct btrace_insn_iterator begin, end;
812 struct ui_out *uiout;
813 unsigned int low, high;
814 int found;
815
816 uiout = current_uiout;
817 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
818 low = from;
819 high = to;
820
821 DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);
822
823 /* Check for wrap-arounds. */
824 if (low != from || high != to)
825 error (_("Bad range."));
826
827 if (high < low)
828 error (_("Bad range."));
829
830 btinfo = require_btrace ();
831
832 found = btrace_find_insn_by_number (&begin, btinfo, low);
833 if (found == 0)
834 error (_("Range out of bounds."));
835
836 found = btrace_find_insn_by_number (&end, btinfo, high);
837 if (found == 0)
838 {
839 /* Silently truncate the range. */
840 btrace_insn_end (&end, btinfo);
841 }
842 else
843 {
844 /* We want both begin and end to be inclusive. */
845 btrace_insn_next (&end, 1);
846 }
847
848 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
849 btrace_set_insn_history (btinfo, &begin, &end);
850 }
851
852 /* The to_insn_history_from method of target record-btrace. */
853
854 static void
855 record_btrace_insn_history_from (struct target_ops *self,
856 ULONGEST from, int size, int flags)
857 {
858 ULONGEST begin, end, context;
859
860 context = abs (size);
861 if (context == 0)
862 error (_("Bad record instruction-history-size."));
863
864 if (size < 0)
865 {
866 end = from;
867
868 if (from < context)
869 begin = 0;
870 else
871 begin = from - context + 1;
872 }
873 else
874 {
875 begin = from;
876 end = from + context - 1;
877
878 /* Check for wrap-around. */
879 if (end < begin)
880 end = ULONGEST_MAX;
881 }
882
883 record_btrace_insn_history_range (self, begin, end, flags);
884 }
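/* Illustrative example (not compiled): with FROM == 100 and
   SIZE == -10, the requested inclusive range is [91; 100]; with
   FROM == 100 and SIZE == 10, it is [100; 109], clamped to
   ULONGEST_MAX on wrap-around. */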
885
886 /* Print the instruction number range for a function call history line. */
887
888 static void
889 btrace_call_history_insn_range (struct ui_out *uiout,
890 const struct btrace_function *bfun)
891 {
892 unsigned int begin, end, size;
893
894 size = VEC_length (btrace_insn_s, bfun->insn);
895 gdb_assert (size > 0);
896
897 begin = bfun->insn_offset;
898 end = begin + size - 1;
899
900 ui_out_field_uint (uiout, "insn begin", begin);
901 uiout->text (",");
902 ui_out_field_uint (uiout, "insn end", end);
903 }
904
905 /* Compute the lowest and highest source line for the instructions in BFUN
906 and return them in PBEGIN and PEND.
907 Ignore instructions that can't be mapped to BFUN, e.g. instructions that
908 result from inlining or macro expansion. */
909
910 static void
911 btrace_compute_src_line_range (const struct btrace_function *bfun,
912 int *pbegin, int *pend)
913 {
914 struct btrace_insn *insn;
915 struct symtab *symtab;
916 struct symbol *sym;
917 unsigned int idx;
918 int begin, end;
919
920 begin = INT_MAX;
921 end = INT_MIN;
922
923 sym = bfun->sym;
924 if (sym == NULL)
925 goto out;
926
927 symtab = symbol_symtab (sym);
928
929 for (idx = 0; VEC_iterate (btrace_insn_s, bfun->insn, idx, insn); ++idx)
930 {
931 struct symtab_and_line sal;
932
933 sal = find_pc_line (insn->pc, 0);
934 if (sal.symtab != symtab || sal.line == 0)
935 continue;
936
937 begin = std::min (begin, sal.line);
938 end = std::max (end, sal.line);
939 }
940
941 out:
942 *pbegin = begin;
943 *pend = end;
944 }
945
946 /* Print the source line information for a function call history line. */
947
948 static void
949 btrace_call_history_src_line (struct ui_out *uiout,
950 const struct btrace_function *bfun)
951 {
952 struct symbol *sym;
953 int begin, end;
954
955 sym = bfun->sym;
956 if (sym == NULL)
957 return;
958
959 uiout->field_string ("file",
960 symtab_to_filename_for_display (symbol_symtab (sym)));
961
962 btrace_compute_src_line_range (bfun, &begin, &end);
963 if (end < begin)
964 return;
965
966 uiout->text (":");
967 uiout->field_int ("min line", begin);
968
969 if (end == begin)
970 return;
971
972 uiout->text (",");
973 uiout->field_int ("max line", end);
974 }
975
976 /* Get the name of a branch trace function. */
977
978 static const char *
979 btrace_get_bfun_name (const struct btrace_function *bfun)
980 {
981 struct minimal_symbol *msym;
982 struct symbol *sym;
983
984 if (bfun == NULL)
985 return "??";
986
987 msym = bfun->msym;
988 sym = bfun->sym;
989
990 if (sym != NULL)
991 return SYMBOL_PRINT_NAME (sym);
992 else if (msym != NULL)
993 return MSYMBOL_PRINT_NAME (msym);
994 else
995 return "??";
996 }
997
998 /* Disassemble a section of the recorded function trace. */
999
1000 static void
1001 btrace_call_history (struct ui_out *uiout,
1002 const struct btrace_thread_info *btinfo,
1003 const struct btrace_call_iterator *begin,
1004 const struct btrace_call_iterator *end,
1005 int int_flags)
1006 {
1007 struct btrace_call_iterator it;
1008 record_print_flags flags = (enum record_print_flag) int_flags;
1009
1010 DEBUG ("ftrace (0x%x): [%u; %u)", int_flags, btrace_call_number (begin),
1011 btrace_call_number (end));
1012
1013 for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
1014 {
1015 const struct btrace_function *bfun;
1016 struct minimal_symbol *msym;
1017 struct symbol *sym;
1018
1019 bfun = btrace_call_get (&it);
1020 sym = bfun->sym;
1021 msym = bfun->msym;
1022
1023 /* Print the function index. */
1024 ui_out_field_uint (uiout, "index", bfun->number);
1025 uiout->text ("\t");
1026
1027 /* Indicate gaps in the trace. */
1028 if (bfun->errcode != 0)
1029 {
1030 const struct btrace_config *conf;
1031
1032 conf = btrace_conf (btinfo);
1033
1034 	  /* We have trace, so we must have a configuration.  */
1035 gdb_assert (conf != NULL);
1036
1037 btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);
1038
1039 continue;
1040 }
1041
1042 if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
1043 {
1044 int level = bfun->level + btinfo->level, i;
1045
1046 for (i = 0; i < level; ++i)
1047 uiout->text (" ");
1048 }
1049
1050 if (sym != NULL)
1051 uiout->field_string ("function", SYMBOL_PRINT_NAME (sym));
1052 else if (msym != NULL)
1053 uiout->field_string ("function", MSYMBOL_PRINT_NAME (msym));
1054 else if (!uiout->is_mi_like_p ())
1055 uiout->field_string ("function", "??");
1056
1057 if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
1058 {
1059 uiout->text (_("\tinst "));
1060 btrace_call_history_insn_range (uiout, bfun);
1061 }
1062
1063 if ((flags & RECORD_PRINT_SRC_LINE) != 0)
1064 {
1065 uiout->text (_("\tat "));
1066 btrace_call_history_src_line (uiout, bfun);
1067 }
1068
1069 uiout->text ("\n");
1070 }
1071 }
1072
1073 /* The to_call_history method of target record-btrace. */
1074
1075 static void
1076 record_btrace_call_history (struct target_ops *self, int size, int int_flags)
1077 {
1078 struct btrace_thread_info *btinfo;
1079 struct btrace_call_history *history;
1080 struct btrace_call_iterator begin, end;
1081 struct ui_out *uiout;
1082 unsigned int context, covered;
1083 record_print_flags flags = (enum record_print_flag) int_flags;
1084
1085 uiout = current_uiout;
1086 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
1087 context = abs (size);
1088 if (context == 0)
1089 error (_("Bad record function-call-history-size."));
1090
1091 btinfo = require_btrace ();
1092 history = btinfo->call_history;
1093 if (history == NULL)
1094 {
1095 struct btrace_insn_iterator *replay;
1096
1097 DEBUG ("call-history (0x%x): %d", int_flags, size);
1098
1099 /* If we're replaying, we start at the replay position. Otherwise, we
1100 start at the tail of the trace. */
1101 replay = btinfo->replay;
1102 if (replay != NULL)
1103 {
1104 begin.btinfo = btinfo;
1105 begin.index = replay->call_index;
1106 }
1107 else
1108 btrace_call_end (&begin, btinfo);
1109
1110 /* We start from here and expand in the requested direction. Then we
1111 expand in the other direction, as well, to fill up any remaining
1112 context. */
1113 end = begin;
1114 if (size < 0)
1115 {
1116 /* We want the current position covered, as well. */
1117 covered = btrace_call_next (&end, 1);
1118 covered += btrace_call_prev (&begin, context - covered);
1119 covered += btrace_call_next (&end, context - covered);
1120 }
1121 else
1122 {
1123 covered = btrace_call_next (&end, context);
1124 	  covered += btrace_call_prev (&begin, context - covered);
1125 }
1126 }
1127 else
1128 {
1129 begin = history->begin;
1130 end = history->end;
1131
1132 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", int_flags, size,
1133 btrace_call_number (&begin), btrace_call_number (&end));
1134
1135 if (size < 0)
1136 {
1137 end = begin;
1138 covered = btrace_call_prev (&begin, context);
1139 }
1140 else
1141 {
1142 begin = end;
1143 covered = btrace_call_next (&end, context);
1144 }
1145 }
1146
1147 if (covered > 0)
1148 btrace_call_history (uiout, btinfo, &begin, &end, flags);
1149 else
1150 {
1151 if (size < 0)
1152 printf_unfiltered (_("At the start of the branch trace record.\n"));
1153 else
1154 printf_unfiltered (_("At the end of the branch trace record.\n"));
1155 }
1156
1157 btrace_set_call_history (btinfo, &begin, &end);
1158 }
1159
1160 /* The to_call_history_range method of target record-btrace. */
1161
1162 static void
1163 record_btrace_call_history_range (struct target_ops *self,
1164 ULONGEST from, ULONGEST to,
1165 int int_flags)
1166 {
1167 struct btrace_thread_info *btinfo;
1168 struct btrace_call_history *history;
1169 struct btrace_call_iterator begin, end;
1170 struct ui_out *uiout;
1171 unsigned int low, high;
1172 int found;
1173 record_print_flags flags = (enum record_print_flag) int_flags;
1174
1175 uiout = current_uiout;
1176 ui_out_emit_tuple tuple_emitter (uiout, "func history");
1177 low = from;
1178 high = to;
1179
1180 DEBUG ("call-history (0x%x): [%u; %u)", int_flags, low, high);
1181
1182 /* Check for wrap-arounds. */
1183 if (low != from || high != to)
1184 error (_("Bad range."));
1185
1186 if (high < low)
1187 error (_("Bad range."));
1188
1189 btinfo = require_btrace ();
1190
1191 found = btrace_find_call_by_number (&begin, btinfo, low);
1192 if (found == 0)
1193 error (_("Range out of bounds."));
1194
1195 found = btrace_find_call_by_number (&end, btinfo, high);
1196 if (found == 0)
1197 {
1198 /* Silently truncate the range. */
1199 btrace_call_end (&end, btinfo);
1200 }
1201 else
1202 {
1203 /* We want both begin and end to be inclusive. */
1204 btrace_call_next (&end, 1);
1205 }
1206
1207 btrace_call_history (uiout, btinfo, &begin, &end, flags);
1208 btrace_set_call_history (btinfo, &begin, &end);
1209 }
1210
1211 /* The to_call_history_from method of target record-btrace. */
1212
1213 static void
1214 record_btrace_call_history_from (struct target_ops *self,
1215 ULONGEST from, int size,
1216 int int_flags)
1217 {
1218 ULONGEST begin, end, context;
1219 record_print_flags flags = (enum record_print_flag) int_flags;
1220
1221 context = abs (size);
1222 if (context == 0)
1223 error (_("Bad record function-call-history-size."));
1224
1225 if (size < 0)
1226 {
1227 end = from;
1228
1229 if (from < context)
1230 begin = 0;
1231 else
1232 begin = from - context + 1;
1233 }
1234 else
1235 {
1236 begin = from;
1237 end = from + context - 1;
1238
1239 /* Check for wrap-around. */
1240 if (end < begin)
1241 end = ULONGEST_MAX;
1242 }
1243
1244 record_btrace_call_history_range (self, begin, end, flags);
1245 }
1246
1247 /* The to_record_method method of target record-btrace. */
1248
1249 static enum record_method
1250 record_btrace_record_method (struct target_ops *self, ptid_t ptid)
1251 {
1252 const struct btrace_config *config;
1253 struct thread_info * const tp = find_thread_ptid (ptid);
1254
1255 if (tp == NULL)
1256 error (_("No thread."));
1257
1258 if (tp->btrace.target == NULL)
1259 return RECORD_METHOD_NONE;
1260
1261 return RECORD_METHOD_BTRACE;
1262 }
1263
1264 /* The to_record_is_replaying method of target record-btrace. */
1265
1266 static int
1267 record_btrace_is_replaying (struct target_ops *self, ptid_t ptid)
1268 {
1269 struct thread_info *tp;
1270
1271 ALL_NON_EXITED_THREADS (tp)
1272 if (ptid_match (tp->ptid, ptid) && btrace_is_replaying (tp))
1273 return 1;
1274
1275 return 0;
1276 }
1277
1278 /* The to_record_will_replay method of target record-btrace. */
1279
1280 static int
1281 record_btrace_will_replay (struct target_ops *self, ptid_t ptid, int dir)
1282 {
1283 return dir == EXEC_REVERSE || record_btrace_is_replaying (self, ptid);
1284 }
1285
1286 /* The to_xfer_partial method of target record-btrace. */
1287
1288 static enum target_xfer_status
1289 record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
1290 const char *annex, gdb_byte *readbuf,
1291 const gdb_byte *writebuf, ULONGEST offset,
1292 ULONGEST len, ULONGEST *xfered_len)
1293 {
1294 struct target_ops *t;
1295
1296 /* Filter out requests that don't make sense during replay. */
1297 if (replay_memory_access == replay_memory_access_read_only
1298 && !record_btrace_generating_corefile
1299 && record_btrace_is_replaying (ops, inferior_ptid))
1300 {
1301 switch (object)
1302 {
1303 case TARGET_OBJECT_MEMORY:
1304 {
1305 struct target_section *section;
1306
1307 /* We do not allow writing memory in general. */
1308 if (writebuf != NULL)
1309 {
1310 *xfered_len = len;
1311 return TARGET_XFER_UNAVAILABLE;
1312 }
1313
1314 /* We allow reading readonly memory. */
1315 section = target_section_by_addr (ops, offset);
1316 if (section != NULL)
1317 {
1318 /* Check if the section we found is readonly. */
1319 if ((bfd_get_section_flags (section->the_bfd_section->owner,
1320 section->the_bfd_section)
1321 & SEC_READONLY) != 0)
1322 {
1323 /* Truncate the request to fit into this section. */
1324 len = std::min (len, section->endaddr - offset);
1325 break;
1326 }
1327 }
1328
1329 *xfered_len = len;
1330 return TARGET_XFER_UNAVAILABLE;
1331 }
1332 }
1333 }
1334
1335 /* Forward the request. */
1336 ops = ops->beneath;
1337 return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
1338 offset, len, xfered_len);
1339 }
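/* Illustrative summary (not compiled): with the default
   "set record btrace replay-memory-access read-only", memory writes
   and reads from writable sections report TARGET_XFER_UNAVAILABLE
   while replaying; reads that fall into a SEC_READONLY section
   (typically .text or .rodata) are truncated to that section and
   forwarded to the target beneath. */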
1340
1341 /* The to_insert_breakpoint method of target record-btrace. */
1342
1343 static int
1344 record_btrace_insert_breakpoint (struct target_ops *ops,
1345 struct gdbarch *gdbarch,
1346 struct bp_target_info *bp_tgt)
1347 {
1348 const char *old;
1349 int ret;
1350
1351 /* Inserting breakpoints requires accessing memory. Allow it for the
1352 duration of this function. */
1353 old = replay_memory_access;
1354 replay_memory_access = replay_memory_access_read_write;
1355
1356 ret = 0;
1357 TRY
1358 {
1359 ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);
1360 }
1361 CATCH (except, RETURN_MASK_ALL)
1362 {
1363 replay_memory_access = old;
1364 throw_exception (except);
1365 }
1366 END_CATCH
1367 replay_memory_access = old;
1368
1369 return ret;
1370 }
1371
1372 /* The to_remove_breakpoint method of target record-btrace. */
1373
1374 static int
1375 record_btrace_remove_breakpoint (struct target_ops *ops,
1376 struct gdbarch *gdbarch,
1377 struct bp_target_info *bp_tgt,
1378 enum remove_bp_reason reason)
1379 {
1380 const char *old;
1381 int ret;
1382
1383 /* Removing breakpoints requires accessing memory. Allow it for the
1384 duration of this function. */
1385 old = replay_memory_access;
1386 replay_memory_access = replay_memory_access_read_write;
1387
1388 ret = 0;
1389 TRY
1390 {
1391 ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt,
1392 reason);
1393 }
1394 CATCH (except, RETURN_MASK_ALL)
1395 {
1396 replay_memory_access = old;
1397 throw_exception (except);
1398 }
1399 END_CATCH
1400 replay_memory_access = old;
1401
1402 return ret;
1403 }
1404
1405 /* The to_fetch_registers method of target record-btrace. */
1406
1407 static void
1408 record_btrace_fetch_registers (struct target_ops *ops,
1409 struct regcache *regcache, int regno)
1410 {
1411 struct btrace_insn_iterator *replay;
1412 struct thread_info *tp;
1413
1414 tp = find_thread_ptid (regcache_get_ptid (regcache));
1415 gdb_assert (tp != NULL);
1416
1417 replay = tp->btrace.replay;
1418 if (replay != NULL && !record_btrace_generating_corefile)
1419 {
1420 const struct btrace_insn *insn;
1421 struct gdbarch *gdbarch;
1422 int pcreg;
1423
1424 gdbarch = get_regcache_arch (regcache);
1425 pcreg = gdbarch_pc_regnum (gdbarch);
1426 if (pcreg < 0)
1427 return;
1428
1429 /* We can only provide the PC register. */
1430 if (regno >= 0 && regno != pcreg)
1431 return;
1432
1433 insn = btrace_insn_get (replay);
1434 gdb_assert (insn != NULL);
1435
1436 regcache_raw_supply (regcache, regno, &insn->pc);
1437 }
1438 else
1439 {
1440 struct target_ops *t = ops->beneath;
1441
1442 t->to_fetch_registers (t, regcache, regno);
1443 }
1444 }
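/* Illustrative note (not compiled): while replaying, only the PC can
   be reconstructed from the recorded instruction trace. A request for
   any other register returns without supplying a value, so those
   registers read as <unavailable> in the record history. */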
1445
1446 /* The to_store_registers method of target record-btrace. */
1447
1448 static void
1449 record_btrace_store_registers (struct target_ops *ops,
1450 struct regcache *regcache, int regno)
1451 {
1452 struct target_ops *t;
1453
1454 if (!record_btrace_generating_corefile
1455 && record_btrace_is_replaying (ops, regcache_get_ptid (regcache)))
1456 error (_("Cannot write registers while replaying."));
1457
1458 gdb_assert (may_write_registers != 0);
1459
1460 t = ops->beneath;
1461 t->to_store_registers (t, regcache, regno);
1462 }
1463
1464 /* The to_prepare_to_store method of target record-btrace. */
1465
1466 static void
1467 record_btrace_prepare_to_store (struct target_ops *ops,
1468 struct regcache *regcache)
1469 {
1470 struct target_ops *t;
1471
1472 if (!record_btrace_generating_corefile
1473 && record_btrace_is_replaying (ops, regcache_get_ptid (regcache)))
1474 return;
1475
1476 t = ops->beneath;
1477 t->to_prepare_to_store (t, regcache);
1478 }
1479
1480 /* The branch trace frame cache. */
1481
1482 struct btrace_frame_cache
1483 {
1484 /* The thread. */
1485 struct thread_info *tp;
1486
1487 /* The frame info. */
1488 struct frame_info *frame;
1489
1490 /* The branch trace function segment. */
1491 const struct btrace_function *bfun;
1492 };
1493
1494 /* A struct btrace_frame_cache hash table indexed by NEXT. */
1495
1496 static htab_t bfcache;
1497
1498 /* hash_f for htab_create_alloc of bfcache. */
1499
1500 static hashval_t
1501 bfcache_hash (const void *arg)
1502 {
1503 const struct btrace_frame_cache *cache
1504 = (const struct btrace_frame_cache *) arg;
1505
1506 return htab_hash_pointer (cache->frame);
1507 }
1508
1509 /* eq_f for htab_create_alloc of bfcache. */
1510
1511 static int
1512 bfcache_eq (const void *arg1, const void *arg2)
1513 {
1514 const struct btrace_frame_cache *cache1
1515 = (const struct btrace_frame_cache *) arg1;
1516 const struct btrace_frame_cache *cache2
1517 = (const struct btrace_frame_cache *) arg2;
1518
1519 return cache1->frame == cache2->frame;
1520 }
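/* A minimal sketch of how BFCACHE would be created with the above
   callbacks (assumption: the actual htab_create_alloc call lives in
   this file's initialization code, outside this excerpt). */
#if 0
bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
			     xcalloc, xfree);
#endif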
1521
1522 /* Create a new btrace frame cache. */
1523
1524 static struct btrace_frame_cache *
1525 bfcache_new (struct frame_info *frame)
1526 {
1527 struct btrace_frame_cache *cache;
1528 void **slot;
1529
1530 cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
1531 cache->frame = frame;
1532
1533 slot = htab_find_slot (bfcache, cache, INSERT);
1534 gdb_assert (*slot == NULL);
1535 *slot = cache;
1536
1537 return cache;
1538 }
1539
1540 /* Extract the branch trace function from a branch trace frame. */
1541
1542 static const struct btrace_function *
1543 btrace_get_frame_function (struct frame_info *frame)
1544 {
1545 const struct btrace_frame_cache *cache;
1546 const struct btrace_function *bfun;
1547 struct btrace_frame_cache pattern;
1548 void **slot;
1549
1550 pattern.frame = frame;
1551
1552 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1553 if (slot == NULL)
1554 return NULL;
1555
1556 cache = (const struct btrace_frame_cache *) *slot;
1557 return cache->bfun;
1558 }
1559
1560 /* Implement stop_reason method for record_btrace_frame_unwind. */
1561
1562 static enum unwind_stop_reason
1563 record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1564 void **this_cache)
1565 {
1566 const struct btrace_frame_cache *cache;
1567 const struct btrace_function *bfun;
1568
1569 cache = (const struct btrace_frame_cache *) *this_cache;
1570 bfun = cache->bfun;
1571 gdb_assert (bfun != NULL);
1572
1573 if (bfun->up == 0)
1574 return UNWIND_UNAVAILABLE;
1575
1576 return UNWIND_NO_REASON;
1577 }
1578
1579 /* Implement this_id method for record_btrace_frame_unwind. */
1580
1581 static void
1582 record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
1583 struct frame_id *this_id)
1584 {
1585 const struct btrace_frame_cache *cache;
1586 const struct btrace_function *bfun;
1587 struct btrace_call_iterator it;
1588 CORE_ADDR code, special;
1589
1590 cache = (const struct btrace_frame_cache *) *this_cache;
1591
1592 bfun = cache->bfun;
1593 gdb_assert (bfun != NULL);
1594
1595 while (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->prev) != 0)
1596 bfun = btrace_call_get (&it);
1597
1598 code = get_frame_func (this_frame);
1599 special = bfun->number;
1600
1601 *this_id = frame_id_build_unavailable_stack_special (code, special);
1602
1603 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1604 btrace_get_bfun_name (cache->bfun),
1605 core_addr_to_string_nz (this_id->code_addr),
1606 core_addr_to_string_nz (this_id->special_addr));
1607 }
1608
1609 /* Implement prev_register method for record_btrace_frame_unwind. */
1610
1611 static struct value *
1612 record_btrace_frame_prev_register (struct frame_info *this_frame,
1613 void **this_cache,
1614 int regnum)
1615 {
1616 const struct btrace_frame_cache *cache;
1617 const struct btrace_function *bfun, *caller;
1618 const struct btrace_insn *insn;
1619 struct btrace_call_iterator it;
1620 struct gdbarch *gdbarch;
1621 CORE_ADDR pc;
1622 int pcreg;
1623
1624 gdbarch = get_frame_arch (this_frame);
1625 pcreg = gdbarch_pc_regnum (gdbarch);
1626 if (pcreg < 0 || regnum != pcreg)
1627 throw_error (NOT_AVAILABLE_ERROR,
1628 _("Registers are not available in btrace record history"));
1629
1630 cache = (const struct btrace_frame_cache *) *this_cache;
1631 bfun = cache->bfun;
1632 gdb_assert (bfun != NULL);
1633
1634 if (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->up) == 0)
1635 throw_error (NOT_AVAILABLE_ERROR,
1636 _("No caller in btrace record history"));
1637
1638 caller = btrace_call_get (&it);
1639
1640 if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
1641 {
1642 insn = VEC_index (btrace_insn_s, caller->insn, 0);
1643 pc = insn->pc;
1644 }
1645 else
1646 {
1647 insn = VEC_last (btrace_insn_s, caller->insn);
1648 pc = insn->pc;
1649
1650 pc += gdb_insn_length (gdbarch, pc);
1651 }
1652
1653 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1654 btrace_get_bfun_name (bfun), bfun->level,
1655 core_addr_to_string_nz (pc));
1656
1657 return frame_unwind_got_address (this_frame, regnum, pc);
1658 }
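/* Illustrative example (not compiled): if the caller's last recorded
   instruction is a 5-byte call at 0x100, the unwound PC is 0x105, the
   instruction following the call. If the up link carries
   BFUN_UP_LINKS_TO_RET, the caller's segment starts at the return
   address, so its first instruction is used directly. */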
1659
1660 /* Implement sniffer method for record_btrace_frame_unwind. */
1661
1662 static int
1663 record_btrace_frame_sniffer (const struct frame_unwind *self,
1664 struct frame_info *this_frame,
1665 void **this_cache)
1666 {
1667 const struct btrace_function *bfun;
1668 struct btrace_frame_cache *cache;
1669 struct thread_info *tp;
1670 struct frame_info *next;
1671
1672 /* THIS_FRAME does not contain a reference to its thread. */
1673 tp = find_thread_ptid (inferior_ptid);
1674 gdb_assert (tp != NULL);
1675
1676 bfun = NULL;
1677 next = get_next_frame (this_frame);
1678 if (next == NULL)
1679 {
1680 const struct btrace_insn_iterator *replay;
1681
1682 replay = tp->btrace.replay;
1683 if (replay != NULL)
1684 bfun = &replay->btinfo->functions[replay->call_index];
1685 }
1686 else
1687 {
1688 const struct btrace_function *callee;
1689 struct btrace_call_iterator it;
1690
1691 callee = btrace_get_frame_function (next);
1692 if (callee == NULL || (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
1693 return 0;
1694
1695 if (btrace_find_call_by_number (&it, &tp->btrace, callee->up) == 0)
1696 return 0;
1697
1698 bfun = btrace_call_get (&it);
1699 }
1700
1701 if (bfun == NULL)
1702 return 0;
1703
1704 DEBUG ("[frame] sniffed frame for %s on level %d",
1705 btrace_get_bfun_name (bfun), bfun->level);
1706
1707 /* This is our frame. Initialize the frame cache. */
1708 cache = bfcache_new (this_frame);
1709 cache->tp = tp;
1710 cache->bfun = bfun;
1711
1712 *this_cache = cache;
1713 return 1;
1714 }
1715
1716 /* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1717
1718 static int
1719 record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
1720 struct frame_info *this_frame,
1721 void **this_cache)
1722 {
1723 const struct btrace_function *bfun, *callee;
1724 struct btrace_frame_cache *cache;
1725 struct btrace_call_iterator it;
1726 struct frame_info *next;
1727 struct thread_info *tinfo;
1728
1729 next = get_next_frame (this_frame);
1730 if (next == NULL)
1731 return 0;
1732
1733 callee = btrace_get_frame_function (next);
1734 if (callee == NULL)
1735 return 0;
1736
1737 if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1738 return 0;
1739
1740 tinfo = find_thread_ptid (inferior_ptid);
1741 if (btrace_find_call_by_number (&it, &tinfo->btrace, callee->up) == 0)
1742 return 0;
1743
1744 bfun = btrace_call_get (&it);
1745
1746 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1747 btrace_get_bfun_name (bfun), bfun->level);
1748
1749 /* This is our frame. Initialize the frame cache. */
1750 cache = bfcache_new (this_frame);
1751 cache->tp = tinfo;
1752 cache->bfun = bfun;
1753
1754 *this_cache = cache;
1755 return 1;
1756 }
1757
1758 static void
1759 record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
1760 {
1761 struct btrace_frame_cache *cache;
1762 void **slot;
1763
1764 cache = (struct btrace_frame_cache *) this_cache;
1765
1766 slot = htab_find_slot (bfcache, cache, NO_INSERT);
1767 gdb_assert (slot != NULL);
1768
1769 htab_remove_elt (bfcache, cache);
1770 }
1771
1772 /* btrace recording does not store previous memory content, nor the stack
1773 frames' content. Any unwinding would return erroneous results as the stack
1774 contents no longer match the changed PC value restored from history.
1775 Therefore this unwinder reports any possibly unwound registers as
1776 <unavailable>. */
1777
1778 const struct frame_unwind record_btrace_frame_unwind =
1779 {
1780 NORMAL_FRAME,
1781 record_btrace_frame_unwind_stop_reason,
1782 record_btrace_frame_this_id,
1783 record_btrace_frame_prev_register,
1784 NULL,
1785 record_btrace_frame_sniffer,
1786 record_btrace_frame_dealloc_cache
1787 };
1788
1789 const struct frame_unwind record_btrace_tailcall_frame_unwind =
1790 {
1791 TAILCALL_FRAME,
1792 record_btrace_frame_unwind_stop_reason,
1793 record_btrace_frame_this_id,
1794 record_btrace_frame_prev_register,
1795 NULL,
1796 record_btrace_tailcall_frame_sniffer,
1797 record_btrace_frame_dealloc_cache
1798 };
1799
1800 /* Implement the to_get_unwinder method. */
1801
1802 static const struct frame_unwind *
1803 record_btrace_to_get_unwinder (struct target_ops *self)
1804 {
1805 return &record_btrace_frame_unwind;
1806 }
1807
1808 /* Implement the to_get_tailcall_unwinder method. */
1809
1810 static const struct frame_unwind *
1811 record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
1812 {
1813 return &record_btrace_tailcall_frame_unwind;
1814 }
1815
1816 /* Return a human-readable string for FLAG. */
1817
1818 static const char *
1819 btrace_thread_flag_to_str (enum btrace_thread_flag flag)
1820 {
1821 switch (flag)
1822 {
1823 case BTHR_STEP:
1824 return "step";
1825
1826 case BTHR_RSTEP:
1827 return "reverse-step";
1828
1829 case BTHR_CONT:
1830 return "cont";
1831
1832 case BTHR_RCONT:
1833 return "reverse-cont";
1834
1835 case BTHR_STOP:
1836 return "stop";
1837 }
1838
1839 return "<invalid>";
1840 }
1841
1842 /* Indicate that TP should be resumed according to FLAG. */
1843
1844 static void
1845 record_btrace_resume_thread (struct thread_info *tp,
1846 enum btrace_thread_flag flag)
1847 {
1848 struct btrace_thread_info *btinfo;
1849
1850 DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp),
1851 target_pid_to_str (tp->ptid), flag, btrace_thread_flag_to_str (flag));
1852
1853 btinfo = &tp->btrace;
1854
1855 /* Fetch the latest branch trace. */
1856 btrace_fetch (tp);
1857
1858 /* A resume request overwrites a preceding resume or stop request. */
1859 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
1860 btinfo->flags |= flag;
1861 }
1862
1863 /* Get the current frame for TP. */
1864
1865 static struct frame_info *
1866 get_thread_current_frame (struct thread_info *tp)
1867 {
1868 struct frame_info *frame;
1869 ptid_t old_inferior_ptid;
1870 int executing;
1871
1872 /* Set INFERIOR_PTID, which is implicitly used by get_current_frame. */
1873 old_inferior_ptid = inferior_ptid;
1874 inferior_ptid = tp->ptid;
1875
1876   /* Clear the executing flag to allow changes to the current frame.
1877      We are not actually running yet; we just started a reverse execution
1878      command or a record goto command.
1879      For the latter, EXECUTING is false and this has no effect.
1880      For the former, EXECUTING is true and we're in to_wait, about to
1881      move the thread. Since we need to recompute the stack, we temporarily
1882      set EXECUTING to false. */
1883 executing = is_executing (inferior_ptid);
1884 set_executing (inferior_ptid, 0);
1885
1886 frame = NULL;
1887 TRY
1888 {
1889 frame = get_current_frame ();
1890 }
1891 CATCH (except, RETURN_MASK_ALL)
1892 {
1893 /* Restore the previous execution state. */
1894 set_executing (inferior_ptid, executing);
1895
1896 /* Restore the previous inferior_ptid. */
1897 inferior_ptid = old_inferior_ptid;
1898
1899 throw_exception (except);
1900 }
1901 END_CATCH
1902
1903 /* Restore the previous execution state. */
1904 set_executing (inferior_ptid, executing);
1905
1906 /* Restore the previous inferior_ptid. */
1907 inferior_ptid = old_inferior_ptid;
1908
1909 return frame;
1910 }
1911
1912 /* Start replaying a thread. */
1913
1914 static struct btrace_insn_iterator *
1915 record_btrace_start_replaying (struct thread_info *tp)
1916 {
1917 struct btrace_insn_iterator *replay;
1918 struct btrace_thread_info *btinfo;
1919
1920 btinfo = &tp->btrace;
1921 replay = NULL;
1922
1923 /* We can't start replaying without trace. */
1924 if (btinfo->functions.empty ())
1925 return NULL;
1926
1927   /* GDB stores the current frame_id when stepping in order to detect steps
1928      into subroutines.
1929      Since frames are computed differently when we're replaying, we need to
1930      recompute those stored frames and fix them up so we can still detect
1931      subroutines after we have started replaying. */
1932 TRY
1933 {
1934 struct frame_info *frame;
1935 struct frame_id frame_id;
1936 int upd_step_frame_id, upd_step_stack_frame_id;
1937
1938 /* The current frame without replaying - computed via normal unwind. */
1939 frame = get_thread_current_frame (tp);
1940 frame_id = get_frame_id (frame);
1941
1942 /* Check if we need to update any stepping-related frame id's. */
1943 upd_step_frame_id = frame_id_eq (frame_id,
1944 tp->control.step_frame_id);
1945 upd_step_stack_frame_id = frame_id_eq (frame_id,
1946 tp->control.step_stack_frame_id);
1947
1948 /* We start replaying at the end of the branch trace. This corresponds
1949 to the current instruction. */
1950 replay = XNEW (struct btrace_insn_iterator);
1951 btrace_insn_end (replay, btinfo);
1952
1953 /* Skip gaps at the end of the trace. */
1954 while (btrace_insn_get (replay) == NULL)
1955 {
1956 unsigned int steps;
1957
1958 steps = btrace_insn_prev (replay, 1);
1959 if (steps == 0)
1960 error (_("No trace."));
1961 }
1962
1963 /* We're not replaying, yet. */
1964 gdb_assert (btinfo->replay == NULL);
1965 btinfo->replay = replay;
1966
1967 /* Make sure we're not using any stale registers. */
1968 registers_changed_ptid (tp->ptid);
1969
1970 /* The current frame with replaying - computed via btrace unwind. */
1971 frame = get_thread_current_frame (tp);
1972 frame_id = get_frame_id (frame);
1973
1974 /* Replace stepping related frames where necessary. */
1975 if (upd_step_frame_id)
1976 tp->control.step_frame_id = frame_id;
1977 if (upd_step_stack_frame_id)
1978 tp->control.step_stack_frame_id = frame_id;
1979 }
1980 CATCH (except, RETURN_MASK_ALL)
1981 {
1982 xfree (btinfo->replay);
1983 btinfo->replay = NULL;
1984
1985 registers_changed_ptid (tp->ptid);
1986
1987 throw_exception (except);
1988 }
1989 END_CATCH
1990
1991 return replay;
1992 }
1993
1994 /* Stop replaying a thread. */
1995
1996 static void
1997 record_btrace_stop_replaying (struct thread_info *tp)
1998 {
1999 struct btrace_thread_info *btinfo;
2000
2001 btinfo = &tp->btrace;
2002
2003 xfree (btinfo->replay);
2004 btinfo->replay = NULL;
2005
2006 /* Make sure we're not leaving any stale registers. */
2007 registers_changed_ptid (tp->ptid);
2008 }
2009
2010 /* Stop replaying TP if it is at the end of its execution history. */
2011
2012 static void
2013 record_btrace_stop_replaying_at_end (struct thread_info *tp)
2014 {
2015 struct btrace_insn_iterator *replay, end;
2016 struct btrace_thread_info *btinfo;
2017
2018 btinfo = &tp->btrace;
2019 replay = btinfo->replay;
2020
2021 if (replay == NULL)
2022 return;
2023
2024 btrace_insn_end (&end, btinfo);
2025
2026 if (btrace_insn_cmp (replay, &end) == 0)
2027 record_btrace_stop_replaying (tp);
2028 }
2029
2030 /* The to_resume method of target record-btrace. */
2031
2032 static void
2033 record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
2034 enum gdb_signal signal)
2035 {
2036 struct thread_info *tp;
2037 enum btrace_thread_flag flag, cflag;
2038
2039 DEBUG ("resume %s: %s%s", target_pid_to_str (ptid),
2040 execution_direction == EXEC_REVERSE ? "reverse-" : "",
2041 step ? "step" : "cont");
2042
2043 /* Store the execution direction of the last resume.
2044
2045 If there is more than one to_resume call, we have to rely on infrun
2046 to not change the execution direction in-between. */
2047 record_btrace_resume_exec_dir = execution_direction;
2048
2049 /* As long as we're not replaying, just forward the request.
2050
2051 For non-stop targets this means that no thread is replaying. In order to
2052 make progress, we may need to explicitly move replaying threads to the end
2053 of their execution history. */
2054 if ((execution_direction != EXEC_REVERSE)
2055 && !record_btrace_is_replaying (ops, minus_one_ptid))
2056 {
2057 ops = ops->beneath;
2058 ops->to_resume (ops, ptid, step, signal);
2059 return;
2060 }
2061
2062 /* Compute the btrace thread flag for the requested move. */
2063 if (execution_direction == EXEC_REVERSE)
2064 {
2065 flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;
2066 cflag = BTHR_RCONT;
2067 }
2068 else
2069 {
2070 flag = step == 0 ? BTHR_CONT : BTHR_STEP;
2071 cflag = BTHR_CONT;
2072 }
2073
2074 /* We just indicate the resume intent here. The actual stepping happens in
2075 record_btrace_wait below.
2076
2077 For all-stop targets, we only step INFERIOR_PTID and continue others. */
2078 if (!target_is_non_stop_p ())
2079 {
2080 gdb_assert (ptid_match (inferior_ptid, ptid));
2081
2082 ALL_NON_EXITED_THREADS (tp)
2083 if (ptid_match (tp->ptid, ptid))
2084 {
2085 if (ptid_match (tp->ptid, inferior_ptid))
2086 record_btrace_resume_thread (tp, flag);
2087 else
2088 record_btrace_resume_thread (tp, cflag);
2089 }
2090 }
2091 else
2092 {
2093 ALL_NON_EXITED_THREADS (tp)
2094 if (ptid_match (tp->ptid, ptid))
2095 record_btrace_resume_thread (tp, flag);
2096 }
2097
2098 /* Async support. */
2099 if (target_can_async_p ())
2100 {
2101 target_async (1);
2102 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2103 }
2104 }
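
/* An illustrative summary of the flag selection in
   record_btrace_resume above.  The resumed thread gets FLAG; in
   all-stop mode, the other matching threads get CFLAG:

     direction  step  FLAG        CFLAG
     forward    0     BTHR_CONT   BTHR_CONT
     forward    1     BTHR_STEP   BTHR_CONT
     reverse    0     BTHR_RCONT  BTHR_RCONT
     reverse    1     BTHR_RSTEP  BTHR_RCONT  */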
2105
2106 /* The to_commit_resume method of target record-btrace. */
2107
2108 static void
2109 record_btrace_commit_resume (struct target_ops *ops)
2110 {
2111 if ((execution_direction != EXEC_REVERSE)
2112 && !record_btrace_is_replaying (ops, minus_one_ptid))
2113 ops->beneath->to_commit_resume (ops->beneath);
2114 }
2115
2116 /* Cancel resuming TP. */
2117
2118 static void
2119 record_btrace_cancel_resume (struct thread_info *tp)
2120 {
2121 enum btrace_thread_flag flags;
2122
2123 flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
2124 if (flags == 0)
2125 return;
2126
2127 DEBUG ("cancel resume thread %s (%s): %x (%s)",
2128 print_thread_id (tp),
2129 target_pid_to_str (tp->ptid), flags,
2130 btrace_thread_flag_to_str (flags));
2131
2132 tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
2133 record_btrace_stop_replaying_at_end (tp);
2134 }
2135
2136 /* Return a target_waitstatus indicating that we ran out of history. */
2137
2138 static struct target_waitstatus
2139 btrace_step_no_history (void)
2140 {
2141 struct target_waitstatus status;
2142
2143 status.kind = TARGET_WAITKIND_NO_HISTORY;
2144
2145 return status;
2146 }
2147
2148 /* Return a target_waitstatus indicating that a step finished. */
2149
2150 static struct target_waitstatus
2151 btrace_step_stopped (void)
2152 {
2153 struct target_waitstatus status;
2154
2155 status.kind = TARGET_WAITKIND_STOPPED;
2156 status.value.sig = GDB_SIGNAL_TRAP;
2157
2158 return status;
2159 }
2160
2161 /* Return a target_waitstatus indicating that a thread was stopped as
2162 requested. */
2163
2164 static struct target_waitstatus
2165 btrace_step_stopped_on_request (void)
2166 {
2167 struct target_waitstatus status;
2168
2169 status.kind = TARGET_WAITKIND_STOPPED;
2170 status.value.sig = GDB_SIGNAL_0;
2171
2172 return status;
2173 }
2174
2175 /* Return a target_waitstatus indicating a spurious stop. */
2176
2177 static struct target_waitstatus
2178 btrace_step_spurious (void)
2179 {
2180 struct target_waitstatus status;
2181
2182 status.kind = TARGET_WAITKIND_SPURIOUS;
2183
2184 return status;
2185 }
2186
2187 /* Return a target_waitstatus indicating that the thread was not resumed. */
2188
2189 static struct target_waitstatus
2190 btrace_step_no_resumed (void)
2191 {
2192 struct target_waitstatus status;
2193
2194 status.kind = TARGET_WAITKIND_NO_RESUMED;
2195
2196 return status;
2197 }
2198
2199 /* Return a target_waitstatus indicating that we should wait again. */
2200
2201 static struct target_waitstatus
2202 btrace_step_again (void)
2203 {
2204 struct target_waitstatus status;
2205
2206 status.kind = TARGET_WAITKIND_IGNORE;
2207
2208 return status;
2209 }
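
/* For reference, the helpers above map onto wait kinds as follows:

     btrace_step_no_history          TARGET_WAITKIND_NO_HISTORY
     btrace_step_stopped             TARGET_WAITKIND_STOPPED, SIGTRAP
     btrace_step_stopped_on_request  TARGET_WAITKIND_STOPPED, no signal
     btrace_step_spurious            TARGET_WAITKIND_SPURIOUS
     btrace_step_no_resumed          TARGET_WAITKIND_NO_RESUMED
     btrace_step_again               TARGET_WAITKIND_IGNORE  */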
2210
2211 /* Clear the record histories. */
2212
2213 static void
2214 record_btrace_clear_histories (struct btrace_thread_info *btinfo)
2215 {
2216 xfree (btinfo->insn_history);
2217 xfree (btinfo->call_history);
2218
2219 btinfo->insn_history = NULL;
2220 btinfo->call_history = NULL;
2221 }
2222
2223 /* Check whether TP's current replay position is at a breakpoint. */
2224
2225 static int
2226 record_btrace_replay_at_breakpoint (struct thread_info *tp)
2227 {
2228 struct btrace_insn_iterator *replay;
2229 struct btrace_thread_info *btinfo;
2230 const struct btrace_insn *insn;
2231 struct inferior *inf;
2232
2233 btinfo = &tp->btrace;
2234 replay = btinfo->replay;
2235
2236 if (replay == NULL)
2237 return 0;
2238
2239 insn = btrace_insn_get (replay);
2240 if (insn == NULL)
2241 return 0;
2242
2243 inf = find_inferior_ptid (tp->ptid);
2244 if (inf == NULL)
2245 return 0;
2246
2247 return record_check_stopped_by_breakpoint (inf->aspace, insn->pc,
2248 &btinfo->stop_reason);
2249 }
2250
2251 /* Step one instruction in forward direction. */
2252
2253 static struct target_waitstatus
2254 record_btrace_single_step_forward (struct thread_info *tp)
2255 {
2256 struct btrace_insn_iterator *replay, end, start;
2257 struct btrace_thread_info *btinfo;
2258
2259 btinfo = &tp->btrace;
2260 replay = btinfo->replay;
2261
2262 /* We're done if we're not replaying. */
2263 if (replay == NULL)
2264 return btrace_step_no_history ();
2265
2266 /* Check if we're stepping a breakpoint. */
2267 if (record_btrace_replay_at_breakpoint (tp))
2268 return btrace_step_stopped ();
2269
2270 /* Skip gaps during replay. If we end up at a gap (at the end of the trace),
2271 jump back to the instruction at which we started. */
2272 start = *replay;
2273 do
2274 {
2275 unsigned int steps;
2276
2277 /* We will bail out here if we continue stepping after reaching the end
2278 of the execution history. */
2279 steps = btrace_insn_next (replay, 1);
2280 if (steps == 0)
2281 {
2282 *replay = start;
2283 return btrace_step_no_history ();
2284 }
2285 }
2286 while (btrace_insn_get (replay) == NULL);
2287
2288 /* Determine the end of the instruction trace. */
2289 btrace_insn_end (&end, btinfo);
2290
2291 /* The execution trace contains (and ends with) the current instruction.
2292 This instruction has not been executed, yet, so the trace really ends
2293 one instruction earlier. */
2294 if (btrace_insn_cmp (replay, &end) == 0)
2295 return btrace_step_no_history ();
2296
2297 return btrace_step_spurious ();
2298 }
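
/* A worked example of the end-of-trace check above.  Assume the
   execution history holds three instructions

      1  insn A   (executed)
      2  insn B   (executed)
      3  insn C   (current PC; not executed, yet)

   Stepping forward from instruction 2 moves the replay iterator onto
   instruction 3, which compares equal to the trace's end iterator, so
   the step reports TARGET_WAITKIND_NO_HISTORY instead of a stop.  */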
2299
2300 /* Step one instruction in backward direction. */
2301
2302 static struct target_waitstatus
2303 record_btrace_single_step_backward (struct thread_info *tp)
2304 {
2305 struct btrace_insn_iterator *replay, start;
2306 struct btrace_thread_info *btinfo;
2307
2308 btinfo = &tp->btrace;
2309 replay = btinfo->replay;
2310
2311 /* Start replaying if we're not already doing so. */
2312 if (replay == NULL)
2313 replay = record_btrace_start_replaying (tp);
2314
2315 /* If we can't step any further, we reached the beginning of the history.
2316 Skip gaps during replay. If we end up at a gap (at the beginning of
2317 the trace), jump back to the instruction at which we started. */
2318 start = *replay;
2319 do
2320 {
2321 unsigned int steps;
2322
2323 steps = btrace_insn_prev (replay, 1);
2324 if (steps == 0)
2325 {
2326 *replay = start;
2327 return btrace_step_no_history ();
2328 }
2329 }
2330 while (btrace_insn_get (replay) == NULL);
2331
2332 /* Check if we're stepping a breakpoint.
2333
2334 For reverse-stepping, this check is after the step. There is logic in
2335 infrun.c that handles reverse-stepping separately. See, for example,
2336 proceed and adjust_pc_after_break.
2337
2338 This code assumes that for reverse-stepping, PC points to the last
2339 de-executed instruction, whereas for forward-stepping PC points to the
2340 next to-be-executed instruction. */
2341 if (record_btrace_replay_at_breakpoint (tp))
2342 return btrace_step_stopped ();
2343
2344 return btrace_step_spurious ();
2345 }
2346
2347 /* Step a single thread. */
2348
2349 static struct target_waitstatus
2350 record_btrace_step_thread (struct thread_info *tp)
2351 {
2352 struct btrace_thread_info *btinfo;
2353 struct target_waitstatus status;
2354 enum btrace_thread_flag flags;
2355
2356 btinfo = &tp->btrace;
2357
2358 flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
2359 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
2360
2361 DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp),
2362 target_pid_to_str (tp->ptid), flags,
2363 btrace_thread_flag_to_str (flags));
2364
2365 /* We can't step without an execution history. */
2366 if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
2367 return btrace_step_no_history ();
2368
2369 switch (flags)
2370 {
2371 default:
2372 internal_error (__FILE__, __LINE__, _("invalid stepping type."));
2373
2374 case BTHR_STOP:
2375 return btrace_step_stopped_on_request ();
2376
2377 case BTHR_STEP:
2378 status = record_btrace_single_step_forward (tp);
2379 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2380 break;
2381
2382 return btrace_step_stopped ();
2383
2384 case BTHR_RSTEP:
2385 status = record_btrace_single_step_backward (tp);
2386 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2387 break;
2388
2389 return btrace_step_stopped ();
2390
2391 case BTHR_CONT:
2392 status = record_btrace_single_step_forward (tp);
2393 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2394 break;
2395
2396 btinfo->flags |= flags;
2397 return btrace_step_again ();
2398
2399 case BTHR_RCONT:
2400 status = record_btrace_single_step_backward (tp);
2401 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2402 break;
2403
2404 btinfo->flags |= flags;
2405 return btrace_step_again ();
2406 }
2407
2408 /* We keep threads moving at the end of their execution history. The to_wait
2409 method will stop the thread for which the event is reported. */
2410 if (status.kind == TARGET_WAITKIND_NO_HISTORY)
2411 btinfo->flags |= flags;
2412
2413 return status;
2414 }
2415
2416 /* A vector of threads. */
2417
2418 typedef struct thread_info * tp_t;
2419 DEF_VEC_P (tp_t);
2420
2421 /* Announce further events if necessary. */
2422
2423 static void
2424 record_btrace_maybe_mark_async_event (const VEC (tp_t) *moving,
2425 const VEC (tp_t) *no_history)
2426 {
2427 int more_moving, more_no_history;
2428
2429 more_moving = !VEC_empty (tp_t, moving);
2430 more_no_history = !VEC_empty (tp_t, no_history);
2431
2432 if (!more_moving && !more_no_history)
2433 return;
2434
2435 if (more_moving)
2436 DEBUG ("movers pending");
2437
2438 if (more_no_history)
2439 DEBUG ("no-history pending");
2440
2441 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2442 }
2443
2444 /* The to_wait method of target record-btrace. */
2445
2446 static ptid_t
2447 record_btrace_wait (struct target_ops *ops, ptid_t ptid,
2448 struct target_waitstatus *status, int options)
2449 {
2450 VEC (tp_t) *moving, *no_history;
2451 struct thread_info *tp, *eventing;
2452 struct cleanup *cleanups = make_cleanup (null_cleanup, NULL);
2453
2454 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);
2455
2456 /* As long as we're not replaying, just forward the request. */
2457 if ((execution_direction != EXEC_REVERSE)
2458 && !record_btrace_is_replaying (ops, minus_one_ptid))
2459 {
2460 ops = ops->beneath;
2461 return ops->to_wait (ops, ptid, status, options);
2462 }
2463
2464 moving = NULL;
2465 no_history = NULL;
2466
2467 make_cleanup (VEC_cleanup (tp_t), &moving);
2468 make_cleanup (VEC_cleanup (tp_t), &no_history);
2469
2470 /* Keep a work list of moving threads. */
2471 ALL_NON_EXITED_THREADS (tp)
2472 if (ptid_match (tp->ptid, ptid)
2473 && ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0))
2474 VEC_safe_push (tp_t, moving, tp);
2475
2476 if (VEC_empty (tp_t, moving))
2477 {
2478 *status = btrace_step_no_resumed ();
2479
2480 DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid),
2481 target_waitstatus_to_string (status));
2482
2483 do_cleanups (cleanups);
2484 return null_ptid;
2485 }
2486
2487 /* Step moving threads one by one, one step each, until either one thread
2488 reports an event or we run out of threads to step.
2489
2490 When stepping more than one thread, chances are that some threads reach
2491 the end of their execution history earlier than others. If we reported
2492 this immediately, all-stop on top of non-stop would stop all threads and
2493 resume the same threads next time. And we would report the same thread
2494 having reached the end of its execution history again.
2495
2496 In the worst case, this would starve the other threads. But even if other
2497 threads would be allowed to make progress, this would result in far too
2498 many intermediate stops.
2499
2500 We therefore delay the reporting of "no execution history" until we have
2501 nothing else to report. By this time, all threads should have moved to
2502 either the beginning or the end of their execution history. There will
2503 be a single user-visible stop. */
2504 eventing = NULL;
2505 while ((eventing == NULL) && !VEC_empty (tp_t, moving))
2506 {
2507 unsigned int ix;
2508
2509 ix = 0;
2510 while ((eventing == NULL) && VEC_iterate (tp_t, moving, ix, tp))
2511 {
2512 *status = record_btrace_step_thread (tp);
2513
2514 switch (status->kind)
2515 {
2516 case TARGET_WAITKIND_IGNORE:
2517 ix++;
2518 break;
2519
2520 case TARGET_WAITKIND_NO_HISTORY:
2521 VEC_safe_push (tp_t, no_history,
2522 VEC_ordered_remove (tp_t, moving, ix));
2523 break;
2524
2525 default:
2526 eventing = VEC_unordered_remove (tp_t, moving, ix);
2527 break;
2528 }
2529 }
2530 }
2531
2532 if (eventing == NULL)
2533 {
2534 /* We started with at least one moving thread. This thread must have
2535 either stopped or reached the end of its execution history.
2536
2537 In the former case, EVENTING must not be NULL.
2538 In the latter case, NO_HISTORY must not be empty. */
2539 gdb_assert (!VEC_empty (tp_t, no_history));
2540
2541 /* We kept threads moving at the end of their execution history. Stop
2542 EVENTING now that we are going to report its stop. */
2543 eventing = VEC_unordered_remove (tp_t, no_history, 0);
2544 eventing->btrace.flags &= ~BTHR_MOVE;
2545
2546 *status = btrace_step_no_history ();
2547 }
2548
2549 gdb_assert (eventing != NULL);
2550
2551 /* We kept threads replaying at the end of their execution history. Stop
2552 replaying EVENTING now that we are going to report its stop. */
2553 record_btrace_stop_replaying_at_end (eventing);
2554
2555 /* Stop all other threads. */
2556 if (!target_is_non_stop_p ())
2557 ALL_NON_EXITED_THREADS (tp)
2558 record_btrace_cancel_resume (tp);
2559
2560 /* In async mode, we need to announce further events. */
2561 if (target_is_async_p ())
2562 record_btrace_maybe_mark_async_event (moving, no_history);
2563
2564 /* Start record histories anew from the current position. */
2565 record_btrace_clear_histories (&eventing->btrace);
2566
2567 /* We moved the replay position but did not update registers. */
2568 registers_changed_ptid (eventing->ptid);
2569
2570 DEBUG ("wait ended by thread %s (%s): %s",
2571 print_thread_id (eventing),
2572 target_pid_to_str (eventing->ptid),
2573 target_waitstatus_to_string (status));
2574
2575 do_cleanups (cleanups);
2576 return eventing->ptid;
2577 }
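
/* For example, when reverse-continuing two threads where thread A
   runs out of history while thread B later hits a breakpoint, thread
   A is parked on the NO_HISTORY list and kept moving, thread B's stop
   is reported first, and thread A's end-of-history event is reported
   by a subsequent record_btrace_wait call once nothing else is
   pending.  */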
2578
2579 /* The to_stop method of target record-btrace. */
2580
2581 static void
2582 record_btrace_stop (struct target_ops *ops, ptid_t ptid)
2583 {
2584 DEBUG ("stop %s", target_pid_to_str (ptid));
2585
2586 /* As long as we're not replaying, just forward the request. */
2587 if ((execution_direction != EXEC_REVERSE)
2588 && !record_btrace_is_replaying (ops, minus_one_ptid))
2589 {
2590 ops = ops->beneath;
2591 ops->to_stop (ops, ptid);
2592 }
2593 else
2594 {
2595 struct thread_info *tp;
2596
2597 ALL_NON_EXITED_THREADS (tp)
2598 if (ptid_match (tp->ptid, ptid))
2599 {
2600 tp->btrace.flags &= ~BTHR_MOVE;
2601 tp->btrace.flags |= BTHR_STOP;
2602 }
2603 }
2604 }
2605
2606 /* The to_can_execute_reverse method of target record-btrace. */
2607
2608 static int
2609 record_btrace_can_execute_reverse (struct target_ops *self)
2610 {
2611 return 1;
2612 }
2613
2614 /* The to_stopped_by_sw_breakpoint method of target record-btrace. */
2615
2616 static int
2617 record_btrace_stopped_by_sw_breakpoint (struct target_ops *ops)
2618 {
2619 if (record_btrace_is_replaying (ops, minus_one_ptid))
2620 {
2621 struct thread_info *tp = inferior_thread ();
2622
2623 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2624 }
2625
2626 return ops->beneath->to_stopped_by_sw_breakpoint (ops->beneath);
2627 }
2628
2629 /* The to_supports_stopped_by_sw_breakpoint method of target
2630 record-btrace. */
2631
2632 static int
2633 record_btrace_supports_stopped_by_sw_breakpoint (struct target_ops *ops)
2634 {
2635 if (record_btrace_is_replaying (ops, minus_one_ptid))
2636 return 1;
2637
2638 return ops->beneath->to_supports_stopped_by_sw_breakpoint (ops->beneath);
2639 }
2640
2641 /* The to_stopped_by_hw_breakpoint method of target record-btrace. */
2642
2643 static int
2644 record_btrace_stopped_by_hw_breakpoint (struct target_ops *ops)
2645 {
2646 if (record_btrace_is_replaying (ops, minus_one_ptid))
2647 {
2648 struct thread_info *tp = inferior_thread ();
2649
2650 return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2651 }
2652
2653 return ops->beneath->to_stopped_by_hw_breakpoint (ops->beneath);
2654 }
2655
2656 /* The to_supports_stopped_by_hw_breakpoint method of target
2657 record-btrace. */
2658
2659 static int
2660 record_btrace_supports_stopped_by_hw_breakpoint (struct target_ops *ops)
2661 {
2662 if (record_btrace_is_replaying (ops, minus_one_ptid))
2663 return 1;
2664
2665 return ops->beneath->to_supports_stopped_by_hw_breakpoint (ops->beneath);
2666 }
2667
2668 /* The to_update_thread_list method of target record-btrace. */
2669
2670 static void
2671 record_btrace_update_thread_list (struct target_ops *ops)
2672 {
2673 /* We don't add or remove threads during replay. */
2674 if (record_btrace_is_replaying (ops, minus_one_ptid))
2675 return;
2676
2677 /* Forward the request. */
2678 ops = ops->beneath;
2679 ops->to_update_thread_list (ops);
2680 }
2681
2682 /* The to_thread_alive method of target record-btrace. */
2683
2684 static int
2685 record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
2686 {
2687 /* We don't add or remove threads during replay. */
2688 if (record_btrace_is_replaying (ops, minus_one_ptid))
2689 return find_thread_ptid (ptid) != NULL;
2690
2691 /* Forward the request. */
2692 ops = ops->beneath;
2693 return ops->to_thread_alive (ops, ptid);
2694 }
2695
2696 /* Set the replay branch trace instruction iterator. If IT is NULL, replay
2697 is stopped. */
2698
2699 static void
2700 record_btrace_set_replay (struct thread_info *tp,
2701 const struct btrace_insn_iterator *it)
2702 {
2703 struct btrace_thread_info *btinfo;
2704
2705 btinfo = &tp->btrace;
2706
2707 if (it == NULL)
2708 record_btrace_stop_replaying (tp);
2709 else
2710 {
2711 if (btinfo->replay == NULL)
2712 record_btrace_start_replaying (tp);
2713 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
2714 return;
2715
2716 *btinfo->replay = *it;
2717 registers_changed_ptid (tp->ptid);
2718 }
2719
2720 /* Start anew from the new replay position. */
2721 record_btrace_clear_histories (btinfo);
2722
2723 stop_pc = regcache_read_pc (get_current_regcache ());
2724 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
2725 }
2726
2727 /* The to_goto_record_begin method of target record-btrace. */
2728
2729 static void
2730 record_btrace_goto_begin (struct target_ops *self)
2731 {
2732 struct thread_info *tp;
2733 struct btrace_insn_iterator begin;
2734
2735 tp = require_btrace_thread ();
2736
2737 btrace_insn_begin (&begin, &tp->btrace);
2738
2739 /* Skip gaps at the beginning of the trace. */
2740 while (btrace_insn_get (&begin) == NULL)
2741 {
2742 unsigned int steps;
2743
2744 steps = btrace_insn_next (&begin, 1);
2745 if (steps == 0)
2746 error (_("No trace."));
2747 }
2748
2749 record_btrace_set_replay (tp, &begin);
2750 }
2751
2752 /* The to_goto_record_end method of target record-btrace. */
2753
2754 static void
2755 record_btrace_goto_end (struct target_ops *ops)
2756 {
2757 struct thread_info *tp;
2758
2759 tp = require_btrace_thread ();
2760
2761 record_btrace_set_replay (tp, NULL);
2762 }
2763
2764 /* The to_goto_record method of target record-btrace. */
2765
2766 static void
2767 record_btrace_goto (struct target_ops *self, ULONGEST insn)
2768 {
2769 struct thread_info *tp;
2770 struct btrace_insn_iterator it;
2771 unsigned int number;
2772 int found;
2773
2774 number = insn;
2775
2776 /* Check for wrap-arounds. */
2777 if (number != insn)
2778 error (_("Instruction number out of range."));
2779
2780 tp = require_btrace_thread ();
2781
2782 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
2783
2784 /* Check if the instruction could not be found or is a gap. */
2785 if (found == 0 || btrace_insn_get (&it) == NULL)
2786 error (_("No such instruction."));
2787
2788 record_btrace_set_replay (tp, &it);
2789 }
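
/* For example, the three goto methods above back the CLI commands

     (gdb) record goto begin    (oldest recorded instruction)
     (gdb) record goto 42       (instruction number 42, as shown by
                                 "record instruction-history")
     (gdb) record goto end      (back to the current position)  */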
2790
2791 /* The to_record_stop_replaying method of target record-btrace. */
2792
2793 static void
2794 record_btrace_stop_replaying_all (struct target_ops *self)
2795 {
2796 struct thread_info *tp;
2797
2798 ALL_NON_EXITED_THREADS (tp)
2799 record_btrace_stop_replaying (tp);
2800 }
2801
2802 /* The to_execution_direction target method. */
2803
2804 static enum exec_direction_kind
2805 record_btrace_execution_direction (struct target_ops *self)
2806 {
2807 return record_btrace_resume_exec_dir;
2808 }
2809
2810 /* The to_prepare_to_generate_core target method. */
2811
2812 static void
2813 record_btrace_prepare_to_generate_core (struct target_ops *self)
2814 {
2815 record_btrace_generating_corefile = 1;
2816 }
2817
2818 /* The to_done_generating_core target method. */
2819
2820 static void
2821 record_btrace_done_generating_core (struct target_ops *self)
2822 {
2823 record_btrace_generating_corefile = 0;
2824 }
2825
2826 /* Initialize the record-btrace target ops. */
2827
2828 static void
2829 init_record_btrace_ops (void)
2830 {
2831 struct target_ops *ops;
2832
2833 ops = &record_btrace_ops;
2834 ops->to_shortname = "record-btrace";
2835 ops->to_longname = "Branch tracing target";
2836 ops->to_doc = "Collect control-flow trace and provide the execution history.";
2837 ops->to_open = record_btrace_open;
2838 ops->to_close = record_btrace_close;
2839 ops->to_async = record_btrace_async;
2840 ops->to_detach = record_detach;
2841 ops->to_disconnect = record_btrace_disconnect;
2842 ops->to_mourn_inferior = record_mourn_inferior;
2843 ops->to_kill = record_kill;
2844 ops->to_stop_recording = record_btrace_stop_recording;
2845 ops->to_info_record = record_btrace_info;
2846 ops->to_insn_history = record_btrace_insn_history;
2847 ops->to_insn_history_from = record_btrace_insn_history_from;
2848 ops->to_insn_history_range = record_btrace_insn_history_range;
2849 ops->to_call_history = record_btrace_call_history;
2850 ops->to_call_history_from = record_btrace_call_history_from;
2851 ops->to_call_history_range = record_btrace_call_history_range;
2852 ops->to_record_method = record_btrace_record_method;
2853 ops->to_record_is_replaying = record_btrace_is_replaying;
2854 ops->to_record_will_replay = record_btrace_will_replay;
2855 ops->to_record_stop_replaying = record_btrace_stop_replaying_all;
2856 ops->to_xfer_partial = record_btrace_xfer_partial;
2857 ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
2858 ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
2859 ops->to_fetch_registers = record_btrace_fetch_registers;
2860 ops->to_store_registers = record_btrace_store_registers;
2861 ops->to_prepare_to_store = record_btrace_prepare_to_store;
2862 ops->to_get_unwinder = &record_btrace_to_get_unwinder;
2863 ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;
2864 ops->to_resume = record_btrace_resume;
2865 ops->to_commit_resume = record_btrace_commit_resume;
2866 ops->to_wait = record_btrace_wait;
2867 ops->to_stop = record_btrace_stop;
2868 ops->to_update_thread_list = record_btrace_update_thread_list;
2869 ops->to_thread_alive = record_btrace_thread_alive;
2870 ops->to_goto_record_begin = record_btrace_goto_begin;
2871 ops->to_goto_record_end = record_btrace_goto_end;
2872 ops->to_goto_record = record_btrace_goto;
2873 ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
2874 ops->to_stopped_by_sw_breakpoint = record_btrace_stopped_by_sw_breakpoint;
2875 ops->to_supports_stopped_by_sw_breakpoint
2876 = record_btrace_supports_stopped_by_sw_breakpoint;
2877 ops->to_stopped_by_hw_breakpoint = record_btrace_stopped_by_hw_breakpoint;
2878 ops->to_supports_stopped_by_hw_breakpoint
2879 = record_btrace_supports_stopped_by_hw_breakpoint;
2880 ops->to_execution_direction = record_btrace_execution_direction;
2881 ops->to_prepare_to_generate_core = record_btrace_prepare_to_generate_core;
2882 ops->to_done_generating_core = record_btrace_done_generating_core;
2883 ops->to_stratum = record_stratum;
2884 ops->to_magic = OPS_MAGIC;
2885 }
2886
2887 /* Start recording in BTS format. */
2888
2889 static void
2890 cmd_record_btrace_bts_start (char *args, int from_tty)
2891 {
2892 if (args != NULL && *args != 0)
2893 error (_("Invalid argument."));
2894
2895 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2896
2897 TRY
2898 {
2899 execute_command ((char *) "target record-btrace", from_tty);
2900 }
2901 CATCH (exception, RETURN_MASK_ALL)
2902 {
2903 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2904 throw_exception (exception);
2905 }
2906 END_CATCH
2907 }
2908
2909 /* Start recording in Intel Processor Trace format. */
2910
2911 static void
2912 cmd_record_btrace_pt_start (char *args, int from_tty)
2913 {
2914 if (args != NULL && *args != 0)
2915 error (_("Invalid argument."));
2916
2917 record_btrace_conf.format = BTRACE_FORMAT_PT;
2918
2919 TRY
2920 {
2921 execute_command ((char *) "target record-btrace", from_tty);
2922 }
2923 CATCH (exception, RETURN_MASK_ALL)
2924 {
2925 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2926 throw_exception (exception);
2927 }
2928 END_CATCH
2929 }
2930
2931 /* The "record btrace" command.  Start recording, trying Intel Processor Trace format first and falling back to BTS. */
2932
2933 static void
2934 cmd_record_btrace_start (char *args, int from_tty)
2935 {
2936 if (args != NULL && *args != 0)
2937 error (_("Invalid argument."));
2938
2939 record_btrace_conf.format = BTRACE_FORMAT_PT;
2940
2941 TRY
2942 {
2943 execute_command ((char *) "target record-btrace", from_tty);
2944 }
2945 CATCH (exception, RETURN_MASK_ALL)
2946 {
2947 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2948
2949 TRY
2950 {
2951 execute_command ((char *) "target record-btrace", from_tty);
2952 }
2953 CATCH (exception, RETURN_MASK_ALL)
2954 {
2955 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2956 throw_exception (exception);
2957 }
2958 END_CATCH
2959 }
2960 END_CATCH
2961 }
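
/* Plain "record btrace" therefore behaves as if the user had typed
   "record btrace pt" and, on failure, "record btrace bts", erroring
   out only if both formats are unavailable.  */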
2962
2963 /* The "set record btrace" command. */
2964
2965 static void
2966 cmd_set_record_btrace (char *args, int from_tty)
2967 {
2968 cmd_show_list (set_record_btrace_cmdlist, from_tty, "");
2969 }
2970
2971 /* The "show record btrace" command. */
2972
2973 static void
2974 cmd_show_record_btrace (char *args, int from_tty)
2975 {
2976 cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
2977 }
2978
2979 /* The "show record btrace replay-memory-access" command. */
2980
2981 static void
2982 cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2983 struct cmd_list_element *c, const char *value)
2984 {
2985 fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
2986 replay_memory_access);
2987 }
2988
2989 /* The "set record btrace bts" command. */
2990
2991 static void
2992 cmd_set_record_btrace_bts (char *args, int from_tty)
2993 {
2994 printf_unfiltered (_("\"set record btrace bts\" must be followed "
2995 "by an appropriate subcommand.\n"));
2996 help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
2997 all_commands, gdb_stdout);
2998 }
2999
3000 /* The "show record btrace bts" command. */
3001
3002 static void
3003 cmd_show_record_btrace_bts (char *args, int from_tty)
3004 {
3005 cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
3006 }
3007
3008 /* The "set record btrace pt" command. */
3009
3010 static void
3011 cmd_set_record_btrace_pt (char *args, int from_tty)
3012 {
3013 printf_unfiltered (_("\"set record btrace pt\" must be followed "
3014 "by an appropriate subcommand.\n"));
3015 help_list (set_record_btrace_pt_cmdlist, "set record btrace pt ",
3016 all_commands, gdb_stdout);
3017 }
3018
3019 /* The "show record btrace pt" command. */
3020
3021 static void
3022 cmd_show_record_btrace_pt (char *args, int from_tty)
3023 {
3024 cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, "");
3025 }
3026
3027 /* The "record bts buffer-size" show value function. */
3028
3029 static void
3030 show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
3031 struct cmd_list_element *c,
3032 const char *value)
3033 {
3034 fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
3035 value);
3036 }
3037
3038 /* The "record pt buffer-size" show value function. */
3039
3040 static void
3041 show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
3042 struct cmd_list_element *c,
3043 const char *value)
3044 {
3045 fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
3046 value);
3047 }
3048
3049 void _initialize_record_btrace (void);
3050
3051 /* Initialize btrace commands. */
3052
3053 void
3054 _initialize_record_btrace (void)
3055 {
3056 add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
3057 _("Start branch trace recording."), &record_btrace_cmdlist,
3058 "record btrace ", 0, &record_cmdlist);
3059 add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
3060
3061 add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
3062 _("\
3063 Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
3064 The processor stores a from/to record for each branch into a cyclic buffer.\n\
3065 This format may not be available on all processors."),
3066 &record_btrace_cmdlist);
3067 add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);
3068
3069 add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
3070 _("\
3071 Start branch trace recording in Intel Processor Trace format.\n\n\
3072 This format may not be available on all processors."),
3073 &record_btrace_cmdlist);
3074 add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);
3075
3076 add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
3077 _("Set record options"), &set_record_btrace_cmdlist,
3078 "set record btrace ", 0, &set_record_cmdlist);
3079
3080 add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
3081 _("Show record options"), &show_record_btrace_cmdlist,
3082 "show record btrace ", 0, &show_record_cmdlist);
3083
3084 add_setshow_enum_cmd ("replay-memory-access", no_class,
3085 replay_memory_access_types, &replay_memory_access, _("\
3086 Set what memory accesses are allowed during replay."), _("\
3087 Show what memory accesses are allowed during replay."),
3088 _("Default is READ-ONLY.\n\n\
3089 The btrace record target does not trace data.\n\
3090 The memory therefore corresponds to the live target and not \
3091 to the current replay position.\n\n\
3092 When READ-ONLY, allow accesses to read-only memory during replay.\n\
3093 When READ-WRITE, allow accesses to read-only and read-write memory during \
3094 replay."),
3095 NULL, cmd_show_replay_memory_access,
3096 &set_record_btrace_cmdlist,
3097 &show_record_btrace_cmdlist);
3098
3099 add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
3100 _("Set record btrace bts options"),
3101 &set_record_btrace_bts_cmdlist,
3102 "set record btrace bts ", 0, &set_record_btrace_cmdlist);
3103
3104 add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
3105 _("Show record btrace bts options"),
3106 &show_record_btrace_bts_cmdlist,
3107 "show record btrace bts ", 0, &show_record_btrace_cmdlist);
3108
3109 add_setshow_uinteger_cmd ("buffer-size", no_class,
3110 &record_btrace_conf.bts.size,
3111 _("Set the record/replay bts buffer size."),
3112 _("Show the record/replay bts buffer size."), _("\
3113 When starting recording, request a trace buffer of this size. \
3114 The actual buffer size may differ from the requested size. \
3115 Use \"info record\" to see the actual buffer size.\n\n\
3116 Bigger buffers allow longer recording but also take more time to process \
3117 the recorded execution trace.\n\n\
3118 The trace buffer size may not be changed while recording."), NULL,
3119 show_record_bts_buffer_size_value,
3120 &set_record_btrace_bts_cmdlist,
3121 &show_record_btrace_bts_cmdlist);
3122
3123 add_prefix_cmd ("pt", class_support, cmd_set_record_btrace_pt,
3124 _("Set record btrace pt options"),
3125 &set_record_btrace_pt_cmdlist,
3126 "set record btrace pt ", 0, &set_record_btrace_cmdlist);
3127
3128 add_prefix_cmd ("pt", class_support, cmd_show_record_btrace_pt,
3129 _("Show record btrace pt options"),
3130 &show_record_btrace_pt_cmdlist,
3131 "show record btrace pt ", 0, &show_record_btrace_cmdlist);
3132
3133 add_setshow_uinteger_cmd ("buffer-size", no_class,
3134 &record_btrace_conf.pt.size,
3135 _("Set the record/replay pt buffer size."),
3136 _("Show the record/replay pt buffer size."), _("\
3137 Bigger buffers allow longer recording but also take more time to process \
3138 the recorded execution.\n\
3139 The actual buffer size may differ from the requested size. Use \"info record\" \
3140 to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
3141 &set_record_btrace_pt_cmdlist,
3142 &show_record_btrace_pt_cmdlist);
3143
3144 init_record_btrace_ops ();
3145 add_target (&record_btrace_ops);
3146
3147 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
3148 xcalloc, xfree);
3149
3150 record_btrace_conf.bts.size = 64 * 1024;
3151 record_btrace_conf.pt.size = 16 * 1024;
3152 }