gdb/record-btrace.c
/* Branch trace support for GDB, the GNU debugger.

   Copyright (C) 2013-2018 Free Software Foundation, Inc.

   Contributed by Intel Corp. <markus.t.metzger@intel.com>

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "record.h"
#include "record-btrace.h"
#include "gdbthread.h"
#include "target.h"
#include "gdbcmd.h"
#include "disasm.h"
#include "observer.h"
#include "cli/cli-utils.h"
#include "source.h"
#include "ui-out.h"
#include "symtab.h"
#include "filenames.h"
#include "regcache.h"
#include "frame-unwind.h"
#include "hashtab.h"
#include "infrun.h"
#include "event-loop.h"
#include "inf-loop.h"
#include "vec.h"
#include <algorithm>

/* The target_ops of record-btrace.  */
static struct target_ops record_btrace_ops;

/* A new thread observer enabling branch tracing for the new thread.  */
static struct observer *record_btrace_thread_observer;

/* Memory access types used in set/show record btrace replay-memory-access.  */
static const char replay_memory_access_read_only[] = "read-only";
static const char replay_memory_access_read_write[] = "read-write";
static const char *const replay_memory_access_types[] =
{
  replay_memory_access_read_only,
  replay_memory_access_read_write,
  NULL
};

/* The currently allowed replay memory access type.  */
static const char *replay_memory_access = replay_memory_access_read_only;

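/* The access type can be switched at runtime, for example:

     (gdb) set record btrace replay-memory-access read-write  */
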
/* Command lists for "set/show record btrace".  */
static struct cmd_list_element *set_record_btrace_cmdlist;
static struct cmd_list_element *show_record_btrace_cmdlist;

/* The execution direction of the last resume we got.  See record-full.c.  */
static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;

/* The async event handler for reverse/replay execution.  */
static struct async_event_handler *record_btrace_async_inferior_event_handler;

/* A flag indicating that we are currently generating a core file.  */
static int record_btrace_generating_corefile;

/* The current branch trace configuration.  */
static struct btrace_config record_btrace_conf;

/* Command list for "record btrace".  */
static struct cmd_list_element *record_btrace_cmdlist;

/* Command lists for "set/show record btrace bts".  */
static struct cmd_list_element *set_record_btrace_bts_cmdlist;
static struct cmd_list_element *show_record_btrace_bts_cmdlist;

/* Command lists for "set/show record btrace pt".  */
static struct cmd_list_element *set_record_btrace_pt_cmdlist;
static struct cmd_list_element *show_record_btrace_pt_cmdlist;

/* Print a record-btrace debug message.  Use do ... while (0) to avoid
   ambiguities when used in if statements.  */

#define DEBUG(msg, args...) \
  do \
    { \
      if (record_debug != 0) \
        fprintf_unfiltered (gdb_stdlog, \
                            "[record-btrace] " msg "\n", ##args); \
    } \
  while (0)

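/* A typical use, matching the calls throughout this file, expands into an
   fprintf_unfiltered call on gdb_stdlog guarded by record_debug:

     DEBUG ("resume %s: step", target_pid_to_str (ptid));  */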

/* Update the branch trace for the current thread and return a pointer to its
   thread_info.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct thread_info *
require_btrace_thread (void)
{
  struct thread_info *tp;

  DEBUG ("require");

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  validate_registers_access ();

  btrace_fetch (tp);

  if (btrace_is_empty (tp))
    error (_("No trace."));

  return tp;
}

/* Update the branch trace for the current thread and return a pointer to its
   branch trace information struct.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct btrace_thread_info *
require_btrace (void)
{
  struct thread_info *tp;

  tp = require_btrace_thread ();

  return &tp->btrace;
}

/* Enable branch tracing for one thread.  Warn on errors.  */

static void
record_btrace_enable_warn (struct thread_info *tp)
{
  TRY
    {
      btrace_enable (tp, &record_btrace_conf);
    }
  CATCH (error, RETURN_MASK_ERROR)
    {
      warning ("%s", error.message);
    }
  END_CATCH
}

/* Callback function to disable branch tracing for one thread.  */

static void
record_btrace_disable_callback (void *arg)
{
  struct thread_info *tp = (struct thread_info *) arg;

  btrace_disable (tp);
}

/* Enable automatic tracing of new threads.  */

static void
record_btrace_auto_enable (void)
{
  DEBUG ("attach thread observer");

  record_btrace_thread_observer
    = observer_attach_new_thread (record_btrace_enable_warn);
}

/* Disable automatic tracing of new threads.  */

static void
record_btrace_auto_disable (void)
{
  /* The observer may already have been detached.  */
  if (record_btrace_thread_observer == NULL)
    return;

  DEBUG ("detach thread observer");

  observer_detach_new_thread (record_btrace_thread_observer);
  record_btrace_thread_observer = NULL;
}

/* The record-btrace async event handler function.  */

static void
record_btrace_handle_async_inferior_event (gdb_client_data data)
{
  inferior_event_handler (INF_REG_EVENT, NULL);
}

/* See record-btrace.h.  */

void
record_btrace_push_target (void)
{
  const char *format;

  record_btrace_auto_enable ();

  push_target (&record_btrace_ops);

  record_btrace_async_inferior_event_handler
    = create_async_event_handler (record_btrace_handle_async_inferior_event,
                                  NULL);
  record_btrace_generating_corefile = 0;

  format = btrace_format_short_string (record_btrace_conf.format);
  observer_notify_record_changed (current_inferior (), 1, "btrace", format);
}

/* The to_open method of target record-btrace.  */

static void
record_btrace_open (const char *args, int from_tty)
{
  struct cleanup *disable_chain;
  struct thread_info *tp;

  DEBUG ("open");

  record_preopen ();

  if (!target_has_execution)
    error (_("The program is not being run."));

  gdb_assert (record_btrace_thread_observer == NULL);

  disable_chain = make_cleanup (null_cleanup, NULL);
  ALL_NON_EXITED_THREADS (tp)
    if (args == NULL || *args == 0 || number_is_in_list (args, tp->global_num))
      {
        btrace_enable (tp, &record_btrace_conf);

        make_cleanup (record_btrace_disable_callback, tp);
      }

  record_btrace_push_target ();

  discard_cleanups (disable_chain);
}

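/* If ARGS is empty, record_btrace_open enables tracing for every non-exited
   thread; otherwise ARGS is parsed as a number list (e.g. "1 3-5") and only
   threads whose global number is in the list are traced.  */
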
/* The to_stop_recording method of target record-btrace.  */

static void
record_btrace_stop_recording (struct target_ops *self)
{
  struct thread_info *tp;

  DEBUG ("stop recording");

  record_btrace_auto_disable ();

  ALL_NON_EXITED_THREADS (tp)
    if (tp->btrace.target != NULL)
      btrace_disable (tp);
}

/* The to_disconnect method of target record-btrace.  */

static void
record_btrace_disconnect (struct target_ops *self, const char *args,
                          int from_tty)
{
  struct target_ops *beneath = self->beneath;

  /* Do not stop recording, just clean up GDB side.  */
  unpush_target (self);

  /* Forward disconnect.  */
  beneath->to_disconnect (beneath, args, from_tty);
}

/* The to_close method of target record-btrace.  */

static void
record_btrace_close (struct target_ops *self)
{
  struct thread_info *tp;

  if (record_btrace_async_inferior_event_handler != NULL)
    delete_async_event_handler (&record_btrace_async_inferior_event_handler);

  /* Make sure automatic recording gets disabled even if we did not stop
     recording before closing the record-btrace target.  */
  record_btrace_auto_disable ();

  /* We should have already stopped recording.
     Tear down btrace in case we have not.  */
  ALL_NON_EXITED_THREADS (tp)
    btrace_teardown (tp);
}

/* The to_async method of target record-btrace.  */

static void
record_btrace_async (struct target_ops *ops, int enable)
{
  if (enable)
    mark_async_event_handler (record_btrace_async_inferior_event_handler);
  else
    clear_async_event_handler (record_btrace_async_inferior_event_handler);

  ops->beneath->to_async (ops->beneath, enable);
}

/* Adjusts the size and returns a human-readable size suffix.  */

static const char *
record_btrace_adjust_size (unsigned int *size)
{
  unsigned int sz;

  sz = *size;

  if ((sz & ((1u << 30) - 1)) == 0)
    {
      *size = sz >> 30;
      return "GB";
    }
  else if ((sz & ((1u << 20) - 1)) == 0)
    {
      *size = sz >> 20;
      return "MB";
    }
  else if ((sz & ((1u << 10) - 1)) == 0)
    {
      *size = sz >> 10;
      return "kB";
    }
  else
    return "";
}

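/* For example, a size of 16384 is adjusted to 16 with suffix "kB", and
   1048576 to 1 with suffix "MB"; a size that is not an exact multiple of
   1024 is left unchanged and gets an empty suffix.  */
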
/* Print a BTS configuration.  */

static void
record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
{
  const char *suffix;
  unsigned int size;

  size = conf->size;
  if (size > 0)
    {
      suffix = record_btrace_adjust_size (&size);
      printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
    }
}

/* Print an Intel Processor Trace configuration.  */

static void
record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
{
  const char *suffix;
  unsigned int size;

  size = conf->size;
  if (size > 0)
    {
      suffix = record_btrace_adjust_size (&size);
      printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
    }
}

/* Print a branch tracing configuration.  */

static void
record_btrace_print_conf (const struct btrace_config *conf)
{
  printf_unfiltered (_("Recording format: %s.\n"),
                     btrace_format_string (conf->format));

  switch (conf->format)
    {
    case BTRACE_FORMAT_NONE:
      return;

    case BTRACE_FORMAT_BTS:
      record_btrace_print_bts_conf (&conf->bts);
      return;

    case BTRACE_FORMAT_PT:
      record_btrace_print_pt_conf (&conf->pt);
      return;
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
}

/* The to_info_record method of target record-btrace.  */

static void
record_btrace_info (struct target_ops *self)
{
  struct btrace_thread_info *btinfo;
  const struct btrace_config *conf;
  struct thread_info *tp;
  unsigned int insns, calls, gaps;

  DEBUG ("info");

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  validate_registers_access ();

  btinfo = &tp->btrace;

  conf = btrace_conf (btinfo);
  if (conf != NULL)
    record_btrace_print_conf (conf);

  btrace_fetch (tp);

  insns = 0;
  calls = 0;
  gaps = 0;

  if (!btrace_is_empty (tp))
    {
      struct btrace_call_iterator call;
      struct btrace_insn_iterator insn;

      btrace_call_end (&call, btinfo);
      btrace_call_prev (&call, 1);
      calls = btrace_call_number (&call);

      btrace_insn_end (&insn, btinfo);
      insns = btrace_insn_number (&insn);

      /* If the last instruction is not a gap, it is the current instruction
         that is not actually part of the record.  */
      if (btrace_insn_get (&insn) != NULL)
        insns -= 1;

      gaps = btinfo->ngaps;
    }

  printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
                       "for thread %s (%s).\n"), insns, calls, gaps,
                     print_thread_id (tp), target_pid_to_str (tp->ptid));

  if (btrace_is_replaying (tp))
    printf_unfiltered (_("Replay in progress.  At instruction %u.\n"),
                       btrace_insn_number (btinfo->replay));
}

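/* An illustrative "info record" line produced by the above (the numbers
   are made up):

     Recorded 8235 instructions in 157 functions (0 gaps) for thread 1
     (process 1234).  */
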
/* Print a decode error.  */

static void
btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
                            enum btrace_format format)
{
  const char *errstr = btrace_decode_error (format, errcode);

  uiout->text (_("["));
  /* ERRCODE > 0 indicates notifications on BTRACE_FORMAT_PT.  */
  if (!(format == BTRACE_FORMAT_PT && errcode > 0))
    {
      uiout->text (_("decode error ("));
      uiout->field_int ("errcode", errcode);
      uiout->text (_("): "));
    }
  uiout->text (errstr);
  uiout->text (_("]\n"));
}

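/* For BTRACE_FORMAT_PT, a positive ERRCODE denotes a notification and only
   the message text is printed; any other code renders as, e.g.,
   "[decode error (-1): <message>]".  */
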
/* Print an unsigned int.  */

static void
ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
{
  uiout->field_fmt (fld, "%u", val);
}

/* A range of source lines.  */

struct btrace_line_range
{
  /* The symtab this line is from.  */
  struct symtab *symtab;

  /* The first line (inclusive).  */
  int begin;

  /* The last line (exclusive).  */
  int end;
};

/* Construct a line range.  */

static struct btrace_line_range
btrace_mk_line_range (struct symtab *symtab, int begin, int end)
{
  struct btrace_line_range range;

  range.symtab = symtab;
  range.begin = begin;
  range.end = end;

  return range;
}

/* Add a line to a line range.  */

static struct btrace_line_range
btrace_line_range_add (struct btrace_line_range range, int line)
{
  if (range.end <= range.begin)
    {
      /* This is the first entry.  */
      range.begin = line;
      range.end = line + 1;
    }
  else if (line < range.begin)
    range.begin = line;
  else if (range.end < line)
    range.end = line;

  return range;
}

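/* For example, adding line 42 to an empty range yields [42, 43); adding
   line 40 to that range then extends it to [40, 43).  */
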
/* Return non-zero if RANGE is empty, zero otherwise.  */

static int
btrace_line_range_is_empty (struct btrace_line_range range)
{
  return range.end <= range.begin;
}

/* Return non-zero if LHS contains RHS, zero otherwise.  */

static int
btrace_line_range_contains_range (struct btrace_line_range lhs,
                                  struct btrace_line_range rhs)
{
  return ((lhs.symtab == rhs.symtab)
          && (lhs.begin <= rhs.begin)
          && (rhs.end <= lhs.end));
}

/* Find the line range associated with PC.  */

static struct btrace_line_range
btrace_find_line_range (CORE_ADDR pc)
{
  struct btrace_line_range range;
  struct linetable_entry *lines;
  struct linetable *ltable;
  struct symtab *symtab;
  int nlines, i;

  symtab = find_pc_line_symtab (pc);
  if (symtab == NULL)
    return btrace_mk_line_range (NULL, 0, 0);

  ltable = SYMTAB_LINETABLE (symtab);
  if (ltable == NULL)
    return btrace_mk_line_range (symtab, 0, 0);

  nlines = ltable->nitems;
  lines = ltable->item;
  if (nlines <= 0)
    return btrace_mk_line_range (symtab, 0, 0);

  range = btrace_mk_line_range (symtab, 0, 0);
  for (i = 0; i < nlines - 1; i++)
    {
      if ((lines[i].pc == pc) && (lines[i].line != 0))
        range = btrace_line_range_add (range, lines[i].line);
    }

  return range;
}

/* Print source lines in LINES to UIOUT.

   UI_ITEM_CHAIN is a cleanup chain for the last source line and the
   instructions corresponding to that source line.  When printing a new source
   line, we do the cleanups for the open chain and open a new cleanup chain for
   the new source line.  If the source line range in LINES is not empty, this
   function will leave the cleanup chain for the last printed source line open
   so instructions can be added to it.  */

static void
btrace_print_lines (struct btrace_line_range lines, struct ui_out *uiout,
                    struct cleanup **ui_item_chain, int flags)
{
  print_source_lines_flags psl_flags;
  int line;

  psl_flags = 0;
  if (flags & DISASSEMBLY_FILENAME)
    psl_flags |= PRINT_SOURCE_LINES_FILENAME;

  for (line = lines.begin; line < lines.end; ++line)
    {
      if (*ui_item_chain != NULL)
        do_cleanups (*ui_item_chain);

      *ui_item_chain
        = make_cleanup_ui_out_tuple_begin_end (uiout, "src_and_asm_line");

      print_source_lines (lines.symtab, line, line + 1, psl_flags);

      make_cleanup_ui_out_list_begin_end (uiout, "line_asm_insn");
    }
}

/* Disassemble a section of the recorded instruction trace.  */

static void
btrace_insn_history (struct ui_out *uiout,
                     const struct btrace_thread_info *btinfo,
                     const struct btrace_insn_iterator *begin,
                     const struct btrace_insn_iterator *end,
                     gdb_disassembly_flags flags)
{
  struct cleanup *cleanups, *ui_item_chain;
  struct gdbarch *gdbarch;
  struct btrace_insn_iterator it;
  struct btrace_line_range last_lines;

  DEBUG ("itrace (0x%x): [%u; %u)", (unsigned) flags,
         btrace_insn_number (begin), btrace_insn_number (end));

  flags |= DISASSEMBLY_SPECULATIVE;

  gdbarch = target_gdbarch ();
  last_lines = btrace_mk_line_range (NULL, 0, 0);

  cleanups = make_cleanup_ui_out_list_begin_end (uiout, "asm_insns");

  /* UI_ITEM_CHAIN is a cleanup chain for the last source line and the
     instructions corresponding to that line.  */
  ui_item_chain = NULL;

  gdb_pretty_print_disassembler disasm (gdbarch);

  for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
    {
      const struct btrace_insn *insn;

      insn = btrace_insn_get (&it);

      /* A NULL instruction indicates a gap in the trace.  */
      if (insn == NULL)
        {
          const struct btrace_config *conf;

          conf = btrace_conf (btinfo);

          /* We have trace so we must have a configuration.  */
          gdb_assert (conf != NULL);

          uiout->field_fmt ("insn-number", "%u",
                            btrace_insn_number (&it));
          uiout->text ("\t");

          btrace_ui_out_decode_error (uiout, btrace_insn_get_error (&it),
                                      conf->format);
        }
      else
        {
          struct disasm_insn dinsn;

          if ((flags & DISASSEMBLY_SOURCE) != 0)
            {
              struct btrace_line_range lines;

              lines = btrace_find_line_range (insn->pc);
              if (!btrace_line_range_is_empty (lines)
                  && !btrace_line_range_contains_range (last_lines, lines))
                {
                  btrace_print_lines (lines, uiout, &ui_item_chain, flags);
                  last_lines = lines;
                }
              else if (ui_item_chain == NULL)
                {
                  ui_item_chain
                    = make_cleanup_ui_out_tuple_begin_end (uiout,
                                                           "src_and_asm_line");
                  /* No source information.  */
                  make_cleanup_ui_out_list_begin_end (uiout, "line_asm_insn");
                }

              gdb_assert (ui_item_chain != NULL);
            }

          memset (&dinsn, 0, sizeof (dinsn));
          dinsn.number = btrace_insn_number (&it);
          dinsn.addr = insn->pc;

          if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
            dinsn.is_speculative = 1;

          disasm.pretty_print_insn (uiout, &dinsn, flags);
        }
    }

  do_cleanups (cleanups);
}

/* The to_insn_history method of target record-btrace.  */

static void
record_btrace_insn_history (struct target_ops *self, int size,
                            gdb_disassembly_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  btinfo = require_btrace ();
  history = btinfo->insn_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("insn-history (0x%x): %d", (unsigned) flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
         start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
        begin = *replay;
      else
        btrace_insn_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
         expand in the other direction, as well, to fill up any remaining
         context.  */
      end = begin;
      if (size < 0)
        {
          /* We want the current position covered, as well.  */
          covered = btrace_insn_next (&end, 1);
          covered += btrace_insn_prev (&begin, context - covered);
          covered += btrace_insn_next (&end, context - covered);
        }
      else
        {
          covered = btrace_insn_next (&end, context);
          covered += btrace_insn_prev (&begin, context - covered);
        }
    }
  else
    {
      begin = history->begin;
      end = history->end;

      DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", (unsigned) flags, size,
             btrace_insn_number (&begin), btrace_insn_number (&end));

      if (size < 0)
        {
          end = begin;
          covered = btrace_insn_prev (&begin, context);
        }
      else
        {
          begin = end;
          covered = btrace_insn_next (&end, context);
        }
    }

  if (covered > 0)
    btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
        printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
        printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  btrace_set_insn_history (btinfo, &begin, &end);
}

/* The to_insn_history_range method of target record-btrace.  */

static void
record_btrace_insn_history_range (struct target_ops *self,
                                  ULONGEST from, ULONGEST to,
                                  gdb_disassembly_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_iterator begin, end;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "insn history");
  low = from;
  high = to;

  DEBUG ("insn-history (0x%x): [%u; %u)", (unsigned) flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_insn_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_insn_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_insn_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_insn_next (&end, 1);
    }

  btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_insn_history (btinfo, &begin, &end);
}

/* The to_insn_history_from method of target record-btrace.  */

static void
record_btrace_insn_history_from (struct target_ops *self,
                                 ULONGEST from, int size,
                                 gdb_disassembly_flags flags)
{
  ULONGEST begin, end, context;

  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  if (size < 0)
    {
      end = from;

      if (from < context)
        begin = 0;
      else
        begin = from - context + 1;
    }
  else
    {
      begin = from;
      end = from + context - 1;

      /* Check for wrap-around.  */
      if (end < begin)
        end = ULONGEST_MAX;
    }

  record_btrace_insn_history_range (self, begin, end, flags);
}

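/* For example, FROM = 10 with SIZE = -3 shows instructions 8 through 10,
   while SIZE = 3 shows 10 through 12; END is clamped to ULONGEST_MAX if
   the addition wraps around.  */
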
/* Print the instruction number range for a function call history line.  */

static void
btrace_call_history_insn_range (struct ui_out *uiout,
                                const struct btrace_function *bfun)
{
  unsigned int begin, end, size;

  size = bfun->insn.size ();
  gdb_assert (size > 0);

  begin = bfun->insn_offset;
  end = begin + size - 1;

  ui_out_field_uint (uiout, "insn begin", begin);
  uiout->text (",");
  ui_out_field_uint (uiout, "insn end", end);
}

/* Compute the lowest and highest source line for the instructions in BFUN
   and return them in PBEGIN and PEND.
   Ignore instructions that can't be mapped to BFUN, e.g. instructions that
   result from inlining or macro expansion.  */

static void
btrace_compute_src_line_range (const struct btrace_function *bfun,
                               int *pbegin, int *pend)
{
  struct symtab *symtab;
  struct symbol *sym;
  int begin, end;

  begin = INT_MAX;
  end = INT_MIN;

  sym = bfun->sym;
  if (sym == NULL)
    goto out;

  symtab = symbol_symtab (sym);

  for (const btrace_insn &insn : bfun->insn)
    {
      struct symtab_and_line sal;

      sal = find_pc_line (insn.pc, 0);
      if (sal.symtab != symtab || sal.line == 0)
        continue;

      begin = std::min (begin, sal.line);
      end = std::max (end, sal.line);
    }

 out:
  *pbegin = begin;
  *pend = end;
}

/* Print the source line information for a function call history line.  */

static void
btrace_call_history_src_line (struct ui_out *uiout,
                              const struct btrace_function *bfun)
{
  struct symbol *sym;
  int begin, end;

  sym = bfun->sym;
  if (sym == NULL)
    return;

  uiout->field_string ("file",
                       symtab_to_filename_for_display (symbol_symtab (sym)));

  btrace_compute_src_line_range (bfun, &begin, &end);
  if (end < begin)
    return;

  uiout->text (":");
  uiout->field_int ("min line", begin);

  if (end == begin)
    return;

  uiout->text (",");
  uiout->field_int ("max line", end);
}

/* Get the name of a branch trace function.  */

static const char *
btrace_get_bfun_name (const struct btrace_function *bfun)
{
  struct minimal_symbol *msym;
  struct symbol *sym;

  if (bfun == NULL)
    return "??";

  msym = bfun->msym;
  sym = bfun->sym;

  if (sym != NULL)
    return SYMBOL_PRINT_NAME (sym);
  else if (msym != NULL)
    return MSYMBOL_PRINT_NAME (msym);
  else
    return "??";
}

/* Disassemble a section of the recorded function trace.  */

static void
btrace_call_history (struct ui_out *uiout,
                     const struct btrace_thread_info *btinfo,
                     const struct btrace_call_iterator *begin,
                     const struct btrace_call_iterator *end,
                     int int_flags)
{
  struct btrace_call_iterator it;
  record_print_flags flags = (enum record_print_flag) int_flags;

  DEBUG ("ftrace (0x%x): [%u; %u)", int_flags, btrace_call_number (begin),
         btrace_call_number (end));

  for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
    {
      const struct btrace_function *bfun;
      struct minimal_symbol *msym;
      struct symbol *sym;

      bfun = btrace_call_get (&it);
      sym = bfun->sym;
      msym = bfun->msym;

      /* Print the function index.  */
      ui_out_field_uint (uiout, "index", bfun->number);
      uiout->text ("\t");

      /* Indicate gaps in the trace.  */
      if (bfun->errcode != 0)
        {
          const struct btrace_config *conf;

          conf = btrace_conf (btinfo);

          /* We have trace so we must have a configuration.  */
          gdb_assert (conf != NULL);

          btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);

          continue;
        }

      if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
        {
          int level = bfun->level + btinfo->level, i;

          for (i = 0; i < level; ++i)
            uiout->text ("  ");
        }

      if (sym != NULL)
        uiout->field_string ("function", SYMBOL_PRINT_NAME (sym));
      else if (msym != NULL)
        uiout->field_string ("function", MSYMBOL_PRINT_NAME (msym));
      else if (!uiout->is_mi_like_p ())
        uiout->field_string ("function", "??");

      if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
        {
          uiout->text (_("\tinst "));
          btrace_call_history_insn_range (uiout, bfun);
        }

      if ((flags & RECORD_PRINT_SRC_LINE) != 0)
        {
          uiout->text (_("\tat "));
          btrace_call_history_src_line (uiout, bfun);
        }

      uiout->text ("\n");
    }
}

/* The to_call_history method of target record-btrace.  */

static void
record_btrace_call_history (struct target_ops *self, int size, int int_flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct ui_out *uiout;
  unsigned int context, covered;
  record_print_flags flags = (enum record_print_flag) int_flags;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  btinfo = require_btrace ();
  history = btinfo->call_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("call-history (0x%x): %d", int_flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
         start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
        {
          begin.btinfo = btinfo;
          begin.index = replay->call_index;
        }
      else
        btrace_call_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
         expand in the other direction, as well, to fill up any remaining
         context.  */
      end = begin;
      if (size < 0)
        {
          /* We want the current position covered, as well.  */
          covered = btrace_call_next (&end, 1);
          covered += btrace_call_prev (&begin, context - covered);
          covered += btrace_call_next (&end, context - covered);
        }
      else
        {
          covered = btrace_call_next (&end, context);
          covered += btrace_call_prev (&begin, context - covered);
        }
    }
  else
    {
      begin = history->begin;
      end = history->end;

      DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", int_flags, size,
             btrace_call_number (&begin), btrace_call_number (&end));

      if (size < 0)
        {
          end = begin;
          covered = btrace_call_prev (&begin, context);
        }
      else
        {
          begin = end;
          covered = btrace_call_next (&end, context);
        }
    }

  if (covered > 0)
    btrace_call_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
        printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
        printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  btrace_set_call_history (btinfo, &begin, &end);
}

/* The to_call_history_range method of target record-btrace.  */

static void
record_btrace_call_history_range (struct target_ops *self,
                                  ULONGEST from, ULONGEST to,
                                  int int_flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_iterator begin, end;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;
  record_print_flags flags = (enum record_print_flag) int_flags;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "func history");
  low = from;
  high = to;

  DEBUG ("call-history (0x%x): [%u; %u)", int_flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_call_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_call_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_call_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_call_next (&end, 1);
    }

  btrace_call_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_call_history (btinfo, &begin, &end);
}

/* The to_call_history_from method of target record-btrace.  */

static void
record_btrace_call_history_from (struct target_ops *self,
                                 ULONGEST from, int size,
                                 int int_flags)
{
  ULONGEST begin, end, context;
  record_print_flags flags = (enum record_print_flag) int_flags;

  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  if (size < 0)
    {
      end = from;

      if (from < context)
        begin = 0;
      else
        begin = from - context + 1;
    }
  else
    {
      begin = from;
      end = from + context - 1;

      /* Check for wrap-around.  */
      if (end < begin)
        end = ULONGEST_MAX;
    }

  record_btrace_call_history_range (self, begin, end, flags);
}

/* The to_record_method method of target record-btrace.  */

static enum record_method
record_btrace_record_method (struct target_ops *self, ptid_t ptid)
{
  struct thread_info * const tp = find_thread_ptid (ptid);

  if (tp == NULL)
    error (_("No thread."));

  if (tp->btrace.target == NULL)
    return RECORD_METHOD_NONE;

  return RECORD_METHOD_BTRACE;
}

/* The to_record_is_replaying method of target record-btrace.  */

static int
record_btrace_is_replaying (struct target_ops *self, ptid_t ptid)
{
  struct thread_info *tp;

  ALL_NON_EXITED_THREADS (tp)
    if (ptid_match (tp->ptid, ptid) && btrace_is_replaying (tp))
      return 1;

  return 0;
}

/* The to_record_will_replay method of target record-btrace.  */

static int
record_btrace_will_replay (struct target_ops *self, ptid_t ptid, int dir)
{
  return dir == EXEC_REVERSE || record_btrace_is_replaying (self, ptid);
}

/* The to_xfer_partial method of target record-btrace.  */

static enum target_xfer_status
record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
                            const char *annex, gdb_byte *readbuf,
                            const gdb_byte *writebuf, ULONGEST offset,
                            ULONGEST len, ULONGEST *xfered_len)
{
  /* Filter out requests that don't make sense during replay.  */
  if (replay_memory_access == replay_memory_access_read_only
      && !record_btrace_generating_corefile
      && record_btrace_is_replaying (ops, inferior_ptid))
    {
      switch (object)
        {
        case TARGET_OBJECT_MEMORY:
          {
            struct target_section *section;

            /* We do not allow writing memory in general.  */
            if (writebuf != NULL)
              {
                *xfered_len = len;
                return TARGET_XFER_UNAVAILABLE;
              }

            /* We allow reading readonly memory.  */
            section = target_section_by_addr (ops, offset);
            if (section != NULL)
              {
                /* Check if the section we found is readonly.  */
                if ((bfd_get_section_flags (section->the_bfd_section->owner,
                                            section->the_bfd_section)
                     & SEC_READONLY) != 0)
                  {
                    /* Truncate the request to fit into this section.  */
                    len = std::min (len, section->endaddr - offset);
                    break;
                  }
              }

            *xfered_len = len;
            return TARGET_XFER_UNAVAILABLE;
          }
        }
    }

  /* Forward the request.  */
  ops = ops->beneath;
  return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
                               offset, len, xfered_len);
}

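/* With the default "set record btrace replay-memory-access read-only",
   the filter above refuses memory writes during replay and permits reads
   only from SEC_READONLY sections such as .text or .rodata; everything
   else is reported as TARGET_XFER_UNAVAILABLE.  */
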
/* The to_insert_breakpoint method of target record-btrace.  */

static int
record_btrace_insert_breakpoint (struct target_ops *ops,
                                 struct gdbarch *gdbarch,
                                 struct bp_target_info *bp_tgt)
{
  const char *old;
  int ret;

  /* Inserting breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  TRY
    {
      ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      replay_memory_access = old;
      throw_exception (except);
    }
  END_CATCH
  replay_memory_access = old;

  return ret;
}

/* The to_remove_breakpoint method of target record-btrace.  */

static int
record_btrace_remove_breakpoint (struct target_ops *ops,
                                 struct gdbarch *gdbarch,
                                 struct bp_target_info *bp_tgt,
                                 enum remove_bp_reason reason)
{
  const char *old;
  int ret;

  /* Removing breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  TRY
    {
      ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt,
                                                reason);
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      replay_memory_access = old;
      throw_exception (except);
    }
  END_CATCH
  replay_memory_access = old;

  return ret;
}

/* The to_fetch_registers method of target record-btrace.  */

static void
record_btrace_fetch_registers (struct target_ops *ops,
                               struct regcache *regcache, int regno)
{
  struct btrace_insn_iterator *replay;
  struct thread_info *tp;

  tp = find_thread_ptid (regcache_get_ptid (regcache));
  gdb_assert (tp != NULL);

  replay = tp->btrace.replay;
  if (replay != NULL && !record_btrace_generating_corefile)
    {
      const struct btrace_insn *insn;
      struct gdbarch *gdbarch;
      int pcreg;

      gdbarch = regcache->arch ();
      pcreg = gdbarch_pc_regnum (gdbarch);
      if (pcreg < 0)
        return;

      /* We can only provide the PC register.  */
      if (regno >= 0 && regno != pcreg)
        return;

      insn = btrace_insn_get (replay);
      gdb_assert (insn != NULL);

      regcache_raw_supply (regcache, regno, &insn->pc);
    }
  else
    {
      struct target_ops *t = ops->beneath;

      t->to_fetch_registers (t, regcache, regno);
    }
}

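/* While replaying, only the PC is supplied, taken from the replay
   iterator's current instruction; requests for other registers return
   without supplying a value.  Outside of replay, the request is simply
   forwarded to the target beneath.  */
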
/* The to_store_registers method of target record-btrace.  */

static void
record_btrace_store_registers (struct target_ops *ops,
                               struct regcache *regcache, int regno)
{
  struct target_ops *t;

  if (!record_btrace_generating_corefile
      && record_btrace_is_replaying (ops, regcache_get_ptid (regcache)))
    error (_("Cannot write registers while replaying."));

  gdb_assert (may_write_registers != 0);

  t = ops->beneath;
  t->to_store_registers (t, regcache, regno);
}

/* The to_prepare_to_store method of target record-btrace.  */

static void
record_btrace_prepare_to_store (struct target_ops *ops,
                                struct regcache *regcache)
{
  struct target_ops *t;

  if (!record_btrace_generating_corefile
      && record_btrace_is_replaying (ops, regcache_get_ptid (regcache)))
    return;

  t = ops->beneath;
  t->to_prepare_to_store (t, regcache);
}

/* The branch trace frame cache.  */

struct btrace_frame_cache
{
  /* The thread.  */
  struct thread_info *tp;

  /* The frame info.  */
  struct frame_info *frame;

  /* The branch trace function segment.  */
  const struct btrace_function *bfun;
};

/* A struct btrace_frame_cache hash table indexed by NEXT.  */

static htab_t bfcache;

/* hash_f for htab_create_alloc of bfcache.  */

static hashval_t
bfcache_hash (const void *arg)
{
  const struct btrace_frame_cache *cache
    = (const struct btrace_frame_cache *) arg;

  return htab_hash_pointer (cache->frame);
}

/* eq_f for htab_create_alloc of bfcache.  */

static int
bfcache_eq (const void *arg1, const void *arg2)
{
  const struct btrace_frame_cache *cache1
    = (const struct btrace_frame_cache *) arg1;
  const struct btrace_frame_cache *cache2
    = (const struct btrace_frame_cache *) arg2;

  return cache1->frame == cache2->frame;
}

/* Create a new btrace frame cache.  */

static struct btrace_frame_cache *
bfcache_new (struct frame_info *frame)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
  cache->frame = frame;

  slot = htab_find_slot (bfcache, cache, INSERT);
  gdb_assert (*slot == NULL);
  *slot = cache;

  return cache;
}

/* Extract the branch trace function from a branch trace frame.  */

static const struct btrace_function *
btrace_get_frame_function (struct frame_info *frame)
{
  const struct btrace_frame_cache *cache;
  struct btrace_frame_cache pattern;
  void **slot;

  pattern.frame = frame;

  slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
  if (slot == NULL)
    return NULL;

  cache = (const struct btrace_frame_cache *) *slot;
  return cache->bfun;
}

/* Implement stop_reason method for record_btrace_frame_unwind.  */

static enum unwind_stop_reason
record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
                                        void **this_cache)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;

  cache = (const struct btrace_frame_cache *) *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  if (bfun->up == 0)
    return UNWIND_UNAVAILABLE;

  return UNWIND_NO_REASON;
}

/* Implement this_id method for record_btrace_frame_unwind.  */

static void
record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
                             struct frame_id *this_id)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;
  struct btrace_call_iterator it;
  CORE_ADDR code, special;

  cache = (const struct btrace_frame_cache *) *this_cache;

  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  while (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->prev) != 0)
    bfun = btrace_call_get (&it);

  code = get_frame_func (this_frame);
  special = bfun->number;

  *this_id = frame_id_build_unavailable_stack_special (code, special);

  DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
         btrace_get_bfun_name (cache->bfun),
         core_addr_to_string_nz (this_id->code_addr),
         core_addr_to_string_nz (this_id->special_addr));
}

/* Implement prev_register method for record_btrace_frame_unwind.  */

static struct value *
record_btrace_frame_prev_register (struct frame_info *this_frame,
                                   void **this_cache,
                                   int regnum)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun, *caller;
  struct btrace_call_iterator it;
  struct gdbarch *gdbarch;
  CORE_ADDR pc;
  int pcreg;

  gdbarch = get_frame_arch (this_frame);
  pcreg = gdbarch_pc_regnum (gdbarch);
  if (pcreg < 0 || regnum != pcreg)
    throw_error (NOT_AVAILABLE_ERROR,
                 _("Registers are not available in btrace record history"));

  cache = (const struct btrace_frame_cache *) *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  if (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->up) == 0)
    throw_error (NOT_AVAILABLE_ERROR,
                 _("No caller in btrace record history"));

  caller = btrace_call_get (&it);

  if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
    pc = caller->insn.front ().pc;
  else
    {
      pc = caller->insn.back ().pc;
      pc += gdb_insn_length (gdbarch, pc);
    }

  DEBUG ("[frame] unwound PC in %s on level %d: %s",
         btrace_get_bfun_name (bfun), bfun->level,
         core_addr_to_string_nz (pc));

  return frame_unwind_got_address (this_frame, regnum, pc);
}

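/* The PC unwound above is where the caller resumes: the first instruction
   of the caller's segment when the up link stems from a return
   (BFUN_UP_LINKS_TO_RET), otherwise the instruction just past the call,
   computed via gdb_insn_length.  */
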
/* Implement sniffer method for record_btrace_frame_unwind.  */

static int
record_btrace_frame_sniffer (const struct frame_unwind *self,
                             struct frame_info *this_frame,
                             void **this_cache)
{
  const struct btrace_function *bfun;
  struct btrace_frame_cache *cache;
  struct thread_info *tp;
  struct frame_info *next;

  /* THIS_FRAME does not contain a reference to its thread.  */
  tp = find_thread_ptid (inferior_ptid);
  gdb_assert (tp != NULL);

  bfun = NULL;
  next = get_next_frame (this_frame);
  if (next == NULL)
    {
      const struct btrace_insn_iterator *replay;

      replay = tp->btrace.replay;
      if (replay != NULL)
        bfun = &replay->btinfo->functions[replay->call_index];
    }
  else
    {
      const struct btrace_function *callee;
      struct btrace_call_iterator it;

      callee = btrace_get_frame_function (next);
      if (callee == NULL || (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
        return 0;

      if (btrace_find_call_by_number (&it, &tp->btrace, callee->up) == 0)
        return 0;

      bfun = btrace_call_get (&it);
    }

  if (bfun == NULL)
    return 0;

  DEBUG ("[frame] sniffed frame for %s on level %d",
         btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = tp;
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}

/* Implement sniffer method for record_btrace_tailcall_frame_unwind.  */

static int
record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
                                      struct frame_info *this_frame,
                                      void **this_cache)
{
  const struct btrace_function *bfun, *callee;
  struct btrace_frame_cache *cache;
  struct btrace_call_iterator it;
  struct frame_info *next;
  struct thread_info *tinfo;

  next = get_next_frame (this_frame);
  if (next == NULL)
    return 0;

  callee = btrace_get_frame_function (next);
  if (callee == NULL)
    return 0;

  if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
    return 0;

  tinfo = find_thread_ptid (inferior_ptid);
  if (btrace_find_call_by_number (&it, &tinfo->btrace, callee->up) == 0)
    return 0;

  bfun = btrace_call_get (&it);

  DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
         btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = tinfo;
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}

static void
record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = (struct btrace_frame_cache *) this_cache;

  slot = htab_find_slot (bfcache, cache, NO_INSERT);
  gdb_assert (slot != NULL);

  htab_remove_elt (bfcache, cache);
}

/* btrace recording does not store previous memory content, nor the stack
   frames' content.  Any unwinding would return erroneous results as the
   stack contents no longer match the changed PC value restored from
   history.  Therefore this unwinder reports any possibly unwound registers
   as <unavailable>.  */

const struct frame_unwind record_btrace_frame_unwind =
{
  NORMAL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_frame_sniffer,
  record_btrace_frame_dealloc_cache
};

const struct frame_unwind record_btrace_tailcall_frame_unwind =
{
  TAILCALL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_tailcall_frame_sniffer,
  record_btrace_frame_dealloc_cache
};

/* Implement the to_get_unwinder method.  */

static const struct frame_unwind *
record_btrace_to_get_unwinder (struct target_ops *self)
{
  return &record_btrace_frame_unwind;
}

/* Implement the to_get_tailcall_unwinder method.  */

static const struct frame_unwind *
record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
{
  return &record_btrace_tailcall_frame_unwind;
}

/* Return a human-readable string for FLAG.  */

static const char *
btrace_thread_flag_to_str (enum btrace_thread_flag flag)
{
  switch (flag)
    {
    case BTHR_STEP:
      return "step";

    case BTHR_RSTEP:
      return "reverse-step";

    case BTHR_CONT:
      return "cont";

    case BTHR_RCONT:
      return "reverse-cont";

    case BTHR_STOP:
      return "stop";
    }

  return "<invalid>";
}

/* Indicate that TP should be resumed according to FLAG.  */

static void
record_btrace_resume_thread (struct thread_info *tp,
                             enum btrace_thread_flag flag)
{
  struct btrace_thread_info *btinfo;

  DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp),
         target_pid_to_str (tp->ptid), flag, btrace_thread_flag_to_str (flag));

  btinfo = &tp->btrace;

  /* Fetch the latest branch trace.  */
  btrace_fetch (tp);

  /* A resume request overwrites a preceding resume or stop request.  */
  btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
  btinfo->flags |= flag;
}

/* Get the current frame for TP.  */

static struct frame_info *
get_thread_current_frame (struct thread_info *tp)
{
  struct frame_info *frame;
  ptid_t old_inferior_ptid;
  int executing;

  /* Set INFERIOR_PTID, which is implicitly used by get_current_frame.  */
  old_inferior_ptid = inferior_ptid;
  inferior_ptid = tp->ptid;

  /* Clear the executing flag to allow changes to the current frame.
     We are not actually running, yet.  We just started a reverse execution
     command or a record goto command.
     For the latter, EXECUTING is false and this has no effect.
     For the former, EXECUTING is true and we're in to_wait, about to
     move the thread.  Since we need to recompute the stack, we temporarily
     set EXECUTING to false.  */
  executing = is_executing (inferior_ptid);
  set_executing (inferior_ptid, 0);

  frame = NULL;
  TRY
    {
      frame = get_current_frame ();
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      /* Restore the previous execution state.  */
      set_executing (inferior_ptid, executing);

      /* Restore the previous inferior_ptid.  */
      inferior_ptid = old_inferior_ptid;

      throw_exception (except);
    }
  END_CATCH

  /* Restore the previous execution state.  */
  set_executing (inferior_ptid, executing);

  /* Restore the previous inferior_ptid.  */
  inferior_ptid = old_inferior_ptid;

  return frame;
}

/* Start replaying a thread.  */

static struct btrace_insn_iterator *
record_btrace_start_replaying (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;
  replay = NULL;

  /* We can't start replaying without trace.  */
  if (btinfo->functions.empty ())
    return NULL;

  /* GDB stores the current frame_id when stepping in order to detect steps
     into subroutines.
     Since frames are computed differently when we're replaying, we need to
     recompute those stored frames and fix them up so we can still detect
     subroutines after we started replaying.  */
  TRY
    {
      struct frame_info *frame;
      struct frame_id frame_id;
      int upd_step_frame_id, upd_step_stack_frame_id;

      /* The current frame without replaying - computed via normal unwind.  */
      frame = get_thread_current_frame (tp);
      frame_id = get_frame_id (frame);

      /* Check if we need to update any stepping-related frame id's.  */
      upd_step_frame_id = frame_id_eq (frame_id,
                                       tp->control.step_frame_id);
      upd_step_stack_frame_id = frame_id_eq (frame_id,
                                             tp->control.step_stack_frame_id);

      /* We start replaying at the end of the branch trace.  This corresponds
         to the current instruction.  */
      replay = XNEW (struct btrace_insn_iterator);
      btrace_insn_end (replay, btinfo);

      /* Skip gaps at the end of the trace.  */
      while (btrace_insn_get (replay) == NULL)
        {
          unsigned int steps;

          steps = btrace_insn_prev (replay, 1);
          if (steps == 0)
            error (_("No trace."));
        }

      /* We're not replaying, yet.  */
      gdb_assert (btinfo->replay == NULL);
      btinfo->replay = replay;

      /* Make sure we're not using any stale registers.  */
      registers_changed_ptid (tp->ptid);

      /* The current frame with replaying - computed via btrace unwind.  */
      frame = get_thread_current_frame (tp);
      frame_id = get_frame_id (frame);

      /* Replace stepping related frames where necessary.  */
      if (upd_step_frame_id)
        tp->control.step_frame_id = frame_id;
      if (upd_step_stack_frame_id)
        tp->control.step_stack_frame_id = frame_id;
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      xfree (btinfo->replay);
      btinfo->replay = NULL;

      registers_changed_ptid (tp->ptid);

      throw_exception (except);
    }
  END_CATCH

  return replay;
}

/* Stop replaying a thread.  */

static void
record_btrace_stop_replaying (struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;

  xfree (btinfo->replay);
  btinfo->replay = NULL;

  /* Make sure we're not leaving any stale registers.  */
  registers_changed_ptid (tp->ptid);
}

/* Stop replaying TP if it is at the end of its execution history.  */

static void
record_btrace_stop_replaying_at_end (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay, end;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;
  replay = btinfo->replay;

  if (replay == NULL)
    return;

  btrace_insn_end (&end, btinfo);

  if (btrace_insn_cmp (replay, &end) == 0)
    record_btrace_stop_replaying (tp);
}

/* The to_resume method of target record-btrace.  */

static void
record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
                      enum gdb_signal signal)
{
  struct thread_info *tp;
  enum btrace_thread_flag flag, cflag;

  DEBUG ("resume %s: %s%s", target_pid_to_str (ptid),
         execution_direction == EXEC_REVERSE ? "reverse-" : "",
         step ? "step" : "cont");

  /* Store the execution direction of the last resume.

     If there is more than one to_resume call, we have to rely on infrun
     to not change the execution direction in-between.  */
  record_btrace_resume_exec_dir = execution_direction;

  /* As long as we're not replaying, just forward the request.

     For non-stop targets this means that no thread is replaying.  In order to
     make progress, we may need to explicitly move replaying threads to the
     end of their execution history.  */
  if ((execution_direction != EXEC_REVERSE)
      && !record_btrace_is_replaying (ops, minus_one_ptid))
    {
      ops = ops->beneath;
      ops->to_resume (ops, ptid, step, signal);
      return;
    }

  /* Compute the btrace thread flag for the requested move.  */
  if (execution_direction == EXEC_REVERSE)
    {
      flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;
      cflag = BTHR_RCONT;
    }
  else
    {
      flag = step == 0 ? BTHR_CONT : BTHR_STEP;
      cflag = BTHR_CONT;
    }

  /* We just indicate the resume intent here.  The actual stepping happens in
     record_btrace_wait below.

     For all-stop targets, we only step INFERIOR_PTID and continue others.  */
  if (!target_is_non_stop_p ())
    {
      gdb_assert (ptid_match (inferior_ptid, ptid));

      ALL_NON_EXITED_THREADS (tp)
        if (ptid_match (tp->ptid, ptid))
          {
            if (ptid_match (tp->ptid, inferior_ptid))
              record_btrace_resume_thread (tp, flag);
            else
              record_btrace_resume_thread (tp, cflag);
          }
    }
  else
    {
      ALL_NON_EXITED_THREADS (tp)
        if (ptid_match (tp->ptid, ptid))
          record_btrace_resume_thread (tp, flag);
    }

  /* Async support.  */
  if (target_can_async_p ())
    {
      target_async (1);
      mark_async_event_handler (record_btrace_async_inferior_event_handler);
    }
}

2096 /* The to_commit_resume method of target record-btrace. */
2097
2098 static void
2099 record_btrace_commit_resume (struct target_ops *ops)
2100 {
2101 if ((execution_direction != EXEC_REVERSE)
2102 && !record_btrace_is_replaying (ops, minus_one_ptid))
2103 ops->beneath->to_commit_resume (ops->beneath);
2104 }
2105
2106 /* Cancel resuming TP. */
2107
2108 static void
2109 record_btrace_cancel_resume (struct thread_info *tp)
2110 {
2111 enum btrace_thread_flag flags;
2112
2113 flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
2114 if (flags == 0)
2115 return;
2116
2117 DEBUG ("cancel resume thread %s (%s): %x (%s)",
2118 print_thread_id (tp),
2119 target_pid_to_str (tp->ptid), flags,
2120 btrace_thread_flag_to_str (flags));
2121
2122 tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
2123 record_btrace_stop_replaying_at_end (tp);
2124 }
2125
2126 /* Return a target_waitstatus indicating that we ran out of history. */
2127
2128 static struct target_waitstatus
2129 btrace_step_no_history (void)
2130 {
2131 struct target_waitstatus status;
2132
2133 status.kind = TARGET_WAITKIND_NO_HISTORY;
2134
2135 return status;
2136 }
2137
2138 /* Return a target_waitstatus indicating that a step finished. */
2139
2140 static struct target_waitstatus
2141 btrace_step_stopped (void)
2142 {
2143 struct target_waitstatus status;
2144
2145 status.kind = TARGET_WAITKIND_STOPPED;
2146 status.value.sig = GDB_SIGNAL_TRAP;
2147
2148 return status;
2149 }
2150
2151 /* Return a target_waitstatus indicating that a thread was stopped as
2152 requested. */
2153
2154 static struct target_waitstatus
2155 btrace_step_stopped_on_request (void)
2156 {
2157 struct target_waitstatus status;
2158
2159 status.kind = TARGET_WAITKIND_STOPPED;
2160 status.value.sig = GDB_SIGNAL_0;
2161
2162 return status;
2163 }
2164
2165 /* Return a target_waitstatus indicating a spurious stop. */
2166
2167 static struct target_waitstatus
2168 btrace_step_spurious (void)
2169 {
2170 struct target_waitstatus status;
2171
2172 status.kind = TARGET_WAITKIND_SPURIOUS;
2173
2174 return status;
2175 }
2176
2177 /* Return a target_waitstatus indicating that the thread was not resumed. */
2178
2179 static struct target_waitstatus
2180 btrace_step_no_resumed (void)
2181 {
2182 struct target_waitstatus status;
2183
2184 status.kind = TARGET_WAITKIND_NO_RESUMED;
2185
2186 return status;
2187 }
2188
2189 /* Return a target_waitstatus indicating that we should wait again. */
2190
2191 static struct target_waitstatus
2192 btrace_step_again (void)
2193 {
2194 struct target_waitstatus status;
2195
2196 status.kind = TARGET_WAITKIND_IGNORE;
2197
2198 return status;
2199 }
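
/* A sketch of how these helpers are consumed by record_btrace_step_thread
   and record_btrace_wait below: TARGET_WAITKIND_IGNORE requests another
   step, TARGET_WAITKIND_NO_HISTORY marks a thread that ran out of trace,
   and any other kind stops the stepped thread and is reported to infrun. */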
2200
2201 /* Clear the record histories. */
2202
2203 static void
2204 record_btrace_clear_histories (struct btrace_thread_info *btinfo)
2205 {
2206 xfree (btinfo->insn_history);
2207 xfree (btinfo->call_history);
2208
2209 btinfo->insn_history = NULL;
2210 btinfo->call_history = NULL;
2211 }
2212
2213 /* Check whether TP's current replay position is at a breakpoint. */
2214
2215 static int
2216 record_btrace_replay_at_breakpoint (struct thread_info *tp)
2217 {
2218 struct btrace_insn_iterator *replay;
2219 struct btrace_thread_info *btinfo;
2220 const struct btrace_insn *insn;
2221 struct inferior *inf;
2222
2223 btinfo = &tp->btrace;
2224 replay = btinfo->replay;
2225
2226 if (replay == NULL)
2227 return 0;
2228
2229 insn = btrace_insn_get (replay);
2230 if (insn == NULL)
2231 return 0;
2232
2233 inf = find_inferior_ptid (tp->ptid);
2234 if (inf == NULL)
2235 return 0;
2236
2237 return record_check_stopped_by_breakpoint (inf->aspace, insn->pc,
2238 &btinfo->stop_reason);
2239 }
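
/* Note that record_check_stopped_by_breakpoint also records the stop reason
   in BTINFO->stop_reason; the to_stopped_by_sw/hw_breakpoint methods below
   report it back to infrun. */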
2240
2241 /* Step one instruction in forward direction. */
2242
2243 static struct target_waitstatus
2244 record_btrace_single_step_forward (struct thread_info *tp)
2245 {
2246 struct btrace_insn_iterator *replay, end, start;
2247 struct btrace_thread_info *btinfo;
2248
2249 btinfo = &tp->btrace;
2250 replay = btinfo->replay;
2251
2252 /* We're done if we're not replaying. */
2253 if (replay == NULL)
2254 return btrace_step_no_history ();
2255
2256 /* Check if we're stepping a breakpoint. */
2257 if (record_btrace_replay_at_breakpoint (tp))
2258 return btrace_step_stopped ();
2259
2260 /* Skip gaps during replay. If we end up at a gap (at the end of the trace),
2261 jump back to the instruction at which we started. */
2262 start = *replay;
2263 do
2264 {
2265 unsigned int steps;
2266
2267 /* We will bail out here if we continue stepping after reaching the end
2268 of the execution history. */
2269 steps = btrace_insn_next (replay, 1);
2270 if (steps == 0)
2271 {
2272 *replay = start;
2273 return btrace_step_no_history ();
2274 }
2275 }
2276 while (btrace_insn_get (replay) == NULL);
2277
2278 /* Determine the end of the instruction trace. */
2279 btrace_insn_end (&end, btinfo);
2280
2281 /* The execution trace contains (and ends with) the current instruction.
2282 This instruction has not been executed yet, so the trace really ends
2283 one instruction earlier. */
2284 if (btrace_insn_cmp (replay, &end) == 0)
2285 return btrace_step_no_history ();
2286
2287 return btrace_step_spurious ();
2288 }
2289
2290 /* Step one instruction in backward direction. */
2291
2292 static struct target_waitstatus
2293 record_btrace_single_step_backward (struct thread_info *tp)
2294 {
2295 struct btrace_insn_iterator *replay, start;
2296 struct btrace_thread_info *btinfo;
2297
2298 btinfo = &tp->btrace;
2299 replay = btinfo->replay;
2300
2301 /* Start replaying if we're not already doing so. */
2302 if (replay == NULL)
2303 replay = record_btrace_start_replaying (tp);
2304
2305 /* Step back, skipping gaps. If we can't step any further, we reached
2306 the beginning of the execution history. If we end up at a gap (at the
2307 beginning of the trace), jump back to the instruction at which we started. */
2308 start = *replay;
2309 do
2310 {
2311 unsigned int steps;
2312
2313 steps = btrace_insn_prev (replay, 1);
2314 if (steps == 0)
2315 {
2316 *replay = start;
2317 return btrace_step_no_history ();
2318 }
2319 }
2320 while (btrace_insn_get (replay) == NULL);
2321
2322 /* Check if we're stepping a breakpoint.
2323
2324 For reverse-stepping, this check is after the step. There is logic in
2325 infrun.c that handles reverse-stepping separately. See, for example,
2326 proceed and adjust_pc_after_break.
2327
2328 This code assumes that for reverse-stepping, PC points to the last
2329 de-executed instruction, whereas for forward-stepping PC points to the
2330 next to-be-executed instruction. */
2331 if (record_btrace_replay_at_breakpoint (tp))
2332 return btrace_step_stopped ();
2333
2334 return btrace_step_spurious ();
2335 }
2336
2337 /* Step a single thread. */
2338
2339 static struct target_waitstatus
2340 record_btrace_step_thread (struct thread_info *tp)
2341 {
2342 struct btrace_thread_info *btinfo;
2343 struct target_waitstatus status;
2344 enum btrace_thread_flag flags;
2345
2346 btinfo = &tp->btrace;
2347
2348 flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
2349 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
2350
2351 DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp),
2352 target_pid_to_str (tp->ptid), flags,
2353 btrace_thread_flag_to_str (flags));
2354
2355 /* We can't step without an execution history. */
2356 if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
2357 return btrace_step_no_history ();
2358
2359 switch (flags)
2360 {
2361 default:
2362 internal_error (__FILE__, __LINE__, _("invalid stepping type."));
2363
2364 case BTHR_STOP:
2365 return btrace_step_stopped_on_request ();
2366
2367 case BTHR_STEP:
2368 status = record_btrace_single_step_forward (tp);
2369 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2370 break;
2371
2372 return btrace_step_stopped ();
2373
2374 case BTHR_RSTEP:
2375 status = record_btrace_single_step_backward (tp);
2376 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2377 break;
2378
2379 return btrace_step_stopped ();
2380
2381 case BTHR_CONT:
2382 status = record_btrace_single_step_forward (tp);
2383 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2384 break;
2385
2386 btinfo->flags |= flags;
2387 return btrace_step_again ();
2388
2389 case BTHR_RCONT:
2390 status = record_btrace_single_step_backward (tp);
2391 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2392 break;
2393
2394 btinfo->flags |= flags;
2395 return btrace_step_again ();
2396 }
2397
2398 /* We keep threads moving at the end of their execution history. The to_wait
2399 method will stop the thread for which the event is reported. */
2400 if (status.kind == TARGET_WAITKIND_NO_HISTORY)
2401 btinfo->flags |= flags;
2402
2403 return status;
2404 }
2405
2406 /* A vector of threads. */
2407
2408 typedef struct thread_info * tp_t;
2409 DEF_VEC_P (tp_t);
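
/* A minimal usage sketch for such a vector, following vec.h conventions
   (record_btrace_wait below shows the cleanup-based variant):

     VEC (tp_t) *threads = NULL;

     VEC_safe_push (tp_t, threads, tp);
     while (!VEC_empty (tp_t, threads))
       process (VEC_pop (tp_t, threads));
     VEC_free (tp_t, threads);

   where process is a placeholder for per-thread work. */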
2410
2411 /* Announce further events if necessary. */
2412
2413 static void
2414 record_btrace_maybe_mark_async_event (const VEC (tp_t) *moving,
2415 const VEC (tp_t) *no_history)
2416 {
2417 int more_moving, more_no_history;
2418
2419 more_moving = !VEC_empty (tp_t, moving);
2420 more_no_history = !VEC_empty (tp_t, no_history);
2421
2422 if (!more_moving && !more_no_history)
2423 return;
2424
2425 if (more_moving)
2426 DEBUG ("movers pending");
2427
2428 if (more_no_history)
2429 DEBUG ("no-history pending");
2430
2431 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2432 }
2433
2434 /* The to_wait method of target record-btrace. */
2435
2436 static ptid_t
2437 record_btrace_wait (struct target_ops *ops, ptid_t ptid,
2438 struct target_waitstatus *status, int options)
2439 {
2440 VEC (tp_t) *moving, *no_history;
2441 struct thread_info *tp, *eventing;
2442 struct cleanup *cleanups = make_cleanup (null_cleanup, NULL);
2443
2444 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);
2445
2446 /* As long as we're not replaying, just forward the request. */
2447 if ((execution_direction != EXEC_REVERSE)
2448 && !record_btrace_is_replaying (ops, minus_one_ptid))
2449 {
2450 ops = ops->beneath;
2451 return ops->to_wait (ops, ptid, status, options);
2452 }
2453
2454 moving = NULL;
2455 no_history = NULL;
2456
2457 make_cleanup (VEC_cleanup (tp_t), &moving);
2458 make_cleanup (VEC_cleanup (tp_t), &no_history);
2459
2460 /* Keep a work list of moving threads. */
2461 ALL_NON_EXITED_THREADS (tp)
2462 if (ptid_match (tp->ptid, ptid)
2463 && ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0))
2464 VEC_safe_push (tp_t, moving, tp);
2465
2466 if (VEC_empty (tp_t, moving))
2467 {
2468 *status = btrace_step_no_resumed ();
2469
2470 DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid),
2471 target_waitstatus_to_string (status).c_str ());
2472
2473 do_cleanups (cleanups);
2474 return null_ptid;
2475 }
2476
2477 /* Step moving threads one by one, one step each, until either one thread
2478 reports an event or we run out of threads to step.
2479
2480 When stepping more than one thread, chances are that some threads reach
2481 the end of their execution history earlier than others. If we reported
2482 this immediately, all-stop on top of non-stop would stop all threads and
2483 resume the same threads next time. And we would report the same thread
2484 having reached the end of its execution history again.
2485
2486 In the worst case, this would starve the other threads. But even if other
2487 threads would be allowed to make progress, this would result in far too
2488 many intermediate stops.
2489
2490 We therefore delay the reporting of "no execution history" until we have
2491 nothing else to report. By this time, all threads should have moved to
2492 either the beginning or the end of their execution history. There will
2493 be a single user-visible stop. */
2494 eventing = NULL;
2495 while ((eventing == NULL) && !VEC_empty (tp_t, moving))
2496 {
2497 unsigned int ix;
2498
2499 ix = 0;
2500 while ((eventing == NULL) && VEC_iterate (tp_t, moving, ix, tp))
2501 {
2502 *status = record_btrace_step_thread (tp);
2503
2504 switch (status->kind)
2505 {
2506 case TARGET_WAITKIND_IGNORE:
2507 ix++;
2508 break;
2509
2510 case TARGET_WAITKIND_NO_HISTORY:
2511 VEC_safe_push (tp_t, no_history,
2512 VEC_ordered_remove (tp_t, moving, ix));
2513 break;
2514
2515 default:
2516 eventing = VEC_unordered_remove (tp_t, moving, ix);
2517 break;
2518 }
2519 }
2520 }
2521
2522 if (eventing == NULL)
2523 {
2524 /* We started with at least one moving thread. This thread must have
2525 either stopped or reached the end of its execution history.
2526
2527 In the former case, EVENTING must not be NULL.
2528 In the latter case, NO_HISTORY must not be empty. */
2529 gdb_assert (!VEC_empty (tp_t, no_history));
2530
2531 /* We kept threads moving at the end of their execution history. Stop
2532 EVENTING now that we are going to report its stop. */
2533 eventing = VEC_unordered_remove (tp_t, no_history, 0);
2534 eventing->btrace.flags &= ~BTHR_MOVE;
2535
2536 *status = btrace_step_no_history ();
2537 }
2538
2539 gdb_assert (eventing != NULL);
2540
2541 /* We kept threads replaying at the end of their execution history. Stop
2542 replaying EVENTING now that we are going to report its stop. */
2543 record_btrace_stop_replaying_at_end (eventing);
2544
2545 /* Stop all other threads. */
2546 if (!target_is_non_stop_p ())
2547 ALL_NON_EXITED_THREADS (tp)
2548 record_btrace_cancel_resume (tp);
2549
2550 /* In async mode, we need to announce further events. */
2551 if (target_is_async_p ())
2552 record_btrace_maybe_mark_async_event (moving, no_history);
2553
2554 /* Start record histories anew from the current position. */
2555 record_btrace_clear_histories (&eventing->btrace);
2556
2557 /* We moved the replay position but did not update registers. */
2558 registers_changed_ptid (eventing->ptid);
2559
2560 DEBUG ("wait ended by thread %s (%s): %s",
2561 print_thread_id (eventing),
2562 target_pid_to_str (eventing->ptid),
2563 target_waitstatus_to_string (status).c_str ());
2564
2565 do_cleanups (cleanups);
2566 return eventing->ptid;
2567 }
2568
2569 /* The to_stop method of target record-btrace. */
2570
2571 static void
2572 record_btrace_stop (struct target_ops *ops, ptid_t ptid)
2573 {
2574 DEBUG ("stop %s", target_pid_to_str (ptid));
2575
2576 /* As long as we're not replaying, just forward the request. */
2577 if ((execution_direction != EXEC_REVERSE)
2578 && !record_btrace_is_replaying (ops, minus_one_ptid))
2579 {
2580 ops = ops->beneath;
2581 ops->to_stop (ops, ptid);
2582 }
2583 else
2584 {
2585 struct thread_info *tp;
2586
2587 ALL_NON_EXITED_THREADS (tp)
2588 if (ptid_match (tp->ptid, ptid))
2589 {
2590 tp->btrace.flags &= ~BTHR_MOVE;
2591 tp->btrace.flags |= BTHR_STOP;
2592 }
2593 }
2594 }
2595
2596 /* The to_can_execute_reverse method of target record-btrace. */
2597
2598 static int
2599 record_btrace_can_execute_reverse (struct target_ops *self)
2600 {
2601 return 1;
2602 }
2603
2604 /* The to_stopped_by_sw_breakpoint method of target record-btrace. */
2605
2606 static int
2607 record_btrace_stopped_by_sw_breakpoint (struct target_ops *ops)
2608 {
2609 if (record_btrace_is_replaying (ops, minus_one_ptid))
2610 {
2611 struct thread_info *tp = inferior_thread ();
2612
2613 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2614 }
2615
2616 return ops->beneath->to_stopped_by_sw_breakpoint (ops->beneath);
2617 }
2618
2619 /* The to_supports_stopped_by_sw_breakpoint method of target
2620 record-btrace. */
2621
2622 static int
2623 record_btrace_supports_stopped_by_sw_breakpoint (struct target_ops *ops)
2624 {
2625 if (record_btrace_is_replaying (ops, minus_one_ptid))
2626 return 1;
2627
2628 return ops->beneath->to_supports_stopped_by_sw_breakpoint (ops->beneath);
2629 }
2630
2631 /* The to_stopped_by_hw_breakpoint method of target record-btrace. */
2632
2633 static int
2634 record_btrace_stopped_by_hw_breakpoint (struct target_ops *ops)
2635 {
2636 if (record_btrace_is_replaying (ops, minus_one_ptid))
2637 {
2638 struct thread_info *tp = inferior_thread ();
2639
2640 return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2641 }
2642
2643 return ops->beneath->to_stopped_by_hw_breakpoint (ops->beneath);
2644 }
2645
2646 /* The to_supports_stopped_by_hw_breakpoint method of target
2647 record-btrace. */
2648
2649 static int
2650 record_btrace_supports_stopped_by_hw_breakpoint (struct target_ops *ops)
2651 {
2652 if (record_btrace_is_replaying (ops, minus_one_ptid))
2653 return 1;
2654
2655 return ops->beneath->to_supports_stopped_by_hw_breakpoint (ops->beneath);
2656 }
2657
2658 /* The to_update_thread_list method of target record-btrace. */
2659
2660 static void
2661 record_btrace_update_thread_list (struct target_ops *ops)
2662 {
2663 /* We don't add or remove threads during replay. */
2664 if (record_btrace_is_replaying (ops, minus_one_ptid))
2665 return;
2666
2667 /* Forward the request. */
2668 ops = ops->beneath;
2669 ops->to_update_thread_list (ops);
2670 }
2671
2672 /* The to_thread_alive method of target record-btrace. */
2673
2674 static int
2675 record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
2676 {
2677 /* We don't add or remove threads during replay. */
2678 if (record_btrace_is_replaying (ops, minus_one_ptid))
2679 return find_thread_ptid (ptid) != NULL;
2680
2681 /* Forward the request. */
2682 ops = ops->beneath;
2683 return ops->to_thread_alive (ops, ptid);
2684 }
2685
2686 /* Set the replay branch trace instruction iterator. If IT is NULL, replay
2687 is stopped. */
2688
2689 static void
2690 record_btrace_set_replay (struct thread_info *tp,
2691 const struct btrace_insn_iterator *it)
2692 {
2693 struct btrace_thread_info *btinfo;
2694
2695 btinfo = &tp->btrace;
2696
2697 if (it == NULL)
2698 record_btrace_stop_replaying (tp);
2699 else
2700 {
2701 if (btinfo->replay == NULL)
2702 record_btrace_start_replaying (tp);
2703 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
2704 return;
2705
2706 *btinfo->replay = *it;
2707 registers_changed_ptid (tp->ptid);
2708 }
2709
2710 /* Start anew from the new replay position. */
2711 record_btrace_clear_histories (btinfo);
2712
2713 stop_pc = regcache_read_pc (get_current_regcache ());
2714 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
2715 }
2716
2717 /* The to_goto_record_begin method of target record-btrace. */
2718
2719 static void
2720 record_btrace_goto_begin (struct target_ops *self)
2721 {
2722 struct thread_info *tp;
2723 struct btrace_insn_iterator begin;
2724
2725 tp = require_btrace_thread ();
2726
2727 btrace_insn_begin (&begin, &tp->btrace);
2728
2729 /* Skip gaps at the beginning of the trace. */
2730 while (btrace_insn_get (&begin) == NULL)
2731 {
2732 unsigned int steps;
2733
2734 steps = btrace_insn_next (&begin, 1);
2735 if (steps == 0)
2736 error (_("No trace."));
2737 }
2738
2739 record_btrace_set_replay (tp, &begin);
2740 }
2741
2742 /* The to_goto_record_end method of target record-btrace. */
2743
2744 static void
2745 record_btrace_goto_end (struct target_ops *ops)
2746 {
2747 struct thread_info *tp;
2748
2749 tp = require_btrace_thread ();
2750
2751 record_btrace_set_replay (tp, NULL);
2752 }
2753
2754 /* The to_goto_record method of target record-btrace. */
2755
2756 static void
2757 record_btrace_goto (struct target_ops *self, ULONGEST insn)
2758 {
2759 struct thread_info *tp;
2760 struct btrace_insn_iterator it;
2761 unsigned int number;
2762 int found;
2763
2764 number = insn;
2765
2766 /* Reject instruction numbers that do not fit into unsigned int. */
2767 if (number != insn)
2768 error (_("Instruction number out of range."));
2769
2770 tp = require_btrace_thread ();
2771
2772 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
2773
2774 /* Check if the instruction could not be found or is a gap. */
2775 if (found == 0 || btrace_insn_get (&it) == NULL)
2776 error (_("No such instruction."));
2777
2778 record_btrace_set_replay (tp, &it);
2779 }
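
/* For example, "record goto begin", "record goto end", and "record goto
   <n>" from the CLI (see record.c) end up in the three methods above. */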
2780
2781 /* The to_record_stop_replaying method of target record-btrace. */
2782
2783 static void
2784 record_btrace_stop_replaying_all (struct target_ops *self)
2785 {
2786 struct thread_info *tp;
2787
2788 ALL_NON_EXITED_THREADS (tp)
2789 record_btrace_stop_replaying (tp);
2790 }
2791
2792 /* The to_execution_direction target method. */
2793
2794 static enum exec_direction_kind
2795 record_btrace_execution_direction (struct target_ops *self)
2796 {
2797 return record_btrace_resume_exec_dir;
2798 }
2799
2800 /* The to_prepare_to_generate_core target method. */
2801
2802 static void
2803 record_btrace_prepare_to_generate_core (struct target_ops *self)
2804 {
2805 record_btrace_generating_corefile = 1;
2806 }
2807
2808 /* The to_done_generating_core target method. */
2809
2810 static void
2811 record_btrace_done_generating_core (struct target_ops *self)
2812 {
2813 record_btrace_generating_corefile = 0;
2814 }
2815
2816 /* Initialize the record-btrace target ops. */
2817
2818 static void
2819 init_record_btrace_ops (void)
2820 {
2821 struct target_ops *ops;
2822
2823 ops = &record_btrace_ops;
2824 ops->to_shortname = "record-btrace";
2825 ops->to_longname = "Branch tracing target";
2826 ops->to_doc = "Collect control-flow trace and provide the execution history.";
2827 ops->to_open = record_btrace_open;
2828 ops->to_close = record_btrace_close;
2829 ops->to_async = record_btrace_async;
2830 ops->to_detach = record_detach;
2831 ops->to_disconnect = record_btrace_disconnect;
2832 ops->to_mourn_inferior = record_mourn_inferior;
2833 ops->to_kill = record_kill;
2834 ops->to_stop_recording = record_btrace_stop_recording;
2835 ops->to_info_record = record_btrace_info;
2836 ops->to_insn_history = record_btrace_insn_history;
2837 ops->to_insn_history_from = record_btrace_insn_history_from;
2838 ops->to_insn_history_range = record_btrace_insn_history_range;
2839 ops->to_call_history = record_btrace_call_history;
2840 ops->to_call_history_from = record_btrace_call_history_from;
2841 ops->to_call_history_range = record_btrace_call_history_range;
2842 ops->to_record_method = record_btrace_record_method;
2843 ops->to_record_is_replaying = record_btrace_is_replaying;
2844 ops->to_record_will_replay = record_btrace_will_replay;
2845 ops->to_record_stop_replaying = record_btrace_stop_replaying_all;
2846 ops->to_xfer_partial = record_btrace_xfer_partial;
2847 ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
2848 ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
2849 ops->to_fetch_registers = record_btrace_fetch_registers;
2850 ops->to_store_registers = record_btrace_store_registers;
2851 ops->to_prepare_to_store = record_btrace_prepare_to_store;
2852 ops->to_get_unwinder = &record_btrace_to_get_unwinder;
2853 ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;
2854 ops->to_resume = record_btrace_resume;
2855 ops->to_commit_resume = record_btrace_commit_resume;
2856 ops->to_wait = record_btrace_wait;
2857 ops->to_stop = record_btrace_stop;
2858 ops->to_update_thread_list = record_btrace_update_thread_list;
2859 ops->to_thread_alive = record_btrace_thread_alive;
2860 ops->to_goto_record_begin = record_btrace_goto_begin;
2861 ops->to_goto_record_end = record_btrace_goto_end;
2862 ops->to_goto_record = record_btrace_goto;
2863 ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
2864 ops->to_stopped_by_sw_breakpoint = record_btrace_stopped_by_sw_breakpoint;
2865 ops->to_supports_stopped_by_sw_breakpoint
2866 = record_btrace_supports_stopped_by_sw_breakpoint;
2867 ops->to_stopped_by_hw_breakpoint = record_btrace_stopped_by_hw_breakpoint;
2868 ops->to_supports_stopped_by_hw_breakpoint
2869 = record_btrace_supports_stopped_by_hw_breakpoint;
2870 ops->to_execution_direction = record_btrace_execution_direction;
2871 ops->to_prepare_to_generate_core = record_btrace_prepare_to_generate_core;
2872 ops->to_done_generating_core = record_btrace_done_generating_core;
2873 ops->to_stratum = record_stratum;
2874 ops->to_magic = OPS_MAGIC;
2875 }
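
/* The resulting target sits at record_stratum, above the target it was
   opened on; requests that do not concern replay are forwarded to that
   target via ops->beneath, as seen throughout this file. */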
2876
2877 /* Start recording in BTS format. */
2878
2879 static void
2880 cmd_record_btrace_bts_start (const char *args, int from_tty)
2881 {
2882 if (args != NULL && *args != 0)
2883 error (_("Invalid argument."));
2884
2885 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2886
2887 TRY
2888 {
2889 execute_command ("target record-btrace", from_tty);
2890 }
2891 CATCH (exception, RETURN_MASK_ALL)
2892 {
2893 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2894 throw_exception (exception);
2895 }
2896 END_CATCH
2897 }
2898
2899 /* Start recording in Intel Processor Trace format. */
2900
2901 static void
2902 cmd_record_btrace_pt_start (const char *args, int from_tty)
2903 {
2904 if (args != NULL && *args != 0)
2905 error (_("Invalid argument."));
2906
2907 record_btrace_conf.format = BTRACE_FORMAT_PT;
2908
2909 TRY
2910 {
2911 execute_command ("target record-btrace", from_tty);
2912 }
2913 CATCH (exception, RETURN_MASK_ALL)
2914 {
2915 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2916 throw_exception (exception);
2917 }
2918 END_CATCH
2919 }
2920
2921 /* The "record btrace" command: start recording, trying Intel Processor Trace first and falling back to BTS. */
2922
2923 static void
2924 cmd_record_btrace_start (const char *args, int from_tty)
2925 {
2926 if (args != NULL && *args != 0)
2927 error (_("Invalid argument."));
2928
2929 record_btrace_conf.format = BTRACE_FORMAT_PT;
2930
2931 TRY
2932 {
2933 execute_command ("target record-btrace", from_tty);
2934 }
2935 CATCH (exception, RETURN_MASK_ALL)
2936 {
2937 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2938
2939 TRY
2940 {
2941 execute_command ("target record-btrace", from_tty);
2942 }
2943 CATCH (exception, RETURN_MASK_ALL)
2944 {
2945 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2946 throw_exception (exception);
2947 }
2948 END_CATCH
2949 }
2950 END_CATCH
2951 }
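
/* A short example session; the Intel PT to BTS fallback above is
   transparent to the user (output abbreviated):

     (gdb) record btrace
     (gdb) info record
     Active record target: record-btrace
     ...

   By contrast, "record btrace pt" and "record btrace bts" request one
   specific format and fail if it is not supported. */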
2952
2953 /* The "set record btrace" command. */
2954
2955 static void
2956 cmd_set_record_btrace (const char *args, int from_tty)
2957 {
2958 cmd_show_list (set_record_btrace_cmdlist, from_tty, "");
2959 }
2960
2961 /* The "show record btrace" command. */
2962
2963 static void
2964 cmd_show_record_btrace (const char *args, int from_tty)
2965 {
2966 cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
2967 }
2968
2969 /* The "show record btrace replay-memory-access" command. */
2970
2971 static void
2972 cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2973 struct cmd_list_element *c, const char *value)
2974 {
2975 fprintf_filtered (file, _("Replay memory access is %s.\n"),
2976 replay_memory_access);
2977 }
2978
2979 /* The "set record btrace bts" command. */
2980
2981 static void
2982 cmd_set_record_btrace_bts (const char *args, int from_tty)
2983 {
2984 printf_unfiltered (_("\"set record btrace bts\" must be followed "
2985 "by an appropriate subcommand.\n"));
2986 help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
2987 all_commands, gdb_stdout);
2988 }
2989
2990 /* The "show record btrace bts" command. */
2991
2992 static void
2993 cmd_show_record_btrace_bts (const char *args, int from_tty)
2994 {
2995 cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
2996 }
2997
2998 /* The "set record btrace pt" command. */
2999
3000 static void
3001 cmd_set_record_btrace_pt (const char *args, int from_tty)
3002 {
3003 printf_unfiltered (_("\"set record btrace pt\" must be followed "
3004 "by an appropriate subcommand.\n"));
3005 help_list (set_record_btrace_pt_cmdlist, "set record btrace pt ",
3006 all_commands, gdb_stdout);
3007 }
3008
3009 /* The "show record btrace pt" command. */
3010
3011 static void
3012 cmd_show_record_btrace_pt (const char *args, int from_tty)
3013 {
3014 cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, "");
3015 }
3016
3017 /* The "record bts buffer-size" show value function. */
3018
3019 static void
3020 show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
3021 struct cmd_list_element *c,
3022 const char *value)
3023 {
3024 fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
3025 value);
3026 }
3027
3028 /* The "record pt buffer-size" show value function. */
3029
3030 static void
3031 show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
3032 struct cmd_list_element *c,
3033 const char *value)
3034 {
3035 fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
3036 value);
3037 }
3038
3039 /* Initialize btrace commands. */
3040
3041 void
3042 _initialize_record_btrace (void)
3043 {
3044 add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
3045 _("Start branch trace recording."), &record_btrace_cmdlist,
3046 "record btrace ", 0, &record_cmdlist);
3047 add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
3048
3049 add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
3050 _("\
3051 Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
3052 The processor stores a from/to record for each branch into a cyclic buffer.\n\
3053 This format may not be available on all processors."),
3054 &record_btrace_cmdlist);
3055 add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);
3056
3057 add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
3058 _("\
3059 Start branch trace recording in Intel Processor Trace format.\n\n\
3060 This format may not be available on all processors."),
3061 &record_btrace_cmdlist);
3062 add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);
3063
3064 add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
3065 _("Set record options"), &set_record_btrace_cmdlist,
3066 "set record btrace ", 0, &set_record_cmdlist);
3067
3068 add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
3069 _("Show record options"), &show_record_btrace_cmdlist,
3070 "show record btrace ", 0, &show_record_cmdlist);
3071
3072 add_setshow_enum_cmd ("replay-memory-access", no_class,
3073 replay_memory_access_types, &replay_memory_access, _("\
3074 Set what memory accesses are allowed during replay."), _("\
3075 Show what memory accesses are allowed during replay."),
3076 _("Default is READ-ONLY.\n\n\
3077 The btrace record target does not trace data.\n\
3078 The memory therefore corresponds to the live target and not \
3079 to the current replay position.\n\n\
3080 When READ-ONLY, allow accesses to read-only memory during replay.\n\
3081 When READ-WRITE, allow accesses to read-only and read-write memory during \
3082 replay."),
3083 NULL, cmd_show_replay_memory_access,
3084 &set_record_btrace_cmdlist,
3085 &show_record_btrace_cmdlist);
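
  /* For example:

       (gdb) set record btrace replay-memory-access read-write
       (gdb) show record btrace replay-memory-access
       Replay memory access is read-write. */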
3086
3087 add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
3088 _("Set record btrace bts options"),
3089 &set_record_btrace_bts_cmdlist,
3090 "set record btrace bts ", 0, &set_record_btrace_cmdlist);
3091
3092 add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
3093 _("Show record btrace bts options"),
3094 &show_record_btrace_bts_cmdlist,
3095 "show record btrace bts ", 0, &show_record_btrace_cmdlist);
3096
3097 add_setshow_uinteger_cmd ("buffer-size", no_class,
3098 &record_btrace_conf.bts.size,
3099 _("Set the record/replay bts buffer size."),
3100 _("Show the record/replay bts buffer size."), _("\
3101 When starting recording, request a trace buffer of this size. \
3102 The actual buffer size may differ from the requested size. \
3103 Use \"info record\" to see the actual buffer size.\n\n\
3104 Bigger buffers allow longer recording but also take more time to process \
3105 the recorded execution trace.\n\n\
3106 The trace buffer size may not be changed while recording."), NULL,
3107 show_record_bts_buffer_size_value,
3108 &set_record_btrace_bts_cmdlist,
3109 &show_record_btrace_bts_cmdlist);
3110
3111 add_prefix_cmd ("pt", class_support, cmd_set_record_btrace_pt,
3112 _("Set record btrace pt options"),
3113 &set_record_btrace_pt_cmdlist,
3114 "set record btrace pt ", 0, &set_record_btrace_cmdlist);
3115
3116 add_prefix_cmd ("pt", class_support, cmd_show_record_btrace_pt,
3117 _("Show record btrace pt options"),
3118 &show_record_btrace_pt_cmdlist,
3119 "show record btrace pt ", 0, &show_record_btrace_cmdlist);
3120
3121 add_setshow_uinteger_cmd ("buffer-size", no_class,
3122 &record_btrace_conf.pt.size,
3123 _("Set the record/replay pt buffer size."),
3124 _("Show the record/replay pt buffer size."), _("\
3125 Bigger buffers allow longer recording but also take more time to process \
3126 the recorded execution.\n\
3127 The actual buffer size may differ from the requested size. Use \"info record\" \
3128 to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
3129 &set_record_btrace_pt_cmdlist,
3130 &show_record_btrace_pt_cmdlist);
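
  /* For example, sizes are given in bytes:

       (gdb) set record btrace pt buffer-size 65536
       (gdb) show record btrace pt buffer-size
       The record/replay pt buffer size is 65536. */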
3131
3132 init_record_btrace_ops ();
3133 add_target (&record_btrace_ops);
3134
3135 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
3136 xcalloc, xfree);
3137
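  /* Default buffer sizes: 64 KiB for BTS and 16 KiB for Intel PT. Both may
     be changed with "set record btrace <format> buffer-size" before
     recording is started. */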
3138 record_btrace_conf.bts.size = 64 * 1024;
3139 record_btrace_conf.pt.size = 16 * 1024;
3140 }