btrace: Export btrace_decode_error function.
gdb/record-btrace.c
/* Branch trace support for GDB, the GNU debugger.

   Copyright (C) 2013-2017 Free Software Foundation, Inc.

   Contributed by Intel Corp. <markus.t.metzger@intel.com>

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "record.h"
#include "record-btrace.h"
#include "gdbthread.h"
#include "target.h"
#include "gdbcmd.h"
#include "disasm.h"
#include "observer.h"
#include "cli/cli-utils.h"
#include "source.h"
#include "ui-out.h"
#include "symtab.h"
#include "filenames.h"
#include "regcache.h"
#include "frame-unwind.h"
#include "hashtab.h"
#include "infrun.h"
#include "event-loop.h"
#include "inf-loop.h"
#include "vec.h"
#include <algorithm>

/* The target_ops of record-btrace.  */
static struct target_ops record_btrace_ops;

/* A new thread observer enabling branch tracing for the new thread.  */
static struct observer *record_btrace_thread_observer;

/* Memory access types used in set/show record btrace replay-memory-access.  */
static const char replay_memory_access_read_only[] = "read-only";
static const char replay_memory_access_read_write[] = "read-write";
static const char *const replay_memory_access_types[] =
{
  replay_memory_access_read_only,
  replay_memory_access_read_write,
  NULL
};

/* The currently allowed replay memory access type.  */
static const char *replay_memory_access = replay_memory_access_read_only;

/* Command lists for "set/show record btrace".  */
static struct cmd_list_element *set_record_btrace_cmdlist;
static struct cmd_list_element *show_record_btrace_cmdlist;

/* The execution direction of the last resume we got.  See record-full.c.  */
static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;

/* The async event handler for reverse/replay execution.  */
static struct async_event_handler *record_btrace_async_inferior_event_handler;

/* A flag indicating that we are currently generating a core file.  */
static int record_btrace_generating_corefile;

/* The current branch trace configuration.  */
static struct btrace_config record_btrace_conf;

/* Command list for "record btrace".  */
static struct cmd_list_element *record_btrace_cmdlist;

/* Command lists for "set/show record btrace bts".  */
static struct cmd_list_element *set_record_btrace_bts_cmdlist;
static struct cmd_list_element *show_record_btrace_bts_cmdlist;

/* Command lists for "set/show record btrace pt".  */
static struct cmd_list_element *set_record_btrace_pt_cmdlist;
static struct cmd_list_element *show_record_btrace_pt_cmdlist;

/* Print a record-btrace debug message.  Use do ... while (0) to avoid
   ambiguities when used in if statements.  */

#define DEBUG(msg, args...)                                             \
  do                                                                    \
    {                                                                   \
      if (record_debug != 0)                                            \
        fprintf_unfiltered (gdb_stdlog,                                 \
                            "[record-btrace] " msg "\n", ##args);       \
    }                                                                   \
  while (0)
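
/* For example, the do ... while (0) wrapper makes DEBUG safe to use in
   unbraced if/else statements (illustrative snippet, not from this file):

     if (do_trace)
       DEBUG ("enable thread %s", target_pid_to_str (tp->ptid));
     else
       btrace_disable (tp);

   With a plain braced block instead of do ... while (0), the semicolon
   after the macro invocation would terminate the if statement and the
   else branch would fail to parse.  */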


/* Update the branch trace for the current thread and return a pointer to its
   thread_info.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct thread_info *
require_btrace_thread (void)
{
  struct thread_info *tp;

  DEBUG ("require");

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  validate_registers_access ();

  btrace_fetch (tp);

  if (btrace_is_empty (tp))
    error (_("No trace."));

  return tp;
}

/* Update the branch trace for the current thread and return a pointer to its
   branch trace information struct.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct btrace_thread_info *
require_btrace (void)
{
  struct thread_info *tp;

  tp = require_btrace_thread ();

  return &tp->btrace;
}

/* Enable branch tracing for one thread.  Warn on errors.  */

static void
record_btrace_enable_warn (struct thread_info *tp)
{
  TRY
    {
      btrace_enable (tp, &record_btrace_conf);
    }
  CATCH (error, RETURN_MASK_ERROR)
    {
      warning ("%s", error.message);
    }
  END_CATCH
}

/* Callback function to disable branch tracing for one thread.  */

static void
record_btrace_disable_callback (void *arg)
{
  struct thread_info *tp = (struct thread_info *) arg;

  btrace_disable (tp);
}

/* Enable automatic tracing of new threads.  */

static void
record_btrace_auto_enable (void)
{
  DEBUG ("attach thread observer");

  record_btrace_thread_observer
    = observer_attach_new_thread (record_btrace_enable_warn);
}

/* Disable automatic tracing of new threads.  */

static void
record_btrace_auto_disable (void)
{
  /* The observer may already have been detached.  */
  if (record_btrace_thread_observer == NULL)
    return;

  DEBUG ("detach thread observer");

  observer_detach_new_thread (record_btrace_thread_observer);
  record_btrace_thread_observer = NULL;
}

/* The record-btrace async event handler function.  */

static void
record_btrace_handle_async_inferior_event (gdb_client_data data)
{
  inferior_event_handler (INF_REG_EVENT, NULL);
}

/* See record-btrace.h.  */

void
record_btrace_push_target (void)
{
  const char *format;

  record_btrace_auto_enable ();

  push_target (&record_btrace_ops);

  record_btrace_async_inferior_event_handler
    = create_async_event_handler (record_btrace_handle_async_inferior_event,
                                  NULL);
  record_btrace_generating_corefile = 0;

  format = btrace_format_short_string (record_btrace_conf.format);
  observer_notify_record_changed (current_inferior (), 1, "btrace", format);
}

/* The to_open method of target record-btrace.  */

static void
record_btrace_open (const char *args, int from_tty)
{
  struct cleanup *disable_chain;
  struct thread_info *tp;

  DEBUG ("open");

  record_preopen ();

  if (!target_has_execution)
    error (_("The program is not being run."));

  gdb_assert (record_btrace_thread_observer == NULL);

  disable_chain = make_cleanup (null_cleanup, NULL);
  ALL_NON_EXITED_THREADS (tp)
    if (args == NULL || *args == 0 || number_is_in_list (args, tp->global_num))
      {
        btrace_enable (tp, &record_btrace_conf);

        make_cleanup (record_btrace_disable_callback, tp);
      }

  record_btrace_push_target ();

  discard_cleanups (disable_chain);
}

/* The to_stop_recording method of target record-btrace.  */

static void
record_btrace_stop_recording (struct target_ops *self)
{
  struct thread_info *tp;

  DEBUG ("stop recording");

  record_btrace_auto_disable ();

  ALL_NON_EXITED_THREADS (tp)
    if (tp->btrace.target != NULL)
      btrace_disable (tp);
}

/* The to_disconnect method of target record-btrace.  */

static void
record_btrace_disconnect (struct target_ops *self, const char *args,
                          int from_tty)
{
  struct target_ops *beneath = self->beneath;

  /* Do not stop recording, just clean up the GDB side.  */
  unpush_target (self);

  /* Forward disconnect.  */
  beneath->to_disconnect (beneath, args, from_tty);
}

/* The to_close method of target record-btrace.  */

static void
record_btrace_close (struct target_ops *self)
{
  struct thread_info *tp;

  if (record_btrace_async_inferior_event_handler != NULL)
    delete_async_event_handler (&record_btrace_async_inferior_event_handler);

  /* Make sure automatic recording gets disabled even if we did not stop
     recording before closing the record-btrace target.  */
  record_btrace_auto_disable ();

  /* We should have already stopped recording.
     Tear down btrace in case we have not.  */
  ALL_NON_EXITED_THREADS (tp)
    btrace_teardown (tp);
}

/* The to_async method of target record-btrace.  */

static void
record_btrace_async (struct target_ops *ops, int enable)
{
  if (enable)
    mark_async_event_handler (record_btrace_async_inferior_event_handler);
  else
    clear_async_event_handler (record_btrace_async_inferior_event_handler);

  ops->beneath->to_async (ops->beneath, enable);
}

/* Adjust the size and return a human-readable size suffix.  */

static const char *
record_btrace_adjust_size (unsigned int *size)
{
  unsigned int sz;

  sz = *size;

  if ((sz & ((1u << 30) - 1)) == 0)
    {
      *size = sz >> 30;
      return "GB";
    }
  else if ((sz & ((1u << 20) - 1)) == 0)
    {
      *size = sz >> 20;
      return "MB";
    }
  else if ((sz & ((1u << 10) - 1)) == 0)
    {
      *size = sz >> 10;
      return "kB";
    }
  else
    return "";
}
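
/* For example, a buffer of 2097152 (2 << 20) bytes is adjusted to 2 with
   suffix "MB", while 2097153 bytes is not an exact multiple of any suffix
   and is returned unchanged with an empty suffix.  Only exact multiples
   of 1kB, 1MB, or 1GB are scaled.  */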

/* Print a BTS configuration.  */

static void
record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
{
  const char *suffix;
  unsigned int size;

  size = conf->size;
  if (size > 0)
    {
      suffix = record_btrace_adjust_size (&size);
      printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
    }
}

/* Print an Intel Processor Trace configuration.  */

static void
record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
{
  const char *suffix;
  unsigned int size;

  size = conf->size;
  if (size > 0)
    {
      suffix = record_btrace_adjust_size (&size);
      printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
    }
}

/* Print a branch tracing configuration.  */

static void
record_btrace_print_conf (const struct btrace_config *conf)
{
  printf_unfiltered (_("Recording format: %s.\n"),
                     btrace_format_string (conf->format));

  switch (conf->format)
    {
    case BTRACE_FORMAT_NONE:
      return;

    case BTRACE_FORMAT_BTS:
      record_btrace_print_bts_conf (&conf->bts);
      return;

    case BTRACE_FORMAT_PT:
      record_btrace_print_pt_conf (&conf->pt);
      return;
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
}

/* The to_info_record method of target record-btrace.  */

static void
record_btrace_info (struct target_ops *self)
{
  struct btrace_thread_info *btinfo;
  const struct btrace_config *conf;
  struct thread_info *tp;
  unsigned int insns, calls, gaps;

  DEBUG ("info");

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  validate_registers_access ();

  btinfo = &tp->btrace;

  conf = btrace_conf (btinfo);
  if (conf != NULL)
    record_btrace_print_conf (conf);

  btrace_fetch (tp);

  insns = 0;
  calls = 0;
  gaps = 0;

  if (!btrace_is_empty (tp))
    {
      struct btrace_call_iterator call;
      struct btrace_insn_iterator insn;

      btrace_call_end (&call, btinfo);
      btrace_call_prev (&call, 1);
      calls = btrace_call_number (&call);

      btrace_insn_end (&insn, btinfo);
      insns = btrace_insn_number (&insn);

      /* If the last instruction is not a gap, it is the current instruction
         that is not actually part of the record.  */
      if (btrace_insn_get (&insn) != NULL)
        insns -= 1;

      gaps = btinfo->ngaps;
    }

  printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
                       "for thread %s (%s).\n"), insns, calls, gaps,
                     print_thread_id (tp), target_pid_to_str (tp->ptid));

  if (btrace_is_replaying (tp))
    printf_unfiltered (_("Replay in progress.  At instruction %u.\n"),
                       btrace_insn_number (btinfo->replay));
}
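
/* With BTS recording and no replay in progress, the output produced
   above might look like this (values illustrative only):

     Recording format: bts.
     Buffer size: 64kB.
     Recorded 802 instructions in 12 functions (0 gaps) for thread 1
     (process 1234).  */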

/* Print a decode error.  */

static void
btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
                            enum btrace_format format)
{
  const char *errstr = btrace_decode_error (format, errcode);

  uiout->text (_("["));
  /* ERRCODE > 0 indicates notifications on BTRACE_FORMAT_PT.  */
  if (!(format == BTRACE_FORMAT_PT && errcode > 0))
    {
      uiout->text (_("decode error ("));
      uiout->field_int ("errcode", errcode);
      uiout->text (_("): "));
    }
  uiout->text (errstr);
  uiout->text (_("]\n"));
}

/* Print an unsigned int.  */

static void
ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
{
  uiout->field_fmt (fld, "%u", val);
}

/* A range of source lines.  */

struct btrace_line_range
{
  /* The symtab this line is from.  */
  struct symtab *symtab;

  /* The first line (inclusive).  */
  int begin;

  /* The last line (exclusive).  */
  int end;
};

/* Construct a line range.  */

static struct btrace_line_range
btrace_mk_line_range (struct symtab *symtab, int begin, int end)
{
  struct btrace_line_range range;

  range.symtab = symtab;
  range.begin = begin;
  range.end = end;

  return range;
}

/* Add a line to a line range.  */

static struct btrace_line_range
btrace_line_range_add (struct btrace_line_range range, int line)
{
  if (range.end <= range.begin)
    {
      /* This is the first entry.  */
      range.begin = line;
      range.end = line + 1;
    }
  else if (line < range.begin)
    range.begin = line;
  /* END is exclusive, so a line at or past END must extend the range
     one past LINE.  */
  else if (range.end <= line)
    range.end = line + 1;

  return range;
}
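
/* For example, adding line 42 to the empty range [0; 0) yields [42; 43);
   adding line 40 afterwards grows the range to [40; 43).  */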

/* Return non-zero if RANGE is empty, zero otherwise.  */

static int
btrace_line_range_is_empty (struct btrace_line_range range)
{
  return range.end <= range.begin;
}

/* Return non-zero if LHS contains RHS, zero otherwise.  */

static int
btrace_line_range_contains_range (struct btrace_line_range lhs,
                                  struct btrace_line_range rhs)
{
  return ((lhs.symtab == rhs.symtab)
          && (lhs.begin <= rhs.begin)
          && (rhs.end <= lhs.end));
}

/* Find the line range associated with PC.  */

static struct btrace_line_range
btrace_find_line_range (CORE_ADDR pc)
{
  struct btrace_line_range range;
  struct linetable_entry *lines;
  struct linetable *ltable;
  struct symtab *symtab;
  int nlines, i;

  symtab = find_pc_line_symtab (pc);
  if (symtab == NULL)
    return btrace_mk_line_range (NULL, 0, 0);

  ltable = SYMTAB_LINETABLE (symtab);
  if (ltable == NULL)
    return btrace_mk_line_range (symtab, 0, 0);

  nlines = ltable->nitems;
  lines = ltable->item;
  if (nlines <= 0)
    return btrace_mk_line_range (symtab, 0, 0);

  range = btrace_mk_line_range (symtab, 0, 0);
  /* Scan all NLINES entries; stopping at nlines - 1 would skip the last
     line table entry.  */
  for (i = 0; i < nlines; i++)
    {
      if ((lines[i].pc == pc) && (lines[i].line != 0))
        range = btrace_line_range_add (range, lines[i].line);
    }

  return range;
}

/* Print source lines in LINES to UIOUT.

   UI_ITEM_CHAIN is a cleanup chain for the last source line and the
   instructions corresponding to that source line.  When printing a new source
   line, we do the cleanups for the open chain and open a new cleanup chain for
   the new source line.  If the source line range in LINES is not empty, this
   function will leave the cleanup chain for the last printed source line open
   so instructions can be added to it.  */

static void
btrace_print_lines (struct btrace_line_range lines, struct ui_out *uiout,
                    struct cleanup **ui_item_chain, int flags)
{
  print_source_lines_flags psl_flags;
  int line;

  psl_flags = 0;
  if (flags & DISASSEMBLY_FILENAME)
    psl_flags |= PRINT_SOURCE_LINES_FILENAME;

  for (line = lines.begin; line < lines.end; ++line)
    {
      if (*ui_item_chain != NULL)
        do_cleanups (*ui_item_chain);

      *ui_item_chain
        = make_cleanup_ui_out_tuple_begin_end (uiout, "src_and_asm_line");

      print_source_lines (lines.symtab, line, line + 1, psl_flags);

      make_cleanup_ui_out_list_begin_end (uiout, "line_asm_insn");
    }
}

/* Disassemble a section of the recorded instruction trace.  */

static void
btrace_insn_history (struct ui_out *uiout,
                     const struct btrace_thread_info *btinfo,
                     const struct btrace_insn_iterator *begin,
                     const struct btrace_insn_iterator *end, int flags)
{
  struct cleanup *cleanups, *ui_item_chain;
  struct gdbarch *gdbarch;
  struct btrace_insn_iterator it;
  struct btrace_line_range last_lines;

  DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
         btrace_insn_number (end));

  flags |= DISASSEMBLY_SPECULATIVE;

  gdbarch = target_gdbarch ();
  last_lines = btrace_mk_line_range (NULL, 0, 0);

  cleanups = make_cleanup_ui_out_list_begin_end (uiout, "asm_insns");

  /* UI_ITEM_CHAIN is a cleanup chain for the last source line and the
     instructions corresponding to that line.  */
  ui_item_chain = NULL;

  gdb_pretty_print_disassembler disasm (gdbarch);

  for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
    {
      const struct btrace_insn *insn;

      insn = btrace_insn_get (&it);

      /* A NULL instruction indicates a gap in the trace.  */
      if (insn == NULL)
        {
          const struct btrace_config *conf;

          conf = btrace_conf (btinfo);

          /* We have trace so we must have a configuration.  */
          gdb_assert (conf != NULL);

          uiout->field_fmt ("insn-number", "%u",
                            btrace_insn_number (&it));
          uiout->text ("\t");

          btrace_ui_out_decode_error (uiout, btrace_insn_get_error (&it),
                                      conf->format);
        }
      else
        {
          struct disasm_insn dinsn;

          if ((flags & DISASSEMBLY_SOURCE) != 0)
            {
              struct btrace_line_range lines;

              lines = btrace_find_line_range (insn->pc);
              if (!btrace_line_range_is_empty (lines)
                  && !btrace_line_range_contains_range (last_lines, lines))
                {
                  btrace_print_lines (lines, uiout, &ui_item_chain, flags);
                  last_lines = lines;
                }
              else if (ui_item_chain == NULL)
                {
                  ui_item_chain
                    = make_cleanup_ui_out_tuple_begin_end (uiout,
                                                           "src_and_asm_line");
                  /* No source information.  */
                  make_cleanup_ui_out_list_begin_end (uiout, "line_asm_insn");
                }

              gdb_assert (ui_item_chain != NULL);
            }

          memset (&dinsn, 0, sizeof (dinsn));
          dinsn.number = btrace_insn_number (&it);
          dinsn.addr = insn->pc;

          if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
            dinsn.is_speculative = 1;

          disasm.pretty_print_insn (uiout, &dinsn, flags);
        }
    }

  do_cleanups (cleanups);
}

/* The to_insn_history method of target record-btrace.  */

static void
record_btrace_insn_history (struct target_ops *self, int size, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
                                                       "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  btinfo = require_btrace ();
  history = btinfo->insn_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("insn-history (0x%x): %d", flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
         start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
        begin = *replay;
      else
        btrace_insn_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
         expand in the other direction, as well, to fill up any remaining
         context.  */
      end = begin;
      if (size < 0)
        {
          /* We want the current position covered, as well.  */
          covered = btrace_insn_next (&end, 1);
          covered += btrace_insn_prev (&begin, context - covered);
          covered += btrace_insn_next (&end, context - covered);
        }
      else
        {
          covered = btrace_insn_next (&end, context);
          covered += btrace_insn_prev (&begin, context - covered);
        }
    }
  else
    {
      begin = history->begin;
      end = history->end;

      DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
             btrace_insn_number (&begin), btrace_insn_number (&end));

      if (size < 0)
        {
          end = begin;
          covered = btrace_insn_prev (&begin, context);
        }
      else
        {
          begin = end;
          covered = btrace_insn_next (&end, context);
        }
    }

  if (covered > 0)
    btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
        printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
        printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  btrace_set_insn_history (btinfo, &begin, &end);
  do_cleanups (uiout_cleanup);
}

/* The to_insn_history_range method of target record-btrace.  */

static void
record_btrace_insn_history_range (struct target_ops *self,
                                  ULONGEST from, ULONGEST to, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
                                                       "insn history");
  low = from;
  high = to;

  DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_insn_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_insn_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_insn_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_insn_next (&end, 1);
    }

  btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_insn_history (btinfo, &begin, &end);

  do_cleanups (uiout_cleanup);
}

/* The to_insn_history_from method of target record-btrace.  */

static void
record_btrace_insn_history_from (struct target_ops *self,
                                 ULONGEST from, int size, int flags)
{
  ULONGEST begin, end, context;

  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  if (size < 0)
    {
      end = from;

      if (from < context)
        begin = 0;
      else
        begin = from - context + 1;
    }
  else
    {
      begin = from;
      end = from + context - 1;

      /* Check for wrap-around.  */
      if (end < begin)
        end = ULONGEST_MAX;
    }

  record_btrace_insn_history_range (self, begin, end, flags);
}
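
/* For example, FROM = 10 with SIZE = -5 requests the five instructions
   ending at instruction 10, i.e. the inclusive range [6; 10], while
   SIZE = 5 requests [10; 14] starting at instruction 10 (illustrative
   values only).  */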

/* Print the instruction number range for a function call history line.  */

static void
btrace_call_history_insn_range (struct ui_out *uiout,
                                const struct btrace_function *bfun)
{
  unsigned int begin, end, size;

  size = VEC_length (btrace_insn_s, bfun->insn);
  gdb_assert (size > 0);

  begin = bfun->insn_offset;
  end = begin + size - 1;

  ui_out_field_uint (uiout, "insn begin", begin);
  uiout->text (",");
  ui_out_field_uint (uiout, "insn end", end);
}

/* Compute the lowest and highest source line for the instructions in BFUN
   and return them in PBEGIN and PEND.
   Ignore instructions that can't be mapped to BFUN, e.g. instructions that
   result from inlining or macro expansion.  */

static void
btrace_compute_src_line_range (const struct btrace_function *bfun,
                               int *pbegin, int *pend)
{
  struct btrace_insn *insn;
  struct symtab *symtab;
  struct symbol *sym;
  unsigned int idx;
  int begin, end;

  begin = INT_MAX;
  end = INT_MIN;

  sym = bfun->sym;
  if (sym == NULL)
    goto out;

  symtab = symbol_symtab (sym);

  for (idx = 0; VEC_iterate (btrace_insn_s, bfun->insn, idx, insn); ++idx)
    {
      struct symtab_and_line sal;

      sal = find_pc_line (insn->pc, 0);
      if (sal.symtab != symtab || sal.line == 0)
        continue;

      begin = std::min (begin, sal.line);
      end = std::max (end, sal.line);
    }

 out:
  *pbegin = begin;
  *pend = end;
}

/* Print the source line information for a function call history line.  */

static void
btrace_call_history_src_line (struct ui_out *uiout,
                              const struct btrace_function *bfun)
{
  struct symbol *sym;
  int begin, end;

  sym = bfun->sym;
  if (sym == NULL)
    return;

  uiout->field_string ("file",
                       symtab_to_filename_for_display (symbol_symtab (sym)));

  btrace_compute_src_line_range (bfun, &begin, &end);
  if (end < begin)
    return;

  uiout->text (":");
  uiout->field_int ("min line", begin);

  if (end == begin)
    return;

  uiout->text (",");
  uiout->field_int ("max line", end);
}

/* Get the name of a branch trace function.  */

static const char *
btrace_get_bfun_name (const struct btrace_function *bfun)
{
  struct minimal_symbol *msym;
  struct symbol *sym;

  if (bfun == NULL)
    return "??";

  msym = bfun->msym;
  sym = bfun->sym;

  if (sym != NULL)
    return SYMBOL_PRINT_NAME (sym);
  else if (msym != NULL)
    return MSYMBOL_PRINT_NAME (msym);
  else
    return "??";
}

/* Disassemble a section of the recorded function trace.  */

static void
btrace_call_history (struct ui_out *uiout,
                     const struct btrace_thread_info *btinfo,
                     const struct btrace_call_iterator *begin,
                     const struct btrace_call_iterator *end,
                     int int_flags)
{
  struct btrace_call_iterator it;
  record_print_flags flags = (enum record_print_flag) int_flags;

  DEBUG ("ftrace (0x%x): [%u; %u)", int_flags, btrace_call_number (begin),
         btrace_call_number (end));

  for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
    {
      const struct btrace_function *bfun;
      struct minimal_symbol *msym;
      struct symbol *sym;

      bfun = btrace_call_get (&it);
      sym = bfun->sym;
      msym = bfun->msym;

      /* Print the function index.  */
      ui_out_field_uint (uiout, "index", bfun->number);
      uiout->text ("\t");

      /* Indicate gaps in the trace.  */
      if (bfun->errcode != 0)
        {
          const struct btrace_config *conf;

          conf = btrace_conf (btinfo);

          /* We have trace so we must have a configuration.  */
          gdb_assert (conf != NULL);

          btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);

          continue;
        }

      if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
        {
          int level = bfun->level + btinfo->level, i;

          for (i = 0; i < level; ++i)
            uiout->text ("  ");
        }

      if (sym != NULL)
        uiout->field_string ("function", SYMBOL_PRINT_NAME (sym));
      else if (msym != NULL)
        uiout->field_string ("function", MSYMBOL_PRINT_NAME (msym));
      else if (!uiout->is_mi_like_p ())
        uiout->field_string ("function", "??");

      if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
        {
          uiout->text (_("\tinst "));
          btrace_call_history_insn_range (uiout, bfun);
        }

      if ((flags & RECORD_PRINT_SRC_LINE) != 0)
        {
          uiout->text (_("\tat "));
          btrace_call_history_src_line (uiout, bfun);
        }

      uiout->text ("\n");
    }
}

/* The to_call_history method of target record-btrace.  */

static void
record_btrace_call_history (struct target_ops *self, int size, int int_flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int context, covered;
  record_print_flags flags = (enum record_print_flag) int_flags;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
                                                       "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  btinfo = require_btrace ();
  history = btinfo->call_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("call-history (0x%x): %d", int_flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
         start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
        {
          begin.function = replay->function;
          begin.btinfo = btinfo;
        }
      else
        btrace_call_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
         expand in the other direction, as well, to fill up any remaining
         context.  */
      end = begin;
      if (size < 0)
        {
          /* We want the current position covered, as well.  */
          covered = btrace_call_next (&end, 1);
          covered += btrace_call_prev (&begin, context - covered);
          covered += btrace_call_next (&end, context - covered);
        }
      else
        {
          covered = btrace_call_next (&end, context);
          covered += btrace_call_prev (&begin, context - covered);
        }
    }
  else
    {
      begin = history->begin;
      end = history->end;

      DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", int_flags, size,
             btrace_call_number (&begin), btrace_call_number (&end));

      if (size < 0)
        {
          end = begin;
          covered = btrace_call_prev (&begin, context);
        }
      else
        {
          begin = end;
          covered = btrace_call_next (&end, context);
        }
    }

  if (covered > 0)
    btrace_call_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
        printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
        printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  btrace_set_call_history (btinfo, &begin, &end);
  do_cleanups (uiout_cleanup);
}

/* The to_call_history_range method of target record-btrace.  */

static void
record_btrace_call_history_range (struct target_ops *self,
                                  ULONGEST from, ULONGEST to,
                                  int int_flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;
  record_print_flags flags = (enum record_print_flag) int_flags;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
                                                       "func history");
  low = from;
  high = to;

  DEBUG ("call-history (0x%x): [%u; %u)", int_flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_call_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_call_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_call_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_call_next (&end, 1);
    }

  btrace_call_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_call_history (btinfo, &begin, &end);

  do_cleanups (uiout_cleanup);
}

/* The to_call_history_from method of target record-btrace.  */

static void
record_btrace_call_history_from (struct target_ops *self,
                                 ULONGEST from, int size,
                                 int int_flags)
{
  ULONGEST begin, end, context;
  record_print_flags flags = (enum record_print_flag) int_flags;

  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  if (size < 0)
    {
      end = from;

      if (from < context)
        begin = 0;
      else
        begin = from - context + 1;
    }
  else
    {
      begin = from;
      end = from + context - 1;

      /* Check for wrap-around.  */
      if (end < begin)
        end = ULONGEST_MAX;
    }

  record_btrace_call_history_range (self, begin, end, flags);
}

/* The to_record_is_replaying method of target record-btrace.  */

static int
record_btrace_is_replaying (struct target_ops *self, ptid_t ptid)
{
  struct thread_info *tp;

  ALL_NON_EXITED_THREADS (tp)
    if (ptid_match (tp->ptid, ptid) && btrace_is_replaying (tp))
      return 1;

  return 0;
}

/* The to_record_will_replay method of target record-btrace.  */

static int
record_btrace_will_replay (struct target_ops *self, ptid_t ptid, int dir)
{
  return dir == EXEC_REVERSE || record_btrace_is_replaying (self, ptid);
}

/* The to_xfer_partial method of target record-btrace.  */

static enum target_xfer_status
record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
                            const char *annex, gdb_byte *readbuf,
                            const gdb_byte *writebuf, ULONGEST offset,
                            ULONGEST len, ULONGEST *xfered_len)
{
  /* Filter out requests that don't make sense during replay.  */
  if (replay_memory_access == replay_memory_access_read_only
      && !record_btrace_generating_corefile
      && record_btrace_is_replaying (ops, inferior_ptid))
    {
      switch (object)
        {
        case TARGET_OBJECT_MEMORY:
          {
            struct target_section *section;

            /* We do not allow writing memory in general.  */
            if (writebuf != NULL)
              {
                *xfered_len = len;
                return TARGET_XFER_UNAVAILABLE;
              }

            /* We allow reading readonly memory.  */
            section = target_section_by_addr (ops, offset);
            if (section != NULL)
              {
                /* Check if the section we found is readonly.  */
                if ((bfd_get_section_flags (section->the_bfd_section->owner,
                                            section->the_bfd_section)
                     & SEC_READONLY) != 0)
                  {
                    /* Truncate the request to fit into this section.  */
                    len = std::min (len, section->endaddr - offset);
                    break;
                  }
              }

            *xfered_len = len;
            return TARGET_XFER_UNAVAILABLE;
          }
        }
    }

  /* Forward the request.  */
  ops = ops->beneath;
  return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
                               offset, len, xfered_len);
}

/* The to_insert_breakpoint method of target record-btrace.  */

static int
record_btrace_insert_breakpoint (struct target_ops *ops,
                                 struct gdbarch *gdbarch,
                                 struct bp_target_info *bp_tgt)
{
  const char *old;
  int ret;

  /* Inserting breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  TRY
    {
      ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      replay_memory_access = old;
      throw_exception (except);
    }
  END_CATCH
  replay_memory_access = old;

  return ret;
}
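
/* A minimal sketch of the same save/restore using GDB's scoped_restore
   helper instead of the explicit TRY/CATCH above (assuming that helper
   is available in this translation unit):

     scoped_restore restore_access
       = make_scoped_restore (&replay_memory_access,
                              replay_memory_access_read_write);

   The old value would then be restored automatically on both the normal
   and the exceptional return path.  */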

/* The to_remove_breakpoint method of target record-btrace.  */

static int
record_btrace_remove_breakpoint (struct target_ops *ops,
                                 struct gdbarch *gdbarch,
                                 struct bp_target_info *bp_tgt,
                                 enum remove_bp_reason reason)
{
  const char *old;
  int ret;

  /* Removing breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  TRY
    {
      ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt,
                                                reason);
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      replay_memory_access = old;
      throw_exception (except);
    }
  END_CATCH
  replay_memory_access = old;

  return ret;
}

/* The to_fetch_registers method of target record-btrace.  */

static void
record_btrace_fetch_registers (struct target_ops *ops,
                               struct regcache *regcache, int regno)
{
  struct btrace_insn_iterator *replay;
  struct thread_info *tp;

  tp = find_thread_ptid (inferior_ptid);
  gdb_assert (tp != NULL);

  replay = tp->btrace.replay;
  if (replay != NULL && !record_btrace_generating_corefile)
    {
      const struct btrace_insn *insn;
      struct gdbarch *gdbarch;
      int pcreg;

      gdbarch = get_regcache_arch (regcache);
      pcreg = gdbarch_pc_regnum (gdbarch);
      if (pcreg < 0)
        return;

      /* We can only provide the PC register.  */
      if (regno >= 0 && regno != pcreg)
        return;

      insn = btrace_insn_get (replay);
      gdb_assert (insn != NULL);

      regcache_raw_supply (regcache, regno, &insn->pc);
    }
  else
    {
      struct target_ops *t = ops->beneath;

      t->to_fetch_registers (t, regcache, regno);
    }
}

/* The to_store_registers method of target record-btrace.  */

static void
record_btrace_store_registers (struct target_ops *ops,
                               struct regcache *regcache, int regno)
{
  struct target_ops *t;

  if (!record_btrace_generating_corefile
      && record_btrace_is_replaying (ops, inferior_ptid))
    error (_("Cannot write registers while replaying."));

  gdb_assert (may_write_registers != 0);

  t = ops->beneath;
  t->to_store_registers (t, regcache, regno);
}

/* The to_prepare_to_store method of target record-btrace.  */

static void
record_btrace_prepare_to_store (struct target_ops *ops,
                                struct regcache *regcache)
{
  struct target_ops *t;

  if (!record_btrace_generating_corefile
      && record_btrace_is_replaying (ops, inferior_ptid))
    return;

  t = ops->beneath;
  t->to_prepare_to_store (t, regcache);
}

/* The branch trace frame cache.  */

struct btrace_frame_cache
{
  /* The thread.  */
  struct thread_info *tp;

  /* The frame info.  */
  struct frame_info *frame;

  /* The branch trace function segment.  */
  const struct btrace_function *bfun;
};

/* A struct btrace_frame_cache hash table indexed by NEXT.  */

static htab_t bfcache;

/* hash_f for htab_create_alloc of bfcache.  */

static hashval_t
bfcache_hash (const void *arg)
{
  const struct btrace_frame_cache *cache
    = (const struct btrace_frame_cache *) arg;

  return htab_hash_pointer (cache->frame);
}

/* eq_f for htab_create_alloc of bfcache.  */

static int
bfcache_eq (const void *arg1, const void *arg2)
{
  const struct btrace_frame_cache *cache1
    = (const struct btrace_frame_cache *) arg1;
  const struct btrace_frame_cache *cache2
    = (const struct btrace_frame_cache *) arg2;

  return cache1->frame == cache2->frame;
}
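
/* For illustration, BFCACHE would be created with the two callbacks above
   roughly like this (the actual allocation happens in this file's
   initialization code, outside this excerpt):

     bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
                                  xcalloc, xfree);  */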

/* Create a new btrace frame cache.  */

static struct btrace_frame_cache *
bfcache_new (struct frame_info *frame)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
  cache->frame = frame;

  slot = htab_find_slot (bfcache, cache, INSERT);
  gdb_assert (*slot == NULL);
  *slot = cache;

  return cache;
}

/* Extract the branch trace function from a branch trace frame.  */

static const struct btrace_function *
btrace_get_frame_function (struct frame_info *frame)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;
  struct btrace_frame_cache pattern;
  void **slot;

  pattern.frame = frame;

  slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
  if (slot == NULL)
    return NULL;

  cache = (const struct btrace_frame_cache *) *slot;
  return cache->bfun;
}

/* Implement stop_reason method for record_btrace_frame_unwind.  */

static enum unwind_stop_reason
record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
                                        void **this_cache)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;

  cache = (const struct btrace_frame_cache *) *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  if (bfun->up == NULL)
    return UNWIND_UNAVAILABLE;

  return UNWIND_NO_REASON;
}

/* Implement this_id method for record_btrace_frame_unwind.  */

static void
record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
                             struct frame_id *this_id)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;
  CORE_ADDR code, special;

  cache = (const struct btrace_frame_cache *) *this_cache;

  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  while (bfun->segment.prev != NULL)
    bfun = bfun->segment.prev;

  code = get_frame_func (this_frame);
  special = bfun->number;

  *this_id = frame_id_build_unavailable_stack_special (code, special);

  DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
         btrace_get_bfun_name (cache->bfun),
         core_addr_to_string_nz (this_id->code_addr),
         core_addr_to_string_nz (this_id->special_addr));
}

/* Implement prev_register method for record_btrace_frame_unwind.  */

static struct value *
record_btrace_frame_prev_register (struct frame_info *this_frame,
                                   void **this_cache,
                                   int regnum)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun, *caller;
  const struct btrace_insn *insn;
  struct gdbarch *gdbarch;
  CORE_ADDR pc;
  int pcreg;

  gdbarch = get_frame_arch (this_frame);
  pcreg = gdbarch_pc_regnum (gdbarch);
  if (pcreg < 0 || regnum != pcreg)
    throw_error (NOT_AVAILABLE_ERROR,
                 _("Registers are not available in btrace record history"));

  cache = (const struct btrace_frame_cache *) *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  caller = bfun->up;
  if (caller == NULL)
    throw_error (NOT_AVAILABLE_ERROR,
                 _("No caller in btrace record history"));

  if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
    {
      insn = VEC_index (btrace_insn_s, caller->insn, 0);
      pc = insn->pc;
    }
  else
    {
      insn = VEC_last (btrace_insn_s, caller->insn);
      pc = insn->pc;

      pc += gdb_insn_length (gdbarch, pc);
    }

  DEBUG ("[frame] unwound PC in %s on level %d: %s",
         btrace_get_bfun_name (bfun), bfun->level,
         core_addr_to_string_nz (pc));

  return frame_unwind_got_address (this_frame, regnum, pc);
}

/* Implement sniffer method for record_btrace_frame_unwind.  */

static int
record_btrace_frame_sniffer (const struct frame_unwind *self,
                             struct frame_info *this_frame,
                             void **this_cache)
{
  const struct btrace_function *bfun;
  struct btrace_frame_cache *cache;
  struct thread_info *tp;
  struct frame_info *next;

  /* THIS_FRAME does not contain a reference to its thread.  */
  tp = find_thread_ptid (inferior_ptid);
  gdb_assert (tp != NULL);

  bfun = NULL;
  next = get_next_frame (this_frame);
  if (next == NULL)
    {
      const struct btrace_insn_iterator *replay;

      replay = tp->btrace.replay;
      if (replay != NULL)
        bfun = replay->function;
    }
  else
    {
      const struct btrace_function *callee;

      callee = btrace_get_frame_function (next);
      if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
        bfun = callee->up;
    }

  if (bfun == NULL)
    return 0;

  DEBUG ("[frame] sniffed frame for %s on level %d",
         btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = tp;
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}

/* Implement sniffer method for record_btrace_tailcall_frame_unwind.  */

static int
record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
                                      struct frame_info *this_frame,
                                      void **this_cache)
{
  const struct btrace_function *bfun, *callee;
  struct btrace_frame_cache *cache;
  struct frame_info *next;

  next = get_next_frame (this_frame);
  if (next == NULL)
    return 0;

  callee = btrace_get_frame_function (next);
  if (callee == NULL)
    return 0;

  if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
    return 0;

  bfun = callee->up;
  if (bfun == NULL)
    return 0;

  DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
         btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = find_thread_ptid (inferior_ptid);
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}

static void
record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = (struct btrace_frame_cache *) this_cache;

  slot = htab_find_slot (bfcache, cache, NO_INSERT);
  gdb_assert (slot != NULL);

  htab_remove_elt (bfcache, cache);
}

/* btrace recording stores neither previous memory content nor stack frame
   content.  Any unwinding would return erroneous results as the stack
   contents no longer match the changed PC value restored from history.
   Therefore this unwinder reports any possibly unwound registers as
   <unavailable>.  */

const struct frame_unwind record_btrace_frame_unwind =
{
  NORMAL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_frame_sniffer,
  record_btrace_frame_dealloc_cache
};

const struct frame_unwind record_btrace_tailcall_frame_unwind =
{
  TAILCALL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_tailcall_frame_sniffer,
  record_btrace_frame_dealloc_cache
};

/* Implement the to_get_unwinder method.  */

static const struct frame_unwind *
record_btrace_to_get_unwinder (struct target_ops *self)
{
  return &record_btrace_frame_unwind;
}

/* Implement the to_get_tailcall_unwinder method.  */

static const struct frame_unwind *
record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
{
  return &record_btrace_tailcall_frame_unwind;
}

/* Return a human-readable string for FLAG.  */

static const char *
btrace_thread_flag_to_str (enum btrace_thread_flag flag)
{
  switch (flag)
    {
    case BTHR_STEP:
      return "step";

    case BTHR_RSTEP:
      return "reverse-step";

    case BTHR_CONT:
      return "cont";

    case BTHR_RCONT:
      return "reverse-cont";

    case BTHR_STOP:
      return "stop";
    }

  return "<invalid>";
}

/* Indicate that TP should be resumed according to FLAG.  */

static void
record_btrace_resume_thread (struct thread_info *tp,
                             enum btrace_thread_flag flag)
{
  struct btrace_thread_info *btinfo;

  DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp),
         target_pid_to_str (tp->ptid), flag, btrace_thread_flag_to_str (flag));

  btinfo = &tp->btrace;

  /* Fetch the latest branch trace.  */
  btrace_fetch (tp);

  /* A resume request overwrites a preceding resume or stop request.  */
  btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
  btinfo->flags |= flag;
}

/* Get the current frame for TP.  */

static struct frame_info *
get_thread_current_frame (struct thread_info *tp)
{
  struct frame_info *frame;
  ptid_t old_inferior_ptid;
  int executing;

  /* Set INFERIOR_PTID, which is implicitly used by get_current_frame.  */
  old_inferior_ptid = inferior_ptid;
  inferior_ptid = tp->ptid;

  /* Clear the executing flag to allow changes to the current frame.
     We are not actually running, yet.  We just started a reverse execution
     command or a record goto command.
     For the latter, EXECUTING is false and this has no effect.
     For the former, EXECUTING is true and we're in to_wait, about to
     move the thread.  Since we need to recompute the stack, we temporarily
     set EXECUTING to false.  */
  executing = is_executing (inferior_ptid);
  set_executing (inferior_ptid, 0);

  frame = NULL;
  TRY
    {
      frame = get_current_frame ();
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      /* Restore the previous execution state.  */
      set_executing (inferior_ptid, executing);

      /* Restore the previous inferior_ptid.  */
      inferior_ptid = old_inferior_ptid;

      throw_exception (except);
    }
  END_CATCH

  /* Restore the previous execution state.  */
  set_executing (inferior_ptid, executing);

  /* Restore the previous inferior_ptid.  */
  inferior_ptid = old_inferior_ptid;

  return frame;
}

/* Start replaying a thread.  */

static struct btrace_insn_iterator *
record_btrace_start_replaying (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;
  replay = NULL;

  /* We can't start replaying without trace.  */
  if (btinfo->begin == NULL)
    return NULL;

  /* GDB stores the current frame_id when stepping in order to detect steps
     into subroutines.
     Since frames are computed differently when we're replaying, we need to
     recompute those stored frames and fix them up so we can still detect
     subroutines after we have started replaying.  */
1916 TRY
1917 {
1918 struct frame_info *frame;
1919 struct frame_id frame_id;
1920 int upd_step_frame_id, upd_step_stack_frame_id;
1921
1922 /* The current frame without replaying - computed via normal unwind. */
1923 frame = get_thread_current_frame (tp);
1924 frame_id = get_frame_id (frame);
1925
1926 /* Check if we need to update any stepping-related frame id's. */
1927 upd_step_frame_id = frame_id_eq (frame_id,
1928 tp->control.step_frame_id);
1929 upd_step_stack_frame_id = frame_id_eq (frame_id,
1930 tp->control.step_stack_frame_id);
1931
1932 /* We start replaying at the end of the branch trace. This corresponds
1933 to the current instruction. */
1934 replay = XNEW (struct btrace_insn_iterator);
1935 btrace_insn_end (replay, btinfo);
1936
1937 /* Skip gaps at the end of the trace. */
1938 while (btrace_insn_get (replay) == NULL)
1939 {
1940 unsigned int steps;
1941
1942 steps = btrace_insn_prev (replay, 1);
1943 if (steps == 0)
1944 error (_("No trace."));
1945 }
1946
1947 /* We're not replaying, yet. */
1948 gdb_assert (btinfo->replay == NULL);
1949 btinfo->replay = replay;
1950
1951 /* Make sure we're not using any stale registers. */
1952 registers_changed_ptid (tp->ptid);
1953
1954 /* The current frame with replaying - computed via btrace unwind. */
1955 frame = get_thread_current_frame (tp);
1956 frame_id = get_frame_id (frame);
1957
1958 /* Replace stepping related frames where necessary. */
1959 if (upd_step_frame_id)
1960 tp->control.step_frame_id = frame_id;
1961 if (upd_step_stack_frame_id)
1962 tp->control.step_stack_frame_id = frame_id;
1963 }
1964 CATCH (except, RETURN_MASK_ALL)
1965 {
1966 xfree (btinfo->replay);
1967 btinfo->replay = NULL;
1968
1969 registers_changed_ptid (tp->ptid);
1970
1971 throw_exception (except);
1972 }
1973 END_CATCH
1974
1975 return replay;
1976 }
1977
1978 /* Stop replaying a thread. */
1979
1980 static void
1981 record_btrace_stop_replaying (struct thread_info *tp)
1982 {
1983 struct btrace_thread_info *btinfo;
1984
1985 btinfo = &tp->btrace;
1986
1987 xfree (btinfo->replay);
1988 btinfo->replay = NULL;
1989
1990 /* Make sure we're not leaving any stale registers. */
1991 registers_changed_ptid (tp->ptid);
1992 }
1993
1994 /* Stop replaying TP if it is at the end of its execution history. */
1995
1996 static void
1997 record_btrace_stop_replaying_at_end (struct thread_info *tp)
1998 {
1999 struct btrace_insn_iterator *replay, end;
2000 struct btrace_thread_info *btinfo;
2001
2002 btinfo = &tp->btrace;
2003 replay = btinfo->replay;
2004
2005 if (replay == NULL)
2006 return;
2007
2008 btrace_insn_end (&end, btinfo);
2009
2010 if (btrace_insn_cmp (replay, &end) == 0)
2011 record_btrace_stop_replaying (tp);
2012 }
2013
2014 /* The to_resume method of target record-btrace. */
2015
2016 static void
2017 record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
2018 enum gdb_signal signal)
2019 {
2020 struct thread_info *tp;
2021 enum btrace_thread_flag flag, cflag;
2022
2023 DEBUG ("resume %s: %s%s", target_pid_to_str (ptid),
2024 execution_direction == EXEC_REVERSE ? "reverse-" : "",
2025 step ? "step" : "cont");
2026
2027 /* Store the execution direction of the last resume.
2028
2029 If there is more than one to_resume call, we have to rely on infrun
2030 to not change the execution direction in-between. */
2031 record_btrace_resume_exec_dir = execution_direction;
2032
2033 /* As long as we're not replaying, just forward the request.
2034
2035 For non-stop targets this means that no thread is replaying. In order to
2036 make progress, we may need to explicitly move replaying threads to the end
2037 of their execution history. */
2038 if ((execution_direction != EXEC_REVERSE)
2039 && !record_btrace_is_replaying (ops, minus_one_ptid))
2040 {
2041 ops = ops->beneath;
2042 ops->to_resume (ops, ptid, step, signal);
2043 return;
2044 }
2045
2046 /* Compute the btrace thread flag for the requested move. */
2047 if (execution_direction == EXEC_REVERSE)
2048 {
2049 flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;
2050 cflag = BTHR_RCONT;
2051 }
2052 else
2053 {
2054 flag = step == 0 ? BTHR_CONT : BTHR_STEP;
2055 cflag = BTHR_CONT;
2056 }
2057
2058 /* We just indicate the resume intent here. The actual stepping happens in
2059 record_btrace_wait below.
2060
2061 For all-stop targets, we only step INFERIOR_PTID and continue others. */
2062 if (!target_is_non_stop_p ())
2063 {
2064 gdb_assert (ptid_match (inferior_ptid, ptid));
2065
2066 ALL_NON_EXITED_THREADS (tp)
2067 if (ptid_match (tp->ptid, ptid))
2068 {
2069 if (ptid_match (tp->ptid, inferior_ptid))
2070 record_btrace_resume_thread (tp, flag);
2071 else
2072 record_btrace_resume_thread (tp, cflag);
2073 }
2074 }
2075 else
2076 {
2077 ALL_NON_EXITED_THREADS (tp)
2078 if (ptid_match (tp->ptid, ptid))
2079 record_btrace_resume_thread (tp, flag);
2080 }
2081
2082 /* Async support. */
2083 if (target_can_async_p ())
2084 {
2085 target_async (1);
2086 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2087 }
2088 }
2089
2090 /* The to_commit_resume method of target record-btrace. */
2091
2092 static void
2093 record_btrace_commit_resume (struct target_ops *ops)
2094 {
2095 if ((execution_direction != EXEC_REVERSE)
2096 && !record_btrace_is_replaying (ops, minus_one_ptid))
2097 ops->beneath->to_commit_resume (ops->beneath);
2098 }
2099
2100 /* Cancel resuming TP. */
2101
2102 static void
2103 record_btrace_cancel_resume (struct thread_info *tp)
2104 {
2105 enum btrace_thread_flag flags;
2106
2107 flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
2108 if (flags == 0)
2109 return;
2110
2111 DEBUG ("cancel resume thread %s (%s): %x (%s)",
2112 print_thread_id (tp),
2113 target_pid_to_str (tp->ptid), flags,
2114 btrace_thread_flag_to_str (flags));
2115
2116 tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
2117 record_btrace_stop_replaying_at_end (tp);
2118 }
2119
2120 /* Return a target_waitstatus indicating that we ran out of history. */
2121
2122 static struct target_waitstatus
2123 btrace_step_no_history (void)
2124 {
2125 struct target_waitstatus status;
2126
2127 status.kind = TARGET_WAITKIND_NO_HISTORY;
2128
2129 return status;
2130 }
2131
2132 /* Return a target_waitstatus indicating that a step finished. */
2133
2134 static struct target_waitstatus
2135 btrace_step_stopped (void)
2136 {
2137 struct target_waitstatus status;
2138
2139 status.kind = TARGET_WAITKIND_STOPPED;
2140 status.value.sig = GDB_SIGNAL_TRAP;
2141
2142 return status;
2143 }
2144
2145 /* Return a target_waitstatus indicating that a thread was stopped as
2146 requested. */
2147
2148 static struct target_waitstatus
2149 btrace_step_stopped_on_request (void)
2150 {
2151 struct target_waitstatus status;
2152
2153 status.kind = TARGET_WAITKIND_STOPPED;
2154 status.value.sig = GDB_SIGNAL_0;
2155
2156 return status;
2157 }
2158
2159 /* Return a target_waitstatus indicating a spurious stop. */
2160
2161 static struct target_waitstatus
2162 btrace_step_spurious (void)
2163 {
2164 struct target_waitstatus status;
2165
2166 status.kind = TARGET_WAITKIND_SPURIOUS;
2167
2168 return status;
2169 }
2170
2171 /* Return a target_waitstatus indicating that the thread was not resumed. */
2172
2173 static struct target_waitstatus
2174 btrace_step_no_resumed (void)
2175 {
2176 struct target_waitstatus status;
2177
2178 status.kind = TARGET_WAITKIND_NO_RESUMED;
2179
2180 return status;
2181 }
2182
2183 /* Return a target_waitstatus indicating that we should wait again. */
2184
2185 static struct target_waitstatus
2186 btrace_step_again (void)
2187 {
2188 struct target_waitstatus status;
2189
2190 status.kind = TARGET_WAITKIND_IGNORE;
2191
2192 return status;
2193 }
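
/* Quick reference for the btrace_step_* helpers above (illustrative
   summary):

       btrace_step_no_history ()          TARGET_WAITKIND_NO_HISTORY
       btrace_step_stopped ()             TARGET_WAITKIND_STOPPED, GDB_SIGNAL_TRAP
       btrace_step_stopped_on_request ()  TARGET_WAITKIND_STOPPED, GDB_SIGNAL_0
       btrace_step_spurious ()            TARGET_WAITKIND_SPURIOUS
       btrace_step_no_resumed ()          TARGET_WAITKIND_NO_RESUMED
       btrace_step_again ()               TARGET_WAITKIND_IGNORE  */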
2194
2195 /* Clear the record histories. */
2196
2197 static void
2198 record_btrace_clear_histories (struct btrace_thread_info *btinfo)
2199 {
2200 xfree (btinfo->insn_history);
2201 xfree (btinfo->call_history);
2202
2203 btinfo->insn_history = NULL;
2204 btinfo->call_history = NULL;
2205 }
2206
2207 /* Check whether TP's current replay position is at a breakpoint. */
2208
2209 static int
2210 record_btrace_replay_at_breakpoint (struct thread_info *tp)
2211 {
2212 struct btrace_insn_iterator *replay;
2213 struct btrace_thread_info *btinfo;
2214 const struct btrace_insn *insn;
2215 struct inferior *inf;
2216
2217 btinfo = &tp->btrace;
2218 replay = btinfo->replay;
2219
2220 if (replay == NULL)
2221 return 0;
2222
2223 insn = btrace_insn_get (replay);
2224 if (insn == NULL)
2225 return 0;
2226
2227 inf = find_inferior_ptid (tp->ptid);
2228 if (inf == NULL)
2229 return 0;
2230
2231 return record_check_stopped_by_breakpoint (inf->aspace, insn->pc,
2232 &btinfo->stop_reason);
2233 }
2234
2235 /* Step one instruction in forward direction. */
2236
2237 static struct target_waitstatus
2238 record_btrace_single_step_forward (struct thread_info *tp)
2239 {
2240 struct btrace_insn_iterator *replay, end, start;
2241 struct btrace_thread_info *btinfo;
2242
2243 btinfo = &tp->btrace;
2244 replay = btinfo->replay;
2245
2246 /* We're done if we're not replaying. */
2247 if (replay == NULL)
2248 return btrace_step_no_history ();
2249
2250 /* Check if we're stepping a breakpoint. */
2251 if (record_btrace_replay_at_breakpoint (tp))
2252 return btrace_step_stopped ();
2253
2254 /* Skip gaps during replay. If we end up at a gap (at the end of the trace),
2255 jump back to the instruction at which we started. */
2256 start = *replay;
2257 do
2258 {
2259 unsigned int steps;
2260
2261 /* We will bail out here if we continue stepping after reaching the end
2262 of the execution history. */
2263 steps = btrace_insn_next (replay, 1);
2264 if (steps == 0)
2265 {
2266 *replay = start;
2267 return btrace_step_no_history ();
2268 }
2269 }
2270 while (btrace_insn_get (replay) == NULL);
2271
2272 /* Determine the end of the instruction trace. */
2273 btrace_insn_end (&end, btinfo);
2274
2275 /* The execution trace contains (and ends with) the current instruction.
2276 This instruction has not been executed yet, so the trace really ends
2277 one instruction earlier. */
2278 if (btrace_insn_cmp (replay, &end) == 0)
2279 return btrace_step_no_history ();
2280
2281 return btrace_step_spurious ();
2282 }
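
/* Illustration with a hypothetical trace: given the recorded history

       [insn 1] [insn 2] [gap] [insn 3] <current>

   a forward step from insn 2 skips the gap and lands on insn 3.  A
   forward step from insn 3 reaches <current>, which has not been
   executed yet, so the step reports TARGET_WAITKIND_NO_HISTORY.  If the
   iterator cannot advance at all, it is restored to its starting
   position before NO_HISTORY is reported.  */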
2283
2284 /* Step one instruction in backward direction. */
2285
2286 static struct target_waitstatus
2287 record_btrace_single_step_backward (struct thread_info *tp)
2288 {
2289 struct btrace_insn_iterator *replay, start;
2290 struct btrace_thread_info *btinfo;
2291
2292 btinfo = &tp->btrace;
2293 replay = btinfo->replay;
2294
2295 /* Start replaying if we're not already doing so. */
2296 if (replay == NULL)
2297 replay = record_btrace_start_replaying (tp);
2298
2299 /* If we can't step any further, we reached the end of the history.
2300 Skip gaps during replay. If we end up at a gap (at the beginning of
2301 the trace), jump back to the instruction at which we started. */
2302 start = *replay;
2303 do
2304 {
2305 unsigned int steps;
2306
2307 steps = btrace_insn_prev (replay, 1);
2308 if (steps == 0)
2309 {
2310 *replay = start;
2311 return btrace_step_no_history ();
2312 }
2313 }
2314 while (btrace_insn_get (replay) == NULL);
2315
2316 /* Check if we're stepping a breakpoint.
2317
2318 For reverse-stepping, this check is after the step. There is logic in
2319 infrun.c that handles reverse-stepping separately. See, for example,
2320 proceed and adjust_pc_after_break.
2321
2322 This code assumes that for reverse-stepping, PC points to the last
2323 de-executed instruction, whereas for forward-stepping PC points to the
2324 next to-be-executed instruction. */
2325 if (record_btrace_replay_at_breakpoint (tp))
2326 return btrace_step_stopped ();
2327
2328 return btrace_step_spurious ();
2329 }
2330
2331 /* Step a single thread. */
2332
2333 static struct target_waitstatus
2334 record_btrace_step_thread (struct thread_info *tp)
2335 {
2336 struct btrace_thread_info *btinfo;
2337 struct target_waitstatus status;
2338 enum btrace_thread_flag flags;
2339
2340 btinfo = &tp->btrace;
2341
2342 flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
2343 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
2344
2345 DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp),
2346 target_pid_to_str (tp->ptid), flags,
2347 btrace_thread_flag_to_str (flags));
2348
2349 /* We can't step without an execution history. */
2350 if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
2351 return btrace_step_no_history ();
2352
2353 switch (flags)
2354 {
2355 default:
2356 internal_error (__FILE__, __LINE__, _("invalid stepping type."));
2357
2358 case BTHR_STOP:
2359 return btrace_step_stopped_on_request ();
2360
2361 case BTHR_STEP:
2362 status = record_btrace_single_step_forward (tp);
2363 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2364 break;
2365
2366 return btrace_step_stopped ();
2367
2368 case BTHR_RSTEP:
2369 status = record_btrace_single_step_backward (tp);
2370 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2371 break;
2372
2373 return btrace_step_stopped ();
2374
2375 case BTHR_CONT:
2376 status = record_btrace_single_step_forward (tp);
2377 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2378 break;
2379
2380 btinfo->flags |= flags;
2381 return btrace_step_again ();
2382
2383 case BTHR_RCONT:
2384 status = record_btrace_single_step_backward (tp);
2385 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2386 break;
2387
2388 btinfo->flags |= flags;
2389 return btrace_step_again ();
2390 }
2391
2392 /* We keep threads moving at the end of their execution history. The to_wait
2393 method will stop the thread for which the event is reported. */
2394 if (status.kind == TARGET_WAITKIND_NO_HISTORY)
2395 btinfo->flags |= flags;
2396
2397 return status;
2398 }
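
/* Usage note (illustrative): BTHR_STEP and BTHR_RSTEP produce exactly
   one reportable status per call, while BTHR_CONT and BTHR_RCONT
   re-queue themselves and return TARGET_WAITKIND_IGNORE after each
   intermediate instruction.  The to_wait method below keeps stepping a
   thread until it gets back something other than IGNORE.  */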
2399
2400 /* A vector of threads. */
2401
2402 typedef struct thread_info * tp_t;
2403 DEF_VEC_P (tp_t);
2404
2405 /* Announce further events if necessary. */
2406
2407 static void
2408 record_btrace_maybe_mark_async_event (const VEC (tp_t) *moving,
2409 const VEC (tp_t) *no_history)
2410 {
2411 int more_moving, more_no_history;
2412
2413 more_moving = !VEC_empty (tp_t, moving);
2414 more_no_history = !VEC_empty (tp_t, no_history);
2415
2416 if (!more_moving && !more_no_history)
2417 return;
2418
2419 if (more_moving)
2420 DEBUG ("movers pending");
2421
2422 if (more_no_history)
2423 DEBUG ("no-history pending");
2424
2425 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2426 }
2427
2428 /* The to_wait method of target record-btrace. */
2429
2430 static ptid_t
2431 record_btrace_wait (struct target_ops *ops, ptid_t ptid,
2432 struct target_waitstatus *status, int options)
2433 {
2434 VEC (tp_t) *moving, *no_history;
2435 struct thread_info *tp, *eventing;
2436 struct cleanup *cleanups = make_cleanup (null_cleanup, NULL);
2437
2438 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);
2439
2440 /* As long as we're not replaying, just forward the request. */
2441 if ((execution_direction != EXEC_REVERSE)
2442 && !record_btrace_is_replaying (ops, minus_one_ptid))
2443 {
2444 ops = ops->beneath;
2445 return ops->to_wait (ops, ptid, status, options);
2446 }
2447
2448 moving = NULL;
2449 no_history = NULL;
2450
2451 make_cleanup (VEC_cleanup (tp_t), &moving);
2452 make_cleanup (VEC_cleanup (tp_t), &no_history);
2453
2454 /* Keep a work list of moving threads. */
2455 ALL_NON_EXITED_THREADS (tp)
2456 if (ptid_match (tp->ptid, ptid)
2457 && ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0))
2458 VEC_safe_push (tp_t, moving, tp);
2459
2460 if (VEC_empty (tp_t, moving))
2461 {
2462 *status = btrace_step_no_resumed ();
2463
2464 DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid),
2465 target_waitstatus_to_string (status));
2466
2467 do_cleanups (cleanups);
2468 return null_ptid;
2469 }
2470
2471 /* Step moving threads one by one, one step each, until either one thread
2472 reports an event or we run out of threads to step.
2473
2474 When stepping more than one thread, chances are that some threads reach
2475 the end of their execution history earlier than others. If we reported
2476 this immediately, all-stop on top of non-stop would stop all threads and
2477 resume the same threads next time. And we would report the same thread
2478 having reached the end of its execution history again.
2479
2480 In the worst case, this would starve the other threads. But even if other
2481 threads were allowed to make progress, this would result in far too
2482 many intermediate stops.
2483
2484 We therefore delay the reporting of "no execution history" until we have
2485 nothing else to report. By this time, all threads should have moved to
2486 either the beginning or the end of their execution history. There will
2487 be a single user-visible stop. */
2488 eventing = NULL;
2489 while ((eventing == NULL) && !VEC_empty (tp_t, moving))
2490 {
2491 unsigned int ix;
2492
2493 ix = 0;
2494 while ((eventing == NULL) && VEC_iterate (tp_t, moving, ix, tp))
2495 {
2496 *status = record_btrace_step_thread (tp);
2497
2498 switch (status->kind)
2499 {
2500 case TARGET_WAITKIND_IGNORE:
2501 ix++;
2502 break;
2503
2504 case TARGET_WAITKIND_NO_HISTORY:
2505 VEC_safe_push (tp_t, no_history,
2506 VEC_ordered_remove (tp_t, moving, ix));
2507 break;
2508
2509 default:
2510 eventing = VEC_unordered_remove (tp_t, moving, ix);
2511 break;
2512 }
2513 }
2514 }
2515
2516 if (eventing == NULL)
2517 {
2518 /* We started with at least one moving thread. This thread must have
2519 either stopped or reached the end of its execution history.
2520
2521 In the former case, EVENTING must not be NULL.
2522 In the latter case, NO_HISTORY must not be empty. */
2523 gdb_assert (!VEC_empty (tp_t, no_history));
2524
2525 /* We kept threads moving at the end of their execution history. Stop
2526 EVENTING now that we are going to report its stop. */
2527 eventing = VEC_unordered_remove (tp_t, no_history, 0);
2528 eventing->btrace.flags &= ~BTHR_MOVE;
2529
2530 *status = btrace_step_no_history ();
2531 }
2532
2533 gdb_assert (eventing != NULL);
2534
2535 /* We kept threads replaying at the end of their execution history. Stop
2536 replaying EVENTING now that we are going to report its stop. */
2537 record_btrace_stop_replaying_at_end (eventing);
2538
2539 /* Stop all other threads. */
2540 if (!target_is_non_stop_p ())
2541 ALL_NON_EXITED_THREADS (tp)
2542 record_btrace_cancel_resume (tp);
2543
2544 /* In async mode, we need to announce further events. */
2545 if (target_is_async_p ())
2546 record_btrace_maybe_mark_async_event (moving, no_history);
2547
2548 /* Start record histories anew from the current position. */
2549 record_btrace_clear_histories (&eventing->btrace);
2550
2551 /* We moved the replay position but did not update registers. */
2552 registers_changed_ptid (eventing->ptid);
2553
2554 DEBUG ("wait ended by thread %s (%s): %s",
2555 print_thread_id (eventing),
2556 target_pid_to_str (eventing->ptid),
2557 target_waitstatus_to_string (status));
2558
2559 do_cleanups (cleanups);
2560 return eventing->ptid;
2561 }
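
/* A worked example (hypothetical): threads T1 and T2 are both replaying
   with BTHR_RCONT.  If T1 hits a breakpoint after a few backward steps,
   it is removed from MOVING and its stop is reported; T2 keeps its
   BTHR_RCONT flag, and in async mode a further event is announced so a
   subsequent to_wait call continues stepping T2.  If both instead run
   out of history, both land in NO_HISTORY; one is reported with
   TARGET_WAITKIND_NO_HISTORY and the other remains queued for the next
   wait.  */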
2562
2563 /* The to_stop method of target record-btrace. */
2564
2565 static void
2566 record_btrace_stop (struct target_ops *ops, ptid_t ptid)
2567 {
2568 DEBUG ("stop %s", target_pid_to_str (ptid));
2569
2570 /* As long as we're not replaying, just forward the request. */
2571 if ((execution_direction != EXEC_REVERSE)
2572 && !record_btrace_is_replaying (ops, minus_one_ptid))
2573 {
2574 ops = ops->beneath;
2575 ops->to_stop (ops, ptid);
2576 }
2577 else
2578 {
2579 struct thread_info *tp;
2580
2581 ALL_NON_EXITED_THREADS (tp)
2582 if (ptid_match (tp->ptid, ptid))
2583 {
2584 tp->btrace.flags &= ~BTHR_MOVE;
2585 tp->btrace.flags |= BTHR_STOP;
2586 }
2587 }
2588 }
2589
2590 /* The to_can_execute_reverse method of target record-btrace. */
2591
2592 static int
2593 record_btrace_can_execute_reverse (struct target_ops *self)
2594 {
2595 return 1;
2596 }
2597
2598 /* The to_stopped_by_sw_breakpoint method of target record-btrace. */
2599
2600 static int
2601 record_btrace_stopped_by_sw_breakpoint (struct target_ops *ops)
2602 {
2603 if (record_btrace_is_replaying (ops, minus_one_ptid))
2604 {
2605 struct thread_info *tp = inferior_thread ();
2606
2607 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2608 }
2609
2610 return ops->beneath->to_stopped_by_sw_breakpoint (ops->beneath);
2611 }
2612
2613 /* The to_supports_stopped_by_sw_breakpoint method of target
2614 record-btrace. */
2615
2616 static int
2617 record_btrace_supports_stopped_by_sw_breakpoint (struct target_ops *ops)
2618 {
2619 if (record_btrace_is_replaying (ops, minus_one_ptid))
2620 return 1;
2621
2622 return ops->beneath->to_supports_stopped_by_sw_breakpoint (ops->beneath);
2623 }
2624
2625 /* The to_stopped_by_hw_breakpoint method of target record-btrace. */
2626
2627 static int
2628 record_btrace_stopped_by_hw_breakpoint (struct target_ops *ops)
2629 {
2630 if (record_btrace_is_replaying (ops, minus_one_ptid))
2631 {
2632 struct thread_info *tp = inferior_thread ();
2633
2634 return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2635 }
2636
2637 return ops->beneath->to_stopped_by_hw_breakpoint (ops->beneath);
2638 }
2639
2640 /* The to_supports_stopped_by_hw_breakpoint method of target
2641 record-btrace. */
2642
2643 static int
2644 record_btrace_supports_stopped_by_hw_breakpoint (struct target_ops *ops)
2645 {
2646 if (record_btrace_is_replaying (ops, minus_one_ptid))
2647 return 1;
2648
2649 return ops->beneath->to_supports_stopped_by_hw_breakpoint (ops->beneath);
2650 }
2651
2652 /* The to_update_thread_list method of target record-btrace. */
2653
2654 static void
2655 record_btrace_update_thread_list (struct target_ops *ops)
2656 {
2657 /* We don't add or remove threads during replay. */
2658 if (record_btrace_is_replaying (ops, minus_one_ptid))
2659 return;
2660
2661 /* Forward the request. */
2662 ops = ops->beneath;
2663 ops->to_update_thread_list (ops);
2664 }
2665
2666 /* The to_thread_alive method of target record-btrace. */
2667
2668 static int
2669 record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
2670 {
2671 /* We don't add or remove threads during replay. */
2672 if (record_btrace_is_replaying (ops, minus_one_ptid))
2673 return find_thread_ptid (ptid) != NULL;
2674
2675 /* Forward the request. */
2676 ops = ops->beneath;
2677 return ops->to_thread_alive (ops, ptid);
2678 }
2679
2680 /* Set the replay branch trace instruction iterator. If IT is NULL, replay
2681 is stopped. */
2682
2683 static void
2684 record_btrace_set_replay (struct thread_info *tp,
2685 const struct btrace_insn_iterator *it)
2686 {
2687 struct btrace_thread_info *btinfo;
2688
2689 btinfo = &tp->btrace;
2690
2691 if (it == NULL || it->function == NULL)
2692 record_btrace_stop_replaying (tp);
2693 else
2694 {
2695 if (btinfo->replay == NULL)
2696 record_btrace_start_replaying (tp);
2697 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
2698 return;
2699
2700 *btinfo->replay = *it;
2701 registers_changed_ptid (tp->ptid);
2702 }
2703
2704 /* Start anew from the new replay position. */
2705 record_btrace_clear_histories (btinfo);
2706
2707 stop_pc = regcache_read_pc (get_current_regcache ());
2708 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
2709 }
2710
2711 /* The to_goto_record_begin method of target record-btrace. */
2712
2713 static void
2714 record_btrace_goto_begin (struct target_ops *self)
2715 {
2716 struct thread_info *tp;
2717 struct btrace_insn_iterator begin;
2718
2719 tp = require_btrace_thread ();
2720
2721 btrace_insn_begin (&begin, &tp->btrace);
2722
2723 /* Skip gaps at the beginning of the trace. */
2724 while (btrace_insn_get (&begin) == NULL)
2725 {
2726 unsigned int steps;
2727
2728 steps = btrace_insn_next (&begin, 1);
2729 if (steps == 0)
2730 error (_("No trace."));
2731 }
2732
2733 record_btrace_set_replay (tp, &begin);
2734 }
2735
2736 /* The to_goto_record_end method of target record-btrace. */
2737
2738 static void
2739 record_btrace_goto_end (struct target_ops *ops)
2740 {
2741 struct thread_info *tp;
2742
2743 tp = require_btrace_thread ();
2744
2745 record_btrace_set_replay (tp, NULL);
2746 }
2747
2748 /* The to_goto_record method of target record-btrace. */
2749
2750 static void
2751 record_btrace_goto (struct target_ops *self, ULONGEST insn)
2752 {
2753 struct thread_info *tp;
2754 struct btrace_insn_iterator it;
2755 unsigned int number;
2756 int found;
2757
2758 number = insn;
2759
2760 /* Check for wrap-arounds. */
2761 if (number != insn)
2762 error (_("Instruction number out of range."));
2763
2764 tp = require_btrace_thread ();
2765
2766 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
2767
2768 /* Check if the instruction could not be found or is a gap. */
2769 if (found == 0 || btrace_insn_get (&it) == NULL)
2770 error (_("No such instruction."));
2771
2772 record_btrace_set_replay (tp, &it);
2773 }
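
/* Hypothetical session showing the record goto commands handled above
   (exact output depends on the GDB version and the recorded program):

       (gdb) record goto begin     # oldest recorded instruction
       (gdb) record goto 42        # instruction number 42
       (gdb) record goto end       # stop replaying

   "record goto 42" raises "No such instruction." if the number lies
   outside the trace or falls into a gap.  */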
2774
2775 /* The to_record_stop_replaying method of target record-btrace. */
2776
2777 static void
2778 record_btrace_stop_replaying_all (struct target_ops *self)
2779 {
2780 struct thread_info *tp;
2781
2782 ALL_NON_EXITED_THREADS (tp)
2783 record_btrace_stop_replaying (tp);
2784 }
2785
2786 /* The to_execution_direction target method. */
2787
2788 static enum exec_direction_kind
2789 record_btrace_execution_direction (struct target_ops *self)
2790 {
2791 return record_btrace_resume_exec_dir;
2792 }
2793
2794 /* The to_prepare_to_generate_core target method. */
2795
2796 static void
2797 record_btrace_prepare_to_generate_core (struct target_ops *self)
2798 {
2799 record_btrace_generating_corefile = 1;
2800 }
2801
2802 /* The to_done_generating_core target method. */
2803
2804 static void
2805 record_btrace_done_generating_core (struct target_ops *self)
2806 {
2807 record_btrace_generating_corefile = 0;
2808 }
2809
2810 /* Initialize the record-btrace target ops. */
2811
2812 static void
2813 init_record_btrace_ops (void)
2814 {
2815 struct target_ops *ops;
2816
2817 ops = &record_btrace_ops;
2818 ops->to_shortname = "record-btrace";
2819 ops->to_longname = "Branch tracing target";
2820 ops->to_doc = "Collect control-flow trace and provide the execution history.";
2821 ops->to_open = record_btrace_open;
2822 ops->to_close = record_btrace_close;
2823 ops->to_async = record_btrace_async;
2824 ops->to_detach = record_detach;
2825 ops->to_disconnect = record_btrace_disconnect;
2826 ops->to_mourn_inferior = record_mourn_inferior;
2827 ops->to_kill = record_kill;
2828 ops->to_stop_recording = record_btrace_stop_recording;
2829 ops->to_info_record = record_btrace_info;
2830 ops->to_insn_history = record_btrace_insn_history;
2831 ops->to_insn_history_from = record_btrace_insn_history_from;
2832 ops->to_insn_history_range = record_btrace_insn_history_range;
2833 ops->to_call_history = record_btrace_call_history;
2834 ops->to_call_history_from = record_btrace_call_history_from;
2835 ops->to_call_history_range = record_btrace_call_history_range;
2836 ops->to_record_is_replaying = record_btrace_is_replaying;
2837 ops->to_record_will_replay = record_btrace_will_replay;
2838 ops->to_record_stop_replaying = record_btrace_stop_replaying_all;
2839 ops->to_xfer_partial = record_btrace_xfer_partial;
2840 ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
2841 ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
2842 ops->to_fetch_registers = record_btrace_fetch_registers;
2843 ops->to_store_registers = record_btrace_store_registers;
2844 ops->to_prepare_to_store = record_btrace_prepare_to_store;
2845 ops->to_get_unwinder = &record_btrace_to_get_unwinder;
2846 ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;
2847 ops->to_resume = record_btrace_resume;
2848 ops->to_commit_resume = record_btrace_commit_resume;
2849 ops->to_wait = record_btrace_wait;
2850 ops->to_stop = record_btrace_stop;
2851 ops->to_update_thread_list = record_btrace_update_thread_list;
2852 ops->to_thread_alive = record_btrace_thread_alive;
2853 ops->to_goto_record_begin = record_btrace_goto_begin;
2854 ops->to_goto_record_end = record_btrace_goto_end;
2855 ops->to_goto_record = record_btrace_goto;
2856 ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
2857 ops->to_stopped_by_sw_breakpoint = record_btrace_stopped_by_sw_breakpoint;
2858 ops->to_supports_stopped_by_sw_breakpoint
2859 = record_btrace_supports_stopped_by_sw_breakpoint;
2860 ops->to_stopped_by_hw_breakpoint = record_btrace_stopped_by_hw_breakpoint;
2861 ops->to_supports_stopped_by_hw_breakpoint
2862 = record_btrace_supports_stopped_by_hw_breakpoint;
2863 ops->to_execution_direction = record_btrace_execution_direction;
2864 ops->to_prepare_to_generate_core = record_btrace_prepare_to_generate_core;
2865 ops->to_done_generating_core = record_btrace_done_generating_core;
2866 ops->to_stratum = record_stratum;
2867 ops->to_magic = OPS_MAGIC;
2868 }
2869
2870 /* Start recording in BTS format. */
2871
2872 static void
2873 cmd_record_btrace_bts_start (char *args, int from_tty)
2874 {
2875 if (args != NULL && *args != 0)
2876 error (_("Invalid argument."));
2877
2878 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2879
2880 TRY
2881 {
2882 execute_command ("target record-btrace", from_tty);
2883 }
2884 CATCH (exception, RETURN_MASK_ALL)
2885 {
2886 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2887 throw_exception (exception);
2888 }
2889 END_CATCH
2890 }
2891
2892 /* Start recording in Intel Processor Trace format. */
2893
2894 static void
2895 cmd_record_btrace_pt_start (char *args, int from_tty)
2896 {
2897 if (args != NULL && *args != 0)
2898 error (_("Invalid argument."));
2899
2900 record_btrace_conf.format = BTRACE_FORMAT_PT;
2901
2902 TRY
2903 {
2904 execute_command ("target record-btrace", from_tty);
2905 }
2906 CATCH (exception, RETURN_MASK_ALL)
2907 {
2908 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2909 throw_exception (exception);
2910 }
2911 END_CATCH
2912 }
2913
2914 /* The "record btrace" command: start recording, trying Intel Processor Trace first and falling back to BTS. */
2915
2916 static void
2917 cmd_record_btrace_start (char *args, int from_tty)
2918 {
2919 if (args != NULL && *args != 0)
2920 error (_("Invalid argument."));
2921
2922 record_btrace_conf.format = BTRACE_FORMAT_PT;
2923
2924 TRY
2925 {
2926 execute_command ("target record-btrace", from_tty);
2927 }
2928 CATCH (exception, RETURN_MASK_ALL)
2929 {
2930 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2931
2932 TRY
2933 {
2934 execute_command ("target record-btrace", from_tty);
2935 }
2936 CATCH (exception, RETURN_MASK_ALL)
2937 {
2938 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2939 throw_exception (exception);
2940 }
2941 END_CATCH
2942 }
2943 END_CATCH
2944 }
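
/* Illustrative behavior of the fallback above: "record btrace" first
   requests the Intel Processor Trace format and silently retries with
   BTS if that fails; only when both attempts fail is the BTS error
   propagated to the user.

       (gdb) record btrace         # try pt, then bts
       (gdb) record btrace pt      # pt only, no fallback
       (gdb) record btrace bts     # bts only, no fallback  */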
2945
2946 /* The "set record btrace" command. */
2947
2948 static void
2949 cmd_set_record_btrace (char *args, int from_tty)
2950 {
2951 cmd_show_list (set_record_btrace_cmdlist, from_tty, "");
2952 }
2953
2954 /* The "show record btrace" command. */
2955
2956 static void
2957 cmd_show_record_btrace (char *args, int from_tty)
2958 {
2959 cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
2960 }
2961
2962 /* The "show record btrace replay-memory-access" command. */
2963
2964 static void
2965 cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2966 struct cmd_list_element *c, const char *value)
2967 {
2968 fprintf_filtered (file, _("Replay memory access is %s.\n"),
2969 replay_memory_access);
2970 }
2971
2972 /* The "set record btrace bts" command. */
2973
2974 static void
2975 cmd_set_record_btrace_bts (char *args, int from_tty)
2976 {
2977 printf_unfiltered (_("\"set record btrace bts\" must be followed "
2978 "by an appropriate subcommand.\n"));
2979 help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
2980 all_commands, gdb_stdout);
2981 }
2982
2983 /* The "show record btrace bts" command. */
2984
2985 static void
2986 cmd_show_record_btrace_bts (char *args, int from_tty)
2987 {
2988 cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
2989 }
2990
2991 /* The "set record btrace pt" command. */
2992
2993 static void
2994 cmd_set_record_btrace_pt (char *args, int from_tty)
2995 {
2996 printf_unfiltered (_("\"set record btrace pt\" must be followed "
2997 "by an appropriate subcommand.\n"));
2998 help_list (set_record_btrace_pt_cmdlist, "set record btrace pt ",
2999 all_commands, gdb_stdout);
3000 }
3001
3002 /* The "show record btrace pt" command. */
3003
3004 static void
3005 cmd_show_record_btrace_pt (char *args, int from_tty)
3006 {
3007 cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, "");
3008 }
3009
3010 /* The "record bts buffer-size" show value function. */
3011
3012 static void
3013 show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
3014 struct cmd_list_element *c,
3015 const char *value)
3016 {
3017 fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
3018 value);
3019 }
3020
3021 /* The "record pt buffer-size" show value function. */
3022
3023 static void
3024 show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
3025 struct cmd_list_element *c,
3026 const char *value)
3027 {
3028 fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
3029 value);
3030 }
3031
3032 void _initialize_record_btrace (void);
3033
3034 /* Initialize btrace commands. */
3035
3036 void
3037 _initialize_record_btrace (void)
3038 {
3039 add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
3040 _("Start branch trace recording."), &record_btrace_cmdlist,
3041 "record btrace ", 0, &record_cmdlist);
3042 add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
3043
3044 add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
3045 _("\
3046 Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
3047 The processor stores a from/to record for each branch into a cyclic buffer.\n\
3048 This format may not be available on all processors."),
3049 &record_btrace_cmdlist);
3050 add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);
3051
3052 add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
3053 _("\
3054 Start branch trace recording in Intel Processor Trace format.\n\n\
3055 This format may not be available on all processors."),
3056 &record_btrace_cmdlist);
3057 add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);
3058
3059 add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
3060 _("Set record options"), &set_record_btrace_cmdlist,
3061 "set record btrace ", 0, &set_record_cmdlist);
3062
3063 add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
3064 _("Show record options"), &show_record_btrace_cmdlist,
3065 "show record btrace ", 0, &show_record_cmdlist);
3066
3067 add_setshow_enum_cmd ("replay-memory-access", no_class,
3068 replay_memory_access_types, &replay_memory_access, _("\
3069 Set what memory accesses are allowed during replay."), _("\
3070 Show what memory accesses are allowed during replay."),
3071 _("Default is READ-ONLY.\n\n\
3072 The btrace record target does not trace data.\n\
3073 The memory therefore corresponds to the live target and not \
3074 to the current replay position.\n\n\
3075 When READ-ONLY, allow accesses to read-only memory during replay.\n\
3076 When READ-WRITE, allow accesses to read-only and read-write memory during \
3077 replay."),
3078 NULL, cmd_show_replay_memory_access,
3079 &set_record_btrace_cmdlist,
3080 &show_record_btrace_cmdlist);
3081
3082 add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
3083 _("Set record btrace bts options"),
3084 &set_record_btrace_bts_cmdlist,
3085 "set record btrace bts ", 0, &set_record_btrace_cmdlist);
3086
3087 add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
3088 _("Show record btrace bts options"),
3089 &show_record_btrace_bts_cmdlist,
3090 "show record btrace bts ", 0, &show_record_btrace_cmdlist);
3091
3092 add_setshow_uinteger_cmd ("buffer-size", no_class,
3093 &record_btrace_conf.bts.size,
3094 _("Set the record/replay bts buffer size."),
3095 _("Show the record/replay bts buffer size."), _("\
3096 When starting recording request a trace buffer of this size. \
3097 The actual buffer size may differ from the requested size. \
3098 Use \"info record\" to see the actual buffer size.\n\n\
3099 Bigger buffers allow longer recording but also take more time to process \
3100 the recorded execution trace.\n\n\
3101 The trace buffer size may not be changed while recording."), NULL,
3102 show_record_bts_buffer_size_value,
3103 &set_record_btrace_bts_cmdlist,
3104 &show_record_btrace_bts_cmdlist);
3105
3106 add_prefix_cmd ("pt", class_support, cmd_set_record_btrace_pt,
3107 _("Set record btrace pt options"),
3108 &set_record_btrace_pt_cmdlist,
3109 "set record btrace pt ", 0, &set_record_btrace_cmdlist);
3110
3111 add_prefix_cmd ("pt", class_support, cmd_show_record_btrace_pt,
3112 _("Show record btrace pt options"),
3113 &show_record_btrace_pt_cmdlist,
3114 "show record btrace pt ", 0, &show_record_btrace_cmdlist);
3115
3116 add_setshow_uinteger_cmd ("buffer-size", no_class,
3117 &record_btrace_conf.pt.size,
3118 _("Set the record/replay pt buffer size."),
3119 _("Show the record/replay pt buffer size."), _("\
3120 Bigger buffers allow longer recording but also take more time to process \
3121 the recorded execution.\n\
3122 The actual buffer size may differ from the requested size. Use \"info record\" \
3123 to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
3124 &set_record_btrace_pt_cmdlist,
3125 &show_record_btrace_pt_cmdlist);
3126
3127 init_record_btrace_ops ();
3128 add_target (&record_btrace_ops);
3129
3130 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
3131 xcalloc, xfree);
3132
3133 record_btrace_conf.bts.size = 64 * 1024;
3134 record_btrace_conf.pt.size = 16 * 1024;
3135 }
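
/* Hypothetical configuration session (buffer sizes are requests in
   bytes; the size actually granted may differ, see "info record"):

       (gdb) set record btrace bts buffer-size 131072
       (gdb) set record btrace pt buffer-size 32768
       (gdb) show record btrace replay-memory-access
       Replay memory access is read-only.

   The defaults installed above are 64 KiB for BTS and 16 KiB for PT.  */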