record-btrace: indicate gaps
[deliverable/binutils-gdb.git] / gdb / record-btrace.c
1 /* Branch trace support for GDB, the GNU debugger.
2
3 Copyright (C) 2013-2015 Free Software Foundation, Inc.
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include "record.h"
24 #include "gdbthread.h"
25 #include "target.h"
26 #include "gdbcmd.h"
27 #include "disasm.h"
28 #include "observer.h"
29 #include "cli/cli-utils.h"
30 #include "source.h"
31 #include "ui-out.h"
32 #include "symtab.h"
33 #include "filenames.h"
34 #include "regcache.h"
35 #include "frame-unwind.h"
36 #include "hashtab.h"
37 #include "infrun.h"
38 #include "event-loop.h"
39 #include "inf-loop.h"
40
41 /* The target_ops of record-btrace. */
42 static struct target_ops record_btrace_ops;
43
44 /* A new thread observer enabling branch tracing for the new thread. */
45 static struct observer *record_btrace_thread_observer;
46
47 /* Memory access types used in set/show record btrace replay-memory-access. */
48 static const char replay_memory_access_read_only[] = "read-only";
49 static const char replay_memory_access_read_write[] = "read-write";
50 static const char *const replay_memory_access_types[] =
51 {
52 replay_memory_access_read_only,
53 replay_memory_access_read_write,
54 NULL
55 };
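/* These are the possible arguments to
   "set record btrace replay-memory-access"; for example

   (gdb) set record btrace replay-memory-access read-write

   lifts the read-only restriction while replaying.  */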
56
57 /* The currently allowed replay memory access type. */
58 static const char *replay_memory_access = replay_memory_access_read_only;
59
64 /* The execution direction of the last resume we got. See record-full.c. */
65 static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;
66
67 /* The async event handler for reverse/replay execution. */
68 static struct async_event_handler *record_btrace_async_inferior_event_handler;
69
70 /* A flag indicating that we are currently generating a core file. */
71 static int record_btrace_generating_corefile;
72
73 /* The current branch trace configuration. */
74 static struct btrace_config record_btrace_conf;
75
76 /* Command list for "record btrace". */
77 static struct cmd_list_element *record_btrace_cmdlist;
78
79 /* Command lists for "set/show record btrace". */
80 static struct cmd_list_element *set_record_btrace_cmdlist;
81 static struct cmd_list_element *show_record_btrace_cmdlist;
82
83 /* Command lists for "set/show record btrace bts". */
84 static struct cmd_list_element *set_record_btrace_bts_cmdlist;
85 static struct cmd_list_element *show_record_btrace_bts_cmdlist;
86
87 /* Print a record-btrace debug message. Use do ... while (0) to avoid
88 ambiguities when used in if statements. */
89
90 #define DEBUG(msg, args...) \
91 do \
92 { \
93 if (record_debug != 0) \
94 fprintf_unfiltered (gdb_stdlog, \
95 "[record-btrace] " msg "\n", ##args); \
96 } \
97 while (0)
98
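/* For example, assuming "set debug record 1" is in effect, a call such as

   DEBUG ("resume %s", "step");

   prints "[record-btrace] resume step" to gdb_stdlog.  Illustrative only;
   the actual messages are those at the call sites below.  */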
99
100 /* Update the branch trace for the current thread and return a pointer to its
101 thread_info.
102
103 Throws an error if there is no thread or no trace. This function never
104 returns NULL. */
105
106 static struct thread_info *
107 require_btrace_thread (void)
108 {
109 struct thread_info *tp;
110
111 DEBUG ("require");
112
113 tp = find_thread_ptid (inferior_ptid);
114 if (tp == NULL)
115 error (_("No thread."));
116
117 btrace_fetch (tp);
118
119 if (btrace_is_empty (tp))
120 error (_("No trace."));
121
122 return tp;
123 }
124
125 /* Update the branch trace for the current thread and return a pointer to its
126 branch trace information struct.
127
128 Throws an error if there is no thread or no trace. This function never
129 returns NULL. */
130
131 static struct btrace_thread_info *
132 require_btrace (void)
133 {
134 struct thread_info *tp;
135
136 tp = require_btrace_thread ();
137
138 return &tp->btrace;
139 }
140
141 /* Enable branch tracing for one thread. Warn on errors. */
142
143 static void
144 record_btrace_enable_warn (struct thread_info *tp)
145 {
146 volatile struct gdb_exception error;
147
148 TRY_CATCH (error, RETURN_MASK_ERROR)
149 btrace_enable (tp, &record_btrace_conf);
150
151 if (error.message != NULL)
152 warning ("%s", error.message);
153 }
154
155 /* Callback function to disable branch tracing for one thread. */
156
157 static void
158 record_btrace_disable_callback (void *arg)
159 {
160 struct thread_info *tp;
161
162 tp = arg;
163
164 btrace_disable (tp);
165 }
166
167 /* Enable automatic tracing of new threads. */
168
169 static void
170 record_btrace_auto_enable (void)
171 {
172 DEBUG ("attach thread observer");
173
174 record_btrace_thread_observer
175 = observer_attach_new_thread (record_btrace_enable_warn);
176 }
177
178 /* Disable automatic tracing of new threads. */
179
180 static void
181 record_btrace_auto_disable (void)
182 {
183 /* The observer may have been detached, already. */
184 if (record_btrace_thread_observer == NULL)
185 return;
186
187 DEBUG ("detach thread observer");
188
189 observer_detach_new_thread (record_btrace_thread_observer);
190 record_btrace_thread_observer = NULL;
191 }
192
193 /* The record-btrace async event handler function. */
194
195 static void
196 record_btrace_handle_async_inferior_event (gdb_client_data data)
197 {
198 inferior_event_handler (INF_REG_EVENT, NULL);
199 }
200
201 /* The to_open method of target record-btrace. */
202
203 static void
204 record_btrace_open (const char *args, int from_tty)
205 {
206 struct cleanup *disable_chain;
207 struct thread_info *tp;
208
209 DEBUG ("open");
210
211 record_preopen ();
212
213 if (!target_has_execution)
214 error (_("The program is not being run."));
215
216 if (non_stop)
217 error (_("Record btrace can't debug inferior in non-stop mode."));
218
219 gdb_assert (record_btrace_thread_observer == NULL);
220
221 disable_chain = make_cleanup (null_cleanup, NULL);
222 ALL_NON_EXITED_THREADS (tp)
223 if (args == NULL || *args == 0 || number_is_in_list (args, tp->num))
224 {
225 btrace_enable (tp, &record_btrace_conf);
226
227 make_cleanup (record_btrace_disable_callback, tp);
228 }
229
230 record_btrace_auto_enable ();
231
232 push_target (&record_btrace_ops);
233
234 record_btrace_async_inferior_event_handler
235 = create_async_event_handler (record_btrace_handle_async_inferior_event,
236 NULL);
237 record_btrace_generating_corefile = 0;
238
239 observer_notify_record_changed (current_inferior (), 1);
240
241 discard_cleanups (disable_chain);
242 }
243
244 /* The to_stop_recording method of target record-btrace. */
245
246 static void
247 record_btrace_stop_recording (struct target_ops *self)
248 {
249 struct thread_info *tp;
250
251 DEBUG ("stop recording");
252
253 record_btrace_auto_disable ();
254
255 ALL_NON_EXITED_THREADS (tp)
256 if (tp->btrace.target != NULL)
257 btrace_disable (tp);
258 }
259
260 /* The to_close method of target record-btrace. */
261
262 static void
263 record_btrace_close (struct target_ops *self)
264 {
265 struct thread_info *tp;
266
267 if (record_btrace_async_inferior_event_handler != NULL)
268 delete_async_event_handler (&record_btrace_async_inferior_event_handler);
269
270 /* Make sure automatic recording gets disabled even if we did not stop
271 recording before closing the record-btrace target. */
272 record_btrace_auto_disable ();
273
274 /* We should have already stopped recording.
275 Tear down btrace in case we have not. */
276 ALL_NON_EXITED_THREADS (tp)
277 btrace_teardown (tp);
278 }
279
280 /* The to_async method of target record-btrace. */
281
282 static void
283 record_btrace_async (struct target_ops *ops,
284 void (*callback) (enum inferior_event_type event_type,
285 void *context),
286 void *context)
287 {
288 if (callback != NULL)
289 mark_async_event_handler (record_btrace_async_inferior_event_handler);
290 else
291 clear_async_event_handler (record_btrace_async_inferior_event_handler);
292
293 ops->beneath->to_async (ops->beneath, callback, context);
294 }
295
296 /* Adjust *SIZE and return a human-readable size suffix. */
297
298 static const char *
299 record_btrace_adjust_size (unsigned int *size)
300 {
301 unsigned int sz;
302
303 sz = *size;
304
305 if ((sz & ((1u << 30) - 1)) == 0)
306 {
307 *size = sz >> 30;
308 return "GB";
309 }
310 else if ((sz & ((1u << 20) - 1)) == 0)
311 {
312 *size = sz >> 20;
313 return "MB";
314 }
315 else if ((sz & ((1u << 10) - 1)) == 0)
316 {
317 *size = sz >> 10;
318 return "kB";
319 }
320 else
321 return "";
322 }
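/* For example (illustrative):

   unsigned int size = 2u << 20;
   const char *suffix = record_btrace_adjust_size (&size);

   leaves size == 2 and suffix == "MB".  A size that is not a multiple of
   1kB is left unchanged and gets an empty suffix.  */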
323
324 /* Print a BTS configuration. */
325
326 static void
327 record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
328 {
329 const char *suffix;
330 unsigned int size;
331
332 size = conf->size;
333 if (size > 0)
334 {
335 suffix = record_btrace_adjust_size (&size);
336 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
337 }
338 }
339
340 /* Print a branch tracing configuration. */
341
342 static void
343 record_btrace_print_conf (const struct btrace_config *conf)
344 {
345 printf_unfiltered (_("Recording format: %s.\n"),
346 btrace_format_string (conf->format));
347
348 switch (conf->format)
349 {
350 case BTRACE_FORMAT_NONE:
351 return;
352
353 case BTRACE_FORMAT_BTS:
354 record_btrace_print_bts_conf (&conf->bts);
355 return;
356 }
357
358 internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
359 }
360
361 /* The to_info_record method of target record-btrace. */
362
363 static void
364 record_btrace_info (struct target_ops *self)
365 {
366 struct btrace_thread_info *btinfo;
367 const struct btrace_config *conf;
368 struct thread_info *tp;
369 unsigned int insns, calls, gaps;
370
371 DEBUG ("info");
372
373 tp = find_thread_ptid (inferior_ptid);
374 if (tp == NULL)
375 error (_("No thread."));
376
377 btinfo = &tp->btrace;
378
379 conf = btrace_conf (btinfo);
380 if (conf != NULL)
381 record_btrace_print_conf (conf);
382
383 btrace_fetch (tp);
384
385 insns = 0;
386 calls = 0;
387 gaps = 0;
388
389 if (!btrace_is_empty (tp))
390 {
391 struct btrace_call_iterator call;
392 struct btrace_insn_iterator insn;
393
394 btrace_call_end (&call, btinfo);
395 btrace_call_prev (&call, 1);
396 calls = btrace_call_number (&call);
397
398 btrace_insn_end (&insn, btinfo);
399
400 insns = btrace_insn_number (&insn);
401 if (insns != 0)
402 {
403 /* The last instruction does not really belong to the trace. */
404 insns -= 1;
405 }
406 else
407 {
408 unsigned int steps;
409
410 /* Skip gaps at the end. */
411 do
412 {
413 steps = btrace_insn_prev (&insn, 1);
414 if (steps == 0)
415 break;
416
417 insns = btrace_insn_number (&insn);
418 }
419 while (insns == 0);
420 }
421
422 gaps = btinfo->ngaps;
423 }
424
425 printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
426 "for thread %d (%s).\n"), insns, calls, gaps,
427 tp->num, target_pid_to_str (tp->ptid));
428
429 if (btrace_is_replaying (tp))
430 printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
431 btrace_insn_number (btinfo->replay));
432 }
433
434 /* Print a decode error. */
435
436 static void
437 btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
438 enum btrace_format format)
439 {
440 const char *errstr;
441 int is_error;
442
443 errstr = _("unknown");
444 is_error = 1;
445
446 switch (format)
447 {
448 default:
449 break;
450
451 case BTRACE_FORMAT_BTS:
452 switch (errcode)
453 {
454 default:
455 break;
456
457 case BDE_BTS_OVERFLOW:
458 errstr = _("instruction overflow");
459 break;
460
461 case BDE_BTS_INSN_SIZE:
462 errstr = _("unknown instruction");
463 break;
464 }
465 break;
466 }
467
468 ui_out_text (uiout, _("["));
469 if (is_error)
470 {
471 ui_out_text (uiout, _("decode error ("));
472 ui_out_field_int (uiout, "errcode", errcode);
473 ui_out_text (uiout, _("): "));
474 }
475 ui_out_text (uiout, errstr);
476 ui_out_text (uiout, _("]\n"));
477 }
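/* For example, a BTS overflow with error code N is printed as
   "[decode error (N): instruction overflow]".  */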
478
479 /* Print an unsigned int. */
480
481 static void
482 ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
483 {
484 ui_out_field_fmt (uiout, fld, "%u", val);
485 }
486
487 /* Disassemble a section of the recorded instruction trace. */
488
489 static void
490 btrace_insn_history (struct ui_out *uiout,
491 const struct btrace_thread_info *btinfo,
492 const struct btrace_insn_iterator *begin,
493 const struct btrace_insn_iterator *end, int flags)
494 {
495 struct gdbarch *gdbarch;
496 struct btrace_insn_iterator it;
497
498 DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
499 btrace_insn_number (end));
500
501 gdbarch = target_gdbarch ();
502
503 for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
504 {
505 const struct btrace_insn *insn;
506
507 insn = btrace_insn_get (&it);
508
509 /* A NULL instruction indicates a gap in the trace. */
510 if (insn == NULL)
511 {
512 const struct btrace_config *conf;
513
514 conf = btrace_conf (btinfo);
515
516 /* We have trace, so we must have a configuration. */
517 gdb_assert (conf != NULL);
518
519 btrace_ui_out_decode_error (uiout, it.function->errcode,
520 conf->format);
521 }
522 else
523 {
524 /* Print the instruction index. */
525 ui_out_field_uint (uiout, "index", btrace_insn_number (&it));
526 ui_out_text (uiout, "\t");
527
528 /* Disassembly with '/m' flag may not produce the expected result.
529 See PR gdb/11833. */
530 gdb_disassembly (gdbarch, uiout, NULL, flags, 1, insn->pc,
531 insn->pc + 1);
532 }
533 }
534 }
535
536 /* The to_insn_history method of target record-btrace. */
537
538 static void
539 record_btrace_insn_history (struct target_ops *self, int size, int flags)
540 {
541 struct btrace_thread_info *btinfo;
542 struct btrace_insn_history *history;
543 struct btrace_insn_iterator begin, end;
544 struct cleanup *uiout_cleanup;
545 struct ui_out *uiout;
546 unsigned int context, covered;
547
548 uiout = current_uiout;
549 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
550 "insn history");
551 context = abs (size);
552 if (context == 0)
553 error (_("Bad record instruction-history-size."));
554
555 btinfo = require_btrace ();
556 history = btinfo->insn_history;
557 if (history == NULL)
558 {
559 struct btrace_insn_iterator *replay;
560
561 DEBUG ("insn-history (0x%x): %d", flags, size);
562
563 /* If we're replaying, we start at the replay position. Otherwise, we
564 start at the tail of the trace. */
565 replay = btinfo->replay;
566 if (replay != NULL)
567 begin = *replay;
568 else
569 btrace_insn_end (&begin, btinfo);
570
571 /* We start from here and expand in the requested direction. Then we
572 expand in the other direction, as well, to fill up any remaining
573 context. */
574 end = begin;
575 if (size < 0)
576 {
577 /* We want the current position covered, as well. */
578 covered = btrace_insn_next (&end, 1);
579 covered += btrace_insn_prev (&begin, context - covered);
580 covered += btrace_insn_next (&end, context - covered);
581 }
582 else
583 {
584 covered = btrace_insn_next (&end, context);
585 covered += btrace_insn_prev (&begin, context - covered);
586 }
587 }
588 else
589 {
590 begin = history->begin;
591 end = history->end;
592
593 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
594 btrace_insn_number (&begin), btrace_insn_number (&end));
595
596 if (size < 0)
597 {
598 end = begin;
599 covered = btrace_insn_prev (&begin, context);
600 }
601 else
602 {
603 begin = end;
604 covered = btrace_insn_next (&end, context);
605 }
606 }
607
608 if (covered > 0)
609 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
610 else
611 {
612 if (size < 0)
613 printf_unfiltered (_("At the start of the branch trace record.\n"));
614 else
615 printf_unfiltered (_("At the end of the branch trace record.\n"));
616 }
617
618 btrace_set_insn_history (btinfo, &begin, &end);
619 do_cleanups (uiout_cleanup);
620 }
621
622 /* The to_insn_history_range method of target record-btrace. */
623
624 static void
625 record_btrace_insn_history_range (struct target_ops *self,
626 ULONGEST from, ULONGEST to, int flags)
627 {
628 struct btrace_thread_info *btinfo;
629 struct btrace_insn_history *history;
630 struct btrace_insn_iterator begin, end;
631 struct cleanup *uiout_cleanup;
632 struct ui_out *uiout;
633 unsigned int low, high;
634 int found;
635
636 uiout = current_uiout;
637 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
638 "insn history");
639 low = from;
640 high = to;
641
642 DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);
643
644 /* Check for wrap-arounds. */
645 if (low != from || high != to)
646 error (_("Bad range."));
647
648 if (high < low)
649 error (_("Bad range."));
650
651 btinfo = require_btrace ();
652
653 found = btrace_find_insn_by_number (&begin, btinfo, low);
654 if (found == 0)
655 error (_("Range out of bounds."));
656
657 found = btrace_find_insn_by_number (&end, btinfo, high);
658 if (found == 0)
659 {
660 /* Silently truncate the range. */
661 btrace_insn_end (&end, btinfo);
662 }
663 else
664 {
665 /* We want both begin and end to be inclusive. */
666 btrace_insn_next (&end, 1);
667 }
668
669 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
670 btrace_set_insn_history (btinfo, &begin, &end);
671
672 do_cleanups (uiout_cleanup);
673 }
674
675 /* The to_insn_history_from method of target record-btrace. */
676
677 static void
678 record_btrace_insn_history_from (struct target_ops *self,
679 ULONGEST from, int size, int flags)
680 {
681 ULONGEST begin, end, context;
682
683 context = abs (size);
684 if (context == 0)
685 error (_("Bad record instruction-history-size."));
686
687 if (size < 0)
688 {
689 end = from;
690
691 if (from < context)
692 begin = 0;
693 else
694 begin = from - context + 1;
695 }
696 else
697 {
698 begin = from;
699 end = from + context - 1;
700
701 /* Check for wrap-around. */
702 if (end < begin)
703 end = ULONGEST_MAX;
704 }
705
706 record_btrace_insn_history_range (self, begin, end, flags);
707 }
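/* For example, FROM = 10 with SIZE = -5 requests the inclusive range
   [6; 10]: END is FROM and BEGIN is FROM - CONTEXT + 1.  For positive
   sizes the range is [FROM; FROM + CONTEXT - 1], clamped to ULONGEST_MAX
   on wrap-around.  */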
708
709 /* Print the instruction number range for a function call history line. */
710
711 static void
712 btrace_call_history_insn_range (struct ui_out *uiout,
713 const struct btrace_function *bfun)
714 {
715 unsigned int begin, end, size;
716
717 size = VEC_length (btrace_insn_s, bfun->insn);
718 gdb_assert (size > 0);
719
720 begin = bfun->insn_offset;
721 end = begin + size - 1;
722
723 ui_out_field_uint (uiout, "insn begin", begin);
724 ui_out_text (uiout, ",");
725 ui_out_field_uint (uiout, "insn end", end);
726 }
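/* For example, a function segment whose first instruction has number 42
   and which contains three instructions is printed as "42,44".  */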
727
728 /* Print the source line information for a function call history line. */
729
730 static void
731 btrace_call_history_src_line (struct ui_out *uiout,
732 const struct btrace_function *bfun)
733 {
734 struct symbol *sym;
735 int begin, end;
736
737 sym = bfun->sym;
738 if (sym == NULL)
739 return;
740
741 ui_out_field_string (uiout, "file",
742 symtab_to_filename_for_display (symbol_symtab (sym)));
743
744 begin = bfun->lbegin;
745 end = bfun->lend;
746
747 if (end < begin)
748 return;
749
750 ui_out_text (uiout, ":");
751 ui_out_field_int (uiout, "min line", begin);
752
753 if (end == begin)
754 return;
755
756 ui_out_text (uiout, ",");
757 ui_out_field_int (uiout, "max line", end);
758 }
759
760 /* Get the name of a branch trace function. */
761
762 static const char *
763 btrace_get_bfun_name (const struct btrace_function *bfun)
764 {
765 struct minimal_symbol *msym;
766 struct symbol *sym;
767
768 if (bfun == NULL)
769 return "??";
770
771 msym = bfun->msym;
772 sym = bfun->sym;
773
774 if (sym != NULL)
775 return SYMBOL_PRINT_NAME (sym);
776 else if (msym != NULL)
777 return MSYMBOL_PRINT_NAME (msym);
778 else
779 return "??";
780 }
781
782 /* Disassemble a section of the recorded function trace. */
783
784 static void
785 btrace_call_history (struct ui_out *uiout,
786 const struct btrace_thread_info *btinfo,
787 const struct btrace_call_iterator *begin,
788 const struct btrace_call_iterator *end,
789 enum record_print_flag flags)
790 {
791 struct btrace_call_iterator it;
792
793 DEBUG ("ftrace (0x%x): [%u; %u)", flags, btrace_call_number (begin),
794 btrace_call_number (end));
795
796 for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
797 {
798 const struct btrace_function *bfun;
799 struct minimal_symbol *msym;
800 struct symbol *sym;
801
802 bfun = btrace_call_get (&it);
803 sym = bfun->sym;
804 msym = bfun->msym;
805
806 /* Print the function index. */
807 ui_out_field_uint (uiout, "index", bfun->number);
808 ui_out_text (uiout, "\t");
809
810 /* Indicate gaps in the trace. */
811 if (bfun->errcode != 0)
812 {
813 const struct btrace_config *conf;
814
815 conf = btrace_conf (btinfo);
816
817 /* We have trace, so we must have a configuration. */
818 gdb_assert (conf != NULL);
819
820 btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);
821
822 continue;
823 }
824
825 if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
826 {
827 int level = bfun->level + btinfo->level, i;
828
829 for (i = 0; i < level; ++i)
830 ui_out_text (uiout, " ");
831 }
832
833 if (sym != NULL)
834 ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (sym));
835 else if (msym != NULL)
836 ui_out_field_string (uiout, "function", MSYMBOL_PRINT_NAME (msym));
837 else if (!ui_out_is_mi_like_p (uiout))
838 ui_out_field_string (uiout, "function", "??");
839
840 if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
841 {
842 ui_out_text (uiout, _("\tinst "));
843 btrace_call_history_insn_range (uiout, bfun);
844 }
845
846 if ((flags & RECORD_PRINT_SRC_LINE) != 0)
847 {
848 ui_out_text (uiout, _("\tat "));
849 btrace_call_history_src_line (uiout, bfun);
850 }
851
852 ui_out_text (uiout, "\n");
853 }
854 }
855
856 /* The to_call_history method of target record-btrace. */
857
858 static void
859 record_btrace_call_history (struct target_ops *self, int size, int flags)
860 {
861 struct btrace_thread_info *btinfo;
862 struct btrace_call_history *history;
863 struct btrace_call_iterator begin, end;
864 struct cleanup *uiout_cleanup;
865 struct ui_out *uiout;
866 unsigned int context, covered;
867
868 uiout = current_uiout;
869 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
870 "insn history");
871 context = abs (size);
872 if (context == 0)
873 error (_("Bad record function-call-history-size."));
874
875 btinfo = require_btrace ();
876 history = btinfo->call_history;
877 if (history == NULL)
878 {
879 struct btrace_insn_iterator *replay;
880
881 DEBUG ("call-history (0x%x): %d", flags, size);
882
883 /* If we're replaying, we start at the replay position. Otherwise, we
884 start at the tail of the trace. */
885 replay = btinfo->replay;
886 if (replay != NULL)
887 {
888 begin.function = replay->function;
889 begin.btinfo = btinfo;
890 }
891 else
892 btrace_call_end (&begin, btinfo);
893
894 /* We start from here and expand in the requested direction. Then we
895 expand in the other direction, as well, to fill up any remaining
896 context. */
897 end = begin;
898 if (size < 0)
899 {
900 /* We want the current position covered, as well. */
901 covered = btrace_call_next (&end, 1);
902 covered += btrace_call_prev (&begin, context - covered);
903 covered += btrace_call_next (&end, context - covered);
904 }
905 else
906 {
907 covered = btrace_call_next (&end, context);
908 covered += btrace_call_prev (&begin, context - covered);
909 }
910 }
911 else
912 {
913 begin = history->begin;
914 end = history->end;
915
916 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", flags, size,
917 btrace_call_number (&begin), btrace_call_number (&end));
918
919 if (size < 0)
920 {
921 end = begin;
922 covered = btrace_call_prev (&begin, context);
923 }
924 else
925 {
926 begin = end;
927 covered = btrace_call_next (&end, context);
928 }
929 }
930
931 if (covered > 0)
932 btrace_call_history (uiout, btinfo, &begin, &end, flags);
933 else
934 {
935 if (size < 0)
936 printf_unfiltered (_("At the start of the branch trace record.\n"));
937 else
938 printf_unfiltered (_("At the end of the branch trace record.\n"));
939 }
940
941 btrace_set_call_history (btinfo, &begin, &end);
942 do_cleanups (uiout_cleanup);
943 }
944
945 /* The to_call_history_range method of target record-btrace. */
946
947 static void
948 record_btrace_call_history_range (struct target_ops *self,
949 ULONGEST from, ULONGEST to, int flags)
950 {
951 struct btrace_thread_info *btinfo;
952 struct btrace_call_history *history;
953 struct btrace_call_iterator begin, end;
954 struct cleanup *uiout_cleanup;
955 struct ui_out *uiout;
956 unsigned int low, high;
957 int found;
958
959 uiout = current_uiout;
960 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
961 "func history");
962 low = from;
963 high = to;
964
965 DEBUG ("call-history (0x%x): [%u; %u)", flags, low, high);
966
967 /* Check for wrap-arounds. */
968 if (low != from || high != to)
969 error (_("Bad range."));
970
971 if (high < low)
972 error (_("Bad range."));
973
974 btinfo = require_btrace ();
975
976 found = btrace_find_call_by_number (&begin, btinfo, low);
977 if (found == 0)
978 error (_("Range out of bounds."));
979
980 found = btrace_find_call_by_number (&end, btinfo, high);
981 if (found == 0)
982 {
983 /* Silently truncate the range. */
984 btrace_call_end (&end, btinfo);
985 }
986 else
987 {
988 /* We want both begin and end to be inclusive. */
989 btrace_call_next (&end, 1);
990 }
991
992 btrace_call_history (uiout, btinfo, &begin, &end, flags);
993 btrace_set_call_history (btinfo, &begin, &end);
994
995 do_cleanups (uiout_cleanup);
996 }
997
998 /* The to_call_history_from method of target record-btrace. */
999
1000 static void
1001 record_btrace_call_history_from (struct target_ops *self,
1002 ULONGEST from, int size, int flags)
1003 {
1004 ULONGEST begin, end, context;
1005
1006 context = abs (size);
1007 if (context == 0)
1008 error (_("Bad record function-call-history-size."));
1009
1010 if (size < 0)
1011 {
1012 end = from;
1013
1014 if (from < context)
1015 begin = 0;
1016 else
1017 begin = from - context + 1;
1018 }
1019 else
1020 {
1021 begin = from;
1022 end = from + context - 1;
1023
1024 /* Check for wrap-around. */
1025 if (end < begin)
1026 end = ULONGEST_MAX;
1027 }
1028
1029 record_btrace_call_history_range (self, begin, end, flags);
1030 }
1031
1032 /* The to_record_is_replaying method of target record-btrace. */
1033
1034 static int
1035 record_btrace_is_replaying (struct target_ops *self)
1036 {
1037 struct thread_info *tp;
1038
1039 ALL_NON_EXITED_THREADS (tp)
1040 if (btrace_is_replaying (tp))
1041 return 1;
1042
1043 return 0;
1044 }
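/* Note that this reports whether any thread is replaying; the replay
   state itself is tracked per thread in its btrace_thread_info.  */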
1045
1046 /* The to_xfer_partial method of target record-btrace. */
1047
1048 static enum target_xfer_status
1049 record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
1050 const char *annex, gdb_byte *readbuf,
1051 const gdb_byte *writebuf, ULONGEST offset,
1052 ULONGEST len, ULONGEST *xfered_len)
1053 {
1054 struct target_ops *t;
1055
1056 /* Filter out requests that don't make sense during replay. */
1057 if (replay_memory_access == replay_memory_access_read_only
1058 && !record_btrace_generating_corefile
1059 && record_btrace_is_replaying (ops))
1060 {
1061 switch (object)
1062 {
1063 case TARGET_OBJECT_MEMORY:
1064 {
1065 struct target_section *section;
1066
1067 /* We do not allow writing memory in general. */
1068 if (writebuf != NULL)
1069 {
1070 *xfered_len = len;
1071 return TARGET_XFER_UNAVAILABLE;
1072 }
1073
1074 /* We allow reading readonly memory. */
1075 section = target_section_by_addr (ops, offset);
1076 if (section != NULL)
1077 {
1078 /* Check if the section we found is readonly. */
1079 if ((bfd_get_section_flags (section->the_bfd_section->owner,
1080 section->the_bfd_section)
1081 & SEC_READONLY) != 0)
1082 {
1083 /* Truncate the request to fit into this section. */
1084 len = min (len, section->endaddr - offset);
1085 break;
1086 }
1087 }
1088
1089 *xfered_len = len;
1090 return TARGET_XFER_UNAVAILABLE;
1091 }
1092 }
1093 }
1094
1095 /* Forward the request. */
1096 ops = ops->beneath;
1097 return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
1098 offset, len, xfered_len);
1099 }
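/* To summarize the replay policy above: memory writes are refused, reads
   are allowed only from read-only sections (truncated to fit the section),
   and everything else is forwarded to the target beneath.  This applies
   only while replaying, with replay-memory-access set to "read-only", and
   not while generating a core file.  */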
1100
1101 /* The to_insert_breakpoint method of target record-btrace. */
1102
1103 static int
1104 record_btrace_insert_breakpoint (struct target_ops *ops,
1105 struct gdbarch *gdbarch,
1106 struct bp_target_info *bp_tgt)
1107 {
1108 volatile struct gdb_exception except;
1109 const char *old;
1110 int ret;
1111
1112 /* Inserting breakpoints requires accessing memory. Allow it for the
1113 duration of this function. */
1114 old = replay_memory_access;
1115 replay_memory_access = replay_memory_access_read_write;
1116
1117 ret = 0;
1118 TRY_CATCH (except, RETURN_MASK_ALL)
1119 ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);
1120
1121 replay_memory_access = old;
1122
1123 if (except.reason < 0)
1124 throw_exception (except);
1125
1126 return ret;
1127 }
1128
1129 /* The to_remove_breakpoint method of target record-btrace. */
1130
1131 static int
1132 record_btrace_remove_breakpoint (struct target_ops *ops,
1133 struct gdbarch *gdbarch,
1134 struct bp_target_info *bp_tgt)
1135 {
1136 volatile struct gdb_exception except;
1137 const char *old;
1138 int ret;
1139
1140 /* Removing breakpoints requires accessing memory. Allow it for the
1141 duration of this function. */
1142 old = replay_memory_access;
1143 replay_memory_access = replay_memory_access_read_write;
1144
1145 ret = 0;
1146 TRY_CATCH (except, RETURN_MASK_ALL)
1147 ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt);
1148
1149 replay_memory_access = old;
1150
1151 if (except.reason < 0)
1152 throw_exception (except);
1153
1154 return ret;
1155 }
1156
1157 /* The to_fetch_registers method of target record-btrace. */
1158
1159 static void
1160 record_btrace_fetch_registers (struct target_ops *ops,
1161 struct regcache *regcache, int regno)
1162 {
1163 struct btrace_insn_iterator *replay;
1164 struct thread_info *tp;
1165
1166 tp = find_thread_ptid (inferior_ptid);
1167 gdb_assert (tp != NULL);
1168
1169 replay = tp->btrace.replay;
1170 if (replay != NULL && !record_btrace_generating_corefile)
1171 {
1172 const struct btrace_insn *insn;
1173 struct gdbarch *gdbarch;
1174 int pcreg;
1175
1176 gdbarch = get_regcache_arch (regcache);
1177 pcreg = gdbarch_pc_regnum (gdbarch);
1178 if (pcreg < 0)
1179 return;
1180
1181 /* We can only provide the PC register. */
1182 if (regno >= 0 && regno != pcreg)
1183 return;
1184
1185 insn = btrace_insn_get (replay);
1186 gdb_assert (insn != NULL);
1187
1188 regcache_raw_supply (regcache, pcreg, &insn->pc);
1189 }
1190 else
1191 {
1192 struct target_ops *t = ops->beneath;
1193
1194 t->to_fetch_registers (t, regcache, regno);
1195 }
1196 }
1197
1198 /* The to_store_registers method of target record-btrace. */
1199
1200 static void
1201 record_btrace_store_registers (struct target_ops *ops,
1202 struct regcache *regcache, int regno)
1203 {
1204 struct target_ops *t;
1205
1206 if (!record_btrace_generating_corefile && record_btrace_is_replaying (ops))
1207 error (_("This record target does not allow writing registers."));
1208
1209 gdb_assert (may_write_registers != 0);
1210
1211 t = ops->beneath;
1212 t->to_store_registers (t, regcache, regno);
1213 }
1214
1215 /* The to_prepare_to_store method of target record-btrace. */
1216
1217 static void
1218 record_btrace_prepare_to_store (struct target_ops *ops,
1219 struct regcache *regcache)
1220 {
1221 struct target_ops *t;
1222
1223 if (!record_btrace_generating_corefile && record_btrace_is_replaying (ops))
1224 return;
1225
1226 t = ops->beneath;
1227 t->to_prepare_to_store (t, regcache);
1228 }
1229
1230 /* The branch trace frame cache. */
1231
1232 struct btrace_frame_cache
1233 {
1234 /* The thread. */
1235 struct thread_info *tp;
1236
1237 /* The frame info. */
1238 struct frame_info *frame;
1239
1240 /* The branch trace function segment. */
1241 const struct btrace_function *bfun;
1242 };
1243
1244 /* A struct btrace_frame_cache hash table, indexed by the frame pointer. */
1245
1246 static htab_t bfcache;
1247
1248 /* hash_f for htab_create_alloc of bfcache. */
1249
1250 static hashval_t
1251 bfcache_hash (const void *arg)
1252 {
1253 const struct btrace_frame_cache *cache = arg;
1254
1255 return htab_hash_pointer (cache->frame);
1256 }
1257
1258 /* eq_f for htab_create_alloc of bfcache. */
1259
1260 static int
1261 bfcache_eq (const void *arg1, const void *arg2)
1262 {
1263 const struct btrace_frame_cache *cache1 = arg1;
1264 const struct btrace_frame_cache *cache2 = arg2;
1265
1266 return cache1->frame == cache2->frame;
1267 }
1268
1269 /* Create a new btrace frame cache. */
1270
1271 static struct btrace_frame_cache *
1272 bfcache_new (struct frame_info *frame)
1273 {
1274 struct btrace_frame_cache *cache;
1275 void **slot;
1276
1277 cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
1278 cache->frame = frame;
1279
1280 slot = htab_find_slot (bfcache, cache, INSERT);
1281 gdb_assert (*slot == NULL);
1282 *slot = cache;
1283
1284 return cache;
1285 }
1286
1287 /* Extract the branch trace function from a branch trace frame. */
1288
1289 static const struct btrace_function *
1290 btrace_get_frame_function (struct frame_info *frame)
1291 {
1292 const struct btrace_frame_cache *cache;
1293 const struct btrace_function *bfun;
1294 struct btrace_frame_cache pattern;
1295 void **slot;
1296
1297 pattern.frame = frame;
1298
1299 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1300 if (slot == NULL)
1301 return NULL;
1302
1303 cache = *slot;
1304 return cache->bfun;
1305 }
1306
1307 /* Implement stop_reason method for record_btrace_frame_unwind. */
1308
1309 static enum unwind_stop_reason
1310 record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1311 void **this_cache)
1312 {
1313 const struct btrace_frame_cache *cache;
1314 const struct btrace_function *bfun;
1315
1316 cache = *this_cache;
1317 bfun = cache->bfun;
1318 gdb_assert (bfun != NULL);
1319
1320 if (bfun->up == NULL)
1321 return UNWIND_UNAVAILABLE;
1322
1323 return UNWIND_NO_REASON;
1324 }
1325
1326 /* Implement this_id method for record_btrace_frame_unwind. */
1327
1328 static void
1329 record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
1330 struct frame_id *this_id)
1331 {
1332 const struct btrace_frame_cache *cache;
1333 const struct btrace_function *bfun;
1334 CORE_ADDR code, special;
1335
1336 cache = *this_cache;
1337
1338 bfun = cache->bfun;
1339 gdb_assert (bfun != NULL);
1340
1341 while (bfun->segment.prev != NULL)
1342 bfun = bfun->segment.prev;
1343
1344 code = get_frame_func (this_frame);
1345 special = bfun->number;
1346
1347 *this_id = frame_id_build_unavailable_stack_special (code, special);
1348
1349 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1350 btrace_get_bfun_name (cache->bfun),
1351 core_addr_to_string_nz (this_id->code_addr),
1352 core_addr_to_string_nz (this_id->special_addr));
1353 }
1354
1355 /* Implement prev_register method for record_btrace_frame_unwind. */
1356
1357 static struct value *
1358 record_btrace_frame_prev_register (struct frame_info *this_frame,
1359 void **this_cache,
1360 int regnum)
1361 {
1362 const struct btrace_frame_cache *cache;
1363 const struct btrace_function *bfun, *caller;
1364 const struct btrace_insn *insn;
1365 struct gdbarch *gdbarch;
1366 CORE_ADDR pc;
1367 int pcreg;
1368
1369 gdbarch = get_frame_arch (this_frame);
1370 pcreg = gdbarch_pc_regnum (gdbarch);
1371 if (pcreg < 0 || regnum != pcreg)
1372 throw_error (NOT_AVAILABLE_ERROR,
1373 _("Registers are not available in btrace record history"));
1374
1375 cache = *this_cache;
1376 bfun = cache->bfun;
1377 gdb_assert (bfun != NULL);
1378
1379 caller = bfun->up;
1380 if (caller == NULL)
1381 throw_error (NOT_AVAILABLE_ERROR,
1382 _("No caller in btrace record history"));
1383
1384 if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
1385 {
1386 insn = VEC_index (btrace_insn_s, caller->insn, 0);
1387 pc = insn->pc;
1388 }
1389 else
1390 {
1391 insn = VEC_last (btrace_insn_s, caller->insn);
1392 pc = insn->pc;
1393
1394 pc += gdb_insn_length (gdbarch, pc);
1395 }
1396
1397 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1398 btrace_get_bfun_name (bfun), bfun->level,
1399 core_addr_to_string_nz (pc));
1400
1401 return frame_unwind_got_address (this_frame, regnum, pc);
1402 }
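/* In other words: if the caller segment was entered via a return
   (BFUN_UP_LINKS_TO_RET), its first instruction already is the return
   address; otherwise the return address is the instruction following the
   call, i.e. the last instruction's PC plus its length.  */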
1403
1404 /* Implement sniffer method for record_btrace_frame_unwind. */
1405
1406 static int
1407 record_btrace_frame_sniffer (const struct frame_unwind *self,
1408 struct frame_info *this_frame,
1409 void **this_cache)
1410 {
1411 const struct btrace_function *bfun;
1412 struct btrace_frame_cache *cache;
1413 struct thread_info *tp;
1414 struct frame_info *next;
1415
1416 /* THIS_FRAME does not contain a reference to its thread. */
1417 tp = find_thread_ptid (inferior_ptid);
1418 gdb_assert (tp != NULL);
1419
1420 bfun = NULL;
1421 next = get_next_frame (this_frame);
1422 if (next == NULL)
1423 {
1424 const struct btrace_insn_iterator *replay;
1425
1426 replay = tp->btrace.replay;
1427 if (replay != NULL)
1428 bfun = replay->function;
1429 }
1430 else
1431 {
1432 const struct btrace_function *callee;
1433
1434 callee = btrace_get_frame_function (next);
1435 if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1436 bfun = callee->up;
1437 }
1438
1439 if (bfun == NULL)
1440 return 0;
1441
1442 DEBUG ("[frame] sniffed frame for %s on level %d",
1443 btrace_get_bfun_name (bfun), bfun->level);
1444
1445 /* This is our frame. Initialize the frame cache. */
1446 cache = bfcache_new (this_frame);
1447 cache->tp = tp;
1448 cache->bfun = bfun;
1449
1450 *this_cache = cache;
1451 return 1;
1452 }
1453
1454 /* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1455
1456 static int
1457 record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
1458 struct frame_info *this_frame,
1459 void **this_cache)
1460 {
1461 const struct btrace_function *bfun, *callee;
1462 struct btrace_frame_cache *cache;
1463 struct frame_info *next;
1464
1465 next = get_next_frame (this_frame);
1466 if (next == NULL)
1467 return 0;
1468
1469 callee = btrace_get_frame_function (next);
1470 if (callee == NULL)
1471 return 0;
1472
1473 if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1474 return 0;
1475
1476 bfun = callee->up;
1477 if (bfun == NULL)
1478 return 0;
1479
1480 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1481 btrace_get_bfun_name (bfun), bfun->level);
1482
1483 /* This is our frame. Initialize the frame cache. */
1484 cache = bfcache_new (this_frame);
1485 cache->tp = find_thread_ptid (inferior_ptid);
1486 cache->bfun = bfun;
1487
1488 *this_cache = cache;
1489 return 1;
1490 }
1491
1492 static void
1493 record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
1494 {
1495 struct btrace_frame_cache *cache;
1496 void **slot;
1497
1498 cache = this_cache;
1499
1500 slot = htab_find_slot (bfcache, cache, NO_INSERT);
1501 gdb_assert (slot != NULL);
1502
1503 htab_remove_elt (bfcache, cache);
1504 }
1505
1506 /* btrace recording does not store previous memory contents, nor the
1507 contents of the stack frames. Any unwinding would return erroneous
1508 results, as the stack contents no longer match the changed PC value
1509 restored from history. Therefore this unwinder reports any possibly
1510 unwound registers as <unavailable>. */
1511
1512 const struct frame_unwind record_btrace_frame_unwind =
1513 {
1514 NORMAL_FRAME,
1515 record_btrace_frame_unwind_stop_reason,
1516 record_btrace_frame_this_id,
1517 record_btrace_frame_prev_register,
1518 NULL,
1519 record_btrace_frame_sniffer,
1520 record_btrace_frame_dealloc_cache
1521 };
1522
1523 const struct frame_unwind record_btrace_tailcall_frame_unwind =
1524 {
1525 TAILCALL_FRAME,
1526 record_btrace_frame_unwind_stop_reason,
1527 record_btrace_frame_this_id,
1528 record_btrace_frame_prev_register,
1529 NULL,
1530 record_btrace_tailcall_frame_sniffer,
1531 record_btrace_frame_dealloc_cache
1532 };
1533
1534 /* Implement the to_get_unwinder method. */
1535
1536 static const struct frame_unwind *
1537 record_btrace_to_get_unwinder (struct target_ops *self)
1538 {
1539 return &record_btrace_frame_unwind;
1540 }
1541
1542 /* Implement the to_get_tailcall_unwinder method. */
1543
1544 static const struct frame_unwind *
1545 record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
1546 {
1547 return &record_btrace_tailcall_frame_unwind;
1548 }
1549
1550 /* Indicate that TP should be resumed according to FLAG. */
1551
1552 static void
1553 record_btrace_resume_thread (struct thread_info *tp,
1554 enum btrace_thread_flag flag)
1555 {
1556 struct btrace_thread_info *btinfo;
1557
1558 DEBUG ("resuming %d (%s): %u", tp->num, target_pid_to_str (tp->ptid), flag);
1559
1560 btinfo = &tp->btrace;
1561
1562 if ((btinfo->flags & BTHR_MOVE) != 0)
1563 error (_("Thread already moving."));
1564
1565 /* Fetch the latest branch trace. */
1566 btrace_fetch (tp);
1567
1568 btinfo->flags |= flag;
1569 }
1570
1571 /* Find the thread to resume given a PTID. */
1572
1573 static struct thread_info *
1574 record_btrace_find_resume_thread (ptid_t ptid)
1575 {
1576 struct thread_info *tp;
1577
1578 /* When asked to resume everything, we pick the current thread. */
1579 if (ptid_equal (minus_one_ptid, ptid) || ptid_is_pid (ptid))
1580 ptid = inferior_ptid;
1581
1582 return find_thread_ptid (ptid);
1583 }
1584
1585 /* Start replaying a thread. */
1586
1587 static struct btrace_insn_iterator *
1588 record_btrace_start_replaying (struct thread_info *tp)
1589 {
1590 volatile struct gdb_exception except;
1591 struct btrace_insn_iterator *replay;
1592 struct btrace_thread_info *btinfo;
1593 int executing;
1594
1595 btinfo = &tp->btrace;
1596 replay = NULL;
1597
1598 /* We can't start replaying without trace. */
1599 if (btinfo->begin == NULL)
1600 return NULL;
1601
1602 /* Clear the executing flag to allow changes to the current frame.
1603 We are not actually running yet; we just started a reverse execution
1604 command or a record goto command.
1605 For the latter, EXECUTING is false and this has no effect.
1606 For the former, EXECUTING is true and we're in to_wait, about to
1607 move the thread. Since we need to recompute the stack, we temporarily
1608 set EXECUTING to false. */
1609 executing = is_executing (tp->ptid);
1610 set_executing (tp->ptid, 0);
1611
1612 /* GDB stores the current frame_id when stepping in order to detect steps
1613 into subroutines.
1614 Since frames are computed differently when we're replaying, we need to
1615 recompute those stored frames and fix them up so we can still detect
1616 subroutines after we have started replaying. */
1617 TRY_CATCH (except, RETURN_MASK_ALL)
1618 {
1619 struct frame_info *frame;
1620 struct frame_id frame_id;
1621 int upd_step_frame_id, upd_step_stack_frame_id;
1622
1623 /* The current frame without replaying - computed via normal unwind. */
1624 frame = get_current_frame ();
1625 frame_id = get_frame_id (frame);
1626
1627 /* Check if we need to update any stepping-related frame id's. */
1628 upd_step_frame_id = frame_id_eq (frame_id,
1629 tp->control.step_frame_id);
1630 upd_step_stack_frame_id = frame_id_eq (frame_id,
1631 tp->control.step_stack_frame_id);
1632
1633 /* We start replaying at the end of the branch trace. This corresponds
1634 to the current instruction. */
1635 replay = xmalloc (sizeof (*replay));
1636 btrace_insn_end (replay, btinfo);
1637
1638 /* Skip gaps at the end of the trace. */
1639 while (btrace_insn_get (replay) == NULL)
1640 {
1641 unsigned int steps;
1642
1643 steps = btrace_insn_prev (replay, 1);
1644 if (steps == 0)
1645 error (_("No trace."));
1646 }
1647
1648 /* We're not replaying yet. */
1649 gdb_assert (btinfo->replay == NULL);
1650 btinfo->replay = replay;
1651
1652 /* Make sure we're not using any stale registers. */
1653 registers_changed_ptid (tp->ptid);
1654
1655 /* The current frame with replaying - computed via btrace unwind. */
1656 frame = get_current_frame ();
1657 frame_id = get_frame_id (frame);
1658
1659 /* Replace stepping related frames where necessary. */
1660 if (upd_step_frame_id)
1661 tp->control.step_frame_id = frame_id;
1662 if (upd_step_stack_frame_id)
1663 tp->control.step_stack_frame_id = frame_id;
1664 }
1665
1666 /* Restore the previous execution state. */
1667 set_executing (tp->ptid, executing);
1668
1669 if (except.reason < 0)
1670 {
1671 xfree (btinfo->replay);
1672 btinfo->replay = NULL;
1673
1674 registers_changed_ptid (tp->ptid);
1675
1676 throw_exception (except);
1677 }
1678
1679 return replay;
1680 }
1681
1682 /* Stop replaying a thread. */
1683
1684 static void
1685 record_btrace_stop_replaying (struct thread_info *tp)
1686 {
1687 struct btrace_thread_info *btinfo;
1688
1689 btinfo = &tp->btrace;
1690
1691 xfree (btinfo->replay);
1692 btinfo->replay = NULL;
1693
1694 /* Make sure we're not leaving any stale registers. */
1695 registers_changed_ptid (tp->ptid);
1696 }
1697
1698 /* The to_resume method of target record-btrace. */
1699
1700 static void
1701 record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
1702 enum gdb_signal signal)
1703 {
1704 struct thread_info *tp, *other;
1705 enum btrace_thread_flag flag;
1706
1707 DEBUG ("resume %s: %s", target_pid_to_str (ptid), step ? "step" : "cont");
1708
1709 /* Store the execution direction of the last resume. */
1710 record_btrace_resume_exec_dir = execution_direction;
1711
1712 tp = record_btrace_find_resume_thread (ptid);
1713 if (tp == NULL)
1714 error (_("Cannot find thread to resume."));
1715
1716 /* Stop replaying other threads if the thread to resume is not replaying. */
1717 if (!btrace_is_replaying (tp) && execution_direction != EXEC_REVERSE)
1718 ALL_NON_EXITED_THREADS (other)
1719 record_btrace_stop_replaying (other);
1720
1721 /* As long as we're not replaying, just forward the request. */
1722 if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
1723 {
1724 ops = ops->beneath;
1725 return ops->to_resume (ops, ptid, step, signal);
1726 }
1727
1728 /* Compute the btrace thread flag for the requested move. */
1729 if (step == 0)
1730 flag = execution_direction == EXEC_REVERSE ? BTHR_RCONT : BTHR_CONT;
1731 else
1732 flag = execution_direction == EXEC_REVERSE ? BTHR_RSTEP : BTHR_STEP;
1733
1734 /* At the moment, we only move a single thread. We could also move
1735 all threads in parallel by single-stepping each resumed thread
1736 until the first runs into an event.
1737 If we did that, we would want to continue all other threads.
1738 For now, just resume one thread so as not to confuse to_wait. */
1739 record_btrace_resume_thread (tp, flag);
1740
1741 /* We just indicate the resume intent here. The actual stepping happens in
1742 record_btrace_wait below. */
1743
1744 /* Async support. */
1745 if (target_can_async_p ())
1746 {
1747 target_async (inferior_event_handler, 0);
1748 mark_async_event_handler (record_btrace_async_inferior_event_handler);
1749 }
1750 }
1751
1752 /* Find a thread to move. */
1753
1754 static struct thread_info *
1755 record_btrace_find_thread_to_move (ptid_t ptid)
1756 {
1757 struct thread_info *tp;
1758
1759 /* First check the parameter thread. */
1760 tp = find_thread_ptid (ptid);
1761 if (tp != NULL && (tp->btrace.flags & BTHR_MOVE) != 0)
1762 return tp;
1763
1764 /* Otherwise, find one other thread that has been resumed. */
1765 ALL_NON_EXITED_THREADS (tp)
1766 if ((tp->btrace.flags & BTHR_MOVE) != 0)
1767 return tp;
1768
1769 return NULL;
1770 }
1771
1772 /* Return a target_waitstatus indicating that we ran out of history. */
1773
1774 static struct target_waitstatus
1775 btrace_step_no_history (void)
1776 {
1777 struct target_waitstatus status;
1778
1779 status.kind = TARGET_WAITKIND_NO_HISTORY;
1780
1781 return status;
1782 }
1783
1784 /* Return a target_waitstatus indicating that a step finished. */
1785
1786 static struct target_waitstatus
1787 btrace_step_stopped (void)
1788 {
1789 struct target_waitstatus status;
1790
1791 status.kind = TARGET_WAITKIND_STOPPED;
1792 status.value.sig = GDB_SIGNAL_TRAP;
1793
1794 return status;
1795 }
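/* These two statuses drive record_btrace_step_thread below:
   TARGET_WAITKIND_NO_HISTORY is reported when we run out of execution
   history, while TARGET_WAITKIND_STOPPED with GDB_SIGNAL_TRAP reports a
   completed step or a breakpoint hit.  */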
1796
1797 /* Clear the record histories. */
1798
1799 static void
1800 record_btrace_clear_histories (struct btrace_thread_info *btinfo)
1801 {
1802 xfree (btinfo->insn_history);
1803 xfree (btinfo->call_history);
1804
1805 btinfo->insn_history = NULL;
1806 btinfo->call_history = NULL;
1807 }
1808
1809 /* Step a single thread. */
1810
1811 static struct target_waitstatus
1812 record_btrace_step_thread (struct thread_info *tp)
1813 {
1814 struct btrace_insn_iterator *replay, end;
1815 struct btrace_thread_info *btinfo;
1816 struct address_space *aspace;
1817 struct inferior *inf;
1818 enum btrace_thread_flag flags;
1819 unsigned int steps;
1820
1821 /* We can't step without an execution history. */
1822 if (btrace_is_empty (tp))
1823 return btrace_step_no_history ();
1824
1825 btinfo = &tp->btrace;
1826 replay = btinfo->replay;
1827
1828 flags = btinfo->flags & BTHR_MOVE;
1829 btinfo->flags &= ~BTHR_MOVE;
1830
1831 DEBUG ("stepping %d (%s): %u", tp->num, target_pid_to_str (tp->ptid), flags);
1832
1833 switch (flags)
1834 {
1835 default:
1836 internal_error (__FILE__, __LINE__, _("invalid stepping type."));
1837
1838 case BTHR_STEP:
1839 /* We're done if we're not replaying. */
1840 if (replay == NULL)
1841 return btrace_step_no_history ();
1842
1843 /* Skip gaps during replay. */
1844 do
1845 {
1846 steps = btrace_insn_next (replay, 1);
1847 if (steps == 0)
1848 {
1849 record_btrace_stop_replaying (tp);
1850 return btrace_step_no_history ();
1851 }
1852 }
1853 while (btrace_insn_get (replay) == NULL);
1854
1855 /* Determine the end of the instruction trace. */
1856 btrace_insn_end (&end, btinfo);
1857
1858 /* We stop replaying if we reached the end of the trace. */
1859 if (btrace_insn_cmp (replay, &end) == 0)
1860 record_btrace_stop_replaying (tp);
1861
1862 return btrace_step_stopped ();
1863
1864 case BTHR_RSTEP:
1865 /* Start replaying if we're not already doing so. */
1866 if (replay == NULL)
1867 replay = record_btrace_start_replaying (tp);
1868
1869 /* If we can't step any further, we reached the end of the history.
1870 Skip gaps during replay. */
1871 do
1872 {
1873 steps = btrace_insn_prev (replay, 1);
1874 if (steps == 0)
1875 return btrace_step_no_history ();
1876
1877 }
1878 while (btrace_insn_get (replay) == NULL);
1879
1880 return btrace_step_stopped ();
1881
1882 case BTHR_CONT:
1883 /* We're done if we're not replaying. */
1884 if (replay == NULL)
1885 return btrace_step_no_history ();
1886
1887 inf = find_inferior_ptid (tp->ptid);
1888 aspace = inf->aspace;
1889
1890 /* Determine the end of the instruction trace. */
1891 btrace_insn_end (&end, btinfo);
1892
1893 for (;;)
1894 {
1895 const struct btrace_insn *insn;
1896
1897 /* Skip gaps during replay. */
1898 do
1899 {
1900 steps = btrace_insn_next (replay, 1);
1901 if (steps == 0)
1902 {
1903 record_btrace_stop_replaying (tp);
1904 return btrace_step_no_history ();
1905 }
1906
1907 insn = btrace_insn_get (replay);
1908 }
1909 while (insn == NULL);
1910
1911 /* We stop replaying if we reached the end of the trace. */
1912 if (btrace_insn_cmp (replay, &end) == 0)
1913 {
1914 record_btrace_stop_replaying (tp);
1915 return btrace_step_no_history ();
1916 }
1917
1918 DEBUG ("stepping %d (%s) ... %s", tp->num,
1919 target_pid_to_str (tp->ptid),
1920 core_addr_to_string_nz (insn->pc));
1921
1922 if (breakpoint_here_p (aspace, insn->pc))
1923 return btrace_step_stopped ();
1924 }
1925
1926 case BTHR_RCONT:
1927 /* Start replaying if we're not already doing so. */
1928 if (replay == NULL)
1929 replay = record_btrace_start_replaying (tp);
1930
1931 inf = find_inferior_ptid (tp->ptid);
1932 aspace = inf->aspace;
1933
1934 for (;;)
1935 {
1936 const struct btrace_insn *insn;
1937
1938 /* If we can't step any further, we reached the end of the history.
1939 Skip gaps during replay. */
1940 do
1941 {
1942 steps = btrace_insn_prev (replay, 1);
1943 if (steps == 0)
1944 return btrace_step_no_history ();
1945
1946 insn = btrace_insn_get (replay);
1947 }
1948 while (insn == NULL);
1949
1950 DEBUG ("reverse-stepping %d (%s) ... %s", tp->num,
1951 target_pid_to_str (tp->ptid),
1952 core_addr_to_string_nz (insn->pc));
1953
1954 if (breakpoint_here_p (aspace, insn->pc))
1955 return btrace_step_stopped ();
1956 }
1957 }
1958 }
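/* To summarize: BTHR_STEP and BTHR_RSTEP move a single instruction
   forwards or backwards, skipping gaps; BTHR_CONT and BTHR_RCONT keep
   moving until a breakpoint is hit or the history ends.  The reverse
   variants start replaying on demand.  */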
1959
1960 /* The to_wait method of target record-btrace. */
1961
1962 static ptid_t
1963 record_btrace_wait (struct target_ops *ops, ptid_t ptid,
1964 struct target_waitstatus *status, int options)
1965 {
1966 struct thread_info *tp, *other;
1967
1968 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);
1969
1970 /* As long as we're not replaying, just forward the request. */
1971 if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
1972 {
1973 ops = ops->beneath;
1974 return ops->to_wait (ops, ptid, status, options);
1975 }
1976
1977 /* Let's find a thread to move. */
1978 tp = record_btrace_find_thread_to_move (ptid);
1979 if (tp == NULL)
1980 {
1981 DEBUG ("wait %s: no thread", target_pid_to_str (ptid));
1982
1983 status->kind = TARGET_WAITKIND_IGNORE;
1984 return minus_one_ptid;
1985 }
1986
1987 /* We only move a single thread. We're not able to correlate threads. */
1988 *status = record_btrace_step_thread (tp);
1989
1990 /* Stop all other threads. */
1991 if (!non_stop)
1992 ALL_NON_EXITED_THREADS (other)
1993 other->btrace.flags &= ~BTHR_MOVE;
1994
1995 /* Start record histories anew from the current position. */
1996 record_btrace_clear_histories (&tp->btrace);
1997
1998 /* We moved the replay position but did not update registers. */
1999 registers_changed_ptid (tp->ptid);
2000
2001 return tp->ptid;
2002 }
2003
2004 /* The to_can_execute_reverse method of target record-btrace. */
2005
2006 static int
2007 record_btrace_can_execute_reverse (struct target_ops *self)
2008 {
2009 return 1;
2010 }
2011
2012 /* The to_decr_pc_after_break method of target record-btrace. */
2013
2014 static CORE_ADDR
2015 record_btrace_decr_pc_after_break (struct target_ops *ops,
2016 struct gdbarch *gdbarch)
2017 {
2018 /* When replaying, we do not actually execute the breakpoint instruction,
2019 so there is no need to adjust the PC after hitting a breakpoint. */
2020 if (record_btrace_is_replaying (ops))
2021 return 0;
2022
2023 return ops->beneath->to_decr_pc_after_break (ops->beneath, gdbarch);
2024 }
2025
2026 /* The to_update_thread_list method of target record-btrace. */
2027
2028 static void
2029 record_btrace_update_thread_list (struct target_ops *ops)
2030 {
2031 /* We don't add or remove threads during replay. */
2032 if (record_btrace_is_replaying (ops))
2033 return;
2034
2035 /* Forward the request. */
2036 ops = ops->beneath;
2037 ops->to_update_thread_list (ops);
2038 }
2039
2040 /* The to_thread_alive method of target record-btrace. */
2041
2042 static int
2043 record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
2044 {
2045 /* We don't add or remove threads during replay. */
2046 if (record_btrace_is_replaying (ops))
2047 return find_thread_ptid (ptid) != NULL;
2048
2049 /* Forward the request. */
2050 ops = ops->beneath;
2051 return ops->to_thread_alive (ops, ptid);
2052 }
2053
2054 /* Set the replay branch trace instruction iterator. If IT is NULL, replay
2055 is stopped. */
2056
2057 static void
2058 record_btrace_set_replay (struct thread_info *tp,
2059 const struct btrace_insn_iterator *it)
2060 {
2061 struct btrace_thread_info *btinfo;
2062
2063 btinfo = &tp->btrace;
2064
2065 if (it == NULL || it->function == NULL)
2066 record_btrace_stop_replaying (tp);
2067 else
2068 {
2069 if (btinfo->replay == NULL)
2070 record_btrace_start_replaying (tp);
2071 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
2072 return;
2073
2074 *btinfo->replay = *it;
2075 registers_changed_ptid (tp->ptid);
2076 }
2077
2078 /* Start anew from the new replay position. */
2079 record_btrace_clear_histories (btinfo);
2080 }
2081
/* The to_goto_record_begin method of target record-btrace.  */

static void
record_btrace_goto_begin (struct target_ops *self)
{
  struct thread_info *tp;
  struct btrace_insn_iterator begin;

  tp = require_btrace_thread ();

  btrace_insn_begin (&begin, &tp->btrace);
  record_btrace_set_replay (tp, &begin);

  print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
}

/* The to_goto_record_end method of target record-btrace.  */

static void
record_btrace_goto_end (struct target_ops *ops)
{
  struct thread_info *tp;

  tp = require_btrace_thread ();

  record_btrace_set_replay (tp, NULL);

  print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
}

/* The to_goto_record method of target record-btrace.  */

static void
record_btrace_goto (struct target_ops *self, ULONGEST insn)
{
  struct thread_info *tp;
  struct btrace_insn_iterator it;
  unsigned int number;
  int found;

  number = insn;

  /* Check for wrap-arounds.  */
  if (number != insn)
    error (_("Instruction number out of range."));

  tp = require_btrace_thread ();

  found = btrace_find_insn_by_number (&it, &tp->btrace, number);
  if (found == 0)
    error (_("No such instruction."));

  record_btrace_set_replay (tp, &it);

  print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
}

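/* The three methods above back the CLI commands
     (gdb) record goto begin
     (gdb) record goto end
     (gdb) record goto <n>
   where <n> is an instruction number as listed by
   "record instruction-history".  */
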
/* The to_execution_direction target method.  */

static enum exec_direction_kind
record_btrace_execution_direction (struct target_ops *self)
{
  return record_btrace_resume_exec_dir;
}

/* The to_prepare_to_generate_core target method.  */

static void
record_btrace_prepare_to_generate_core (struct target_ops *self)
{
  record_btrace_generating_corefile = 1;
}

/* The to_done_generating_core target method.  */

static void
record_btrace_done_generating_core (struct target_ops *self)
{
  record_btrace_generating_corefile = 0;
}

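/* While record_btrace_generating_corefile is set, memory requests are
   forwarded to the target beneath without the replay restrictions, so
   that e.g. "generate-core-file" sees the live target's memory.  */
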
/* Initialize the record-btrace target ops.  */

static void
init_record_btrace_ops (void)
{
  struct target_ops *ops;

  ops = &record_btrace_ops;
  ops->to_shortname = "record-btrace";
  ops->to_longname = "Branch tracing target";
  ops->to_doc = "Collect control-flow trace and provide the execution history.";
  ops->to_open = record_btrace_open;
  ops->to_close = record_btrace_close;
  ops->to_async = record_btrace_async;
  ops->to_detach = record_detach;
  ops->to_disconnect = record_disconnect;
  ops->to_mourn_inferior = record_mourn_inferior;
  ops->to_kill = record_kill;
  ops->to_stop_recording = record_btrace_stop_recording;
  ops->to_info_record = record_btrace_info;
  ops->to_insn_history = record_btrace_insn_history;
  ops->to_insn_history_from = record_btrace_insn_history_from;
  ops->to_insn_history_range = record_btrace_insn_history_range;
  ops->to_call_history = record_btrace_call_history;
  ops->to_call_history_from = record_btrace_call_history_from;
  ops->to_call_history_range = record_btrace_call_history_range;
  ops->to_record_is_replaying = record_btrace_is_replaying;
  ops->to_xfer_partial = record_btrace_xfer_partial;
  ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
  ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
  ops->to_fetch_registers = record_btrace_fetch_registers;
  ops->to_store_registers = record_btrace_store_registers;
  ops->to_prepare_to_store = record_btrace_prepare_to_store;
  ops->to_get_unwinder = &record_btrace_to_get_unwinder;
  ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;
  ops->to_resume = record_btrace_resume;
  ops->to_wait = record_btrace_wait;
  ops->to_update_thread_list = record_btrace_update_thread_list;
  ops->to_thread_alive = record_btrace_thread_alive;
  ops->to_goto_record_begin = record_btrace_goto_begin;
  ops->to_goto_record_end = record_btrace_goto_end;
  ops->to_goto_record = record_btrace_goto;
  ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
  ops->to_decr_pc_after_break = record_btrace_decr_pc_after_break;
  ops->to_execution_direction = record_btrace_execution_direction;
  ops->to_prepare_to_generate_core = record_btrace_prepare_to_generate_core;
  ops->to_done_generating_core = record_btrace_done_generating_core;
  ops->to_stratum = record_stratum;
  ops->to_magic = OPS_MAGIC;
}

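/* Once registered via add_target in _initialize_record_btrace below, the
   target can be pushed explicitly with
     (gdb) target record-btrace
   although "record btrace" is the usual entry point.  */
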
/* Start recording in BTS format.  */

static void
cmd_record_btrace_bts_start (char *args, int from_tty)
{
  volatile struct gdb_exception exception;

  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  record_btrace_conf.format = BTRACE_FORMAT_BTS;

  TRY_CATCH (exception, RETURN_MASK_ALL)
    execute_command ("target record-btrace", from_tty);

  if (exception.error != 0)
    {
      record_btrace_conf.format = BTRACE_FORMAT_NONE;
      throw_exception (exception);
    }
}

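/* Usage example:
     (gdb) record btrace bts
   On failure, the requested format is reset before the error is
   re-thrown, so the configuration does not claim a recording that never
   started.  */
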
/* Alias for "target record".  */

static void
cmd_record_btrace_start (char *args, int from_tty)
{
  volatile struct gdb_exception exception;

  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  record_btrace_conf.format = BTRACE_FORMAT_BTS;

  TRY_CATCH (exception, RETURN_MASK_ALL)
    execute_command ("target record-btrace", from_tty);

  if (exception.error == 0)
    return;

  record_btrace_conf.format = BTRACE_FORMAT_NONE;
  throw_exception (exception);
}

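/* Usage example (the "b" alias is registered below):
     (gdb) record btrace
     (gdb) record b
   Both currently default to the BTS format.  */
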
/* The "set record btrace" command.  */

static void
cmd_set_record_btrace (char *args, int from_tty)
{
  cmd_show_list (set_record_btrace_cmdlist, from_tty, "");
}

/* The "show record btrace" command.  */

static void
cmd_show_record_btrace (char *args, int from_tty)
{
  cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
}

/* The "show record btrace replay-memory-access" command.  */

static void
cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
                               struct cmd_list_element *c, const char *value)
{
  /* Print to FILE, as requested by the set/show machinery.  */
  fprintf_filtered (file, _("Replay memory access is %s.\n"),
                    replay_memory_access);
}

/* The "set record btrace bts" command.  */

static void
cmd_set_record_btrace_bts (char *args, int from_tty)
{
  printf_unfiltered (_("\"set record btrace bts\" must be followed "
                       "by an appropriate subcommand.\n"));
  help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
             all_commands, gdb_stdout);
}

/* The "show record btrace bts" command.  */

static void
cmd_show_record_btrace_bts (char *args, int from_tty)
{
  cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
}

void _initialize_record_btrace (void);

/* Initialize btrace commands.  */

void
_initialize_record_btrace (void)
{
  add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
                  _("Start branch trace recording."), &record_btrace_cmdlist,
                  "record btrace ", 0, &record_cmdlist);
  add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);

  add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
           _("\
Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
The processor stores a from/to record for each branch into a cyclic buffer.\n\
This format may not be available on all processors."),
           &record_btrace_cmdlist);
  add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);

  add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
                  _("Set record options"), &set_record_btrace_cmdlist,
                  "set record btrace ", 0, &set_record_cmdlist);

  add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
                  _("Show record options"), &show_record_btrace_cmdlist,
                  "show record btrace ", 0, &show_record_cmdlist);

  add_setshow_enum_cmd ("replay-memory-access", no_class,
                        replay_memory_access_types, &replay_memory_access, _("\
Set what memory accesses are allowed during replay."), _("\
Show what memory accesses are allowed during replay."),
                        _("Default is READ-ONLY.\n\n\
The btrace record target does not trace data.\n\
The memory therefore corresponds to the live target and not \
to the current replay position.\n\n\
When READ-ONLY, allow accesses to read-only memory during replay.\n\
When READ-WRITE, allow accesses to read-only and read-write memory during \
replay."),
                        NULL, cmd_show_replay_memory_access,
                        &set_record_btrace_cmdlist,
                        &show_record_btrace_cmdlist);

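  /* Example:
       (gdb) set record btrace replay-memory-access read-write
       (gdb) show record btrace replay-memory-access  */
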
  add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
                  _("Set record btrace bts options"),
                  &set_record_btrace_bts_cmdlist,
                  "set record btrace bts ", 0, &set_record_btrace_cmdlist);

  add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
                  _("Show record btrace bts options"),
                  &show_record_btrace_bts_cmdlist,
                  "show record btrace bts ", 0, &show_record_btrace_cmdlist);

  add_setshow_uinteger_cmd ("buffer-size", no_class,
                            &record_btrace_conf.bts.size,
                            _("Set the record/replay bts buffer size."),
                            _("Show the record/replay bts buffer size."), _("\
When starting recording, request a trace buffer of this size.  \
The actual buffer size may differ from the requested size.  \
Use \"info record\" to see the actual buffer size.\n\n\
Bigger buffers allow longer recordings but also take more time to process \
the recorded execution trace.\n\n\
The trace buffer size may not be changed while recording."), NULL, NULL,
                            &set_record_btrace_bts_cmdlist,
                            &show_record_btrace_bts_cmdlist);

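  /* Example:
       (gdb) set record btrace bts buffer-size 131072
       (gdb) show record btrace bts buffer-size  */
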
  init_record_btrace_ops ();
  add_target (&record_btrace_ops);

  bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
                               xcalloc, xfree);

  record_btrace_conf.bts.size = 64 * 1024;
}