/* Branch trace support for GDB, the GNU debugger.

   Copyright (C) 2013-2015 Free Software Foundation, Inc.

   Contributed by Intel Corp. <markus.t.metzger@intel.com>

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "record.h"
#include "gdbthread.h"
#include "target.h"
#include "gdbcmd.h"
#include "disasm.h"
#include "observer.h"
#include "cli/cli-utils.h"
#include "source.h"
#include "ui-out.h"
#include "symtab.h"
#include "filenames.h"
#include "regcache.h"
#include "frame-unwind.h"
#include "hashtab.h"
#include "infrun.h"
#include "event-loop.h"
#include "inf-loop.h"

/* The target_ops of record-btrace.  */
static struct target_ops record_btrace_ops;

/* A new thread observer enabling branch tracing for the new thread.  */
static struct observer *record_btrace_thread_observer;

/* Memory access types used in set/show record btrace replay-memory-access.  */
static const char replay_memory_access_read_only[] = "read-only";
static const char replay_memory_access_read_write[] = "read-write";
static const char *const replay_memory_access_types[] =
{
  replay_memory_access_read_only,
  replay_memory_access_read_write,
  NULL
};

/* The currently allowed replay memory access type.  */
static const char *replay_memory_access = replay_memory_access_read_only;

/* Command lists for "set/show record btrace".  */
static struct cmd_list_element *set_record_btrace_cmdlist;
static struct cmd_list_element *show_record_btrace_cmdlist;

/* The execution direction of the last resume we got.  See record-full.c.  */
static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;

/* The async event handler for reverse/replay execution.  */
static struct async_event_handler *record_btrace_async_inferior_event_handler;

/* A flag indicating that we are currently generating a core file.  */
static int record_btrace_generating_corefile;

/* The current branch trace configuration.  */
static struct btrace_config record_btrace_conf;

/* Command list for "record btrace".  */
static struct cmd_list_element *record_btrace_cmdlist;

/* Command lists for "set/show record btrace bts".  */
static struct cmd_list_element *set_record_btrace_bts_cmdlist;
static struct cmd_list_element *show_record_btrace_bts_cmdlist;

/* Print a record-btrace debug message.  Use do ... while (0) to avoid
   ambiguities when used in if statements.  */

#define DEBUG(msg, args...) \
  do \
    { \
      if (record_debug != 0) \
        fprintf_unfiltered (gdb_stdlog, \
                            "[record-btrace] " msg "\n", ##args); \
    } \
  while (0)
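
/* For illustration: a call such as DEBUG ("enable thread %d", tp->num)
   expands, after string literal concatenation, to the guarded statement

     do
       {
         if (record_debug != 0)
           fprintf_unfiltered (gdb_stdlog,
                               "[record-btrace] enable thread %d\n", tp->num);
       }
     while (0);

   so it is safe in unbraced if/else bodies and only prints when record
   debugging (the record_debug setting) is enabled.  */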

/* Update the branch trace for the current thread and return a pointer to its
   thread_info.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct thread_info *
require_btrace_thread (void)
{
  struct thread_info *tp;

  DEBUG ("require");

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  btrace_fetch (tp);

  if (btrace_is_empty (tp))
    error (_("No trace."));

  return tp;
}

/* Update the branch trace for the current thread and return a pointer to its
   branch trace information struct.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct btrace_thread_info *
require_btrace (void)
{
  struct thread_info *tp;

  tp = require_btrace_thread ();

  return &tp->btrace;
}

/* Enable branch tracing for one thread.  Warn on errors.  */

static void
record_btrace_enable_warn (struct thread_info *tp)
{
  volatile struct gdb_exception error;

  TRY_CATCH (error, RETURN_MASK_ERROR)
    btrace_enable (tp, &record_btrace_conf);

  if (error.message != NULL)
    warning ("%s", error.message);
}

/* Callback function to disable branch tracing for one thread.  */

static void
record_btrace_disable_callback (void *arg)
{
  struct thread_info *tp;

  tp = arg;

  btrace_disable (tp);
}

/* Enable automatic tracing of new threads.  */

static void
record_btrace_auto_enable (void)
{
  DEBUG ("attach thread observer");

  record_btrace_thread_observer
    = observer_attach_new_thread (record_btrace_enable_warn);
}

/* Disable automatic tracing of new threads.  */

static void
record_btrace_auto_disable (void)
{
  /* The observer may have been detached, already.  */
  if (record_btrace_thread_observer == NULL)
    return;

  DEBUG ("detach thread observer");

  observer_detach_new_thread (record_btrace_thread_observer);
  record_btrace_thread_observer = NULL;
}

/* The record-btrace async event handler function.  */

static void
record_btrace_handle_async_inferior_event (gdb_client_data data)
{
  inferior_event_handler (INF_REG_EVENT, NULL);
}

/* The to_open method of target record-btrace.  */

static void
record_btrace_open (const char *args, int from_tty)
{
  struct cleanup *disable_chain;
  struct thread_info *tp;

  DEBUG ("open");

  record_preopen ();

  if (!target_has_execution)
    error (_("The program is not being run."));

  if (non_stop)
    error (_("Record btrace can't debug inferior in non-stop mode."));

  gdb_assert (record_btrace_thread_observer == NULL);

  disable_chain = make_cleanup (null_cleanup, NULL);
  ALL_NON_EXITED_THREADS (tp)
    if (args == NULL || *args == 0 || number_is_in_list (args, tp->num))
      {
        btrace_enable (tp, &record_btrace_conf);

        make_cleanup (record_btrace_disable_callback, tp);
      }

  record_btrace_auto_enable ();

  push_target (&record_btrace_ops);

  record_btrace_async_inferior_event_handler
    = create_async_event_handler (record_btrace_handle_async_inferior_event,
                                  NULL);
  record_btrace_generating_corefile = 0;

  observer_notify_record_changed (current_inferior (), 1);

  discard_cleanups (disable_chain);
}

/* The to_stop_recording method of target record-btrace.  */

static void
record_btrace_stop_recording (struct target_ops *self)
{
  struct thread_info *tp;

  DEBUG ("stop recording");

  record_btrace_auto_disable ();

  ALL_NON_EXITED_THREADS (tp)
    if (tp->btrace.target != NULL)
      btrace_disable (tp);
}

/* The to_close method of target record-btrace.  */

static void
record_btrace_close (struct target_ops *self)
{
  struct thread_info *tp;

  if (record_btrace_async_inferior_event_handler != NULL)
    delete_async_event_handler (&record_btrace_async_inferior_event_handler);

  /* Make sure automatic recording gets disabled even if we did not stop
     recording before closing the record-btrace target.  */
  record_btrace_auto_disable ();

  /* We should have already stopped recording.
     Tear down btrace in case we have not.  */
  ALL_NON_EXITED_THREADS (tp)
    btrace_teardown (tp);
}

/* The to_async method of target record-btrace.  */

static void
record_btrace_async (struct target_ops *ops,
                     void (*callback) (enum inferior_event_type event_type,
                                       void *context),
                     void *context)
{
  if (callback != NULL)
    mark_async_event_handler (record_btrace_async_inferior_event_handler);
  else
    clear_async_event_handler (record_btrace_async_inferior_event_handler);

  ops->beneath->to_async (ops->beneath, callback, context);
}

/* Adjust the size and return a human-readable size suffix.  */

static const char *
record_btrace_adjust_size (unsigned int *size)
{
  unsigned int sz;

  sz = *size;

  if ((sz & ((1u << 30) - 1)) == 0)
    {
      *size = sz >> 30;
      return "GB";
    }
  else if ((sz & ((1u << 20) - 1)) == 0)
    {
      *size = sz >> 20;
      return "MB";
    }
  else if ((sz & ((1u << 10) - 1)) == 0)
    {
      *size = sz >> 10;
      return "kB";
    }
  else
    return "";
}
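
/* For example (values worked out from the masks above): a size of
   2097152 (2 << 20) has all low 20 bits clear, so it is scaled to 2
   with suffix "MB"; 1024 yields 1 with suffix "kB"; 1000 is not a
   multiple of 1024 and is returned unscaled with an empty suffix.  */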

/* Print a BTS configuration.  */

static void
record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
{
  const char *suffix;
  unsigned int size;

  size = conf->size;
  if (size > 0)
    {
      suffix = record_btrace_adjust_size (&size);
      printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
    }
}

/* Print a branch tracing configuration.  */

static void
record_btrace_print_conf (const struct btrace_config *conf)
{
  printf_unfiltered (_("Recording format: %s.\n"),
                     btrace_format_string (conf->format));

  switch (conf->format)
    {
    case BTRACE_FORMAT_NONE:
      return;

    case BTRACE_FORMAT_BTS:
      record_btrace_print_bts_conf (&conf->bts);
      return;
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
}

/* The to_info_record method of target record-btrace.  */

static void
record_btrace_info (struct target_ops *self)
{
  struct btrace_thread_info *btinfo;
  const struct btrace_config *conf;
  struct thread_info *tp;
  unsigned int insns, calls, gaps;

  DEBUG ("info");

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  btinfo = &tp->btrace;

  conf = btrace_conf (btinfo);
  if (conf != NULL)
    record_btrace_print_conf (conf);

  btrace_fetch (tp);

  insns = 0;
  calls = 0;
  gaps = 0;

  if (!btrace_is_empty (tp))
    {
      struct btrace_call_iterator call;
      struct btrace_insn_iterator insn;

      btrace_call_end (&call, btinfo);
      btrace_call_prev (&call, 1);
      calls = btrace_call_number (&call);

      btrace_insn_end (&insn, btinfo);

      insns = btrace_insn_number (&insn);
      if (insns != 0)
        {
          /* The last instruction does not really belong to the trace.  */
          insns -= 1;
        }
      else
        {
          unsigned int steps;

          /* Skip gaps at the end.  */
          do
            {
              steps = btrace_insn_prev (&insn, 1);
              if (steps == 0)
                break;

              insns = btrace_insn_number (&insn);
            }
          while (insns == 0);
        }

      gaps = btinfo->ngaps;
    }

  printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
                       "for thread %d (%s).\n"), insns, calls, gaps,
                     tp->num, target_pid_to_str (tp->ptid));

  if (btrace_is_replaying (tp))
    printf_unfiltered (_("Replay in progress.  At instruction %u.\n"),
                       btrace_insn_number (btinfo->replay));
}

/* Print a decode error.  */

static void
btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
                            enum btrace_format format)
{
  const char *errstr;
  int is_error;

  errstr = _("unknown");
  is_error = 1;

  switch (format)
    {
    default:
      break;

    case BTRACE_FORMAT_BTS:
      switch (errcode)
        {
        default:
          break;

        case BDE_BTS_OVERFLOW:
          errstr = _("instruction overflow");
          break;

        case BDE_BTS_INSN_SIZE:
          errstr = _("unknown instruction");
          break;
        }
      break;
    }

  ui_out_text (uiout, _("["));
  if (is_error)
    {
      ui_out_text (uiout, _("decode error ("));
      ui_out_field_int (uiout, "errcode", errcode);
      ui_out_text (uiout, _("): "));
    }
  ui_out_text (uiout, errstr);
  ui_out_text (uiout, _("]\n"));
}
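
/* For illustration: with BTS format and errcode BDE_BTS_OVERFLOW the
   function above prints a line such as

     [decode error (<errcode>): instruction overflow]

   while an errcode that is not recognized for the given format prints
   "unknown" as the error string.  The numeric value shown depends on
   the BDE_* constant, so it is left symbolic here.  */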

/* Print an unsigned int.  */

static void
ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
{
  ui_out_field_fmt (uiout, fld, "%u", val);
}

/* Disassemble a section of the recorded instruction trace.  */

static void
btrace_insn_history (struct ui_out *uiout,
                     const struct btrace_thread_info *btinfo,
                     const struct btrace_insn_iterator *begin,
                     const struct btrace_insn_iterator *end, int flags)
{
  struct gdbarch *gdbarch;
  struct btrace_insn_iterator it;

  DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
         btrace_insn_number (end));

  gdbarch = target_gdbarch ();

  for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
    {
      const struct btrace_insn *insn;

      insn = btrace_insn_get (&it);

      /* A NULL instruction indicates a gap in the trace.  */
      if (insn == NULL)
        {
          const struct btrace_config *conf;

          conf = btrace_conf (btinfo);

          /* We have trace so we must have a configuration.  */
          gdb_assert (conf != NULL);

          btrace_ui_out_decode_error (uiout, it.function->errcode,
                                      conf->format);
        }
      else
        {
          /* Print the instruction index.  */
          ui_out_field_uint (uiout, "index", btrace_insn_number (&it));
          ui_out_text (uiout, "\t");

          /* Disassembly with '/m' flag may not produce the expected result.
             See PR gdb/11833.  */
          gdb_disassembly (gdbarch, uiout, NULL, flags, 1, insn->pc,
                           insn->pc + 1);
        }
    }
}

/* The to_insn_history method of target record-btrace.  */

static void
record_btrace_insn_history (struct target_ops *self, int size, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
                                                       "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  btinfo = require_btrace ();
  history = btinfo->insn_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("insn-history (0x%x): %d", flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
         start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
        begin = *replay;
      else
        btrace_insn_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
         expand in the other direction, as well, to fill up any remaining
         context.  */
      end = begin;
      if (size < 0)
        {
          /* We want the current position covered, as well.  */
          covered = btrace_insn_next (&end, 1);
          covered += btrace_insn_prev (&begin, context - covered);
          covered += btrace_insn_next (&end, context - covered);
        }
      else
        {
          covered = btrace_insn_next (&end, context);
          covered += btrace_insn_prev (&begin, context - covered);
        }
    }
  else
    {
      begin = history->begin;
      end = history->end;

      DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
             btrace_insn_number (&begin), btrace_insn_number (&end));

      if (size < 0)
        {
          end = begin;
          covered = btrace_insn_prev (&begin, context);
        }
      else
        {
          begin = end;
          covered = btrace_insn_next (&end, context);
        }
    }

  if (covered > 0)
    btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
        printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
        printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  btrace_set_insn_history (btinfo, &begin, &end);
  do_cleanups (uiout_cleanup);
}
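
/* A worked example for the context expansion above (illustrative
   numbers, assuming enough recorded history): replaying at instruction
   50 with SIZE == -10, the current position is covered first, so
   btrace_insn_next moves END to 51 (covered == 1), then
   btrace_insn_prev moves BEGIN back to 41 (covered == 10), and the
   final btrace_insn_next is asked for 0 more steps.  The resulting
   half-open range [41; 51) shows instructions 41..50.  */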

/* The to_insn_history_range method of target record-btrace.  */

static void
record_btrace_insn_history_range (struct target_ops *self,
                                  ULONGEST from, ULONGEST to, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
                                                       "insn history");
  low = from;
  high = to;

  DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_insn_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_insn_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_insn_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_insn_next (&end, 1);
    }

  btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_insn_history (btinfo, &begin, &end);

  do_cleanups (uiout_cleanup);
}

/* The to_insn_history_from method of target record-btrace.  */

static void
record_btrace_insn_history_from (struct target_ops *self,
                                 ULONGEST from, int size, int flags)
{
  ULONGEST begin, end, context;

  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  if (size < 0)
    {
      end = from;

      if (from < context)
        begin = 0;
      else
        begin = from - context + 1;
    }
  else
    {
      begin = from;
      end = from + context - 1;

      /* Check for wrap-around.  */
      if (end < begin)
        end = ULONGEST_MAX;
    }

  record_btrace_insn_history_range (self, begin, end, flags);
}
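
/* For example: FROM == 10 with SIZE == -5 yields the inclusive range
   [6; 10] (END == FROM, BEGIN == FROM - 5 + 1), while FROM == 10 with
   SIZE == 5 yields [10; 14].  A FROM smaller than the requested
   context clamps BEGIN to 0, and a forward range that overflows
   ULONGEST is clamped to ULONGEST_MAX.  */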

/* Print the instruction number range for a function call history line.  */

static void
btrace_call_history_insn_range (struct ui_out *uiout,
                                const struct btrace_function *bfun)
{
  unsigned int begin, end, size;

  size = VEC_length (btrace_insn_s, bfun->insn);
  gdb_assert (size > 0);

  begin = bfun->insn_offset;
  end = begin + size - 1;

  ui_out_field_uint (uiout, "insn begin", begin);
  ui_out_text (uiout, ",");
  ui_out_field_uint (uiout, "insn end", end);
}
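
/* For illustration: a function segment whose first instruction has
   trace-wide number 10 (insn_offset == 10) and that contains 5
   instructions prints "10,14", i.e. an inclusive range.  */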

/* Compute the lowest and highest source line for the instructions in BFUN
   and return them in PBEGIN and PEND.
   Ignore instructions that can't be mapped to BFUN, e.g. instructions that
   result from inlining or macro expansion.  */

static void
btrace_compute_src_line_range (const struct btrace_function *bfun,
                               int *pbegin, int *pend)
{
  struct btrace_insn *insn;
  struct symtab *symtab;
  struct symbol *sym;
  unsigned int idx;
  int begin, end;

  begin = INT_MAX;
  end = INT_MIN;

  sym = bfun->sym;
  if (sym == NULL)
    goto out;

  symtab = symbol_symtab (sym);

  for (idx = 0; VEC_iterate (btrace_insn_s, bfun->insn, idx, insn); ++idx)
    {
      struct symtab_and_line sal;

      sal = find_pc_line (insn->pc, 0);
      if (sal.symtab != symtab || sal.line == 0)
        continue;

      begin = min (begin, sal.line);
      end = max (end, sal.line);
    }

 out:
  *pbegin = begin;
  *pend = end;
}

/* Print the source line information for a function call history line.  */

static void
btrace_call_history_src_line (struct ui_out *uiout,
                              const struct btrace_function *bfun)
{
  struct symbol *sym;
  int begin, end;

  sym = bfun->sym;
  if (sym == NULL)
    return;

  ui_out_field_string (uiout, "file",
                       symtab_to_filename_for_display (symbol_symtab (sym)));

  btrace_compute_src_line_range (bfun, &begin, &end);
  if (end < begin)
    return;

  ui_out_text (uiout, ":");
  ui_out_field_int (uiout, "min line", begin);

  if (end == begin)
    return;

  ui_out_text (uiout, ",");
  ui_out_field_int (uiout, "max line", end);
}

/* Get the name of a branch trace function.  */

static const char *
btrace_get_bfun_name (const struct btrace_function *bfun)
{
  struct minimal_symbol *msym;
  struct symbol *sym;

  if (bfun == NULL)
    return "??";

  msym = bfun->msym;
  sym = bfun->sym;

  if (sym != NULL)
    return SYMBOL_PRINT_NAME (sym);
  else if (msym != NULL)
    return MSYMBOL_PRINT_NAME (msym);
  else
    return "??";
}

/* Disassemble a section of the recorded function trace.  */

static void
btrace_call_history (struct ui_out *uiout,
                     const struct btrace_thread_info *btinfo,
                     const struct btrace_call_iterator *begin,
                     const struct btrace_call_iterator *end,
                     enum record_print_flag flags)
{
  struct btrace_call_iterator it;

  DEBUG ("ftrace (0x%x): [%u; %u)", flags, btrace_call_number (begin),
         btrace_call_number (end));

  for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
    {
      const struct btrace_function *bfun;
      struct minimal_symbol *msym;
      struct symbol *sym;

      bfun = btrace_call_get (&it);
      sym = bfun->sym;
      msym = bfun->msym;

      /* Print the function index.  */
      ui_out_field_uint (uiout, "index", bfun->number);
      ui_out_text (uiout, "\t");

      /* Indicate gaps in the trace.  */
      if (bfun->errcode != 0)
        {
          const struct btrace_config *conf;

          conf = btrace_conf (btinfo);

          /* We have trace so we must have a configuration.  */
          gdb_assert (conf != NULL);

          btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);

          continue;
        }

      if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
        {
          int level = bfun->level + btinfo->level, i;

          for (i = 0; i < level; ++i)
            ui_out_text (uiout, "  ");
        }

      if (sym != NULL)
        ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (sym));
      else if (msym != NULL)
        ui_out_field_string (uiout, "function", MSYMBOL_PRINT_NAME (msym));
      else if (!ui_out_is_mi_like_p (uiout))
        ui_out_field_string (uiout, "function", "??");

      if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
        {
          ui_out_text (uiout, _("\tinst "));
          btrace_call_history_insn_range (uiout, bfun);
        }

      if ((flags & RECORD_PRINT_SRC_LINE) != 0)
        {
          ui_out_text (uiout, _("\tat "));
          btrace_call_history_src_line (uiout, bfun);
        }

      ui_out_text (uiout, "\n");
    }
}

/* The to_call_history method of target record-btrace.  */

static void
record_btrace_call_history (struct target_ops *self, int size, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
905 "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  btinfo = require_btrace ();
  history = btinfo->call_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("call-history (0x%x): %d", flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
         start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
        {
          begin.function = replay->function;
          begin.btinfo = btinfo;
        }
      else
        btrace_call_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
         expand in the other direction, as well, to fill up any remaining
         context.  */
      end = begin;
      if (size < 0)
        {
          /* We want the current position covered, as well.  */
          covered = btrace_call_next (&end, 1);
          covered += btrace_call_prev (&begin, context - covered);
          covered += btrace_call_next (&end, context - covered);
        }
      else
        {
          covered = btrace_call_next (&end, context);
          covered += btrace_call_prev (&begin, context - covered);
        }
    }
  else
    {
      begin = history->begin;
      end = history->end;

      DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", flags, size,
             btrace_call_number (&begin), btrace_call_number (&end));

      if (size < 0)
        {
          end = begin;
          covered = btrace_call_prev (&begin, context);
        }
      else
        {
          begin = end;
          covered = btrace_call_next (&end, context);
        }
    }

  if (covered > 0)
    btrace_call_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
        printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
        printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  btrace_set_call_history (btinfo, &begin, &end);
  do_cleanups (uiout_cleanup);
}

/* The to_call_history_range method of target record-btrace.  */

static void
record_btrace_call_history_range (struct target_ops *self,
                                  ULONGEST from, ULONGEST to, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
                                                       "func history");
  low = from;
  high = to;

  DEBUG ("call-history (0x%x): [%u; %u)", flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_call_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_call_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_call_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_call_next (&end, 1);
    }

  btrace_call_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_call_history (btinfo, &begin, &end);

  do_cleanups (uiout_cleanup);
}

/* The to_call_history_from method of target record-btrace.  */

static void
record_btrace_call_history_from (struct target_ops *self,
                                 ULONGEST from, int size, int flags)
{
  ULONGEST begin, end, context;

  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  if (size < 0)
    {
      end = from;

      if (from < context)
        begin = 0;
      else
        begin = from - context + 1;
    }
  else
    {
      begin = from;
      end = from + context - 1;

      /* Check for wrap-around.  */
      if (end < begin)
        end = ULONGEST_MAX;
    }

  record_btrace_call_history_range (self, begin, end, flags);
}

/* The to_record_is_replaying method of target record-btrace.  */

static int
record_btrace_is_replaying (struct target_ops *self)
{
  struct thread_info *tp;

  ALL_NON_EXITED_THREADS (tp)
    if (btrace_is_replaying (tp))
      return 1;

  return 0;
}

/* The to_xfer_partial method of target record-btrace.  */

static enum target_xfer_status
record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
                            const char *annex, gdb_byte *readbuf,
                            const gdb_byte *writebuf, ULONGEST offset,
                            ULONGEST len, ULONGEST *xfered_len)
{
  /* Filter out requests that don't make sense during replay.  */
  if (replay_memory_access == replay_memory_access_read_only
      && !record_btrace_generating_corefile
      && record_btrace_is_replaying (ops))
    {
      switch (object)
        {
        case TARGET_OBJECT_MEMORY:
          {
            struct target_section *section;

            /* We do not allow writing memory in general.  */
            if (writebuf != NULL)
              {
                *xfered_len = len;
                return TARGET_XFER_UNAVAILABLE;
              }

            /* We allow reading readonly memory.  */
            section = target_section_by_addr (ops, offset);
            if (section != NULL)
              {
                /* Check if the section we found is readonly.  */
                if ((bfd_get_section_flags (section->the_bfd_section->owner,
                                            section->the_bfd_section)
                     & SEC_READONLY) != 0)
                  {
                    /* Truncate the request to fit into this section.  */
                    len = min (len, section->endaddr - offset);
                    break;
                  }
              }

            *xfered_len = len;
            return TARGET_XFER_UNAVAILABLE;
          }
        }
    }

  /* Forward the request.  */
  ops = ops->beneath;
  return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
                               offset, len, xfered_len);
}
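
/* For illustration (a sketch of the user-visible effect): while
   replaying with the default read-only setting, a memory write is
   answered with TARGET_XFER_UNAVAILABLE, whereas reads from read-only
   sections (e.g. .text) are forwarded to the target beneath.  The
   restriction can be lifted with

     (gdb) set record btrace replay-memory-access read-write

   which switches REPLAY_MEMORY_ACCESS to the read-write type declared
   at the top of this file.  */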

/* The to_insert_breakpoint method of target record-btrace.  */

static int
record_btrace_insert_breakpoint (struct target_ops *ops,
                                 struct gdbarch *gdbarch,
                                 struct bp_target_info *bp_tgt)
{
  volatile struct gdb_exception except;
  const char *old;
  int ret;

  /* Inserting breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  TRY_CATCH (except, RETURN_MASK_ALL)
    ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);

  replay_memory_access = old;

  if (except.reason < 0)
    throw_exception (except);

  return ret;
}

/* The to_remove_breakpoint method of target record-btrace.  */

static int
record_btrace_remove_breakpoint (struct target_ops *ops,
                                 struct gdbarch *gdbarch,
                                 struct bp_target_info *bp_tgt)
{
  volatile struct gdb_exception except;
  const char *old;
  int ret;

  /* Removing breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  TRY_CATCH (except, RETURN_MASK_ALL)
    ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt);

  replay_memory_access = old;

  if (except.reason < 0)
    throw_exception (except);

  return ret;
}

/* The to_fetch_registers method of target record-btrace.  */

static void
record_btrace_fetch_registers (struct target_ops *ops,
                               struct regcache *regcache, int regno)
{
  struct btrace_insn_iterator *replay;
  struct thread_info *tp;

  tp = find_thread_ptid (inferior_ptid);
  gdb_assert (tp != NULL);

  replay = tp->btrace.replay;
  if (replay != NULL && !record_btrace_generating_corefile)
    {
      const struct btrace_insn *insn;
      struct gdbarch *gdbarch;
      int pcreg;

      gdbarch = get_regcache_arch (regcache);
      pcreg = gdbarch_pc_regnum (gdbarch);
      if (pcreg < 0)
        return;

      /* We can only provide the PC register.  */
      if (regno >= 0 && regno != pcreg)
        return;

      insn = btrace_insn_get (replay);
      gdb_assert (insn != NULL);

      regcache_raw_supply (regcache, regno, &insn->pc);
    }
  else
    {
      struct target_ops *t = ops->beneath;

      t->to_fetch_registers (t, regcache, regno);
    }
}

/* The to_store_registers method of target record-btrace.  */

static void
record_btrace_store_registers (struct target_ops *ops,
                               struct regcache *regcache, int regno)
{
  struct target_ops *t;

  if (!record_btrace_generating_corefile && record_btrace_is_replaying (ops))
    error (_("This record target does not allow writing registers."));

  gdb_assert (may_write_registers != 0);

  t = ops->beneath;
  t->to_store_registers (t, regcache, regno);
}

/* The to_prepare_to_store method of target record-btrace.  */

static void
record_btrace_prepare_to_store (struct target_ops *ops,
                                struct regcache *regcache)
{
  struct target_ops *t;

  if (!record_btrace_generating_corefile && record_btrace_is_replaying (ops))
    return;

  t = ops->beneath;
  t->to_prepare_to_store (t, regcache);
}

/* The branch trace frame cache.  */

struct btrace_frame_cache
{
  /* The thread.  */
  struct thread_info *tp;

  /* The frame info.  */
  struct frame_info *frame;

  /* The branch trace function segment.  */
  const struct btrace_function *bfun;
};

/* A struct btrace_frame_cache hash table indexed by NEXT.  */

static htab_t bfcache;

/* hash_f for htab_create_alloc of bfcache.  */

static hashval_t
bfcache_hash (const void *arg)
{
  const struct btrace_frame_cache *cache = arg;

  return htab_hash_pointer (cache->frame);
}

/* eq_f for htab_create_alloc of bfcache.  */

static int
bfcache_eq (const void *arg1, const void *arg2)
{
  const struct btrace_frame_cache *cache1 = arg1;
  const struct btrace_frame_cache *cache2 = arg2;

  return cache1->frame == cache2->frame;
}
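
/* The cache itself is expected to be created at initialization time,
   roughly as follows (a sketch; the actual call lives in this file's
   _initialize routine, which is not part of this excerpt):

     bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq,
                                  NULL, xcalloc, xfree);

   Entries hash and compare on the frame_info pointer, so each frame
   maps to at most one cache entry.  */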

/* Create a new btrace frame cache.  */

static struct btrace_frame_cache *
bfcache_new (struct frame_info *frame)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
  cache->frame = frame;

  slot = htab_find_slot (bfcache, cache, INSERT);
  gdb_assert (*slot == NULL);
  *slot = cache;

  return cache;
}

/* Extract the branch trace function from a branch trace frame.  */

static const struct btrace_function *
btrace_get_frame_function (struct frame_info *frame)
{
  const struct btrace_frame_cache *cache;
  struct btrace_frame_cache pattern;
  void **slot;

  pattern.frame = frame;

  slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
  if (slot == NULL)
    return NULL;

  cache = *slot;
  return cache->bfun;
}

/* Implement stop_reason method for record_btrace_frame_unwind.  */

static enum unwind_stop_reason
record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
                                        void **this_cache)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;

  cache = *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  if (bfun->up == NULL)
    return UNWIND_UNAVAILABLE;

  return UNWIND_NO_REASON;
}

/* Implement this_id method for record_btrace_frame_unwind.  */

static void
record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
                             struct frame_id *this_id)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;
  CORE_ADDR code, special;

  cache = *this_cache;

  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  while (bfun->segment.prev != NULL)
    bfun = bfun->segment.prev;

  code = get_frame_func (this_frame);
  special = bfun->number;

  *this_id = frame_id_build_unavailable_stack_special (code, special);

  DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
         btrace_get_bfun_name (cache->bfun),
         core_addr_to_string_nz (this_id->code_addr),
         core_addr_to_string_nz (this_id->special_addr));
}

/* Implement prev_register method for record_btrace_frame_unwind.  */

static struct value *
record_btrace_frame_prev_register (struct frame_info *this_frame,
                                   void **this_cache,
                                   int regnum)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun, *caller;
  const struct btrace_insn *insn;
  struct gdbarch *gdbarch;
  CORE_ADDR pc;
  int pcreg;

  gdbarch = get_frame_arch (this_frame);
  pcreg = gdbarch_pc_regnum (gdbarch);
  if (pcreg < 0 || regnum != pcreg)
    throw_error (NOT_AVAILABLE_ERROR,
                 _("Registers are not available in btrace record history"));

  cache = *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  caller = bfun->up;
  if (caller == NULL)
    throw_error (NOT_AVAILABLE_ERROR,
                 _("No caller in btrace record history"));

  if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
    {
      insn = VEC_index (btrace_insn_s, caller->insn, 0);
      pc = insn->pc;
    }
  else
    {
      insn = VEC_last (btrace_insn_s, caller->insn);
      pc = insn->pc;

      pc += gdb_insn_length (gdbarch, pc);
    }

  DEBUG ("[frame] unwound PC in %s on level %d: %s",
         btrace_get_bfun_name (bfun), bfun->level,
         core_addr_to_string_nz (pc));

  return frame_unwind_got_address (this_frame, regnum, pc);
}

/* Implement sniffer method for record_btrace_frame_unwind.  */

static int
record_btrace_frame_sniffer (const struct frame_unwind *self,
                             struct frame_info *this_frame,
                             void **this_cache)
{
  const struct btrace_function *bfun;
  struct btrace_frame_cache *cache;
  struct thread_info *tp;
  struct frame_info *next;

  /* THIS_FRAME does not contain a reference to its thread.  */
  tp = find_thread_ptid (inferior_ptid);
  gdb_assert (tp != NULL);

  bfun = NULL;
  next = get_next_frame (this_frame);
  if (next == NULL)
    {
      const struct btrace_insn_iterator *replay;

      replay = tp->btrace.replay;
      if (replay != NULL)
        bfun = replay->function;
    }
  else
    {
      const struct btrace_function *callee;

      callee = btrace_get_frame_function (next);
      if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
        bfun = callee->up;
    }

  if (bfun == NULL)
    return 0;

  DEBUG ("[frame] sniffed frame for %s on level %d",
         btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = tp;
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}

/* Implement sniffer method for record_btrace_tailcall_frame_unwind.  */

static int
record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
                                      struct frame_info *this_frame,
                                      void **this_cache)
{
  const struct btrace_function *bfun, *callee;
  struct btrace_frame_cache *cache;
  struct frame_info *next;

  next = get_next_frame (this_frame);
  if (next == NULL)
    return 0;

  callee = btrace_get_frame_function (next);
  if (callee == NULL)
    return 0;

  if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
    return 0;

  bfun = callee->up;
  if (bfun == NULL)
    return 0;

  DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
         btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = find_thread_ptid (inferior_ptid);
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}

/* Implement dealloc_cache method for record_btrace_frame_unwind.  */

static void
record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = this_cache;

  slot = htab_find_slot (bfcache, cache, NO_INSERT);
  gdb_assert (slot != NULL);

  htab_remove_elt (bfcache, cache);
}

/* btrace recording does not store previous memory content, nor the stack
   frames' content.  Any unwinding would return erroneous results as the
   stack contents no longer match the changed PC value restored from
   history.  Therefore this unwinder reports any possibly unwound
   registers as <unavailable>.  */

const struct frame_unwind record_btrace_frame_unwind =
{
  NORMAL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_frame_sniffer,
  record_btrace_frame_dealloc_cache
};

const struct frame_unwind record_btrace_tailcall_frame_unwind =
{
  TAILCALL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_tailcall_frame_sniffer,
  record_btrace_frame_dealloc_cache
};

/* Implement the to_get_unwinder method.  */

static const struct frame_unwind *
record_btrace_to_get_unwinder (struct target_ops *self)
{
  return &record_btrace_frame_unwind;
}

/* Implement the to_get_tailcall_unwinder method.  */

static const struct frame_unwind *
record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
{
  return &record_btrace_tailcall_frame_unwind;
}

/* Indicate that TP should be resumed according to FLAG.  */

static void
record_btrace_resume_thread (struct thread_info *tp,
                             enum btrace_thread_flag flag)
{
  struct btrace_thread_info *btinfo;

  DEBUG ("resuming %d (%s): %u", tp->num, target_pid_to_str (tp->ptid), flag);

  btinfo = &tp->btrace;

  if ((btinfo->flags & BTHR_MOVE) != 0)
    error (_("Thread already moving."));

  /* Fetch the latest branch trace.  */
  btrace_fetch (tp);

  btinfo->flags |= flag;
}

/* Find the thread to resume given a PTID.  */

static struct thread_info *
record_btrace_find_resume_thread (ptid_t ptid)
{
  struct thread_info *tp;

  /* When asked to resume everything, we pick the current thread.  */
  if (ptid_equal (minus_one_ptid, ptid) || ptid_is_pid (ptid))
    ptid = inferior_ptid;

  return find_thread_ptid (ptid);
}

/* Start replaying a thread.  */

static struct btrace_insn_iterator *
record_btrace_start_replaying (struct thread_info *tp)
{
  volatile struct gdb_exception except;
  struct btrace_insn_iterator *replay;
  struct btrace_thread_info *btinfo;
  int executing;

  btinfo = &tp->btrace;
  replay = NULL;

  /* We can't start replaying without trace.  */
  if (btinfo->begin == NULL)
    return NULL;

  /* Clear the executing flag to allow changes to the current frame.
     We are not actually running, yet.  We just started a reverse execution
     command or a record goto command.
     For the latter, EXECUTING is false and this has no effect.
     For the former, EXECUTING is true and we're in to_wait, about to
     move the thread.  Since we need to recompute the stack, we temporarily
     set EXECUTING to false.  */
  executing = is_executing (tp->ptid);
  set_executing (tp->ptid, 0);

  /* GDB stores the current frame_id when stepping in order to detect steps
     into subroutines.
     Since frames are computed differently when we're replaying, we need to
     recompute those stored frames and fix them up so we can still detect
     subroutines after we started replaying.  */
  TRY_CATCH (except, RETURN_MASK_ALL)
    {
      struct frame_info *frame;
      struct frame_id frame_id;
      int upd_step_frame_id, upd_step_stack_frame_id;

      /* The current frame without replaying - computed via normal unwind.  */
      frame = get_current_frame ();
      frame_id = get_frame_id (frame);

      /* Check if we need to update any stepping-related frame id's.  */
      upd_step_frame_id = frame_id_eq (frame_id,
                                       tp->control.step_frame_id);
      upd_step_stack_frame_id = frame_id_eq (frame_id,
                                             tp->control.step_stack_frame_id);

      /* We start replaying at the end of the branch trace.  This corresponds
         to the current instruction.  */
      replay = xmalloc (sizeof (*replay));
      btrace_insn_end (replay, btinfo);

      /* Skip gaps at the end of the trace.  */
      while (btrace_insn_get (replay) == NULL)
        {
          unsigned int steps;

          steps = btrace_insn_prev (replay, 1);
          if (steps == 0)
            error (_("No trace."));
        }

      /* We're not replaying, yet.  */
      gdb_assert (btinfo->replay == NULL);
      btinfo->replay = replay;

      /* Make sure we're not using any stale registers.  */
      registers_changed_ptid (tp->ptid);

      /* The current frame with replaying - computed via btrace unwind.  */
      frame = get_current_frame ();
      frame_id = get_frame_id (frame);

      /* Replace stepping related frames where necessary.  */
      if (upd_step_frame_id)
        tp->control.step_frame_id = frame_id;
      if (upd_step_stack_frame_id)
        tp->control.step_stack_frame_id = frame_id;
    }

  /* Restore the previous execution state.  */
  set_executing (tp->ptid, executing);

  if (except.reason < 0)
    {
      xfree (btinfo->replay);
      btinfo->replay = NULL;

      registers_changed_ptid (tp->ptid);

      throw_exception (except);
    }

  return replay;
}

/* Stop replaying a thread.  */

static void
record_btrace_stop_replaying (struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;

  xfree (btinfo->replay);
  btinfo->replay = NULL;

  /* Make sure we're not leaving any stale registers.  */
  registers_changed_ptid (tp->ptid);
}

/* The to_resume method of target record-btrace.  */

static void
record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
                      enum gdb_signal signal)
{
  struct thread_info *tp, *other;
  enum btrace_thread_flag flag;

  DEBUG ("resume %s: %s", target_pid_to_str (ptid), step ? "step" : "cont");

  /* Store the execution direction of the last resume.  */
  record_btrace_resume_exec_dir = execution_direction;

  tp = record_btrace_find_resume_thread (ptid);
  if (tp == NULL)
    error (_("Cannot find thread to resume."));

  /* Stop replaying other threads if the thread to resume is not replaying.  */
  if (!btrace_is_replaying (tp) && execution_direction != EXEC_REVERSE)
    ALL_NON_EXITED_THREADS (other)
      record_btrace_stop_replaying (other);

  /* As long as we're not replaying, just forward the request.  */
  if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
    {
      ops = ops->beneath;
      return ops->to_resume (ops, ptid, step, signal);
    }

  /* Compute the btrace thread flag for the requested move.  */
  if (step == 0)
    flag = execution_direction == EXEC_REVERSE ? BTHR_RCONT : BTHR_CONT;
  else
    flag = execution_direction == EXEC_REVERSE ? BTHR_RSTEP : BTHR_STEP;

  /* At the moment, we only move a single thread.  We could also move
     all threads in parallel by single-stepping each resumed thread
     until the first runs into an event.
     When we do that, we would want to continue all other threads.
     For now, just resume one thread to not confuse to_wait.  */
  record_btrace_resume_thread (tp, flag);

  /* We just indicate the resume intent here.  The actual stepping happens in
     record_btrace_wait below.  */

  /* Async support.  */
  if (target_can_async_p ())
    {
      target_async (inferior_event_handler, 0);
      mark_async_event_handler (record_btrace_async_inferior_event_handler);
    }
}
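
/* The mapping computed above, spelled out: a forward continue requests
   BTHR_CONT, a forward step BTHR_STEP, and with execution_direction ==
   EXEC_REVERSE the reverse variants BTHR_RCONT and BTHR_RSTEP are
   requested instead.  record_btrace_step_thread below consumes exactly
   these four flags.  */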
1786
1787 /* Find a thread to move. */
1788
1789 static struct thread_info *
1790 record_btrace_find_thread_to_move (ptid_t ptid)
1791 {
1792 struct thread_info *tp;
1793
1794 /* First check the parameter thread. */
1795 tp = find_thread_ptid (ptid);
1796 if (tp != NULL && (tp->btrace.flags & BTHR_MOVE) != 0)
1797 return tp;
1798
1799 /* Otherwise, find one other thread that has been resumed. */
1800 ALL_NON_EXITED_THREADS (tp)
1801 if ((tp->btrace.flags & BTHR_MOVE) != 0)
1802 return tp;
1803
1804 return NULL;
1805 }
1806
1807 /* Return a target_waitstatus indicating that we ran out of history. */
1808
1809 static struct target_waitstatus
1810 btrace_step_no_history (void)
1811 {
1812 struct target_waitstatus status;
1813
1814 status.kind = TARGET_WAITKIND_NO_HISTORY;
1815
1816 return status;
1817 }
1818
1819 /* Return a target_waitstatus indicating that a step finished. */
1820
1821 static struct target_waitstatus
1822 btrace_step_stopped (void)
1823 {
1824 struct target_waitstatus status;
1825
1826 status.kind = TARGET_WAITKIND_STOPPED;
1827 status.value.sig = GDB_SIGNAL_TRAP;
1828
1829 return status;
1830 }
1831
1832 /* Clear the record histories. */
1833
1834 static void
1835 record_btrace_clear_histories (struct btrace_thread_info *btinfo)
1836 {
1837 xfree (btinfo->insn_history);
1838 xfree (btinfo->call_history);
1839
1840 btinfo->insn_history = NULL;
1841 btinfo->call_history = NULL;
1842 }
1843
1844 /* Step a single thread. */
1845
1846 static struct target_waitstatus
1847 record_btrace_step_thread (struct thread_info *tp)
1848 {
1849 struct btrace_insn_iterator *replay, end;
1850 struct btrace_thread_info *btinfo;
1851 struct address_space *aspace;
1852 struct inferior *inf;
1853 enum btrace_thread_flag flags;
1854 unsigned int steps;
1855
1856 /* We can't step without an execution history. */
1857 if (btrace_is_empty (tp))
1858 return btrace_step_no_history ();
1859
1860 btinfo = &tp->btrace;
1861 replay = btinfo->replay;
1862
1863 flags = btinfo->flags & BTHR_MOVE;
1864 btinfo->flags &= ~BTHR_MOVE;
1865
1866 DEBUG ("stepping %d (%s): %u", tp->num, target_pid_to_str (tp->ptid), flags);
1867
1868 switch (flags)
1869 {
1870 default:
1871 internal_error (__FILE__, __LINE__, _("invalid stepping type."));
1872
1873 case BTHR_STEP:
1874 /* We're done if we're not replaying. */
1875 if (replay == NULL)
1876 return btrace_step_no_history ();
1877
1878 /* Skip gaps during replay. */
1879 do
1880 {
1881 steps = btrace_insn_next (replay, 1);
1882 if (steps == 0)
1883 {
1884 record_btrace_stop_replaying (tp);
1885 return btrace_step_no_history ();
1886 }
1887 }
1888 while (btrace_insn_get (replay) == NULL);
1889
1890 /* Determine the end of the instruction trace. */
1891 btrace_insn_end (&end, btinfo);
1892
1893 /* We stop replaying if we reached the end of the trace. */
1894 if (btrace_insn_cmp (replay, &end) == 0)
1895 record_btrace_stop_replaying (tp);
1896
1897 return btrace_step_stopped ();
1898
1899 case BTHR_RSTEP:
1900 /* Start replaying if we're not already doing so. */
1901 if (replay == NULL)
1902 replay = record_btrace_start_replaying (tp);
1903
1904 /* If we can't step any further, we reached the end of the history.
1905 Skip gaps during replay. */
1906 do
1907 {
1908 steps = btrace_insn_prev (replay, 1);
1909 if (steps == 0)
1910 return btrace_step_no_history ();
1911
1912 }
1913 while (btrace_insn_get (replay) == NULL);
1914
1915 return btrace_step_stopped ();
1916
1917 case BTHR_CONT:
1918 /* We're done if we're not replaying. */
1919 if (replay == NULL)
1920 return btrace_step_no_history ();
1921
1922 inf = find_inferior_ptid (tp->ptid);
1923 aspace = inf->aspace;
1924
1925 /* Determine the end of the instruction trace. */
1926 btrace_insn_end (&end, btinfo);
1927
1928 for (;;)
1929 {
1930 const struct btrace_insn *insn;
1931
1932 /* Skip gaps during replay. */
1933 do
1934 {
1935 steps = btrace_insn_next (replay, 1);
1936 if (steps == 0)
1937 {
1938 record_btrace_stop_replaying (tp);
1939 return btrace_step_no_history ();
1940 }
1941
1942 insn = btrace_insn_get (replay);
1943 }
1944 while (insn == NULL);
1945
1946 /* We stop replaying if we reached the end of the trace. */
1947 if (btrace_insn_cmp (replay, &end) == 0)
1948 {
1949 record_btrace_stop_replaying (tp);
1950 return btrace_step_no_history ();
1951 }
1952
1953 DEBUG ("stepping %d (%s) ... %s", tp->num,
1954 target_pid_to_str (tp->ptid),
1955 core_addr_to_string_nz (insn->pc));
1956
1957 if (breakpoint_here_p (aspace, insn->pc))
1958 return btrace_step_stopped ();
1959 }
1960
1961 case BTHR_RCONT:
1962 /* Start replaying if we're not already doing so. */
1963 if (replay == NULL)
1964 replay = record_btrace_start_replaying (tp);
1965
1966 inf = find_inferior_ptid (tp->ptid);
1967 aspace = inf->aspace;
1968
1969 for (;;)
1970 {
1971 const struct btrace_insn *insn;
1972
1973 /* If we can't step any further, we reached the end of the history.
1974 Skip gaps during replay. */
1975 do
1976 {
1977 steps = btrace_insn_prev (replay, 1);
1978 if (steps == 0)
1979 return btrace_step_no_history ();
1980
1981 insn = btrace_insn_get (replay);
1982 }
1983 while (insn == NULL);
1984
1985 DEBUG ("reverse-stepping %d (%s) ... %s", tp->num,
1986 target_pid_to_str (tp->ptid),
1987 core_addr_to_string_nz (insn->pc));
1988
1989 if (breakpoint_here_p (aspace, insn->pc))
1990 return btrace_step_stopped ();
1991 }
1992 }
1993 }
1994
1995 /* The to_wait method of target record-btrace. */
1996
1997 static ptid_t
1998 record_btrace_wait (struct target_ops *ops, ptid_t ptid,
1999 struct target_waitstatus *status, int options)
2000 {
2001 struct thread_info *tp, *other;
2002
2003 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);
2004
2005 /* As long as we're not replaying, just forward the request. */
2006 if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
2007 {
2008 ops = ops->beneath;
2009 return ops->to_wait (ops, ptid, status, options);
2010 }
2011
2012 /* Let's find a thread to move. */
2013 tp = record_btrace_find_thread_to_move (ptid);
2014 if (tp == NULL)
2015 {
2016 DEBUG ("wait %s: no thread", target_pid_to_str (ptid));
2017
2018 status->kind = TARGET_WAITKIND_IGNORE;
2019 return minus_one_ptid;
2020 }
2021
2022 /* We only move a single thread. We're not able to correlate threads. */
2023 *status = record_btrace_step_thread (tp);
2024
2025 /* Stop all other threads. */
2026 if (!non_stop)
2027 ALL_NON_EXITED_THREADS (other)
2028 other->btrace.flags &= ~BTHR_MOVE;
2029
2030 /* Start record histories anew from the current position. */
2031 record_btrace_clear_histories (&tp->btrace);
2032
2033 /* We moved the replay position but did not update registers. */
2034 registers_changed_ptid (tp->ptid);
2035
2036 return tp->ptid;
2037 }
2038
2039 /* The to_can_execute_reverse method of target record-btrace. */
2040
2041 static int
2042 record_btrace_can_execute_reverse (struct target_ops *self)
2043 {
2044 return 1;
2045 }
2046
2047 /* The to_decr_pc_after_break method of target record-btrace. */
2048
2049 static CORE_ADDR
2050 record_btrace_decr_pc_after_break (struct target_ops *ops,
2051 struct gdbarch *gdbarch)
2052 {
2053 /* When replaying, we do not actually execute the breakpoint instruction
2054 so there is no need to adjust the PC after hitting a breakpoint. */
2055 if (record_btrace_is_replaying (ops))
2056 return 0;
2057
2058 return ops->beneath->to_decr_pc_after_break (ops->beneath, gdbarch);
2059 }
2060
2061 /* The to_update_thread_list method of target record-btrace. */
2062
2063 static void
2064 record_btrace_update_thread_list (struct target_ops *ops)
2065 {
2066 /* We don't add or remove threads during replay. */
2067 if (record_btrace_is_replaying (ops))
2068 return;
2069
2070 /* Forward the request. */
2071 ops = ops->beneath;
2072 ops->to_update_thread_list (ops);
2073 }
2074
2075 /* The to_thread_alive method of target record-btrace. */
2076
2077 static int
2078 record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
2079 {
2080 /* We don't add or remove threads during replay. */
2081 if (record_btrace_is_replaying (ops))
2082 return find_thread_ptid (ptid) != NULL;
2083
2084 /* Forward the request. */
2085 ops = ops->beneath;
2086 return ops->to_thread_alive (ops, ptid);
2087 }
2088
2089 /* Set the replay branch trace instruction iterator. If IT is NULL, replay
2090 is stopped. */
2091
2092 static void
2093 record_btrace_set_replay (struct thread_info *tp,
2094 const struct btrace_insn_iterator *it)
2095 {
2096 struct btrace_thread_info *btinfo;
2097
2098 btinfo = &tp->btrace;
2099
2100 if (it == NULL || it->function == NULL)
2101 record_btrace_stop_replaying (tp);
2102 else
2103 {
2104 if (btinfo->replay == NULL)
2105 record_btrace_start_replaying (tp);
2106 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
2107 return;
2108
2109 *btinfo->replay = *it;
2110 registers_changed_ptid (tp->ptid);
2111 }
2112
2113 /* Start anew from the new replay position. */
2114 record_btrace_clear_histories (btinfo);
2115 }
2116
2117 /* The to_goto_record_begin method of target record-btrace. */
2118
2119 static void
2120 record_btrace_goto_begin (struct target_ops *self)
2121 {
2122 struct thread_info *tp;
2123 struct btrace_insn_iterator begin;
2124
2125 tp = require_btrace_thread ();
2126
2127 btrace_insn_begin (&begin, &tp->btrace);
2128 record_btrace_set_replay (tp, &begin);
2129
2130 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
2131 }
2132
2133 /* The to_goto_record_end method of target record-btrace. */
2134
2135 static void
2136 record_btrace_goto_end (struct target_ops *ops)
2137 {
2138 struct thread_info *tp;
2139
2140 tp = require_btrace_thread ();
2141
2142 record_btrace_set_replay (tp, NULL);
2143
2144 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
2145 }
2146
2147 /* The to_goto_record method of target record-btrace. */
2148
2149 static void
2150 record_btrace_goto (struct target_ops *self, ULONGEST insn)
2151 {
2152 struct thread_info *tp;
2153 struct btrace_insn_iterator it;
2154 unsigned int number;
2155 int found;
2156
2157 number = insn;
2158
2159 /* Check for wrap-arounds: NUMBER truncates the 64-bit INSN, so a mismatch means INSN is out of range. */
2160 if (number != insn)
2161 error (_("Instruction number out of range."));
2162
2163 tp = require_btrace_thread ();
2164
2165 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
2166 if (found == 0)
2167 error (_("No such instruction."));
2168
2169 record_btrace_set_replay (tp, &it);
2170
2171 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
2172 }
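
/* Usage sketch for the three goto methods above (the instruction number
   42 is a made-up example):

     (gdb) record goto begin
     (gdb) record goto 42
     (gdb) record goto end  */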
2173
2174 /* The to_execution_direction target method. */
2175
2176 static enum exec_direction_kind
2177 record_btrace_execution_direction (struct target_ops *self)
2178 {
2179 return record_btrace_resume_exec_dir;
2180 }
2181
2182 /* The to_prepare_to_generate_core target method. */
2183
2184 static void
2185 record_btrace_prepare_to_generate_core (struct target_ops *self)
2186 {
2187 record_btrace_generating_corefile = 1;
2188 }
2189
2190 /* The to_done_generating_core target method. */
2191
2192 static void
2193 record_btrace_done_generating_core (struct target_ops *self)
2194 {
2195 record_btrace_generating_corefile = 0;
2196 }
2197
2198 /* Initialize the record-btrace target ops. */
2199
2200 static void
2201 init_record_btrace_ops (void)
2202 {
2203 struct target_ops *ops;
2204
2205 ops = &record_btrace_ops;
2206 ops->to_shortname = "record-btrace";
2207 ops->to_longname = "Branch tracing target";
2208 ops->to_doc = "Collect control-flow trace and provide the execution history.";
2209 ops->to_open = record_btrace_open;
2210 ops->to_close = record_btrace_close;
2211 ops->to_async = record_btrace_async;
2212 ops->to_detach = record_detach;
2213 ops->to_disconnect = record_disconnect;
2214 ops->to_mourn_inferior = record_mourn_inferior;
2215 ops->to_kill = record_kill;
2216 ops->to_stop_recording = record_btrace_stop_recording;
2217 ops->to_info_record = record_btrace_info;
2218 ops->to_insn_history = record_btrace_insn_history;
2219 ops->to_insn_history_from = record_btrace_insn_history_from;
2220 ops->to_insn_history_range = record_btrace_insn_history_range;
2221 ops->to_call_history = record_btrace_call_history;
2222 ops->to_call_history_from = record_btrace_call_history_from;
2223 ops->to_call_history_range = record_btrace_call_history_range;
2224 ops->to_record_is_replaying = record_btrace_is_replaying;
2225 ops->to_xfer_partial = record_btrace_xfer_partial;
2226 ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
2227 ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
2228 ops->to_fetch_registers = record_btrace_fetch_registers;
2229 ops->to_store_registers = record_btrace_store_registers;
2230 ops->to_prepare_to_store = record_btrace_prepare_to_store;
2231 ops->to_get_unwinder = &record_btrace_to_get_unwinder;
2232 ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;
2233 ops->to_resume = record_btrace_resume;
2234 ops->to_wait = record_btrace_wait;
2235 ops->to_update_thread_list = record_btrace_update_thread_list;
2236 ops->to_thread_alive = record_btrace_thread_alive;
2237 ops->to_goto_record_begin = record_btrace_goto_begin;
2238 ops->to_goto_record_end = record_btrace_goto_end;
2239 ops->to_goto_record = record_btrace_goto;
2240 ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
2241 ops->to_decr_pc_after_break = record_btrace_decr_pc_after_break;
2242 ops->to_execution_direction = record_btrace_execution_direction;
2243 ops->to_prepare_to_generate_core = record_btrace_prepare_to_generate_core;
2244 ops->to_done_generating_core = record_btrace_done_generating_core;
2245 ops->to_stratum = record_stratum;
2246 ops->to_magic = OPS_MAGIC;
2247 }
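
/* Target methods not installed above fall back to the generated target
   delegates, which for most methods simply forward the request to the
   target beneath the record stratum.  */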
2248
2249 /* Start recording in BTS format. */
2250
2251 static void
2252 cmd_record_btrace_bts_start (char *args, int from_tty)
2253 {
2254 volatile struct gdb_exception exception;
2255
2256 if (args != NULL && *args != 0)
2257 error (_("Invalid argument."));
2258
2259 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2260
2261 TRY_CATCH (exception, RETURN_MASK_ALL)
2262 execute_command ("target record-btrace", from_tty);
2263
2264 if (exception.error != 0)
2265 {
2266 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2267 throw_exception (exception);
2268 }
2269 }
2270
2271 /* Alias for "target record". */
2272
2273 static void
2274 cmd_record_btrace_start (char *args, int from_tty)
2275 {
2276 volatile struct gdb_exception exception;
2277
2278 if (args != NULL && *args != 0)
2279 error (_("Invalid argument."));
2280
2281 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2282
2283 TRY_CATCH (exception, RETURN_MASK_ALL)
2284 execute_command ("target record-btrace", from_tty);
2285
2286 if (exception.error == 0)
2287 return;
2288
2289 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2290 throw_exception (exception);
2291 }
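
/* Both start commands above follow the same rollback pattern: select the
   BTS format, try to push the record-btrace target, and reset the format
   to BTRACE_FORMAT_NONE before rethrowing if that fails.  A failed start
   thus leaves the configuration unchanged.  */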
2292
2293 /* The "set record btrace" command. */
2294
2295 static void
2296 cmd_set_record_btrace (char *args, int from_tty)
2297 {
2298 cmd_show_list (set_record_btrace_cmdlist, from_tty, "");
2299 }
2300
2301 /* The "show record btrace" command. */
2302
2303 static void
2304 cmd_show_record_btrace (char *args, int from_tty)
2305 {
2306 cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
2307 }
2308
2309 /* The "show record btrace replay-memory-access" command. */
2310
2311 static void
2312 cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2313 struct cmd_list_element *c, const char *value)
2314 {
2315 fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
2316 replay_memory_access);
2317 }
2318
2319 /* The "set record btrace bts" command. */
2320
2321 static void
2322 cmd_set_record_btrace_bts (char *args, int from_tty)
2323 {
2324 printf_unfiltered (_("\"set record btrace bts\" must be followed "
2325 "by an apporpriate subcommand.\n"));
2326 help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
2327 all_commands, gdb_stdout);
2328 }
2329
2330 /* The "show record btrace bts" command. */
2331
2332 static void
2333 cmd_show_record_btrace_bts (char *args, int from_tty)
2334 {
2335 cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
2336 }
2337
2338 void _initialize_record_btrace (void);
2339
2340 /* Initialize btrace commands. */
2341
2342 void
2343 _initialize_record_btrace (void)
2344 {
2345 add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
2346 _("Start branch trace recording."), &record_btrace_cmdlist,
2347 "record btrace ", 0, &record_cmdlist);
2348 add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
2349
2350 add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
2351 _("\
2352 Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
2353 The processor stores a from/to record for each branch into a cyclic buffer.\n\
2354 This format may not be available on all processors."),
2355 &record_btrace_cmdlist);
2356 add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);
2357
2358 add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
2359 _("Set record options"), &set_record_btrace_cmdlist,
2360 "set record btrace ", 0, &set_record_cmdlist);
2361
2362 add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
2363 _("Show record options"), &show_record_btrace_cmdlist,
2364 "show record btrace ", 0, &show_record_cmdlist);
2365
2366 add_setshow_enum_cmd ("replay-memory-access", no_class,
2367 replay_memory_access_types, &replay_memory_access, _("\
2368 Set what memory accesses are allowed during replay."), _("\
2369 Show what memory accesses are allowed during replay."),
2370 _("Default is READ-ONLY.\n\n\
2371 The btrace record target does not trace data.\n\
2372 The memory therefore corresponds to the live target and not \
2373 to the current replay position.\n\n\
2374 When READ-ONLY, allow accesses to read-only memory during replay.\n\
2375 When READ-WRITE, allow accesses to read-only and read-write memory during \
2376 replay."),
2377 NULL, cmd_show_replay_memory_access,
2378 &set_record_btrace_cmdlist,
2379 &show_record_btrace_cmdlist);
2380
2381 add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
2382 _("Set record btrace bts options"),
2383 &set_record_btrace_bts_cmdlist,
2384 "set record btrace bts ", 0, &set_record_btrace_cmdlist);
2385
2386 add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
2387 _("Show record btrace bts options"),
2388 &show_record_btrace_bts_cmdlist,
2389 "show record btrace bts ", 0, &show_record_btrace_cmdlist);
2390
2391 add_setshow_uinteger_cmd ("buffer-size", no_class,
2392 &record_btrace_conf.bts.size,
2393 _("Set the record/replay bts buffer size."),
2394 _("Show the record/replay bts buffer size."), _("\
2395 When starting recording, request a trace buffer of this size. \
2396 The actual buffer size may differ from the requested size. \
2397 Use \"info record\" to see the actual buffer size.\n\n\
2398 Bigger buffers allow longer recording but it also takes more time to \
2399 process the recorded execution trace.\n\n\
2400 The trace buffer size may not be changed while recording."), NULL, NULL,
2401 &set_record_btrace_bts_cmdlist,
2402 &show_record_btrace_bts_cmdlist);
2403
2404 init_record_btrace_ops ();
2405 add_target (&record_btrace_ops);
2406
2407 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
2408 xcalloc, xfree);
2409
2410 record_btrace_conf.bts.size = 64 * 1024;
2411 }
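
/* An illustrative session using the commands registered above (buffer
   size and ordering are hypothetical):

     (gdb) set record btrace bts buffer-size 131072
     (gdb) record btrace bts
     (gdb) set record btrace replay-memory-access read-write
     (gdb) info record  */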