Fetch all registers before writing the core register notes.
[deliverable/binutils-gdb.git] / gdb / record-btrace.c
1 /* Branch trace support for GDB, the GNU debugger.
2
3 Copyright (C) 2013-2015 Free Software Foundation, Inc.
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include "record.h"
24 #include "gdbthread.h"
25 #include "target.h"
26 #include "gdbcmd.h"
27 #include "disasm.h"
28 #include "observer.h"
29 #include "cli/cli-utils.h"
30 #include "source.h"
31 #include "ui-out.h"
32 #include "symtab.h"
33 #include "filenames.h"
34 #include "regcache.h"
35 #include "frame-unwind.h"
36 #include "hashtab.h"
37 #include "infrun.h"
38 #include "event-loop.h"
39 #include "inf-loop.h"
40
/* The target_ops of record-btrace.  */
static struct target_ops record_btrace_ops;

/* A new thread observer enabling branch tracing for the new thread.  */
static struct observer *record_btrace_thread_observer;

/* Memory access types used in set/show record btrace replay-memory-access.  */
static const char replay_memory_access_read_only[] = "read-only";
static const char replay_memory_access_read_write[] = "read-write";
static const char *const replay_memory_access_types[] =
{
  replay_memory_access_read_only,
  replay_memory_access_read_write,
  NULL
};

/* The currently allowed replay memory access type.  Always points at one of
   the two arrays above, so it can be compared by pointer identity (see
   record_btrace_xfer_partial).  */
static const char *replay_memory_access = replay_memory_access_read_only;

/* Command lists for "set/show record btrace".  */
static struct cmd_list_element *set_record_btrace_cmdlist;
static struct cmd_list_element *show_record_btrace_cmdlist;

/* The execution direction of the last resume we got.  See record-full.c.  */
static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;

/* The async event handler for reverse/replay execution.  */
static struct async_event_handler *record_btrace_async_inferior_event_handler;

/* A flag indicating that we are currently generating a core file.  While
   set, the replay-mode restrictions on memory and register access are
   lifted so the full machine state can be dumped.  */
static int record_btrace_generating_corefile;

/* The current branch trace configuration.  */
static struct btrace_config record_btrace_conf;

/* Command list for "record btrace".  */
static struct cmd_list_element *record_btrace_cmdlist;

/* Command lists for "set/show record btrace bts".  */
static struct cmd_list_element *set_record_btrace_bts_cmdlist;
static struct cmd_list_element *show_record_btrace_bts_cmdlist;

/* Print a record-btrace debug message.  Use do ... while (0) to avoid
   ambiguities when used in if statements.  */

#define DEBUG(msg, args...) \
  do \
    { \
      if (record_debug != 0) \
	fprintf_unfiltered (gdb_stdlog, \
			    "[record-btrace] " msg "\n", ##args); \
    } \
  while (0)

95
96 /* Update the branch trace for the current thread and return a pointer to its
97 thread_info.
98
99 Throws an error if there is no thread or no trace. This function never
100 returns NULL. */
101
102 static struct thread_info *
103 require_btrace_thread (void)
104 {
105 struct thread_info *tp;
106
107 DEBUG ("require");
108
109 tp = find_thread_ptid (inferior_ptid);
110 if (tp == NULL)
111 error (_("No thread."));
112
113 btrace_fetch (tp);
114
115 if (btrace_is_empty (tp))
116 error (_("No trace."));
117
118 return tp;
119 }
120
121 /* Update the branch trace for the current thread and return a pointer to its
122 branch trace information struct.
123
124 Throws an error if there is no thread or no trace. This function never
125 returns NULL. */
126
127 static struct btrace_thread_info *
128 require_btrace (void)
129 {
130 struct thread_info *tp;
131
132 tp = require_btrace_thread ();
133
134 return &tp->btrace;
135 }
136
/* Enable branch tracing for one thread.  Warn on errors.  */

static void
record_btrace_enable_warn (struct thread_info *tp)
{
  TRY
    {
      btrace_enable (tp, &record_btrace_conf);
    }
  CATCH (error, RETURN_MASK_ERROR)
    {
      /* Enabling tracing for a new thread may fail; demote the error to a
	 warning so thread creation is not disturbed.  NOTE(review): the
	 exception variable `error' shadows the global error () function
	 within this block — confirm this is intentional.  */
      warning ("%s", error.message);
    }
  END_CATCH
}
152
/* Callback function to disable branch tracing for one thread.  */

static void
record_btrace_disable_callback (void *arg)
{
  /* ARG is the thread_info registered by record_btrace_open's cleanup.  */
  btrace_disable ((struct thread_info *) arg);
}
164
165 /* Enable automatic tracing of new threads. */
166
167 static void
168 record_btrace_auto_enable (void)
169 {
170 DEBUG ("attach thread observer");
171
172 record_btrace_thread_observer
173 = observer_attach_new_thread (record_btrace_enable_warn);
174 }
175
176 /* Disable automatic tracing of new threads. */
177
178 static void
179 record_btrace_auto_disable (void)
180 {
181 /* The observer may have been detached, already. */
182 if (record_btrace_thread_observer == NULL)
183 return;
184
185 DEBUG ("detach thread observer");
186
187 observer_detach_new_thread (record_btrace_thread_observer);
188 record_btrace_thread_observer = NULL;
189 }
190
/* The record-btrace async event handler function.  */

static void
record_btrace_handle_async_inferior_event (gdb_client_data data)
{
  /* DATA is unused; simply kick the inferior event loop.  */
  inferior_event_handler (INF_REG_EVENT, NULL);
}
198
/* The to_open method of target record-btrace.

   ARGS is an optional thread-number list restricting which threads get
   branch tracing enabled; an empty/NULL list means all threads.  */

static void
record_btrace_open (const char *args, int from_tty)
{
  struct cleanup *disable_chain;
  struct thread_info *tp;

  DEBUG ("open");

  record_preopen ();

  /* Branch tracing requires a running program.  */
  if (!target_has_execution)
    error (_("The program is not being run."));

  if (non_stop)
    error (_("Record btrace can't debug inferior in non-stop mode."));

  gdb_assert (record_btrace_thread_observer == NULL);

  /* Enable tracing per thread; register a cleanup per enabled thread so
     that everything is disabled again if a later btrace_enable throws.  */
  disable_chain = make_cleanup (null_cleanup, NULL);
  ALL_NON_EXITED_THREADS (tp)
    if (args == NULL || *args == 0 || number_is_in_list (args, tp->num))
      {
	btrace_enable (tp, &record_btrace_conf);

	make_cleanup (record_btrace_disable_callback, tp);
      }

  record_btrace_auto_enable ();

  push_target (&record_btrace_ops);

  record_btrace_async_inferior_event_handler
    = create_async_event_handler (record_btrace_handle_async_inferior_event,
				  NULL);
  record_btrace_generating_corefile = 0;

  observer_notify_record_changed (current_inferior (), 1);

  /* Success — keep tracing enabled by dropping the disable cleanups.  */
  discard_cleanups (disable_chain);
}
241
/* The to_stop_recording method of target record-btrace.  */

static void
record_btrace_stop_recording (struct target_ops *self)
{
  struct thread_info *tp;

  DEBUG ("stop recording");

  /* Stop tracing new threads before disabling the existing ones.  */
  record_btrace_auto_disable ();

  /* Only disable threads that actually have tracing enabled (non-NULL
     btrace target).  */
  ALL_NON_EXITED_THREADS (tp)
    if (tp->btrace.target != NULL)
      btrace_disable (tp);
}
257
/* The to_close method of target record-btrace.  */

static void
record_btrace_close (struct target_ops *self)
{
  struct thread_info *tp;

  /* Tear down the async handler installed by record_btrace_open.  */
  if (record_btrace_async_inferior_event_handler != NULL)
    delete_async_event_handler (&record_btrace_async_inferior_event_handler);

  /* Make sure automatic recording gets disabled even if we did not stop
     recording before closing the record-btrace target.  */
  record_btrace_auto_disable ();

  /* We should have already stopped recording.
     Tear down btrace in case we have not.  */
  ALL_NON_EXITED_THREADS (tp)
    btrace_teardown (tp);
}
277
278 /* The to_async method of target record-btrace. */
279
280 static void
281 record_btrace_async (struct target_ops *ops,
282 void (*callback) (enum inferior_event_type event_type,
283 void *context),
284 void *context)
285 {
286 if (callback != NULL)
287 mark_async_event_handler (record_btrace_async_inferior_event_handler);
288 else
289 clear_async_event_handler (record_btrace_async_inferior_event_handler);
290
291 ops->beneath->to_async (ops->beneath, callback, context);
292 }
293
/* Adjust *SIZE down to the largest binary unit that divides it evenly and
   return the matching human-readable suffix ("GB", "MB", "kB", or "" when
   *SIZE is not a multiple of 1kB).  A zero size is left unchanged and gets
   no suffix — previously it fell into the GB branch and would have been
   reported as "0GB".  */

static const char *
record_btrace_adjust_size (unsigned int *size)
{
  unsigned int sz;

  sz = *size;

  /* Zero is divisible by everything; bail out early rather than claiming
     it is a round number of gigabytes.  */
  if (sz == 0)
    return "";

  if ((sz & ((1u << 30) - 1)) == 0)
    {
      *size = sz >> 30;
      return "GB";
    }
  else if ((sz & ((1u << 20) - 1)) == 0)
    {
      *size = sz >> 20;
      return "MB";
    }
  else if ((sz & ((1u << 10) - 1)) == 0)
    {
      *size = sz >> 10;
      return "kB";
    }
  else
    return "";
}
321
322 /* Print a BTS configuration. */
323
324 static void
325 record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
326 {
327 const char *suffix;
328 unsigned int size;
329
330 size = conf->size;
331 if (size > 0)
332 {
333 suffix = record_btrace_adjust_size (&size);
334 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
335 }
336 }
337
338 /* Print a branch tracing configuration. */
339
340 static void
341 record_btrace_print_conf (const struct btrace_config *conf)
342 {
343 printf_unfiltered (_("Recording format: %s.\n"),
344 btrace_format_string (conf->format));
345
346 switch (conf->format)
347 {
348 case BTRACE_FORMAT_NONE:
349 return;
350
351 case BTRACE_FORMAT_BTS:
352 record_btrace_print_bts_conf (&conf->bts);
353 return;
354 }
355
356 internal_error (__FILE__, __LINE__, _("Unkown branch trace format."));
357 }
358
/* The to_info_record method of target record-btrace.

   Prints the recording configuration and the number of recorded
   instructions, function segments, and decode gaps for the current
   thread.  */

static void
record_btrace_info (struct target_ops *self)
{
  struct btrace_thread_info *btinfo;
  const struct btrace_config *conf;
  struct thread_info *tp;
  unsigned int insns, calls, gaps;

  DEBUG ("info");

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  btinfo = &tp->btrace;

  conf = btrace_conf (btinfo);
  if (conf != NULL)
    record_btrace_print_conf (conf);

  /* Bring the trace up to date before counting.  */
  btrace_fetch (tp);

  insns = 0;
  calls = 0;
  gaps = 0;

  if (!btrace_is_empty (tp))
    {
      struct btrace_call_iterator call;
      struct btrace_insn_iterator insn;

      /* The number of the last call segment is the call count.  */
      btrace_call_end (&call, btinfo);
      btrace_call_prev (&call, 1);
      calls = btrace_call_number (&call);

      btrace_insn_end (&insn, btinfo);

      insns = btrace_insn_number (&insn);
      if (insns != 0)
	{
	  /* The last instruction does not really belong to the trace.  */
	  insns -= 1;
	}
      else
	{
	  unsigned int steps;

	  /* Skip gaps at the end.  */
	  do
	    {
	      steps = btrace_insn_prev (&insn, 1);
	      if (steps == 0)
		break;

	      insns = btrace_insn_number (&insn);
	    }
	  while (insns == 0);
	}

      gaps = btinfo->ngaps;
    }

  printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
		       "for thread %d (%s).\n"), insns, calls, gaps,
		     tp->num, target_pid_to_str (tp->ptid));

  if (btrace_is_replaying (tp))
    printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
		       btrace_insn_number (btinfo->replay));
}
431
/* Print a decode error as "[decode error (<code>): <text>]".  The error
   text depends on the trace FORMAT; unrecognized codes print as
   "unknown".  */

static void
btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
			    enum btrace_format format)
{
  const char *errstr;
  int is_error;

  errstr = _("unknown");
  is_error = 1;

  /* Map the format-specific error code to a message.  */
  switch (format)
    {
    default:
      break;

    case BTRACE_FORMAT_BTS:
      switch (errcode)
	{
	default:
	  break;

	case BDE_BTS_OVERFLOW:
	  errstr = _("instruction overflow");
	  break;

	case BDE_BTS_INSN_SIZE:
	  errstr = _("unknown instruction");
	  break;
	}
      break;
    }

  ui_out_text (uiout, _("["));
  /* NOTE(review): is_error is never cleared in this version, so the
     "decode error (N): " prefix is always printed.  */
  if (is_error)
    {
      ui_out_text (uiout, _("decode error ("));
      ui_out_field_int (uiout, "errcode", errcode);
      ui_out_text (uiout, _("): "));
    }
  ui_out_text (uiout, errstr);
  ui_out_text (uiout, _("]\n"));
}
476
/* Print an unsigned int FLD/VAL pair through UIOUT, since ui_out has no
   native unsigned field printer.  */

static void
ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
{
  ui_out_field_fmt (uiout, fld, "%u", val);
}
484
/* Disassemble a section of the recorded instruction trace.

   Prints every instruction in [BEGIN; END), one per line, as
   "<index>\t<disassembly>".  A NULL instruction marks a decode gap and is
   rendered via btrace_ui_out_decode_error instead.  */

static void
btrace_insn_history (struct ui_out *uiout,
		     const struct btrace_thread_info *btinfo,
		     const struct btrace_insn_iterator *begin,
		     const struct btrace_insn_iterator *end, int flags)
{
  struct gdbarch *gdbarch;
  struct btrace_insn_iterator it;

  DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
	 btrace_insn_number (end));

  gdbarch = target_gdbarch ();

  for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
    {
      const struct btrace_insn *insn;

      insn = btrace_insn_get (&it);

      /* A NULL instruction indicates a gap in the trace.  */
      if (insn == NULL)
	{
	  const struct btrace_config *conf;

	  conf = btrace_conf (btinfo);

	  /* We have trace so we must have a configuration.  */
	  gdb_assert (conf != NULL);

	  btrace_ui_out_decode_error (uiout, it.function->errcode,
				      conf->format);
	}
      else
	{
	  /* Print the instruction index.  */
	  ui_out_field_uint (uiout, "index", btrace_insn_number (&it));
	  ui_out_text (uiout, "\t");

	  /* Disassembly with '/m' flag may not produce the expected result.
	     See PR gdb/11833.  */
	  gdb_disassembly (gdbarch, uiout, NULL, flags, 1, insn->pc,
			   insn->pc + 1);
	}
    }
}
533
/* The to_insn_history method of target record-btrace.

   Shows abs (SIZE) instructions; SIZE < 0 means going backwards.  A
   repeated invocation continues from the previously shown window, which
   is remembered in BTINFO->insn_history.  */

static void
record_btrace_insn_history (struct target_ops *self, int size, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
						       "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  btinfo = require_btrace ();
  history = btinfo->insn_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("insn-history (0x%x): %d", flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
	 start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
	begin = *replay;
      else
	btrace_insn_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
	 expand in the other direction, as well, to fill up any remaining
	 context.  */
      end = begin;
      if (size < 0)
	{
	  /* We want the current position covered, as well.  */
	  covered = btrace_insn_next (&end, 1);
	  covered += btrace_insn_prev (&begin, context - covered);
	  covered += btrace_insn_next (&end, context - covered);
	}
      else
	{
	  covered = btrace_insn_next (&end, context);
	  covered += btrace_insn_prev (&begin, context - covered);
	}
    }
  else
    {
      /* Continue from the last shown window, extending backwards or
	 forwards depending on the sign of SIZE.  */
      begin = history->begin;
      end = history->end;

      DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
	     btrace_insn_number (&begin), btrace_insn_number (&end));

      if (size < 0)
	{
	  end = begin;
	  covered = btrace_insn_prev (&begin, context);
	}
      else
	{
	  begin = end;
	  covered = btrace_insn_next (&end, context);
	}
    }

  if (covered > 0)
    btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
	printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
	printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  /* Remember the window for the next invocation.  */
  btrace_set_insn_history (btinfo, &begin, &end);
  do_cleanups (uiout_cleanup);
}
619
/* The to_insn_history_range method of target record-btrace.

   Shows the instructions numbered [FROM; TO], both inclusive.  An
   out-of-range TO is silently truncated to the end of the trace.  */

static void
record_btrace_insn_history_range (struct target_ops *self,
				  ULONGEST from, ULONGEST to, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
						       "insn history");
  low = from;
  high = to;

  DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);

  /* Check for wrap-arounds.  Instruction numbers are unsigned int; if the
     ULONGEST arguments did not survive the narrowing, reject them.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_insn_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_insn_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_insn_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_insn_next (&end, 1);
    }

  btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_insn_history (btinfo, &begin, &end);

  do_cleanups (uiout_cleanup);
}
672
673 /* The to_insn_history_from method of target record-btrace. */
674
675 static void
676 record_btrace_insn_history_from (struct target_ops *self,
677 ULONGEST from, int size, int flags)
678 {
679 ULONGEST begin, end, context;
680
681 context = abs (size);
682 if (context == 0)
683 error (_("Bad record instruction-history-size."));
684
685 if (size < 0)
686 {
687 end = from;
688
689 if (from < context)
690 begin = 0;
691 else
692 begin = from - context + 1;
693 }
694 else
695 {
696 begin = from;
697 end = from + context - 1;
698
699 /* Check for wrap-around. */
700 if (end < begin)
701 end = ULONGEST_MAX;
702 }
703
704 record_btrace_insn_history_range (self, begin, end, flags);
705 }
706
/* Print the instruction number range for a function call history line as
   "<first>,<last>" (both inclusive).  */

static void
btrace_call_history_insn_range (struct ui_out *uiout,
				const struct btrace_function *bfun)
{
  unsigned int begin, end, size;

  /* Function segments in the history always contain instructions.  */
  size = VEC_length (btrace_insn_s, bfun->insn);
  gdb_assert (size > 0);

  begin = bfun->insn_offset;
  end = begin + size - 1;

  ui_out_field_uint (uiout, "insn begin", begin);
  ui_out_text (uiout, ",");
  ui_out_field_uint (uiout, "insn end", end);
}
725
/* Compute the lowest and highest source line for the instructions in BFUN
   and return them in PBEGIN and PEND.
   Ignore instructions that can't be mapped to BFUN, e.g. instructions that
   result from inlining or macro expansion.

   With no symbol or no mappable instructions, *PBEGIN ends up greater
   than *PEND (INT_MAX/INT_MIN), which callers use as the "no range"
   signal.  */

static void
btrace_compute_src_line_range (const struct btrace_function *bfun,
			       int *pbegin, int *pend)
{
  struct btrace_insn *insn;
  struct symtab *symtab;
  struct symbol *sym;
  unsigned int idx;
  int begin, end;

  begin = INT_MAX;
  end = INT_MIN;

  sym = bfun->sym;
  if (sym == NULL)
    goto out;

  symtab = symbol_symtab (sym);

  for (idx = 0; VEC_iterate (btrace_insn_s, bfun->insn, idx, insn); ++idx)
    {
      struct symtab_and_line sal;

      sal = find_pc_line (insn->pc, 0);
      /* Skip lines belonging to a different symtab (inlining/macros).  */
      if (sal.symtab != symtab || sal.line == 0)
	continue;

      begin = min (begin, sal.line);
      end = max (end, sal.line);
    }

 out:
  *pbegin = begin;
  *pend = end;
}
766
/* Print the source line information for a function call history line as
   "<file>:<min>,<max>", omitting the line part (or the max) when no or
   only one line is known.  */

static void
btrace_call_history_src_line (struct ui_out *uiout,
			      const struct btrace_function *bfun)
{
  struct symbol *sym;
  int begin, end;

  sym = bfun->sym;
  if (sym == NULL)
    return;

  ui_out_field_string (uiout, "file",
		       symtab_to_filename_for_display (symbol_symtab (sym)));

  btrace_compute_src_line_range (bfun, &begin, &end);
  /* end < begin means no line information was found.  */
  if (end < begin)
    return;

  ui_out_text (uiout, ":");
  ui_out_field_int (uiout, "min line", begin);

  /* Only a single line — no range to print.  */
  if (end == begin)
    return;

  ui_out_text (uiout, ",");
  ui_out_field_int (uiout, "max line", end);
}
796
797 /* Get the name of a branch trace function. */
798
799 static const char *
800 btrace_get_bfun_name (const struct btrace_function *bfun)
801 {
802 struct minimal_symbol *msym;
803 struct symbol *sym;
804
805 if (bfun == NULL)
806 return "??";
807
808 msym = bfun->msym;
809 sym = bfun->sym;
810
811 if (sym != NULL)
812 return SYMBOL_PRINT_NAME (sym);
813 else if (msym != NULL)
814 return MSYMBOL_PRINT_NAME (msym);
815 else
816 return "??";
817 }
818
/* Disassemble a section of the recorded function trace.

   Prints every function segment in [BEGIN; END) as
   "<index>\t[indent]<name>[\tinst <range>][\tat <src>]", depending on
   FLAGS.  Gap segments (errcode != 0) print a decode error instead.  */

static void
btrace_call_history (struct ui_out *uiout,
		     const struct btrace_thread_info *btinfo,
		     const struct btrace_call_iterator *begin,
		     const struct btrace_call_iterator *end,
		     enum record_print_flag flags)
{
  struct btrace_call_iterator it;

  DEBUG ("ftrace (0x%x): [%u; %u)", flags, btrace_call_number (begin),
	 btrace_call_number (end));

  for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
    {
      const struct btrace_function *bfun;
      struct minimal_symbol *msym;
      struct symbol *sym;

      bfun = btrace_call_get (&it);
      sym = bfun->sym;
      msym = bfun->msym;

      /* Print the function index.  */
      ui_out_field_uint (uiout, "index", bfun->number);
      ui_out_text (uiout, "\t");

      /* Indicate gaps in the trace.  */
      if (bfun->errcode != 0)
	{
	  const struct btrace_config *conf;

	  conf = btrace_conf (btinfo);

	  /* We have trace so we must have a configuration.  */
	  gdb_assert (conf != NULL);

	  btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);

	  continue;
	}

      /* Indent by call depth (segment level plus the thread's base
	 level).  */
      if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
	{
	  int level = bfun->level + btinfo->level, i;

	  for (i = 0; i < level; ++i)
	    ui_out_text (uiout, "  ");
	}

      if (sym != NULL)
	ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (sym));
      else if (msym != NULL)
	ui_out_field_string (uiout, "function", MSYMBOL_PRINT_NAME (msym));
      else if (!ui_out_is_mi_like_p (uiout))
	ui_out_field_string (uiout, "function", "??");

      if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
	{
	  ui_out_text (uiout, _("\tinst "));
	  btrace_call_history_insn_range (uiout, bfun);
	}

      if ((flags & RECORD_PRINT_SRC_LINE) != 0)
	{
	  ui_out_text (uiout, _("\tat "));
	  btrace_call_history_src_line (uiout, bfun);
	}

      ui_out_text (uiout, "\n");
    }
}
892
/* The to_call_history method of target record-btrace.

   Shows abs (SIZE) function segments; SIZE < 0 means going backwards.  A
   repeated invocation continues from the previously shown window, which
   is remembered in BTINFO->call_history.  */

static void
record_btrace_call_history (struct target_ops *self, int size, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  /* NOTE(review): tuple id "insn history" looks copy-pasted from the
     insn-history code — the range variant below uses "func history".
     Confirm before changing; MI consumers may depend on it.  */
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
						       "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  btinfo = require_btrace ();
  history = btinfo->call_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("call-history (0x%x): %d", flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
	 start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
	{
	  begin.function = replay->function;
	  begin.btinfo = btinfo;
	}
      else
	btrace_call_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
	 expand in the other direction, as well, to fill up any remaining
	 context.  */
      end = begin;
      if (size < 0)
	{
	  /* We want the current position covered, as well.  */
	  covered = btrace_call_next (&end, 1);
	  covered += btrace_call_prev (&begin, context - covered);
	  covered += btrace_call_next (&end, context - covered);
	}
      else
	{
	  covered = btrace_call_next (&end, context);
	  covered += btrace_call_prev (&begin, context- covered);
	}
    }
  else
    {
      /* Continue from the last shown window, extending backwards or
	 forwards depending on the sign of SIZE.  */
      begin = history->begin;
      end = history->end;

      DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", flags, size,
	     btrace_call_number (&begin), btrace_call_number (&end));

      if (size < 0)
	{
	  end = begin;
	  covered = btrace_call_prev (&begin, context);
	}
      else
	{
	  begin = end;
	  covered = btrace_call_next (&end, context);
	}
    }

  if (covered > 0)
    btrace_call_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
	printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
	printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  /* Remember the window for the next invocation.  */
  btrace_set_call_history (btinfo, &begin, &end);
  do_cleanups (uiout_cleanup);
}
981
/* The to_call_history_range method of target record-btrace.

   Shows the function segments numbered [FROM; TO], both inclusive.  An
   out-of-range TO is silently truncated to the end of the trace.  */

static void
record_btrace_call_history_range (struct target_ops *self,
				  ULONGEST from, ULONGEST to, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
						       "func history");
  low = from;
  high = to;

  DEBUG ("call-history (0x%x): [%u; %u)", flags, low, high);

  /* Check for wrap-arounds.  Call numbers are unsigned int; if the
     ULONGEST arguments did not survive the narrowing, reject them.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_call_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_call_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_call_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_call_next (&end, 1);
    }

  btrace_call_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_call_history (btinfo, &begin, &end);

  do_cleanups (uiout_cleanup);
}
1034
1035 /* The to_call_history_from method of target record-btrace. */
1036
1037 static void
1038 record_btrace_call_history_from (struct target_ops *self,
1039 ULONGEST from, int size, int flags)
1040 {
1041 ULONGEST begin, end, context;
1042
1043 context = abs (size);
1044 if (context == 0)
1045 error (_("Bad record function-call-history-size."));
1046
1047 if (size < 0)
1048 {
1049 end = from;
1050
1051 if (from < context)
1052 begin = 0;
1053 else
1054 begin = from - context + 1;
1055 }
1056 else
1057 {
1058 begin = from;
1059 end = from + context - 1;
1060
1061 /* Check for wrap-around. */
1062 if (end < begin)
1063 end = ULONGEST_MAX;
1064 }
1065
1066 record_btrace_call_history_range (self, begin, end, flags);
1067 }
1068
/* The to_record_is_replaying method of target record-btrace.

   Returns non-zero if any live thread is currently replaying.  */

static int
record_btrace_is_replaying (struct target_ops *self)
{
  struct thread_info *tp;

  ALL_NON_EXITED_THREADS (tp)
    if (btrace_is_replaying (tp))
      return 1;

  return 0;
}
1082
1083 /* The to_xfer_partial method of target record-btrace. */
1084
1085 static enum target_xfer_status
1086 record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
1087 const char *annex, gdb_byte *readbuf,
1088 const gdb_byte *writebuf, ULONGEST offset,
1089 ULONGEST len, ULONGEST *xfered_len)
1090 {
1091 struct target_ops *t;
1092
1093 /* Filter out requests that don't make sense during replay. */
1094 if (replay_memory_access == replay_memory_access_read_only
1095 && !record_btrace_generating_corefile
1096 && record_btrace_is_replaying (ops))
1097 {
1098 switch (object)
1099 {
1100 case TARGET_OBJECT_MEMORY:
1101 {
1102 struct target_section *section;
1103
1104 /* We do not allow writing memory in general. */
1105 if (writebuf != NULL)
1106 {
1107 *xfered_len = len;
1108 return TARGET_XFER_UNAVAILABLE;
1109 }
1110
1111 /* We allow reading readonly memory. */
1112 section = target_section_by_addr (ops, offset);
1113 if (section != NULL)
1114 {
1115 /* Check if the section we found is readonly. */
1116 if ((bfd_get_section_flags (section->the_bfd_section->owner,
1117 section->the_bfd_section)
1118 & SEC_READONLY) != 0)
1119 {
1120 /* Truncate the request to fit into this section. */
1121 len = min (len, section->endaddr - offset);
1122 break;
1123 }
1124 }
1125
1126 *xfered_len = len;
1127 return TARGET_XFER_UNAVAILABLE;
1128 }
1129 }
1130 }
1131
1132 /* Forward the request. */
1133 ops = ops->beneath;
1134 return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
1135 offset, len, xfered_len);
1136 }
1137
/* The to_insert_breakpoint method of target record-btrace.

   Temporarily lifts the replay memory-access restriction so the
   breakpoint instruction can be written, restoring it on all paths.  */

static int
record_btrace_insert_breakpoint (struct target_ops *ops,
				 struct gdbarch *gdbarch,
				 struct bp_target_info *bp_tgt)
{
  const char *old;
  int ret;

  /* Inserting breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  TRY
    {
      ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      /* Restore the access mode before re-throwing.  */
      replay_memory_access = old;
      throw_exception (except);
    }
  END_CATCH
  replay_memory_access = old;

  return ret;
}
1168
/* The to_remove_breakpoint method of target record-btrace.

   Temporarily lifts the replay memory-access restriction so the original
   instruction can be written back, restoring it on all paths.  */

static int
record_btrace_remove_breakpoint (struct target_ops *ops,
				 struct gdbarch *gdbarch,
				 struct bp_target_info *bp_tgt)
{
  const char *old;
  int ret;

  /* Removing breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  TRY
    {
      ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt);
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      /* Restore the access mode before re-throwing.  */
      replay_memory_access = old;
      throw_exception (except);
    }
  END_CATCH
  replay_memory_access = old;

  return ret;
}
1199
/* The to_fetch_registers method of target record-btrace.

   While replaying, only the PC is known (taken from the current replay
   instruction); all other register requests are silently ignored.  When
   not replaying (or when generating a core file), the request is
   forwarded to the target beneath.  */

static void
record_btrace_fetch_registers (struct target_ops *ops,
			       struct regcache *regcache, int regno)
{
  struct btrace_insn_iterator *replay;
  struct thread_info *tp;

  tp = find_thread_ptid (inferior_ptid);
  gdb_assert (tp != NULL);

  replay = tp->btrace.replay;
  if (replay != NULL && !record_btrace_generating_corefile)
    {
      const struct btrace_insn *insn;
      struct gdbarch *gdbarch;
      int pcreg;

      gdbarch = get_regcache_arch (regcache);
      pcreg = gdbarch_pc_regnum (gdbarch);
      if (pcreg < 0)
	return;

      /* We can only provide the PC register.  */
      if (regno >= 0 && regno != pcreg)
	return;

      insn = btrace_insn_get (replay);
      gdb_assert (insn != NULL);

      /* NOTE(review): when REGNO is -1 (fetch all), -1 is passed straight
	 to regcache_raw_supply here — verify that is handled/intended.  */
      regcache_raw_supply (regcache, regno, &insn->pc);
    }
  else
    {
      struct target_ops *t = ops->beneath;

      t->to_fetch_registers (t, regcache, regno);
    }
}
1240
1241 /* The to_store_registers method of target record-btrace. */
1242
1243 static void
1244 record_btrace_store_registers (struct target_ops *ops,
1245 struct regcache *regcache, int regno)
1246 {
1247 struct target_ops *t;
1248
1249 if (!record_btrace_generating_corefile && record_btrace_is_replaying (ops))
1250 error (_("This record target does not allow writing registers."));
1251
1252 gdb_assert (may_write_registers != 0);
1253
1254 t = ops->beneath;
1255 t->to_store_registers (t, regcache, regno);
1256 }
1257
1258 /* The to_prepare_to_store method of target record-btrace. */
1259
1260 static void
1261 record_btrace_prepare_to_store (struct target_ops *ops,
1262 struct regcache *regcache)
1263 {
1264 struct target_ops *t;
1265
1266 if (!record_btrace_generating_corefile && record_btrace_is_replaying (ops))
1267 return;
1268
1269 t = ops->beneath;
1270 t->to_prepare_to_store (t, regcache);
1271 }
1272
/* The branch trace frame cache.  One entry exists per frame handled by
   the btrace unwinders; entries are kept in the bfcache hash table.  */

struct btrace_frame_cache
{
  /* The thread this frame belongs to.  */
  struct thread_info *tp;

  /* The frame info; also serves as the hash key (see bfcache_hash).  */
  struct frame_info *frame;

  /* The branch trace function segment this frame represents.  */
  const struct btrace_function *bfun;
};
1286
1287 /* A struct btrace_frame_cache hash table indexed by NEXT. */
1288
1289 static htab_t bfcache;
1290
1291 /* hash_f for htab_create_alloc of bfcache. */
1292
1293 static hashval_t
1294 bfcache_hash (const void *arg)
1295 {
1296 const struct btrace_frame_cache *cache = arg;
1297
1298 return htab_hash_pointer (cache->frame);
1299 }
1300
1301 /* eq_f for htab_create_alloc of bfcache. */
1302
1303 static int
1304 bfcache_eq (const void *arg1, const void *arg2)
1305 {
1306 const struct btrace_frame_cache *cache1 = arg1;
1307 const struct btrace_frame_cache *cache2 = arg2;
1308
1309 return cache1->frame == cache2->frame;
1310 }
1311
1312 /* Create a new btrace frame cache. */
1313
1314 static struct btrace_frame_cache *
1315 bfcache_new (struct frame_info *frame)
1316 {
1317 struct btrace_frame_cache *cache;
1318 void **slot;
1319
1320 cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
1321 cache->frame = frame;
1322
1323 slot = htab_find_slot (bfcache, cache, INSERT);
1324 gdb_assert (*slot == NULL);
1325 *slot = cache;
1326
1327 return cache;
1328 }
1329
1330 /* Extract the branch trace function from a branch trace frame. */
1331
1332 static const struct btrace_function *
1333 btrace_get_frame_function (struct frame_info *frame)
1334 {
1335 const struct btrace_frame_cache *cache;
1336 const struct btrace_function *bfun;
1337 struct btrace_frame_cache pattern;
1338 void **slot;
1339
1340 pattern.frame = frame;
1341
1342 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1343 if (slot == NULL)
1344 return NULL;
1345
1346 cache = *slot;
1347 return cache->bfun;
1348 }
1349
1350 /* Implement stop_reason method for record_btrace_frame_unwind. */
1351
1352 static enum unwind_stop_reason
1353 record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1354 void **this_cache)
1355 {
1356 const struct btrace_frame_cache *cache;
1357 const struct btrace_function *bfun;
1358
1359 cache = *this_cache;
1360 bfun = cache->bfun;
1361 gdb_assert (bfun != NULL);
1362
1363 if (bfun->up == NULL)
1364 return UNWIND_UNAVAILABLE;
1365
1366 return UNWIND_NO_REASON;
1367 }
1368
/* Implement this_id method for record_btrace_frame_unwind.  */

static void
record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
			     struct frame_id *this_id)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;
  CORE_ADDR code, special;

  cache = *this_cache;

  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  /* Walk back to the first segment of this function so every segment of
     the same function yields the same id.  */
  while (bfun->segment.prev != NULL)
    bfun = bfun->segment.prev;

  /* The stack is not available while replaying; build the id from the
     frame's function address and the segment number.  */
  code = get_frame_func (this_frame);
  special = bfun->number;

  *this_id = frame_id_build_unavailable_stack_special (code, special);

  DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
	 btrace_get_bfun_name (cache->bfun),
	 core_addr_to_string_nz (this_id->code_addr),
	 core_addr_to_string_nz (this_id->special_addr));
}
1397
/* Implement prev_register method for record_btrace_frame_unwind.

   Only the caller's PC can be reconstructed from the branch trace; any
   other register request throws NOT_AVAILABLE_ERROR.  */

static struct value *
record_btrace_frame_prev_register (struct frame_info *this_frame,
				   void **this_cache,
				   int regnum)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun, *caller;
  const struct btrace_insn *insn;
  struct gdbarch *gdbarch;
  CORE_ADDR pc;
  int pcreg;

  gdbarch = get_frame_arch (this_frame);
  pcreg = gdbarch_pc_regnum (gdbarch);
  if (pcreg < 0 || regnum != pcreg)
    throw_error (NOT_AVAILABLE_ERROR,
		 _("Registers are not available in btrace record history"));

  cache = *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  caller = bfun->up;
  if (caller == NULL)
    throw_error (NOT_AVAILABLE_ERROR,
		 _("No caller in btrace record history"));

  if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
    {
      /* The up link goes to the return site: the caller segment starts
	 with the instruction we will return to.  */
      insn = VEC_index (btrace_insn_s, caller->insn, 0);
      pc = insn->pc;
    }
  else
    {
      /* Otherwise we return to the instruction following the caller's
	 last instruction (presumably the call itself).  */
      insn = VEC_last (btrace_insn_s, caller->insn);
      pc = insn->pc;

      pc += gdb_insn_length (gdbarch, pc);
    }

  DEBUG ("[frame] unwound PC in %s on level %d: %s",
	 btrace_get_bfun_name (bfun), bfun->level,
	 core_addr_to_string_nz (pc));

  return frame_unwind_got_address (this_frame, regnum, pc);
}
1446
/* Implement sniffer method for record_btrace_frame_unwind.

   Claims the frame (and fills in the frame cache) when the current
   thread is replaying and a branch trace function segment can be
   associated with THIS_FRAME.  */

static int
record_btrace_frame_sniffer (const struct frame_unwind *self,
			     struct frame_info *this_frame,
			     void **this_cache)
{
  const struct btrace_function *bfun;
  struct btrace_frame_cache *cache;
  struct thread_info *tp;
  struct frame_info *next;

  /* THIS_FRAME does not contain a reference to its thread.  */
  tp = find_thread_ptid (inferior_ptid);
  gdb_assert (tp != NULL);

  bfun = NULL;
  next = get_next_frame (this_frame);
  if (next == NULL)
    {
      /* Innermost frame: use the current replay position, if any.  */
      const struct btrace_insn_iterator *replay;

      replay = tp->btrace.replay;
      if (replay != NULL)
	bfun = replay->function;
    }
  else
    {
      /* Outer frame: follow the callee's up link, unless the callee was
	 entered via a tail call (handled by the tailcall sniffer).  */
      const struct btrace_function *callee;

      callee = btrace_get_frame_function (next);
      if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
	bfun = callee->up;
    }

  if (bfun == NULL)
    return 0;

  DEBUG ("[frame] sniffed frame for %s on level %d",
	 btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = tp;
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}
1496
/* Implement sniffer method for record_btrace_tailcall_frame_unwind.

   Claims only frames whose callee was entered via a tail call.  */

static int
record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
				      struct frame_info *this_frame,
				      void **this_cache)
{
  const struct btrace_function *bfun, *callee;
  struct btrace_frame_cache *cache;
  struct frame_info *next;

  /* A tail call frame is never the innermost frame.  */
  next = get_next_frame (this_frame);
  if (next == NULL)
    return 0;

  callee = btrace_get_frame_function (next);
  if (callee == NULL)
    return 0;

  /* Leave frames that were not reached by a tail call to the normal
     btrace frame sniffer.  */
  if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
    return 0;

  bfun = callee->up;
  if (bfun == NULL)
    return 0;

  DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
	 btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = find_thread_ptid (inferior_ptid);
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}
1534
/* Implement the dealloc_cache method for the btrace frame unwinders.
   Removes the frame's entry from the bfcache hash table (the entry
   itself was obstack-allocated in bfcache_new).  */

static void
record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = this_cache;

  /* The entry was inserted in bfcache_new; it must still be present.  */
  slot = htab_find_slot (bfcache, cache, NO_INSERT);
  gdb_assert (slot != NULL);

  htab_remove_elt (bfcache, cache);
}
1548
/* btrace recording does not store previous memory content, nor the stack
   frames' content.  Any unwinding would return erroneous results as the
   stack contents no longer match the changed PC value restored from
   history.  Therefore this unwinder reports any possibly unwound
   registers as <unavailable>.  */
1554
/* The btrace unwinder for normal frames; installed via
   record_btrace_to_get_unwinder.  */

const struct frame_unwind record_btrace_frame_unwind =
{
  NORMAL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_frame_sniffer,
  record_btrace_frame_dealloc_cache
};
1565
/* The btrace unwinder for tail call frames; installed via
   record_btrace_to_get_tailcall_unwinder.  */

const struct frame_unwind record_btrace_tailcall_frame_unwind =
{
  TAILCALL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_tailcall_frame_sniffer,
  record_btrace_frame_dealloc_cache
};
1576
/* Implement the to_get_unwinder method.  */

static const struct frame_unwind *
record_btrace_to_get_unwinder (struct target_ops *self)
{
  /* Use the btrace unwinder, which reports registers other than the PC
     as unavailable.  */
  return &record_btrace_frame_unwind;
}
1584
/* Implement the to_get_tailcall_unwinder method.  */

static const struct frame_unwind *
record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
{
  /* Use the btrace tail call unwinder.  */
  return &record_btrace_tailcall_frame_unwind;
}
1592
/* Indicate that TP should be resumed according to FLAG.

   This only records the intent in TP's btrace flags; the actual
   stepping happens later, in record_btrace_wait.  */

static void
record_btrace_resume_thread (struct thread_info *tp,
			     enum btrace_thread_flag flag)
{
  struct btrace_thread_info *btinfo;

  DEBUG ("resuming %d (%s): %u", tp->num, target_pid_to_str (tp->ptid), flag);

  btinfo = &tp->btrace;

  /* A thread may have at most one pending move request.  */
  if ((btinfo->flags & BTHR_MOVE) != 0)
    error (_("Thread already moving."));

  /* Fetch the latest branch trace.  */
  btrace_fetch (tp);

  btinfo->flags |= flag;
}
1613
1614 /* Find the thread to resume given a PTID. */
1615
1616 static struct thread_info *
1617 record_btrace_find_resume_thread (ptid_t ptid)
1618 {
1619 struct thread_info *tp;
1620
1621 /* When asked to resume everything, we pick the current thread. */
1622 if (ptid_equal (minus_one_ptid, ptid) || ptid_is_pid (ptid))
1623 ptid = inferior_ptid;
1624
1625 return find_thread_ptid (ptid);
1626 }
1627
/* Start replaying a thread.  Returns the new replay iterator, which is
   also stored in TP's btrace info; NULL if TP has no trace.  */

static struct btrace_insn_iterator *
record_btrace_start_replaying (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay;
  struct btrace_thread_info *btinfo;
  int executing;

  btinfo = &tp->btrace;
  replay = NULL;

  /* We can't start replaying without trace.  */
  if (btinfo->begin == NULL)
    return NULL;

  /* Clear the executing flag to allow changes to the current frame.
     We are not actually running, yet.  We just started a reverse execution
     command or a record goto command.
     For the latter, EXECUTING is false and this has no effect.
     For the former, EXECUTING is true and we're in to_wait, about to
     move the thread.  Since we need to recompute the stack, we temporarily
     set EXECUTING to false.  */
  executing = is_executing (tp->ptid);
  set_executing (tp->ptid, 0);

  /* GDB stores the current frame_id when stepping in order to detect steps
     into subroutines.
     Since frames are computed differently when we're replaying, we need to
     recompute those stored frames and fix them up so we can still detect
     subroutines after we started replaying.  */
  TRY
    {
      struct frame_info *frame;
      struct frame_id frame_id;
      int upd_step_frame_id, upd_step_stack_frame_id;

      /* The current frame without replaying - computed via normal unwind.  */
      frame = get_current_frame ();
      frame_id = get_frame_id (frame);

      /* Check if we need to update any stepping-related frame id's.  */
      upd_step_frame_id = frame_id_eq (frame_id,
				       tp->control.step_frame_id);
      upd_step_stack_frame_id = frame_id_eq (frame_id,
					     tp->control.step_stack_frame_id);

      /* We start replaying at the end of the branch trace.  This corresponds
	 to the current instruction.  */
      replay = xmalloc (sizeof (*replay));
      btrace_insn_end (replay, btinfo);

      /* Skip gaps at the end of the trace.  */
      while (btrace_insn_get (replay) == NULL)
	{
	  unsigned int steps;

	  steps = btrace_insn_prev (replay, 1);
	  if (steps == 0)
	    error (_("No trace."));
	}

      /* We're not replaying, yet.  */
      gdb_assert (btinfo->replay == NULL);
      btinfo->replay = replay;

      /* Make sure we're not using any stale registers.  */
      registers_changed_ptid (tp->ptid);

      /* The current frame with replaying - computed via btrace unwind.  */
      frame = get_current_frame ();
      frame_id = get_frame_id (frame);

      /* Replace stepping related frames where necessary.  */
      if (upd_step_frame_id)
	tp->control.step_frame_id = frame_id;
      if (upd_step_stack_frame_id)
	tp->control.step_stack_frame_id = frame_id;
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      /* Restore the previous execution state.  */
      set_executing (tp->ptid, executing);

      /* Undo the partial replay setup before re-throwing.  */
      xfree (btinfo->replay);
      btinfo->replay = NULL;

      registers_changed_ptid (tp->ptid);

      throw_exception (except);
    }
  END_CATCH

  /* Restore the previous execution state.  */
  set_executing (tp->ptid, executing);

  return replay;
}
1726
1727 /* Stop replaying a thread. */
1728
1729 static void
1730 record_btrace_stop_replaying (struct thread_info *tp)
1731 {
1732 struct btrace_thread_info *btinfo;
1733
1734 btinfo = &tp->btrace;
1735
1736 xfree (btinfo->replay);
1737 btinfo->replay = NULL;
1738
1739 /* Make sure we're not leaving any stale registers. */
1740 registers_changed_ptid (tp->ptid);
1741 }
1742
1743 /* The to_resume method of target record-btrace. */
1744
1745 static void
1746 record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
1747 enum gdb_signal signal)
1748 {
1749 struct thread_info *tp, *other;
1750 enum btrace_thread_flag flag;
1751
1752 DEBUG ("resume %s: %s", target_pid_to_str (ptid), step ? "step" : "cont");
1753
1754 /* Store the execution direction of the last resume. */
1755 record_btrace_resume_exec_dir = execution_direction;
1756
1757 tp = record_btrace_find_resume_thread (ptid);
1758 if (tp == NULL)
1759 error (_("Cannot find thread to resume."));
1760
1761 /* Stop replaying other threads if the thread to resume is not replaying. */
1762 if (!btrace_is_replaying (tp) && execution_direction != EXEC_REVERSE)
1763 ALL_NON_EXITED_THREADS (other)
1764 record_btrace_stop_replaying (other);
1765
1766 /* As long as we're not replaying, just forward the request. */
1767 if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
1768 {
1769 ops = ops->beneath;
1770 return ops->to_resume (ops, ptid, step, signal);
1771 }
1772
1773 /* Compute the btrace thread flag for the requested move. */
1774 if (step == 0)
1775 flag = execution_direction == EXEC_REVERSE ? BTHR_RCONT : BTHR_CONT;
1776 else
1777 flag = execution_direction == EXEC_REVERSE ? BTHR_RSTEP : BTHR_STEP;
1778
1779 /* At the moment, we only move a single thread. We could also move
1780 all threads in parallel by single-stepping each resumed thread
1781 until the first runs into an event.
1782 When we do that, we would want to continue all other threads.
1783 For now, just resume one thread to not confuse to_wait. */
1784 record_btrace_resume_thread (tp, flag);
1785
1786 /* We just indicate the resume intent here. The actual stepping happens in
1787 record_btrace_wait below. */
1788
1789 /* Async support. */
1790 if (target_can_async_p ())
1791 {
1792 target_async (inferior_event_handler, 0);
1793 mark_async_event_handler (record_btrace_async_inferior_event_handler);
1794 }
1795 }
1796
/* Find a thread to move.  Returns the thread for PTID if it has a
   pending move request, otherwise any other thread that has one; NULL
   if no thread is to be moved.  */

static struct thread_info *
record_btrace_find_thread_to_move (ptid_t ptid)
{
  struct thread_info *tp;

  /* First check the parameter thread.  */
  tp = find_thread_ptid (ptid);
  if (tp != NULL && (tp->btrace.flags & BTHR_MOVE) != 0)
    return tp;

  /* Otherwise, find one other thread that has been resumed.  */
  ALL_NON_EXITED_THREADS (tp)
    if ((tp->btrace.flags & BTHR_MOVE) != 0)
      return tp;

  return NULL;
}
1816
1817 /* Return a target_waitstatus indicating that we ran out of history. */
1818
1819 static struct target_waitstatus
1820 btrace_step_no_history (void)
1821 {
1822 struct target_waitstatus status;
1823
1824 status.kind = TARGET_WAITKIND_NO_HISTORY;
1825
1826 return status;
1827 }
1828
1829 /* Return a target_waitstatus indicating that a step finished. */
1830
1831 static struct target_waitstatus
1832 btrace_step_stopped (void)
1833 {
1834 struct target_waitstatus status;
1835
1836 status.kind = TARGET_WAITKIND_STOPPED;
1837 status.value.sig = GDB_SIGNAL_TRAP;
1838
1839 return status;
1840 }
1841
1842 /* Clear the record histories. */
1843
1844 static void
1845 record_btrace_clear_histories (struct btrace_thread_info *btinfo)
1846 {
1847 xfree (btinfo->insn_history);
1848 xfree (btinfo->call_history);
1849
1850 btinfo->insn_history = NULL;
1851 btinfo->call_history = NULL;
1852 }
1853
/* Step a single thread.  Executes the move request recorded in TP's
   btrace flags (BTHR_STEP/BTHR_RSTEP/BTHR_CONT/BTHR_RCONT) and returns
   the resulting wait status.  */

static struct target_waitstatus
record_btrace_step_thread (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay, end;
  struct btrace_thread_info *btinfo;
  struct address_space *aspace;
  struct inferior *inf;
  enum btrace_thread_flag flags;
  unsigned int steps;

  /* We can't step without an execution history.  */
  if (btrace_is_empty (tp))
    return btrace_step_no_history ();

  btinfo = &tp->btrace;
  replay = btinfo->replay;

  /* Consume the move request.  */
  flags = btinfo->flags & BTHR_MOVE;
  btinfo->flags &= ~BTHR_MOVE;

  DEBUG ("stepping %d (%s): %u", tp->num, target_pid_to_str (tp->ptid), flags);

  switch (flags)
    {
    default:
      internal_error (__FILE__, __LINE__, _("invalid stepping type."));

    case BTHR_STEP:
      /* We're done if we're not replaying.  */
      if (replay == NULL)
	return btrace_step_no_history ();

      /* Skip gaps during replay.  */
      do
	{
	  steps = btrace_insn_next (replay, 1);
	  if (steps == 0)
	    {
	      record_btrace_stop_replaying (tp);
	      return btrace_step_no_history ();
	    }
	}
      while (btrace_insn_get (replay) == NULL);

      /* Determine the end of the instruction trace.  */
      btrace_insn_end (&end, btinfo);

      /* We stop replaying if we reached the end of the trace.  */
      if (btrace_insn_cmp (replay, &end) == 0)
	record_btrace_stop_replaying (tp);

      return btrace_step_stopped ();

    case BTHR_RSTEP:
      /* Start replaying if we're not already doing so.  */
      if (replay == NULL)
	replay = record_btrace_start_replaying (tp);

      /* If we can't step any further, we reached the end of the history.
	 Skip gaps during replay.  */
      do
	{
	  steps = btrace_insn_prev (replay, 1);
	  if (steps == 0)
	    return btrace_step_no_history ();

	}
      while (btrace_insn_get (replay) == NULL);

      return btrace_step_stopped ();

    case BTHR_CONT:
      /* We're done if we're not replaying.  */
      if (replay == NULL)
	return btrace_step_no_history ();

      inf = find_inferior_ptid (tp->ptid);
      aspace = inf->aspace;

      /* Determine the end of the instruction trace.  */
      btrace_insn_end (&end, btinfo);

      /* Step forward until we run out of history or hit a breakpoint.  */
      for (;;)
	{
	  const struct btrace_insn *insn;

	  /* Skip gaps during replay.  */
	  do
	    {
	      steps = btrace_insn_next (replay, 1);
	      if (steps == 0)
		{
		  record_btrace_stop_replaying (tp);
		  return btrace_step_no_history ();
		}

	      insn = btrace_insn_get (replay);
	    }
	  while (insn == NULL);

	  /* We stop replaying if we reached the end of the trace.  */
	  if (btrace_insn_cmp (replay, &end) == 0)
	    {
	      record_btrace_stop_replaying (tp);
	      return btrace_step_no_history ();
	    }

	  DEBUG ("stepping %d (%s) ... %s", tp->num,
		 target_pid_to_str (tp->ptid),
		 core_addr_to_string_nz (insn->pc));

	  if (record_check_stopped_by_breakpoint (aspace, insn->pc,
						  &btinfo->stop_reason))
	    return btrace_step_stopped ();
	}

    case BTHR_RCONT:
      /* Start replaying if we're not already doing so.  */
      if (replay == NULL)
	replay = record_btrace_start_replaying (tp);

      inf = find_inferior_ptid (tp->ptid);
      aspace = inf->aspace;

      /* Step backward until we run out of history or hit a breakpoint.  */
      for (;;)
	{
	  const struct btrace_insn *insn;

	  /* If we can't step any further, we reached the end of the history.
	     Skip gaps during replay.  */
	  do
	    {
	      steps = btrace_insn_prev (replay, 1);
	      if (steps == 0)
		return btrace_step_no_history ();

	      insn = btrace_insn_get (replay);
	    }
	  while (insn == NULL);

	  DEBUG ("reverse-stepping %d (%s) ... %s", tp->num,
		 target_pid_to_str (tp->ptid),
		 core_addr_to_string_nz (insn->pc));

	  if (record_check_stopped_by_breakpoint (aspace, insn->pc,
						  &btinfo->stop_reason))
	    return btrace_step_stopped ();
	}
    }
}
2006
/* The to_wait method of target record-btrace.  */

static ptid_t
record_btrace_wait (struct target_ops *ops, ptid_t ptid,
		    struct target_waitstatus *status, int options)
{
  struct thread_info *tp, *other;

  DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);

  /* As long as we're not replaying, just forward the request.  */
  if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
    {
      ops = ops->beneath;
      return ops->to_wait (ops, ptid, status, options);
    }

  /* Let's find a thread to move.  */
  tp = record_btrace_find_thread_to_move (ptid);
  if (tp == NULL)
    {
      /* No thread has a pending move request; tell the caller to keep
	 waiting.  */
      DEBUG ("wait %s: no thread", target_pid_to_str (ptid));

      status->kind = TARGET_WAITKIND_IGNORE;
      return minus_one_ptid;
    }

  /* We only move a single thread.  We're not able to correlate threads.  */
  *status = record_btrace_step_thread (tp);

  /* Stop all other threads.  */
  if (!non_stop)
    ALL_NON_EXITED_THREADS (other)
      other->btrace.flags &= ~BTHR_MOVE;

  /* Start record histories anew from the current position.  */
  record_btrace_clear_histories (&tp->btrace);

  /* We moved the replay position but did not update registers.  */
  registers_changed_ptid (tp->ptid);

  return tp->ptid;
}
2050
/* The to_can_execute_reverse method of target record-btrace.  */

static int
record_btrace_can_execute_reverse (struct target_ops *self)
{
  /* We can always replay backwards from the recorded history.  */
  return 1;
}
2058
2059 /* The to_stopped_by_sw_breakpoint method of target record-btrace. */
2060
2061 static int
2062 record_btrace_stopped_by_sw_breakpoint (struct target_ops *ops)
2063 {
2064 if (record_btrace_is_replaying (ops))
2065 {
2066 struct thread_info *tp = inferior_thread ();
2067
2068 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2069 }
2070
2071 return ops->beneath->to_stopped_by_sw_breakpoint (ops->beneath);
2072 }
2073
2074 /* The to_supports_stopped_by_sw_breakpoint method of target
2075 record-btrace. */
2076
2077 static int
2078 record_btrace_supports_stopped_by_sw_breakpoint (struct target_ops *ops)
2079 {
2080 if (record_btrace_is_replaying (ops))
2081 return 1;
2082
2083 return ops->beneath->to_supports_stopped_by_sw_breakpoint (ops->beneath);
2084 }
2085
/* The to_stopped_by_hw_breakpoint method of target record-btrace.  */
2087
2088 static int
2089 record_btrace_stopped_by_hw_breakpoint (struct target_ops *ops)
2090 {
2091 if (record_btrace_is_replaying (ops))
2092 {
2093 struct thread_info *tp = inferior_thread ();
2094
2095 return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2096 }
2097
2098 return ops->beneath->to_stopped_by_hw_breakpoint (ops->beneath);
2099 }
2100
2101 /* The to_supports_stopped_by_hw_breakpoint method of target
2102 record-btrace. */
2103
2104 static int
2105 record_btrace_supports_stopped_by_hw_breakpoint (struct target_ops *ops)
2106 {
2107 if (record_btrace_is_replaying (ops))
2108 return 1;
2109
2110 return ops->beneath->to_supports_stopped_by_hw_breakpoint (ops->beneath);
2111 }
2112
2113 /* The to_update_thread_list method of target record-btrace. */
2114
2115 static void
2116 record_btrace_update_thread_list (struct target_ops *ops)
2117 {
2118 /* We don't add or remove threads during replay. */
2119 if (record_btrace_is_replaying (ops))
2120 return;
2121
2122 /* Forward the request. */
2123 ops = ops->beneath;
2124 ops->to_update_thread_list (ops);
2125 }
2126
2127 /* The to_thread_alive method of target record-btrace. */
2128
2129 static int
2130 record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
2131 {
2132 /* We don't add or remove threads during replay. */
2133 if (record_btrace_is_replaying (ops))
2134 return find_thread_ptid (ptid) != NULL;
2135
2136 /* Forward the request. */
2137 ops = ops->beneath;
2138 return ops->to_thread_alive (ops, ptid);
2139 }
2140
/* Set the replay branch trace instruction iterator.  If IT is NULL, replay
   is stopped.  */

static void
record_btrace_set_replay (struct thread_info *tp,
			  const struct btrace_insn_iterator *it)
{
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;

  if (it == NULL || it->function == NULL)
    record_btrace_stop_replaying (tp);
  else
    {
      /* Start replaying if we aren't already; if we already are at IT,
	 there is nothing to do.  */
      if (btinfo->replay == NULL)
	record_btrace_start_replaying (tp);
      else if (btrace_insn_cmp (btinfo->replay, it) == 0)
	return;

      *btinfo->replay = *it;

      /* Registers (the PC) are computed from the replay position.  */
      registers_changed_ptid (tp->ptid);
    }

  /* Start anew from the new replay position.  */
  record_btrace_clear_histories (btinfo);
}
2168
2169 /* The to_goto_record_begin method of target record-btrace. */
2170
2171 static void
2172 record_btrace_goto_begin (struct target_ops *self)
2173 {
2174 struct thread_info *tp;
2175 struct btrace_insn_iterator begin;
2176
2177 tp = require_btrace_thread ();
2178
2179 btrace_insn_begin (&begin, &tp->btrace);
2180 record_btrace_set_replay (tp, &begin);
2181
2182 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
2183 }
2184
2185 /* The to_goto_record_end method of target record-btrace. */
2186
2187 static void
2188 record_btrace_goto_end (struct target_ops *ops)
2189 {
2190 struct thread_info *tp;
2191
2192 tp = require_btrace_thread ();
2193
2194 record_btrace_set_replay (tp, NULL);
2195
2196 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
2197 }
2198
/* The to_goto_record method of target record-btrace.  Moves the replay
   position to instruction number INSN.  */

static void
record_btrace_goto (struct target_ops *self, ULONGEST insn)
{
  struct thread_info *tp;
  struct btrace_insn_iterator it;
  unsigned int number;
  int found;

  /* Instruction numbers are kept as unsigned int internally; NUMBER
     truncates INSN.  */
  number = insn;

  /* Check for wrap-arounds.  */
  if (number != insn)
    error (_("Instruction number out of range."));

  tp = require_btrace_thread ();

  found = btrace_find_insn_by_number (&it, &tp->btrace, number);
  if (found == 0)
    error (_("No such instruction."));

  record_btrace_set_replay (tp, &it);

  /* Show where we ended up.  */
  print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
}
2225
/* The to_execution_direction target method.  */

static enum exec_direction_kind
record_btrace_execution_direction (struct target_ops *self)
{
  /* Report the direction of the last resume request; stored in
     record_btrace_resume.  */
  return record_btrace_resume_exec_dir;
}
2233
/* The to_prepare_to_generate_core target method.  */

static void
record_btrace_prepare_to_generate_core (struct target_ops *self)
{
  /* While this flag is set, register and memory accesses bypass the
     replay position and go to the target beneath.  */
  record_btrace_generating_corefile = 1;
}
2241
/* The to_done_generating_core target method.  */

static void
record_btrace_done_generating_core (struct target_ops *self)
{
  /* Resume normal (replay-aware) register and memory access.  */
  record_btrace_generating_corefile = 0;
}
2249
/* Initialize the record-btrace target ops.  */

static void
init_record_btrace_ops (void)
{
  struct target_ops *ops;

  ops = &record_btrace_ops;

  /* Identification.  */
  ops->to_shortname = "record-btrace";
  ops->to_longname = "Branch tracing target";
  ops->to_doc = "Collect control-flow trace and provide the execution history.";

  /* Open/close and generic record methods.  */
  ops->to_open = record_btrace_open;
  ops->to_close = record_btrace_close;
  ops->to_async = record_btrace_async;
  ops->to_detach = record_detach;
  ops->to_disconnect = record_disconnect;
  ops->to_mourn_inferior = record_mourn_inferior;
  ops->to_kill = record_kill;
  ops->to_stop_recording = record_btrace_stop_recording;
  ops->to_info_record = record_btrace_info;

  /* History browsing.  */
  ops->to_insn_history = record_btrace_insn_history;
  ops->to_insn_history_from = record_btrace_insn_history_from;
  ops->to_insn_history_range = record_btrace_insn_history_range;
  ops->to_call_history = record_btrace_call_history;
  ops->to_call_history_from = record_btrace_call_history_from;
  ops->to_call_history_range = record_btrace_call_history_range;

  /* Replay: memory, breakpoints, registers, unwinding.  */
  ops->to_record_is_replaying = record_btrace_is_replaying;
  ops->to_xfer_partial = record_btrace_xfer_partial;
  ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
  ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
  ops->to_fetch_registers = record_btrace_fetch_registers;
  ops->to_store_registers = record_btrace_store_registers;
  ops->to_prepare_to_store = record_btrace_prepare_to_store;
  ops->to_get_unwinder = &record_btrace_to_get_unwinder;
  ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;

  /* Execution control.  */
  ops->to_resume = record_btrace_resume;
  ops->to_wait = record_btrace_wait;
  ops->to_update_thread_list = record_btrace_update_thread_list;
  ops->to_thread_alive = record_btrace_thread_alive;
  ops->to_goto_record_begin = record_btrace_goto_begin;
  ops->to_goto_record_end = record_btrace_goto_end;
  ops->to_goto_record = record_btrace_goto;
  ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
  ops->to_stopped_by_sw_breakpoint = record_btrace_stopped_by_sw_breakpoint;
  ops->to_supports_stopped_by_sw_breakpoint
    = record_btrace_supports_stopped_by_sw_breakpoint;
  ops->to_stopped_by_hw_breakpoint = record_btrace_stopped_by_hw_breakpoint;
  ops->to_supports_stopped_by_hw_breakpoint
    = record_btrace_supports_stopped_by_hw_breakpoint;
  ops->to_execution_direction = record_btrace_execution_direction;

  /* Core file generation.  */
  ops->to_prepare_to_generate_core = record_btrace_prepare_to_generate_core;
  ops->to_done_generating_core = record_btrace_done_generating_core;

  ops->to_stratum = record_stratum;
  ops->to_magic = OPS_MAGIC;
}
2305
/* Start recording in BTS format.  */

static void
cmd_record_btrace_bts_start (char *args, int from_tty)
{

  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  /* Select the BTS format before pushing the target.  */
  record_btrace_conf.format = BTRACE_FORMAT_BTS;

  TRY
    {
      execute_command ("target record-btrace", from_tty);
    }
  CATCH (exception, RETURN_MASK_ALL)
    {
      /* Undo the format selection if opening the target failed.  */
      record_btrace_conf.format = BTRACE_FORMAT_NONE;
      throw_exception (exception);
    }
  END_CATCH
}
2328
2329 /* Alias for "target record". */
2330
2331 static void
2332 cmd_record_btrace_start (char *args, int from_tty)
2333 {
2334
2335 if (args != NULL && *args != 0)
2336 error (_("Invalid argument."));
2337
2338 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2339
2340 TRY
2341 {
2342 execute_command ("target record-btrace", from_tty);
2343 }
2344 CATCH (exception, RETURN_MASK_ALL)
2345 {
2346 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2347 throw_exception (exception);
2348 }
2349 END_CATCH
2350 }
2351
2352 /* The "set record btrace" command. */
2353
static void
cmd_set_record_btrace (char *args, int from_tty)
{
  /* Invoked when "set record btrace" is given without a subcommand.
     NOTE(review): this shows the current values via cmd_show_list rather
     than printing a help list the way cmd_set_record_btrace_bts does —
     presumably intentional, but worth confirming for consistency.  */
  cmd_show_list (set_record_btrace_cmdlist, from_tty, "");
}
2359
2360 /* The "show record btrace" command. */
2361
static void
cmd_show_record_btrace (char *args, int from_tty)
{
  /* List the current values of all "record btrace" options.  */
  cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
}
2367
2368 /* The "show record btrace replay-memory-access" command. */
2369
2370 static void
2371 cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2372 struct cmd_list_element *c, const char *value)
2373 {
2374 fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
2375 replay_memory_access);
2376 }
2377
2378 /* The "set record btrace bts" command. */
2379
2380 static void
2381 cmd_set_record_btrace_bts (char *args, int from_tty)
2382 {
2383 printf_unfiltered (_("\"set record btrace bts\" must be followed "
2384 "by an apporpriate subcommand.\n"));
2385 help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
2386 all_commands, gdb_stdout);
2387 }
2388
2389 /* The "show record btrace bts" command. */
2390
static void
cmd_show_record_btrace_bts (char *args, int from_tty)
{
  /* List the current values of all "record btrace bts" options.  */
  cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
}
2396
2397 void _initialize_record_btrace (void);
2398
2399 /* Initialize btrace commands. */
2400
void
_initialize_record_btrace (void)
{
  /* "record btrace" starts recording; it is also a prefix for the
     per-format start subcommands below.  */
  add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
		  _("Start branch trace recording."), &record_btrace_cmdlist,
		  "record btrace ", 0, &record_cmdlist);
  add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);

  /* "record btrace bts" explicitly selects the BTS trace format.  */
  add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
	   _("\
Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
The processor stores a from/to record for each branch into a cyclic buffer.\n\
This format may not be available on all processors."),
	   &record_btrace_cmdlist);
  add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);

  /* "set/show record btrace" prefix commands for the options below.  */
  add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
		  _("Set record options"), &set_record_btrace_cmdlist,
		  "set record btrace ", 0, &set_record_cmdlist);

  add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
		  _("Show record options"), &show_record_btrace_cmdlist,
		  "show record btrace ", 0, &show_record_cmdlist);

  /* "set record btrace replay-memory-access" chooses whether replay may
     touch read-write memory; backed by the replay_memory_access enum
     variable declared earlier in this file.  */
  add_setshow_enum_cmd ("replay-memory-access", no_class,
			replay_memory_access_types, &replay_memory_access, _("\
Set what memory accesses are allowed during replay."), _("\
Show what memory accesses are allowed during replay."),
			   _("Default is READ-ONLY.\n\n\
The btrace record target does not trace data.\n\
The memory therefore corresponds to the live target and not \
to the current replay position.\n\n\
When READ-ONLY, allow accesses to read-only memory during replay.\n\
When READ-WRITE, allow accesses to read-only and read-write memory during \
replay."),
			   NULL, cmd_show_replay_memory_access,
			   &set_record_btrace_cmdlist,
			   &show_record_btrace_cmdlist);

  /* "set/show record btrace bts" prefix commands for BTS-specific
     options.  */
  add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
		  _("Set record btrace bts options"),
		  &set_record_btrace_bts_cmdlist,
		  "set record btrace bts ", 0, &set_record_btrace_cmdlist);

  add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
		  _("Show record btrace bts options"),
		  &show_record_btrace_bts_cmdlist,
		  "show record btrace bts ", 0, &show_record_btrace_cmdlist);

  /* "set record btrace bts buffer-size" — requested trace buffer size,
     stored in record_btrace_conf.bts.size.  */
  add_setshow_uinteger_cmd ("buffer-size", no_class,
			    &record_btrace_conf.bts.size,
			    _("Set the record/replay bts buffer size."),
			    _("Show the record/replay bts buffer size."), _("\
When starting recording request a trace buffer of this size.  \
The actual buffer size may differ from the requested size.  \
Use \"info record\" to see the actual buffer size.\n\n\
Bigger buffers allow longer recording but also take more time to process \
the recorded execution trace.\n\n\
The trace buffer size may not be changed while recording."), NULL, NULL,
			    &set_record_btrace_bts_cmdlist,
			    &show_record_btrace_bts_cmdlist);

  /* Register the record-btrace target itself.  */
  init_record_btrace_ops ();
  add_target (&record_btrace_ops);

  /* Cache for btrace frame unwinder function symbols; keyed/compared by
     the bfcache_hash/bfcache_eq helpers defined earlier in the file.  */
  bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
			       xcalloc, xfree);

  /* Default requested BTS buffer size: 64 KiB.  */
  record_btrace_conf.bts.size = 64 * 1024;
}