record-btrace: show trace from enable location
[deliverable/binutils-gdb.git] / gdb / record-btrace.c
1 /* Branch trace support for GDB, the GNU debugger.
2
3 Copyright (C) 2013-2014 Free Software Foundation, Inc.
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include "record.h"
24 #include "gdbthread.h"
25 #include "target.h"
26 #include "gdbcmd.h"
27 #include "disasm.h"
28 #include "observer.h"
29 #include "exceptions.h"
30 #include "cli/cli-utils.h"
31 #include "source.h"
32 #include "ui-out.h"
33 #include "symtab.h"
34 #include "filenames.h"
35 #include "regcache.h"
36 #include "frame-unwind.h"
37 #include "hashtab.h"
38
/* The target_ops of record-btrace.  Pushed onto the target stack by
   record_btrace_open.  */
static struct target_ops record_btrace_ops;

/* A new-thread observer enabling branch tracing for each new thread while
   recording is active; NULL when automatic tracing is not enabled.  */
static struct observer *record_btrace_thread_observer;

/* Temporarily allow memory accesses during replay, e.g. while inserting or
   removing breakpoints.  Non-zero while accesses are permitted.  */
static int record_btrace_allow_memory_access;
47
/* Print a record-btrace debug message if "set debug record" is non-zero.
   Use do ... while (0) to avoid ambiguities when used in if statements.  */

#define DEBUG(msg, args...) \
  do \
    { \
      if (record_debug != 0) \
        fprintf_unfiltered (gdb_stdlog, \
			    "[record-btrace] " msg "\n", ##args); \
    } \
  while (0)
59
60
61 /* Update the branch trace for the current thread and return a pointer to its
62 thread_info.
63
64 Throws an error if there is no thread or no trace. This function never
65 returns NULL. */
66
67 static struct thread_info *
68 require_btrace_thread (void)
69 {
70 struct thread_info *tp;
71
72 DEBUG ("require");
73
74 tp = find_thread_ptid (inferior_ptid);
75 if (tp == NULL)
76 error (_("No thread."));
77
78 btrace_fetch (tp);
79
80 if (btrace_is_empty (tp))
81 error (_("No trace."));
82
83 return tp;
84 }
85
86 /* Update the branch trace for the current thread and return a pointer to its
87 branch trace information struct.
88
89 Throws an error if there is no thread or no trace. This function never
90 returns NULL. */
91
92 static struct btrace_thread_info *
93 require_btrace (void)
94 {
95 struct thread_info *tp;
96
97 tp = require_btrace_thread ();
98
99 return &tp->btrace;
100 }
101
102 /* Enable branch tracing for one thread. Warn on errors. */
103
static void
record_btrace_enable_warn (struct thread_info *tp)
{
  /* Must be volatile: TRY_CATCH is implemented with setjmp/longjmp.  */
  volatile struct gdb_exception error;

  TRY_CATCH (error, RETURN_MASK_ERROR)
    btrace_enable (tp);

  /* Downgrade a failed enable to a warning; this runs from the new-thread
     observer, where throwing would disrupt thread creation.  */
  if (error.message != NULL)
    warning ("%s", error.message);
}
115
116 /* Callback function to disable branch tracing for one thread. */
117
static void
record_btrace_disable_callback (void *arg)
{
  /* ARG is the thread_info registered with make_cleanup.  */
  btrace_disable ((struct thread_info *) arg);
}
127
128 /* Enable automatic tracing of new threads. */
129
static void
record_btrace_auto_enable (void)
{
  DEBUG ("attach thread observer");

  /* Remember the observer so record_btrace_auto_disable can detach it.  */
  record_btrace_thread_observer
    = observer_attach_new_thread (record_btrace_enable_warn);
}
138
139 /* Disable automatic tracing of new threads. */
140
141 static void
142 record_btrace_auto_disable (void)
143 {
144 /* The observer may have been detached, already. */
145 if (record_btrace_thread_observer == NULL)
146 return;
147
148 DEBUG ("detach thread observer");
149
150 observer_detach_new_thread (record_btrace_thread_observer);
151 record_btrace_thread_observer = NULL;
152 }
153
154 /* The to_open method of target record-btrace. */
155
static void
record_btrace_open (char *args, int from_tty)
{
  struct cleanup *disable_chain;
  struct thread_info *tp;

  DEBUG ("open");

  record_preopen ();

  if (!target_has_execution)
    error (_("The program is not being run."));

  if (!target_supports_btrace ())
    error (_("Target does not support branch tracing."));

  gdb_assert (record_btrace_thread_observer == NULL);

  /* Enable tracing for the requested threads (all threads if ARGS is empty).
     Each successful enable is pushed onto the cleanup chain so that an error
     while enabling a later thread disables all earlier ones again.  */
  disable_chain = make_cleanup (null_cleanup, NULL);
  ALL_THREADS (tp)
    if (args == NULL || *args == 0 || number_is_in_list (args, tp->num))
      {
	btrace_enable (tp);

	make_cleanup (record_btrace_disable_callback, tp);
      }

  /* Also trace threads created from now on.  */
  record_btrace_auto_enable ();

  push_target (&record_btrace_ops);

  observer_notify_record_changed (current_inferior (),  1);

  /* Everything succeeded; keep tracing enabled.  */
  discard_cleanups (disable_chain);
}
191
192 /* The to_stop_recording method of target record-btrace. */
193
194 static void
195 record_btrace_stop_recording (void)
196 {
197 struct thread_info *tp;
198
199 DEBUG ("stop recording");
200
201 record_btrace_auto_disable ();
202
203 ALL_THREADS (tp)
204 if (tp->btrace.target != NULL)
205 btrace_disable (tp);
206 }
207
208 /* The to_close method of target record-btrace. */
209
static void
record_btrace_close (void)
{
  /* Make sure automatic recording gets disabled even if we did not stop
     recording before closing the record-btrace target.  */
  record_btrace_auto_disable ();

  /* We already stopped recording.
     NOTE(review): per-thread tracing that is still enabled is not disabled
     here — confirm the target framework guarantees to_stop_recording runs
     first, or that leaving it enabled is intended.  */
}
219
220 /* The to_info_record method of target record-btrace. */
221
static void
record_btrace_info (void)
{
  struct btrace_thread_info *btinfo;
  struct thread_info *tp;
  unsigned int insns, calls;

  DEBUG ("info");

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  /* Update the trace so the counts below are current.  */
  btrace_fetch (tp);

  insns = 0;
  calls = 0;

  btinfo = &tp->btrace;

  if (!btrace_is_empty (tp))
    {
      struct btrace_call_iterator call;
      struct btrace_insn_iterator insn;

      /* The end iterator points one past the last entry; step back once to
	 read the number of the final call/instruction.  */
      btrace_call_end (&call, btinfo);
      btrace_call_prev (&call, 1);
      calls = btrace_call_number (&call);

      btrace_insn_end (&insn, btinfo);
      btrace_insn_prev (&insn, 1);
      insns = btrace_insn_number (&insn);
    }

  printf_unfiltered (_("Recorded %u instructions in %u functions for thread "
		       "%d (%s).\n"), insns, calls, tp->num,
		     target_pid_to_str (tp->ptid));

  if (btrace_is_replaying (tp))
    printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
		       btrace_insn_number (btinfo->replay));
}
264
265 /* Print an unsigned int. */
266
/* Print VAL as an unsigned int in field FLD of UIOUT.  */
static void
ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
{
  ui_out_field_fmt (uiout, fld, "%u", val);
}
272
273 /* Disassemble a section of the recorded instruction trace. */
274
275 static void
276 btrace_insn_history (struct ui_out *uiout,
277 const struct btrace_insn_iterator *begin,
278 const struct btrace_insn_iterator *end, int flags)
279 {
280 struct gdbarch *gdbarch;
281 struct btrace_insn_iterator it;
282
283 DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
284 btrace_insn_number (end));
285
286 gdbarch = target_gdbarch ();
287
288 for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
289 {
290 const struct btrace_insn *insn;
291
292 insn = btrace_insn_get (&it);
293
294 /* Print the instruction index. */
295 ui_out_field_uint (uiout, "index", btrace_insn_number (&it));
296 ui_out_text (uiout, "\t");
297
298 /* Disassembly with '/m' flag may not produce the expected result.
299 See PR gdb/11833. */
300 gdb_disassembly (gdbarch, uiout, NULL, flags, 1, insn->pc, insn->pc + 1);
301 }
302 }
303
304 /* The to_insn_history method of target record-btrace. */
305
static void
record_btrace_insn_history (int size, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
						       "insn history");
  /* SIZE's sign selects the direction; its magnitude is the number of
     instructions to show.  */
  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  btinfo = require_btrace ();
  history = btinfo->insn_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("insn-history (0x%x): %d", flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
	 start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
	begin = *replay;
      else
	btrace_insn_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
	 expand in the other direction, as well, to fill up any remaining
	 context.  */
      end = begin;
      if (size < 0)
	{
	  /* We want the current position covered, as well.  */
	  covered = btrace_insn_next (&end, 1);
	  covered += btrace_insn_prev (&begin, context - covered);
	  covered += btrace_insn_next (&end, context - covered);
	}
      else
	{
	  covered = btrace_insn_next (&end, context);
	  covered += btrace_insn_prev (&begin, context - covered);
	}
    }
  else
    {
      /* A previous history request exists; continue from its boundary in
	 the requested direction.  */
      begin = history->begin;
      end = history->end;

      DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
	     btrace_insn_number (&begin), btrace_insn_number (&end));

      if (size < 0)
	{
	  end = begin;
	  covered = btrace_insn_prev (&begin, context);
	}
      else
	{
	  begin = end;
	  covered = btrace_insn_next (&end, context);
	}
    }

  if (covered > 0)
    btrace_insn_history (uiout, &begin, &end, flags);
  else
    {
      if (size < 0)
	printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
	printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  /* Remember the printed range so a subsequent request continues from it.  */
  btrace_set_insn_history (btinfo, &begin, &end);
  do_cleanups (uiout_cleanup);
}
389
390 /* The to_insn_history_range method of target record-btrace. */
391
static void
record_btrace_insn_history_range (ULONGEST from, ULONGEST to, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
						       "insn history");
  /* Instruction numbers are unsigned int; narrowing may truncate.  */
  low = from;
  high = to;

  DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_insn_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_insn_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_insn_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_insn_next (&end, 1);
    }

  btrace_insn_history (uiout, &begin, &end, flags);
  btrace_set_insn_history (btinfo, &begin, &end);

  do_cleanups (uiout_cleanup);
}
441
442 /* The to_insn_history_from method of target record-btrace. */
443
444 static void
445 record_btrace_insn_history_from (ULONGEST from, int size, int flags)
446 {
447 ULONGEST begin, end, context;
448
449 context = abs (size);
450 if (context == 0)
451 error (_("Bad record instruction-history-size."));
452
453 if (size < 0)
454 {
455 end = from;
456
457 if (from < context)
458 begin = 0;
459 else
460 begin = from - context + 1;
461 }
462 else
463 {
464 begin = from;
465 end = from + context - 1;
466
467 /* Check for wrap-around. */
468 if (end < begin)
469 end = ULONGEST_MAX;
470 }
471
472 record_btrace_insn_history_range (begin, end, flags);
473 }
474
475 /* Print the instruction number range for a function call history line. */
476
477 static void
478 btrace_call_history_insn_range (struct ui_out *uiout,
479 const struct btrace_function *bfun)
480 {
481 unsigned int begin, end, size;
482
483 size = VEC_length (btrace_insn_s, bfun->insn);
484 gdb_assert (size > 0);
485
486 begin = bfun->insn_offset;
487 end = begin + size - 1;
488
489 ui_out_field_uint (uiout, "insn begin", begin);
490 ui_out_text (uiout, ",");
491 ui_out_field_uint (uiout, "insn end", end);
492 }
493
494 /* Print the source line information for a function call history line. */
495
static void
btrace_call_history_src_line (struct ui_out *uiout,
			      const struct btrace_function *bfun)
{
  struct symbol *sym;
  int begin, end;

  /* Without a debug symbol there is no source information to print.  */
  sym = bfun->sym;
  if (sym == NULL)
    return;

  ui_out_field_string (uiout, "file",
		       symtab_to_filename_for_display (sym->symtab));

  begin = bfun->lbegin;
  end = bfun->lend;

  /* An inverted range means no line information.  */
  if (end < begin)
    return;

  ui_out_text (uiout, ":");
  ui_out_field_int (uiout, "min line", begin);

  /* Print "min,max" only when the segment spans more than one line.  */
  if (end == begin)
    return;

  ui_out_text (uiout, ",");
  ui_out_field_int (uiout, "max line", end);
}
525
526 /* Get the name of a branch trace function. */
527
528 static const char *
529 btrace_get_bfun_name (const struct btrace_function *bfun)
530 {
531 struct minimal_symbol *msym;
532 struct symbol *sym;
533
534 if (bfun == NULL)
535 return "??";
536
537 msym = bfun->msym;
538 sym = bfun->sym;
539
540 if (sym != NULL)
541 return SYMBOL_PRINT_NAME (sym);
542 else if (msym != NULL)
543 return SYMBOL_PRINT_NAME (msym);
544 else
545 return "??";
546 }
547
548 /* Disassemble a section of the recorded function trace. */
549
static void
btrace_call_history (struct ui_out *uiout,
		     const struct btrace_thread_info *btinfo,
		     const struct btrace_call_iterator *begin,
		     const struct btrace_call_iterator *end,
		     enum record_print_flag flags)
{
  struct btrace_call_iterator it;

  DEBUG ("ftrace (0x%x): [%u; %u)", flags, btrace_call_number (begin),
	 btrace_call_number (end));

  /* Print one line per function segment in the half-open range
     [BEGIN; END).  */
  for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
    {
      const struct btrace_function *bfun;
      struct minimal_symbol *msym;
      struct symbol *sym;

      bfun = btrace_call_get (&it);
      sym = bfun->sym;
      msym = bfun->msym;

      /* Print the function index.  */
      ui_out_field_uint (uiout, "index", bfun->number);
      ui_out_text (uiout, "\t");

      /* Indent by the call depth (segment level plus the thread's level
	 adjustment) when requested.  */
      if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
	{
	  int level = bfun->level + btinfo->level, i;

	  for (i = 0; i < level; ++i)
	    ui_out_text (uiout, "  ");
	}

      /* Print the function name, preferring the debug symbol.  Omit the
	 "??" placeholder in MI output.  */
      if (sym != NULL)
	ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (sym));
      else if (msym != NULL)
	ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (msym));
      else if (!ui_out_is_mi_like_p (uiout))
	ui_out_field_string (uiout, "function", "??");

      if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
	{
	  ui_out_text (uiout, _("\tinst "));
	  btrace_call_history_insn_range (uiout, bfun);
	}

      if ((flags & RECORD_PRINT_SRC_LINE) != 0)
	{
	  ui_out_text (uiout, _("\tat "));
	  btrace_call_history_src_line (uiout, bfun);
	}

      ui_out_text (uiout, "\n");
    }
}
606
607 /* The to_call_history method of target record-btrace. */
608
static void
record_btrace_call_history (int size, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  /* NOTE(review): this tuple is named "insn history" although it wraps the
     call history; the range variant uses "func history" — confirm which id
     MI consumers expect.  */
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
						       "insn history");
  /* SIZE's sign selects the direction; its magnitude is the number of
     function segments to show.  */
  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  btinfo = require_btrace ();
  history = btinfo->call_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("call-history (0x%x): %d", flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
	 start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
	{
	  begin.function = replay->function;
	  begin.btinfo = btinfo;
	}
      else
	btrace_call_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
	 expand in the other direction, as well, to fill up any remaining
	 context.  */
      end = begin;
      if (size < 0)
	{
	  /* We want the current position covered, as well.  */
	  covered = btrace_call_next (&end, 1);
	  covered += btrace_call_prev (&begin, context - covered);
	  covered += btrace_call_next (&end, context - covered);
	}
      else
	{
	  covered = btrace_call_next (&end, context);
	  covered += btrace_call_prev (&begin, context - covered);
	}
    }
  else
    {
      /* A previous history request exists; continue from its boundary in
	 the requested direction.  */
      begin = history->begin;
      end = history->end;

      DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", flags, size,
	     btrace_call_number (&begin), btrace_call_number (&end));

      if (size < 0)
	{
	  end = begin;
	  covered = btrace_call_prev (&begin, context);
	}
      else
	{
	  begin = end;
	  covered = btrace_call_next (&end, context);
	}
    }

  if (covered > 0)
    btrace_call_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
	printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
	printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  /* Remember the printed range so a subsequent request continues from it.  */
  btrace_set_call_history (btinfo, &begin, &end);
  do_cleanups (uiout_cleanup);
}
695
696 /* The to_call_history_range method of target record-btrace. */
697
static void
record_btrace_call_history_range (ULONGEST from, ULONGEST to, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
						       "func history");
  /* Call numbers are unsigned int; narrowing may truncate.  */
  low = from;
  high = to;

  DEBUG ("call-history (0x%x): [%u; %u)", flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_call_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_call_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_call_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_call_next (&end, 1);
    }

  btrace_call_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_call_history (btinfo, &begin, &end);

  do_cleanups (uiout_cleanup);
}
747
748 /* The to_call_history_from method of target record-btrace. */
749
750 static void
751 record_btrace_call_history_from (ULONGEST from, int size, int flags)
752 {
753 ULONGEST begin, end, context;
754
755 context = abs (size);
756 if (context == 0)
757 error (_("Bad record function-call-history-size."));
758
759 if (size < 0)
760 {
761 end = from;
762
763 if (from < context)
764 begin = 0;
765 else
766 begin = from - context + 1;
767 }
768 else
769 {
770 begin = from;
771 end = from + context - 1;
772
773 /* Check for wrap-around. */
774 if (end < begin)
775 end = ULONGEST_MAX;
776 }
777
778 record_btrace_call_history_range (begin, end, flags);
779 }
780
781 /* The to_record_is_replaying method of target record-btrace. */
782
783 static int
784 record_btrace_is_replaying (void)
785 {
786 struct thread_info *tp;
787
788 ALL_THREADS (tp)
789 if (btrace_is_replaying (tp))
790 return 1;
791
792 return 0;
793 }
794
795 /* The to_xfer_partial method of target record-btrace. */
796
797 static LONGEST
798 record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
799 const char *annex, gdb_byte *readbuf,
800 const gdb_byte *writebuf, ULONGEST offset,
801 ULONGEST len)
802 {
803 struct target_ops *t;
804
805 /* Filter out requests that don't make sense during replay. */
806 if (!record_btrace_allow_memory_access && record_btrace_is_replaying ())
807 {
808 switch (object)
809 {
810 case TARGET_OBJECT_MEMORY:
811 {
812 struct target_section *section;
813
814 /* We do not allow writing memory in general. */
815 if (writebuf != NULL)
816 return TARGET_XFER_E_UNAVAILABLE;
817
818 /* We allow reading readonly memory. */
819 section = target_section_by_addr (ops, offset);
820 if (section != NULL)
821 {
822 /* Check if the section we found is readonly. */
823 if ((bfd_get_section_flags (section->the_bfd_section->owner,
824 section->the_bfd_section)
825 & SEC_READONLY) != 0)
826 {
827 /* Truncate the request to fit into this section. */
828 len = min (len, section->endaddr - offset);
829 break;
830 }
831 }
832
833 return TARGET_XFER_E_UNAVAILABLE;
834 }
835 }
836 }
837
838 /* Forward the request. */
839 for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
840 if (ops->to_xfer_partial != NULL)
841 return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
842 offset, len);
843
844 return TARGET_XFER_E_UNAVAILABLE;
845 }
846
847 /* The to_insert_breakpoint method of target record-btrace. */
848
static int
record_btrace_insert_breakpoint (struct target_ops *ops,
				 struct gdbarch *gdbarch,
				 struct bp_target_info *bp_tgt)
{
  /* Must be volatile: TRY_CATCH is implemented with setjmp/longjmp.  */
  volatile struct gdb_exception except;
  int old, ret;

  /* Inserting breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = record_btrace_allow_memory_access;
  record_btrace_allow_memory_access = 1;

  ret = 0;
  TRY_CATCH (except, RETURN_MASK_ALL)
    ret = forward_target_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);

  /* Restore the flag before re-throwing so an error cannot leave memory
     access permanently enabled.  */
  record_btrace_allow_memory_access = old;

  if (except.reason < 0)
    throw_exception (except);

  return ret;
}
873
874 /* The to_remove_breakpoint method of target record-btrace. */
875
static int
record_btrace_remove_breakpoint (struct target_ops *ops,
				 struct gdbarch *gdbarch,
				 struct bp_target_info *bp_tgt)
{
  /* Must be volatile: TRY_CATCH is implemented with setjmp/longjmp.  */
  volatile struct gdb_exception except;
  int old, ret;

  /* Removing breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = record_btrace_allow_memory_access;
  record_btrace_allow_memory_access = 1;

  ret = 0;
  TRY_CATCH (except, RETURN_MASK_ALL)
    ret = forward_target_remove_breakpoint (ops->beneath, gdbarch, bp_tgt);

  /* Restore the flag before re-throwing so an error cannot leave memory
     access permanently enabled.  */
  record_btrace_allow_memory_access = old;

  if (except.reason < 0)
    throw_exception (except);

  return ret;
}
900
901 /* The to_fetch_registers method of target record-btrace. */
902
static void
record_btrace_fetch_registers (struct target_ops *ops,
			       struct regcache *regcache, int regno)
{
  struct btrace_insn_iterator *replay;
  struct thread_info *tp;

  tp = find_thread_ptid (inferior_ptid);
  gdb_assert (tp != NULL);

  replay = tp->btrace.replay;
  if (replay != NULL)
    {
      /* While replaying, the only register value we can reconstruct from
	 the trace is the PC of the current replay position.  */
      const struct btrace_insn *insn;
      struct gdbarch *gdbarch;
      int pcreg;

      gdbarch = get_regcache_arch (regcache);
      pcreg = gdbarch_pc_regnum (gdbarch);
      if (pcreg < 0)
	return;

      /* We can only provide the PC register.  */
      if (regno >= 0 && regno != pcreg)
	return;

      insn = btrace_insn_get (replay);
      gdb_assert (insn != NULL);

      regcache_raw_supply (regcache, regno, &insn->pc);
    }
  else
    {
      /* Not replaying: delegate to the first target beneath that can fetch
	 registers.  */
      struct target_ops *t;

      for (t = ops->beneath; t != NULL; t = t->beneath)
	if (t->to_fetch_registers != NULL)
	  {
	    t->to_fetch_registers (t, regcache, regno);
	    break;
	  }
    }
}
946
947 /* The to_store_registers method of target record-btrace. */
948
949 static void
950 record_btrace_store_registers (struct target_ops *ops,
951 struct regcache *regcache, int regno)
952 {
953 struct target_ops *t;
954
955 if (record_btrace_is_replaying ())
956 error (_("This record target does not allow writing registers."));
957
958 gdb_assert (may_write_registers != 0);
959
960 for (t = ops->beneath; t != NULL; t = t->beneath)
961 if (t->to_store_registers != NULL)
962 {
963 t->to_store_registers (t, regcache, regno);
964 return;
965 }
966
967 noprocess ();
968 }
969
970 /* The to_prepare_to_store method of target record-btrace. */
971
972 static void
973 record_btrace_prepare_to_store (struct target_ops *ops,
974 struct regcache *regcache)
975 {
976 struct target_ops *t;
977
978 if (record_btrace_is_replaying ())
979 return;
980
981 for (t = ops->beneath; t != NULL; t = t->beneath)
982 if (t->to_prepare_to_store != NULL)
983 {
984 t->to_prepare_to_store (t, regcache);
985 return;
986 }
987 }
988
989 /* The branch trace frame cache. */
990
struct btrace_frame_cache
{
  /* The thread this frame belongs to.  */
  struct thread_info *tp;

  /* The frame info; used as the hash key in bfcache.  */
  struct frame_info *frame;

  /* The branch trace function segment represented by this frame.  */
  const struct btrace_function *bfun;
};
1002
/* A hash table of struct btrace_frame_cache entries, hashed and compared by
   their FRAME member (see bfcache_hash and bfcache_eq).  */

static htab_t bfcache;
1006
1007 /* hash_f for htab_create_alloc of bfcache. */
1008
1009 static hashval_t
1010 bfcache_hash (const void *arg)
1011 {
1012 const struct btrace_frame_cache *cache = arg;
1013
1014 return htab_hash_pointer (cache->frame);
1015 }
1016
1017 /* eq_f for htab_create_alloc of bfcache. */
1018
1019 static int
1020 bfcache_eq (const void *arg1, const void *arg2)
1021 {
1022 const struct btrace_frame_cache *cache1 = arg1;
1023 const struct btrace_frame_cache *cache2 = arg2;
1024
1025 return cache1->frame == cache2->frame;
1026 }
1027
1028 /* Create a new btrace frame cache. */
1029
1030 static struct btrace_frame_cache *
1031 bfcache_new (struct frame_info *frame)
1032 {
1033 struct btrace_frame_cache *cache;
1034 void **slot;
1035
1036 cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
1037 cache->frame = frame;
1038
1039 slot = htab_find_slot (bfcache, cache, INSERT);
1040 gdb_assert (*slot == NULL);
1041 *slot = cache;
1042
1043 return cache;
1044 }
1045
1046 /* Extract the branch trace function from a branch trace frame. */
1047
1048 static const struct btrace_function *
1049 btrace_get_frame_function (struct frame_info *frame)
1050 {
1051 const struct btrace_frame_cache *cache;
1052 const struct btrace_function *bfun;
1053 struct btrace_frame_cache pattern;
1054 void **slot;
1055
1056 pattern.frame = frame;
1057
1058 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1059 if (slot == NULL)
1060 return NULL;
1061
1062 cache = *slot;
1063 return cache->bfun;
1064 }
1065
1066 /* Implement stop_reason method for record_btrace_frame_unwind. */
1067
1068 static enum unwind_stop_reason
1069 record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1070 void **this_cache)
1071 {
1072 const struct btrace_frame_cache *cache;
1073 const struct btrace_function *bfun;
1074
1075 cache = *this_cache;
1076 bfun = cache->bfun;
1077 gdb_assert (bfun != NULL);
1078
1079 if (bfun->up == NULL)
1080 return UNWIND_UNAVAILABLE;
1081
1082 return UNWIND_NO_REASON;
1083 }
1084
1085 /* Implement this_id method for record_btrace_frame_unwind. */
1086
static void
record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
			     struct frame_id *this_id)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;
  CORE_ADDR code, special;

  cache = *this_cache;

  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  /* Walk back to the first segment of this function so all segments of one
     function instance share the same frame id.  */
  while (bfun->segment.prev != NULL)
    bfun = bfun->segment.prev;

  /* The stack contents are unavailable in the trace; build an id from the
     function address plus the segment number as disambiguator.  */
  code = get_frame_func (this_frame);
  special = bfun->number;

  *this_id = frame_id_build_unavailable_stack_special (code, special);

  DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
	 btrace_get_bfun_name (cache->bfun),
	 core_addr_to_string_nz (this_id->code_addr),
	 core_addr_to_string_nz (this_id->special_addr));
}
1113
1114 /* Implement prev_register method for record_btrace_frame_unwind. */
1115
static struct value *
record_btrace_frame_prev_register (struct frame_info *this_frame,
				   void **this_cache,
				   int regnum)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun, *caller;
  const struct btrace_insn *insn;
  struct gdbarch *gdbarch;
  CORE_ADDR pc;
  int pcreg;

  /* The PC is the only register we can reconstruct from the trace.  */
  gdbarch = get_frame_arch (this_frame);
  pcreg = gdbarch_pc_regnum (gdbarch);
  if (pcreg < 0 || regnum != pcreg)
    throw_error (NOT_AVAILABLE_ERROR,
		 _("Registers are not available in btrace record history"));

  cache = *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  caller = bfun->up;
  if (caller == NULL)
    throw_error (NOT_AVAILABLE_ERROR,
		 _("No caller in btrace record history"));

  if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
    {
      /* The up link goes to the return location: the caller resumes at its
	 segment's first instruction.  */
      insn = VEC_index (btrace_insn_s, caller->insn, 0);
      pc = insn->pc;
    }
  else
    {
      /* The up link goes to the call site: the caller resumes after its
	 segment's last instruction.  */
      insn = VEC_last (btrace_insn_s, caller->insn);
      pc = insn->pc;

      pc += gdb_insn_length (gdbarch, pc);
    }

  DEBUG ("[frame] unwound PC in %s on level %d: %s",
	 btrace_get_bfun_name (bfun), bfun->level,
	 core_addr_to_string_nz (pc));

  return frame_unwind_got_address (this_frame, regnum, pc);
}
1162
1163 /* Implement sniffer method for record_btrace_frame_unwind. */
1164
static int
record_btrace_frame_sniffer (const struct frame_unwind *self,
			     struct frame_info *this_frame,
			     void **this_cache)
{
  const struct btrace_function *bfun;
  struct btrace_frame_cache *cache;
  struct thread_info *tp;
  struct frame_info *next;

  /* THIS_FRAME does not contain a reference to its thread.  */
  tp = find_thread_ptid (inferior_ptid);
  gdb_assert (tp != NULL);

  bfun = NULL;
  next = get_next_frame (this_frame);
  if (next == NULL)
    {
      /* Innermost frame: it is ours only if the thread is replaying, in
	 which case it shows the current replay position.  */
      const struct btrace_insn_iterator *replay;

      replay = tp->btrace.replay;
      if (replay != NULL)
	bfun = replay->function;
    }
  else
    {
      /* Outer frame: it is ours if the next (inner) frame is one of ours
	 and its up link is a regular call (tail calls are handled by the
	 tailcall sniffer below).  */
      const struct btrace_function *callee;

      callee = btrace_get_frame_function (next);
      if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
	bfun = callee->up;
    }

  if (bfun == NULL)
    return 0;

  DEBUG ("[frame] sniffed frame for %s on level %d",
	 btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = tp;
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}
1212
1213 /* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1214
static int
record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
				      struct frame_info *this_frame,
				      void **this_cache)
{
  const struct btrace_function *bfun, *callee;
  struct btrace_frame_cache *cache;
  struct frame_info *next;

  /* A tail-call frame is never innermost.  */
  next = get_next_frame (this_frame);
  if (next == NULL)
    return 0;

  /* The next (inner) frame must be ours and must link to its caller via a
     tail call.  */
  callee = btrace_get_frame_function (next);
  if (callee == NULL)
    return 0;

  if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
    return 0;

  bfun = callee->up;
  if (bfun == NULL)
    return 0;

  DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
	 btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = find_thread_ptid (inferior_ptid);
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}
1250
1251 static void
1252 record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
1253 {
1254 struct btrace_frame_cache *cache;
1255 void **slot;
1256
1257 cache = this_cache;
1258
1259 slot = htab_find_slot (bfcache, cache, NO_INSERT);
1260 gdb_assert (slot != NULL);
1261
1262 htab_remove_elt (bfcache, cache);
1263 }
1264
/* btrace recording does not store previous memory content, nor the stack
   frames' content.  Any unwinding would return erroneous results as the
   stack contents no longer match the changed PC value restored from
   history.  Therefore this unwinder reports any possibly unwound
   registers as <unavailable>.  */

const struct frame_unwind record_btrace_frame_unwind =
{
  NORMAL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,				/* unwind_data */
  record_btrace_frame_sniffer,
  record_btrace_frame_dealloc_cache
};
1281
/* Like record_btrace_frame_unwind, but for frames entered via a tail
   call; see the comment on record_btrace_frame_unwind for why unwound
   registers are reported as <unavailable>.  */

const struct frame_unwind record_btrace_tailcall_frame_unwind =
{
  TAILCALL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,				/* unwind_data */
  record_btrace_tailcall_frame_sniffer,
  record_btrace_frame_dealloc_cache
};
1292
1293 /* The to_resume method of target record-btrace. */
1294
1295 static void
1296 record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
1297 enum gdb_signal signal)
1298 {
1299 /* As long as we're not replaying, just forward the request. */
1300 if (!record_btrace_is_replaying ())
1301 {
1302 for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
1303 if (ops->to_resume != NULL)
1304 return ops->to_resume (ops, ptid, step, signal);
1305
1306 error (_("Cannot find target for stepping."));
1307 }
1308
1309 error (_("You can't do this from here. Do 'record goto end', first."));
1310 }
1311
1312 /* The to_wait method of target record-btrace. */
1313
1314 static ptid_t
1315 record_btrace_wait (struct target_ops *ops, ptid_t ptid,
1316 struct target_waitstatus *status, int options)
1317 {
1318 /* As long as we're not replaying, just forward the request. */
1319 if (!record_btrace_is_replaying ())
1320 {
1321 for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
1322 if (ops->to_wait != NULL)
1323 return ops->to_wait (ops, ptid, status, options);
1324
1325 error (_("Cannot find target for waiting."));
1326 }
1327
1328 error (_("You can't do this from here. Do 'record goto end', first."));
1329 }
1330
1331 /* The to_find_new_threads method of target record-btrace. */
1332
1333 static void
1334 record_btrace_find_new_threads (struct target_ops *ops)
1335 {
1336 /* Don't expect new threads if we're replaying. */
1337 if (record_btrace_is_replaying ())
1338 return;
1339
1340 /* Forward the request. */
1341 for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
1342 if (ops->to_find_new_threads != NULL)
1343 {
1344 ops->to_find_new_threads (ops);
1345 break;
1346 }
1347 }
1348
1349 /* The to_thread_alive method of target record-btrace. */
1350
1351 static int
1352 record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
1353 {
1354 /* We don't add or remove threads during replay. */
1355 if (record_btrace_is_replaying ())
1356 return find_thread_ptid (ptid) != NULL;
1357
1358 /* Forward the request. */
1359 for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
1360 if (ops->to_thread_alive != NULL)
1361 return ops->to_thread_alive (ops, ptid);
1362
1363 return 0;
1364 }
1365
1366 /* Set the replay branch trace instruction iterator. If IT is NULL, replay
1367 is stopped. */
1368
1369 static void
1370 record_btrace_set_replay (struct thread_info *tp,
1371 const struct btrace_insn_iterator *it)
1372 {
1373 struct btrace_thread_info *btinfo;
1374
1375 btinfo = &tp->btrace;
1376
1377 if (it == NULL || it->function == NULL)
1378 {
1379 if (btinfo->replay == NULL)
1380 return;
1381
1382 xfree (btinfo->replay);
1383 btinfo->replay = NULL;
1384 }
1385 else
1386 {
1387 if (btinfo->replay == NULL)
1388 btinfo->replay = xmalloc (sizeof (*btinfo->replay));
1389 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
1390 return;
1391
1392 *btinfo->replay = *it;
1393 }
1394
1395 /* Clear the function call and instruction histories so we start anew
1396 from the new replay position. */
1397 xfree (btinfo->insn_history);
1398 xfree (btinfo->call_history);
1399
1400 btinfo->insn_history = NULL;
1401 btinfo->call_history = NULL;
1402
1403 registers_changed_ptid (tp->ptid);
1404 }
1405
1406 /* The to_goto_record_begin method of target record-btrace. */
1407
1408 static void
1409 record_btrace_goto_begin (void)
1410 {
1411 struct thread_info *tp;
1412 struct btrace_insn_iterator begin;
1413
1414 tp = require_btrace_thread ();
1415
1416 btrace_insn_begin (&begin, &tp->btrace);
1417 record_btrace_set_replay (tp, &begin);
1418
1419 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
1420 }
1421
1422 /* The to_goto_record_end method of target record-btrace. */
1423
1424 static void
1425 record_btrace_goto_end (void)
1426 {
1427 struct thread_info *tp;
1428
1429 tp = require_btrace_thread ();
1430
1431 record_btrace_set_replay (tp, NULL);
1432
1433 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
1434 }
1435
1436 /* The to_goto_record method of target record-btrace. */
1437
1438 static void
1439 record_btrace_goto (ULONGEST insn)
1440 {
1441 struct thread_info *tp;
1442 struct btrace_insn_iterator it;
1443 unsigned int number;
1444 int found;
1445
1446 number = insn;
1447
1448 /* Check for wrap-arounds. */
1449 if (number != insn)
1450 error (_("Instruction number out of range."));
1451
1452 tp = require_btrace_thread ();
1453
1454 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
1455 if (found == 0)
1456 error (_("No such instruction."));
1457
1458 record_btrace_set_replay (tp, &it);
1459
1460 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
1461 }
1462
/* Initialize the record-btrace target ops.  */

static void
init_record_btrace_ops (void)
{
  struct target_ops *ops;

  ops = &record_btrace_ops;
  /* Identification.  */
  ops->to_shortname = "record-btrace";
  ops->to_longname = "Branch tracing target";
  ops->to_doc = "Collect control-flow trace and provide the execution history.";
  /* Lifecycle.  */
  ops->to_open = record_btrace_open;
  ops->to_close = record_btrace_close;
  ops->to_detach = record_detach;
  ops->to_disconnect = record_disconnect;
  ops->to_mourn_inferior = record_mourn_inferior;
  ops->to_kill = record_kill;
  ops->to_create_inferior = find_default_create_inferior;
  /* Recording control and history browsing.  */
  ops->to_stop_recording = record_btrace_stop_recording;
  ops->to_info_record = record_btrace_info;
  ops->to_insn_history = record_btrace_insn_history;
  ops->to_insn_history_from = record_btrace_insn_history_from;
  ops->to_insn_history_range = record_btrace_insn_history_range;
  ops->to_call_history = record_btrace_call_history;
  ops->to_call_history_from = record_btrace_call_history_from;
  ops->to_call_history_range = record_btrace_call_history_range;
  ops->to_record_is_replaying = record_btrace_is_replaying;
  /* Memory, breakpoints, and registers while replaying.  */
  ops->to_xfer_partial = record_btrace_xfer_partial;
  ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
  ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
  ops->to_fetch_registers = record_btrace_fetch_registers;
  ops->to_store_registers = record_btrace_store_registers;
  ops->to_prepare_to_store = record_btrace_prepare_to_store;
  /* Unwinding through the recorded call stack.  */
  ops->to_get_unwinder = &record_btrace_frame_unwind;
  ops->to_get_tailcall_unwinder = &record_btrace_tailcall_frame_unwind;
  /* Execution control.  */
  ops->to_resume = record_btrace_resume;
  ops->to_wait = record_btrace_wait;
  ops->to_find_new_threads = record_btrace_find_new_threads;
  ops->to_thread_alive = record_btrace_thread_alive;
  /* Replay navigation.  */
  ops->to_goto_record_begin = record_btrace_goto_begin;
  ops->to_goto_record_end = record_btrace_goto_end;
  ops->to_goto_record = record_btrace_goto;
  ops->to_stratum = record_stratum;
  ops->to_magic = OPS_MAGIC;
}
1508
1509 /* Alias for "target record". */
1510
1511 static void
1512 cmd_record_btrace_start (char *args, int from_tty)
1513 {
1514 if (args != NULL && *args != 0)
1515 error (_("Invalid argument."));
1516
1517 execute_command ("target record-btrace", from_tty);
1518 }
1519
/* Provide a prototype to silence -Wmissing-prototypes.  */
void _initialize_record_btrace (void);

/* Initialize btrace commands.  */

void
_initialize_record_btrace (void)
{
  /* Register "record btrace" and its short alias "record b".  */
  add_cmd ("btrace", class_obscure, cmd_record_btrace_start,
	   _("Start branch trace recording."),
	   &record_cmdlist);
  add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);

  /* Set up and register the target; the ops must be initialized before
     add_target sees them.  */
  init_record_btrace_ops ();
  add_target (&record_btrace_ops);

  /* Create the global frame cache used by the btrace frame unwinders.
     Entries are not freed on removal (NULL del_f); see
     record_btrace_frame_dealloc_cache.  */
  bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
			       xcalloc, xfree);
}
This page took 0.060873 seconds and 4 git commands to generate.