Rename TARGET_XFER_E_UNAVAILABLE to TARGET_XFER_UNAVAILABLE
1 /* Branch trace support for GDB, the GNU debugger.
2
3 Copyright (C) 2013-2014 Free Software Foundation, Inc.
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include "record.h"
24 #include "gdbthread.h"
25 #include "target.h"
26 #include "gdbcmd.h"
27 #include "disasm.h"
28 #include "observer.h"
29 #include "exceptions.h"
30 #include "cli/cli-utils.h"
31 #include "source.h"
32 #include "ui-out.h"
33 #include "symtab.h"
34 #include "filenames.h"
35 #include "regcache.h"
36 #include "frame-unwind.h"
37 #include "hashtab.h"
38
39 /* The target_ops of record-btrace. */
40 static struct target_ops record_btrace_ops;
41
42 /* A new thread observer enabling branch tracing for the new thread. */
43 static struct observer *record_btrace_thread_observer;
44
45 /* Temporarily allow memory accesses. */
46 static int record_btrace_allow_memory_access;
47
48 /* Print a record-btrace debug message. Use do ... while (0) to avoid
49 ambiguities when used in if statements. */
50
51 #define DEBUG(msg, args...) \
52 do \
53 { \
54 if (record_debug != 0) \
55 fprintf_unfiltered (gdb_stdlog, \
56 "[record-btrace] " msg "\n", ##args); \
57 } \
58 while (0)
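
/* For example,

     DEBUG ("resume %s: %s", target_pid_to_str (ptid), step ? "step" : "cont");

   expands to a single fprintf_unfiltered call on gdb_stdlog with the
   "[record-btrace] " prefix, and prints only once record debugging has
   been enabled via "set debug record 1". */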
59
60
61 /* Update the branch trace for the current thread and return a pointer to its
62 thread_info.
63
64 Throws an error if there is no thread or no trace. This function never
65 returns NULL. */
66
67 static struct thread_info *
68 require_btrace_thread (void)
69 {
70 struct thread_info *tp;
71
72 DEBUG ("require");
73
74 tp = find_thread_ptid (inferior_ptid);
75 if (tp == NULL)
76 error (_("No thread."));
77
78 btrace_fetch (tp);
79
80 if (btrace_is_empty (tp))
81 error (_("No trace."));
82
83 return tp;
84 }
85
86 /* Update the branch trace for the current thread and return a pointer to its
87 branch trace information struct.
88
89 Throws an error if there is no thread or no trace. This function never
90 returns NULL. */
91
92 static struct btrace_thread_info *
93 require_btrace (void)
94 {
95 struct thread_info *tp;
96
97 tp = require_btrace_thread ();
98
99 return &tp->btrace;
100 }
101
102 /* Enable branch tracing for one thread. Warn on errors. */
103
104 static void
105 record_btrace_enable_warn (struct thread_info *tp)
106 {
107 volatile struct gdb_exception error;
108
109 TRY_CATCH (error, RETURN_MASK_ERROR)
110 btrace_enable (tp);
111
112 if (error.message != NULL)
113 warning ("%s", error.message);
114 }
115
116 /* Callback function to disable branch tracing for one thread. */
117
118 static void
119 record_btrace_disable_callback (void *arg)
120 {
121 struct thread_info *tp;
122
123 tp = arg;
124
125 btrace_disable (tp);
126 }
127
128 /* Enable automatic tracing of new threads. */
129
130 static void
131 record_btrace_auto_enable (void)
132 {
133 DEBUG ("attach thread observer");
134
135 record_btrace_thread_observer
136 = observer_attach_new_thread (record_btrace_enable_warn);
137 }
138
139 /* Disable automatic tracing of new threads. */
140
141 static void
142 record_btrace_auto_disable (void)
143 {
144 /* The observer may already have been detached. */
145 if (record_btrace_thread_observer == NULL)
146 return;
147
148 DEBUG ("detach thread observer");
149
150 observer_detach_new_thread (record_btrace_thread_observer);
151 record_btrace_thread_observer = NULL;
152 }
153
154 /* The to_open method of target record-btrace. */
155
156 static void
157 record_btrace_open (char *args, int from_tty)
158 {
159 struct cleanup *disable_chain;
160 struct thread_info *tp;
161
162 DEBUG ("open");
163
164 record_preopen ();
165
166 if (!target_has_execution)
167 error (_("The program is not being run."));
168
169 if (!target_supports_btrace ())
170 error (_("Target does not support branch tracing."));
171
172 if (non_stop)
173 error (_("Record btrace can't debug inferior in non-stop mode."));
174
175 gdb_assert (record_btrace_thread_observer == NULL);
176
177 disable_chain = make_cleanup (null_cleanup, NULL);
178 ALL_THREADS (tp)
179 if (args == NULL || *args == 0 || number_is_in_list (args, tp->num))
180 {
181 btrace_enable (tp);
182
183 make_cleanup (record_btrace_disable_callback, tp);
184 }
185
186 record_btrace_auto_enable ();
187
188 push_target (&record_btrace_ops);
189
190 observer_notify_record_changed (current_inferior (), 1);
191
192 discard_cleanups (disable_chain);
193 }
194
195 /* The to_stop_recording method of target record-btrace. */
196
197 static void
198 record_btrace_stop_recording (struct target_ops *self)
199 {
200 struct thread_info *tp;
201
202 DEBUG ("stop recording");
203
204 record_btrace_auto_disable ();
205
206 ALL_THREADS (tp)
207 if (tp->btrace.target != NULL)
208 btrace_disable (tp);
209 }
210
211 /* The to_close method of target record-btrace. */
212
213 static void
214 record_btrace_close (struct target_ops *self)
215 {
216 struct thread_info *tp;
217
218 /* Make sure automatic recording gets disabled even if we did not stop
219 recording before closing the record-btrace target. */
220 record_btrace_auto_disable ();
221
222 /* We should have already stopped recording.
223 Tear down btrace in case we have not. */
224 ALL_THREADS (tp)
225 btrace_teardown (tp);
226 }
227
228 /* The to_info_record method of target record-btrace. */
229
230 static void
231 record_btrace_info (struct target_ops *self)
232 {
233 struct btrace_thread_info *btinfo;
234 struct thread_info *tp;
235 unsigned int insns, calls;
236
237 DEBUG ("info");
238
239 tp = find_thread_ptid (inferior_ptid);
240 if (tp == NULL)
241 error (_("No thread."));
242
243 btrace_fetch (tp);
244
245 insns = 0;
246 calls = 0;
247
248 btinfo = &tp->btrace;
249
250 if (!btrace_is_empty (tp))
251 {
252 struct btrace_call_iterator call;
253 struct btrace_insn_iterator insn;
254
255 btrace_call_end (&call, btinfo);
256 btrace_call_prev (&call, 1);
257 calls = btrace_call_number (&call);
258
259 btrace_insn_end (&insn, btinfo);
260 btrace_insn_prev (&insn, 1);
261 insns = btrace_insn_number (&insn);
262 }
263
264 printf_unfiltered (_("Recorded %u instructions in %u functions for thread "
265 "%d (%s).\n"), insns, calls, tp->num,
266 target_pid_to_str (tp->ptid));
267
268 if (btrace_is_replaying (tp))
269 printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
270 btrace_insn_number (btinfo->replay));
271 }
272
273 /* Print an unsigned int. */
274
275 static void
276 ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
277 {
278 ui_out_field_fmt (uiout, fld, "%u", val);
279 }
280
281 /* Disassemble a section of the recorded instruction trace. */
282
283 static void
284 btrace_insn_history (struct ui_out *uiout,
285 const struct btrace_insn_iterator *begin,
286 const struct btrace_insn_iterator *end, int flags)
287 {
288 struct gdbarch *gdbarch;
289 struct btrace_insn_iterator it;
290
291 DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
292 btrace_insn_number (end));
293
294 gdbarch = target_gdbarch ();
295
296 for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
297 {
298 const struct btrace_insn *insn;
299
300 insn = btrace_insn_get (&it);
301
302 /* Print the instruction index. */
303 ui_out_field_uint (uiout, "index", btrace_insn_number (&it));
304 ui_out_text (uiout, "\t");
305
306 /* Disassembly with '/m' flag may not produce the expected result.
307 See PR gdb/11833. */
308 gdb_disassembly (gdbarch, uiout, NULL, flags, 1, insn->pc, insn->pc + 1);
309 }
310 }
311
312 /* The to_insn_history method of target record-btrace. */
313
314 static void
315 record_btrace_insn_history (struct target_ops *self, int size, int flags)
316 {
317 struct btrace_thread_info *btinfo;
318 struct btrace_insn_history *history;
319 struct btrace_insn_iterator begin, end;
320 struct cleanup *uiout_cleanup;
321 struct ui_out *uiout;
322 unsigned int context, covered;
323
324 uiout = current_uiout;
325 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
326 "insn history");
327 context = abs (size);
328 if (context == 0)
329 error (_("Bad record instruction-history-size."));
330
331 btinfo = require_btrace ();
332 history = btinfo->insn_history;
333 if (history == NULL)
334 {
335 struct btrace_insn_iterator *replay;
336
337 DEBUG ("insn-history (0x%x): %d", flags, size);
338
339 /* If we're replaying, we start at the replay position. Otherwise, we
340 start at the tail of the trace. */
341 replay = btinfo->replay;
342 if (replay != NULL)
343 begin = *replay;
344 else
345 btrace_insn_end (&begin, btinfo);
346
347 /* We start from here and expand in the requested direction. Then we
348 expand in the other direction, as well, to fill up any remaining
349 context. */
350 end = begin;
351 if (size < 0)
352 {
353 /* We want the current position covered, as well. */
354 covered = btrace_insn_next (&end, 1);
355 covered += btrace_insn_prev (&begin, context - covered);
356 covered += btrace_insn_next (&end, context - covered);
357 }
358 else
359 {
360 covered = btrace_insn_next (&end, context);
361 covered += btrace_insn_prev (&begin, context - covered);
362 }
363 }
364 else
365 {
366 begin = history->begin;
367 end = history->end;
368
369 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
370 btrace_insn_number (&begin), btrace_insn_number (&end));
371
372 if (size < 0)
373 {
374 end = begin;
375 covered = btrace_insn_prev (&begin, context);
376 }
377 else
378 {
379 begin = end;
380 covered = btrace_insn_next (&end, context);
381 }
382 }
383
384 if (covered > 0)
385 btrace_insn_history (uiout, &begin, &end, flags);
386 else
387 {
388 if (size < 0)
389 printf_unfiltered (_("At the start of the branch trace record.\n"));
390 else
391 printf_unfiltered (_("At the end of the branch trace record.\n"));
392 }
393
394 btrace_set_insn_history (btinfo, &begin, &end);
395 do_cleanups (uiout_cleanup);
396 }
397
398 /* The to_insn_history_range method of target record-btrace. */
399
400 static void
401 record_btrace_insn_history_range (struct target_ops *self,
402 ULONGEST from, ULONGEST to, int flags)
403 {
404 struct btrace_thread_info *btinfo;
405 struct btrace_insn_history *history;
406 struct btrace_insn_iterator begin, end;
407 struct cleanup *uiout_cleanup;
408 struct ui_out *uiout;
409 unsigned int low, high;
410 int found;
411
412 uiout = current_uiout;
413 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
414 "insn history");
415 low = from;
416 high = to;
417
418 DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);
419
420 /* Check for wrap-arounds. */
421 if (low != from || high != to)
422 error (_("Bad range."));
423
424 if (high < low)
425 error (_("Bad range."));
426
427 btinfo = require_btrace ();
428
429 found = btrace_find_insn_by_number (&begin, btinfo, low);
430 if (found == 0)
431 error (_("Range out of bounds."));
432
433 found = btrace_find_insn_by_number (&end, btinfo, high);
434 if (found == 0)
435 {
436 /* Silently truncate the range. */
437 btrace_insn_end (&end, btinfo);
438 }
439 else
440 {
441 /* We want both begin and end to be inclusive. */
442 btrace_insn_next (&end, 1);
443 }
444
445 btrace_insn_history (uiout, &begin, &end, flags);
446 btrace_set_insn_history (btinfo, &begin, &end);
447
448 do_cleanups (uiout_cleanup);
449 }
450
451 /* The to_insn_history_from method of target record-btrace. */
452
453 static void
454 record_btrace_insn_history_from (struct target_ops *self,
455 ULONGEST from, int size, int flags)
456 {
457 ULONGEST begin, end, context;
458
459 context = abs (size);
460 if (context == 0)
461 error (_("Bad record instruction-history-size."));
462
463 if (size < 0)
464 {
465 end = from;
466
467 if (from < context)
468 begin = 0;
469 else
470 begin = from - context + 1;
471 }
472 else
473 {
474 begin = from;
475 end = from + context - 1;
476
477 /* Check for wrap-around. */
478 if (end < begin)
479 end = ULONGEST_MAX;
480 }
481
482 record_btrace_insn_history_range (self, begin, end, flags);
483 }
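
/* As an illustration of the arithmetic above: with FROM == 100 and
   SIZE == -10, the requested range is [91; 100]; with SIZE == 10, it is
   [100; 109].  Both ranges are inclusive and get clipped to the available
   trace by record_btrace_insn_history_range. */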
484
485 /* Print the instruction number range for a function call history line. */
486
487 static void
488 btrace_call_history_insn_range (struct ui_out *uiout,
489 const struct btrace_function *bfun)
490 {
491 unsigned int begin, end, size;
492
493 size = VEC_length (btrace_insn_s, bfun->insn);
494 gdb_assert (size > 0);
495
496 begin = bfun->insn_offset;
497 end = begin + size - 1;
498
499 ui_out_field_uint (uiout, "insn begin", begin);
500 ui_out_text (uiout, ",");
501 ui_out_field_uint (uiout, "insn end", end);
502 }
503
504 /* Print the source line information for a function call history line. */
505
506 static void
507 btrace_call_history_src_line (struct ui_out *uiout,
508 const struct btrace_function *bfun)
509 {
510 struct symbol *sym;
511 int begin, end;
512
513 sym = bfun->sym;
514 if (sym == NULL)
515 return;
516
517 ui_out_field_string (uiout, "file",
518 symtab_to_filename_for_display (sym->symtab));
519
520 begin = bfun->lbegin;
521 end = bfun->lend;
522
523 if (end < begin)
524 return;
525
526 ui_out_text (uiout, ":");
527 ui_out_field_int (uiout, "min line", begin);
528
529 if (end == begin)
530 return;
531
532 ui_out_text (uiout, ",");
533 ui_out_field_int (uiout, "max line", end);
534 }
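
/* The output produced above reads, e.g., "fib.c:7,13" for a segment
   spanning source lines 7 to 13, or just "fib.c:7" when the segment covers
   a single line (file name made up for illustration). */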
535
536 /* Get the name of a branch trace function. */
537
538 static const char *
539 btrace_get_bfun_name (const struct btrace_function *bfun)
540 {
541 struct minimal_symbol *msym;
542 struct symbol *sym;
543
544 if (bfun == NULL)
545 return "??";
546
547 msym = bfun->msym;
548 sym = bfun->sym;
549
550 if (sym != NULL)
551 return SYMBOL_PRINT_NAME (sym);
552 else if (msym != NULL)
553 return SYMBOL_PRINT_NAME (msym);
554 else
555 return "??";
556 }
557
558 /* Print a section of the recorded function call trace. */
559
560 static void
561 btrace_call_history (struct ui_out *uiout,
562 const struct btrace_thread_info *btinfo,
563 const struct btrace_call_iterator *begin,
564 const struct btrace_call_iterator *end,
565 enum record_print_flag flags)
566 {
567 struct btrace_call_iterator it;
568
569 DEBUG ("ftrace (0x%x): [%u; %u)", flags, btrace_call_number (begin),
570 btrace_call_number (end));
571
572 for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
573 {
574 const struct btrace_function *bfun;
575 struct minimal_symbol *msym;
576 struct symbol *sym;
577
578 bfun = btrace_call_get (&it);
579 sym = bfun->sym;
580 msym = bfun->msym;
581
582 /* Print the function index. */
583 ui_out_field_uint (uiout, "index", bfun->number);
584 ui_out_text (uiout, "\t");
585
586 if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
587 {
588 int level = bfun->level + btinfo->level, i;
589
590 for (i = 0; i < level; ++i)
591 ui_out_text (uiout, " ");
592 }
593
594 if (sym != NULL)
595 ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (sym));
596 else if (msym != NULL)
597 ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (msym));
598 else if (!ui_out_is_mi_like_p (uiout))
599 ui_out_field_string (uiout, "function", "??");
600
601 if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
602 {
603 ui_out_text (uiout, _("\tinst "));
604 btrace_call_history_insn_range (uiout, bfun);
605 }
606
607 if ((flags & RECORD_PRINT_SRC_LINE) != 0)
608 {
609 ui_out_text (uiout, _("\tat "));
610 btrace_call_history_src_line (uiout, bfun);
611 }
612
613 ui_out_text (uiout, "\n");
614 }
615 }
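
/* With both RECORD_PRINT_INSN_RANGE and RECORD_PRINT_SRC_LINE set, a
   typical line of output produced by the loop above would look like

     13    fib    inst 105,164    at fib.c:7,13

   where the index, name, ranges, and file are made up for illustration. */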
616
617 /* The to_call_history method of target record-btrace. */
618
619 static void
620 record_btrace_call_history (struct target_ops *self, int size, int flags)
621 {
622 struct btrace_thread_info *btinfo;
623 struct btrace_call_history *history;
624 struct btrace_call_iterator begin, end;
625 struct cleanup *uiout_cleanup;
626 struct ui_out *uiout;
627 unsigned int context, covered;
628
629 uiout = current_uiout;
630 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
631 "insn history");
632 context = abs (size);
633 if (context == 0)
634 error (_("Bad record function-call-history-size."));
635
636 btinfo = require_btrace ();
637 history = btinfo->call_history;
638 if (history == NULL)
639 {
640 struct btrace_insn_iterator *replay;
641
642 DEBUG ("call-history (0x%x): %d", flags, size);
643
644 /* If we're replaying, we start at the replay position. Otherwise, we
645 start at the tail of the trace. */
646 replay = btinfo->replay;
647 if (replay != NULL)
648 {
649 begin.function = replay->function;
650 begin.btinfo = btinfo;
651 }
652 else
653 btrace_call_end (&begin, btinfo);
654
655 /* We start from here and expand in the requested direction. Then we
656 expand in the other direction, as well, to fill up any remaining
657 context. */
658 end = begin;
659 if (size < 0)
660 {
661 /* We want the current position covered, as well. */
662 covered = btrace_call_next (&end, 1);
663 covered += btrace_call_prev (&begin, context - covered);
664 covered += btrace_call_next (&end, context - covered);
665 }
666 else
667 {
668 covered = btrace_call_next (&end, context);
669 covered += btrace_call_prev (&begin, context - covered);
670 }
671 }
672 else
673 {
674 begin = history->begin;
675 end = history->end;
676
677 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", flags, size,
678 btrace_call_number (&begin), btrace_call_number (&end));
679
680 if (size < 0)
681 {
682 end = begin;
683 covered = btrace_call_prev (&begin, context);
684 }
685 else
686 {
687 begin = end;
688 covered = btrace_call_next (&end, context);
689 }
690 }
691
692 if (covered > 0)
693 btrace_call_history (uiout, btinfo, &begin, &end, flags);
694 else
695 {
696 if (size < 0)
697 printf_unfiltered (_("At the start of the branch trace record.\n"));
698 else
699 printf_unfiltered (_("At the end of the branch trace record.\n"));
700 }
701
702 btrace_set_call_history (btinfo, &begin, &end);
703 do_cleanups (uiout_cleanup);
704 }
705
706 /* The to_call_history_range method of target record-btrace. */
707
708 static void
709 record_btrace_call_history_range (struct target_ops *self,
710 ULONGEST from, ULONGEST to, int flags)
711 {
712 struct btrace_thread_info *btinfo;
713 struct btrace_call_history *history;
714 struct btrace_call_iterator begin, end;
715 struct cleanup *uiout_cleanup;
716 struct ui_out *uiout;
717 unsigned int low, high;
718 int found;
719
720 uiout = current_uiout;
721 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
722 "func history");
723 low = from;
724 high = to;
725
726 DEBUG ("call-history (0x%x): [%u; %u)", flags, low, high);
727
728 /* Check for wrap-arounds. */
729 if (low != from || high != to)
730 error (_("Bad range."));
731
732 if (high < low)
733 error (_("Bad range."));
734
735 btinfo = require_btrace ();
736
737 found = btrace_find_call_by_number (&begin, btinfo, low);
738 if (found == 0)
739 error (_("Range out of bounds."));
740
741 found = btrace_find_call_by_number (&end, btinfo, high);
742 if (found == 0)
743 {
744 /* Silently truncate the range. */
745 btrace_call_end (&end, btinfo);
746 }
747 else
748 {
749 /* We want both begin and end to be inclusive. */
750 btrace_call_next (&end, 1);
751 }
752
753 btrace_call_history (uiout, btinfo, &begin, &end, flags);
754 btrace_set_call_history (btinfo, &begin, &end);
755
756 do_cleanups (uiout_cleanup);
757 }
758
759 /* The to_call_history_from method of target record-btrace. */
760
761 static void
762 record_btrace_call_history_from (struct target_ops *self,
763 ULONGEST from, int size, int flags)
764 {
765 ULONGEST begin, end, context;
766
767 context = abs (size);
768 if (context == 0)
769 error (_("Bad record function-call-history-size."));
770
771 if (size < 0)
772 {
773 end = from;
774
775 if (from < context)
776 begin = 0;
777 else
778 begin = from - context + 1;
779 }
780 else
781 {
782 begin = from;
783 end = from + context - 1;
784
785 /* Check for wrap-around. */
786 if (end < begin)
787 end = ULONGEST_MAX;
788 }
789
790 record_btrace_call_history_range (self, begin, end, flags);
791 }
792
793 /* The to_record_is_replaying method of target record-btrace. */
794
795 static int
796 record_btrace_is_replaying (struct target_ops *self)
797 {
798 struct thread_info *tp;
799
800 ALL_THREADS (tp)
801 if (btrace_is_replaying (tp))
802 return 1;
803
804 return 0;
805 }
806
807 /* The to_xfer_partial method of target record-btrace. */
808
809 static enum target_xfer_status
810 record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
811 const char *annex, gdb_byte *readbuf,
812 const gdb_byte *writebuf, ULONGEST offset,
813 ULONGEST len, ULONGEST *xfered_len)
814 {
815 struct target_ops *t;
816
817 /* Filter out requests that don't make sense during replay. */
818 if (!record_btrace_allow_memory_access && record_btrace_is_replaying (ops))
819 {
820 switch (object)
821 {
822 case TARGET_OBJECT_MEMORY:
823 {
824 struct target_section *section;
825
826 /* We do not allow writing memory in general. */
827 if (writebuf != NULL)
828 {
829 *xfered_len = len;
830 return TARGET_XFER_UNAVAILABLE;
831 }
832
833 /* We allow reading readonly memory. */
834 section = target_section_by_addr (ops, offset);
835 if (section != NULL)
836 {
837 /* Check if the section we found is readonly. */
838 if ((bfd_get_section_flags (section->the_bfd_section->owner,
839 section->the_bfd_section)
840 & SEC_READONLY) != 0)
841 {
842 /* Truncate the request to fit into this section. */
843 len = min (len, section->endaddr - offset);
844 break;
845 }
846 }
847
848 *xfered_len = len;
849 return TARGET_XFER_UNAVAILABLE;
850 }
851 }
852 }
853
854 /* Forward the request. */
855 for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
856 if (ops->to_xfer_partial != NULL)
857 return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
858 offset, len, xfered_len);
859
860 *xfered_len = len;
861 return TARGET_XFER_UNAVAILABLE;
862 }
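
/* As a consequence, while replaying and with memory accesses disallowed, a
   write request such as

     target_write_memory (addr, buf, len);

   fails with TARGET_XFER_UNAVAILABLE, whereas a read request is forwarded
   to the target beneath only if it hits a SEC_READONLY section. */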
863
864 /* The to_insert_breakpoint method of target record-btrace. */
865
866 static int
867 record_btrace_insert_breakpoint (struct target_ops *ops,
868 struct gdbarch *gdbarch,
869 struct bp_target_info *bp_tgt)
870 {
871 volatile struct gdb_exception except;
872 int old, ret;
873
874 /* Inserting breakpoints requires accessing memory. Allow it for the
875 duration of this function. */
876 old = record_btrace_allow_memory_access;
877 record_btrace_allow_memory_access = 1;
878
879 ret = 0;
880 TRY_CATCH (except, RETURN_MASK_ALL)
881 ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);
882
883 record_btrace_allow_memory_access = old;
884
885 if (except.reason < 0)
886 throw_exception (except);
887
888 return ret;
889 }
890
891 /* The to_remove_breakpoint method of target record-btrace. */
892
893 static int
894 record_btrace_remove_breakpoint (struct target_ops *ops,
895 struct gdbarch *gdbarch,
896 struct bp_target_info *bp_tgt)
897 {
898 volatile struct gdb_exception except;
899 int old, ret;
900
901 /* Removing breakpoints requires accessing memory. Allow it for the
902 duration of this function. */
903 old = record_btrace_allow_memory_access;
904 record_btrace_allow_memory_access = 1;
905
906 ret = 0;
907 TRY_CATCH (except, RETURN_MASK_ALL)
908 ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt);
909
910 record_btrace_allow_memory_access = old;
911
912 if (except.reason < 0)
913 throw_exception (except);
914
915 return ret;
916 }
917
918 /* The to_fetch_registers method of target record-btrace. */
919
920 static void
921 record_btrace_fetch_registers (struct target_ops *ops,
922 struct regcache *regcache, int regno)
923 {
924 struct btrace_insn_iterator *replay;
925 struct thread_info *tp;
926
927 tp = find_thread_ptid (inferior_ptid);
928 gdb_assert (tp != NULL);
929
930 replay = tp->btrace.replay;
931 if (replay != NULL)
932 {
933 const struct btrace_insn *insn;
934 struct gdbarch *gdbarch;
935 int pcreg;
936
937 gdbarch = get_regcache_arch (regcache);
938 pcreg = gdbarch_pc_regnum (gdbarch);
939 if (pcreg < 0)
940 return;
941
942 /* We can only provide the PC register. */
943 if (regno >= 0 && regno != pcreg)
944 return;
945
946 insn = btrace_insn_get (replay);
947 gdb_assert (insn != NULL);
948
949 regcache_raw_supply (regcache, regno, &insn->pc);
950 }
951 else
952 {
953 struct target_ops *t;
954
955 for (t = ops->beneath; t != NULL; t = t->beneath)
956 if (t->to_fetch_registers != NULL)
957 {
958 t->to_fetch_registers (t, regcache, regno);
959 break;
960 }
961 }
962 }
963
964 /* The to_store_registers method of target record-btrace. */
965
966 static void
967 record_btrace_store_registers (struct target_ops *ops,
968 struct regcache *regcache, int regno)
969 {
970 struct target_ops *t;
971
972 if (record_btrace_is_replaying (ops))
973 error (_("This record target does not allow writing registers."));
974
975 gdb_assert (may_write_registers != 0);
976
977 for (t = ops->beneath; t != NULL; t = t->beneath)
978 if (t->to_store_registers != NULL)
979 {
980 t->to_store_registers (t, regcache, regno);
981 return;
982 }
983
984 noprocess ();
985 }
986
987 /* The to_prepare_to_store method of target record-btrace. */
988
989 static void
990 record_btrace_prepare_to_store (struct target_ops *ops,
991 struct regcache *regcache)
992 {
993 struct target_ops *t;
994
995 if (record_btrace_is_replaying (ops))
996 return;
997
998 for (t = ops->beneath; t != NULL; t = t->beneath)
999 if (t->to_prepare_to_store != NULL)
1000 {
1001 t->to_prepare_to_store (t, regcache);
1002 return;
1003 }
1004 }
1005
1006 /* The branch trace frame cache. */
1007
1008 struct btrace_frame_cache
1009 {
1010 /* The thread. */
1011 struct thread_info *tp;
1012
1013 /* The frame info. */
1014 struct frame_info *frame;
1015
1016 /* The branch trace function segment. */
1017 const struct btrace_function *bfun;
1018 };
1019
1020 /* A struct btrace_frame_cache hash table indexed by the frame pointer. */
1021
1022 static htab_t bfcache;
1023
1024 /* hash_f for htab_create_alloc of bfcache. */
1025
1026 static hashval_t
1027 bfcache_hash (const void *arg)
1028 {
1029 const struct btrace_frame_cache *cache = arg;
1030
1031 return htab_hash_pointer (cache->frame);
1032 }
1033
1034 /* eq_f for htab_create_alloc of bfcache. */
1035
1036 static int
1037 bfcache_eq (const void *arg1, const void *arg2)
1038 {
1039 const struct btrace_frame_cache *cache1 = arg1;
1040 const struct btrace_frame_cache *cache2 = arg2;
1041
1042 return cache1->frame == cache2->frame;
1043 }
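
/* Since both the hash and the equality function consider only the frame
   pointer, a lookup needs no more than a stack-allocated pattern with
   FRAME filled in:

     struct btrace_frame_cache pattern;
     pattern.frame = frame;
     slot = htab_find_slot (bfcache, &pattern, NO_INSERT);

   This is how btrace_get_frame_function below retrieves a cache entry. */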
1044
1045 /* Create a new btrace frame cache. */
1046
1047 static struct btrace_frame_cache *
1048 bfcache_new (struct frame_info *frame)
1049 {
1050 struct btrace_frame_cache *cache;
1051 void **slot;
1052
1053 cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
1054 cache->frame = frame;
1055
1056 slot = htab_find_slot (bfcache, cache, INSERT);
1057 gdb_assert (*slot == NULL);
1058 *slot = cache;
1059
1060 return cache;
1061 }
1062
1063 /* Extract the branch trace function from a branch trace frame. */
1064
1065 static const struct btrace_function *
1066 btrace_get_frame_function (struct frame_info *frame)
1067 {
1068 const struct btrace_frame_cache *cache;
1069 const struct btrace_function *bfun;
1070 struct btrace_frame_cache pattern;
1071 void **slot;
1072
1073 pattern.frame = frame;
1074
1075 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1076 if (slot == NULL)
1077 return NULL;
1078
1079 cache = *slot;
1080 return cache->bfun;
1081 }
1082
1083 /* Implement stop_reason method for record_btrace_frame_unwind. */
1084
1085 static enum unwind_stop_reason
1086 record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1087 void **this_cache)
1088 {
1089 const struct btrace_frame_cache *cache;
1090 const struct btrace_function *bfun;
1091
1092 cache = *this_cache;
1093 bfun = cache->bfun;
1094 gdb_assert (bfun != NULL);
1095
1096 if (bfun->up == NULL)
1097 return UNWIND_UNAVAILABLE;
1098
1099 return UNWIND_NO_REASON;
1100 }
1101
1102 /* Implement this_id method for record_btrace_frame_unwind. */
1103
1104 static void
1105 record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
1106 struct frame_id *this_id)
1107 {
1108 const struct btrace_frame_cache *cache;
1109 const struct btrace_function *bfun;
1110 CORE_ADDR code, special;
1111
1112 cache = *this_cache;
1113
1114 bfun = cache->bfun;
1115 gdb_assert (bfun != NULL);
1116
1117 while (bfun->segment.prev != NULL)
1118 bfun = bfun->segment.prev;
1119
1120 code = get_frame_func (this_frame);
1121 special = bfun->number;
1122
1123 *this_id = frame_id_build_unavailable_stack_special (code, special);
1124
1125 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1126 btrace_get_bfun_name (cache->bfun),
1127 core_addr_to_string_nz (this_id->code_addr),
1128 core_addr_to_string_nz (this_id->special_addr));
1129 }
1130
1131 /* Implement prev_register method for record_btrace_frame_unwind. */
1132
1133 static struct value *
1134 record_btrace_frame_prev_register (struct frame_info *this_frame,
1135 void **this_cache,
1136 int regnum)
1137 {
1138 const struct btrace_frame_cache *cache;
1139 const struct btrace_function *bfun, *caller;
1140 const struct btrace_insn *insn;
1141 struct gdbarch *gdbarch;
1142 CORE_ADDR pc;
1143 int pcreg;
1144
1145 gdbarch = get_frame_arch (this_frame);
1146 pcreg = gdbarch_pc_regnum (gdbarch);
1147 if (pcreg < 0 || regnum != pcreg)
1148 throw_error (NOT_AVAILABLE_ERROR,
1149 _("Registers are not available in btrace record history"));
1150
1151 cache = *this_cache;
1152 bfun = cache->bfun;
1153 gdb_assert (bfun != NULL);
1154
1155 caller = bfun->up;
1156 if (caller == NULL)
1157 throw_error (NOT_AVAILABLE_ERROR,
1158 _("No caller in btrace record history"));
1159
1160 if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
1161 {
1162 insn = VEC_index (btrace_insn_s, caller->insn, 0);
1163 pc = insn->pc;
1164 }
1165 else
1166 {
1167 insn = VEC_last (btrace_insn_s, caller->insn);
1168 pc = insn->pc;
1169
1170 pc += gdb_insn_length (gdbarch, pc);
1171 }
1172
1173 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1174 btrace_get_bfun_name (bfun), bfun->level,
1175 core_addr_to_string_nz (pc));
1176
1177 return frame_unwind_got_address (this_frame, regnum, pc);
1178 }
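
/* For a normal call link, the PC unwound above is the return address,
   i.e. the caller's call instruction plus its length; e.g. a 5-byte call
   at 0x4005d0 would yield 0x4005d5.  For a BFUN_UP_LINKS_TO_RET link, it
   is the first instruction of the segment we returned to.  (Addresses are
   made up for illustration.) */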
1179
1180 /* Implement sniffer method for record_btrace_frame_unwind. */
1181
1182 static int
1183 record_btrace_frame_sniffer (const struct frame_unwind *self,
1184 struct frame_info *this_frame,
1185 void **this_cache)
1186 {
1187 const struct btrace_function *bfun;
1188 struct btrace_frame_cache *cache;
1189 struct thread_info *tp;
1190 struct frame_info *next;
1191
1192 /* THIS_FRAME does not contain a reference to its thread. */
1193 tp = find_thread_ptid (inferior_ptid);
1194 gdb_assert (tp != NULL);
1195
1196 bfun = NULL;
1197 next = get_next_frame (this_frame);
1198 if (next == NULL)
1199 {
1200 const struct btrace_insn_iterator *replay;
1201
1202 replay = tp->btrace.replay;
1203 if (replay != NULL)
1204 bfun = replay->function;
1205 }
1206 else
1207 {
1208 const struct btrace_function *callee;
1209
1210 callee = btrace_get_frame_function (next);
1211 if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1212 bfun = callee->up;
1213 }
1214
1215 if (bfun == NULL)
1216 return 0;
1217
1218 DEBUG ("[frame] sniffed frame for %s on level %d",
1219 btrace_get_bfun_name (bfun), bfun->level);
1220
1221 /* This is our frame. Initialize the frame cache. */
1222 cache = bfcache_new (this_frame);
1223 cache->tp = tp;
1224 cache->bfun = bfun;
1225
1226 *this_cache = cache;
1227 return 1;
1228 }
1229
1230 /* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1231
1232 static int
1233 record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
1234 struct frame_info *this_frame,
1235 void **this_cache)
1236 {
1237 const struct btrace_function *bfun, *callee;
1238 struct btrace_frame_cache *cache;
1239 struct frame_info *next;
1240
1241 next = get_next_frame (this_frame);
1242 if (next == NULL)
1243 return 0;
1244
1245 callee = btrace_get_frame_function (next);
1246 if (callee == NULL)
1247 return 0;
1248
1249 if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1250 return 0;
1251
1252 bfun = callee->up;
1253 if (bfun == NULL)
1254 return 0;
1255
1256 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1257 btrace_get_bfun_name (bfun), bfun->level);
1258
1259 /* This is our frame. Initialize the frame cache. */
1260 cache = bfcache_new (this_frame);
1261 cache->tp = find_thread_ptid (inferior_ptid);
1262 cache->bfun = bfun;
1263
1264 *this_cache = cache;
1265 return 1;
1266 }
1267
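/* Implement the dealloc_cache method for record_btrace_frame_unwind. */
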
1268 static void
1269 record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
1270 {
1271 struct btrace_frame_cache *cache;
1272 void **slot;
1273
1274 cache = this_cache;
1275
1276 slot = htab_find_slot (bfcache, cache, NO_INSERT);
1277 gdb_assert (slot != NULL);
1278
1279 htab_remove_elt (bfcache, cache);
1280 }
1281
1282 /* Branch trace recording does not store previous memory content, nor the
1283 contents of the stack frames. Any unwinding would return erroneous
1284 results as the stack contents no longer match the changed PC value
1285 restored from history. Therefore this unwinder reports any possibly
1286 unwound registers as <unavailable>. */
1287
1288 const struct frame_unwind record_btrace_frame_unwind =
1289 {
1290 NORMAL_FRAME,
1291 record_btrace_frame_unwind_stop_reason,
1292 record_btrace_frame_this_id,
1293 record_btrace_frame_prev_register,
1294 NULL,
1295 record_btrace_frame_sniffer,
1296 record_btrace_frame_dealloc_cache
1297 };
1298
1299 const struct frame_unwind record_btrace_tailcall_frame_unwind =
1300 {
1301 TAILCALL_FRAME,
1302 record_btrace_frame_unwind_stop_reason,
1303 record_btrace_frame_this_id,
1304 record_btrace_frame_prev_register,
1305 NULL,
1306 record_btrace_tailcall_frame_sniffer,
1307 record_btrace_frame_dealloc_cache
1308 };
1309
1310 /* Implement the to_get_unwinder method. */
1311
1312 static const struct frame_unwind *
1313 record_btrace_to_get_unwinder (struct target_ops *self)
1314 {
1315 return &record_btrace_frame_unwind;
1316 }
1317
1318 /* Implement the to_get_tailcall_unwinder method. */
1319
1320 static const struct frame_unwind *
1321 record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
1322 {
1323 return &record_btrace_tailcall_frame_unwind;
1324 }
1325
1326 /* Indicate that TP should be resumed according to FLAG. */
1327
1328 static void
1329 record_btrace_resume_thread (struct thread_info *tp,
1330 enum btrace_thread_flag flag)
1331 {
1332 struct btrace_thread_info *btinfo;
1333
1334 DEBUG ("resuming %d (%s): %u", tp->num, target_pid_to_str (tp->ptid), flag);
1335
1336 btinfo = &tp->btrace;
1337
1338 if ((btinfo->flags & BTHR_MOVE) != 0)
1339 error (_("Thread already moving."));
1340
1341 /* Fetch the latest branch trace. */
1342 btrace_fetch (tp);
1343
1344 btinfo->flags |= flag;
1345 }
1346
1347 /* Find the thread to resume given a PTID. */
1348
1349 static struct thread_info *
1350 record_btrace_find_resume_thread (ptid_t ptid)
1351 {
1352 struct thread_info *tp;
1353
1354 /* When asked to resume everything, we pick the current thread. */
1355 if (ptid_equal (minus_one_ptid, ptid) || ptid_is_pid (ptid))
1356 ptid = inferior_ptid;
1357
1358 return find_thread_ptid (ptid);
1359 }
1360
1361 /* Start replaying a thread. */
1362
1363 static struct btrace_insn_iterator *
1364 record_btrace_start_replaying (struct thread_info *tp)
1365 {
1366 volatile struct gdb_exception except;
1367 struct btrace_insn_iterator *replay;
1368 struct btrace_thread_info *btinfo;
1369 int executing;
1370
1371 btinfo = &tp->btrace;
1372 replay = NULL;
1373
1374 /* We can't start replaying without trace. */
1375 if (btinfo->begin == NULL)
1376 return NULL;
1377
1378 /* Clear the executing flag to allow changes to the current frame.
1379 We are not actually running, yet. We just started a reverse execution
1380 command or a record goto command.
1381 For the latter, EXECUTING is false and this has no effect.
1382 For the former, EXECUTING is true and we're in to_wait, about to
1383 move the thread. Since we need to recompute the stack, we temporarily
1384 set EXECUTING to false. */
1385 executing = is_executing (tp->ptid);
1386 set_executing (tp->ptid, 0);
1387
1388 /* GDB stores the current frame_id when stepping in order to detect steps
1389 into subroutines.
1390 Since frames are computed differently when we're replaying, we need to
1391 recompute those stored frames and fix them up so we can still detect
1392 subroutines after we started replaying. */
1393 TRY_CATCH (except, RETURN_MASK_ALL)
1394 {
1395 struct frame_info *frame;
1396 struct frame_id frame_id;
1397 int upd_step_frame_id, upd_step_stack_frame_id;
1398
1399 /* The current frame without replaying - computed via normal unwind. */
1400 frame = get_current_frame ();
1401 frame_id = get_frame_id (frame);
1402
1403 /* Check if we need to update any stepping-related frame id's. */
1404 upd_step_frame_id = frame_id_eq (frame_id,
1405 tp->control.step_frame_id);
1406 upd_step_stack_frame_id = frame_id_eq (frame_id,
1407 tp->control.step_stack_frame_id);
1408
1409 /* We start replaying at the end of the branch trace. This corresponds
1410 to the current instruction. */
1411 replay = xmalloc (sizeof (*replay));
1412 btrace_insn_end (replay, btinfo);
1413
1414 /* We're not replaying, yet. */
1415 gdb_assert (btinfo->replay == NULL);
1416 btinfo->replay = replay;
1417
1418 /* Make sure we're not using any stale registers. */
1419 registers_changed_ptid (tp->ptid);
1420
1421 /* The current frame with replaying - computed via btrace unwind. */
1422 frame = get_current_frame ();
1423 frame_id = get_frame_id (frame);
1424
1425 /* Replace stepping related frames where necessary. */
1426 if (upd_step_frame_id)
1427 tp->control.step_frame_id = frame_id;
1428 if (upd_step_stack_frame_id)
1429 tp->control.step_stack_frame_id = frame_id;
1430 }
1431
1432 /* Restore the previous execution state. */
1433 set_executing (tp->ptid, executing);
1434
1435 if (except.reason < 0)
1436 {
1437 xfree (btinfo->replay);
1438 btinfo->replay = NULL;
1439
1440 registers_changed_ptid (tp->ptid);
1441
1442 throw_exception (except);
1443 }
1444
1445 return replay;
1446 }
1447
1448 /* Stop replaying a thread. */
1449
1450 static void
1451 record_btrace_stop_replaying (struct thread_info *tp)
1452 {
1453 struct btrace_thread_info *btinfo;
1454
1455 btinfo = &tp->btrace;
1456
1457 xfree (btinfo->replay);
1458 btinfo->replay = NULL;
1459
1460 /* Make sure we're not leaving any stale registers. */
1461 registers_changed_ptid (tp->ptid);
1462 }
1463
1464 /* The to_resume method of target record-btrace. */
1465
1466 static void
1467 record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
1468 enum gdb_signal signal)
1469 {
1470 struct thread_info *tp, *other;
1471 enum btrace_thread_flag flag;
1472
1473 DEBUG ("resume %s: %s", target_pid_to_str (ptid), step ? "step" : "cont");
1474
1475 tp = record_btrace_find_resume_thread (ptid);
1476 if (tp == NULL)
1477 error (_("Cannot find thread to resume."));
1478
1479 /* Stop replaying other threads if the thread to resume is not replaying. */
1480 if (!btrace_is_replaying (tp) && execution_direction != EXEC_REVERSE)
1481 ALL_THREADS (other)
1482 record_btrace_stop_replaying (other);
1483
1484 /* As long as we're not replaying, just forward the request. */
1485 if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
1486 {
1487 for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
1488 if (ops->to_resume != NULL)
1489 return ops->to_resume (ops, ptid, step, signal);
1490
1491 error (_("Cannot find target for stepping."));
1492 }
1493
1494 /* Compute the btrace thread flag for the requested move. */
1495 if (step == 0)
1496 flag = execution_direction == EXEC_REVERSE ? BTHR_RCONT : BTHR_CONT;
1497 else
1498 flag = execution_direction == EXEC_REVERSE ? BTHR_RSTEP : BTHR_STEP;
1499
1500 /* At the moment, we only move a single thread. We could also move
1501 all threads in parallel by single-stepping each resumed thread
1502 until the first runs into an event.
1503 When we do that, we would want to continue all other threads.
1504 For now, just resume one thread so as not to confuse to_wait. */
1505 record_btrace_resume_thread (tp, flag);
1506
1507 /* We just indicate the resume intent here. The actual stepping happens in
1508 record_btrace_wait below. */
1509 }
1510
1511 /* Find a thread to move. */
1512
1513 static struct thread_info *
1514 record_btrace_find_thread_to_move (ptid_t ptid)
1515 {
1516 struct thread_info *tp;
1517
1518 /* First check the parameter thread. */
1519 tp = find_thread_ptid (ptid);
1520 if (tp != NULL && (tp->btrace.flags & BTHR_MOVE) != 0)
1521 return tp;
1522
1523 /* Otherwise, find one other thread that has been resumed. */
1524 ALL_THREADS (tp)
1525 if ((tp->btrace.flags & BTHR_MOVE) != 0)
1526 return tp;
1527
1528 return NULL;
1529 }
1530
1531 /* Return a target_waitstatus indicating that we ran out of history. */
1532
1533 static struct target_waitstatus
1534 btrace_step_no_history (void)
1535 {
1536 struct target_waitstatus status;
1537
1538 status.kind = TARGET_WAITKIND_NO_HISTORY;
1539
1540 return status;
1541 }
1542
1543 /* Return a target_waitstatus indicating that a step finished. */
1544
1545 static struct target_waitstatus
1546 btrace_step_stopped (void)
1547 {
1548 struct target_waitstatus status;
1549
1550 status.kind = TARGET_WAITKIND_STOPPED;
1551 status.value.sig = GDB_SIGNAL_TRAP;
1552
1553 return status;
1554 }
1555
1556 /* Clear the record histories. */
1557
1558 static void
1559 record_btrace_clear_histories (struct btrace_thread_info *btinfo)
1560 {
1561 xfree (btinfo->insn_history);
1562 xfree (btinfo->call_history);
1563
1564 btinfo->insn_history = NULL;
1565 btinfo->call_history = NULL;
1566 }
1567
1568 /* Step a single thread. */
1569
1570 static struct target_waitstatus
1571 record_btrace_step_thread (struct thread_info *tp)
1572 {
1573 struct btrace_insn_iterator *replay, end;
1574 struct btrace_thread_info *btinfo;
1575 struct address_space *aspace;
1576 struct inferior *inf;
1577 enum btrace_thread_flag flags;
1578 unsigned int steps;
1579
1580 btinfo = &tp->btrace;
1581 replay = btinfo->replay;
1582
1583 flags = btinfo->flags & BTHR_MOVE;
1584 btinfo->flags &= ~BTHR_MOVE;
1585
1586 DEBUG ("stepping %d (%s): %u", tp->num, target_pid_to_str (tp->ptid), flags);
1587
1588 switch (flags)
1589 {
1590 default:
1591 internal_error (__FILE__, __LINE__, _("invalid stepping type."));
1592
1593 case BTHR_STEP:
1594 /* We're done if we're not replaying. */
1595 if (replay == NULL)
1596 return btrace_step_no_history ();
1597
1598 /* We are always able to step at least once. */
1599 steps = btrace_insn_next (replay, 1);
1600 gdb_assert (steps == 1);
1601
1602 /* Determine the end of the instruction trace. */
1603 btrace_insn_end (&end, btinfo);
1604
1605 /* We stop replaying if we reached the end of the trace. */
1606 if (btrace_insn_cmp (replay, &end) == 0)
1607 record_btrace_stop_replaying (tp);
1608
1609 return btrace_step_stopped ();
1610
1611 case BTHR_RSTEP:
1612 /* Start replaying if we're not already doing so. */
1613 if (replay == NULL)
1614 replay = record_btrace_start_replaying (tp);
1615
1616 /* If we can't step any further, we reached the end of the history. */
1617 steps = btrace_insn_prev (replay, 1);
1618 if (steps == 0)
1619 return btrace_step_no_history ();
1620
1621 return btrace_step_stopped ();
1622
1623 case BTHR_CONT:
1624 /* We're done if we're not replaying. */
1625 if (replay == NULL)
1626 return btrace_step_no_history ();
1627
1628 inf = find_inferior_pid (ptid_get_pid (tp->ptid));
1629 aspace = inf->aspace;
1630
1631 /* Determine the end of the instruction trace. */
1632 btrace_insn_end (&end, btinfo);
1633
1634 for (;;)
1635 {
1636 const struct btrace_insn *insn;
1637
1638 /* We are always able to step at least once. */
1639 steps = btrace_insn_next (replay, 1);
1640 gdb_assert (steps == 1);
1641
1642 /* We stop replaying if we reached the end of the trace. */
1643 if (btrace_insn_cmp (replay, &end) == 0)
1644 {
1645 record_btrace_stop_replaying (tp);
1646 return btrace_step_no_history ();
1647 }
1648
1649 insn = btrace_insn_get (replay);
1650 gdb_assert (insn);
1651
1652 DEBUG ("stepping %d (%s) ... %s", tp->num,
1653 target_pid_to_str (tp->ptid),
1654 core_addr_to_string_nz (insn->pc));
1655
1656 if (breakpoint_here_p (aspace, insn->pc))
1657 return btrace_step_stopped ();
1658 }
1659
1660 case BTHR_RCONT:
1661 /* Start replaying if we're not already doing so. */
1662 if (replay == NULL)
1663 replay = record_btrace_start_replaying (tp);
1664
1665 inf = find_inferior_pid (ptid_get_pid (tp->ptid));
1666 aspace = inf->aspace;
1667
1668 for (;;)
1669 {
1670 const struct btrace_insn *insn;
1671
1672 /* If we can't step any further, we're done. */
1673 steps = btrace_insn_prev (replay, 1);
1674 if (steps == 0)
1675 return btrace_step_no_history ();
1676
1677 insn = btrace_insn_get (replay);
1678 gdb_assert (insn);
1679
1680 DEBUG ("reverse-stepping %d (%s) ... %s", tp->num,
1681 target_pid_to_str (tp->ptid),
1682 core_addr_to_string_nz (insn->pc));
1683
1684 if (breakpoint_here_p (aspace, insn->pc))
1685 return btrace_step_stopped ();
1686 }
1687 }
1688 }
1689
1690 /* The to_wait method of target record-btrace. */
1691
1692 static ptid_t
1693 record_btrace_wait (struct target_ops *ops, ptid_t ptid,
1694 struct target_waitstatus *status, int options)
1695 {
1696 struct thread_info *tp, *other;
1697
1698 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);
1699
1700 /* As long as we're not replaying, just forward the request. */
1701 if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
1702 {
1703 for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
1704 if (ops->to_wait != NULL)
1705 return ops->to_wait (ops, ptid, status, options);
1706
1707 error (_("Cannot find target for waiting."));
1708 }
1709
1710 /* Let's find a thread to move. */
1711 tp = record_btrace_find_thread_to_move (ptid);
1712 if (tp == NULL)
1713 {
1714 DEBUG ("wait %s: no thread", target_pid_to_str (ptid));
1715
1716 status->kind = TARGET_WAITKIND_IGNORE;
1717 return minus_one_ptid;
1718 }
1719
1720 /* We only move a single thread. We're not able to correlate threads. */
1721 *status = record_btrace_step_thread (tp);
1722
1723 /* Stop all other threads. */
1724 if (!non_stop)
1725 ALL_THREADS (other)
1726 other->btrace.flags &= ~BTHR_MOVE;
1727
1728 /* Start record histories anew from the current position. */
1729 record_btrace_clear_histories (&tp->btrace);
1730
1731 /* We moved the replay position but did not update registers. */
1732 registers_changed_ptid (tp->ptid);
1733
1734 return tp->ptid;
1735 }
1736
1737 /* The to_can_execute_reverse method of target record-btrace. */
1738
1739 static int
1740 record_btrace_can_execute_reverse (struct target_ops *self)
1741 {
1742 return 1;
1743 }
1744
1745 /* The to_decr_pc_after_break method of target record-btrace. */
1746
1747 static CORE_ADDR
1748 record_btrace_decr_pc_after_break (struct target_ops *ops,
1749 struct gdbarch *gdbarch)
1750 {
1751 /* When replaying, we do not actually execute the breakpoint instruction
1752 so there is no need to adjust the PC after hitting a breakpoint. */
1753 if (record_btrace_is_replaying (ops))
1754 return 0;
1755
1756 return ops->beneath->to_decr_pc_after_break (ops->beneath, gdbarch);
1757 }
1758
1759 /* The to_find_new_threads method of target record-btrace. */
1760
1761 static void
1762 record_btrace_find_new_threads (struct target_ops *ops)
1763 {
1764 /* Don't expect new threads if we're replaying. */
1765 if (record_btrace_is_replaying (ops))
1766 return;
1767
1768 /* Forward the request. */
1769 for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
1770 if (ops->to_find_new_threads != NULL)
1771 {
1772 ops->to_find_new_threads (ops);
1773 break;
1774 }
1775 }
1776
1777 /* The to_thread_alive method of target record-btrace. */
1778
1779 static int
1780 record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
1781 {
1782 /* We don't add or remove threads during replay. */
1783 if (record_btrace_is_replaying (ops))
1784 return find_thread_ptid (ptid) != NULL;
1785
1786 /* Forward the request. */
1787 for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
1788 if (ops->to_thread_alive != NULL)
1789 return ops->to_thread_alive (ops, ptid);
1790
1791 return 0;
1792 }
1793
1794 /* Set the replay branch trace instruction iterator. If IT is NULL, replay
1795 is stopped. */
1796
1797 static void
1798 record_btrace_set_replay (struct thread_info *tp,
1799 const struct btrace_insn_iterator *it)
1800 {
1801 struct btrace_thread_info *btinfo;
1802
1803 btinfo = &tp->btrace;
1804
1805 if (it == NULL || it->function == NULL)
1806 record_btrace_stop_replaying (tp);
1807 else
1808 {
1809 if (btinfo->replay == NULL)
1810 record_btrace_start_replaying (tp);
1811 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
1812 return;
1813
1814 *btinfo->replay = *it;
1815 registers_changed_ptid (tp->ptid);
1816 }
1817
1818 /* Start anew from the new replay position. */
1819 record_btrace_clear_histories (btinfo);
1820 }
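
/* The "record goto" methods below are thin wrappers around the above: they
   compute a branch trace instruction iterator (or pass NULL for the end of
   the trace) and let record_btrace_set_replay take care of starting or
   stopping replay and of invalidating registers and record histories. */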
1821
1822 /* The to_goto_record_begin method of target record-btrace. */
1823
1824 static void
1825 record_btrace_goto_begin (struct target_ops *self)
1826 {
1827 struct thread_info *tp;
1828 struct btrace_insn_iterator begin;
1829
1830 tp = require_btrace_thread ();
1831
1832 btrace_insn_begin (&begin, &tp->btrace);
1833 record_btrace_set_replay (tp, &begin);
1834
1835 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
1836 }
1837
1838 /* The to_goto_record_end method of target record-btrace. */
1839
1840 static void
1841 record_btrace_goto_end (struct target_ops *ops)
1842 {
1843 struct thread_info *tp;
1844
1845 tp = require_btrace_thread ();
1846
1847 record_btrace_set_replay (tp, NULL);
1848
1849 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
1850 }
1851
1852 /* The to_goto_record method of target record-btrace. */
1853
1854 static void
1855 record_btrace_goto (struct target_ops *self, ULONGEST insn)
1856 {
1857 struct thread_info *tp;
1858 struct btrace_insn_iterator it;
1859 unsigned int number;
1860 int found;
1861
1862 number = insn;
1863
1864 /* Check for wrap-arounds. */
1865 if (number != insn)
1866 error (_("Instruction number out of range."));
1867
1868 tp = require_btrace_thread ();
1869
1870 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
1871 if (found == 0)
1872 error (_("No such instruction."));
1873
1874 record_btrace_set_replay (tp, &it);
1875
1876 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
1877 }
1878
1879 /* Initialize the record-btrace target ops. */
1880
1881 static void
1882 init_record_btrace_ops (void)
1883 {
1884 struct target_ops *ops;
1885
1886 ops = &record_btrace_ops;
1887 ops->to_shortname = "record-btrace";
1888 ops->to_longname = "Branch tracing target";
1889 ops->to_doc = "Collect control-flow trace and provide the execution history.";
1890 ops->to_open = record_btrace_open;
1891 ops->to_close = record_btrace_close;
1892 ops->to_detach = record_detach;
1893 ops->to_disconnect = record_disconnect;
1894 ops->to_mourn_inferior = record_mourn_inferior;
1895 ops->to_kill = record_kill;
1896 ops->to_create_inferior = find_default_create_inferior;
1897 ops->to_stop_recording = record_btrace_stop_recording;
1898 ops->to_info_record = record_btrace_info;
1899 ops->to_insn_history = record_btrace_insn_history;
1900 ops->to_insn_history_from = record_btrace_insn_history_from;
1901 ops->to_insn_history_range = record_btrace_insn_history_range;
1902 ops->to_call_history = record_btrace_call_history;
1903 ops->to_call_history_from = record_btrace_call_history_from;
1904 ops->to_call_history_range = record_btrace_call_history_range;
1905 ops->to_record_is_replaying = record_btrace_is_replaying;
1906 ops->to_xfer_partial = record_btrace_xfer_partial;
1907 ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
1908 ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
1909 ops->to_fetch_registers = record_btrace_fetch_registers;
1910 ops->to_store_registers = record_btrace_store_registers;
1911 ops->to_prepare_to_store = record_btrace_prepare_to_store;
1912 ops->to_get_unwinder = &record_btrace_to_get_unwinder;
1913 ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;
1914 ops->to_resume = record_btrace_resume;
1915 ops->to_wait = record_btrace_wait;
1916 ops->to_find_new_threads = record_btrace_find_new_threads;
1917 ops->to_thread_alive = record_btrace_thread_alive;
1918 ops->to_goto_record_begin = record_btrace_goto_begin;
1919 ops->to_goto_record_end = record_btrace_goto_end;
1920 ops->to_goto_record = record_btrace_goto;
1921 ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
1922 ops->to_decr_pc_after_break = record_btrace_decr_pc_after_break;
1923 ops->to_stratum = record_stratum;
1924 ops->to_magic = OPS_MAGIC;
1925 }
1926
1927 /* Alias for "target record". */
1928
1929 static void
1930 cmd_record_btrace_start (char *args, int from_tty)
1931 {
1932 if (args != NULL && *args != 0)
1933 error (_("Invalid argument."));
1934
1935 execute_command ("target record-btrace", from_tty);
1936 }
1937
1938 void _initialize_record_btrace (void);
1939
1940 /* Initialize btrace commands. */
1941
1942 void
1943 _initialize_record_btrace (void)
1944 {
1945 add_cmd ("btrace", class_obscure, cmd_record_btrace_start,
1946 _("Start branch trace recording."),
1947 &record_cmdlist);
1948 add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
1949
1950 init_record_btrace_ops ();
1951 add_target (&record_btrace_ops);
1952
1953 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
1954 xcalloc, xfree);
1955 }