Fix next over threaded execl with "set scheduler-locking step".
[deliverable/binutils-gdb.git] / gdb / record-btrace.c
1 /* Branch trace support for GDB, the GNU debugger.
2
3 Copyright (C) 2013-2014 Free Software Foundation, Inc.
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include "record.h"
24 #include "gdbthread.h"
25 #include "target.h"
26 #include "gdbcmd.h"
27 #include "disasm.h"
28 #include "observer.h"
29 #include "exceptions.h"
30 #include "cli/cli-utils.h"
31 #include "source.h"
32 #include "ui-out.h"
33 #include "symtab.h"
34 #include "filenames.h"
35 #include "regcache.h"
36 #include "frame-unwind.h"
37 #include "hashtab.h"
38 #include "infrun.h"
39 #include "event-loop.h"
40 #include "inf-loop.h"
41
/* The target_ops of record-btrace.  */
static struct target_ops record_btrace_ops;

/* A new thread observer enabling branch tracing for the new thread.  */
static struct observer *record_btrace_thread_observer;

/* Memory access types used in set/show record btrace replay-memory-access.
   The NULL entry terminates the list for add_setshow_enum_cmd-style use.  */
static const char replay_memory_access_read_only[] = "read-only";
static const char replay_memory_access_read_write[] = "read-write";
static const char *const replay_memory_access_types[] =
{
  replay_memory_access_read_only,
  replay_memory_access_read_write,
  NULL
};

/* The currently allowed replay memory access type.  Compared by pointer
   identity against the constants above (see record_btrace_xfer_partial).  */
static const char *replay_memory_access = replay_memory_access_read_only;

/* Command lists for "set/show record btrace".  */
static struct cmd_list_element *set_record_btrace_cmdlist;
static struct cmd_list_element *show_record_btrace_cmdlist;

/* The execution direction of the last resume we got.  See record-full.c.  */
static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;

/* The async event handler for reverse/replay execution.  */
static struct async_event_handler *record_btrace_async_inferior_event_handler;

/* Print a record-btrace debug message.  Use do ... while (0) to avoid
   ambiguities when used in if statements.  */

#define DEBUG(msg, args...)						\
  do									\
    {									\
      if (record_debug != 0)						\
	fprintf_unfiltered (gdb_stdlog,					\
			    "[record-btrace] " msg "\n", ##args);	\
    }									\
  while (0)
82
83
/* Update the branch trace for the current thread and return a pointer to its
   thread_info.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct thread_info *
require_btrace_thread (void)
{
  struct thread_info *tp;

  DEBUG ("require");

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  /* Pull in any new trace data before checking for emptiness.  */
  btrace_fetch (tp);

  if (btrace_is_empty (tp))
    error (_("No trace."));

  return tp;
}
108
109 /* Update the branch trace for the current thread and return a pointer to its
110 branch trace information struct.
111
112 Throws an error if there is no thread or no trace. This function never
113 returns NULL. */
114
115 static struct btrace_thread_info *
116 require_btrace (void)
117 {
118 struct thread_info *tp;
119
120 tp = require_btrace_thread ();
121
122 return &tp->btrace;
123 }
124
/* Enable branch tracing for one thread.  Warn on errors.

   Used as a new-thread observer callback (see record_btrace_auto_enable),
   so it must not throw; errors are caught and downgraded to warnings.  */

static void
record_btrace_enable_warn (struct thread_info *tp)
{
  volatile struct gdb_exception error;

  TRY_CATCH (error, RETURN_MASK_ERROR)
    btrace_enable (tp);

  if (error.message != NULL)
    warning ("%s", error.message);
}
138
/* Callback function to disable branch tracing for one thread.

   ARG is the thread_info to disable; the void * signature matches the
   cleanup-function contract (see record_btrace_open).  */

static void
record_btrace_disable_callback (void *arg)
{
  btrace_disable ((struct thread_info *) arg);
}
150
/* Enable automatic tracing of new threads.

   Registers record_btrace_enable_warn as a new-thread observer and keeps
   the handle in record_btrace_thread_observer for later detaching.  */

static void
record_btrace_auto_enable (void)
{
  DEBUG ("attach thread observer");

  record_btrace_thread_observer
    = observer_attach_new_thread (record_btrace_enable_warn);
}
161
162 /* Disable automatic tracing of new threads. */
163
164 static void
165 record_btrace_auto_disable (void)
166 {
167 /* The observer may have been detached, already. */
168 if (record_btrace_thread_observer == NULL)
169 return;
170
171 DEBUG ("detach thread observer");
172
173 observer_detach_new_thread (record_btrace_thread_observer);
174 record_btrace_thread_observer = NULL;
175 }
176
/* The record-btrace async event handler function.

   Invoked from the event loop; forwards to the generic inferior event
   handler as a regular target event.  */

static void
record_btrace_handle_async_inferior_event (gdb_client_data data)
{
  inferior_event_handler (INF_REG_EVENT, NULL);
}
184
/* The to_open method of target record-btrace.

   ARGS is an optional thread number list restricting which threads get
   traced; an empty or NULL list means all non-exited threads.  */

static void
record_btrace_open (char *args, int from_tty)
{
  struct cleanup *disable_chain;
  struct thread_info *tp;

  DEBUG ("open");

  record_preopen ();

  if (!target_has_execution)
    error (_("The program is not being run."));

  if (!target_supports_btrace ())
    error (_("Target does not support branch tracing."));

  if (non_stop)
    error (_("Record btrace can't debug inferior in non-stop mode."));

  gdb_assert (record_btrace_thread_observer == NULL);

  /* Enable tracing per thread; the cleanup chain disables all threads
     enabled so far if a later btrace_enable throws.  */
  disable_chain = make_cleanup (null_cleanup, NULL);
  ALL_NON_EXITED_THREADS (tp)
    if (args == NULL || *args == 0 || number_is_in_list (args, tp->num))
      {
	btrace_enable (tp);

	make_cleanup (record_btrace_disable_callback, tp);
      }

  record_btrace_auto_enable ();

  push_target (&record_btrace_ops);

  record_btrace_async_inferior_event_handler
    = create_async_event_handler (record_btrace_handle_async_inferior_event,
				  NULL);

  observer_notify_record_changed (current_inferior (),  1);

  /* Everything succeeded; keep tracing enabled past this function.  */
  discard_cleanups (disable_chain);
}
229
/* The to_stop_recording method of target record-btrace.

   Stops automatic tracing of new threads, then disables tracing for every
   thread that is currently being traced (btrace.target != NULL).  */

static void
record_btrace_stop_recording (struct target_ops *self)
{
  struct thread_info *tp;

  DEBUG ("stop recording");

  record_btrace_auto_disable ();

  ALL_NON_EXITED_THREADS (tp)
    if (tp->btrace.target != NULL)
      btrace_disable (tp);
}
245
/* The to_close method of target record-btrace.  */

static void
record_btrace_close (struct target_ops *self)
{
  struct thread_info *tp;

  /* The async handler was created in record_btrace_open; tear it down.  */
  if (record_btrace_async_inferior_event_handler != NULL)
    delete_async_event_handler (&record_btrace_async_inferior_event_handler);

  /* Make sure automatic recording gets disabled even if we did not stop
     recording before closing the record-btrace target.  */
  record_btrace_auto_disable ();

  /* We should have already stopped recording.
     Tear down btrace in case we have not.  */
  ALL_NON_EXITED_THREADS (tp)
    btrace_teardown (tp);
}
265
/* The to_info_record method of target record-btrace.

   Prints the number of recorded instructions and function segments for the
   current thread and, when replaying, the current replay position.  */

static void
record_btrace_info (struct target_ops *self)
{
  struct btrace_thread_info *btinfo;
  struct thread_info *tp;
  unsigned int insns, calls;

  DEBUG ("info");

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  btrace_fetch (tp);

  insns = 0;
  calls = 0;

  btinfo = &tp->btrace;

  if (!btrace_is_empty (tp))
    {
      struct btrace_call_iterator call;
      struct btrace_insn_iterator insn;

      /* The number of the last element equals the total count, since
	 trace numbering is consecutive; end iterators are one past the
	 last element, so step back once.  */
      btrace_call_end (&call, btinfo);
      btrace_call_prev (&call, 1);
      calls = btrace_call_number (&call);

      btrace_insn_end (&insn, btinfo);
      btrace_insn_prev (&insn, 1);
      insns = btrace_insn_number (&insn);
    }

  printf_unfiltered (_("Recorded %u instructions in %u functions for thread "
		       "%d (%s).\n"), insns, calls, tp->num,
		     target_pid_to_str (tp->ptid));

  if (btrace_is_replaying (tp))
    printf_unfiltered (_("Replay in progress.  At instruction %u.\n"),
		       btrace_insn_number (btinfo->replay));
}
310
/* Print an unsigned int.

   ui_out_field_int takes a signed int (see btrace_call_history_src_line);
   use an explicit "%u" format so large unsigned values print correctly.  */

static void
ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
{
  ui_out_field_fmt (uiout, fld, "%u", val);
}
318
/* Disassemble a section of the recorded instruction trace.

   The range [BEGIN; END) is half-open; FLAGS are disassembly flags passed
   through to gdb_disassembly.  */

static void
btrace_insn_history (struct ui_out *uiout,
		     const struct btrace_insn_iterator *begin,
		     const struct btrace_insn_iterator *end, int flags)
{
  struct gdbarch *gdbarch;
  struct btrace_insn_iterator it;

  DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
	 btrace_insn_number (end));

  gdbarch = target_gdbarch ();

  for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
    {
      const struct btrace_insn *insn;

      insn = btrace_insn_get (&it);

      /* Print the instruction index.  */
      ui_out_field_uint (uiout, "index", btrace_insn_number (&it));
      ui_out_text (uiout, "\t");

      /* Disassembly with '/m' flag may not produce the expected result.
	 See PR gdb/11833.  */
      gdb_disassembly (gdbarch, uiout, NULL, flags, 1, insn->pc, insn->pc + 1);
    }
}
349
/* The to_insn_history method of target record-btrace.

   SIZE is the number of instructions to print; a negative SIZE prints the
   instructions preceding the current browsing position, a positive SIZE
   those following it.  The resulting range is remembered in BTINFO so a
   subsequent request continues where this one left off.  */

static void
record_btrace_insn_history (struct target_ops *self, int size, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
						       "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  btinfo = require_btrace ();
  history = btinfo->insn_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("insn-history (0x%x): %d", flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
	 start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
	begin = *replay;
      else
	btrace_insn_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
	 expand in the other direction, as well, to fill up any remaining
	 context.  */
      end = begin;
      if (size < 0)
	{
	  /* We want the current position covered, as well.  */
	  covered = btrace_insn_next (&end, 1);
	  covered += btrace_insn_prev (&begin, context - covered);
	  covered += btrace_insn_next (&end, context - covered);
	}
      else
	{
	  covered = btrace_insn_next (&end, context);
	  covered += btrace_insn_prev (&begin, context - covered);
	}
    }
  else
    {
      /* Continue from the previous request in the requested direction.  */
      begin = history->begin;
      end = history->end;

      DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
	     btrace_insn_number (&begin), btrace_insn_number (&end));

      if (size < 0)
	{
	  end = begin;
	  covered = btrace_insn_prev (&begin, context);
	}
      else
	{
	  begin = end;
	  covered = btrace_insn_next (&end, context);
	}
    }

  if (covered > 0)
    btrace_insn_history (uiout, &begin, &end, flags);
  else
    {
      if (size < 0)
	printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
	printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  btrace_set_insn_history (btinfo, &begin, &end);
  do_cleanups (uiout_cleanup);
}
435
/* The to_insn_history_range method of target record-btrace.

   Prints the recorded instructions numbered FROM to TO, inclusive.  A TO
   beyond the end of the trace is silently truncated.  */

static void
record_btrace_insn_history_range (struct target_ops *self,
				  ULONGEST from, ULONGEST to, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
						       "insn history");
  low = from;
  high = to;

  DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);

  /* Check for wrap-arounds: instruction numbers are unsigned int, so a
     ULONGEST argument that does not survive the narrowing is rejected.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_insn_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_insn_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_insn_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_insn_next (&end, 1);
    }

  btrace_insn_history (uiout, &begin, &end, flags);
  btrace_set_insn_history (btinfo, &begin, &end);

  do_cleanups (uiout_cleanup);
}
488
489 /* The to_insn_history_from method of target record-btrace. */
490
491 static void
492 record_btrace_insn_history_from (struct target_ops *self,
493 ULONGEST from, int size, int flags)
494 {
495 ULONGEST begin, end, context;
496
497 context = abs (size);
498 if (context == 0)
499 error (_("Bad record instruction-history-size."));
500
501 if (size < 0)
502 {
503 end = from;
504
505 if (from < context)
506 begin = 0;
507 else
508 begin = from - context + 1;
509 }
510 else
511 {
512 begin = from;
513 end = from + context - 1;
514
515 /* Check for wrap-around. */
516 if (end < begin)
517 end = ULONGEST_MAX;
518 }
519
520 record_btrace_insn_history_range (self, begin, end, flags);
521 }
522
/* Print the instruction number range for a function call history line.

   A function segment holds SIZE consecutively-numbered instructions
   starting at BFUN->insn_offset; print the inclusive range.  */

static void
btrace_call_history_insn_range (struct ui_out *uiout,
				const struct btrace_function *bfun)
{
  unsigned int begin, end, size;

  size = VEC_length (btrace_insn_s, bfun->insn);
  gdb_assert (size > 0);

  begin = bfun->insn_offset;
  end = begin + size - 1;

  ui_out_field_uint (uiout, "insn begin", begin);
  ui_out_text (uiout, ",");
  ui_out_field_uint (uiout, "insn end", end);
}
541
/* Print the source line information for a function call history line.

   Prints "file:min[,max]" for the line range covered by BFUN.  Prints
   nothing when there is no symbol; prints only the file when the line
   range is empty, and omits the max line when it equals the min line.  */

static void
btrace_call_history_src_line (struct ui_out *uiout,
			      const struct btrace_function *bfun)
{
  struct symbol *sym;
  int begin, end;

  sym = bfun->sym;
  if (sym == NULL)
    return;

  ui_out_field_string (uiout, "file",
		       symtab_to_filename_for_display (sym->symtab));

  begin = bfun->lbegin;
  end = bfun->lend;

  if (end < begin)
    return;

  ui_out_text (uiout, ":");
  ui_out_field_int (uiout, "min line", begin);

  if (end == begin)
    return;

  ui_out_text (uiout, ",");
  ui_out_field_int (uiout, "max line", end);
}
573
574 /* Get the name of a branch trace function. */
575
576 static const char *
577 btrace_get_bfun_name (const struct btrace_function *bfun)
578 {
579 struct minimal_symbol *msym;
580 struct symbol *sym;
581
582 if (bfun == NULL)
583 return "??";
584
585 msym = bfun->msym;
586 sym = bfun->sym;
587
588 if (sym != NULL)
589 return SYMBOL_PRINT_NAME (sym);
590 else if (msym != NULL)
591 return MSYMBOL_PRINT_NAME (msym);
592 else
593 return "??";
594 }
595
/* Disassemble a section of the recorded function trace.

   Prints one line per function segment in [BEGIN; END), optionally
   indented by call depth and annotated with instruction ranges and
   source lines, depending on FLAGS.  */

static void
btrace_call_history (struct ui_out *uiout,
		     const struct btrace_thread_info *btinfo,
		     const struct btrace_call_iterator *begin,
		     const struct btrace_call_iterator *end,
		     enum record_print_flag flags)
{
  struct btrace_call_iterator it;

  DEBUG ("ftrace (0x%x): [%u; %u)", flags, btrace_call_number (begin),
	 btrace_call_number (end));

  for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
    {
      const struct btrace_function *bfun;
      struct minimal_symbol *msym;
      struct symbol *sym;

      bfun = btrace_call_get (&it);
      sym = bfun->sym;
      msym = bfun->msym;

      /* Print the function index.  */
      ui_out_field_uint (uiout, "index", bfun->number);
      ui_out_text (uiout, "\t");

      if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
	{
	  /* Indent by call depth; BTINFO->level normalizes the segment's
	     relative level so the shallowest frame starts at zero.  */
	  int level = bfun->level + btinfo->level, i;

	  for (i = 0; i < level; ++i)
	    ui_out_text (uiout, "  ");
	}

      /* Prefer the debug symbol name over the minimal symbol name.  */
      if (sym != NULL)
	ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (sym));
      else if (msym != NULL)
	ui_out_field_string (uiout, "function", MSYMBOL_PRINT_NAME (msym));
      else if (!ui_out_is_mi_like_p (uiout))
	ui_out_field_string (uiout, "function", "??");

      if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
	{
	  ui_out_text (uiout, _("\tinst "));
	  btrace_call_history_insn_range (uiout, bfun);
	}

      if ((flags & RECORD_PRINT_SRC_LINE) != 0)
	{
	  ui_out_text (uiout, _("\tat "));
	  btrace_call_history_src_line (uiout, bfun);
	}

      ui_out_text (uiout, "\n");
    }
}
654
655 /* The to_call_history method of target record-btrace. */
656
657 static void
658 record_btrace_call_history (struct target_ops *self, int size, int flags)
659 {
660 struct btrace_thread_info *btinfo;
661 struct btrace_call_history *history;
662 struct btrace_call_iterator begin, end;
663 struct cleanup *uiout_cleanup;
664 struct ui_out *uiout;
665 unsigned int context, covered;
666
667 uiout = current_uiout;
668 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
669 "insn history");
670 context = abs (size);
671 if (context == 0)
672 error (_("Bad record function-call-history-size."));
673
674 btinfo = require_btrace ();
675 history = btinfo->call_history;
676 if (history == NULL)
677 {
678 struct btrace_insn_iterator *replay;
679
680 DEBUG ("call-history (0x%x): %d", flags, size);
681
682 /* If we're replaying, we start at the replay position. Otherwise, we
683 start at the tail of the trace. */
684 replay = btinfo->replay;
685 if (replay != NULL)
686 {
687 begin.function = replay->function;
688 begin.btinfo = btinfo;
689 }
690 else
691 btrace_call_end (&begin, btinfo);
692
693 /* We start from here and expand in the requested direction. Then we
694 expand in the other direction, as well, to fill up any remaining
695 context. */
696 end = begin;
697 if (size < 0)
698 {
699 /* We want the current position covered, as well. */
700 covered = btrace_call_next (&end, 1);
701 covered += btrace_call_prev (&begin, context - covered);
702 covered += btrace_call_next (&end, context - covered);
703 }
704 else
705 {
706 covered = btrace_call_next (&end, context);
707 covered += btrace_call_prev (&begin, context- covered);
708 }
709 }
710 else
711 {
712 begin = history->begin;
713 end = history->end;
714
715 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", flags, size,
716 btrace_call_number (&begin), btrace_call_number (&end));
717
718 if (size < 0)
719 {
720 end = begin;
721 covered = btrace_call_prev (&begin, context);
722 }
723 else
724 {
725 begin = end;
726 covered = btrace_call_next (&end, context);
727 }
728 }
729
730 if (covered > 0)
731 btrace_call_history (uiout, btinfo, &begin, &end, flags);
732 else
733 {
734 if (size < 0)
735 printf_unfiltered (_("At the start of the branch trace record.\n"));
736 else
737 printf_unfiltered (_("At the end of the branch trace record.\n"));
738 }
739
740 btrace_set_call_history (btinfo, &begin, &end);
741 do_cleanups (uiout_cleanup);
742 }
743
/* The to_call_history_range method of target record-btrace.

   Prints the function segments numbered FROM to TO, inclusive.  A TO
   beyond the end of the trace is silently truncated.  */

static void
record_btrace_call_history_range (struct target_ops *self,
				  ULONGEST from, ULONGEST to, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
						       "func history");
  low = from;
  high = to;

  DEBUG ("call-history (0x%x): [%u; %u)", flags, low, high);

  /* Check for wrap-arounds: call numbers are unsigned int, so a ULONGEST
     argument that does not survive the narrowing is rejected.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_call_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_call_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_call_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_call_next (&end, 1);
    }

  btrace_call_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_call_history (btinfo, &begin, &end);

  do_cleanups (uiout_cleanup);
}
796
797 /* The to_call_history_from method of target record-btrace. */
798
799 static void
800 record_btrace_call_history_from (struct target_ops *self,
801 ULONGEST from, int size, int flags)
802 {
803 ULONGEST begin, end, context;
804
805 context = abs (size);
806 if (context == 0)
807 error (_("Bad record function-call-history-size."));
808
809 if (size < 0)
810 {
811 end = from;
812
813 if (from < context)
814 begin = 0;
815 else
816 begin = from - context + 1;
817 }
818 else
819 {
820 begin = from;
821 end = from + context - 1;
822
823 /* Check for wrap-around. */
824 if (end < begin)
825 end = ULONGEST_MAX;
826 }
827
828 record_btrace_call_history_range (self, begin, end, flags);
829 }
830
/* The to_record_is_replaying method of target record-btrace.

   Returns non-zero iff at least one non-exited thread is replaying.  */

static int
record_btrace_is_replaying (struct target_ops *self)
{
  struct thread_info *tp;

  ALL_NON_EXITED_THREADS (tp)
    if (btrace_is_replaying (tp))
      return 1;

  return 0;
}
844
845 /* The to_xfer_partial method of target record-btrace. */
846
847 static enum target_xfer_status
848 record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
849 const char *annex, gdb_byte *readbuf,
850 const gdb_byte *writebuf, ULONGEST offset,
851 ULONGEST len, ULONGEST *xfered_len)
852 {
853 struct target_ops *t;
854
855 /* Filter out requests that don't make sense during replay. */
856 if (replay_memory_access == replay_memory_access_read_only
857 && record_btrace_is_replaying (ops))
858 {
859 switch (object)
860 {
861 case TARGET_OBJECT_MEMORY:
862 {
863 struct target_section *section;
864
865 /* We do not allow writing memory in general. */
866 if (writebuf != NULL)
867 {
868 *xfered_len = len;
869 return TARGET_XFER_UNAVAILABLE;
870 }
871
872 /* We allow reading readonly memory. */
873 section = target_section_by_addr (ops, offset);
874 if (section != NULL)
875 {
876 /* Check if the section we found is readonly. */
877 if ((bfd_get_section_flags (section->the_bfd_section->owner,
878 section->the_bfd_section)
879 & SEC_READONLY) != 0)
880 {
881 /* Truncate the request to fit into this section. */
882 len = min (len, section->endaddr - offset);
883 break;
884 }
885 }
886
887 *xfered_len = len;
888 return TARGET_XFER_UNAVAILABLE;
889 }
890 }
891 }
892
893 /* Forward the request. */
894 for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
895 if (ops->to_xfer_partial != NULL)
896 return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
897 offset, len, xfered_len);
898
899 *xfered_len = len;
900 return TARGET_XFER_UNAVAILABLE;
901 }
902
/* The to_insert_breakpoint method of target record-btrace.

   Temporarily lifts the read-only replay memory restriction so the
   breakpoint instruction can be written, restoring it afterwards even
   when the underlying insertion throws.  */

static int
record_btrace_insert_breakpoint (struct target_ops *ops,
				 struct gdbarch *gdbarch,
				 struct bp_target_info *bp_tgt)
{
  volatile struct gdb_exception except;
  const char *old;
  int ret;

  /* Inserting breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  TRY_CATCH (except, RETURN_MASK_ALL)
    ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);

  /* Restore the access mode before re-throwing any exception.  */
  replay_memory_access = old;

  if (except.reason < 0)
    throw_exception (except);

  return ret;
}
930
/* The to_remove_breakpoint method of target record-btrace.

   Mirror image of record_btrace_insert_breakpoint: temporarily lifts the
   read-only replay memory restriction so the original instruction can be
   restored, then re-establishes it even on error.  */

static int
record_btrace_remove_breakpoint (struct target_ops *ops,
				 struct gdbarch *gdbarch,
				 struct bp_target_info *bp_tgt)
{
  volatile struct gdb_exception except;
  const char *old;
  int ret;

  /* Removing breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  TRY_CATCH (except, RETURN_MASK_ALL)
    ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt);

  /* Restore the access mode before re-throwing any exception.  */
  replay_memory_access = old;

  if (except.reason < 0)
    throw_exception (except);

  return ret;
}
958
/* The to_fetch_registers method of target record-btrace.

   During replay only the PC is known (taken from the current trace
   instruction); other register requests are silently ignored.  When not
   replaying, the request is delegated to the target beneath.  */

static void
record_btrace_fetch_registers (struct target_ops *ops,
			       struct regcache *regcache, int regno)
{
  struct btrace_insn_iterator *replay;
  struct thread_info *tp;

  tp = find_thread_ptid (inferior_ptid);
  gdb_assert (tp != NULL);

  replay = tp->btrace.replay;
  if (replay != NULL)
    {
      const struct btrace_insn *insn;
      struct gdbarch *gdbarch;
      int pcreg;

      gdbarch = get_regcache_arch (regcache);
      pcreg = gdbarch_pc_regnum (gdbarch);
      if (pcreg < 0)
	return;

      /* We can only provide the PC register.  */
      if (regno >= 0 && regno != pcreg)
	return;

      insn = btrace_insn_get (replay);
      gdb_assert (insn != NULL);

      regcache_raw_supply (regcache, regno, &insn->pc);
    }
  else
    {
      struct target_ops *t;

      /* Delegate to the first target beneath that implements the method.  */
      for (t = ops->beneath; t != NULL; t = t->beneath)
	if (t->to_fetch_registers != NULL)
	  {
	    t->to_fetch_registers (t, regcache, regno);
	    break;
	  }
    }
}
1004
/* The to_store_registers method of target record-btrace.

   Register writes are refused while replaying; otherwise the request is
   delegated to the target beneath.  */

static void
record_btrace_store_registers (struct target_ops *ops,
			       struct regcache *regcache, int regno)
{
  struct target_ops *t;

  if (record_btrace_is_replaying (ops))
    error (_("This record target does not allow writing registers."));

  gdb_assert (may_write_registers != 0);

  for (t = ops->beneath; t != NULL; t = t->beneath)
    if (t->to_store_registers != NULL)
      {
	t->to_store_registers (t, regcache, regno);
	return;
      }

  /* No target beneath can store registers.  */
  noprocess ();
}
1027
/* The to_prepare_to_store method of target record-btrace.

   A no-op while replaying (stores are refused anyway, see
   record_btrace_store_registers); otherwise delegated beneath.  */

static void
record_btrace_prepare_to_store (struct target_ops *ops,
				struct regcache *regcache)
{
  struct target_ops *t;

  if (record_btrace_is_replaying (ops))
    return;

  for (t = ops->beneath; t != NULL; t = t->beneath)
    if (t->to_prepare_to_store != NULL)
      {
	t->to_prepare_to_store (t, regcache);
	return;
      }
}
1046
/* The branch trace frame cache.

   One instance exists per branch-trace frame; instances are looked up by
   their frame_info pointer via the BFCACHE hash table below.  */

struct btrace_frame_cache
{
  /* The thread.  */
  struct thread_info *tp;

  /* The frame info.  Hash key of BFCACHE (see bfcache_hash/bfcache_eq).  */
  struct frame_info *frame;

  /* The branch trace function segment.  */
  const struct btrace_function *bfun;
};

/* A struct btrace_frame_cache hash table indexed by NEXT.  */

static htab_t bfcache;
1064
1065 /* hash_f for htab_create_alloc of bfcache. */
1066
1067 static hashval_t
1068 bfcache_hash (const void *arg)
1069 {
1070 const struct btrace_frame_cache *cache = arg;
1071
1072 return htab_hash_pointer (cache->frame);
1073 }
1074
1075 /* eq_f for htab_create_alloc of bfcache. */
1076
1077 static int
1078 bfcache_eq (const void *arg1, const void *arg2)
1079 {
1080 const struct btrace_frame_cache *cache1 = arg1;
1081 const struct btrace_frame_cache *cache2 = arg2;
1082
1083 return cache1->frame == cache2->frame;
1084 }
1085
/* Create a new btrace frame cache for FRAME and register it in BFCACHE.

   The cache is allocated on the frame obstack, so it lives as long as
   the frame itself.  FRAME must not already have a cache entry.  */

static struct btrace_frame_cache *
bfcache_new (struct frame_info *frame)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
  cache->frame = frame;

  slot = htab_find_slot (bfcache, cache, INSERT);
  gdb_assert (*slot == NULL);
  *slot = cache;

  return cache;
}
1103
/* Extract the branch trace function from a branch trace frame.

   Returns NULL if FRAME has no cache entry, i.e. it is not one of our
   frames (see record_btrace_frame_sniffer).  */

static const struct btrace_function *
btrace_get_frame_function (struct frame_info *frame)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;
  struct btrace_frame_cache pattern;
  void **slot;

  pattern.frame = frame;

  slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
  if (slot == NULL)
    return NULL;

  cache = *slot;
  return cache->bfun;
}
1123
/* Implement stop_reason method for record_btrace_frame_unwind.

   Unwinding stops when the function segment has no caller link, i.e.
   the trace does not record who called this function.  */

static enum unwind_stop_reason
record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
					void **this_cache)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;

  cache = *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  if (bfun->up == NULL)
    return UNWIND_UNAVAILABLE;

  return UNWIND_NO_REASON;
}
1142
/* Implement this_id method for record_btrace_frame_unwind.

   There is no stack during replay, so build an unavailable-stack id from
   the frame's function address plus the number of the function's first
   segment, which is stable across segments of the same function call.  */

static void
record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
			     struct frame_id *this_id)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;
  CORE_ADDR code, special;

  cache = *this_cache;

  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  /* Walk back to the first segment of this function call.  */
  while (bfun->segment.prev != NULL)
    bfun = bfun->segment.prev;

  code = get_frame_func (this_frame);
  special = bfun->number;

  *this_id = frame_id_build_unavailable_stack_special (code, special);

  DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
	 btrace_get_bfun_name (cache->bfun),
	 core_addr_to_string_nz (this_id->code_addr),
	 core_addr_to_string_nz (this_id->special_addr));
}
1171
/* Implement prev_register method for record_btrace_frame_unwind.

   Only the PC can be unwound from the trace.  The caller's PC is the
   first instruction of the caller segment when the up link is a return,
   or the address after the caller's last (call) instruction otherwise.  */

static struct value *
record_btrace_frame_prev_register (struct frame_info *this_frame,
				   void **this_cache,
				   int regnum)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun, *caller;
  const struct btrace_insn *insn;
  struct gdbarch *gdbarch;
  CORE_ADDR pc;
  int pcreg;

  gdbarch = get_frame_arch (this_frame);
  pcreg = gdbarch_pc_regnum (gdbarch);
  if (pcreg < 0 || regnum != pcreg)
    throw_error (NOT_AVAILABLE_ERROR,
		 _("Registers are not available in btrace record history"));

  cache = *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  caller = bfun->up;
  if (caller == NULL)
    throw_error (NOT_AVAILABLE_ERROR,
		 _("No caller in btrace record history"));

  if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
    {
      /* The up link is a return: resume at the caller segment's first
	 instruction.  */
      insn = VEC_index (btrace_insn_s, caller->insn, 0);
      pc = insn->pc;
    }
  else
    {
      /* The up link is a call: the return address is the instruction
	 following the caller's last instruction.  */
      insn = VEC_last (btrace_insn_s, caller->insn);
      pc = insn->pc;

      pc += gdb_insn_length (gdbarch, pc);
    }

  DEBUG ("[frame] unwound PC in %s on level %d: %s",
	 btrace_get_bfun_name (bfun), bfun->level,
	 core_addr_to_string_nz (pc));

  return frame_unwind_got_address (this_frame, regnum, pc);
}
1220
/* Implement sniffer method for record_btrace_frame_unwind.  Claims a
   frame when the thread is replaying: the innermost frame comes from the
   replay iterator, outer frames from the callee's up link.  */

static int
record_btrace_frame_sniffer (const struct frame_unwind *self,
			     struct frame_info *this_frame,
			     void **this_cache)
{
  const struct btrace_function *bfun;
  struct btrace_frame_cache *cache;
  struct thread_info *tp;
  struct frame_info *next;

  /* THIS_FRAME does not contain a reference to its thread.  */
  tp = find_thread_ptid (inferior_ptid);
  gdb_assert (tp != NULL);

  bfun = NULL;
  next = get_next_frame (this_frame);
  if (next == NULL)
    {
      /* Innermost frame: use the current replay position, if any.  */
      const struct btrace_insn_iterator *replay;

      replay = tp->btrace.replay;
      if (replay != NULL)
	bfun = replay->function;
    }
  else
    {
      /* Outer frame: the caller of NEXT's function.  Tail-call links are
	 left to record_btrace_tailcall_frame_sniffer.  */
      const struct btrace_function *callee;

      callee = btrace_get_frame_function (next);
      if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
	bfun = callee->up;
    }

  if (bfun == NULL)
    return 0;

  DEBUG ("[frame] sniffed frame for %s on level %d",
	 btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = tp;
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}
1270
/* Implement sniffer method for record_btrace_tailcall_frame_unwind.
   Claims a frame only when the next (inner) frame's function was reached
   via a tail call.  */

static int
record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
				      struct frame_info *this_frame,
				      void **this_cache)
{
  const struct btrace_function *bfun, *callee;
  struct btrace_frame_cache *cache;
  struct frame_info *next;

  /* A tail-call frame is never innermost: it needs a callee frame.  */
  next = get_next_frame (this_frame);
  if (next == NULL)
    return 0;

  callee = btrace_get_frame_function (next);
  if (callee == NULL)
    return 0;

  if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
    return 0;

  bfun = callee->up;
  if (bfun == NULL)
    return 0;

  DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
	 btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = find_thread_ptid (inferior_ptid);
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}
1308
/* Implement dealloc_cache method for record_btrace_frame_unwind.
   Removes the frame cache entry from BFCACHE when the frame is
   destroyed.  */

static void
record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = this_cache;

  /* The cache must have been entered into BFCACHE by the sniffer.  */
  slot = htab_find_slot (bfcache, cache, NO_INSERT);
  gdb_assert (slot != NULL);

  htab_remove_elt (bfcache, cache);
}
1322
/* btrace recording does not store previous memory content, nor the stack
   frames' content.  Any unwinding would return erroneous results as the
   stack contents no longer match the changed PC value restored from
   history.  Therefore this unwinder reports any possibly unwound
   registers as <unavailable>.  */

const struct frame_unwind record_btrace_frame_unwind =
{
  NORMAL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,				/* unwind_data  */
  record_btrace_frame_sniffer,
  record_btrace_frame_dealloc_cache
};
1339
/* Unwinder for frames reached via a tail call; same methods as
   record_btrace_frame_unwind except for the sniffer and frame type.  */

const struct frame_unwind record_btrace_tailcall_frame_unwind =
{
  TAILCALL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,				/* unwind_data  */
  record_btrace_tailcall_frame_sniffer,
  record_btrace_frame_dealloc_cache
};
1350
/* Implement the to_get_unwinder method.  */

static const struct frame_unwind *
record_btrace_to_get_unwinder (struct target_ops *self)
{
  /* Replayed frames are unwound by our btrace unwinder.  */
  return &record_btrace_frame_unwind;
}
1358
/* Implement the to_get_tailcall_unwinder method.  */

static const struct frame_unwind *
record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
{
  /* Tail-call frames get their own unwinder with a dedicated sniffer.  */
  return &record_btrace_tailcall_frame_unwind;
}
1366
/* Indicate that TP should be resumed according to FLAG.  The actual
   move happens later, in record_btrace_step_thread, once to_wait picks
   the thread up.  */

static void
record_btrace_resume_thread (struct thread_info *tp,
			     enum btrace_thread_flag flag)
{
  struct btrace_thread_info *btinfo;

  DEBUG ("resuming %d (%s): %u", tp->num, target_pid_to_str (tp->ptid), flag);

  btinfo = &tp->btrace;

  /* A thread may only have one pending move request at a time.  */
  if ((btinfo->flags & BTHR_MOVE) != 0)
    error (_("Thread already moving."));

  /* Fetch the latest branch trace.  */
  btrace_fetch (tp);

  btinfo->flags |= flag;
}
1387
1388 /* Find the thread to resume given a PTID. */
1389
1390 static struct thread_info *
1391 record_btrace_find_resume_thread (ptid_t ptid)
1392 {
1393 struct thread_info *tp;
1394
1395 /* When asked to resume everything, we pick the current thread. */
1396 if (ptid_equal (minus_one_ptid, ptid) || ptid_is_pid (ptid))
1397 ptid = inferior_ptid;
1398
1399 return find_thread_ptid (ptid);
1400 }
1401
/* Start replaying a thread.  Returns the new replay iterator, or NULL if
   the thread has no trace.  On error, the partial replay setup is undone
   and the exception re-thrown.  */

static struct btrace_insn_iterator *
record_btrace_start_replaying (struct thread_info *tp)
{
  volatile struct gdb_exception except;
  struct btrace_insn_iterator *replay;
  struct btrace_thread_info *btinfo;
  int executing;

  btinfo = &tp->btrace;
  replay = NULL;

  /* We can't start replaying without trace.  */
  if (btinfo->begin == NULL)
    return NULL;

  /* Clear the executing flag to allow changes to the current frame.
     We are not actually running, yet.  We just started a reverse execution
     command or a record goto command.
     For the latter, EXECUTING is false and this has no effect.
     For the former, EXECUTING is true and we're in to_wait, about to
     move the thread.  Since we need to recompute the stack, we temporarily
     set EXECUTING to false.  */
  executing = is_executing (tp->ptid);
  set_executing (tp->ptid, 0);

  /* GDB stores the current frame_id when stepping in order to detect steps
     into subroutines.
     Since frames are computed differently when we're replaying, we need to
     recompute those stored frames and fix them up so we can still detect
     subroutines after we started replaying.  */
  TRY_CATCH (except, RETURN_MASK_ALL)
    {
      struct frame_info *frame;
      struct frame_id frame_id;
      int upd_step_frame_id, upd_step_stack_frame_id;

      /* The current frame without replaying - computed via normal unwind.  */
      frame = get_current_frame ();
      frame_id = get_frame_id (frame);

      /* Check if we need to update any stepping-related frame id's.  */
      upd_step_frame_id = frame_id_eq (frame_id,
				       tp->control.step_frame_id);
      upd_step_stack_frame_id = frame_id_eq (frame_id,
					     tp->control.step_stack_frame_id);

      /* We start replaying at the end of the branch trace.  This corresponds
	 to the current instruction.  */
      replay = xmalloc (sizeof (*replay));
      btrace_insn_end (replay, btinfo);

      /* We're not replaying, yet.  */
      gdb_assert (btinfo->replay == NULL);
      btinfo->replay = replay;

      /* Make sure we're not using any stale registers.  */
      registers_changed_ptid (tp->ptid);

      /* The current frame with replaying - computed via btrace unwind.  */
      frame = get_current_frame ();
      frame_id = get_frame_id (frame);

      /* Replace stepping related frames where necessary.  */
      if (upd_step_frame_id)
	tp->control.step_frame_id = frame_id;
      if (upd_step_stack_frame_id)
	tp->control.step_stack_frame_id = frame_id;
    }

  /* Restore the previous execution state.  */
  set_executing (tp->ptid, executing);

  /* On error, undo the replay setup so a later attempt starts clean,
     then re-throw.  */
  if (except.reason < 0)
    {
      xfree (btinfo->replay);
      btinfo->replay = NULL;

      registers_changed_ptid (tp->ptid);

      throw_exception (except);
    }

  return replay;
}
1488
1489 /* Stop replaying a thread. */
1490
1491 static void
1492 record_btrace_stop_replaying (struct thread_info *tp)
1493 {
1494 struct btrace_thread_info *btinfo;
1495
1496 btinfo = &tp->btrace;
1497
1498 xfree (btinfo->replay);
1499 btinfo->replay = NULL;
1500
1501 /* Make sure we're not leaving any stale registers. */
1502 registers_changed_ptid (tp->ptid);
1503 }
1504
1505 /* The to_resume method of target record-btrace. */
1506
1507 static void
1508 record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
1509 enum gdb_signal signal)
1510 {
1511 struct thread_info *tp, *other;
1512 enum btrace_thread_flag flag;
1513
1514 DEBUG ("resume %s: %s", target_pid_to_str (ptid), step ? "step" : "cont");
1515
1516 /* Store the execution direction of the last resume. */
1517 record_btrace_resume_exec_dir = execution_direction;
1518
1519 tp = record_btrace_find_resume_thread (ptid);
1520 if (tp == NULL)
1521 error (_("Cannot find thread to resume."));
1522
1523 /* Stop replaying other threads if the thread to resume is not replaying. */
1524 if (!btrace_is_replaying (tp) && execution_direction != EXEC_REVERSE)
1525 ALL_NON_EXITED_THREADS (other)
1526 record_btrace_stop_replaying (other);
1527
1528 /* As long as we're not replaying, just forward the request. */
1529 if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
1530 {
1531 for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
1532 if (ops->to_resume != NULL)
1533 return ops->to_resume (ops, ptid, step, signal);
1534
1535 error (_("Cannot find target for stepping."));
1536 }
1537
1538 /* Compute the btrace thread flag for the requested move. */
1539 if (step == 0)
1540 flag = execution_direction == EXEC_REVERSE ? BTHR_RCONT : BTHR_CONT;
1541 else
1542 flag = execution_direction == EXEC_REVERSE ? BTHR_RSTEP : BTHR_STEP;
1543
1544 /* At the moment, we only move a single thread. We could also move
1545 all threads in parallel by single-stepping each resumed thread
1546 until the first runs into an event.
1547 When we do that, we would want to continue all other threads.
1548 For now, just resume one thread to not confuse to_wait. */
1549 record_btrace_resume_thread (tp, flag);
1550
1551 /* We just indicate the resume intent here. The actual stepping happens in
1552 record_btrace_wait below. */
1553
1554 /* Async support. */
1555 if (target_can_async_p ())
1556 {
1557 target_async (inferior_event_handler, 0);
1558 mark_async_event_handler (record_btrace_async_inferior_event_handler);
1559 }
1560 }
1561
1562 /* Find a thread to move. */
1563
1564 static struct thread_info *
1565 record_btrace_find_thread_to_move (ptid_t ptid)
1566 {
1567 struct thread_info *tp;
1568
1569 /* First check the parameter thread. */
1570 tp = find_thread_ptid (ptid);
1571 if (tp != NULL && (tp->btrace.flags & BTHR_MOVE) != 0)
1572 return tp;
1573
1574 /* Otherwise, find one other thread that has been resumed. */
1575 ALL_NON_EXITED_THREADS (tp)
1576 if ((tp->btrace.flags & BTHR_MOVE) != 0)
1577 return tp;
1578
1579 return NULL;
1580 }
1581
1582 /* Return a target_waitstatus indicating that we ran out of history. */
1583
1584 static struct target_waitstatus
1585 btrace_step_no_history (void)
1586 {
1587 struct target_waitstatus status;
1588
1589 status.kind = TARGET_WAITKIND_NO_HISTORY;
1590
1591 return status;
1592 }
1593
1594 /* Return a target_waitstatus indicating that a step finished. */
1595
1596 static struct target_waitstatus
1597 btrace_step_stopped (void)
1598 {
1599 struct target_waitstatus status;
1600
1601 status.kind = TARGET_WAITKIND_STOPPED;
1602 status.value.sig = GDB_SIGNAL_TRAP;
1603
1604 return status;
1605 }
1606
1607 /* Clear the record histories. */
1608
1609 static void
1610 record_btrace_clear_histories (struct btrace_thread_info *btinfo)
1611 {
1612 xfree (btinfo->insn_history);
1613 xfree (btinfo->call_history);
1614
1615 btinfo->insn_history = NULL;
1616 btinfo->call_history = NULL;
1617 }
1618
/* Step a single thread.  Consumes the thread's pending BTHR_MOVE request
   and performs it against the branch trace, returning the resulting wait
   status.  */

static struct target_waitstatus
record_btrace_step_thread (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay, end;
  struct btrace_thread_info *btinfo;
  struct address_space *aspace;
  struct inferior *inf;
  enum btrace_thread_flag flags;
  unsigned int steps;

  /* We can't step without an execution history.  */
  if (btrace_is_empty (tp))
    return btrace_step_no_history ();

  btinfo = &tp->btrace;
  replay = btinfo->replay;

  /* Consume the move request so it is not performed twice.  */
  flags = btinfo->flags & BTHR_MOVE;
  btinfo->flags &= ~BTHR_MOVE;

  DEBUG ("stepping %d (%s): %u", tp->num, target_pid_to_str (tp->ptid), flags);

  switch (flags)
    {
    default:
      internal_error (__FILE__, __LINE__, _("invalid stepping type."));

    case BTHR_STEP:
      /* We're done if we're not replaying.  */
      if (replay == NULL)
	return btrace_step_no_history ();

      /* We are always able to step at least once.  */
      steps = btrace_insn_next (replay, 1);
      gdb_assert (steps == 1);

      /* Determine the end of the instruction trace.  */
      btrace_insn_end (&end, btinfo);

      /* We stop replaying if we reached the end of the trace.  */
      if (btrace_insn_cmp (replay, &end) == 0)
	record_btrace_stop_replaying (tp);

      return btrace_step_stopped ();

    case BTHR_RSTEP:
      /* Start replaying if we're not already doing so.  */
      if (replay == NULL)
	replay = record_btrace_start_replaying (tp);

      /* If we can't step any further, we reached the end of the history.  */
      steps = btrace_insn_prev (replay, 1);
      if (steps == 0)
	return btrace_step_no_history ();

      return btrace_step_stopped ();

    case BTHR_CONT:
      /* We're done if we're not replaying.  */
      if (replay == NULL)
	return btrace_step_no_history ();

      inf = find_inferior_pid (ptid_get_pid (tp->ptid));
      aspace = inf->aspace;

      /* Determine the end of the instruction trace.  */
      btrace_insn_end (&end, btinfo);

      /* Replay forward until we hit a breakpoint or run out of trace.  */
      for (;;)
	{
	  const struct btrace_insn *insn;

	  /* We are always able to step at least once.  */
	  steps = btrace_insn_next (replay, 1);
	  gdb_assert (steps == 1);

	  /* We stop replaying if we reached the end of the trace.  */
	  if (btrace_insn_cmp (replay, &end) == 0)
	    {
	      record_btrace_stop_replaying (tp);
	      return btrace_step_no_history ();
	    }

	  insn = btrace_insn_get (replay);
	  gdb_assert (insn);

	  DEBUG ("stepping %d (%s) ... %s", tp->num,
		 target_pid_to_str (tp->ptid),
		 core_addr_to_string_nz (insn->pc));

	  if (breakpoint_here_p (aspace, insn->pc))
	    return btrace_step_stopped ();
	}

    case BTHR_RCONT:
      /* Start replaying if we're not already doing so.  */
      if (replay == NULL)
	replay = record_btrace_start_replaying (tp);

      inf = find_inferior_pid (ptid_get_pid (tp->ptid));
      aspace = inf->aspace;

      /* Replay backward until we hit a breakpoint or the start of the
	 history.  */
      for (;;)
	{
	  const struct btrace_insn *insn;

	  /* If we can't step any further, we're done.  */
	  steps = btrace_insn_prev (replay, 1);
	  if (steps == 0)
	    return btrace_step_no_history ();

	  insn = btrace_insn_get (replay);
	  gdb_assert (insn);

	  DEBUG ("reverse-stepping %d (%s) ... %s", tp->num,
		 target_pid_to_str (tp->ptid),
		 core_addr_to_string_nz (insn->pc));

	  if (breakpoint_here_p (aspace, insn->pc))
	    return btrace_step_stopped ();
	}
    }
}
1744
/* The to_wait method of target record-btrace.  */

static ptid_t
record_btrace_wait (struct target_ops *ops, ptid_t ptid,
		    struct target_waitstatus *status, int options)
{
  struct thread_info *tp, *other;

  DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);

  /* As long as we're not replaying, just forward the request.  */
  if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
    {
      for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
	if (ops->to_wait != NULL)
	  return ops->to_wait (ops, ptid, status, options);

      error (_("Cannot find target for waiting."));
    }

  /* Let's find a thread to move.  */
  tp = record_btrace_find_thread_to_move (ptid);
  if (tp == NULL)
    {
      /* No thread has a pending move; report that nothing happened.  */
      DEBUG ("wait %s: no thread", target_pid_to_str (ptid));

      status->kind = TARGET_WAITKIND_IGNORE;
      return minus_one_ptid;
    }

  /* We only move a single thread.  We're not able to correlate threads.  */
  *status = record_btrace_step_thread (tp);

  /* Stop all other threads.  */
  if (!non_stop)
    ALL_NON_EXITED_THREADS (other)
      other->btrace.flags &= ~BTHR_MOVE;

  /* Start record histories anew from the current position.  */
  record_btrace_clear_histories (&tp->btrace);

  /* We moved the replay position but did not update registers.  */
  registers_changed_ptid (tp->ptid);

  return tp->ptid;
}
1791
/* The to_can_execute_reverse method of target record-btrace.  */

static int
record_btrace_can_execute_reverse (struct target_ops *self)
{
  /* The recorded execution history can always be traversed backwards.  */
  return 1;
}
1799
1800 /* The to_decr_pc_after_break method of target record-btrace. */
1801
1802 static CORE_ADDR
1803 record_btrace_decr_pc_after_break (struct target_ops *ops,
1804 struct gdbarch *gdbarch)
1805 {
1806 /* When replaying, we do not actually execute the breakpoint instruction
1807 so there is no need to adjust the PC after hitting a breakpoint. */
1808 if (record_btrace_is_replaying (ops))
1809 return 0;
1810
1811 return ops->beneath->to_decr_pc_after_break (ops->beneath, gdbarch);
1812 }
1813
1814 /* The to_find_new_threads method of target record-btrace. */
1815
1816 static void
1817 record_btrace_find_new_threads (struct target_ops *ops)
1818 {
1819 /* Don't expect new threads if we're replaying. */
1820 if (record_btrace_is_replaying (ops))
1821 return;
1822
1823 /* Forward the request. */
1824 for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
1825 if (ops->to_find_new_threads != NULL)
1826 {
1827 ops->to_find_new_threads (ops);
1828 break;
1829 }
1830 }
1831
1832 /* The to_thread_alive method of target record-btrace. */
1833
1834 static int
1835 record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
1836 {
1837 /* We don't add or remove threads during replay. */
1838 if (record_btrace_is_replaying (ops))
1839 return find_thread_ptid (ptid) != NULL;
1840
1841 /* Forward the request. */
1842 for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
1843 if (ops->to_thread_alive != NULL)
1844 return ops->to_thread_alive (ops, ptid);
1845
1846 return 0;
1847 }
1848
/* Set the replay branch trace instruction iterator.  If IT is NULL, replay
   is stopped.  */

static void
record_btrace_set_replay (struct thread_info *tp,
			  const struct btrace_insn_iterator *it)
{
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;

  if (it == NULL || it->function == NULL)
    record_btrace_stop_replaying (tp);
  else
    {
      /* Begin replaying if we aren't yet; if we already are at IT,
	 there's nothing to do.  */
      if (btinfo->replay == NULL)
	record_btrace_start_replaying (tp);
      else if (btrace_insn_cmp (btinfo->replay, it) == 0)
	return;

      *btinfo->replay = *it;

      /* The replay position changed; registers read earlier are stale.  */
      registers_changed_ptid (tp->ptid);
    }

  /* Start anew from the new replay position.  */
  record_btrace_clear_histories (btinfo);
}
1876
/* The to_goto_record_begin method of target record-btrace.  */

static void
record_btrace_goto_begin (struct target_ops *self)
{
  struct thread_info *tp;
  struct btrace_insn_iterator begin;

  tp = require_btrace_thread ();

  /* Start replaying at the first recorded instruction.  */
  btrace_insn_begin (&begin, &tp->btrace);
  record_btrace_set_replay (tp, &begin);

  /* Show the user where we ended up.  */
  print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
}
1892
1893 /* The to_goto_record_end method of target record-btrace. */
1894
1895 static void
1896 record_btrace_goto_end (struct target_ops *ops)
1897 {
1898 struct thread_info *tp;
1899
1900 tp = require_btrace_thread ();
1901
1902 record_btrace_set_replay (tp, NULL);
1903
1904 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
1905 }
1906
/* The to_goto_record method of target record-btrace.  Moves the replay
   position to the instruction numbered INSN.  */

static void
record_btrace_goto (struct target_ops *self, ULONGEST insn)
{
  struct thread_info *tp;
  struct btrace_insn_iterator it;
  unsigned int number;
  int found;

  number = insn;

  /* Check for wrap-arounds.  INSN is a ULONGEST while instruction lookup
     takes an unsigned int; reject values that do not round-trip.  */
  if (number != insn)
    error (_("Instruction number out of range."));

  tp = require_btrace_thread ();

  found = btrace_find_insn_by_number (&it, &tp->btrace, number);
  if (found == 0)
    error (_("No such instruction."));

  record_btrace_set_replay (tp, &it);

  /* Show the user where we ended up.  */
  print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
}
1933
/* The to_execution_direction target method.  */

static enum exec_direction_kind
record_btrace_execution_direction (struct target_ops *self)
{
  /* Report the direction stored by the last to_resume call.  */
  return record_btrace_resume_exec_dir;
}
1941
/* Initialize the record-btrace target ops.  */

static void
init_record_btrace_ops (void)
{
  struct target_ops *ops;

  ops = &record_btrace_ops;
  ops->to_shortname = "record-btrace";
  ops->to_longname = "Branch tracing target";
  ops->to_doc = "Collect control-flow trace and provide the execution history.";
  ops->to_open = record_btrace_open;
  ops->to_close = record_btrace_close;
  /* Detach/disconnect/mourn/kill are shared with the other record
     targets.  */
  ops->to_detach = record_detach;
  ops->to_disconnect = record_disconnect;
  ops->to_mourn_inferior = record_mourn_inferior;
  ops->to_kill = record_kill;
  ops->to_stop_recording = record_btrace_stop_recording;
  ops->to_info_record = record_btrace_info;
  /* History browsing.  */
  ops->to_insn_history = record_btrace_insn_history;
  ops->to_insn_history_from = record_btrace_insn_history_from;
  ops->to_insn_history_range = record_btrace_insn_history_range;
  ops->to_call_history = record_btrace_call_history;
  ops->to_call_history_from = record_btrace_call_history_from;
  ops->to_call_history_range = record_btrace_call_history_range;
  ops->to_record_is_replaying = record_btrace_is_replaying;
  ops->to_xfer_partial = record_btrace_xfer_partial;
  ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
  ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
  ops->to_fetch_registers = record_btrace_fetch_registers;
  ops->to_store_registers = record_btrace_store_registers;
  ops->to_prepare_to_store = record_btrace_prepare_to_store;
  ops->to_get_unwinder = &record_btrace_to_get_unwinder;
  ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;
  /* Replay execution.  */
  ops->to_resume = record_btrace_resume;
  ops->to_wait = record_btrace_wait;
  ops->to_find_new_threads = record_btrace_find_new_threads;
  ops->to_thread_alive = record_btrace_thread_alive;
  ops->to_goto_record_begin = record_btrace_goto_begin;
  ops->to_goto_record_end = record_btrace_goto_end;
  ops->to_goto_record = record_btrace_goto;
  ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
  ops->to_decr_pc_after_break = record_btrace_decr_pc_after_break;
  ops->to_execution_direction = record_btrace_execution_direction;
  ops->to_stratum = record_stratum;
  ops->to_magic = OPS_MAGIC;
}
1989
1990 /* Alias for "target record". */
1991
1992 static void
1993 cmd_record_btrace_start (char *args, int from_tty)
1994 {
1995 if (args != NULL && *args != 0)
1996 error (_("Invalid argument."));
1997
1998 execute_command ("target record-btrace", from_tty);
1999 }
2000
/* The "set record btrace" command.  */

static void
cmd_set_record_btrace (char *args, int from_tty)
{
  /* A bare "set record btrace" displays the sub-commands.  */
  cmd_show_list (set_record_btrace_cmdlist, from_tty, "");
}
2008
/* The "show record btrace" command.  */

static void
cmd_show_record_btrace (char *args, int from_tty)
{
  /* A bare "show record btrace" displays the sub-commands.  */
  cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
}
2016
2017 /* The "show record btrace replay-memory-access" command. */
2018
2019 static void
2020 cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2021 struct cmd_list_element *c, const char *value)
2022 {
2023 fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
2024 replay_memory_access);
2025 }
2026
/* Provide a prototype to silence -Wmissing-prototypes.  */
void _initialize_record_btrace (void);

/* Initialize btrace commands.  */

void
_initialize_record_btrace (void)
{
  /* "record btrace" (alias "record b") starts recording.  */
  add_cmd ("btrace", class_obscure, cmd_record_btrace_start,
	   _("Start branch trace recording."),
	   &record_cmdlist);
  add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);

  /* "set/show record btrace" option prefixes.  */
  add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
		  _("Set record options"), &set_record_btrace_cmdlist,
		  "set record btrace ", 0, &set_record_cmdlist);

  add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
		  _("Show record options"), &show_record_btrace_cmdlist,
		  "show record btrace ", 0, &show_record_cmdlist);

  add_setshow_enum_cmd ("replay-memory-access", no_class,
			replay_memory_access_types, &replay_memory_access, _("\
Set what memory accesses are allowed during replay."), _("\
Show what memory accesses are allowed during replay."),
			   _("Default is READ-ONLY.\n\n\
The btrace record target does not trace data.\n\
The memory therefore corresponds to the live target and not \
to the current replay position.\n\n\
When READ-ONLY, allow accesses to read-only memory during replay.\n\
When READ-WRITE, allow accesses to read-only and read-write memory during \
replay."),
			   NULL, cmd_show_replay_memory_access,
			   &set_record_btrace_cmdlist,
			   &show_record_btrace_cmdlist);

  init_record_btrace_ops ();
  add_target (&record_btrace_ops);

  /* The frame cache maps frames to their btrace function segments.  */
  bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
			       xcalloc, xfree);
}
This page took 0.091147 seconds and 4 git commands to generate.