/* Branch trace support for GDB, the GNU debugger.

   Copyright (C) 2013-2014 Free Software Foundation, Inc.

   Contributed by Intel Corp. <markus.t.metzger@intel.com>

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "record.h"
#include "gdbthread.h"
#include "target.h"
#include "gdbcmd.h"
#include "disasm.h"
#include "observer.h"
#include "exceptions.h"
#include "cli/cli-utils.h"
#include "source.h"
#include "ui-out.h"
#include "symtab.h"
#include "filenames.h"
#include "regcache.h"
#include "frame-unwind.h"
#include "hashtab.h"
#include "infrun.h"
#include "event-loop.h"
#include "inf-loop.h"

/* The target_ops of record-btrace.  */
static struct target_ops record_btrace_ops;

/* A new thread observer enabling branch tracing for the new thread.  */
static struct observer *record_btrace_thread_observer;

/* Memory access types used in set/show record btrace replay-memory-access.  */
static const char replay_memory_access_read_only[] = "read-only";
static const char replay_memory_access_read_write[] = "read-write";
static const char *const replay_memory_access_types[] =
{
  replay_memory_access_read_only,
  replay_memory_access_read_write,
  NULL
};

/* The currently allowed replay memory access type.  */
static const char *replay_memory_access = replay_memory_access_read_only;
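
/* For example, "set record btrace replay-memory-access read-write" (the
   command registered in _initialize_record_btrace below) selects the more
   permissive mode.  */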

/* Command lists for "set/show record btrace".  */
static struct cmd_list_element *set_record_btrace_cmdlist;
static struct cmd_list_element *show_record_btrace_cmdlist;

/* The execution direction of the last resume we got.  See record-full.c.  */
static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;

/* The async event handler for reverse/replay execution.  */
static struct async_event_handler *record_btrace_async_inferior_event_handler;

/* A flag indicating that we are currently generating a core file.  */
static int record_btrace_generating_corefile;

/* Print a record-btrace debug message.  Use do ... while (0) to avoid
   ambiguities when used in if statements.  */

#define DEBUG(msg, args...) \
  do \
    { \
      if (record_debug != 0) \
        fprintf_unfiltered (gdb_stdlog, \
                            "[record-btrace] " msg "\n", ##args); \
    } \
  while (0)

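/* For example, DEBUG ("open") writes "[record-btrace] open" to gdb_stdlog,
   assuming record debugging has been enabled (e.g. "set debug record 1").  */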

/* Update the branch trace for the current thread and return a pointer to its
   thread_info.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct thread_info *
require_btrace_thread (void)
{
  struct thread_info *tp;

  DEBUG ("require");

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  btrace_fetch (tp);

  if (btrace_is_empty (tp))
    error (_("No trace."));

  return tp;
}

/* Update the branch trace for the current thread and return a pointer to its
   branch trace information struct.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct btrace_thread_info *
require_btrace (void)
{
  struct thread_info *tp;

  tp = require_btrace_thread ();

  return &tp->btrace;
}

/* Enable branch tracing for one thread.  Warn on errors.  */

static void
record_btrace_enable_warn (struct thread_info *tp)
{
  volatile struct gdb_exception error;

  TRY_CATCH (error, RETURN_MASK_ERROR)
    btrace_enable (tp);

  if (error.message != NULL)
    warning ("%s", error.message);
}

/* Callback function to disable branch tracing for one thread.  */

static void
record_btrace_disable_callback (void *arg)
{
  struct thread_info *tp;

  tp = arg;

  btrace_disable (tp);
}

/* Enable automatic tracing of new threads.  */

static void
record_btrace_auto_enable (void)
{
  DEBUG ("attach thread observer");

  record_btrace_thread_observer
    = observer_attach_new_thread (record_btrace_enable_warn);
}

/* Disable automatic tracing of new threads.  */

static void
record_btrace_auto_disable (void)
{
  /* The observer may have been detached, already.  */
  if (record_btrace_thread_observer == NULL)
    return;

  DEBUG ("detach thread observer");

  observer_detach_new_thread (record_btrace_thread_observer);
  record_btrace_thread_observer = NULL;
}

/* The record-btrace async event handler function.  */

static void
record_btrace_handle_async_inferior_event (gdb_client_data data)
{
  inferior_event_handler (INF_REG_EVENT, NULL);
}

/* The to_open method of target record-btrace.  */

static void
record_btrace_open (const char *args, int from_tty)
{
  struct cleanup *disable_chain;
  struct thread_info *tp;

  DEBUG ("open");

  record_preopen ();

  if (!target_has_execution)
    error (_("The program is not being run."));

  if (!target_supports_btrace ())
    error (_("Target does not support branch tracing."));

  if (non_stop)
    error (_("Record btrace can't debug inferior in non-stop mode."));

  gdb_assert (record_btrace_thread_observer == NULL);

  disable_chain = make_cleanup (null_cleanup, NULL);
  ALL_NON_EXITED_THREADS (tp)
    if (args == NULL || *args == 0 || number_is_in_list (args, tp->num))
      {
        btrace_enable (tp);

        make_cleanup (record_btrace_disable_callback, tp);
      }

  record_btrace_auto_enable ();

  push_target (&record_btrace_ops);

  record_btrace_async_inferior_event_handler
    = create_async_event_handler (record_btrace_handle_async_inferior_event,
                                  NULL);
  record_btrace_generating_corefile = 0;

  observer_notify_record_changed (current_inferior (), 1);

  discard_cleanups (disable_chain);
}

/* The to_stop_recording method of target record-btrace.  */

static void
record_btrace_stop_recording (struct target_ops *self)
{
  struct thread_info *tp;

  DEBUG ("stop recording");

  record_btrace_auto_disable ();

  ALL_NON_EXITED_THREADS (tp)
    if (tp->btrace.target != NULL)
      btrace_disable (tp);
}

/* The to_close method of target record-btrace.  */

static void
record_btrace_close (struct target_ops *self)
{
  struct thread_info *tp;

  if (record_btrace_async_inferior_event_handler != NULL)
    delete_async_event_handler (&record_btrace_async_inferior_event_handler);

  /* Make sure automatic recording gets disabled even if we did not stop
     recording before closing the record-btrace target.  */
  record_btrace_auto_disable ();

  /* We should have already stopped recording.
     Tear down btrace in case we have not.  */
  ALL_NON_EXITED_THREADS (tp)
    btrace_teardown (tp);
}

/* The to_info_record method of target record-btrace.  */

static void
record_btrace_info (struct target_ops *self)
{
  struct btrace_thread_info *btinfo;
  struct thread_info *tp;
  unsigned int insns, calls;

  DEBUG ("info");

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  btrace_fetch (tp);

  insns = 0;
  calls = 0;

  btinfo = &tp->btrace;

  if (!btrace_is_empty (tp))
    {
      struct btrace_call_iterator call;
      struct btrace_insn_iterator insn;

      btrace_call_end (&call, btinfo);
      btrace_call_prev (&call, 1);
      calls = btrace_call_number (&call);

      btrace_insn_end (&insn, btinfo);
      btrace_insn_prev (&insn, 1);
      insns = btrace_insn_number (&insn);
    }

  printf_unfiltered (_("Recorded %u instructions in %u functions for thread "
                       "%d (%s).\n"), insns, calls, tp->num,
                     target_pid_to_str (tp->ptid));

  if (btrace_is_replaying (tp))
    printf_unfiltered (_("Replay in progress.  At instruction %u.\n"),
                       btrace_insn_number (btinfo->replay));
}

/* Print an unsigned int.  */

static void
ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
{
  ui_out_field_fmt (uiout, fld, "%u", val);
}

/* Disassemble a section of the recorded instruction trace.  */

static void
btrace_insn_history (struct ui_out *uiout,
                     const struct btrace_insn_iterator *begin,
                     const struct btrace_insn_iterator *end, int flags)
{
  struct gdbarch *gdbarch;
  struct btrace_insn_iterator it;

  DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
         btrace_insn_number (end));

  gdbarch = target_gdbarch ();

  for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
    {
      const struct btrace_insn *insn;

      insn = btrace_insn_get (&it);

      /* Print the instruction index.  */
      ui_out_field_uint (uiout, "index", btrace_insn_number (&it));
      ui_out_text (uiout, "\t");

      /* Disassembly with '/m' flag may not produce the expected result.
         See PR gdb/11833.  */
      gdb_disassembly (gdbarch, uiout, NULL, flags, 1, insn->pc, insn->pc + 1);
    }
}

/* The to_insn_history method of target record-btrace.  */

static void
record_btrace_insn_history (struct target_ops *self, int size, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
                                                       "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  btinfo = require_btrace ();
  history = btinfo->insn_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("insn-history (0x%x): %d", flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
         start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
        begin = *replay;
      else
        btrace_insn_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
         expand in the other direction, as well, to fill up any remaining
         context.  */
      end = begin;
      if (size < 0)
        {
          /* We want the current position covered, as well.  */
          covered = btrace_insn_next (&end, 1);
          covered += btrace_insn_prev (&begin, context - covered);
          covered += btrace_insn_next (&end, context - covered);
        }
      else
        {
          covered = btrace_insn_next (&end, context);
          covered += btrace_insn_prev (&begin, context - covered);
        }
    }
  else
    {
      begin = history->begin;
      end = history->end;

      DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
             btrace_insn_number (&begin), btrace_insn_number (&end));

      if (size < 0)
        {
          end = begin;
          covered = btrace_insn_prev (&begin, context);
        }
      else
        {
          begin = end;
          covered = btrace_insn_next (&end, context);
        }
    }

  if (covered > 0)
    btrace_insn_history (uiout, &begin, &end, flags);
  else
    {
      if (size < 0)
        printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
        printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  btrace_set_insn_history (btinfo, &begin, &end);
  do_cleanups (uiout_cleanup);
}

/* The to_insn_history_range method of target record-btrace.  */

static void
record_btrace_insn_history_range (struct target_ops *self,
                                  ULONGEST from, ULONGEST to, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
                                                       "insn history");
  low = from;
  high = to;

  DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_insn_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_insn_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_insn_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_insn_next (&end, 1);
    }

  btrace_insn_history (uiout, &begin, &end, flags);
  btrace_set_insn_history (btinfo, &begin, &end);

  do_cleanups (uiout_cleanup);
}

/* The to_insn_history_from method of target record-btrace.  */

static void
record_btrace_insn_history_from (struct target_ops *self,
                                 ULONGEST from, int size, int flags)
{
  ULONGEST begin, end, context;

  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  if (size < 0)
    {
      end = from;

      if (from < context)
        begin = 0;
      else
        begin = from - context + 1;
    }
  else
    {
      begin = from;
      end = from + context - 1;

      /* Check for wrap-around.  */
      if (end < begin)
        end = ULONGEST_MAX;
    }

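  /* For example, FROM == 10 with SIZE == -3 requests the range [8; 10],
     while FROM == 10 with SIZE == 3 requests [10; 12].  */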
  record_btrace_insn_history_range (self, begin, end, flags);
}

/* Print the instruction number range for a function call history line.  */

static void
btrace_call_history_insn_range (struct ui_out *uiout,
                                const struct btrace_function *bfun)
{
  unsigned int begin, end, size;

  size = VEC_length (btrace_insn_s, bfun->insn);
  gdb_assert (size > 0);

  begin = bfun->insn_offset;
  end = begin + size - 1;

  ui_out_field_uint (uiout, "insn begin", begin);
  ui_out_text (uiout, ",");
  ui_out_field_uint (uiout, "insn end", end);
}
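
/* For example, a function segment covering instructions 5 through 12 is
   printed as "5,12"; btrace_call_history below prefixes it with "inst ".  */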

/* Print the source line information for a function call history line.  */

static void
btrace_call_history_src_line (struct ui_out *uiout,
                              const struct btrace_function *bfun)
{
  struct symbol *sym;
  int begin, end;

  sym = bfun->sym;
  if (sym == NULL)
    return;

  ui_out_field_string (uiout, "file",
                       symtab_to_filename_for_display (sym->symtab));

  begin = bfun->lbegin;
  end = bfun->lend;

  if (end < begin)
    return;

  ui_out_text (uiout, ":");
  ui_out_field_int (uiout, "min line", begin);

  if (end == begin)
    return;

  ui_out_text (uiout, ",");
  ui_out_field_int (uiout, "max line", end);
}

/* Get the name of a branch trace function.  */

static const char *
btrace_get_bfun_name (const struct btrace_function *bfun)
{
  struct minimal_symbol *msym;
  struct symbol *sym;

  if (bfun == NULL)
    return "??";

  msym = bfun->msym;
  sym = bfun->sym;

  if (sym != NULL)
    return SYMBOL_PRINT_NAME (sym);
  else if (msym != NULL)
    return MSYMBOL_PRINT_NAME (msym);
  else
    return "??";
}

/* Disassemble a section of the recorded function trace.  */

static void
btrace_call_history (struct ui_out *uiout,
                     const struct btrace_thread_info *btinfo,
                     const struct btrace_call_iterator *begin,
                     const struct btrace_call_iterator *end,
                     enum record_print_flag flags)
{
  struct btrace_call_iterator it;

  DEBUG ("ftrace (0x%x): [%u; %u)", flags, btrace_call_number (begin),
         btrace_call_number (end));

  for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
    {
      const struct btrace_function *bfun;
      struct minimal_symbol *msym;
      struct symbol *sym;

      bfun = btrace_call_get (&it);
      sym = bfun->sym;
      msym = bfun->msym;

      /* Print the function index.  */
      ui_out_field_uint (uiout, "index", bfun->number);
      ui_out_text (uiout, "\t");

      if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
        {
          int level = bfun->level + btinfo->level, i;

          for (i = 0; i < level; ++i)
            ui_out_text (uiout, "  ");
        }

      if (sym != NULL)
        ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (sym));
      else if (msym != NULL)
        ui_out_field_string (uiout, "function", MSYMBOL_PRINT_NAME (msym));
      else if (!ui_out_is_mi_like_p (uiout))
        ui_out_field_string (uiout, "function", "??");

      if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
        {
          ui_out_text (uiout, _("\tinst "));
          btrace_call_history_insn_range (uiout, bfun);
        }

      if ((flags & RECORD_PRINT_SRC_LINE) != 0)
        {
          ui_out_text (uiout, _("\tat "));
          btrace_call_history_src_line (uiout, bfun);
        }

      ui_out_text (uiout, "\n");
    }
}

/* The to_call_history method of target record-btrace.  */

static void
record_btrace_call_history (struct target_ops *self, int size, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
                                                       "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  btinfo = require_btrace ();
  history = btinfo->call_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("call-history (0x%x): %d", flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
         start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
        {
          begin.function = replay->function;
          begin.btinfo = btinfo;
        }
      else
        btrace_call_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
         expand in the other direction, as well, to fill up any remaining
         context.  */
      end = begin;
      if (size < 0)
        {
          /* We want the current position covered, as well.  */
          covered = btrace_call_next (&end, 1);
          covered += btrace_call_prev (&begin, context - covered);
          covered += btrace_call_next (&end, context - covered);
        }
      else
        {
          covered = btrace_call_next (&end, context);
          covered += btrace_call_prev (&begin, context - covered);
        }
    }
  else
    {
      begin = history->begin;
      end = history->end;

      DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", flags, size,
             btrace_call_number (&begin), btrace_call_number (&end));

      if (size < 0)
        {
          end = begin;
          covered = btrace_call_prev (&begin, context);
        }
      else
        {
          begin = end;
          covered = btrace_call_next (&end, context);
        }
    }

  if (covered > 0)
    btrace_call_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
        printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
        printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  btrace_set_call_history (btinfo, &begin, &end);
  do_cleanups (uiout_cleanup);
}

/* The to_call_history_range method of target record-btrace.  */

static void
record_btrace_call_history_range (struct target_ops *self,
                                  ULONGEST from, ULONGEST to, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
                                                       "func history");
  low = from;
  high = to;

  DEBUG ("call-history (0x%x): [%u; %u)", flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_call_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_call_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_call_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_call_next (&end, 1);
    }

  btrace_call_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_call_history (btinfo, &begin, &end);

  do_cleanups (uiout_cleanup);
}

/* The to_call_history_from method of target record-btrace.  */

static void
record_btrace_call_history_from (struct target_ops *self,
                                 ULONGEST from, int size, int flags)
{
  ULONGEST begin, end, context;

  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  if (size < 0)
    {
      end = from;

      if (from < context)
        begin = 0;
      else
        begin = from - context + 1;
    }
  else
    {
      begin = from;
      end = from + context - 1;

      /* Check for wrap-around.  */
      if (end < begin)
        end = ULONGEST_MAX;
    }

  record_btrace_call_history_range (self, begin, end, flags);
}

/* The to_record_is_replaying method of target record-btrace.  */

static int
record_btrace_is_replaying (struct target_ops *self)
{
  struct thread_info *tp;

  ALL_NON_EXITED_THREADS (tp)
    if (btrace_is_replaying (tp))
      return 1;

  return 0;
}

/* The to_xfer_partial method of target record-btrace.  */

static enum target_xfer_status
record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
                            const char *annex, gdb_byte *readbuf,
                            const gdb_byte *writebuf, ULONGEST offset,
                            ULONGEST len, ULONGEST *xfered_len)
{
  struct target_ops *t;

  /* Filter out requests that don't make sense during replay.  */
  if (replay_memory_access == replay_memory_access_read_only
      && !record_btrace_generating_corefile
      && record_btrace_is_replaying (ops))
    {
      switch (object)
        {
        case TARGET_OBJECT_MEMORY:
          {
            struct target_section *section;

            /* We do not allow writing memory in general.  */
            if (writebuf != NULL)
              {
                *xfered_len = len;
                return TARGET_XFER_UNAVAILABLE;
              }

            /* We allow reading readonly memory.  */
            section = target_section_by_addr (ops, offset);
            if (section != NULL)
              {
                /* Check if the section we found is readonly.  */
                if ((bfd_get_section_flags (section->the_bfd_section->owner,
                                            section->the_bfd_section)
                     & SEC_READONLY) != 0)
                  {
                    /* Truncate the request to fit into this section.  */
                    len = min (len, section->endaddr - offset);
                    break;
                  }
              }

            *xfered_len = len;
            return TARGET_XFER_UNAVAILABLE;
          }
        }
    }

  /* Forward the request.  */
  ops = ops->beneath;
  return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
                               offset, len, xfered_len);
}

/* The to_insert_breakpoint method of target record-btrace.  */

static int
record_btrace_insert_breakpoint (struct target_ops *ops,
                                 struct gdbarch *gdbarch,
                                 struct bp_target_info *bp_tgt)
{
  volatile struct gdb_exception except;
  const char *old;
  int ret;

  /* Inserting breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  TRY_CATCH (except, RETURN_MASK_ALL)
    ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);

  replay_memory_access = old;

  if (except.reason < 0)
    throw_exception (except);

  return ret;
}

/* The to_remove_breakpoint method of target record-btrace.  */

static int
record_btrace_remove_breakpoint (struct target_ops *ops,
                                 struct gdbarch *gdbarch,
                                 struct bp_target_info *bp_tgt)
{
  volatile struct gdb_exception except;
  const char *old;
  int ret;

  /* Removing breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  TRY_CATCH (except, RETURN_MASK_ALL)
    ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt);

  replay_memory_access = old;

  if (except.reason < 0)
    throw_exception (except);

  return ret;
}

/* The to_fetch_registers method of target record-btrace.  */

static void
record_btrace_fetch_registers (struct target_ops *ops,
                               struct regcache *regcache, int regno)
{
  struct btrace_insn_iterator *replay;
  struct thread_info *tp;

  tp = find_thread_ptid (inferior_ptid);
  gdb_assert (tp != NULL);

  replay = tp->btrace.replay;
  if (replay != NULL && !record_btrace_generating_corefile)
    {
      const struct btrace_insn *insn;
      struct gdbarch *gdbarch;
      int pcreg;

      gdbarch = get_regcache_arch (regcache);
      pcreg = gdbarch_pc_regnum (gdbarch);
      if (pcreg < 0)
        return;

      /* We can only provide the PC register.  */
      if (regno >= 0 && regno != pcreg)
        return;

      insn = btrace_insn_get (replay);
      gdb_assert (insn != NULL);

      regcache_raw_supply (regcache, regno, &insn->pc);
    }
  else
    {
      struct target_ops *t = ops->beneath;

      t->to_fetch_registers (t, regcache, regno);
    }
}

/* The to_store_registers method of target record-btrace.  */

static void
record_btrace_store_registers (struct target_ops *ops,
                               struct regcache *regcache, int regno)
{
  struct target_ops *t;

  if (!record_btrace_generating_corefile && record_btrace_is_replaying (ops))
    error (_("This record target does not allow writing registers."));

  gdb_assert (may_write_registers != 0);

  t = ops->beneath;
  t->to_store_registers (t, regcache, regno);
}

/* The to_prepare_to_store method of target record-btrace.  */

static void
record_btrace_prepare_to_store (struct target_ops *ops,
                                struct regcache *regcache)
{
  struct target_ops *t;

  if (!record_btrace_generating_corefile && record_btrace_is_replaying (ops))
    return;

  t = ops->beneath;
  t->to_prepare_to_store (t, regcache);
}

/* The branch trace frame cache.  */

struct btrace_frame_cache
{
  /* The thread.  */
  struct thread_info *tp;

  /* The frame info.  */
  struct frame_info *frame;

  /* The branch trace function segment.  */
  const struct btrace_function *bfun;
};

/* A struct btrace_frame_cache hash table indexed by NEXT.  */

static htab_t bfcache;

/* hash_f for htab_create_alloc of bfcache.  */

static hashval_t
bfcache_hash (const void *arg)
{
  const struct btrace_frame_cache *cache = arg;

  return htab_hash_pointer (cache->frame);
}

/* eq_f for htab_create_alloc of bfcache.  */

static int
bfcache_eq (const void *arg1, const void *arg2)
{
  const struct btrace_frame_cache *cache1 = arg1;
  const struct btrace_frame_cache *cache2 = arg2;

  return cache1->frame == cache2->frame;
}

/* Create a new btrace frame cache.  */

static struct btrace_frame_cache *
bfcache_new (struct frame_info *frame)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
  cache->frame = frame;

  slot = htab_find_slot (bfcache, cache, INSERT);
  gdb_assert (*slot == NULL);
  *slot = cache;

  return cache;
}

/* Extract the branch trace function from a branch trace frame.  */

static const struct btrace_function *
btrace_get_frame_function (struct frame_info *frame)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;
  struct btrace_frame_cache pattern;
  void **slot;

  pattern.frame = frame;

  slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
  if (slot == NULL)
    return NULL;

  cache = *slot;
  return cache->bfun;
}

/* Implement stop_reason method for record_btrace_frame_unwind.  */

static enum unwind_stop_reason
record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
                                        void **this_cache)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;

  cache = *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  if (bfun->up == NULL)
    return UNWIND_UNAVAILABLE;

  return UNWIND_NO_REASON;
}

/* Implement this_id method for record_btrace_frame_unwind.  */

static void
record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
                             struct frame_id *this_id)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;
  CORE_ADDR code, special;

  cache = *this_cache;

  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  while (bfun->segment.prev != NULL)
    bfun = bfun->segment.prev;

  code = get_frame_func (this_frame);
  special = bfun->number;

  *this_id = frame_id_build_unavailable_stack_special (code, special);

  DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
         btrace_get_bfun_name (cache->bfun),
         core_addr_to_string_nz (this_id->code_addr),
         core_addr_to_string_nz (this_id->special_addr));
}

/* Implement prev_register method for record_btrace_frame_unwind.  */

static struct value *
record_btrace_frame_prev_register (struct frame_info *this_frame,
                                   void **this_cache,
                                   int regnum)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun, *caller;
  const struct btrace_insn *insn;
  struct gdbarch *gdbarch;
  CORE_ADDR pc;
  int pcreg;

  gdbarch = get_frame_arch (this_frame);
  pcreg = gdbarch_pc_regnum (gdbarch);
  if (pcreg < 0 || regnum != pcreg)
    throw_error (NOT_AVAILABLE_ERROR,
                 _("Registers are not available in btrace record history"));

  cache = *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  caller = bfun->up;
  if (caller == NULL)
    throw_error (NOT_AVAILABLE_ERROR,
                 _("No caller in btrace record history"));

  if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
    {
      insn = VEC_index (btrace_insn_s, caller->insn, 0);
      pc = insn->pc;
    }
  else
    {
      insn = VEC_last (btrace_insn_s, caller->insn);
      pc = insn->pc;

      pc += gdb_insn_length (gdbarch, pc);
    }

  DEBUG ("[frame] unwound PC in %s on level %d: %s",
         btrace_get_bfun_name (bfun), bfun->level,
         core_addr_to_string_nz (pc));

  return frame_unwind_got_address (this_frame, regnum, pc);
}

/* Implement sniffer method for record_btrace_frame_unwind.  */

static int
record_btrace_frame_sniffer (const struct frame_unwind *self,
                             struct frame_info *this_frame,
                             void **this_cache)
{
  const struct btrace_function *bfun;
  struct btrace_frame_cache *cache;
  struct thread_info *tp;
  struct frame_info *next;

  /* THIS_FRAME does not contain a reference to its thread.  */
  tp = find_thread_ptid (inferior_ptid);
  gdb_assert (tp != NULL);

  bfun = NULL;
  next = get_next_frame (this_frame);
  if (next == NULL)
    {
      const struct btrace_insn_iterator *replay;

      replay = tp->btrace.replay;
      if (replay != NULL)
        bfun = replay->function;
    }
  else
    {
      const struct btrace_function *callee;

      callee = btrace_get_frame_function (next);
      if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
        bfun = callee->up;
    }

  if (bfun == NULL)
    return 0;

  DEBUG ("[frame] sniffed frame for %s on level %d",
         btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = tp;
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}

/* Implement sniffer method for record_btrace_tailcall_frame_unwind.  */

static int
record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
                                      struct frame_info *this_frame,
                                      void **this_cache)
{
  const struct btrace_function *bfun, *callee;
  struct btrace_frame_cache *cache;
  struct frame_info *next;

  next = get_next_frame (this_frame);
  if (next == NULL)
    return 0;

  callee = btrace_get_frame_function (next);
  if (callee == NULL)
    return 0;

  if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
    return 0;

  bfun = callee->up;
  if (bfun == NULL)
    return 0;

  DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
         btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = find_thread_ptid (inferior_ptid);
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}
static void
record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = this_cache;

  slot = htab_find_slot (bfcache, cache, NO_INSERT);
  gdb_assert (slot != NULL);

  htab_remove_elt (bfcache, cache);
}

/* btrace recording does not store previous memory content, nor the content
   of stack frames.  Any unwinding would return erroneous results as the
   stack contents no longer match the changed PC value restored from history.
   Therefore this unwinder reports any possibly unwound registers as
   <unavailable>.  */

const struct frame_unwind record_btrace_frame_unwind =
{
  NORMAL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_frame_sniffer,
  record_btrace_frame_dealloc_cache
};

const struct frame_unwind record_btrace_tailcall_frame_unwind =
{
  TAILCALL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_tailcall_frame_sniffer,
  record_btrace_frame_dealloc_cache
};

/* Implement the to_get_unwinder method.  */

static const struct frame_unwind *
record_btrace_to_get_unwinder (struct target_ops *self)
{
  return &record_btrace_frame_unwind;
}

/* Implement the to_get_tailcall_unwinder method.  */

static const struct frame_unwind *
record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
{
  return &record_btrace_tailcall_frame_unwind;
}

/* Indicate that TP should be resumed according to FLAG.  */

static void
record_btrace_resume_thread (struct thread_info *tp,
                             enum btrace_thread_flag flag)
{
  struct btrace_thread_info *btinfo;

  DEBUG ("resuming %d (%s): %u", tp->num, target_pid_to_str (tp->ptid), flag);

  btinfo = &tp->btrace;

  if ((btinfo->flags & BTHR_MOVE) != 0)
    error (_("Thread already moving."));

  /* Fetch the latest branch trace.  */
  btrace_fetch (tp);

  btinfo->flags |= flag;
}

/* Find the thread to resume given a PTID.  */

static struct thread_info *
record_btrace_find_resume_thread (ptid_t ptid)
{
  struct thread_info *tp;

  /* When asked to resume everything, we pick the current thread.  */
  if (ptid_equal (minus_one_ptid, ptid) || ptid_is_pid (ptid))
    ptid = inferior_ptid;

  return find_thread_ptid (ptid);
}

/* Start replaying a thread.  */

static struct btrace_insn_iterator *
record_btrace_start_replaying (struct thread_info *tp)
{
  volatile struct gdb_exception except;
  struct btrace_insn_iterator *replay;
  struct btrace_thread_info *btinfo;
  int executing;

  btinfo = &tp->btrace;
  replay = NULL;

  /* We can't start replaying without trace.  */
  if (btinfo->begin == NULL)
    return NULL;

  /* Clear the executing flag to allow changes to the current frame.
     We are not actually running, yet.  We just started a reverse execution
     command or a record goto command.
     For the latter, EXECUTING is false and this has no effect.
     For the former, EXECUTING is true and we're in to_wait, about to
     move the thread.  Since we need to recompute the stack, we temporarily
     set EXECUTING to false.  */
  executing = is_executing (tp->ptid);
  set_executing (tp->ptid, 0);

  /* GDB stores the current frame_id when stepping in order to detect steps
     into subroutines.
     Since frames are computed differently when we're replaying, we need to
     recompute those stored frames and fix them up so we can still detect
     subroutines after we started replaying.  */
  TRY_CATCH (except, RETURN_MASK_ALL)
    {
      struct frame_info *frame;
      struct frame_id frame_id;
      int upd_step_frame_id, upd_step_stack_frame_id;

      /* The current frame without replaying - computed via normal unwind.  */
      frame = get_current_frame ();
      frame_id = get_frame_id (frame);

      /* Check if we need to update any stepping-related frame id's.  */
      upd_step_frame_id = frame_id_eq (frame_id,
                                       tp->control.step_frame_id);
      upd_step_stack_frame_id = frame_id_eq (frame_id,
                                             tp->control.step_stack_frame_id);

      /* We start replaying at the end of the branch trace.  This corresponds
         to the current instruction.  */
      replay = xmalloc (sizeof (*replay));
      btrace_insn_end (replay, btinfo);

      /* We're not replaying, yet.  */
      gdb_assert (btinfo->replay == NULL);
      btinfo->replay = replay;

      /* Make sure we're not using any stale registers.  */
      registers_changed_ptid (tp->ptid);

      /* The current frame with replaying - computed via btrace unwind.  */
      frame = get_current_frame ();
      frame_id = get_frame_id (frame);

      /* Replace stepping related frames where necessary.  */
      if (upd_step_frame_id)
        tp->control.step_frame_id = frame_id;
      if (upd_step_stack_frame_id)
        tp->control.step_stack_frame_id = frame_id;
    }

  /* Restore the previous execution state.  */
  set_executing (tp->ptid, executing);

  if (except.reason < 0)
    {
      xfree (btinfo->replay);
      btinfo->replay = NULL;

      registers_changed_ptid (tp->ptid);

      throw_exception (except);
    }

  return replay;
}

/* Stop replaying a thread.  */

static void
record_btrace_stop_replaying (struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;

  xfree (btinfo->replay);
  btinfo->replay = NULL;

  /* Make sure we're not leaving any stale registers.  */
  registers_changed_ptid (tp->ptid);
}

/* The to_resume method of target record-btrace.  */

static void
record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
                      enum gdb_signal signal)
{
  struct thread_info *tp, *other;
  enum btrace_thread_flag flag;

  DEBUG ("resume %s: %s", target_pid_to_str (ptid), step ? "step" : "cont");

  /* Store the execution direction of the last resume.  */
  record_btrace_resume_exec_dir = execution_direction;

  tp = record_btrace_find_resume_thread (ptid);
  if (tp == NULL)
    error (_("Cannot find thread to resume."));

  /* Stop replaying other threads if the thread to resume is not replaying.  */
  if (!btrace_is_replaying (tp) && execution_direction != EXEC_REVERSE)
    ALL_NON_EXITED_THREADS (other)
      record_btrace_stop_replaying (other);

  /* As long as we're not replaying, just forward the request.  */
  if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
    {
      ops = ops->beneath;
      return ops->to_resume (ops, ptid, step, signal);
    }

  /* Compute the btrace thread flag for the requested move.  */
  if (step == 0)
    flag = execution_direction == EXEC_REVERSE ? BTHR_RCONT : BTHR_CONT;
  else
    flag = execution_direction == EXEC_REVERSE ? BTHR_RSTEP : BTHR_STEP;

  /* At the moment, we only move a single thread.  We could also move
     all threads in parallel by single-stepping each resumed thread
     until the first runs into an event.
     When we do that, we would want to continue all other threads.
     For now, just resume one thread to not confuse to_wait.  */
  record_btrace_resume_thread (tp, flag);

  /* We just indicate the resume intent here.  The actual stepping happens in
     record_btrace_wait below.  */

  /* Async support.  */
  if (target_can_async_p ())
    {
      target_async (inferior_event_handler, 0);
      mark_async_event_handler (record_btrace_async_inferior_event_handler);
    }
}

/* Find a thread to move.  */

static struct thread_info *
record_btrace_find_thread_to_move (ptid_t ptid)
{
  struct thread_info *tp;

  /* First check the parameter thread.  */
  tp = find_thread_ptid (ptid);
  if (tp != NULL && (tp->btrace.flags & BTHR_MOVE) != 0)
    return tp;

  /* Otherwise, find one other thread that has been resumed.  */
  ALL_NON_EXITED_THREADS (tp)
    if ((tp->btrace.flags & BTHR_MOVE) != 0)
      return tp;

  return NULL;
}

/* Return a target_waitstatus indicating that we ran out of history.  */

static struct target_waitstatus
btrace_step_no_history (void)
{
  struct target_waitstatus status;

  status.kind = TARGET_WAITKIND_NO_HISTORY;

  return status;
}

/* Return a target_waitstatus indicating that a step finished.  */

static struct target_waitstatus
btrace_step_stopped (void)
{
  struct target_waitstatus status;

  status.kind = TARGET_WAITKIND_STOPPED;
  status.value.sig = GDB_SIGNAL_TRAP;

  return status;
}

/* Clear the record histories.  */

static void
record_btrace_clear_histories (struct btrace_thread_info *btinfo)
{
  xfree (btinfo->insn_history);
  xfree (btinfo->call_history);

  btinfo->insn_history = NULL;
  btinfo->call_history = NULL;
}

/* Step a single thread.  */

static struct target_waitstatus
record_btrace_step_thread (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay, end;
  struct btrace_thread_info *btinfo;
  struct address_space *aspace;
  struct inferior *inf;
  enum btrace_thread_flag flags;
  unsigned int steps;

  /* We can't step without an execution history.  */
  if (btrace_is_empty (tp))
    return btrace_step_no_history ();

  btinfo = &tp->btrace;
  replay = btinfo->replay;

  flags = btinfo->flags & BTHR_MOVE;
  btinfo->flags &= ~BTHR_MOVE;

  DEBUG ("stepping %d (%s): %u", tp->num, target_pid_to_str (tp->ptid), flags);

  switch (flags)
    {
    default:
      internal_error (__FILE__, __LINE__, _("invalid stepping type."));

    case BTHR_STEP:
      /* We're done if we're not replaying.  */
      if (replay == NULL)
        return btrace_step_no_history ();

      /* We are always able to step at least once.  */
      steps = btrace_insn_next (replay, 1);
      gdb_assert (steps == 1);

      /* Determine the end of the instruction trace.  */
      btrace_insn_end (&end, btinfo);

      /* We stop replaying if we reached the end of the trace.  */
      if (btrace_insn_cmp (replay, &end) == 0)
        record_btrace_stop_replaying (tp);

      return btrace_step_stopped ();

    case BTHR_RSTEP:
      /* Start replaying if we're not already doing so.  */
      if (replay == NULL)
        replay = record_btrace_start_replaying (tp);

      /* If we can't step any further, we reached the end of the history.  */
      steps = btrace_insn_prev (replay, 1);
      if (steps == 0)
        return btrace_step_no_history ();

      return btrace_step_stopped ();

    case BTHR_CONT:
      /* We're done if we're not replaying.  */
      if (replay == NULL)
        return btrace_step_no_history ();

      inf = find_inferior_pid (ptid_get_pid (tp->ptid));
      aspace = inf->aspace;

      /* Determine the end of the instruction trace.  */
      btrace_insn_end (&end, btinfo);

      for (;;)
        {
          const struct btrace_insn *insn;

          /* We are always able to step at least once.  */
          steps = btrace_insn_next (replay, 1);
          gdb_assert (steps == 1);

          /* We stop replaying if we reached the end of the trace.  */
          if (btrace_insn_cmp (replay, &end) == 0)
            {
              record_btrace_stop_replaying (tp);
              return btrace_step_no_history ();
            }

          insn = btrace_insn_get (replay);
          gdb_assert (insn);

          DEBUG ("stepping %d (%s) ... %s", tp->num,
                 target_pid_to_str (tp->ptid),
                 core_addr_to_string_nz (insn->pc));

          if (breakpoint_here_p (aspace, insn->pc))
            return btrace_step_stopped ();
        }

    case BTHR_RCONT:
      /* Start replaying if we're not already doing so.  */
      if (replay == NULL)
        replay = record_btrace_start_replaying (tp);

      inf = find_inferior_pid (ptid_get_pid (tp->ptid));
      aspace = inf->aspace;

      for (;;)
        {
          const struct btrace_insn *insn;

          /* If we can't step any further, we're done.  */
          steps = btrace_insn_prev (replay, 1);
          if (steps == 0)
            return btrace_step_no_history ();

          insn = btrace_insn_get (replay);
          gdb_assert (insn);

          DEBUG ("reverse-stepping %d (%s) ... %s", tp->num,
                 target_pid_to_str (tp->ptid),
                 core_addr_to_string_nz (insn->pc));

          if (breakpoint_here_p (aspace, insn->pc))
            return btrace_step_stopped ();
        }
    }
}

/* The to_wait method of target record-btrace.  */

static ptid_t
record_btrace_wait (struct target_ops *ops, ptid_t ptid,
                    struct target_waitstatus *status, int options)
{
  struct thread_info *tp, *other;

  DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);

  /* As long as we're not replaying, just forward the request.  */
  if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
    {
      ops = ops->beneath;
      return ops->to_wait (ops, ptid, status, options);
    }

  /* Let's find a thread to move.  */
  tp = record_btrace_find_thread_to_move (ptid);
  if (tp == NULL)
    {
      DEBUG ("wait %s: no thread", target_pid_to_str (ptid));

      status->kind = TARGET_WAITKIND_IGNORE;
      return minus_one_ptid;
    }

  /* We only move a single thread.  We're not able to correlate threads.  */
  *status = record_btrace_step_thread (tp);

  /* Stop all other threads.  */
  if (!non_stop)
    ALL_NON_EXITED_THREADS (other)
      other->btrace.flags &= ~BTHR_MOVE;

  /* Start record histories anew from the current position.  */
  record_btrace_clear_histories (&tp->btrace);

  /* We moved the replay position but did not update registers.  */
  registers_changed_ptid (tp->ptid);

  return tp->ptid;
}

/* The to_can_execute_reverse method of target record-btrace.  */

static int
record_btrace_can_execute_reverse (struct target_ops *self)
{
  return 1;
}

/* The to_decr_pc_after_break method of target record-btrace.  */

static CORE_ADDR
record_btrace_decr_pc_after_break (struct target_ops *ops,
                                   struct gdbarch *gdbarch)
{
  /* When replaying, we do not actually execute the breakpoint instruction
     so there is no need to adjust the PC after hitting a breakpoint.  */
  if (record_btrace_is_replaying (ops))
    return 0;

  return ops->beneath->to_decr_pc_after_break (ops->beneath, gdbarch);
}

/* The to_find_new_threads method of target record-btrace.  */

static void
record_btrace_find_new_threads (struct target_ops *ops)
{
  /* Don't expect new threads if we're replaying.  */
  if (record_btrace_is_replaying (ops))
    return;

  /* Forward the request.  */
  ops = ops->beneath;
  ops->to_find_new_threads (ops);
}

/* The to_thread_alive method of target record-btrace.  */

static int
record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
{
  /* We don't add or remove threads during replay.  */
  if (record_btrace_is_replaying (ops))
    return find_thread_ptid (ptid) != NULL;

  /* Forward the request.  */
  ops = ops->beneath;
  return ops->to_thread_alive (ops, ptid);
}

/* Set the replay branch trace instruction iterator.  If IT is NULL, replay
   is stopped.  */

static void
record_btrace_set_replay (struct thread_info *tp,
                          const struct btrace_insn_iterator *it)
{
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;

  if (it == NULL || it->function == NULL)
    record_btrace_stop_replaying (tp);
  else
    {
      if (btinfo->replay == NULL)
        record_btrace_start_replaying (tp);
      else if (btrace_insn_cmp (btinfo->replay, it) == 0)
        return;

      *btinfo->replay = *it;
      registers_changed_ptid (tp->ptid);
    }

  /* Start anew from the new replay position.  */
  record_btrace_clear_histories (btinfo);
}

/* The to_goto_record_begin method of target record-btrace.  */

static void
record_btrace_goto_begin (struct target_ops *self)
{
  struct thread_info *tp;
  struct btrace_insn_iterator begin;

  tp = require_btrace_thread ();

  btrace_insn_begin (&begin, &tp->btrace);
  record_btrace_set_replay (tp, &begin);

  print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
}

/* The to_goto_record_end method of target record-btrace.  */

static void
record_btrace_goto_end (struct target_ops *ops)
{
  struct thread_info *tp;

  tp = require_btrace_thread ();

  record_btrace_set_replay (tp, NULL);

  print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
}

/* The to_goto_record method of target record-btrace.  */

static void
record_btrace_goto (struct target_ops *self, ULONGEST insn)
{
  struct thread_info *tp;
  struct btrace_insn_iterator it;
  unsigned int number;
  int found;

  number = insn;

  /* Check for wrap-arounds.  */
  if (number != insn)
    error (_("Instruction number out of range."));

  tp = require_btrace_thread ();

  found = btrace_find_insn_by_number (&it, &tp->btrace, number);
  if (found == 0)
    error (_("No such instruction."));

  record_btrace_set_replay (tp, &it);

  print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
}

/* The to_execution_direction target method.  */

static enum exec_direction_kind
record_btrace_execution_direction (struct target_ops *self)
{
  return record_btrace_resume_exec_dir;
}

/* The to_prepare_to_generate_core target method.  */

static void
record_btrace_prepare_to_generate_core (struct target_ops *self)
{
  record_btrace_generating_corefile = 1;
}

/* The to_done_generating_core target method.  */

static void
record_btrace_done_generating_core (struct target_ops *self)
{
  record_btrace_generating_corefile = 0;
}

/* Initialize the record-btrace target ops.  */

static void
init_record_btrace_ops (void)
{
  struct target_ops *ops;

  ops = &record_btrace_ops;
  ops->to_shortname = "record-btrace";
  ops->to_longname = "Branch tracing target";
  ops->to_doc = "Collect control-flow trace and provide the execution history.";
  ops->to_open = record_btrace_open;
  ops->to_close = record_btrace_close;
  ops->to_detach = record_detach;
  ops->to_disconnect = record_disconnect;
  ops->to_mourn_inferior = record_mourn_inferior;
  ops->to_kill = record_kill;
  ops->to_stop_recording = record_btrace_stop_recording;
  ops->to_info_record = record_btrace_info;
  ops->to_insn_history = record_btrace_insn_history;
  ops->to_insn_history_from = record_btrace_insn_history_from;
  ops->to_insn_history_range = record_btrace_insn_history_range;
  ops->to_call_history = record_btrace_call_history;
  ops->to_call_history_from = record_btrace_call_history_from;
  ops->to_call_history_range = record_btrace_call_history_range;
  ops->to_record_is_replaying = record_btrace_is_replaying;
  ops->to_xfer_partial = record_btrace_xfer_partial;
  ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
  ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
  ops->to_fetch_registers = record_btrace_fetch_registers;
  ops->to_store_registers = record_btrace_store_registers;
  ops->to_prepare_to_store = record_btrace_prepare_to_store;
  ops->to_get_unwinder = &record_btrace_to_get_unwinder;
  ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;
  ops->to_resume = record_btrace_resume;
  ops->to_wait = record_btrace_wait;
  ops->to_find_new_threads = record_btrace_find_new_threads;
  ops->to_thread_alive = record_btrace_thread_alive;
  ops->to_goto_record_begin = record_btrace_goto_begin;
  ops->to_goto_record_end = record_btrace_goto_end;
  ops->to_goto_record = record_btrace_goto;
  ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
  ops->to_decr_pc_after_break = record_btrace_decr_pc_after_break;
  ops->to_execution_direction = record_btrace_execution_direction;
  ops->to_prepare_to_generate_core = record_btrace_prepare_to_generate_core;
  ops->to_done_generating_core = record_btrace_done_generating_core;
  ops->to_stratum = record_stratum;
  ops->to_magic = OPS_MAGIC;
}

/* Alias for "target record".  */

static void
cmd_record_btrace_start (char *args, int from_tty)
{
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  execute_command ("target record-btrace", from_tty);
}

/* The "set record btrace" command.  */

static void
cmd_set_record_btrace (char *args, int from_tty)
{
  cmd_show_list (set_record_btrace_cmdlist, from_tty, "");
}

/* The "show record btrace" command.  */

static void
cmd_show_record_btrace (char *args, int from_tty)
{
  cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
}

/* The "show record btrace replay-memory-access" command.  */

static void
cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
                               struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
                    replay_memory_access);
}

void _initialize_record_btrace (void);

/* Initialize btrace commands.  */

void
_initialize_record_btrace (void)
{
  add_cmd ("btrace", class_obscure, cmd_record_btrace_start,
           _("Start branch trace recording."),
           &record_cmdlist);
  add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);

  add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
                  _("Set record options"), &set_record_btrace_cmdlist,
                  "set record btrace ", 0, &set_record_cmdlist);

  add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
                  _("Show record options"), &show_record_btrace_cmdlist,
                  "show record btrace ", 0, &show_record_cmdlist);

  add_setshow_enum_cmd ("replay-memory-access", no_class,
                        replay_memory_access_types, &replay_memory_access, _("\
Set what memory accesses are allowed during replay."), _("\
Show what memory accesses are allowed during replay."),
                        _("Default is READ-ONLY.\n\n\
The btrace record target does not trace data.\n\
The memory therefore corresponds to the live target and not \
to the current replay position.\n\n\
When READ-ONLY, allow accesses to read-only memory during replay.\n\
When READ-WRITE, allow accesses to read-only and read-write memory during \
replay."),
                        NULL, cmd_show_replay_memory_access,
                        &set_record_btrace_cmdlist,
                        &show_record_btrace_cmdlist);

  init_record_btrace_ops ();
  add_target (&record_btrace_ops);

  bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
                               xcalloc, xfree);
}