1 /* Branch trace support for GDB, the GNU debugger.
2
3 Copyright (C) 2013-2015 Free Software Foundation, Inc.
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include "record.h"
24 #include "gdbthread.h"
25 #include "target.h"
26 #include "gdbcmd.h"
27 #include "disasm.h"
28 #include "observer.h"
29 #include "cli/cli-utils.h"
30 #include "source.h"
31 #include "ui-out.h"
32 #include "symtab.h"
33 #include "filenames.h"
34 #include "regcache.h"
35 #include "frame-unwind.h"
36 #include "hashtab.h"
37 #include "infrun.h"
38 #include "event-loop.h"
39 #include "inf-loop.h"
40 #include "vec.h"
41
42 /* The target_ops of record-btrace. */
43 static struct target_ops record_btrace_ops;
44
45 /* A new thread observer enabling branch tracing for the new thread. */
46 static struct observer *record_btrace_thread_observer;
47
48 /* Memory access types used in set/show record btrace replay-memory-access. */
49 static const char replay_memory_access_read_only[] = "read-only";
50 static const char replay_memory_access_read_write[] = "read-write";
51 static const char *const replay_memory_access_types[] =
52 {
53 replay_memory_access_read_only,
54 replay_memory_access_read_write,
55 NULL
56 };
57
58 /* The currently allowed replay memory access type. */
59 static const char *replay_memory_access = replay_memory_access_read_only;
60
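/* Usage note (illustrative; the command wiring lives outside this excerpt):
   the strings above back the "set record btrace replay-memory-access"
   command, e.g.

     (gdb) set record btrace replay-memory-access read-write
     (gdb) show record btrace replay-memory-access
     Replay memory access is read-write.

   With "read-only", the default, only readonly memory may be accessed
   while replaying; see record_btrace_xfer_partial below.  */
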
61 /* Command lists for "set/show record btrace". */
62 static struct cmd_list_element *set_record_btrace_cmdlist;
63 static struct cmd_list_element *show_record_btrace_cmdlist;
64
65 /* The execution direction of the last resume we got. See record-full.c. */
66 static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;
67
68 /* The async event handler for reverse/replay execution. */
69 static struct async_event_handler *record_btrace_async_inferior_event_handler;
70
71 /* A flag indicating that we are currently generating a core file. */
72 static int record_btrace_generating_corefile;
73
74 /* The current branch trace configuration. */
75 static struct btrace_config record_btrace_conf;
76
77 /* Command list for "record btrace". */
78 static struct cmd_list_element *record_btrace_cmdlist;
79
80 /* Command lists for "set/show record btrace bts". */
81 static struct cmd_list_element *set_record_btrace_bts_cmdlist;
82 static struct cmd_list_element *show_record_btrace_bts_cmdlist;
83
84 /* Command lists for "set/show record btrace pt". */
85 static struct cmd_list_element *set_record_btrace_pt_cmdlist;
86 static struct cmd_list_element *show_record_btrace_pt_cmdlist;
87
88 /* Print a record-btrace debug message. Use do ... while (0) to avoid
89 ambiguities when used in if statements. */
90
91 #define DEBUG(msg, args...) \
92 do \
93 { \
94 if (record_debug != 0) \
95 fprintf_unfiltered (gdb_stdlog, \
96 "[record-btrace] " msg "\n", ##args); \
97 } \
98 while (0)
99
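/* Illustration (not part of the build): with record_debug non-zero,

     DEBUG ("resume %s", "all");

   expands, using string literal concatenation, to

     do
       {
         if (record_debug != 0)
           fprintf_unfiltered (gdb_stdlog, "[record-btrace] resume %s\n",
                               "all");
       }
     while (0);

   The do/while wrapper turns the macro into a single statement, so
   "if (cond) DEBUG (...); else ..." attaches the else as expected.  */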
100
101 /* Update the branch trace for the current thread and return a pointer to its
102 thread_info.
103
104 Throws an error if there is no thread or no trace. This function never
105 returns NULL. */
106
107 static struct thread_info *
108 require_btrace_thread (void)
109 {
110 struct thread_info *tp;
111
112 DEBUG ("require");
113
114 tp = find_thread_ptid (inferior_ptid);
115 if (tp == NULL)
116 error (_("No thread."));
117
118 btrace_fetch (tp);
119
120 if (btrace_is_empty (tp))
121 error (_("No trace."));
122
123 return tp;
124 }
125
126 /* Update the branch trace for the current thread and return a pointer to its
127 branch trace information struct.
128
129 Throws an error if there is no thread or no trace. This function never
130 returns NULL. */
131
132 static struct btrace_thread_info *
133 require_btrace (void)
134 {
135 struct thread_info *tp;
136
137 tp = require_btrace_thread ();
138
139 return &tp->btrace;
140 }
141
142 /* Enable branch tracing for one thread. Warn on errors. */
143
144 static void
145 record_btrace_enable_warn (struct thread_info *tp)
146 {
147 TRY
148 {
149 btrace_enable (tp, &record_btrace_conf);
150 }
151 CATCH (error, RETURN_MASK_ERROR)
152 {
153 warning ("%s", error.message);
154 }
155 END_CATCH
156 }
157
158 /* Callback function to disable branch tracing for one thread. */
159
160 static void
161 record_btrace_disable_callback (void *arg)
162 {
163 struct thread_info *tp;
164
165 tp = arg;
166
167 btrace_disable (tp);
168 }
169
170 /* Enable automatic tracing of new threads. */
171
172 static void
173 record_btrace_auto_enable (void)
174 {
175 DEBUG ("attach thread observer");
176
177 record_btrace_thread_observer
178 = observer_attach_new_thread (record_btrace_enable_warn);
179 }
180
181 /* Disable automatic tracing of new threads. */
182
183 static void
184 record_btrace_auto_disable (void)
185 {
186 /* The observer may already have been detached. */
187 if (record_btrace_thread_observer == NULL)
188 return;
189
190 DEBUG ("detach thread observer");
191
192 observer_detach_new_thread (record_btrace_thread_observer);
193 record_btrace_thread_observer = NULL;
194 }
195
196 /* The record-btrace async event handler function. */
197
198 static void
199 record_btrace_handle_async_inferior_event (gdb_client_data data)
200 {
201 inferior_event_handler (INF_REG_EVENT, NULL);
202 }
203
204 /* The to_open method of target record-btrace. */
205
206 static void
207 record_btrace_open (const char *args, int from_tty)
208 {
209 struct cleanup *disable_chain;
210 struct thread_info *tp;
211
212 DEBUG ("open");
213
214 record_preopen ();
215
216 if (!target_has_execution)
217 error (_("The program is not being run."));
218
219 if (non_stop)
220 error (_("Record btrace can't debug inferior in non-stop mode."));
221
222 gdb_assert (record_btrace_thread_observer == NULL);
223
224 disable_chain = make_cleanup (null_cleanup, NULL);
225 ALL_NON_EXITED_THREADS (tp)
226 if (args == NULL || *args == 0 || number_is_in_list (args, tp->num))
227 {
228 btrace_enable (tp, &record_btrace_conf);
229
230 make_cleanup (record_btrace_disable_callback, tp);
231 }
232
233 record_btrace_auto_enable ();
234
235 push_target (&record_btrace_ops);
236
237 record_btrace_async_inferior_event_handler
238 = create_async_event_handler (record_btrace_handle_async_inferior_event,
239 NULL);
240 record_btrace_generating_corefile = 0;
241
242 observer_notify_record_changed (current_inferior (), 1);
243
244 discard_cleanups (disable_chain);
245 }
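
/* Usage note (illustrative): "record btrace" without arguments enables
   tracing for all non-exited threads.  As the loop above suggests, an
   argument is treated as a thread number list, e.g. "1 3-5", and only
   the listed threads are traced.  If btrace_enable throws for one
   thread, the cleanup chain disables tracing for the threads that were
   already enabled.  */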
246
247 /* The to_stop_recording method of target record-btrace. */
248
249 static void
250 record_btrace_stop_recording (struct target_ops *self)
251 {
252 struct thread_info *tp;
253
254 DEBUG ("stop recording");
255
256 record_btrace_auto_disable ();
257
258 ALL_NON_EXITED_THREADS (tp)
259 if (tp->btrace.target != NULL)
260 btrace_disable (tp);
261 }
262
263 /* The to_close method of target record-btrace. */
264
265 static void
266 record_btrace_close (struct target_ops *self)
267 {
268 struct thread_info *tp;
269
270 if (record_btrace_async_inferior_event_handler != NULL)
271 delete_async_event_handler (&record_btrace_async_inferior_event_handler);
272
273 /* Make sure automatic recording gets disabled even if we did not stop
274 recording before closing the record-btrace target. */
275 record_btrace_auto_disable ();
276
277 /* We should have already stopped recording.
278 Tear down btrace in case we have not. */
279 ALL_NON_EXITED_THREADS (tp)
280 btrace_teardown (tp);
281 }
282
283 /* The to_async method of target record-btrace. */
284
285 static void
286 record_btrace_async (struct target_ops *ops, int enable)
287 {
288 if (enable)
289 mark_async_event_handler (record_btrace_async_inferior_event_handler);
290 else
291 clear_async_event_handler (record_btrace_async_inferior_event_handler);
292
293 ops->beneath->to_async (ops->beneath, enable);
294 }
295
296 /* Adjust *SIZE and return a human-readable size suffix. */
297
298 static const char *
299 record_btrace_adjust_size (unsigned int *size)
300 {
301 unsigned int sz;
302
303 sz = *size;
304
305 if ((sz & ((1u << 30) - 1)) == 0)
306 {
307 *size = sz >> 30;
308 return "GB";
309 }
310 else if ((sz & ((1u << 20) - 1)) == 0)
311 {
312 *size = sz >> 20;
313 return "MB";
314 }
315 else if ((sz & ((1u << 10) - 1)) == 0)
316 {
317 *size = sz >> 10;
318 return "kB";
319 }
320 else
321 return "";
322 }
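
/* Worked example (illustrative): the masks above test whether the size is
   an exact multiple of the boundary before dividing:

     *size = 2097152 (2 << 20)  ->  *size = 2,    suffix "MB"
     *size = 4096    (4 << 10)  ->  *size = 4,    suffix "kB"
     *size = 5000               ->  *size = 5000, suffix ""

   The 1 GB test comes first, so an exact multiple of 1 GB is never
   reported in MB or kB.  */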
323
324 /* Print a BTS configuration. */
325
326 static void
327 record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
328 {
329 const char *suffix;
330 unsigned int size;
331
332 size = conf->size;
333 if (size > 0)
334 {
335 suffix = record_btrace_adjust_size (&size);
336 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
337 }
338 }
339
340 /* Print an Intel(R) Processor Trace configuration. */
341
342 static void
343 record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
344 {
345 const char *suffix;
346 unsigned int size;
347
348 size = conf->size;
349 if (size > 0)
350 {
351 suffix = record_btrace_adjust_size (&size);
352 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
353 }
354 }
355
356 /* Print a branch tracing configuration. */
357
358 static void
359 record_btrace_print_conf (const struct btrace_config *conf)
360 {
361 printf_unfiltered (_("Recording format: %s.\n"),
362 btrace_format_string (conf->format));
363
364 switch (conf->format)
365 {
366 case BTRACE_FORMAT_NONE:
367 return;
368
369 case BTRACE_FORMAT_BTS:
370 record_btrace_print_bts_conf (&conf->bts);
371 return;
372
373 case BTRACE_FORMAT_PT:
374 record_btrace_print_pt_conf (&conf->pt);
375 return;
376 }
377
378 internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
379 }
380
381 /* The to_info_record method of target record-btrace. */
382
383 static void
384 record_btrace_info (struct target_ops *self)
385 {
386 struct btrace_thread_info *btinfo;
387 const struct btrace_config *conf;
388 struct thread_info *tp;
389 unsigned int insns, calls, gaps;
390
391 DEBUG ("info");
392
393 tp = find_thread_ptid (inferior_ptid);
394 if (tp == NULL)
395 error (_("No thread."));
396
397 btinfo = &tp->btrace;
398
399 conf = btrace_conf (btinfo);
400 if (conf != NULL)
401 record_btrace_print_conf (conf);
402
403 btrace_fetch (tp);
404
405 insns = 0;
406 calls = 0;
407 gaps = 0;
408
409 if (!btrace_is_empty (tp))
410 {
411 struct btrace_call_iterator call;
412 struct btrace_insn_iterator insn;
413
414 btrace_call_end (&call, btinfo);
415 btrace_call_prev (&call, 1);
416 calls = btrace_call_number (&call);
417
418 btrace_insn_end (&insn, btinfo);
419
420 insns = btrace_insn_number (&insn);
421 if (insns != 0)
422 {
423 /* The last instruction does not really belong to the trace. */
424 insns -= 1;
425 }
426 else
427 {
428 unsigned int steps;
429
430 /* Skip gaps at the end. */
431 do
432 {
433 steps = btrace_insn_prev (&insn, 1);
434 if (steps == 0)
435 break;
436
437 insns = btrace_insn_number (&insn);
438 }
439 while (insns == 0);
440 }
441
442 gaps = btinfo->ngaps;
443 }
444
445 printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
446 "for thread %d (%s).\n"), insns, calls, gaps,
447 tp->num, target_pid_to_str (tp->ptid));
448
449 if (btrace_is_replaying (tp))
450 printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
451 btrace_insn_number (btinfo->replay));
452 }
453
454 /* Print a decode error. */
455
456 static void
457 btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
458 enum btrace_format format)
459 {
460 const char *errstr;
461 int is_error;
462
463 errstr = _("unknown");
464 is_error = 1;
465
466 switch (format)
467 {
468 default:
469 break;
470
471 case BTRACE_FORMAT_BTS:
472 switch (errcode)
473 {
474 default:
475 break;
476
477 case BDE_BTS_OVERFLOW:
478 errstr = _("instruction overflow");
479 break;
480
481 case BDE_BTS_INSN_SIZE:
482 errstr = _("unknown instruction");
483 break;
484 }
485 break;
486
487 #if defined (HAVE_LIBIPT)
488 case BTRACE_FORMAT_PT:
489 switch (errcode)
490 {
491 case BDE_PT_USER_QUIT:
492 is_error = 0;
493 errstr = _("trace decode cancelled");
494 break;
495
496 case BDE_PT_DISABLED:
497 is_error = 0;
498 errstr = _("disabled");
499 break;
500
501 case BDE_PT_OVERFLOW:
502 is_error = 0;
503 errstr = _("overflow");
504 break;
505
506 default:
507 if (errcode < 0)
508 errstr = pt_errstr (pt_errcode (errcode));
509 break;
510 }
511 break;
512 #endif /* defined (HAVE_LIBIPT) */
513 }
514
515 ui_out_text (uiout, _("["));
516 if (is_error)
517 {
518 ui_out_text (uiout, _("decode error ("));
519 ui_out_field_int (uiout, "errcode", errcode);
520 ui_out_text (uiout, _("): "));
521 }
522 ui_out_text (uiout, errstr);
523 ui_out_text (uiout, _("]\n"));
524 }
525
526 /* Print an unsigned int. */
527
528 static void
529 ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
530 {
531 ui_out_field_fmt (uiout, fld, "%u", val);
532 }
533
534 /* Disassemble a section of the recorded instruction trace. */
535
536 static void
537 btrace_insn_history (struct ui_out *uiout,
538 const struct btrace_thread_info *btinfo,
539 const struct btrace_insn_iterator *begin,
540 const struct btrace_insn_iterator *end, int flags)
541 {
542 struct gdbarch *gdbarch;
543 struct btrace_insn_iterator it;
544
545 DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
546 btrace_insn_number (end));
547
548 gdbarch = target_gdbarch ();
549
550 for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
551 {
552 const struct btrace_insn *insn;
553
554 insn = btrace_insn_get (&it);
555
556 /* A NULL instruction indicates a gap in the trace. */
557 if (insn == NULL)
558 {
559 const struct btrace_config *conf;
560
561 conf = btrace_conf (btinfo);
562
563 /* We have trace so we must have a configuration. */
564 gdb_assert (conf != NULL);
565
566 btrace_ui_out_decode_error (uiout, it.function->errcode,
567 conf->format);
568 }
569 else
570 {
571 char prefix[4];
572
573 /* We may add a speculation prefix later. We use the same space
574 that is used for the pc prefix. */
575 if ((flags & DISASSEMBLY_OMIT_PC) == 0)
576 strncpy (prefix, pc_prefix (insn->pc), 3);
577 else
578 {
579 prefix[0] = ' ';
580 prefix[1] = ' ';
581 prefix[2] = ' ';
582 }
583 prefix[3] = 0;
584
585 /* Print the instruction index. */
586 ui_out_field_uint (uiout, "index", btrace_insn_number (&it));
587 ui_out_text (uiout, "\t");
588
589 /* Indicate speculative execution by a leading '?'. */
590 if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
591 prefix[0] = '?';
592
593 /* Print the prefix; we tell gdb_disassembly below to omit it. */
594 ui_out_field_fmt (uiout, "prefix", "%s", prefix);
595
596 /* Disassembly with '/m' flag may not produce the expected result.
597 See PR gdb/11833. */
598 gdb_disassembly (gdbarch, uiout, NULL, flags | DISASSEMBLY_OMIT_PC,
599 1, insn->pc, insn->pc + 1);
600 }
601 }
602 }
603
604 /* The to_insn_history method of target record-btrace. */
605
606 static void
607 record_btrace_insn_history (struct target_ops *self, int size, int flags)
608 {
609 struct btrace_thread_info *btinfo;
610 struct btrace_insn_history *history;
611 struct btrace_insn_iterator begin, end;
612 struct cleanup *uiout_cleanup;
613 struct ui_out *uiout;
614 unsigned int context, covered;
615
616 uiout = current_uiout;
617 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
618 "insn history");
619 context = abs (size);
620 if (context == 0)
621 error (_("Bad record instruction-history-size."));
622
623 btinfo = require_btrace ();
624 history = btinfo->insn_history;
625 if (history == NULL)
626 {
627 struct btrace_insn_iterator *replay;
628
629 DEBUG ("insn-history (0x%x): %d", flags, size);
630
631 /* If we're replaying, we start at the replay position. Otherwise, we
632 start at the tail of the trace. */
633 replay = btinfo->replay;
634 if (replay != NULL)
635 begin = *replay;
636 else
637 btrace_insn_end (&begin, btinfo);
638
639 /* We start from here and expand in the requested direction. Then we
640 expand in the other direction, as well, to fill up any remaining
641 context. */
642 end = begin;
643 if (size < 0)
644 {
645 /* We want the current position covered, as well. */
646 covered = btrace_insn_next (&end, 1);
647 covered += btrace_insn_prev (&begin, context - covered);
648 covered += btrace_insn_next (&end, context - covered);
649 }
650 else
651 {
652 covered = btrace_insn_next (&end, context);
653 covered += btrace_insn_prev (&begin, context - covered);
654 }
655 }
656 else
657 {
658 begin = history->begin;
659 end = history->end;
660
661 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
662 btrace_insn_number (&begin), btrace_insn_number (&end));
663
664 if (size < 0)
665 {
666 end = begin;
667 covered = btrace_insn_prev (&begin, context);
668 }
669 else
670 {
671 begin = end;
672 covered = btrace_insn_next (&end, context);
673 }
674 }
675
676 if (covered > 0)
677 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
678 else
679 {
680 if (size < 0)
681 printf_unfiltered (_("At the start of the branch trace record.\n"));
682 else
683 printf_unfiltered (_("At the end of the branch trace record.\n"));
684 }
685
686 btrace_set_insn_history (btinfo, &begin, &end);
687 do_cleanups (uiout_cleanup);
688 }
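
/* Worked example for the expansion above (illustrative): assume a replay
   position at instruction 100 and size = -10, i.e. context = 10.
   Starting from begin = end = 100:

     covered  = btrace_insn_next (&end, 1);        end = 101, covered = 1
     covered += btrace_insn_prev (&begin, 9);      begin = 91, covered = 10
     covered += btrace_insn_next (&end, 0);        no-op

   Assuming enough history is available, this yields the half-open range
   [91; 101), i.e. instructions 91..100 including the current position.  */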
689
690 /* The to_insn_history_range method of target record-btrace. */
691
692 static void
693 record_btrace_insn_history_range (struct target_ops *self,
694 ULONGEST from, ULONGEST to, int flags)
695 {
696 struct btrace_thread_info *btinfo;
697 struct btrace_insn_history *history;
698 struct btrace_insn_iterator begin, end;
699 struct cleanup *uiout_cleanup;
700 struct ui_out *uiout;
701 unsigned int low, high;
702 int found;
703
704 uiout = current_uiout;
705 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
706 "insn history");
707 low = from;
708 high = to;
709
710 DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);
711
712 /* Check for wrap-arounds. */
713 if (low != from || high != to)
714 error (_("Bad range."));
715
716 if (high < low)
717 error (_("Bad range."));
718
719 btinfo = require_btrace ();
720
721 found = btrace_find_insn_by_number (&begin, btinfo, low);
722 if (found == 0)
723 error (_("Range out of bounds."));
724
725 found = btrace_find_insn_by_number (&end, btinfo, high);
726 if (found == 0)
727 {
728 /* Silently truncate the range. */
729 btrace_insn_end (&end, btinfo);
730 }
731 else
732 {
733 /* We want both begin and end to be inclusive. */
734 btrace_insn_next (&end, 1);
735 }
736
737 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
738 btrace_set_insn_history (btinfo, &begin, &end);
739
740 do_cleanups (uiout_cleanup);
741 }
742
743 /* The to_insn_history_from method of target record-btrace. */
744
745 static void
746 record_btrace_insn_history_from (struct target_ops *self,
747 ULONGEST from, int size, int flags)
748 {
749 ULONGEST begin, end, context;
750
751 context = abs (size);
752 if (context == 0)
753 error (_("Bad record instruction-history-size."));
754
755 if (size < 0)
756 {
757 end = from;
758
759 if (from < context)
760 begin = 0;
761 else
762 begin = from - context + 1;
763 }
764 else
765 {
766 begin = from;
767 end = from + context - 1;
768
769 /* Check for wrap-around. */
770 if (end < begin)
771 end = ULONGEST_MAX;
772 }
773
774 record_btrace_insn_history_range (self, begin, end, flags);
775 }
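
/* Worked example (illustrative): "record instruction-history 100" with the
   default size of 10 computes the inclusive range [100; 109]; with size
   -10 it computes [91; 100].  record_btrace_insn_history_range then turns
   the inclusive upper bound back into a half-open iterator range.  */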
776
777 /* Print the instruction number range for a function call history line. */
778
779 static void
780 btrace_call_history_insn_range (struct ui_out *uiout,
781 const struct btrace_function *bfun)
782 {
783 unsigned int begin, end, size;
784
785 size = VEC_length (btrace_insn_s, bfun->insn);
786 gdb_assert (size > 0);
787
788 begin = bfun->insn_offset;
789 end = begin + size - 1;
790
791 ui_out_field_uint (uiout, "insn begin", begin);
792 ui_out_text (uiout, ",");
793 ui_out_field_uint (uiout, "insn end", end);
794 }
795
796 /* Compute the lowest and highest source line for the instructions in BFUN
797 and return them in PBEGIN and PEND.
798 Ignore instructions that can't be mapped to BFUN, e.g. instructions that
799 result from inlining or macro expansion. */
800
801 static void
802 btrace_compute_src_line_range (const struct btrace_function *bfun,
803 int *pbegin, int *pend)
804 {
805 struct btrace_insn *insn;
806 struct symtab *symtab;
807 struct symbol *sym;
808 unsigned int idx;
809 int begin, end;
810
811 begin = INT_MAX;
812 end = INT_MIN;
813
814 sym = bfun->sym;
815 if (sym == NULL)
816 goto out;
817
818 symtab = symbol_symtab (sym);
819
820 for (idx = 0; VEC_iterate (btrace_insn_s, bfun->insn, idx, insn); ++idx)
821 {
822 struct symtab_and_line sal;
823
824 sal = find_pc_line (insn->pc, 0);
825 if (sal.symtab != symtab || sal.line == 0)
826 continue;
827
828 begin = min (begin, sal.line);
829 end = max (end, sal.line);
830 }
831
832 out:
833 *pbegin = begin;
834 *pend = end;
835 }
836
837 /* Print the source line information for a function call history line. */
838
839 static void
840 btrace_call_history_src_line (struct ui_out *uiout,
841 const struct btrace_function *bfun)
842 {
843 struct symbol *sym;
844 int begin, end;
845
846 sym = bfun->sym;
847 if (sym == NULL)
848 return;
849
850 ui_out_field_string (uiout, "file",
851 symtab_to_filename_for_display (symbol_symtab (sym)));
852
853 btrace_compute_src_line_range (bfun, &begin, &end);
854 if (end < begin)
855 return;
856
857 ui_out_text (uiout, ":");
858 ui_out_field_int (uiout, "min line", begin);
859
860 if (end == begin)
861 return;
862
863 ui_out_text (uiout, ",");
864 ui_out_field_int (uiout, "max line", end);
865 }
866
867 /* Get the name of a branch trace function. */
868
869 static const char *
870 btrace_get_bfun_name (const struct btrace_function *bfun)
871 {
872 struct minimal_symbol *msym;
873 struct symbol *sym;
874
875 if (bfun == NULL)
876 return "??";
877
878 msym = bfun->msym;
879 sym = bfun->sym;
880
881 if (sym != NULL)
882 return SYMBOL_PRINT_NAME (sym);
883 else if (msym != NULL)
884 return MSYMBOL_PRINT_NAME (msym);
885 else
886 return "??";
887 }
888
889 /* Print a section of the recorded function call trace. */
890
891 static void
892 btrace_call_history (struct ui_out *uiout,
893 const struct btrace_thread_info *btinfo,
894 const struct btrace_call_iterator *begin,
895 const struct btrace_call_iterator *end,
896 enum record_print_flag flags)
897 {
898 struct btrace_call_iterator it;
899
900 DEBUG ("ftrace (0x%x): [%u; %u)", flags, btrace_call_number (begin),
901 btrace_call_number (end));
902
903 for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
904 {
905 const struct btrace_function *bfun;
906 struct minimal_symbol *msym;
907 struct symbol *sym;
908
909 bfun = btrace_call_get (&it);
910 sym = bfun->sym;
911 msym = bfun->msym;
912
913 /* Print the function index. */
914 ui_out_field_uint (uiout, "index", bfun->number);
915 ui_out_text (uiout, "\t");
916
917 /* Indicate gaps in the trace. */
918 if (bfun->errcode != 0)
919 {
920 const struct btrace_config *conf;
921
922 conf = btrace_conf (btinfo);
923
924 /* We have trace so we must have a configuration. */
925 gdb_assert (conf != NULL);
926
927 btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);
928
929 continue;
930 }
931
932 if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
933 {
934 int level = bfun->level + btinfo->level, i;
935
936 for (i = 0; i < level; ++i)
937 ui_out_text (uiout, " ");
938 }
939
940 if (sym != NULL)
941 ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (sym));
942 else if (msym != NULL)
943 ui_out_field_string (uiout, "function", MSYMBOL_PRINT_NAME (msym));
944 else if (!ui_out_is_mi_like_p (uiout))
945 ui_out_field_string (uiout, "function", "??");
946
947 if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
948 {
949 ui_out_text (uiout, _("\tinst "));
950 btrace_call_history_insn_range (uiout, bfun);
951 }
952
953 if ((flags & RECORD_PRINT_SRC_LINE) != 0)
954 {
955 ui_out_text (uiout, _("\tat "));
956 btrace_call_history_src_line (uiout, bfun);
957 }
958
959 ui_out_text (uiout, "\n");
960 }
961 }
962
963 /* The to_call_history method of target record-btrace. */
964
965 static void
966 record_btrace_call_history (struct target_ops *self, int size, int flags)
967 {
968 struct btrace_thread_info *btinfo;
969 struct btrace_call_history *history;
970 struct btrace_call_iterator begin, end;
971 struct cleanup *uiout_cleanup;
972 struct ui_out *uiout;
973 unsigned int context, covered;
974
975 uiout = current_uiout;
976 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
977 "insn history");
978 context = abs (size);
979 if (context == 0)
980 error (_("Bad record function-call-history-size."));
981
982 btinfo = require_btrace ();
983 history = btinfo->call_history;
984 if (history == NULL)
985 {
986 struct btrace_insn_iterator *replay;
987
988 DEBUG ("call-history (0x%x): %d", flags, size);
989
990 /* If we're replaying, we start at the replay position. Otherwise, we
991 start at the tail of the trace. */
992 replay = btinfo->replay;
993 if (replay != NULL)
994 {
995 begin.function = replay->function;
996 begin.btinfo = btinfo;
997 }
998 else
999 btrace_call_end (&begin, btinfo);
1000
1001 /* We start from here and expand in the requested direction. Then we
1002 expand in the other direction, as well, to fill up any remaining
1003 context. */
1004 end = begin;
1005 if (size < 0)
1006 {
1007 /* We want the current position covered, as well. */
1008 covered = btrace_call_next (&end, 1);
1009 covered += btrace_call_prev (&begin, context - covered);
1010 covered += btrace_call_next (&end, context - covered);
1011 }
1012 else
1013 {
1014 covered = btrace_call_next (&end, context);
1015 covered += btrace_call_prev (&begin, context - covered);
1016 }
1017 }
1018 else
1019 {
1020 begin = history->begin;
1021 end = history->end;
1022
1023 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", flags, size,
1024 btrace_call_number (&begin), btrace_call_number (&end));
1025
1026 if (size < 0)
1027 {
1028 end = begin;
1029 covered = btrace_call_prev (&begin, context);
1030 }
1031 else
1032 {
1033 begin = end;
1034 covered = btrace_call_next (&end, context);
1035 }
1036 }
1037
1038 if (covered > 0)
1039 btrace_call_history (uiout, btinfo, &begin, &end, flags);
1040 else
1041 {
1042 if (size < 0)
1043 printf_unfiltered (_("At the start of the branch trace record.\n"));
1044 else
1045 printf_unfiltered (_("At the end of the branch trace record.\n"));
1046 }
1047
1048 btrace_set_call_history (btinfo, &begin, &end);
1049 do_cleanups (uiout_cleanup);
1050 }
1051
1052 /* The to_call_history_range method of target record-btrace. */
1053
1054 static void
1055 record_btrace_call_history_range (struct target_ops *self,
1056 ULONGEST from, ULONGEST to, int flags)
1057 {
1058 struct btrace_thread_info *btinfo;
1059 struct btrace_call_history *history;
1060 struct btrace_call_iterator begin, end;
1061 struct cleanup *uiout_cleanup;
1062 struct ui_out *uiout;
1063 unsigned int low, high;
1064 int found;
1065
1066 uiout = current_uiout;
1067 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
1068 "func history");
1069 low = from;
1070 high = to;
1071
1072 DEBUG ("call-history (0x%x): [%u; %u)", flags, low, high);
1073
1074 /* Check for wrap-arounds. */
1075 if (low != from || high != to)
1076 error (_("Bad range."));
1077
1078 if (high < low)
1079 error (_("Bad range."));
1080
1081 btinfo = require_btrace ();
1082
1083 found = btrace_find_call_by_number (&begin, btinfo, low);
1084 if (found == 0)
1085 error (_("Range out of bounds."));
1086
1087 found = btrace_find_call_by_number (&end, btinfo, high);
1088 if (found == 0)
1089 {
1090 /* Silently truncate the range. */
1091 btrace_call_end (&end, btinfo);
1092 }
1093 else
1094 {
1095 /* We want both begin and end to be inclusive. */
1096 btrace_call_next (&end, 1);
1097 }
1098
1099 btrace_call_history (uiout, btinfo, &begin, &end, flags);
1100 btrace_set_call_history (btinfo, &begin, &end);
1101
1102 do_cleanups (uiout_cleanup);
1103 }
1104
1105 /* The to_call_history_from method of target record-btrace. */
1106
1107 static void
1108 record_btrace_call_history_from (struct target_ops *self,
1109 ULONGEST from, int size, int flags)
1110 {
1111 ULONGEST begin, end, context;
1112
1113 context = abs (size);
1114 if (context == 0)
1115 error (_("Bad record function-call-history-size."));
1116
1117 if (size < 0)
1118 {
1119 end = from;
1120
1121 if (from < context)
1122 begin = 0;
1123 else
1124 begin = from - context + 1;
1125 }
1126 else
1127 {
1128 begin = from;
1129 end = from + context - 1;
1130
1131 /* Check for wrap-around. */
1132 if (end < begin)
1133 end = ULONGEST_MAX;
1134 }
1135
1136 record_btrace_call_history_range (self, begin, end, flags);
1137 }
1138
1139 /* The to_record_is_replaying method of target record-btrace. */
1140
1141 static int
1142 record_btrace_is_replaying (struct target_ops *self)
1143 {
1144 struct thread_info *tp;
1145
1146 ALL_NON_EXITED_THREADS (tp)
1147 if (btrace_is_replaying (tp))
1148 return 1;
1149
1150 return 0;
1151 }
1152
1153 /* The to_xfer_partial method of target record-btrace. */
1154
1155 static enum target_xfer_status
1156 record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
1157 const char *annex, gdb_byte *readbuf,
1158 const gdb_byte *writebuf, ULONGEST offset,
1159 ULONGEST len, ULONGEST *xfered_len)
1160 {
1163 /* Filter out requests that don't make sense during replay. */
1164 if (replay_memory_access == replay_memory_access_read_only
1165 && !record_btrace_generating_corefile
1166 && record_btrace_is_replaying (ops))
1167 {
1168 switch (object)
1169 {
1170 case TARGET_OBJECT_MEMORY:
1171 {
1172 struct target_section *section;
1173
1174 /* We do not allow writing memory in general. */
1175 if (writebuf != NULL)
1176 {
1177 *xfered_len = len;
1178 return TARGET_XFER_UNAVAILABLE;
1179 }
1180
1181 /* We allow reading readonly memory. */
1182 section = target_section_by_addr (ops, offset);
1183 if (section != NULL)
1184 {
1185 /* Check if the section we found is readonly. */
1186 if ((bfd_get_section_flags (section->the_bfd_section->owner,
1187 section->the_bfd_section)
1188 & SEC_READONLY) != 0)
1189 {
1190 /* Truncate the request to fit into this section. */
1191 len = min (len, section->endaddr - offset);
1192 break;
1193 }
1194 }
1195
1196 *xfered_len = len;
1197 return TARGET_XFER_UNAVAILABLE;
1198 }
1199 }
1200 }
1201
1202 /* Forward the request. */
1203 ops = ops->beneath;
1204 return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
1205 offset, len, xfered_len);
1206 }
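
/* Example of the filtering above (illustrative): while replaying with the
   default "read-only" setting, reading a variable placed in a SEC_READONLY
   section such as .rodata is forwarded to the target beneath, whereas
   writes, and reads of writable memory, fail as unavailable.  Both could
   otherwise expose state that is newer than the replayed position.  */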
1207
1208 /* The to_insert_breakpoint method of target record-btrace. */
1209
1210 static int
1211 record_btrace_insert_breakpoint (struct target_ops *ops,
1212 struct gdbarch *gdbarch,
1213 struct bp_target_info *bp_tgt)
1214 {
1215 const char *old;
1216 int ret;
1217
1218 /* Inserting breakpoints requires accessing memory. Allow it for the
1219 duration of this function. */
1220 old = replay_memory_access;
1221 replay_memory_access = replay_memory_access_read_write;
1222
1223 ret = 0;
1224 TRY
1225 {
1226 ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);
1227 }
1228 CATCH (except, RETURN_MASK_ALL)
1229 {
1230 replay_memory_access = old;
1231 throw_exception (except);
1232 }
1233 END_CATCH
1234 replay_memory_access = old;
1235
1236 return ret;
1237 }
1238
1239 /* The to_remove_breakpoint method of target record-btrace. */
1240
1241 static int
1242 record_btrace_remove_breakpoint (struct target_ops *ops,
1243 struct gdbarch *gdbarch,
1244 struct bp_target_info *bp_tgt)
1245 {
1246 const char *old;
1247 int ret;
1248
1249 /* Removing breakpoints requires accessing memory. Allow it for the
1250 duration of this function. */
1251 old = replay_memory_access;
1252 replay_memory_access = replay_memory_access_read_write;
1253
1254 ret = 0;
1255 TRY
1256 {
1257 ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt);
1258 }
1259 CATCH (except, RETURN_MASK_ALL)
1260 {
1261 replay_memory_access = old;
1262 throw_exception (except);
1263 }
1264 END_CATCH
1265 replay_memory_access = old;
1266
1267 return ret;
1268 }
1269
1270 /* The to_fetch_registers method of target record-btrace. */
1271
1272 static void
1273 record_btrace_fetch_registers (struct target_ops *ops,
1274 struct regcache *regcache, int regno)
1275 {
1276 struct btrace_insn_iterator *replay;
1277 struct thread_info *tp;
1278
1279 tp = find_thread_ptid (inferior_ptid);
1280 gdb_assert (tp != NULL);
1281
1282 replay = tp->btrace.replay;
1283 if (replay != NULL && !record_btrace_generating_corefile)
1284 {
1285 const struct btrace_insn *insn;
1286 struct gdbarch *gdbarch;
1287 int pcreg;
1288
1289 gdbarch = get_regcache_arch (regcache);
1290 pcreg = gdbarch_pc_regnum (gdbarch);
1291 if (pcreg < 0)
1292 return;
1293
1294 /* We can only provide the PC register. */
1295 if (regno >= 0 && regno != pcreg)
1296 return;
1297
1298 insn = btrace_insn_get (replay);
1299 gdb_assert (insn != NULL);
1300
1301 regcache_raw_supply (regcache, regno, &insn->pc);
1302 }
1303 else
1304 {
1305 struct target_ops *t = ops->beneath;
1306
1307 t->to_fetch_registers (t, regcache, regno);
1308 }
1309 }
1310
1311 /* The to_store_registers method of target record-btrace. */
1312
1313 static void
1314 record_btrace_store_registers (struct target_ops *ops,
1315 struct regcache *regcache, int regno)
1316 {
1317 struct target_ops *t;
1318
1319 if (!record_btrace_generating_corefile && record_btrace_is_replaying (ops))
1320 error (_("This record target does not allow writing registers."));
1321
1322 gdb_assert (may_write_registers != 0);
1323
1324 t = ops->beneath;
1325 t->to_store_registers (t, regcache, regno);
1326 }
1327
1328 /* The to_prepare_to_store method of target record-btrace. */
1329
1330 static void
1331 record_btrace_prepare_to_store (struct target_ops *ops,
1332 struct regcache *regcache)
1333 {
1334 struct target_ops *t;
1335
1336 if (!record_btrace_generating_corefile && record_btrace_is_replaying (ops))
1337 return;
1338
1339 t = ops->beneath;
1340 t->to_prepare_to_store (t, regcache);
1341 }
1342
1343 /* The branch trace frame cache. */
1344
1345 struct btrace_frame_cache
1346 {
1347 /* The thread. */
1348 struct thread_info *tp;
1349
1350 /* The frame info. */
1351 struct frame_info *frame;
1352
1353 /* The branch trace function segment. */
1354 const struct btrace_function *bfun;
1355 };
1356
1357 /* A struct btrace_frame_cache hash table indexed by NEXT. */
1358
1359 static htab_t bfcache;
1360
1361 /* hash_f for htab_create_alloc of bfcache. */
1362
1363 static hashval_t
1364 bfcache_hash (const void *arg)
1365 {
1366 const struct btrace_frame_cache *cache = arg;
1367
1368 return htab_hash_pointer (cache->frame);
1369 }
1370
1371 /* eq_f for htab_create_alloc of bfcache. */
1372
1373 static int
1374 bfcache_eq (const void *arg1, const void *arg2)
1375 {
1376 const struct btrace_frame_cache *cache1 = arg1;
1377 const struct btrace_frame_cache *cache2 = arg2;
1378
1379 return cache1->frame == cache2->frame;
1380 }
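
/* A minimal sketch of how BFCACHE would be created with the callbacks
   above (assumption: the actual call is in this file's initialization
   code, outside this excerpt):

     bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq,
                                  NULL, xcalloc, xfree);

   htab_create_alloc is libiberty's hash table constructor; 50 is merely
   an initial size hint, and NULL means no per-entry delete function.  */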
1381
1382 /* Create a new btrace frame cache. */
1383
1384 static struct btrace_frame_cache *
1385 bfcache_new (struct frame_info *frame)
1386 {
1387 struct btrace_frame_cache *cache;
1388 void **slot;
1389
1390 cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
1391 cache->frame = frame;
1392
1393 slot = htab_find_slot (bfcache, cache, INSERT);
1394 gdb_assert (*slot == NULL);
1395 *slot = cache;
1396
1397 return cache;
1398 }
1399
1400 /* Extract the branch trace function from a branch trace frame. */
1401
1402 static const struct btrace_function *
1403 btrace_get_frame_function (struct frame_info *frame)
1404 {
1405 const struct btrace_frame_cache *cache;
1406 const struct btrace_function *bfun;
1407 struct btrace_frame_cache pattern;
1408 void **slot;
1409
1410 pattern.frame = frame;
1411
1412 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1413 if (slot == NULL)
1414 return NULL;
1415
1416 cache = *slot;
1417 return cache->bfun;
1418 }
1419
1420 /* Implement stop_reason method for record_btrace_frame_unwind. */
1421
1422 static enum unwind_stop_reason
1423 record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1424 void **this_cache)
1425 {
1426 const struct btrace_frame_cache *cache;
1427 const struct btrace_function *bfun;
1428
1429 cache = *this_cache;
1430 bfun = cache->bfun;
1431 gdb_assert (bfun != NULL);
1432
1433 if (bfun->up == NULL)
1434 return UNWIND_UNAVAILABLE;
1435
1436 return UNWIND_NO_REASON;
1437 }
1438
1439 /* Implement this_id method for record_btrace_frame_unwind. */
1440
1441 static void
1442 record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
1443 struct frame_id *this_id)
1444 {
1445 const struct btrace_frame_cache *cache;
1446 const struct btrace_function *bfun;
1447 CORE_ADDR code, special;
1448
1449 cache = *this_cache;
1450
1451 bfun = cache->bfun;
1452 gdb_assert (bfun != NULL);
1453
1454 while (bfun->segment.prev != NULL)
1455 bfun = bfun->segment.prev;
1456
1457 code = get_frame_func (this_frame);
1458 special = bfun->number;
1459
1460 *this_id = frame_id_build_unavailable_stack_special (code, special);
1461
1462 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1463 btrace_get_bfun_name (cache->bfun),
1464 core_addr_to_string_nz (this_id->code_addr),
1465 core_addr_to_string_nz (this_id->special_addr));
1466 }
1467
1468 /* Implement prev_register method for record_btrace_frame_unwind. */
1469
1470 static struct value *
1471 record_btrace_frame_prev_register (struct frame_info *this_frame,
1472 void **this_cache,
1473 int regnum)
1474 {
1475 const struct btrace_frame_cache *cache;
1476 const struct btrace_function *bfun, *caller;
1477 const struct btrace_insn *insn;
1478 struct gdbarch *gdbarch;
1479 CORE_ADDR pc;
1480 int pcreg;
1481
1482 gdbarch = get_frame_arch (this_frame);
1483 pcreg = gdbarch_pc_regnum (gdbarch);
1484 if (pcreg < 0 || regnum != pcreg)
1485 throw_error (NOT_AVAILABLE_ERROR,
1486 _("Registers are not available in btrace record history"));
1487
1488 cache = *this_cache;
1489 bfun = cache->bfun;
1490 gdb_assert (bfun != NULL);
1491
1492 caller = bfun->up;
1493 if (caller == NULL)
1494 throw_error (NOT_AVAILABLE_ERROR,
1495 _("No caller in btrace record history"));
1496
1497 if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
1498 {
1499 insn = VEC_index (btrace_insn_s, caller->insn, 0);
1500 pc = insn->pc;
1501 }
1502 else
1503 {
1504 insn = VEC_last (btrace_insn_s, caller->insn);
1505 pc = insn->pc;
1506
1507 pc += gdb_insn_length (gdbarch, pc);
1508 }
1509
1510 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1511 btrace_get_bfun_name (bfun), bfun->level,
1512 core_addr_to_string_nz (pc));
1513
1514 return frame_unwind_got_address (this_frame, regnum, pc);
1515 }
1516
1517 /* Implement sniffer method for record_btrace_frame_unwind. */
1518
1519 static int
1520 record_btrace_frame_sniffer (const struct frame_unwind *self,
1521 struct frame_info *this_frame,
1522 void **this_cache)
1523 {
1524 const struct btrace_function *bfun;
1525 struct btrace_frame_cache *cache;
1526 struct thread_info *tp;
1527 struct frame_info *next;
1528
1529 /* THIS_FRAME does not contain a reference to its thread. */
1530 tp = find_thread_ptid (inferior_ptid);
1531 gdb_assert (tp != NULL);
1532
1533 bfun = NULL;
1534 next = get_next_frame (this_frame);
1535 if (next == NULL)
1536 {
1537 const struct btrace_insn_iterator *replay;
1538
1539 replay = tp->btrace.replay;
1540 if (replay != NULL)
1541 bfun = replay->function;
1542 }
1543 else
1544 {
1545 const struct btrace_function *callee;
1546
1547 callee = btrace_get_frame_function (next);
1548 if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1549 bfun = callee->up;
1550 }
1551
1552 if (bfun == NULL)
1553 return 0;
1554
1555 DEBUG ("[frame] sniffed frame for %s on level %d",
1556 btrace_get_bfun_name (bfun), bfun->level);
1557
1558 /* This is our frame. Initialize the frame cache. */
1559 cache = bfcache_new (this_frame);
1560 cache->tp = tp;
1561 cache->bfun = bfun;
1562
1563 *this_cache = cache;
1564 return 1;
1565 }
1566
1567 /* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1568
1569 static int
1570 record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
1571 struct frame_info *this_frame,
1572 void **this_cache)
1573 {
1574 const struct btrace_function *bfun, *callee;
1575 struct btrace_frame_cache *cache;
1576 struct frame_info *next;
1577
1578 next = get_next_frame (this_frame);
1579 if (next == NULL)
1580 return 0;
1581
1582 callee = btrace_get_frame_function (next);
1583 if (callee == NULL)
1584 return 0;
1585
1586 if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1587 return 0;
1588
1589 bfun = callee->up;
1590 if (bfun == NULL)
1591 return 0;
1592
1593 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1594 btrace_get_bfun_name (bfun), bfun->level);
1595
1596 /* This is our frame. Initialize the frame cache. */
1597 cache = bfcache_new (this_frame);
1598 cache->tp = find_thread_ptid (inferior_ptid);
1599 cache->bfun = bfun;
1600
1601 *this_cache = cache;
1602 return 1;
1603 }
1604
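/* Implement the dealloc_cache method for the branch trace frame unwinders. */
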
1605 static void
1606 record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
1607 {
1608 struct btrace_frame_cache *cache;
1609 void **slot;
1610
1611 cache = this_cache;
1612
1613 slot = htab_find_slot (bfcache, cache, NO_INSERT);
1614 gdb_assert (slot != NULL);
1615
1616 htab_remove_elt (bfcache, cache);
1617 }
1618
1619 /* btrace recording stores neither previous memory content nor the contents
1620 of the stack frames. Any unwinding would return erroneous results as the
1621 stack contents no longer match the changed PC value restored from history.
1622 Therefore this unwinder reports any possibly unwound registers as
1623 <unavailable>. */
1624
1625 const struct frame_unwind record_btrace_frame_unwind =
1626 {
1627 NORMAL_FRAME,
1628 record_btrace_frame_unwind_stop_reason,
1629 record_btrace_frame_this_id,
1630 record_btrace_frame_prev_register,
1631 NULL,
1632 record_btrace_frame_sniffer,
1633 record_btrace_frame_dealloc_cache
1634 };
1635
1636 const struct frame_unwind record_btrace_tailcall_frame_unwind =
1637 {
1638 TAILCALL_FRAME,
1639 record_btrace_frame_unwind_stop_reason,
1640 record_btrace_frame_this_id,
1641 record_btrace_frame_prev_register,
1642 NULL,
1643 record_btrace_tailcall_frame_sniffer,
1644 record_btrace_frame_dealloc_cache
1645 };
1646
1647 /* Implement the to_get_unwinder method. */
1648
1649 static const struct frame_unwind *
1650 record_btrace_to_get_unwinder (struct target_ops *self)
1651 {
1652 return &record_btrace_frame_unwind;
1653 }
1654
1655 /* Implement the to_get_tailcall_unwinder method. */
1656
1657 static const struct frame_unwind *
1658 record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
1659 {
1660 return &record_btrace_tailcall_frame_unwind;
1661 }
1662
1663 /* Return a human-readable string for FLAG. */
1664
1665 static const char *
1666 btrace_thread_flag_to_str (enum btrace_thread_flag flag)
1667 {
1668 switch (flag)
1669 {
1670 case BTHR_STEP:
1671 return "step";
1672
1673 case BTHR_RSTEP:
1674 return "reverse-step";
1675
1676 case BTHR_CONT:
1677 return "cont";
1678
1679 case BTHR_RCONT:
1680 return "reverse-cont";
1681
1682 case BTHR_STOP:
1683 return "stop";
1684 }
1685
1686 return "<invalid>";
1687 }
1688
1689 /* Indicate that TP should be resumed according to FLAG. */
1690
1691 static void
1692 record_btrace_resume_thread (struct thread_info *tp,
1693 enum btrace_thread_flag flag)
1694 {
1695 struct btrace_thread_info *btinfo;
1696
1697 DEBUG ("resuming thread %d (%s): %x (%s)", tp->num,
1698 target_pid_to_str (tp->ptid), flag, btrace_thread_flag_to_str (flag));
1699
1700 btinfo = &tp->btrace;
1701
1702 /* Fetch the latest branch trace. */
1703 btrace_fetch (tp);
1704
1705 /* A resume request overwrites a preceding resume or stop request. */
1706 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
1707 btinfo->flags |= flag;
1708 }
1709
1710 /* Get the current frame for TP. */
1711
1712 static struct frame_info *
1713 get_thread_current_frame (struct thread_info *tp)
1714 {
1715 struct frame_info *frame;
1716 ptid_t old_inferior_ptid;
1717 int executing;
1718
1719 /* Set INFERIOR_PTID, which is implicitly used by get_current_frame. */
1720 old_inferior_ptid = inferior_ptid;
1721 inferior_ptid = tp->ptid;
1722
1723 /* Clear the executing flag to allow changes to the current frame.
1724 We are not actually running, yet. We just started a reverse execution
1725 command or a record goto command.
1726 For the latter, EXECUTING is false and this has no effect.
1727 For the former, EXECUTING is true and we're in to_wait, about to
1728 move the thread. Since we need to recompute the stack, we temporarily
1729 set EXECUTING to false. */
1730 executing = is_executing (inferior_ptid);
1731 set_executing (inferior_ptid, 0);
1732
1733 frame = NULL;
1734 TRY
1735 {
1736 frame = get_current_frame ();
1737 }
1738 CATCH (except, RETURN_MASK_ALL)
1739 {
1740 /* Restore the previous execution state. */
1741 set_executing (inferior_ptid, executing);
1742
1743 /* Restore the previous inferior_ptid. */
1744 inferior_ptid = old_inferior_ptid;
1745
1746 throw_exception (except);
1747 }
1748 END_CATCH
1749
1750 /* Restore the previous execution state. */
1751 set_executing (inferior_ptid, executing);
1752
1753 /* Restore the previous inferior_ptid. */
1754 inferior_ptid = old_inferior_ptid;
1755
1756 return frame;
1757 }
1758
1759 /* Start replaying a thread. */
1760
1761 static struct btrace_insn_iterator *
1762 record_btrace_start_replaying (struct thread_info *tp)
1763 {
1764 struct btrace_insn_iterator *replay;
1765 struct btrace_thread_info *btinfo;
1766
1767 btinfo = &tp->btrace;
1768 replay = NULL;
1769
1770 /* We can't start replaying without trace. */
1771 if (btinfo->begin == NULL)
1772 return NULL;
1773
1774 /* GDB stores the current frame_id when stepping in order to detect steps
1775 into subroutines.
1776 Since frames are computed differently when we're replaying, we need to
1777 recompute those stored frames and fix them up so we can still detect
1778 subroutines after we started replaying. */
1779 TRY
1780 {
1781 struct frame_info *frame;
1782 struct frame_id frame_id;
1783 int upd_step_frame_id, upd_step_stack_frame_id;
1784
1785 /* The current frame without replaying - computed via normal unwind. */
1786 frame = get_thread_current_frame (tp);
1787 frame_id = get_frame_id (frame);
1788
1789 /* Check if we need to update any stepping-related frame id's. */
1790 upd_step_frame_id = frame_id_eq (frame_id,
1791 tp->control.step_frame_id);
1792 upd_step_stack_frame_id = frame_id_eq (frame_id,
1793 tp->control.step_stack_frame_id);
1794
1795 /* We start replaying at the end of the branch trace. This corresponds
1796 to the current instruction. */
1797 replay = XNEW (struct btrace_insn_iterator);
1798 btrace_insn_end (replay, btinfo);
1799
1800 /* Skip gaps at the end of the trace. */
1801 while (btrace_insn_get (replay) == NULL)
1802 {
1803 unsigned int steps;
1804
1805 steps = btrace_insn_prev (replay, 1);
1806 if (steps == 0)
1807 error (_("No trace."));
1808 }
1809
1810 /* We're not replaying, yet. */
1811 gdb_assert (btinfo->replay == NULL);
1812 btinfo->replay = replay;
1813
1814 /* Make sure we're not using any stale registers. */
1815 registers_changed_ptid (tp->ptid);
1816
1817 /* The current frame with replaying - computed via btrace unwind. */
1818 frame = get_thread_current_frame (tp);
1819 frame_id = get_frame_id (frame);
1820
1821 /* Replace stepping related frames where necessary. */
1822 if (upd_step_frame_id)
1823 tp->control.step_frame_id = frame_id;
1824 if (upd_step_stack_frame_id)
1825 tp->control.step_stack_frame_id = frame_id;
1826 }
1827 CATCH (except, RETURN_MASK_ALL)
1828 {
1829 xfree (btinfo->replay);
1830 btinfo->replay = NULL;
1831
1832 registers_changed_ptid (tp->ptid);
1833
1834 throw_exception (except);
1835 }
1836 END_CATCH
1837
1838 return replay;
1839 }
1840
1841 /* Stop replaying a thread. */
1842
1843 static void
1844 record_btrace_stop_replaying (struct thread_info *tp)
1845 {
1846 struct btrace_thread_info *btinfo;
1847
1848 btinfo = &tp->btrace;
1849
1850 xfree (btinfo->replay);
1851 btinfo->replay = NULL;
1852
1853 /* Make sure we're not leaving any stale registers. */
1854 registers_changed_ptid (tp->ptid);
1855 }
1856
1857 /* Stop replaying TP if it is at the end of its execution history. */
1858
1859 static void
1860 record_btrace_stop_replaying_at_end (struct thread_info *tp)
1861 {
1862 struct btrace_insn_iterator *replay, end;
1863 struct btrace_thread_info *btinfo;
1864
1865 btinfo = &tp->btrace;
1866 replay = btinfo->replay;
1867
1868 if (replay == NULL)
1869 return;
1870
1871 btrace_insn_end (&end, btinfo);
1872
1873 if (btrace_insn_cmp (replay, &end) == 0)
1874 record_btrace_stop_replaying (tp);
1875 }
1876
1877 /* The to_resume method of target record-btrace. */
1878
1879 static void
1880 record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
1881 enum gdb_signal signal)
1882 {
1883 struct thread_info *tp;
1884 enum btrace_thread_flag flag;
1885 ptid_t orig_ptid;
1886
1887 DEBUG ("resume %s: %s%s", target_pid_to_str (ptid),
1888 execution_direction == EXEC_REVERSE ? "reverse-" : "",
1889 step ? "step" : "cont");
1890
1891 orig_ptid = ptid;
1892
1893 /* Store the execution direction of the last resume.
1894
1895 If there is more than one to_resume call, we have to rely on infrun
1896 to not change the execution direction in-between. */
1897 record_btrace_resume_exec_dir = execution_direction;
1898
1899 /* For all-stop targets... */
1900 if (!target_is_non_stop_p ())
1901 {
1902 /* ...we pick the current thread when asked to resume an entire process
1903 or everything. */
1904 if (ptid_equal (minus_one_ptid, ptid) || ptid_is_pid (ptid))
1905 ptid = inferior_ptid;
1906
1907 tp = find_thread_ptid (ptid);
1908 if (tp == NULL)
1909 error (_("Cannot find thread to resume."));
1910
1911 /* ...and we stop replaying other threads if the thread to resume is not
1912 replaying. */
1913 if (!btrace_is_replaying (tp) && execution_direction != EXEC_REVERSE)
1914 ALL_NON_EXITED_THREADS (tp)
1915 record_btrace_stop_replaying (tp);
1916 }
1917
1918 /* As long as we're not replaying, just forward the request.
1919
1920 For non-stop targets this means that no thread is replaying. In order to
1921 make progress, we may need to explicitly move replaying threads to the end
1922 of their execution history. */
1923 if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
1924 {
1925 ops = ops->beneath;
1926 return ops->to_resume (ops, orig_ptid, step, signal);
1927 }
1928
1929 /* Compute the btrace thread flag for the requested move. */
1930 if (step == 0)
1931 flag = execution_direction == EXEC_REVERSE ? BTHR_RCONT : BTHR_CONT;
1932 else
1933 flag = execution_direction == EXEC_REVERSE ? BTHR_RSTEP : BTHR_STEP;
1934
1935 /* We just indicate the resume intent here. The actual stepping happens in
1936 record_btrace_wait below. */
1937 ALL_NON_EXITED_THREADS (tp)
1938 if (ptid_match (tp->ptid, ptid))
1939 record_btrace_resume_thread (tp, flag);
1940
1941 /* Async support. */
1942 if (target_can_async_p ())
1943 {
1944 target_async (1);
1945 mark_async_event_handler (record_btrace_async_inferior_event_handler);
1946 }
1947 }
1948
1949 /* Cancel resuming TP. */
1950
1951 static void
1952 record_btrace_cancel_resume (struct thread_info *tp)
1953 {
1954 enum btrace_thread_flag flags;
1955
1956 flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
1957 if (flags == 0)
1958 return;
1959
1960 DEBUG ("cancel resume thread %d (%s): %x (%s)", tp->num,
1961 target_pid_to_str (tp->ptid), flags,
1962 btrace_thread_flag_to_str (flags));
1963
1964 tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
1965 record_btrace_stop_replaying_at_end (tp);
1966 }
1967
1968 /* Return a target_waitstatus indicating that we ran out of history. */
1969
1970 static struct target_waitstatus
1971 btrace_step_no_history (void)
1972 {
1973 struct target_waitstatus status;
1974
1975 status.kind = TARGET_WAITKIND_NO_HISTORY;
1976
1977 return status;
1978 }
1979
1980 /* Return a target_waitstatus indicating that a step finished. */
1981
1982 static struct target_waitstatus
1983 btrace_step_stopped (void)
1984 {
1985 struct target_waitstatus status;
1986
1987 status.kind = TARGET_WAITKIND_STOPPED;
1988 status.value.sig = GDB_SIGNAL_TRAP;
1989
1990 return status;
1991 }
1992
1993 /* Return a target_waitstatus indicating that a thread was stopped as
1994 requested. */
1995
1996 static struct target_waitstatus
btrace_step_stopped_on_request (void)
{
  struct target_waitstatus status;

  status.kind = TARGET_WAITKIND_STOPPED;
  status.value.sig = GDB_SIGNAL_0;

  return status;
}

/* Return a target_waitstatus indicating a spurious stop.  */

static struct target_waitstatus
btrace_step_spurious (void)
{
  struct target_waitstatus status;

  status.kind = TARGET_WAITKIND_SPURIOUS;

  return status;
}

/* Return a target_waitstatus indicating that the thread was not resumed.  */

static struct target_waitstatus
btrace_step_no_resumed (void)
{
  struct target_waitstatus status;

  status.kind = TARGET_WAITKIND_NO_RESUMED;

  return status;
}

/* Return a target_waitstatus indicating that we should wait again.  */

static struct target_waitstatus
btrace_step_again (void)
{
  struct target_waitstatus status;

  status.kind = TARGET_WAITKIND_IGNORE;

  return status;
}

/* Clear the record histories.  */

static void
record_btrace_clear_histories (struct btrace_thread_info *btinfo)
{
  xfree (btinfo->insn_history);
  xfree (btinfo->call_history);

  btinfo->insn_history = NULL;
  btinfo->call_history = NULL;
}

/* Check whether TP's current replay position is at a breakpoint.  */

static int
record_btrace_replay_at_breakpoint (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay;
  struct btrace_thread_info *btinfo;
  const struct btrace_insn *insn;
  struct inferior *inf;

  btinfo = &tp->btrace;
  replay = btinfo->replay;

  if (replay == NULL)
    return 0;

  insn = btrace_insn_get (replay);
  if (insn == NULL)
    return 0;

  inf = find_inferior_ptid (tp->ptid);
  if (inf == NULL)
    return 0;

  return record_check_stopped_by_breakpoint (inf->aspace, insn->pc,
                                             &btinfo->stop_reason);
}

/* Step one instruction in forward direction.  */

static struct target_waitstatus
record_btrace_single_step_forward (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay, end;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;
  replay = btinfo->replay;

  /* We're done if we're not replaying.  */
  if (replay == NULL)
    return btrace_step_no_history ();

  /* Check if we're stepping a breakpoint.  */
  if (record_btrace_replay_at_breakpoint (tp))
    return btrace_step_stopped ();

  /* Skip gaps during replay.  */
  do
    {
      unsigned int steps;

      /* We will bail out here if we continue stepping after reaching the end
         of the execution history.  */
      steps = btrace_insn_next (replay, 1);
      if (steps == 0)
        return btrace_step_no_history ();
    }
  while (btrace_insn_get (replay) == NULL);

  /* Determine the end of the instruction trace.  */
  btrace_insn_end (&end, btinfo);

  /* The execution trace contains (and ends with) the current instruction.
     This instruction has not been executed yet, so the trace really ends
     one instruction earlier.  */
  if (btrace_insn_cmp (replay, &end) == 0)
    return btrace_step_no_history ();

  return btrace_step_spurious ();
}

/* Step one instruction in backward direction.  */

static struct target_waitstatus
record_btrace_single_step_backward (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;
  replay = btinfo->replay;

  /* Start replaying if we're not already doing so.  */
  if (replay == NULL)
    replay = record_btrace_start_replaying (tp);

  /* If we can't step any further, we reached the end of the history.
     Skip gaps during replay.  */
  do
    {
      unsigned int steps;

      steps = btrace_insn_prev (replay, 1);
      if (steps == 0)
        return btrace_step_no_history ();
    }
  while (btrace_insn_get (replay) == NULL);

  /* Check if we're stepping a breakpoint.

     For reverse-stepping, this check is after the step.  There is logic in
     infrun.c that handles reverse-stepping separately.  See, for example,
     proceed and adjust_pc_after_break.

     This code assumes that for reverse-stepping, PC points to the last
     de-executed instruction, whereas for forward-stepping PC points to the
     next to-be-executed instruction.  */
  if (record_btrace_replay_at_breakpoint (tp))
    return btrace_step_stopped ();

  return btrace_step_spurious ();
}

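/* For illustration, a hypothetical three-instruction history; "|" marks
   the replay position and instruction 3 is the current, not-yet-executed
   instruction:

       1  insn A
     | 2  insn B
       3  insn C

   With the replay position at instruction 2, forward-stepping lands on
   instruction 3; since 3 has not been executed, the trace effectively ends
   at 2 and the step reports TARGET_WAITKIND_NO_HISTORY.  Reverse-stepping
   moves the replay position first and checks for breakpoints afterwards,
   matching the PC convention described above.  In a session this
   corresponds to:

     (gdb) stepi
     (gdb) reverse-stepi  */
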
/* Step a single thread.  */

static struct target_waitstatus
record_btrace_step_thread (struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;
  struct target_waitstatus status;
  enum btrace_thread_flag flags;

  btinfo = &tp->btrace;

  flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
  btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);

  DEBUG ("stepping thread %d (%s): %x (%s)", tp->num,
         target_pid_to_str (tp->ptid), flags,
         btrace_thread_flag_to_str (flags));

  /* We can't step without an execution history.  */
  if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
    return btrace_step_no_history ();

  switch (flags)
    {
    default:
      internal_error (__FILE__, __LINE__, _("invalid stepping type."));

    case BTHR_STOP:
      return btrace_step_stopped_on_request ();

    case BTHR_STEP:
      status = record_btrace_single_step_forward (tp);
      if (status.kind != TARGET_WAITKIND_SPURIOUS)
        break;

      return btrace_step_stopped ();

    case BTHR_RSTEP:
      status = record_btrace_single_step_backward (tp);
      if (status.kind != TARGET_WAITKIND_SPURIOUS)
        break;

      return btrace_step_stopped ();

    case BTHR_CONT:
      status = record_btrace_single_step_forward (tp);
      if (status.kind != TARGET_WAITKIND_SPURIOUS)
        break;

      btinfo->flags |= flags;
      return btrace_step_again ();

    case BTHR_RCONT:
      status = record_btrace_single_step_backward (tp);
      if (status.kind != TARGET_WAITKIND_SPURIOUS)
        break;

      btinfo->flags |= flags;
      return btrace_step_again ();
    }

  /* We keep threads moving at the end of their execution history.  The
     to_wait method will stop the thread for which the event is reported.  */
  if (status.kind == TARGET_WAITKIND_NO_HISTORY)
    btinfo->flags |= flags;

  return status;
}

/* A vector of threads.  */

typedef struct thread_info * tp_t;
DEF_VEC_P (tp_t);

/* Announce further events if necessary.  */

static void
record_btrace_maybe_mark_async_event (const VEC (tp_t) *moving,
                                      const VEC (tp_t) *no_history)
{
  int more_moving, more_no_history;

  more_moving = !VEC_empty (tp_t, moving);
  more_no_history = !VEC_empty (tp_t, no_history);

  if (!more_moving && !more_no_history)
    return;

  if (more_moving)
    DEBUG ("movers pending");

  if (more_no_history)
    DEBUG ("no-history pending");

  mark_async_event_handler (record_btrace_async_inferior_event_handler);
}

/* The to_wait method of target record-btrace.  */

static ptid_t
record_btrace_wait (struct target_ops *ops, ptid_t ptid,
                    struct target_waitstatus *status, int options)
{
  VEC (tp_t) *moving, *no_history;
  struct thread_info *tp, *eventing;
  struct cleanup *cleanups = make_cleanup (null_cleanup, NULL);

  DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);

  /* As long as we're not replaying, just forward the request.  */
  if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
    {
      ops = ops->beneath;
      return ops->to_wait (ops, ptid, status, options);
    }

  moving = NULL;
  no_history = NULL;

  make_cleanup (VEC_cleanup (tp_t), &moving);
  make_cleanup (VEC_cleanup (tp_t), &no_history);

  /* Keep a work list of moving threads.  */
  ALL_NON_EXITED_THREADS (tp)
    if (ptid_match (tp->ptid, ptid)
        && ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0))
      VEC_safe_push (tp_t, moving, tp);

  if (VEC_empty (tp_t, moving))
    {
      *status = btrace_step_no_resumed ();

      DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid),
             target_waitstatus_to_string (status));

      do_cleanups (cleanups);
      return null_ptid;
    }

  /* Step moving threads one by one, one step each, until either one thread
     reports an event or we run out of threads to step.

     When stepping more than one thread, chances are that some threads reach
     the end of their execution history earlier than others.  If we reported
     this immediately, all-stop on top of non-stop would stop all threads and
     resume the same threads next time.  And we would report the same thread
     having reached the end of its execution history again.

     In the worst case, this would starve the other threads.  But even if
     other threads were allowed to make progress, this would result in far
     too many intermediate stops.

     We therefore delay the reporting of "no execution history" until we have
     nothing else to report.  By this time, all threads should have moved to
     either the beginning or the end of their execution history.  There will
     be a single user-visible stop.  */
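  /* For illustration, a hypothetical scenario: threads T1 and T2 are both
     moving and T1 runs out of history first.  T1 is parked on NO_HISTORY
     below while we keep stepping T2; only when T2 reports an event or also
     runs out of history do we take a thread off NO_HISTORY and report
     TARGET_WAITKIND_NO_HISTORY for it, giving that single stop.  */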
  eventing = NULL;
  while ((eventing == NULL) && !VEC_empty (tp_t, moving))
    {
      unsigned int ix;

      ix = 0;
      while ((eventing == NULL) && VEC_iterate (tp_t, moving, ix, tp))
        {
          *status = record_btrace_step_thread (tp);

          switch (status->kind)
            {
            case TARGET_WAITKIND_IGNORE:
              ix++;
              break;

            case TARGET_WAITKIND_NO_HISTORY:
              VEC_safe_push (tp_t, no_history,
                             VEC_ordered_remove (tp_t, moving, ix));
              break;

            default:
              eventing = VEC_unordered_remove (tp_t, moving, ix);
              break;
            }
        }
    }

  if (eventing == NULL)
    {
      /* We started with at least one moving thread.  This thread must have
         either stopped or reached the end of its execution history.

         In the former case, EVENTING must not be NULL.
         In the latter case, NO_HISTORY must not be empty.  */
      gdb_assert (!VEC_empty (tp_t, no_history));

      /* We kept threads moving at the end of their execution history.  Stop
         EVENTING now that we are going to report its stop.  */
      eventing = VEC_unordered_remove (tp_t, no_history, 0);
      eventing->btrace.flags &= ~BTHR_MOVE;

      *status = btrace_step_no_history ();
    }

  gdb_assert (eventing != NULL);

  /* We kept threads replaying at the end of their execution history.  Stop
     replaying EVENTING now that we are going to report its stop.  */
  record_btrace_stop_replaying_at_end (eventing);

  /* Stop all other threads.  */
  if (!target_is_non_stop_p ())
    ALL_NON_EXITED_THREADS (tp)
      record_btrace_cancel_resume (tp);

  /* In async mode, we need to announce further events.  */
  if (target_is_async_p ())
    record_btrace_maybe_mark_async_event (moving, no_history);

  /* Start record histories anew from the current position.  */
  record_btrace_clear_histories (&eventing->btrace);

  /* We moved the replay position but did not update registers.  */
  registers_changed_ptid (eventing->ptid);

  DEBUG ("wait ended by thread %d (%s): %s", eventing->num,
         target_pid_to_str (eventing->ptid),
         target_waitstatus_to_string (status));

  do_cleanups (cleanups);
  return eventing->ptid;
}

/* The to_stop method of target record-btrace.  */

static void
record_btrace_stop (struct target_ops *ops, ptid_t ptid)
{
  DEBUG ("stop %s", target_pid_to_str (ptid));

  /* As long as we're not replaying, just forward the request.  */
  if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
    {
      ops = ops->beneath;
      ops->to_stop (ops, ptid);
    }
  else
    {
      struct thread_info *tp;

      ALL_NON_EXITED_THREADS (tp)
        if (ptid_match (tp->ptid, ptid))
          {
            tp->btrace.flags &= ~BTHR_MOVE;
            tp->btrace.flags |= BTHR_STOP;
          }
    }
}

/* The to_can_execute_reverse method of target record-btrace.  */

static int
record_btrace_can_execute_reverse (struct target_ops *self)
{
  return 1;
}

/* The to_stopped_by_sw_breakpoint method of target record-btrace.  */

static int
record_btrace_stopped_by_sw_breakpoint (struct target_ops *ops)
{
  if (record_btrace_is_replaying (ops))
    {
      struct thread_info *tp = inferior_thread ();

      return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
    }

  return ops->beneath->to_stopped_by_sw_breakpoint (ops->beneath);
}

/* The to_supports_stopped_by_sw_breakpoint method of target
   record-btrace.  */

static int
record_btrace_supports_stopped_by_sw_breakpoint (struct target_ops *ops)
{
  if (record_btrace_is_replaying (ops))
    return 1;

  return ops->beneath->to_supports_stopped_by_sw_breakpoint (ops->beneath);
}

/* The to_stopped_by_hw_breakpoint method of target record-btrace.  */

static int
record_btrace_stopped_by_hw_breakpoint (struct target_ops *ops)
{
  if (record_btrace_is_replaying (ops))
    {
      struct thread_info *tp = inferior_thread ();

      return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
    }

  return ops->beneath->to_stopped_by_hw_breakpoint (ops->beneath);
}

/* The to_supports_stopped_by_hw_breakpoint method of target
   record-btrace.  */

static int
record_btrace_supports_stopped_by_hw_breakpoint (struct target_ops *ops)
{
  if (record_btrace_is_replaying (ops))
    return 1;

  return ops->beneath->to_supports_stopped_by_hw_breakpoint (ops->beneath);
}

/* The to_update_thread_list method of target record-btrace.  */

static void
record_btrace_update_thread_list (struct target_ops *ops)
{
  /* We don't add or remove threads during replay.  */
  if (record_btrace_is_replaying (ops))
    return;

  /* Forward the request.  */
  ops = ops->beneath;
  ops->to_update_thread_list (ops);
}

/* The to_thread_alive method of target record-btrace.  */

static int
record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
{
  /* We don't add or remove threads during replay.  */
  if (record_btrace_is_replaying (ops))
    return find_thread_ptid (ptid) != NULL;

  /* Forward the request.  */
  ops = ops->beneath;
  return ops->to_thread_alive (ops, ptid);
}

/* Set the replay branch trace instruction iterator.  If IT is NULL, replay
   is stopped.  */

static void
record_btrace_set_replay (struct thread_info *tp,
                          const struct btrace_insn_iterator *it)
{
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;

  if (it == NULL || it->function == NULL)
    record_btrace_stop_replaying (tp);
  else
    {
      if (btinfo->replay == NULL)
        record_btrace_start_replaying (tp);
      else if (btrace_insn_cmp (btinfo->replay, it) == 0)
        return;

      *btinfo->replay = *it;
      registers_changed_ptid (tp->ptid);
    }

  /* Start anew from the new replay position.  */
  record_btrace_clear_histories (btinfo);

  stop_pc = regcache_read_pc (get_current_regcache ());
  print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
}

/* The to_goto_record_begin method of target record-btrace.  */

static void
record_btrace_goto_begin (struct target_ops *self)
{
  struct thread_info *tp;
  struct btrace_insn_iterator begin;

  tp = require_btrace_thread ();

  btrace_insn_begin (&begin, &tp->btrace);
  record_btrace_set_replay (tp, &begin);
}

/* The to_goto_record_end method of target record-btrace.  */

static void
record_btrace_goto_end (struct target_ops *ops)
{
  struct thread_info *tp;

  tp = require_btrace_thread ();

  record_btrace_set_replay (tp, NULL);
}

/* The to_goto_record method of target record-btrace.  */

static void
record_btrace_goto (struct target_ops *self, ULONGEST insn)
{
  struct thread_info *tp;
  struct btrace_insn_iterator it;
  unsigned int number;
  int found;

  number = insn;

  /* Check for wrap-arounds.  */
  if (number != insn)
    error (_("Instruction number out of range."));

  tp = require_btrace_thread ();

  found = btrace_find_insn_by_number (&it, &tp->btrace, number);
  if (found == 0)
    error (_("No such instruction."));

  record_btrace_set_replay (tp, &it);
}

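/* For illustration, a hypothetical session driving the three methods above
   through the "record goto" commands (instruction numbers as shown by
   "record instruction-history"; output elided):

     (gdb) record goto begin
     (gdb) record instruction-history
     (gdb) record goto 3
     (gdb) record goto end  */
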
/* The to_execution_direction target method.  */

static enum exec_direction_kind
record_btrace_execution_direction (struct target_ops *self)
{
  return record_btrace_resume_exec_dir;
}

/* The to_prepare_to_generate_core target method.  */

static void
record_btrace_prepare_to_generate_core (struct target_ops *self)
{
  record_btrace_generating_corefile = 1;
}

/* The to_done_generating_core target method.  */

static void
record_btrace_done_generating_core (struct target_ops *self)
{
  record_btrace_generating_corefile = 0;
}

/* Initialize the record-btrace target ops.  */

static void
init_record_btrace_ops (void)
{
  struct target_ops *ops;

  ops = &record_btrace_ops;
  ops->to_shortname = "record-btrace";
  ops->to_longname = "Branch tracing target";
  ops->to_doc = "Collect control-flow trace and provide the execution history.";
  ops->to_open = record_btrace_open;
  ops->to_close = record_btrace_close;
  ops->to_async = record_btrace_async;
  ops->to_detach = record_detach;
  ops->to_disconnect = record_disconnect;
  ops->to_mourn_inferior = record_mourn_inferior;
  ops->to_kill = record_kill;
  ops->to_stop_recording = record_btrace_stop_recording;
  ops->to_info_record = record_btrace_info;
  ops->to_insn_history = record_btrace_insn_history;
  ops->to_insn_history_from = record_btrace_insn_history_from;
  ops->to_insn_history_range = record_btrace_insn_history_range;
  ops->to_call_history = record_btrace_call_history;
  ops->to_call_history_from = record_btrace_call_history_from;
  ops->to_call_history_range = record_btrace_call_history_range;
  ops->to_record_is_replaying = record_btrace_is_replaying;
  ops->to_xfer_partial = record_btrace_xfer_partial;
  ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
  ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
  ops->to_fetch_registers = record_btrace_fetch_registers;
  ops->to_store_registers = record_btrace_store_registers;
  ops->to_prepare_to_store = record_btrace_prepare_to_store;
  ops->to_get_unwinder = &record_btrace_to_get_unwinder;
  ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;
  ops->to_resume = record_btrace_resume;
  ops->to_wait = record_btrace_wait;
  ops->to_stop = record_btrace_stop;
  ops->to_update_thread_list = record_btrace_update_thread_list;
  ops->to_thread_alive = record_btrace_thread_alive;
  ops->to_goto_record_begin = record_btrace_goto_begin;
  ops->to_goto_record_end = record_btrace_goto_end;
  ops->to_goto_record = record_btrace_goto;
  ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
  ops->to_stopped_by_sw_breakpoint = record_btrace_stopped_by_sw_breakpoint;
  ops->to_supports_stopped_by_sw_breakpoint
    = record_btrace_supports_stopped_by_sw_breakpoint;
  ops->to_stopped_by_hw_breakpoint = record_btrace_stopped_by_hw_breakpoint;
  ops->to_supports_stopped_by_hw_breakpoint
    = record_btrace_supports_stopped_by_hw_breakpoint;
  ops->to_execution_direction = record_btrace_execution_direction;
  ops->to_prepare_to_generate_core = record_btrace_prepare_to_generate_core;
  ops->to_done_generating_core = record_btrace_done_generating_core;
  ops->to_stratum = record_stratum;
  ops->to_magic = OPS_MAGIC;
}

/* Start recording in BTS format.  */

static void
cmd_record_btrace_bts_start (char *args, int from_tty)
{
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  record_btrace_conf.format = BTRACE_FORMAT_BTS;

  TRY
    {
      execute_command ("target record-btrace", from_tty);
    }
  CATCH (exception, RETURN_MASK_ALL)
    {
      record_btrace_conf.format = BTRACE_FORMAT_NONE;
      throw_exception (exception);
    }
  END_CATCH
}

/* Start recording Intel(R) Processor Trace.  */

static void
cmd_record_btrace_pt_start (char *args, int from_tty)
{
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  record_btrace_conf.format = BTRACE_FORMAT_PT;

  TRY
    {
      execute_command ("target record-btrace", from_tty);
    }
  CATCH (exception, RETURN_MASK_ALL)
    {
      record_btrace_conf.format = BTRACE_FORMAT_NONE;
      throw_exception (exception);
    }
  END_CATCH
}

/* The "record btrace" command.  Start recording, trying Intel(R) Processor
   Trace first and falling back to BTS if that fails.  */

static void
cmd_record_btrace_start (char *args, int from_tty)
{
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  record_btrace_conf.format = BTRACE_FORMAT_PT;

  TRY
    {
      execute_command ("target record-btrace", from_tty);
    }
  CATCH (exception, RETURN_MASK_ALL)
    {
      record_btrace_conf.format = BTRACE_FORMAT_BTS;

      TRY
        {
          execute_command ("target record-btrace", from_tty);
        }
      CATCH (exception, RETURN_MASK_ALL)
        {
          record_btrace_conf.format = BTRACE_FORMAT_NONE;
          throw_exception (exception);
        }
      END_CATCH
    }
  END_CATCH
}

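/* For illustration, hypothetical ways to start recording with the commands
   defined above.  "record btrace" tries Intel(R) Processor Trace first and
   falls back to BTS; the explicit variants fail if their format is
   unavailable:

     (gdb) record btrace
     (gdb) record btrace pt
     (gdb) record btrace bts  */
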
/* The "set record btrace" command.  */

static void
cmd_set_record_btrace (char *args, int from_tty)
{
  cmd_show_list (set_record_btrace_cmdlist, from_tty, "");
}

/* The "show record btrace" command.  */

static void
cmd_show_record_btrace (char *args, int from_tty)
{
  cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
}

/* The "show record btrace replay-memory-access" command.  */

static void
cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
                               struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
                    replay_memory_access);
}

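/* For illustration, a hypothetical session toggling the replay memory
   access mode shown above:

     (gdb) set record btrace replay-memory-access read-write
     (gdb) show record btrace replay-memory-access
     Replay memory access is read-write.  */
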
/* The "set record btrace bts" command.  */

static void
cmd_set_record_btrace_bts (char *args, int from_tty)
{
  printf_unfiltered (_("\"set record btrace bts\" must be followed "
                       "by an appropriate subcommand.\n"));
  help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
             all_commands, gdb_stdout);
}

/* The "show record btrace bts" command.  */

static void
cmd_show_record_btrace_bts (char *args, int from_tty)
{
  cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
}

/* The "set record btrace pt" command.  */

static void
cmd_set_record_btrace_pt (char *args, int from_tty)
{
  printf_unfiltered (_("\"set record btrace pt\" must be followed "
                       "by an appropriate subcommand.\n"));
  help_list (set_record_btrace_pt_cmdlist, "set record btrace pt ",
             all_commands, gdb_stdout);
}

/* The "show record btrace pt" command.  */

static void
cmd_show_record_btrace_pt (char *args, int from_tty)
{
  cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, "");
}

/* The "record bts buffer-size" show value function.  */

static void
show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
                                   struct cmd_list_element *c,
                                   const char *value)
{
  fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
                    value);
}

/* The "record pt buffer-size" show value function.  */

static void
show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
                                  struct cmd_list_element *c,
                                  const char *value)
{
  fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
                    value);
}

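/* For illustration, hypothetical buffer-size adjustments surfaced by the
   show functions above; the defaults are set in _initialize_record_btrace
   below, and the actual size granted may differ from the request:

     (gdb) set record btrace bts buffer-size 131072
     (gdb) show record btrace bts buffer-size
     The record/replay bts buffer size is 131072.
     (gdb) info record  */
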
void _initialize_record_btrace (void);

/* Initialize btrace commands.  */

void
_initialize_record_btrace (void)
{
  add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
                  _("Start branch trace recording."), &record_btrace_cmdlist,
                  "record btrace ", 0, &record_cmdlist);
  add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);

  add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
           _("\
Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
The processor stores a from/to record for each branch into a cyclic buffer.\n\
This format may not be available on all processors."),
           &record_btrace_cmdlist);
  add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);

  add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
           _("\
Start branch trace recording in Intel(R) Processor Trace format.\n\n\
This format may not be available on all processors."),
           &record_btrace_cmdlist);
  add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);

  add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
                  _("Set record options"), &set_record_btrace_cmdlist,
                  "set record btrace ", 0, &set_record_cmdlist);

  add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
                  _("Show record options"), &show_record_btrace_cmdlist,
                  "show record btrace ", 0, &show_record_cmdlist);

  add_setshow_enum_cmd ("replay-memory-access", no_class,
                        replay_memory_access_types, &replay_memory_access, _("\
Set what memory accesses are allowed during replay."), _("\
Show what memory accesses are allowed during replay."),
                        _("Default is READ-ONLY.\n\n\
The btrace record target does not trace data.\n\
The memory therefore corresponds to the live target and not \
to the current replay position.\n\n\
When READ-ONLY, allow accesses to read-only memory during replay.\n\
When READ-WRITE, allow accesses to read-only and read-write memory during \
replay."),
                        NULL, cmd_show_replay_memory_access,
                        &set_record_btrace_cmdlist,
                        &show_record_btrace_cmdlist);

  add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
                  _("Set record btrace bts options"),
                  &set_record_btrace_bts_cmdlist,
                  "set record btrace bts ", 0, &set_record_btrace_cmdlist);

  add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
                  _("Show record btrace bts options"),
                  &show_record_btrace_bts_cmdlist,
                  "show record btrace bts ", 0, &show_record_btrace_cmdlist);

  add_setshow_uinteger_cmd ("buffer-size", no_class,
                            &record_btrace_conf.bts.size,
                            _("Set the record/replay bts buffer size."),
                            _("Show the record/replay bts buffer size."), _("\
When starting recording request a trace buffer of this size.  \
The actual buffer size may differ from the requested size.  \
Use \"info record\" to see the actual buffer size.\n\n\
Bigger buffers allow longer recording but also take more time to process \
the recorded execution trace.\n\n\
The trace buffer size may not be changed while recording."), NULL,
                            show_record_bts_buffer_size_value,
                            &set_record_btrace_bts_cmdlist,
                            &show_record_btrace_bts_cmdlist);

  add_prefix_cmd ("pt", class_support, cmd_set_record_btrace_pt,
                  _("Set record btrace pt options"),
                  &set_record_btrace_pt_cmdlist,
                  "set record btrace pt ", 0, &set_record_btrace_cmdlist);

  add_prefix_cmd ("pt", class_support, cmd_show_record_btrace_pt,
                  _("Show record btrace pt options"),
                  &show_record_btrace_pt_cmdlist,
                  "show record btrace pt ", 0, &show_record_btrace_cmdlist);

  add_setshow_uinteger_cmd ("buffer-size", no_class,
                            &record_btrace_conf.pt.size,
                            _("Set the record/replay pt buffer size."),
                            _("Show the record/replay pt buffer size."), _("\
Bigger buffers allow longer recording but also take more time to process \
the recorded execution.\n\
The actual buffer size may differ from the requested size.  Use \"info record\" \
to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
                            &set_record_btrace_pt_cmdlist,
                            &show_record_btrace_pt_cmdlist);

  init_record_btrace_ops ();
  add_target (&record_btrace_ops);

  bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
                               xcalloc, xfree);

  record_btrace_conf.bts.size = 64 * 1024;
  record_btrace_conf.pt.size = 16 * 1024;
}
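
/* For illustration, a minimal hypothetical session exercising the commands
   registered above (output elided):

     (gdb) start
     (gdb) record btrace
     (gdb) next
     (gdb) info record
     (gdb) record instruction-history
     (gdb) reverse-stepi
     (gdb) record goto end  */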