/* Branch trace support for GDB, the GNU debugger.

   Copyright (C) 2013-2016 Free Software Foundation, Inc.

   Contributed by Intel Corp. <markus.t.metzger@intel.com>

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "record.h"
#include "record-btrace.h"
#include "gdbthread.h"
#include "target.h"
#include "gdbcmd.h"
#include "disasm.h"
#include "observer.h"
#include "cli/cli-utils.h"
#include "source.h"
#include "ui-out.h"
#include "symtab.h"
#include "filenames.h"
#include "regcache.h"
#include "frame-unwind.h"
#include "hashtab.h"
#include "infrun.h"
#include "event-loop.h"
#include "inf-loop.h"
#include "vec.h"
#include <algorithm>

/* The target_ops of record-btrace.  */
static struct target_ops record_btrace_ops;

/* A new thread observer enabling branch tracing for the new thread.  */
static struct observer *record_btrace_thread_observer;

/* Memory access types used in set/show record btrace replay-memory-access.  */
static const char replay_memory_access_read_only[] = "read-only";
static const char replay_memory_access_read_write[] = "read-write";
static const char *const replay_memory_access_types[] =
{
  replay_memory_access_read_only,
  replay_memory_access_read_write,
  NULL
};

/* The currently allowed replay memory access type.  */
static const char *replay_memory_access = replay_memory_access_read_only;

/* Command lists for "set/show record btrace".  */
static struct cmd_list_element *set_record_btrace_cmdlist;
static struct cmd_list_element *show_record_btrace_cmdlist;

/* The execution direction of the last resume we got.  See record-full.c.  */
static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;

/* The async event handler for reverse/replay execution.  */
static struct async_event_handler *record_btrace_async_inferior_event_handler;

/* A flag indicating that we are currently generating a core file.  */
static int record_btrace_generating_corefile;

/* The current branch trace configuration.  */
static struct btrace_config record_btrace_conf;

/* Command list for "record btrace".  */
static struct cmd_list_element *record_btrace_cmdlist;

/* Command lists for "set/show record btrace bts".  */
static struct cmd_list_element *set_record_btrace_bts_cmdlist;
static struct cmd_list_element *show_record_btrace_bts_cmdlist;

/* Command lists for "set/show record btrace pt".  */
static struct cmd_list_element *set_record_btrace_pt_cmdlist;
static struct cmd_list_element *show_record_btrace_pt_cmdlist;

/* Print a record-btrace debug message.  Use do ... while (0) to avoid
   ambiguities when used in if statements.  */

#define DEBUG(msg, args...)                                             \
  do                                                                    \
    {                                                                   \
      if (record_debug != 0)                                            \
        fprintf_unfiltered (gdb_stdlog,                                 \
                            "[record-btrace] " msg "\n", ##args);       \
    }                                                                   \
  while (0)
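
/* For example, with "set debug record 1" in effect, DEBUG ("open") prints
   "[record-btrace] open" to gdb_stdlog.  */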


/* Update the branch trace for the current thread and return a pointer to its
   thread_info.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct thread_info *
require_btrace_thread (void)
{
  struct thread_info *tp;

  DEBUG ("require");

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  btrace_fetch (tp);

  if (btrace_is_empty (tp))
    error (_("No trace."));

  return tp;
}

/* Update the branch trace for the current thread and return a pointer to its
   branch trace information struct.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct btrace_thread_info *
require_btrace (void)
{
  struct thread_info *tp;

  tp = require_btrace_thread ();

  return &tp->btrace;
}

/* Enable branch tracing for one thread.  Warn on errors.  */

static void
record_btrace_enable_warn (struct thread_info *tp)
{
  TRY
    {
      btrace_enable (tp, &record_btrace_conf);
    }
  CATCH (error, RETURN_MASK_ERROR)
    {
      warning ("%s", error.message);
    }
  END_CATCH
}

/* Callback function to disable branch tracing for one thread.  */

static void
record_btrace_disable_callback (void *arg)
{
  struct thread_info *tp = (struct thread_info *) arg;

  btrace_disable (tp);
}

/* Enable automatic tracing of new threads.  */

static void
record_btrace_auto_enable (void)
{
  DEBUG ("attach thread observer");

  record_btrace_thread_observer
    = observer_attach_new_thread (record_btrace_enable_warn);
}

/* Disable automatic tracing of new threads.  */

static void
record_btrace_auto_disable (void)
{
  /* The observer may already have been detached.  */
  if (record_btrace_thread_observer == NULL)
    return;

  DEBUG ("detach thread observer");

  observer_detach_new_thread (record_btrace_thread_observer);
  record_btrace_thread_observer = NULL;
}

/* The record-btrace async event handler function.  */

static void
record_btrace_handle_async_inferior_event (gdb_client_data data)
{
  inferior_event_handler (INF_REG_EVENT, NULL);
}

/* See record-btrace.h.  */

void
record_btrace_push_target (void)
{
  const char *format;

  record_btrace_auto_enable ();

  push_target (&record_btrace_ops);

  record_btrace_async_inferior_event_handler
    = create_async_event_handler (record_btrace_handle_async_inferior_event,
                                  NULL);
  record_btrace_generating_corefile = 0;

  format = btrace_format_short_string (record_btrace_conf.format);
  observer_notify_record_changed (current_inferior (), 1, "btrace", format);
}

/* The to_open method of target record-btrace.  */

static void
record_btrace_open (const char *args, int from_tty)
{
  struct cleanup *disable_chain;
  struct thread_info *tp;

  DEBUG ("open");

  record_preopen ();

  if (!target_has_execution)
    error (_("The program is not being run."));

  gdb_assert (record_btrace_thread_observer == NULL);

  disable_chain = make_cleanup (null_cleanup, NULL);
  ALL_NON_EXITED_THREADS (tp)
    if (args == NULL || *args == 0 || number_is_in_list (args, tp->global_num))
      {
        btrace_enable (tp, &record_btrace_conf);

        make_cleanup (record_btrace_disable_callback, tp);
      }

  record_btrace_push_target ();

  discard_cleanups (disable_chain);
}

/* The to_stop_recording method of target record-btrace.  */

static void
record_btrace_stop_recording (struct target_ops *self)
{
  struct thread_info *tp;

  DEBUG ("stop recording");

  record_btrace_auto_disable ();

  ALL_NON_EXITED_THREADS (tp)
    if (tp->btrace.target != NULL)
      btrace_disable (tp);
}

/* The to_disconnect method of target record-btrace.  */

static void
record_btrace_disconnect (struct target_ops *self, const char *args,
                          int from_tty)
{
  struct target_ops *beneath = self->beneath;

  /* Do not stop recording, just clean up GDB side.  */
  unpush_target (self);

  /* Forward disconnect.  */
  beneath->to_disconnect (beneath, args, from_tty);
}

/* The to_close method of target record-btrace.  */

static void
record_btrace_close (struct target_ops *self)
{
  struct thread_info *tp;

  if (record_btrace_async_inferior_event_handler != NULL)
    delete_async_event_handler (&record_btrace_async_inferior_event_handler);

  /* Make sure automatic recording gets disabled even if we did not stop
     recording before closing the record-btrace target.  */
  record_btrace_auto_disable ();

  /* We should have already stopped recording.
     Tear down btrace in case we have not.  */
  ALL_NON_EXITED_THREADS (tp)
    btrace_teardown (tp);
}

/* The to_async method of target record-btrace.  */

static void
record_btrace_async (struct target_ops *ops, int enable)
{
  if (enable)
    mark_async_event_handler (record_btrace_async_inferior_event_handler);
  else
    clear_async_event_handler (record_btrace_async_inferior_event_handler);

  ops->beneath->to_async (ops->beneath, enable);
}

/* Adjusts the size and returns a human-readable size suffix.  */

static const char *
record_btrace_adjust_size (unsigned int *size)
{
  unsigned int sz;

  sz = *size;

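  /* Scale the size down only when it is an exact multiple of the unit:
     ANDing with (1u << N) - 1 keeps the low N bits, so a zero result means
     SZ is a multiple of 2^N and no precision is lost in the division.  */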
  if ((sz & ((1u << 30) - 1)) == 0)
    {
      *size = sz >> 30;
      return "GB";
    }
  else if ((sz & ((1u << 20) - 1)) == 0)
    {
      *size = sz >> 20;
      return "MB";
    }
  else if ((sz & ((1u << 10) - 1)) == 0)
    {
      *size = sz >> 10;
      return "kB";
    }
  else
    return "";
}

/* Print a BTS configuration.  */

static void
record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
{
  const char *suffix;
  unsigned int size;

  size = conf->size;
  if (size > 0)
    {
      suffix = record_btrace_adjust_size (&size);
      printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
    }
}

/* Print an Intel Processor Trace configuration.  */

static void
record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
{
  const char *suffix;
  unsigned int size;

  size = conf->size;
  if (size > 0)
    {
      suffix = record_btrace_adjust_size (&size);
      printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
    }
}

/* Print a branch tracing configuration.  */

static void
record_btrace_print_conf (const struct btrace_config *conf)
{
  printf_unfiltered (_("Recording format: %s.\n"),
                     btrace_format_string (conf->format));

  switch (conf->format)
    {
    case BTRACE_FORMAT_NONE:
      return;

    case BTRACE_FORMAT_BTS:
      record_btrace_print_bts_conf (&conf->bts);
      return;

    case BTRACE_FORMAT_PT:
      record_btrace_print_pt_conf (&conf->pt);
      return;
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
}

/* The to_info_record method of target record-btrace.  */

static void
record_btrace_info (struct target_ops *self)
{
  struct btrace_thread_info *btinfo;
  const struct btrace_config *conf;
  struct thread_info *tp;
  unsigned int insns, calls, gaps;

  DEBUG ("info");

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  btinfo = &tp->btrace;

  conf = btrace_conf (btinfo);
  if (conf != NULL)
    record_btrace_print_conf (conf);

  btrace_fetch (tp);

  insns = 0;
  calls = 0;
  gaps = 0;

  if (!btrace_is_empty (tp))
    {
      struct btrace_call_iterator call;
      struct btrace_insn_iterator insn;

      btrace_call_end (&call, btinfo);
      btrace_call_prev (&call, 1);
      calls = btrace_call_number (&call);

      btrace_insn_end (&insn, btinfo);

      insns = btrace_insn_number (&insn);
      if (insns != 0)
        {
          /* The last instruction does not really belong to the trace.  */
          insns -= 1;
        }
      else
        {
          unsigned int steps;

          /* Skip gaps at the end.  */
          do
            {
              steps = btrace_insn_prev (&insn, 1);
              if (steps == 0)
                break;

              insns = btrace_insn_number (&insn);
            }
          while (insns == 0);
        }

      gaps = btinfo->ngaps;
    }

  printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
                       "for thread %s (%s).\n"), insns, calls, gaps,
                     print_thread_id (tp), target_pid_to_str (tp->ptid));

  if (btrace_is_replaying (tp))
    printf_unfiltered (_("Replay in progress.  At instruction %u.\n"),
                       btrace_insn_number (btinfo->replay));
}

/* Print a decode error.  */

static void
btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
                            enum btrace_format format)
{
  const char *errstr;
  int is_error;

  errstr = _("unknown");
  is_error = 1;

  switch (format)
    {
    default:
      break;

    case BTRACE_FORMAT_BTS:
      switch (errcode)
        {
        default:
          break;

        case BDE_BTS_OVERFLOW:
          errstr = _("instruction overflow");
          break;

        case BDE_BTS_INSN_SIZE:
          errstr = _("unknown instruction");
          break;
        }
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      switch (errcode)
        {
        case BDE_PT_USER_QUIT:
          is_error = 0;
          errstr = _("trace decode cancelled");
          break;

        case BDE_PT_DISABLED:
          is_error = 0;
          errstr = _("disabled");
          break;

        case BDE_PT_OVERFLOW:
          is_error = 0;
          errstr = _("overflow");
          break;

        default:
          if (errcode < 0)
            errstr = pt_errstr (pt_errcode (errcode));
          break;
        }
      break;
#endif /* defined (HAVE_LIBIPT) */
    }

  uiout->text (_("["));
  if (is_error)
    {
      uiout->text (_("decode error ("));
      uiout->field_int ("errcode", errcode);
      uiout->text (_("): "));
    }
  uiout->text (errstr);
  uiout->text (_("]\n"));
}

/* Print an unsigned int.  */

static void
ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
{
  uiout->field_fmt (fld, "%u", val);
}

/* A range of source lines.  */

struct btrace_line_range
{
  /* The symtab this line is from.  */
  struct symtab *symtab;

  /* The first line (inclusive).  */
  int begin;

  /* The last line (exclusive).  */
  int end;
};

/* Construct a line range.  */

static struct btrace_line_range
btrace_mk_line_range (struct symtab *symtab, int begin, int end)
{
  struct btrace_line_range range;

  range.symtab = symtab;
  range.begin = begin;
  range.end = end;

  return range;
}

/* Add a line to a line range.  */

static struct btrace_line_range
btrace_line_range_add (struct btrace_line_range range, int line)
{
  if (range.end <= range.begin)
    {
      /* This is the first entry.  */
      range.begin = line;
      range.end = line + 1;
    }
  else if (line < range.begin)
    range.begin = line;
  else if (range.end < line)
    range.end = line;

  return range;
}

/* Return non-zero if RANGE is empty, zero otherwise.  */

static int
btrace_line_range_is_empty (struct btrace_line_range range)
{
  return range.end <= range.begin;
}

/* Return non-zero if LHS contains RHS, zero otherwise.  */

static int
btrace_line_range_contains_range (struct btrace_line_range lhs,
                                  struct btrace_line_range rhs)
{
  return ((lhs.symtab == rhs.symtab)
          && (lhs.begin <= rhs.begin)
          && (rhs.end <= lhs.end));
}

/* Find the line range associated with PC.  */

static struct btrace_line_range
btrace_find_line_range (CORE_ADDR pc)
{
  struct btrace_line_range range;
  struct linetable_entry *lines;
  struct linetable *ltable;
  struct symtab *symtab;
  int nlines, i;

  symtab = find_pc_line_symtab (pc);
  if (symtab == NULL)
    return btrace_mk_line_range (NULL, 0, 0);

  ltable = SYMTAB_LINETABLE (symtab);
  if (ltable == NULL)
    return btrace_mk_line_range (symtab, 0, 0);

  nlines = ltable->nitems;
  lines = ltable->item;
  if (nlines <= 0)
    return btrace_mk_line_range (symtab, 0, 0);

  range = btrace_mk_line_range (symtab, 0, 0);
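  /* An address may map to several source lines; collect every line table
     entry that starts exactly at PC.  Line number zero marks special
     entries, e.g. end-of-sequence markers, and is skipped.  */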
  for (i = 0; i < nlines - 1; i++)
    {
      if ((lines[i].pc == pc) && (lines[i].line != 0))
        range = btrace_line_range_add (range, lines[i].line);
    }

  return range;
}

/* Print source lines in LINES to UIOUT.

   UI_ITEM_CHAIN is a cleanup chain for the last source line and the
   instructions corresponding to that source line.  When printing a new source
   line, we do the cleanups for the open chain and open a new cleanup chain for
   the new source line.  If the source line range in LINES is not empty, this
   function will leave the cleanup chain for the last printed source line open
   so instructions can be added to it.  */

static void
btrace_print_lines (struct btrace_line_range lines, struct ui_out *uiout,
                    struct cleanup **ui_item_chain, int flags)
{
  print_source_lines_flags psl_flags;
  int line;

  psl_flags = 0;
  if (flags & DISASSEMBLY_FILENAME)
    psl_flags |= PRINT_SOURCE_LINES_FILENAME;

  for (line = lines.begin; line < lines.end; ++line)
    {
      if (*ui_item_chain != NULL)
        do_cleanups (*ui_item_chain);

      *ui_item_chain
        = make_cleanup_ui_out_tuple_begin_end (uiout, "src_and_asm_line");

      print_source_lines (lines.symtab, line, line + 1, psl_flags);

      make_cleanup_ui_out_list_begin_end (uiout, "line_asm_insn");
    }
}

/* Disassemble a section of the recorded instruction trace.  */

static void
btrace_insn_history (struct ui_out *uiout,
                     const struct btrace_thread_info *btinfo,
                     const struct btrace_insn_iterator *begin,
                     const struct btrace_insn_iterator *end, int flags)
{
  struct ui_file *stb;
  struct cleanup *cleanups, *ui_item_chain;
  struct disassemble_info di;
  struct gdbarch *gdbarch;
  struct btrace_insn_iterator it;
  struct btrace_line_range last_lines;

  DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
         btrace_insn_number (end));

  flags |= DISASSEMBLY_SPECULATIVE;

  gdbarch = target_gdbarch ();
  stb = mem_fileopen ();
  cleanups = make_cleanup_ui_file_delete (stb);
  di = gdb_disassemble_info (gdbarch, stb);
  last_lines = btrace_mk_line_range (NULL, 0, 0);

  make_cleanup_ui_out_list_begin_end (uiout, "asm_insns");

  /* UI_ITEM_CHAIN is a cleanup chain for the last source line and the
     instructions corresponding to that line.  */
  ui_item_chain = NULL;

  for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
    {
      const struct btrace_insn *insn;

      insn = btrace_insn_get (&it);

      /* A NULL instruction indicates a gap in the trace.  */
      if (insn == NULL)
        {
          const struct btrace_config *conf;

          conf = btrace_conf (btinfo);

          /* We have trace so we must have a configuration.  */
          gdb_assert (conf != NULL);

          btrace_ui_out_decode_error (uiout, it.function->errcode,
                                      conf->format);
        }
      else
        {
          struct disasm_insn dinsn;

          if ((flags & DISASSEMBLY_SOURCE) != 0)
            {
              struct btrace_line_range lines;

              lines = btrace_find_line_range (insn->pc);
              if (!btrace_line_range_is_empty (lines)
                  && !btrace_line_range_contains_range (last_lines, lines))
                {
                  btrace_print_lines (lines, uiout, &ui_item_chain, flags);
                  last_lines = lines;
                }
              else if (ui_item_chain == NULL)
                {
                  ui_item_chain
                    = make_cleanup_ui_out_tuple_begin_end (uiout,
                                                           "src_and_asm_line");
                  /* No source information.  */
                  make_cleanup_ui_out_list_begin_end (uiout, "line_asm_insn");
                }

              gdb_assert (ui_item_chain != NULL);
            }

          memset (&dinsn, 0, sizeof (dinsn));
          dinsn.number = btrace_insn_number (&it);
          dinsn.addr = insn->pc;

          if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
            dinsn.is_speculative = 1;

          gdb_pretty_print_insn (gdbarch, uiout, &di, &dinsn, flags, stb);
        }
    }

  do_cleanups (cleanups);
}

/* The to_insn_history method of target record-btrace.  */

static void
record_btrace_insn_history (struct target_ops *self, int size, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
                                                       "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  btinfo = require_btrace ();
  history = btinfo->insn_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("insn-history (0x%x): %d", flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
         start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
        begin = *replay;
      else
        btrace_insn_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
         expand in the other direction, as well, to fill up any remaining
         context.  */
      end = begin;
      if (size < 0)
        {
          /* We want the current position covered, as well.  */
          covered = btrace_insn_next (&end, 1);
          covered += btrace_insn_prev (&begin, context - covered);
          covered += btrace_insn_next (&end, context - covered);
        }
      else
        {
          covered = btrace_insn_next (&end, context);
          covered += btrace_insn_prev (&begin, context - covered);
        }
    }
  else
    {
      begin = history->begin;
      end = history->end;

      DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
             btrace_insn_number (&begin), btrace_insn_number (&end));

      if (size < 0)
        {
          end = begin;
          covered = btrace_insn_prev (&begin, context);
        }
      else
        {
          begin = end;
          covered = btrace_insn_next (&end, context);
        }
    }

  if (covered > 0)
    btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
        printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
        printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  btrace_set_insn_history (btinfo, &begin, &end);
  do_cleanups (uiout_cleanup);
}

/* The to_insn_history_range method of target record-btrace.  */

static void
record_btrace_insn_history_range (struct target_ops *self,
                                  ULONGEST from, ULONGEST to, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
                                                       "insn history");
  low = from;
  high = to;

  DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_insn_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_insn_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_insn_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_insn_next (&end, 1);
    }

  btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_insn_history (btinfo, &begin, &end);

  do_cleanups (uiout_cleanup);
}

/* The to_insn_history_from method of target record-btrace.  */

static void
record_btrace_insn_history_from (struct target_ops *self,
                                 ULONGEST from, int size, int flags)
{
  ULONGEST begin, end, context;

  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  if (size < 0)
    {
      end = from;

      if (from < context)
        begin = 0;
      else
        begin = from - context + 1;
    }
  else
    {
      begin = from;
      end = from + context - 1;

      /* Check for wrap-around.  */
      if (end < begin)
        end = ULONGEST_MAX;
    }

  record_btrace_insn_history_range (self, begin, end, flags);
}

/* Print the instruction number range for a function call history line.  */

static void
btrace_call_history_insn_range (struct ui_out *uiout,
                                const struct btrace_function *bfun)
{
  unsigned int begin, end, size;

  size = VEC_length (btrace_insn_s, bfun->insn);
  gdb_assert (size > 0);

  begin = bfun->insn_offset;
  end = begin + size - 1;

  ui_out_field_uint (uiout, "insn begin", begin);
  uiout->text (",");
  ui_out_field_uint (uiout, "insn end", end);
}

/* Compute the lowest and highest source line for the instructions in BFUN
   and return them in PBEGIN and PEND.
   Ignore instructions that can't be mapped to BFUN, e.g. instructions that
   result from inlining or macro expansion.  */

static void
btrace_compute_src_line_range (const struct btrace_function *bfun,
                               int *pbegin, int *pend)
{
  struct btrace_insn *insn;
  struct symtab *symtab;
  struct symbol *sym;
  unsigned int idx;
  int begin, end;

  begin = INT_MAX;
  end = INT_MIN;

  sym = bfun->sym;
  if (sym == NULL)
    goto out;

  symtab = symbol_symtab (sym);

  for (idx = 0; VEC_iterate (btrace_insn_s, bfun->insn, idx, insn); ++idx)
    {
      struct symtab_and_line sal;

      sal = find_pc_line (insn->pc, 0);
      if (sal.symtab != symtab || sal.line == 0)
        continue;

      begin = std::min (begin, sal.line);
      end = std::max (end, sal.line);
    }

 out:
  *pbegin = begin;
  *pend = end;
}

/* Print the source line information for a function call history line.  */

static void
btrace_call_history_src_line (struct ui_out *uiout,
                              const struct btrace_function *bfun)
{
  struct symbol *sym;
  int begin, end;

  sym = bfun->sym;
  if (sym == NULL)
    return;

  uiout->field_string ("file",
                       symtab_to_filename_for_display (symbol_symtab (sym)));

  btrace_compute_src_line_range (bfun, &begin, &end);
  if (end < begin)
    return;

  uiout->text (":");
  uiout->field_int ("min line", begin);

  if (end == begin)
    return;

  uiout->text (",");
  uiout->field_int ("max line", end);
}

/* Get the name of a branch trace function.  */

static const char *
btrace_get_bfun_name (const struct btrace_function *bfun)
{
  struct minimal_symbol *msym;
  struct symbol *sym;

  if (bfun == NULL)
    return "??";

  msym = bfun->msym;
  sym = bfun->sym;

  if (sym != NULL)
    return SYMBOL_PRINT_NAME (sym);
  else if (msym != NULL)
    return MSYMBOL_PRINT_NAME (msym);
  else
    return "??";
}

/* Disassemble a section of the recorded function trace.  */

static void
btrace_call_history (struct ui_out *uiout,
                     const struct btrace_thread_info *btinfo,
                     const struct btrace_call_iterator *begin,
                     const struct btrace_call_iterator *end,
                     int int_flags)
{
  struct btrace_call_iterator it;
  record_print_flags flags = (enum record_print_flag) int_flags;

  DEBUG ("ftrace (0x%x): [%u; %u)", int_flags, btrace_call_number (begin),
         btrace_call_number (end));

  for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
    {
      const struct btrace_function *bfun;
      struct minimal_symbol *msym;
      struct symbol *sym;

      bfun = btrace_call_get (&it);
      sym = bfun->sym;
      msym = bfun->msym;

      /* Print the function index.  */
      ui_out_field_uint (uiout, "index", bfun->number);
      uiout->text ("\t");

      /* Indicate gaps in the trace.  */
      if (bfun->errcode != 0)
        {
          const struct btrace_config *conf;

          conf = btrace_conf (btinfo);

          /* We have trace so we must have a configuration.  */
          gdb_assert (conf != NULL);

          btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);

          continue;
        }

      if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
        {
          int level = bfun->level + btinfo->level, i;

          for (i = 0; i < level; ++i)
            uiout->text ("  ");
        }

      if (sym != NULL)
        uiout->field_string ("function", SYMBOL_PRINT_NAME (sym));
      else if (msym != NULL)
        uiout->field_string ("function", MSYMBOL_PRINT_NAME (msym));
      else if (!uiout->is_mi_like_p ())
        uiout->field_string ("function", "??");

      if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
        {
          uiout->text (_("\tinst "));
          btrace_call_history_insn_range (uiout, bfun);
        }

      if ((flags & RECORD_PRINT_SRC_LINE) != 0)
        {
          uiout->text (_("\tat "));
          btrace_call_history_src_line (uiout, bfun);
        }

      uiout->text ("\n");
    }
}

/* The to_call_history method of target record-btrace.  */

static void
record_btrace_call_history (struct target_ops *self, int size, int int_flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int context, covered;
  record_print_flags flags = (enum record_print_flag) int_flags;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
                                                       "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  btinfo = require_btrace ();
  history = btinfo->call_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("call-history (0x%x): %d", int_flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
         start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
        {
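          /* Position the call iterator at the replay position's function
             segment; the segment and BTINFO fully determine the iterator.  */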
          begin.function = replay->function;
          begin.btinfo = btinfo;
        }
      else
        btrace_call_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
         expand in the other direction, as well, to fill up any remaining
         context.  */
      end = begin;
      if (size < 0)
        {
          /* We want the current position covered, as well.  */
          covered = btrace_call_next (&end, 1);
          covered += btrace_call_prev (&begin, context - covered);
          covered += btrace_call_next (&end, context - covered);
        }
      else
        {
          covered = btrace_call_next (&end, context);
          covered += btrace_call_prev (&begin, context - covered);
        }
    }
  else
    {
      begin = history->begin;
      end = history->end;

      DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", int_flags, size,
             btrace_call_number (&begin), btrace_call_number (&end));

      if (size < 0)
        {
          end = begin;
          covered = btrace_call_prev (&begin, context);
        }
      else
        {
          begin = end;
          covered = btrace_call_next (&end, context);
        }
    }

  if (covered > 0)
    btrace_call_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
        printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
        printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  btrace_set_call_history (btinfo, &begin, &end);
  do_cleanups (uiout_cleanup);
}

/* The to_call_history_range method of target record-btrace.  */

static void
record_btrace_call_history_range (struct target_ops *self,
                                  ULONGEST from, ULONGEST to,
                                  int int_flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;
  record_print_flags flags = (enum record_print_flag) int_flags;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
                                                       "func history");
  low = from;
  high = to;

  DEBUG ("call-history (0x%x): [%u; %u)", int_flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_call_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_call_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_call_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_call_next (&end, 1);
    }

  btrace_call_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_call_history (btinfo, &begin, &end);

  do_cleanups (uiout_cleanup);
}

/* The to_call_history_from method of target record-btrace.  */

static void
record_btrace_call_history_from (struct target_ops *self,
                                 ULONGEST from, int size,
                                 int int_flags)
{
  ULONGEST begin, end, context;
  record_print_flags flags = (enum record_print_flag) int_flags;

  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  if (size < 0)
    {
      end = from;

      if (from < context)
        begin = 0;
      else
        begin = from - context + 1;
    }
  else
    {
      begin = from;
      end = from + context - 1;

      /* Check for wrap-around.  */
      if (end < begin)
        end = ULONGEST_MAX;
    }

  record_btrace_call_history_range (self, begin, end, flags);
}

/* The to_record_is_replaying method of target record-btrace.  */

static int
record_btrace_is_replaying (struct target_ops *self, ptid_t ptid)
{
  struct thread_info *tp;

  ALL_NON_EXITED_THREADS (tp)
    if (ptid_match (tp->ptid, ptid) && btrace_is_replaying (tp))
      return 1;

  return 0;
}

/* The to_record_will_replay method of target record-btrace.  */

static int
record_btrace_will_replay (struct target_ops *self, ptid_t ptid, int dir)
{
  return dir == EXEC_REVERSE || record_btrace_is_replaying (self, ptid);
}

/* The to_xfer_partial method of target record-btrace.  */

static enum target_xfer_status
record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
                            const char *annex, gdb_byte *readbuf,
                            const gdb_byte *writebuf, ULONGEST offset,
                            ULONGEST len, ULONGEST *xfered_len)
{
  struct target_ops *t;

  /* Filter out requests that don't make sense during replay.  */
  if (replay_memory_access == replay_memory_access_read_only
      && !record_btrace_generating_corefile
      && record_btrace_is_replaying (ops, inferior_ptid))
    {
      switch (object)
        {
        case TARGET_OBJECT_MEMORY:
          {
            struct target_section *section;

            /* We do not allow writing memory in general.  */
            if (writebuf != NULL)
              {
                *xfered_len = len;
                return TARGET_XFER_UNAVAILABLE;
              }

            /* We allow reading readonly memory.  */
            section = target_section_by_addr (ops, offset);
            if (section != NULL)
              {
                /* Check if the section we found is readonly.  */
                if ((bfd_get_section_flags (section->the_bfd_section->owner,
                                            section->the_bfd_section)
                     & SEC_READONLY) != 0)
                  {
                    /* Truncate the request to fit into this section.  */
                    len = std::min (len, section->endaddr - offset);
                    break;
                  }
              }

            *xfered_len = len;
            return TARGET_XFER_UNAVAILABLE;
          }
        }
    }

  /* Forward the request.  */
  ops = ops->beneath;
  return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
                               offset, len, xfered_len);
}

/* The to_insert_breakpoint method of target record-btrace.  */

static int
record_btrace_insert_breakpoint (struct target_ops *ops,
                                 struct gdbarch *gdbarch,
                                 struct bp_target_info *bp_tgt)
{
  const char *old;
  int ret;

  /* Inserting breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  TRY
    {
      ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      replay_memory_access = old;
      throw_exception (except);
    }
  END_CATCH
  replay_memory_access = old;

  return ret;
}

/* The to_remove_breakpoint method of target record-btrace.  */

static int
record_btrace_remove_breakpoint (struct target_ops *ops,
                                 struct gdbarch *gdbarch,
                                 struct bp_target_info *bp_tgt,
                                 enum remove_bp_reason reason)
{
  const char *old;
  int ret;

  /* Removing breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  TRY
    {
      ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt,
                                                reason);
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      replay_memory_access = old;
      throw_exception (except);
    }
  END_CATCH
  replay_memory_access = old;

  return ret;
}

/* The to_fetch_registers method of target record-btrace.  */

static void
record_btrace_fetch_registers (struct target_ops *ops,
                               struct regcache *regcache, int regno)
{
  struct btrace_insn_iterator *replay;
  struct thread_info *tp;

  tp = find_thread_ptid (inferior_ptid);
  gdb_assert (tp != NULL);

  replay = tp->btrace.replay;
  if (replay != NULL && !record_btrace_generating_corefile)
    {
      const struct btrace_insn *insn;
      struct gdbarch *gdbarch;
      int pcreg;

      gdbarch = get_regcache_arch (regcache);
      pcreg = gdbarch_pc_regnum (gdbarch);
      if (pcreg < 0)
        return;

      /* We can only provide the PC register.  */
      if (regno >= 0 && regno != pcreg)
        return;

      insn = btrace_insn_get (replay);
      gdb_assert (insn != NULL);

      regcache_raw_supply (regcache, regno, &insn->pc);
    }
  else
    {
      struct target_ops *t = ops->beneath;

      t->to_fetch_registers (t, regcache, regno);
    }
}

/* The to_store_registers method of target record-btrace.  */

static void
record_btrace_store_registers (struct target_ops *ops,
                               struct regcache *regcache, int regno)
{
  struct target_ops *t;

  if (!record_btrace_generating_corefile
      && record_btrace_is_replaying (ops, inferior_ptid))
    error (_("Cannot write registers while replaying."));

  gdb_assert (may_write_registers != 0);

  t = ops->beneath;
  t->to_store_registers (t, regcache, regno);
}

/* The to_prepare_to_store method of target record-btrace.  */

static void
record_btrace_prepare_to_store (struct target_ops *ops,
                                struct regcache *regcache)
{
  struct target_ops *t;

  if (!record_btrace_generating_corefile
      && record_btrace_is_replaying (ops, inferior_ptid))
    return;

  t = ops->beneath;
  t->to_prepare_to_store (t, regcache);
}

/* The branch trace frame cache.  */

struct btrace_frame_cache
{
  /* The thread.  */
  struct thread_info *tp;

  /* The frame info.  */
  struct frame_info *frame;

  /* The branch trace function segment.  */
  const struct btrace_function *bfun;
};

/* A struct btrace_frame_cache hash table indexed by the frame_info pointer
   of the cached frame.  */

static htab_t bfcache;

/* hash_f for htab_create_alloc of bfcache.  */

static hashval_t
bfcache_hash (const void *arg)
{
  const struct btrace_frame_cache *cache
    = (const struct btrace_frame_cache *) arg;

  return htab_hash_pointer (cache->frame);
}

/* eq_f for htab_create_alloc of bfcache.  */

static int
bfcache_eq (const void *arg1, const void *arg2)
{
  const struct btrace_frame_cache *cache1
    = (const struct btrace_frame_cache *) arg1;
  const struct btrace_frame_cache *cache2
    = (const struct btrace_frame_cache *) arg2;

  return cache1->frame == cache2->frame;
}

/* Create a new btrace frame cache.  */

static struct btrace_frame_cache *
bfcache_new (struct frame_info *frame)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
  cache->frame = frame;

  slot = htab_find_slot (bfcache, cache, INSERT);
  gdb_assert (*slot == NULL);
  *slot = cache;

  return cache;
}

/* Extract the branch trace function from a branch trace frame.  */

static const struct btrace_function *
btrace_get_frame_function (struct frame_info *frame)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;
  struct btrace_frame_cache pattern;
  void **slot;

  pattern.frame = frame;

  slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
  if (slot == NULL)
    return NULL;

  cache = (const struct btrace_frame_cache *) *slot;
  return cache->bfun;
}

/* Implement stop_reason method for record_btrace_frame_unwind.  */

static enum unwind_stop_reason
record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
                                        void **this_cache)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;

  cache = (const struct btrace_frame_cache *) *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  if (bfun->up == NULL)
    return UNWIND_UNAVAILABLE;

  return UNWIND_NO_REASON;
}

/* Implement this_id method for record_btrace_frame_unwind.  */

static void
record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
                             struct frame_id *this_id)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;
  CORE_ADDR code, special;

  cache = (const struct btrace_frame_cache *) *this_cache;

  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

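  /* Walk to the first segment of this function so that all segments of the
     same function instance yield the same frame id.  */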
  while (bfun->segment.prev != NULL)
    bfun = bfun->segment.prev;

  code = get_frame_func (this_frame);
  special = bfun->number;

  *this_id = frame_id_build_unavailable_stack_special (code, special);

  DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
         btrace_get_bfun_name (cache->bfun),
         core_addr_to_string_nz (this_id->code_addr),
         core_addr_to_string_nz (this_id->special_addr));
}

/* Implement prev_register method for record_btrace_frame_unwind.  */

static struct value *
record_btrace_frame_prev_register (struct frame_info *this_frame,
                                   void **this_cache,
                                   int regnum)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun, *caller;
  const struct btrace_insn *insn;
  struct gdbarch *gdbarch;
  CORE_ADDR pc;
  int pcreg;

  gdbarch = get_frame_arch (this_frame);
  pcreg = gdbarch_pc_regnum (gdbarch);
  if (pcreg < 0 || regnum != pcreg)
    throw_error (NOT_AVAILABLE_ERROR,
                 _("Registers are not available in btrace record history"));

  cache = (const struct btrace_frame_cache *) *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  caller = bfun->up;
  if (caller == NULL)
    throw_error (NOT_AVAILABLE_ERROR,
                 _("No caller in btrace record history"));

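  /* If the up link points at the instruction we returned to, the unwound PC
     is the first instruction of the caller's segment.  Otherwise the up link
     points at the call itself, so the unwound PC is the instruction following
     the caller's last recorded instruction.  */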
  if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
    {
      insn = VEC_index (btrace_insn_s, caller->insn, 0);
      pc = insn->pc;
    }
  else
    {
      insn = VEC_last (btrace_insn_s, caller->insn);
      pc = insn->pc;

      pc += gdb_insn_length (gdbarch, pc);
    }

  DEBUG ("[frame] unwound PC in %s on level %d: %s",
         btrace_get_bfun_name (bfun), bfun->level,
         core_addr_to_string_nz (pc));

  return frame_unwind_got_address (this_frame, regnum, pc);
}

/* Implement sniffer method for record_btrace_frame_unwind.  */

static int
record_btrace_frame_sniffer (const struct frame_unwind *self,
                             struct frame_info *this_frame,
                             void **this_cache)
{
  const struct btrace_function *bfun;
  struct btrace_frame_cache *cache;
  struct thread_info *tp;
  struct frame_info *next;

  /* THIS_FRAME does not contain a reference to its thread.  */
  tp = find_thread_ptid (inferior_ptid);
  gdb_assert (tp != NULL);

  bfun = NULL;
  next = get_next_frame (this_frame);
  if (next == NULL)
    {
      const struct btrace_insn_iterator *replay;

      replay = tp->btrace.replay;
      if (replay != NULL)
        bfun = replay->function;
    }
  else
    {
      const struct btrace_function *callee;

      callee = btrace_get_frame_function (next);
      if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
        bfun = callee->up;
    }

  if (bfun == NULL)
    return 0;

  DEBUG ("[frame] sniffed frame for %s on level %d",
         btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = tp;
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}

/* Implement sniffer method for record_btrace_tailcall_frame_unwind.  */

static int
record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
                                      struct frame_info *this_frame,
                                      void **this_cache)
{
  const struct btrace_function *bfun, *callee;
  struct btrace_frame_cache *cache;
  struct frame_info *next;

  next = get_next_frame (this_frame);
  if (next == NULL)
    return 0;

  callee = btrace_get_frame_function (next);
  if (callee == NULL)
    return 0;

  if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
    return 0;

  bfun = callee->up;
  if (bfun == NULL)
    return 0;

  DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
         btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = find_thread_ptid (inferior_ptid);
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}

static void
record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = (struct btrace_frame_cache *) this_cache;

  slot = htab_find_slot (bfcache, cache, NO_INSERT);
  gdb_assert (slot != NULL);

  htab_remove_elt (bfcache, cache);
}

/* btrace recording does not store previous memory content, nor the contents
   of stack frames.  Any unwinding would return erroneous results as the
   stack contents no longer match the changed PC value restored from history.
   Therefore this unwinder reports any possibly unwound registers as
   <unavailable>.  */

const struct frame_unwind record_btrace_frame_unwind =
{
  NORMAL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_frame_sniffer,
  record_btrace_frame_dealloc_cache
};

const struct frame_unwind record_btrace_tailcall_frame_unwind =
{
  TAILCALL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_tailcall_frame_sniffer,
  record_btrace_frame_dealloc_cache
};

/* Implement the to_get_unwinder method.  */

static const struct frame_unwind *
record_btrace_to_get_unwinder (struct target_ops *self)
{
  return &record_btrace_frame_unwind;
}

/* Implement the to_get_tailcall_unwinder method.  */

static const struct frame_unwind *
record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
{
  return &record_btrace_tailcall_frame_unwind;
}

/* Return a human-readable string for FLAG.  */

static const char *
btrace_thread_flag_to_str (enum btrace_thread_flag flag)
{
  switch (flag)
    {
    case BTHR_STEP:
      return "step";

    case BTHR_RSTEP:
      return "reverse-step";

    case BTHR_CONT:
      return "cont";

    case BTHR_RCONT:
      return "reverse-cont";

    case BTHR_STOP:
      return "stop";
    }

  return "<invalid>";
}

/* Indicate that TP should be resumed according to FLAG.  */

static void
record_btrace_resume_thread (struct thread_info *tp,
                             enum btrace_thread_flag flag)
{
  struct btrace_thread_info *btinfo;

  DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp),
         target_pid_to_str (tp->ptid), flag, btrace_thread_flag_to_str (flag));

  btinfo = &tp->btrace;

  /* Fetch the latest branch trace.  */
  btrace_fetch (tp);

  /* A resume request overwrites a preceding resume or stop request.  */
  btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
  btinfo->flags |= flag;
}

/* Get the current frame for TP.  */

static struct frame_info *
get_thread_current_frame (struct thread_info *tp)
{
  struct frame_info *frame;
  ptid_t old_inferior_ptid;
  int executing;

  /* Set INFERIOR_PTID, which is implicitly used by get_current_frame.  */
  old_inferior_ptid = inferior_ptid;
  inferior_ptid = tp->ptid;

  /* Clear the executing flag to allow changes to the current frame.
     We are not actually running, yet.  We just started a reverse execution
     command or a record goto command.
     For the latter, EXECUTING is false and this has no effect.
     For the former, EXECUTING is true and we're in to_wait, about to
     move the thread.  Since we need to recompute the stack, we temporarily
     set EXECUTING to false.  */
  executing = is_executing (inferior_ptid);
  set_executing (inferior_ptid, 0);

  frame = NULL;
  TRY
    {
      frame = get_current_frame ();
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      /* Restore the previous execution state.  */
      set_executing (inferior_ptid, executing);

      /* Restore the previous inferior_ptid.  */
      inferior_ptid = old_inferior_ptid;

      throw_exception (except);
    }
  END_CATCH

  /* Restore the previous execution state.  */
  set_executing (inferior_ptid, executing);

  /* Restore the previous inferior_ptid.  */
  inferior_ptid = old_inferior_ptid;

  return frame;
}

/* Start replaying a thread.  */

static struct btrace_insn_iterator *
record_btrace_start_replaying (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;
  replay = NULL;

  /* We can't start replaying without trace.  */
  if (btinfo->begin == NULL)
    return NULL;

  /* GDB stores the current frame_id when stepping in order to detect steps
     into subroutines.
     Since frames are computed differently when we're replaying, we need to
     recompute those stored frames and fix them up so we can still detect
     subroutines after we started replaying.  */
1979 TRY
1980 {
1981 struct frame_info *frame;
1982 struct frame_id frame_id;
1983 int upd_step_frame_id, upd_step_stack_frame_id;
1984
1985 /* The current frame without replaying - computed via normal unwind. */
1986 frame = get_thread_current_frame (tp);
1987 frame_id = get_frame_id (frame);
1988
1989 /* Check if we need to update any stepping-related frame id's. */
1990 upd_step_frame_id = frame_id_eq (frame_id,
1991 tp->control.step_frame_id);
1992 upd_step_stack_frame_id = frame_id_eq (frame_id,
1993 tp->control.step_stack_frame_id);
1994
1995 /* We start replaying at the end of the branch trace. This corresponds
1996 to the current instruction. */
1997 replay = XNEW (struct btrace_insn_iterator);
1998 btrace_insn_end (replay, btinfo);
1999
2000 /* Skip gaps at the end of the trace. */
2001 while (btrace_insn_get (replay) == NULL)
2002 {
2003 unsigned int steps;
2004
2005 steps = btrace_insn_prev (replay, 1);
2006 if (steps == 0)
2007 error (_("No trace."));
2008 }
2009
2010 /* We're not replaying, yet. */
2011 gdb_assert (btinfo->replay == NULL);
2012 btinfo->replay = replay;
2013
2014 /* Make sure we're not using any stale registers. */
2015 registers_changed_ptid (tp->ptid);
2016
2017 /* The current frame with replaying - computed via btrace unwind. */
2018 frame = get_thread_current_frame (tp);
2019 frame_id = get_frame_id (frame);
2020
2021 /* Replace stepping related frames where necessary. */
2022 if (upd_step_frame_id)
2023 tp->control.step_frame_id = frame_id;
2024 if (upd_step_stack_frame_id)
2025 tp->control.step_stack_frame_id = frame_id;
2026 }
2027 CATCH (except, RETURN_MASK_ALL)
2028 {
2029 xfree (btinfo->replay);
2030 btinfo->replay = NULL;
2031
2032 registers_changed_ptid (tp->ptid);
2033
2034 throw_exception (except);
2035 }
2036 END_CATCH
2037
2038 return replay;
2039 }
2040
2041 /* Stop replaying a thread. */
2042
2043 static void
2044 record_btrace_stop_replaying (struct thread_info *tp)
2045 {
2046 struct btrace_thread_info *btinfo;
2047
2048 btinfo = &tp->btrace;
2049
2050 xfree (btinfo->replay);
2051 btinfo->replay = NULL;
2052
2053 /* Make sure we're not leaving any stale registers. */
2054 registers_changed_ptid (tp->ptid);
2055 }
2056
2057 /* Stop replaying TP if it is at the end of its execution history. */
2058
2059 static void
2060 record_btrace_stop_replaying_at_end (struct thread_info *tp)
2061 {
2062 struct btrace_insn_iterator *replay, end;
2063 struct btrace_thread_info *btinfo;
2064
2065 btinfo = &tp->btrace;
2066 replay = btinfo->replay;
2067
2068 if (replay == NULL)
2069 return;
2070
2071 btrace_insn_end (&end, btinfo);
2072
2073 if (btrace_insn_cmp (replay, &end) == 0)
2074 record_btrace_stop_replaying (tp);
2075 }
2076
2077 /* The to_resume method of target record-btrace. */
2078
2079 static void
2080 record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
2081 enum gdb_signal signal)
2082 {
2083 struct thread_info *tp;
2084 enum btrace_thread_flag flag, cflag;
2085
2086 DEBUG ("resume %s: %s%s", target_pid_to_str (ptid),
2087 execution_direction == EXEC_REVERSE ? "reverse-" : "",
2088 step ? "step" : "cont");
2089
2090 /* Store the execution direction of the last resume.
2091
2092 If there is more than one to_resume call, we have to rely on infrun
2093 to not change the execution direction in-between. */
2094 record_btrace_resume_exec_dir = execution_direction;
2095
2096 /* As long as we're not replaying, just forward the request.
2097
2098 For non-stop targets this means that no thread is replaying. In order to
2099 make progress, we may need to explicitly move replaying threads to the end
2100 of their execution history. */
2101 if ((execution_direction != EXEC_REVERSE)
2102 && !record_btrace_is_replaying (ops, minus_one_ptid))
2103 {
2104 ops = ops->beneath;
2105 ops->to_resume (ops, ptid, step, signal);
2106 return;
2107 }
2108
2109 /* Compute the btrace thread flag for the requested move. */
2110 if (execution_direction == EXEC_REVERSE)
2111 {
2112 flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;
2113 cflag = BTHR_RCONT;
2114 }
2115 else
2116 {
2117 flag = step == 0 ? BTHR_CONT : BTHR_STEP;
2118 cflag = BTHR_CONT;
2119 }
2120
2121 /* We just indicate the resume intent here. The actual stepping happens in
2122 record_btrace_wait below.
2123
2124 For all-stop targets, we only step INFERIOR_PTID and continue others. */
2125 if (!target_is_non_stop_p ())
2126 {
2127 gdb_assert (ptid_match (inferior_ptid, ptid));
2128
2129 ALL_NON_EXITED_THREADS (tp)
2130 if (ptid_match (tp->ptid, ptid))
2131 {
2132 if (ptid_match (tp->ptid, inferior_ptid))
2133 record_btrace_resume_thread (tp, flag);
2134 else
2135 record_btrace_resume_thread (tp, cflag);
2136 }
2137 }
2138 else
2139 {
2140 ALL_NON_EXITED_THREADS (tp)
2141 if (ptid_match (tp->ptid, ptid))
2142 record_btrace_resume_thread (tp, flag);
2143 }
2144
2145 /* Async support. */
2146 if (target_can_async_p ())
2147 {
2148 target_async (1);
2149 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2150 }
2151 }
2152
2153 /* The to_commit_resume method of target record-btrace. */
2154
2155 static void
2156 record_btrace_commit_resume (struct target_ops *ops)
2157 {
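/* Forward the request only when we also forwarded the corresponding
   resume requests; while replaying (or executing in reverse), the
   stepping happens in record_btrace_wait instead. */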
2158 if ((execution_direction != EXEC_REVERSE)
2159 && !record_btrace_is_replaying (ops, minus_one_ptid))
2160 ops->beneath->to_commit_resume (ops->beneath);
2161 }
2162
2163 /* Cancel resuming TP. */
2164
2165 static void
2166 record_btrace_cancel_resume (struct thread_info *tp)
2167 {
2168 enum btrace_thread_flag flags;
2169
2170 flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
2171 if (flags == 0)
2172 return;
2173
2174 DEBUG ("cancel resume thread %s (%s): %x (%s)",
2175 print_thread_id (tp),
2176 target_pid_to_str (tp->ptid), flags,
2177 btrace_thread_flag_to_str (flags));
2178
2179 tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
2180 record_btrace_stop_replaying_at_end (tp);
2181 }
2182
2183 /* Return a target_waitstatus indicating that we ran out of history. */
2184
2185 static struct target_waitstatus
2186 btrace_step_no_history (void)
2187 {
2188 struct target_waitstatus status;
2189
2190 status.kind = TARGET_WAITKIND_NO_HISTORY;
2191
2192 return status;
2193 }
2194
2195 /* Return a target_waitstatus indicating that a step finished. */
2196
2197 static struct target_waitstatus
2198 btrace_step_stopped (void)
2199 {
2200 struct target_waitstatus status;
2201
2202 status.kind = TARGET_WAITKIND_STOPPED;
2203 status.value.sig = GDB_SIGNAL_TRAP;
2204
2205 return status;
2206 }
2207
2208 /* Return a target_waitstatus indicating that a thread was stopped as
2209 requested. */
2210
2211 static struct target_waitstatus
2212 btrace_step_stopped_on_request (void)
2213 {
2214 struct target_waitstatus status;
2215
2216 status.kind = TARGET_WAITKIND_STOPPED;
2217 status.value.sig = GDB_SIGNAL_0;
2218
2219 return status;
2220 }
2221
2222 /* Return a target_waitstatus indicating a spurious stop. */
2223
2224 static struct target_waitstatus
2225 btrace_step_spurious (void)
2226 {
2227 struct target_waitstatus status;
2228
2229 status.kind = TARGET_WAITKIND_SPURIOUS;
2230
2231 return status;
2232 }
2233
2234 /* Return a target_waitstatus indicating that the thread was not resumed. */
2235
2236 static struct target_waitstatus
2237 btrace_step_no_resumed (void)
2238 {
2239 struct target_waitstatus status;
2240
2241 status.kind = TARGET_WAITKIND_NO_RESUMED;
2242
2243 return status;
2244 }
2245
2246 /* Return a target_waitstatus indicating that we should wait again. */
2247
2248 static struct target_waitstatus
2249 btrace_step_again (void)
2250 {
2251 struct target_waitstatus status;
2252
2253 status.kind = TARGET_WAITKIND_IGNORE;
2254
2255 return status;
2256 }
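
/* The btrace_step_* helpers above build the wait statuses returned by
   record_btrace_step_thread below: TARGET_WAITKIND_IGNORE asks the
   caller to step again, TARGET_WAITKIND_NO_HISTORY reports the end of
   the execution history, and the others report thread stops. */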
2257
2258 /* Clear the record histories. */
2259
2260 static void
2261 record_btrace_clear_histories (struct btrace_thread_info *btinfo)
2262 {
2263 xfree (btinfo->insn_history);
2264 xfree (btinfo->call_history);
2265
2266 btinfo->insn_history = NULL;
2267 btinfo->call_history = NULL;
2268 }
2269
2270 /* Check whether TP's current replay position is at a breakpoint. */
2271
2272 static int
2273 record_btrace_replay_at_breakpoint (struct thread_info *tp)
2274 {
2275 struct btrace_insn_iterator *replay;
2276 struct btrace_thread_info *btinfo;
2277 const struct btrace_insn *insn;
2278 struct inferior *inf;
2279
2280 btinfo = &tp->btrace;
2281 replay = btinfo->replay;
2282
2283 if (replay == NULL)
2284 return 0;
2285
2286 insn = btrace_insn_get (replay);
2287 if (insn == NULL)
2288 return 0;
2289
2290 inf = find_inferior_ptid (tp->ptid);
2291 if (inf == NULL)
2292 return 0;
2293
2294 return record_check_stopped_by_breakpoint (inf->aspace, insn->pc,
2295 &btinfo->stop_reason);
2296 }
2297
2298 /* Step one instruction in forward direction. */
2299
2300 static struct target_waitstatus
2301 record_btrace_single_step_forward (struct thread_info *tp)
2302 {
2303 struct btrace_insn_iterator *replay, end, start;
2304 struct btrace_thread_info *btinfo;
2305
2306 btinfo = &tp->btrace;
2307 replay = btinfo->replay;
2308
2309 /* We're done if we're not replaying. */
2310 if (replay == NULL)
2311 return btrace_step_no_history ();
2312
2313 /* Check if we're stepping a breakpoint. */
2314 if (record_btrace_replay_at_breakpoint (tp))
2315 return btrace_step_stopped ();
2316
2317 /* Skip gaps during replay. If we end up at a gap (at the end of the trace),
2318 jump back to the instruction at which we started. */
2319 start = *replay;
2320 do
2321 {
2322 unsigned int steps;
2323
2324 /* We will bail out here if we continue stepping after reaching the end
2325 of the execution history. */
2326 steps = btrace_insn_next (replay, 1);
2327 if (steps == 0)
2328 {
2329 *replay = start;
2330 return btrace_step_no_history ();
2331 }
2332 }
2333 while (btrace_insn_get (replay) == NULL);
2334
2335 /* Determine the end of the instruction trace. */
2336 btrace_insn_end (&end, btinfo);
2337
2338 /* The execution trace contains (and ends with) the current instruction.
2339 This instruction has not been executed yet, so the trace really ends
2340 one instruction earlier. */
2341 if (btrace_insn_cmp (replay, &end) == 0)
2342 return btrace_step_no_history ();
2343
2344 return btrace_step_spurious ();
2345 }
2346
2347 /* Step one instruction in backward direction. */
2348
2349 static struct target_waitstatus
2350 record_btrace_single_step_backward (struct thread_info *tp)
2351 {
2352 struct btrace_insn_iterator *replay, start;
2353 struct btrace_thread_info *btinfo;
2354
2355 btinfo = &tp->btrace;
2356 replay = btinfo->replay;
2357
2358 /* Start replaying if we're not already doing so. */
2359 if (replay == NULL)
2360 replay = record_btrace_start_replaying (tp);
2361
2362 /* If we can't step any further, we reached the end of the history.
2363 Skip gaps during replay. If we end up at a gap (at the beginning of
2364 the trace), jump back to the instruction at which we started. */
2365 start = *replay;
2366 do
2367 {
2368 unsigned int steps;
2369
2370 steps = btrace_insn_prev (replay, 1);
2371 if (steps == 0)
2372 {
2373 *replay = start;
2374 return btrace_step_no_history ();
2375 }
2376 }
2377 while (btrace_insn_get (replay) == NULL);
2378
2379 /* Check if we're stepping a breakpoint.
2380
2381 For reverse-stepping, this check is after the step. There is logic in
2382 infrun.c that handles reverse-stepping separately. See, for example,
2383 proceed and adjust_pc_after_break.
2384
2385 This code assumes that for reverse-stepping, PC points to the last
2386 de-executed instruction, whereas for forward-stepping PC points to the
2387 next to-be-executed instruction. */
2388 if (record_btrace_replay_at_breakpoint (tp))
2389 return btrace_step_stopped ();
2390
2391 return btrace_step_spurious ();
2392 }
2393
2394 /* Step a single thread. */
2395
2396 static struct target_waitstatus
2397 record_btrace_step_thread (struct thread_info *tp)
2398 {
2399 struct btrace_thread_info *btinfo;
2400 struct target_waitstatus status;
2401 enum btrace_thread_flag flags;
2402
2403 btinfo = &tp->btrace;
2404
2405 flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
2406 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
2407
2408 DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp),
2409 target_pid_to_str (tp->ptid), flags,
2410 btrace_thread_flag_to_str (flags));
2411
2412 /* We can't step without an execution history. */
2413 if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
2414 return btrace_step_no_history ();
2415
2416 switch (flags)
2417 {
2418 default:
2419 internal_error (__FILE__, __LINE__, _("invalid stepping type."));
2420
2421 case BTHR_STOP:
2422 return btrace_step_stopped_on_request ();
2423
2424 case BTHR_STEP:
2425 status = record_btrace_single_step_forward (tp);
2426 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2427 break;
2428
2429 return btrace_step_stopped ();
2430
2431 case BTHR_RSTEP:
2432 status = record_btrace_single_step_backward (tp);
2433 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2434 break;
2435
2436 return btrace_step_stopped ();
2437
2438 case BTHR_CONT:
2439 status = record_btrace_single_step_forward (tp);
2440 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2441 break;
2442
2443 btinfo->flags |= flags;
2444 return btrace_step_again ();
2445
2446 case BTHR_RCONT:
2447 status = record_btrace_single_step_backward (tp);
2448 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2449 break;
2450
2451 btinfo->flags |= flags;
2452 return btrace_step_again ();
2453 }
2454
2455 /* We keep threads moving at the end of their execution history. The to_wait
2456 method will stop the thread for which the event is reported. */
2457 if (status.kind == TARGET_WAITKIND_NO_HISTORY)
2458 btinfo->flags |= flags;
2459
2460 return status;
2461 }
2462
2463 /* A vector of threads. */
2464
2465 typedef struct thread_info * tp_t;
2466 DEF_VEC_P (tp_t);
2467
2468 /* Announce further events if necessary. */
2469
2470 static void
2471 record_btrace_maybe_mark_async_event (const VEC (tp_t) *moving,
2472 const VEC (tp_t) *no_history)
2473 {
2474 int more_moving, more_no_history;
2475
2476 more_moving = !VEC_empty (tp_t, moving);
2477 more_no_history = !VEC_empty (tp_t, no_history);
2478
2479 if (!more_moving && !more_no_history)
2480 return;
2481
2482 if (more_moving)
2483 DEBUG ("movers pending");
2484
2485 if (more_no_history)
2486 DEBUG ("no-history pending");
2487
2488 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2489 }
2490
2491 /* The to_wait method of target record-btrace. */
2492
2493 static ptid_t
2494 record_btrace_wait (struct target_ops *ops, ptid_t ptid,
2495 struct target_waitstatus *status, int options)
2496 {
2497 VEC (tp_t) *moving, *no_history;
2498 struct thread_info *tp, *eventing;
2499 struct cleanup *cleanups = make_cleanup (null_cleanup, NULL);
2500
2501 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);
2502
2503 /* As long as we're not replaying, just forward the request. */
2504 if ((execution_direction != EXEC_REVERSE)
2505 && !record_btrace_is_replaying (ops, minus_one_ptid))
2506 {
2507 ops = ops->beneath;
2508 return ops->to_wait (ops, ptid, status, options);
2509 }
2510
2511 moving = NULL;
2512 no_history = NULL;
2513
2514 make_cleanup (VEC_cleanup (tp_t), &moving);
2515 make_cleanup (VEC_cleanup (tp_t), &no_history);
2516
2517 /* Keep a work list of moving threads. */
2518 ALL_NON_EXITED_THREADS (tp)
2519 if (ptid_match (tp->ptid, ptid)
2520 && ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0))
2521 VEC_safe_push (tp_t, moving, tp);
2522
2523 if (VEC_empty (tp_t, moving))
2524 {
2525 *status = btrace_step_no_resumed ();
2526
2527 DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid),
2528 target_waitstatus_to_string (status));
2529
2530 do_cleanups (cleanups);
2531 return null_ptid;
2532 }
2533
2534 /* Step moving threads one by one, one step each, until either one thread
2535 reports an event or we run out of threads to step.
2536
2537 When stepping more than one thread, chances are that some threads reach
2538 the end of their execution history earlier than others. If we reported
2539 this immediately, all-stop on top of non-stop would stop all threads and
2540 resume the same threads next time. And we would report the same thread
2541 having reached the end of its execution history again.
2542
2543 In the worst case, this would starve the other threads. But even if other
2544 threads would be allowed to make progress, this would result in far too
2545 many intermediate stops.
2546
2547 We therefore delay the reporting of "no execution history" until we have
2548 nothing else to report. By this time, all threads should have moved to
2549 either the beginning or the end of their execution history. There will
2550 be a single user-visible stop. */
2551 eventing = NULL;
2552 while ((eventing == NULL) && !VEC_empty (tp_t, moving))
2553 {
2554 unsigned int ix;
2555
2556 ix = 0;
2557 while ((eventing == NULL) && VEC_iterate (tp_t, moving, ix, tp))
2558 {
2559 *status = record_btrace_step_thread (tp);
2560
2561 switch (status->kind)
2562 {
2563 case TARGET_WAITKIND_IGNORE:
2564 ix++;
2565 break;
2566
2567 case TARGET_WAITKIND_NO_HISTORY:
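/* Ordered removal keeps the remaining threads in their original
   order, so IX now indexes the next thread to step. */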
2568 VEC_safe_push (tp_t, no_history,
2569 VEC_ordered_remove (tp_t, moving, ix));
2570 break;
2571
2572 default:
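/* We stop iterating once EVENTING is set, so a cheap unordered
   removal is fine here. */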
2573 eventing = VEC_unordered_remove (tp_t, moving, ix);
2574 break;
2575 }
2576 }
2577 }
2578
2579 if (eventing == NULL)
2580 {
2581 /* We started with at least one moving thread. This thread must have
2582 either stopped or reached the end of its execution history.
2583
2584 In the former case, EVENTING must not be NULL.
2585 In the latter case, NO_HISTORY must not be empty. */
2586 gdb_assert (!VEC_empty (tp_t, no_history));
2587
2588 /* We kept threads moving at the end of their execution history. Stop
2589 EVENTING now that we are going to report its stop. */
2590 eventing = VEC_unordered_remove (tp_t, no_history, 0);
2591 eventing->btrace.flags &= ~BTHR_MOVE;
2592
2593 *status = btrace_step_no_history ();
2594 }
2595
2596 gdb_assert (eventing != NULL);
2597
2598 /* We kept threads replaying at the end of their execution history. Stop
2599 replaying EVENTING now that we are going to report its stop. */
2600 record_btrace_stop_replaying_at_end (eventing);
2601
2602 /* Stop all other threads. */
2603 if (!target_is_non_stop_p ())
2604 ALL_NON_EXITED_THREADS (tp)
2605 record_btrace_cancel_resume (tp);
2606
2607 /* In async mode, we need to announce further events. */
2608 if (target_is_async_p ())
2609 record_btrace_maybe_mark_async_event (moving, no_history);
2610
2611 /* Start record histories anew from the current position. */
2612 record_btrace_clear_histories (&eventing->btrace);
2613
2614 /* We moved the replay position but did not update registers. */
2615 registers_changed_ptid (eventing->ptid);
2616
2617 DEBUG ("wait ended by thread %s (%s): %s",
2618 print_thread_id (eventing),
2619 target_pid_to_str (eventing->ptid),
2620 target_waitstatus_to_string (status));
2621
2622 do_cleanups (cleanups);
2623 return eventing->ptid;
2624 }
2625
2626 /* The to_stop method of target record-btrace. */
2627
2628 static void
2629 record_btrace_stop (struct target_ops *ops, ptid_t ptid)
2630 {
2631 DEBUG ("stop %s", target_pid_to_str (ptid));
2632
2633 /* As long as we're not replaying, just forward the request. */
2634 if ((execution_direction != EXEC_REVERSE)
2635 && !record_btrace_is_replaying (ops, minus_one_ptid))
2636 {
2637 ops = ops->beneath;
2638 ops->to_stop (ops, ptid);
2639 }
2640 else
2641 {
2642 struct thread_info *tp;
2643
2644 ALL_NON_EXITED_THREADS (tp)
2645 if (ptid_match (tp->ptid, ptid))
2646 {
2647 tp->btrace.flags &= ~BTHR_MOVE;
2648 tp->btrace.flags |= BTHR_STOP;
2649 }
2650 }
2651 }
2652
2653 /* The to_can_execute_reverse method of target record-btrace. */
2654
2655 static int
2656 record_btrace_can_execute_reverse (struct target_ops *self)
2657 {
2658 return 1;
2659 }
2660
2661 /* The to_stopped_by_sw_breakpoint method of target record-btrace. */
2662
2663 static int
2664 record_btrace_stopped_by_sw_breakpoint (struct target_ops *ops)
2665 {
2666 if (record_btrace_is_replaying (ops, minus_one_ptid))
2667 {
2668 struct thread_info *tp = inferior_thread ();
2669
2670 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2671 }
2672
2673 return ops->beneath->to_stopped_by_sw_breakpoint (ops->beneath);
2674 }
2675
2676 /* The to_supports_stopped_by_sw_breakpoint method of target
2677 record-btrace. */
2678
2679 static int
2680 record_btrace_supports_stopped_by_sw_breakpoint (struct target_ops *ops)
2681 {
2682 if (record_btrace_is_replaying (ops, minus_one_ptid))
2683 return 1;
2684
2685 return ops->beneath->to_supports_stopped_by_sw_breakpoint (ops->beneath);
2686 }
2687
2688 /* The to_stopped_by_hw_breakpoint method of target record-btrace. */
2689
2690 static int
2691 record_btrace_stopped_by_hw_breakpoint (struct target_ops *ops)
2692 {
2693 if (record_btrace_is_replaying (ops, minus_one_ptid))
2694 {
2695 struct thread_info *tp = inferior_thread ();
2696
2697 return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2698 }
2699
2700 return ops->beneath->to_stopped_by_hw_breakpoint (ops->beneath);
2701 }
2702
2703 /* The to_supports_stopped_by_hw_breakpoint method of target
2704 record-btrace. */
2705
2706 static int
2707 record_btrace_supports_stopped_by_hw_breakpoint (struct target_ops *ops)
2708 {
2709 if (record_btrace_is_replaying (ops, minus_one_ptid))
2710 return 1;
2711
2712 return ops->beneath->to_supports_stopped_by_hw_breakpoint (ops->beneath);
2713 }
2714
2715 /* The to_update_thread_list method of target record-btrace. */
2716
2717 static void
2718 record_btrace_update_thread_list (struct target_ops *ops)
2719 {
2720 /* We don't add or remove threads during replay. */
2721 if (record_btrace_is_replaying (ops, minus_one_ptid))
2722 return;
2723
2724 /* Forward the request. */
2725 ops = ops->beneath;
2726 ops->to_update_thread_list (ops);
2727 }
2728
2729 /* The to_thread_alive method of target record-btrace. */
2730
2731 static int
2732 record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
2733 {
2734 /* We don't add or remove threads during replay. */
2735 if (record_btrace_is_replaying (ops, minus_one_ptid))
2736 return find_thread_ptid (ptid) != NULL;
2737
2738 /* Forward the request. */
2739 ops = ops->beneath;
2740 return ops->to_thread_alive (ops, ptid);
2741 }
2742
2743 /* Set the replay branch trace instruction iterator. If IT is NULL, replay
2744 is stopped. */
2745
2746 static void
2747 record_btrace_set_replay (struct thread_info *tp,
2748 const struct btrace_insn_iterator *it)
2749 {
2750 struct btrace_thread_info *btinfo;
2751
2752 btinfo = &tp->btrace;
2753
2754 if (it == NULL || it->function == NULL)
2755 record_btrace_stop_replaying (tp);
2756 else
2757 {
2758 if (btinfo->replay == NULL)
2759 record_btrace_start_replaying (tp);
2760 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
2761 return;
2762
2763 *btinfo->replay = *it;
2764 registers_changed_ptid (tp->ptid);
2765 }
2766
2767 /* Start anew from the new replay position. */
2768 record_btrace_clear_histories (btinfo);
2769
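/* Publish the new replay position: update the stop PC and reprint
   the current frame. */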
2770 stop_pc = regcache_read_pc (get_current_regcache ());
2771 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
2772 }
2773
2774 /* The to_goto_record_begin method of target record-btrace. */
2775
2776 static void
2777 record_btrace_goto_begin (struct target_ops *self)
2778 {
2779 struct thread_info *tp;
2780 struct btrace_insn_iterator begin;
2781
2782 tp = require_btrace_thread ();
2783
2784 btrace_insn_begin (&begin, &tp->btrace);
2785
2786 /* Skip gaps at the beginning of the trace. */
2787 while (btrace_insn_get (&begin) == NULL)
2788 {
2789 unsigned int steps;
2790
2791 steps = btrace_insn_next (&begin, 1);
2792 if (steps == 0)
2793 error (_("No trace."));
2794 }
2795
2796 record_btrace_set_replay (tp, &begin);
2797 }
2798
2799 /* The to_goto_record_end method of target record-btrace. */
2800
2801 static void
2802 record_btrace_goto_end (struct target_ops *ops)
2803 {
2804 struct thread_info *tp;
2805
2806 tp = require_btrace_thread ();
2807
2808 record_btrace_set_replay (tp, NULL);
2809 }
2810
2811 /* The to_goto_record method of target record-btrace. */
2812
2813 static void
2814 record_btrace_goto (struct target_ops *self, ULONGEST insn)
2815 {
2816 struct thread_info *tp;
2817 struct btrace_insn_iterator it;
2818 unsigned int number;
2819 int found;
2820
2821 number = insn;
2822
2823 /* Check for wrap-arounds. */
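/* INSN is ULONGEST while NUMBER is unsigned int; if the assignment
   truncated the value, the two no longer compare equal. */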
2824 if (number != insn)
2825 error (_("Instruction number out of range."));
2826
2827 tp = require_btrace_thread ();
2828
2829 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
2830 if (found == 0)
2831 error (_("No such instruction."));
2832
2833 record_btrace_set_replay (tp, &it);
2834 }
2835
2836 /* The to_record_stop_replaying method of target record-btrace. */
2837
2838 static void
2839 record_btrace_stop_replaying_all (struct target_ops *self)
2840 {
2841 struct thread_info *tp;
2842
2843 ALL_NON_EXITED_THREADS (tp)
2844 record_btrace_stop_replaying (tp);
2845 }
2846
2847 /* The to_execution_direction target method. */
2848
2849 static enum exec_direction_kind
2850 record_btrace_execution_direction (struct target_ops *self)
2851 {
2852 return record_btrace_resume_exec_dir;
2853 }
2854
2855 /* The to_prepare_to_generate_core target method. */
2856
2857 static void
2858 record_btrace_prepare_to_generate_core (struct target_ops *self)
2859 {
2860 record_btrace_generating_corefile = 1;
2861 }
2862
2863 /* The to_done_generating_core target method. */
2864
2865 static void
2866 record_btrace_done_generating_core (struct target_ops *self)
2867 {
2868 record_btrace_generating_corefile = 0;
2869 }
2870
2871 /* Initialize the record-btrace target ops. */
2872
2873 static void
2874 init_record_btrace_ops (void)
2875 {
2876 struct target_ops *ops;
2877
2878 ops = &record_btrace_ops;
2879 ops->to_shortname = "record-btrace";
2880 ops->to_longname = "Branch tracing target";
2881 ops->to_doc = "Collect control-flow trace and provide the execution history.";
2882 ops->to_open = record_btrace_open;
2883 ops->to_close = record_btrace_close;
2884 ops->to_async = record_btrace_async;
2885 ops->to_detach = record_detach;
2886 ops->to_disconnect = record_btrace_disconnect;
2887 ops->to_mourn_inferior = record_mourn_inferior;
2888 ops->to_kill = record_kill;
2889 ops->to_stop_recording = record_btrace_stop_recording;
2890 ops->to_info_record = record_btrace_info;
2891 ops->to_insn_history = record_btrace_insn_history;
2892 ops->to_insn_history_from = record_btrace_insn_history_from;
2893 ops->to_insn_history_range = record_btrace_insn_history_range;
2894 ops->to_call_history = record_btrace_call_history;
2895 ops->to_call_history_from = record_btrace_call_history_from;
2896 ops->to_call_history_range = record_btrace_call_history_range;
2897 ops->to_record_is_replaying = record_btrace_is_replaying;
2898 ops->to_record_will_replay = record_btrace_will_replay;
2899 ops->to_record_stop_replaying = record_btrace_stop_replaying_all;
2900 ops->to_xfer_partial = record_btrace_xfer_partial;
2901 ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
2902 ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
2903 ops->to_fetch_registers = record_btrace_fetch_registers;
2904 ops->to_store_registers = record_btrace_store_registers;
2905 ops->to_prepare_to_store = record_btrace_prepare_to_store;
2906 ops->to_get_unwinder = &record_btrace_to_get_unwinder;
2907 ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;
2908 ops->to_resume = record_btrace_resume;
2909 ops->to_commit_resume = record_btrace_commit_resume;
2910 ops->to_wait = record_btrace_wait;
2911 ops->to_stop = record_btrace_stop;
2912 ops->to_update_thread_list = record_btrace_update_thread_list;
2913 ops->to_thread_alive = record_btrace_thread_alive;
2914 ops->to_goto_record_begin = record_btrace_goto_begin;
2915 ops->to_goto_record_end = record_btrace_goto_end;
2916 ops->to_goto_record = record_btrace_goto;
2917 ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
2918 ops->to_stopped_by_sw_breakpoint = record_btrace_stopped_by_sw_breakpoint;
2919 ops->to_supports_stopped_by_sw_breakpoint
2920 = record_btrace_supports_stopped_by_sw_breakpoint;
2921 ops->to_stopped_by_hw_breakpoint = record_btrace_stopped_by_hw_breakpoint;
2922 ops->to_supports_stopped_by_hw_breakpoint
2923 = record_btrace_supports_stopped_by_hw_breakpoint;
2924 ops->to_execution_direction = record_btrace_execution_direction;
2925 ops->to_prepare_to_generate_core = record_btrace_prepare_to_generate_core;
2926 ops->to_done_generating_core = record_btrace_done_generating_core;
2927 ops->to_stratum = record_stratum;
2928 ops->to_magic = OPS_MAGIC;
2929 }
2930
2931 /* Start recording in BTS format. */
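/* For example (assuming the processor supports BTS):

     (gdb) record btrace bts
     (gdb) info record  */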
2932
2933 static void
2934 cmd_record_btrace_bts_start (char *args, int from_tty)
2935 {
2936 if (args != NULL && *args != 0)
2937 error (_("Invalid argument."));
2938
2939 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2940
2941 TRY
2942 {
2943 execute_command ("target record-btrace", from_tty);
2944 }
2945 CATCH (exception, RETURN_MASK_ALL)
2946 {
2947 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2948 throw_exception (exception);
2949 }
2950 END_CATCH
2951 }
2952
2953 /* Start recording in Intel Processor Trace format. */
2954
2955 static void
2956 cmd_record_btrace_pt_start (char *args, int from_tty)
2957 {
2958 if (args != NULL && *args != 0)
2959 error (_("Invalid argument."));
2960
2961 record_btrace_conf.format = BTRACE_FORMAT_PT;
2962
2963 TRY
2964 {
2965 execute_command ("target record-btrace", from_tty);
2966 }
2967 CATCH (exception, RETURN_MASK_ALL)
2968 {
2969 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2970 throw_exception (exception);
2971 }
2972 END_CATCH
2973 }
2974
2975 /* The "record btrace" command.  Start recording, trying Intel Processor Trace first and falling back to BTS. */
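/* For example:

     (gdb) record btrace
     (gdb) info record

   "info record" can be used to check which format was selected.  */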
2976
2977 static void
2978 cmd_record_btrace_start (char *args, int from_tty)
2979 {
2980 if (args != NULL && *args != 0)
2981 error (_("Invalid argument."));
2982
2983 record_btrace_conf.format = BTRACE_FORMAT_PT;
2984
2985 TRY
2986 {
2987 execute_command ("target record-btrace", from_tty);
2988 }
2989 CATCH (exception, RETURN_MASK_ALL)
2990 {
2991 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2992
2993 TRY
2994 {
2995 execute_command ("target record-btrace", from_tty);
2996 }
2997 CATCH (exception, RETURN_MASK_ALL)
2998 {
2999 record_btrace_conf.format = BTRACE_FORMAT_NONE;
3000 throw_exception (exception);
3001 }
3002 END_CATCH
3003 }
3004 END_CATCH
3005 }
3006
3007 /* The "set record btrace" command. */
3008
3009 static void
3010 cmd_set_record_btrace (char *args, int from_tty)
3011 {
3012 cmd_show_list (set_record_btrace_cmdlist, from_tty, "");
3013 }
3014
3015 /* The "show record btrace" command. */
3016
3017 static void
3018 cmd_show_record_btrace (char *args, int from_tty)
3019 {
3020 cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
3021 }
3022
3023 /* The "show record btrace replay-memory-access" command. */
3024
3025 static void
3026 cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
3027 struct cmd_list_element *c, const char *value)
3028 {
3029 fprintf_filtered (file, _("Replay memory access is %s.\n"),
3030 replay_memory_access);
3031 }
3032
3033 /* The "set record btrace bts" command. */
3034
3035 static void
3036 cmd_set_record_btrace_bts (char *args, int from_tty)
3037 {
3038 printf_unfiltered (_("\"set record btrace bts\" must be followed "
3039 "by an appropriate subcommand.\n"));
3040 help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
3041 all_commands, gdb_stdout);
3042 }
3043
3044 /* The "show record btrace bts" command. */
3045
3046 static void
3047 cmd_show_record_btrace_bts (char *args, int from_tty)
3048 {
3049 cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
3050 }
3051
3052 /* The "set record btrace pt" command. */
3053
3054 static void
3055 cmd_set_record_btrace_pt (char *args, int from_tty)
3056 {
3057 printf_unfiltered (_("\"set record btrace pt\" must be followed "
3058 "by an appropriate subcommand.\n"));
3059 help_list (set_record_btrace_pt_cmdlist, "set record btrace pt ",
3060 all_commands, gdb_stdout);
3061 }
3062
3063 /* The "show record btrace pt" command. */
3064
3065 static void
3066 cmd_show_record_btrace_pt (char *args, int from_tty)
3067 {
3068 cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, "");
3069 }
3070
3071 /* The "record bts buffer-size" show value function. */
3072
3073 static void
3074 show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
3075 struct cmd_list_element *c,
3076 const char *value)
3077 {
3078 fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
3079 value);
3080 }
3081
3082 /* The "record pt buffer-size" show value function. */
3083
3084 static void
3085 show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
3086 struct cmd_list_element *c,
3087 const char *value)
3088 {
3089 fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
3090 value);
3091 }
3092
3093 void _initialize_record_btrace (void);
3094
3095 /* Initialize btrace commands. */
3096
3097 void
3098 _initialize_record_btrace (void)
3099 {
3100 add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
3101 _("Start branch trace recording."), &record_btrace_cmdlist,
3102 "record btrace ", 0, &record_cmdlist);
3103 add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
3104
3105 add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
3106 _("\
3107 Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
3108 The processor stores a from/to record for each branch into a cyclic buffer.\n\
3109 This format may not be available on all processors."),
3110 &record_btrace_cmdlist);
3111 add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);
3112
3113 add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
3114 _("\
3115 Start branch trace recording in Intel Processor Trace format.\n\n\
3116 This format may not be available on all processors."),
3117 &record_btrace_cmdlist);
3118 add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);
3119
3120 add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
3121 _("Set record options"), &set_record_btrace_cmdlist,
3122 "set record btrace ", 0, &set_record_cmdlist);
3123
3124 add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
3125 _("Show record options"), &show_record_btrace_cmdlist,
3126 "show record btrace ", 0, &show_record_cmdlist);
3127
3128 add_setshow_enum_cmd ("replay-memory-access", no_class,
3129 replay_memory_access_types, &replay_memory_access, _("\
3130 Set what memory accesses are allowed during replay."), _("\
3131 Show what memory accesses are allowed during replay."),
3132 _("Default is READ-ONLY.\n\n\
3133 The btrace record target does not trace data.\n\
3134 The memory therefore corresponds to the live target and not \
3135 to the current replay position.\n\n\
3136 When READ-ONLY, allow accesses to read-only memory during replay.\n\
3137 When READ-WRITE, allow accesses to read-only and read-write memory during \
3138 replay."),
3139 NULL, cmd_show_replay_memory_access,
3140 &set_record_btrace_cmdlist,
3141 &show_record_btrace_cmdlist);
3142
3143 add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
3144 _("Set record btrace bts options"),
3145 &set_record_btrace_bts_cmdlist,
3146 "set record btrace bts ", 0, &set_record_btrace_cmdlist);
3147
3148 add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
3149 _("Show record btrace bts options"),
3150 &show_record_btrace_bts_cmdlist,
3151 "show record btrace bts ", 0, &show_record_btrace_cmdlist);
3152
3153 add_setshow_uinteger_cmd ("buffer-size", no_class,
3154 &record_btrace_conf.bts.size,
3155 _("Set the record/replay bts buffer size."),
3156 _("Show the record/replay bts buffer size."), _("\
3157 When starting recording, request a trace buffer of this size. \
3158 The actual buffer size may differ from the requested size. \
3159 Use \"info record\" to see the actual buffer size.\n\n\
3160 Bigger buffers allow longer recording but also take more time to process \
3161 the recorded execution trace.\n\n\
3162 The trace buffer size may not be changed while recording."), NULL,
3163 show_record_bts_buffer_size_value,
3164 &set_record_btrace_bts_cmdlist,
3165 &show_record_btrace_bts_cmdlist);
3166
3167 add_prefix_cmd ("pt", class_support, cmd_set_record_btrace_pt,
3168 _("Set record btrace pt options"),
3169 &set_record_btrace_pt_cmdlist,
3170 "set record btrace pt ", 0, &set_record_btrace_cmdlist);
3171
3172 add_prefix_cmd ("pt", class_support, cmd_show_record_btrace_pt,
3173 _("Show record btrace pt options"),
3174 &show_record_btrace_pt_cmdlist,
3175 "show record btrace pt ", 0, &show_record_btrace_cmdlist);
3176
3177 add_setshow_uinteger_cmd ("buffer-size", no_class,
3178 &record_btrace_conf.pt.size,
3179 _("Set the record/replay pt buffer size."),
3180 _("Show the record/replay pt buffer size."), _("\
3181 Bigger buffers allow longer recording but also take more time to process \
3182 the recorded execution trace.\n\
3183 The actual buffer size may differ from the requested size. Use \"info record\" \
3184 to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
3185 &set_record_btrace_pt_cmdlist,
3186 &show_record_btrace_pt_cmdlist);
3187
3188 init_record_btrace_ops ();
3189 add_target (&record_btrace_ops);
3190
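/* Create the btrace frame cache used by the record-btrace frame
   unwinder. */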
3191 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
3192 xcalloc, xfree);
3193
3194 record_btrace_conf.bts.size = 64 * 1024;
3195 record_btrace_conf.pt.size = 16 * 1024;
3196 }