/* Branch trace support for GDB, the GNU debugger.

   Copyright (C) 2013-2017 Free Software Foundation, Inc.

   Contributed by Intel Corp. <markus.t.metzger@intel.com>

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "record.h"
#include "record-btrace.h"
#include "gdbthread.h"
#include "target.h"
#include "gdbcmd.h"
#include "disasm.h"
#include "observer.h"
#include "cli/cli-utils.h"
#include "source.h"
#include "ui-out.h"
#include "symtab.h"
#include "filenames.h"
#include "regcache.h"
#include "frame-unwind.h"
#include "hashtab.h"
#include "infrun.h"
#include "event-loop.h"
#include "inf-loop.h"
#include "vec.h"
#include <algorithm>

/* The target_ops of record-btrace.  */
static struct target_ops record_btrace_ops;

/* A new thread observer enabling branch tracing for the new thread.  */
static struct observer *record_btrace_thread_observer;

/* Memory access types used in set/show record btrace replay-memory-access.  */
static const char replay_memory_access_read_only[] = "read-only";
static const char replay_memory_access_read_write[] = "read-write";
static const char *const replay_memory_access_types[] =
{
  replay_memory_access_read_only,
  replay_memory_access_read_write,
  NULL
};

/* The currently allowed replay memory access type.  */
static const char *replay_memory_access = replay_memory_access_read_only;
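
/* For example, to allow patching memory while replaying, one would use:

     (gdb) set record btrace replay-memory-access read-write

   The default, "read-only", rejects memory writes during replay.  */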

/* Command lists for "set/show record btrace".  */
static struct cmd_list_element *set_record_btrace_cmdlist;
static struct cmd_list_element *show_record_btrace_cmdlist;

/* The execution direction of the last resume we got.  See record-full.c.  */
static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;

/* The async event handler for reverse/replay execution.  */
static struct async_event_handler *record_btrace_async_inferior_event_handler;

/* A flag indicating that we are currently generating a core file.  */
static int record_btrace_generating_corefile;

/* The current branch trace configuration.  */
static struct btrace_config record_btrace_conf;

/* Command list for "record btrace".  */
static struct cmd_list_element *record_btrace_cmdlist;

/* Command lists for "set/show record btrace bts".  */
static struct cmd_list_element *set_record_btrace_bts_cmdlist;
static struct cmd_list_element *show_record_btrace_bts_cmdlist;

/* Command lists for "set/show record btrace pt".  */
static struct cmd_list_element *set_record_btrace_pt_cmdlist;
static struct cmd_list_element *show_record_btrace_pt_cmdlist;

/* Print a record-btrace debug message.  Use do ... while (0) to avoid
   ambiguities when used in if statements.  */

#define DEBUG(msg, args...)                                             \
  do                                                                    \
    {                                                                   \
      if (record_debug != 0)                                            \
        fprintf_unfiltered (gdb_stdlog,                                 \
                            "[record-btrace] " msg "\n", ##args);       \
    }                                                                   \
  while (0)
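
/* For example, with "set debug record 1" in effect,

     DEBUG ("resume %s", target_pid_to_str (ptid));

   prints "[record-btrace] resume <ptid>" to gdb_stdlog.  */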


/* Update the branch trace for the current thread and return a pointer to its
   thread_info.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct thread_info *
require_btrace_thread (void)
{
  struct thread_info *tp;

  DEBUG ("require");

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  validate_registers_access ();

  btrace_fetch (tp);

  if (btrace_is_empty (tp))
    error (_("No trace."));

  return tp;
}

/* Update the branch trace for the current thread and return a pointer to its
   branch trace information struct.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct btrace_thread_info *
require_btrace (void)
{
  struct thread_info *tp;

  tp = require_btrace_thread ();

  return &tp->btrace;
}

/* Enable branch tracing for one thread.  Warn on errors.  */

static void
record_btrace_enable_warn (struct thread_info *tp)
{
  TRY
    {
      btrace_enable (tp, &record_btrace_conf);
    }
  CATCH (error, RETURN_MASK_ERROR)
    {
      warning ("%s", error.message);
    }
  END_CATCH
}

/* Callback function to disable branch tracing for one thread.  */

static void
record_btrace_disable_callback (void *arg)
{
  struct thread_info *tp = (struct thread_info *) arg;

  btrace_disable (tp);
}

/* Enable automatic tracing of new threads.  */

static void
record_btrace_auto_enable (void)
{
  DEBUG ("attach thread observer");

  record_btrace_thread_observer
    = observer_attach_new_thread (record_btrace_enable_warn);
}

/* Disable automatic tracing of new threads.  */

static void
record_btrace_auto_disable (void)
{
  /* The observer may have already been detached.  */
  if (record_btrace_thread_observer == NULL)
    return;

  DEBUG ("detach thread observer");

  observer_detach_new_thread (record_btrace_thread_observer);
  record_btrace_thread_observer = NULL;
}

/* The record-btrace async event handler function.  */

static void
record_btrace_handle_async_inferior_event (gdb_client_data data)
{
  inferior_event_handler (INF_REG_EVENT, NULL);
}

/* See record-btrace.h.  */

void
record_btrace_push_target (void)
{
  const char *format;

  record_btrace_auto_enable ();

  push_target (&record_btrace_ops);

  record_btrace_async_inferior_event_handler
    = create_async_event_handler (record_btrace_handle_async_inferior_event,
                                  NULL);
  record_btrace_generating_corefile = 0;

  format = btrace_format_short_string (record_btrace_conf.format);
  observer_notify_record_changed (current_inferior (), 1, "btrace", format);
}

/* The to_open method of target record-btrace.  */

static void
record_btrace_open (const char *args, int from_tty)
{
  struct cleanup *disable_chain;
  struct thread_info *tp;

  DEBUG ("open");

  record_preopen ();

  if (!target_has_execution)
    error (_("The program is not being run."));

  gdb_assert (record_btrace_thread_observer == NULL);

  disable_chain = make_cleanup (null_cleanup, NULL);
  ALL_NON_EXITED_THREADS (tp)
    if (args == NULL || *args == 0 || number_is_in_list (args, tp->global_num))
      {
        btrace_enable (tp, &record_btrace_conf);

        make_cleanup (record_btrace_disable_callback, tp);
      }

  record_btrace_push_target ();

  discard_cleanups (disable_chain);
}

/* The to_stop_recording method of target record-btrace.  */

static void
record_btrace_stop_recording (struct target_ops *self)
{
  struct thread_info *tp;

  DEBUG ("stop recording");

  record_btrace_auto_disable ();

  ALL_NON_EXITED_THREADS (tp)
    if (tp->btrace.target != NULL)
      btrace_disable (tp);
}

/* The to_disconnect method of target record-btrace.  */

static void
record_btrace_disconnect (struct target_ops *self, const char *args,
                          int from_tty)
{
  struct target_ops *beneath = self->beneath;

  /* Do not stop recording, just clean up GDB side.  */
  unpush_target (self);

  /* Forward disconnect.  */
  beneath->to_disconnect (beneath, args, from_tty);
}

/* The to_close method of target record-btrace.  */

static void
record_btrace_close (struct target_ops *self)
{
  struct thread_info *tp;

  if (record_btrace_async_inferior_event_handler != NULL)
    delete_async_event_handler (&record_btrace_async_inferior_event_handler);

  /* Make sure automatic recording gets disabled even if we did not stop
     recording before closing the record-btrace target.  */
  record_btrace_auto_disable ();

  /* We should have already stopped recording.
     Tear down btrace in case we have not.  */
  ALL_NON_EXITED_THREADS (tp)
    btrace_teardown (tp);
}

/* The to_async method of target record-btrace.  */

static void
record_btrace_async (struct target_ops *ops, int enable)
{
  if (enable)
    mark_async_event_handler (record_btrace_async_inferior_event_handler);
  else
    clear_async_event_handler (record_btrace_async_inferior_event_handler);

  ops->beneath->to_async (ops->beneath, enable);
}

/* Adjust *SIZE and return a human-readable size suffix.  */

static const char *
record_btrace_adjust_size (unsigned int *size)
{
  unsigned int sz;

  sz = *size;

  if ((sz & ((1u << 30) - 1)) == 0)
    {
      *size = sz >> 30;
      return "GB";
    }
  else if ((sz & ((1u << 20) - 1)) == 0)
    {
      *size = sz >> 20;
      return "MB";
    }
  else if ((sz & ((1u << 10) - 1)) == 0)
    {
      *size = sz >> 10;
      return "kB";
    }
  else
    return "";
}
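
/* For example, with *SIZE == 4194304 (4 MB) on entry, the function above
   sets *SIZE to 4 and returns "MB".  A size that is not an exact multiple
   of 1 kB is left unchanged and gets an empty suffix.  */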

/* Print a BTS configuration.  */

static void
record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
{
  const char *suffix;
  unsigned int size;

  size = conf->size;
  if (size > 0)
    {
      suffix = record_btrace_adjust_size (&size);
      printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
    }
}

/* Print an Intel Processor Trace configuration.  */

static void
record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
{
  const char *suffix;
  unsigned int size;

  size = conf->size;
  if (size > 0)
    {
      suffix = record_btrace_adjust_size (&size);
      printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
    }
}

/* Print a branch tracing configuration.  */

static void
record_btrace_print_conf (const struct btrace_config *conf)
{
  printf_unfiltered (_("Recording format: %s.\n"),
                     btrace_format_string (conf->format));

  switch (conf->format)
    {
    case BTRACE_FORMAT_NONE:
      return;

    case BTRACE_FORMAT_BTS:
      record_btrace_print_bts_conf (&conf->bts);
      return;

    case BTRACE_FORMAT_PT:
      record_btrace_print_pt_conf (&conf->pt);
      return;
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
}

/* The to_info_record method of target record-btrace.  */

static void
record_btrace_info (struct target_ops *self)
{
  struct btrace_thread_info *btinfo;
  const struct btrace_config *conf;
  struct thread_info *tp;
  unsigned int insns, calls, gaps;

  DEBUG ("info");

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  validate_registers_access ();

  btinfo = &tp->btrace;

  conf = btrace_conf (btinfo);
  if (conf != NULL)
    record_btrace_print_conf (conf);

  btrace_fetch (tp);

  insns = 0;
  calls = 0;
  gaps = 0;

  if (!btrace_is_empty (tp))
    {
      struct btrace_call_iterator call;
      struct btrace_insn_iterator insn;

      btrace_call_end (&call, btinfo);
      btrace_call_prev (&call, 1);
      calls = btrace_call_number (&call);

      btrace_insn_end (&insn, btinfo);
      insns = btrace_insn_number (&insn);

      /* If the last instruction is not a gap, it is the current instruction
         that is not actually part of the record.  */
      if (btrace_insn_get (&insn) != NULL)
        insns -= 1;

      gaps = btinfo->ngaps;
    }

  printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
                       "for thread %s (%s).\n"), insns, calls, gaps,
                     print_thread_id (tp), target_pid_to_str (tp->ptid));

  if (btrace_is_replaying (tp))
    printf_unfiltered (_("Replay in progress.  At instruction %u.\n"),
                       btrace_insn_number (btinfo->replay));
}

/* Print a decode error.  */

static void
btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
                            enum btrace_format format)
{
  const char *errstr = btrace_decode_error (format, errcode);

  uiout->text (_("["));
  /* ERRCODE > 0 indicates notifications on BTRACE_FORMAT_PT.  */
  if (!(format == BTRACE_FORMAT_PT && errcode > 0))
    {
      uiout->text (_("decode error ("));
      uiout->field_int ("errcode", errcode);
      uiout->text (_("): "));
    }
  uiout->text (errstr);
  uiout->text (_("]\n"));
}

/* Print an unsigned int.  */

static void
ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
{
  uiout->field_fmt (fld, "%u", val);
}

/* A range of source lines.  */

struct btrace_line_range
{
  /* The symtab this line is from.  */
  struct symtab *symtab;

  /* The first line (inclusive).  */
  int begin;

  /* The last line (exclusive).  */
  int end;
};

/* Construct a line range.  */

static struct btrace_line_range
btrace_mk_line_range (struct symtab *symtab, int begin, int end)
{
  struct btrace_line_range range;

  range.symtab = symtab;
  range.begin = begin;
  range.end = end;

  return range;
}

/* Add a line to a line range.  */

static struct btrace_line_range
btrace_line_range_add (struct btrace_line_range range, int line)
{
  if (range.end <= range.begin)
    {
      /* This is the first entry.  */
      range.begin = line;
      range.end = line + 1;
    }
  else if (line < range.begin)
    range.begin = line;
  else if (range.end < line)
    range.end = line;

  return range;
}
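
/* For example, adding line 42 to the empty range [0; 0) yields [42; 43);
   adding line 40 to [42; 43) then yields [40; 43).  */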

/* Return non-zero if RANGE is empty, zero otherwise.  */

static int
btrace_line_range_is_empty (struct btrace_line_range range)
{
  return range.end <= range.begin;
}

/* Return non-zero if LHS contains RHS, zero otherwise.  */

static int
btrace_line_range_contains_range (struct btrace_line_range lhs,
                                  struct btrace_line_range rhs)
{
  return ((lhs.symtab == rhs.symtab)
          && (lhs.begin <= rhs.begin)
          && (rhs.end <= lhs.end));
}

/* Find the line range associated with PC.  */

static struct btrace_line_range
btrace_find_line_range (CORE_ADDR pc)
{
  struct btrace_line_range range;
  struct linetable_entry *lines;
  struct linetable *ltable;
  struct symtab *symtab;
  int nlines, i;

  symtab = find_pc_line_symtab (pc);
  if (symtab == NULL)
    return btrace_mk_line_range (NULL, 0, 0);

  ltable = SYMTAB_LINETABLE (symtab);
  if (ltable == NULL)
    return btrace_mk_line_range (symtab, 0, 0);

  nlines = ltable->nitems;
  lines = ltable->item;
  if (nlines <= 0)
    return btrace_mk_line_range (symtab, 0, 0);

  range = btrace_mk_line_range (symtab, 0, 0);
  for (i = 0; i < nlines; i++)
    {
      if ((lines[i].pc == pc) && (lines[i].line != 0))
        range = btrace_line_range_add (range, lines[i].line);
    }

  return range;
}

/* Print source lines in LINES to UIOUT.

   UI_ITEM_CHAIN is a cleanup chain for the last source line and the
   instructions corresponding to that source line.  When printing a new source
   line, we do the cleanups for the open chain and open a new cleanup chain for
   the new source line.  If the source line range in LINES is not empty, this
   function will leave the cleanup chain for the last printed source line open
   so instructions can be added to it.  */

static void
btrace_print_lines (struct btrace_line_range lines, struct ui_out *uiout,
                    struct cleanup **ui_item_chain, int flags)
{
  print_source_lines_flags psl_flags;
  int line;

  psl_flags = 0;
  if (flags & DISASSEMBLY_FILENAME)
    psl_flags |= PRINT_SOURCE_LINES_FILENAME;

  for (line = lines.begin; line < lines.end; ++line)
    {
      if (*ui_item_chain != NULL)
        do_cleanups (*ui_item_chain);

      *ui_item_chain
        = make_cleanup_ui_out_tuple_begin_end (uiout, "src_and_asm_line");

      print_source_lines (lines.symtab, line, line + 1, psl_flags);

      make_cleanup_ui_out_list_begin_end (uiout, "line_asm_insn");
    }
}

/* Disassemble a section of the recorded instruction trace.  */

static void
btrace_insn_history (struct ui_out *uiout,
                     const struct btrace_thread_info *btinfo,
                     const struct btrace_insn_iterator *begin,
                     const struct btrace_insn_iterator *end, int flags)
{
  struct cleanup *cleanups, *ui_item_chain;
  struct gdbarch *gdbarch;
  struct btrace_insn_iterator it;
  struct btrace_line_range last_lines;

  DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
         btrace_insn_number (end));

  flags |= DISASSEMBLY_SPECULATIVE;

  gdbarch = target_gdbarch ();
  last_lines = btrace_mk_line_range (NULL, 0, 0);

  cleanups = make_cleanup_ui_out_list_begin_end (uiout, "asm_insns");

  /* UI_ITEM_CHAIN is a cleanup chain for the last source line and the
     instructions corresponding to that line.  */
  ui_item_chain = NULL;

  gdb_pretty_print_disassembler disasm (gdbarch);

  for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
    {
      const struct btrace_insn *insn;

      insn = btrace_insn_get (&it);

      /* A NULL instruction indicates a gap in the trace.  */
      if (insn == NULL)
        {
          const struct btrace_config *conf;

          conf = btrace_conf (btinfo);

          /* We have trace so we must have a configuration.  */
          gdb_assert (conf != NULL);

          uiout->field_fmt ("insn-number", "%u",
                            btrace_insn_number (&it));
          uiout->text ("\t");

          btrace_ui_out_decode_error (uiout, btrace_insn_get_error (&it),
                                      conf->format);
        }
      else
        {
          struct disasm_insn dinsn;

          if ((flags & DISASSEMBLY_SOURCE) != 0)
            {
              struct btrace_line_range lines;

              lines = btrace_find_line_range (insn->pc);
              if (!btrace_line_range_is_empty (lines)
                  && !btrace_line_range_contains_range (last_lines, lines))
                {
                  btrace_print_lines (lines, uiout, &ui_item_chain, flags);
                  last_lines = lines;
                }
              else if (ui_item_chain == NULL)
                {
                  ui_item_chain
                    = make_cleanup_ui_out_tuple_begin_end (uiout,
                                                           "src_and_asm_line");
                  /* No source information.  */
                  make_cleanup_ui_out_list_begin_end (uiout, "line_asm_insn");
                }

              gdb_assert (ui_item_chain != NULL);
            }

          memset (&dinsn, 0, sizeof (dinsn));
          dinsn.number = btrace_insn_number (&it);
          dinsn.addr = insn->pc;

          if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
            dinsn.is_speculative = 1;

          disasm.pretty_print_insn (uiout, &dinsn, flags);
        }
    }

  do_cleanups (cleanups);
}

/* The to_insn_history method of target record-btrace.  */

static void
record_btrace_insn_history (struct target_ops *self, int size, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
                                                       "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  btinfo = require_btrace ();
  history = btinfo->insn_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("insn-history (0x%x): %d", flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
         start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
        begin = *replay;
      else
        btrace_insn_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
         expand in the other direction, as well, to fill up any remaining
         context.  */
      end = begin;
      if (size < 0)
        {
          /* We want the current position covered, as well.  */
          covered = btrace_insn_next (&end, 1);
          covered += btrace_insn_prev (&begin, context - covered);
          covered += btrace_insn_next (&end, context - covered);
        }
      else
        {
          covered = btrace_insn_next (&end, context);
          covered += btrace_insn_prev (&begin, context - covered);
        }
    }
  else
    {
      begin = history->begin;
      end = history->end;

      DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
             btrace_insn_number (&begin), btrace_insn_number (&end));

      if (size < 0)
        {
          end = begin;
          covered = btrace_insn_prev (&begin, context);
        }
      else
        {
          begin = end;
          covered = btrace_insn_next (&end, context);
        }
    }

  if (covered > 0)
    btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
        printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
        printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  btrace_set_insn_history (btinfo, &begin, &end);
  do_cleanups (uiout_cleanup);
}
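
/* For example, a repeated "record instruction-history -" continues
   backwards from the previously printed range: with a stored range of
   [20; 30) and a history size of 10, the next invocation prints [10; 20).  */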

/* The to_insn_history_range method of target record-btrace.  */

static void
record_btrace_insn_history_range (struct target_ops *self,
                                  ULONGEST from, ULONGEST to, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
                                                       "insn history");
  low = from;
  high = to;

  DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_insn_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_insn_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_insn_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_insn_next (&end, 1);
    }

  btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_insn_history (btinfo, &begin, &end);

  do_cleanups (uiout_cleanup);
}

/* The to_insn_history_from method of target record-btrace.  */

static void
record_btrace_insn_history_from (struct target_ops *self,
                                 ULONGEST from, int size, int flags)
{
  ULONGEST begin, end, context;

  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  if (size < 0)
    {
      end = from;

      if (from < context)
        begin = 0;
      else
        begin = from - context + 1;
    }
  else
    {
      begin = from;
      end = from + context - 1;

      /* Check for wrap-around.  */
      if (end < begin)
        end = ULONGEST_MAX;
    }

  record_btrace_insn_history_range (self, begin, end, flags);
}
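
/* For example, with FROM == 10 and SIZE == -3, the requested range is
   [8; 10]; with SIZE == +3, it is [10; 12].  */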

/* Print the instruction number range for a function call history line.  */

static void
btrace_call_history_insn_range (struct ui_out *uiout,
                                const struct btrace_function *bfun)
{
  unsigned int begin, end, size;

  size = VEC_length (btrace_insn_s, bfun->insn);
  gdb_assert (size > 0);

  begin = bfun->insn_offset;
  end = begin + size - 1;

  ui_out_field_uint (uiout, "insn begin", begin);
  uiout->text (",");
  ui_out_field_uint (uiout, "insn end", end);
}

/* Compute the lowest and highest source line for the instructions in BFUN
   and return them in PBEGIN and PEND.
   Ignore instructions that can't be mapped to BFUN, e.g. instructions that
   result from inlining or macro expansion.  */

static void
btrace_compute_src_line_range (const struct btrace_function *bfun,
                               int *pbegin, int *pend)
{
  struct btrace_insn *insn;
  struct symtab *symtab;
  struct symbol *sym;
  unsigned int idx;
  int begin, end;

  begin = INT_MAX;
  end = INT_MIN;

  sym = bfun->sym;
  if (sym == NULL)
    goto out;

  symtab = symbol_symtab (sym);

  for (idx = 0; VEC_iterate (btrace_insn_s, bfun->insn, idx, insn); ++idx)
    {
      struct symtab_and_line sal;

      sal = find_pc_line (insn->pc, 0);
      if (sal.symtab != symtab || sal.line == 0)
        continue;

      begin = std::min (begin, sal.line);
      end = std::max (end, sal.line);
    }

 out:
  *pbegin = begin;
  *pend = end;
}
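
/* For example, if BFUN's instructions map to lines 10, 12, and 11 of its
   symtab, *PBEGIN is 10 and *PEND is 12.  If no instruction could be
   mapped, *PBEGIN (INT_MAX) ends up greater than *PEND (INT_MIN), which
   callers check for.  */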

/* Print the source line information for a function call history line.  */

static void
btrace_call_history_src_line (struct ui_out *uiout,
                              const struct btrace_function *bfun)
{
  struct symbol *sym;
  int begin, end;

  sym = bfun->sym;
  if (sym == NULL)
    return;

  uiout->field_string ("file",
                       symtab_to_filename_for_display (symbol_symtab (sym)));

  btrace_compute_src_line_range (bfun, &begin, &end);
  if (end < begin)
    return;

  uiout->text (":");
  uiout->field_int ("min line", begin);

  if (end == begin)
    return;

  uiout->text (",");
  uiout->field_int ("max line", end);
}

/* Get the name of a branch trace function.  */

static const char *
btrace_get_bfun_name (const struct btrace_function *bfun)
{
  struct minimal_symbol *msym;
  struct symbol *sym;

  if (bfun == NULL)
    return "??";

  msym = bfun->msym;
  sym = bfun->sym;

  if (sym != NULL)
    return SYMBOL_PRINT_NAME (sym);
  else if (msym != NULL)
    return MSYMBOL_PRINT_NAME (msym);
  else
    return "??";
}

/* Disassemble a section of the recorded function trace.  */

static void
btrace_call_history (struct ui_out *uiout,
                     const struct btrace_thread_info *btinfo,
                     const struct btrace_call_iterator *begin,
                     const struct btrace_call_iterator *end,
                     int int_flags)
{
  struct btrace_call_iterator it;
  record_print_flags flags = (enum record_print_flag) int_flags;

  DEBUG ("ftrace (0x%x): [%u; %u)", int_flags, btrace_call_number (begin),
         btrace_call_number (end));

  for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
    {
      const struct btrace_function *bfun;
      struct minimal_symbol *msym;
      struct symbol *sym;

      bfun = btrace_call_get (&it);
      sym = bfun->sym;
      msym = bfun->msym;

      /* Print the function index.  */
      ui_out_field_uint (uiout, "index", bfun->number);
      uiout->text ("\t");

      /* Indicate gaps in the trace.  */
      if (bfun->errcode != 0)
        {
          const struct btrace_config *conf;

          conf = btrace_conf (btinfo);

          /* We have trace so we must have a configuration.  */
          gdb_assert (conf != NULL);

          btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);

          continue;
        }

      if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
        {
          int level = bfun->level + btinfo->level, i;

          for (i = 0; i < level; ++i)
            uiout->text ("  ");
        }

      if (sym != NULL)
        uiout->field_string ("function", SYMBOL_PRINT_NAME (sym));
      else if (msym != NULL)
        uiout->field_string ("function", MSYMBOL_PRINT_NAME (msym));
      else if (!uiout->is_mi_like_p ())
        uiout->field_string ("function", "??");

      if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
        {
          uiout->text (_("\tinst "));
          btrace_call_history_insn_range (uiout, bfun);
        }

      if ((flags & RECORD_PRINT_SRC_LINE) != 0)
        {
          uiout->text (_("\tat "));
          btrace_call_history_src_line (uiout, bfun);
        }

      uiout->text ("\n");
    }
}

/* The to_call_history method of target record-btrace.  */

static void
record_btrace_call_history (struct target_ops *self, int size, int int_flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int context, covered;
  record_print_flags flags = (enum record_print_flag) int_flags;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
                                                       "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  btinfo = require_btrace ();
  history = btinfo->call_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("call-history (0x%x): %d", int_flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
         start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
        {
          begin.function = replay->function;
          begin.btinfo = btinfo;
        }
      else
        btrace_call_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
         expand in the other direction, as well, to fill up any remaining
         context.  */
      end = begin;
      if (size < 0)
        {
          /* We want the current position covered, as well.  */
          covered = btrace_call_next (&end, 1);
          covered += btrace_call_prev (&begin, context - covered);
          covered += btrace_call_next (&end, context - covered);
        }
      else
        {
          covered = btrace_call_next (&end, context);
          covered += btrace_call_prev (&begin, context - covered);
        }
    }
  else
    {
      begin = history->begin;
      end = history->end;

      DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", int_flags, size,
             btrace_call_number (&begin), btrace_call_number (&end));

      if (size < 0)
        {
          end = begin;
          covered = btrace_call_prev (&begin, context);
        }
      else
        {
          begin = end;
          covered = btrace_call_next (&end, context);
        }
    }

  if (covered > 0)
    btrace_call_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
        printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
        printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  btrace_set_call_history (btinfo, &begin, &end);
  do_cleanups (uiout_cleanup);
}

/* The to_call_history_range method of target record-btrace.  */

static void
record_btrace_call_history_range (struct target_ops *self,
                                  ULONGEST from, ULONGEST to,
                                  int int_flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;
  record_print_flags flags = (enum record_print_flag) int_flags;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
                                                       "func history");
  low = from;
  high = to;

  DEBUG ("call-history (0x%x): [%u; %u)", int_flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_call_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_call_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_call_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_call_next (&end, 1);
    }

  btrace_call_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_call_history (btinfo, &begin, &end);

  do_cleanups (uiout_cleanup);
}

/* The to_call_history_from method of target record-btrace.  */

static void
record_btrace_call_history_from (struct target_ops *self,
                                 ULONGEST from, int size,
                                 int int_flags)
{
  ULONGEST begin, end, context;
  record_print_flags flags = (enum record_print_flag) int_flags;

  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  if (size < 0)
    {
      end = from;

      if (from < context)
        begin = 0;
      else
        begin = from - context + 1;
    }
  else
    {
      begin = from;
      end = from + context - 1;

      /* Check for wrap-around.  */
      if (end < begin)
        end = ULONGEST_MAX;
    }

  record_btrace_call_history_range (self, begin, end, flags);
}

/* The to_record_method method of target record-btrace.  */

static enum record_method
record_btrace_record_method (struct target_ops *self, ptid_t ptid)
{
  const struct btrace_config *config;
  struct thread_info * const tp = find_thread_ptid (ptid);

  if (tp == NULL)
    error (_("No thread."));

  if (tp->btrace.target == NULL)
    return RECORD_METHOD_NONE;

  return RECORD_METHOD_BTRACE;
}

/* The to_record_is_replaying method of target record-btrace.  */

static int
record_btrace_is_replaying (struct target_ops *self, ptid_t ptid)
{
  struct thread_info *tp;

  ALL_NON_EXITED_THREADS (tp)
    if (ptid_match (tp->ptid, ptid) && btrace_is_replaying (tp))
      return 1;

  return 0;
}

/* The to_record_will_replay method of target record-btrace.  */

static int
record_btrace_will_replay (struct target_ops *self, ptid_t ptid, int dir)
{
  return dir == EXEC_REVERSE || record_btrace_is_replaying (self, ptid);
}

/* The to_xfer_partial method of target record-btrace.  */

static enum target_xfer_status
record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
                            const char *annex, gdb_byte *readbuf,
                            const gdb_byte *writebuf, ULONGEST offset,
                            ULONGEST len, ULONGEST *xfered_len)
{
  struct target_ops *t;

  /* Filter out requests that don't make sense during replay.  */
  if (replay_memory_access == replay_memory_access_read_only
      && !record_btrace_generating_corefile
      && record_btrace_is_replaying (ops, inferior_ptid))
    {
      switch (object)
        {
        case TARGET_OBJECT_MEMORY:
          {
            struct target_section *section;

            /* We do not allow writing memory in general.  */
            if (writebuf != NULL)
              {
                *xfered_len = len;
                return TARGET_XFER_UNAVAILABLE;
              }

            /* We allow reading readonly memory.  */
            section = target_section_by_addr (ops, offset);
            if (section != NULL)
              {
                /* Check if the section we found is readonly.  */
                if ((bfd_get_section_flags (section->the_bfd_section->owner,
                                            section->the_bfd_section)
                     & SEC_READONLY) != 0)
                  {
                    /* Truncate the request to fit into this section.  */
                    len = std::min (len, section->endaddr - offset);
                    break;
                  }
              }

            *xfered_len = len;
            return TARGET_XFER_UNAVAILABLE;
          }
        }
    }

  /* Forward the request.  */
  ops = ops->beneath;
  return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
                               offset, len, xfered_len);
}
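
/* While replaying with replay-memory-access set to "read-only" (the
   default), the filter above rejects all memory writes and only satisfies
   reads from read-only sections such as .text or .rodata; everything else
   is reported as TARGET_XFER_UNAVAILABLE.  */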

/* The to_insert_breakpoint method of target record-btrace.  */

static int
record_btrace_insert_breakpoint (struct target_ops *ops,
                                 struct gdbarch *gdbarch,
                                 struct bp_target_info *bp_tgt)
{
  const char *old;
  int ret;

  /* Inserting breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  TRY
    {
      ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      replay_memory_access = old;
      throw_exception (except);
    }
  END_CATCH
  replay_memory_access = old;

  return ret;
}

/* The to_remove_breakpoint method of target record-btrace.  */

static int
record_btrace_remove_breakpoint (struct target_ops *ops,
                                 struct gdbarch *gdbarch,
                                 struct bp_target_info *bp_tgt,
                                 enum remove_bp_reason reason)
{
  const char *old;
  int ret;

  /* Removing breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  TRY
    {
      ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt,
                                                reason);
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      replay_memory_access = old;
      throw_exception (except);
    }
  END_CATCH
  replay_memory_access = old;

  return ret;
}

/* The to_fetch_registers method of target record-btrace.  */

static void
record_btrace_fetch_registers (struct target_ops *ops,
                               struct regcache *regcache, int regno)
{
  struct btrace_insn_iterator *replay;
  struct thread_info *tp;

  tp = find_thread_ptid (regcache_get_ptid (regcache));
  gdb_assert (tp != NULL);

  replay = tp->btrace.replay;
  if (replay != NULL && !record_btrace_generating_corefile)
    {
      const struct btrace_insn *insn;
      struct gdbarch *gdbarch;
      int pcreg;

      gdbarch = get_regcache_arch (regcache);
      pcreg = gdbarch_pc_regnum (gdbarch);
      if (pcreg < 0)
        return;

      /* We can only provide the PC register.  */
      if (regno >= 0 && regno != pcreg)
        return;

      insn = btrace_insn_get (replay);
      gdb_assert (insn != NULL);

      regcache_raw_supply (regcache, regno, &insn->pc);
    }
  else
    {
      struct target_ops *t = ops->beneath;

      t->to_fetch_registers (t, regcache, regno);
    }
}

/* The to_store_registers method of target record-btrace.  */

static void
record_btrace_store_registers (struct target_ops *ops,
                               struct regcache *regcache, int regno)
{
  struct target_ops *t;

  if (!record_btrace_generating_corefile
      && record_btrace_is_replaying (ops, regcache_get_ptid (regcache)))
    error (_("Cannot write registers while replaying."));

  gdb_assert (may_write_registers != 0);

  t = ops->beneath;
  t->to_store_registers (t, regcache, regno);
}

/* The to_prepare_to_store method of target record-btrace.  */

static void
record_btrace_prepare_to_store (struct target_ops *ops,
                                struct regcache *regcache)
{
  struct target_ops *t;

  if (!record_btrace_generating_corefile
      && record_btrace_is_replaying (ops, regcache_get_ptid (regcache)))
    return;

  t = ops->beneath;
  t->to_prepare_to_store (t, regcache);
}

/* The branch trace frame cache.  */

struct btrace_frame_cache
{
  /* The thread.  */
  struct thread_info *tp;

  /* The frame info.  */
  struct frame_info *frame;

  /* The branch trace function segment.  */
  const struct btrace_function *bfun;
};

/* A struct btrace_frame_cache hash table indexed by NEXT.  */

static htab_t bfcache;

/* hash_f for htab_create_alloc of bfcache.  */

static hashval_t
bfcache_hash (const void *arg)
{
  const struct btrace_frame_cache *cache
    = (const struct btrace_frame_cache *) arg;

  return htab_hash_pointer (cache->frame);
}

/* eq_f for htab_create_alloc of bfcache.  */

static int
bfcache_eq (const void *arg1, const void *arg2)
{
  const struct btrace_frame_cache *cache1
    = (const struct btrace_frame_cache *) arg1;
  const struct btrace_frame_cache *cache2
    = (const struct btrace_frame_cache *) arg2;

  return cache1->frame == cache2->frame;
}
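
/* Since the table hashes on the frame pointer itself (htab_hash_pointer),
   a lookup only needs a pattern entry with its FRAME field set; see
   btrace_get_frame_function below.  */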

/* Create a new btrace frame cache.  */

static struct btrace_frame_cache *
bfcache_new (struct frame_info *frame)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
  cache->frame = frame;

  slot = htab_find_slot (bfcache, cache, INSERT);
  gdb_assert (*slot == NULL);
  *slot = cache;

  return cache;
}

/* Extract the branch trace function from a branch trace frame.  */

static const struct btrace_function *
btrace_get_frame_function (struct frame_info *frame)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;
  struct btrace_frame_cache pattern;
  void **slot;

  pattern.frame = frame;

  slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
  if (slot == NULL)
    return NULL;

  cache = (const struct btrace_frame_cache *) *slot;
  return cache->bfun;
}

/* Implement stop_reason method for record_btrace_frame_unwind.  */

static enum unwind_stop_reason
record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
                                        void **this_cache)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;

  cache = (const struct btrace_frame_cache *) *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  if (bfun->up == NULL)
    return UNWIND_UNAVAILABLE;

  return UNWIND_NO_REASON;
}

/* Implement this_id method for record_btrace_frame_unwind.  */

static void
record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
                             struct frame_id *this_id)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;
  CORE_ADDR code, special;

  cache = (const struct btrace_frame_cache *) *this_cache;

  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  while (bfun->segment.prev != NULL)
    bfun = bfun->segment.prev;

  code = get_frame_func (this_frame);
  special = bfun->number;

  *this_id = frame_id_build_unavailable_stack_special (code, special);

  DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
         btrace_get_bfun_name (cache->bfun),
         core_addr_to_string_nz (this_id->code_addr),
         core_addr_to_string_nz (this_id->special_addr));
}

/* Implement prev_register method for record_btrace_frame_unwind.  */

static struct value *
record_btrace_frame_prev_register (struct frame_info *this_frame,
                                   void **this_cache,
                                   int regnum)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun, *caller;
  const struct btrace_insn *insn;
  struct gdbarch *gdbarch;
  CORE_ADDR pc;
  int pcreg;

  gdbarch = get_frame_arch (this_frame);
  pcreg = gdbarch_pc_regnum (gdbarch);
  if (pcreg < 0 || regnum != pcreg)
    throw_error (NOT_AVAILABLE_ERROR,
                 _("Registers are not available in btrace record history"));

  cache = (const struct btrace_frame_cache *) *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  caller = bfun->up;
  if (caller == NULL)
    throw_error (NOT_AVAILABLE_ERROR,
                 _("No caller in btrace record history"));

  if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
    {
      insn = VEC_index (btrace_insn_s, caller->insn, 0);
      pc = insn->pc;
    }
  else
    {
      insn = VEC_last (btrace_insn_s, caller->insn);
      pc = insn->pc;

      pc += gdb_insn_length (gdbarch, pc);
    }

  DEBUG ("[frame] unwound PC in %s on level %d: %s",
         btrace_get_bfun_name (bfun), bfun->level,
         core_addr_to_string_nz (pc));

  return frame_unwind_got_address (this_frame, regnum, pc);
}

/* Implement sniffer method for record_btrace_frame_unwind.  */

static int
record_btrace_frame_sniffer (const struct frame_unwind *self,
                             struct frame_info *this_frame,
                             void **this_cache)
{
  const struct btrace_function *bfun;
  struct btrace_frame_cache *cache;
  struct thread_info *tp;
  struct frame_info *next;

  /* THIS_FRAME does not contain a reference to its thread.  */
  tp = find_thread_ptid (inferior_ptid);
  gdb_assert (tp != NULL);

  bfun = NULL;
  next = get_next_frame (this_frame);
  if (next == NULL)
    {
      const struct btrace_insn_iterator *replay;

      replay = tp->btrace.replay;
      if (replay != NULL)
        bfun = replay->function;
    }
  else
    {
      const struct btrace_function *callee;

      callee = btrace_get_frame_function (next);
      if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
        bfun = callee->up;
    }

  if (bfun == NULL)
    return 0;

  DEBUG ("[frame] sniffed frame for %s on level %d",
         btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = tp;
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}

/* Implement sniffer method for record_btrace_tailcall_frame_unwind.  */

static int
record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
                                      struct frame_info *this_frame,
                                      void **this_cache)
{
  const struct btrace_function *bfun, *callee;
  struct btrace_frame_cache *cache;
  struct frame_info *next;

  next = get_next_frame (this_frame);
  if (next == NULL)
    return 0;

  callee = btrace_get_frame_function (next);
  if (callee == NULL)
    return 0;

  if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
    return 0;

  bfun = callee->up;
  if (bfun == NULL)
    return 0;

  DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
         btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = find_thread_ptid (inferior_ptid);
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}

static void
record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = (struct btrace_frame_cache *) this_cache;

  slot = htab_find_slot (bfcache, cache, NO_INSERT);
  gdb_assert (slot != NULL);

  htab_remove_elt (bfcache, cache);
}

/* Branch trace recording does not store previous memory contents, nor the
   contents of the stack frames.  Any unwinding would return erroneous
   results as the stack contents no longer match the changed PC value
   restored from history.  Therefore this unwinder reports any possibly
   unwound registers as <unavailable>.  */

const struct frame_unwind record_btrace_frame_unwind =
{
  NORMAL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_frame_sniffer,
  record_btrace_frame_dealloc_cache
};

const struct frame_unwind record_btrace_tailcall_frame_unwind =
{
  TAILCALL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_tailcall_frame_sniffer,
  record_btrace_frame_dealloc_cache
};

/* Implement the to_get_unwinder method.  */

static const struct frame_unwind *
record_btrace_to_get_unwinder (struct target_ops *self)
{
  return &record_btrace_frame_unwind;
}

/* Implement the to_get_tailcall_unwinder method.  */

static const struct frame_unwind *
record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
{
  return &record_btrace_tailcall_frame_unwind;
}

/* Return a human-readable string for FLAG.  */

static const char *
btrace_thread_flag_to_str (enum btrace_thread_flag flag)
{
  switch (flag)
    {
    case BTHR_STEP:
      return "step";

    case BTHR_RSTEP:
      return "reverse-step";

    case BTHR_CONT:
      return "cont";

    case BTHR_RCONT:
      return "reverse-cont";

    case BTHR_STOP:
      return "stop";
    }

  return "<invalid>";
}

/* Indicate that TP should be resumed according to FLAG.  */

static void
record_btrace_resume_thread (struct thread_info *tp,
                             enum btrace_thread_flag flag)
{
  struct btrace_thread_info *btinfo;

  DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp),
         target_pid_to_str (tp->ptid), flag, btrace_thread_flag_to_str (flag));

  btinfo = &tp->btrace;

  /* Fetch the latest branch trace.  */
  btrace_fetch (tp);

  /* A resume request overwrites a preceding resume or stop request.  */
  btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
  btinfo->flags |= flag;
}

/* Get the current frame for TP.  */

static struct frame_info *
get_thread_current_frame (struct thread_info *tp)
{
  struct frame_info *frame;
  ptid_t old_inferior_ptid;
  int executing;

  /* Set INFERIOR_PTID, which is implicitly used by get_current_frame.  */
  old_inferior_ptid = inferior_ptid;
  inferior_ptid = tp->ptid;

  /* Clear the executing flag to allow changes to the current frame.
     We are not actually running, yet.  We just started a reverse execution
     command or a record goto command.
     For the latter, EXECUTING is false and this has no effect.
     For the former, EXECUTING is true and we're in to_wait, about to
     move the thread.  Since we need to recompute the stack, we temporarily
     set EXECUTING to false.  */
  executing = is_executing (inferior_ptid);
  set_executing (inferior_ptid, 0);

  frame = NULL;
  TRY
    {
      frame = get_current_frame ();
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      /* Restore the previous execution state.  */
      set_executing (inferior_ptid, executing);

      /* Restore the previous inferior_ptid.  */
      inferior_ptid = old_inferior_ptid;

      throw_exception (except);
    }
  END_CATCH

  /* Restore the previous execution state.  */
  set_executing (inferior_ptid, executing);

  /* Restore the previous inferior_ptid.  */
  inferior_ptid = old_inferior_ptid;

  return frame;
}

/* Start replaying a thread.  */

static struct btrace_insn_iterator *
record_btrace_start_replaying (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;
  replay = NULL;

  /* We can't start replaying without trace.  */
  if (btinfo->begin == NULL)
    return NULL;

  /* GDB stores the current frame_id when stepping in order to detect steps
1929 into subroutines.
1930 Since frames are computed differently when we're replaying, we need to
1931 recompute those stored frames and fix them up so we can still detect
1932 subroutines after we started replaying. */
1933 TRY
1934 {
1935 struct frame_info *frame;
1936 struct frame_id frame_id;
1937 int upd_step_frame_id, upd_step_stack_frame_id;
1938
1939 /* The current frame without replaying - computed via normal unwind. */
1940 frame = get_thread_current_frame (tp);
1941 frame_id = get_frame_id (frame);
1942
1943 /* Check if we need to update any stepping-related frame id's. */
1944 upd_step_frame_id = frame_id_eq (frame_id,
1945 tp->control.step_frame_id);
1946 upd_step_stack_frame_id = frame_id_eq (frame_id,
1947 tp->control.step_stack_frame_id);
1948
1949 /* We start replaying at the end of the branch trace. This corresponds
1950 to the current instruction. */
1951 replay = XNEW (struct btrace_insn_iterator);
1952 btrace_insn_end (replay, btinfo);
1953
1954 /* Skip gaps at the end of the trace. */
1955 while (btrace_insn_get (replay) == NULL)
1956 {
1957 unsigned int steps;
1958
1959 steps = btrace_insn_prev (replay, 1);
1960 if (steps == 0)
1961 error (_("No trace."));
1962 }
1963
1964 /* We're not replaying, yet. */
1965 gdb_assert (btinfo->replay == NULL);
1966 btinfo->replay = replay;
1967
1968 /* Make sure we're not using any stale registers. */
1969 registers_changed_ptid (tp->ptid);
1970
1971 /* The current frame with replaying - computed via btrace unwind. */
1972 frame = get_thread_current_frame (tp);
1973 frame_id = get_frame_id (frame);
1974
1975 /* Replace stepping related frames where necessary. */
1976 if (upd_step_frame_id)
1977 tp->control.step_frame_id = frame_id;
1978 if (upd_step_stack_frame_id)
1979 tp->control.step_stack_frame_id = frame_id;
1980 }
1981 CATCH (except, RETURN_MASK_ALL)
1982 {
1983 xfree (btinfo->replay);
1984 btinfo->replay = NULL;
1985
1986 registers_changed_ptid (tp->ptid);
1987
1988 throw_exception (except);
1989 }
1990 END_CATCH
1991
1992 return replay;
1993 }
1994
1995 /* Stop replaying a thread. */
1996
1997 static void
1998 record_btrace_stop_replaying (struct thread_info *tp)
1999 {
2000 struct btrace_thread_info *btinfo;
2001
2002 btinfo = &tp->btrace;
2003
2004 xfree (btinfo->replay);
2005 btinfo->replay = NULL;
2006
2007 /* Make sure we're not leaving any stale registers. */
2008 registers_changed_ptid (tp->ptid);
2009 }
2010
2011 /* Stop replaying TP if it is at the end of its execution history. */
2012
2013 static void
2014 record_btrace_stop_replaying_at_end (struct thread_info *tp)
2015 {
2016 struct btrace_insn_iterator *replay, end;
2017 struct btrace_thread_info *btinfo;
2018
2019 btinfo = &tp->btrace;
2020 replay = btinfo->replay;
2021
2022 if (replay == NULL)
2023 return;
2024
2025 btrace_insn_end (&end, btinfo);
2026
2027 if (btrace_insn_cmp (replay, &end) == 0)
2028 record_btrace_stop_replaying (tp);
2029 }
2030
2031 /* The to_resume method of target record-btrace. */
2032
2033 static void
2034 record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
2035 enum gdb_signal signal)
2036 {
2037 struct thread_info *tp;
2038 enum btrace_thread_flag flag, cflag;
2039
2040 DEBUG ("resume %s: %s%s", target_pid_to_str (ptid),
2041 execution_direction == EXEC_REVERSE ? "reverse-" : "",
2042 step ? "step" : "cont");
2043
2044 /* Store the execution direction of the last resume.
2045
2046 If there is more than one to_resume call, we have to rely on infrun
2047 to not change the execution direction in-between. */
2048 record_btrace_resume_exec_dir = execution_direction;
2049
2050 /* As long as we're not replaying, just forward the request.
2051
2052 For non-stop targets this means that no thread is replaying. In order to
2053 make progress, we may need to explicitly move replaying threads to the end
2054 of their execution history. */
2055 if ((execution_direction != EXEC_REVERSE)
2056 && !record_btrace_is_replaying (ops, minus_one_ptid))
2057 {
2058 ops = ops->beneath;
2059 ops->to_resume (ops, ptid, step, signal);
2060 return;
2061 }
2062
2063 /* Compute the btrace thread flag for the requested move. */
2064 if (execution_direction == EXEC_REVERSE)
2065 {
2066 flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;
2067 cflag = BTHR_RCONT;
2068 }
2069 else
2070 {
2071 flag = step == 0 ? BTHR_CONT : BTHR_STEP;
2072 cflag = BTHR_CONT;
2073 }
2074
2075 /* We just indicate the resume intent here. The actual stepping happens in
2076 record_btrace_wait below.
2077
2078 For all-stop targets, we only step INFERIOR_PTID and continue others. */
2079 if (!target_is_non_stop_p ())
2080 {
2081 gdb_assert (ptid_match (inferior_ptid, ptid));
2082
2083 ALL_NON_EXITED_THREADS (tp)
2084 if (ptid_match (tp->ptid, ptid))
2085 {
2086 if (ptid_match (tp->ptid, inferior_ptid))
2087 record_btrace_resume_thread (tp, flag);
2088 else
2089 record_btrace_resume_thread (tp, cflag);
2090 }
2091 }
2092 else
2093 {
2094 ALL_NON_EXITED_THREADS (tp)
2095 if (ptid_match (tp->ptid, ptid))
2096 record_btrace_resume_thread (tp, flag);
2097 }
2098
2099 /* Async support. */
2100 if (target_can_async_p ())
2101 {
2102 target_async (1);
2103 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2104 }
2105 }
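
/* Example of the flag computation above (illustrative): a user-level
   "reverse-step" on the current thread arrives here with
   EXECUTION_DIRECTION == EXEC_REVERSE and STEP == 1, so the stepping
   thread gets BTHR_RSTEP while the other matching threads get
   BTHR_RCONT.  Nothing moves yet; record_btrace_wait below performs
   the actual stepping.  */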
2106
2107 /* The to_commit_resume method of target record-btrace. */
2108
2109 static void
2110 record_btrace_commit_resume (struct target_ops *ops)
2111 {
2112 if ((execution_direction != EXEC_REVERSE)
2113 && !record_btrace_is_replaying (ops, minus_one_ptid))
2114 ops->beneath->to_commit_resume (ops->beneath);
2115 }
2116
2117 /* Cancel resuming TP. */
2118
2119 static void
2120 record_btrace_cancel_resume (struct thread_info *tp)
2121 {
2122 enum btrace_thread_flag flags;
2123
2124 flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
2125 if (flags == 0)
2126 return;
2127
2128 DEBUG ("cancel resume thread %s (%s): %x (%s)",
2129 print_thread_id (tp),
2130 target_pid_to_str (tp->ptid), flags,
2131 btrace_thread_flag_to_str (flags));
2132
2133 tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
2134 record_btrace_stop_replaying_at_end (tp);
2135 }
2136
2137 /* Return a target_waitstatus indicating that we ran out of history. */
2138
2139 static struct target_waitstatus
2140 btrace_step_no_history (void)
2141 {
2142 struct target_waitstatus status;
2143
2144 status.kind = TARGET_WAITKIND_NO_HISTORY;
2145
2146 return status;
2147 }
2148
2149 /* Return a target_waitstatus indicating that a step finished. */
2150
2151 static struct target_waitstatus
2152 btrace_step_stopped (void)
2153 {
2154 struct target_waitstatus status;
2155
2156 status.kind = TARGET_WAITKIND_STOPPED;
2157 status.value.sig = GDB_SIGNAL_TRAP;
2158
2159 return status;
2160 }
2161
2162 /* Return a target_waitstatus indicating that a thread was stopped as
2163 requested. */
2164
2165 static struct target_waitstatus
2166 btrace_step_stopped_on_request (void)
2167 {
2168 struct target_waitstatus status;
2169
2170 status.kind = TARGET_WAITKIND_STOPPED;
2171 status.value.sig = GDB_SIGNAL_0;
2172
2173 return status;
2174 }
2175
2176 /* Return a target_waitstatus indicating a spurious stop. */
2177
2178 static struct target_waitstatus
2179 btrace_step_spurious (void)
2180 {
2181 struct target_waitstatus status;
2182
2183 status.kind = TARGET_WAITKIND_SPURIOUS;
2184
2185 return status;
2186 }
2187
2188 /* Return a target_waitstatus indicating that the thread was not resumed. */
2189
2190 static struct target_waitstatus
2191 btrace_step_no_resumed (void)
2192 {
2193 struct target_waitstatus status;
2194
2195 status.kind = TARGET_WAITKIND_NO_RESUMED;
2196
2197 return status;
2198 }
2199
2200 /* Return a target_waitstatus indicating that we should wait again. */
2201
2202 static struct target_waitstatus
2203 btrace_step_again (void)
2204 {
2205 struct target_waitstatus status;
2206
2207 status.kind = TARGET_WAITKIND_IGNORE;
2208
2209 return status;
2210 }
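
/* A minimal usage sketch for the helpers above (hypothetical caller):

     struct target_waitstatus ws = btrace_step_stopped ();
     gdb_assert (ws.kind == TARGET_WAITKIND_STOPPED
                 && ws.value.sig == GDB_SIGNAL_TRAP);

   Only the stopped variants carry a payload (the signal); the other
   kinds returned above are fully described by their kind field.  */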
2211
2212 /* Clear the record histories. */
2213
2214 static void
2215 record_btrace_clear_histories (struct btrace_thread_info *btinfo)
2216 {
2217 xfree (btinfo->insn_history);
2218 xfree (btinfo->call_history);
2219
2220 btinfo->insn_history = NULL;
2221 btinfo->call_history = NULL;
2222 }
2223
2224 /* Check whether TP's current replay position is at a breakpoint. */
2225
2226 static int
2227 record_btrace_replay_at_breakpoint (struct thread_info *tp)
2228 {
2229 struct btrace_insn_iterator *replay;
2230 struct btrace_thread_info *btinfo;
2231 const struct btrace_insn *insn;
2232 struct inferior *inf;
2233
2234 btinfo = &tp->btrace;
2235 replay = btinfo->replay;
2236
2237 if (replay == NULL)
2238 return 0;
2239
2240 insn = btrace_insn_get (replay);
2241 if (insn == NULL)
2242 return 0;
2243
2244 inf = find_inferior_ptid (tp->ptid);
2245 if (inf == NULL)
2246 return 0;
2247
2248 return record_check_stopped_by_breakpoint (inf->aspace, insn->pc,
2249 &btinfo->stop_reason);
2250 }
2251
2252 /* Step one instruction in forward direction. */
2253
2254 static struct target_waitstatus
2255 record_btrace_single_step_forward (struct thread_info *tp)
2256 {
2257 struct btrace_insn_iterator *replay, end, start;
2258 struct btrace_thread_info *btinfo;
2259
2260 btinfo = &tp->btrace;
2261 replay = btinfo->replay;
2262
2263 /* We're done if we're not replaying. */
2264 if (replay == NULL)
2265 return btrace_step_no_history ();
2266
2267 /* Check if we're stepping a breakpoint. */
2268 if (record_btrace_replay_at_breakpoint (tp))
2269 return btrace_step_stopped ();
2270
2271 /* Skip gaps during replay. If we end up at a gap (at the end of the trace),
2272 jump back to the instruction at which we started. */
2273 start = *replay;
2274 do
2275 {
2276 unsigned int steps;
2277
2278 /* We will bail out here if we continue stepping after reaching the end
2279 of the execution history. */
2280 steps = btrace_insn_next (replay, 1);
2281 if (steps == 0)
2282 {
2283 *replay = start;
2284 return btrace_step_no_history ();
2285 }
2286 }
2287 while (btrace_insn_get (replay) == NULL);
2288
2289 /* Determine the end of the instruction trace. */
2290 btrace_insn_end (&end, btinfo);
2291
2292 /* The execution trace contains (and ends with) the current instruction.
2293 This instruction has not been executed yet, so the trace really ends
2294 one instruction earlier. */
2295 if (btrace_insn_cmp (replay, &end) == 0)
2296 return btrace_step_no_history ();
2297
2298 return btrace_step_spurious ();
2299 }
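
/* Gap handling above, illustrated on an assumed trace layout:

     [insn #1] [insn #2] [gap] [insn #3] <end>

   Stepping forward from #2 enters the loop, skips the gap, and lands
   on #3.  Had the gap been the last entry, btrace_insn_next would
   eventually return zero steps; we would then restore START and
   report TARGET_WAITKIND_NO_HISTORY rather than stop inside the
   gap.  */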
2300
2301 /* Step one instruction in backward direction. */
2302
2303 static struct target_waitstatus
2304 record_btrace_single_step_backward (struct thread_info *tp)
2305 {
2306 struct btrace_insn_iterator *replay, start;
2307 struct btrace_thread_info *btinfo;
2308
2309 btinfo = &tp->btrace;
2310 replay = btinfo->replay;
2311
2312 /* Start replaying if we're not already doing so. */
2313 if (replay == NULL)
2314 replay = record_btrace_start_replaying (tp);
2315
2316 /* If we can't step any further, we reached the beginning of the execution
2317 history. Skip gaps during replay. If we end up at a gap (at the
2318 beginning of the trace), jump back to the instruction at which we started. */
2319 start = *replay;
2320 do
2321 {
2322 unsigned int steps;
2323
2324 steps = btrace_insn_prev (replay, 1);
2325 if (steps == 0)
2326 {
2327 *replay = start;
2328 return btrace_step_no_history ();
2329 }
2330 }
2331 while (btrace_insn_get (replay) == NULL);
2332
2333 /* Check if we're stepping a breakpoint.
2334
2335 For reverse-stepping, this check is after the step. There is logic in
2336 infrun.c that handles reverse-stepping separately. See, for example,
2337 proceed and adjust_pc_after_break.
2338
2339 This code assumes that for reverse-stepping, PC points to the last
2340 de-executed instruction, whereas for forward-stepping PC points to the
2341 next to-be-executed instruction. */
2342 if (record_btrace_replay_at_breakpoint (tp))
2343 return btrace_step_stopped ();
2344
2345 return btrace_step_spurious ();
2346 }
2347
2348 /* Step a single thread. */
2349
2350 static struct target_waitstatus
2351 record_btrace_step_thread (struct thread_info *tp)
2352 {
2353 struct btrace_thread_info *btinfo;
2354 struct target_waitstatus status;
2355 enum btrace_thread_flag flags;
2356
2357 btinfo = &tp->btrace;
2358
2359 flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
2360 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
2361
2362 DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp),
2363 target_pid_to_str (tp->ptid), flags,
2364 btrace_thread_flag_to_str (flags));
2365
2366 /* We can't step without an execution history. */
2367 if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
2368 return btrace_step_no_history ();
2369
2370 switch (flags)
2371 {
2372 default:
2373 internal_error (__FILE__, __LINE__, _("invalid stepping type."));
2374
2375 case BTHR_STOP:
2376 return btrace_step_stopped_on_request ();
2377
2378 case BTHR_STEP:
2379 status = record_btrace_single_step_forward (tp);
2380 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2381 break;
2382
2383 return btrace_step_stopped ();
2384
2385 case BTHR_RSTEP:
2386 status = record_btrace_single_step_backward (tp);
2387 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2388 break;
2389
2390 return btrace_step_stopped ();
2391
2392 case BTHR_CONT:
2393 status = record_btrace_single_step_forward (tp);
2394 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2395 break;
2396
2397 btinfo->flags |= flags;
2398 return btrace_step_again ();
2399
2400 case BTHR_RCONT:
2401 status = record_btrace_single_step_backward (tp);
2402 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2403 break;
2404
2405 btinfo->flags |= flags;
2406 return btrace_step_again ();
2407 }
2408
2409 /* We keep threads moving at the end of their execution history. The to_wait
2410 method will stop the thread for which the event is reported. */
2411 if (status.kind == TARGET_WAITKIND_NO_HISTORY)
2412 btinfo->flags |= flags;
2413
2414 return status;
2415 }
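
/* Summary of the dispatch above (illustrative): BTHR_STEP and
   BTHR_RSTEP perform a single forward or backward step and report a
   SIGTRAP stop unless they ran out of history.  BTHR_CONT and
   BTHR_RCONT re-arm themselves via btrace_step_again until a
   breakpoint or the edge of the execution history is hit.  BTHR_STOP
   reports a stop without a signal.  */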
2416
2417 /* A vector of threads. */
2418
2419 typedef struct thread_info * tp_t;
2420 DEF_VEC_P (tp_t);
2421
2422 /* Announce further events if necessary. */
2423
2424 static void
2425 record_btrace_maybe_mark_async_event (const VEC (tp_t) *moving,
2426 const VEC (tp_t) *no_history)
2427 {
2428 int more_moving, more_no_history;
2429
2430 more_moving = !VEC_empty (tp_t, moving);
2431 more_no_history = !VEC_empty (tp_t, no_history);
2432
2433 if (!more_moving && !more_no_history)
2434 return;
2435
2436 if (more_moving)
2437 DEBUG ("movers pending");
2438
2439 if (more_no_history)
2440 DEBUG ("no-history pending");
2441
2442 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2443 }
2444
2445 /* The to_wait method of target record-btrace. */
2446
2447 static ptid_t
2448 record_btrace_wait (struct target_ops *ops, ptid_t ptid,
2449 struct target_waitstatus *status, int options)
2450 {
2451 VEC (tp_t) *moving, *no_history;
2452 struct thread_info *tp, *eventing;
2453 struct cleanup *cleanups = make_cleanup (null_cleanup, NULL);
2454
2455 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);
2456
2457 /* As long as we're not replaying, just forward the request. */
2458 if ((execution_direction != EXEC_REVERSE)
2459 && !record_btrace_is_replaying (ops, minus_one_ptid))
2460 {
2461 ops = ops->beneath;
2462 return ops->to_wait (ops, ptid, status, options);
2463 }
2464
2465 moving = NULL;
2466 no_history = NULL;
2467
2468 make_cleanup (VEC_cleanup (tp_t), &moving);
2469 make_cleanup (VEC_cleanup (tp_t), &no_history);
2470
2471 /* Keep a work list of moving threads. */
2472 ALL_NON_EXITED_THREADS (tp)
2473 if (ptid_match (tp->ptid, ptid)
2474 && ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0))
2475 VEC_safe_push (tp_t, moving, tp);
2476
2477 if (VEC_empty (tp_t, moving))
2478 {
2479 *status = btrace_step_no_resumed ();
2480
2481 DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid),
2482 target_waitstatus_to_string (status));
2483
2484 do_cleanups (cleanups);
2485 return null_ptid;
2486 }
2487
2488 /* Step moving threads one by one, one step each, until either one thread
2489 reports an event or we run out of threads to step.
2490
2491 When stepping more than one thread, chances are that some threads reach
2492 the end of their execution history earlier than others. If we reported
2493 this immediately, all-stop on top of non-stop would stop all threads and
2494 resume the same threads next time. And we would report the same thread
2495 having reached the end of its execution history again.
2496
2497 In the worst case, this would starve the other threads. But even if other
2498 threads were allowed to make progress, this would result in far too
2499 many intermediate stops.
2500
2501 We therefore delay the reporting of "no execution history" until we have
2502 nothing else to report. By this time, all threads should have moved to
2503 either the beginning or the end of their execution history. There will
2504 be a single user-visible stop. */
2505 eventing = NULL;
2506 while ((eventing == NULL) && !VEC_empty (tp_t, moving))
2507 {
2508 unsigned int ix;
2509
2510 ix = 0;
2511 while ((eventing == NULL) && VEC_iterate (tp_t, moving, ix, tp))
2512 {
2513 *status = record_btrace_step_thread (tp);
2514
2515 switch (status->kind)
2516 {
2517 case TARGET_WAITKIND_IGNORE:
2518 ix++;
2519 break;
2520
2521 case TARGET_WAITKIND_NO_HISTORY:
2522 VEC_safe_push (tp_t, no_history,
2523 VEC_ordered_remove (tp_t, moving, ix));
2524 break;
2525
2526 default:
2527 eventing = VEC_unordered_remove (tp_t, moving, ix);
2528 break;
2529 }
2530 }
2531 }
2532
2533 if (eventing == NULL)
2534 {
2535 /* We started with at least one moving thread. This thread must have
2536 either stopped or reached the end of its execution history.
2537
2538 In the former case, EVENTING must not be NULL.
2539 In the latter case, NO_HISTORY must not be empty. */
2540 gdb_assert (!VEC_empty (tp_t, no_history));
2541
2542 /* We kept threads moving at the end of their execution history. Stop
2543 EVENTING now that we are going to report its stop. */
2544 eventing = VEC_unordered_remove (tp_t, no_history, 0);
2545 eventing->btrace.flags &= ~BTHR_MOVE;
2546
2547 *status = btrace_step_no_history ();
2548 }
2549
2550 gdb_assert (eventing != NULL);
2551
2552 /* We kept threads replaying at the end of their execution history. Stop
2553 replaying EVENTING now that we are going to report its stop. */
2554 record_btrace_stop_replaying_at_end (eventing);
2555
2556 /* Stop all other threads. */
2557 if (!target_is_non_stop_p ())
2558 ALL_NON_EXITED_THREADS (tp)
2559 record_btrace_cancel_resume (tp);
2560
2561 /* In async mode, we need to announce further events. */
2562 if (target_is_async_p ())
2563 record_btrace_maybe_mark_async_event (moving, no_history);
2564
2565 /* Start record histories anew from the current position. */
2566 record_btrace_clear_histories (&eventing->btrace);
2567
2568 /* We moved the replay position but did not update registers. */
2569 registers_changed_ptid (eventing->ptid);
2570
2571 DEBUG ("wait ended by thread %s (%s): %s",
2572 print_thread_id (eventing),
2573 target_pid_to_str (eventing->ptid),
2574 target_waitstatus_to_string (status));
2575
2576 do_cleanups (cleanups);
2577 return eventing->ptid;
2578 }
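
/* Worked example for the event selection above (hypothetical): two
   replaying threads A and B are reverse-continued.  B reaches the
   beginning of its history after three steps and is moved to
   NO_HISTORY; A hits a breakpoint after ten steps and becomes
   EVENTING.  A's stop is reported now; B's "no history" event stays
   pending and is announced later via the async event handler, giving
   a single user-visible stop at a time.  */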
2579
2580 /* The to_stop method of target record-btrace. */
2581
2582 static void
2583 record_btrace_stop (struct target_ops *ops, ptid_t ptid)
2584 {
2585 DEBUG ("stop %s", target_pid_to_str (ptid));
2586
2587 /* As long as we're not replaying, just forward the request. */
2588 if ((execution_direction != EXEC_REVERSE)
2589 && !record_btrace_is_replaying (ops, minus_one_ptid))
2590 {
2591 ops = ops->beneath;
2592 ops->to_stop (ops, ptid);
2593 }
2594 else
2595 {
2596 struct thread_info *tp;
2597
2598 ALL_NON_EXITED_THREADS (tp)
2599 if (ptid_match (tp->ptid, ptid))
2600 {
2601 tp->btrace.flags &= ~BTHR_MOVE;
2602 tp->btrace.flags |= BTHR_STOP;
2603 }
2604 }
2605 }
2606
2607 /* The to_can_execute_reverse method of target record-btrace. */
2608
2609 static int
2610 record_btrace_can_execute_reverse (struct target_ops *self)
2611 {
2612 return 1;
2613 }
2614
2615 /* The to_stopped_by_sw_breakpoint method of target record-btrace. */
2616
2617 static int
2618 record_btrace_stopped_by_sw_breakpoint (struct target_ops *ops)
2619 {
2620 if (record_btrace_is_replaying (ops, minus_one_ptid))
2621 {
2622 struct thread_info *tp = inferior_thread ();
2623
2624 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2625 }
2626
2627 return ops->beneath->to_stopped_by_sw_breakpoint (ops->beneath);
2628 }
2629
2630 /* The to_supports_stopped_by_sw_breakpoint method of target
2631 record-btrace. */
2632
2633 static int
2634 record_btrace_supports_stopped_by_sw_breakpoint (struct target_ops *ops)
2635 {
2636 if (record_btrace_is_replaying (ops, minus_one_ptid))
2637 return 1;
2638
2639 return ops->beneath->to_supports_stopped_by_sw_breakpoint (ops->beneath);
2640 }
2641
2642 /* The to_stopped_by_hw_breakpoint method of target record-btrace. */
2643
2644 static int
2645 record_btrace_stopped_by_hw_breakpoint (struct target_ops *ops)
2646 {
2647 if (record_btrace_is_replaying (ops, minus_one_ptid))
2648 {
2649 struct thread_info *tp = inferior_thread ();
2650
2651 return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2652 }
2653
2654 return ops->beneath->to_stopped_by_hw_breakpoint (ops->beneath);
2655 }
2656
2657 /* The to_supports_stopped_by_hw_breakpoint method of target
2658 record-btrace. */
2659
2660 static int
2661 record_btrace_supports_stopped_by_hw_breakpoint (struct target_ops *ops)
2662 {
2663 if (record_btrace_is_replaying (ops, minus_one_ptid))
2664 return 1;
2665
2666 return ops->beneath->to_supports_stopped_by_hw_breakpoint (ops->beneath);
2667 }
2668
2669 /* The to_update_thread_list method of target record-btrace. */
2670
2671 static void
2672 record_btrace_update_thread_list (struct target_ops *ops)
2673 {
2674 /* We don't add or remove threads during replay. */
2675 if (record_btrace_is_replaying (ops, minus_one_ptid))
2676 return;
2677
2678 /* Forward the request. */
2679 ops = ops->beneath;
2680 ops->to_update_thread_list (ops);
2681 }
2682
2683 /* The to_thread_alive method of target record-btrace. */
2684
2685 static int
2686 record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
2687 {
2688 /* We don't add or remove threads during replay. */
2689 if (record_btrace_is_replaying (ops, minus_one_ptid))
2690 return find_thread_ptid (ptid) != NULL;
2691
2692 /* Forward the request. */
2693 ops = ops->beneath;
2694 return ops->to_thread_alive (ops, ptid);
2695 }
2696
2697 /* Set the replay branch trace instruction iterator. If IT is NULL, replay
2698 is stopped. */
2699
2700 static void
2701 record_btrace_set_replay (struct thread_info *tp,
2702 const struct btrace_insn_iterator *it)
2703 {
2704 struct btrace_thread_info *btinfo;
2705
2706 btinfo = &tp->btrace;
2707
2708 if (it == NULL || it->function == NULL)
2709 record_btrace_stop_replaying (tp);
2710 else
2711 {
2712 if (btinfo->replay == NULL)
2713 record_btrace_start_replaying (tp);
2714 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
2715 return;
2716
2717 *btinfo->replay = *it;
2718 registers_changed_ptid (tp->ptid);
2719 }
2720
2721 /* Start anew from the new replay position. */
2722 record_btrace_clear_histories (btinfo);
2723
2724 stop_pc = regcache_read_pc (get_current_regcache ());
2725 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
2726 }
2727
2728 /* The to_goto_record_begin method of target record-btrace. */
2729
2730 static void
2731 record_btrace_goto_begin (struct target_ops *self)
2732 {
2733 struct thread_info *tp;
2734 struct btrace_insn_iterator begin;
2735
2736 tp = require_btrace_thread ();
2737
2738 btrace_insn_begin (&begin, &tp->btrace);
2739
2740 /* Skip gaps at the beginning of the trace. */
2741 while (btrace_insn_get (&begin) == NULL)
2742 {
2743 unsigned int steps;
2744
2745 steps = btrace_insn_next (&begin, 1);
2746 if (steps == 0)
2747 error (_("No trace."));
2748 }
2749
2750 record_btrace_set_replay (tp, &begin);
2751 }
2752
2753 /* The to_goto_record_end method of target record-btrace. */
2754
2755 static void
2756 record_btrace_goto_end (struct target_ops *ops)
2757 {
2758 struct thread_info *tp;
2759
2760 tp = require_btrace_thread ();
2761
2762 record_btrace_set_replay (tp, NULL);
2763 }
2764
2765 /* The to_goto_record method of target record-btrace. */
2766
2767 static void
2768 record_btrace_goto (struct target_ops *self, ULONGEST insn)
2769 {
2770 struct thread_info *tp;
2771 struct btrace_insn_iterator it;
2772 unsigned int number;
2773 int found;
2774
2775 number = insn;
2776
2777 /* Check for wrap-arounds. */
2778 if (number != insn)
2779 error (_("Instruction number out of range."));
2780
2781 tp = require_btrace_thread ();
2782
2783 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
2784
2785 /* Check if the instruction could not be found or is a gap. */
2786 if (found == 0 || btrace_insn_get (&it) == NULL)
2787 error (_("No such instruction."));
2788
2789 record_btrace_set_replay (tp, &it);
2790 }
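
/* The wrap-around check above guards the ULONGEST to unsigned int
   narrowing.  Sketch, assuming a 64-bit ULONGEST and a 32-bit
   unsigned int:

     insn   = 0x100000000;    "record goto 4294967296"
     number = insn;           truncates to 0

   NUMBER no longer equals INSN, so the request is rejected before we
   search the trace.  */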
2791
2792 /* The to_record_stop_replaying method of target record-btrace. */
2793
2794 static void
2795 record_btrace_stop_replaying_all (struct target_ops *self)
2796 {
2797 struct thread_info *tp;
2798
2799 ALL_NON_EXITED_THREADS (tp)
2800 record_btrace_stop_replaying (tp);
2801 }
2802
2803 /* The to_execution_direction target method. */
2804
2805 static enum exec_direction_kind
2806 record_btrace_execution_direction (struct target_ops *self)
2807 {
2808 return record_btrace_resume_exec_dir;
2809 }
2810
2811 /* The to_prepare_to_generate_core target method. */
2812
2813 static void
2814 record_btrace_prepare_to_generate_core (struct target_ops *self)
2815 {
2816 record_btrace_generating_corefile = 1;
2817 }
2818
2819 /* The to_done_generating_core target method. */
2820
2821 static void
2822 record_btrace_done_generating_core (struct target_ops *self)
2823 {
2824 record_btrace_generating_corefile = 0;
2825 }
2826
2827 /* Initialize the record-btrace target ops. */
2828
2829 static void
2830 init_record_btrace_ops (void)
2831 {
2832 struct target_ops *ops;
2833
2834 ops = &record_btrace_ops;
2835 ops->to_shortname = "record-btrace";
2836 ops->to_longname = "Branch tracing target";
2837 ops->to_doc = "Collect control-flow trace and provide the execution history.";
2838 ops->to_open = record_btrace_open;
2839 ops->to_close = record_btrace_close;
2840 ops->to_async = record_btrace_async;
2841 ops->to_detach = record_detach;
2842 ops->to_disconnect = record_btrace_disconnect;
2843 ops->to_mourn_inferior = record_mourn_inferior;
2844 ops->to_kill = record_kill;
2845 ops->to_stop_recording = record_btrace_stop_recording;
2846 ops->to_info_record = record_btrace_info;
2847 ops->to_insn_history = record_btrace_insn_history;
2848 ops->to_insn_history_from = record_btrace_insn_history_from;
2849 ops->to_insn_history_range = record_btrace_insn_history_range;
2850 ops->to_call_history = record_btrace_call_history;
2851 ops->to_call_history_from = record_btrace_call_history_from;
2852 ops->to_call_history_range = record_btrace_call_history_range;
2853 ops->to_record_method = record_btrace_record_method;
2854 ops->to_record_is_replaying = record_btrace_is_replaying;
2855 ops->to_record_will_replay = record_btrace_will_replay;
2856 ops->to_record_stop_replaying = record_btrace_stop_replaying_all;
2857 ops->to_xfer_partial = record_btrace_xfer_partial;
2858 ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
2859 ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
2860 ops->to_fetch_registers = record_btrace_fetch_registers;
2861 ops->to_store_registers = record_btrace_store_registers;
2862 ops->to_prepare_to_store = record_btrace_prepare_to_store;
2863 ops->to_get_unwinder = &record_btrace_to_get_unwinder;
2864 ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;
2865 ops->to_resume = record_btrace_resume;
2866 ops->to_commit_resume = record_btrace_commit_resume;
2867 ops->to_wait = record_btrace_wait;
2868 ops->to_stop = record_btrace_stop;
2869 ops->to_update_thread_list = record_btrace_update_thread_list;
2870 ops->to_thread_alive = record_btrace_thread_alive;
2871 ops->to_goto_record_begin = record_btrace_goto_begin;
2872 ops->to_goto_record_end = record_btrace_goto_end;
2873 ops->to_goto_record = record_btrace_goto;
2874 ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
2875 ops->to_stopped_by_sw_breakpoint = record_btrace_stopped_by_sw_breakpoint;
2876 ops->to_supports_stopped_by_sw_breakpoint
2877 = record_btrace_supports_stopped_by_sw_breakpoint;
2878 ops->to_stopped_by_hw_breakpoint = record_btrace_stopped_by_hw_breakpoint;
2879 ops->to_supports_stopped_by_hw_breakpoint
2880 = record_btrace_supports_stopped_by_hw_breakpoint;
2881 ops->to_execution_direction = record_btrace_execution_direction;
2882 ops->to_prepare_to_generate_core = record_btrace_prepare_to_generate_core;
2883 ops->to_done_generating_core = record_btrace_done_generating_core;
2884 ops->to_stratum = record_stratum;
2885 ops->to_magic = OPS_MAGIC;
2886 }
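
/* Target methods not assigned above are filled in by GDB's target
   delegation machinery when the target is registered; record-btrace
   only intercepts the operations it needs for recording, replay, and
   history navigation, and everything else is forwarded to the target
   beneath.  */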
2887
2888 /* Start recording in BTS format. */
2889
2890 static void
2891 cmd_record_btrace_bts_start (char *args, int from_tty)
2892 {
2893 if (args != NULL && *args != 0)
2894 error (_("Invalid argument."));
2895
2896 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2897
2898 TRY
2899 {
2900 execute_command ((char *) "target record-btrace", from_tty);
2901 }
2902 CATCH (exception, RETURN_MASK_ALL)
2903 {
2904 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2905 throw_exception (exception);
2906 }
2907 END_CATCH
2908 }
2909
2910 /* Start recording in Intel Processor Trace format. */
2911
2912 static void
2913 cmd_record_btrace_pt_start (char *args, int from_tty)
2914 {
2915 if (args != NULL && *args != 0)
2916 error (_("Invalid argument."));
2917
2918 record_btrace_conf.format = BTRACE_FORMAT_PT;
2919
2920 TRY
2921 {
2922 execute_command ((char *) "target record-btrace", from_tty);
2923 }
2924 CATCH (exception, RETURN_MASK_ALL)
2925 {
2926 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2927 throw_exception (exception);
2928 }
2929 END_CATCH
2930 }
2931
2932 /* The "record btrace" command: try Intel Processor Trace first, then fall back to BTS. */
2933
2934 static void
2935 cmd_record_btrace_start (char *args, int from_tty)
2936 {
2937 if (args != NULL && *args != 0)
2938 error (_("Invalid argument."));
2939
2940 record_btrace_conf.format = BTRACE_FORMAT_PT;
2941
2942 TRY
2943 {
2944 execute_command ((char *) "target record-btrace", from_tty);
2945 }
2946 CATCH (exception, RETURN_MASK_ALL)
2947 {
2948 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2949
2950 TRY
2951 {
2952 execute_command ((char *) "target record-btrace", from_tty);
2953 }
2954 CATCH (exception, RETURN_MASK_ALL)
2955 {
2956 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2957 throw_exception (exception);
2958 }
2959 END_CATCH
2960 }
2961 END_CATCH
2962 }
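
/* User-level behavior of the command above (illustrative session):

     (gdb) record btrace          tries PT, silently falls back to BTS
     (gdb) record btrace pt       Intel Processor Trace only
     (gdb) record btrace bts      Branch Trace Store only

   Only if both formats fail does the error from the BTS attempt
   propagate to the user.  */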
2963
2964 /* The "set record btrace" command. */
2965
2966 static void
2967 cmd_set_record_btrace (char *args, int from_tty)
2968 {
2969 cmd_show_list (set_record_btrace_cmdlist, from_tty, "");
2970 }
2971
2972 /* The "show record btrace" command. */
2973
2974 static void
2975 cmd_show_record_btrace (char *args, int from_tty)
2976 {
2977 cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
2978 }
2979
2980 /* The "show record btrace replay-memory-access" command. */
2981
2982 static void
2983 cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2984 struct cmd_list_element *c, const char *value)
2985 {
2986 fprintf_filtered (file, _("Replay memory access is %s.\n"),
2987 replay_memory_access);
2988 }
2989
2990 /* The "set record btrace bts" command. */
2991
2992 static void
2993 cmd_set_record_btrace_bts (char *args, int from_tty)
2994 {
2995 printf_unfiltered (_("\"set record btrace bts\" must be followed "
2996 "by an appropriate subcommand.\n"));
2997 help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
2998 all_commands, gdb_stdout);
2999 }
3000
3001 /* The "show record btrace bts" command. */
3002
3003 static void
3004 cmd_show_record_btrace_bts (char *args, int from_tty)
3005 {
3006 cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
3007 }
3008
3009 /* The "set record btrace pt" command. */
3010
3011 static void
3012 cmd_set_record_btrace_pt (char *args, int from_tty)
3013 {
3014 printf_unfiltered (_("\"set record btrace pt\" must be followed "
3015 "by an appropriate subcommand.\n"));
3016 help_list (set_record_btrace_pt_cmdlist, "set record btrace pt ",
3017 all_commands, gdb_stdout);
3018 }
3019
3020 /* The "show record btrace pt" command. */
3021
3022 static void
3023 cmd_show_record_btrace_pt (char *args, int from_tty)
3024 {
3025 cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, "");
3026 }
3027
3028 /* The "record bts buffer-size" show value function. */
3029
3030 static void
3031 show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
3032 struct cmd_list_element *c,
3033 const char *value)
3034 {
3035 fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
3036 value);
3037 }
3038
3039 /* The "record pt buffer-size" show value function. */
3040
3041 static void
3042 show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
3043 struct cmd_list_element *c,
3044 const char *value)
3045 {
3046 fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
3047 value);
3048 }
3049
3050 void _initialize_record_btrace (void);
3051
3052 /* Initialize btrace commands. */
3053
3054 void
3055 _initialize_record_btrace (void)
3056 {
3057 add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
3058 _("Start branch trace recording."), &record_btrace_cmdlist,
3059 "record btrace ", 0, &record_cmdlist);
3060 add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
3061
3062 add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
3063 _("\
3064 Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
3065 The processor stores a from/to record for each branch into a cyclic buffer.\n\
3066 This format may not be available on all processors."),
3067 &record_btrace_cmdlist);
3068 add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);
3069
3070 add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
3071 _("\
3072 Start branch trace recording in Intel Processor Trace format.\n\n\
3073 This format may not be available on all processors."),
3074 &record_btrace_cmdlist);
3075 add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);
3076
3077 add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
3078 _("Set record options"), &set_record_btrace_cmdlist,
3079 "set record btrace ", 0, &set_record_cmdlist);
3080
3081 add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
3082 _("Show record options"), &show_record_btrace_cmdlist,
3083 "show record btrace ", 0, &show_record_cmdlist);
3084
3085 add_setshow_enum_cmd ("replay-memory-access", no_class,
3086 replay_memory_access_types, &replay_memory_access, _("\
3087 Set what memory accesses are allowed during replay."), _("\
3088 Show what memory accesses are allowed during replay."),
3089 _("Default is READ-ONLY.\n\n\
3090 The btrace record target does not trace data.\n\
3091 The memory therefore corresponds to the live target and not \
3092 to the current replay position.\n\n\
3093 When READ-ONLY, allow accesses to read-only memory during replay.\n\
3094 When READ-WRITE, allow accesses to read-only and read-write memory during \
3095 replay."),
3096 NULL, cmd_show_replay_memory_access,
3097 &set_record_btrace_cmdlist,
3098 &show_record_btrace_cmdlist);
3099
3100 add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
3101 _("Set record btrace bts options"),
3102 &set_record_btrace_bts_cmdlist,
3103 "set record btrace bts ", 0, &set_record_btrace_cmdlist);
3104
3105 add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
3106 _("Show record btrace bts options"),
3107 &show_record_btrace_bts_cmdlist,
3108 "show record btrace bts ", 0, &show_record_btrace_cmdlist);
3109
3110 add_setshow_uinteger_cmd ("buffer-size", no_class,
3111 &record_btrace_conf.bts.size,
3112 _("Set the record/replay bts buffer size."),
3113 _("Show the record/replay bts buffer size."), _("\
3114 When starting recording, request a trace buffer of this size. \
3115 The actual buffer size may differ from the requested size. \
3116 Use \"info record\" to see the actual buffer size.\n\n\
3117 Bigger buffers allow longer recording but also take more time to process \
3118 the recorded execution trace.\n\n\
3119 The trace buffer size may not be changed while recording."), NULL,
3120 show_record_bts_buffer_size_value,
3121 &set_record_btrace_bts_cmdlist,
3122 &show_record_btrace_bts_cmdlist);
3123
3124 add_prefix_cmd ("pt", class_support, cmd_set_record_btrace_pt,
3125 _("Set record btrace pt options"),
3126 &set_record_btrace_pt_cmdlist,
3127 "set record btrace pt ", 0, &set_record_btrace_cmdlist);
3128
3129 add_prefix_cmd ("pt", class_support, cmd_show_record_btrace_pt,
3130 _("Show record btrace pt options"),
3131 &show_record_btrace_pt_cmdlist,
3132 "show record btrace pt ", 0, &show_record_btrace_cmdlist);
3133
3134 add_setshow_uinteger_cmd ("buffer-size", no_class,
3135 &record_btrace_conf.pt.size,
3136 _("Set the record/replay pt buffer size."),
3137 _("Show the record/replay pt buffer size."), _("\
3138 Bigger buffers allow longer recording but also take more time to process \
3139 the recorded execution.\n\
3140 The actual buffer size may differ from the requested size. Use \"info record\" \
3141 to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
3142 &set_record_btrace_pt_cmdlist,
3143 &show_record_btrace_pt_cmdlist);
3144
3145 init_record_btrace_ops ();
3146 add_target (&record_btrace_ops);
3147
3148 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
3149 xcalloc, xfree);
3150
3151 record_btrace_conf.bts.size = 64 * 1024;
3152 record_btrace_conf.pt.size = 16 * 1024;
3153 }
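
/* Defaults chosen above: a 64 KiB BTS buffer and a 16 KiB PT buffer.
   Either may be changed before recording starts, e.g. (illustrative):

     (gdb) set record btrace pt buffer-size 1048576
     (gdb) record btrace pt
     (gdb) info record

   "info record" shows the buffer size actually granted, which may
   differ from the requested size.  */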