btrace: Remove btrace disable cleanup
gdb/record-btrace.c
1 /* Branch trace support for GDB, the GNU debugger.
2
3 Copyright (C) 2013-2018 Free Software Foundation, Inc.
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include "record.h"
24 #include "record-btrace.h"
25 #include "gdbthread.h"
26 #include "target.h"
27 #include "gdbcmd.h"
28 #include "disasm.h"
29 #include "observer.h"
30 #include "cli/cli-utils.h"
31 #include "source.h"
32 #include "ui-out.h"
33 #include "symtab.h"
34 #include "filenames.h"
35 #include "regcache.h"
36 #include "frame-unwind.h"
37 #include "hashtab.h"
38 #include "infrun.h"
39 #include "event-loop.h"
40 #include "inf-loop.h"
41 #include "vec.h"
42 #include <algorithm>
43
44 /* The target_ops of record-btrace. */
45 static struct target_ops record_btrace_ops;
46
47 /* A new-thread observer that enables branch tracing for each new thread. */
48 static struct observer *record_btrace_thread_observer;
49
50 /* Memory access types used in set/show record btrace replay-memory-access. */
51 static const char replay_memory_access_read_only[] = "read-only";
52 static const char replay_memory_access_read_write[] = "read-write";
53 static const char *const replay_memory_access_types[] =
54 {
55 replay_memory_access_read_only,
56 replay_memory_access_read_write,
57 NULL
58 };
59
60 /* The currently allowed replay memory access type. */
61 static const char *replay_memory_access = replay_memory_access_read_only;
62
63 /* Command lists for "set/show record btrace". */
64 static struct cmd_list_element *set_record_btrace_cmdlist;
65 static struct cmd_list_element *show_record_btrace_cmdlist;
66
67 /* The execution direction of the last resume we got. See record-full.c. */
68 static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;
69
70 /* The async event handler for reverse/replay execution. */
71 static struct async_event_handler *record_btrace_async_inferior_event_handler;
72
73 /* A flag indicating that we are currently generating a core file. */
74 static int record_btrace_generating_corefile;
75
76 /* The current branch trace configuration. */
77 static struct btrace_config record_btrace_conf;
78
79 /* Command list for "record btrace". */
80 static struct cmd_list_element *record_btrace_cmdlist;
81
82 /* Command lists for "set/show record btrace bts". */
83 static struct cmd_list_element *set_record_btrace_bts_cmdlist;
84 static struct cmd_list_element *show_record_btrace_bts_cmdlist;
85
86 /* Command lists for "set/show record btrace pt". */
87 static struct cmd_list_element *set_record_btrace_pt_cmdlist;
88 static struct cmd_list_element *show_record_btrace_pt_cmdlist;
89
90 /* Print a record-btrace debug message. Use do ... while (0) to avoid
91 ambiguities when used in if statements. */
92
93 #define DEBUG(msg, args...) \
94 do \
95 { \
96 if (record_debug != 0) \
97 fprintf_unfiltered (gdb_stdlog, \
98 "[record-btrace] " msg "\n", ##args); \
99 } \
100 while (0)
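
/* Illustrative note (editorial, not part of the original file): without the
   do ... while (0) wrapper, the macro body would expand to a bare block, and
   the user's trailing semicolon would become a separate empty statement,
   breaking if/else chains:

     if (cond)
       DEBUG ("msg");   /* would expand to '{ ... };'; the ';' ends the if */
     else               /* this 'else' would have no matching 'if' */
       cleanup ();

   The do ... while (0) form is a single statement that consumes the
   trailing semicolon.  */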
101
102
103 /* Update the branch trace for the current thread and return a pointer to its
104 thread_info.
105
106 Throws an error if there is no thread or no trace. This function never
107 returns NULL. */
108
109 static struct thread_info *
110 require_btrace_thread (void)
111 {
112 struct thread_info *tp;
113
114 DEBUG ("require");
115
116 tp = find_thread_ptid (inferior_ptid);
117 if (tp == NULL)
118 error (_("No thread."));
119
120 validate_registers_access ();
121
122 btrace_fetch (tp);
123
124 if (btrace_is_empty (tp))
125 error (_("No trace."));
126
127 return tp;
128 }
129
130 /* Update the branch trace for the current thread and return a pointer to its
131 branch trace information struct.
132
133 Throws an error if there is no thread or no trace. This function never
134 returns NULL. */
135
136 static struct btrace_thread_info *
137 require_btrace (void)
138 {
139 struct thread_info *tp;
140
141 tp = require_btrace_thread ();
142
143 return &tp->btrace;
144 }
145
146 /* Enable branch tracing for one thread. Warn on errors. */
147
148 static void
149 record_btrace_enable_warn (struct thread_info *tp)
150 {
151 TRY
152 {
153 btrace_enable (tp, &record_btrace_conf);
154 }
155 CATCH (error, RETURN_MASK_ERROR)
156 {
157 warning ("%s", error.message);
158 }
159 END_CATCH
160 }
161
162 /* Enable automatic tracing of new threads. */
163
164 static void
165 record_btrace_auto_enable (void)
166 {
167 DEBUG ("attach thread observer");
168
169 record_btrace_thread_observer
170 = observer_attach_new_thread (record_btrace_enable_warn);
171 }
172
173 /* Disable automatic tracing of new threads. */
174
175 static void
176 record_btrace_auto_disable (void)
177 {
178 /* The observer may already have been detached. */
179 if (record_btrace_thread_observer == NULL)
180 return;
181
182 DEBUG ("detach thread observer");
183
184 observer_detach_new_thread (record_btrace_thread_observer);
185 record_btrace_thread_observer = NULL;
186 }
187
188 /* The record-btrace async event handler function. */
189
190 static void
191 record_btrace_handle_async_inferior_event (gdb_client_data data)
192 {
193 inferior_event_handler (INF_REG_EVENT, NULL);
194 }
195
196 /* See record-btrace.h. */
197
198 void
199 record_btrace_push_target (void)
200 {
201 const char *format;
202
203 record_btrace_auto_enable ();
204
205 push_target (&record_btrace_ops);
206
207 record_btrace_async_inferior_event_handler
208 = create_async_event_handler (record_btrace_handle_async_inferior_event,
209 NULL);
210 record_btrace_generating_corefile = 0;
211
212 format = btrace_format_short_string (record_btrace_conf.format);
213 observer_notify_record_changed (current_inferior (), 1, "btrace", format);
214 }
215
216 /* Disable btrace on a set of threads on scope exit. */
217
218 struct scoped_btrace_disable
219 {
220 scoped_btrace_disable () = default;
221
222 DISABLE_COPY_AND_ASSIGN (scoped_btrace_disable);
223
224 ~scoped_btrace_disable ()
225 {
226 for (thread_info *tp : m_threads)
227 btrace_disable (tp);
228 }
229
230 void add_thread (thread_info *thread)
231 {
232 m_threads.push_front (thread);
233 }
234
235 void discard ()
236 {
237 m_threads.clear ();
238 }
239
240 private:
241 std::forward_list<thread_info *> m_threads;
242 };
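
/* Usage sketch (editorial, mirroring record_btrace_open below): enable
   tracing thread by thread and only commit once every enable succeeded.

     scoped_btrace_disable btrace_disable;

     ALL_NON_EXITED_THREADS (tp)
       {
         btrace_enable (tp, &record_btrace_conf);  // may throw
         btrace_disable.add_thread (tp);
       }

     btrace_disable.discard ();  // success; keep tracing enabled

   If btrace_enable throws, the destructor disables tracing for all threads
   added so far, so we never leave a partially enabled set behind.  */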
243
244 /* The to_open method of target record-btrace. */
245
246 static void
247 record_btrace_open (const char *args, int from_tty)
248 {
249 /* If we fail to enable btrace for one thread, disable it for the threads for
250 which it was successfully enabled. */
251 scoped_btrace_disable btrace_disable;
252 struct thread_info *tp;
253
254 DEBUG ("open");
255
256 record_preopen ();
257
258 if (!target_has_execution)
259 error (_("The program is not being run."));
260
261 gdb_assert (record_btrace_thread_observer == NULL);
262
263 ALL_NON_EXITED_THREADS (tp)
264 if (args == NULL || *args == 0 || number_is_in_list (args, tp->global_num))
265 {
266 btrace_enable (tp, &record_btrace_conf);
267
268 btrace_disable.add_thread (tp);
269 }
270
271 record_btrace_push_target ();
272
273 btrace_disable.discard ();
274 }
275
276 /* The to_stop_recording method of target record-btrace. */
277
278 static void
279 record_btrace_stop_recording (struct target_ops *self)
280 {
281 struct thread_info *tp;
282
283 DEBUG ("stop recording");
284
285 record_btrace_auto_disable ();
286
287 ALL_NON_EXITED_THREADS (tp)
288 if (tp->btrace.target != NULL)
289 btrace_disable (tp);
290 }
291
292 /* The to_disconnect method of target record-btrace. */
293
294 static void
295 record_btrace_disconnect (struct target_ops *self, const char *args,
296 int from_tty)
297 {
298 struct target_ops *beneath = self->beneath;
299
300 /* Do not stop recording, just clean up GDB side. */
301 unpush_target (self);
302
303 /* Forward disconnect. */
304 beneath->to_disconnect (beneath, args, from_tty);
305 }
306
307 /* The to_close method of target record-btrace. */
308
309 static void
310 record_btrace_close (struct target_ops *self)
311 {
312 struct thread_info *tp;
313
314 if (record_btrace_async_inferior_event_handler != NULL)
315 delete_async_event_handler (&record_btrace_async_inferior_event_handler);
316
317 /* Make sure automatic recording gets disabled even if we did not stop
318 recording before closing the record-btrace target. */
319 record_btrace_auto_disable ();
320
321 /* We should have already stopped recording.
322 Tear down btrace in case we have not. */
323 ALL_NON_EXITED_THREADS (tp)
324 btrace_teardown (tp);
325 }
326
327 /* The to_async method of target record-btrace. */
328
329 static void
330 record_btrace_async (struct target_ops *ops, int enable)
331 {
332 if (enable)
333 mark_async_event_handler (record_btrace_async_inferior_event_handler);
334 else
335 clear_async_event_handler (record_btrace_async_inferior_event_handler);
336
337 ops->beneath->to_async (ops->beneath, enable);
338 }
339
341 /* Adjust *SIZE and return a human-readable size suffix. */
341
342 static const char *
343 record_btrace_adjust_size (unsigned int *size)
344 {
345 unsigned int sz;
346
347 sz = *size;
348
349 if ((sz & ((1u << 30) - 1)) == 0)
350 {
351 *size = sz >> 30;
352 return "GB";
353 }
354 else if ((sz & ((1u << 20) - 1)) == 0)
355 {
356 *size = sz >> 20;
357 return "MB";
358 }
359 else if ((sz & ((1u << 10) - 1)) == 0)
360 {
361 *size = sz >> 10;
362 return "kB";
363 }
364 else
365 return "";
366 }
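
/* Worked example (editorial): for *size == 2097152 (2 << 20), the GB test
   fails but the MB test succeeds, so *size becomes 2 and "MB" is returned.
   For *size == 1536, no test succeeds (1536 is not a multiple of 1024), so
   *size is left unchanged and "" is returned.  */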
367
368 /* Print a BTS configuration. */
369
370 static void
371 record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
372 {
373 const char *suffix;
374 unsigned int size;
375
376 size = conf->size;
377 if (size > 0)
378 {
379 suffix = record_btrace_adjust_size (&size);
380 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
381 }
382 }
383
384 /* Print an Intel Processor Trace configuration. */
385
386 static void
387 record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
388 {
389 const char *suffix;
390 unsigned int size;
391
392 size = conf->size;
393 if (size > 0)
394 {
395 suffix = record_btrace_adjust_size (&size);
396 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
397 }
398 }
399
400 /* Print a branch tracing configuration. */
401
402 static void
403 record_btrace_print_conf (const struct btrace_config *conf)
404 {
405 printf_unfiltered (_("Recording format: %s.\n"),
406 btrace_format_string (conf->format));
407
408 switch (conf->format)
409 {
410 case BTRACE_FORMAT_NONE:
411 return;
412
413 case BTRACE_FORMAT_BTS:
414 record_btrace_print_bts_conf (&conf->bts);
415 return;
416
417 case BTRACE_FORMAT_PT:
418 record_btrace_print_pt_conf (&conf->pt);
419 return;
420 }
421
422 internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
423 }
424
425 /* The to_info_record method of target record-btrace. */
426
427 static void
428 record_btrace_info (struct target_ops *self)
429 {
430 struct btrace_thread_info *btinfo;
431 const struct btrace_config *conf;
432 struct thread_info *tp;
433 unsigned int insns, calls, gaps;
434
435 DEBUG ("info");
436
437 tp = find_thread_ptid (inferior_ptid);
438 if (tp == NULL)
439 error (_("No thread."));
440
441 validate_registers_access ();
442
443 btinfo = &tp->btrace;
444
445 conf = btrace_conf (btinfo);
446 if (conf != NULL)
447 record_btrace_print_conf (conf);
448
449 btrace_fetch (tp);
450
451 insns = 0;
452 calls = 0;
453 gaps = 0;
454
455 if (!btrace_is_empty (tp))
456 {
457 struct btrace_call_iterator call;
458 struct btrace_insn_iterator insn;
459
460 btrace_call_end (&call, btinfo);
461 btrace_call_prev (&call, 1);
462 calls = btrace_call_number (&call);
463
464 btrace_insn_end (&insn, btinfo);
465 insns = btrace_insn_number (&insn);
466
467 /* If the last instruction is not a gap, it is the current instruction,
468 which is not actually part of the record. */
469 if (btrace_insn_get (&insn) != NULL)
470 insns -= 1;
471
472 gaps = btinfo->ngaps;
473 }
474
475 printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
476 "for thread %s (%s).\n"), insns, calls, gaps,
477 print_thread_id (tp), target_pid_to_str (tp->ptid));
478
479 if (btrace_is_replaying (tp))
480 printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
481 btrace_insn_number (btinfo->replay));
482 }
483
484 /* Print a decode error. */
485
486 static void
487 btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
488 enum btrace_format format)
489 {
490 const char *errstr = btrace_decode_error (format, errcode);
491
492 uiout->text (_("["));
493 /* ERRCODE > 0 indicates notifications on BTRACE_FORMAT_PT. */
494 if (!(format == BTRACE_FORMAT_PT && errcode > 0))
495 {
496 uiout->text (_("decode error ("));
497 uiout->field_int ("errcode", errcode);
498 uiout->text (_("): "));
499 }
500 uiout->text (errstr);
501 uiout->text (_("]\n"));
502 }
503
504 /* Print an unsigned int. */
505
506 static void
507 ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
508 {
509 uiout->field_fmt (fld, "%u", val);
510 }
511
512 /* A range of source lines. */
513
514 struct btrace_line_range
515 {
516 /* The symtab this line is from. */
517 struct symtab *symtab;
518
519 /* The first line (inclusive). */
520 int begin;
521
522 /* The last line (exclusive). */
523 int end;
524 };
525
526 /* Construct a line range. */
527
528 static struct btrace_line_range
529 btrace_mk_line_range (struct symtab *symtab, int begin, int end)
530 {
531 struct btrace_line_range range;
532
533 range.symtab = symtab;
534 range.begin = begin;
535 range.end = end;
536
537 return range;
538 }
539
540 /* Add a line to a line range. */
541
542 static struct btrace_line_range
543 btrace_line_range_add (struct btrace_line_range range, int line)
544 {
545 if (range.end <= range.begin)
546 {
547 /* This is the first entry. */
548 range.begin = line;
549 range.end = line + 1;
550 }
551 else if (line < range.begin)
552 range.begin = line;
553 else if (range.end <= line)
554 range.end = line + 1;
555
556 return range;
557 }
558
559 /* Return non-zero if RANGE is empty, zero otherwise. */
560
561 static int
562 btrace_line_range_is_empty (struct btrace_line_range range)
563 {
564 return range.end <= range.begin;
565 }
566
567 /* Return non-zero if LHS contains RHS, zero otherwise. */
568
569 static int
570 btrace_line_range_contains_range (struct btrace_line_range lhs,
571 struct btrace_line_range rhs)
572 {
573 return ((lhs.symtab == rhs.symtab)
574 && (lhs.begin <= rhs.begin)
575 && (rhs.end <= lhs.end));
576 }
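
/* Example (editorial): with both ranges in the same symtab, [10; 15)
   contains [12; 14) but not [12; 16); ranges from different symtabs never
   contain one another.  */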
577
578 /* Find the line range associated with PC. */
579
580 static struct btrace_line_range
581 btrace_find_line_range (CORE_ADDR pc)
582 {
583 struct btrace_line_range range;
584 struct linetable_entry *lines;
585 struct linetable *ltable;
586 struct symtab *symtab;
587 int nlines, i;
588
589 symtab = find_pc_line_symtab (pc);
590 if (symtab == NULL)
591 return btrace_mk_line_range (NULL, 0, 0);
592
593 ltable = SYMTAB_LINETABLE (symtab);
594 if (ltable == NULL)
595 return btrace_mk_line_range (symtab, 0, 0);
596
597 nlines = ltable->nitems;
598 lines = ltable->item;
599 if (nlines <= 0)
600 return btrace_mk_line_range (symtab, 0, 0);
601
602 range = btrace_mk_line_range (symtab, 0, 0);
603 for (i = 0; i < nlines - 1; i++)
604 {
605 if ((lines[i].pc == pc) && (lines[i].line != 0))
606 range = btrace_line_range_add (range, lines[i].line);
607 }
608
609 return range;
610 }
611
612 /* Print source lines in LINES to UIOUT.
613
614 UI_ITEM_CHAIN is a cleanup chain for the last source line and the
615 instructions corresponding to that source line. When printing a new source
616 line, we do the cleanups for the open chain and open a new cleanup chain for
617 the new source line. If the source line range in LINES is not empty, this
618 function will leave the cleanup chain for the last printed source line open
619 so instructions can be added to it. */
620
621 static void
622 btrace_print_lines (struct btrace_line_range lines, struct ui_out *uiout,
623 struct cleanup **ui_item_chain, gdb_disassembly_flags flags)
624 {
625 print_source_lines_flags psl_flags;
626 int line;
627
628 psl_flags = 0;
629 if (flags & DISASSEMBLY_FILENAME)
630 psl_flags |= PRINT_SOURCE_LINES_FILENAME;
631
632 for (line = lines.begin; line < lines.end; ++line)
633 {
634 if (*ui_item_chain != NULL)
635 do_cleanups (*ui_item_chain);
636
637 *ui_item_chain
638 = make_cleanup_ui_out_tuple_begin_end (uiout, "src_and_asm_line");
639
640 print_source_lines (lines.symtab, line, line + 1, psl_flags);
641
642 make_cleanup_ui_out_list_begin_end (uiout, "line_asm_insn");
643 }
644 }
645
646 /* Disassemble a section of the recorded instruction trace. */
647
648 static void
649 btrace_insn_history (struct ui_out *uiout,
650 const struct btrace_thread_info *btinfo,
651 const struct btrace_insn_iterator *begin,
652 const struct btrace_insn_iterator *end,
653 gdb_disassembly_flags flags)
654 {
655 struct cleanup *cleanups, *ui_item_chain;
656 struct gdbarch *gdbarch;
657 struct btrace_insn_iterator it;
658 struct btrace_line_range last_lines;
659
660 DEBUG ("itrace (0x%x): [%u; %u)", (unsigned) flags,
661 btrace_insn_number (begin), btrace_insn_number (end));
662
663 flags |= DISASSEMBLY_SPECULATIVE;
664
665 gdbarch = target_gdbarch ();
666 last_lines = btrace_mk_line_range (NULL, 0, 0);
667
668 cleanups = make_cleanup_ui_out_list_begin_end (uiout, "asm_insns");
669
670 /* UI_ITEM_CHAIN is a cleanup chain for the last source line and the
671 instructions corresponding to that line. */
672 ui_item_chain = NULL;
673
674 gdb_pretty_print_disassembler disasm (gdbarch);
675
676 for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
677 {
678 const struct btrace_insn *insn;
679
680 insn = btrace_insn_get (&it);
681
682 /* A NULL instruction indicates a gap in the trace. */
683 if (insn == NULL)
684 {
685 const struct btrace_config *conf;
686
687 conf = btrace_conf (btinfo);
688
689 /* We have trace, so we must have a configuration. */
690 gdb_assert (conf != NULL);
691
692 uiout->field_fmt ("insn-number", "%u",
693 btrace_insn_number (&it));
694 uiout->text ("\t");
695
696 btrace_ui_out_decode_error (uiout, btrace_insn_get_error (&it),
697 conf->format);
698 }
699 else
700 {
701 struct disasm_insn dinsn;
702
703 if ((flags & DISASSEMBLY_SOURCE) != 0)
704 {
705 struct btrace_line_range lines;
706
707 lines = btrace_find_line_range (insn->pc);
708 if (!btrace_line_range_is_empty (lines)
709 && !btrace_line_range_contains_range (last_lines, lines))
710 {
711 btrace_print_lines (lines, uiout, &ui_item_chain, flags);
712 last_lines = lines;
713 }
714 else if (ui_item_chain == NULL)
715 {
716 ui_item_chain
717 = make_cleanup_ui_out_tuple_begin_end (uiout,
718 "src_and_asm_line");
719 /* No source information. */
720 make_cleanup_ui_out_list_begin_end (uiout, "line_asm_insn");
721 }
722
723 gdb_assert (ui_item_chain != NULL);
724 }
725
726 memset (&dinsn, 0, sizeof (dinsn));
727 dinsn.number = btrace_insn_number (&it);
728 dinsn.addr = insn->pc;
729
730 if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
731 dinsn.is_speculative = 1;
732
733 disasm.pretty_print_insn (uiout, &dinsn, flags);
734 }
735 }
736
737 do_cleanups (cleanups);
738 }
739
740 /* The to_insn_history method of target record-btrace. */
741
742 static void
743 record_btrace_insn_history (struct target_ops *self, int size,
744 gdb_disassembly_flags flags)
745 {
746 struct btrace_thread_info *btinfo;
747 struct btrace_insn_history *history;
748 struct btrace_insn_iterator begin, end;
749 struct ui_out *uiout;
750 unsigned int context, covered;
751
752 uiout = current_uiout;
753 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
754 context = abs (size);
755 if (context == 0)
756 error (_("Bad record instruction-history-size."));
757
758 btinfo = require_btrace ();
759 history = btinfo->insn_history;
760 if (history == NULL)
761 {
762 struct btrace_insn_iterator *replay;
763
764 DEBUG ("insn-history (0x%x): %d", (unsigned) flags, size);
765
766 /* If we're replaying, we start at the replay position. Otherwise, we
767 start at the tail of the trace. */
768 replay = btinfo->replay;
769 if (replay != NULL)
770 begin = *replay;
771 else
772 btrace_insn_end (&begin, btinfo);
773
774 /* We start from here and expand in the requested direction. Then we
775 expand in the other direction, as well, to fill up any remaining
776 context. */
777 end = begin;
778 if (size < 0)
779 {
780 /* We want the current position covered, as well. */
781 covered = btrace_insn_next (&end, 1);
782 covered += btrace_insn_prev (&begin, context - covered);
783 covered += btrace_insn_next (&end, context - covered);
784 }
785 else
786 {
787 covered = btrace_insn_next (&end, context);
788 covered += btrace_insn_prev (&begin, context - covered);
789 }
790 }
791 else
792 {
793 begin = history->begin;
794 end = history->end;
795
796 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", (unsigned) flags, size,
797 btrace_insn_number (&begin), btrace_insn_number (&end));
798
799 if (size < 0)
800 {
801 end = begin;
802 covered = btrace_insn_prev (&begin, context);
803 }
804 else
805 {
806 begin = end;
807 covered = btrace_insn_next (&end, context);
808 }
809 }
810
811 if (covered > 0)
812 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
813 else
814 {
815 if (size < 0)
816 printf_unfiltered (_("At the start of the branch trace record.\n"));
817 else
818 printf_unfiltered (_("At the end of the branch trace record.\n"));
819 }
820
821 btrace_set_insn_history (btinfo, &begin, &end);
822 }
823
824 /* The to_insn_history_range method of target record-btrace. */
825
826 static void
827 record_btrace_insn_history_range (struct target_ops *self,
828 ULONGEST from, ULONGEST to,
829 gdb_disassembly_flags flags)
830 {
831 struct btrace_thread_info *btinfo;
832 struct btrace_insn_iterator begin, end;
833 struct ui_out *uiout;
834 unsigned int low, high;
835 int found;
836
837 uiout = current_uiout;
838 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
839 low = from;
840 high = to;
841
842 DEBUG ("insn-history (0x%x): [%u; %u)", (unsigned) flags, low, high);
843
844 /* Check for wrap-arounds. */
845 if (low != from || high != to)
846 error (_("Bad range."));
847
848 if (high < low)
849 error (_("Bad range."));
850
851 btinfo = require_btrace ();
852
853 found = btrace_find_insn_by_number (&begin, btinfo, low);
854 if (found == 0)
855 error (_("Range out of bounds."));
856
857 found = btrace_find_insn_by_number (&end, btinfo, high);
858 if (found == 0)
859 {
860 /* Silently truncate the range. */
861 btrace_insn_end (&end, btinfo);
862 }
863 else
864 {
865 /* We want both begin and end to be inclusive. */
866 btrace_insn_next (&end, 1);
867 }
868
869 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
870 btrace_set_insn_history (btinfo, &begin, &end);
871 }
872
873 /* The to_insn_history_from method of target record-btrace. */
874
875 static void
876 record_btrace_insn_history_from (struct target_ops *self,
877 ULONGEST from, int size,
878 gdb_disassembly_flags flags)
879 {
880 ULONGEST begin, end, context;
881
882 context = abs (size);
883 if (context == 0)
884 error (_("Bad record instruction-history-size."));
885
886 if (size < 0)
887 {
888 end = from;
889
890 if (from < context)
891 begin = 0;
892 else
893 begin = from - context + 1;
894 }
895 else
896 {
897 begin = from;
898 end = from + context - 1;
899
900 /* Check for wrap-around. */
901 if (end < begin)
902 end = ULONGEST_MAX;
903 }
904
905 record_btrace_insn_history_range (self, begin, end, flags);
906 }
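
/* Worked example (editorial): with FROM == 100 and SIZE == -10, CONTEXT is
   10 and the requested range is [91; 100]; with FROM == 100 and SIZE == 10,
   it is [100; 109].  The addition for END saturates at ULONGEST_MAX rather
   than wrapping around.  */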
907
908 /* Print the instruction number range for a function call history line. */
909
910 static void
911 btrace_call_history_insn_range (struct ui_out *uiout,
912 const struct btrace_function *bfun)
913 {
914 unsigned int begin, end, size;
915
916 size = bfun->insn.size ();
917 gdb_assert (size > 0);
918
919 begin = bfun->insn_offset;
920 end = begin + size - 1;
921
922 ui_out_field_uint (uiout, "insn begin", begin);
923 uiout->text (",");
924 ui_out_field_uint (uiout, "insn end", end);
925 }
926
927 /* Compute the lowest and highest source line for the instructions in BFUN
928 and return them in PBEGIN and PEND.
929 Ignore instructions that can't be mapped to BFUN, e.g. instructions that
930 result from inlining or macro expansion. */
931
932 static void
933 btrace_compute_src_line_range (const struct btrace_function *bfun,
934 int *pbegin, int *pend)
935 {
936 struct symtab *symtab;
937 struct symbol *sym;
938 int begin, end;
939
940 begin = INT_MAX;
941 end = INT_MIN;
942
943 sym = bfun->sym;
944 if (sym == NULL)
945 goto out;
946
947 symtab = symbol_symtab (sym);
948
949 for (const btrace_insn &insn : bfun->insn)
950 {
951 struct symtab_and_line sal;
952
953 sal = find_pc_line (insn.pc, 0);
954 if (sal.symtab != symtab || sal.line == 0)
955 continue;
956
957 begin = std::min (begin, sal.line);
958 end = std::max (end, sal.line);
959 }
960
961 out:
962 *pbegin = begin;
963 *pend = end;
964 }
965
966 /* Print the source line information for a function call history line. */
967
968 static void
969 btrace_call_history_src_line (struct ui_out *uiout,
970 const struct btrace_function *bfun)
971 {
972 struct symbol *sym;
973 int begin, end;
974
975 sym = bfun->sym;
976 if (sym == NULL)
977 return;
978
979 uiout->field_string ("file",
980 symtab_to_filename_for_display (symbol_symtab (sym)));
981
982 btrace_compute_src_line_range (bfun, &begin, &end);
983 if (end < begin)
984 return;
985
986 uiout->text (":");
987 uiout->field_int ("min line", begin);
988
989 if (end == begin)
990 return;
991
992 uiout->text (",");
993 uiout->field_int ("max line", end);
994 }
995
996 /* Get the name of a branch trace function. */
997
998 static const char *
999 btrace_get_bfun_name (const struct btrace_function *bfun)
1000 {
1001 struct minimal_symbol *msym;
1002 struct symbol *sym;
1003
1004 if (bfun == NULL)
1005 return "??";
1006
1007 msym = bfun->msym;
1008 sym = bfun->sym;
1009
1010 if (sym != NULL)
1011 return SYMBOL_PRINT_NAME (sym);
1012 else if (msym != NULL)
1013 return MSYMBOL_PRINT_NAME (msym);
1014 else
1015 return "??";
1016 }
1017
1018 /* Print a section of the recorded function trace. */
1019
1020 static void
1021 btrace_call_history (struct ui_out *uiout,
1022 const struct btrace_thread_info *btinfo,
1023 const struct btrace_call_iterator *begin,
1024 const struct btrace_call_iterator *end,
1025 int int_flags)
1026 {
1027 struct btrace_call_iterator it;
1028 record_print_flags flags = (enum record_print_flag) int_flags;
1029
1030 DEBUG ("ftrace (0x%x): [%u; %u)", int_flags, btrace_call_number (begin),
1031 btrace_call_number (end));
1032
1033 for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
1034 {
1035 const struct btrace_function *bfun;
1036 struct minimal_symbol *msym;
1037 struct symbol *sym;
1038
1039 bfun = btrace_call_get (&it);
1040 sym = bfun->sym;
1041 msym = bfun->msym;
1042
1043 /* Print the function index. */
1044 ui_out_field_uint (uiout, "index", bfun->number);
1045 uiout->text ("\t");
1046
1047 /* Indicate gaps in the trace. */
1048 if (bfun->errcode != 0)
1049 {
1050 const struct btrace_config *conf;
1051
1052 conf = btrace_conf (btinfo);
1053
1054 /* We have trace, so we must have a configuration. */
1055 gdb_assert (conf != NULL);
1056
1057 btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);
1058
1059 continue;
1060 }
1061
1062 if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
1063 {
1064 int level = bfun->level + btinfo->level, i;
1065
1066 for (i = 0; i < level; ++i)
1067 uiout->text (" ");
1068 }
1069
1070 if (sym != NULL)
1071 uiout->field_string ("function", SYMBOL_PRINT_NAME (sym));
1072 else if (msym != NULL)
1073 uiout->field_string ("function", MSYMBOL_PRINT_NAME (msym));
1074 else if (!uiout->is_mi_like_p ())
1075 uiout->field_string ("function", "??");
1076
1077 if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
1078 {
1079 uiout->text (_("\tinst "));
1080 btrace_call_history_insn_range (uiout, bfun);
1081 }
1082
1083 if ((flags & RECORD_PRINT_SRC_LINE) != 0)
1084 {
1085 uiout->text (_("\tat "));
1086 btrace_call_history_src_line (uiout, bfun);
1087 }
1088
1089 uiout->text ("\n");
1090 }
1091 }
1092
1093 /* The to_call_history method of target record-btrace. */
1094
1095 static void
1096 record_btrace_call_history (struct target_ops *self, int size,
1097 record_print_flags flags)
1098 {
1099 struct btrace_thread_info *btinfo;
1100 struct btrace_call_history *history;
1101 struct btrace_call_iterator begin, end;
1102 struct ui_out *uiout;
1103 unsigned int context, covered;
1104
1105 uiout = current_uiout;
1106 ui_out_emit_tuple tuple_emitter (uiout, "func history");
1107 context = abs (size);
1108 if (context == 0)
1109 error (_("Bad record function-call-history-size."));
1110
1111 btinfo = require_btrace ();
1112 history = btinfo->call_history;
1113 if (history == NULL)
1114 {
1115 struct btrace_insn_iterator *replay;
1116
1117 DEBUG ("call-history (0x%x): %d", (int) flags, size);
1118
1119 /* If we're replaying, we start at the replay position. Otherwise, we
1120 start at the tail of the trace. */
1121 replay = btinfo->replay;
1122 if (replay != NULL)
1123 {
1124 begin.btinfo = btinfo;
1125 begin.index = replay->call_index;
1126 }
1127 else
1128 btrace_call_end (&begin, btinfo);
1129
1130 /* We start from here and expand in the requested direction. Then we
1131 expand in the other direction, as well, to fill up any remaining
1132 context. */
1133 end = begin;
1134 if (size < 0)
1135 {
1136 /* We want the current position covered, as well. */
1137 covered = btrace_call_next (&end, 1);
1138 covered += btrace_call_prev (&begin, context - covered);
1139 covered += btrace_call_next (&end, context - covered);
1140 }
1141 else
1142 {
1143 covered = btrace_call_next (&end, context);
1144 covered += btrace_call_prev (&begin, context - covered);
1145 }
1146 }
1147 else
1148 {
1149 begin = history->begin;
1150 end = history->end;
1151
1152 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", (int) flags, size,
1153 btrace_call_number (&begin), btrace_call_number (&end));
1154
1155 if (size < 0)
1156 {
1157 end = begin;
1158 covered = btrace_call_prev (&begin, context);
1159 }
1160 else
1161 {
1162 begin = end;
1163 covered = btrace_call_next (&end, context);
1164 }
1165 }
1166
1167 if (covered > 0)
1168 btrace_call_history (uiout, btinfo, &begin, &end, flags);
1169 else
1170 {
1171 if (size < 0)
1172 printf_unfiltered (_("At the start of the branch trace record.\n"));
1173 else
1174 printf_unfiltered (_("At the end of the branch trace record.\n"));
1175 }
1176
1177 btrace_set_call_history (btinfo, &begin, &end);
1178 }
1179
1180 /* The to_call_history_range method of target record-btrace. */
1181
1182 static void
1183 record_btrace_call_history_range (struct target_ops *self,
1184 ULONGEST from, ULONGEST to,
1185 record_print_flags flags)
1186 {
1187 struct btrace_thread_info *btinfo;
1188 struct btrace_call_iterator begin, end;
1189 struct ui_out *uiout;
1190 unsigned int low, high;
1191 int found;
1192
1193 uiout = current_uiout;
1194 ui_out_emit_tuple tuple_emitter (uiout, "func history");
1195 low = from;
1196 high = to;
1197
1198 DEBUG ("call-history (0x%x): [%u; %u)", (int) flags, low, high);
1199
1200 /* Check for wrap-arounds. */
1201 if (low != from || high != to)
1202 error (_("Bad range."));
1203
1204 if (high < low)
1205 error (_("Bad range."));
1206
1207 btinfo = require_btrace ();
1208
1209 found = btrace_find_call_by_number (&begin, btinfo, low);
1210 if (found == 0)
1211 error (_("Range out of bounds."));
1212
1213 found = btrace_find_call_by_number (&end, btinfo, high);
1214 if (found == 0)
1215 {
1216 /* Silently truncate the range. */
1217 btrace_call_end (&end, btinfo);
1218 }
1219 else
1220 {
1221 /* We want both begin and end to be inclusive. */
1222 btrace_call_next (&end, 1);
1223 }
1224
1225 btrace_call_history (uiout, btinfo, &begin, &end, flags);
1226 btrace_set_call_history (btinfo, &begin, &end);
1227 }
1228
1229 /* The to_call_history_from method of target record-btrace. */
1230
1231 static void
1232 record_btrace_call_history_from (struct target_ops *self,
1233 ULONGEST from, int size,
1234 record_print_flags flags)
1235 {
1236 ULONGEST begin, end, context;
1237
1238 context = abs (size);
1239 if (context == 0)
1240 error (_("Bad record function-call-history-size."));
1241
1242 if (size < 0)
1243 {
1244 end = from;
1245
1246 if (from < context)
1247 begin = 0;
1248 else
1249 begin = from - context + 1;
1250 }
1251 else
1252 {
1253 begin = from;
1254 end = from + context - 1;
1255
1256 /* Check for wrap-around. */
1257 if (end < begin)
1258 end = ULONGEST_MAX;
1259 }
1260
1261 record_btrace_call_history_range (self, begin, end, flags);
1262 }
1263
1264 /* The to_record_method method of target record-btrace. */
1265
1266 static enum record_method
1267 record_btrace_record_method (struct target_ops *self, ptid_t ptid)
1268 {
1269 struct thread_info * const tp = find_thread_ptid (ptid);
1270
1271 if (tp == NULL)
1272 error (_("No thread."));
1273
1274 if (tp->btrace.target == NULL)
1275 return RECORD_METHOD_NONE;
1276
1277 return RECORD_METHOD_BTRACE;
1278 }
1279
1280 /* The to_record_is_replaying method of target record-btrace. */
1281
1282 static int
1283 record_btrace_is_replaying (struct target_ops *self, ptid_t ptid)
1284 {
1285 struct thread_info *tp;
1286
1287 ALL_NON_EXITED_THREADS (tp)
1288 if (ptid_match (tp->ptid, ptid) && btrace_is_replaying (tp))
1289 return 1;
1290
1291 return 0;
1292 }
1293
1294 /* The to_record_will_replay method of target record-btrace. */
1295
1296 static int
1297 record_btrace_will_replay (struct target_ops *self, ptid_t ptid, int dir)
1298 {
1299 return dir == EXEC_REVERSE || record_btrace_is_replaying (self, ptid);
1300 }
1301
1302 /* The to_xfer_partial method of target record-btrace. */
1303
1304 static enum target_xfer_status
1305 record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
1306 const char *annex, gdb_byte *readbuf,
1307 const gdb_byte *writebuf, ULONGEST offset,
1308 ULONGEST len, ULONGEST *xfered_len)
1309 {
1310 /* Filter out requests that don't make sense during replay. */
1311 if (replay_memory_access == replay_memory_access_read_only
1312 && !record_btrace_generating_corefile
1313 && record_btrace_is_replaying (ops, inferior_ptid))
1314 {
1315 switch (object)
1316 {
1317 case TARGET_OBJECT_MEMORY:
1318 {
1319 struct target_section *section;
1320
1321 /* We do not allow writing memory in general. */
1322 if (writebuf != NULL)
1323 {
1324 *xfered_len = len;
1325 return TARGET_XFER_UNAVAILABLE;
1326 }
1327
1328 /* We allow reading readonly memory. */
1329 section = target_section_by_addr (ops, offset);
1330 if (section != NULL)
1331 {
1332 /* Check if the section we found is readonly. */
1333 if ((bfd_get_section_flags (section->the_bfd_section->owner,
1334 section->the_bfd_section)
1335 & SEC_READONLY) != 0)
1336 {
1337 /* Truncate the request to fit into this section. */
1338 len = std::min (len, section->endaddr - offset);
1339 break;
1340 }
1341 }
1342
1343 *xfered_len = len;
1344 return TARGET_XFER_UNAVAILABLE;
1345 }
1346 }
1347 }
1348
1349 /* Forward the request. */
1350 ops = ops->beneath;
1351 return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
1352 offset, len, xfered_len);
1353 }
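
/* Example (editorial): while replaying with the default
   "set record btrace replay-memory-access read-only" setting, any write and
   any read from a writable section report TARGET_XFER_UNAVAILABLE; a read
   that falls into a SEC_READONLY section (e.g. .text) is forwarded to the
   target beneath, truncated to the section end.  */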
1354
1355 /* The to_insert_breakpoint method of target record-btrace. */
1356
1357 static int
1358 record_btrace_insert_breakpoint (struct target_ops *ops,
1359 struct gdbarch *gdbarch,
1360 struct bp_target_info *bp_tgt)
1361 {
1362 const char *old;
1363 int ret;
1364
1365 /* Inserting breakpoints requires accessing memory. Allow it for the
1366 duration of this function. */
1367 old = replay_memory_access;
1368 replay_memory_access = replay_memory_access_read_write;
1369
1370 ret = 0;
1371 TRY
1372 {
1373 ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);
1374 }
1375 CATCH (except, RETURN_MASK_ALL)
1376 {
1377 replay_memory_access = old;
1378 throw_exception (except);
1379 }
1380 END_CATCH
1381 replay_memory_access = old;
1382
1383 return ret;
1384 }
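
/* Editorial note: the explicit save/restore of replay_memory_access around
   TRY/CATCH above (and in the removal path below) could alternatively be
   written with GDB's scoped_restore, assuming it is available here:

     scoped_restore restore_access
       = make_scoped_restore (&replay_memory_access,
                              replay_memory_access_read_write);

   which restores the previous value on every exit path, including throws.  */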
1385
1386 /* The to_remove_breakpoint method of target record-btrace. */
1387
1388 static int
1389 record_btrace_remove_breakpoint (struct target_ops *ops,
1390 struct gdbarch *gdbarch,
1391 struct bp_target_info *bp_tgt,
1392 enum remove_bp_reason reason)
1393 {
1394 const char *old;
1395 int ret;
1396
1397 /* Removing breakpoints requires accessing memory. Allow it for the
1398 duration of this function. */
1399 old = replay_memory_access;
1400 replay_memory_access = replay_memory_access_read_write;
1401
1402 ret = 0;
1403 TRY
1404 {
1405 ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt,
1406 reason);
1407 }
1408 CATCH (except, RETURN_MASK_ALL)
1409 {
1410 replay_memory_access = old;
1411 throw_exception (except);
1412 }
1413 END_CATCH
1414 replay_memory_access = old;
1415
1416 return ret;
1417 }
1418
1419 /* The to_fetch_registers method of target record-btrace. */
1420
1421 static void
1422 record_btrace_fetch_registers (struct target_ops *ops,
1423 struct regcache *regcache, int regno)
1424 {
1425 struct btrace_insn_iterator *replay;
1426 struct thread_info *tp;
1427
1428 tp = find_thread_ptid (regcache_get_ptid (regcache));
1429 gdb_assert (tp != NULL);
1430
1431 replay = tp->btrace.replay;
1432 if (replay != NULL && !record_btrace_generating_corefile)
1433 {
1434 const struct btrace_insn *insn;
1435 struct gdbarch *gdbarch;
1436 int pcreg;
1437
1438 gdbarch = regcache->arch ();
1439 pcreg = gdbarch_pc_regnum (gdbarch);
1440 if (pcreg < 0)
1441 return;
1442
1443 /* We can only provide the PC register. */
1444 if (regno >= 0 && regno != pcreg)
1445 return;
1446
1447 insn = btrace_insn_get (replay);
1448 gdb_assert (insn != NULL);
1449
1450 regcache_raw_supply (regcache, regno, &insn->pc);
1451 }
1452 else
1453 {
1454 struct target_ops *t = ops->beneath;
1455
1456 t->to_fetch_registers (t, regcache, regno);
1457 }
1458 }
1459
1460 /* The to_store_registers method of target record-btrace. */
1461
1462 static void
1463 record_btrace_store_registers (struct target_ops *ops,
1464 struct regcache *regcache, int regno)
1465 {
1466 struct target_ops *t;
1467
1468 if (!record_btrace_generating_corefile
1469 && record_btrace_is_replaying (ops, regcache_get_ptid (regcache)))
1470 error (_("Cannot write registers while replaying."));
1471
1472 gdb_assert (may_write_registers != 0);
1473
1474 t = ops->beneath;
1475 t->to_store_registers (t, regcache, regno);
1476 }
1477
1478 /* The to_prepare_to_store method of target record-btrace. */
1479
1480 static void
1481 record_btrace_prepare_to_store (struct target_ops *ops,
1482 struct regcache *regcache)
1483 {
1484 struct target_ops *t;
1485
1486 if (!record_btrace_generating_corefile
1487 && record_btrace_is_replaying (ops, regcache_get_ptid (regcache)))
1488 return;
1489
1490 t = ops->beneath;
1491 t->to_prepare_to_store (t, regcache);
1492 }
1493
1494 /* The branch trace frame cache. */
1495
1496 struct btrace_frame_cache
1497 {
1498 /* The thread. */
1499 struct thread_info *tp;
1500
1501 /* The frame info. */
1502 struct frame_info *frame;
1503
1504 /* The branch trace function segment. */
1505 const struct btrace_function *bfun;
1506 };
1507
1508 /* A struct btrace_frame_cache hash table indexed by FRAME. */
1509
1510 static htab_t bfcache;
1511
1512 /* hash_f for htab_create_alloc of bfcache. */
1513
1514 static hashval_t
1515 bfcache_hash (const void *arg)
1516 {
1517 const struct btrace_frame_cache *cache
1518 = (const struct btrace_frame_cache *) arg;
1519
1520 return htab_hash_pointer (cache->frame);
1521 }
1522
1523 /* eq_f for htab_create_alloc of bfcache. */
1524
1525 static int
1526 bfcache_eq (const void *arg1, const void *arg2)
1527 {
1528 const struct btrace_frame_cache *cache1
1529 = (const struct btrace_frame_cache *) arg1;
1530 const struct btrace_frame_cache *cache2
1531 = (const struct btrace_frame_cache *) arg2;
1532
1533 return cache1->frame == cache2->frame;
1534 }
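
/* Setup sketch (editorial; assuming the usual libiberty hash table idiom
   used elsewhere in GDB):

     bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq,
                                  NULL, xcalloc, xfree);

   Frames hash and compare by pointer identity, so a lookup only needs a
   pattern object with its FRAME member filled in (see
   btrace_get_frame_function below).  */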
1535
1536 /* Create a new btrace frame cache. */
1537
1538 static struct btrace_frame_cache *
1539 bfcache_new (struct frame_info *frame)
1540 {
1541 struct btrace_frame_cache *cache;
1542 void **slot;
1543
1544 cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
1545 cache->frame = frame;
1546
1547 slot = htab_find_slot (bfcache, cache, INSERT);
1548 gdb_assert (*slot == NULL);
1549 *slot = cache;
1550
1551 return cache;
1552 }
1553
1554 /* Extract the branch trace function from a branch trace frame. */
1555
1556 static const struct btrace_function *
1557 btrace_get_frame_function (struct frame_info *frame)
1558 {
1559 const struct btrace_frame_cache *cache;
1560 struct btrace_frame_cache pattern;
1561 void **slot;
1562
1563 pattern.frame = frame;
1564
1565 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1566 if (slot == NULL)
1567 return NULL;
1568
1569 cache = (const struct btrace_frame_cache *) *slot;
1570 return cache->bfun;
1571 }
1572
1573 /* Implement stop_reason method for record_btrace_frame_unwind. */
1574
1575 static enum unwind_stop_reason
1576 record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1577 void **this_cache)
1578 {
1579 const struct btrace_frame_cache *cache;
1580 const struct btrace_function *bfun;
1581
1582 cache = (const struct btrace_frame_cache *) *this_cache;
1583 bfun = cache->bfun;
1584 gdb_assert (bfun != NULL);
1585
1586 if (bfun->up == 0)
1587 return UNWIND_UNAVAILABLE;
1588
1589 return UNWIND_NO_REASON;
1590 }
1591
1592 /* Implement this_id method for record_btrace_frame_unwind. */
1593
1594 static void
1595 record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
1596 struct frame_id *this_id)
1597 {
1598 const struct btrace_frame_cache *cache;
1599 const struct btrace_function *bfun;
1600 struct btrace_call_iterator it;
1601 CORE_ADDR code, special;
1602
1603 cache = (const struct btrace_frame_cache *) *this_cache;
1604
1605 bfun = cache->bfun;
1606 gdb_assert (bfun != NULL);
1607
1608 while (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->prev) != 0)
1609 bfun = btrace_call_get (&it);
1610
1611 code = get_frame_func (this_frame);
1612 special = bfun->number;
1613
1614 *this_id = frame_id_build_unavailable_stack_special (code, special);
1615
1616 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1617 btrace_get_bfun_name (cache->bfun),
1618 core_addr_to_string_nz (this_id->code_addr),
1619 core_addr_to_string_nz (this_id->special_addr));
1620 }
1621
1622 /* Implement prev_register method for record_btrace_frame_unwind. */
1623
1624 static struct value *
1625 record_btrace_frame_prev_register (struct frame_info *this_frame,
1626 void **this_cache,
1627 int regnum)
1628 {
1629 const struct btrace_frame_cache *cache;
1630 const struct btrace_function *bfun, *caller;
1631 struct btrace_call_iterator it;
1632 struct gdbarch *gdbarch;
1633 CORE_ADDR pc;
1634 int pcreg;
1635
1636 gdbarch = get_frame_arch (this_frame);
1637 pcreg = gdbarch_pc_regnum (gdbarch);
1638 if (pcreg < 0 || regnum != pcreg)
1639 throw_error (NOT_AVAILABLE_ERROR,
1640 _("Registers are not available in btrace record history"));
1641
1642 cache = (const struct btrace_frame_cache *) *this_cache;
1643 bfun = cache->bfun;
1644 gdb_assert (bfun != NULL);
1645
1646 if (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->up) == 0)
1647 throw_error (NOT_AVAILABLE_ERROR,
1648 _("No caller in btrace record history"));
1649
1650 caller = btrace_call_get (&it);
1651
1652 if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
1653 pc = caller->insn.front ().pc;
1654 else
1655 {
1656 pc = caller->insn.back ().pc;
1657 pc += gdb_insn_length (gdbarch, pc);
1658 }
1659
1660 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1661 btrace_get_bfun_name (bfun), bfun->level,
1662 core_addr_to_string_nz (pc));
1663
1664 return frame_unwind_got_address (this_frame, regnum, pc);
1665 }
1666
1667 /* Implement sniffer method for record_btrace_frame_unwind. */
1668
1669 static int
1670 record_btrace_frame_sniffer (const struct frame_unwind *self,
1671 struct frame_info *this_frame,
1672 void **this_cache)
1673 {
1674 const struct btrace_function *bfun;
1675 struct btrace_frame_cache *cache;
1676 struct thread_info *tp;
1677 struct frame_info *next;
1678
1679 /* THIS_FRAME does not contain a reference to its thread. */
1680 tp = find_thread_ptid (inferior_ptid);
1681 gdb_assert (tp != NULL);
1682
1683 bfun = NULL;
1684 next = get_next_frame (this_frame);
1685 if (next == NULL)
1686 {
1687 const struct btrace_insn_iterator *replay;
1688
1689 replay = tp->btrace.replay;
1690 if (replay != NULL)
1691 bfun = &replay->btinfo->functions[replay->call_index];
1692 }
1693 else
1694 {
1695 const struct btrace_function *callee;
1696 struct btrace_call_iterator it;
1697
1698 callee = btrace_get_frame_function (next);
1699 if (callee == NULL || (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
1700 return 0;
1701
1702 if (btrace_find_call_by_number (&it, &tp->btrace, callee->up) == 0)
1703 return 0;
1704
1705 bfun = btrace_call_get (&it);
1706 }
1707
1708 if (bfun == NULL)
1709 return 0;
1710
1711 DEBUG ("[frame] sniffed frame for %s on level %d",
1712 btrace_get_bfun_name (bfun), bfun->level);
1713
1714 /* This is our frame. Initialize the frame cache. */
1715 cache = bfcache_new (this_frame);
1716 cache->tp = tp;
1717 cache->bfun = bfun;
1718
1719 *this_cache = cache;
1720 return 1;
1721 }
1722
1723 /* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1724
1725 static int
1726 record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
1727 struct frame_info *this_frame,
1728 void **this_cache)
1729 {
1730 const struct btrace_function *bfun, *callee;
1731 struct btrace_frame_cache *cache;
1732 struct btrace_call_iterator it;
1733 struct frame_info *next;
1734 struct thread_info *tinfo;
1735
1736 next = get_next_frame (this_frame);
1737 if (next == NULL)
1738 return 0;
1739
1740 callee = btrace_get_frame_function (next);
1741 if (callee == NULL)
1742 return 0;
1743
1744 if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1745 return 0;
1746
1747 tinfo = find_thread_ptid (inferior_ptid);
1748 if (btrace_find_call_by_number (&it, &tinfo->btrace, callee->up) == 0)
1749 return 0;
1750
1751 bfun = btrace_call_get (&it);
1752
1753 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1754 btrace_get_bfun_name (bfun), bfun->level);
1755
1756 /* This is our frame. Initialize the frame cache. */
1757 cache = bfcache_new (this_frame);
1758 cache->tp = tinfo;
1759 cache->bfun = bfun;
1760
1761 *this_cache = cache;
1762 return 1;
1763 }
1764
1765 static void
1766 record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
1767 {
1768 struct btrace_frame_cache *cache;
1769 void **slot;
1770
1771 cache = (struct btrace_frame_cache *) this_cache;
1772
1773 slot = htab_find_slot (bfcache, cache, NO_INSERT);
1774 gdb_assert (slot != NULL);
1775
1776 htab_remove_elt (bfcache, cache);
1777 }
1778
1779 /* Btrace recording stores neither previous memory content nor the contents
1780 of stack frames. Any unwinding would return erroneous results as the
1781 stack contents no longer match the changed PC value restored from history.
1782 Therefore this unwinder reports any possibly unwound registers as
1783 <unavailable>. */
1784
1785 const struct frame_unwind record_btrace_frame_unwind =
1786 {
1787 NORMAL_FRAME,
1788 record_btrace_frame_unwind_stop_reason,
1789 record_btrace_frame_this_id,
1790 record_btrace_frame_prev_register,
1791 NULL,
1792 record_btrace_frame_sniffer,
1793 record_btrace_frame_dealloc_cache
1794 };
1795
1796 const struct frame_unwind record_btrace_tailcall_frame_unwind =
1797 {
1798 TAILCALL_FRAME,
1799 record_btrace_frame_unwind_stop_reason,
1800 record_btrace_frame_this_id,
1801 record_btrace_frame_prev_register,
1802 NULL,
1803 record_btrace_tailcall_frame_sniffer,
1804 record_btrace_frame_dealloc_cache
1805 };
1806
1807 /* Implement the to_get_unwinder method. */
1808
1809 static const struct frame_unwind *
1810 record_btrace_to_get_unwinder (struct target_ops *self)
1811 {
1812 return &record_btrace_frame_unwind;
1813 }
1814
1815 /* Implement the to_get_tailcall_unwinder method. */
1816
1817 static const struct frame_unwind *
1818 record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
1819 {
1820 return &record_btrace_tailcall_frame_unwind;
1821 }
1822
1823 /* Return a human-readable string for FLAG. */
1824
1825 static const char *
1826 btrace_thread_flag_to_str (enum btrace_thread_flag flag)
1827 {
1828 switch (flag)
1829 {
1830 case BTHR_STEP:
1831 return "step";
1832
1833 case BTHR_RSTEP:
1834 return "reverse-step";
1835
1836 case BTHR_CONT:
1837 return "cont";
1838
1839 case BTHR_RCONT:
1840 return "reverse-cont";
1841
1842 case BTHR_STOP:
1843 return "stop";
1844 }
1845
1846 return "<invalid>";
1847 }
1848
1849 /* Indicate that TP should be resumed according to FLAG. */
1850
1851 static void
1852 record_btrace_resume_thread (struct thread_info *tp,
1853 enum btrace_thread_flag flag)
1854 {
1855 struct btrace_thread_info *btinfo;
1856
1857 DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp),
1858 target_pid_to_str (tp->ptid), flag, btrace_thread_flag_to_str (flag));
1859
1860 btinfo = &tp->btrace;
1861
1862 /* Fetch the latest branch trace. */
1863 btrace_fetch (tp);
1864
1865 /* A resume request overwrites a preceding resume or stop request. */
1866 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
1867 btinfo->flags |= flag;
1868 }
1869
1870 /* Get the current frame for TP. */
1871
1872 static struct frame_info *
1873 get_thread_current_frame (struct thread_info *tp)
1874 {
1875 struct frame_info *frame;
1876 ptid_t old_inferior_ptid;
1877 int executing;
1878
1879 /* Set INFERIOR_PTID, which is implicitly used by get_current_frame. */
1880 old_inferior_ptid = inferior_ptid;
1881 inferior_ptid = tp->ptid;
1882
1883 /* Clear the executing flag to allow changes to the current frame.
1884 We are not actually running, yet. We just started a reverse execution
1885 command or a record goto command.
1886 For the latter, EXECUTING is false and this has no effect.
1887 For the former, EXECUTING is true and we're in to_wait, about to
1888 move the thread. Since we need to recompute the stack, we temporarily
1889 set EXECUTING to false. */
1890 executing = is_executing (inferior_ptid);
1891 set_executing (inferior_ptid, 0);
1892
1893 frame = NULL;
1894 TRY
1895 {
1896 frame = get_current_frame ();
1897 }
1898 CATCH (except, RETURN_MASK_ALL)
1899 {
1900 /* Restore the previous execution state. */
1901 set_executing (inferior_ptid, executing);
1902
1903 /* Restore the previous inferior_ptid. */
1904 inferior_ptid = old_inferior_ptid;
1905
1906 throw_exception (except);
1907 }
1908 END_CATCH
1909
1910 /* Restore the previous execution state. */
1911 set_executing (inferior_ptid, executing);
1912
1913 /* Restore the previous inferior_ptid. */
1914 inferior_ptid = old_inferior_ptid;
1915
1916 return frame;
1917 }
1918
1919 /* Start replaying a thread. */
1920
1921 static struct btrace_insn_iterator *
1922 record_btrace_start_replaying (struct thread_info *tp)
1923 {
1924 struct btrace_insn_iterator *replay;
1925 struct btrace_thread_info *btinfo;
1926
1927 btinfo = &tp->btrace;
1928 replay = NULL;
1929
1930 /* We can't start replaying without trace. */
1931 if (btinfo->functions.empty ())
1932 return NULL;
1933
1934 /* GDB stores the current frame_id when stepping in order to detect steps
1935 into subroutines.
1936 Since frames are computed differently when we're replaying, we need to
1937 recompute those stored frames and fix them up so we can still detect
1938 subroutines after we started replaying. */
1939 TRY
1940 {
1941 struct frame_info *frame;
1942 struct frame_id frame_id;
1943 int upd_step_frame_id, upd_step_stack_frame_id;
1944
1945 /* The current frame without replaying - computed via normal unwind. */
1946 frame = get_thread_current_frame (tp);
1947 frame_id = get_frame_id (frame);
1948
1949 /* Check if we need to update any stepping-related frame id's. */
1950 upd_step_frame_id = frame_id_eq (frame_id,
1951 tp->control.step_frame_id);
1952 upd_step_stack_frame_id = frame_id_eq (frame_id,
1953 tp->control.step_stack_frame_id);
1954
1955 /* We start replaying at the end of the branch trace. This corresponds
1956 to the current instruction. */
1957 replay = XNEW (struct btrace_insn_iterator);
1958 btrace_insn_end (replay, btinfo);
1959
1960 /* Skip gaps at the end of the trace. */
1961 while (btrace_insn_get (replay) == NULL)
1962 {
1963 unsigned int steps;
1964
1965 steps = btrace_insn_prev (replay, 1);
1966 if (steps == 0)
1967 error (_("No trace."));
1968 }
1969
1970 /* We're not replaying, yet. */
1971 gdb_assert (btinfo->replay == NULL);
1972 btinfo->replay = replay;
1973
1974 /* Make sure we're not using any stale registers. */
1975 registers_changed_ptid (tp->ptid);
1976
1977 /* The current frame with replaying - computed via btrace unwind. */
1978 frame = get_thread_current_frame (tp);
1979 frame_id = get_frame_id (frame);
1980
1981 /* Replace stepping related frames where necessary. */
1982 if (upd_step_frame_id)
1983 tp->control.step_frame_id = frame_id;
1984 if (upd_step_stack_frame_id)
1985 tp->control.step_stack_frame_id = frame_id;
1986 }
1987 CATCH (except, RETURN_MASK_ALL)
1988 {
1989 xfree (btinfo->replay);
1990 btinfo->replay = NULL;
1991
1992 registers_changed_ptid (tp->ptid);
1993
1994 throw_exception (except);
1995 }
1996 END_CATCH
1997
1998 return replay;
1999 }
2000
2001 /* Stop replaying a thread. */
2002
2003 static void
2004 record_btrace_stop_replaying (struct thread_info *tp)
2005 {
2006 struct btrace_thread_info *btinfo;
2007
2008 btinfo = &tp->btrace;
2009
2010 xfree (btinfo->replay);
2011 btinfo->replay = NULL;
2012
2013 /* Make sure we're not leaving any stale registers. */
2014 registers_changed_ptid (tp->ptid);
2015 }
2016
2017 /* Stop replaying TP if it is at the end of its execution history. */
2018
2019 static void
2020 record_btrace_stop_replaying_at_end (struct thread_info *tp)
2021 {
2022 struct btrace_insn_iterator *replay, end;
2023 struct btrace_thread_info *btinfo;
2024
2025 btinfo = &tp->btrace;
2026 replay = btinfo->replay;
2027
2028 if (replay == NULL)
2029 return;
2030
2031 btrace_insn_end (&end, btinfo);
2032
2033 if (btrace_insn_cmp (replay, &end) == 0)
2034 record_btrace_stop_replaying (tp);
2035 }
2036
2037 /* The to_resume method of target record-btrace. */
2038
2039 static void
2040 record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
2041 enum gdb_signal signal)
2042 {
2043 struct thread_info *tp;
2044 enum btrace_thread_flag flag, cflag;
2045
2046 DEBUG ("resume %s: %s%s", target_pid_to_str (ptid),
2047 execution_direction == EXEC_REVERSE ? "reverse-" : "",
2048 step ? "step" : "cont");
2049
2050 /* Store the execution direction of the last resume.
2051
2052 If there is more than one to_resume call, we have to rely on infrun
2053 to not change the execution direction in-between. */
2054 record_btrace_resume_exec_dir = execution_direction;
2055
2056 /* As long as we're not replaying, just forward the request.
2057
2058 For non-stop targets this means that no thread is replaying. In order to
2059 make progress, we may need to explicitly move replaying threads to the end
2060 of their execution history. */
2061 if ((execution_direction != EXEC_REVERSE)
2062 && !record_btrace_is_replaying (ops, minus_one_ptid))
2063 {
2064 ops = ops->beneath;
2065 ops->to_resume (ops, ptid, step, signal);
2066 return;
2067 }
2068
2069 /* Compute the btrace thread flag for the requested move. */
2070 if (execution_direction == EXEC_REVERSE)
2071 {
2072 flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;
2073 cflag = BTHR_RCONT;
2074 }
2075 else
2076 {
2077 flag = step == 0 ? BTHR_CONT : BTHR_STEP;
2078 cflag = BTHR_CONT;
2079 }
2080
2081 /* We just indicate the resume intent here. The actual stepping happens in
2082 record_btrace_wait below.
2083
2084 For all-stop targets, we only step INFERIOR_PTID and continue others. */
2085 if (!target_is_non_stop_p ())
2086 {
2087 gdb_assert (ptid_match (inferior_ptid, ptid));
2088
2089 ALL_NON_EXITED_THREADS (tp)
2090 if (ptid_match (tp->ptid, ptid))
2091 {
2092 if (ptid_match (tp->ptid, inferior_ptid))
2093 record_btrace_resume_thread (tp, flag);
2094 else
2095 record_btrace_resume_thread (tp, cflag);
2096 }
2097 }
2098 else
2099 {
2100 ALL_NON_EXITED_THREADS (tp)
2101 if (ptid_match (tp->ptid, ptid))
2102 record_btrace_resume_thread (tp, flag);
2103 }
2104
2105 /* Async support. */
2106 if (target_can_async_p ())
2107 {
2108 target_async (1);
2109 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2110 }
2111 }
2112
2113 /* The to_commit_resume method of target record-btrace. */
2114
2115 static void
2116 record_btrace_commit_resume (struct target_ops *ops)
2117 {
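/* Mirror the check in record_btrace_resume: the resume request was only
   forwarded to the target beneath if we are moving forward and no thread
   is replaying. Only then is there anything to commit. */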
2118 if ((execution_direction != EXEC_REVERSE)
2119 && !record_btrace_is_replaying (ops, minus_one_ptid))
2120 ops->beneath->to_commit_resume (ops->beneath);
2121 }
2122
2123 /* Cancel resuming TP. */
2124
2125 static void
2126 record_btrace_cancel_resume (struct thread_info *tp)
2127 {
2128 enum btrace_thread_flag flags;
2129
2130 flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
2131 if (flags == 0)
2132 return;
2133
2134 DEBUG ("cancel resume thread %s (%s): %x (%s)",
2135 print_thread_id (tp),
2136 target_pid_to_str (tp->ptid), flags,
2137 btrace_thread_flag_to_str (flags));
2138
2139 tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
2140 record_btrace_stop_replaying_at_end (tp);
2141 }
2142
2143 /* Return a target_waitstatus indicating that we ran out of history. */
2144
2145 static struct target_waitstatus
2146 btrace_step_no_history (void)
2147 {
2148 struct target_waitstatus status;
2149
2150 status.kind = TARGET_WAITKIND_NO_HISTORY;
2151
2152 return status;
2153 }
2154
2155 /* Return a target_waitstatus indicating that a step finished. */
2156
2157 static struct target_waitstatus
2158 btrace_step_stopped (void)
2159 {
2160 struct target_waitstatus status;
2161
2162 status.kind = TARGET_WAITKIND_STOPPED;
2163 status.value.sig = GDB_SIGNAL_TRAP;
2164
2165 return status;
2166 }
2167
2168 /* Return a target_waitstatus indicating that a thread was stopped as
2169 requested. */
2170
2171 static struct target_waitstatus
2172 btrace_step_stopped_on_request (void)
2173 {
2174 struct target_waitstatus status;
2175
2176 status.kind = TARGET_WAITKIND_STOPPED;
2177 status.value.sig = GDB_SIGNAL_0;
2178
2179 return status;
2180 }
2181
2182 /* Return a target_waitstatus indicating a spurious stop. */
2183
2184 static struct target_waitstatus
2185 btrace_step_spurious (void)
2186 {
2187 struct target_waitstatus status;
2188
2189 status.kind = TARGET_WAITKIND_SPURIOUS;
2190
2191 return status;
2192 }
2193
2194 /* Return a target_waitstatus indicating that the thread was not resumed. */
2195
2196 static struct target_waitstatus
2197 btrace_step_no_resumed (void)
2198 {
2199 struct target_waitstatus status;
2200
2201 status.kind = TARGET_WAITKIND_NO_RESUMED;
2202
2203 return status;
2204 }
2205
2206 /* Return a target_waitstatus indicating that we should wait again. */
2207
2208 static struct target_waitstatus
2209 btrace_step_again (void)
2210 {
2211 struct target_waitstatus status;
2212
2213 status.kind = TARGET_WAITKIND_IGNORE;
2214
2215 return status;
2216 }
2217
2218 /* Clear the record histories. */
2219
2220 static void
2221 record_btrace_clear_histories (struct btrace_thread_info *btinfo)
2222 {
2223 xfree (btinfo->insn_history);
2224 xfree (btinfo->call_history);
2225
2226 btinfo->insn_history = NULL;
2227 btinfo->call_history = NULL;
2228 }
2229
2230 /* Check whether TP's current replay position is at a breakpoint. */
2231
2232 static int
2233 record_btrace_replay_at_breakpoint (struct thread_info *tp)
2234 {
2235 struct btrace_insn_iterator *replay;
2236 struct btrace_thread_info *btinfo;
2237 const struct btrace_insn *insn;
2238 struct inferior *inf;
2239
2240 btinfo = &tp->btrace;
2241 replay = btinfo->replay;
2242
2243 if (replay == NULL)
2244 return 0;
2245
2246 insn = btrace_insn_get (replay);
2247 if (insn == NULL)
2248 return 0;
2249
2250 inf = find_inferior_ptid (tp->ptid);
2251 if (inf == NULL)
2252 return 0;
2253
2254 return record_check_stopped_by_breakpoint (inf->aspace, insn->pc,
2255 &btinfo->stop_reason);
2256 }
2257
2258 /* Step one instruction in forward direction. */
2259
2260 static struct target_waitstatus
2261 record_btrace_single_step_forward (struct thread_info *tp)
2262 {
2263 struct btrace_insn_iterator *replay, end, start;
2264 struct btrace_thread_info *btinfo;
2265
2266 btinfo = &tp->btrace;
2267 replay = btinfo->replay;
2268
2269 /* We're done if we're not replaying. */
2270 if (replay == NULL)
2271 return btrace_step_no_history ();
2272
2273 /* Check if we're stepping a breakpoint. */
2274 if (record_btrace_replay_at_breakpoint (tp))
2275 return btrace_step_stopped ();
2276
2277 /* Skip gaps during replay. If we end up at a gap (at the end of the trace),
2278 jump back to the instruction at which we started. */
2279 start = *replay;
2280 do
2281 {
2282 unsigned int steps;
2283
2284 /* We will bail out here if we continue stepping after reaching the end
2285 of the execution history. */
2286 steps = btrace_insn_next (replay, 1);
2287 if (steps == 0)
2288 {
2289 *replay = start;
2290 return btrace_step_no_history ();
2291 }
2292 }
2293 while (btrace_insn_get (replay) == NULL);
2294
2295 /* Determine the end of the instruction trace. */
2296 btrace_insn_end (&end, btinfo);
2297
2298 /* The execution trace contains (and ends with) the current instruction.
2299 This instruction has not been executed yet, so the trace really ends
2300 one instruction earlier. */
2301 if (btrace_insn_cmp (replay, &end) == 0)
2302 return btrace_step_no_history ();
2303
2304 return btrace_step_spurious ();
2305 }
2306
2307 /* Step one instruction in backward direction. */
2308
2309 static struct target_waitstatus
2310 record_btrace_single_step_backward (struct thread_info *tp)
2311 {
2312 struct btrace_insn_iterator *replay, start;
2313 struct btrace_thread_info *btinfo;
2314
2315 btinfo = &tp->btrace;
2316 replay = btinfo->replay;
2317
2318 /* Start replaying if we're not already doing so. */
2319 if (replay == NULL)
2320 replay = record_btrace_start_replaying (tp);
2321
2322 /* If we can't step any further, we reached the end of the history.
2323 Skip gaps during replay. If we end up at a gap (at the beginning of
2324 the trace), jump back to the instruction at which we started. */
2325 start = *replay;
2326 do
2327 {
2328 unsigned int steps;
2329
2330 steps = btrace_insn_prev (replay, 1);
2331 if (steps == 0)
2332 {
2333 *replay = start;
2334 return btrace_step_no_history ();
2335 }
2336 }
2337 while (btrace_insn_get (replay) == NULL);
2338
2339 /* Check if we're stepping a breakpoint.
2340
2341 For reverse-stepping, this check is after the step. There is logic in
2342 infrun.c that handles reverse-stepping separately. See, for example,
2343 proceed and adjust_pc_after_break.
2344
2345 This code assumes that for reverse-stepping, PC points to the last
2346 de-executed instruction, whereas for forward-stepping PC points to the
2347 next to-be-executed instruction. */
2348 if (record_btrace_replay_at_breakpoint (tp))
2349 return btrace_step_stopped ();
2350
2351 return btrace_step_spurious ();
2352 }
2353
2354 /* Step a single thread. */
2355
2356 static struct target_waitstatus
2357 record_btrace_step_thread (struct thread_info *tp)
2358 {
2359 struct btrace_thread_info *btinfo;
2360 struct target_waitstatus status;
2361 enum btrace_thread_flag flags;
2362
2363 btinfo = &tp->btrace;
2364
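/* Consume this thread's pending move/stop request. For continue requests
   (BTHR_CONT/BTHR_RCONT), the flag is re-queued below until an event is
   actually reported. */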
2365 flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
2366 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
2367
2368 DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp),
2369 target_pid_to_str (tp->ptid), flags,
2370 btrace_thread_flag_to_str (flags));
2371
2372 /* We can't step without an execution history. */
2373 if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
2374 return btrace_step_no_history ();
2375
2376 switch (flags)
2377 {
2378 default:
2379 internal_error (__FILE__, __LINE__, _("invalid stepping type."));
2380
2381 case BTHR_STOP:
2382 return btrace_step_stopped_on_request ();
2383
2384 case BTHR_STEP:
2385 status = record_btrace_single_step_forward (tp);
2386 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2387 break;
2388
2389 return btrace_step_stopped ();
2390
2391 case BTHR_RSTEP:
2392 status = record_btrace_single_step_backward (tp);
2393 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2394 break;
2395
2396 return btrace_step_stopped ();
2397
2398 case BTHR_CONT:
2399 status = record_btrace_single_step_forward (tp);
2400 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2401 break;
2402
2403 btinfo->flags |= flags;
2404 return btrace_step_again ();
2405
2406 case BTHR_RCONT:
2407 status = record_btrace_single_step_backward (tp);
2408 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2409 break;
2410
2411 btinfo->flags |= flags;
2412 return btrace_step_again ();
2413 }
2414
2415 /* We keep threads moving at the end of their execution history. The to_wait
2416 method will stop the thread for which the event is reported. */
2417 if (status.kind == TARGET_WAITKIND_NO_HISTORY)
2418 btinfo->flags |= flags;
2419
2420 return status;
2421 }
2422
2423 /* A vector of threads. */
2424
2425 typedef struct thread_info * tp_t;
2426 DEF_VEC_P (tp_t);
2427
2428 /* Announce further events if necessary. */
2429
2430 static void
2431 record_btrace_maybe_mark_async_event (const VEC (tp_t) *moving,
2432 const VEC (tp_t) *no_history)
2433 {
2434 int more_moving, more_no_history;
2435
2436 more_moving = !VEC_empty (tp_t, moving);
2437 more_no_history = !VEC_empty (tp_t, no_history);
2438
2439 if (!more_moving && !more_no_history)
2440 return;
2441
2442 if (more_moving)
2443 DEBUG ("movers pending");
2444
2445 if (more_no_history)
2446 DEBUG ("no-history pending");
2447
2448 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2449 }
2450
2451 /* The to_wait method of target record-btrace. */
2452
2453 static ptid_t
2454 record_btrace_wait (struct target_ops *ops, ptid_t ptid,
2455 struct target_waitstatus *status, int options)
2456 {
2457 VEC (tp_t) *moving, *no_history;
2458 struct thread_info *tp, *eventing;
2459 struct cleanup *cleanups = make_cleanup (null_cleanup, NULL);
2460
2461 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);
2462
2463 /* As long as we're not replaying, just forward the request. */
2464 if ((execution_direction != EXEC_REVERSE)
2465 && !record_btrace_is_replaying (ops, minus_one_ptid))
2466 {
2467 ops = ops->beneath;
2468 return ops->to_wait (ops, ptid, status, options);
2469 }
2470
2471 moving = NULL;
2472 no_history = NULL;
2473
2474 make_cleanup (VEC_cleanup (tp_t), &moving);
2475 make_cleanup (VEC_cleanup (tp_t), &no_history);
2476
2477 /* Keep a work list of moving threads. */
2478 ALL_NON_EXITED_THREADS (tp)
2479 if (ptid_match (tp->ptid, ptid)
2480 && ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0))
2481 VEC_safe_push (tp_t, moving, tp);
2482
2483 if (VEC_empty (tp_t, moving))
2484 {
2485 *status = btrace_step_no_resumed ();
2486
2487 DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid),
2488 target_waitstatus_to_string (status).c_str ());
2489
2490 do_cleanups (cleanups);
2491 return null_ptid;
2492 }
2493
2494 /* Step moving threads one by one, one step each, until either one thread
2495 reports an event or we run out of threads to step.
2496
2497 When stepping more than one thread, chances are that some threads reach
2498 the end of their execution history earlier than others. If we reported
2499 this immediately, all-stop on top of non-stop would stop all threads and
2500 resume the same threads next time. And we would report the same thread
2501 having reached the end of its execution history again.
2502
2503 In the worst case, this would starve the other threads. But even if other
2504 threads would be allowed to make progress, this would result in far too
2505 many intermediate stops.
2506
2507 We therefore delay the reporting of "no execution history" until we have
2508 nothing else to report. By this time, all threads should have moved to
2509 either the beginning or the end of their execution history. There will
2510 be a single user-visible stop. */
2511 eventing = NULL;
2512 while ((eventing == NULL) && !VEC_empty (tp_t, moving))
2513 {
2514 unsigned int ix;
2515
2516 ix = 0;
2517 while ((eventing == NULL) && VEC_iterate (tp_t, moving, ix, tp))
2518 {
2519 *status = record_btrace_step_thread (tp);
2520
2521 switch (status->kind)
2522 {
2523 case TARGET_WAITKIND_IGNORE:
2524 ix++;
2525 break;
2526
2527 case TARGET_WAITKIND_NO_HISTORY:
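/* Move TP from MOVING to NO_HISTORY. The ordered remove shifts the
   remaining elements down, so IX already indexes the next candidate
   and must not be incremented. */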
2528 VEC_safe_push (tp_t, no_history,
2529 VEC_ordered_remove (tp_t, moving, ix));
2530 break;
2531
2532 default:
2533 eventing = VEC_unordered_remove (tp_t, moving, ix);
2534 break;
2535 }
2536 }
2537 }
2538
2539 if (eventing == NULL)
2540 {
2541 /* We started with at least one moving thread. This thread must have
2542 either stopped or reached the end of its execution history.
2543
2544 In the former case, EVENTING must not be NULL.
2545 In the latter case, NO_HISTORY must not be empty. */
2546 gdb_assert (!VEC_empty (tp_t, no_history));
2547
2548 /* We kept threads moving at the end of their execution history. Stop
2549 EVENTING now that we are going to report its stop. */
2550 eventing = VEC_unordered_remove (tp_t, no_history, 0);
2551 eventing->btrace.flags &= ~BTHR_MOVE;
2552
2553 *status = btrace_step_no_history ();
2554 }
2555
2556 gdb_assert (eventing != NULL);
2557
2558 /* We kept threads replaying at the end of their execution history. Stop
2559 replaying EVENTING now that we are going to report its stop. */
2560 record_btrace_stop_replaying_at_end (eventing);
2561
2562 /* Stop all other threads. */
2563 if (!target_is_non_stop_p ())
2564 ALL_NON_EXITED_THREADS (tp)
2565 record_btrace_cancel_resume (tp);
2566
2567 /* In async mode, we need to announce further events. */
2568 if (target_is_async_p ())
2569 record_btrace_maybe_mark_async_event (moving, no_history);
2570
2571 /* Start record histories anew from the current position. */
2572 record_btrace_clear_histories (&eventing->btrace);
2573
2574 /* We moved the replay position but did not update registers. */
2575 registers_changed_ptid (eventing->ptid);
2576
2577 DEBUG ("wait ended by thread %s (%s): %s",
2578 print_thread_id (eventing),
2579 target_pid_to_str (eventing->ptid),
2580 target_waitstatus_to_string (status).c_str ());
2581
2582 do_cleanups (cleanups);
2583 return eventing->ptid;
2584 }
2585
2586 /* The to_stop method of target record-btrace. */
2587
2588 static void
2589 record_btrace_stop (struct target_ops *ops, ptid_t ptid)
2590 {
2591 DEBUG ("stop %s", target_pid_to_str (ptid));
2592
2593 /* As long as we're not replaying, just forward the request. */
2594 if ((execution_direction != EXEC_REVERSE)
2595 && !record_btrace_is_replaying (ops, minus_one_ptid))
2596 {
2597 ops = ops->beneath;
2598 ops->to_stop (ops, ptid);
2599 }
2600 else
2601 {
2602 struct thread_info *tp;
2603
2604 ALL_NON_EXITED_THREADS (tp)
2605 if (ptid_match (tp->ptid, ptid))
2606 {
2607 tp->btrace.flags &= ~BTHR_MOVE;
2608 tp->btrace.flags |= BTHR_STOP;
2609 }
2610 }
2611 }
2612
2613 /* The to_can_execute_reverse method of target record-btrace. */
2614
2615 static int
2616 record_btrace_can_execute_reverse (struct target_ops *self)
2617 {
2618 return 1;
2619 }
2620
2621 /* The to_stopped_by_sw_breakpoint method of target record-btrace. */
2622
2623 static int
2624 record_btrace_stopped_by_sw_breakpoint (struct target_ops *ops)
2625 {
2626 if (record_btrace_is_replaying (ops, minus_one_ptid))
2627 {
2628 struct thread_info *tp = inferior_thread ();
2629
2630 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2631 }
2632
2633 return ops->beneath->to_stopped_by_sw_breakpoint (ops->beneath);
2634 }
2635
2636 /* The to_supports_stopped_by_sw_breakpoint method of target
2637 record-btrace. */
2638
2639 static int
2640 record_btrace_supports_stopped_by_sw_breakpoint (struct target_ops *ops)
2641 {
2642 if (record_btrace_is_replaying (ops, minus_one_ptid))
2643 return 1;
2644
2645 return ops->beneath->to_supports_stopped_by_sw_breakpoint (ops->beneath);
2646 }
2647
2648 /* The to_stopped_by_hw_breakpoint method of target record-btrace. */
2649
2650 static int
2651 record_btrace_stopped_by_hw_breakpoint (struct target_ops *ops)
2652 {
2653 if (record_btrace_is_replaying (ops, minus_one_ptid))
2654 {
2655 struct thread_info *tp = inferior_thread ();
2656
2657 return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2658 }
2659
2660 return ops->beneath->to_stopped_by_hw_breakpoint (ops->beneath);
2661 }
2662
2663 /* The to_supports_stopped_by_hw_breakpoint method of target
2664 record-btrace. */
2665
2666 static int
2667 record_btrace_supports_stopped_by_hw_breakpoint (struct target_ops *ops)
2668 {
2669 if (record_btrace_is_replaying (ops, minus_one_ptid))
2670 return 1;
2671
2672 return ops->beneath->to_supports_stopped_by_hw_breakpoint (ops->beneath);
2673 }
2674
2675 /* The to_update_thread_list method of target record-btrace. */
2676
2677 static void
2678 record_btrace_update_thread_list (struct target_ops *ops)
2679 {
2680 /* We don't add or remove threads during replay. */
2681 if (record_btrace_is_replaying (ops, minus_one_ptid))
2682 return;
2683
2684 /* Forward the request. */
2685 ops = ops->beneath;
2686 ops->to_update_thread_list (ops);
2687 }
2688
2689 /* The to_thread_alive method of target record-btrace. */
2690
2691 static int
2692 record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
2693 {
2694 /* We don't add or remove threads during replay. */
2695 if (record_btrace_is_replaying (ops, minus_one_ptid))
2696 return find_thread_ptid (ptid) != NULL;
2697
2698 /* Forward the request. */
2699 ops = ops->beneath;
2700 return ops->to_thread_alive (ops, ptid);
2701 }
2702
2703 /* Set the replay branch trace instruction iterator. If IT is NULL, replay
2704 is stopped. */
2705
2706 static void
2707 record_btrace_set_replay (struct thread_info *tp,
2708 const struct btrace_insn_iterator *it)
2709 {
2710 struct btrace_thread_info *btinfo;
2711
2712 btinfo = &tp->btrace;
2713
2714 if (it == NULL)
2715 record_btrace_stop_replaying (tp);
2716 else
2717 {
2718 if (btinfo->replay == NULL)
2719 record_btrace_start_replaying (tp);
2720 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
2721 return;
2722
2723 *btinfo->replay = *it;
2724 registers_changed_ptid (tp->ptid);
2725 }
2726
2727 /* Start anew from the new replay position. */
2728 record_btrace_clear_histories (btinfo);
2729
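/* Publish the new location: update the cached stop PC and print the
   frame at the new replay position, as a normal stop would. */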
2730 stop_pc = regcache_read_pc (get_current_regcache ());
2731 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
2732 }
2733
2734 /* The to_goto_record_begin method of target record-btrace. */
2735
2736 static void
2737 record_btrace_goto_begin (struct target_ops *self)
2738 {
2739 struct thread_info *tp;
2740 struct btrace_insn_iterator begin;
2741
2742 tp = require_btrace_thread ();
2743
2744 btrace_insn_begin (&begin, &tp->btrace);
2745
2746 /* Skip gaps at the beginning of the trace. */
2747 while (btrace_insn_get (&begin) == NULL)
2748 {
2749 unsigned int steps;
2750
2751 steps = btrace_insn_next (&begin, 1);
2752 if (steps == 0)
2753 error (_("No trace."));
2754 }
2755
2756 record_btrace_set_replay (tp, &begin);
2757 }
2758
2759 /* The to_goto_record_end method of target record-btrace. */
2760
2761 static void
2762 record_btrace_goto_end (struct target_ops *ops)
2763 {
2764 struct thread_info *tp;
2765
2766 tp = require_btrace_thread ();
2767
2768 record_btrace_set_replay (tp, NULL);
2769 }
2770
2771 /* The to_goto_record method of target record-btrace. */
2772
2773 static void
2774 record_btrace_goto (struct target_ops *self, ULONGEST insn)
2775 {
2776 struct thread_info *tp;
2777 struct btrace_insn_iterator it;
2778 unsigned int number;
2779 int found;
2780
2781 number = insn;
2782
2783 /* Check for wrap-arounds when truncating INSN to unsigned int. */
2784 if (number != insn)
2785 error (_("Instruction number out of range."));
2786
2787 tp = require_btrace_thread ();
2788
2789 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
2790
2791 /* Check if the instruction could not be found or is a gap. */
2792 if (found == 0 || btrace_insn_get (&it) == NULL)
2793 error (_("No such instruction."));
2794
2795 record_btrace_set_replay (tp, &it);
2796 }
2797
2798 /* The to_record_stop_replaying method of target record-btrace. */
2799
2800 static void
2801 record_btrace_stop_replaying_all (struct target_ops *self)
2802 {
2803 struct thread_info *tp;
2804
2805 ALL_NON_EXITED_THREADS (tp)
2806 record_btrace_stop_replaying (tp);
2807 }
2808
2809 /* The to_execution_direction target method. */
2810
2811 static enum exec_direction_kind
2812 record_btrace_execution_direction (struct target_ops *self)
2813 {
2814 return record_btrace_resume_exec_dir;
2815 }
2816
2817 /* The to_prepare_to_generate_core target method. */
2818
2819 static void
2820 record_btrace_prepare_to_generate_core (struct target_ops *self)
2821 {
2822 record_btrace_generating_corefile = 1;
2823 }
2824
2825 /* The to_done_generating_core target method. */
2826
2827 static void
2828 record_btrace_done_generating_core (struct target_ops *self)
2829 {
2830 record_btrace_generating_corefile = 0;
2831 }
2832
2833 /* Initialize the record-btrace target ops. */
2834
2835 static void
2836 init_record_btrace_ops (void)
2837 {
2838 struct target_ops *ops;
2839
2840 ops = &record_btrace_ops;
2841 ops->to_shortname = "record-btrace";
2842 ops->to_longname = "Branch tracing target";
2843 ops->to_doc = "Collect control-flow trace and provide the execution history.";
2844 ops->to_open = record_btrace_open;
2845 ops->to_close = record_btrace_close;
2846 ops->to_async = record_btrace_async;
2847 ops->to_detach = record_detach;
2848 ops->to_disconnect = record_btrace_disconnect;
2849 ops->to_mourn_inferior = record_mourn_inferior;
2850 ops->to_kill = record_kill;
2851 ops->to_stop_recording = record_btrace_stop_recording;
2852 ops->to_info_record = record_btrace_info;
2853 ops->to_insn_history = record_btrace_insn_history;
2854 ops->to_insn_history_from = record_btrace_insn_history_from;
2855 ops->to_insn_history_range = record_btrace_insn_history_range;
2856 ops->to_call_history = record_btrace_call_history;
2857 ops->to_call_history_from = record_btrace_call_history_from;
2858 ops->to_call_history_range = record_btrace_call_history_range;
2859 ops->to_record_method = record_btrace_record_method;
2860 ops->to_record_is_replaying = record_btrace_is_replaying;
2861 ops->to_record_will_replay = record_btrace_will_replay;
2862 ops->to_record_stop_replaying = record_btrace_stop_replaying_all;
2863 ops->to_xfer_partial = record_btrace_xfer_partial;
2864 ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
2865 ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
2866 ops->to_fetch_registers = record_btrace_fetch_registers;
2867 ops->to_store_registers = record_btrace_store_registers;
2868 ops->to_prepare_to_store = record_btrace_prepare_to_store;
2869 ops->to_get_unwinder = &record_btrace_to_get_unwinder;
2870 ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;
2871 ops->to_resume = record_btrace_resume;
2872 ops->to_commit_resume = record_btrace_commit_resume;
2873 ops->to_wait = record_btrace_wait;
2874 ops->to_stop = record_btrace_stop;
2875 ops->to_update_thread_list = record_btrace_update_thread_list;
2876 ops->to_thread_alive = record_btrace_thread_alive;
2877 ops->to_goto_record_begin = record_btrace_goto_begin;
2878 ops->to_goto_record_end = record_btrace_goto_end;
2879 ops->to_goto_record = record_btrace_goto;
2880 ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
2881 ops->to_stopped_by_sw_breakpoint = record_btrace_stopped_by_sw_breakpoint;
2882 ops->to_supports_stopped_by_sw_breakpoint
2883 = record_btrace_supports_stopped_by_sw_breakpoint;
2884 ops->to_stopped_by_hw_breakpoint = record_btrace_stopped_by_hw_breakpoint;
2885 ops->to_supports_stopped_by_hw_breakpoint
2886 = record_btrace_supports_stopped_by_hw_breakpoint;
2887 ops->to_execution_direction = record_btrace_execution_direction;
2888 ops->to_prepare_to_generate_core = record_btrace_prepare_to_generate_core;
2889 ops->to_done_generating_core = record_btrace_done_generating_core;
2890 ops->to_stratum = record_stratum;
2891 ops->to_magic = OPS_MAGIC;
2892 }
2893
2894 /* Start recording in BTS format. */
2895
2896 static void
2897 cmd_record_btrace_bts_start (const char *args, int from_tty)
2898 {
2899 if (args != NULL && *args != 0)
2900 error (_("Invalid argument."));
2901
2902 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2903
2904 TRY
2905 {
2906 execute_command ("target record-btrace", from_tty);
2907 }
2908 CATCH (exception, RETURN_MASK_ALL)
2909 {
2910 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2911 throw_exception (exception);
2912 }
2913 END_CATCH
2914 }
2915
2916 /* Start recording in Intel Processor Trace format. */
2917
2918 static void
2919 cmd_record_btrace_pt_start (const char *args, int from_tty)
2920 {
2921 if (args != NULL && *args != 0)
2922 error (_("Invalid argument."));
2923
2924 record_btrace_conf.format = BTRACE_FORMAT_PT;
2925
2926 TRY
2927 {
2928 execute_command ("target record-btrace", from_tty);
2929 }
2930 CATCH (exception, RETURN_MASK_ALL)
2931 {
2932 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2933 throw_exception (exception);
2934 }
2935 END_CATCH
2936 }
2937
2938 /* The "record btrace" command. Prefer Intel PT, fall back to BTS. */
2939
2940 static void
2941 cmd_record_btrace_start (const char *args, int from_tty)
2942 {
2943 if (args != NULL && *args != 0)
2944 error (_("Invalid argument."));
2945
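/* Prefer the Intel Processor Trace format. If starting the target in PT
   format fails, fall back to BTS below. */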
2946 record_btrace_conf.format = BTRACE_FORMAT_PT;
2947
2948 TRY
2949 {
2950 execute_command ("target record-btrace", from_tty);
2951 }
2952 CATCH (exception, RETURN_MASK_ALL)
2953 {
2954 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2955
2956 TRY
2957 {
2958 execute_command ("target record-btrace", from_tty);
2959 }
2960 CATCH (exception, RETURN_MASK_ALL)
2961 {
2962 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2963 throw_exception (exception);
2964 }
2965 END_CATCH
2966 }
2967 END_CATCH
2968 }
2969
2970 /* The "set record btrace" command. */
2971
2972 static void
2973 cmd_set_record_btrace (const char *args, int from_tty)
2974 {
2975 cmd_show_list (set_record_btrace_cmdlist, from_tty, "");
2976 }
2977
2978 /* The "show record btrace" command. */
2979
2980 static void
2981 cmd_show_record_btrace (const char *args, int from_tty)
2982 {
2983 cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
2984 }
2985
2986 /* The "show record btrace replay-memory-access" command. */
2987
2988 static void
2989 cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2990 struct cmd_list_element *c, const char *value)
2991 {
2992 fprintf_filtered (file, _("Replay memory access is %s.\n"),
2993 replay_memory_access);
2994 }
2995
2996 /* The "set record btrace bts" command. */
2997
2998 static void
2999 cmd_set_record_btrace_bts (const char *args, int from_tty)
3000 {
3001 printf_unfiltered (_("\"set record btrace bts\" must be followed "
3002 "by an appropriate subcommand.\n"));
3003 help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
3004 all_commands, gdb_stdout);
3005 }
3006
3007 /* The "show record btrace bts" command. */
3008
3009 static void
3010 cmd_show_record_btrace_bts (const char *args, int from_tty)
3011 {
3012 cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
3013 }
3014
3015 /* The "set record btrace pt" command. */
3016
3017 static void
3018 cmd_set_record_btrace_pt (const char *args, int from_tty)
3019 {
3020 printf_unfiltered (_("\"set record btrace pt\" must be followed "
3021 "by an appropriate subcommand.\n"));
3022 help_list (set_record_btrace_pt_cmdlist, "set record btrace pt ",
3023 all_commands, gdb_stdout);
3024 }
3025
3026 /* The "show record btrace pt" command. */
3027
3028 static void
3029 cmd_show_record_btrace_pt (const char *args, int from_tty)
3030 {
3031 cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, "");
3032 }
3033
3034 /* The "record bts buffer-size" show value function. */
3035
3036 static void
3037 show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
3038 struct cmd_list_element *c,
3039 const char *value)
3040 {
3041 fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
3042 value);
3043 }
3044
3045 /* The "record pt buffer-size" show value function. */
3046
3047 static void
3048 show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
3049 struct cmd_list_element *c,
3050 const char *value)
3051 {
3052 fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
3053 value);
3054 }
3055
3056 /* Initialize btrace commands. */
3057
3058 void
3059 _initialize_record_btrace (void)
3060 {
3061 add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
3062 _("Start branch trace recording."), &record_btrace_cmdlist,
3063 "record btrace ", 0, &record_cmdlist);
3064 add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
3065
3066 add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
3067 _("\
3068 Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
3069 The processor stores a from/to record for each branch into a cyclic buffer.\n\
3070 This format may not be available on all processors."),
3071 &record_btrace_cmdlist);
3072 add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);
3073
3074 add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
3075 _("\
3076 Start branch trace recording in Intel Processor Trace format.\n\n\
3077 This format may not be available on all processors."),
3078 &record_btrace_cmdlist);
3079 add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);
3080
3081 add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
3082 _("Set record options"), &set_record_btrace_cmdlist,
3083 "set record btrace ", 0, &set_record_cmdlist);
3084
3085 add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
3086 _("Show record options"), &show_record_btrace_cmdlist,
3087 "show record btrace ", 0, &show_record_cmdlist);
3088
3089 add_setshow_enum_cmd ("replay-memory-access", no_class,
3090 replay_memory_access_types, &replay_memory_access, _("\
3091 Set what memory accesses are allowed during replay."), _("\
3092 Show what memory accesses are allowed during replay."),
3093 _("Default is READ-ONLY.\n\n\
3094 The btrace record target does not trace data.\n\
3095 The memory therefore corresponds to the live target and not \
3096 to the current replay position.\n\n\
3097 When READ-ONLY, allow accesses to read-only memory during replay.\n\
3098 When READ-WRITE, allow accesses to read-only and read-write memory during \
3099 replay."),
3100 NULL, cmd_show_replay_memory_access,
3101 &set_record_btrace_cmdlist,
3102 &show_record_btrace_cmdlist);
3103
3104 add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
3105 _("Set record btrace bts options"),
3106 &set_record_btrace_bts_cmdlist,
3107 "set record btrace bts ", 0, &set_record_btrace_cmdlist);
3108
3109 add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
3110 _("Show record btrace bts options"),
3111 &show_record_btrace_bts_cmdlist,
3112 "show record btrace bts ", 0, &show_record_btrace_cmdlist);
3113
3114 add_setshow_uinteger_cmd ("buffer-size", no_class,
3115 &record_btrace_conf.bts.size,
3116 _("Set the record/replay bts buffer size."),
3117 _("Show the record/replay bts buffer size."), _("\
3118 When starting recording, request a trace buffer of this size. \
3119 The actual buffer size may differ from the requested size. \
3120 Use \"info record\" to see the actual buffer size.\n\n\
3121 Bigger buffers allow longer recording but also take more time to process \
3122 the recorded execution trace.\n\n\
3123 The trace buffer size may not be changed while recording."), NULL,
3124 show_record_bts_buffer_size_value,
3125 &set_record_btrace_bts_cmdlist,
3126 &show_record_btrace_bts_cmdlist);
3127
3128 add_prefix_cmd ("pt", class_support, cmd_set_record_btrace_pt,
3129 _("Set record btrace pt options"),
3130 &set_record_btrace_pt_cmdlist,
3131 "set record btrace pt ", 0, &set_record_btrace_cmdlist);
3132
3133 add_prefix_cmd ("pt", class_support, cmd_show_record_btrace_pt,
3134 _("Show record btrace pt options"),
3135 &show_record_btrace_pt_cmdlist,
3136 "show record btrace pt ", 0, &show_record_btrace_cmdlist);
3137
3138 add_setshow_uinteger_cmd ("buffer-size", no_class,
3139 &record_btrace_conf.pt.size,
3140 _("Set the record/replay pt buffer size."),
3141 _("Show the record/replay pt buffer size."), _("\
3142 Bigger buffers allow longer recording but also take more time to process \
3143 the recorded execution.\n\
3144 The actual buffer size may differ from the requested size. Use \"info record\" \
3145 to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
3146 &set_record_btrace_pt_cmdlist,
3147 &show_record_btrace_pt_cmdlist);
3148
3149 init_record_btrace_ops ();
3150 add_target (&record_btrace_ops);
3151
3152 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
3153 xcalloc, xfree);
3154
3155 record_btrace_conf.bts.size = 64 * 1024;
3156 record_btrace_conf.pt.size = 16 * 1024;
3157 }
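/* Example session (illustrative; whether BTS and PT are available, and
   the exact output, depend on the processor and kernel):

     (gdb) record btrace
     (gdb) continue
     (gdb) record goto begin
     (gdb) set record btrace replay-memory-access read-write
     (gdb) reverse-stepi
     (gdb) info record  */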