Convert observers to C++
gdb/record-btrace.c
1 /* Branch trace support for GDB, the GNU debugger.
2
3 Copyright (C) 2013-2018 Free Software Foundation, Inc.
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include "record.h"
24 #include "record-btrace.h"
25 #include "gdbthread.h"
26 #include "target.h"
27 #include "gdbcmd.h"
28 #include "disasm.h"
29 #include "observable.h"
30 #include "cli/cli-utils.h"
31 #include "source.h"
32 #include "ui-out.h"
33 #include "symtab.h"
34 #include "filenames.h"
35 #include "regcache.h"
36 #include "frame-unwind.h"
37 #include "hashtab.h"
38 #include "infrun.h"
39 #include "event-loop.h"
40 #include "inf-loop.h"
41 #include "vec.h"
42 #include <algorithm>
43
44 /* The target_ops of record-btrace. */
45 static struct target_ops record_btrace_ops;
46
47 /* Token associated with a new-thread observer enabling branch tracing
48 for the new thread. */
49 static const gdb::observers::token record_btrace_thread_observer_token;
50
51 /* Memory access types used in set/show record btrace replay-memory-access. */
52 static const char replay_memory_access_read_only[] = "read-only";
53 static const char replay_memory_access_read_write[] = "read-write";
54 static const char *const replay_memory_access_types[] =
55 {
56 replay_memory_access_read_only,
57 replay_memory_access_read_write,
58 NULL
59 };
60
61 /* The currently allowed replay memory access type. */
62 static const char *replay_memory_access = replay_memory_access_read_only;
63
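/* For example, "set record btrace replay-memory-access read-write"
   switches REPLAY_MEMORY_ACCESS to the read-write policy; the default
   is read-only.  */
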
64 /* Command lists for "set/show record btrace". */
65 static struct cmd_list_element *set_record_btrace_cmdlist;
66 static struct cmd_list_element *show_record_btrace_cmdlist;
67
68 /* The execution direction of the last resume we got. See record-full.c. */
69 static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;
70
71 /* The async event handler for reverse/replay execution. */
72 static struct async_event_handler *record_btrace_async_inferior_event_handler;
73
74 /* A flag indicating that we are currently generating a core file. */
75 static int record_btrace_generating_corefile;
76
77 /* The current branch trace configuration. */
78 static struct btrace_config record_btrace_conf;
79
80 /* Command list for "record btrace". */
81 static struct cmd_list_element *record_btrace_cmdlist;
82
83 /* Command lists for "set/show record btrace bts". */
84 static struct cmd_list_element *set_record_btrace_bts_cmdlist;
85 static struct cmd_list_element *show_record_btrace_bts_cmdlist;
86
87 /* Command lists for "set/show record btrace pt". */
88 static struct cmd_list_element *set_record_btrace_pt_cmdlist;
89 static struct cmd_list_element *show_record_btrace_pt_cmdlist;
90
91 /* Print a record-btrace debug message. Use do ... while (0) to avoid
92 ambiguities when used in if statements. */
93
94 #define DEBUG(msg, args...) \
95 do \
96 { \
97 if (record_debug != 0) \
98 fprintf_unfiltered (gdb_stdlog, \
99 "[record-btrace] " msg "\n", ##args); \
100 } \
101 while (0)
102
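/* For example, DEBUG ("resume %s", target_pid_to_str (ptid)) prints
   "[record-btrace] resume <target-id>" to gdb_stdlog, provided debugging
   was enabled with "set debug record 1".  */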
103
104 /* Update the branch trace for the current thread and return a pointer to its
105 thread_info.
106
107 Throws an error if there is no thread or no trace. This function never
108 returns NULL. */
109
110 static struct thread_info *
111 require_btrace_thread (void)
112 {
113 struct thread_info *tp;
114
115 DEBUG ("require");
116
117 tp = find_thread_ptid (inferior_ptid);
118 if (tp == NULL)
119 error (_("No thread."));
120
121 validate_registers_access ();
122
123 btrace_fetch (tp);
124
125 if (btrace_is_empty (tp))
126 error (_("No trace."));
127
128 return tp;
129 }
130
131 /* Update the branch trace for the current thread and return a pointer to its
132 branch trace information struct.
133
134 Throws an error if there is no thread or no trace. This function never
135 returns NULL. */
136
137 static struct btrace_thread_info *
138 require_btrace (void)
139 {
140 struct thread_info *tp;
141
142 tp = require_btrace_thread ();
143
144 return &tp->btrace;
145 }
146
147 /* Enable branch tracing for one thread. Warn on errors. */
148
149 static void
150 record_btrace_enable_warn (struct thread_info *tp)
151 {
152 TRY
153 {
154 btrace_enable (tp, &record_btrace_conf);
155 }
156 CATCH (error, RETURN_MASK_ERROR)
157 {
158 warning ("%s", error.message);
159 }
160 END_CATCH
161 }
162
163 /* Enable automatic tracing of new threads. */
164
165 static void
166 record_btrace_auto_enable (void)
167 {
168 DEBUG ("attach thread observer");
169
170 gdb::observers::new_thread.attach (record_btrace_enable_warn,
171 record_btrace_thread_observer_token);
172 }
173
174 /* Disable automatic tracing of new threads. */
175
176 static void
177 record_btrace_auto_disable (void)
178 {
179 DEBUG ("detach thread observer");
180
181 gdb::observers::new_thread.detach (record_btrace_thread_observer_token);
182 }
183
184 /* The record-btrace async event handler function. */
185
186 static void
187 record_btrace_handle_async_inferior_event (gdb_client_data data)
188 {
189 inferior_event_handler (INF_REG_EVENT, NULL);
190 }
191
192 /* See record-btrace.h. */
193
194 void
195 record_btrace_push_target (void)
196 {
197 const char *format;
198
199 record_btrace_auto_enable ();
200
201 push_target (&record_btrace_ops);
202
203 record_btrace_async_inferior_event_handler
204 = create_async_event_handler (record_btrace_handle_async_inferior_event,
205 NULL);
206 record_btrace_generating_corefile = 0;
207
208 format = btrace_format_short_string (record_btrace_conf.format);
209 gdb::observers::record_changed.notify (current_inferior (), 1, "btrace", format);
210 }
211
212 /* Disable btrace on a set of threads on scope exit. */
213
214 struct scoped_btrace_disable
215 {
216 scoped_btrace_disable () = default;
217
218 DISABLE_COPY_AND_ASSIGN (scoped_btrace_disable);
219
220 ~scoped_btrace_disable ()
221 {
222 for (thread_info *tp : m_threads)
223 btrace_disable (tp);
224 }
225
226 void add_thread (thread_info *thread)
227 {
228 m_threads.push_front (thread);
229 }
230
231 void discard ()
232 {
233 m_threads.clear ();
234 }
235
236 private:
237 std::forward_list<thread_info *> m_threads;
238 };
239
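/* A minimal usage sketch (mirroring record_btrace_open below): enable
   btrace for each thread, registering it with the guard; if a later
   btrace_enable throws, the destructor disables the threads enabled so
   far; on full success, discard () keeps them all enabled.

     scoped_btrace_disable btrace_disable;
     btrace_enable (tp, &record_btrace_conf);
     btrace_disable.add_thread (tp);
     ...
     btrace_disable.discard ();  */
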
240 /* The to_open method of target record-btrace. */
241
242 static void
243 record_btrace_open (const char *args, int from_tty)
244 {
245 /* If we fail to enable btrace for one thread, disable it for the threads for
246 which it was successfully enabled. */
247 scoped_btrace_disable btrace_disable;
248 struct thread_info *tp;
249
250 DEBUG ("open");
251
252 record_preopen ();
253
254 if (!target_has_execution)
255 error (_("The program is not being run."));
256
257 ALL_NON_EXITED_THREADS (tp)
258 if (args == NULL || *args == 0 || number_is_in_list (args, tp->global_num))
259 {
260 btrace_enable (tp, &record_btrace_conf);
261
262 btrace_disable.add_thread (tp);
263 }
264
265 record_btrace_push_target ();
266
267 btrace_disable.discard ();
268 }
269
270 /* The to_stop_recording method of target record-btrace. */
271
272 static void
273 record_btrace_stop_recording (struct target_ops *self)
274 {
275 struct thread_info *tp;
276
277 DEBUG ("stop recording");
278
279 record_btrace_auto_disable ();
280
281 ALL_NON_EXITED_THREADS (tp)
282 if (tp->btrace.target != NULL)
283 btrace_disable (tp);
284 }
285
286 /* The to_disconnect method of target record-btrace. */
287
288 static void
289 record_btrace_disconnect (struct target_ops *self, const char *args,
290 int from_tty)
291 {
292 struct target_ops *beneath = self->beneath;
293
294 /* Do not stop recording, just clean up GDB side. */
295 unpush_target (self);
296
297 /* Forward disconnect. */
298 beneath->to_disconnect (beneath, args, from_tty);
299 }
300
301 /* The to_close method of target record-btrace. */
302
303 static void
304 record_btrace_close (struct target_ops *self)
305 {
306 struct thread_info *tp;
307
308 if (record_btrace_async_inferior_event_handler != NULL)
309 delete_async_event_handler (&record_btrace_async_inferior_event_handler);
310
311 /* Make sure automatic recording gets disabled even if we did not stop
312 recording before closing the record-btrace target. */
313 record_btrace_auto_disable ();
314
315 /* We should have already stopped recording.
316 Tear down btrace in case we have not. */
317 ALL_NON_EXITED_THREADS (tp)
318 btrace_teardown (tp);
319 }
320
321 /* The to_async method of target record-btrace. */
322
323 static void
324 record_btrace_async (struct target_ops *ops, int enable)
325 {
326 if (enable)
327 mark_async_event_handler (record_btrace_async_inferior_event_handler);
328 else
329 clear_async_event_handler (record_btrace_async_inferior_event_handler);
330
331 ops->beneath->to_async (ops->beneath, enable);
332 }
333
334 /* Adjust *SIZE and return a human-readable size suffix.  */
335
336 static const char *
337 record_btrace_adjust_size (unsigned int *size)
338 {
339 unsigned int sz;
340
341 sz = *size;
342
343 if ((sz & ((1u << 30) - 1)) == 0)
344 {
345 *size = sz >> 30;
346 return "GB";
347 }
348 else if ((sz & ((1u << 20) - 1)) == 0)
349 {
350 *size = sz >> 20;
351 return "MB";
352 }
353 else if ((sz & ((1u << 10) - 1)) == 0)
354 {
355 *size = sz >> 10;
356 return "kB";
357 }
358 else
359 return "";
360 }
361
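/* For example, a 2 MiB buffer (SIZE == 2097152) becomes *SIZE == 2 with
   suffix "MB"; a size that is not an exact multiple of 1 KiB is returned
   unchanged with an empty suffix.  The suffixes denote binary multiples
   (1 << 10, 1 << 20, 1 << 30).  */
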
362 /* Print a BTS configuration. */
363
364 static void
365 record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
366 {
367 const char *suffix;
368 unsigned int size;
369
370 size = conf->size;
371 if (size > 0)
372 {
373 suffix = record_btrace_adjust_size (&size);
374 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
375 }
376 }
377
378 /* Print an Intel Processor Trace configuration. */
379
380 static void
381 record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
382 {
383 const char *suffix;
384 unsigned int size;
385
386 size = conf->size;
387 if (size > 0)
388 {
389 suffix = record_btrace_adjust_size (&size);
390 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
391 }
392 }
393
394 /* Print a branch tracing configuration. */
395
396 static void
397 record_btrace_print_conf (const struct btrace_config *conf)
398 {
399 printf_unfiltered (_("Recording format: %s.\n"),
400 btrace_format_string (conf->format));
401
402 switch (conf->format)
403 {
404 case BTRACE_FORMAT_NONE:
405 return;
406
407 case BTRACE_FORMAT_BTS:
408 record_btrace_print_bts_conf (&conf->bts);
409 return;
410
411 case BTRACE_FORMAT_PT:
412 record_btrace_print_pt_conf (&conf->pt);
413 return;
414 }
415
416 internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
417 }
418
419 /* The to_info_record method of target record-btrace. */
420
421 static void
422 record_btrace_info (struct target_ops *self)
423 {
424 struct btrace_thread_info *btinfo;
425 const struct btrace_config *conf;
426 struct thread_info *tp;
427 unsigned int insns, calls, gaps;
428
429 DEBUG ("info");
430
431 tp = find_thread_ptid (inferior_ptid);
432 if (tp == NULL)
433 error (_("No thread."));
434
435 validate_registers_access ();
436
437 btinfo = &tp->btrace;
438
439 conf = btrace_conf (btinfo);
440 if (conf != NULL)
441 record_btrace_print_conf (conf);
442
443 btrace_fetch (tp);
444
445 insns = 0;
446 calls = 0;
447 gaps = 0;
448
449 if (!btrace_is_empty (tp))
450 {
451 struct btrace_call_iterator call;
452 struct btrace_insn_iterator insn;
453
454 btrace_call_end (&call, btinfo);
455 btrace_call_prev (&call, 1);
456 calls = btrace_call_number (&call);
457
458 btrace_insn_end (&insn, btinfo);
459 insns = btrace_insn_number (&insn);
460
461 /* If the last instruction is not a gap, it is the current instruction
462 that is not actually part of the record. */
463 if (btrace_insn_get (&insn) != NULL)
464 insns -= 1;
465
466 gaps = btinfo->ngaps;
467 }
468
469 printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
470 "for thread %s (%s).\n"), insns, calls, gaps,
471 print_thread_id (tp), target_pid_to_str (tp->ptid));
472
473 if (btrace_is_replaying (tp))
474 printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
475 btrace_insn_number (btinfo->replay));
476 }
477
478 /* Print a decode error. */
479
480 static void
481 btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
482 enum btrace_format format)
483 {
484 const char *errstr = btrace_decode_error (format, errcode);
485
486 uiout->text (_("["));
487 /* ERRCODE > 0 indicates notifications on BTRACE_FORMAT_PT. */
488 if (!(format == BTRACE_FORMAT_PT && errcode > 0))
489 {
490 uiout->text (_("decode error ("));
491 uiout->field_int ("errcode", errcode);
492 uiout->text (_("): "));
493 }
494 uiout->text (errstr);
495 uiout->text (_("]\n"));
496 }
497
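/* For example, a decode error -1 in BTS format prints as
   "[decode error (-1): <message>]", while a positive Intel PT errcode
   denotes a notification and prints as just "[<message>]".  */
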
498 /* Print an unsigned int. */
499
500 static void
501 ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
502 {
503 uiout->field_fmt (fld, "%u", val);
504 }
505
506 /* A range of source lines. */
507
508 struct btrace_line_range
509 {
510 /* The symtab this line is from. */
511 struct symtab *symtab;
512
513 /* The first line (inclusive). */
514 int begin;
515
516 /* The last line (exclusive). */
517 int end;
518 };
519
520 /* Construct a line range. */
521
522 static struct btrace_line_range
523 btrace_mk_line_range (struct symtab *symtab, int begin, int end)
524 {
525 struct btrace_line_range range;
526
527 range.symtab = symtab;
528 range.begin = begin;
529 range.end = end;
530
531 return range;
532 }
533
534 /* Add a line to a line range. */
535
536 static struct btrace_line_range
537 btrace_line_range_add (struct btrace_line_range range, int line)
538 {
539 if (range.end <= range.begin)
540 {
541 /* This is the first entry. */
542 range.begin = line;
543 range.end = line + 1;
544 }
545 else if (line < range.begin)
546 range.begin = line;
547 else if (range.end <= line)
548 range.end = line + 1;
549
550 return range;
551 }
552
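/* For example, adding lines 42, 40, and 45 to an initially empty range
   yields [40, 46): BEGIN tracks the lowest line seen, and the exclusive
   END tracks one past the highest.  */
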
553 /* Return non-zero if RANGE is empty, zero otherwise. */
554
555 static int
556 btrace_line_range_is_empty (struct btrace_line_range range)
557 {
558 return range.end <= range.begin;
559 }
560
561 /* Return non-zero if LHS contains RHS, zero otherwise. */
562
563 static int
564 btrace_line_range_contains_range (struct btrace_line_range lhs,
565 struct btrace_line_range rhs)
566 {
567 return ((lhs.symtab == rhs.symtab)
568 && (lhs.begin <= rhs.begin)
569 && (rhs.end <= lhs.end));
570 }
571
572 /* Find the line range associated with PC. */
573
574 static struct btrace_line_range
575 btrace_find_line_range (CORE_ADDR pc)
576 {
577 struct btrace_line_range range;
578 struct linetable_entry *lines;
579 struct linetable *ltable;
580 struct symtab *symtab;
581 int nlines, i;
582
583 symtab = find_pc_line_symtab (pc);
584 if (symtab == NULL)
585 return btrace_mk_line_range (NULL, 0, 0);
586
587 ltable = SYMTAB_LINETABLE (symtab);
588 if (ltable == NULL)
589 return btrace_mk_line_range (symtab, 0, 0);
590
591 nlines = ltable->nitems;
592 lines = ltable->item;
593 if (nlines <= 0)
594 return btrace_mk_line_range (symtab, 0, 0);
595
596 range = btrace_mk_line_range (symtab, 0, 0);
597 for (i = 0; i < nlines; i++)
598 {
599 if ((lines[i].pc == pc) && (lines[i].line != 0))
600 range = btrace_line_range_add (range, lines[i].line);
601 }
602
603 return range;
604 }
605
606 /* Print source lines in LINES to UIOUT.
607
608 SRC_AND_ASM_TUPLE and ASM_LIST hold the emitters for the last source line
609 and the instructions corresponding to that source line. When printing a new
610 source line, we close the open emitters and open new ones for the new
611 source line. If the source line range in LINES is not empty, this function
612 leaves the emitters for the last printed source line open so instructions
613 can be added to them. */
614
615 static void
616 btrace_print_lines (struct btrace_line_range lines, struct ui_out *uiout,
617 gdb::optional<ui_out_emit_tuple> *src_and_asm_tuple,
618 gdb::optional<ui_out_emit_list> *asm_list,
619 gdb_disassembly_flags flags)
620 {
621 print_source_lines_flags psl_flags;
622
623 if (flags & DISASSEMBLY_FILENAME)
624 psl_flags |= PRINT_SOURCE_LINES_FILENAME;
625
626 for (int line = lines.begin; line < lines.end; ++line)
627 {
628 asm_list->reset ();
629
630 src_and_asm_tuple->emplace (uiout, "src_and_asm_line");
631
632 print_source_lines (lines.symtab, line, line + 1, psl_flags);
633
634 asm_list->emplace (uiout, "line_asm_insn");
635 }
636 }
637
638 /* Disassemble a section of the recorded instruction trace. */
639
640 static void
641 btrace_insn_history (struct ui_out *uiout,
642 const struct btrace_thread_info *btinfo,
643 const struct btrace_insn_iterator *begin,
644 const struct btrace_insn_iterator *end,
645 gdb_disassembly_flags flags)
646 {
647 DEBUG ("itrace (0x%x): [%u; %u)", (unsigned) flags,
648 btrace_insn_number (begin), btrace_insn_number (end));
649
650 flags |= DISASSEMBLY_SPECULATIVE;
651
652 struct gdbarch *gdbarch = target_gdbarch ();
653 btrace_line_range last_lines = btrace_mk_line_range (NULL, 0, 0);
654
655 ui_out_emit_list list_emitter (uiout, "asm_insns");
656
657 gdb::optional<ui_out_emit_tuple> src_and_asm_tuple;
658 gdb::optional<ui_out_emit_list> asm_list;
659
660 gdb_pretty_print_disassembler disasm (gdbarch);
661
662 for (btrace_insn_iterator it = *begin; btrace_insn_cmp (&it, end) != 0;
663 btrace_insn_next (&it, 1))
664 {
665 const struct btrace_insn *insn;
666
667 insn = btrace_insn_get (&it);
668
669 /* A NULL instruction indicates a gap in the trace. */
670 if (insn == NULL)
671 {
672 const struct btrace_config *conf;
673
674 conf = btrace_conf (btinfo);
675
676 /* We have trace so we must have a configuration. */
677 gdb_assert (conf != NULL);
678
679 uiout->field_fmt ("insn-number", "%u",
680 btrace_insn_number (&it));
681 uiout->text ("\t");
682
683 btrace_ui_out_decode_error (uiout, btrace_insn_get_error (&it),
684 conf->format);
685 }
686 else
687 {
688 struct disasm_insn dinsn;
689
690 if ((flags & DISASSEMBLY_SOURCE) != 0)
691 {
692 struct btrace_line_range lines;
693
694 lines = btrace_find_line_range (insn->pc);
695 if (!btrace_line_range_is_empty (lines)
696 && !btrace_line_range_contains_range (last_lines, lines))
697 {
698 btrace_print_lines (lines, uiout, &src_and_asm_tuple, &asm_list,
699 flags);
700 last_lines = lines;
701 }
702 else if (!src_and_asm_tuple.has_value ())
703 {
704 gdb_assert (!asm_list.has_value ());
705
706 src_and_asm_tuple.emplace (uiout, "src_and_asm_line");
707
708 /* No source information. */
709 asm_list.emplace (uiout, "line_asm_insn");
710 }
711
712 gdb_assert (src_and_asm_tuple.has_value ());
713 gdb_assert (asm_list.has_value ());
714 }
715
716 memset (&dinsn, 0, sizeof (dinsn));
717 dinsn.number = btrace_insn_number (&it);
718 dinsn.addr = insn->pc;
719
720 if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
721 dinsn.is_speculative = 1;
722
723 disasm.pretty_print_insn (uiout, &dinsn, flags);
724 }
725 }
726 }
727
728 /* The to_insn_history method of target record-btrace. */
729
730 static void
731 record_btrace_insn_history (struct target_ops *self, int size,
732 gdb_disassembly_flags flags)
733 {
734 struct btrace_thread_info *btinfo;
735 struct btrace_insn_history *history;
736 struct btrace_insn_iterator begin, end;
737 struct ui_out *uiout;
738 unsigned int context, covered;
739
740 uiout = current_uiout;
741 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
742 context = abs (size);
743 if (context == 0)
744 error (_("Bad record instruction-history-size."));
745
746 btinfo = require_btrace ();
747 history = btinfo->insn_history;
748 if (history == NULL)
749 {
750 struct btrace_insn_iterator *replay;
751
752 DEBUG ("insn-history (0x%x): %d", (unsigned) flags, size);
753
754 /* If we're replaying, we start at the replay position. Otherwise, we
755 start at the tail of the trace. */
756 replay = btinfo->replay;
757 if (replay != NULL)
758 begin = *replay;
759 else
760 btrace_insn_end (&begin, btinfo);
761
762 /* We start from here and expand in the requested direction. Then we
763 expand in the other direction, as well, to fill up any remaining
764 context. */
765 end = begin;
766 if (size < 0)
767 {
768 /* We want the current position covered, as well. */
769 covered = btrace_insn_next (&end, 1);
770 covered += btrace_insn_prev (&begin, context - covered);
771 covered += btrace_insn_next (&end, context - covered);
772 }
773 else
774 {
775 covered = btrace_insn_next (&end, context);
776 covered += btrace_insn_prev (&begin, context - covered);
777 }
778 }
779 else
780 {
781 begin = history->begin;
782 end = history->end;
783
784 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", (unsigned) flags, size,
785 btrace_insn_number (&begin), btrace_insn_number (&end));
786
787 if (size < 0)
788 {
789 end = begin;
790 covered = btrace_insn_prev (&begin, context);
791 }
792 else
793 {
794 begin = end;
795 covered = btrace_insn_next (&end, context);
796 }
797 }
798
799 if (covered > 0)
800 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
801 else
802 {
803 if (size < 0)
804 printf_unfiltered (_("At the start of the branch trace record.\n"));
805 else
806 printf_unfiltered (_("At the end of the branch trace record.\n"));
807 }
808
809 btrace_set_insn_history (btinfo, &begin, &end);
810 }
811
812 /* The to_insn_history_range method of target record-btrace. */
813
814 static void
815 record_btrace_insn_history_range (struct target_ops *self,
816 ULONGEST from, ULONGEST to,
817 gdb_disassembly_flags flags)
818 {
819 struct btrace_thread_info *btinfo;
820 struct btrace_insn_iterator begin, end;
821 struct ui_out *uiout;
822 unsigned int low, high;
823 int found;
824
825 uiout = current_uiout;
826 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
827 low = from;
828 high = to;
829
830 DEBUG ("insn-history (0x%x): [%u; %u)", (unsigned) flags, low, high);
831
832 /* Check for wrap-arounds. */
833 if (low != from || high != to)
834 error (_("Bad range."));
835
836 if (high < low)
837 error (_("Bad range."));
838
839 btinfo = require_btrace ();
840
841 found = btrace_find_insn_by_number (&begin, btinfo, low);
842 if (found == 0)
843 error (_("Range out of bounds."));
844
845 found = btrace_find_insn_by_number (&end, btinfo, high);
846 if (found == 0)
847 {
848 /* Silently truncate the range. */
849 btrace_insn_end (&end, btinfo);
850 }
851 else
852 {
853 /* We want both begin and end to be inclusive. */
854 btrace_insn_next (&end, 1);
855 }
856
857 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
858 btrace_set_insn_history (btinfo, &begin, &end);
859 }
860
861 /* The to_insn_history_from method of target record-btrace. */
862
863 static void
864 record_btrace_insn_history_from (struct target_ops *self,
865 ULONGEST from, int size,
866 gdb_disassembly_flags flags)
867 {
868 ULONGEST begin, end, context;
869
870 context = abs (size);
871 if (context == 0)
872 error (_("Bad record instruction-history-size."));
873
874 if (size < 0)
875 {
876 end = from;
877
878 if (from < context)
879 begin = 0;
880 else
881 begin = from - context + 1;
882 }
883 else
884 {
885 begin = from;
886 end = from + context - 1;
887
888 /* Check for wrap-around. */
889 if (end < begin)
890 end = ULONGEST_MAX;
891 }
892
893 record_btrace_insn_history_range (self, begin, end, flags);
894 }
895
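/* For example, FROM == 100 with SIZE == -10 requests the ten instructions
   up to and including 100, i.e. the inclusive range [91, 100].  */
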
896 /* Print the instruction number range for a function call history line. */
897
898 static void
899 btrace_call_history_insn_range (struct ui_out *uiout,
900 const struct btrace_function *bfun)
901 {
902 unsigned int begin, end, size;
903
904 size = bfun->insn.size ();
905 gdb_assert (size > 0);
906
907 begin = bfun->insn_offset;
908 end = begin + size - 1;
909
910 ui_out_field_uint (uiout, "insn begin", begin);
911 uiout->text (",");
912 ui_out_field_uint (uiout, "insn end", end);
913 }
914
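/* For example, a function segment with INSN_OFFSET == 10 that contains
   five instructions prints as "10,14".  */
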
915 /* Compute the lowest and highest source line for the instructions in BFUN
916 and return them in PBEGIN and PEND.
917 Ignore instructions that can't be mapped to BFUN, e.g. instructions that
918 result from inlining or macro expansion. */
919
920 static void
921 btrace_compute_src_line_range (const struct btrace_function *bfun,
922 int *pbegin, int *pend)
923 {
924 struct symtab *symtab;
925 struct symbol *sym;
926 int begin, end;
927
928 begin = INT_MAX;
929 end = INT_MIN;
930
931 sym = bfun->sym;
932 if (sym == NULL)
933 goto out;
934
935 symtab = symbol_symtab (sym);
936
937 for (const btrace_insn &insn : bfun->insn)
938 {
939 struct symtab_and_line sal;
940
941 sal = find_pc_line (insn.pc, 0);
942 if (sal.symtab != symtab || sal.line == 0)
943 continue;
944
945 begin = std::min (begin, sal.line);
946 end = std::max (end, sal.line);
947 }
948
949 out:
950 *pbegin = begin;
951 *pend = end;
952 }
953
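/* If no instruction in BFUN maps to BFUN's symtab, *PBEGIN stays INT_MAX
   and *PEND stays INT_MIN; callers detect this empty result by checking
   END < BEGIN (see btrace_call_history_src_line below).  */
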
954 /* Print the source line information for a function call history line. */
955
956 static void
957 btrace_call_history_src_line (struct ui_out *uiout,
958 const struct btrace_function *bfun)
959 {
960 struct symbol *sym;
961 int begin, end;
962
963 sym = bfun->sym;
964 if (sym == NULL)
965 return;
966
967 uiout->field_string ("file",
968 symtab_to_filename_for_display (symbol_symtab (sym)));
969
970 btrace_compute_src_line_range (bfun, &begin, &end);
971 if (end < begin)
972 return;
973
974 uiout->text (":");
975 uiout->field_int ("min line", begin);
976
977 if (end == begin)
978 return;
979
980 uiout->text (",");
981 uiout->field_int ("max line", end);
982 }
983
984 /* Get the name of a branch trace function. */
985
986 static const char *
987 btrace_get_bfun_name (const struct btrace_function *bfun)
988 {
989 struct minimal_symbol *msym;
990 struct symbol *sym;
991
992 if (bfun == NULL)
993 return "??";
994
995 msym = bfun->msym;
996 sym = bfun->sym;
997
998 if (sym != NULL)
999 return SYMBOL_PRINT_NAME (sym);
1000 else if (msym != NULL)
1001 return MSYMBOL_PRINT_NAME (msym);
1002 else
1003 return "??";
1004 }
1005
1006 /* Disassemble a section of the recorded function trace. */
1007
1008 static void
1009 btrace_call_history (struct ui_out *uiout,
1010 const struct btrace_thread_info *btinfo,
1011 const struct btrace_call_iterator *begin,
1012 const struct btrace_call_iterator *end,
1013 int int_flags)
1014 {
1015 struct btrace_call_iterator it;
1016 record_print_flags flags = (enum record_print_flag) int_flags;
1017
1018 DEBUG ("ftrace (0x%x): [%u; %u)", int_flags, btrace_call_number (begin),
1019 btrace_call_number (end));
1020
1021 for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
1022 {
1023 const struct btrace_function *bfun;
1024 struct minimal_symbol *msym;
1025 struct symbol *sym;
1026
1027 bfun = btrace_call_get (&it);
1028 sym = bfun->sym;
1029 msym = bfun->msym;
1030
1031 /* Print the function index. */
1032 ui_out_field_uint (uiout, "index", bfun->number);
1033 uiout->text ("\t");
1034
1035 /* Indicate gaps in the trace. */
1036 if (bfun->errcode != 0)
1037 {
1038 const struct btrace_config *conf;
1039
1040 conf = btrace_conf (btinfo);
1041
1042 /* We have trace so we must have a configuration. */
1043 gdb_assert (conf != NULL);
1044
1045 btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);
1046
1047 continue;
1048 }
1049
1050 if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
1051 {
1052 int level = bfun->level + btinfo->level, i;
1053
1054 for (i = 0; i < level; ++i)
1055 uiout->text (" ");
1056 }
1057
1058 if (sym != NULL)
1059 uiout->field_string ("function", SYMBOL_PRINT_NAME (sym));
1060 else if (msym != NULL)
1061 uiout->field_string ("function", MSYMBOL_PRINT_NAME (msym));
1062 else if (!uiout->is_mi_like_p ())
1063 uiout->field_string ("function", "??");
1064
1065 if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
1066 {
1067 uiout->text (_("\tinst "));
1068 btrace_call_history_insn_range (uiout, bfun);
1069 }
1070
1071 if ((flags & RECORD_PRINT_SRC_LINE) != 0)
1072 {
1073 uiout->text (_("\tat "));
1074 btrace_call_history_src_line (uiout, bfun);
1075 }
1076
1077 uiout->text ("\n");
1078 }
1079 }
1080
1081 /* The to_call_history method of target record-btrace. */
1082
1083 static void
1084 record_btrace_call_history (struct target_ops *self, int size,
1085 record_print_flags flags)
1086 {
1087 struct btrace_thread_info *btinfo;
1088 struct btrace_call_history *history;
1089 struct btrace_call_iterator begin, end;
1090 struct ui_out *uiout;
1091 unsigned int context, covered;
1092
1093 uiout = current_uiout;
1094 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
1095 context = abs (size);
1096 if (context == 0)
1097 error (_("Bad record function-call-history-size."));
1098
1099 btinfo = require_btrace ();
1100 history = btinfo->call_history;
1101 if (history == NULL)
1102 {
1103 struct btrace_insn_iterator *replay;
1104
1105 DEBUG ("call-history (0x%x): %d", (int) flags, size);
1106
1107 /* If we're replaying, we start at the replay position. Otherwise, we
1108 start at the tail of the trace. */
1109 replay = btinfo->replay;
1110 if (replay != NULL)
1111 {
1112 begin.btinfo = btinfo;
1113 begin.index = replay->call_index;
1114 }
1115 else
1116 btrace_call_end (&begin, btinfo);
1117
1118 /* We start from here and expand in the requested direction. Then we
1119 expand in the other direction, as well, to fill up any remaining
1120 context. */
1121 end = begin;
1122 if (size < 0)
1123 {
1124 /* We want the current position covered, as well. */
1125 covered = btrace_call_next (&end, 1);
1126 covered += btrace_call_prev (&begin, context - covered);
1127 covered += btrace_call_next (&end, context - covered);
1128 }
1129 else
1130 {
1131 covered = btrace_call_next (&end, context);
1132 covered += btrace_call_prev (&begin, context - covered);
1133 }
1134 }
1135 else
1136 {
1137 begin = history->begin;
1138 end = history->end;
1139
1140 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", (int) flags, size,
1141 btrace_call_number (&begin), btrace_call_number (&end));
1142
1143 if (size < 0)
1144 {
1145 end = begin;
1146 covered = btrace_call_prev (&begin, context);
1147 }
1148 else
1149 {
1150 begin = end;
1151 covered = btrace_call_next (&end, context);
1152 }
1153 }
1154
1155 if (covered > 0)
1156 btrace_call_history (uiout, btinfo, &begin, &end, flags);
1157 else
1158 {
1159 if (size < 0)
1160 printf_unfiltered (_("At the start of the branch trace record.\n"));
1161 else
1162 printf_unfiltered (_("At the end of the branch trace record.\n"));
1163 }
1164
1165 btrace_set_call_history (btinfo, &begin, &end);
1166 }
1167
1168 /* The to_call_history_range method of target record-btrace. */
1169
1170 static void
1171 record_btrace_call_history_range (struct target_ops *self,
1172 ULONGEST from, ULONGEST to,
1173 record_print_flags flags)
1174 {
1175 struct btrace_thread_info *btinfo;
1176 struct btrace_call_iterator begin, end;
1177 struct ui_out *uiout;
1178 unsigned int low, high;
1179 int found;
1180
1181 uiout = current_uiout;
1182 ui_out_emit_tuple tuple_emitter (uiout, "func history");
1183 low = from;
1184 high = to;
1185
1186 DEBUG ("call-history (0x%x): [%u; %u)", (int) flags, low, high);
1187
1188 /* Check for wrap-arounds. */
1189 if (low != from || high != to)
1190 error (_("Bad range."));
1191
1192 if (high < low)
1193 error (_("Bad range."));
1194
1195 btinfo = require_btrace ();
1196
1197 found = btrace_find_call_by_number (&begin, btinfo, low);
1198 if (found == 0)
1199 error (_("Range out of bounds."));
1200
1201 found = btrace_find_call_by_number (&end, btinfo, high);
1202 if (found == 0)
1203 {
1204 /* Silently truncate the range. */
1205 btrace_call_end (&end, btinfo);
1206 }
1207 else
1208 {
1209 /* We want both begin and end to be inclusive. */
1210 btrace_call_next (&end, 1);
1211 }
1212
1213 btrace_call_history (uiout, btinfo, &begin, &end, flags);
1214 btrace_set_call_history (btinfo, &begin, &end);
1215 }
1216
1217 /* The to_call_history_from method of target record-btrace. */
1218
1219 static void
1220 record_btrace_call_history_from (struct target_ops *self,
1221 ULONGEST from, int size,
1222 record_print_flags flags)
1223 {
1224 ULONGEST begin, end, context;
1225
1226 context = abs (size);
1227 if (context == 0)
1228 error (_("Bad record function-call-history-size."));
1229
1230 if (size < 0)
1231 {
1232 end = from;
1233
1234 if (from < context)
1235 begin = 0;
1236 else
1237 begin = from - context + 1;
1238 }
1239 else
1240 {
1241 begin = from;
1242 end = from + context - 1;
1243
1244 /* Check for wrap-around. */
1245 if (end < begin)
1246 end = ULONGEST_MAX;
1247 }
1248
1249 record_btrace_call_history_range (self, begin, end, flags);
1250 }
1251
1252 /* The to_record_method method of target record-btrace. */
1253
1254 static enum record_method
1255 record_btrace_record_method (struct target_ops *self, ptid_t ptid)
1256 {
1257 struct thread_info * const tp = find_thread_ptid (ptid);
1258
1259 if (tp == NULL)
1260 error (_("No thread."));
1261
1262 if (tp->btrace.target == NULL)
1263 return RECORD_METHOD_NONE;
1264
1265 return RECORD_METHOD_BTRACE;
1266 }
1267
1268 /* The to_record_is_replaying method of target record-btrace. */
1269
1270 static int
1271 record_btrace_is_replaying (struct target_ops *self, ptid_t ptid)
1272 {
1273 struct thread_info *tp;
1274
1275 ALL_NON_EXITED_THREADS (tp)
1276 if (ptid_match (tp->ptid, ptid) && btrace_is_replaying (tp))
1277 return 1;
1278
1279 return 0;
1280 }
1281
1282 /* The to_record_will_replay method of target record-btrace. */
1283
1284 static int
1285 record_btrace_will_replay (struct target_ops *self, ptid_t ptid, int dir)
1286 {
1287 return dir == EXEC_REVERSE || record_btrace_is_replaying (self, ptid);
1288 }
1289
1290 /* The to_xfer_partial method of target record-btrace. */
1291
1292 static enum target_xfer_status
1293 record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
1294 const char *annex, gdb_byte *readbuf,
1295 const gdb_byte *writebuf, ULONGEST offset,
1296 ULONGEST len, ULONGEST *xfered_len)
1297 {
1298 /* Filter out requests that don't make sense during replay. */
1299 if (replay_memory_access == replay_memory_access_read_only
1300 && !record_btrace_generating_corefile
1301 && record_btrace_is_replaying (ops, inferior_ptid))
1302 {
1303 switch (object)
1304 {
1305 case TARGET_OBJECT_MEMORY:
1306 {
1307 struct target_section *section;
1308
1309 /* We do not allow writing memory in general. */
1310 if (writebuf != NULL)
1311 {
1312 *xfered_len = len;
1313 return TARGET_XFER_UNAVAILABLE;
1314 }
1315
1316 /* We allow reading readonly memory. */
1317 section = target_section_by_addr (ops, offset);
1318 if (section != NULL)
1319 {
1320 /* Check if the section we found is readonly. */
1321 if ((bfd_get_section_flags (section->the_bfd_section->owner,
1322 section->the_bfd_section)
1323 & SEC_READONLY) != 0)
1324 {
1325 /* Truncate the request to fit into this section. */
1326 len = std::min (len, section->endaddr - offset);
1327 break;
1328 }
1329 }
1330
1331 *xfered_len = len;
1332 return TARGET_XFER_UNAVAILABLE;
1333 }
1334 }
1335 }
1336
1337 /* Forward the request. */
1338 ops = ops->beneath;
1339 return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
1340 offset, len, xfered_len);
1341 }
1342
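/* For example, while replaying under the default "read-only" policy, a
   memory write reports TARGET_XFER_UNAVAILABLE, and a read is permitted
   (and forwarded to the target beneath) only if it falls within a
   SEC_READONLY section; all other requests are forwarded unchanged.  */
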
1343 /* The to_insert_breakpoint method of target record-btrace. */
1344
1345 static int
1346 record_btrace_insert_breakpoint (struct target_ops *ops,
1347 struct gdbarch *gdbarch,
1348 struct bp_target_info *bp_tgt)
1349 {
1350 const char *old;
1351 int ret;
1352
1353 /* Inserting breakpoints requires accessing memory. Allow it for the
1354 duration of this function. */
1355 old = replay_memory_access;
1356 replay_memory_access = replay_memory_access_read_write;
1357
1358 ret = 0;
1359 TRY
1360 {
1361 ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);
1362 }
1363 CATCH (except, RETURN_MASK_ALL)
1364 {
1365 replay_memory_access = old;
1366 throw_exception (except);
1367 }
1368 END_CATCH
1369 replay_memory_access = old;
1370
1371 return ret;
1372 }
1373
1374 /* The to_remove_breakpoint method of target record-btrace. */
1375
1376 static int
1377 record_btrace_remove_breakpoint (struct target_ops *ops,
1378 struct gdbarch *gdbarch,
1379 struct bp_target_info *bp_tgt,
1380 enum remove_bp_reason reason)
1381 {
1382 const char *old;
1383 int ret;
1384
1385 /* Removing breakpoints requires accessing memory. Allow it for the
1386 duration of this function. */
1387 old = replay_memory_access;
1388 replay_memory_access = replay_memory_access_read_write;
1389
1390 ret = 0;
1391 TRY
1392 {
1393 ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt,
1394 reason);
1395 }
1396 CATCH (except, RETURN_MASK_ALL)
1397 {
1398 replay_memory_access = old;
1399 throw_exception (except);
1400 }
1401 END_CATCH
1402 replay_memory_access = old;
1403
1404 return ret;
1405 }
1406
1407 /* The to_fetch_registers method of target record-btrace. */
1408
1409 static void
1410 record_btrace_fetch_registers (struct target_ops *ops,
1411 struct regcache *regcache, int regno)
1412 {
1413 struct btrace_insn_iterator *replay;
1414 struct thread_info *tp;
1415
1416 tp = find_thread_ptid (regcache_get_ptid (regcache));
1417 gdb_assert (tp != NULL);
1418
1419 replay = tp->btrace.replay;
1420 if (replay != NULL && !record_btrace_generating_corefile)
1421 {
1422 const struct btrace_insn *insn;
1423 struct gdbarch *gdbarch;
1424 int pcreg;
1425
1426 gdbarch = regcache->arch ();
1427 pcreg = gdbarch_pc_regnum (gdbarch);
1428 if (pcreg < 0)
1429 return;
1430
1431 /* We can only provide the PC register. */
1432 if (regno >= 0 && regno != pcreg)
1433 return;
1434
1435 insn = btrace_insn_get (replay);
1436 gdb_assert (insn != NULL);
1437
1438 regcache_raw_supply (regcache, regno, &insn->pc);
1439 }
1440 else
1441 {
1442 struct target_ops *t = ops->beneath;
1443
1444 t->to_fetch_registers (t, regcache, regno);
1445 }
1446 }
1447
1448 /* The to_store_registers method of target record-btrace. */
1449
1450 static void
1451 record_btrace_store_registers (struct target_ops *ops,
1452 struct regcache *regcache, int regno)
1453 {
1454 struct target_ops *t;
1455
1456 if (!record_btrace_generating_corefile
1457 && record_btrace_is_replaying (ops, regcache_get_ptid (regcache)))
1458 error (_("Cannot write registers while replaying."));
1459
1460 gdb_assert (may_write_registers != 0);
1461
1462 t = ops->beneath;
1463 t->to_store_registers (t, regcache, regno);
1464 }
1465
1466 /* The to_prepare_to_store method of target record-btrace. */
1467
1468 static void
1469 record_btrace_prepare_to_store (struct target_ops *ops,
1470 struct regcache *regcache)
1471 {
1472 struct target_ops *t;
1473
1474 if (!record_btrace_generating_corefile
1475 && record_btrace_is_replaying (ops, regcache_get_ptid (regcache)))
1476 return;
1477
1478 t = ops->beneath;
1479 t->to_prepare_to_store (t, regcache);
1480 }
1481
1482 /* The branch trace frame cache. */
1483
1484 struct btrace_frame_cache
1485 {
1486 /* The thread. */
1487 struct thread_info *tp;
1488
1489 /* The frame info. */
1490 struct frame_info *frame;
1491
1492 /* The branch trace function segment. */
1493 const struct btrace_function *bfun;
1494 };
1495
1496 /* A struct btrace_frame_cache hash table indexed by NEXT. */
1497
1498 static htab_t bfcache;
1499
1500 /* hash_f for htab_create_alloc of bfcache. */
1501
1502 static hashval_t
1503 bfcache_hash (const void *arg)
1504 {
1505 const struct btrace_frame_cache *cache
1506 = (const struct btrace_frame_cache *) arg;
1507
1508 return htab_hash_pointer (cache->frame);
1509 }
1510
1511 /* eq_f for htab_create_alloc of bfcache. */
1512
1513 static int
1514 bfcache_eq (const void *arg1, const void *arg2)
1515 {
1516 const struct btrace_frame_cache *cache1
1517 = (const struct btrace_frame_cache *) arg1;
1518 const struct btrace_frame_cache *cache2
1519 = (const struct btrace_frame_cache *) arg2;
1520
1521 return cache1->frame == cache2->frame;
1522 }
1523
1524 /* Create a new btrace frame cache. */
1525
1526 static struct btrace_frame_cache *
1527 bfcache_new (struct frame_info *frame)
1528 {
1529 struct btrace_frame_cache *cache;
1530 void **slot;
1531
1532 cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
1533 cache->frame = frame;
1534
1535 slot = htab_find_slot (bfcache, cache, INSERT);
1536 gdb_assert (*slot == NULL);
1537 *slot = cache;
1538
1539 return cache;
1540 }
1541
1542 /* Extract the branch trace function from a branch trace frame. */
1543
1544 static const struct btrace_function *
1545 btrace_get_frame_function (struct frame_info *frame)
1546 {
1547 const struct btrace_frame_cache *cache;
1548 struct btrace_frame_cache pattern;
1549 void **slot;
1550
1551 pattern.frame = frame;
1552
1553 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1554 if (slot == NULL)
1555 return NULL;
1556
1557 cache = (const struct btrace_frame_cache *) *slot;
1558 return cache->bfun;
1559 }
1560
1561 /* Implement stop_reason method for record_btrace_frame_unwind. */
1562
1563 static enum unwind_stop_reason
1564 record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1565 void **this_cache)
1566 {
1567 const struct btrace_frame_cache *cache;
1568 const struct btrace_function *bfun;
1569
1570 cache = (const struct btrace_frame_cache *) *this_cache;
1571 bfun = cache->bfun;
1572 gdb_assert (bfun != NULL);
1573
1574 if (bfun->up == 0)
1575 return UNWIND_UNAVAILABLE;
1576
1577 return UNWIND_NO_REASON;
1578 }
1579
1580 /* Implement this_id method for record_btrace_frame_unwind. */
1581
1582 static void
1583 record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
1584 struct frame_id *this_id)
1585 {
1586 const struct btrace_frame_cache *cache;
1587 const struct btrace_function *bfun;
1588 struct btrace_call_iterator it;
1589 CORE_ADDR code, special;
1590
1591 cache = (const struct btrace_frame_cache *) *this_cache;
1592
1593 bfun = cache->bfun;
1594 gdb_assert (bfun != NULL);
1595
1596 while (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->prev) != 0)
1597 bfun = btrace_call_get (&it);
1598
1599 code = get_frame_func (this_frame);
1600 special = bfun->number;
1601
1602 *this_id = frame_id_build_unavailable_stack_special (code, special);
1603
1604 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1605 btrace_get_bfun_name (cache->bfun),
1606 core_addr_to_string_nz (this_id->code_addr),
1607 core_addr_to_string_nz (this_id->special_addr));
1608 }
1609
1610 /* Implement prev_register method for record_btrace_frame_unwind. */
1611
1612 static struct value *
1613 record_btrace_frame_prev_register (struct frame_info *this_frame,
1614 void **this_cache,
1615 int regnum)
1616 {
1617 const struct btrace_frame_cache *cache;
1618 const struct btrace_function *bfun, *caller;
1619 struct btrace_call_iterator it;
1620 struct gdbarch *gdbarch;
1621 CORE_ADDR pc;
1622 int pcreg;
1623
1624 gdbarch = get_frame_arch (this_frame);
1625 pcreg = gdbarch_pc_regnum (gdbarch);
1626 if (pcreg < 0 || regnum != pcreg)
1627 throw_error (NOT_AVAILABLE_ERROR,
1628 _("Registers are not available in btrace record history"));
1629
1630 cache = (const struct btrace_frame_cache *) *this_cache;
1631 bfun = cache->bfun;
1632 gdb_assert (bfun != NULL);
1633
1634 if (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->up) == 0)
1635 throw_error (NOT_AVAILABLE_ERROR,
1636 _("No caller in btrace record history"));
1637
1638 caller = btrace_call_get (&it);
1639
1640 if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
1641 pc = caller->insn.front ().pc;
1642 else
1643 {
1644 pc = caller->insn.back ().pc;
1645 pc += gdb_insn_length (gdbarch, pc);
1646 }
1647
1648 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1649 btrace_get_bfun_name (bfun), bfun->level,
1650 core_addr_to_string_nz (pc));
1651
1652 return frame_unwind_got_address (this_frame, regnum, pc);
1653 }
1654
1655 /* Implement sniffer method for record_btrace_frame_unwind. */
1656
1657 static int
1658 record_btrace_frame_sniffer (const struct frame_unwind *self,
1659 struct frame_info *this_frame,
1660 void **this_cache)
1661 {
1662 const struct btrace_function *bfun;
1663 struct btrace_frame_cache *cache;
1664 struct thread_info *tp;
1665 struct frame_info *next;
1666
1667 /* THIS_FRAME does not contain a reference to its thread. */
1668 tp = find_thread_ptid (inferior_ptid);
1669 gdb_assert (tp != NULL);
1670
1671 bfun = NULL;
1672 next = get_next_frame (this_frame);
1673 if (next == NULL)
1674 {
1675 const struct btrace_insn_iterator *replay;
1676
1677 replay = tp->btrace.replay;
1678 if (replay != NULL)
1679 bfun = &replay->btinfo->functions[replay->call_index];
1680 }
1681 else
1682 {
1683 const struct btrace_function *callee;
1684 struct btrace_call_iterator it;
1685
1686 callee = btrace_get_frame_function (next);
1687 if (callee == NULL || (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
1688 return 0;
1689
1690 if (btrace_find_call_by_number (&it, &tp->btrace, callee->up) == 0)
1691 return 0;
1692
1693 bfun = btrace_call_get (&it);
1694 }
1695
1696 if (bfun == NULL)
1697 return 0;
1698
1699 DEBUG ("[frame] sniffed frame for %s on level %d",
1700 btrace_get_bfun_name (bfun), bfun->level);
1701
1702 /* This is our frame. Initialize the frame cache. */
1703 cache = bfcache_new (this_frame);
1704 cache->tp = tp;
1705 cache->bfun = bfun;
1706
1707 *this_cache = cache;
1708 return 1;
1709 }
1710
1711 /* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1712
1713 static int
1714 record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
1715 struct frame_info *this_frame,
1716 void **this_cache)
1717 {
1718 const struct btrace_function *bfun, *callee;
1719 struct btrace_frame_cache *cache;
1720 struct btrace_call_iterator it;
1721 struct frame_info *next;
1722 struct thread_info *tinfo;
1723
1724 next = get_next_frame (this_frame);
1725 if (next == NULL)
1726 return 0;
1727
1728 callee = btrace_get_frame_function (next);
1729 if (callee == NULL)
1730 return 0;
1731
1732 if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1733 return 0;
1734
1735 tinfo = find_thread_ptid (inferior_ptid);
1736 if (btrace_find_call_by_number (&it, &tinfo->btrace, callee->up) == 0)
1737 return 0;
1738
1739 bfun = btrace_call_get (&it);
1740
1741 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1742 btrace_get_bfun_name (bfun), bfun->level);
1743
1744 /* This is our frame. Initialize the frame cache. */
1745 cache = bfcache_new (this_frame);
1746 cache->tp = tinfo;
1747 cache->bfun = bfun;
1748
1749 *this_cache = cache;
1750 return 1;
1751 }
1752
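/* Implement the dealloc_cache method for record_btrace_frame_unwind.
   Remove the frame's entry from BFCACHE; the cache itself lives on the
   frame obstack and is freed together with the frame.  */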
1753 static void
1754 record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
1755 {
1756 struct btrace_frame_cache *cache;
1757 void **slot;
1758
1759 cache = (struct btrace_frame_cache *) this_cache;
1760
1761 slot = htab_find_slot (bfcache, cache, NO_INSERT);
1762 gdb_assert (slot != NULL);
1763
1764 htab_remove_elt (bfcache, cache);
1765 }
1766
1767 /* btrace recording stores neither previous memory content nor stack frame
1768 content. Any unwinding would return erroneous results, as the stack
1769 contents no longer match the changed PC value restored from history.
1770 Therefore this unwinder reports any possibly unwound registers as
1771 <unavailable>. */
1772
1773 const struct frame_unwind record_btrace_frame_unwind =
1774 {
1775 NORMAL_FRAME,
1776 record_btrace_frame_unwind_stop_reason,
1777 record_btrace_frame_this_id,
1778 record_btrace_frame_prev_register,
1779 NULL,
1780 record_btrace_frame_sniffer,
1781 record_btrace_frame_dealloc_cache
1782 };
1783
1784 const struct frame_unwind record_btrace_tailcall_frame_unwind =
1785 {
1786 TAILCALL_FRAME,
1787 record_btrace_frame_unwind_stop_reason,
1788 record_btrace_frame_this_id,
1789 record_btrace_frame_prev_register,
1790 NULL,
1791 record_btrace_tailcall_frame_sniffer,
1792 record_btrace_frame_dealloc_cache
1793 };
1794
1795 /* Implement the to_get_unwinder method. */
1796
1797 static const struct frame_unwind *
1798 record_btrace_to_get_unwinder (struct target_ops *self)
1799 {
1800 return &record_btrace_frame_unwind;
1801 }
1802
1803 /* Implement the to_get_tailcall_unwinder method. */
1804
1805 static const struct frame_unwind *
1806 record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
1807 {
1808 return &record_btrace_tailcall_frame_unwind;
1809 }
1810
1811 /* Return a human-readable string for FLAG. */
1812
1813 static const char *
1814 btrace_thread_flag_to_str (enum btrace_thread_flag flag)
1815 {
1816 switch (flag)
1817 {
1818 case BTHR_STEP:
1819 return "step";
1820
1821 case BTHR_RSTEP:
1822 return "reverse-step";
1823
1824 case BTHR_CONT:
1825 return "cont";
1826
1827 case BTHR_RCONT:
1828 return "reverse-cont";
1829
1830 case BTHR_STOP:
1831 return "stop";
1832 }
1833
1834 return "<invalid>";
1835 }
1836
1837 /* Indicate that TP should be resumed according to FLAG. */
1838
1839 static void
1840 record_btrace_resume_thread (struct thread_info *tp,
1841 enum btrace_thread_flag flag)
1842 {
1843 struct btrace_thread_info *btinfo;
1844
1845 DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp),
1846 target_pid_to_str (tp->ptid), flag, btrace_thread_flag_to_str (flag));
1847
1848 btinfo = &tp->btrace;
1849
1850 /* Fetch the latest branch trace. */
1851 btrace_fetch (tp);
1852
1853 /* A resume request overwrites a preceding resume or stop request. */
1854 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
1855 btinfo->flags |= flag;
1856 }
1857
1858 /* Get the current frame for TP. */
1859
1860 static struct frame_info *
1861 get_thread_current_frame (struct thread_info *tp)
1862 {
1863 struct frame_info *frame;
1864 ptid_t old_inferior_ptid;
1865 int executing;
1866
1867 /* Set INFERIOR_PTID, which is implicitly used by get_current_frame. */
1868 old_inferior_ptid = inferior_ptid;
1869 inferior_ptid = tp->ptid;
1870
1871 /* Clear the executing flag to allow changes to the current frame.
1872 We are not actually running, yet. We just started a reverse execution
1873 command or a record goto command.
1874 For the latter, EXECUTING is false and this has no effect.
1875 For the former, EXECUTING is true and we're in to_wait, about to
1876 move the thread. Since we need to recompute the stack, we temporarily
1877 set EXECUTING to false. */
1878 executing = is_executing (inferior_ptid);
1879 set_executing (inferior_ptid, 0);
1880
1881 frame = NULL;
1882 TRY
1883 {
1884 frame = get_current_frame ();
1885 }
1886 CATCH (except, RETURN_MASK_ALL)
1887 {
1888 /* Restore the previous execution state. */
1889 set_executing (inferior_ptid, executing);
1890
1891 /* Restore the previous inferior_ptid. */
1892 inferior_ptid = old_inferior_ptid;
1893
1894 throw_exception (except);
1895 }
1896 END_CATCH
1897
1898 /* Restore the previous execution state. */
1899 set_executing (inferior_ptid, executing);
1900
1901 /* Restore the previous inferior_ptid. */
1902 inferior_ptid = old_inferior_ptid;
1903
1904 return frame;
1905 }
1906
1907 /* Start replaying a thread. */
1908
1909 static struct btrace_insn_iterator *
1910 record_btrace_start_replaying (struct thread_info *tp)
1911 {
1912 struct btrace_insn_iterator *replay;
1913 struct btrace_thread_info *btinfo;
1914
1915 btinfo = &tp->btrace;
1916 replay = NULL;
1917
1918 /* We can't start replaying without trace. */
1919 if (btinfo->functions.empty ())
1920 return NULL;
1921
1922 /* GDB stores the current frame_id when stepping in order to detect steps
1923 into subroutines.
1924 Since frames are computed differently when we're replaying, we need to
1925 recompute those stored frames and fix them up so we can still detect
1926 subroutines after we started replaying. */
1927 TRY
1928 {
1929 struct frame_info *frame;
1930 struct frame_id frame_id;
1931 int upd_step_frame_id, upd_step_stack_frame_id;
1932
1933 /* The current frame without replaying - computed via normal unwind. */
1934 frame = get_thread_current_frame (tp);
1935 frame_id = get_frame_id (frame);
1936
1937 /* Check if we need to update any stepping-related frame id's. */
1938 upd_step_frame_id = frame_id_eq (frame_id,
1939 tp->control.step_frame_id);
1940 upd_step_stack_frame_id = frame_id_eq (frame_id,
1941 tp->control.step_stack_frame_id);
1942
1943 /* We start replaying at the end of the branch trace. This corresponds
1944 to the current instruction. */
1945 replay = XNEW (struct btrace_insn_iterator);
1946 btrace_insn_end (replay, btinfo);
1947
1948 /* Skip gaps at the end of the trace. */
1949 while (btrace_insn_get (replay) == NULL)
1950 {
1951 unsigned int steps;
1952
1953 steps = btrace_insn_prev (replay, 1);
1954 if (steps == 0)
1955 error (_("No trace."));
1956 }
1957
1958 /* We're not replaying, yet. */
1959 gdb_assert (btinfo->replay == NULL);
1960 btinfo->replay = replay;
1961
1962 /* Make sure we're not using any stale registers. */
1963 registers_changed_ptid (tp->ptid);
1964
1965 /* The current frame with replaying - computed via btrace unwind. */
1966 frame = get_thread_current_frame (tp);
1967 frame_id = get_frame_id (frame);
1968
1969 /* Replace stepping related frames where necessary. */
1970 if (upd_step_frame_id)
1971 tp->control.step_frame_id = frame_id;
1972 if (upd_step_stack_frame_id)
1973 tp->control.step_stack_frame_id = frame_id;
1974 }
1975 CATCH (except, RETURN_MASK_ALL)
1976 {
1977 xfree (btinfo->replay);
1978 btinfo->replay = NULL;
1979
1980 registers_changed_ptid (tp->ptid);
1981
1982 throw_exception (except);
1983 }
1984 END_CATCH
1985
1986 return replay;
1987 }
1988
1989 /* Stop replaying a thread. */
1990
1991 static void
1992 record_btrace_stop_replaying (struct thread_info *tp)
1993 {
1994 struct btrace_thread_info *btinfo;
1995
1996 btinfo = &tp->btrace;
1997
1998 xfree (btinfo->replay);
1999 btinfo->replay = NULL;
2000
2001 /* Make sure we're not leaving any stale registers. */
2002 registers_changed_ptid (tp->ptid);
2003 }
2004
2005 /* Stop replaying TP if it is at the end of its execution history. */
2006
2007 static void
2008 record_btrace_stop_replaying_at_end (struct thread_info *tp)
2009 {
2010 struct btrace_insn_iterator *replay, end;
2011 struct btrace_thread_info *btinfo;
2012
2013 btinfo = &tp->btrace;
2014 replay = btinfo->replay;
2015
2016 if (replay == NULL)
2017 return;
2018
2019 btrace_insn_end (&end, btinfo);
2020
2021 if (btrace_insn_cmp (replay, &end) == 0)
2022 record_btrace_stop_replaying (tp);
2023 }
2024
2025 /* The to_resume method of target record-btrace. */
2026
2027 static void
2028 record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
2029 enum gdb_signal signal)
2030 {
2031 struct thread_info *tp;
2032 enum btrace_thread_flag flag, cflag;
2033
2034 DEBUG ("resume %s: %s%s", target_pid_to_str (ptid),
2035 execution_direction == EXEC_REVERSE ? "reverse-" : "",
2036 step ? "step" : "cont");
2037
2038 /* Store the execution direction of the last resume.
2039
2040 If there is more than one to_resume call, we have to rely on infrun
2041 to not change the execution direction in-between. */
2042 record_btrace_resume_exec_dir = execution_direction;
2043
2044 /* As long as we're not replaying, just forward the request.
2045
2046 For non-stop targets this means that no thread is replaying. In order to
2047 make progress, we may need to explicitly move replaying threads to the end
2048 of their execution history. */
2049 if ((execution_direction != EXEC_REVERSE)
2050 && !record_btrace_is_replaying (ops, minus_one_ptid))
2051 {
2052 ops = ops->beneath;
2053 ops->to_resume (ops, ptid, step, signal);
2054 return;
2055 }
2056
2057 /* Compute the btrace thread flag for the requested move. */
2058 if (execution_direction == EXEC_REVERSE)
2059 {
2060 flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;
2061 cflag = BTHR_RCONT;
2062 }
2063 else
2064 {
2065 flag = step == 0 ? BTHR_CONT : BTHR_STEP;
2066 cflag = BTHR_CONT;
2067 }
2068
2069 /* We just indicate the resume intent here. The actual stepping happens in
2070 record_btrace_wait below.
2071
2072 For all-stop targets, we only step INFERIOR_PTID and continue others. */
2073 if (!target_is_non_stop_p ())
2074 {
2075 gdb_assert (ptid_match (inferior_ptid, ptid));
2076
2077 ALL_NON_EXITED_THREADS (tp)
2078 if (ptid_match (tp->ptid, ptid))
2079 {
2080 if (ptid_match (tp->ptid, inferior_ptid))
2081 record_btrace_resume_thread (tp, flag);
2082 else
2083 record_btrace_resume_thread (tp, cflag);
2084 }
2085 }
2086 else
2087 {
2088 ALL_NON_EXITED_THREADS (tp)
2089 if (ptid_match (tp->ptid, ptid))
2090 record_btrace_resume_thread (tp, flag);
2091 }
2092
2093 /* Async support. */
2094 if (target_can_async_p ())
2095 {
2096 target_async (1);
2097 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2098 }
2099 }
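/* Illustrative summary of the flag computation above (a reader's table,
   not upstream text):

     direction  step  resumed thread  other threads (all-stop only)
     forward    1     BTHR_STEP       BTHR_CONT
     forward    0     BTHR_CONT       BTHR_CONT
     reverse    1     BTHR_RSTEP      BTHR_RCONT
     reverse    0     BTHR_RCONT      BTHR_RCONT

   The flags only record intent; record_btrace_wait performs the moves.  */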
2100
2101 /* The to_commit_resume method of target record-btrace. */
2102
2103 static void
2104 record_btrace_commit_resume (struct target_ops *ops)
2105 {
2106 if ((execution_direction != EXEC_REVERSE)
2107 && !record_btrace_is_replaying (ops, minus_one_ptid))
2108 ops->beneath->to_commit_resume (ops->beneath);
2109 }
2110
2111 /* Cancel resuming TP. */
2112
2113 static void
2114 record_btrace_cancel_resume (struct thread_info *tp)
2115 {
2116 enum btrace_thread_flag flags;
2117
2118 flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
2119 if (flags == 0)
2120 return;
2121
2122 DEBUG ("cancel resume thread %s (%s): %x (%s)",
2123 print_thread_id (tp),
2124 target_pid_to_str (tp->ptid), flags,
2125 btrace_thread_flag_to_str (flags));
2126
2127 tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
2128 record_btrace_stop_replaying_at_end (tp);
2129 }
2130
2131 /* Return a target_waitstatus indicating that we ran out of history. */
2132
2133 static struct target_waitstatus
2134 btrace_step_no_history (void)
2135 {
2136 struct target_waitstatus status;
2137
2138 status.kind = TARGET_WAITKIND_NO_HISTORY;
2139
2140 return status;
2141 }
2142
2143 /* Return a target_waitstatus indicating that a step finished. */
2144
2145 static struct target_waitstatus
2146 btrace_step_stopped (void)
2147 {
2148 struct target_waitstatus status;
2149
2150 status.kind = TARGET_WAITKIND_STOPPED;
2151 status.value.sig = GDB_SIGNAL_TRAP;
2152
2153 return status;
2154 }
2155
2156 /* Return a target_waitstatus indicating that a thread was stopped as
2157 requested. */
2158
2159 static struct target_waitstatus
2160 btrace_step_stopped_on_request (void)
2161 {
2162 struct target_waitstatus status;
2163
2164 status.kind = TARGET_WAITKIND_STOPPED;
2165 status.value.sig = GDB_SIGNAL_0;
2166
2167 return status;
2168 }
2169
2170 /* Return a target_waitstatus indicating a spurious stop. */
2171
2172 static struct target_waitstatus
2173 btrace_step_spurious (void)
2174 {
2175 struct target_waitstatus status;
2176
2177 status.kind = TARGET_WAITKIND_SPURIOUS;
2178
2179 return status;
2180 }
2181
2182 /* Return a target_waitstatus indicating that the thread was not resumed. */
2183
2184 static struct target_waitstatus
2185 btrace_step_no_resumed (void)
2186 {
2187 struct target_waitstatus status;
2188
2189 status.kind = TARGET_WAITKIND_NO_RESUMED;
2190
2191 return status;
2192 }
2193
2194 /* Return a target_waitstatus indicating that we should wait again. */
2195
2196 static struct target_waitstatus
2197 btrace_step_again (void)
2198 {
2199 struct target_waitstatus status;
2200
2201 status.kind = TARGET_WAITKIND_IGNORE;
2202
2203 return status;
2204 }
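/* Quick reference for the helpers above (a reader's summary, not upstream
   text): NO_HISTORY ends replay stepping, STOPPED with GDB_SIGNAL_TRAP
   reports a finished step, STOPPED with GDB_SIGNAL_0 reports a stop on
   request, SPURIOUS tells infrun to keep going, NO_RESUMED means no thread
   was resumed, and IGNORE makes record_btrace_wait step the same thread
   again.  */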
2205
2206 /* Clear the record histories. */
2207
2208 static void
2209 record_btrace_clear_histories (struct btrace_thread_info *btinfo)
2210 {
2211 xfree (btinfo->insn_history);
2212 xfree (btinfo->call_history);
2213
2214 btinfo->insn_history = NULL;
2215 btinfo->call_history = NULL;
2216 }
2217
2218 /* Check whether TP's current replay position is at a breakpoint. */
2219
2220 static int
2221 record_btrace_replay_at_breakpoint (struct thread_info *tp)
2222 {
2223 struct btrace_insn_iterator *replay;
2224 struct btrace_thread_info *btinfo;
2225 const struct btrace_insn *insn;
2226 struct inferior *inf;
2227
2228 btinfo = &tp->btrace;
2229 replay = btinfo->replay;
2230
2231 if (replay == NULL)
2232 return 0;
2233
2234 insn = btrace_insn_get (replay);
2235 if (insn == NULL)
2236 return 0;
2237
2238 inf = find_inferior_ptid (tp->ptid);
2239 if (inf == NULL)
2240 return 0;
2241
2242 return record_check_stopped_by_breakpoint (inf->aspace, insn->pc,
2243 &btinfo->stop_reason);
2244 }
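/* Note (a reader's observation, not upstream text):
   record_check_stopped_by_breakpoint also latches the stop reason into
   BTINFO->stop_reason; the to_stopped_by_sw_breakpoint and
   to_stopped_by_hw_breakpoint methods further down report that cached
   value back to infrun while replaying.  */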
2245
2246 /* Step one instruction in forward direction. */
2247
2248 static struct target_waitstatus
2249 record_btrace_single_step_forward (struct thread_info *tp)
2250 {
2251 struct btrace_insn_iterator *replay, end, start;
2252 struct btrace_thread_info *btinfo;
2253
2254 btinfo = &tp->btrace;
2255 replay = btinfo->replay;
2256
2257 /* We're done if we're not replaying. */
2258 if (replay == NULL)
2259 return btrace_step_no_history ();
2260
2261 /* Check if we're stepping a breakpoint. */
2262 if (record_btrace_replay_at_breakpoint (tp))
2263 return btrace_step_stopped ();
2264
2265 /* Skip gaps during replay. If we end up at a gap (at the end of the trace),
2266 jump back to the instruction at which we started. */
2267 start = *replay;
2268 do
2269 {
2270 unsigned int steps;
2271
2272 /* We will bail out here if we continue stepping after reaching the end
2273 of the execution history. */
2274 steps = btrace_insn_next (replay, 1);
2275 if (steps == 0)
2276 {
2277 *replay = start;
2278 return btrace_step_no_history ();
2279 }
2280 }
2281 while (btrace_insn_get (replay) == NULL);
2282
2283 /* Determine the end of the instruction trace. */
2284 btrace_insn_end (&end, btinfo);
2285
2286 /* The execution trace contains (and ends with) the current instruction.
2287 This instruction has not been executed yet, so the trace really ends
2288 one instruction earlier. */
2289 if (btrace_insn_cmp (replay, &end) == 0)
2290 return btrace_step_no_history ();
2291
2292 return btrace_step_spurious ();
2293 }
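/* Worked example for the loop above (illustrative, not upstream text):
   given the recorded sequence  insn[1], <gap>, insn[2], <end>, stepping
   forward from insn[1] skips the gap and stops at insn[2]; stepping
   forward from insn[2] reaches the end iterator - which names the not yet
   executed current instruction - and reports no-history.  */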
2294
2295 /* Step one instruction in backward direction. */
2296
2297 static struct target_waitstatus
2298 record_btrace_single_step_backward (struct thread_info *tp)
2299 {
2300 struct btrace_insn_iterator *replay, start;
2301 struct btrace_thread_info *btinfo;
2302
2303 btinfo = &tp->btrace;
2304 replay = btinfo->replay;
2305
2306 /* Start replaying if we're not already doing so. */
2307 if (replay == NULL)
2308 replay = record_btrace_start_replaying (tp);
2309
2310 /* If we can't step any further, we reached the beginning of the execution
2311 history. Skip gaps during replay. If we end up at a gap (at the
2312 beginning of the trace), jump back to the instruction at which we started. */
2313 start = *replay;
2314 do
2315 {
2316 unsigned int steps;
2317
2318 steps = btrace_insn_prev (replay, 1);
2319 if (steps == 0)
2320 {
2321 *replay = start;
2322 return btrace_step_no_history ();
2323 }
2324 }
2325 while (btrace_insn_get (replay) == NULL);
2326
2327 /* Check if we're stepping a breakpoint.
2328
2329 For reverse-stepping, this check is after the step. There is logic in
2330 infrun.c that handles reverse-stepping separately. See, for example,
2331 proceed and adjust_pc_after_break.
2332
2333 This code assumes that for reverse-stepping, PC points to the last
2334 de-executed instruction, whereas for forward-stepping PC points to the
2335 next to-be-executed instruction. */
2336 if (record_btrace_replay_at_breakpoint (tp))
2337 return btrace_step_stopped ();
2338
2339 return btrace_step_spurious ();
2340 }
2341
2342 /* Step a single thread. */
2343
2344 static struct target_waitstatus
2345 record_btrace_step_thread (struct thread_info *tp)
2346 {
2347 struct btrace_thread_info *btinfo;
2348 struct target_waitstatus status;
2349 enum btrace_thread_flag flags;
2350
2351 btinfo = &tp->btrace;
2352
2353 flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
2354 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
2355
2356 DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp),
2357 target_pid_to_str (tp->ptid), flags,
2358 btrace_thread_flag_to_str (flags));
2359
2360 /* We can't step without an execution history. */
2361 if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
2362 return btrace_step_no_history ();
2363
2364 switch (flags)
2365 {
2366 default:
2367 internal_error (__FILE__, __LINE__, _("invalid stepping type."));
2368
2369 case BTHR_STOP:
2370 return btrace_step_stopped_on_request ();
2371
2372 case BTHR_STEP:
2373 status = record_btrace_single_step_forward (tp);
2374 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2375 break;
2376
2377 return btrace_step_stopped ();
2378
2379 case BTHR_RSTEP:
2380 status = record_btrace_single_step_backward (tp);
2381 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2382 break;
2383
2384 return btrace_step_stopped ();
2385
2386 case BTHR_CONT:
2387 status = record_btrace_single_step_forward (tp);
2388 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2389 break;
2390
2391 btinfo->flags |= flags;
2392 return btrace_step_again ();
2393
2394 case BTHR_RCONT:
2395 status = record_btrace_single_step_backward (tp);
2396 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2397 break;
2398
2399 btinfo->flags |= flags;
2400 return btrace_step_again ();
2401 }
2402
2403 /* We keep threads moving at the end of their execution history. The to_wait
2404 method will stop the thread for which the event is reported. */
2405 if (status.kind == TARGET_WAITKIND_NO_HISTORY)
2406 btinfo->flags |= flags;
2407
2408 return status;
2409 }
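/* Control-flow sketch for BTHR_CONT and BTHR_RCONT (illustrative, not
   upstream text): each call performs a single step; on a spurious status
   the flag is re-armed and btrace_step_again is returned, so
   record_btrace_wait keeps calling back until a breakpoint stop or the
   end of the history.  */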
2410
2411 /* A vector of threads. */
2412
2413 typedef struct thread_info * tp_t;
2414 DEF_VEC_P (tp_t);
2415
2416 /* Announce further events if necessary. */
2417
2418 static void
2419 record_btrace_maybe_mark_async_event
2420 (const std::vector<thread_info *> &moving,
2421 const std::vector<thread_info *> &no_history)
2422 {
2423 bool more_moving = !moving.empty ();
2424 bool more_no_history = !no_history.empty ();
2425
2426 if (!more_moving && !more_no_history)
2427 return;
2428
2429 if (more_moving)
2430 DEBUG ("movers pending");
2431
2432 if (more_no_history)
2433 DEBUG ("no-history pending");
2434
2435 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2436 }
2437
2438 /* The to_wait method of target record-btrace. */
2439
2440 static ptid_t
2441 record_btrace_wait (struct target_ops *ops, ptid_t ptid,
2442 struct target_waitstatus *status, int options)
2443 {
2444 std::vector<thread_info *> moving;
2445 std::vector<thread_info *> no_history;
2446
2447 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);
2448
2449 /* As long as we're not replaying, just forward the request. */
2450 if ((execution_direction != EXEC_REVERSE)
2451 && !record_btrace_is_replaying (ops, minus_one_ptid))
2452 {
2453 ops = ops->beneath;
2454 return ops->to_wait (ops, ptid, status, options);
2455 }
2456
2457 /* Keep a work list of moving threads. */
2458 {
2459 thread_info *tp;
2460
2461 ALL_NON_EXITED_THREADS (tp)
2462 {
2463 if (ptid_match (tp->ptid, ptid)
2464 && ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0))
2465 moving.push_back (tp);
2466 }
2467 }
2468
2469 if (moving.empty ())
2470 {
2471 *status = btrace_step_no_resumed ();
2472
2473 DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid),
2474 target_waitstatus_to_string (status).c_str ());
2475
2476 return null_ptid;
2477 }
2478
2479 /* Step moving threads one by one, one step each, until either one thread
2480 reports an event or we run out of threads to step.
2481
2482 When stepping more than one thread, chances are that some threads reach
2483 the end of their execution history earlier than others. If we reported
2484 this immediately, all-stop on top of non-stop would stop all threads and
2485 resume the same threads next time. And we would report the same thread
2486 having reached the end of its execution history again.
2487
2488 In the worst case, this would starve the other threads. But even if other
2489 threads would be allowed to make progress, this would result in far too
2490 many intermediate stops.
2491
2492 We therefore delay the reporting of "no execution history" until we have
2493 nothing else to report. By this time, all threads should have moved to
2494 either the beginning or the end of their execution history. There will
2495 be a single user-visible stop. */
2496 struct thread_info *eventing = NULL;
2497 while ((eventing == NULL) && !moving.empty ())
2498 {
2499 for (unsigned int ix = 0; eventing == NULL && ix < moving.size ();)
2500 {
2501 thread_info *tp = moving[ix];
2502
2503 *status = record_btrace_step_thread (tp);
2504
2505 switch (status->kind)
2506 {
2507 case TARGET_WAITKIND_IGNORE:
2508 ix++;
2509 break;
2510
2511 case TARGET_WAITKIND_NO_HISTORY:
2512 no_history.push_back (ordered_remove (moving, ix));
2513 break;
2514
2515 default:
2516 eventing = unordered_remove (moving, ix);
2517 break;
2518 }
2519 }
2520 }
2521
2522 if (eventing == NULL)
2523 {
2524 /* We started with at least one moving thread. This thread must have
2525 either stopped or reached the end of its execution history.
2526
2527 In the former case, EVENTING must not be NULL.
2528 In the latter case, NO_HISTORY must not be empty. */
2529 gdb_assert (!no_history.empty ());
2530
2531 /* We kept threads moving at the end of their execution history. Stop
2532 EVENTING now that we are going to report its stop. */
2533 eventing = unordered_remove (no_history, 0);
2534 eventing->btrace.flags &= ~BTHR_MOVE;
2535
2536 *status = btrace_step_no_history ();
2537 }
2538
2539 gdb_assert (eventing != NULL);
2540
2541 /* We kept threads replaying at the end of their execution history. Stop
2542 replaying EVENTING now that we are going to report its stop. */
2543 record_btrace_stop_replaying_at_end (eventing);
2544
2545 /* Stop all other threads. */
2546 if (!target_is_non_stop_p ())
2547 {
2548 thread_info *tp;
2549
2550 ALL_NON_EXITED_THREADS (tp)
2551 record_btrace_cancel_resume (tp);
2552 }
2553
2554 /* In async mode, we need to announce further events. */
2555 if (target_is_async_p ())
2556 record_btrace_maybe_mark_async_event (moving, no_history);
2557
2558 /* Start record histories anew from the current position. */
2559 record_btrace_clear_histories (&eventing->btrace);
2560
2561 /* We moved the replay position but did not update registers. */
2562 registers_changed_ptid (eventing->ptid);
2563
2564 DEBUG ("wait ended by thread %s (%s): %s",
2565 print_thread_id (eventing),
2566 target_pid_to_str (eventing->ptid),
2567 target_waitstatus_to_string (status).c_str ());
2568
2569 return eventing->ptid;
2570 }
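/* Walkthrough (illustrative, not upstream text): with two replaying
   threads A and B resumed with BTHR_RCONT, the loop above interleaves
   single steps A, B, A, B, ...  If A hits a breakpoint it becomes
   EVENTING and is reported; if instead both run out of history, they
   accumulate in NO_HISTORY and one is reported with
   TARGET_WAITKIND_NO_HISTORY.  In non-stop mode the other keeps its
   flags and is announced via the async event handler; in all-stop mode
   the remaining threads are cancelled above.  */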
2571
2572 /* The to_stop method of target record-btrace. */
2573
2574 static void
2575 record_btrace_stop (struct target_ops *ops, ptid_t ptid)
2576 {
2577 DEBUG ("stop %s", target_pid_to_str (ptid));
2578
2579 /* As long as we're not replaying, just forward the request. */
2580 if ((execution_direction != EXEC_REVERSE)
2581 && !record_btrace_is_replaying (ops, minus_one_ptid))
2582 {
2583 ops = ops->beneath;
2584 ops->to_stop (ops, ptid);
2585 }
2586 else
2587 {
2588 struct thread_info *tp;
2589
2590 ALL_NON_EXITED_THREADS (tp)
2591 if (ptid_match (tp->ptid, ptid))
2592 {
2593 tp->btrace.flags &= ~BTHR_MOVE;
2594 tp->btrace.flags |= BTHR_STOP;
2595 }
2596 }
2597 }
2598
2599 /* The to_can_execute_reverse method of target record-btrace. */
2600
2601 static int
2602 record_btrace_can_execute_reverse (struct target_ops *self)
2603 {
2604 return 1;
2605 }
2606
2607 /* The to_stopped_by_sw_breakpoint method of target record-btrace. */
2608
2609 static int
2610 record_btrace_stopped_by_sw_breakpoint (struct target_ops *ops)
2611 {
2612 if (record_btrace_is_replaying (ops, minus_one_ptid))
2613 {
2614 struct thread_info *tp = inferior_thread ();
2615
2616 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2617 }
2618
2619 return ops->beneath->to_stopped_by_sw_breakpoint (ops->beneath);
2620 }
2621
2622 /* The to_supports_stopped_by_sw_breakpoint method of target
2623 record-btrace. */
2624
2625 static int
2626 record_btrace_supports_stopped_by_sw_breakpoint (struct target_ops *ops)
2627 {
2628 if (record_btrace_is_replaying (ops, minus_one_ptid))
2629 return 1;
2630
2631 return ops->beneath->to_supports_stopped_by_sw_breakpoint (ops->beneath);
2632 }
2633
2634 /* The to_stopped_by_hw_breakpoint method of target record-btrace. */
2635
2636 static int
2637 record_btrace_stopped_by_hw_breakpoint (struct target_ops *ops)
2638 {
2639 if (record_btrace_is_replaying (ops, minus_one_ptid))
2640 {
2641 struct thread_info *tp = inferior_thread ();
2642
2643 return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2644 }
2645
2646 return ops->beneath->to_stopped_by_hw_breakpoint (ops->beneath);
2647 }
2648
2649 /* The to_supports_stopped_by_hw_breakpoint method of target
2650 record-btrace. */
2651
2652 static int
2653 record_btrace_supports_stopped_by_hw_breakpoint (struct target_ops *ops)
2654 {
2655 if (record_btrace_is_replaying (ops, minus_one_ptid))
2656 return 1;
2657
2658 return ops->beneath->to_supports_stopped_by_hw_breakpoint (ops->beneath);
2659 }
2660
2661 /* The to_update_thread_list method of target record-btrace. */
2662
2663 static void
2664 record_btrace_update_thread_list (struct target_ops *ops)
2665 {
2666 /* We don't add or remove threads during replay. */
2667 if (record_btrace_is_replaying (ops, minus_one_ptid))
2668 return;
2669
2670 /* Forward the request. */
2671 ops = ops->beneath;
2672 ops->to_update_thread_list (ops);
2673 }
2674
2675 /* The to_thread_alive method of target record-btrace. */
2676
2677 static int
2678 record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
2679 {
2680 /* We don't add or remove threads during replay. */
2681 if (record_btrace_is_replaying (ops, minus_one_ptid))
2682 return find_thread_ptid (ptid) != NULL;
2683
2684 /* Forward the request. */
2685 ops = ops->beneath;
2686 return ops->to_thread_alive (ops, ptid);
2687 }
2688
2689 /* Set the replay branch trace instruction iterator. If IT is NULL, replay
2690 is stopped. */
2691
2692 static void
2693 record_btrace_set_replay (struct thread_info *tp,
2694 const struct btrace_insn_iterator *it)
2695 {
2696 struct btrace_thread_info *btinfo;
2697
2698 btinfo = &tp->btrace;
2699
2700 if (it == NULL)
2701 record_btrace_stop_replaying (tp);
2702 else
2703 {
2704 if (btinfo->replay == NULL)
2705 record_btrace_start_replaying (tp);
2706 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
2707 return;
2708
2709 *btinfo->replay = *it;
2710 registers_changed_ptid (tp->ptid);
2711 }
2712
2713 /* Start anew from the new replay position. */
2714 record_btrace_clear_histories (btinfo);
2715
2716 stop_pc = regcache_read_pc (get_current_regcache ());
2717 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
2718 }
2719
2720 /* The to_goto_record_begin method of target record-btrace. */
2721
2722 static void
2723 record_btrace_goto_begin (struct target_ops *self)
2724 {
2725 struct thread_info *tp;
2726 struct btrace_insn_iterator begin;
2727
2728 tp = require_btrace_thread ();
2729
2730 btrace_insn_begin (&begin, &tp->btrace);
2731
2732 /* Skip gaps at the beginning of the trace. */
2733 while (btrace_insn_get (&begin) == NULL)
2734 {
2735 unsigned int steps;
2736
2737 steps = btrace_insn_next (&begin, 1);
2738 if (steps == 0)
2739 error (_("No trace."));
2740 }
2741
2742 record_btrace_set_replay (tp, &begin);
2743 }
2744
2745 /* The to_goto_record_end method of target record-btrace. */
2746
2747 static void
2748 record_btrace_goto_end (struct target_ops *ops)
2749 {
2750 struct thread_info *tp;
2751
2752 tp = require_btrace_thread ();
2753
2754 record_btrace_set_replay (tp, NULL);
2755 }
2756
2757 /* The to_goto_record method of target record-btrace. */
2758
2759 static void
2760 record_btrace_goto (struct target_ops *self, ULONGEST insn)
2761 {
2762 struct thread_info *tp;
2763 struct btrace_insn_iterator it;
2764 unsigned int number;
2765 int found;
2766
2767 number = insn;
2768
2769 /* Check for wrap-arounds. */
2770 if (number != insn)
2771 error (_("Instruction number out of range."));
2772
2773 tp = require_btrace_thread ();
2774
2775 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
2776
2777 /* Check if the instruction could not be found or is a gap. */
2778 if (found == 0 || btrace_insn_get (&it) == NULL)
2779 error (_("No such instruction."));
2780
2781 record_btrace_set_replay (tp, &it);
2782 }
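/* Usage sketch (illustrative, not upstream text):

     (gdb) record goto 42

   looks up instruction number 42 - the numbering shown by
   "record instruction-history" - and moves the replay position there,
   erroring out for gaps or out-of-range numbers as coded above.  */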
2783
2784 /* The to_record_stop_replaying method of target record-btrace. */
2785
2786 static void
2787 record_btrace_stop_replaying_all (struct target_ops *self)
2788 {
2789 struct thread_info *tp;
2790
2791 ALL_NON_EXITED_THREADS (tp)
2792 record_btrace_stop_replaying (tp);
2793 }
2794
2795 /* The to_execution_direction target method. */
2796
2797 static enum exec_direction_kind
2798 record_btrace_execution_direction (struct target_ops *self)
2799 {
2800 return record_btrace_resume_exec_dir;
2801 }
2802
2803 /* The to_prepare_to_generate_core target method. */
2804
2805 static void
2806 record_btrace_prepare_to_generate_core (struct target_ops *self)
2807 {
2808 record_btrace_generating_corefile = 1;
2809 }
2810
2811 /* The to_done_generating_core target method. */
2812
2813 static void
2814 record_btrace_done_generating_core (struct target_ops *self)
2815 {
2816 record_btrace_generating_corefile = 0;
2817 }
2818
2819 /* Initialize the record-btrace target ops. */
2820
2821 static void
2822 init_record_btrace_ops (void)
2823 {
2824 struct target_ops *ops;
2825
2826 ops = &record_btrace_ops;
2827 ops->to_shortname = "record-btrace";
2828 ops->to_longname = "Branch tracing target";
2829 ops->to_doc = "Collect control-flow trace and provide the execution history.";
2830 ops->to_open = record_btrace_open;
2831 ops->to_close = record_btrace_close;
2832 ops->to_async = record_btrace_async;
2833 ops->to_detach = record_detach;
2834 ops->to_disconnect = record_btrace_disconnect;
2835 ops->to_mourn_inferior = record_mourn_inferior;
2836 ops->to_kill = record_kill;
2837 ops->to_stop_recording = record_btrace_stop_recording;
2838 ops->to_info_record = record_btrace_info;
2839 ops->to_insn_history = record_btrace_insn_history;
2840 ops->to_insn_history_from = record_btrace_insn_history_from;
2841 ops->to_insn_history_range = record_btrace_insn_history_range;
2842 ops->to_call_history = record_btrace_call_history;
2843 ops->to_call_history_from = record_btrace_call_history_from;
2844 ops->to_call_history_range = record_btrace_call_history_range;
2845 ops->to_record_method = record_btrace_record_method;
2846 ops->to_record_is_replaying = record_btrace_is_replaying;
2847 ops->to_record_will_replay = record_btrace_will_replay;
2848 ops->to_record_stop_replaying = record_btrace_stop_replaying_all;
2849 ops->to_xfer_partial = record_btrace_xfer_partial;
2850 ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
2851 ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
2852 ops->to_fetch_registers = record_btrace_fetch_registers;
2853 ops->to_store_registers = record_btrace_store_registers;
2854 ops->to_prepare_to_store = record_btrace_prepare_to_store;
2855 ops->to_get_unwinder = &record_btrace_to_get_unwinder;
2856 ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;
2857 ops->to_resume = record_btrace_resume;
2858 ops->to_commit_resume = record_btrace_commit_resume;
2859 ops->to_wait = record_btrace_wait;
2860 ops->to_stop = record_btrace_stop;
2861 ops->to_update_thread_list = record_btrace_update_thread_list;
2862 ops->to_thread_alive = record_btrace_thread_alive;
2863 ops->to_goto_record_begin = record_btrace_goto_begin;
2864 ops->to_goto_record_end = record_btrace_goto_end;
2865 ops->to_goto_record = record_btrace_goto;
2866 ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
2867 ops->to_stopped_by_sw_breakpoint = record_btrace_stopped_by_sw_breakpoint;
2868 ops->to_supports_stopped_by_sw_breakpoint
2869 = record_btrace_supports_stopped_by_sw_breakpoint;
2870 ops->to_stopped_by_hw_breakpoint = record_btrace_stopped_by_hw_breakpoint;
2871 ops->to_supports_stopped_by_hw_breakpoint
2872 = record_btrace_supports_stopped_by_hw_breakpoint;
2873 ops->to_execution_direction = record_btrace_execution_direction;
2874 ops->to_prepare_to_generate_core = record_btrace_prepare_to_generate_core;
2875 ops->to_done_generating_core = record_btrace_done_generating_core;
2876 ops->to_stratum = record_stratum;
2877 ops->to_magic = OPS_MAGIC;
2878 }
2879
2880 /* Start recording in BTS format. */
2881
2882 static void
2883 cmd_record_btrace_bts_start (const char *args, int from_tty)
2884 {
2885 if (args != NULL && *args != 0)
2886 error (_("Invalid argument."));
2887
2888 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2889
2890 TRY
2891 {
2892 execute_command ("target record-btrace", from_tty);
2893 }
2894 CATCH (exception, RETURN_MASK_ALL)
2895 {
2896 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2897 throw_exception (exception);
2898 }
2899 END_CATCH
2900 }
2901
2902 /* Start recording in Intel Processor Trace format. */
2903
2904 static void
2905 cmd_record_btrace_pt_start (const char *args, int from_tty)
2906 {
2907 if (args != NULL && *args != 0)
2908 error (_("Invalid argument."));
2909
2910 record_btrace_conf.format = BTRACE_FORMAT_PT;
2911
2912 TRY
2913 {
2914 execute_command ("target record-btrace", from_tty);
2915 }
2916 CATCH (exception, RETURN_MASK_ALL)
2917 {
2918 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2919 throw_exception (exception);
2920 }
2921 END_CATCH
2922 }
2923
2924 /* The "record btrace" command: try Intel PT, fall back to BTS. */
2925
2926 static void
2927 cmd_record_btrace_start (const char *args, int from_tty)
2928 {
2929 if (args != NULL && *args != 0)
2930 error (_("Invalid argument."));
2931
2932 record_btrace_conf.format = BTRACE_FORMAT_PT;
2933
2934 TRY
2935 {
2936 execute_command ("target record-btrace", from_tty);
2937 }
2938 CATCH (exception, RETURN_MASK_ALL)
2939 {
2940 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2941
2942 TRY
2943 {
2944 execute_command ("target record-btrace", from_tty);
2945 }
2946 CATCH (exception, RETURN_MASK_ALL)
2947 {
2948 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2949 throw_exception (exception);
2950 }
2951 END_CATCH
2952 }
2953 END_CATCH
2954 }
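/* Note on the fallback above (a reader's summary, not upstream text):
   plain "record btrace" first tries Intel Processor Trace and silently
   falls back to BTS when opening the PT target fails; only if both
   formats fail is the last exception propagated to the user.  */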
2955
2956 /* The "set record btrace" command. */
2957
2958 static void
2959 cmd_set_record_btrace (const char *args, int from_tty)
2960 {
2961 cmd_show_list (set_record_btrace_cmdlist, from_tty, "");
2962 }
2963
2964 /* The "show record btrace" command. */
2965
2966 static void
2967 cmd_show_record_btrace (const char *args, int from_tty)
2968 {
2969 cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
2970 }
2971
2972 /* The "show record btrace replay-memory-access" command. */
2973
2974 static void
2975 cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2976 struct cmd_list_element *c, const char *value)
2977 {
2978 fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
2979 replay_memory_access);
2980 }
2981
2982 /* The "set record btrace bts" command. */
2983
2984 static void
2985 cmd_set_record_btrace_bts (const char *args, int from_tty)
2986 {
2987 printf_unfiltered (_("\"set record btrace bts\" must be followed "
2988 "by an appropriate subcommand.\n"));
2989 help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
2990 all_commands, gdb_stdout);
2991 }
2992
2993 /* The "show record btrace bts" command. */
2994
2995 static void
2996 cmd_show_record_btrace_bts (const char *args, int from_tty)
2997 {
2998 cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
2999 }
3000
3001 /* The "set record btrace pt" command. */
3002
3003 static void
3004 cmd_set_record_btrace_pt (const char *args, int from_tty)
3005 {
3006 printf_unfiltered (_("\"set record btrace pt\" must be followed "
3007 "by an appropriate subcommand.\n"));
3008 help_list (set_record_btrace_pt_cmdlist, "set record btrace pt ",
3009 all_commands, gdb_stdout);
3010 }
3011
3012 /* The "show record btrace pt" command. */
3013
3014 static void
3015 cmd_show_record_btrace_pt (const char *args, int from_tty)
3016 {
3017 cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, "");
3018 }
3019
3020 /* The "record bts buffer-size" show value function. */
3021
3022 static void
3023 show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
3024 struct cmd_list_element *c,
3025 const char *value)
3026 {
3027 fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
3028 value);
3029 }
3030
3031 /* The "record pt buffer-size" show value function. */
3032
3033 static void
3034 show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
3035 struct cmd_list_element *c,
3036 const char *value)
3037 {
3038 fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
3039 value);
3040 }
3041
3042 /* Initialize btrace commands. */
3043
3044 void
3045 _initialize_record_btrace (void)
3046 {
3047 add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
3048 _("Start branch trace recording."), &record_btrace_cmdlist,
3049 "record btrace ", 0, &record_cmdlist);
3050 add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
3051
3052 add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
3053 _("\
3054 Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
3055 The processor stores a from/to record for each branch into a cyclic buffer.\n\
3056 This format may not be available on all processors."),
3057 &record_btrace_cmdlist);
3058 add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);
3059
3060 add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
3061 _("\
3062 Start branch trace recording in Intel Processor Trace format.\n\n\
3063 This format may not be available on all processors."),
3064 &record_btrace_cmdlist);
3065 add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);
3066
3067 add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
3068 _("Set record options"), &set_record_btrace_cmdlist,
3069 "set record btrace ", 0, &set_record_cmdlist);
3070
3071 add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
3072 _("Show record options"), &show_record_btrace_cmdlist,
3073 "show record btrace ", 0, &show_record_cmdlist);
3074
3075 add_setshow_enum_cmd ("replay-memory-access", no_class,
3076 replay_memory_access_types, &replay_memory_access, _("\
3077 Set what memory accesses are allowed during replay."), _("\
3078 Show what memory accesses are allowed during replay."),
3079 _("Default is READ-ONLY.\n\n\
3080 The btrace record target does not trace data.\n\
3081 The memory therefore corresponds to the live target and not \
3082 to the current replay position.\n\n\
3083 When READ-ONLY, allow accesses to read-only memory during replay.\n\
3084 When READ-WRITE, allow accesses to read-only and read-write memory during \
3085 replay."),
3086 NULL, cmd_show_replay_memory_access,
3087 &set_record_btrace_cmdlist,
3088 &show_record_btrace_cmdlist);
3089
3090 add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
3091 _("Set record btrace bts options"),
3092 &set_record_btrace_bts_cmdlist,
3093 "set record btrace bts ", 0, &set_record_btrace_cmdlist);
3094
3095 add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
3096 _("Show record btrace bts options"),
3097 &show_record_btrace_bts_cmdlist,
3098 "show record btrace bts ", 0, &show_record_btrace_cmdlist);
3099
3100 add_setshow_uinteger_cmd ("buffer-size", no_class,
3101 &record_btrace_conf.bts.size,
3102 _("Set the record/replay bts buffer size."),
3103 _("Show the record/replay bts buffer size."), _("\
3104 When starting recording, request a trace buffer of this size. \
3105 The actual buffer size may differ from the requested size. \
3106 Use \"info record\" to see the actual buffer size.\n\n\
3107 Bigger buffers allow longer recording but also take more time to process \
3108 the recorded execution trace.\n\n\
3109 The trace buffer size may not be changed while recording."), NULL,
3110 show_record_bts_buffer_size_value,
3111 &set_record_btrace_bts_cmdlist,
3112 &show_record_btrace_bts_cmdlist);
3113
3114 add_prefix_cmd ("pt", class_support, cmd_set_record_btrace_pt,
3115 _("Set record btrace pt options"),
3116 &set_record_btrace_pt_cmdlist,
3117 "set record btrace pt ", 0, &set_record_btrace_cmdlist);
3118
3119 add_prefix_cmd ("pt", class_support, cmd_show_record_btrace_pt,
3120 _("Show record btrace pt options"),
3121 &show_record_btrace_pt_cmdlist,
3122 "show record btrace pt ", 0, &show_record_btrace_cmdlist);
3123
3124 add_setshow_uinteger_cmd ("buffer-size", no_class,
3125 &record_btrace_conf.pt.size,
3126 _("Set the record/replay pt buffer size."),
3127 _("Show the record/replay pt buffer size."), _("\
3128 Bigger buffers allow longer recording but also take more time to process \
3129 the recorded execution.\n\
3130 The actual buffer size may differ from the requested size. Use \"info record\" \
3131 to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
3132 &set_record_btrace_pt_cmdlist,
3133 &show_record_btrace_pt_cmdlist);
3134
3135 init_record_btrace_ops ();
3136 add_target (&record_btrace_ops);
3137
3138 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
3139 xcalloc, xfree);
3140
3141 record_btrace_conf.bts.size = 64 * 1024;
3142 record_btrace_conf.pt.size = 16 * 1024;
3143 }
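/* Illustrative session exercising the commands registered above (assumes
   hardware and kernel support; not upstream text):

     (gdb) set record btrace pt buffer-size 32768
     (gdb) record btrace pt
     (gdb) continue
     (gdb) info record
     (gdb) record goto begin

   The defaults set above are 64 KiB for the BTS buffer and 16 KiB for
   the PT buffer.  */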