Fix formatting of gdb/ChangeLog and gdb/testsuite/ChangeLog entries
[deliverable/binutils-gdb.git] / gdb / record-btrace.c
CommitLineData
afedecd3
MM
1/* Branch trace support for GDB, the GNU debugger.
2
618f726f 3 Copyright (C) 2013-2016 Free Software Foundation, Inc.
afedecd3
MM
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22#include "defs.h"
23#include "record.h"
c0272db5 24#include "record-btrace.h"
afedecd3
MM
25#include "gdbthread.h"
26#include "target.h"
27#include "gdbcmd.h"
28#include "disasm.h"
29#include "observer.h"
afedecd3
MM
30#include "cli/cli-utils.h"
31#include "source.h"
32#include "ui-out.h"
33#include "symtab.h"
34#include "filenames.h"
1f3ef581 35#include "regcache.h"
cecac1ab 36#include "frame-unwind.h"
0b722aec 37#include "hashtab.h"
45741a9c 38#include "infrun.h"
70ad5bff
MM
39#include "event-loop.h"
40#include "inf-loop.h"
e3cfc1c7 41#include "vec.h"
325fac50 42#include <algorithm>
afedecd3
MM
43
44/* The target_ops of record-btrace. */
45static struct target_ops record_btrace_ops;
46
47/* A new thread observer enabling branch tracing for the new thread. */
48static struct observer *record_btrace_thread_observer;
49
67b5c0c1
MM
50/* Memory access types used in set/show record btrace replay-memory-access. */
51static const char replay_memory_access_read_only[] = "read-only";
52static const char replay_memory_access_read_write[] = "read-write";
53static const char *const replay_memory_access_types[] =
54{
55 replay_memory_access_read_only,
56 replay_memory_access_read_write,
57 NULL
58};
59
60/* The currently allowed replay memory access type. */
61static const char *replay_memory_access = replay_memory_access_read_only;
62
63/* Command lists for "set/show record btrace". */
64static struct cmd_list_element *set_record_btrace_cmdlist;
65static struct cmd_list_element *show_record_btrace_cmdlist;
633785ff 66
70ad5bff
MM
67/* The execution direction of the last resume we got. See record-full.c. */
68static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;
69
70/* The async event handler for reverse/replay execution. */
71static struct async_event_handler *record_btrace_async_inferior_event_handler;
72
aef92902
MM
73/* A flag indicating that we are currently generating a core file. */
74static int record_btrace_generating_corefile;
75
f4abbc16
MM
76/* The current branch trace configuration. */
77static struct btrace_config record_btrace_conf;
78
79/* Command list for "record btrace". */
80static struct cmd_list_element *record_btrace_cmdlist;
81
d33501a5
MM
82/* Command lists for "set/show record btrace bts". */
83static struct cmd_list_element *set_record_btrace_bts_cmdlist;
84static struct cmd_list_element *show_record_btrace_bts_cmdlist;
85
b20a6524
MM
86/* Command lists for "set/show record btrace pt". */
87static struct cmd_list_element *set_record_btrace_pt_cmdlist;
88static struct cmd_list_element *show_record_btrace_pt_cmdlist;
89
afedecd3
MM
90/* Print a record-btrace debug message. Use do ... while (0) to avoid
91 ambiguities when used in if statements. */
92
93#define DEBUG(msg, args...) \
94 do \
95 { \
96 if (record_debug != 0) \
97 fprintf_unfiltered (gdb_stdlog, \
98 "[record-btrace] " msg "\n", ##args); \
99 } \
100 while (0)
101
102
103/* Update the branch trace for the current thread and return a pointer to its
066ce621 104 thread_info.
afedecd3
MM
105
106 Throws an error if there is no thread or no trace. This function never
107 returns NULL. */
108
066ce621
MM
109static struct thread_info *
110require_btrace_thread (void)
afedecd3
MM
111{
112 struct thread_info *tp;
afedecd3
MM
113
114 DEBUG ("require");
115
116 tp = find_thread_ptid (inferior_ptid);
117 if (tp == NULL)
118 error (_("No thread."));
119
120 btrace_fetch (tp);
121
6e07b1d2 122 if (btrace_is_empty (tp))
afedecd3
MM
123 error (_("No trace."));
124
066ce621
MM
125 return tp;
126}
127
128/* Update the branch trace for the current thread and return a pointer to its
129 branch trace information struct.
130
131 Throws an error if there is no thread or no trace. This function never
132 returns NULL. */
133
134static struct btrace_thread_info *
135require_btrace (void)
136{
137 struct thread_info *tp;
138
139 tp = require_btrace_thread ();
140
141 return &tp->btrace;
afedecd3
MM
142}
143
144/* Enable branch tracing for one thread. Warn on errors. */
145
146static void
147record_btrace_enable_warn (struct thread_info *tp)
148{
492d29ea
PA
149 TRY
150 {
151 btrace_enable (tp, &record_btrace_conf);
152 }
153 CATCH (error, RETURN_MASK_ERROR)
154 {
155 warning ("%s", error.message);
156 }
157 END_CATCH
afedecd3
MM
158}
159
160/* Callback function to disable branch tracing for one thread. */
161
162static void
163record_btrace_disable_callback (void *arg)
164{
19ba03f4 165 struct thread_info *tp = (struct thread_info *) arg;
afedecd3
MM
166
167 btrace_disable (tp);
168}
169
170/* Enable automatic tracing of new threads. */
171
172static void
173record_btrace_auto_enable (void)
174{
175 DEBUG ("attach thread observer");
176
177 record_btrace_thread_observer
178 = observer_attach_new_thread (record_btrace_enable_warn);
179}
180
181/* Disable automatic tracing of new threads. */
182
183static void
184record_btrace_auto_disable (void)
185{
186 /* The observer may have been detached, already. */
187 if (record_btrace_thread_observer == NULL)
188 return;
189
190 DEBUG ("detach thread observer");
191
192 observer_detach_new_thread (record_btrace_thread_observer);
193 record_btrace_thread_observer = NULL;
194}
195
70ad5bff
MM
196/* The record-btrace async event handler function. */
197
198static void
199record_btrace_handle_async_inferior_event (gdb_client_data data)
200{
201 inferior_event_handler (INF_REG_EVENT, NULL);
202}
203
c0272db5
TW
204/* See record-btrace.h. */
205
206void
207record_btrace_push_target (void)
208{
209 const char *format;
210
211 record_btrace_auto_enable ();
212
213 push_target (&record_btrace_ops);
214
215 record_btrace_async_inferior_event_handler
216 = create_async_event_handler (record_btrace_handle_async_inferior_event,
217 NULL);
218 record_btrace_generating_corefile = 0;
219
220 format = btrace_format_short_string (record_btrace_conf.format);
221 observer_notify_record_changed (current_inferior (), 1, "btrace", format);
222}
223
afedecd3
MM
224/* The to_open method of target record-btrace. */
225
226static void
014f9477 227record_btrace_open (const char *args, int from_tty)
afedecd3
MM
228{
229 struct cleanup *disable_chain;
230 struct thread_info *tp;
231
232 DEBUG ("open");
233
8213266a 234 record_preopen ();
afedecd3
MM
235
236 if (!target_has_execution)
237 error (_("The program is not being run."));
238
afedecd3
MM
239 gdb_assert (record_btrace_thread_observer == NULL);
240
241 disable_chain = make_cleanup (null_cleanup, NULL);
034f788c 242 ALL_NON_EXITED_THREADS (tp)
5d5658a1 243 if (args == NULL || *args == 0 || number_is_in_list (args, tp->global_num))
afedecd3 244 {
f4abbc16 245 btrace_enable (tp, &record_btrace_conf);
afedecd3
MM
246
247 make_cleanup (record_btrace_disable_callback, tp);
248 }
249
c0272db5 250 record_btrace_push_target ();
afedecd3
MM
251
252 discard_cleanups (disable_chain);
253}
254
255/* The to_stop_recording method of target record-btrace. */
256
257static void
c6cd7c02 258record_btrace_stop_recording (struct target_ops *self)
afedecd3
MM
259{
260 struct thread_info *tp;
261
262 DEBUG ("stop recording");
263
264 record_btrace_auto_disable ();
265
034f788c 266 ALL_NON_EXITED_THREADS (tp)
afedecd3
MM
267 if (tp->btrace.target != NULL)
268 btrace_disable (tp);
269}
270
c0272db5
TW
271/* The to_disconnect method of target record-btrace. */
272
273static void
274record_btrace_disconnect (struct target_ops *self, const char *args,
275 int from_tty)
276{
277 struct target_ops *beneath = self->beneath;
278
279 /* Do not stop recording, just clean up GDB side. */
280 unpush_target (self);
281
282 /* Forward disconnect. */
283 beneath->to_disconnect (beneath, args, from_tty);
284}
285
afedecd3
MM
286/* The to_close method of target record-btrace. */
287
288static void
de90e03d 289record_btrace_close (struct target_ops *self)
afedecd3 290{
568e808b
MM
291 struct thread_info *tp;
292
70ad5bff
MM
293 if (record_btrace_async_inferior_event_handler != NULL)
294 delete_async_event_handler (&record_btrace_async_inferior_event_handler);
295
99c819ee
MM
296 /* Make sure automatic recording gets disabled even if we did not stop
297 recording before closing the record-btrace target. */
298 record_btrace_auto_disable ();
299
568e808b
MM
300 /* We should have already stopped recording.
301 Tear down btrace in case we have not. */
034f788c 302 ALL_NON_EXITED_THREADS (tp)
568e808b 303 btrace_teardown (tp);
afedecd3
MM
304}
305
b7d2e916
PA
306/* The to_async method of target record-btrace. */
307
308static void
6a3753b3 309record_btrace_async (struct target_ops *ops, int enable)
b7d2e916 310{
6a3753b3 311 if (enable)
b7d2e916
PA
312 mark_async_event_handler (record_btrace_async_inferior_event_handler);
313 else
314 clear_async_event_handler (record_btrace_async_inferior_event_handler);
315
6a3753b3 316 ops->beneath->to_async (ops->beneath, enable);
b7d2e916
PA
317}
318
d33501a5
MM
319/* Adjusts the size and returns a human readable size suffix. */
320
321static const char *
322record_btrace_adjust_size (unsigned int *size)
323{
324 unsigned int sz;
325
326 sz = *size;
327
328 if ((sz & ((1u << 30) - 1)) == 0)
329 {
330 *size = sz >> 30;
331 return "GB";
332 }
333 else if ((sz & ((1u << 20) - 1)) == 0)
334 {
335 *size = sz >> 20;
336 return "MB";
337 }
338 else if ((sz & ((1u << 10) - 1)) == 0)
339 {
340 *size = sz >> 10;
341 return "kB";
342 }
343 else
344 return "";
345}
346
347/* Print a BTS configuration. */
348
349static void
350record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
351{
352 const char *suffix;
353 unsigned int size;
354
355 size = conf->size;
356 if (size > 0)
357 {
358 suffix = record_btrace_adjust_size (&size);
359 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
360 }
361}
362
bc504a31 363/* Print an Intel Processor Trace configuration. */
b20a6524
MM
364
365static void
366record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
367{
368 const char *suffix;
369 unsigned int size;
370
371 size = conf->size;
372 if (size > 0)
373 {
374 suffix = record_btrace_adjust_size (&size);
375 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
376 }
377}
378
d33501a5
MM
379/* Print a branch tracing configuration. */
380
381static void
382record_btrace_print_conf (const struct btrace_config *conf)
383{
384 printf_unfiltered (_("Recording format: %s.\n"),
385 btrace_format_string (conf->format));
386
387 switch (conf->format)
388 {
389 case BTRACE_FORMAT_NONE:
390 return;
391
392 case BTRACE_FORMAT_BTS:
393 record_btrace_print_bts_conf (&conf->bts);
394 return;
b20a6524
MM
395
396 case BTRACE_FORMAT_PT:
397 record_btrace_print_pt_conf (&conf->pt);
398 return;
d33501a5
MM
399 }
400
401 internal_error (__FILE__, __LINE__, _("Unkown branch trace format."));
402}
403
afedecd3
MM
404/* The to_info_record method of target record-btrace. */
405
406static void
630d6a4a 407record_btrace_info (struct target_ops *self)
afedecd3
MM
408{
409 struct btrace_thread_info *btinfo;
f4abbc16 410 const struct btrace_config *conf;
afedecd3 411 struct thread_info *tp;
31fd9caa 412 unsigned int insns, calls, gaps;
afedecd3
MM
413
414 DEBUG ("info");
415
416 tp = find_thread_ptid (inferior_ptid);
417 if (tp == NULL)
418 error (_("No thread."));
419
f4abbc16
MM
420 btinfo = &tp->btrace;
421
422 conf = btrace_conf (btinfo);
423 if (conf != NULL)
d33501a5 424 record_btrace_print_conf (conf);
f4abbc16 425
afedecd3
MM
426 btrace_fetch (tp);
427
23a7fe75
MM
428 insns = 0;
429 calls = 0;
31fd9caa 430 gaps = 0;
23a7fe75 431
6e07b1d2 432 if (!btrace_is_empty (tp))
23a7fe75
MM
433 {
434 struct btrace_call_iterator call;
435 struct btrace_insn_iterator insn;
436
437 btrace_call_end (&call, btinfo);
438 btrace_call_prev (&call, 1);
5de9129b 439 calls = btrace_call_number (&call);
23a7fe75
MM
440
441 btrace_insn_end (&insn, btinfo);
31fd9caa 442
5de9129b 443 insns = btrace_insn_number (&insn);
31fd9caa
MM
444 if (insns != 0)
445 {
446 /* The last instruction does not really belong to the trace. */
447 insns -= 1;
448 }
449 else
450 {
451 unsigned int steps;
452
453 /* Skip gaps at the end. */
454 do
455 {
456 steps = btrace_insn_prev (&insn, 1);
457 if (steps == 0)
458 break;
459
460 insns = btrace_insn_number (&insn);
461 }
462 while (insns == 0);
463 }
464
465 gaps = btinfo->ngaps;
23a7fe75 466 }
afedecd3 467
31fd9caa 468 printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
43792cf0
PA
469 "for thread %s (%s).\n"), insns, calls, gaps,
470 print_thread_id (tp), target_pid_to_str (tp->ptid));
07bbe694
MM
471
472 if (btrace_is_replaying (tp))
473 printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
474 btrace_insn_number (btinfo->replay));
afedecd3
MM
475}
476
31fd9caa
MM
477/* Print a decode error. */
478
479static void
480btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
481 enum btrace_format format)
482{
483 const char *errstr;
484 int is_error;
485
486 errstr = _("unknown");
487 is_error = 1;
488
489 switch (format)
490 {
491 default:
492 break;
493
494 case BTRACE_FORMAT_BTS:
495 switch (errcode)
496 {
497 default:
498 break;
499
500 case BDE_BTS_OVERFLOW:
501 errstr = _("instruction overflow");
502 break;
503
504 case BDE_BTS_INSN_SIZE:
505 errstr = _("unknown instruction");
506 break;
507 }
508 break;
b20a6524
MM
509
510#if defined (HAVE_LIBIPT)
511 case BTRACE_FORMAT_PT:
512 switch (errcode)
513 {
514 case BDE_PT_USER_QUIT:
515 is_error = 0;
516 errstr = _("trace decode cancelled");
517 break;
518
519 case BDE_PT_DISABLED:
520 is_error = 0;
521 errstr = _("disabled");
522 break;
523
524 case BDE_PT_OVERFLOW:
525 is_error = 0;
526 errstr = _("overflow");
527 break;
528
529 default:
530 if (errcode < 0)
531 errstr = pt_errstr (pt_errcode (errcode));
532 break;
533 }
534 break;
535#endif /* defined (HAVE_LIBIPT) */
31fd9caa
MM
536 }
537
538 ui_out_text (uiout, _("["));
539 if (is_error)
540 {
541 ui_out_text (uiout, _("decode error ("));
542 ui_out_field_int (uiout, "errcode", errcode);
543 ui_out_text (uiout, _("): "));
544 }
545 ui_out_text (uiout, errstr);
546 ui_out_text (uiout, _("]\n"));
547}
548
afedecd3
MM
549/* Print an unsigned int. */
550
551static void
552ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
553{
554 ui_out_field_fmt (uiout, fld, "%u", val);
555}
556
f94cc897
MM
557/* A range of source lines. */
558
559struct btrace_line_range
560{
561 /* The symtab this line is from. */
562 struct symtab *symtab;
563
564 /* The first line (inclusive). */
565 int begin;
566
567 /* The last line (exclusive). */
568 int end;
569};
570
571/* Construct a line range. */
572
573static struct btrace_line_range
574btrace_mk_line_range (struct symtab *symtab, int begin, int end)
575{
576 struct btrace_line_range range;
577
578 range.symtab = symtab;
579 range.begin = begin;
580 range.end = end;
581
582 return range;
583}
584
585/* Add a line to a line range. */
586
587static struct btrace_line_range
588btrace_line_range_add (struct btrace_line_range range, int line)
589{
590 if (range.end <= range.begin)
591 {
592 /* This is the first entry. */
593 range.begin = line;
594 range.end = line + 1;
595 }
596 else if (line < range.begin)
597 range.begin = line;
598 else if (range.end < line)
599 range.end = line;
600
601 return range;
602}
603
604/* Return non-zero if RANGE is empty, zero otherwise. */
605
606static int
607btrace_line_range_is_empty (struct btrace_line_range range)
608{
609 return range.end <= range.begin;
610}
611
612/* Return non-zero if LHS contains RHS, zero otherwise. */
613
614static int
615btrace_line_range_contains_range (struct btrace_line_range lhs,
616 struct btrace_line_range rhs)
617{
618 return ((lhs.symtab == rhs.symtab)
619 && (lhs.begin <= rhs.begin)
620 && (rhs.end <= lhs.end));
621}
622
623/* Find the line range associated with PC. */
624
625static struct btrace_line_range
626btrace_find_line_range (CORE_ADDR pc)
627{
628 struct btrace_line_range range;
629 struct linetable_entry *lines;
630 struct linetable *ltable;
631 struct symtab *symtab;
632 int nlines, i;
633
634 symtab = find_pc_line_symtab (pc);
635 if (symtab == NULL)
636 return btrace_mk_line_range (NULL, 0, 0);
637
638 ltable = SYMTAB_LINETABLE (symtab);
639 if (ltable == NULL)
640 return btrace_mk_line_range (symtab, 0, 0);
641
642 nlines = ltable->nitems;
643 lines = ltable->item;
644 if (nlines <= 0)
645 return btrace_mk_line_range (symtab, 0, 0);
646
647 range = btrace_mk_line_range (symtab, 0, 0);
648 for (i = 0; i < nlines - 1; i++)
649 {
650 if ((lines[i].pc == pc) && (lines[i].line != 0))
651 range = btrace_line_range_add (range, lines[i].line);
652 }
653
654 return range;
655}
656
657/* Print source lines in LINES to UIOUT.
658
659 UI_ITEM_CHAIN is a cleanup chain for the last source line and the
660 instructions corresponding to that source line. When printing a new source
661 line, we do the cleanups for the open chain and open a new cleanup chain for
662 the new source line. If the source line range in LINES is not empty, this
663 function will leave the cleanup chain for the last printed source line open
664 so instructions can be added to it. */
665
666static void
667btrace_print_lines (struct btrace_line_range lines, struct ui_out *uiout,
668 struct cleanup **ui_item_chain, int flags)
669{
8d297bbf 670 print_source_lines_flags psl_flags;
f94cc897
MM
671 int line;
672
673 psl_flags = 0;
674 if (flags & DISASSEMBLY_FILENAME)
675 psl_flags |= PRINT_SOURCE_LINES_FILENAME;
676
677 for (line = lines.begin; line < lines.end; ++line)
678 {
679 if (*ui_item_chain != NULL)
680 do_cleanups (*ui_item_chain);
681
682 *ui_item_chain
683 = make_cleanup_ui_out_tuple_begin_end (uiout, "src_and_asm_line");
684
685 print_source_lines (lines.symtab, line, line + 1, psl_flags);
686
687 make_cleanup_ui_out_list_begin_end (uiout, "line_asm_insn");
688 }
689}
690
afedecd3
MM
691/* Disassemble a section of the recorded instruction trace. */
692
693static void
23a7fe75 694btrace_insn_history (struct ui_out *uiout,
31fd9caa 695 const struct btrace_thread_info *btinfo,
23a7fe75
MM
696 const struct btrace_insn_iterator *begin,
697 const struct btrace_insn_iterator *end, int flags)
afedecd3 698{
f94cc897
MM
699 struct ui_file *stb;
700 struct cleanup *cleanups, *ui_item_chain;
701 struct disassemble_info di;
afedecd3 702 struct gdbarch *gdbarch;
23a7fe75 703 struct btrace_insn_iterator it;
f94cc897 704 struct btrace_line_range last_lines;
afedecd3 705
23a7fe75
MM
706 DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
707 btrace_insn_number (end));
afedecd3 708
f94cc897
MM
709 flags |= DISASSEMBLY_SPECULATIVE;
710
afedecd3 711 gdbarch = target_gdbarch ();
f94cc897
MM
712 stb = mem_fileopen ();
713 cleanups = make_cleanup_ui_file_delete (stb);
714 di = gdb_disassemble_info (gdbarch, stb);
715 last_lines = btrace_mk_line_range (NULL, 0, 0);
716
717 make_cleanup_ui_out_list_begin_end (uiout, "asm_insns");
718
719 /* UI_ITEM_CHAIN is a cleanup chain for the last source line and the
720 instructions corresponding to that line. */
721 ui_item_chain = NULL;
afedecd3 722
23a7fe75 723 for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
afedecd3 724 {
23a7fe75
MM
725 const struct btrace_insn *insn;
726
727 insn = btrace_insn_get (&it);
728
31fd9caa
MM
729 /* A NULL instruction indicates a gap in the trace. */
730 if (insn == NULL)
731 {
732 const struct btrace_config *conf;
733
734 conf = btrace_conf (btinfo);
afedecd3 735
31fd9caa
MM
736 /* We have trace so we must have a configuration. */
737 gdb_assert (conf != NULL);
738
739 btrace_ui_out_decode_error (uiout, it.function->errcode,
740 conf->format);
741 }
742 else
743 {
f94cc897 744 struct disasm_insn dinsn;
da8c46d2 745
f94cc897 746 if ((flags & DISASSEMBLY_SOURCE) != 0)
da8c46d2 747 {
f94cc897
MM
748 struct btrace_line_range lines;
749
750 lines = btrace_find_line_range (insn->pc);
751 if (!btrace_line_range_is_empty (lines)
752 && !btrace_line_range_contains_range (last_lines, lines))
753 {
754 btrace_print_lines (lines, uiout, &ui_item_chain, flags);
755 last_lines = lines;
756 }
757 else if (ui_item_chain == NULL)
758 {
759 ui_item_chain
760 = make_cleanup_ui_out_tuple_begin_end (uiout,
761 "src_and_asm_line");
762 /* No source information. */
763 make_cleanup_ui_out_list_begin_end (uiout, "line_asm_insn");
764 }
765
766 gdb_assert (ui_item_chain != NULL);
da8c46d2 767 }
da8c46d2 768
f94cc897
MM
769 memset (&dinsn, 0, sizeof (dinsn));
770 dinsn.number = btrace_insn_number (&it);
771 dinsn.addr = insn->pc;
31fd9caa 772
da8c46d2 773 if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
f94cc897 774 dinsn.is_speculative = 1;
da8c46d2 775
f94cc897 776 gdb_pretty_print_insn (gdbarch, uiout, &di, &dinsn, flags, stb);
31fd9caa 777 }
afedecd3 778 }
f94cc897
MM
779
780 do_cleanups (cleanups);
afedecd3
MM
781}
782
783/* The to_insn_history method of target record-btrace. */
784
785static void
7a6c5609 786record_btrace_insn_history (struct target_ops *self, int size, int flags)
afedecd3
MM
787{
788 struct btrace_thread_info *btinfo;
23a7fe75
MM
789 struct btrace_insn_history *history;
790 struct btrace_insn_iterator begin, end;
afedecd3
MM
791 struct cleanup *uiout_cleanup;
792 struct ui_out *uiout;
23a7fe75 793 unsigned int context, covered;
afedecd3
MM
794
795 uiout = current_uiout;
796 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
797 "insn history");
afedecd3 798 context = abs (size);
afedecd3
MM
799 if (context == 0)
800 error (_("Bad record instruction-history-size."));
801
23a7fe75
MM
802 btinfo = require_btrace ();
803 history = btinfo->insn_history;
804 if (history == NULL)
afedecd3 805 {
07bbe694 806 struct btrace_insn_iterator *replay;
afedecd3 807
23a7fe75 808 DEBUG ("insn-history (0x%x): %d", flags, size);
afedecd3 809
07bbe694
MM
810 /* If we're replaying, we start at the replay position. Otherwise, we
811 start at the tail of the trace. */
812 replay = btinfo->replay;
813 if (replay != NULL)
814 begin = *replay;
815 else
816 btrace_insn_end (&begin, btinfo);
817
818 /* We start from here and expand in the requested direction. Then we
819 expand in the other direction, as well, to fill up any remaining
820 context. */
821 end = begin;
822 if (size < 0)
823 {
824 /* We want the current position covered, as well. */
825 covered = btrace_insn_next (&end, 1);
826 covered += btrace_insn_prev (&begin, context - covered);
827 covered += btrace_insn_next (&end, context - covered);
828 }
829 else
830 {
831 covered = btrace_insn_next (&end, context);
832 covered += btrace_insn_prev (&begin, context - covered);
833 }
afedecd3
MM
834 }
835 else
836 {
23a7fe75
MM
837 begin = history->begin;
838 end = history->end;
afedecd3 839
23a7fe75
MM
840 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
841 btrace_insn_number (&begin), btrace_insn_number (&end));
afedecd3 842
23a7fe75
MM
843 if (size < 0)
844 {
845 end = begin;
846 covered = btrace_insn_prev (&begin, context);
847 }
848 else
849 {
850 begin = end;
851 covered = btrace_insn_next (&end, context);
852 }
afedecd3
MM
853 }
854
23a7fe75 855 if (covered > 0)
31fd9caa 856 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
23a7fe75
MM
857 else
858 {
859 if (size < 0)
860 printf_unfiltered (_("At the start of the branch trace record.\n"));
861 else
862 printf_unfiltered (_("At the end of the branch trace record.\n"));
863 }
afedecd3 864
23a7fe75 865 btrace_set_insn_history (btinfo, &begin, &end);
afedecd3
MM
866 do_cleanups (uiout_cleanup);
867}
868
869/* The to_insn_history_range method of target record-btrace. */
870
871static void
4e99c6b7
TT
872record_btrace_insn_history_range (struct target_ops *self,
873 ULONGEST from, ULONGEST to, int flags)
afedecd3
MM
874{
875 struct btrace_thread_info *btinfo;
23a7fe75
MM
876 struct btrace_insn_history *history;
877 struct btrace_insn_iterator begin, end;
afedecd3
MM
878 struct cleanup *uiout_cleanup;
879 struct ui_out *uiout;
23a7fe75
MM
880 unsigned int low, high;
881 int found;
afedecd3
MM
882
883 uiout = current_uiout;
884 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
885 "insn history");
23a7fe75
MM
886 low = from;
887 high = to;
afedecd3 888
23a7fe75 889 DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);
afedecd3
MM
890
891 /* Check for wrap-arounds. */
23a7fe75 892 if (low != from || high != to)
afedecd3
MM
893 error (_("Bad range."));
894
0688d04e 895 if (high < low)
afedecd3
MM
896 error (_("Bad range."));
897
23a7fe75 898 btinfo = require_btrace ();
afedecd3 899
23a7fe75
MM
900 found = btrace_find_insn_by_number (&begin, btinfo, low);
901 if (found == 0)
902 error (_("Range out of bounds."));
afedecd3 903
23a7fe75
MM
904 found = btrace_find_insn_by_number (&end, btinfo, high);
905 if (found == 0)
0688d04e
MM
906 {
907 /* Silently truncate the range. */
908 btrace_insn_end (&end, btinfo);
909 }
910 else
911 {
912 /* We want both begin and end to be inclusive. */
913 btrace_insn_next (&end, 1);
914 }
afedecd3 915
31fd9caa 916 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
23a7fe75 917 btrace_set_insn_history (btinfo, &begin, &end);
afedecd3
MM
918
919 do_cleanups (uiout_cleanup);
920}
921
922/* The to_insn_history_from method of target record-btrace. */
923
924static void
9abc3ff3
TT
925record_btrace_insn_history_from (struct target_ops *self,
926 ULONGEST from, int size, int flags)
afedecd3
MM
927{
928 ULONGEST begin, end, context;
929
930 context = abs (size);
0688d04e
MM
931 if (context == 0)
932 error (_("Bad record instruction-history-size."));
afedecd3
MM
933
934 if (size < 0)
935 {
936 end = from;
937
938 if (from < context)
939 begin = 0;
940 else
0688d04e 941 begin = from - context + 1;
afedecd3
MM
942 }
943 else
944 {
945 begin = from;
0688d04e 946 end = from + context - 1;
afedecd3
MM
947
948 /* Check for wrap-around. */
949 if (end < begin)
950 end = ULONGEST_MAX;
951 }
952
4e99c6b7 953 record_btrace_insn_history_range (self, begin, end, flags);
afedecd3
MM
954}
955
956/* Print the instruction number range for a function call history line. */
957
958static void
23a7fe75
MM
959btrace_call_history_insn_range (struct ui_out *uiout,
960 const struct btrace_function *bfun)
afedecd3 961{
7acbe133
MM
962 unsigned int begin, end, size;
963
964 size = VEC_length (btrace_insn_s, bfun->insn);
965 gdb_assert (size > 0);
afedecd3 966
23a7fe75 967 begin = bfun->insn_offset;
7acbe133 968 end = begin + size - 1;
afedecd3 969
23a7fe75 970 ui_out_field_uint (uiout, "insn begin", begin);
8710b709 971 ui_out_text (uiout, ",");
23a7fe75 972 ui_out_field_uint (uiout, "insn end", end);
afedecd3
MM
973}
974
ce0dfbea
MM
975/* Compute the lowest and highest source line for the instructions in BFUN
976 and return them in PBEGIN and PEND.
977 Ignore instructions that can't be mapped to BFUN, e.g. instructions that
978 result from inlining or macro expansion. */
979
980static void
981btrace_compute_src_line_range (const struct btrace_function *bfun,
982 int *pbegin, int *pend)
983{
984 struct btrace_insn *insn;
985 struct symtab *symtab;
986 struct symbol *sym;
987 unsigned int idx;
988 int begin, end;
989
990 begin = INT_MAX;
991 end = INT_MIN;
992
993 sym = bfun->sym;
994 if (sym == NULL)
995 goto out;
996
997 symtab = symbol_symtab (sym);
998
999 for (idx = 0; VEC_iterate (btrace_insn_s, bfun->insn, idx, insn); ++idx)
1000 {
1001 struct symtab_and_line sal;
1002
1003 sal = find_pc_line (insn->pc, 0);
1004 if (sal.symtab != symtab || sal.line == 0)
1005 continue;
1006
325fac50
PA
1007 begin = std::min (begin, sal.line);
1008 end = std::max (end, sal.line);
ce0dfbea
MM
1009 }
1010
1011 out:
1012 *pbegin = begin;
1013 *pend = end;
1014}
1015
afedecd3
MM
1016/* Print the source line information for a function call history line. */
1017
1018static void
23a7fe75
MM
1019btrace_call_history_src_line (struct ui_out *uiout,
1020 const struct btrace_function *bfun)
afedecd3
MM
1021{
1022 struct symbol *sym;
23a7fe75 1023 int begin, end;
afedecd3
MM
1024
1025 sym = bfun->sym;
1026 if (sym == NULL)
1027 return;
1028
1029 ui_out_field_string (uiout, "file",
08be3fe3 1030 symtab_to_filename_for_display (symbol_symtab (sym)));
afedecd3 1031
ce0dfbea 1032 btrace_compute_src_line_range (bfun, &begin, &end);
23a7fe75 1033 if (end < begin)
afedecd3
MM
1034 return;
1035
1036 ui_out_text (uiout, ":");
23a7fe75 1037 ui_out_field_int (uiout, "min line", begin);
afedecd3 1038
23a7fe75 1039 if (end == begin)
afedecd3
MM
1040 return;
1041
8710b709 1042 ui_out_text (uiout, ",");
23a7fe75 1043 ui_out_field_int (uiout, "max line", end);
afedecd3
MM
1044}
1045
0b722aec
MM
1046/* Get the name of a branch trace function. */
1047
1048static const char *
1049btrace_get_bfun_name (const struct btrace_function *bfun)
1050{
1051 struct minimal_symbol *msym;
1052 struct symbol *sym;
1053
1054 if (bfun == NULL)
1055 return "??";
1056
1057 msym = bfun->msym;
1058 sym = bfun->sym;
1059
1060 if (sym != NULL)
1061 return SYMBOL_PRINT_NAME (sym);
1062 else if (msym != NULL)
efd66ac6 1063 return MSYMBOL_PRINT_NAME (msym);
0b722aec
MM
1064 else
1065 return "??";
1066}
1067
afedecd3
MM
/* Disassemble a section of the recorded function trace.

   Prints each function-call segment in [BEGIN; END) to UIOUT: its index,
   optional call-depth indentation, the function name, and (depending on
   FLAGS) its instruction range and source line.  INT_FLAGS is a
   record_print_flags mask passed as int through the target vector.  */

static void
btrace_call_history (struct ui_out *uiout,
		     const struct btrace_thread_info *btinfo,
		     const struct btrace_call_iterator *begin,
		     const struct btrace_call_iterator *end,
		     int int_flags)
{
  struct btrace_call_iterator it;
  record_print_flags flags = (enum record_print_flag) int_flags;

  DEBUG ("ftrace (0x%x): [%u; %u)", int_flags, btrace_call_number (begin),
	 btrace_call_number (end));

  /* END is exclusive; iterate one function segment at a time.  */
  for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
    {
      const struct btrace_function *bfun;
      struct minimal_symbol *msym;
      struct symbol *sym;

      bfun = btrace_call_get (&it);
      sym = bfun->sym;
      msym = bfun->msym;

      /* Print the function index.  */
      ui_out_field_uint (uiout, "index", bfun->number);
      ui_out_text (uiout, "\t");

      /* Indicate gaps in the trace.  A non-zero errcode marks a decode
	 error instead of a real function segment; print the error and
	 skip the rest of the per-function output.  */
      if (bfun->errcode != 0)
	{
	  const struct btrace_config *conf;

	  conf = btrace_conf (btinfo);

	  /* We have trace so we must have a configuration.  */
	  gdb_assert (conf != NULL);

	  btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);

	  continue;
	}

      /* Indent according to call depth, if requested.  BTINFO->level is the
	 normalization offset so the shallowest frame prints at column 0.  */
      if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
	{
	  int level = bfun->level + btinfo->level, i;

	  for (i = 0; i < level; ++i)
	    ui_out_text (uiout, "  ");
	}

      /* Prefer the full symbol, fall back to the minimal symbol; only print
	 the "??" placeholder for CLI output, not MI.  */
      if (sym != NULL)
	ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (sym));
      else if (msym != NULL)
	ui_out_field_string (uiout, "function", MSYMBOL_PRINT_NAME (msym));
      else if (!ui_out_is_mi_like_p (uiout))
	ui_out_field_string (uiout, "function", "??");

      if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
	{
	  ui_out_text (uiout, _("\tinst "));
	  btrace_call_history_insn_range (uiout, bfun);
	}

      if ((flags & RECORD_PRINT_SRC_LINE) != 0)
	{
	  ui_out_text (uiout, _("\tat "));
	  btrace_call_history_src_line (uiout, bfun);
	}

      ui_out_text (uiout, "\n");
    }
}
1142
/* The to_call_history method of target record-btrace.

   Prints SIZE function-call segments around the current browsing position:
   backwards if SIZE is negative, forwards otherwise.  Successive calls
   continue from where the previous call left off.  */

static void
record_btrace_call_history (struct target_ops *self, int size, int int_flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int context, covered;
  record_print_flags flags = (enum record_print_flag) int_flags;

  uiout = current_uiout;
  /* NOTE(review): the tuple is named "insn history" although this prints
     the *call* history; the range variant below uses "func history".
     Looks like a copy-paste slip -- confirm against MI consumers before
     changing.  */
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
						       "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  btinfo = require_btrace ();
  history = btinfo->call_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("call-history (0x%x): %d", int_flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
	 start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
	{
	  begin.function = replay->function;
	  begin.btinfo = btinfo;
	}
      else
	btrace_call_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
	 expand in the other direction, as well, to fill up any remaining
	 context.  */
      end = begin;
      if (size < 0)
	{
	  /* We want the current position covered, as well.  */
	  covered = btrace_call_next (&end, 1);
	  covered += btrace_call_prev (&begin, context - covered);
	  covered += btrace_call_next (&end, context - covered);
	}
      else
	{
	  covered = btrace_call_next (&end, context);
	  covered += btrace_call_prev (&begin, context - covered);
	}
    }
  else
    {
      /* Continue browsing from the previously shown window.  */
      begin = history->begin;
      end = history->end;

      DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", int_flags, size,
	     btrace_call_number (&begin), btrace_call_number (&end));

      if (size < 0)
	{
	  end = begin;
	  covered = btrace_call_prev (&begin, context);
	}
      else
	{
	  begin = end;
	  covered = btrace_call_next (&end, context);
	}
    }

  if (covered > 0)
    btrace_call_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
	printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
	printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  /* Remember the window so the next call continues from here.  */
  btrace_set_call_history (btinfo, &begin, &end);
  do_cleanups (uiout_cleanup);
}
1232
/* The to_call_history_range method of target record-btrace.

   Prints the function-call segments numbered [FROM; TO], both inclusive.
   Errors out on wrap-around or an inverted range; silently truncates TO
   to the end of the trace.  */

static void
record_btrace_call_history_range (struct target_ops *self,
				  ULONGEST from, ULONGEST to,
				  int int_flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;
  record_print_flags flags = (enum record_print_flag) int_flags;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
						       "func history");
  low = from;
  high = to;

  DEBUG ("call-history (0x%x): [%u; %u)", int_flags, low, high);

  /* Check for wrap-arounds.  Call numbers are unsigned int; if the
     64-bit arguments don't survive the narrowing, reject them.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_call_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_call_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_call_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_call_next (&end, 1);
    }

  btrace_call_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_call_history (btinfo, &begin, &end);

  do_cleanups (uiout_cleanup);
}
1287
/* The to_call_history_from method of target record-btrace.

   Prints SIZE function-call segments ending at (SIZE < 0) or starting at
   (SIZE > 0) segment number FROM, clamping at the trace boundaries, then
   delegates to the range method.  */

static void
record_btrace_call_history_from (struct target_ops *self,
				 ULONGEST from, int size,
				 int int_flags)
{
  ULONGEST begin, end, context;
  record_print_flags flags = (enum record_print_flag) int_flags;

  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  if (size < 0)
    {
      end = from;

      /* Clamp at the start of the trace rather than wrapping below zero.  */
      if (from < context)
	begin = 0;
      else
	begin = from - context + 1;
    }
  else
    {
      begin = from;
      end = from + context - 1;

      /* Check for wrap-around.  */
      if (end < begin)
	end = ULONGEST_MAX;
    }

  record_btrace_call_history_range (self, begin, end, flags);
}
1323
07bbe694
MM
/* The to_record_is_replaying method of target record-btrace.

   Returns non-zero if any non-exited thread matching PTID is currently
   replaying its execution history.  */

static int
record_btrace_is_replaying (struct target_ops *self, ptid_t ptid)
{
  struct thread_info *tp;

  ALL_NON_EXITED_THREADS (tp)
    if (ptid_match (tp->ptid, ptid) && btrace_is_replaying (tp))
      return 1;

  return 0;
}
1337
7ff27e9b
MM
1338/* The to_record_will_replay method of target record-btrace. */
1339
1340static int
1341record_btrace_will_replay (struct target_ops *self, ptid_t ptid, int dir)
1342{
1343 return dir == EXEC_REVERSE || record_btrace_is_replaying (self, ptid);
1344}
1345
633785ff
MM
1346/* The to_xfer_partial method of target record-btrace. */
1347
9b409511 1348static enum target_xfer_status
633785ff
MM
1349record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
1350 const char *annex, gdb_byte *readbuf,
1351 const gdb_byte *writebuf, ULONGEST offset,
9b409511 1352 ULONGEST len, ULONGEST *xfered_len)
633785ff
MM
1353{
1354 struct target_ops *t;
1355
1356 /* Filter out requests that don't make sense during replay. */
67b5c0c1 1357 if (replay_memory_access == replay_memory_access_read_only
aef92902 1358 && !record_btrace_generating_corefile
4d10e986 1359 && record_btrace_is_replaying (ops, inferior_ptid))
633785ff
MM
1360 {
1361 switch (object)
1362 {
1363 case TARGET_OBJECT_MEMORY:
1364 {
1365 struct target_section *section;
1366
1367 /* We do not allow writing memory in general. */
1368 if (writebuf != NULL)
9b409511
YQ
1369 {
1370 *xfered_len = len;
bc113b4e 1371 return TARGET_XFER_UNAVAILABLE;
9b409511 1372 }
633785ff
MM
1373
1374 /* We allow reading readonly memory. */
1375 section = target_section_by_addr (ops, offset);
1376 if (section != NULL)
1377 {
1378 /* Check if the section we found is readonly. */
1379 if ((bfd_get_section_flags (section->the_bfd_section->owner,
1380 section->the_bfd_section)
1381 & SEC_READONLY) != 0)
1382 {
1383 /* Truncate the request to fit into this section. */
325fac50 1384 len = std::min (len, section->endaddr - offset);
633785ff
MM
1385 break;
1386 }
1387 }
1388
9b409511 1389 *xfered_len = len;
bc113b4e 1390 return TARGET_XFER_UNAVAILABLE;
633785ff
MM
1391 }
1392 }
1393 }
1394
1395 /* Forward the request. */
e75fdfca
TT
1396 ops = ops->beneath;
1397 return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
1398 offset, len, xfered_len);
633785ff
MM
1399}
1400
/* The to_insert_breakpoint method of target record-btrace.

   Temporarily switches replay memory access to read-write so the
   breakpoint instruction can be written, forwards to the target beneath,
   and restores the previous access mode on both the normal and the
   exception path.  */

static int
record_btrace_insert_breakpoint (struct target_ops *ops,
				 struct gdbarch *gdbarch,
				 struct bp_target_info *bp_tgt)
{
  const char *old;
  int ret;

  /* Inserting breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  TRY
    {
      ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      /* Restore the access mode before propagating the error.  */
      replay_memory_access = old;
      throw_exception (except);
    }
  END_CATCH
  replay_memory_access = old;

  return ret;
}
1431
/* The to_remove_breakpoint method of target record-btrace.

   Mirror image of record_btrace_insert_breakpoint: widen replay memory
   access to read-write, forward the removal (with REASON) to the target
   beneath, and restore the previous access mode on every exit path.  */

static int
record_btrace_remove_breakpoint (struct target_ops *ops,
				 struct gdbarch *gdbarch,
				 struct bp_target_info *bp_tgt,
				 enum remove_bp_reason reason)
{
  const char *old;
  int ret;

  /* Removing breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  TRY
    {
      ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt,
						reason);
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      /* Restore the access mode before propagating the error.  */
      replay_memory_access = old;
      throw_exception (except);
    }
  END_CATCH
  replay_memory_access = old;

  return ret;
}
1464
1f3ef581
MM
/* The to_fetch_registers method of target record-btrace.

   While replaying, only the PC is known -- it is supplied from the
   current replay instruction.  Requests for other registers are silently
   ignored.  When not replaying (or while generating a core file), the
   request is forwarded to the target beneath.  */

static void
record_btrace_fetch_registers (struct target_ops *ops,
			       struct regcache *regcache, int regno)
{
  struct btrace_insn_iterator *replay;
  struct thread_info *tp;

  tp = find_thread_ptid (inferior_ptid);
  gdb_assert (tp != NULL);

  replay = tp->btrace.replay;
  if (replay != NULL && !record_btrace_generating_corefile)
    {
      const struct btrace_insn *insn;
      struct gdbarch *gdbarch;
      int pcreg;

      gdbarch = get_regcache_arch (regcache);
      pcreg = gdbarch_pc_regnum (gdbarch);
      if (pcreg < 0)
	return;

      /* We can only provide the PC register.  */
      if (regno >= 0 && regno != pcreg)
	return;

      insn = btrace_insn_get (replay);
      gdb_assert (insn != NULL);

      regcache_raw_supply (regcache, regno, &insn->pc);
    }
  else
    {
      struct target_ops *t = ops->beneath;

      t->to_fetch_registers (t, regcache, regno);
    }
}
1505
/* The to_store_registers method of target record-btrace.

   Writing registers would diverge from the recorded history, so it is
   rejected while replaying (unless we are generating a core file).
   Otherwise the request is forwarded to the target beneath.  */

static void
record_btrace_store_registers (struct target_ops *ops,
			       struct regcache *regcache, int regno)
{
  struct target_ops *t;

  if (!record_btrace_generating_corefile
      && record_btrace_is_replaying (ops, inferior_ptid))
    error (_("Cannot write registers while replaying."));

  gdb_assert (may_write_registers != 0);

  t = ops->beneath;
  t->to_store_registers (t, regcache, regno);
}
1523
/* The to_prepare_to_store method of target record-btrace.

   A no-op while replaying (stores are rejected anyway); otherwise
   forwarded to the target beneath.  */

static void
record_btrace_prepare_to_store (struct target_ops *ops,
				struct regcache *regcache)
{
  struct target_ops *t;

  if (!record_btrace_generating_corefile
      && record_btrace_is_replaying (ops, inferior_ptid))
    return;

  t = ops->beneath;
  t->to_prepare_to_store (t, regcache);
}
1539
0b722aec
MM
/* The branch trace frame cache.

   Associates a frame_info with the branch trace function segment it
   represents; instances are allocated on the frame obstack and indexed
   by frame in the BFCACHE hash table below.  */

struct btrace_frame_cache
{
  /* The thread.  */
  struct thread_info *tp;

  /* The frame info.  */
  struct frame_info *frame;

  /* The branch trace function segment.  */
  const struct btrace_function *bfun;
};

/* A struct btrace_frame_cache hash table indexed by NEXT.  */

static htab_t bfcache;
1557
1558/* hash_f for htab_create_alloc of bfcache. */
1559
1560static hashval_t
1561bfcache_hash (const void *arg)
1562{
19ba03f4
SM
1563 const struct btrace_frame_cache *cache
1564 = (const struct btrace_frame_cache *) arg;
0b722aec
MM
1565
1566 return htab_hash_pointer (cache->frame);
1567}
1568
1569/* eq_f for htab_create_alloc of bfcache. */
1570
1571static int
1572bfcache_eq (const void *arg1, const void *arg2)
1573{
19ba03f4
SM
1574 const struct btrace_frame_cache *cache1
1575 = (const struct btrace_frame_cache *) arg1;
1576 const struct btrace_frame_cache *cache2
1577 = (const struct btrace_frame_cache *) arg2;
0b722aec
MM
1578
1579 return cache1->frame == cache2->frame;
1580}
1581
/* Create a new btrace frame cache.

   Allocates the cache entry on FRAME's obstack (freed with the frame) and
   registers it in BFCACHE.  Asserts that FRAME was not already cached.  */

static struct btrace_frame_cache *
bfcache_new (struct frame_info *frame)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
  cache->frame = frame;

  slot = htab_find_slot (bfcache, cache, INSERT);
  gdb_assert (*slot == NULL);
  *slot = cache;

  return cache;
}
1599
/* Extract the branch trace function from a branch trace frame.

   Returns NULL if FRAME is not one of ours (not present in BFCACHE).  */

static const struct btrace_function *
btrace_get_frame_function (struct frame_info *frame)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;
  struct btrace_frame_cache pattern;
  void **slot;

  pattern.frame = frame;

  slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
  if (slot == NULL)
    return NULL;

  cache = (const struct btrace_frame_cache *) *slot;
  return cache->bfun;
}
1619
cecac1ab
MM
/* Implement stop_reason method for record_btrace_frame_unwind.

   Unwinding stops (UNWIND_UNAVAILABLE) when the cached function segment
   has no caller recorded in the trace.  */

static enum unwind_stop_reason
record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
					void **this_cache)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;

  cache = (const struct btrace_frame_cache *) *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  if (bfun->up == NULL)
    return UNWIND_UNAVAILABLE;

  return UNWIND_NO_REASON;
}
1638
/* Implement this_id method for record_btrace_frame_unwind.

   Builds an unavailable-stack frame id from the frame's function start
   address and, as the distinguishing "special" value, the number of the
   first segment of this function instance in the trace.  */

static void
record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
			     struct frame_id *this_id)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;
  CORE_ADDR code, special;

  cache = (const struct btrace_frame_cache *) *this_cache;

  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  /* Walk back to the first segment of this function instance so all
     segments of one call share the same frame id.  */
  while (bfun->segment.prev != NULL)
    bfun = bfun->segment.prev;

  code = get_frame_func (this_frame);
  special = bfun->number;

  *this_id = frame_id_build_unavailable_stack_special (code, special);

  DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
	 btrace_get_bfun_name (cache->bfun),
	 core_addr_to_string_nz (this_id->code_addr),
	 core_addr_to_string_nz (this_id->special_addr));
}
1667
/* Implement prev_register method for record_btrace_frame_unwind.

   Only the PC can be unwound from branch trace.  For a call, the
   caller's resume PC is the instruction after its last recorded
   instruction; for a return, it is the caller's first instruction.
   Every other register is reported unavailable.  */

static struct value *
record_btrace_frame_prev_register (struct frame_info *this_frame,
				   void **this_cache,
				   int regnum)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun, *caller;
  const struct btrace_insn *insn;
  struct gdbarch *gdbarch;
  CORE_ADDR pc;
  int pcreg;

  gdbarch = get_frame_arch (this_frame);
  pcreg = gdbarch_pc_regnum (gdbarch);
  if (pcreg < 0 || regnum != pcreg)
    throw_error (NOT_AVAILABLE_ERROR,
		 _("Registers are not available in btrace record history"));

  cache = (const struct btrace_frame_cache *) *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  caller = bfun->up;
  if (caller == NULL)
    throw_error (NOT_AVAILABLE_ERROR,
		 _("No caller in btrace record history"));

  if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
    {
      /* Linked via a return: resume at the caller's first instruction.  */
      insn = VEC_index (btrace_insn_s, caller->insn, 0);
      pc = insn->pc;
    }
  else
    {
      /* Linked via a call: resume after the caller's last instruction.  */
      insn = VEC_last (btrace_insn_s, caller->insn);
      pc = insn->pc;

      pc += gdb_insn_length (gdbarch, pc);
    }

  DEBUG ("[frame] unwound PC in %s on level %d: %s",
	 btrace_get_bfun_name (bfun), bfun->level,
	 core_addr_to_string_nz (pc));

  return frame_unwind_got_address (this_frame, regnum, pc);
}
1716
/* Implement sniffer method for record_btrace_frame_unwind.

   Claims THIS_FRAME if it corresponds to a traced function segment:
   the current replay position for the innermost frame, or the caller
   of an already-claimed callee frame (excluding tail-call links, which
   the tailcall sniffer below handles).  */

static int
record_btrace_frame_sniffer (const struct frame_unwind *self,
			     struct frame_info *this_frame,
			     void **this_cache)
{
  const struct btrace_function *bfun;
  struct btrace_frame_cache *cache;
  struct thread_info *tp;
  struct frame_info *next;

  /* THIS_FRAME does not contain a reference to its thread.  */
  tp = find_thread_ptid (inferior_ptid);
  gdb_assert (tp != NULL);

  bfun = NULL;
  next = get_next_frame (this_frame);
  if (next == NULL)
    {
      /* Innermost frame: use the replay position, if any.  */
      const struct btrace_insn_iterator *replay;

      replay = tp->btrace.replay;
      if (replay != NULL)
	bfun = replay->function;
    }
  else
    {
      /* Outer frame: this is the caller of the adjacent callee frame.  */
      const struct btrace_function *callee;

      callee = btrace_get_frame_function (next);
      if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
	bfun = callee->up;
    }

  if (bfun == NULL)
    return 0;

  DEBUG ("[frame] sniffed frame for %s on level %d",
	 btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = tp;
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}
1766
/* Implement sniffer method for record_btrace_tailcall_frame_unwind.

   Claims THIS_FRAME only when the adjacent callee frame is a traced
   segment whose link to its caller is a tail call.  */

static int
record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
				      struct frame_info *this_frame,
				      void **this_cache)
{
  const struct btrace_function *bfun, *callee;
  struct btrace_frame_cache *cache;
  struct frame_info *next;

  next = get_next_frame (this_frame);
  if (next == NULL)
    return 0;

  callee = btrace_get_frame_function (next);
  if (callee == NULL)
    return 0;

  if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
    return 0;

  bfun = callee->up;
  if (bfun == NULL)
    return 0;

  DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
	 btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = find_thread_ptid (inferior_ptid);
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}
1804
/* Implement dealloc_cache method for the btrace frame unwinders.

   Removes the frame's entry from BFCACHE; the entry's storage itself
   lives on the frame obstack and is released with the frame.  */

static void
record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = (struct btrace_frame_cache *) this_cache;

  slot = htab_find_slot (bfcache, cache, NO_INSERT);
  gdb_assert (slot != NULL);

  htab_remove_elt (bfcache, cache);
}
1818
/* btrace recording does not store previous memory content, nor the stack
   frames' content.  Any unwinding would return erroneous results as the
   stack contents no longer match the changed PC value restored from
   history.  Therefore this unwinder reports any possibly unwound
   registers as <unavailable>.  */

const struct frame_unwind record_btrace_frame_unwind =
{
  NORMAL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_frame_sniffer,
  record_btrace_frame_dealloc_cache
};
1835
/* Unwinder for tail-call frames synthesized from branch trace; shares all
   methods with record_btrace_frame_unwind except the sniffer.  */

const struct frame_unwind record_btrace_tailcall_frame_unwind =
{
  TAILCALL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_tailcall_frame_sniffer,
  record_btrace_frame_dealloc_cache
};
b2f4cfde 1846
ac01945b
TT
/* Implement the to_get_unwinder method.  Returns the btrace unwinder for
   normal frames.  */

static const struct frame_unwind *
record_btrace_to_get_unwinder (struct target_ops *self)
{
  return &record_btrace_frame_unwind;
}
1854
/* Implement the to_get_tailcall_unwinder method.  Returns the btrace
   unwinder for tail-call frames.  */

static const struct frame_unwind *
record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
{
  return &record_btrace_tailcall_frame_unwind;
}
1862
987e68b1
MM
1863/* Return a human-readable string for FLAG. */
1864
1865static const char *
1866btrace_thread_flag_to_str (enum btrace_thread_flag flag)
1867{
1868 switch (flag)
1869 {
1870 case BTHR_STEP:
1871 return "step";
1872
1873 case BTHR_RSTEP:
1874 return "reverse-step";
1875
1876 case BTHR_CONT:
1877 return "cont";
1878
1879 case BTHR_RCONT:
1880 return "reverse-cont";
1881
1882 case BTHR_STOP:
1883 return "stop";
1884 }
1885
1886 return "<invalid>";
1887}
1888
52834460
MM
/* Indicate that TP should be resumed according to FLAG.

   Records the intent in TP's btrace flags; the actual stepping happens
   later in record_btrace_wait.  Also refreshes TP's branch trace so the
   move operates on up-to-date history.  */

static void
record_btrace_resume_thread (struct thread_info *tp,
			     enum btrace_thread_flag flag)
{
  struct btrace_thread_info *btinfo;

  DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp),
	 target_pid_to_str (tp->ptid), flag, btrace_thread_flag_to_str (flag));

  btinfo = &tp->btrace;

  /* Fetch the latest branch trace.  */
  btrace_fetch (tp);

  /* A resume request overwrites a preceding resume or stop request.  */
  btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
  btinfo->flags |= flag;
}
1909
ec71cc2f
MM
/* Get the current frame for TP.

   Temporarily switches INFERIOR_PTID to TP and clears TP's executing
   flag so the frame machinery will compute a frame; both are restored
   on the normal and the exception path.  */

static struct frame_info *
get_thread_current_frame (struct thread_info *tp)
{
  struct frame_info *frame;
  ptid_t old_inferior_ptid;
  int executing;

  /* Set INFERIOR_PTID, which is implicitly used by get_current_frame.  */
  old_inferior_ptid = inferior_ptid;
  inferior_ptid = tp->ptid;

  /* Clear the executing flag to allow changes to the current frame.
     We are not actually running, yet.  We just started a reverse execution
     command or a record goto command.
     For the latter, EXECUTING is false and this has no effect.
     For the former, EXECUTING is true and we're in to_wait, about to
     move the thread.  Since we need to recompute the stack, we temporarily
     set EXECUTING to false.  */
  executing = is_executing (inferior_ptid);
  set_executing (inferior_ptid, 0);

  frame = NULL;
  TRY
    {
      frame = get_current_frame ();
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      /* Restore the previous execution state.  */
      set_executing (inferior_ptid, executing);

      /* Restore the previous inferior_ptid.  */
      inferior_ptid = old_inferior_ptid;

      throw_exception (except);
    }
  END_CATCH

  /* Restore the previous execution state.  */
  set_executing (inferior_ptid, executing);

  /* Restore the previous inferior_ptid.  */
  inferior_ptid = old_inferior_ptid;

  return frame;
}
1958
52834460
MM
/* Start replaying a thread.

   Positions a new replay iterator at the end of TP's trace (skipping
   trailing gaps), installs it in TP's btrace info, and fixes up the
   stepping-related frame ids that were computed with the live unwinder
   so step-into-subroutine detection keeps working under the btrace
   unwinder.  On error, the iterator is freed and replay stays off.
   Returns the installed iterator, or NULL if there is no trace.  */

static struct btrace_insn_iterator *
record_btrace_start_replaying (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;
  replay = NULL;

  /* We can't start replaying without trace.  */
  if (btinfo->begin == NULL)
    return NULL;

  /* GDB stores the current frame_id when stepping in order to detect steps
     into subroutines.
     Since frames are computed differently when we're replaying, we need to
     recompute those stored frames and fix them up so we can still detect
     subroutines after we started replaying.  */
  TRY
    {
      struct frame_info *frame;
      struct frame_id frame_id;
      int upd_step_frame_id, upd_step_stack_frame_id;

      /* The current frame without replaying - computed via normal unwind.  */
      frame = get_thread_current_frame (tp);
      frame_id = get_frame_id (frame);

      /* Check if we need to update any stepping-related frame id's.  */
      upd_step_frame_id = frame_id_eq (frame_id,
				       tp->control.step_frame_id);
      upd_step_stack_frame_id = frame_id_eq (frame_id,
					     tp->control.step_stack_frame_id);

      /* We start replaying at the end of the branch trace.  This corresponds
	 to the current instruction.  */
      replay = XNEW (struct btrace_insn_iterator);
      btrace_insn_end (replay, btinfo);

      /* Skip gaps at the end of the trace.  */
      while (btrace_insn_get (replay) == NULL)
	{
	  unsigned int steps;

	  steps = btrace_insn_prev (replay, 1);
	  if (steps == 0)
	    error (_("No trace."));
	}

      /* We're not replaying, yet.  */
      gdb_assert (btinfo->replay == NULL);
      btinfo->replay = replay;

      /* Make sure we're not using any stale registers.  */
      registers_changed_ptid (tp->ptid);

      /* The current frame with replaying - computed via btrace unwind.  */
      frame = get_thread_current_frame (tp);
      frame_id = get_frame_id (frame);

      /* Replace stepping related frames where necessary.  */
      if (upd_step_frame_id)
	tp->control.step_frame_id = frame_id;
      if (upd_step_stack_frame_id)
	tp->control.step_stack_frame_id = frame_id;
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      /* Roll back: free the iterator, disable replay, and drop any
	 register values computed from it.  */
      xfree (btinfo->replay);
      btinfo->replay = NULL;

      registers_changed_ptid (tp->ptid);

      throw_exception (except);
    }
  END_CATCH

  return replay;
}
2040
/* Stop replaying a thread.

   Frees and clears TP's replay iterator and invalidates cached registers
   so subsequent reads come from the live target again.  */

static void
record_btrace_stop_replaying (struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;

  xfree (btinfo->replay);
  btinfo->replay = NULL;

  /* Make sure we're not leaving any stale registers.  */
  registers_changed_ptid (tp->ptid);
}
2056
e3cfc1c7
MM
/* Stop replaying TP if it is at the end of its execution history.

   A no-op if TP is not replaying or its replay position has not reached
   the end of the trace.  */

static void
record_btrace_stop_replaying_at_end (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay, end;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;
  replay = btinfo->replay;

  if (replay == NULL)
    return;

  btrace_insn_end (&end, btinfo);

  if (btrace_insn_cmp (replay, &end) == 0)
    record_btrace_stop_replaying (tp);
}
2076
b2f4cfde
MM
/* The to_resume method of target record-btrace.

   When nothing is replaying and we move forward, forwards the request to
   the target beneath.  Otherwise, records a per-thread move intent
   (step/continue, forward/reverse) that record_btrace_wait acts on, and
   arms async handling when available.  */

static void
record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
		      enum gdb_signal signal)
{
  struct thread_info *tp;
  enum btrace_thread_flag flag, cflag;

  DEBUG ("resume %s: %s%s", target_pid_to_str (ptid),
	 execution_direction == EXEC_REVERSE ? "reverse-" : "",
	 step ? "step" : "cont");

  /* Store the execution direction of the last resume.

     If there is more than one to_resume call, we have to rely on infrun
     to not change the execution direction in-between.  */
  record_btrace_resume_exec_dir = execution_direction;

  /* As long as we're not replaying, just forward the request.

     For non-stop targets this means that no thread is replaying.  In order to
     make progress, we may need to explicitly move replaying threads to the end
     of their execution history.  */
  if ((execution_direction != EXEC_REVERSE)
      && !record_btrace_is_replaying (ops, minus_one_ptid))
    {
      ops = ops->beneath;
      ops->to_resume (ops, ptid, step, signal);
      return;
    }

  /* Compute the btrace thread flag for the requested move.  FLAG is for
     the stepped thread, CFLAG for threads merely continued alongside.  */
  if (execution_direction == EXEC_REVERSE)
    {
      flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;
      cflag = BTHR_RCONT;
    }
  else
    {
      flag = step == 0 ? BTHR_CONT : BTHR_STEP;
      cflag = BTHR_CONT;
    }

  /* We just indicate the resume intent here.  The actual stepping happens in
     record_btrace_wait below.

     For all-stop targets, we only step INFERIOR_PTID and continue others.  */
  if (!target_is_non_stop_p ())
    {
      gdb_assert (ptid_match (inferior_ptid, ptid));

      ALL_NON_EXITED_THREADS (tp)
	if (ptid_match (tp->ptid, ptid))
	  {
	    if (ptid_match (tp->ptid, inferior_ptid))
	      record_btrace_resume_thread (tp, flag);
	    else
	      record_btrace_resume_thread (tp, cflag);
	  }
    }
  else
    {
      ALL_NON_EXITED_THREADS (tp)
	if (ptid_match (tp->ptid, ptid))
	  record_btrace_resume_thread (tp, flag);
    }

  /* Async support.  */
  if (target_can_async_p ())
    {
      target_async (1);
      mark_async_event_handler (record_btrace_async_inferior_event_handler);
    }
}
2152
85ad3aaf
PA
/* The to_commit_resume method of target record-btrace.

   Forwarded to the target beneath only when nothing is replaying and we
   are moving forward -- mirroring the forwarding condition in
   record_btrace_resume.  */

static void
record_btrace_commit_resume (struct target_ops *ops)
{
  if ((execution_direction != EXEC_REVERSE)
      && !record_btrace_is_replaying (ops, minus_one_ptid))
    ops->beneath->to_commit_resume (ops->beneath);
}
2162
987e68b1
MM
/* Cancel resuming TP.

   Clears any pending move/stop intent and, if TP has replayed to the end
   of its history, turns replay off.  A no-op if nothing was pending.  */

static void
record_btrace_cancel_resume (struct thread_info *tp)
{
  enum btrace_thread_flag flags;

  flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
  if (flags == 0)
    return;

  DEBUG ("cancel resume thread %s (%s): %x (%s)",
	 print_thread_id (tp),
	 target_pid_to_str (tp->ptid), flags,
	 btrace_thread_flag_to_str (flags));

  tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
  record_btrace_stop_replaying_at_end (tp);
}
2182
2183/* Return a target_waitstatus indicating that we ran out of history. */
2184
2185static struct target_waitstatus
2186btrace_step_no_history (void)
2187{
2188 struct target_waitstatus status;
2189
2190 status.kind = TARGET_WAITKIND_NO_HISTORY;
2191
2192 return status;
2193}
2194
2195/* Return a target_waitstatus indicating that a step finished. */
2196
2197static struct target_waitstatus
2198btrace_step_stopped (void)
2199{
2200 struct target_waitstatus status;
2201
2202 status.kind = TARGET_WAITKIND_STOPPED;
2203 status.value.sig = GDB_SIGNAL_TRAP;
2204
2205 return status;
2206}
2207
6e4879f0
MM
2208/* Return a target_waitstatus indicating that a thread was stopped as
2209 requested. */
2210
2211static struct target_waitstatus
2212btrace_step_stopped_on_request (void)
2213{
2214 struct target_waitstatus status;
2215
2216 status.kind = TARGET_WAITKIND_STOPPED;
2217 status.value.sig = GDB_SIGNAL_0;
2218
2219 return status;
2220}
2221
d825d248
MM
2222/* Return a target_waitstatus indicating a spurious stop. */
2223
2224static struct target_waitstatus
2225btrace_step_spurious (void)
2226{
2227 struct target_waitstatus status;
2228
2229 status.kind = TARGET_WAITKIND_SPURIOUS;
2230
2231 return status;
2232}
2233
e3cfc1c7
MM
2234/* Return a target_waitstatus indicating that the thread was not resumed. */
2235
2236static struct target_waitstatus
2237btrace_step_no_resumed (void)
2238{
2239 struct target_waitstatus status;
2240
2241 status.kind = TARGET_WAITKIND_NO_RESUMED;
2242
2243 return status;
2244}
2245
2246/* Return a target_waitstatus indicating that we should wait again. */
2247
2248static struct target_waitstatus
2249btrace_step_again (void)
2250{
2251 struct target_waitstatus status;
2252
2253 status.kind = TARGET_WAITKIND_IGNORE;
2254
2255 return status;
2256}
2257
52834460
MM
2258/* Clear the record histories. */
2259
2260static void
2261record_btrace_clear_histories (struct btrace_thread_info *btinfo)
2262{
2263 xfree (btinfo->insn_history);
2264 xfree (btinfo->call_history);
2265
2266 btinfo->insn_history = NULL;
2267 btinfo->call_history = NULL;
2268}
2269
/* Check whether TP's current replay position is at a breakpoint.

   Returns non-zero iff the replayed instruction's PC matches a breakpoint
   in TP's inferior; as a side effect records the stop reason in
   TP's btrace.stop_reason.  Returns zero when TP is not replaying, when
   the replay iterator sits on a gap, or when no inferior is found.  */

static int
record_btrace_replay_at_breakpoint (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay;
  struct btrace_thread_info *btinfo;
  const struct btrace_insn *insn;
  struct inferior *inf;

  btinfo = &tp->btrace;
  replay = btinfo->replay;

  /* Not replaying: the live target handles breakpoints.  */
  if (replay == NULL)
    return 0;

  /* A NULL instruction indicates a gap in the trace.  */
  insn = btrace_insn_get (replay);
  if (insn == NULL)
    return 0;

  inf = find_inferior_ptid (tp->ptid);
  if (inf == NULL)
    return 0;

  return record_check_stopped_by_breakpoint (inf->aspace, insn->pc,
					     &btinfo->stop_reason);
}
2297
/* Step one instruction in forward direction.

   Returns NO_HISTORY when TP is not replaying or the end of the trace is
   reached, STOPPED when stepping onto a breakpoint, and SPURIOUS after a
   successful single step.  */

static struct target_waitstatus
record_btrace_single_step_forward (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay, end, start;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;
  replay = btinfo->replay;

  /* We're done if we're not replaying.  */
  if (replay == NULL)
    return btrace_step_no_history ();

  /* Check if we're stepping a breakpoint.  */
  if (record_btrace_replay_at_breakpoint (tp))
    return btrace_step_stopped ();

  /* Skip gaps during replay.  If we end up at a gap (at the end of the trace),
     jump back to the instruction at which we started.  */
  start = *replay;
  do
    {
      unsigned int steps;

      /* We will bail out here if we continue stepping after reaching the end
	 of the execution history.  */
      steps = btrace_insn_next (replay, 1);
      if (steps == 0)
	{
	  /* Restore the iterator so the replay position is unchanged.  */
	  *replay = start;
	  return btrace_step_no_history ();
	}
    }
  while (btrace_insn_get (replay) == NULL);

  /* Determine the end of the instruction trace.  */
  btrace_insn_end (&end, btinfo);

  /* The execution trace contains (and ends with) the current instruction.
     This instruction has not been executed, yet, so the trace really ends
     one instruction earlier.  */
  if (btrace_insn_cmp (replay, &end) == 0)
    return btrace_step_no_history ();

  return btrace_step_spurious ();
}
2346
/* Step one instruction in backward direction.

   Starts replaying if TP is not replaying yet.  Returns NO_HISTORY at the
   beginning of the trace, STOPPED when the de-executed instruction is at a
   breakpoint, and SPURIOUS after a successful backward step.  */

static struct target_waitstatus
record_btrace_single_step_backward (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay, start;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;
  replay = btinfo->replay;

  /* Start replaying if we're not already doing so.  */
  if (replay == NULL)
    replay = record_btrace_start_replaying (tp);

  /* If we can't step any further, we reached the end of the history.
     Skip gaps during replay.  If we end up at a gap (at the beginning of
     the trace), jump back to the instruction at which we started.  */
  start = *replay;
  do
    {
      unsigned int steps;

      steps = btrace_insn_prev (replay, 1);
      if (steps == 0)
	{
	  /* Restore the iterator so the replay position is unchanged.  */
	  *replay = start;
	  return btrace_step_no_history ();
	}
    }
  while (btrace_insn_get (replay) == NULL);

  /* Check if we're stepping a breakpoint.

     For reverse-stepping, this check is after the step.  There is logic in
     infrun.c that handles reverse-stepping separately.  See, for example,
     proceed and adjust_pc_after_break.

     This code assumes that for reverse-stepping, PC points to the last
     de-executed instruction, whereas for forward-stepping PC points to the
     next to-be-executed instruction.  */
  if (record_btrace_replay_at_breakpoint (tp))
    return btrace_step_stopped ();

  return btrace_step_spurious ();
}
2393
/* Step a single thread.

   Consumes TP's pending move/stop request and performs one unit of
   stepping.  Continue requests (BTHR_CONT/BTHR_RCONT) that did not yet hit
   an event are re-armed and reported as IGNORE so the caller's wait loop
   steps this thread again.  */

static struct target_waitstatus
record_btrace_step_thread (struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;
  struct target_waitstatus status;
  enum btrace_thread_flag flags;

  btinfo = &tp->btrace;

  /* Fetch and clear the pending request; it may be re-set below.  */
  flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
  btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);

  DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp),
	 target_pid_to_str (tp->ptid), flags,
	 btrace_thread_flag_to_str (flags));

  /* We can't step without an execution history.  */
  if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
    return btrace_step_no_history ();

  switch (flags)
    {
    default:
      internal_error (__FILE__, __LINE__, _("invalid stepping type."));

    case BTHR_STOP:
      return btrace_step_stopped_on_request ();

    case BTHR_STEP:
      status = record_btrace_single_step_forward (tp);
      if (status.kind != TARGET_WAITKIND_SPURIOUS)
	break;

      /* A spurious step completed successfully: report the stop.  */
      return btrace_step_stopped ();

    case BTHR_RSTEP:
      status = record_btrace_single_step_backward (tp);
      if (status.kind != TARGET_WAITKIND_SPURIOUS)
	break;

      return btrace_step_stopped ();

    case BTHR_CONT:
      status = record_btrace_single_step_forward (tp);
      if (status.kind != TARGET_WAITKIND_SPURIOUS)
	break;

      /* Keep continuing: re-arm the request and ask to be stepped again.  */
      btinfo->flags |= flags;
      return btrace_step_again ();

    case BTHR_RCONT:
      status = record_btrace_single_step_backward (tp);
      if (status.kind != TARGET_WAITKIND_SPURIOUS)
	break;

      btinfo->flags |= flags;
      return btrace_step_again ();
    }

  /* We keep threads moving at the end of their execution history.  The to_wait
     method will stop the thread for whom the event is reported.  */
  if (status.kind == TARGET_WAITKIND_NO_HISTORY)
    btinfo->flags |= flags;

  return status;
}
2462
/* A vector of threads, used by record_btrace_wait to track which threads
   are still moving and which ran out of execution history.  */

typedef struct thread_info * tp_t;
DEF_VEC_P (tp_t);

a6b5be76
MM
2468/* Announce further events if necessary. */
2469
2470static void
2471record_btrace_maybe_mark_async_event (const VEC (tp_t) *moving,
2472 const VEC (tp_t) *no_history)
2473{
2474 int more_moving, more_no_history;
2475
2476 more_moving = !VEC_empty (tp_t, moving);
2477 more_no_history = !VEC_empty (tp_t, no_history);
2478
2479 if (!more_moving && !more_no_history)
2480 return;
2481
2482 if (more_moving)
2483 DEBUG ("movers pending");
2484
2485 if (more_no_history)
2486 DEBUG ("no-history pending");
2487
2488 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2489}
2490
/* The to_wait method of target record-btrace.

   While replaying (or reverse-executing), drive the per-thread stepping
   machinery until one thread reports an event; otherwise forward to the
   target beneath.  Returns the ptid of the eventing thread and fills in
   STATUS.  */

static ptid_t
record_btrace_wait (struct target_ops *ops, ptid_t ptid,
		    struct target_waitstatus *status, int options)
{
  VEC (tp_t) *moving, *no_history;
  struct thread_info *tp, *eventing;
  struct cleanup *cleanups = make_cleanup (null_cleanup, NULL);

  DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);

  /* As long as we're not replaying, just forward the request.  */
  if ((execution_direction != EXEC_REVERSE)
      && !record_btrace_is_replaying (ops, minus_one_ptid))
    {
      ops = ops->beneath;
      return ops->to_wait (ops, ptid, status, options);
    }

  moving = NULL;
  no_history = NULL;

  make_cleanup (VEC_cleanup (tp_t), &moving);
  make_cleanup (VEC_cleanup (tp_t), &no_history);

  /* Keep a work list of moving threads.  */
  ALL_NON_EXITED_THREADS (tp)
    if (ptid_match (tp->ptid, ptid)
	&& ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0))
      VEC_safe_push (tp_t, moving, tp);

  /* No thread had a pending move/stop request.  */
  if (VEC_empty (tp_t, moving))
    {
      *status = btrace_step_no_resumed ();

      DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid),
	     target_waitstatus_to_string (status));

      do_cleanups (cleanups);
      return null_ptid;
    }

  /* Step moving threads one by one, one step each, until either one thread
     reports an event or we run out of threads to step.

     When stepping more than one thread, chances are that some threads reach
     the end of their execution history earlier than others.  If we reported
     this immediately, all-stop on top of non-stop would stop all threads and
     resume the same threads next time.  And we would report the same thread
     having reached the end of its execution history again.

     In the worst case, this would starve the other threads.  But even if other
     threads would be allowed to make progress, this would result in far too
     many intermediate stops.

     We therefore delay the reporting of "no execution history" until we have
     nothing else to report.  By this time, all threads should have moved to
     either the beginning or the end of their execution history.  There will
     be a single user-visible stop.  */
  eventing = NULL;
  while ((eventing == NULL) && !VEC_empty (tp_t, moving))
    {
      unsigned int ix;

      ix = 0;
      while ((eventing == NULL) && VEC_iterate (tp_t, moving, ix, tp))
	{
	  *status = record_btrace_step_thread (tp);

	  switch (status->kind)
	    {
	    case TARGET_WAITKIND_IGNORE:
	      /* Thread wants another step; move to the next one.  */
	      ix++;
	      break;

	    case TARGET_WAITKIND_NO_HISTORY:
	      /* Park the thread on the no-history list; report later.  */
	      VEC_safe_push (tp_t, no_history,
			     VEC_ordered_remove (tp_t, moving, ix));
	      break;

	    default:
	      /* A reportable event: this thread ends the wait.  */
	      eventing = VEC_unordered_remove (tp_t, moving, ix);
	      break;
	    }
	}
    }

  if (eventing == NULL)
    {
      /* We started with at least one moving thread.  This thread must have
	 either stopped or reached the end of its execution history.

	 In the former case, EVENTING must not be NULL.
	 In the latter case, NO_HISTORY must not be empty.  */
      gdb_assert (!VEC_empty (tp_t, no_history));

      /* We kept threads moving at the end of their execution history.  Stop
	 EVENTING now that we are going to report its stop.  */
      eventing = VEC_unordered_remove (tp_t, no_history, 0);
      eventing->btrace.flags &= ~BTHR_MOVE;

      *status = btrace_step_no_history ();
    }

  gdb_assert (eventing != NULL);

  /* We kept threads replaying at the end of their execution history.  Stop
     replaying EVENTING now that we are going to report its stop.  */
  record_btrace_stop_replaying_at_end (eventing);

  /* Stop all other threads.  */
  if (!target_is_non_stop_p ())
    ALL_NON_EXITED_THREADS (tp)
      record_btrace_cancel_resume (tp);

  /* In async mode, we need to announce further events.  */
  if (target_is_async_p ())
    record_btrace_maybe_mark_async_event (moving, no_history);

  /* Start record histories anew from the current position.  */
  record_btrace_clear_histories (&eventing->btrace);

  /* We moved the replay position but did not update registers.  */
  registers_changed_ptid (eventing->ptid);

  DEBUG ("wait ended by thread %s (%s): %s",
	 print_thread_id (eventing),
	 target_pid_to_str (eventing->ptid),
	 target_waitstatus_to_string (status));

  do_cleanups (cleanups);
  return eventing->ptid;
}
2625
6e4879f0
MM
2626/* The to_stop method of target record-btrace. */
2627
2628static void
2629record_btrace_stop (struct target_ops *ops, ptid_t ptid)
2630{
2631 DEBUG ("stop %s", target_pid_to_str (ptid));
2632
2633 /* As long as we're not replaying, just forward the request. */
a52eab48
MM
2634 if ((execution_direction != EXEC_REVERSE)
2635 && !record_btrace_is_replaying (ops, minus_one_ptid))
6e4879f0
MM
2636 {
2637 ops = ops->beneath;
2638 ops->to_stop (ops, ptid);
2639 }
2640 else
2641 {
2642 struct thread_info *tp;
2643
2644 ALL_NON_EXITED_THREADS (tp)
2645 if (ptid_match (tp->ptid, ptid))
2646 {
2647 tp->btrace.flags &= ~BTHR_MOVE;
2648 tp->btrace.flags |= BTHR_STOP;
2649 }
2650 }
2651 }
2652
/* The to_can_execute_reverse method of target record-btrace.

   Branch tracing always allows reverse execution over the recorded
   history.  */

static int
record_btrace_can_execute_reverse (struct target_ops *self)
{
  return 1;
}
2660
9e8915c6 2661/* The to_stopped_by_sw_breakpoint method of target record-btrace. */
52834460 2662
9e8915c6
PA
2663static int
2664record_btrace_stopped_by_sw_breakpoint (struct target_ops *ops)
52834460 2665{
a52eab48 2666 if (record_btrace_is_replaying (ops, minus_one_ptid))
9e8915c6
PA
2667 {
2668 struct thread_info *tp = inferior_thread ();
2669
2670 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2671 }
2672
2673 return ops->beneath->to_stopped_by_sw_breakpoint (ops->beneath);
2674}
2675
2676/* The to_supports_stopped_by_sw_breakpoint method of target
2677 record-btrace. */
2678
2679static int
2680record_btrace_supports_stopped_by_sw_breakpoint (struct target_ops *ops)
2681{
a52eab48 2682 if (record_btrace_is_replaying (ops, minus_one_ptid))
9e8915c6
PA
2683 return 1;
2684
2685 return ops->beneath->to_supports_stopped_by_sw_breakpoint (ops->beneath);
2686}
2687
/* The to_stopped_by_hw_breakpoint method of target record-btrace.  */

static int
record_btrace_stopped_by_hw_breakpoint (struct target_ops *ops)
{
  if (record_btrace_is_replaying (ops, minus_one_ptid))
    {
      struct thread_info *tp = inferior_thread ();

      /* While replaying, use the stop reason recorded at the current
	 replay position.  */
      return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
    }

  return ops->beneath->to_stopped_by_hw_breakpoint (ops->beneath);
}
2702
2703/* The to_supports_stopped_by_hw_breakpoint method of target
2704 record-btrace. */
2705
2706static int
2707record_btrace_supports_stopped_by_hw_breakpoint (struct target_ops *ops)
2708{
a52eab48 2709 if (record_btrace_is_replaying (ops, minus_one_ptid))
9e8915c6 2710 return 1;
52834460 2711
9e8915c6 2712 return ops->beneath->to_supports_stopped_by_hw_breakpoint (ops->beneath);
b2f4cfde
MM
2713}
2714
/* The to_update_thread_list method of target record-btrace.  */

static void
record_btrace_update_thread_list (struct target_ops *ops)
{
  /* We don't add or remove threads during replay.  */
  if (record_btrace_is_replaying (ops, minus_one_ptid))
    return;

  /* Forward the request.  */
  ops = ops->beneath;
  ops->to_update_thread_list (ops);
}
2728
2729/* The to_thread_alive method of target record-btrace. */
2730
2731static int
2732record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
2733{
2734 /* We don't add or remove threads during replay. */
a52eab48 2735 if (record_btrace_is_replaying (ops, minus_one_ptid))
e2887aa3
MM
2736 return find_thread_ptid (ptid) != NULL;
2737
2738 /* Forward the request. */
e75fdfca
TT
2739 ops = ops->beneath;
2740 return ops->to_thread_alive (ops, ptid);
e2887aa3
MM
2741}
2742
/* Set the replay branch trace instruction iterator.  If IT is NULL, replay
   is stopped.

   Clears cached histories, updates registers for the new position, and
   prints the resulting frame.  */

static void
record_btrace_set_replay (struct thread_info *tp,
			  const struct btrace_insn_iterator *it)
{
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;

  if (it == NULL || it->function == NULL)
    record_btrace_stop_replaying (tp);
  else
    {
      if (btinfo->replay == NULL)
	record_btrace_start_replaying (tp);
      else if (btrace_insn_cmp (btinfo->replay, it) == 0)
	/* Already at the requested position; nothing to do.  */
	return;

      *btinfo->replay = *it;
      registers_changed_ptid (tp->ptid);
    }

  /* Start anew from the new replay position.  */
  record_btrace_clear_histories (btinfo);

  stop_pc = regcache_read_pc (get_current_regcache ());
  print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
}
2773
/* The to_goto_record_begin method of target record-btrace.

   Move the replay position to the first real instruction in the trace,
   skipping leading gaps.  Errors out if the trace contains only gaps.  */

static void
record_btrace_goto_begin (struct target_ops *self)
{
  struct thread_info *tp;
  struct btrace_insn_iterator begin;

  tp = require_btrace_thread ();

  btrace_insn_begin (&begin, &tp->btrace);

  /* Skip gaps at the beginning of the trace.  */
  while (btrace_insn_get (&begin) == NULL)
    {
      unsigned int steps;

      steps = btrace_insn_next (&begin, 1);
      if (steps == 0)
	error (_("No trace."));
    }

  record_btrace_set_replay (tp, &begin);
}
2798
2799/* The to_goto_record_end method of target record-btrace. */
2800
2801static void
307a1b91 2802record_btrace_goto_end (struct target_ops *ops)
066ce621
MM
2803{
2804 struct thread_info *tp;
2805
2806 tp = require_btrace_thread ();
2807
2808 record_btrace_set_replay (tp, NULL);
066ce621
MM
2809}
2810
/* The to_goto_record method of target record-btrace.

   Move the replay position to instruction number INSN.  Errors out if
   INSN does not fit into the iterator's unsigned int range or no such
   instruction exists.  */

static void
record_btrace_goto (struct target_ops *self, ULONGEST insn)
{
  struct thread_info *tp;
  struct btrace_insn_iterator it;
  unsigned int number;
  int found;

  number = insn;

  /* Check for wrap-arounds.  */
  if (number != insn)
    error (_("Instruction number out of range."));

  tp = require_btrace_thread ();

  found = btrace_find_insn_by_number (&it, &tp->btrace, number);
  if (found == 0)
    error (_("No such instruction."));

  record_btrace_set_replay (tp, &it);
}
2835
/* The to_record_stop_replaying method of target record-btrace.

   Stop replaying for all non-exited threads.  */

static void
record_btrace_stop_replaying_all (struct target_ops *self)
{
  struct thread_info *tp;

  ALL_NON_EXITED_THREADS (tp)
    record_btrace_stop_replaying (tp);
}
2846
/* The to_execution_direction target method.

   Return the direction remembered from the last resume request.  */

static enum exec_direction_kind
record_btrace_execution_direction (struct target_ops *self)
{
  return record_btrace_resume_exec_dir;
}
2854
/* The to_prepare_to_generate_core target method.  */

static void
record_btrace_prepare_to_generate_core (struct target_ops *self)
{
  /* NOTE(review): presumably consulted elsewhere (e.g. memory access paths)
     to bypass replay restrictions while a core file is written -- confirm
     against the uses of record_btrace_generating_corefile.  */
  record_btrace_generating_corefile = 1;
}
2862
/* The to_done_generating_core target method.

   Clear the flag set by record_btrace_prepare_to_generate_core.  */

static void
record_btrace_done_generating_core (struct target_ops *self)
{
  record_btrace_generating_corefile = 0;
}
2870
/* Initialize the record-btrace target ops.

   Wires the record_btrace_* methods into the global record_btrace_ops
   vector; methods not set here fall through to the target beneath.  */

static void
init_record_btrace_ops (void)
{
  struct target_ops *ops;

  ops = &record_btrace_ops;
  ops->to_shortname = "record-btrace";
  ops->to_longname = "Branch tracing target";
  ops->to_doc = "Collect control-flow trace and provide the execution history.";
  /* Open/close and process lifetime.  */
  ops->to_open = record_btrace_open;
  ops->to_close = record_btrace_close;
  ops->to_async = record_btrace_async;
  ops->to_detach = record_detach;
  ops->to_disconnect = record_btrace_disconnect;
  ops->to_mourn_inferior = record_mourn_inferior;
  ops->to_kill = record_kill;
  /* Recording and history browsing.  */
  ops->to_stop_recording = record_btrace_stop_recording;
  ops->to_info_record = record_btrace_info;
  ops->to_insn_history = record_btrace_insn_history;
  ops->to_insn_history_from = record_btrace_insn_history_from;
  ops->to_insn_history_range = record_btrace_insn_history_range;
  ops->to_call_history = record_btrace_call_history;
  ops->to_call_history_from = record_btrace_call_history_from;
  ops->to_call_history_range = record_btrace_call_history_range;
  ops->to_record_is_replaying = record_btrace_is_replaying;
  ops->to_record_will_replay = record_btrace_will_replay;
  ops->to_record_stop_replaying = record_btrace_stop_replaying_all;
  /* Memory, breakpoints, and registers during replay.  */
  ops->to_xfer_partial = record_btrace_xfer_partial;
  ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
  ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
  ops->to_fetch_registers = record_btrace_fetch_registers;
  ops->to_store_registers = record_btrace_store_registers;
  ops->to_prepare_to_store = record_btrace_prepare_to_store;
  ops->to_get_unwinder = &record_btrace_to_get_unwinder;
  ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;
  /* Execution control.  */
  ops->to_resume = record_btrace_resume;
  ops->to_commit_resume = record_btrace_commit_resume;
  ops->to_wait = record_btrace_wait;
  ops->to_stop = record_btrace_stop;
  ops->to_update_thread_list = record_btrace_update_thread_list;
  ops->to_thread_alive = record_btrace_thread_alive;
  ops->to_goto_record_begin = record_btrace_goto_begin;
  ops->to_goto_record_end = record_btrace_goto_end;
  ops->to_goto_record = record_btrace_goto;
  ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
  ops->to_stopped_by_sw_breakpoint = record_btrace_stopped_by_sw_breakpoint;
  ops->to_supports_stopped_by_sw_breakpoint
    = record_btrace_supports_stopped_by_sw_breakpoint;
  ops->to_stopped_by_hw_breakpoint = record_btrace_stopped_by_hw_breakpoint;
  ops->to_supports_stopped_by_hw_breakpoint
    = record_btrace_supports_stopped_by_hw_breakpoint;
  ops->to_execution_direction = record_btrace_execution_direction;
  ops->to_prepare_to_generate_core = record_btrace_prepare_to_generate_core;
  ops->to_done_generating_core = record_btrace_done_generating_core;
  ops->to_stratum = record_stratum;
  ops->to_magic = OPS_MAGIC;
}
2930
/* Start recording in BTS format.

   Implements "record btrace bts".  On failure, the configured format is
   reset before re-throwing so a later attempt starts clean.  */

static void
cmd_record_btrace_bts_start (char *args, int from_tty)
{
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  record_btrace_conf.format = BTRACE_FORMAT_BTS;

  TRY
    {
      execute_command ("target record-btrace", from_tty);
    }
  CATCH (exception, RETURN_MASK_ALL)
    {
      record_btrace_conf.format = BTRACE_FORMAT_NONE;
      throw_exception (exception);
    }
  END_CATCH
}
2952
/* Start recording in Intel Processor Trace format.

   Implements "record btrace pt".  On failure, the configured format is
   reset before re-throwing so a later attempt starts clean.  */

static void
cmd_record_btrace_pt_start (char *args, int from_tty)
{
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  record_btrace_conf.format = BTRACE_FORMAT_PT;

  TRY
    {
      execute_command ("target record-btrace", from_tty);
    }
  CATCH (exception, RETURN_MASK_ALL)
    {
      record_btrace_conf.format = BTRACE_FORMAT_NONE;
      throw_exception (exception);
    }
  END_CATCH
}
2974
/* Alias for "target record".

   Tries Intel Processor Trace first and falls back to BTS if PT is not
   available; only if both formats fail is the error propagated (with the
   configured format reset to NONE).  */

static void
cmd_record_btrace_start (char *args, int from_tty)
{
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  record_btrace_conf.format = BTRACE_FORMAT_PT;

  TRY
    {
      execute_command ("target record-btrace", from_tty);
    }
  CATCH (exception, RETURN_MASK_ALL)
    {
      /* PT failed; retry with BTS.  */
      record_btrace_conf.format = BTRACE_FORMAT_BTS;

      TRY
	{
	  execute_command ("target record-btrace", from_tty);
	}
      CATCH (exception, RETURN_MASK_ALL)
	{
	  record_btrace_conf.format = BTRACE_FORMAT_NONE;
	  throw_exception (exception);
	}
      END_CATCH
    }
  END_CATCH
}
3006
/* The "set record btrace" command.

   With no subcommand, list the current settings.  */

static void
cmd_set_record_btrace (char *args, int from_tty)
{
  cmd_show_list (set_record_btrace_cmdlist, from_tty, "");
}
3014
/* The "show record btrace" command.

   List all "show record btrace" settings.  */

static void
cmd_show_record_btrace (char *args, int from_tty)
{
  cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
}
3022
3023/* The "show record btrace replay-memory-access" command. */
3024
3025static void
3026cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
3027 struct cmd_list_element *c, const char *value)
3028{
3029 fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
3030 replay_memory_access);
3031}
3032
/* The "set record btrace bts" command.

   A prefix command; print usage help when no subcommand follows.  */

static void
cmd_set_record_btrace_bts (char *args, int from_tty)
{
  printf_unfiltered (_("\"set record btrace bts\" must be followed "
		       "by an appropriate subcommand.\n"));
  help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
	     all_commands, gdb_stdout);
}
3043
/* The "show record btrace bts" command.

   List all "show record btrace bts" settings.  */

static void
cmd_show_record_btrace_bts (char *args, int from_tty)
{
  cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
}
3051
/* The "set record btrace pt" command.

   A prefix command; print usage help when no subcommand follows.  */

static void
cmd_set_record_btrace_pt (char *args, int from_tty)
{
  printf_unfiltered (_("\"set record btrace pt\" must be followed "
		       "by an appropriate subcommand.\n"));
  help_list (set_record_btrace_pt_cmdlist, "set record btrace pt ",
	     all_commands, gdb_stdout);
}
3062
/* The "show record btrace pt" command.

   List all "show record btrace pt" settings.  */

static void
cmd_show_record_btrace_pt (char *args, int from_tty)
{
  cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, "");
}
3070
/* The "record bts buffer-size" show value function.  */

static void
show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
				   struct cmd_list_element *c,
				   const char *value)
{
  fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
		    value);
}
3081
/* The "record pt buffer-size" show value function.  */

static void
show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
				  struct cmd_list_element *c,
				  const char *value)
{
  fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
		    value);
}
3092
void _initialize_record_btrace (void);

/* Initialize btrace commands.

   Registers the "record btrace" command family, its set/show options,
   the record-btrace target, and module-level defaults.  */

void
_initialize_record_btrace (void)
{
  /* "record btrace" (alias "record b") and its format subcommands.  */
  add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
		  _("Start branch trace recording."), &record_btrace_cmdlist,
		  "record btrace ", 0, &record_cmdlist);
  add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);

  add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
	   _("\
Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
The processor stores a from/to record for each branch into a cyclic buffer.\n\
This format may not be available on all processors."),
	   &record_btrace_cmdlist);
  add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);

  add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
	   _("\
Start branch trace recording in Intel Processor Trace format.\n\n\
This format may not be available on all processors."),
	   &record_btrace_cmdlist);
  add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);

  /* "set/show record btrace" option prefixes.  */
  add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
		  _("Set record options"), &set_record_btrace_cmdlist,
		  "set record btrace ", 0, &set_record_cmdlist);

  add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
		  _("Show record options"), &show_record_btrace_cmdlist,
		  "show record btrace ", 0, &show_record_cmdlist);

  add_setshow_enum_cmd ("replay-memory-access", no_class,
			replay_memory_access_types, &replay_memory_access, _("\
Set what memory accesses are allowed during replay."), _("\
Show what memory accesses are allowed during replay."),
			   _("Default is READ-ONLY.\n\n\
The btrace record target does not trace data.\n\
The memory therefore corresponds to the live target and not \
to the current replay position.\n\n\
When READ-ONLY, allow accesses to read-only memory during replay.\n\
When READ-WRITE, allow accesses to read-only and read-write memory during \
replay."),
			   NULL, cmd_show_replay_memory_access,
			   &set_record_btrace_cmdlist,
			   &show_record_btrace_cmdlist);

  /* Per-format option prefixes and buffer sizes.  */
  add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
		  _("Set record btrace bts options"),
		  &set_record_btrace_bts_cmdlist,
		  "set record btrace bts ", 0, &set_record_btrace_cmdlist);

  add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
		  _("Show record btrace bts options"),
		  &show_record_btrace_bts_cmdlist,
		  "show record btrace bts ", 0, &show_record_btrace_cmdlist);

  add_setshow_uinteger_cmd ("buffer-size", no_class,
			    &record_btrace_conf.bts.size,
			    _("Set the record/replay bts buffer size."),
			    _("Show the record/replay bts buffer size."), _("\
When starting recording request a trace buffer of this size. \
The actual buffer size may differ from the requested size. \
Use \"info record\" to see the actual buffer size.\n\n\
Bigger buffers allow longer recording but also take more time to process \
the recorded execution trace.\n\n\
The trace buffer size may not be changed while recording."), NULL,
			    show_record_bts_buffer_size_value,
			    &set_record_btrace_bts_cmdlist,
			    &show_record_btrace_bts_cmdlist);

  add_prefix_cmd ("pt", class_support, cmd_set_record_btrace_pt,
		  _("Set record btrace pt options"),
		  &set_record_btrace_pt_cmdlist,
		  "set record btrace pt ", 0, &set_record_btrace_cmdlist);

  add_prefix_cmd ("pt", class_support, cmd_show_record_btrace_pt,
		  _("Show record btrace pt options"),
		  &show_record_btrace_pt_cmdlist,
		  "show record btrace pt ", 0, &show_record_btrace_cmdlist);

  add_setshow_uinteger_cmd ("buffer-size", no_class,
			    &record_btrace_conf.pt.size,
			    _("Set the record/replay pt buffer size."),
			    _("Show the record/replay pt buffer size."), _("\
Bigger buffers allow longer recording but also take more time to process \
the recorded execution.\n\
The actual buffer size may differ from the requested size. Use \"info record\" \
to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
			    &set_record_btrace_pt_cmdlist,
			    &show_record_btrace_pt_cmdlist);

  /* Register the target itself.  */
  init_record_btrace_ops ();
  add_target (&record_btrace_ops);

  bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
			       xcalloc, xfree);

  /* Default buffer sizes: 64 KiB for BTS, 16 KiB for PT.  */
  record_btrace_conf.bts.size = 64 * 1024;
  record_btrace_conf.pt.size = 16 * 1024;
}
This page took 0.534261 seconds and 4 git commands to generate.