/* Branch trace support for GDB, the GNU debugger.

   Copyright (C) 2013-2017 Free Software Foundation, Inc.

   Contributed by Intel Corp. <markus.t.metzger@intel.com>

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "record.h"
#include "record-btrace.h"
#include "gdbthread.h"
#include "target.h"
#include "gdbcmd.h"
#include "disasm.h"
#include "observer.h"
#include "cli/cli-utils.h"
#include "source.h"
#include "ui-out.h"
#include "symtab.h"
#include "filenames.h"
#include "regcache.h"
#include "frame-unwind.h"
#include "hashtab.h"
#include "infrun.h"
#include "event-loop.h"
#include "inf-loop.h"
#include "vec.h"
#include <algorithm>

/* The target_ops of record-btrace.  */
static struct target_ops record_btrace_ops;

/* A new thread observer enabling branch tracing for the new thread.  */
static struct observer *record_btrace_thread_observer;

/* Memory access types used in set/show record btrace replay-memory-access.  */
static const char replay_memory_access_read_only[] = "read-only";
static const char replay_memory_access_read_write[] = "read-write";
static const char *const replay_memory_access_types[] =
{
  replay_memory_access_read_only,
  replay_memory_access_read_write,
  NULL
};

/* The currently allowed replay memory access type.  */
static const char *replay_memory_access = replay_memory_access_read_only;

/* Command lists for "set/show record btrace".  */
static struct cmd_list_element *set_record_btrace_cmdlist;
static struct cmd_list_element *show_record_btrace_cmdlist;

/* The execution direction of the last resume we got.  See record-full.c.  */
static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;

/* The async event handler for reverse/replay execution.  */
static struct async_event_handler *record_btrace_async_inferior_event_handler;

/* A flag indicating that we are currently generating a core file.  */
static int record_btrace_generating_corefile;

/* The current branch trace configuration.  */
static struct btrace_config record_btrace_conf;

/* Command list for "record btrace".  */
static struct cmd_list_element *record_btrace_cmdlist;

/* Command lists for "set/show record btrace bts".  */
static struct cmd_list_element *set_record_btrace_bts_cmdlist;
static struct cmd_list_element *show_record_btrace_bts_cmdlist;

/* Command lists for "set/show record btrace pt".  */
static struct cmd_list_element *set_record_btrace_pt_cmdlist;
static struct cmd_list_element *show_record_btrace_pt_cmdlist;

/* Print a record-btrace debug message.  Use do ... while (0) to avoid
   ambiguities when used in if statements.  */

#define DEBUG(msg, args...)						\
  do									\
    {									\
      if (record_debug != 0)						\
	fprintf_unfiltered (gdb_stdlog,					\
			    "[record-btrace] " msg "\n", ##args);	\
    }									\
  while (0)

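/* For illustration (not a functional change): because the macro body expands
   to a single statement, a use such as

     if (cond) DEBUG ("msg"); else other ();

   parses as intended; without the do ... while (0) wrapper the "else" would
   attach to the "if" inside the macro expansion.  */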
103/* Update the branch trace for the current thread and return a pointer to its
066ce621 104 thread_info.
105
106 Throws an error if there is no thread or no trace. This function never
107 returns NULL. */
108
109static struct thread_info *
110require_btrace_thread (void)
111{
112 struct thread_info *tp;
113
114 DEBUG ("require");
115
116 tp = find_thread_ptid (inferior_ptid);
117 if (tp == NULL)
118 error (_("No thread."));
119
120 btrace_fetch (tp);
121
6e07b1d2 122 if (btrace_is_empty (tp))
123 error (_("No trace."));
124
125 return tp;
126}
127
128/* Update the branch trace for the current thread and return a pointer to its
129 branch trace information struct.
130
131 Throws an error if there is no thread or no trace. This function never
132 returns NULL. */
133
134static struct btrace_thread_info *
135require_btrace (void)
136{
137 struct thread_info *tp;
138
139 tp = require_btrace_thread ();
140
141 return &tp->btrace;
142}
143
144/* Enable branch tracing for one thread. Warn on errors. */
145
146static void
147record_btrace_enable_warn (struct thread_info *tp)
148{
149 TRY
150 {
151 btrace_enable (tp, &record_btrace_conf);
152 }
153 CATCH (error, RETURN_MASK_ERROR)
154 {
155 warning ("%s", error.message);
156 }
157 END_CATCH
158}
159
160/* Callback function to disable branch tracing for one thread. */
161
162static void
163record_btrace_disable_callback (void *arg)
164{
19ba03f4 165 struct thread_info *tp = (struct thread_info *) arg;
166
167 btrace_disable (tp);
168}
169
170/* Enable automatic tracing of new threads. */
171
172static void
173record_btrace_auto_enable (void)
174{
175 DEBUG ("attach thread observer");
176
177 record_btrace_thread_observer
178 = observer_attach_new_thread (record_btrace_enable_warn);
179}
180
181/* Disable automatic tracing of new threads. */
182
183static void
184record_btrace_auto_disable (void)
185{
186 /* The observer may have been detached, already. */
187 if (record_btrace_thread_observer == NULL)
188 return;
189
190 DEBUG ("detach thread observer");
191
192 observer_detach_new_thread (record_btrace_thread_observer);
193 record_btrace_thread_observer = NULL;
194}
195
196/* The record-btrace async event handler function. */
197
198static void
199record_btrace_handle_async_inferior_event (gdb_client_data data)
200{
201 inferior_event_handler (INF_REG_EVENT, NULL);
202}
203
204/* See record-btrace.h. */
205
206void
207record_btrace_push_target (void)
208{
209 const char *format;
210
211 record_btrace_auto_enable ();
212
213 push_target (&record_btrace_ops);
214
215 record_btrace_async_inferior_event_handler
216 = create_async_event_handler (record_btrace_handle_async_inferior_event,
217 NULL);
218 record_btrace_generating_corefile = 0;
219
220 format = btrace_format_short_string (record_btrace_conf.format);
221 observer_notify_record_changed (current_inferior (), 1, "btrace", format);
222}
223
224/* The to_open method of target record-btrace. */
225
226static void
014f9477 227record_btrace_open (const char *args, int from_tty)
228{
229 struct cleanup *disable_chain;
230 struct thread_info *tp;
231
232 DEBUG ("open");
233
8213266a 234 record_preopen ();
235
236 if (!target_has_execution)
237 error (_("The program is not being run."));
238
239 gdb_assert (record_btrace_thread_observer == NULL);
240
241 disable_chain = make_cleanup (null_cleanup, NULL);
034f788c 242 ALL_NON_EXITED_THREADS (tp)
5d5658a1 243 if (args == NULL || *args == 0 || number_is_in_list (args, tp->global_num))
afedecd3 244 {
f4abbc16 245 btrace_enable (tp, &record_btrace_conf);
246
247 make_cleanup (record_btrace_disable_callback, tp);
248 }
249
c0272db5 250 record_btrace_push_target ();
251
252 discard_cleanups (disable_chain);
253}
254
255/* The to_stop_recording method of target record-btrace. */
256
257static void
c6cd7c02 258record_btrace_stop_recording (struct target_ops *self)
259{
260 struct thread_info *tp;
261
262 DEBUG ("stop recording");
263
264 record_btrace_auto_disable ();
265
034f788c 266 ALL_NON_EXITED_THREADS (tp)
267 if (tp->btrace.target != NULL)
268 btrace_disable (tp);
269}
270
271/* The to_disconnect method of target record-btrace. */
272
273static void
274record_btrace_disconnect (struct target_ops *self, const char *args,
275 int from_tty)
276{
277 struct target_ops *beneath = self->beneath;
278
279 /* Do not stop recording, just clean up GDB side. */
280 unpush_target (self);
281
282 /* Forward disconnect. */
283 beneath->to_disconnect (beneath, args, from_tty);
284}
285
286/* The to_close method of target record-btrace. */
287
288static void
de90e03d 289record_btrace_close (struct target_ops *self)
afedecd3 290{
291 struct thread_info *tp;
292
293 if (record_btrace_async_inferior_event_handler != NULL)
294 delete_async_event_handler (&record_btrace_async_inferior_event_handler);
295
296 /* Make sure automatic recording gets disabled even if we did not stop
297 recording before closing the record-btrace target. */
298 record_btrace_auto_disable ();
299
300 /* We should have already stopped recording.
301 Tear down btrace in case we have not. */
034f788c 302 ALL_NON_EXITED_THREADS (tp)
568e808b 303 btrace_teardown (tp);
304}
305
306/* The to_async method of target record-btrace. */
307
308static void
6a3753b3 309record_btrace_async (struct target_ops *ops, int enable)
b7d2e916 310{
6a3753b3 311 if (enable)
312 mark_async_event_handler (record_btrace_async_inferior_event_handler);
313 else
314 clear_async_event_handler (record_btrace_async_inferior_event_handler);
315
6a3753b3 316 ops->beneath->to_async (ops->beneath, enable);
317}
318
319/* Adjusts the size and returns a human readable size suffix. */
320
321static const char *
322record_btrace_adjust_size (unsigned int *size)
323{
324 unsigned int sz;
325
326 sz = *size;
327
328 if ((sz & ((1u << 30) - 1)) == 0)
329 {
330 *size = sz >> 30;
331 return "GB";
332 }
333 else if ((sz & ((1u << 20) - 1)) == 0)
334 {
335 *size = sz >> 20;
336 return "MB";
337 }
338 else if ((sz & ((1u << 10) - 1)) == 0)
339 {
340 *size = sz >> 10;
341 return "kB";
342 }
343 else
344 return "";
345}
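/* Example of the conversion above: a buffer size of 65536 is printed as
   "64kB" and 4194304 as "4MB", while a value such as 1000 that is not a
   multiple of 1024 is returned unchanged with an empty suffix.  */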
346
347/* Print a BTS configuration. */
348
349static void
350record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
351{
352 const char *suffix;
353 unsigned int size;
354
355 size = conf->size;
356 if (size > 0)
357 {
358 suffix = record_btrace_adjust_size (&size);
359 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
360 }
361}
362
bc504a31 363/* Print an Intel Processor Trace configuration. */
364
365static void
366record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
367{
368 const char *suffix;
369 unsigned int size;
370
371 size = conf->size;
372 if (size > 0)
373 {
374 suffix = record_btrace_adjust_size (&size);
375 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
376 }
377}
378
379/* Print a branch tracing configuration. */
380
381static void
382record_btrace_print_conf (const struct btrace_config *conf)
383{
384 printf_unfiltered (_("Recording format: %s.\n"),
385 btrace_format_string (conf->format));
386
387 switch (conf->format)
388 {
389 case BTRACE_FORMAT_NONE:
390 return;
391
392 case BTRACE_FORMAT_BTS:
393 record_btrace_print_bts_conf (&conf->bts);
394 return;
395
396 case BTRACE_FORMAT_PT:
397 record_btrace_print_pt_conf (&conf->pt);
398 return;
399 }
400
  internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
402}
403
404/* The to_info_record method of target record-btrace. */
405
406static void
630d6a4a 407record_btrace_info (struct target_ops *self)
408{
409 struct btrace_thread_info *btinfo;
f4abbc16 410 const struct btrace_config *conf;
afedecd3 411 struct thread_info *tp;
31fd9caa 412 unsigned int insns, calls, gaps;
413
414 DEBUG ("info");
415
416 tp = find_thread_ptid (inferior_ptid);
417 if (tp == NULL)
418 error (_("No thread."));
419
420 btinfo = &tp->btrace;
421
422 conf = btrace_conf (btinfo);
423 if (conf != NULL)
d33501a5 424 record_btrace_print_conf (conf);
f4abbc16 425
426 btrace_fetch (tp);
427
428 insns = 0;
429 calls = 0;
31fd9caa 430 gaps = 0;
23a7fe75 431
6e07b1d2 432 if (!btrace_is_empty (tp))
433 {
434 struct btrace_call_iterator call;
435 struct btrace_insn_iterator insn;
436
437 btrace_call_end (&call, btinfo);
438 btrace_call_prev (&call, 1);
5de9129b 439 calls = btrace_call_number (&call);
440
441 btrace_insn_end (&insn, btinfo);
31fd9caa 442
5de9129b 443 insns = btrace_insn_number (&insn);
444 if (insns != 0)
445 {
446 /* The last instruction does not really belong to the trace. */
447 insns -= 1;
448 }
449 else
450 {
451 unsigned int steps;
452
453 /* Skip gaps at the end. */
454 do
455 {
456 steps = btrace_insn_prev (&insn, 1);
457 if (steps == 0)
458 break;
459
460 insns = btrace_insn_number (&insn);
461 }
462 while (insns == 0);
463 }
464
465 gaps = btinfo->ngaps;
23a7fe75 466 }
afedecd3 467
31fd9caa 468 printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
469 "for thread %s (%s).\n"), insns, calls, gaps,
470 print_thread_id (tp), target_pid_to_str (tp->ptid));
471
472 if (btrace_is_replaying (tp))
473 printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
474 btrace_insn_number (btinfo->replay));
475}
476
477/* Print a decode error. */
478
479static void
480btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
481 enum btrace_format format)
482{
483 const char *errstr;
484 int is_error;
485
486 errstr = _("unknown");
487 is_error = 1;
488
489 switch (format)
490 {
491 default:
492 break;
493
494 case BTRACE_FORMAT_BTS:
495 switch (errcode)
496 {
497 default:
498 break;
499
500 case BDE_BTS_OVERFLOW:
501 errstr = _("instruction overflow");
502 break;
503
504 case BDE_BTS_INSN_SIZE:
505 errstr = _("unknown instruction");
506 break;
507 }
508 break;
509
510#if defined (HAVE_LIBIPT)
511 case BTRACE_FORMAT_PT:
512 switch (errcode)
513 {
514 case BDE_PT_USER_QUIT:
515 is_error = 0;
516 errstr = _("trace decode cancelled");
517 break;
518
519 case BDE_PT_DISABLED:
520 is_error = 0;
521 errstr = _("disabled");
522 break;
523
524 case BDE_PT_OVERFLOW:
525 is_error = 0;
526 errstr = _("overflow");
527 break;
528
529 default:
530 if (errcode < 0)
531 errstr = pt_errstr (pt_errcode (errcode));
532 break;
533 }
534 break;
535#endif /* defined (HAVE_LIBIPT) */
536 }
537
112e8700 538 uiout->text (_("["));
539 if (is_error)
540 {
541 uiout->text (_("decode error ("));
542 uiout->field_int ("errcode", errcode);
543 uiout->text (_("): "));
31fd9caa 544 }
545 uiout->text (errstr);
546 uiout->text (_("]\n"));
547}
548
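/* Example output, derived from the code above: a BTS overflow gap prints
   "[decode error (<errcode>): instruction overflow]", while the non-error
   Intel PT "disabled" marker prints just "[disabled]".  */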
549/* Print an unsigned int. */
550
551static void
552ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
553{
112e8700 554 uiout->field_fmt (fld, "%u", val);
555}
556
557/* A range of source lines. */
558
559struct btrace_line_range
560{
561 /* The symtab this line is from. */
562 struct symtab *symtab;
563
564 /* The first line (inclusive). */
565 int begin;
566
567 /* The last line (exclusive). */
568 int end;
569};
570
571/* Construct a line range. */
572
573static struct btrace_line_range
574btrace_mk_line_range (struct symtab *symtab, int begin, int end)
575{
576 struct btrace_line_range range;
577
578 range.symtab = symtab;
579 range.begin = begin;
580 range.end = end;
581
582 return range;
583}
584
585/* Add a line to a line range. */
586
587static struct btrace_line_range
588btrace_line_range_add (struct btrace_line_range range, int line)
589{
590 if (range.end <= range.begin)
591 {
592 /* This is the first entry. */
593 range.begin = line;
594 range.end = line + 1;
595 }
596 else if (line < range.begin)
597 range.begin = line;
598 else if (range.end < line)
599 range.end = line;
600
601 return range;
602}
603
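/* Example of the range arithmetic above: starting from the empty range
   [0; 0), adding line 42 yields [42; 43); adding line 40 then widens it to
   [40; 43), so the range always spans the lowest through the highest line
   seen so far.  */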
604/* Return non-zero if RANGE is empty, zero otherwise. */
605
606static int
607btrace_line_range_is_empty (struct btrace_line_range range)
608{
609 return range.end <= range.begin;
610}
611
612/* Return non-zero if LHS contains RHS, zero otherwise. */
613
614static int
615btrace_line_range_contains_range (struct btrace_line_range lhs,
616 struct btrace_line_range rhs)
617{
618 return ((lhs.symtab == rhs.symtab)
619 && (lhs.begin <= rhs.begin)
620 && (rhs.end <= lhs.end));
621}
622
623/* Find the line range associated with PC. */
624
625static struct btrace_line_range
626btrace_find_line_range (CORE_ADDR pc)
627{
628 struct btrace_line_range range;
629 struct linetable_entry *lines;
630 struct linetable *ltable;
631 struct symtab *symtab;
632 int nlines, i;
633
634 symtab = find_pc_line_symtab (pc);
635 if (symtab == NULL)
636 return btrace_mk_line_range (NULL, 0, 0);
637
638 ltable = SYMTAB_LINETABLE (symtab);
639 if (ltable == NULL)
640 return btrace_mk_line_range (symtab, 0, 0);
641
642 nlines = ltable->nitems;
643 lines = ltable->item;
644 if (nlines <= 0)
645 return btrace_mk_line_range (symtab, 0, 0);
646
647 range = btrace_mk_line_range (symtab, 0, 0);
648 for (i = 0; i < nlines - 1; i++)
649 {
650 if ((lines[i].pc == pc) && (lines[i].line != 0))
651 range = btrace_line_range_add (range, lines[i].line);
652 }
653
654 return range;
655}
656
657/* Print source lines in LINES to UIOUT.
658
659 UI_ITEM_CHAIN is a cleanup chain for the last source line and the
660 instructions corresponding to that source line. When printing a new source
661 line, we do the cleanups for the open chain and open a new cleanup chain for
662 the new source line. If the source line range in LINES is not empty, this
663 function will leave the cleanup chain for the last printed source line open
664 so instructions can be added to it. */
665
666static void
667btrace_print_lines (struct btrace_line_range lines, struct ui_out *uiout,
668 struct cleanup **ui_item_chain, int flags)
669{
8d297bbf 670 print_source_lines_flags psl_flags;
671 int line;
672
673 psl_flags = 0;
674 if (flags & DISASSEMBLY_FILENAME)
675 psl_flags |= PRINT_SOURCE_LINES_FILENAME;
676
677 for (line = lines.begin; line < lines.end; ++line)
678 {
679 if (*ui_item_chain != NULL)
680 do_cleanups (*ui_item_chain);
681
682 *ui_item_chain
683 = make_cleanup_ui_out_tuple_begin_end (uiout, "src_and_asm_line");
684
685 print_source_lines (lines.symtab, line, line + 1, psl_flags);
686
687 make_cleanup_ui_out_list_begin_end (uiout, "line_asm_insn");
688 }
689}
690
691/* Disassemble a section of the recorded instruction trace. */
692
693static void
23a7fe75 694btrace_insn_history (struct ui_out *uiout,
31fd9caa 695 const struct btrace_thread_info *btinfo,
23a7fe75
MM
696 const struct btrace_insn_iterator *begin,
697 const struct btrace_insn_iterator *end, int flags)
afedecd3 698{
699 struct ui_file *stb;
700 struct cleanup *cleanups, *ui_item_chain;
afedecd3 701 struct gdbarch *gdbarch;
23a7fe75 702 struct btrace_insn_iterator it;
f94cc897 703 struct btrace_line_range last_lines;
afedecd3 704
705 DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
706 btrace_insn_number (end));
afedecd3 707
708 flags |= DISASSEMBLY_SPECULATIVE;
709
afedecd3 710 gdbarch = target_gdbarch ();
f94cc897
MM
711 stb = mem_fileopen ();
712 cleanups = make_cleanup_ui_file_delete (stb);
e47ad6c0 713 gdb_disassembler di (gdbarch, stb);
714 last_lines = btrace_mk_line_range (NULL, 0, 0);
715
716 make_cleanup_ui_out_list_begin_end (uiout, "asm_insns");
717
718 /* UI_ITEM_CHAIN is a cleanup chain for the last source line and the
719 instructions corresponding to that line. */
720 ui_item_chain = NULL;
afedecd3 721
23a7fe75 722 for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
afedecd3 723 {
724 const struct btrace_insn *insn;
725
726 insn = btrace_insn_get (&it);
727
728 /* A NULL instruction indicates a gap in the trace. */
729 if (insn == NULL)
730 {
731 const struct btrace_config *conf;
732
733 conf = btrace_conf (btinfo);
afedecd3 734
735 /* We have trace so we must have a configuration. */
736 gdb_assert (conf != NULL);
737
738 btrace_ui_out_decode_error (uiout, it.function->errcode,
739 conf->format);
740 }
741 else
742 {
f94cc897 743 struct disasm_insn dinsn;
da8c46d2 744
f94cc897 745 if ((flags & DISASSEMBLY_SOURCE) != 0)
da8c46d2 746 {
747 struct btrace_line_range lines;
748
749 lines = btrace_find_line_range (insn->pc);
750 if (!btrace_line_range_is_empty (lines)
751 && !btrace_line_range_contains_range (last_lines, lines))
752 {
753 btrace_print_lines (lines, uiout, &ui_item_chain, flags);
754 last_lines = lines;
755 }
756 else if (ui_item_chain == NULL)
757 {
758 ui_item_chain
759 = make_cleanup_ui_out_tuple_begin_end (uiout,
760 "src_and_asm_line");
761 /* No source information. */
762 make_cleanup_ui_out_list_begin_end (uiout, "line_asm_insn");
763 }
764
765 gdb_assert (ui_item_chain != NULL);
da8c46d2 766 }
da8c46d2 767
768 memset (&dinsn, 0, sizeof (dinsn));
769 dinsn.number = btrace_insn_number (&it);
770 dinsn.addr = insn->pc;
31fd9caa 771
da8c46d2 772 if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
f94cc897 773 dinsn.is_speculative = 1;
da8c46d2 774
e47ad6c0 775 di.pretty_print_insn (uiout, &dinsn, flags);
31fd9caa 776 }
afedecd3 777 }
778
779 do_cleanups (cleanups);
780}
781
782/* The to_insn_history method of target record-btrace. */
783
784static void
7a6c5609 785record_btrace_insn_history (struct target_ops *self, int size, int flags)
afedecd3
MM
786{
787 struct btrace_thread_info *btinfo;
788 struct btrace_insn_history *history;
789 struct btrace_insn_iterator begin, end;
790 struct cleanup *uiout_cleanup;
791 struct ui_out *uiout;
23a7fe75 792 unsigned int context, covered;
793
794 uiout = current_uiout;
795 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
796 "insn history");
afedecd3 797 context = abs (size);
798 if (context == 0)
799 error (_("Bad record instruction-history-size."));
800
801 btinfo = require_btrace ();
802 history = btinfo->insn_history;
803 if (history == NULL)
afedecd3 804 {
07bbe694 805 struct btrace_insn_iterator *replay;
afedecd3 806
23a7fe75 807 DEBUG ("insn-history (0x%x): %d", flags, size);
afedecd3 808
809 /* If we're replaying, we start at the replay position. Otherwise, we
810 start at the tail of the trace. */
811 replay = btinfo->replay;
812 if (replay != NULL)
813 begin = *replay;
814 else
815 btrace_insn_end (&begin, btinfo);
816
817 /* We start from here and expand in the requested direction. Then we
818 expand in the other direction, as well, to fill up any remaining
819 context. */
820 end = begin;
821 if (size < 0)
822 {
823 /* We want the current position covered, as well. */
824 covered = btrace_insn_next (&end, 1);
825 covered += btrace_insn_prev (&begin, context - covered);
826 covered += btrace_insn_next (&end, context - covered);
827 }
828 else
829 {
830 covered = btrace_insn_next (&end, context);
831 covered += btrace_insn_prev (&begin, context - covered);
832 }
833 }
834 else
835 {
836 begin = history->begin;
837 end = history->end;
afedecd3 838
839 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
840 btrace_insn_number (&begin), btrace_insn_number (&end));
afedecd3 841
842 if (size < 0)
843 {
844 end = begin;
845 covered = btrace_insn_prev (&begin, context);
846 }
847 else
848 {
849 begin = end;
850 covered = btrace_insn_next (&end, context);
851 }
852 }
853
23a7fe75 854 if (covered > 0)
31fd9caa 855 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
856 else
857 {
858 if (size < 0)
859 printf_unfiltered (_("At the start of the branch trace record.\n"));
860 else
861 printf_unfiltered (_("At the end of the branch trace record.\n"));
862 }
afedecd3 863
23a7fe75 864 btrace_set_insn_history (btinfo, &begin, &end);
865 do_cleanups (uiout_cleanup);
866}
867
868/* The to_insn_history_range method of target record-btrace. */
869
870static void
871record_btrace_insn_history_range (struct target_ops *self,
872 ULONGEST from, ULONGEST to, int flags)
873{
874 struct btrace_thread_info *btinfo;
875 struct btrace_insn_history *history;
876 struct btrace_insn_iterator begin, end;
877 struct cleanup *uiout_cleanup;
878 struct ui_out *uiout;
879 unsigned int low, high;
880 int found;
881
882 uiout = current_uiout;
883 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
884 "insn history");
885 low = from;
886 high = to;
afedecd3 887
23a7fe75 888 DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);
889
890 /* Check for wrap-arounds. */
23a7fe75 891 if (low != from || high != to)
892 error (_("Bad range."));
893
0688d04e 894 if (high < low)
895 error (_("Bad range."));
896
23a7fe75 897 btinfo = require_btrace ();
afedecd3 898
899 found = btrace_find_insn_by_number (&begin, btinfo, low);
900 if (found == 0)
901 error (_("Range out of bounds."));
afedecd3 902
903 found = btrace_find_insn_by_number (&end, btinfo, high);
904 if (found == 0)
905 {
906 /* Silently truncate the range. */
907 btrace_insn_end (&end, btinfo);
908 }
909 else
910 {
911 /* We want both begin and end to be inclusive. */
912 btrace_insn_next (&end, 1);
913 }
afedecd3 914
31fd9caa 915 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
23a7fe75 916 btrace_set_insn_history (btinfo, &begin, &end);
917
918 do_cleanups (uiout_cleanup);
919}
920
921/* The to_insn_history_from method of target record-btrace. */
922
923static void
924record_btrace_insn_history_from (struct target_ops *self,
925 ULONGEST from, int size, int flags)
926{
927 ULONGEST begin, end, context;
928
929 context = abs (size);
930 if (context == 0)
931 error (_("Bad record instruction-history-size."));
932
933 if (size < 0)
934 {
935 end = from;
936
937 if (from < context)
938 begin = 0;
939 else
0688d04e 940 begin = from - context + 1;
941 }
942 else
943 {
944 begin = from;
0688d04e 945 end = from + context - 1;
946
947 /* Check for wrap-around. */
948 if (end < begin)
949 end = ULONGEST_MAX;
950 }
951
4e99c6b7 952 record_btrace_insn_history_range (self, begin, end, flags);
953}
954
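/* Example of the computation above: FROM = 100 with SIZE = 10 requests the
   inclusive range [100; 109]; FROM = 100 with SIZE = -10 requests [91; 100].
   BEGIN is clamped at 0 and END at ULONGEST_MAX on wrap-around.  */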
955/* Print the instruction number range for a function call history line. */
956
957static void
958btrace_call_history_insn_range (struct ui_out *uiout,
959 const struct btrace_function *bfun)
afedecd3 960{
961 unsigned int begin, end, size;
962
963 size = VEC_length (btrace_insn_s, bfun->insn);
964 gdb_assert (size > 0);
afedecd3 965
23a7fe75 966 begin = bfun->insn_offset;
7acbe133 967 end = begin + size - 1;
afedecd3 968
23a7fe75 969 ui_out_field_uint (uiout, "insn begin", begin);
112e8700 970 uiout->text (",");
23a7fe75 971 ui_out_field_uint (uiout, "insn end", end);
972}
973
974/* Compute the lowest and highest source line for the instructions in BFUN
975 and return them in PBEGIN and PEND.
976 Ignore instructions that can't be mapped to BFUN, e.g. instructions that
977 result from inlining or macro expansion. */
978
979static void
980btrace_compute_src_line_range (const struct btrace_function *bfun,
981 int *pbegin, int *pend)
982{
983 struct btrace_insn *insn;
984 struct symtab *symtab;
985 struct symbol *sym;
986 unsigned int idx;
987 int begin, end;
988
989 begin = INT_MAX;
990 end = INT_MIN;
991
992 sym = bfun->sym;
993 if (sym == NULL)
994 goto out;
995
996 symtab = symbol_symtab (sym);
997
998 for (idx = 0; VEC_iterate (btrace_insn_s, bfun->insn, idx, insn); ++idx)
999 {
1000 struct symtab_and_line sal;
1001
1002 sal = find_pc_line (insn->pc, 0);
1003 if (sal.symtab != symtab || sal.line == 0)
1004 continue;
1005
1006 begin = std::min (begin, sal.line);
1007 end = std::max (end, sal.line);
1008 }
1009
1010 out:
1011 *pbegin = begin;
1012 *pend = end;
1013}
1014
1015/* Print the source line information for a function call history line. */
1016
1017static void
1018btrace_call_history_src_line (struct ui_out *uiout,
1019 const struct btrace_function *bfun)
1020{
1021 struct symbol *sym;
23a7fe75 1022 int begin, end;
afedecd3
MM
1023
1024 sym = bfun->sym;
1025 if (sym == NULL)
1026 return;
1027
112e8700 1028 uiout->field_string ("file",
08be3fe3 1029 symtab_to_filename_for_display (symbol_symtab (sym)));
afedecd3 1030
ce0dfbea 1031 btrace_compute_src_line_range (bfun, &begin, &end);
23a7fe75 1032 if (end < begin)
1033 return;
1034
1035 uiout->text (":");
1036 uiout->field_int ("min line", begin);
afedecd3 1037
23a7fe75 1038 if (end == begin)
1039 return;
1040
1041 uiout->text (",");
1042 uiout->field_int ("max line", end);
1043}
1044
1045/* Get the name of a branch trace function. */
1046
1047static const char *
1048btrace_get_bfun_name (const struct btrace_function *bfun)
1049{
1050 struct minimal_symbol *msym;
1051 struct symbol *sym;
1052
1053 if (bfun == NULL)
1054 return "??";
1055
1056 msym = bfun->msym;
1057 sym = bfun->sym;
1058
1059 if (sym != NULL)
1060 return SYMBOL_PRINT_NAME (sym);
1061 else if (msym != NULL)
efd66ac6 1062 return MSYMBOL_PRINT_NAME (msym);
1063 else
1064 return "??";
1065}
1066
1067/* Disassemble a section of the recorded function trace. */
1068
1069static void
23a7fe75 1070btrace_call_history (struct ui_out *uiout,
8710b709 1071 const struct btrace_thread_info *btinfo,
1072 const struct btrace_call_iterator *begin,
1073 const struct btrace_call_iterator *end,
8d297bbf 1074 int int_flags)
afedecd3 1075{
23a7fe75 1076 struct btrace_call_iterator it;
8d297bbf 1077 record_print_flags flags = (enum record_print_flag) int_flags;
afedecd3 1078
8d297bbf 1079 DEBUG ("ftrace (0x%x): [%u; %u)", int_flags, btrace_call_number (begin),
23a7fe75 1080 btrace_call_number (end));
afedecd3 1081
23a7fe75 1082 for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
afedecd3 1083 {
1084 const struct btrace_function *bfun;
1085 struct minimal_symbol *msym;
1086 struct symbol *sym;
1087
1088 bfun = btrace_call_get (&it);
23a7fe75 1089 sym = bfun->sym;
0b722aec 1090 msym = bfun->msym;
23a7fe75 1091
afedecd3 1092 /* Print the function index. */
23a7fe75 1093 ui_out_field_uint (uiout, "index", bfun->number);
112e8700 1094 uiout->text ("\t");
afedecd3 1095
1096 /* Indicate gaps in the trace. */
1097 if (bfun->errcode != 0)
1098 {
1099 const struct btrace_config *conf;
1100
1101 conf = btrace_conf (btinfo);
1102
1103 /* We have trace so we must have a configuration. */
1104 gdb_assert (conf != NULL);
1105
1106 btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);
1107
1108 continue;
1109 }
1110
1111 if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
1112 {
1113 int level = bfun->level + btinfo->level, i;
1114
1115 for (i = 0; i < level; ++i)
112e8700 1116 uiout->text (" ");
1117 }
1118
1119 if (sym != NULL)
112e8700 1120 uiout->field_string ("function", SYMBOL_PRINT_NAME (sym));
8710b709 1121 else if (msym != NULL)
1122 uiout->field_string ("function", MSYMBOL_PRINT_NAME (msym));
1123 else if (!uiout->is_mi_like_p ())
1124 uiout->field_string ("function", "??");
8710b709 1125
1e038f67 1126 if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
afedecd3 1127 {
112e8700 1128 uiout->text (_("\tinst "));
23a7fe75 1129 btrace_call_history_insn_range (uiout, bfun);
1130 }
1131
1e038f67 1132 if ((flags & RECORD_PRINT_SRC_LINE) != 0)
afedecd3 1133 {
112e8700 1134 uiout->text (_("\tat "));
23a7fe75 1135 btrace_call_history_src_line (uiout, bfun);
1136 }
1137
112e8700 1138 uiout->text ("\n");
1139 }
1140}
1141
1142/* The to_call_history method of target record-btrace. */
1143
1144static void
8d297bbf 1145record_btrace_call_history (struct target_ops *self, int size, int int_flags)
1146{
1147 struct btrace_thread_info *btinfo;
1148 struct btrace_call_history *history;
1149 struct btrace_call_iterator begin, end;
1150 struct cleanup *uiout_cleanup;
1151 struct ui_out *uiout;
23a7fe75 1152 unsigned int context, covered;
8d297bbf 1153 record_print_flags flags = (enum record_print_flag) int_flags;
1154
1155 uiout = current_uiout;
1156 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
1157 "insn history");
afedecd3 1158 context = abs (size);
1159 if (context == 0)
1160 error (_("Bad record function-call-history-size."));
1161
1162 btinfo = require_btrace ();
1163 history = btinfo->call_history;
1164 if (history == NULL)
afedecd3 1165 {
07bbe694 1166 struct btrace_insn_iterator *replay;
afedecd3 1167
8d297bbf 1168 DEBUG ("call-history (0x%x): %d", int_flags, size);
afedecd3 1169
1170 /* If we're replaying, we start at the replay position. Otherwise, we
1171 start at the tail of the trace. */
1172 replay = btinfo->replay;
1173 if (replay != NULL)
1174 {
1175 begin.function = replay->function;
1176 begin.btinfo = btinfo;
1177 }
1178 else
1179 btrace_call_end (&begin, btinfo);
1180
1181 /* We start from here and expand in the requested direction. Then we
1182 expand in the other direction, as well, to fill up any remaining
1183 context. */
1184 end = begin;
1185 if (size < 0)
1186 {
1187 /* We want the current position covered, as well. */
1188 covered = btrace_call_next (&end, 1);
1189 covered += btrace_call_prev (&begin, context - covered);
1190 covered += btrace_call_next (&end, context - covered);
1191 }
1192 else
1193 {
1194 covered = btrace_call_next (&end, context);
	  covered += btrace_call_prev (&begin, context - covered);
1196 }
1197 }
1198 else
1199 {
1200 begin = history->begin;
1201 end = history->end;
afedecd3 1202
8d297bbf 1203 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", int_flags, size,
23a7fe75 1204 btrace_call_number (&begin), btrace_call_number (&end));
afedecd3 1205
1206 if (size < 0)
1207 {
1208 end = begin;
1209 covered = btrace_call_prev (&begin, context);
1210 }
1211 else
1212 {
1213 begin = end;
1214 covered = btrace_call_next (&end, context);
1215 }
1216 }
1217
23a7fe75 1218 if (covered > 0)
8710b709 1219 btrace_call_history (uiout, btinfo, &begin, &end, flags);
1220 else
1221 {
1222 if (size < 0)
1223 printf_unfiltered (_("At the start of the branch trace record.\n"));
1224 else
1225 printf_unfiltered (_("At the end of the branch trace record.\n"));
1226 }
afedecd3 1227
23a7fe75 1228 btrace_set_call_history (btinfo, &begin, &end);
1229 do_cleanups (uiout_cleanup);
1230}
1231
1232/* The to_call_history_range method of target record-btrace. */
1233
1234static void
f0d960ea 1235record_btrace_call_history_range (struct target_ops *self,
8d297bbf
PA
1236 ULONGEST from, ULONGEST to,
1237 int int_flags)
afedecd3
MM
1238{
1239 struct btrace_thread_info *btinfo;
1240 struct btrace_call_history *history;
1241 struct btrace_call_iterator begin, end;
1242 struct cleanup *uiout_cleanup;
1243 struct ui_out *uiout;
1244 unsigned int low, high;
1245 int found;
8d297bbf 1246 record_print_flags flags = (enum record_print_flag) int_flags;
1247
1248 uiout = current_uiout;
1249 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
1250 "func history");
1251 low = from;
1252 high = to;
afedecd3 1253
8d297bbf 1254 DEBUG ("call-history (0x%x): [%u; %u)", int_flags, low, high);
1255
1256 /* Check for wrap-arounds. */
23a7fe75 1257 if (low != from || high != to)
1258 error (_("Bad range."));
1259
0688d04e 1260 if (high < low)
1261 error (_("Bad range."));
1262
23a7fe75 1263 btinfo = require_btrace ();
afedecd3 1264
1265 found = btrace_find_call_by_number (&begin, btinfo, low);
1266 if (found == 0)
1267 error (_("Range out of bounds."));
afedecd3 1268
1269 found = btrace_find_call_by_number (&end, btinfo, high);
1270 if (found == 0)
1271 {
1272 /* Silently truncate the range. */
1273 btrace_call_end (&end, btinfo);
1274 }
1275 else
1276 {
1277 /* We want both begin and end to be inclusive. */
1278 btrace_call_next (&end, 1);
1279 }
afedecd3 1280
8710b709 1281 btrace_call_history (uiout, btinfo, &begin, &end, flags);
23a7fe75 1282 btrace_set_call_history (btinfo, &begin, &end);
1283
1284 do_cleanups (uiout_cleanup);
1285}
1286
1287/* The to_call_history_from method of target record-btrace. */
1288
1289static void
ec0aea04 1290record_btrace_call_history_from (struct target_ops *self,
8d297bbf
PA
1291 ULONGEST from, int size,
1292 int int_flags)
1293{
1294 ULONGEST begin, end, context;
8d297bbf 1295 record_print_flags flags = (enum record_print_flag) int_flags;
1296
1297 context = abs (size);
1298 if (context == 0)
1299 error (_("Bad record function-call-history-size."));
1300
1301 if (size < 0)
1302 {
1303 end = from;
1304
1305 if (from < context)
1306 begin = 0;
1307 else
0688d04e 1308 begin = from - context + 1;
1309 }
1310 else
1311 {
1312 begin = from;
0688d04e 1313 end = from + context - 1;
1314
1315 /* Check for wrap-around. */
1316 if (end < begin)
1317 end = ULONGEST_MAX;
1318 }
1319
f0d960ea 1320 record_btrace_call_history_range (self, begin, end, flags);
1321}
1322
1323/* The to_record_is_replaying method of target record-btrace. */
1324
1325static int
a52eab48 1326record_btrace_is_replaying (struct target_ops *self, ptid_t ptid)
1327{
1328 struct thread_info *tp;
1329
034f788c 1330 ALL_NON_EXITED_THREADS (tp)
a52eab48 1331 if (ptid_match (tp->ptid, ptid) && btrace_is_replaying (tp))
1332 return 1;
1333
1334 return 0;
1335}
1336
1337/* The to_record_will_replay method of target record-btrace. */
1338
1339static int
1340record_btrace_will_replay (struct target_ops *self, ptid_t ptid, int dir)
1341{
1342 return dir == EXEC_REVERSE || record_btrace_is_replaying (self, ptid);
1343}
1344
1345/* The to_xfer_partial method of target record-btrace. */
1346
9b409511 1347static enum target_xfer_status
1348record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
1349 const char *annex, gdb_byte *readbuf,
1350 const gdb_byte *writebuf, ULONGEST offset,
9b409511 1351 ULONGEST len, ULONGEST *xfered_len)
1352{
1353 struct target_ops *t;
1354
1355 /* Filter out requests that don't make sense during replay. */
67b5c0c1 1356 if (replay_memory_access == replay_memory_access_read_only
aef92902 1357 && !record_btrace_generating_corefile
4d10e986 1358 && record_btrace_is_replaying (ops, inferior_ptid))
1359 {
1360 switch (object)
1361 {
1362 case TARGET_OBJECT_MEMORY:
1363 {
1364 struct target_section *section;
1365
1366 /* We do not allow writing memory in general. */
1367 if (writebuf != NULL)
1368 {
1369 *xfered_len = len;
bc113b4e 1370 return TARGET_XFER_UNAVAILABLE;
9b409511 1371 }
1372
1373 /* We allow reading readonly memory. */
1374 section = target_section_by_addr (ops, offset);
1375 if (section != NULL)
1376 {
1377 /* Check if the section we found is readonly. */
1378 if ((bfd_get_section_flags (section->the_bfd_section->owner,
1379 section->the_bfd_section)
1380 & SEC_READONLY) != 0)
1381 {
1382 /* Truncate the request to fit into this section. */
325fac50 1383 len = std::min (len, section->endaddr - offset);
1384 break;
1385 }
1386 }
1387
9b409511 1388 *xfered_len = len;
bc113b4e 1389 return TARGET_XFER_UNAVAILABLE;
1390 }
1391 }
1392 }
1393
1394 /* Forward the request. */
1395 ops = ops->beneath;
1396 return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
1397 offset, len, xfered_len);
1398}
1399
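/* Illustration of the filtering above: while replaying with the default
   read-only setting, writes and reads outside read-only sections are answered
   with TARGET_XFER_UNAVAILABLE; switching the setting to read-write (via
   "set record btrace replay-memory-access read-write") lets such requests be
   forwarded to the target beneath.  */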
1400/* The to_insert_breakpoint method of target record-btrace. */
1401
1402static int
1403record_btrace_insert_breakpoint (struct target_ops *ops,
1404 struct gdbarch *gdbarch,
1405 struct bp_target_info *bp_tgt)
1406{
1407 const char *old;
1408 int ret;
1409
1410 /* Inserting breakpoints requires accessing memory. Allow it for the
1411 duration of this function. */
1412 old = replay_memory_access;
1413 replay_memory_access = replay_memory_access_read_write;
1414
1415 ret = 0;
1416 TRY
1417 {
1418 ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);
1419 }
1420 CATCH (except, RETURN_MASK_ALL)
1421 {
6c63c96a 1422 replay_memory_access = old;
1423 throw_exception (except);
1424 }
1425 END_CATCH
6c63c96a 1426 replay_memory_access = old;
1427
1428 return ret;
1429}
1430
1431/* The to_remove_breakpoint method of target record-btrace. */
1432
1433static int
1434record_btrace_remove_breakpoint (struct target_ops *ops,
1435 struct gdbarch *gdbarch,
1436 struct bp_target_info *bp_tgt,
1437 enum remove_bp_reason reason)
633785ff 1438{
1439 const char *old;
1440 int ret;
1441
1442 /* Removing breakpoints requires accessing memory. Allow it for the
1443 duration of this function. */
1444 old = replay_memory_access;
1445 replay_memory_access = replay_memory_access_read_write;
1446
1447 ret = 0;
1448 TRY
1449 {
73971819
PA
1450 ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt,
1451 reason);
492d29ea 1452 }
1453 CATCH (except, RETURN_MASK_ALL)
1454 {
6c63c96a 1455 replay_memory_access = old;
1456 throw_exception (except);
1457 }
1458 END_CATCH
6c63c96a 1459 replay_memory_access = old;
1460
1461 return ret;
1462}
1463
1464/* The to_fetch_registers method of target record-btrace. */
1465
1466static void
1467record_btrace_fetch_registers (struct target_ops *ops,
1468 struct regcache *regcache, int regno)
1469{
1470 struct btrace_insn_iterator *replay;
1471 struct thread_info *tp;
1472
1473 tp = find_thread_ptid (inferior_ptid);
1474 gdb_assert (tp != NULL);
1475
1476 replay = tp->btrace.replay;
aef92902 1477 if (replay != NULL && !record_btrace_generating_corefile)
1478 {
1479 const struct btrace_insn *insn;
1480 struct gdbarch *gdbarch;
1481 int pcreg;
1482
1483 gdbarch = get_regcache_arch (regcache);
1484 pcreg = gdbarch_pc_regnum (gdbarch);
1485 if (pcreg < 0)
1486 return;
1487
1488 /* We can only provide the PC register. */
1489 if (regno >= 0 && regno != pcreg)
1490 return;
1491
1492 insn = btrace_insn_get (replay);
1493 gdb_assert (insn != NULL);
1494
1495 regcache_raw_supply (regcache, regno, &insn->pc);
1496 }
1497 else
1498 {
e75fdfca 1499 struct target_ops *t = ops->beneath;
1f3ef581 1500
e75fdfca 1501 t->to_fetch_registers (t, regcache, regno);
1502 }
1503}
1504
1505/* The to_store_registers method of target record-btrace. */
1506
1507static void
1508record_btrace_store_registers (struct target_ops *ops,
1509 struct regcache *regcache, int regno)
1510{
1511 struct target_ops *t;
1512
a52eab48 1513 if (!record_btrace_generating_corefile
1514 && record_btrace_is_replaying (ops, inferior_ptid))
1515 error (_("Cannot write registers while replaying."));
1516
1517 gdb_assert (may_write_registers != 0);
1518
1519 t = ops->beneath;
1520 t->to_store_registers (t, regcache, regno);
1521}
1522
1523/* The to_prepare_to_store method of target record-btrace. */
1524
1525static void
1526record_btrace_prepare_to_store (struct target_ops *ops,
1527 struct regcache *regcache)
1528{
1529 struct target_ops *t;
1530
a52eab48 1531 if (!record_btrace_generating_corefile
4d10e986 1532 && record_btrace_is_replaying (ops, inferior_ptid))
1533 return;
1534
1535 t = ops->beneath;
1536 t->to_prepare_to_store (t, regcache);
1537}
1538
1539/* The branch trace frame cache. */
1540
1541struct btrace_frame_cache
1542{
1543 /* The thread. */
1544 struct thread_info *tp;
1545
1546 /* The frame info. */
1547 struct frame_info *frame;
1548
1549 /* The branch trace function segment. */
1550 const struct btrace_function *bfun;
1551};
1552
1553/* A struct btrace_frame_cache hash table indexed by NEXT. */
1554
1555static htab_t bfcache;
1556
1557/* hash_f for htab_create_alloc of bfcache. */
1558
1559static hashval_t
1560bfcache_hash (const void *arg)
1561{
1562 const struct btrace_frame_cache *cache
1563 = (const struct btrace_frame_cache *) arg;
1564
1565 return htab_hash_pointer (cache->frame);
1566}
1567
1568/* eq_f for htab_create_alloc of bfcache. */
1569
1570static int
1571bfcache_eq (const void *arg1, const void *arg2)
1572{
1573 const struct btrace_frame_cache *cache1
1574 = (const struct btrace_frame_cache *) arg1;
1575 const struct btrace_frame_cache *cache2
1576 = (const struct btrace_frame_cache *) arg2;
1577
1578 return cache1->frame == cache2->frame;
1579}
1580
1581/* Create a new btrace frame cache. */
1582
1583static struct btrace_frame_cache *
1584bfcache_new (struct frame_info *frame)
1585{
1586 struct btrace_frame_cache *cache;
1587 void **slot;
1588
1589 cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
1590 cache->frame = frame;
1591
1592 slot = htab_find_slot (bfcache, cache, INSERT);
1593 gdb_assert (*slot == NULL);
1594 *slot = cache;
1595
1596 return cache;
1597}
1598
1599/* Extract the branch trace function from a branch trace frame. */
1600
1601static const struct btrace_function *
1602btrace_get_frame_function (struct frame_info *frame)
1603{
1604 const struct btrace_frame_cache *cache;
1605 const struct btrace_function *bfun;
1606 struct btrace_frame_cache pattern;
1607 void **slot;
1608
1609 pattern.frame = frame;
1610
1611 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1612 if (slot == NULL)
1613 return NULL;
1614
19ba03f4 1615 cache = (const struct btrace_frame_cache *) *slot;
1616 return cache->bfun;
1617}
1618
1619/* Implement stop_reason method for record_btrace_frame_unwind. */
1620
1621static enum unwind_stop_reason
1622record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1623 void **this_cache)
1624{
1625 const struct btrace_frame_cache *cache;
1626 const struct btrace_function *bfun;
1627
19ba03f4 1628 cache = (const struct btrace_frame_cache *) *this_cache;
1629 bfun = cache->bfun;
1630 gdb_assert (bfun != NULL);
1631
1632 if (bfun->up == NULL)
1633 return UNWIND_UNAVAILABLE;
1634
1635 return UNWIND_NO_REASON;
1636}
1637
1638/* Implement this_id method for record_btrace_frame_unwind. */
1639
1640static void
1641record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
1642 struct frame_id *this_id)
1643{
1644 const struct btrace_frame_cache *cache;
1645 const struct btrace_function *bfun;
1646 CORE_ADDR code, special;
1647
19ba03f4 1648 cache = (const struct btrace_frame_cache *) *this_cache;
1649
1650 bfun = cache->bfun;
1651 gdb_assert (bfun != NULL);
1652
1653 while (bfun->segment.prev != NULL)
1654 bfun = bfun->segment.prev;
1655
1656 code = get_frame_func (this_frame);
1657 special = bfun->number;
1658
1659 *this_id = frame_id_build_unavailable_stack_special (code, special);
1660
1661 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1662 btrace_get_bfun_name (cache->bfun),
1663 core_addr_to_string_nz (this_id->code_addr),
1664 core_addr_to_string_nz (this_id->special_addr));
1665}
1666
1667/* Implement prev_register method for record_btrace_frame_unwind. */
1668
1669static struct value *
1670record_btrace_frame_prev_register (struct frame_info *this_frame,
1671 void **this_cache,
1672 int regnum)
1673{
1674 const struct btrace_frame_cache *cache;
1675 const struct btrace_function *bfun, *caller;
1676 const struct btrace_insn *insn;
1677 struct gdbarch *gdbarch;
1678 CORE_ADDR pc;
1679 int pcreg;
1680
1681 gdbarch = get_frame_arch (this_frame);
1682 pcreg = gdbarch_pc_regnum (gdbarch);
1683 if (pcreg < 0 || regnum != pcreg)
1684 throw_error (NOT_AVAILABLE_ERROR,
1685 _("Registers are not available in btrace record history"));
1686
19ba03f4 1687 cache = (const struct btrace_frame_cache *) *this_cache;
1688 bfun = cache->bfun;
1689 gdb_assert (bfun != NULL);
1690
1691 caller = bfun->up;
1692 if (caller == NULL)
1693 throw_error (NOT_AVAILABLE_ERROR,
1694 _("No caller in btrace record history"));
1695
1696 if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
1697 {
1698 insn = VEC_index (btrace_insn_s, caller->insn, 0);
1699 pc = insn->pc;
1700 }
1701 else
1702 {
1703 insn = VEC_last (btrace_insn_s, caller->insn);
1704 pc = insn->pc;
1705
1706 pc += gdb_insn_length (gdbarch, pc);
1707 }
1708
1709 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1710 btrace_get_bfun_name (bfun), bfun->level,
1711 core_addr_to_string_nz (pc));
1712
1713 return frame_unwind_got_address (this_frame, regnum, pc);
1714}
1715
1716/* Implement sniffer method for record_btrace_frame_unwind. */
1717
1718static int
1719record_btrace_frame_sniffer (const struct frame_unwind *self,
1720 struct frame_info *this_frame,
1721 void **this_cache)
1722{
1723 const struct btrace_function *bfun;
1724 struct btrace_frame_cache *cache;
cecac1ab 1725 struct thread_info *tp;
0b722aec 1726 struct frame_info *next;
1727
1728 /* THIS_FRAME does not contain a reference to its thread. */
1729 tp = find_thread_ptid (inferior_ptid);
1730 gdb_assert (tp != NULL);
1731
1732 bfun = NULL;
1733 next = get_next_frame (this_frame);
1734 if (next == NULL)
1735 {
1736 const struct btrace_insn_iterator *replay;
1737
1738 replay = tp->btrace.replay;
1739 if (replay != NULL)
1740 bfun = replay->function;
1741 }
1742 else
1743 {
1744 const struct btrace_function *callee;
1745
1746 callee = btrace_get_frame_function (next);
1747 if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1748 bfun = callee->up;
1749 }
1750
1751 if (bfun == NULL)
1752 return 0;
1753
1754 DEBUG ("[frame] sniffed frame for %s on level %d",
1755 btrace_get_bfun_name (bfun), bfun->level);
1756
1757 /* This is our frame. Initialize the frame cache. */
1758 cache = bfcache_new (this_frame);
1759 cache->tp = tp;
1760 cache->bfun = bfun;
1761
1762 *this_cache = cache;
1763 return 1;
1764}
1765
1766/* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1767
1768static int
1769record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
1770 struct frame_info *this_frame,
1771 void **this_cache)
1772{
1773 const struct btrace_function *bfun, *callee;
1774 struct btrace_frame_cache *cache;
1775 struct frame_info *next;
1776
1777 next = get_next_frame (this_frame);
1778 if (next == NULL)
1779 return 0;
1780
1781 callee = btrace_get_frame_function (next);
1782 if (callee == NULL)
1783 return 0;
1784
1785 if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1786 return 0;
1787
1788 bfun = callee->up;
1789 if (bfun == NULL)
1790 return 0;
1791
1792 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1793 btrace_get_bfun_name (bfun), bfun->level);
1794
1795 /* This is our frame. Initialize the frame cache. */
1796 cache = bfcache_new (this_frame);
1797 cache->tp = find_thread_ptid (inferior_ptid);
1798 cache->bfun = bfun;
1799
1800 *this_cache = cache;
1801 return 1;
1802}
1803
1804static void
1805record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
1806{
1807 struct btrace_frame_cache *cache;
1808 void **slot;
1809
19ba03f4 1810 cache = (struct btrace_frame_cache *) this_cache;
1811
1812 slot = htab_find_slot (bfcache, cache, NO_INSERT);
1813 gdb_assert (slot != NULL);
1814
1815 htab_remove_elt (bfcache, cache);
1816}
1817
/* btrace recording does not store previous memory content, nor the stack
   frames content.  Any unwinding would return erroneous results as the stack
   contents no longer match the changed PC value restored from history.
   Therefore this unwinder reports any possibly unwound registers as
   <unavailable>.  */
1823
0b722aec 1824const struct frame_unwind record_btrace_frame_unwind =
1825{
1826 NORMAL_FRAME,
1827 record_btrace_frame_unwind_stop_reason,
1828 record_btrace_frame_this_id,
1829 record_btrace_frame_prev_register,
1830 NULL,
1831 record_btrace_frame_sniffer,
1832 record_btrace_frame_dealloc_cache
1833};
1834
1835const struct frame_unwind record_btrace_tailcall_frame_unwind =
1836{
1837 TAILCALL_FRAME,
1838 record_btrace_frame_unwind_stop_reason,
1839 record_btrace_frame_this_id,
1840 record_btrace_frame_prev_register,
1841 NULL,
1842 record_btrace_tailcall_frame_sniffer,
1843 record_btrace_frame_dealloc_cache
cecac1ab 1844};
b2f4cfde 1845
1846/* Implement the to_get_unwinder method. */
1847
1848static const struct frame_unwind *
1849record_btrace_to_get_unwinder (struct target_ops *self)
1850{
1851 return &record_btrace_frame_unwind;
1852}
1853
1854/* Implement the to_get_tailcall_unwinder method. */
1855
1856static const struct frame_unwind *
1857record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
1858{
1859 return &record_btrace_tailcall_frame_unwind;
1860}
1861
1862/* Return a human-readable string for FLAG. */
1863
1864static const char *
1865btrace_thread_flag_to_str (enum btrace_thread_flag flag)
1866{
1867 switch (flag)
1868 {
1869 case BTHR_STEP:
1870 return "step";
1871
1872 case BTHR_RSTEP:
1873 return "reverse-step";
1874
1875 case BTHR_CONT:
1876 return "cont";
1877
1878 case BTHR_RCONT:
1879 return "reverse-cont";
1880
1881 case BTHR_STOP:
1882 return "stop";
1883 }
1884
1885 return "<invalid>";
1886}
1887
1888/* Indicate that TP should be resumed according to FLAG. */
1889
1890static void
1891record_btrace_resume_thread (struct thread_info *tp,
1892 enum btrace_thread_flag flag)
1893{
1894 struct btrace_thread_info *btinfo;
1895
43792cf0 1896 DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp),
987e68b1 1897 target_pid_to_str (tp->ptid), flag, btrace_thread_flag_to_str (flag));
1898
1899 btinfo = &tp->btrace;
1900
1901 /* Fetch the latest branch trace. */
1902 btrace_fetch (tp);
1903
1904 /* A resume request overwrites a preceding resume or stop request. */
1905 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
1906 btinfo->flags |= flag;
1907}
1908
1909/* Get the current frame for TP. */
1910
1911static struct frame_info *
1912get_thread_current_frame (struct thread_info *tp)
1913{
1914 struct frame_info *frame;
1915 ptid_t old_inferior_ptid;
1916 int executing;
1917
1918 /* Set INFERIOR_PTID, which is implicitly used by get_current_frame. */
1919 old_inferior_ptid = inferior_ptid;
1920 inferior_ptid = tp->ptid;
1921
  /* Clear the executing flag to allow changes to the current frame.
     We are not actually running, yet.  We just started a reverse execution
     command or a record goto command.
     For the latter, EXECUTING is false and this has no effect.
     For the former, EXECUTING is true and we're in to_wait, about to
     move the thread.  Since we need to recompute the stack, we temporarily
     set EXECUTING to false.  */
1929 executing = is_executing (inferior_ptid);
1930 set_executing (inferior_ptid, 0);
1931
1932 frame = NULL;
1933 TRY
1934 {
1935 frame = get_current_frame ();
1936 }
1937 CATCH (except, RETURN_MASK_ALL)
1938 {
1939 /* Restore the previous execution state. */
1940 set_executing (inferior_ptid, executing);
1941
1942 /* Restore the previous inferior_ptid. */
1943 inferior_ptid = old_inferior_ptid;
1944
1945 throw_exception (except);
1946 }
1947 END_CATCH
1948
1949 /* Restore the previous execution state. */
1950 set_executing (inferior_ptid, executing);
1951
1952 /* Restore the previous inferior_ptid. */
1953 inferior_ptid = old_inferior_ptid;
1954
1955 return frame;
1956}
1957
1958/* Start replaying a thread. */
1959
1960static struct btrace_insn_iterator *
1961record_btrace_start_replaying (struct thread_info *tp)
1962{
1963 struct btrace_insn_iterator *replay;
1964 struct btrace_thread_info *btinfo;
52834460
MM
1965
1966 btinfo = &tp->btrace;
1967 replay = NULL;
1968
1969 /* We can't start replaying without trace. */
1970 if (btinfo->begin == NULL)
1971 return NULL;
1972
52834460
MM
1973 /* GDB stores the current frame_id when stepping in order to detect steps
1974 into subroutines.
1975 Since frames are computed differently when we're replaying, we need to
1976 recompute those stored frames and fix them up so we can still detect
1977 subroutines after we started replaying. */
492d29ea 1978 TRY
52834460
MM
1979 {
1980 struct frame_info *frame;
1981 struct frame_id frame_id;
1982 int upd_step_frame_id, upd_step_stack_frame_id;
1983
1984 /* The current frame without replaying - computed via normal unwind. */
ec71cc2f 1985 frame = get_thread_current_frame (tp);
52834460
MM
1986 frame_id = get_frame_id (frame);
1987
1988 /* Check if we need to update any stepping-related frame id's. */
1989 upd_step_frame_id = frame_id_eq (frame_id,
1990 tp->control.step_frame_id);
1991 upd_step_stack_frame_id = frame_id_eq (frame_id,
1992 tp->control.step_stack_frame_id);
1993
1994 /* We start replaying at the end of the branch trace. This corresponds
1995 to the current instruction. */
8d749320 1996 replay = XNEW (struct btrace_insn_iterator);
52834460
MM
1997 btrace_insn_end (replay, btinfo);
1998
31fd9caa
MM
1999 /* Skip gaps at the end of the trace. */
2000 while (btrace_insn_get (replay) == NULL)
2001 {
2002 unsigned int steps;
2003
2004 steps = btrace_insn_prev (replay, 1);
2005 if (steps == 0)
2006 error (_("No trace."));
2007 }
2008
52834460
MM
2009 /* We're not replaying, yet. */
2010 gdb_assert (btinfo->replay == NULL);
2011 btinfo->replay = replay;
2012
2013 /* Make sure we're not using any stale registers. */
2014 registers_changed_ptid (tp->ptid);
2015
2016 /* The current frame with replaying - computed via btrace unwind. */
ec71cc2f 2017 frame = get_thread_current_frame (tp);
52834460
MM
2018 frame_id = get_frame_id (frame);
2019
2020 /* Replace stepping related frames where necessary. */
2021 if (upd_step_frame_id)
2022 tp->control.step_frame_id = frame_id;
2023 if (upd_step_stack_frame_id)
2024 tp->control.step_stack_frame_id = frame_id;
2025 }
492d29ea 2026 CATCH (except, RETURN_MASK_ALL)
52834460
MM
2027 {
2028 xfree (btinfo->replay);
2029 btinfo->replay = NULL;
2030
2031 registers_changed_ptid (tp->ptid);
2032
2033 throw_exception (except);
2034 }
492d29ea 2035 END_CATCH
52834460
MM
2036
2037 return replay;
2038}
2039
2040/* Stop replaying a thread. */
2041
2042static void
2043record_btrace_stop_replaying (struct thread_info *tp)
2044{
2045 struct btrace_thread_info *btinfo;
2046
2047 btinfo = &tp->btrace;
2048
2049 xfree (btinfo->replay);
2050 btinfo->replay = NULL;
2051
2052 /* Make sure we're not leaving any stale registers. */
2053 registers_changed_ptid (tp->ptid);
2054}
2055
e3cfc1c7
MM
2056/* Stop replaying TP if it is at the end of its execution history. */
2057
2058static void
2059record_btrace_stop_replaying_at_end (struct thread_info *tp)
2060{
2061 struct btrace_insn_iterator *replay, end;
2062 struct btrace_thread_info *btinfo;
2063
2064 btinfo = &tp->btrace;
2065 replay = btinfo->replay;
2066
2067 if (replay == NULL)
2068 return;
2069
2070 btrace_insn_end (&end, btinfo);
2071
2072 if (btrace_insn_cmp (replay, &end) == 0)
2073 record_btrace_stop_replaying (tp);
2074}
2075
b2f4cfde
MM
2076/* The to_resume method of target record-btrace. */
2077
2078static void
2079record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
2080 enum gdb_signal signal)
2081{
0ca912df 2082 struct thread_info *tp;
d2939ba2 2083 enum btrace_thread_flag flag, cflag;
52834460 2084
987e68b1
MM
2085 DEBUG ("resume %s: %s%s", target_pid_to_str (ptid),
2086 execution_direction == EXEC_REVERSE ? "reverse-" : "",
2087 step ? "step" : "cont");
52834460 2088
0ca912df
MM
2089 /* Store the execution direction of the last resume.
2090
2091 If there is more than one to_resume call, we have to rely on infrun
2092 to not change the execution direction in-between. */
70ad5bff
MM
2093 record_btrace_resume_exec_dir = execution_direction;
2094
0ca912df 2095 /* As long as we're not replaying, just forward the request.
52834460 2096
0ca912df
MM
2097 For non-stop targets this means that no thread is replaying. In order to
2098 make progress, we may need to explicitly move replaying threads to the end
2099 of their execution history. */
a52eab48
MM
2100 if ((execution_direction != EXEC_REVERSE)
2101 && !record_btrace_is_replaying (ops, minus_one_ptid))
b2f4cfde 2102 {
e75fdfca 2103 ops = ops->beneath;
04c4fe8c
MM
2104 ops->to_resume (ops, ptid, step, signal);
2105 return;
b2f4cfde
MM
2106 }
2107
52834460 2108 /* Compute the btrace thread flag for the requested move. */
d2939ba2
MM
2109 if (execution_direction == EXEC_REVERSE)
2110 {
2111 flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;
2112 cflag = BTHR_RCONT;
2113 }
52834460 2114 else
d2939ba2
MM
2115 {
2116 flag = step == 0 ? BTHR_CONT : BTHR_STEP;
2117 cflag = BTHR_CONT;
2118 }
52834460 2119
52834460 2120 /* We just indicate the resume intent here. The actual stepping happens in
d2939ba2
MM
2121 record_btrace_wait below.
2122
2123 For all-stop targets, we only step INFERIOR_PTID and continue others. */
2124 if (!target_is_non_stop_p ())
2125 {
2126 gdb_assert (ptid_match (inferior_ptid, ptid));
2127
2128 ALL_NON_EXITED_THREADS (tp)
2129 if (ptid_match (tp->ptid, ptid))
2130 {
2131 if (ptid_match (tp->ptid, inferior_ptid))
2132 record_btrace_resume_thread (tp, flag);
2133 else
2134 record_btrace_resume_thread (tp, cflag);
2135 }
2136 }
2137 else
2138 {
2139 ALL_NON_EXITED_THREADS (tp)
2140 if (ptid_match (tp->ptid, ptid))
2141 record_btrace_resume_thread (tp, flag);
2142 }
70ad5bff
MM
2143
2144 /* Async support. */
2145 if (target_can_async_p ())
2146 {
6a3753b3 2147 target_async (1);
70ad5bff
MM
2148 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2149 }
52834460
MM
2150}
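In the all-stop branch above, only the thread matching INFERIOR_PTID receives the requested flag; every other resumed thread receives the continue flag for the same direction. A standalone sketch of that selection follows; the thread ids and the match test are invented for illustration.

#include <stdio.h>

enum move_flag { FLAG_STEP, FLAG_CONT, FLAG_RSTEP, FLAG_RCONT };

struct thread { int id; enum move_flag flag; };

/* Give FLAG to the focus thread and CFLAG to all other resumed threads,
   mirroring the all-stop branch of record_btrace_resume above.  */
static void
assign_flags (struct thread *threads, int count, int focus_id,
              enum move_flag flag, enum move_flag cflag)
{
  for (int i = 0; i < count; i++)
    threads[i].flag = (threads[i].id == focus_id) ? flag : cflag;
}

int
main (void)
{
  struct thread threads[] = { { 1, FLAG_CONT }, { 2, FLAG_CONT }, { 3, FLAG_CONT } };

  /* Reverse-step thread 2; the other threads are reverse-continued.  */
  assign_flags (threads, 3, 2, FLAG_RSTEP, FLAG_RCONT);

  for (int i = 0; i < 3; i++)
    printf ("thread %d -> %s\n", threads[i].id,
            threads[i].flag == FLAG_RSTEP ? "reverse-step" : "reverse-cont");
  return 0;
}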
2151
85ad3aaf
PA
2152/* The to_commit_resume method of target record-btrace. */
2153
2154static void
2155record_btrace_commit_resume (struct target_ops *ops)
2156{
2157 if ((execution_direction != EXEC_REVERSE)
2158 && !record_btrace_is_replaying (ops, minus_one_ptid))
2159 ops->beneath->to_commit_resume (ops->beneath);
2160}
2161
987e68b1
MM
2162/* Cancel resuming TP. */
2163
2164static void
2165record_btrace_cancel_resume (struct thread_info *tp)
2166{
2167 enum btrace_thread_flag flags;
2168
2169 flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
2170 if (flags == 0)
2171 return;
2172
43792cf0
PA
2173 DEBUG ("cancel resume thread %s (%s): %x (%s)",
2174 print_thread_id (tp),
987e68b1
MM
2175 target_pid_to_str (tp->ptid), flags,
2176 btrace_thread_flag_to_str (flags));
2177
2178 tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
e3cfc1c7 2179 record_btrace_stop_replaying_at_end (tp);
52834460
MM
2180}
2181
2182/* Return a target_waitstatus indicating that we ran out of history. */
2183
2184static struct target_waitstatus
2185btrace_step_no_history (void)
2186{
2187 struct target_waitstatus status;
2188
2189 status.kind = TARGET_WAITKIND_NO_HISTORY;
2190
2191 return status;
2192}
2193
2194/* Return a target_waitstatus indicating that a step finished. */
2195
2196static struct target_waitstatus
2197btrace_step_stopped (void)
2198{
2199 struct target_waitstatus status;
2200
2201 status.kind = TARGET_WAITKIND_STOPPED;
2202 status.value.sig = GDB_SIGNAL_TRAP;
2203
2204 return status;
2205}
2206
6e4879f0
MM
2207/* Return a target_waitstatus indicating that a thread was stopped as
2208 requested. */
2209
2210static struct target_waitstatus
2211btrace_step_stopped_on_request (void)
2212{
2213 struct target_waitstatus status;
2214
2215 status.kind = TARGET_WAITKIND_STOPPED;
2216 status.value.sig = GDB_SIGNAL_0;
2217
2218 return status;
2219}
2220
d825d248
MM
2221/* Return a target_waitstatus indicating a spurious stop. */
2222
2223static struct target_waitstatus
2224btrace_step_spurious (void)
2225{
2226 struct target_waitstatus status;
2227
2228 status.kind = TARGET_WAITKIND_SPURIOUS;
2229
2230 return status;
2231}
2232
e3cfc1c7
MM
2233/* Return a target_waitstatus indicating that the thread was not resumed. */
2234
2235static struct target_waitstatus
2236btrace_step_no_resumed (void)
2237{
2238 struct target_waitstatus status;
2239
2240 status.kind = TARGET_WAITKIND_NO_RESUMED;
2241
2242 return status;
2243}
2244
2245/* Return a target_waitstatus indicating that we should wait again. */
2246
2247static struct target_waitstatus
2248btrace_step_again (void)
2249{
2250 struct target_waitstatus status;
2251
2252 status.kind = TARGET_WAITKIND_IGNORE;
2253
2254 return status;
2255}
2256
52834460
MM
2257/* Clear the record histories. */
2258
2259static void
2260record_btrace_clear_histories (struct btrace_thread_info *btinfo)
2261{
2262 xfree (btinfo->insn_history);
2263 xfree (btinfo->call_history);
2264
2265 btinfo->insn_history = NULL;
2266 btinfo->call_history = NULL;
2267}
2268
3c615f99
MM
2269/* Check whether TP's current replay position is at a breakpoint. */
2270
2271static int
2272record_btrace_replay_at_breakpoint (struct thread_info *tp)
2273{
2274 struct btrace_insn_iterator *replay;
2275 struct btrace_thread_info *btinfo;
2276 const struct btrace_insn *insn;
2277 struct inferior *inf;
2278
2279 btinfo = &tp->btrace;
2280 replay = btinfo->replay;
2281
2282 if (replay == NULL)
2283 return 0;
2284
2285 insn = btrace_insn_get (replay);
2286 if (insn == NULL)
2287 return 0;
2288
2289 inf = find_inferior_ptid (tp->ptid);
2290 if (inf == NULL)
2291 return 0;
2292
2293 return record_check_stopped_by_breakpoint (inf->aspace, insn->pc,
2294 &btinfo->stop_reason);
2295}
2296
d825d248 2297/* Step one instruction in forward direction. */
52834460
MM
2298
2299static struct target_waitstatus
d825d248 2300record_btrace_single_step_forward (struct thread_info *tp)
52834460 2301{
b61ce85c 2302 struct btrace_insn_iterator *replay, end, start;
52834460 2303 struct btrace_thread_info *btinfo;
52834460 2304
d825d248
MM
2305 btinfo = &tp->btrace;
2306 replay = btinfo->replay;
2307
2308 /* We're done if we're not replaying. */
2309 if (replay == NULL)
2310 return btrace_step_no_history ();
2311
011c71b6
MM
2312 /* Check if we're stepping a breakpoint. */
2313 if (record_btrace_replay_at_breakpoint (tp))
2314 return btrace_step_stopped ();
2315
b61ce85c
MM
2316 /* Skip gaps during replay. If we end up at a gap (at the end of the trace),
2317 jump back to the instruction at which we started. */
2318 start = *replay;
d825d248
MM
2319 do
2320 {
2321 unsigned int steps;
2322
e3cfc1c7
MM
2323 /* We will bail out here if we continue stepping after reaching the end
2324 of the execution history. */
d825d248
MM
2325 steps = btrace_insn_next (replay, 1);
2326 if (steps == 0)
b61ce85c
MM
2327 {
2328 *replay = start;
2329 return btrace_step_no_history ();
2330 }
d825d248
MM
2331 }
2332 while (btrace_insn_get (replay) == NULL);
2333
2334 /* Determine the end of the instruction trace. */
2335 btrace_insn_end (&end, btinfo);
2336
e3cfc1c7
MM
2337 /* The execution trace contains (and ends with) the current instruction.
2338 This instruction has not been executed, yet, so the trace really ends
2339 one instruction earlier. */
d825d248 2340 if (btrace_insn_cmp (replay, &end) == 0)
e3cfc1c7 2341 return btrace_step_no_history ();
d825d248
MM
2342
2343 return btrace_step_spurious ();
2344}
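The gap-skipping loop above advances until it finds a real instruction and falls back to the saved start position if it hits the end of the trace first. The same shape can be modelled with a plain array in which NULL entries stand for gaps; the trace layout and helper below are invented for illustration.

#include <stdio.h>
#include <stddef.h>

/* A trace modelled as an array of instruction names; NULL entries are gaps.  */
static const char *trace[] = { "insn0", NULL, "insn1", NULL, NULL };
#define TRACE_LEN (sizeof (trace) / sizeof (trace[0]))

/* Advance *POS past gaps.  Return 0 with *POS at a real instruction, or
   restore the starting position and return -1 if the end is reached first,
   mirroring the "no history" case above.  */
static int
step_forward (size_t *pos)
{
  size_t start = *pos;

  do
    {
      if (*pos + 1 >= TRACE_LEN)
        {
          *pos = start;
          return -1;
        }
      ++*pos;
    }
  while (trace[*pos] == NULL);

  return 0;
}

int
main (void)
{
  size_t pos = 0;

  while (step_forward (&pos) == 0)
    printf ("stepped to %s (index %zu)\n", trace[pos], pos);
  printf ("out of history; position restored to index %zu\n", pos);
  return 0;
}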
2345
2346/* Step one instruction in backward direction. */
2347
2348static struct target_waitstatus
2349record_btrace_single_step_backward (struct thread_info *tp)
2350{
b61ce85c 2351 struct btrace_insn_iterator *replay, start;
d825d248 2352 struct btrace_thread_info *btinfo;
e59fa00f 2353
52834460
MM
2354 btinfo = &tp->btrace;
2355 replay = btinfo->replay;
2356
d825d248
MM
2357 /* Start replaying if we're not already doing so. */
2358 if (replay == NULL)
2359 replay = record_btrace_start_replaying (tp);
2360
2361 /* If we can't step any further, we reached the end of the history.
b61ce85c
MM
2362 Skip gaps during replay. If we end up at a gap (at the beginning of
2363 the trace), jump back to the instruction at which we started. */
2364 start = *replay;
d825d248
MM
2365 do
2366 {
2367 unsigned int steps;
2368
2369 steps = btrace_insn_prev (replay, 1);
2370 if (steps == 0)
b61ce85c
MM
2371 {
2372 *replay = start;
2373 return btrace_step_no_history ();
2374 }
d825d248
MM
2375 }
2376 while (btrace_insn_get (replay) == NULL);
2377
011c71b6
MM
2378 /* Check if we're stepping a breakpoint.
2379
2380 For reverse-stepping, this check is after the step. There is logic in
2381 infrun.c that handles reverse-stepping separately. See, for example,
2382 proceed and adjust_pc_after_break.
2383
2384 This code assumes that for reverse-stepping, PC points to the last
2385 de-executed instruction, whereas for forward-stepping PC points to the
2386 next to-be-executed instruction. */
2387 if (record_btrace_replay_at_breakpoint (tp))
2388 return btrace_step_stopped ();
2389
d825d248
MM
2390 return btrace_step_spurious ();
2391}
2392
2393/* Step a single thread. */
2394
2395static struct target_waitstatus
2396record_btrace_step_thread (struct thread_info *tp)
2397{
2398 struct btrace_thread_info *btinfo;
2399 struct target_waitstatus status;
2400 enum btrace_thread_flag flags;
2401
2402 btinfo = &tp->btrace;
2403
6e4879f0
MM
2404 flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
2405 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
52834460 2406
43792cf0 2407 DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp),
987e68b1
MM
2408 target_pid_to_str (tp->ptid), flags,
2409 btrace_thread_flag_to_str (flags));
52834460 2410
6e4879f0
MM
2411 /* We can't step without an execution history. */
2412 if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
2413 return btrace_step_no_history ();
2414
52834460
MM
2415 switch (flags)
2416 {
2417 default:
2418 internal_error (__FILE__, __LINE__, _("invalid stepping type."));
2419
6e4879f0
MM
2420 case BTHR_STOP:
2421 return btrace_step_stopped_on_request ();
2422
52834460 2423 case BTHR_STEP:
d825d248
MM
2424 status = record_btrace_single_step_forward (tp);
2425 if (status.kind != TARGET_WAITKIND_SPURIOUS)
e3cfc1c7 2426 break;
52834460
MM
2427
2428 return btrace_step_stopped ();
2429
2430 case BTHR_RSTEP:
d825d248
MM
2431 status = record_btrace_single_step_backward (tp);
2432 if (status.kind != TARGET_WAITKIND_SPURIOUS)
e3cfc1c7 2433 break;
52834460
MM
2434
2435 return btrace_step_stopped ();
2436
2437 case BTHR_CONT:
e3cfc1c7
MM
2438 status = record_btrace_single_step_forward (tp);
2439 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2440 break;
52834460 2441
e3cfc1c7
MM
2442 btinfo->flags |= flags;
2443 return btrace_step_again ();
52834460
MM
2444
2445 case BTHR_RCONT:
e3cfc1c7
MM
2446 status = record_btrace_single_step_backward (tp);
2447 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2448 break;
52834460 2449
e3cfc1c7
MM
2450 btinfo->flags |= flags;
2451 return btrace_step_again ();
2452 }
d825d248 2453
e3cfc1c7
MM
2454 /* We keep threads moving at the end of their execution history. The to_wait
2455 method will stop the thread for whom the event is reported. */
2456 if (status.kind == TARGET_WAITKIND_NO_HISTORY)
2457 btinfo->flags |= flags;
52834460 2458
e3cfc1c7 2459 return status;
b2f4cfde
MM
2460}
2461
e3cfc1c7
MM
2462/* A vector of threads. */
2463
2464typedef struct thread_info * tp_t;
2465DEF_VEC_P (tp_t);
2466
a6b5be76
MM
2467/* Announce further events if necessary. */
2468
2469static void
2470record_btrace_maybe_mark_async_event (const VEC (tp_t) *moving,
2471 const VEC (tp_t) *no_history)
2472{
2473 int more_moving, more_no_history;
2474
2475 more_moving = !VEC_empty (tp_t, moving);
2476 more_no_history = !VEC_empty (tp_t, no_history);
2477
2478 if (!more_moving && !more_no_history)
2479 return;
2480
2481 if (more_moving)
2482 DEBUG ("movers pending");
2483
2484 if (more_no_history)
2485 DEBUG ("no-history pending");
2486
2487 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2488}
2489
b2f4cfde
MM
2490/* The to_wait method of target record-btrace. */
2491
2492static ptid_t
2493record_btrace_wait (struct target_ops *ops, ptid_t ptid,
2494 struct target_waitstatus *status, int options)
2495{
e3cfc1c7
MM
2496 VEC (tp_t) *moving, *no_history;
2497 struct thread_info *tp, *eventing;
2498 struct cleanup *cleanups = make_cleanup (null_cleanup, NULL);
52834460
MM
2499
2500 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);
2501
b2f4cfde 2502 /* As long as we're not replaying, just forward the request. */
a52eab48
MM
2503 if ((execution_direction != EXEC_REVERSE)
2504 && !record_btrace_is_replaying (ops, minus_one_ptid))
b2f4cfde 2505 {
e75fdfca
TT
2506 ops = ops->beneath;
2507 return ops->to_wait (ops, ptid, status, options);
b2f4cfde
MM
2508 }
2509
e3cfc1c7
MM
2510 moving = NULL;
2511 no_history = NULL;
2512
2513 make_cleanup (VEC_cleanup (tp_t), &moving);
2514 make_cleanup (VEC_cleanup (tp_t), &no_history);
2515
2516 /* Keep a work list of moving threads. */
2517 ALL_NON_EXITED_THREADS (tp)
2518 if (ptid_match (tp->ptid, ptid)
2519 && ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0))
2520 VEC_safe_push (tp_t, moving, tp);
2521
2522 if (VEC_empty (tp_t, moving))
52834460 2523 {
e3cfc1c7 2524 *status = btrace_step_no_resumed ();
52834460 2525
e3cfc1c7
MM
2526 DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid),
2527 target_waitstatus_to_string (status));
2528
2529 do_cleanups (cleanups);
2530 return null_ptid;
52834460
MM
2531 }
2532
e3cfc1c7
MM
2533 /* Step moving threads one by one, one step each, until either one thread
2534 reports an event or we run out of threads to step.
2535
2536 When stepping more than one thread, chances are that some threads reach
2537 the end of their execution history earlier than others. If we reported
2538 this immediately, all-stop on top of non-stop would stop all threads and
2539 resume the same threads next time. And we would report the same thread
2540 having reached the end of its execution history again.
2541
2542 In the worst case, this would starve the other threads. But even if other
2543 threads would be allowed to make progress, this would result in far too
2544 many intermediate stops.
2545
2546 We therefore delay the reporting of "no execution history" until we have
2547 nothing else to report. By this time, all threads should have moved to
2548 either the beginning or the end of their execution history. There will
2549 be a single user-visible stop. */
2550 eventing = NULL;
2551 while ((eventing == NULL) && !VEC_empty (tp_t, moving))
2552 {
2553 unsigned int ix;
2554
2555 ix = 0;
2556 while ((eventing == NULL) && VEC_iterate (tp_t, moving, ix, tp))
2557 {
2558 *status = record_btrace_step_thread (tp);
2559
2560 switch (status->kind)
2561 {
2562 case TARGET_WAITKIND_IGNORE:
2563 ix++;
2564 break;
2565
2566 case TARGET_WAITKIND_NO_HISTORY:
2567 VEC_safe_push (tp_t, no_history,
2568 VEC_ordered_remove (tp_t, moving, ix));
2569 break;
2570
2571 default:
2572 eventing = VEC_unordered_remove (tp_t, moving, ix);
2573 break;
2574 }
2575 }
2576 }
2577
2578 if (eventing == NULL)
2579 {
2580 /* We started with at least one moving thread. This thread must have
2581 either stopped or reached the end of its execution history.
2582
2583 In the former case, EVENTING must not be NULL.
2584 In the latter case, NO_HISTORY must not be empty. */
2585 gdb_assert (!VEC_empty (tp_t, no_history));
2586
2587 /* We kept threads moving at the end of their execution history. Stop
2588 EVENTING now that we are going to report its stop. */
2589 eventing = VEC_unordered_remove (tp_t, no_history, 0);
2590 eventing->btrace.flags &= ~BTHR_MOVE;
2591
2592 *status = btrace_step_no_history ();
2593 }
2594
2595 gdb_assert (eventing != NULL);
2596
2597 /* We kept threads replaying at the end of their execution history. Stop
2598 replaying EVENTING now that we are going to report its stop. */
2599 record_btrace_stop_replaying_at_end (eventing);
52834460
MM
2600
2601 /* Stop all other threads. */
5953356c 2602 if (!target_is_non_stop_p ())
e3cfc1c7
MM
2603 ALL_NON_EXITED_THREADS (tp)
2604 record_btrace_cancel_resume (tp);
52834460 2605
a6b5be76
MM
2606 /* In async mode, we need to announce further events. */
2607 if (target_is_async_p ())
2608 record_btrace_maybe_mark_async_event (moving, no_history);
2609
52834460 2610 /* Start record histories anew from the current position. */
e3cfc1c7 2611 record_btrace_clear_histories (&eventing->btrace);
52834460
MM
2612
2613 /* We moved the replay position but did not update registers. */
e3cfc1c7
MM
2614 registers_changed_ptid (eventing->ptid);
2615
43792cf0
PA
2616 DEBUG ("wait ended by thread %s (%s): %s",
2617 print_thread_id (eventing),
e3cfc1c7
MM
2618 target_pid_to_str (eventing->ptid),
2619 target_waitstatus_to_string (status));
52834460 2620
e3cfc1c7
MM
2621 do_cleanups (cleanups);
2622 return eventing->ptid;
52834460
MM
2623}
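The scheduling loop above steps every moving thread one instruction per round, sets aside threads that ran out of history, and reports the first real event. A much simplified standalone sketch follows; the thread model and event codes are invented, and no-history threads are simply dropped here instead of being queued and reported last as in the real code.

#include <stdio.h>

enum wait_kind { WAIT_IGNORE, WAIT_NO_HISTORY, WAIT_STOPPED };

struct thread { int id; int insns_left; };

/* One single-step: report a stop once the thread's budget is exhausted,
   otherwise report "ignore" so the caller keeps stepping it.  A negative
   budget models running off the end of the recorded history.  */
static enum wait_kind
step_thread (struct thread *tp)
{
  if (tp->insns_left < 0)
    return WAIT_NO_HISTORY;
  if (tp->insns_left == 0)
    return WAIT_STOPPED;
  tp->insns_left--;
  return WAIT_IGNORE;
}

int
main (void)
{
  struct thread moving[] = { { 1, 3 }, { 2, -1 }, { 3, 5 } };
  int nmoving = 3;
  struct thread *eventing = NULL;

  /* Round-robin until one thread reports a real event or none are left.  */
  while (eventing == NULL && nmoving > 0)
    for (int i = 0; eventing == NULL && i < nmoving; )
      {
        enum wait_kind kind = step_thread (&moving[i]);

        if (kind == WAIT_IGNORE)
          i++;                              /* Keep it moving.  */
        else if (kind == WAIT_NO_HISTORY)
          moving[i] = moving[--nmoving];    /* Drop it (GDB queues it).  */
        else
          eventing = &moving[i];            /* Report this stop.  */
      }

  if (eventing != NULL)
    printf ("reporting stop for thread %d\n", eventing->id);
  else
    printf ("all threads ran out of history\n");
  return 0;
}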
2624
6e4879f0
MM
2625/* The to_stop method of target record-btrace. */
2626
2627static void
2628record_btrace_stop (struct target_ops *ops, ptid_t ptid)
2629{
2630 DEBUG ("stop %s", target_pid_to_str (ptid));
2631
2632 /* As long as we're not replaying, just forward the request. */
a52eab48
MM
2633 if ((execution_direction != EXEC_REVERSE)
2634 && !record_btrace_is_replaying (ops, minus_one_ptid))
6e4879f0
MM
2635 {
2636 ops = ops->beneath;
2637 ops->to_stop (ops, ptid);
2638 }
2639 else
2640 {
2641 struct thread_info *tp;
2642
2643 ALL_NON_EXITED_THREADS (tp)
2644 if (ptid_match (tp->ptid, ptid))
2645 {
2646 tp->btrace.flags &= ~BTHR_MOVE;
2647 tp->btrace.flags |= BTHR_STOP;
2648 }
2649 }
2650}
2651
52834460
MM
2652/* The to_can_execute_reverse method of target record-btrace. */
2653
2654static int
19db3e69 2655record_btrace_can_execute_reverse (struct target_ops *self)
52834460
MM
2656{
2657 return 1;
2658}
2659
9e8915c6 2660/* The to_stopped_by_sw_breakpoint method of target record-btrace. */
52834460 2661
9e8915c6
PA
2662static int
2663record_btrace_stopped_by_sw_breakpoint (struct target_ops *ops)
52834460 2664{
a52eab48 2665 if (record_btrace_is_replaying (ops, minus_one_ptid))
9e8915c6
PA
2666 {
2667 struct thread_info *tp = inferior_thread ();
2668
2669 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2670 }
2671
2672 return ops->beneath->to_stopped_by_sw_breakpoint (ops->beneath);
2673}
2674
2675/* The to_supports_stopped_by_sw_breakpoint method of target
2676 record-btrace. */
2677
2678static int
2679record_btrace_supports_stopped_by_sw_breakpoint (struct target_ops *ops)
2680{
a52eab48 2681 if (record_btrace_is_replaying (ops, minus_one_ptid))
9e8915c6
PA
2682 return 1;
2683
2684 return ops->beneath->to_supports_stopped_by_sw_breakpoint (ops->beneath);
2685}
2686
2687/* The to_stopped_by_hw_breakpoint method of target record-btrace. */
2688
2689static int
2690record_btrace_stopped_by_hw_breakpoint (struct target_ops *ops)
2691{
a52eab48 2692 if (record_btrace_is_replaying (ops, minus_one_ptid))
9e8915c6
PA
2693 {
2694 struct thread_info *tp = inferior_thread ();
2695
2696 return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2697 }
2698
2699 return ops->beneath->to_stopped_by_hw_breakpoint (ops->beneath);
2700}
2701
2702/* The to_supports_stopped_by_hw_breakpoint method of target
2703 record-btrace. */
2704
2705static int
2706record_btrace_supports_stopped_by_hw_breakpoint (struct target_ops *ops)
2707{
a52eab48 2708 if (record_btrace_is_replaying (ops, minus_one_ptid))
9e8915c6 2709 return 1;
52834460 2710
9e8915c6 2711 return ops->beneath->to_supports_stopped_by_hw_breakpoint (ops->beneath);
b2f4cfde
MM
2712}
2713
e8032dde 2714/* The to_update_thread_list method of target record-btrace. */
e2887aa3
MM
2715
2716static void
e8032dde 2717record_btrace_update_thread_list (struct target_ops *ops)
e2887aa3 2718{
e8032dde 2719 /* We don't add or remove threads during replay. */
a52eab48 2720 if (record_btrace_is_replaying (ops, minus_one_ptid))
e2887aa3
MM
2721 return;
2722
2723 /* Forward the request. */
e75fdfca 2724 ops = ops->beneath;
e8032dde 2725 ops->to_update_thread_list (ops);
e2887aa3
MM
2726}
2727
2728/* The to_thread_alive method of target record-btrace. */
2729
2730static int
2731record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
2732{
2733 /* We don't add or remove threads during replay. */
a52eab48 2734 if (record_btrace_is_replaying (ops, minus_one_ptid))
e2887aa3
MM
2735 return find_thread_ptid (ptid) != NULL;
2736
2737 /* Forward the request. */
e75fdfca
TT
2738 ops = ops->beneath;
2739 return ops->to_thread_alive (ops, ptid);
e2887aa3
MM
2740}
2741
066ce621
MM
2742/* Set the replay branch trace instruction iterator. If IT is NULL, replay
2743 is stopped. */
2744
2745static void
2746record_btrace_set_replay (struct thread_info *tp,
2747 const struct btrace_insn_iterator *it)
2748{
2749 struct btrace_thread_info *btinfo;
2750
2751 btinfo = &tp->btrace;
2752
2753 if (it == NULL || it->function == NULL)
52834460 2754 record_btrace_stop_replaying (tp);
066ce621
MM
2755 else
2756 {
2757 if (btinfo->replay == NULL)
52834460 2758 record_btrace_start_replaying (tp);
066ce621
MM
2759 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
2760 return;
2761
2762 *btinfo->replay = *it;
52834460 2763 registers_changed_ptid (tp->ptid);
066ce621
MM
2764 }
2765
52834460
MM
2766 /* Start anew from the new replay position. */
2767 record_btrace_clear_histories (btinfo);
485668e5
MM
2768
2769 stop_pc = regcache_read_pc (get_current_regcache ());
2770 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
066ce621
MM
2771}
2772
2773/* The to_goto_record_begin method of target record-btrace. */
2774
2775static void
08475817 2776record_btrace_goto_begin (struct target_ops *self)
066ce621
MM
2777{
2778 struct thread_info *tp;
2779 struct btrace_insn_iterator begin;
2780
2781 tp = require_btrace_thread ();
2782
2783 btrace_insn_begin (&begin, &tp->btrace);
b61ce85c
MM
2784
2785 /* Skip gaps at the beginning of the trace. */
2786 while (btrace_insn_get (&begin) == NULL)
2787 {
2788 unsigned int steps;
2789
2790 steps = btrace_insn_next (&begin, 1);
2791 if (steps == 0)
2792 error (_("No trace."));
2793 }
2794
066ce621 2795 record_btrace_set_replay (tp, &begin);
066ce621
MM
2796}
2797
2798/* The to_goto_record_end method of target record-btrace. */
2799
2800static void
307a1b91 2801record_btrace_goto_end (struct target_ops *ops)
066ce621
MM
2802{
2803 struct thread_info *tp;
2804
2805 tp = require_btrace_thread ();
2806
2807 record_btrace_set_replay (tp, NULL);
066ce621
MM
2808}
2809
2810/* The to_goto_record method of target record-btrace. */
2811
2812static void
606183ac 2813record_btrace_goto (struct target_ops *self, ULONGEST insn)
066ce621
MM
2814{
2815 struct thread_info *tp;
2816 struct btrace_insn_iterator it;
2817 unsigned int number;
2818 int found;
2819
2820 number = insn;
2821
2822 /* Check for wrap-arounds. */
2823 if (number != insn)
2824 error (_("Instruction number out of range."));
2825
2826 tp = require_btrace_thread ();
2827
2828 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
2829 if (found == 0)
2830 error (_("No such instruction."));
2831
2832 record_btrace_set_replay (tp, &it);
066ce621
MM
2833}
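The wrap-around check above copies the wide ULONGEST argument into an unsigned int and then compares the copy against the original to detect values that do not fit. A standalone sketch of the same narrowing check using standard C types:

#include <stdio.h>
#include <stdint.h>

/* Return 1 if VALUE fits in an unsigned int, mirroring the
   "number = insn; if (number != insn)" check above.  */
static int
fits_in_uint (uint64_t value)
{
  unsigned int narrow = value;  /* Unsigned narrowing is well defined (modulo).  */

  return (uint64_t) narrow == value;
}

int
main (void)
{
  printf ("%d\n", fits_in_uint (1234));                 /* 1: fits.  */
  printf ("%d\n", fits_in_uint (UINT64_C (1) << 40));   /* 0: out of range.  */
  return 0;
}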
2834
797094dd
MM
2835/* The to_record_stop_replaying method of target record-btrace. */
2836
2837static void
2838record_btrace_stop_replaying_all (struct target_ops *self)
2839{
2840 struct thread_info *tp;
2841
2842 ALL_NON_EXITED_THREADS (tp)
2843 record_btrace_stop_replaying (tp);
2844}
2845
70ad5bff
MM
2846/* The to_execution_direction target method. */
2847
2848static enum exec_direction_kind
2849record_btrace_execution_direction (struct target_ops *self)
2850{
2851 return record_btrace_resume_exec_dir;
2852}
2853
aef92902
MM
2854/* The to_prepare_to_generate_core target method. */
2855
2856static void
2857record_btrace_prepare_to_generate_core (struct target_ops *self)
2858{
2859 record_btrace_generating_corefile = 1;
2860}
2861
2862/* The to_done_generating_core target method. */
2863
2864static void
2865record_btrace_done_generating_core (struct target_ops *self)
2866{
2867 record_btrace_generating_corefile = 0;
2868}
2869
afedecd3
MM
2870/* Initialize the record-btrace target ops. */
2871
2872static void
2873init_record_btrace_ops (void)
2874{
2875 struct target_ops *ops;
2876
2877 ops = &record_btrace_ops;
2878 ops->to_shortname = "record-btrace";
2879 ops->to_longname = "Branch tracing target";
2880 ops->to_doc = "Collect control-flow trace and provide the execution history.";
2881 ops->to_open = record_btrace_open;
2882 ops->to_close = record_btrace_close;
b7d2e916 2883 ops->to_async = record_btrace_async;
afedecd3 2884 ops->to_detach = record_detach;
c0272db5 2885 ops->to_disconnect = record_btrace_disconnect;
afedecd3
MM
2886 ops->to_mourn_inferior = record_mourn_inferior;
2887 ops->to_kill = record_kill;
afedecd3
MM
2888 ops->to_stop_recording = record_btrace_stop_recording;
2889 ops->to_info_record = record_btrace_info;
2890 ops->to_insn_history = record_btrace_insn_history;
2891 ops->to_insn_history_from = record_btrace_insn_history_from;
2892 ops->to_insn_history_range = record_btrace_insn_history_range;
2893 ops->to_call_history = record_btrace_call_history;
2894 ops->to_call_history_from = record_btrace_call_history_from;
2895 ops->to_call_history_range = record_btrace_call_history_range;
07bbe694 2896 ops->to_record_is_replaying = record_btrace_is_replaying;
7ff27e9b 2897 ops->to_record_will_replay = record_btrace_will_replay;
797094dd 2898 ops->to_record_stop_replaying = record_btrace_stop_replaying_all;
633785ff
MM
2899 ops->to_xfer_partial = record_btrace_xfer_partial;
2900 ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
2901 ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
1f3ef581
MM
2902 ops->to_fetch_registers = record_btrace_fetch_registers;
2903 ops->to_store_registers = record_btrace_store_registers;
2904 ops->to_prepare_to_store = record_btrace_prepare_to_store;
ac01945b
TT
2905 ops->to_get_unwinder = &record_btrace_to_get_unwinder;
2906 ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;
b2f4cfde 2907 ops->to_resume = record_btrace_resume;
85ad3aaf 2908 ops->to_commit_resume = record_btrace_commit_resume;
b2f4cfde 2909 ops->to_wait = record_btrace_wait;
6e4879f0 2910 ops->to_stop = record_btrace_stop;
e8032dde 2911 ops->to_update_thread_list = record_btrace_update_thread_list;
e2887aa3 2912 ops->to_thread_alive = record_btrace_thread_alive;
066ce621
MM
2913 ops->to_goto_record_begin = record_btrace_goto_begin;
2914 ops->to_goto_record_end = record_btrace_goto_end;
2915 ops->to_goto_record = record_btrace_goto;
52834460 2916 ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
9e8915c6
PA
2917 ops->to_stopped_by_sw_breakpoint = record_btrace_stopped_by_sw_breakpoint;
2918 ops->to_supports_stopped_by_sw_breakpoint
2919 = record_btrace_supports_stopped_by_sw_breakpoint;
2920 ops->to_stopped_by_hw_breakpoint = record_btrace_stopped_by_hw_breakpoint;
2921 ops->to_supports_stopped_by_hw_breakpoint
2922 = record_btrace_supports_stopped_by_hw_breakpoint;
70ad5bff 2923 ops->to_execution_direction = record_btrace_execution_direction;
aef92902
MM
2924 ops->to_prepare_to_generate_core = record_btrace_prepare_to_generate_core;
2925 ops->to_done_generating_core = record_btrace_done_generating_core;
afedecd3
MM
2926 ops->to_stratum = record_stratum;
2927 ops->to_magic = OPS_MAGIC;
2928}
2929
f4abbc16
MM
2930/* Start recording in BTS format. */
2931
2932static void
2933cmd_record_btrace_bts_start (char *args, int from_tty)
2934{
f4abbc16
MM
2935 if (args != NULL && *args != 0)
2936 error (_("Invalid argument."));
2937
2938 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2939
492d29ea
PA
2940 TRY
2941 {
2942 execute_command ("target record-btrace", from_tty);
2943 }
2944 CATCH (exception, RETURN_MASK_ALL)
f4abbc16
MM
2945 {
2946 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2947 throw_exception (exception);
2948 }
492d29ea 2949 END_CATCH
f4abbc16
MM
2950}
2951
bc504a31 2952/* Start recording in Intel Processor Trace format. */
afedecd3
MM
2953
2954static void
b20a6524 2955cmd_record_btrace_pt_start (char *args, int from_tty)
afedecd3
MM
2956{
2957 if (args != NULL && *args != 0)
2958 error (_("Invalid argument."));
2959
b20a6524 2960 record_btrace_conf.format = BTRACE_FORMAT_PT;
f4abbc16 2961
492d29ea
PA
2962 TRY
2963 {
2964 execute_command ("target record-btrace", from_tty);
2965 }
2966 CATCH (exception, RETURN_MASK_ALL)
2967 {
2968 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2969 throw_exception (exception);
2970 }
2971 END_CATCH
afedecd3
MM
2972}
2973
b20a6524
MM
2974/* Alias for "target record". */
2975
2976static void
2977cmd_record_btrace_start (char *args, int from_tty)
2978{
2979 if (args != NULL && *args != 0)
2980 error (_("Invalid argument."));
2981
2982 record_btrace_conf.format = BTRACE_FORMAT_PT;
2983
2984 TRY
2985 {
2986 execute_command ("target record-btrace", from_tty);
2987 }
2988 CATCH (exception, RETURN_MASK_ALL)
2989 {
2990 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2991
2992 TRY
2993 {
2994 execute_command ("target record-btrace", from_tty);
2995 }
2996 CATCH (exception, RETURN_MASK_ALL)
2997 {
2998 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2999 throw_exception (exception);
3000 }
3001 END_CATCH
3002 }
3003 END_CATCH
3004}
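The plain "record btrace" command above tries Intel PT first and falls back to BTS, leaving the configured format at BTRACE_FORMAT_NONE only if both attempts fail. The same try-preferred-then-fall-back shape is sketched below without GDB's exception macros; all names are illustrative.

#include <stdio.h>

enum fmt { FMT_NONE, FMT_BTS, FMT_PT };

static enum fmt current_fmt = FMT_NONE;

/* Pretend only BTS is supported on this machine.  */
static int
start_recording (enum fmt f)
{
  return f == FMT_BTS ? 0 : -1;
}

/* Try the preferred format, fall back to the alternative, and reset the
   configuration if both attempts fail.  */
static int
start_with_fallback (void)
{
  current_fmt = FMT_PT;
  if (start_recording (current_fmt) == 0)
    return 0;

  current_fmt = FMT_BTS;
  if (start_recording (current_fmt) == 0)
    return 0;

  current_fmt = FMT_NONE;
  return -1;
}

int
main (void)
{
  if (start_with_fallback () == 0)
    printf ("recording started with format %d\n", current_fmt);
  else
    printf ("recording not available\n");
  return 0;
}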
3005
67b5c0c1
MM
3006/* The "set record btrace" command. */
3007
3008static void
3009cmd_set_record_btrace (char *args, int from_tty)
3010{
3011 cmd_show_list (set_record_btrace_cmdlist, from_tty, "");
3012}
3013
3014/* The "show record btrace" command. */
3015
3016static void
3017cmd_show_record_btrace (char *args, int from_tty)
3018{
3019 cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
3020}
3021
3022/* The "show record btrace replay-memory-access" command. */
3023
3024static void
3025cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
3026 struct cmd_list_element *c, const char *value)
3027{
3028 fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
3029 replay_memory_access);
3030}
3031
d33501a5
MM
3032/* The "set record btrace bts" command. */
3033
3034static void
3035cmd_set_record_btrace_bts (char *args, int from_tty)
3036{
3037 printf_unfiltered (_("\"set record btrace bts\" must be followed "
b20a6524 3038 "by an appropriate subcommand.\n"));
d33501a5
MM
3039 help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
3040 all_commands, gdb_stdout);
3041}
3042
3043/* The "show record btrace bts" command. */
3044
3045static void
3046cmd_show_record_btrace_bts (char *args, int from_tty)
3047{
3048 cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
3049}
3050
b20a6524
MM
3051/* The "set record btrace pt" command. */
3052
3053static void
3054cmd_set_record_btrace_pt (char *args, int from_tty)
3055{
3056 printf_unfiltered (_("\"set record btrace pt\" must be followed "
3057 "by an appropriate subcommand.\n"));
3058 help_list (set_record_btrace_pt_cmdlist, "set record btrace pt ",
3059 all_commands, gdb_stdout);
3060}
3061
3062/* The "show record btrace pt" command. */
3063
3064static void
3065cmd_show_record_btrace_pt (char *args, int from_tty)
3066{
3067 cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, "");
3068}
3069
3070/* The "record bts buffer-size" show value function. */
3071
3072static void
3073show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
3074 struct cmd_list_element *c,
3075 const char *value)
3076{
3077 fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
3078 value);
3079}
3080
3081/* The "record pt buffer-size" show value function. */
3082
3083static void
3084show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
3085 struct cmd_list_element *c,
3086 const char *value)
3087{
3088 fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
3089 value);
3090}
3091
afedecd3
MM
3092void _initialize_record_btrace (void);
3093
3094/* Initialize btrace commands. */
3095
3096void
3097_initialize_record_btrace (void)
3098{
f4abbc16
MM
3099 add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
3100 _("Start branch trace recording."), &record_btrace_cmdlist,
3101 "record btrace ", 0, &record_cmdlist);
afedecd3
MM
3102 add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
3103
f4abbc16
MM
3104 add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
3105 _("\
3106Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
3107The processor stores a from/to record for each branch into a cyclic buffer.\n\
3108This format may not be available on all processors."),
3109 &record_btrace_cmdlist);
3110 add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);
3111
b20a6524
MM
3112 add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
3113 _("\
bc504a31 3114Start branch trace recording in Intel Processor Trace format.\n\n\
b20a6524
MM
3115This format may not be available on all processors."),
3116 &record_btrace_cmdlist);
3117 add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);
3118
67b5c0c1
MM
3119 add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
3120 _("Set record options"), &set_record_btrace_cmdlist,
3121 "set record btrace ", 0, &set_record_cmdlist);
3122
3123 add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
3124 _("Show record options"), &show_record_btrace_cmdlist,
3125 "show record btrace ", 0, &show_record_cmdlist);
3126
3127 add_setshow_enum_cmd ("replay-memory-access", no_class,
3128 replay_memory_access_types, &replay_memory_access, _("\
3129Set what memory accesses are allowed during replay."), _("\
3130Show what memory accesses are allowed during replay."),
3131 _("Default is READ-ONLY.\n\n\
3132The btrace record target does not trace data.\n\
3133The memory therefore corresponds to the live target and not \
3134to the current replay position.\n\n\
3135When READ-ONLY, allow accesses to read-only memory during replay.\n\
3136When READ-WRITE, allow accesses to read-only and read-write memory during \
3137replay."),
3138 NULL, cmd_show_replay_memory_access,
3139 &set_record_btrace_cmdlist,
3140 &show_record_btrace_cmdlist);
3141
d33501a5
MM
3142 add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
3143 _("Set record btrace bts options"),
3144 &set_record_btrace_bts_cmdlist,
3145 "set record btrace bts ", 0, &set_record_btrace_cmdlist);
3146
3147 add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
3148 _("Show record btrace bts options"),
3149 &show_record_btrace_bts_cmdlist,
3150 "show record btrace bts ", 0, &show_record_btrace_cmdlist);
3151
3152 add_setshow_uinteger_cmd ("buffer-size", no_class,
3153 &record_btrace_conf.bts.size,
3154 _("Set the record/replay bts buffer size."),
3155 _("Show the record/replay bts buffer size."), _("\
3156When starting recording, request a trace buffer of this size. \
3157The actual buffer size may differ from the requested size. \
3158Use \"info record\" to see the actual buffer size.\n\n\
3159Bigger buffers allow longer recording but also take more time to process \
3160the recorded execution trace.\n\n\
b20a6524
MM
3161The trace buffer size may not be changed while recording."), NULL,
3162 show_record_bts_buffer_size_value,
d33501a5
MM
3163 &set_record_btrace_bts_cmdlist,
3164 &show_record_btrace_bts_cmdlist);
3165
b20a6524
MM
3166 add_prefix_cmd ("pt", class_support, cmd_set_record_btrace_pt,
3167 _("Set record btrace pt options"),
3168 &set_record_btrace_pt_cmdlist,
3169 "set record btrace pt ", 0, &set_record_btrace_cmdlist);
3170
3171 add_prefix_cmd ("pt", class_support, cmd_show_record_btrace_pt,
3172 _("Show record btrace pt options"),
3173 &show_record_btrace_pt_cmdlist,
3174 "show record btrace pt ", 0, &show_record_btrace_cmdlist);
3175
3176 add_setshow_uinteger_cmd ("buffer-size", no_class,
3177 &record_btrace_conf.pt.size,
3178 _("Set the record/replay pt buffer size."),
3179 _("Show the record/replay pt buffer size."), _("\
3180Bigger buffers allow longer recording but also take more time to process \
3181the recorded execution.\n\
3182The actual buffer size may differ from the requested size. Use \"info record\" \
3183to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
3184 &set_record_btrace_pt_cmdlist,
3185 &show_record_btrace_pt_cmdlist);
3186
afedecd3
MM
3187 init_record_btrace_ops ();
3188 add_target (&record_btrace_ops);
0b722aec
MM
3189
3190 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
3191 xcalloc, xfree);
d33501a5
MM
3192
3193 record_btrace_conf.bts.size = 64 * 1024;
b20a6524 3194 record_btrace_conf.pt.size = 16 * 1024;
afedecd3 3195}