gdb/record-btrace.c
1/* Branch trace support for GDB, the GNU debugger.
2
3 Copyright (C) 2013-2018 Free Software Foundation, Inc.
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22#include "defs.h"
23#include "record.h"
24#include "record-btrace.h"
25#include "gdbthread.h"
26#include "target.h"
27#include "gdbcmd.h"
28#include "disasm.h"
29#include "observable.h"
30#include "cli/cli-utils.h"
31#include "source.h"
32#include "ui-out.h"
33#include "symtab.h"
34#include "filenames.h"
35#include "regcache.h"
36#include "frame-unwind.h"
37#include "hashtab.h"
38#include "infrun.h"
39#include "event-loop.h"
40#include "inf-loop.h"
41#include "vec.h"
42#include <algorithm>
43
44/* The target_ops of record-btrace. */
45static struct target_ops record_btrace_ops;
46
47/* Token associated with a new-thread observer enabling branch tracing
48 for the new thread. */
49static const gdb::observers::token record_btrace_thread_observer_token;
50
51/* Memory access types used in set/show record btrace replay-memory-access. */
52static const char replay_memory_access_read_only[] = "read-only";
53static const char replay_memory_access_read_write[] = "read-write";
54static const char *const replay_memory_access_types[] =
55{
56 replay_memory_access_read_only,
57 replay_memory_access_read_write,
58 NULL
59};
60
61/* The currently allowed replay memory access type. */
62static const char *replay_memory_access = replay_memory_access_read_only;
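/* Illustrative usage (added note, based on the option names above): the
   default only permits memory reads while replaying; it can be changed
   from the GDB prompt with

     (gdb) set record btrace replay-memory-access read-write
     (gdb) show record btrace replay-memory-access  */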
63
64/* Command lists for "set/show record btrace". */
65static struct cmd_list_element *set_record_btrace_cmdlist;
66static struct cmd_list_element *show_record_btrace_cmdlist;
67
68/* The execution direction of the last resume we got. See record-full.c. */
69static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;
70
71/* The async event handler for reverse/replay execution. */
72static struct async_event_handler *record_btrace_async_inferior_event_handler;
73
74/* A flag indicating that we are currently generating a core file. */
75static int record_btrace_generating_corefile;
76
77/* The current branch trace configuration. */
78static struct btrace_config record_btrace_conf;
79
80/* Command list for "record btrace". */
81static struct cmd_list_element *record_btrace_cmdlist;
82
83/* Command lists for "set/show record btrace bts". */
84static struct cmd_list_element *set_record_btrace_bts_cmdlist;
85static struct cmd_list_element *show_record_btrace_bts_cmdlist;
86
87/* Command lists for "set/show record btrace pt". */
88static struct cmd_list_element *set_record_btrace_pt_cmdlist;
89static struct cmd_list_element *show_record_btrace_pt_cmdlist;
90
91/* Print a record-btrace debug message. Use do ... while (0) to avoid
92 ambiguities when used in if statements. */
93
94#define DEBUG(msg, args...) \
95 do \
96 { \
97 if (record_debug != 0) \
98 fprintf_unfiltered (gdb_stdlog, \
99 "[record-btrace] " msg "\n", ##args); \
100 } \
101 while (0)
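/* Illustrative note (not part of the original source): the do/while (0)
   wrapper makes DEBUG (...) expand to a single statement, so code such as

     if (from_tty)
       DEBUG ("open");
     else
       do_something_else ();   // hypothetical call, for illustration only

   keeps the `else' attached to the outer `if'.  With a bare { ... } block
   instead, the semicolon written after DEBUG ("open") would terminate the
   `if' statement and make the following `else' a syntax error.  */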
102
103
104/* Update the branch trace for the current thread and return a pointer to its
066ce621 105 thread_info.
afedecd3
MM
106
107 Throws an error if there is no thread or no trace. This function never
108 returns NULL. */
109
066ce621
MM
110static struct thread_info *
111require_btrace_thread (void)
afedecd3
MM
112{
113 struct thread_info *tp;
afedecd3
MM
114
115 DEBUG ("require");
116
117 tp = find_thread_ptid (inferior_ptid);
118 if (tp == NULL)
119 error (_("No thread."));
120
cd4007e4
MM
121 validate_registers_access ();
122
afedecd3
MM
123 btrace_fetch (tp);
124
6e07b1d2 125 if (btrace_is_empty (tp))
afedecd3
MM
126 error (_("No trace."));
127
066ce621
MM
128 return tp;
129}
130
131/* Update the branch trace for the current thread and return a pointer to its
132 branch trace information struct.
133
134 Throws an error if there is no thread or no trace. This function never
135 returns NULL. */
136
137static struct btrace_thread_info *
138require_btrace (void)
139{
140 struct thread_info *tp;
141
142 tp = require_btrace_thread ();
143
144 return &tp->btrace;
afedecd3
MM
145}
146
147/* Enable branch tracing for one thread. Warn on errors. */
148
149static void
150record_btrace_enable_warn (struct thread_info *tp)
151{
492d29ea
PA
152 TRY
153 {
154 btrace_enable (tp, &record_btrace_conf);
155 }
156 CATCH (error, RETURN_MASK_ERROR)
157 {
158 warning ("%s", error.message);
159 }
160 END_CATCH
afedecd3
MM
161}
162
afedecd3
MM
163/* Enable automatic tracing of new threads. */
164
165static void
166record_btrace_auto_enable (void)
167{
168 DEBUG ("attach thread observer");
169
76727919
TT
170 gdb::observers::new_thread.attach (record_btrace_enable_warn,
171 record_btrace_thread_observer_token);
afedecd3
MM
172}
173
174/* Disable automatic tracing of new threads. */
175
176static void
177record_btrace_auto_disable (void)
178{
afedecd3
MM
179 DEBUG ("detach thread observer");
180
76727919 181 gdb::observers::new_thread.detach (record_btrace_thread_observer_token);
afedecd3
MM
182}
183
70ad5bff
MM
184/* The record-btrace async event handler function. */
185
186static void
187record_btrace_handle_async_inferior_event (gdb_client_data data)
188{
189 inferior_event_handler (INF_REG_EVENT, NULL);
190}
191
c0272db5
TW
192/* See record-btrace.h. */
193
194void
195record_btrace_push_target (void)
196{
197 const char *format;
198
199 record_btrace_auto_enable ();
200
201 push_target (&record_btrace_ops);
202
203 record_btrace_async_inferior_event_handler
204 = create_async_event_handler (record_btrace_handle_async_inferior_event,
205 NULL);
206 record_btrace_generating_corefile = 0;
207
208 format = btrace_format_short_string (record_btrace_conf.format);
76727919 209 gdb::observers::record_changed.notify (current_inferior (), 1, "btrace", format);
c0272db5
TW
210}
211
228f1508
SM
212/* Disable btrace on a set of threads on scope exit. */
213
214struct scoped_btrace_disable
215{
216 scoped_btrace_disable () = default;
217
218 DISABLE_COPY_AND_ASSIGN (scoped_btrace_disable);
219
220 ~scoped_btrace_disable ()
221 {
222 for (thread_info *tp : m_threads)
223 btrace_disable (tp);
224 }
225
226 void add_thread (thread_info *thread)
227 {
228 m_threads.push_front (thread);
229 }
230
231 void discard ()
232 {
233 m_threads.clear ();
234 }
235
236private:
237 std::forward_list<thread_info *> m_threads;
238};
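/* Usage sketch (added illustration; record_btrace_open below follows this
   pattern):

     {
       scoped_btrace_disable btrace_disable;

       // for each thread to be traced:
       btrace_enable (tp, &record_btrace_conf);   // may throw
       btrace_disable.add_thread (tp);

       btrace_disable.discard ();  // success: keep tracing enabled
     }

   If btrace_enable throws part way through, the destructor runs and
   disables tracing for the threads that were already added.  */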
239
240/* The to_open method of target record-btrace. */
241
242static void
014f9477 243record_btrace_open (const char *args, int from_tty)
afedecd3 244{
228f1508
SM
245 /* If we fail to enable btrace for one thread, disable it for the threads for
246 which it was successfully enabled. */
247 scoped_btrace_disable btrace_disable;
afedecd3
MM
248 struct thread_info *tp;
249
250 DEBUG ("open");
251
8213266a 252 record_preopen ();
afedecd3
MM
253
254 if (!target_has_execution)
255 error (_("The program is not being run."));
256
034f788c 257 ALL_NON_EXITED_THREADS (tp)
5d5658a1 258 if (args == NULL || *args == 0 || number_is_in_list (args, tp->global_num))
afedecd3 259 {
f4abbc16 260 btrace_enable (tp, &record_btrace_conf);
afedecd3 261
228f1508 262 btrace_disable.add_thread (tp);
afedecd3
MM
263 }
264
c0272db5 265 record_btrace_push_target ();
afedecd3 266
228f1508 267 btrace_disable.discard ();
afedecd3
MM
268}
269
270/* The to_stop_recording method of target record-btrace. */
271
272static void
c6cd7c02 273record_btrace_stop_recording (struct target_ops *self)
afedecd3
MM
274{
275 struct thread_info *tp;
276
277 DEBUG ("stop recording");
278
279 record_btrace_auto_disable ();
280
034f788c 281 ALL_NON_EXITED_THREADS (tp)
afedecd3
MM
282 if (tp->btrace.target != NULL)
283 btrace_disable (tp);
284}
285
c0272db5
TW
286/* The to_disconnect method of target record-btrace. */
287
288static void
289record_btrace_disconnect (struct target_ops *self, const char *args,
290 int from_tty)
291{
292 struct target_ops *beneath = self->beneath;
293
294 /* Do not stop recording, just clean up GDB side. */
295 unpush_target (self);
296
297 /* Forward disconnect. */
298 beneath->to_disconnect (beneath, args, from_tty);
299}
300
afedecd3
MM
301/* The to_close method of target record-btrace. */
302
303static void
de90e03d 304record_btrace_close (struct target_ops *self)
afedecd3 305{
568e808b
MM
306 struct thread_info *tp;
307
70ad5bff
MM
308 if (record_btrace_async_inferior_event_handler != NULL)
309 delete_async_event_handler (&record_btrace_async_inferior_event_handler);
310
99c819ee
MM
311 /* Make sure automatic recording gets disabled even if we did not stop
312 recording before closing the record-btrace target. */
313 record_btrace_auto_disable ();
314
568e808b
MM
315 /* We should have already stopped recording.
316 Tear down btrace in case we have not. */
034f788c 317 ALL_NON_EXITED_THREADS (tp)
568e808b 318 btrace_teardown (tp);
afedecd3
MM
319}
320
b7d2e916
PA
321/* The to_async method of target record-btrace. */
322
323static void
6a3753b3 324record_btrace_async (struct target_ops *ops, int enable)
b7d2e916 325{
6a3753b3 326 if (enable)
b7d2e916
PA
327 mark_async_event_handler (record_btrace_async_inferior_event_handler);
328 else
329 clear_async_event_handler (record_btrace_async_inferior_event_handler);
330
6a3753b3 331 ops->beneath->to_async (ops->beneath, enable);
b7d2e916
PA
332}
333
d33501a5
MM
334/* Adjusts the size and returns a human readable size suffix. */
335
336static const char *
337record_btrace_adjust_size (unsigned int *size)
338{
339 unsigned int sz;
340
341 sz = *size;
342
343 if ((sz & ((1u << 30) - 1)) == 0)
344 {
345 *size = sz >> 30;
346 return "GB";
347 }
348 else if ((sz & ((1u << 20) - 1)) == 0)
349 {
350 *size = sz >> 20;
351 return "MB";
352 }
353 else if ((sz & ((1u << 10) - 1)) == 0)
354 {
355 *size = sz >> 10;
356 return "kB";
357 }
358 else
359 return "";
360}
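/* Worked example (added illustration): for *size == 2097152 (2 << 20) the
   GB test fails but the MB test succeeds, so *size becomes 2 and "MB" is
   returned; for *size == 3000, which is not a multiple of 1 kB, the size
   is left unchanged and "" is returned.  */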
361
362/* Print a BTS configuration. */
363
364static void
365record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
366{
367 const char *suffix;
368 unsigned int size;
369
370 size = conf->size;
371 if (size > 0)
372 {
373 suffix = record_btrace_adjust_size (&size);
374 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
375 }
376}
377
bc504a31 378/* Print an Intel Processor Trace configuration. */
b20a6524
MM
379
380static void
381record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
382{
383 const char *suffix;
384 unsigned int size;
385
386 size = conf->size;
387 if (size > 0)
388 {
389 suffix = record_btrace_adjust_size (&size);
390 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
391 }
392}
393
d33501a5
MM
394/* Print a branch tracing configuration. */
395
396static void
397record_btrace_print_conf (const struct btrace_config *conf)
398{
399 printf_unfiltered (_("Recording format: %s.\n"),
400 btrace_format_string (conf->format));
401
402 switch (conf->format)
403 {
404 case BTRACE_FORMAT_NONE:
405 return;
406
407 case BTRACE_FORMAT_BTS:
408 record_btrace_print_bts_conf (&conf->bts);
409 return;
b20a6524
MM
410
411 case BTRACE_FORMAT_PT:
412 record_btrace_print_pt_conf (&conf->pt);
413 return;
d33501a5
MM
414 }
415
416 internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
417}
418
afedecd3
MM
419/* The to_info_record method of target record-btrace. */
420
421static void
630d6a4a 422record_btrace_info (struct target_ops *self)
afedecd3
MM
423{
424 struct btrace_thread_info *btinfo;
f4abbc16 425 const struct btrace_config *conf;
afedecd3 426 struct thread_info *tp;
31fd9caa 427 unsigned int insns, calls, gaps;
afedecd3
MM
428
429 DEBUG ("info");
430
431 tp = find_thread_ptid (inferior_ptid);
432 if (tp == NULL)
433 error (_("No thread."));
434
cd4007e4
MM
435 validate_registers_access ();
436
f4abbc16
MM
437 btinfo = &tp->btrace;
438
439 conf = btrace_conf (btinfo);
440 if (conf != NULL)
d33501a5 441 record_btrace_print_conf (conf);
f4abbc16 442
afedecd3
MM
443 btrace_fetch (tp);
444
23a7fe75
MM
445 insns = 0;
446 calls = 0;
31fd9caa 447 gaps = 0;
23a7fe75 448
6e07b1d2 449 if (!btrace_is_empty (tp))
23a7fe75
MM
450 {
451 struct btrace_call_iterator call;
452 struct btrace_insn_iterator insn;
453
454 btrace_call_end (&call, btinfo);
455 btrace_call_prev (&call, 1);
5de9129b 456 calls = btrace_call_number (&call);
23a7fe75
MM
457
458 btrace_insn_end (&insn, btinfo);
5de9129b 459 insns = btrace_insn_number (&insn);
31fd9caa 460
69090cee
TW
461 /* If the last instruction is not a gap, it is the current instruction
462 that is not actually part of the record. */
463 if (btrace_insn_get (&insn) != NULL)
464 insns -= 1;
31fd9caa
MM
465
466 gaps = btinfo->ngaps;
23a7fe75 467 }
afedecd3 468
31fd9caa 469 printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
43792cf0
PA
470 "for thread %s (%s).\n"), insns, calls, gaps,
471 print_thread_id (tp), target_pid_to_str (tp->ptid));
07bbe694
MM
472
473 if (btrace_is_replaying (tp))
474 printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
475 btrace_insn_number (btinfo->replay));
afedecd3
MM
476}
477
31fd9caa
MM
478/* Print a decode error. */
479
480static void
481btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
482 enum btrace_format format)
483{
508352a9 484 const char *errstr = btrace_decode_error (format, errcode);
31fd9caa 485
112e8700 486 uiout->text (_("["));
508352a9
TW
487 /* ERRCODE > 0 indicates notifications on BTRACE_FORMAT_PT. */
488 if (!(format == BTRACE_FORMAT_PT && errcode > 0))
31fd9caa 489 {
112e8700
SM
490 uiout->text (_("decode error ("));
491 uiout->field_int ("errcode", errcode);
492 uiout->text (_("): "));
31fd9caa 493 }
112e8700
SM
494 uiout->text (errstr);
495 uiout->text (_("]\n"));
31fd9caa
MM
496}
497
afedecd3
MM
498/* Print an unsigned int. */
499
500static void
501ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
502{
112e8700 503 uiout->field_fmt (fld, "%u", val);
afedecd3
MM
504}
505
f94cc897
MM
506/* A range of source lines. */
507
508struct btrace_line_range
509{
510 /* The symtab this line is from. */
511 struct symtab *symtab;
512
513 /* The first line (inclusive). */
514 int begin;
515
516 /* The last line (exclusive). */
517 int end;
518};
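/* Example (added illustration): begin == 10, end == 13 describes the
   half-open range [10, 13), i.e. source lines 10, 11 and 12; any range
   with end <= begin is treated as empty (see btrace_line_range_is_empty
   below).  */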
519
520/* Construct a line range. */
521
522static struct btrace_line_range
523btrace_mk_line_range (struct symtab *symtab, int begin, int end)
524{
525 struct btrace_line_range range;
526
527 range.symtab = symtab;
528 range.begin = begin;
529 range.end = end;
530
531 return range;
532}
533
534/* Add a line to a line range. */
535
536static struct btrace_line_range
537btrace_line_range_add (struct btrace_line_range range, int line)
538{
539 if (range.end <= range.begin)
540 {
541 /* This is the first entry. */
542 range.begin = line;
543 range.end = line + 1;
544 }
545 else if (line < range.begin)
546 range.begin = line;
547 else if (range.end < line)
548 range.end = line;
549
550 return range;
551}
552
553/* Return non-zero if RANGE is empty, zero otherwise. */
554
555static int
556btrace_line_range_is_empty (struct btrace_line_range range)
557{
558 return range.end <= range.begin;
559}
560
561/* Return non-zero if LHS contains RHS, zero otherwise. */
562
563static int
564btrace_line_range_contains_range (struct btrace_line_range lhs,
565 struct btrace_line_range rhs)
566{
567 return ((lhs.symtab == rhs.symtab)
568 && (lhs.begin <= rhs.begin)
569 && (rhs.end <= lhs.end));
570}
571
572/* Find the line range associated with PC. */
573
574static struct btrace_line_range
575btrace_find_line_range (CORE_ADDR pc)
576{
577 struct btrace_line_range range;
578 struct linetable_entry *lines;
579 struct linetable *ltable;
580 struct symtab *symtab;
581 int nlines, i;
582
583 symtab = find_pc_line_symtab (pc);
584 if (symtab == NULL)
585 return btrace_mk_line_range (NULL, 0, 0);
586
587 ltable = SYMTAB_LINETABLE (symtab);
588 if (ltable == NULL)
589 return btrace_mk_line_range (symtab, 0, 0);
590
591 nlines = ltable->nitems;
592 lines = ltable->item;
593 if (nlines <= 0)
594 return btrace_mk_line_range (symtab, 0, 0);
595
596 range = btrace_mk_line_range (symtab, 0, 0);
597 for (i = 0; i < nlines - 1; i++)
598 {
599 if ((lines[i].pc == pc) && (lines[i].line != 0))
600 range = btrace_line_range_add (range, lines[i].line);
601 }
602
603 return range;
604}
605
606/* Print source lines in LINES to UIOUT.
 607
 608 SRC_AND_ASM_TUPLE and ASM_LIST hold the output emitters for the last
 609 source line and for the instructions corresponding to that source line.
 610 When printing a new source line, we close the currently open emitters and
 611 open new ones for the new source line. If the source line range in LINES
 612 is not empty, this function leaves the emitters for the last printed
 613 source line open so instructions can be added to them. */
614
615static void
616btrace_print_lines (struct btrace_line_range lines, struct ui_out *uiout,
7ea78b59
SM
617 gdb::optional<ui_out_emit_tuple> *src_and_asm_tuple,
618 gdb::optional<ui_out_emit_list> *asm_list,
619 gdb_disassembly_flags flags)
f94cc897 620{
8d297bbf 621 print_source_lines_flags psl_flags;
f94cc897 622
f94cc897
MM
623 if (flags & DISASSEMBLY_FILENAME)
624 psl_flags |= PRINT_SOURCE_LINES_FILENAME;
625
7ea78b59 626 for (int line = lines.begin; line < lines.end; ++line)
f94cc897 627 {
7ea78b59 628 asm_list->reset ();
f94cc897 629
7ea78b59 630 src_and_asm_tuple->emplace (uiout, "src_and_asm_line");
f94cc897
MM
631
632 print_source_lines (lines.symtab, line, line + 1, psl_flags);
633
7ea78b59 634 asm_list->emplace (uiout, "line_asm_insn");
f94cc897
MM
635 }
636}
637
afedecd3
MM
638/* Disassemble a section of the recorded instruction trace. */
639
640static void
23a7fe75 641btrace_insn_history (struct ui_out *uiout,
31fd9caa 642 const struct btrace_thread_info *btinfo,
23a7fe75 643 const struct btrace_insn_iterator *begin,
9a24775b
PA
644 const struct btrace_insn_iterator *end,
645 gdb_disassembly_flags flags)
afedecd3 646{
9a24775b
PA
647 DEBUG ("itrace (0x%x): [%u; %u)", (unsigned) flags,
648 btrace_insn_number (begin), btrace_insn_number (end));
afedecd3 649
f94cc897
MM
650 flags |= DISASSEMBLY_SPECULATIVE;
651
7ea78b59
SM
652 struct gdbarch *gdbarch = target_gdbarch ();
653 btrace_line_range last_lines = btrace_mk_line_range (NULL, 0, 0);
f94cc897 654
7ea78b59 655 ui_out_emit_list list_emitter (uiout, "asm_insns");
f94cc897 656
7ea78b59
SM
657 gdb::optional<ui_out_emit_tuple> src_and_asm_tuple;
658 gdb::optional<ui_out_emit_list> asm_list;
afedecd3 659
8b172ce7
PA
660 gdb_pretty_print_disassembler disasm (gdbarch);
661
7ea78b59
SM
662 for (btrace_insn_iterator it = *begin; btrace_insn_cmp (&it, end) != 0;
663 btrace_insn_next (&it, 1))
afedecd3 664 {
23a7fe75
MM
665 const struct btrace_insn *insn;
666
667 insn = btrace_insn_get (&it);
668
31fd9caa
MM
669 /* A NULL instruction indicates a gap in the trace. */
670 if (insn == NULL)
671 {
672 const struct btrace_config *conf;
673
674 conf = btrace_conf (btinfo);
afedecd3 675
31fd9caa
MM
676 /* We have trace so we must have a configuration. */
677 gdb_assert (conf != NULL);
678
69090cee
TW
679 uiout->field_fmt ("insn-number", "%u",
680 btrace_insn_number (&it));
681 uiout->text ("\t");
682
683 btrace_ui_out_decode_error (uiout, btrace_insn_get_error (&it),
31fd9caa
MM
684 conf->format);
685 }
686 else
687 {
f94cc897 688 struct disasm_insn dinsn;
da8c46d2 689
f94cc897 690 if ((flags & DISASSEMBLY_SOURCE) != 0)
da8c46d2 691 {
f94cc897
MM
692 struct btrace_line_range lines;
693
694 lines = btrace_find_line_range (insn->pc);
695 if (!btrace_line_range_is_empty (lines)
696 && !btrace_line_range_contains_range (last_lines, lines))
697 {
7ea78b59
SM
698 btrace_print_lines (lines, uiout, &src_and_asm_tuple, &asm_list,
699 flags);
f94cc897
MM
700 last_lines = lines;
701 }
7ea78b59 702 else if (!src_and_asm_tuple.has_value ())
f94cc897 703 {
7ea78b59
SM
704 gdb_assert (!asm_list.has_value ());
705
706 src_and_asm_tuple.emplace (uiout, "src_and_asm_line");
707
f94cc897 708 /* No source information. */
7ea78b59 709 asm_list.emplace (uiout, "line_asm_insn");
f94cc897
MM
710 }
711
7ea78b59
SM
712 gdb_assert (src_and_asm_tuple.has_value ());
713 gdb_assert (asm_list.has_value ());
da8c46d2 714 }
da8c46d2 715
f94cc897
MM
716 memset (&dinsn, 0, sizeof (dinsn));
717 dinsn.number = btrace_insn_number (&it);
718 dinsn.addr = insn->pc;
31fd9caa 719
da8c46d2 720 if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
f94cc897 721 dinsn.is_speculative = 1;
da8c46d2 722
8b172ce7 723 disasm.pretty_print_insn (uiout, &dinsn, flags);
31fd9caa 724 }
afedecd3
MM
725 }
726}
727
728/* The to_insn_history method of target record-btrace. */
729
730static void
9a24775b
PA
731record_btrace_insn_history (struct target_ops *self, int size,
732 gdb_disassembly_flags flags)
afedecd3
MM
733{
734 struct btrace_thread_info *btinfo;
23a7fe75
MM
735 struct btrace_insn_history *history;
736 struct btrace_insn_iterator begin, end;
afedecd3 737 struct ui_out *uiout;
23a7fe75 738 unsigned int context, covered;
afedecd3
MM
739
740 uiout = current_uiout;
2e783024 741 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
afedecd3 742 context = abs (size);
afedecd3
MM
743 if (context == 0)
744 error (_("Bad record instruction-history-size."));
745
23a7fe75
MM
746 btinfo = require_btrace ();
747 history = btinfo->insn_history;
748 if (history == NULL)
afedecd3 749 {
07bbe694 750 struct btrace_insn_iterator *replay;
afedecd3 751
9a24775b 752 DEBUG ("insn-history (0x%x): %d", (unsigned) flags, size);
afedecd3 753
07bbe694
MM
754 /* If we're replaying, we start at the replay position. Otherwise, we
755 start at the tail of the trace. */
756 replay = btinfo->replay;
757 if (replay != NULL)
758 begin = *replay;
759 else
760 btrace_insn_end (&begin, btinfo);
761
762 /* We start from here and expand in the requested direction. Then we
763 expand in the other direction, as well, to fill up any remaining
764 context. */
765 end = begin;
766 if (size < 0)
767 {
768 /* We want the current position covered, as well. */
769 covered = btrace_insn_next (&end, 1);
770 covered += btrace_insn_prev (&begin, context - covered);
771 covered += btrace_insn_next (&end, context - covered);
772 }
773 else
774 {
775 covered = btrace_insn_next (&end, context);
776 covered += btrace_insn_prev (&begin, context - covered);
777 }
afedecd3
MM
778 }
779 else
780 {
23a7fe75
MM
781 begin = history->begin;
782 end = history->end;
afedecd3 783
9a24775b 784 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", (unsigned) flags, size,
23a7fe75 785 btrace_insn_number (&begin), btrace_insn_number (&end));
afedecd3 786
23a7fe75
MM
787 if (size < 0)
788 {
789 end = begin;
790 covered = btrace_insn_prev (&begin, context);
791 }
792 else
793 {
794 begin = end;
795 covered = btrace_insn_next (&end, context);
796 }
afedecd3
MM
797 }
798
23a7fe75 799 if (covered > 0)
31fd9caa 800 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
23a7fe75
MM
801 else
802 {
803 if (size < 0)
804 printf_unfiltered (_("At the start of the branch trace record.\n"));
805 else
806 printf_unfiltered (_("At the end of the branch trace record.\n"));
807 }
afedecd3 808
23a7fe75 809 btrace_set_insn_history (btinfo, &begin, &end);
afedecd3
MM
810}
811
812/* The to_insn_history_range method of target record-btrace. */
813
814static void
4e99c6b7 815record_btrace_insn_history_range (struct target_ops *self,
9a24775b
PA
816 ULONGEST from, ULONGEST to,
817 gdb_disassembly_flags flags)
afedecd3
MM
818{
819 struct btrace_thread_info *btinfo;
23a7fe75 820 struct btrace_insn_iterator begin, end;
afedecd3 821 struct ui_out *uiout;
23a7fe75
MM
822 unsigned int low, high;
823 int found;
afedecd3
MM
824
825 uiout = current_uiout;
2e783024 826 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
23a7fe75
MM
827 low = from;
828 high = to;
afedecd3 829
9a24775b 830 DEBUG ("insn-history (0x%x): [%u; %u)", (unsigned) flags, low, high);
afedecd3
MM
831
832 /* Check for wrap-arounds. */
23a7fe75 833 if (low != from || high != to)
afedecd3
MM
834 error (_("Bad range."));
835
0688d04e 836 if (high < low)
afedecd3
MM
837 error (_("Bad range."));
838
23a7fe75 839 btinfo = require_btrace ();
afedecd3 840
23a7fe75
MM
841 found = btrace_find_insn_by_number (&begin, btinfo, low);
842 if (found == 0)
843 error (_("Range out of bounds."));
afedecd3 844
23a7fe75
MM
845 found = btrace_find_insn_by_number (&end, btinfo, high);
846 if (found == 0)
0688d04e
MM
847 {
848 /* Silently truncate the range. */
849 btrace_insn_end (&end, btinfo);
850 }
851 else
852 {
853 /* We want both begin and end to be inclusive. */
854 btrace_insn_next (&end, 1);
855 }
afedecd3 856
31fd9caa 857 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
23a7fe75 858 btrace_set_insn_history (btinfo, &begin, &end);
afedecd3
MM
859}
860
861/* The to_insn_history_from method of target record-btrace. */
862
863static void
9abc3ff3 864record_btrace_insn_history_from (struct target_ops *self,
9a24775b
PA
865 ULONGEST from, int size,
866 gdb_disassembly_flags flags)
afedecd3
MM
867{
868 ULONGEST begin, end, context;
869
870 context = abs (size);
0688d04e
MM
871 if (context == 0)
872 error (_("Bad record instruction-history-size."));
afedecd3
MM
873
874 if (size < 0)
875 {
876 end = from;
877
878 if (from < context)
879 begin = 0;
880 else
0688d04e 881 begin = from - context + 1;
afedecd3
MM
882 }
883 else
884 {
885 begin = from;
0688d04e 886 end = from + context - 1;
afedecd3
MM
887
888 /* Check for wrap-around. */
889 if (end < begin)
890 end = ULONGEST_MAX;
891 }
892
4e99c6b7 893 record_btrace_insn_history_range (self, begin, end, flags);
afedecd3
MM
894}
895
896/* Print the instruction number range for a function call history line. */
897
898static void
23a7fe75
MM
899btrace_call_history_insn_range (struct ui_out *uiout,
900 const struct btrace_function *bfun)
afedecd3 901{
7acbe133
MM
902 unsigned int begin, end, size;
903
0860c437 904 size = bfun->insn.size ();
7acbe133 905 gdb_assert (size > 0);
afedecd3 906
23a7fe75 907 begin = bfun->insn_offset;
7acbe133 908 end = begin + size - 1;
afedecd3 909
23a7fe75 910 ui_out_field_uint (uiout, "insn begin", begin);
112e8700 911 uiout->text (",");
23a7fe75 912 ui_out_field_uint (uiout, "insn end", end);
afedecd3
MM
913}
914
ce0dfbea
MM
915/* Compute the lowest and highest source line for the instructions in BFUN
916 and return them in PBEGIN and PEND.
917 Ignore instructions that can't be mapped to BFUN, e.g. instructions that
918 result from inlining or macro expansion. */
919
920static void
921btrace_compute_src_line_range (const struct btrace_function *bfun,
922 int *pbegin, int *pend)
923{
ce0dfbea
MM
924 struct symtab *symtab;
925 struct symbol *sym;
ce0dfbea
MM
926 int begin, end;
927
928 begin = INT_MAX;
929 end = INT_MIN;
930
931 sym = bfun->sym;
932 if (sym == NULL)
933 goto out;
934
935 symtab = symbol_symtab (sym);
936
0860c437 937 for (const btrace_insn &insn : bfun->insn)
ce0dfbea
MM
938 {
939 struct symtab_and_line sal;
940
0860c437 941 sal = find_pc_line (insn.pc, 0);
ce0dfbea
MM
942 if (sal.symtab != symtab || sal.line == 0)
943 continue;
944
325fac50
PA
945 begin = std::min (begin, sal.line);
946 end = std::max (end, sal.line);
ce0dfbea
MM
947 }
948
949 out:
950 *pbegin = begin;
951 *pend = end;
952}
953
afedecd3
MM
954/* Print the source line information for a function call history line. */
955
956static void
23a7fe75
MM
957btrace_call_history_src_line (struct ui_out *uiout,
958 const struct btrace_function *bfun)
afedecd3
MM
959{
960 struct symbol *sym;
23a7fe75 961 int begin, end;
afedecd3
MM
962
963 sym = bfun->sym;
964 if (sym == NULL)
965 return;
966
112e8700 967 uiout->field_string ("file",
08be3fe3 968 symtab_to_filename_for_display (symbol_symtab (sym)));
afedecd3 969
ce0dfbea 970 btrace_compute_src_line_range (bfun, &begin, &end);
23a7fe75 971 if (end < begin)
afedecd3
MM
972 return;
973
112e8700
SM
974 uiout->text (":");
975 uiout->field_int ("min line", begin);
afedecd3 976
23a7fe75 977 if (end == begin)
afedecd3
MM
978 return;
979
112e8700
SM
980 uiout->text (",");
981 uiout->field_int ("max line", end);
afedecd3
MM
982}
983
0b722aec
MM
984/* Get the name of a branch trace function. */
985
986static const char *
987btrace_get_bfun_name (const struct btrace_function *bfun)
988{
989 struct minimal_symbol *msym;
990 struct symbol *sym;
991
992 if (bfun == NULL)
993 return "??";
994
995 msym = bfun->msym;
996 sym = bfun->sym;
997
998 if (sym != NULL)
999 return SYMBOL_PRINT_NAME (sym);
1000 else if (msym != NULL)
efd66ac6 1001 return MSYMBOL_PRINT_NAME (msym);
0b722aec
MM
1002 else
1003 return "??";
1004}
1005
afedecd3
MM
1006/* Disassemble a section of the recorded function trace. */
1007
1008static void
23a7fe75 1009btrace_call_history (struct ui_out *uiout,
8710b709 1010 const struct btrace_thread_info *btinfo,
23a7fe75
MM
1011 const struct btrace_call_iterator *begin,
1012 const struct btrace_call_iterator *end,
8d297bbf 1013 int int_flags)
afedecd3 1014{
23a7fe75 1015 struct btrace_call_iterator it;
8d297bbf 1016 record_print_flags flags = (enum record_print_flag) int_flags;
afedecd3 1017
8d297bbf 1018 DEBUG ("ftrace (0x%x): [%u; %u)", int_flags, btrace_call_number (begin),
23a7fe75 1019 btrace_call_number (end));
afedecd3 1020
23a7fe75 1021 for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
afedecd3 1022 {
23a7fe75
MM
1023 const struct btrace_function *bfun;
1024 struct minimal_symbol *msym;
1025 struct symbol *sym;
1026
1027 bfun = btrace_call_get (&it);
23a7fe75 1028 sym = bfun->sym;
0b722aec 1029 msym = bfun->msym;
23a7fe75 1030
afedecd3 1031 /* Print the function index. */
23a7fe75 1032 ui_out_field_uint (uiout, "index", bfun->number);
112e8700 1033 uiout->text ("\t");
afedecd3 1034
31fd9caa
MM
1035 /* Indicate gaps in the trace. */
1036 if (bfun->errcode != 0)
1037 {
1038 const struct btrace_config *conf;
1039
1040 conf = btrace_conf (btinfo);
1041
1042 /* We have trace so we must have a configuration. */
1043 gdb_assert (conf != NULL);
1044
1045 btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);
1046
1047 continue;
1048 }
1049
8710b709
MM
1050 if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
1051 {
1052 int level = bfun->level + btinfo->level, i;
1053
1054 for (i = 0; i < level; ++i)
112e8700 1055 uiout->text (" ");
8710b709
MM
1056 }
1057
1058 if (sym != NULL)
112e8700 1059 uiout->field_string ("function", SYMBOL_PRINT_NAME (sym));
8710b709 1060 else if (msym != NULL)
112e8700
SM
1061 uiout->field_string ("function", MSYMBOL_PRINT_NAME (msym));
1062 else if (!uiout->is_mi_like_p ())
1063 uiout->field_string ("function", "??");
8710b709 1064
1e038f67 1065 if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
afedecd3 1066 {
112e8700 1067 uiout->text (_("\tinst "));
23a7fe75 1068 btrace_call_history_insn_range (uiout, bfun);
afedecd3
MM
1069 }
1070
1e038f67 1071 if ((flags & RECORD_PRINT_SRC_LINE) != 0)
afedecd3 1072 {
112e8700 1073 uiout->text (_("\tat "));
23a7fe75 1074 btrace_call_history_src_line (uiout, bfun);
afedecd3
MM
1075 }
1076
112e8700 1077 uiout->text ("\n");
afedecd3
MM
1078 }
1079}
1080
1081/* The to_call_history method of target record-btrace. */
1082
1083static void
0cb7c7b0
SM
1084record_btrace_call_history (struct target_ops *self, int size,
1085 record_print_flags flags)
afedecd3
MM
1086{
1087 struct btrace_thread_info *btinfo;
23a7fe75
MM
1088 struct btrace_call_history *history;
1089 struct btrace_call_iterator begin, end;
afedecd3 1090 struct ui_out *uiout;
23a7fe75 1091 unsigned int context, covered;
afedecd3
MM
1092
1093 uiout = current_uiout;
2e783024 1094 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
afedecd3 1095 context = abs (size);
afedecd3
MM
1096 if (context == 0)
1097 error (_("Bad record function-call-history-size."));
1098
23a7fe75
MM
1099 btinfo = require_btrace ();
1100 history = btinfo->call_history;
1101 if (history == NULL)
afedecd3 1102 {
07bbe694 1103 struct btrace_insn_iterator *replay;
afedecd3 1104
0cb7c7b0 1105 DEBUG ("call-history (0x%x): %d", (int) flags, size);
afedecd3 1106
07bbe694
MM
1107 /* If we're replaying, we start at the replay position. Otherwise, we
1108 start at the tail of the trace. */
1109 replay = btinfo->replay;
1110 if (replay != NULL)
1111 {
07bbe694 1112 begin.btinfo = btinfo;
a0f1b963 1113 begin.index = replay->call_index;
07bbe694
MM
1114 }
1115 else
1116 btrace_call_end (&begin, btinfo);
1117
1118 /* We start from here and expand in the requested direction. Then we
1119 expand in the other direction, as well, to fill up any remaining
1120 context. */
1121 end = begin;
1122 if (size < 0)
1123 {
1124 /* We want the current position covered, as well. */
1125 covered = btrace_call_next (&end, 1);
1126 covered += btrace_call_prev (&begin, context - covered);
1127 covered += btrace_call_next (&end, context - covered);
1128 }
1129 else
1130 {
1131 covered = btrace_call_next (&end, context);
1132 covered += btrace_call_prev (&begin, context - covered);
1133 }
afedecd3
MM
1134 }
1135 else
1136 {
23a7fe75
MM
1137 begin = history->begin;
1138 end = history->end;
afedecd3 1139
0cb7c7b0 1140 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", (int) flags, size,
23a7fe75 1141 btrace_call_number (&begin), btrace_call_number (&end));
afedecd3 1142
23a7fe75
MM
1143 if (size < 0)
1144 {
1145 end = begin;
1146 covered = btrace_call_prev (&begin, context);
1147 }
1148 else
1149 {
1150 begin = end;
1151 covered = btrace_call_next (&end, context);
1152 }
afedecd3
MM
1153 }
1154
23a7fe75 1155 if (covered > 0)
8710b709 1156 btrace_call_history (uiout, btinfo, &begin, &end, flags);
23a7fe75
MM
1157 else
1158 {
1159 if (size < 0)
1160 printf_unfiltered (_("At the start of the branch trace record.\n"));
1161 else
1162 printf_unfiltered (_("At the end of the branch trace record.\n"));
1163 }
afedecd3 1164
23a7fe75 1165 btrace_set_call_history (btinfo, &begin, &end);
afedecd3
MM
1166}
1167
1168/* The to_call_history_range method of target record-btrace. */
1169
1170static void
f0d960ea 1171record_btrace_call_history_range (struct target_ops *self,
8d297bbf 1172 ULONGEST from, ULONGEST to,
0cb7c7b0 1173 record_print_flags flags)
afedecd3
MM
1174{
1175 struct btrace_thread_info *btinfo;
23a7fe75 1176 struct btrace_call_iterator begin, end;
afedecd3 1177 struct ui_out *uiout;
23a7fe75
MM
1178 unsigned int low, high;
1179 int found;
afedecd3
MM
1180
1181 uiout = current_uiout;
2e783024 1182 ui_out_emit_tuple tuple_emitter (uiout, "func history");
23a7fe75
MM
1183 low = from;
1184 high = to;
afedecd3 1185
0cb7c7b0 1186 DEBUG ("call-history (0x%x): [%u; %u)", (int) flags, low, high);
afedecd3
MM
1187
1188 /* Check for wrap-arounds. */
23a7fe75 1189 if (low != from || high != to)
afedecd3
MM
1190 error (_("Bad range."));
1191
0688d04e 1192 if (high < low)
afedecd3
MM
1193 error (_("Bad range."));
1194
23a7fe75 1195 btinfo = require_btrace ();
afedecd3 1196
23a7fe75
MM
1197 found = btrace_find_call_by_number (&begin, btinfo, low);
1198 if (found == 0)
1199 error (_("Range out of bounds."));
afedecd3 1200
23a7fe75
MM
1201 found = btrace_find_call_by_number (&end, btinfo, high);
1202 if (found == 0)
0688d04e
MM
1203 {
1204 /* Silently truncate the range. */
1205 btrace_call_end (&end, btinfo);
1206 }
1207 else
1208 {
1209 /* We want both begin and end to be inclusive. */
1210 btrace_call_next (&end, 1);
1211 }
afedecd3 1212
8710b709 1213 btrace_call_history (uiout, btinfo, &begin, &end, flags);
23a7fe75 1214 btrace_set_call_history (btinfo, &begin, &end);
afedecd3
MM
1215}
1216
1217/* The to_call_history_from method of target record-btrace. */
1218
1219static void
ec0aea04 1220record_btrace_call_history_from (struct target_ops *self,
8d297bbf 1221 ULONGEST from, int size,
0cb7c7b0 1222 record_print_flags flags)
afedecd3
MM
1223{
1224 ULONGEST begin, end, context;
1225
1226 context = abs (size);
0688d04e
MM
1227 if (context == 0)
1228 error (_("Bad record function-call-history-size."));
afedecd3
MM
1229
1230 if (size < 0)
1231 {
1232 end = from;
1233
1234 if (from < context)
1235 begin = 0;
1236 else
0688d04e 1237 begin = from - context + 1;
afedecd3
MM
1238 }
1239 else
1240 {
1241 begin = from;
0688d04e 1242 end = from + context - 1;
afedecd3
MM
1243
1244 /* Check for wrap-around. */
1245 if (end < begin)
1246 end = ULONGEST_MAX;
1247 }
1248
f0d960ea 1249 record_btrace_call_history_range (self, begin, end, flags);
afedecd3
MM
1250}
1251
b158a20f
TW
1252/* The to_record_method method of target record-btrace. */
1253
1254static enum record_method
1255record_btrace_record_method (struct target_ops *self, ptid_t ptid)
1256{
b158a20f
TW
1257 struct thread_info * const tp = find_thread_ptid (ptid);
1258
1259 if (tp == NULL)
1260 error (_("No thread."));
1261
1262 if (tp->btrace.target == NULL)
1263 return RECORD_METHOD_NONE;
1264
1265 return RECORD_METHOD_BTRACE;
1266}
1267
07bbe694
MM
1268/* The to_record_is_replaying method of target record-btrace. */
1269
1270static int
a52eab48 1271record_btrace_is_replaying (struct target_ops *self, ptid_t ptid)
07bbe694
MM
1272{
1273 struct thread_info *tp;
1274
034f788c 1275 ALL_NON_EXITED_THREADS (tp)
a52eab48 1276 if (ptid_match (tp->ptid, ptid) && btrace_is_replaying (tp))
07bbe694
MM
1277 return 1;
1278
1279 return 0;
1280}
1281
7ff27e9b
MM
1282/* The to_record_will_replay method of target record-btrace. */
1283
1284static int
1285record_btrace_will_replay (struct target_ops *self, ptid_t ptid, int dir)
1286{
1287 return dir == EXEC_REVERSE || record_btrace_is_replaying (self, ptid);
1288}
1289
633785ff
MM
1290/* The to_xfer_partial method of target record-btrace. */
1291
9b409511 1292static enum target_xfer_status
633785ff
MM
1293record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
1294 const char *annex, gdb_byte *readbuf,
1295 const gdb_byte *writebuf, ULONGEST offset,
9b409511 1296 ULONGEST len, ULONGEST *xfered_len)
633785ff 1297{
633785ff 1298 /* Filter out requests that don't make sense during replay. */
67b5c0c1 1299 if (replay_memory_access == replay_memory_access_read_only
aef92902 1300 && !record_btrace_generating_corefile
4d10e986 1301 && record_btrace_is_replaying (ops, inferior_ptid))
633785ff
MM
1302 {
1303 switch (object)
1304 {
1305 case TARGET_OBJECT_MEMORY:
1306 {
1307 struct target_section *section;
1308
1309 /* We do not allow writing memory in general. */
1310 if (writebuf != NULL)
9b409511
YQ
1311 {
1312 *xfered_len = len;
bc113b4e 1313 return TARGET_XFER_UNAVAILABLE;
9b409511 1314 }
633785ff
MM
1315
1316 /* We allow reading readonly memory. */
1317 section = target_section_by_addr (ops, offset);
1318 if (section != NULL)
1319 {
1320 /* Check if the section we found is readonly. */
1321 if ((bfd_get_section_flags (section->the_bfd_section->owner,
1322 section->the_bfd_section)
1323 & SEC_READONLY) != 0)
1324 {
1325 /* Truncate the request to fit into this section. */
325fac50 1326 len = std::min (len, section->endaddr - offset);
633785ff
MM
1327 break;
1328 }
1329 }
1330
9b409511 1331 *xfered_len = len;
bc113b4e 1332 return TARGET_XFER_UNAVAILABLE;
633785ff
MM
1333 }
1334 }
1335 }
1336
1337 /* Forward the request. */
e75fdfca
TT
1338 ops = ops->beneath;
1339 return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
1340 offset, len, xfered_len);
633785ff
MM
1341}
1342
1343/* The to_insert_breakpoint method of target record-btrace. */
1344
1345static int
1346record_btrace_insert_breakpoint (struct target_ops *ops,
1347 struct gdbarch *gdbarch,
1348 struct bp_target_info *bp_tgt)
1349{
67b5c0c1
MM
1350 const char *old;
1351 int ret;
633785ff
MM
1352
1353 /* Inserting breakpoints requires accessing memory. Allow it for the
1354 duration of this function. */
67b5c0c1
MM
1355 old = replay_memory_access;
1356 replay_memory_access = replay_memory_access_read_write;
633785ff
MM
1357
1358 ret = 0;
492d29ea
PA
1359 TRY
1360 {
1361 ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);
1362 }
492d29ea
PA
1363 CATCH (except, RETURN_MASK_ALL)
1364 {
6c63c96a 1365 replay_memory_access = old;
492d29ea
PA
1366 throw_exception (except);
1367 }
1368 END_CATCH
6c63c96a 1369 replay_memory_access = old;
633785ff
MM
1370
1371 return ret;
1372}
1373
1374/* The to_remove_breakpoint method of target record-btrace. */
1375
1376static int
1377record_btrace_remove_breakpoint (struct target_ops *ops,
1378 struct gdbarch *gdbarch,
73971819
PA
1379 struct bp_target_info *bp_tgt,
1380 enum remove_bp_reason reason)
633785ff 1381{
67b5c0c1
MM
1382 const char *old;
1383 int ret;
633785ff
MM
1384
1385 /* Removing breakpoints requires accessing memory. Allow it for the
1386 duration of this function. */
67b5c0c1
MM
1387 old = replay_memory_access;
1388 replay_memory_access = replay_memory_access_read_write;
633785ff
MM
1389
1390 ret = 0;
492d29ea
PA
1391 TRY
1392 {
73971819
PA
1393 ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt,
1394 reason);
492d29ea 1395 }
492d29ea
PA
1396 CATCH (except, RETURN_MASK_ALL)
1397 {
6c63c96a 1398 replay_memory_access = old;
492d29ea
PA
1399 throw_exception (except);
1400 }
1401 END_CATCH
6c63c96a 1402 replay_memory_access = old;
633785ff
MM
1403
1404 return ret;
1405}
1406
1f3ef581
MM
1407/* The to_fetch_registers method of target record-btrace. */
1408
1409static void
1410record_btrace_fetch_registers (struct target_ops *ops,
1411 struct regcache *regcache, int regno)
1412{
1413 struct btrace_insn_iterator *replay;
1414 struct thread_info *tp;
1415
bcc0c096 1416 tp = find_thread_ptid (regcache_get_ptid (regcache));
1f3ef581
MM
1417 gdb_assert (tp != NULL);
1418
1419 replay = tp->btrace.replay;
aef92902 1420 if (replay != NULL && !record_btrace_generating_corefile)
1f3ef581
MM
1421 {
1422 const struct btrace_insn *insn;
1423 struct gdbarch *gdbarch;
1424 int pcreg;
1425
ac7936df 1426 gdbarch = regcache->arch ();
1f3ef581
MM
1427 pcreg = gdbarch_pc_regnum (gdbarch);
1428 if (pcreg < 0)
1429 return;
1430
1431 /* We can only provide the PC register. */
1432 if (regno >= 0 && regno != pcreg)
1433 return;
1434
1435 insn = btrace_insn_get (replay);
1436 gdb_assert (insn != NULL);
1437
1438 regcache_raw_supply (regcache, regno, &insn->pc);
1439 }
1440 else
1441 {
e75fdfca 1442 struct target_ops *t = ops->beneath;
1f3ef581 1443
e75fdfca 1444 t->to_fetch_registers (t, regcache, regno);
1f3ef581
MM
1445 }
1446}
1447
1448/* The to_store_registers method of target record-btrace. */
1449
1450static void
1451record_btrace_store_registers (struct target_ops *ops,
1452 struct regcache *regcache, int regno)
1453{
1454 struct target_ops *t;
1455
a52eab48 1456 if (!record_btrace_generating_corefile
bcc0c096 1457 && record_btrace_is_replaying (ops, regcache_get_ptid (regcache)))
4d10e986 1458 error (_("Cannot write registers while replaying."));
1f3ef581
MM
1459
1460 gdb_assert (may_write_registers != 0);
1461
e75fdfca
TT
1462 t = ops->beneath;
1463 t->to_store_registers (t, regcache, regno);
1f3ef581
MM
1464}
1465
1466/* The to_prepare_to_store method of target record-btrace. */
1467
1468static void
1469record_btrace_prepare_to_store (struct target_ops *ops,
1470 struct regcache *regcache)
1471{
1472 struct target_ops *t;
1473
a52eab48 1474 if (!record_btrace_generating_corefile
bcc0c096 1475 && record_btrace_is_replaying (ops, regcache_get_ptid (regcache)))
1f3ef581
MM
1476 return;
1477
e75fdfca
TT
1478 t = ops->beneath;
1479 t->to_prepare_to_store (t, regcache);
1f3ef581
MM
1480}
1481
0b722aec
MM
1482/* The branch trace frame cache. */
1483
1484struct btrace_frame_cache
1485{
1486 /* The thread. */
1487 struct thread_info *tp;
1488
1489 /* The frame info. */
1490 struct frame_info *frame;
1491
1492 /* The branch trace function segment. */
1493 const struct btrace_function *bfun;
1494};
1495
1496/* A struct btrace_frame_cache hash table indexed by NEXT. */
1497
1498static htab_t bfcache;
1499
1500/* hash_f for htab_create_alloc of bfcache. */
1501
1502static hashval_t
1503bfcache_hash (const void *arg)
1504{
19ba03f4
SM
1505 const struct btrace_frame_cache *cache
1506 = (const struct btrace_frame_cache *) arg;
0b722aec
MM
1507
1508 return htab_hash_pointer (cache->frame);
1509}
1510
1511/* eq_f for htab_create_alloc of bfcache. */
1512
1513static int
1514bfcache_eq (const void *arg1, const void *arg2)
1515{
19ba03f4
SM
1516 const struct btrace_frame_cache *cache1
1517 = (const struct btrace_frame_cache *) arg1;
1518 const struct btrace_frame_cache *cache2
1519 = (const struct btrace_frame_cache *) arg2;
0b722aec
MM
1520
1521 return cache1->frame == cache2->frame;
1522}
1523
1524/* Create a new btrace frame cache. */
1525
1526static struct btrace_frame_cache *
1527bfcache_new (struct frame_info *frame)
1528{
1529 struct btrace_frame_cache *cache;
1530 void **slot;
1531
1532 cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
1533 cache->frame = frame;
1534
1535 slot = htab_find_slot (bfcache, cache, INSERT);
1536 gdb_assert (*slot == NULL);
1537 *slot = cache;
1538
1539 return cache;
1540}
1541
1542/* Extract the branch trace function from a branch trace frame. */
1543
1544static const struct btrace_function *
1545btrace_get_frame_function (struct frame_info *frame)
1546{
1547 const struct btrace_frame_cache *cache;
0b722aec
MM
1548 struct btrace_frame_cache pattern;
1549 void **slot;
1550
1551 pattern.frame = frame;
1552
1553 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1554 if (slot == NULL)
1555 return NULL;
1556
19ba03f4 1557 cache = (const struct btrace_frame_cache *) *slot;
0b722aec
MM
1558 return cache->bfun;
1559}
1560
cecac1ab
MM
1561/* Implement stop_reason method for record_btrace_frame_unwind. */
1562
1563static enum unwind_stop_reason
1564record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1565 void **this_cache)
1566{
0b722aec
MM
1567 const struct btrace_frame_cache *cache;
1568 const struct btrace_function *bfun;
1569
19ba03f4 1570 cache = (const struct btrace_frame_cache *) *this_cache;
0b722aec
MM
1571 bfun = cache->bfun;
1572 gdb_assert (bfun != NULL);
1573
42bfe59e 1574 if (bfun->up == 0)
0b722aec
MM
1575 return UNWIND_UNAVAILABLE;
1576
1577 return UNWIND_NO_REASON;
cecac1ab
MM
1578}
1579
1580/* Implement this_id method for record_btrace_frame_unwind. */
1581
1582static void
1583record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
1584 struct frame_id *this_id)
1585{
0b722aec
MM
1586 const struct btrace_frame_cache *cache;
1587 const struct btrace_function *bfun;
4aeb0dfc 1588 struct btrace_call_iterator it;
0b722aec
MM
1589 CORE_ADDR code, special;
1590
19ba03f4 1591 cache = (const struct btrace_frame_cache *) *this_cache;
0b722aec
MM
1592
1593 bfun = cache->bfun;
1594 gdb_assert (bfun != NULL);
1595
4aeb0dfc
TW
1596 while (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->prev) != 0)
1597 bfun = btrace_call_get (&it);
0b722aec
MM
1598
1599 code = get_frame_func (this_frame);
1600 special = bfun->number;
1601
1602 *this_id = frame_id_build_unavailable_stack_special (code, special);
1603
1604 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1605 btrace_get_bfun_name (cache->bfun),
1606 core_addr_to_string_nz (this_id->code_addr),
1607 core_addr_to_string_nz (this_id->special_addr));
cecac1ab
MM
1608}
1609
1610/* Implement prev_register method for record_btrace_frame_unwind. */
1611
1612static struct value *
1613record_btrace_frame_prev_register (struct frame_info *this_frame,
1614 void **this_cache,
1615 int regnum)
1616{
0b722aec
MM
1617 const struct btrace_frame_cache *cache;
1618 const struct btrace_function *bfun, *caller;
42bfe59e 1619 struct btrace_call_iterator it;
0b722aec
MM
1620 struct gdbarch *gdbarch;
1621 CORE_ADDR pc;
1622 int pcreg;
1623
1624 gdbarch = get_frame_arch (this_frame);
1625 pcreg = gdbarch_pc_regnum (gdbarch);
1626 if (pcreg < 0 || regnum != pcreg)
1627 throw_error (NOT_AVAILABLE_ERROR,
1628 _("Registers are not available in btrace record history"));
1629
19ba03f4 1630 cache = (const struct btrace_frame_cache *) *this_cache;
0b722aec
MM
1631 bfun = cache->bfun;
1632 gdb_assert (bfun != NULL);
1633
42bfe59e 1634 if (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->up) == 0)
0b722aec
MM
1635 throw_error (NOT_AVAILABLE_ERROR,
1636 _("No caller in btrace record history"));
1637
42bfe59e
TW
1638 caller = btrace_call_get (&it);
1639
0b722aec 1640 if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
0860c437 1641 pc = caller->insn.front ().pc;
0b722aec
MM
1642 else
1643 {
0860c437 1644 pc = caller->insn.back ().pc;
0b722aec
MM
1645 pc += gdb_insn_length (gdbarch, pc);
1646 }
1647
1648 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1649 btrace_get_bfun_name (bfun), bfun->level,
1650 core_addr_to_string_nz (pc));
1651
1652 return frame_unwind_got_address (this_frame, regnum, pc);
cecac1ab
MM
1653}
1654
1655/* Implement sniffer method for record_btrace_frame_unwind. */
1656
1657static int
1658record_btrace_frame_sniffer (const struct frame_unwind *self,
1659 struct frame_info *this_frame,
1660 void **this_cache)
1661{
0b722aec
MM
1662 const struct btrace_function *bfun;
1663 struct btrace_frame_cache *cache;
cecac1ab 1664 struct thread_info *tp;
0b722aec 1665 struct frame_info *next;
cecac1ab
MM
1666
1667 /* THIS_FRAME does not contain a reference to its thread. */
1668 tp = find_thread_ptid (inferior_ptid);
1669 gdb_assert (tp != NULL);
1670
0b722aec
MM
1671 bfun = NULL;
1672 next = get_next_frame (this_frame);
1673 if (next == NULL)
1674 {
1675 const struct btrace_insn_iterator *replay;
1676
1677 replay = tp->btrace.replay;
1678 if (replay != NULL)
08c3f6d2 1679 bfun = &replay->btinfo->functions[replay->call_index];
0b722aec
MM
1680 }
1681 else
1682 {
1683 const struct btrace_function *callee;
42bfe59e 1684 struct btrace_call_iterator it;
0b722aec
MM
1685
1686 callee = btrace_get_frame_function (next);
42bfe59e
TW
1687 if (callee == NULL || (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
1688 return 0;
1689
1690 if (btrace_find_call_by_number (&it, &tp->btrace, callee->up) == 0)
1691 return 0;
1692
1693 bfun = btrace_call_get (&it);
0b722aec
MM
1694 }
1695
1696 if (bfun == NULL)
1697 return 0;
1698
1699 DEBUG ("[frame] sniffed frame for %s on level %d",
1700 btrace_get_bfun_name (bfun), bfun->level);
1701
1702 /* This is our frame. Initialize the frame cache. */
1703 cache = bfcache_new (this_frame);
1704 cache->tp = tp;
1705 cache->bfun = bfun;
1706
1707 *this_cache = cache;
1708 return 1;
1709}
1710
1711/* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1712
1713static int
1714record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
1715 struct frame_info *this_frame,
1716 void **this_cache)
1717{
1718 const struct btrace_function *bfun, *callee;
1719 struct btrace_frame_cache *cache;
42bfe59e 1720 struct btrace_call_iterator it;
0b722aec 1721 struct frame_info *next;
42bfe59e 1722 struct thread_info *tinfo;
0b722aec
MM
1723
1724 next = get_next_frame (this_frame);
1725 if (next == NULL)
1726 return 0;
1727
1728 callee = btrace_get_frame_function (next);
1729 if (callee == NULL)
1730 return 0;
1731
1732 if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1733 return 0;
1734
42bfe59e
TW
1735 tinfo = find_thread_ptid (inferior_ptid);
1736 if (btrace_find_call_by_number (&it, &tinfo->btrace, callee->up) == 0)
0b722aec
MM
1737 return 0;
1738
42bfe59e
TW
1739 bfun = btrace_call_get (&it);
1740
0b722aec
MM
1741 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1742 btrace_get_bfun_name (bfun), bfun->level);
1743
1744 /* This is our frame. Initialize the frame cache. */
1745 cache = bfcache_new (this_frame);
42bfe59e 1746 cache->tp = tinfo;
0b722aec
MM
1747 cache->bfun = bfun;
1748
1749 *this_cache = cache;
1750 return 1;
1751}
1752
1753static void
1754record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
1755{
1756 struct btrace_frame_cache *cache;
1757 void **slot;
1758
19ba03f4 1759 cache = (struct btrace_frame_cache *) this_cache;
0b722aec
MM
1760
1761 slot = htab_find_slot (bfcache, cache, NO_INSERT);
1762 gdb_assert (slot != NULL);
1763
1764 htab_remove_elt (bfcache, cache);
1765}
1766
1767/* btrace recording stores neither previous memory content nor the contents
 1768 of stack frames. Any unwinding would return erroneous results as the
 1769 stack contents no longer match the changed PC value restored from history.
 1770 Therefore this unwinder reports any possibly unwound registers as
 1771 <unavailable>. */
1772
0b722aec 1773const struct frame_unwind record_btrace_frame_unwind =
cecac1ab
MM
1774{
1775 NORMAL_FRAME,
1776 record_btrace_frame_unwind_stop_reason,
1777 record_btrace_frame_this_id,
1778 record_btrace_frame_prev_register,
1779 NULL,
0b722aec
MM
1780 record_btrace_frame_sniffer,
1781 record_btrace_frame_dealloc_cache
1782};
1783
1784const struct frame_unwind record_btrace_tailcall_frame_unwind =
1785{
1786 TAILCALL_FRAME,
1787 record_btrace_frame_unwind_stop_reason,
1788 record_btrace_frame_this_id,
1789 record_btrace_frame_prev_register,
1790 NULL,
1791 record_btrace_tailcall_frame_sniffer,
1792 record_btrace_frame_dealloc_cache
cecac1ab 1793};
b2f4cfde 1794
ac01945b
TT
1795/* Implement the to_get_unwinder method. */
1796
1797static const struct frame_unwind *
1798record_btrace_to_get_unwinder (struct target_ops *self)
1799{
1800 return &record_btrace_frame_unwind;
1801}
1802
1803/* Implement the to_get_tailcall_unwinder method. */
1804
1805static const struct frame_unwind *
1806record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
1807{
1808 return &record_btrace_tailcall_frame_unwind;
1809}
1810
987e68b1
MM
1811/* Return a human-readable string for FLAG. */
1812
1813static const char *
1814btrace_thread_flag_to_str (enum btrace_thread_flag flag)
1815{
1816 switch (flag)
1817 {
1818 case BTHR_STEP:
1819 return "step";
1820
1821 case BTHR_RSTEP:
1822 return "reverse-step";
1823
1824 case BTHR_CONT:
1825 return "cont";
1826
1827 case BTHR_RCONT:
1828 return "reverse-cont";
1829
1830 case BTHR_STOP:
1831 return "stop";
1832 }
1833
1834 return "<invalid>";
1835}
1836
52834460
MM
1837/* Indicate that TP should be resumed according to FLAG. */
1838
1839static void
1840record_btrace_resume_thread (struct thread_info *tp,
1841 enum btrace_thread_flag flag)
1842{
1843 struct btrace_thread_info *btinfo;
1844
43792cf0 1845 DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp),
987e68b1 1846 target_pid_to_str (tp->ptid), flag, btrace_thread_flag_to_str (flag));
52834460
MM
1847
1848 btinfo = &tp->btrace;
1849
52834460
MM
1850 /* Fetch the latest branch trace. */
1851 btrace_fetch (tp);
1852
0ca912df
MM
1853 /* A resume request overwrites a preceding resume or stop request. */
1854 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
52834460
MM
1855 btinfo->flags |= flag;
1856}
1857
ec71cc2f
MM
1858/* Get the current frame for TP. */
1859
1860static struct frame_info *
1861get_thread_current_frame (struct thread_info *tp)
1862{
1863 struct frame_info *frame;
1864 ptid_t old_inferior_ptid;
1865 int executing;
1866
1867 /* Set INFERIOR_PTID, which is implicitly used by get_current_frame. */
1868 old_inferior_ptid = inferior_ptid;
1869 inferior_ptid = tp->ptid;
1870
1871 /* Clear the executing flag to allow changes to the current frame.
1872 We are not actually running, yet. We just started a reverse execution
1873 command or a record goto command.
1874 For the latter, EXECUTING is false and this has no effect.
1875 For the former, EXECUTING is true and we're in to_wait, about to
1876 move the thread. Since we need to recompute the stack, we temporarily
 1877 set EXECUTING to false. */
1878 executing = is_executing (inferior_ptid);
1879 set_executing (inferior_ptid, 0);
1880
1881 frame = NULL;
1882 TRY
1883 {
1884 frame = get_current_frame ();
1885 }
1886 CATCH (except, RETURN_MASK_ALL)
1887 {
1888 /* Restore the previous execution state. */
1889 set_executing (inferior_ptid, executing);
1890
1891 /* Restore the previous inferior_ptid. */
1892 inferior_ptid = old_inferior_ptid;
1893
1894 throw_exception (except);
1895 }
1896 END_CATCH
1897
1898 /* Restore the previous execution state. */
1899 set_executing (inferior_ptid, executing);
1900
1901 /* Restore the previous inferior_ptid. */
1902 inferior_ptid = old_inferior_ptid;
1903
1904 return frame;
1905}
1906
52834460
MM
1907/* Start replaying a thread. */
1908
1909static struct btrace_insn_iterator *
1910record_btrace_start_replaying (struct thread_info *tp)
1911{
52834460
MM
1912 struct btrace_insn_iterator *replay;
1913 struct btrace_thread_info *btinfo;
52834460
MM
1914
1915 btinfo = &tp->btrace;
1916 replay = NULL;
1917
1918 /* We can't start replaying without trace. */
b54b03bd 1919 if (btinfo->functions.empty ())
52834460
MM
1920 return NULL;
1921
52834460
MM
1922 /* GDB stores the current frame_id when stepping in order to detect steps
1923 into subroutines.
1924 Since frames are computed differently when we're replaying, we need to
1925 recompute those stored frames and fix them up so we can still detect
1926 subroutines after we started replaying. */
492d29ea 1927 TRY
52834460
MM
1928 {
1929 struct frame_info *frame;
1930 struct frame_id frame_id;
1931 int upd_step_frame_id, upd_step_stack_frame_id;
1932
1933 /* The current frame without replaying - computed via normal unwind. */
ec71cc2f 1934 frame = get_thread_current_frame (tp);
52834460
MM
1935 frame_id = get_frame_id (frame);
1936
1937 /* Check if we need to update any stepping-related frame id's. */
1938 upd_step_frame_id = frame_id_eq (frame_id,
1939 tp->control.step_frame_id);
1940 upd_step_stack_frame_id = frame_id_eq (frame_id,
1941 tp->control.step_stack_frame_id);
1942
1943 /* We start replaying at the end of the branch trace. This corresponds
1944 to the current instruction. */
8d749320 1945 replay = XNEW (struct btrace_insn_iterator);
52834460
MM
1946 btrace_insn_end (replay, btinfo);
1947
31fd9caa
MM
1948 /* Skip gaps at the end of the trace. */
1949 while (btrace_insn_get (replay) == NULL)
1950 {
1951 unsigned int steps;
1952
1953 steps = btrace_insn_prev (replay, 1);
1954 if (steps == 0)
1955 error (_("No trace."));
1956 }
1957
52834460
MM
1958 /* We're not replaying, yet. */
1959 gdb_assert (btinfo->replay == NULL);
1960 btinfo->replay = replay;
1961
1962 /* Make sure we're not using any stale registers. */
1963 registers_changed_ptid (tp->ptid);
1964
1965 /* The current frame with replaying - computed via btrace unwind. */
ec71cc2f 1966 frame = get_thread_current_frame (tp);
52834460
MM
1967 frame_id = get_frame_id (frame);
1968
1969 /* Replace stepping related frames where necessary. */
1970 if (upd_step_frame_id)
1971 tp->control.step_frame_id = frame_id;
1972 if (upd_step_stack_frame_id)
1973 tp->control.step_stack_frame_id = frame_id;
1974 }
492d29ea 1975 CATCH (except, RETURN_MASK_ALL)
52834460
MM
1976 {
1977 xfree (btinfo->replay);
1978 btinfo->replay = NULL;
1979
1980 registers_changed_ptid (tp->ptid);
1981
1982 throw_exception (except);
1983 }
492d29ea 1984 END_CATCH
52834460
MM
1985
1986 return replay;
1987}
1988
1989/* Stop replaying a thread. */
1990
1991static void
1992record_btrace_stop_replaying (struct thread_info *tp)
1993{
1994 struct btrace_thread_info *btinfo;
1995
1996 btinfo = &tp->btrace;
1997
1998 xfree (btinfo->replay);
1999 btinfo->replay = NULL;
2000
2001 /* Make sure we're not leaving any stale registers. */
2002 registers_changed_ptid (tp->ptid);
2003}
2004
e3cfc1c7
MM
2005/* Stop replaying TP if it is at the end of its execution history. */
2006
2007static void
2008record_btrace_stop_replaying_at_end (struct thread_info *tp)
2009{
2010 struct btrace_insn_iterator *replay, end;
2011 struct btrace_thread_info *btinfo;
2012
2013 btinfo = &tp->btrace;
2014 replay = btinfo->replay;
2015
2016 if (replay == NULL)
2017 return;
2018
2019 btrace_insn_end (&end, btinfo);
2020
2021 if (btrace_insn_cmp (replay, &end) == 0)
2022 record_btrace_stop_replaying (tp);
2023}
2024
b2f4cfde
MM
2025/* The to_resume method of target record-btrace. */
2026
2027static void
2028record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
2029 enum gdb_signal signal)
2030{
0ca912df 2031 struct thread_info *tp;
d2939ba2 2032 enum btrace_thread_flag flag, cflag;
52834460 2033
987e68b1
MM
2034 DEBUG ("resume %s: %s%s", target_pid_to_str (ptid),
2035 execution_direction == EXEC_REVERSE ? "reverse-" : "",
2036 step ? "step" : "cont");
52834460 2037
0ca912df
MM
2038 /* Store the execution direction of the last resume.
2039
2040 If there is more than one to_resume call, we have to rely on infrun
2041 to not change the execution direction in-between. */
70ad5bff
MM
2042 record_btrace_resume_exec_dir = execution_direction;
2043
0ca912df 2044 /* As long as we're not replaying, just forward the request.
52834460 2045
0ca912df
MM
2046 For non-stop targets this means that no thread is replaying. In order to
2047 make progress, we may need to explicitly move replaying threads to the end
2048 of their execution history. */
a52eab48
MM
2049 if ((execution_direction != EXEC_REVERSE)
2050 && !record_btrace_is_replaying (ops, minus_one_ptid))
b2f4cfde 2051 {
e75fdfca 2052 ops = ops->beneath;
04c4fe8c
MM
2053 ops->to_resume (ops, ptid, step, signal);
2054 return;
b2f4cfde
MM
2055 }
2056
52834460 2057 /* Compute the btrace thread flag for the requested move. */
d2939ba2
MM
2058 if (execution_direction == EXEC_REVERSE)
2059 {
2060 flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;
2061 cflag = BTHR_RCONT;
2062 }
52834460 2063 else
d2939ba2
MM
2064 {
2065 flag = step == 0 ? BTHR_CONT : BTHR_STEP;
2066 cflag = BTHR_CONT;
2067 }
52834460 2068
52834460 2069 /* We just indicate the resume intent here. The actual stepping happens in
d2939ba2
MM
2070 record_btrace_wait below.
2071
2072 For all-stop targets, we only step INFERIOR_PTID and continue others. */
2073 if (!target_is_non_stop_p ())
2074 {
2075 gdb_assert (ptid_match (inferior_ptid, ptid));
2076
2077 ALL_NON_EXITED_THREADS (tp)
2078 if (ptid_match (tp->ptid, ptid))
2079 {
2080 if (ptid_match (tp->ptid, inferior_ptid))
2081 record_btrace_resume_thread (tp, flag);
2082 else
2083 record_btrace_resume_thread (tp, cflag);
2084 }
2085 }
2086 else
2087 {
2088 ALL_NON_EXITED_THREADS (tp)
2089 if (ptid_match (tp->ptid, ptid))
2090 record_btrace_resume_thread (tp, flag);
2091 }
70ad5bff
MM
2092
2093 /* Async support. */
2094 if (target_can_async_p ())
2095 {
6a3753b3 2096 target_async (1);
70ad5bff
MM
2097 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2098 }
52834460
MM
2099}
2100
85ad3aaf
PA
2101/* The to_commit_resume method of target record-btrace. */
2102
2103static void
2104record_btrace_commit_resume (struct target_ops *ops)
2105{
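  /* Only forward the commit if the resume itself was forwarded, i.e. we are
     moving forward and no thread is replaying.  Replay stepping is handled
     entirely within record-btrace.  */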
2106 if ((execution_direction != EXEC_REVERSE)
2107 && !record_btrace_is_replaying (ops, minus_one_ptid))
2108 ops->beneath->to_commit_resume (ops->beneath);
2109}
2110
987e68b1
MM
2111/* Cancel resuming TP. */
2112
2113static void
2114record_btrace_cancel_resume (struct thread_info *tp)
2115{
2116 enum btrace_thread_flag flags;
2117
2118 flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
2119 if (flags == 0)
2120 return;
2121
43792cf0
PA
2122 DEBUG ("cancel resume thread %s (%s): %x (%s)",
2123 print_thread_id (tp),
987e68b1
MM
2124 target_pid_to_str (tp->ptid), flags,
2125 btrace_thread_flag_to_str (flags));
2126
2127 tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
e3cfc1c7 2128 record_btrace_stop_replaying_at_end (tp);
52834460
MM
2129}
2130
2131/* Return a target_waitstatus indicating that we ran out of history. */
2132
2133static struct target_waitstatus
2134btrace_step_no_history (void)
2135{
2136 struct target_waitstatus status;
2137
2138 status.kind = TARGET_WAITKIND_NO_HISTORY;
2139
2140 return status;
2141}
2142
2143/* Return a target_waitstatus indicating that a step finished. */
2144
2145static struct target_waitstatus
2146btrace_step_stopped (void)
2147{
2148 struct target_waitstatus status;
2149
2150 status.kind = TARGET_WAITKIND_STOPPED;
2151 status.value.sig = GDB_SIGNAL_TRAP;
2152
2153 return status;
2154}
2155
6e4879f0
MM
2156/* Return a target_waitstatus indicating that a thread was stopped as
2157 requested. */
2158
2159static struct target_waitstatus
2160btrace_step_stopped_on_request (void)
2161{
2162 struct target_waitstatus status;
2163
2164 status.kind = TARGET_WAITKIND_STOPPED;
2165 status.value.sig = GDB_SIGNAL_0;
2166
2167 return status;
2168}
2169
d825d248
MM
2170/* Return a target_waitstatus indicating a spurious stop. */
2171
2172static struct target_waitstatus
2173btrace_step_spurious (void)
2174{
2175 struct target_waitstatus status;
2176
2177 status.kind = TARGET_WAITKIND_SPURIOUS;
2178
2179 return status;
2180}
2181
e3cfc1c7
MM
2182/* Return a target_waitstatus indicating that the thread was not resumed. */
2183
2184static struct target_waitstatus
2185btrace_step_no_resumed (void)
2186{
2187 struct target_waitstatus status;
2188
2189 status.kind = TARGET_WAITKIND_NO_RESUMED;
2190
2191 return status;
2192}
2193
2194/* Return a target_waitstatus indicating that we should wait again. */
2195
2196static struct target_waitstatus
2197btrace_step_again (void)
2198{
2199 struct target_waitstatus status;
2200
2201 status.kind = TARGET_WAITKIND_IGNORE;
2202
2203 return status;
2204}
2205
52834460
MM
2206/* Clear the record histories. */
2207
2208static void
2209record_btrace_clear_histories (struct btrace_thread_info *btinfo)
2210{
2211 xfree (btinfo->insn_history);
2212 xfree (btinfo->call_history);
2213
2214 btinfo->insn_history = NULL;
2215 btinfo->call_history = NULL;
2216}
2217
3c615f99
MM
2218/* Check whether TP's current replay position is at a breakpoint. */
2219
2220static int
2221record_btrace_replay_at_breakpoint (struct thread_info *tp)
2222{
2223 struct btrace_insn_iterator *replay;
2224 struct btrace_thread_info *btinfo;
2225 const struct btrace_insn *insn;
2226 struct inferior *inf;
2227
2228 btinfo = &tp->btrace;
2229 replay = btinfo->replay;
2230
2231 if (replay == NULL)
2232 return 0;
2233
2234 insn = btrace_insn_get (replay);
2235 if (insn == NULL)
2236 return 0;
2237
2238 inf = find_inferior_ptid (tp->ptid);
2239 if (inf == NULL)
2240 return 0;
2241
2242 return record_check_stopped_by_breakpoint (inf->aspace, insn->pc,
2243 &btinfo->stop_reason);
2244}
2245
d825d248 2246/* Step one instruction in forward direction. */
52834460
MM
2247
2248static struct target_waitstatus
d825d248 2249record_btrace_single_step_forward (struct thread_info *tp)
52834460 2250{
b61ce85c 2251 struct btrace_insn_iterator *replay, end, start;
52834460 2252 struct btrace_thread_info *btinfo;
52834460 2253
d825d248
MM
2254 btinfo = &tp->btrace;
2255 replay = btinfo->replay;
2256
2257 /* We're done if we're not replaying. */
2258 if (replay == NULL)
2259 return btrace_step_no_history ();
2260
011c71b6
MM
2261 /* Check if we're stepping a breakpoint. */
2262 if (record_btrace_replay_at_breakpoint (tp))
2263 return btrace_step_stopped ();
2264
b61ce85c
MM
2265 /* Skip gaps during replay. If we end up at a gap (at the end of the trace),
2266 jump back to the instruction at which we started. */
2267 start = *replay;
d825d248
MM
2268 do
2269 {
2270 unsigned int steps;
2271
e3cfc1c7
MM
2272 /* We will bail out here if we continue stepping after reaching the end
2273 of the execution history. */
d825d248
MM
2274 steps = btrace_insn_next (replay, 1);
2275 if (steps == 0)
b61ce85c
MM
2276 {
2277 *replay = start;
2278 return btrace_step_no_history ();
2279 }
d825d248
MM
2280 }
2281 while (btrace_insn_get (replay) == NULL);
2282
2283 /* Determine the end of the instruction trace. */
2284 btrace_insn_end (&end, btinfo);
2285
e3cfc1c7
MM
2286 /* The execution trace contains (and ends with) the current instruction.
2287 This instruction has not been executed, yet, so the trace really ends
2288 one instruction earlier. */
d825d248 2289 if (btrace_insn_cmp (replay, &end) == 0)
e3cfc1c7 2290 return btrace_step_no_history ();
d825d248
MM
2291
2292 return btrace_step_spurious ();
2293}
2294
2295/* Step one instruction in backward direction. */
2296
2297static struct target_waitstatus
2298record_btrace_single_step_backward (struct thread_info *tp)
2299{
b61ce85c 2300 struct btrace_insn_iterator *replay, start;
d825d248 2301 struct btrace_thread_info *btinfo;
e59fa00f 2302
52834460
MM
2303 btinfo = &tp->btrace;
2304 replay = btinfo->replay;
2305
d825d248
MM
2306 /* Start replaying if we're not already doing so. */
2307 if (replay == NULL)
2308 replay = record_btrace_start_replaying (tp);
2309
2310 /* If we can't step any further, we reached the end of the history.
b61ce85c
MM
2311 Skip gaps during replay. If we end up at a gap (at the beginning of
2312 the trace), jump back to the instruction at which we started. */
2313 start = *replay;
d825d248
MM
2314 do
2315 {
2316 unsigned int steps;
2317
2318 steps = btrace_insn_prev (replay, 1);
2319 if (steps == 0)
b61ce85c
MM
2320 {
2321 *replay = start;
2322 return btrace_step_no_history ();
2323 }
d825d248
MM
2324 }
2325 while (btrace_insn_get (replay) == NULL);
2326
011c71b6
MM
2327 /* Check if we're stepping a breakpoint.
2328
2329 For reverse-stepping, this check is after the step. There is logic in
2330 infrun.c that handles reverse-stepping separately. See, for example,
2331 proceed and adjust_pc_after_break.
2332
2333 This code assumes that for reverse-stepping, PC points to the last
2334 de-executed instruction, whereas for forward-stepping PC points to the
2335 next to-be-executed instruction. */
2336 if (record_btrace_replay_at_breakpoint (tp))
2337 return btrace_step_stopped ();
2338
d825d248
MM
2339 return btrace_step_spurious ();
2340}
2341
2342/* Step a single thread. */
2343
2344static struct target_waitstatus
2345record_btrace_step_thread (struct thread_info *tp)
2346{
2347 struct btrace_thread_info *btinfo;
2348 struct target_waitstatus status;
2349 enum btrace_thread_flag flags;
2350
2351 btinfo = &tp->btrace;
2352
6e4879f0
MM
2353 flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
2354 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
52834460 2355
43792cf0 2356 DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp),
987e68b1
MM
2357 target_pid_to_str (tp->ptid), flags,
2358 btrace_thread_flag_to_str (flags));
52834460 2359
6e4879f0
MM
2360 /* We can't step without an execution history. */
2361 if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
2362 return btrace_step_no_history ();
2363
52834460
MM
2364 switch (flags)
2365 {
2366 default:
2367 internal_error (__FILE__, __LINE__, _("invalid stepping type."));
2368
6e4879f0
MM
2369 case BTHR_STOP:
2370 return btrace_step_stopped_on_request ();
2371
52834460 2372 case BTHR_STEP:
d825d248
MM
2373 status = record_btrace_single_step_forward (tp);
2374 if (status.kind != TARGET_WAITKIND_SPURIOUS)
e3cfc1c7 2375 break;
52834460
MM
2376
2377 return btrace_step_stopped ();
2378
2379 case BTHR_RSTEP:
d825d248
MM
2380 status = record_btrace_single_step_backward (tp);
2381 if (status.kind != TARGET_WAITKIND_SPURIOUS)
e3cfc1c7 2382 break;
52834460
MM
2383
2384 return btrace_step_stopped ();
2385
2386 case BTHR_CONT:
e3cfc1c7
MM
2387 status = record_btrace_single_step_forward (tp);
2388 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2389 break;
52834460 2390
e3cfc1c7
MM
2391 btinfo->flags |= flags;
2392 return btrace_step_again ();
52834460
MM
2393
2394 case BTHR_RCONT:
e3cfc1c7
MM
2395 status = record_btrace_single_step_backward (tp);
2396 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2397 break;
52834460 2398
e3cfc1c7
MM
2399 btinfo->flags |= flags;
2400 return btrace_step_again ();
2401 }
d825d248 2402
e3cfc1c7
MM
2403 /* We keep threads moving at the end of their execution history. The to_wait
2404 method will stop the thread for which the event is reported. */
2405 if (status.kind == TARGET_WAITKIND_NO_HISTORY)
2406 btinfo->flags |= flags;
52834460 2407
e3cfc1c7 2408 return status;
b2f4cfde
MM
2409}
2410
e3cfc1c7
MM
2411/* A vector of threads. */
2412
2413typedef struct thread_info * tp_t;
2414DEF_VEC_P (tp_t);
2415
a6b5be76
MM
2416/* Announce further events if necessary. */
2417
2418static void
53127008
SM
2419record_btrace_maybe_mark_async_event
2420 (const std::vector<thread_info *> &moving,
2421 const std::vector<thread_info *> &no_history)
a6b5be76 2422{
53127008
SM
2423 bool more_moving = !moving.empty ();
2424 bool more_no_history = !no_history.empty ();
a6b5be76
MM
2425
2426 if (!more_moving && !more_no_history)
2427 return;
2428
2429 if (more_moving)
2430 DEBUG ("movers pending");
2431
2432 if (more_no_history)
2433 DEBUG ("no-history pending");
2434
2435 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2436}
2437
b2f4cfde
MM
2438/* The to_wait method of target record-btrace. */
2439
2440static ptid_t
2441record_btrace_wait (struct target_ops *ops, ptid_t ptid,
2442 struct target_waitstatus *status, int options)
2443{
53127008
SM
2444 std::vector<thread_info *> moving;
2445 std::vector<thread_info *> no_history;
52834460
MM
2446
2447 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);
2448
b2f4cfde 2449 /* As long as we're not replaying, just forward the request. */
a52eab48
MM
2450 if ((execution_direction != EXEC_REVERSE)
2451 && !record_btrace_is_replaying (ops, minus_one_ptid))
b2f4cfde 2452 {
e75fdfca
TT
2453 ops = ops->beneath;
2454 return ops->to_wait (ops, ptid, status, options);
b2f4cfde
MM
2455 }
2456
e3cfc1c7 2457 /* Keep a work list of moving threads. */
53127008
SM
2458 {
2459 thread_info *tp;
2460
2461 ALL_NON_EXITED_THREADS (tp)
2462 {
2463 if (ptid_match (tp->ptid, ptid)
2464 && ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0))
2465 moving.push_back (tp);
2466 }
2467 }
e3cfc1c7 2468
53127008 2469 if (moving.empty ())
52834460 2470 {
e3cfc1c7 2471 *status = btrace_step_no_resumed ();
52834460 2472
e3cfc1c7 2473 DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid),
23fdd69e 2474 target_waitstatus_to_string (status).c_str ());
e3cfc1c7 2475
e3cfc1c7 2476 return null_ptid;
52834460
MM
2477 }
2478
e3cfc1c7
MM
2479 /* Step moving threads one by one, one step each, until either one thread
2480 reports an event or we run out of threads to step.
2481
2482 When stepping more than one thread, chances are that some threads reach
2483 the end of their execution history earlier than others. If we reported
2484 this immediately, all-stop on top of non-stop would stop all threads and
2485 resume the same threads next time. And we would report the same thread
2486 having reached the end of its execution history again.
2487
2488 In the worst case, this would starve the other threads. But even if other
2489 threads would be allowed to make progress, this would result in far too
2490 many intermediate stops.
2491
2492 We therefore delay the reporting of "no execution history" until we have
2493 nothing else to report. By this time, all threads should have moved to
2494 either the beginning or the end of their execution history. There will
2495 be a single user-visible stop. */
53127008
SM
2496 struct thread_info *eventing = NULL;
2497 while ((eventing == NULL) && !moving.empty ())
e3cfc1c7 2498 {
53127008 2499 for (unsigned int ix = 0; eventing == NULL && ix < moving.size ();)
e3cfc1c7 2500 {
53127008
SM
2501 thread_info *tp = moving[ix];
2502
e3cfc1c7
MM
2503 *status = record_btrace_step_thread (tp);
2504
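 /* IGNORE means the thread wants to move again, so it stays in the
    work list.  NO_HISTORY parks the thread for a deferred report.
    Any other status becomes the event we report.  */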
2505 switch (status->kind)
2506 {
2507 case TARGET_WAITKIND_IGNORE:
2508 ix++;
2509 break;
2510
2511 case TARGET_WAITKIND_NO_HISTORY:
53127008 2512 no_history.push_back (ordered_remove (moving, ix));
e3cfc1c7
MM
2513 break;
2514
2515 default:
53127008 2516 eventing = unordered_remove (moving, ix);
e3cfc1c7
MM
2517 break;
2518 }
2519 }
2520 }
2521
2522 if (eventing == NULL)
2523 {
2524 /* We started with at least one moving thread. This thread must have
2525 either stopped or reached the end of its execution history.
2526
2527 In the former case, EVENTING must not be NULL.
2528 In the latter case, NO_HISTORY must not be empty. */
53127008 2529 gdb_assert (!no_history.empty ());
e3cfc1c7
MM
2530
2531 /* We kept threads moving at the end of their execution history. Stop
2532 EVENTING now that we are going to report its stop. */
53127008 2533 eventing = unordered_remove (no_history, 0);
e3cfc1c7
MM
2534 eventing->btrace.flags &= ~BTHR_MOVE;
2535
2536 *status = btrace_step_no_history ();
2537 }
2538
2539 gdb_assert (eventing != NULL);
2540
2541 /* We kept threads replaying at the end of their execution history. Stop
2542 replaying EVENTING now that we are going to report its stop. */
2543 record_btrace_stop_replaying_at_end (eventing);
52834460
MM
2544
2545 /* Stop all other threads. */
5953356c 2546 if (!target_is_non_stop_p ())
53127008
SM
2547 {
2548 thread_info *tp;
2549
2550 ALL_NON_EXITED_THREADS (tp)
2551 record_btrace_cancel_resume (tp);
2552 }
52834460 2553
a6b5be76
MM
2554 /* In async mode, we need to announce further events. */
2555 if (target_is_async_p ())
2556 record_btrace_maybe_mark_async_event (moving, no_history);
2557
52834460 2558 /* Start record histories anew from the current position. */
e3cfc1c7 2559 record_btrace_clear_histories (&eventing->btrace);
52834460
MM
2560
2561 /* We moved the replay position but did not update registers. */
e3cfc1c7
MM
2562 registers_changed_ptid (eventing->ptid);
2563
43792cf0
PA
2564 DEBUG ("wait ended by thread %s (%s): %s",
2565 print_thread_id (eventing),
e3cfc1c7 2566 target_pid_to_str (eventing->ptid),
23fdd69e 2567 target_waitstatus_to_string (status).c_str ());
52834460 2568
e3cfc1c7 2569 return eventing->ptid;
52834460
MM
2570}
2571
6e4879f0
MM
2572/* The to_stop method of target record-btrace. */
2573
2574static void
2575record_btrace_stop (struct target_ops *ops, ptid_t ptid)
2576{
2577 DEBUG ("stop %s", target_pid_to_str (ptid));
2578
2579 /* As long as we're not replaying, just forward the request. */
a52eab48
MM
2580 if ((execution_direction != EXEC_REVERSE)
2581 && !record_btrace_is_replaying (ops, minus_one_ptid))
6e4879f0
MM
2582 {
2583 ops = ops->beneath;
2584 ops->to_stop (ops, ptid);
2585 }
2586 else
2587 {
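 /* Threads are moved by record-btrace itself while replaying or executing
    in reverse.  Request a stop; record_btrace_step_thread will report it
    as stopped on request.  */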
2588 struct thread_info *tp;
2589
2590 ALL_NON_EXITED_THREADS (tp)
2591 if (ptid_match (tp->ptid, ptid))
2592 {
2593 tp->btrace.flags &= ~BTHR_MOVE;
2594 tp->btrace.flags |= BTHR_STOP;
2595 }
2596 }
2597 }
2598
52834460
MM
2599/* The to_can_execute_reverse method of target record-btrace. */
2600
2601static int
19db3e69 2602record_btrace_can_execute_reverse (struct target_ops *self)
52834460
MM
2603{
2604 return 1;
2605}
2606
9e8915c6 2607/* The to_stopped_by_sw_breakpoint method of target record-btrace. */
52834460 2608
9e8915c6
PA
2609static int
2610record_btrace_stopped_by_sw_breakpoint (struct target_ops *ops)
52834460 2611{
a52eab48 2612 if (record_btrace_is_replaying (ops, minus_one_ptid))
9e8915c6
PA
2613 {
2614 struct thread_info *tp = inferior_thread ();
2615
2616 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2617 }
2618
2619 return ops->beneath->to_stopped_by_sw_breakpoint (ops->beneath);
2620}
2621
2622/* The to_supports_stopped_by_sw_breakpoint method of target
2623 record-btrace. */
2624
2625static int
2626record_btrace_supports_stopped_by_sw_breakpoint (struct target_ops *ops)
2627{
a52eab48 2628 if (record_btrace_is_replaying (ops, minus_one_ptid))
9e8915c6
PA
2629 return 1;
2630
2631 return ops->beneath->to_supports_stopped_by_sw_breakpoint (ops->beneath);
2632}
2633
2634/* The to_stopped_by_hw_breakpoint method of target record-btrace. */
2635
2636static int
2637record_btrace_stopped_by_hw_breakpoint (struct target_ops *ops)
2638{
a52eab48 2639 if (record_btrace_is_replaying (ops, minus_one_ptid))
9e8915c6
PA
2640 {
2641 struct thread_info *tp = inferior_thread ();
2642
2643 return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2644 }
2645
2646 return ops->beneath->to_stopped_by_hw_breakpoint (ops->beneath);
2647}
2648
2649/* The to_supports_stopped_by_hw_breakpoint method of target
2650 record-btrace. */
2651
2652static int
2653record_btrace_supports_stopped_by_hw_breakpoint (struct target_ops *ops)
2654{
a52eab48 2655 if (record_btrace_is_replaying (ops, minus_one_ptid))
9e8915c6 2656 return 1;
52834460 2657
9e8915c6 2658 return ops->beneath->to_supports_stopped_by_hw_breakpoint (ops->beneath);
b2f4cfde
MM
2659}
2660
e8032dde 2661/* The to_update_thread_list method of target record-btrace. */
e2887aa3
MM
2662
2663static void
e8032dde 2664record_btrace_update_thread_list (struct target_ops *ops)
e2887aa3 2665{
e8032dde 2666 /* We don't add or remove threads during replay. */
a52eab48 2667 if (record_btrace_is_replaying (ops, minus_one_ptid))
e2887aa3
MM
2668 return;
2669
2670 /* Forward the request. */
e75fdfca 2671 ops = ops->beneath;
e8032dde 2672 ops->to_update_thread_list (ops);
e2887aa3
MM
2673}
2674
2675/* The to_thread_alive method of target record-btrace. */
2676
2677static int
2678record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
2679{
2680 /* We don't add or remove threads during replay. */
a52eab48 2681 if (record_btrace_is_replaying (ops, minus_one_ptid))
e2887aa3
MM
2682 return find_thread_ptid (ptid) != NULL;
2683
2684 /* Forward the request. */
e75fdfca
TT
2685 ops = ops->beneath;
2686 return ops->to_thread_alive (ops, ptid);
e2887aa3
MM
2687}
2688
066ce621
MM
2689/* Set the replay branch trace instruction iterator. If IT is NULL, replay
2690 is stopped. */
2691
2692static void
2693record_btrace_set_replay (struct thread_info *tp,
2694 const struct btrace_insn_iterator *it)
2695{
2696 struct btrace_thread_info *btinfo;
2697
2698 btinfo = &tp->btrace;
2699
a0f1b963 2700 if (it == NULL)
52834460 2701 record_btrace_stop_replaying (tp);
066ce621
MM
2702 else
2703 {
2704 if (btinfo->replay == NULL)
52834460 2705 record_btrace_start_replaying (tp);
066ce621
MM
2706 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
2707 return;
2708
2709 *btinfo->replay = *it;
52834460 2710 registers_changed_ptid (tp->ptid);
066ce621
MM
2711 }
2712
52834460
MM
2713 /* Start anew from the new replay position. */
2714 record_btrace_clear_histories (btinfo);
485668e5
MM
2715
2716 stop_pc = regcache_read_pc (get_current_regcache ());
2717 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
066ce621
MM
2718}
2719
2720/* The to_goto_record_begin method of target record-btrace. */
2721
2722static void
08475817 2723record_btrace_goto_begin (struct target_ops *self)
066ce621
MM
2724{
2725 struct thread_info *tp;
2726 struct btrace_insn_iterator begin;
2727
2728 tp = require_btrace_thread ();
2729
2730 btrace_insn_begin (&begin, &tp->btrace);
b61ce85c
MM
2731
2732 /* Skip gaps at the beginning of the trace. */
2733 while (btrace_insn_get (&begin) == NULL)
2734 {
2735 unsigned int steps;
2736
2737 steps = btrace_insn_next (&begin, 1);
2738 if (steps == 0)
2739 error (_("No trace."));
2740 }
2741
066ce621 2742 record_btrace_set_replay (tp, &begin);
066ce621
MM
2743}
2744
2745/* The to_goto_record_end method of target record-btrace. */
2746
2747static void
307a1b91 2748record_btrace_goto_end (struct target_ops *ops)
066ce621
MM
2749{
2750 struct thread_info *tp;
2751
2752 tp = require_btrace_thread ();
2753
2754 record_btrace_set_replay (tp, NULL);
066ce621
MM
2755}
2756
2757/* The to_goto_record method of target record-btrace. */
2758
2759static void
606183ac 2760record_btrace_goto (struct target_ops *self, ULONGEST insn)
066ce621
MM
2761{
2762 struct thread_info *tp;
2763 struct btrace_insn_iterator it;
2764 unsigned int number;
2765 int found;
2766
2767 number = insn;
2768
2769 /* Check for wrap-arounds. */
2770 if (number != insn)
2771 error (_("Instruction number out of range."));
2772
2773 tp = require_btrace_thread ();
2774
2775 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
69090cee
TW
2776
2777 /* Check if the instruction could not be found or is a gap. */
2778 if (found == 0 || btrace_insn_get (&it) == NULL)
066ce621
MM
2779 error (_("No such instruction."));
2780
2781 record_btrace_set_replay (tp, &it);
066ce621
MM
2782}
2783
797094dd
MM
2784/* The to_record_stop_replaying method of target record-btrace. */
2785
2786static void
2787record_btrace_stop_replaying_all (struct target_ops *self)
2788{
2789 struct thread_info *tp;
2790
2791 ALL_NON_EXITED_THREADS (tp)
2792 record_btrace_stop_replaying (tp);
2793}
2794
70ad5bff
MM
2795/* The to_execution_direction target method. */
2796
2797static enum exec_direction_kind
2798record_btrace_execution_direction (struct target_ops *self)
2799{
2800 return record_btrace_resume_exec_dir;
2801}
2802
aef92902
MM
2803/* The to_prepare_to_generate_core target method. */
2804
2805static void
2806record_btrace_prepare_to_generate_core (struct target_ops *self)
2807{
2808 record_btrace_generating_corefile = 1;
2809}
2810
2811/* The to_done_generating_core target method. */
2812
2813static void
2814record_btrace_done_generating_core (struct target_ops *self)
2815{
2816 record_btrace_generating_corefile = 0;
2817}
2818
afedecd3
MM
2819/* Initialize the record-btrace target ops. */
2820
2821static void
2822init_record_btrace_ops (void)
2823{
2824 struct target_ops *ops;
2825
2826 ops = &record_btrace_ops;
2827 ops->to_shortname = "record-btrace";
2828 ops->to_longname = "Branch tracing target";
2829 ops->to_doc = "Collect control-flow trace and provide the execution history.";
2830 ops->to_open = record_btrace_open;
2831 ops->to_close = record_btrace_close;
b7d2e916 2832 ops->to_async = record_btrace_async;
afedecd3 2833 ops->to_detach = record_detach;
c0272db5 2834 ops->to_disconnect = record_btrace_disconnect;
afedecd3
MM
2835 ops->to_mourn_inferior = record_mourn_inferior;
2836 ops->to_kill = record_kill;
afedecd3
MM
2837 ops->to_stop_recording = record_btrace_stop_recording;
2838 ops->to_info_record = record_btrace_info;
2839 ops->to_insn_history = record_btrace_insn_history;
2840 ops->to_insn_history_from = record_btrace_insn_history_from;
2841 ops->to_insn_history_range = record_btrace_insn_history_range;
2842 ops->to_call_history = record_btrace_call_history;
2843 ops->to_call_history_from = record_btrace_call_history_from;
2844 ops->to_call_history_range = record_btrace_call_history_range;
b158a20f 2845 ops->to_record_method = record_btrace_record_method;
07bbe694 2846 ops->to_record_is_replaying = record_btrace_is_replaying;
7ff27e9b 2847 ops->to_record_will_replay = record_btrace_will_replay;
797094dd 2848 ops->to_record_stop_replaying = record_btrace_stop_replaying_all;
633785ff
MM
2849 ops->to_xfer_partial = record_btrace_xfer_partial;
2850 ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
2851 ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
1f3ef581
MM
2852 ops->to_fetch_registers = record_btrace_fetch_registers;
2853 ops->to_store_registers = record_btrace_store_registers;
2854 ops->to_prepare_to_store = record_btrace_prepare_to_store;
ac01945b
TT
2855 ops->to_get_unwinder = &record_btrace_to_get_unwinder;
2856 ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;
b2f4cfde 2857 ops->to_resume = record_btrace_resume;
85ad3aaf 2858 ops->to_commit_resume = record_btrace_commit_resume;
b2f4cfde 2859 ops->to_wait = record_btrace_wait;
6e4879f0 2860 ops->to_stop = record_btrace_stop;
e8032dde 2861 ops->to_update_thread_list = record_btrace_update_thread_list;
e2887aa3 2862 ops->to_thread_alive = record_btrace_thread_alive;
066ce621
MM
2863 ops->to_goto_record_begin = record_btrace_goto_begin;
2864 ops->to_goto_record_end = record_btrace_goto_end;
2865 ops->to_goto_record = record_btrace_goto;
52834460 2866 ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
9e8915c6
PA
2867 ops->to_stopped_by_sw_breakpoint = record_btrace_stopped_by_sw_breakpoint;
2868 ops->to_supports_stopped_by_sw_breakpoint
2869 = record_btrace_supports_stopped_by_sw_breakpoint;
2870 ops->to_stopped_by_hw_breakpoint = record_btrace_stopped_by_hw_breakpoint;
2871 ops->to_supports_stopped_by_hw_breakpoint
2872 = record_btrace_supports_stopped_by_hw_breakpoint;
70ad5bff 2873 ops->to_execution_direction = record_btrace_execution_direction;
aef92902
MM
2874 ops->to_prepare_to_generate_core = record_btrace_prepare_to_generate_core;
2875 ops->to_done_generating_core = record_btrace_done_generating_core;
afedecd3
MM
2876 ops->to_stratum = record_stratum;
2877 ops->to_magic = OPS_MAGIC;
2878}
2879
f4abbc16
MM
2880/* Start recording in BTS format. */
2881
2882static void
cdb34d4a 2883cmd_record_btrace_bts_start (const char *args, int from_tty)
f4abbc16 2884{
f4abbc16
MM
2885 if (args != NULL && *args != 0)
2886 error (_("Invalid argument."));
2887
2888 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2889
492d29ea
PA
2890 TRY
2891 {
95a6b0a1 2892 execute_command ("target record-btrace", from_tty);
492d29ea
PA
2893 }
2894 CATCH (exception, RETURN_MASK_ALL)
f4abbc16
MM
2895 {
2896 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2897 throw_exception (exception);
2898 }
492d29ea 2899 END_CATCH
f4abbc16
MM
2900}
2901
bc504a31 2902/* Start recording in Intel Processor Trace format. */
afedecd3
MM
2903
2904static void
cdb34d4a 2905cmd_record_btrace_pt_start (const char *args, int from_tty)
afedecd3
MM
2906{
2907 if (args != NULL && *args != 0)
2908 error (_("Invalid argument."));
2909
b20a6524 2910 record_btrace_conf.format = BTRACE_FORMAT_PT;
f4abbc16 2911
492d29ea
PA
2912 TRY
2913 {
95a6b0a1 2914 execute_command ("target record-btrace", from_tty);
492d29ea
PA
2915 }
2916 CATCH (exception, RETURN_MASK_ALL)
2917 {
2918 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2919 throw_exception (exception);
2920 }
2921 END_CATCH
afedecd3
MM
2922}
2923
b20a6524
MM
2924/* Alias for "target record". */
2925
2926static void
981a3fb3 2927cmd_record_btrace_start (const char *args, int from_tty)
b20a6524
MM
2928{
2929 if (args != NULL && *args != 0)
2930 error (_("Invalid argument."));
2931
2932 record_btrace_conf.format = BTRACE_FORMAT_PT;
2933
2934 TRY
2935 {
95a6b0a1 2936 execute_command ("target record-btrace", from_tty);
b20a6524
MM
2937 }
2938 CATCH (exception, RETURN_MASK_ALL)
2939 {
2940 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2941
2942 TRY
2943 {
95a6b0a1 2944 execute_command ("target record-btrace", from_tty);
b20a6524
MM
2945 }
2946 CATCH (exception, RETURN_MASK_ALL)
2947 {
2948 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2949 throw_exception (exception);
2950 }
2951 END_CATCH
2952 }
2953 END_CATCH
2954}
2955
67b5c0c1
MM
2956/* The "set record btrace" command. */
2957
2958static void
981a3fb3 2959cmd_set_record_btrace (const char *args, int from_tty)
67b5c0c1
MM
2960{
2961 cmd_show_list (set_record_btrace_cmdlist, from_tty, "");
2962}
2963
2964/* The "show record btrace" command. */
2965
2966static void
981a3fb3 2967cmd_show_record_btrace (const char *args, int from_tty)
67b5c0c1
MM
2968{
2969 cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
2970}
2971
2972/* The "show record btrace replay-memory-access" command. */
2973
2974static void
2975cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2976 struct cmd_list_element *c, const char *value)
2977{
2978 fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
2979 replay_memory_access);
2980}
2981
d33501a5
MM
2982/* The "set record btrace bts" command. */
2983
2984static void
981a3fb3 2985cmd_set_record_btrace_bts (const char *args, int from_tty)
d33501a5
MM
2986{
2987 printf_unfiltered (_("\"set record btrace bts\" must be followed "
b20a6524 2988 "by an appropriate subcommand.\n"));
d33501a5
MM
2989 help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
2990 all_commands, gdb_stdout);
2991}
2992
2993/* The "show record btrace bts" command. */
2994
2995static void
981a3fb3 2996cmd_show_record_btrace_bts (const char *args, int from_tty)
d33501a5
MM
2997{
2998 cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
2999}
3000
b20a6524
MM
3001/* The "set record btrace pt" command. */
3002
3003static void
981a3fb3 3004cmd_set_record_btrace_pt (const char *args, int from_tty)
b20a6524
MM
3005{
3006 printf_unfiltered (_("\"set record btrace pt\" must be followed "
3007 "by an appropriate subcommand.\n"));
3008 help_list (set_record_btrace_pt_cmdlist, "set record btrace pt ",
3009 all_commands, gdb_stdout);
3010}
3011
3012/* The "show record btrace pt" command. */
3013
3014static void
981a3fb3 3015cmd_show_record_btrace_pt (const char *args, int from_tty)
b20a6524
MM
3016{
3017 cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, "");
3018}
3019
3020/* The "record bts buffer-size" show value function. */
3021
3022static void
3023show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
3024 struct cmd_list_element *c,
3025 const char *value)
3026{
3027 fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
3028 value);
3029}
3030
3031/* The "record pt buffer-size" show value function. */
3032
3033static void
3034show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
3035 struct cmd_list_element *c,
3036 const char *value)
3037{
3038 fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
3039 value);
3040}
3041
afedecd3
MM
3042/* Initialize btrace commands. */
3043
3044void
3045_initialize_record_btrace (void)
3046{
f4abbc16
MM
3047 add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
3048 _("Start branch trace recording."), &record_btrace_cmdlist,
3049 "record btrace ", 0, &record_cmdlist);
afedecd3
MM
3050 add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
3051
f4abbc16
MM
3052 add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
3053 _("\
3054Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
3055The processor stores a from/to record for each branch into a cyclic buffer.\n\
3056This format may not be available on all processors."),
3057 &record_btrace_cmdlist);
3058 add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);
3059
b20a6524
MM
3060 add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
3061 _("\
bc504a31 3062Start branch trace recording in Intel Processor Trace format.\n\n\
b20a6524
MM
3063This format may not be available on all processors."),
3064 &record_btrace_cmdlist);
3065 add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);
3066
67b5c0c1
MM
3067 add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
3068 _("Set record options"), &set_record_btrace_cmdlist,
3069 "set record btrace ", 0, &set_record_cmdlist);
3070
3071 add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
3072 _("Show record options"), &show_record_btrace_cmdlist,
3073 "show record btrace ", 0, &show_record_cmdlist);
3074
3075 add_setshow_enum_cmd ("replay-memory-access", no_class,
3076 replay_memory_access_types, &replay_memory_access, _("\
3077Set what memory accesses are allowed during replay."), _("\
3078Show what memory accesses are allowed during replay."),
3079 _("Default is READ-ONLY.\n\n\
3080The btrace record target does not trace data.\n\
3081The memory therefore corresponds to the live target and not \
3082to the current replay position.\n\n\
3083When READ-ONLY, allow accesses to read-only memory during replay.\n\
3084When READ-WRITE, allow accesses to read-only and read-write memory during \
3085replay."),
3086 NULL, cmd_show_replay_memory_access,
3087 &set_record_btrace_cmdlist,
3088 &show_record_btrace_cmdlist);
3089
d33501a5
MM
3090 add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
3091 _("Set record btrace bts options"),
3092 &set_record_btrace_bts_cmdlist,
3093 "set record btrace bts ", 0, &set_record_btrace_cmdlist);
3094
3095 add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
3096 _("Show record btrace bts options"),
3097 &show_record_btrace_bts_cmdlist,
3098 "show record btrace bts ", 0, &show_record_btrace_cmdlist);
3099
3100 add_setshow_uinteger_cmd ("buffer-size", no_class,
3101 &record_btrace_conf.bts.size,
3102 _("Set the record/replay bts buffer size."),
3103 _("Show the record/replay bts buffer size."), _("\
3104When starting recording request a trace buffer of this size. \
3105The actual buffer size may differ from the requested size. \
3106Use \"info record\" to see the actual buffer size.\n\n\
3107Bigger buffers allow longer recording but also take more time to process \
3108the recorded execution trace.\n\n\
b20a6524
MM
3109The trace buffer size may not be changed while recording."), NULL,
3110 show_record_bts_buffer_size_value,
d33501a5
MM
3111 &set_record_btrace_bts_cmdlist,
3112 &show_record_btrace_bts_cmdlist);
3113
b20a6524
MM
3114 add_prefix_cmd ("pt", class_support, cmd_set_record_btrace_pt,
3115 _("Set record btrace pt options"),
3116 &set_record_btrace_pt_cmdlist,
3117 "set record btrace pt ", 0, &set_record_btrace_cmdlist);
3118
3119 add_prefix_cmd ("pt", class_support, cmd_show_record_btrace_pt,
3120 _("Show record btrace pt options"),
3121 &show_record_btrace_pt_cmdlist,
3122 "show record btrace pt ", 0, &show_record_btrace_cmdlist);
3123
3124 add_setshow_uinteger_cmd ("buffer-size", no_class,
3125 &record_btrace_conf.pt.size,
3126 _("Set the record/replay pt buffer size."),
3127 _("Show the record/replay pt buffer size."), _("\
3128Bigger buffers allow longer recording but also take more time to process \
3129the recorded execution.\n\
3130The actual buffer size may differ from the requested size. Use \"info record\" \
3131to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
3132 &set_record_btrace_pt_cmdlist,
3133 &show_record_btrace_pt_cmdlist);
3134
afedecd3
MM
3135 init_record_btrace_ops ();
3136 add_target (&record_btrace_ops);
0b722aec
MM
3137
3138 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
3139 xcalloc, xfree);
d33501a5
MM
3140
3141 record_btrace_conf.bts.size = 64 * 1024;
b20a6524 3142 record_btrace_conf.pt.size = 16 * 1024;
afedecd3 3143}