/* Branch trace support for GDB, the GNU debugger.

   Copyright (C) 2013-2017 Free Software Foundation, Inc.

   Contributed by Intel Corp. <markus.t.metzger@intel.com>

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "record.h"
#include "record-btrace.h"
#include "gdbthread.h"
#include "target.h"
#include "gdbcmd.h"
#include "disasm.h"
#include "observer.h"
#include "cli/cli-utils.h"
#include "source.h"
#include "ui-out.h"
#include "symtab.h"
#include "filenames.h"
#include "regcache.h"
#include "frame-unwind.h"
#include "hashtab.h"
#include "infrun.h"
#include "event-loop.h"
#include "inf-loop.h"
#include "vec.h"
#include <algorithm>

/* The target_ops of record-btrace.  */
static struct target_ops record_btrace_ops;

/* A new thread observer enabling branch tracing for the new thread.  */
static struct observer *record_btrace_thread_observer;

/* Memory access types used in set/show record btrace replay-memory-access.  */
static const char replay_memory_access_read_only[] = "read-only";
static const char replay_memory_access_read_write[] = "read-write";
static const char *const replay_memory_access_types[] =
{
  replay_memory_access_read_only,
  replay_memory_access_read_write,
  NULL
};

/* The currently allowed replay memory access type.  */
static const char *replay_memory_access = replay_memory_access_read_only;

/* Command lists for "set/show record btrace".  */
static struct cmd_list_element *set_record_btrace_cmdlist;
static struct cmd_list_element *show_record_btrace_cmdlist;

/* The execution direction of the last resume we got.  See record-full.c.  */
static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;

/* The async event handler for reverse/replay execution.  */
static struct async_event_handler *record_btrace_async_inferior_event_handler;

/* A flag indicating that we are currently generating a core file.  */
static int record_btrace_generating_corefile;

/* The current branch trace configuration.  */
static struct btrace_config record_btrace_conf;

/* Command list for "record btrace".  */
static struct cmd_list_element *record_btrace_cmdlist;

/* Command lists for "set/show record btrace bts".  */
static struct cmd_list_element *set_record_btrace_bts_cmdlist;
static struct cmd_list_element *show_record_btrace_bts_cmdlist;

/* Command lists for "set/show record btrace pt".  */
static struct cmd_list_element *set_record_btrace_pt_cmdlist;
static struct cmd_list_element *show_record_btrace_pt_cmdlist;

/* Print a record-btrace debug message.  Use do ... while (0) to avoid
   ambiguities when used in if statements.  */
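/* For instance, with a bare block instead of the do/while (0) wrapper,
   "if (cond) DEBUG ("msg"); else ..." would not compile because the
   semicolon after the block would end the if statement before the else.  */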

#define DEBUG(msg, args...) \
  do \
    { \
      if (record_debug != 0) \
        fprintf_unfiltered (gdb_stdlog, \
                            "[record-btrace] " msg "\n", ##args); \
    } \
  while (0)


/* Update the branch trace for the current thread and return a pointer to its
   thread_info.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct thread_info *
require_btrace_thread (void)
{
  struct thread_info *tp;

  DEBUG ("require");

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  validate_registers_access ();

  btrace_fetch (tp);

  if (btrace_is_empty (tp))
    error (_("No trace."));

  return tp;
}

/* Update the branch trace for the current thread and return a pointer to its
   branch trace information struct.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct btrace_thread_info *
require_btrace (void)
{
  struct thread_info *tp;

  tp = require_btrace_thread ();

  return &tp->btrace;
}

/* Enable branch tracing for one thread.  Warn on errors.  */

static void
record_btrace_enable_warn (struct thread_info *tp)
{
  TRY
    {
      btrace_enable (tp, &record_btrace_conf);
    }
  CATCH (error, RETURN_MASK_ERROR)
    {
      warning ("%s", error.message);
    }
  END_CATCH
}

/* Callback function to disable branch tracing for one thread.  */

static void
record_btrace_disable_callback (void *arg)
{
  struct thread_info *tp = (struct thread_info *) arg;

  btrace_disable (tp);
}

/* Enable automatic tracing of new threads.  */

static void
record_btrace_auto_enable (void)
{
  DEBUG ("attach thread observer");

  record_btrace_thread_observer
    = observer_attach_new_thread (record_btrace_enable_warn);
}

/* Disable automatic tracing of new threads.  */

static void
record_btrace_auto_disable (void)
{
  /* The observer may have already been detached.  */
  if (record_btrace_thread_observer == NULL)
    return;

  DEBUG ("detach thread observer");

  observer_detach_new_thread (record_btrace_thread_observer);
  record_btrace_thread_observer = NULL;
}

/* The record-btrace async event handler function.  */

static void
record_btrace_handle_async_inferior_event (gdb_client_data data)
{
  inferior_event_handler (INF_REG_EVENT, NULL);
}

/* See record-btrace.h.  */

void
record_btrace_push_target (void)
{
  const char *format;

  record_btrace_auto_enable ();

  push_target (&record_btrace_ops);

  record_btrace_async_inferior_event_handler
    = create_async_event_handler (record_btrace_handle_async_inferior_event,
                                  NULL);
  record_btrace_generating_corefile = 0;

  format = btrace_format_short_string (record_btrace_conf.format);
  observer_notify_record_changed (current_inferior (), 1, "btrace", format);
}

/* The to_open method of target record-btrace.  */

static void
record_btrace_open (const char *args, int from_tty)
{
  struct cleanup *disable_chain;
  struct thread_info *tp;

  DEBUG ("open");

  record_preopen ();

  if (!target_has_execution)
    error (_("The program is not being run."));

  gdb_assert (record_btrace_thread_observer == NULL);

  disable_chain = make_cleanup (null_cleanup, NULL);
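  /* Each thread we enable below registers a disable cleanup on this chain,
     so branch tracing can be turned off again for the threads we already
     touched if enabling fails for a later thread.  */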
  ALL_NON_EXITED_THREADS (tp)
    if (args == NULL || *args == 0 || number_is_in_list (args, tp->global_num))
      {
        btrace_enable (tp, &record_btrace_conf);

        make_cleanup (record_btrace_disable_callback, tp);
      }

  record_btrace_push_target ();

  discard_cleanups (disable_chain);
}

/* The to_stop_recording method of target record-btrace.  */

static void
record_btrace_stop_recording (struct target_ops *self)
{
  struct thread_info *tp;

  DEBUG ("stop recording");

  record_btrace_auto_disable ();

  ALL_NON_EXITED_THREADS (tp)
    if (tp->btrace.target != NULL)
      btrace_disable (tp);
}

/* The to_disconnect method of target record-btrace.  */

static void
record_btrace_disconnect (struct target_ops *self, const char *args,
                          int from_tty)
{
  struct target_ops *beneath = self->beneath;

  /* Do not stop recording, just clean up GDB side.  */
  unpush_target (self);

  /* Forward disconnect.  */
  beneath->to_disconnect (beneath, args, from_tty);
}

/* The to_close method of target record-btrace.  */

static void
record_btrace_close (struct target_ops *self)
{
  struct thread_info *tp;

  if (record_btrace_async_inferior_event_handler != NULL)
    delete_async_event_handler (&record_btrace_async_inferior_event_handler);

  /* Make sure automatic recording gets disabled even if we did not stop
     recording before closing the record-btrace target.  */
  record_btrace_auto_disable ();

  /* We should have already stopped recording.
     Tear down btrace in case we have not.  */
  ALL_NON_EXITED_THREADS (tp)
    btrace_teardown (tp);
}

/* The to_async method of target record-btrace.  */

static void
record_btrace_async (struct target_ops *ops, int enable)
{
  if (enable)
    mark_async_event_handler (record_btrace_async_inferior_event_handler);
  else
    clear_async_event_handler (record_btrace_async_inferior_event_handler);

  ops->beneath->to_async (ops->beneath, enable);
}

/* Adjusts the size and returns a human readable size suffix.  */
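/* For example, 65536 is adjusted to 64 with suffix "kB" and 3145728 to 3
   with suffix "MB"; a size that is not a multiple of 1kB is returned
   unchanged with an empty suffix.  */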

static const char *
record_btrace_adjust_size (unsigned int *size)
{
  unsigned int sz;

  sz = *size;

  if ((sz & ((1u << 30) - 1)) == 0)
    {
      *size = sz >> 30;
      return "GB";
    }
  else if ((sz & ((1u << 20) - 1)) == 0)
    {
      *size = sz >> 20;
      return "MB";
    }
  else if ((sz & ((1u << 10) - 1)) == 0)
    {
      *size = sz >> 10;
      return "kB";
    }
  else
    return "";
}

/* Print a BTS configuration.  */

static void
record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
{
  const char *suffix;
  unsigned int size;

  size = conf->size;
  if (size > 0)
    {
      suffix = record_btrace_adjust_size (&size);
      printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
    }
}

/* Print an Intel Processor Trace configuration.  */

static void
record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
{
  const char *suffix;
  unsigned int size;

  size = conf->size;
  if (size > 0)
    {
      suffix = record_btrace_adjust_size (&size);
      printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
    }
}

/* Print a branch tracing configuration.  */

static void
record_btrace_print_conf (const struct btrace_config *conf)
{
  printf_unfiltered (_("Recording format: %s.\n"),
                     btrace_format_string (conf->format));

  switch (conf->format)
    {
    case BTRACE_FORMAT_NONE:
      return;

    case BTRACE_FORMAT_BTS:
      record_btrace_print_bts_conf (&conf->bts);
      return;

    case BTRACE_FORMAT_PT:
      record_btrace_print_pt_conf (&conf->pt);
      return;
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
}

/* The to_info_record method of target record-btrace.  */

static void
record_btrace_info (struct target_ops *self)
{
  struct btrace_thread_info *btinfo;
  const struct btrace_config *conf;
  struct thread_info *tp;
  unsigned int insns, calls, gaps;

  DEBUG ("info");

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  validate_registers_access ();

  btinfo = &tp->btrace;

  conf = btrace_conf (btinfo);
  if (conf != NULL)
    record_btrace_print_conf (conf);

  btrace_fetch (tp);

  insns = 0;
  calls = 0;
  gaps = 0;

  if (!btrace_is_empty (tp))
    {
      struct btrace_call_iterator call;
      struct btrace_insn_iterator insn;

      btrace_call_end (&call, btinfo);
      btrace_call_prev (&call, 1);
      calls = btrace_call_number (&call);

      btrace_insn_end (&insn, btinfo);
      insns = btrace_insn_number (&insn);

      /* If the last instruction is not a gap, it is the current instruction
         that is not actually part of the record.  */
      if (btrace_insn_get (&insn) != NULL)
        insns -= 1;

      gaps = btinfo->ngaps;
    }

  printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
                       "for thread %s (%s).\n"), insns, calls, gaps,
                     print_thread_id (tp), target_pid_to_str (tp->ptid));

  if (btrace_is_replaying (tp))
    printf_unfiltered (_("Replay in progress.  At instruction %u.\n"),
                       btrace_insn_number (btinfo->replay));
}

/* Print a decode error.  */

static void
btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
                            enum btrace_format format)
{
  const char *errstr = btrace_decode_error (format, errcode);

  uiout->text (_("["));
  /* ERRCODE > 0 indicates notifications on BTRACE_FORMAT_PT.  */
  if (!(format == BTRACE_FORMAT_PT && errcode > 0))
    {
      uiout->text (_("decode error ("));
      uiout->field_int ("errcode", errcode);
      uiout->text (_("): "));
    }
  uiout->text (errstr);
  uiout->text (_("]\n"));
}

/* Print an unsigned int.  */

static void
ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
{
  uiout->field_fmt (fld, "%u", val);
}

/* A range of source lines.  */

struct btrace_line_range
{
  /* The symtab this line is from.  */
  struct symtab *symtab;

  /* The first line (inclusive).  */
  int begin;

  /* The last line (exclusive).  */
  int end;
};

/* Construct a line range.  */

static struct btrace_line_range
btrace_mk_line_range (struct symtab *symtab, int begin, int end)
{
  struct btrace_line_range range;

  range.symtab = symtab;
  range.begin = begin;
  range.end = end;

  return range;
}

/* Add a line to a line range.  */

static struct btrace_line_range
btrace_line_range_add (struct btrace_line_range range, int line)
{
  if (range.end <= range.begin)
    {
      /* This is the first entry.  */
      range.begin = line;
      range.end = line + 1;
    }
  else if (line < range.begin)
    range.begin = line;
  else if (range.end < line)
    range.end = line;

  return range;
}

/* Return non-zero if RANGE is empty, zero otherwise.  */

static int
btrace_line_range_is_empty (struct btrace_line_range range)
{
  return range.end <= range.begin;
}

/* Return non-zero if LHS contains RHS, zero otherwise.  */

static int
btrace_line_range_contains_range (struct btrace_line_range lhs,
                                  struct btrace_line_range rhs)
{
  return ((lhs.symtab == rhs.symtab)
          && (lhs.begin <= rhs.begin)
          && (rhs.end <= lhs.end));
}

/* Find the line range associated with PC.  */

static struct btrace_line_range
btrace_find_line_range (CORE_ADDR pc)
{
  struct btrace_line_range range;
  struct linetable_entry *lines;
  struct linetable *ltable;
  struct symtab *symtab;
  int nlines, i;

  symtab = find_pc_line_symtab (pc);
  if (symtab == NULL)
    return btrace_mk_line_range (NULL, 0, 0);

  ltable = SYMTAB_LINETABLE (symtab);
  if (ltable == NULL)
    return btrace_mk_line_range (symtab, 0, 0);

  nlines = ltable->nitems;
  lines = ltable->item;
  if (nlines <= 0)
    return btrace_mk_line_range (symtab, 0, 0);

  range = btrace_mk_line_range (symtab, 0, 0);
  for (i = 0; i < nlines - 1; i++)
    {
      if ((lines[i].pc == pc) && (lines[i].line != 0))
        range = btrace_line_range_add (range, lines[i].line);
    }

  return range;
}

/* Print source lines in LINES to UIOUT.

   UI_ITEM_CHAIN is a cleanup chain for the last source line and the
   instructions corresponding to that source line.  When printing a new source
   line, we do the cleanups for the open chain and open a new cleanup chain for
   the new source line.  If the source line range in LINES is not empty, this
   function will leave the cleanup chain for the last printed source line open
   so instructions can be added to it.  */

static void
btrace_print_lines (struct btrace_line_range lines, struct ui_out *uiout,
                    struct cleanup **ui_item_chain, int flags)
{
  print_source_lines_flags psl_flags;
  int line;

  psl_flags = 0;
  if (flags & DISASSEMBLY_FILENAME)
    psl_flags |= PRINT_SOURCE_LINES_FILENAME;

  for (line = lines.begin; line < lines.end; ++line)
    {
      if (*ui_item_chain != NULL)
        do_cleanups (*ui_item_chain);

      *ui_item_chain
        = make_cleanup_ui_out_tuple_begin_end (uiout, "src_and_asm_line");

      print_source_lines (lines.symtab, line, line + 1, psl_flags);

      make_cleanup_ui_out_list_begin_end (uiout, "line_asm_insn");
    }
}

/* Disassemble a section of the recorded instruction trace.  */

static void
btrace_insn_history (struct ui_out *uiout,
                     const struct btrace_thread_info *btinfo,
                     const struct btrace_insn_iterator *begin,
                     const struct btrace_insn_iterator *end, int flags)
{
  struct cleanup *cleanups, *ui_item_chain;
  struct gdbarch *gdbarch;
  struct btrace_insn_iterator it;
  struct btrace_line_range last_lines;

  DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
         btrace_insn_number (end));

  flags |= DISASSEMBLY_SPECULATIVE;

  gdbarch = target_gdbarch ();
  last_lines = btrace_mk_line_range (NULL, 0, 0);

  cleanups = make_cleanup_ui_out_list_begin_end (uiout, "asm_insns");

  /* UI_ITEM_CHAIN is a cleanup chain for the last source line and the
     instructions corresponding to that line.  */
  ui_item_chain = NULL;

  gdb_pretty_print_disassembler disasm (gdbarch);

  for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
    {
      const struct btrace_insn *insn;

      insn = btrace_insn_get (&it);

      /* A NULL instruction indicates a gap in the trace.  */
      if (insn == NULL)
        {
          const struct btrace_config *conf;

          conf = btrace_conf (btinfo);

          /* We have trace so we must have a configuration.  */
          gdb_assert (conf != NULL);

          uiout->field_fmt ("insn-number", "%u",
                            btrace_insn_number (&it));
          uiout->text ("\t");

          btrace_ui_out_decode_error (uiout, btrace_insn_get_error (&it),
                                      conf->format);
        }
      else
        {
          struct disasm_insn dinsn;

          if ((flags & DISASSEMBLY_SOURCE) != 0)
            {
              struct btrace_line_range lines;

              lines = btrace_find_line_range (insn->pc);
              if (!btrace_line_range_is_empty (lines)
                  && !btrace_line_range_contains_range (last_lines, lines))
                {
                  btrace_print_lines (lines, uiout, &ui_item_chain, flags);
                  last_lines = lines;
                }
              else if (ui_item_chain == NULL)
                {
                  ui_item_chain
                    = make_cleanup_ui_out_tuple_begin_end (uiout,
                                                           "src_and_asm_line");
                  /* No source information.  */
                  make_cleanup_ui_out_list_begin_end (uiout, "line_asm_insn");
                }

              gdb_assert (ui_item_chain != NULL);
            }

          memset (&dinsn, 0, sizeof (dinsn));
          dinsn.number = btrace_insn_number (&it);
          dinsn.addr = insn->pc;

          if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
            dinsn.is_speculative = 1;

          disasm.pretty_print_insn (uiout, &dinsn, flags);
        }
    }

  do_cleanups (cleanups);
}

/* The to_insn_history method of target record-btrace.  */

static void
record_btrace_insn_history (struct target_ops *self, int size, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  btinfo = require_btrace ();
  history = btinfo->insn_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("insn-history (0x%x): %d", flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
         start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
        begin = *replay;
      else
        btrace_insn_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
         expand in the other direction, as well, to fill up any remaining
         context.  */
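      /* That is, a negative SIZE ends at the current position (inclusive)
         and a positive SIZE starts at it; either way we try to show
         abs (SIZE) instructions, borrowing from the other direction once
         we hit either end of the trace.  */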
      end = begin;
      if (size < 0)
        {
          /* We want the current position covered, as well.  */
          covered = btrace_insn_next (&end, 1);
          covered += btrace_insn_prev (&begin, context - covered);
          covered += btrace_insn_next (&end, context - covered);
        }
      else
        {
          covered = btrace_insn_next (&end, context);
          covered += btrace_insn_prev (&begin, context - covered);
        }
    }
  else
    {
      begin = history->begin;
      end = history->end;

      DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
             btrace_insn_number (&begin), btrace_insn_number (&end));

      if (size < 0)
        {
          end = begin;
          covered = btrace_insn_prev (&begin, context);
        }
      else
        {
          begin = end;
          covered = btrace_insn_next (&end, context);
        }
    }

  if (covered > 0)
    btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
        printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
        printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  btrace_set_insn_history (btinfo, &begin, &end);
}

/* The to_insn_history_range method of target record-btrace.  */

static void
record_btrace_insn_history_range (struct target_ops *self,
                                  ULONGEST from, ULONGEST to, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "insn history");
  low = from;
  high = to;

  DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_insn_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_insn_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_insn_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_insn_next (&end, 1);
    }

  btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_insn_history (btinfo, &begin, &end);
}

/* The to_insn_history_from method of target record-btrace.  */

static void
record_btrace_insn_history_from (struct target_ops *self,
                                 ULONGEST from, int size, int flags)
{
  ULONGEST begin, end, context;

  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  if (size < 0)
    {
      end = from;

      if (from < context)
        begin = 0;
      else
        begin = from - context + 1;
    }
  else
    {
      begin = from;
      end = from + context - 1;

      /* Check for wrap-around.  */
      if (end < begin)
        end = ULONGEST_MAX;
    }

  record_btrace_insn_history_range (self, begin, end, flags);
}

/* Print the instruction number range for a function call history line.  */

static void
btrace_call_history_insn_range (struct ui_out *uiout,
                                const struct btrace_function *bfun)
{
  unsigned int begin, end, size;

  size = VEC_length (btrace_insn_s, bfun->insn);
  gdb_assert (size > 0);

  begin = bfun->insn_offset;
  end = begin + size - 1;

  ui_out_field_uint (uiout, "insn begin", begin);
  uiout->text (",");
  ui_out_field_uint (uiout, "insn end", end);
}

/* Compute the lowest and highest source line for the instructions in BFUN
   and return them in PBEGIN and PEND.
   Ignore instructions that can't be mapped to BFUN, e.g. instructions that
   result from inlining or macro expansion.  */

static void
btrace_compute_src_line_range (const struct btrace_function *bfun,
                               int *pbegin, int *pend)
{
  struct btrace_insn *insn;
  struct symtab *symtab;
  struct symbol *sym;
  unsigned int idx;
  int begin, end;

  begin = INT_MAX;
  end = INT_MIN;

  sym = bfun->sym;
  if (sym == NULL)
    goto out;

  symtab = symbol_symtab (sym);

  for (idx = 0; VEC_iterate (btrace_insn_s, bfun->insn, idx, insn); ++idx)
    {
      struct symtab_and_line sal;

      sal = find_pc_line (insn->pc, 0);
      if (sal.symtab != symtab || sal.line == 0)
        continue;

      begin = std::min (begin, sal.line);
      end = std::max (end, sal.line);
    }

 out:
  *pbegin = begin;
  *pend = end;
}

/* Print the source line information for a function call history line.  */

static void
btrace_call_history_src_line (struct ui_out *uiout,
                              const struct btrace_function *bfun)
{
  struct symbol *sym;
  int begin, end;

  sym = bfun->sym;
  if (sym == NULL)
    return;

  uiout->field_string ("file",
                       symtab_to_filename_for_display (symbol_symtab (sym)));

  btrace_compute_src_line_range (bfun, &begin, &end);
  if (end < begin)
    return;

  uiout->text (":");
  uiout->field_int ("min line", begin);

  if (end == begin)
    return;

  uiout->text (",");
  uiout->field_int ("max line", end);
}

/* Get the name of a branch trace function.  */

static const char *
btrace_get_bfun_name (const struct btrace_function *bfun)
{
  struct minimal_symbol *msym;
  struct symbol *sym;

  if (bfun == NULL)
    return "??";

  msym = bfun->msym;
  sym = bfun->sym;

  if (sym != NULL)
    return SYMBOL_PRINT_NAME (sym);
  else if (msym != NULL)
    return MSYMBOL_PRINT_NAME (msym);
  else
    return "??";
}

/* Disassemble a section of the recorded function trace.  */

static void
btrace_call_history (struct ui_out *uiout,
                     const struct btrace_thread_info *btinfo,
                     const struct btrace_call_iterator *begin,
                     const struct btrace_call_iterator *end,
                     int int_flags)
{
  struct btrace_call_iterator it;
  record_print_flags flags = (enum record_print_flag) int_flags;

  DEBUG ("ftrace (0x%x): [%u; %u)", int_flags, btrace_call_number (begin),
         btrace_call_number (end));

  for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
    {
      const struct btrace_function *bfun;
      struct minimal_symbol *msym;
      struct symbol *sym;

      bfun = btrace_call_get (&it);
      sym = bfun->sym;
      msym = bfun->msym;

      /* Print the function index.  */
      ui_out_field_uint (uiout, "index", bfun->number);
      uiout->text ("\t");

      /* Indicate gaps in the trace.  */
      if (bfun->errcode != 0)
        {
          const struct btrace_config *conf;

          conf = btrace_conf (btinfo);

          /* We have trace so we must have a configuration.  */
          gdb_assert (conf != NULL);

          btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);

          continue;
        }

      if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
        {
          int level = bfun->level + btinfo->level, i;

          for (i = 0; i < level; ++i)
            uiout->text ("  ");
        }

      if (sym != NULL)
        uiout->field_string ("function", SYMBOL_PRINT_NAME (sym));
      else if (msym != NULL)
        uiout->field_string ("function", MSYMBOL_PRINT_NAME (msym));
      else if (!uiout->is_mi_like_p ())
        uiout->field_string ("function", "??");

      if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
        {
          uiout->text (_("\tinst "));
          btrace_call_history_insn_range (uiout, bfun);
        }

      if ((flags & RECORD_PRINT_SRC_LINE) != 0)
        {
          uiout->text (_("\tat "));
          btrace_call_history_src_line (uiout, bfun);
        }

      uiout->text ("\n");
    }
}

/* The to_call_history method of target record-btrace.  */

static void
record_btrace_call_history (struct target_ops *self, int size, int int_flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct ui_out *uiout;
  unsigned int context, covered;
  record_print_flags flags = (enum record_print_flag) int_flags;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  btinfo = require_btrace ();
  history = btinfo->call_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("call-history (0x%x): %d", int_flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
         start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
        {
          begin.btinfo = btinfo;
          begin.index = replay->call_index;
        }
      else
        btrace_call_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
         expand in the other direction, as well, to fill up any remaining
         context.  */
      end = begin;
      if (size < 0)
        {
          /* We want the current position covered, as well.  */
          covered = btrace_call_next (&end, 1);
          covered += btrace_call_prev (&begin, context - covered);
          covered += btrace_call_next (&end, context - covered);
        }
      else
        {
          covered = btrace_call_next (&end, context);
          covered += btrace_call_prev (&begin, context - covered);
        }
    }
  else
    {
      begin = history->begin;
      end = history->end;

      DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", int_flags, size,
             btrace_call_number (&begin), btrace_call_number (&end));

      if (size < 0)
        {
          end = begin;
          covered = btrace_call_prev (&begin, context);
        }
      else
        {
          begin = end;
          covered = btrace_call_next (&end, context);
        }
    }

  if (covered > 0)
    btrace_call_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
        printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
        printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  btrace_set_call_history (btinfo, &begin, &end);
}

/* The to_call_history_range method of target record-btrace.  */

static void
record_btrace_call_history_range (struct target_ops *self,
                                  ULONGEST from, ULONGEST to,
                                  int int_flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;
  record_print_flags flags = (enum record_print_flag) int_flags;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "func history");
  low = from;
  high = to;

  DEBUG ("call-history (0x%x): [%u; %u)", int_flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_call_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_call_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_call_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_call_next (&end, 1);
    }

  btrace_call_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_call_history (btinfo, &begin, &end);
}

/* The to_call_history_from method of target record-btrace.  */

static void
record_btrace_call_history_from (struct target_ops *self,
                                 ULONGEST from, int size,
                                 int int_flags)
{
  ULONGEST begin, end, context;
  record_print_flags flags = (enum record_print_flag) int_flags;

  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  if (size < 0)
    {
      end = from;

      if (from < context)
        begin = 0;
      else
        begin = from - context + 1;
    }
  else
    {
      begin = from;
      end = from + context - 1;

      /* Check for wrap-around.  */
      if (end < begin)
        end = ULONGEST_MAX;
    }

  record_btrace_call_history_range (self, begin, end, flags);
}

/* The to_record_method method of target record-btrace.  */

static enum record_method
record_btrace_record_method (struct target_ops *self, ptid_t ptid)
{
  const struct btrace_config *config;
  struct thread_info * const tp = find_thread_ptid (ptid);

  if (tp == NULL)
    error (_("No thread."));

  if (tp->btrace.target == NULL)
    return RECORD_METHOD_NONE;

  return RECORD_METHOD_BTRACE;
}

/* The to_record_is_replaying method of target record-btrace.  */

static int
record_btrace_is_replaying (struct target_ops *self, ptid_t ptid)
{
  struct thread_info *tp;

  ALL_NON_EXITED_THREADS (tp)
    if (ptid_match (tp->ptid, ptid) && btrace_is_replaying (tp))
      return 1;

  return 0;
}

/* The to_record_will_replay method of target record-btrace.  */

static int
record_btrace_will_replay (struct target_ops *self, ptid_t ptid, int dir)
{
  return dir == EXEC_REVERSE || record_btrace_is_replaying (self, ptid);
}

/* The to_xfer_partial method of target record-btrace.  */
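/* While replaying with the default "replay-memory-access read-only" setting,
   memory writes are rejected and reads are only forwarded for read-only
   sections (e.g. code); everything else is reported as unavailable.  */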

static enum target_xfer_status
record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
                            const char *annex, gdb_byte *readbuf,
                            const gdb_byte *writebuf, ULONGEST offset,
                            ULONGEST len, ULONGEST *xfered_len)
{
  struct target_ops *t;

  /* Filter out requests that don't make sense during replay.  */
  if (replay_memory_access == replay_memory_access_read_only
      && !record_btrace_generating_corefile
      && record_btrace_is_replaying (ops, inferior_ptid))
    {
      switch (object)
        {
        case TARGET_OBJECT_MEMORY:
          {
            struct target_section *section;

            /* We do not allow writing memory in general.  */
            if (writebuf != NULL)
              {
                *xfered_len = len;
                return TARGET_XFER_UNAVAILABLE;
              }

            /* We allow reading readonly memory.  */
            section = target_section_by_addr (ops, offset);
            if (section != NULL)
              {
                /* Check if the section we found is readonly.  */
                if ((bfd_get_section_flags (section->the_bfd_section->owner,
                                            section->the_bfd_section)
                     & SEC_READONLY) != 0)
                  {
                    /* Truncate the request to fit into this section.  */
                    len = std::min (len, section->endaddr - offset);
                    break;
                  }
              }

            *xfered_len = len;
            return TARGET_XFER_UNAVAILABLE;
          }
        }
    }

  /* Forward the request.  */
  ops = ops->beneath;
  return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
                               offset, len, xfered_len);
}

/* The to_insert_breakpoint method of target record-btrace.  */

static int
record_btrace_insert_breakpoint (struct target_ops *ops,
                                 struct gdbarch *gdbarch,
                                 struct bp_target_info *bp_tgt)
{
  const char *old;
  int ret;

  /* Inserting breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  TRY
    {
      ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      replay_memory_access = old;
      throw_exception (except);
    }
  END_CATCH
  replay_memory_access = old;

  return ret;
}

/* The to_remove_breakpoint method of target record-btrace.  */

static int
record_btrace_remove_breakpoint (struct target_ops *ops,
                                 struct gdbarch *gdbarch,
                                 struct bp_target_info *bp_tgt,
                                 enum remove_bp_reason reason)
{
  const char *old;
  int ret;

  /* Removing breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  TRY
    {
      ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt,
                                                reason);
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      replay_memory_access = old;
      throw_exception (except);
    }
  END_CATCH
  replay_memory_access = old;

  return ret;
}

/* The to_fetch_registers method of target record-btrace.  */

static void
record_btrace_fetch_registers (struct target_ops *ops,
                               struct regcache *regcache, int regno)
{
  struct btrace_insn_iterator *replay;
  struct thread_info *tp;

  tp = find_thread_ptid (regcache_get_ptid (regcache));
  gdb_assert (tp != NULL);

  replay = tp->btrace.replay;
  if (replay != NULL && !record_btrace_generating_corefile)
    {
      const struct btrace_insn *insn;
      struct gdbarch *gdbarch;
      int pcreg;

      gdbarch = get_regcache_arch (regcache);
      pcreg = gdbarch_pc_regnum (gdbarch);
      if (pcreg < 0)
        return;

      /* We can only provide the PC register.  */
      if (regno >= 0 && regno != pcreg)
        return;

      insn = btrace_insn_get (replay);
      gdb_assert (insn != NULL);

      regcache_raw_supply (regcache, regno, &insn->pc);
    }
  else
    {
      struct target_ops *t = ops->beneath;

      t->to_fetch_registers (t, regcache, regno);
    }
}

/* The to_store_registers method of target record-btrace.  */

static void
record_btrace_store_registers (struct target_ops *ops,
                               struct regcache *regcache, int regno)
{
  struct target_ops *t;

  if (!record_btrace_generating_corefile
      && record_btrace_is_replaying (ops, regcache_get_ptid (regcache)))
    error (_("Cannot write registers while replaying."));

  gdb_assert (may_write_registers != 0);

  t = ops->beneath;
  t->to_store_registers (t, regcache, regno);
}

/* The to_prepare_to_store method of target record-btrace.  */

static void
record_btrace_prepare_to_store (struct target_ops *ops,
                                struct regcache *regcache)
{
  struct target_ops *t;

  if (!record_btrace_generating_corefile
      && record_btrace_is_replaying (ops, regcache_get_ptid (regcache)))
    return;

  t = ops->beneath;
  t->to_prepare_to_store (t, regcache);
}

/* The branch trace frame cache.  */

struct btrace_frame_cache
{
  /* The thread.  */
  struct thread_info *tp;

  /* The frame info.  */
  struct frame_info *frame;

  /* The branch trace function segment.  */
  const struct btrace_function *bfun;
};

/* A struct btrace_frame_cache hash table indexed by NEXT.  */

static htab_t bfcache;

/* hash_f for htab_create_alloc of bfcache.  */

static hashval_t
bfcache_hash (const void *arg)
{
  const struct btrace_frame_cache *cache
    = (const struct btrace_frame_cache *) arg;

  return htab_hash_pointer (cache->frame);
}

/* eq_f for htab_create_alloc of bfcache.  */

static int
bfcache_eq (const void *arg1, const void *arg2)
{
  const struct btrace_frame_cache *cache1
    = (const struct btrace_frame_cache *) arg1;
  const struct btrace_frame_cache *cache2
    = (const struct btrace_frame_cache *) arg2;

  return cache1->frame == cache2->frame;
}

/* Create a new btrace frame cache.  */

static struct btrace_frame_cache *
bfcache_new (struct frame_info *frame)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
  cache->frame = frame;

  slot = htab_find_slot (bfcache, cache, INSERT);
  gdb_assert (*slot == NULL);
  *slot = cache;

  return cache;
}

/* Extract the branch trace function from a branch trace frame.  */

static const struct btrace_function *
btrace_get_frame_function (struct frame_info *frame)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;
  struct btrace_frame_cache pattern;
  void **slot;

  pattern.frame = frame;

  slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
  if (slot == NULL)
    return NULL;

  cache = (const struct btrace_frame_cache *) *slot;
  return cache->bfun;
}

/* Implement stop_reason method for record_btrace_frame_unwind.  */

static enum unwind_stop_reason
record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
                                        void **this_cache)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;

  cache = (const struct btrace_frame_cache *) *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  if (bfun->up == 0)
    return UNWIND_UNAVAILABLE;

  return UNWIND_NO_REASON;
}

/* Implement this_id method for record_btrace_frame_unwind.  */

static void
record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
                             struct frame_id *this_id)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;
  struct btrace_call_iterator it;
  CORE_ADDR code, special;

  cache = (const struct btrace_frame_cache *) *this_cache;

  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  while (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->prev) != 0)
    bfun = btrace_call_get (&it);

  code = get_frame_func (this_frame);
  special = bfun->number;

  *this_id = frame_id_build_unavailable_stack_special (code, special);

  DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
         btrace_get_bfun_name (cache->bfun),
         core_addr_to_string_nz (this_id->code_addr),
         core_addr_to_string_nz (this_id->special_addr));
}

/* Implement prev_register method for record_btrace_frame_unwind.  */

static struct value *
record_btrace_frame_prev_register (struct frame_info *this_frame,
                                   void **this_cache,
                                   int regnum)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun, *caller;
  const struct btrace_insn *insn;
  struct btrace_call_iterator it;
  struct gdbarch *gdbarch;
  CORE_ADDR pc;
  int pcreg;

  gdbarch = get_frame_arch (this_frame);
  pcreg = gdbarch_pc_regnum (gdbarch);
  if (pcreg < 0 || regnum != pcreg)
    throw_error (NOT_AVAILABLE_ERROR,
                 _("Registers are not available in btrace record history"));

  cache = (const struct btrace_frame_cache *) *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  if (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->up) == 0)
    throw_error (NOT_AVAILABLE_ERROR,
                 _("No caller in btrace record history"));

  caller = btrace_call_get (&it);

  if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
    {
      insn = VEC_index (btrace_insn_s, caller->insn, 0);
      pc = insn->pc;
    }
  else
    {
      insn = VEC_last (btrace_insn_s, caller->insn);
      pc = insn->pc;

      pc += gdb_insn_length (gdbarch, pc);
    }

  DEBUG ("[frame] unwound PC in %s on level %d: %s",
         btrace_get_bfun_name (bfun), bfun->level,
         core_addr_to_string_nz (pc));

  return frame_unwind_got_address (this_frame, regnum, pc);
}

/* Implement sniffer method for record_btrace_frame_unwind.  */

static int
record_btrace_frame_sniffer (const struct frame_unwind *self,
                             struct frame_info *this_frame,
                             void **this_cache)
{
  const struct btrace_function *bfun;
  struct btrace_frame_cache *cache;
  struct thread_info *tp;
  struct frame_info *next;

  /* THIS_FRAME does not contain a reference to its thread.  */
  tp = find_thread_ptid (inferior_ptid);
  gdb_assert (tp != NULL);

  bfun = NULL;
  next = get_next_frame (this_frame);
  if (next == NULL)
    {
      const struct btrace_insn_iterator *replay;

      replay = tp->btrace.replay;
      if (replay != NULL)
        bfun = &replay->btinfo->functions[replay->call_index];
    }
  else
    {
      const struct btrace_function *callee;
      struct btrace_call_iterator it;

      callee = btrace_get_frame_function (next);
      if (callee == NULL || (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
        return 0;

      if (btrace_find_call_by_number (&it, &tp->btrace, callee->up) == 0)
        return 0;

      bfun = btrace_call_get (&it);
    }

  if (bfun == NULL)
    return 0;

  DEBUG ("[frame] sniffed frame for %s on level %d",
         btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = tp;
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}

/* Implement sniffer method for record_btrace_tailcall_frame_unwind.  */

static int
record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
                                      struct frame_info *this_frame,
                                      void **this_cache)
{
  const struct btrace_function *bfun, *callee;
  struct btrace_frame_cache *cache;
  struct btrace_call_iterator it;
  struct frame_info *next;
  struct thread_info *tinfo;

  next = get_next_frame (this_frame);
  if (next == NULL)
    return 0;

  callee = btrace_get_frame_function (next);
  if (callee == NULL)
    return 0;

  if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
    return 0;

  tinfo = find_thread_ptid (inferior_ptid);
  if (btrace_find_call_by_number (&it, &tinfo->btrace, callee->up) == 0)
    return 0;

  bfun = btrace_call_get (&it);

  DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
         btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = tinfo;
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}

static void
record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = (struct btrace_frame_cache *) this_cache;

  slot = htab_find_slot (bfcache, cache, NO_INSERT);
  gdb_assert (slot != NULL);

  htab_remove_elt (bfcache, cache);
}

/* btrace recording does not store previous memory content, nor the stack
   frames' content.  Any unwinding would return erroneous results as the stack
   contents no longer match the changed PC value restored from history.
   Therefore this unwinder reports any possibly unwound registers as
   <unavailable>.  */
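/* In practice only the caller's PC can be reconstructed from the recorded
   trace (see record_btrace_frame_prev_register above); every other register
   is reported as not available.  */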
1777
0b722aec 1778const struct frame_unwind record_btrace_frame_unwind =
cecac1ab
MM
1779{
1780 NORMAL_FRAME,
1781 record_btrace_frame_unwind_stop_reason,
1782 record_btrace_frame_this_id,
1783 record_btrace_frame_prev_register,
1784 NULL,
0b722aec
MM
1785 record_btrace_frame_sniffer,
1786 record_btrace_frame_dealloc_cache
1787};
1788
1789const struct frame_unwind record_btrace_tailcall_frame_unwind =
1790{
1791 TAILCALL_FRAME,
1792 record_btrace_frame_unwind_stop_reason,
1793 record_btrace_frame_this_id,
1794 record_btrace_frame_prev_register,
1795 NULL,
1796 record_btrace_tailcall_frame_sniffer,
1797 record_btrace_frame_dealloc_cache
cecac1ab 1798};
b2f4cfde 1799
ac01945b
TT
1800/* Implement the to_get_unwinder method. */
1801
1802static const struct frame_unwind *
1803record_btrace_to_get_unwinder (struct target_ops *self)
1804{
1805 return &record_btrace_frame_unwind;
1806}
1807
1808/* Implement the to_get_tailcall_unwinder method. */
1809
1810static const struct frame_unwind *
1811record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
1812{
1813 return &record_btrace_tailcall_frame_unwind;
1814}
1815
987e68b1
MM
1816/* Return a human-readable string for FLAG. */
1817
1818static const char *
1819btrace_thread_flag_to_str (enum btrace_thread_flag flag)
1820{
1821 switch (flag)
1822 {
1823 case BTHR_STEP:
1824 return "step";
1825
1826 case BTHR_RSTEP:
1827 return "reverse-step";
1828
1829 case BTHR_CONT:
1830 return "cont";
1831
1832 case BTHR_RCONT:
1833 return "reverse-cont";
1834
1835 case BTHR_STOP:
1836 return "stop";
1837 }
1838
1839 return "<invalid>";
1840}
1841
52834460
MM
1842/* Indicate that TP should be resumed according to FLAG. */
1843
1844static void
1845record_btrace_resume_thread (struct thread_info *tp,
1846 enum btrace_thread_flag flag)
1847{
1848 struct btrace_thread_info *btinfo;
1849
43792cf0 1850 DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp),
987e68b1 1851 target_pid_to_str (tp->ptid), flag, btrace_thread_flag_to_str (flag));
52834460
MM
1852
1853 btinfo = &tp->btrace;
1854
52834460
MM
1855 /* Fetch the latest branch trace. */
1856 btrace_fetch (tp);
1857
0ca912df
MM
1858 /* A resume request overwrites a preceding resume or stop request. */
1859 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
52834460
MM
1860 btinfo->flags |= flag;
1861}
1862
ec71cc2f
MM
1863/* Get the current frame for TP. */
1864
1865static struct frame_info *
1866get_thread_current_frame (struct thread_info *tp)
1867{
1868 struct frame_info *frame;
1869 ptid_t old_inferior_ptid;
1870 int executing;
1871
1872 /* Set INFERIOR_PTID, which is implicitly used by get_current_frame. */
1873 old_inferior_ptid = inferior_ptid;
1874 inferior_ptid = tp->ptid;
1875
1876 /* Clear the executing flag to allow changes to the current frame.
1877 We are not actually running, yet. We just started a reverse execution
1878 command or a record goto command.
1879 For the latter, EXECUTING is false and this has no effect.
1880 For the former, EXECUTING is true and we're in to_wait, about to
1881 move the thread. Since we need to recompute the stack, we temporarily
1882 set EXECUTING to flase. */
1883 executing = is_executing (inferior_ptid);
1884 set_executing (inferior_ptid, 0);
1885
1886 frame = NULL;
1887 TRY
1888 {
1889 frame = get_current_frame ();
1890 }
1891 CATCH (except, RETURN_MASK_ALL)
1892 {
1893 /* Restore the previous execution state. */
1894 set_executing (inferior_ptid, executing);
1895
1896 /* Restore the previous inferior_ptid. */
1897 inferior_ptid = old_inferior_ptid;
1898
1899 throw_exception (except);
1900 }
1901 END_CATCH
1902
1903 /* Restore the previous execution state. */
1904 set_executing (inferior_ptid, executing);
1905
1906 /* Restore the previous inferior_ptid. */
1907 inferior_ptid = old_inferior_ptid;
1908
1909 return frame;
1910}
1911
52834460
MM
1912/* Start replaying a thread. */
1913
1914static struct btrace_insn_iterator *
1915record_btrace_start_replaying (struct thread_info *tp)
1916{
52834460
MM
1917 struct btrace_insn_iterator *replay;
1918 struct btrace_thread_info *btinfo;
52834460
MM
1919
1920 btinfo = &tp->btrace;
1921 replay = NULL;
1922
1923 /* We can't start replaying without trace. */
b54b03bd 1924 if (btinfo->functions.empty ())
52834460
MM
1925 return NULL;
1926
52834460
MM
1927 /* GDB stores the current frame_id when stepping in order to detect steps
1928 into subroutines.
1929 Since frames are computed differently when we're replaying, we need to
1930 recompute those stored frames and fix them up so we can still detect
1931 subroutines after we started replaying. */
492d29ea 1932 TRY
52834460
MM
1933 {
1934 struct frame_info *frame;
1935 struct frame_id frame_id;
1936 int upd_step_frame_id, upd_step_stack_frame_id;
1937
1938 /* The current frame without replaying - computed via normal unwind. */
ec71cc2f 1939 frame = get_thread_current_frame (tp);
52834460
MM
1940 frame_id = get_frame_id (frame);
1941
1942 /* Check if we need to update any stepping-related frame id's. */
1943 upd_step_frame_id = frame_id_eq (frame_id,
1944 tp->control.step_frame_id);
1945 upd_step_stack_frame_id = frame_id_eq (frame_id,
1946 tp->control.step_stack_frame_id);
1947
1948 /* We start replaying at the end of the branch trace. This corresponds
1949 to the current instruction. */
8d749320 1950 replay = XNEW (struct btrace_insn_iterator);
52834460
MM
1951 btrace_insn_end (replay, btinfo);
1952
31fd9caa
MM
1953 /* Skip gaps at the end of the trace. */
1954 while (btrace_insn_get (replay) == NULL)
1955 {
1956 unsigned int steps;
1957
1958 steps = btrace_insn_prev (replay, 1);
1959 if (steps == 0)
1960 error (_("No trace."));
1961 }
1962
52834460
MM
1963 /* We're not replaying, yet. */
1964 gdb_assert (btinfo->replay == NULL);
1965 btinfo->replay = replay;
1966
1967 /* Make sure we're not using any stale registers. */
1968 registers_changed_ptid (tp->ptid);
1969
1970 /* The current frame with replaying - computed via btrace unwind. */
ec71cc2f 1971 frame = get_thread_current_frame (tp);
52834460
MM
1972 frame_id = get_frame_id (frame);
1973
1974 /* Replace stepping related frames where necessary. */
1975 if (upd_step_frame_id)
1976 tp->control.step_frame_id = frame_id;
1977 if (upd_step_stack_frame_id)
1978 tp->control.step_stack_frame_id = frame_id;
1979 }
492d29ea 1980 CATCH (except, RETURN_MASK_ALL)
52834460
MM
1981 {
1982 xfree (btinfo->replay);
1983 btinfo->replay = NULL;
1984
1985 registers_changed_ptid (tp->ptid);
1986
1987 throw_exception (except);
1988 }
492d29ea 1989 END_CATCH
52834460
MM
1990
1991 return replay;
1992}
1993
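/* Note (added annotation): the iterator allocated above is owned by the
   thread's btrace_thread_info; it is released again by
   record_btrace_stop_replaying below.  */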
1994/* Stop replaying a thread. */
1995
1996static void
1997record_btrace_stop_replaying (struct thread_info *tp)
1998{
1999 struct btrace_thread_info *btinfo;
2000
2001 btinfo = &tp->btrace;
2002
2003 xfree (btinfo->replay);
2004 btinfo->replay = NULL;
2005
2006 /* Make sure we're not leaving any stale registers. */
2007 registers_changed_ptid (tp->ptid);
2008}
2009
e3cfc1c7
MM
2010/* Stop replaying TP if it is at the end of its execution history. */
2011
2012static void
2013record_btrace_stop_replaying_at_end (struct thread_info *tp)
2014{
2015 struct btrace_insn_iterator *replay, end;
2016 struct btrace_thread_info *btinfo;
2017
2018 btinfo = &tp->btrace;
2019 replay = btinfo->replay;
2020
2021 if (replay == NULL)
2022 return;
2023
2024 btrace_insn_end (&end, btinfo);
2025
2026 if (btrace_insn_cmp (replay, &end) == 0)
2027 record_btrace_stop_replaying (tp);
2028}
2029
b2f4cfde
MM
2030/* The to_resume method of target record-btrace. */
2031
2032static void
2033record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
2034 enum gdb_signal signal)
2035{
0ca912df 2036 struct thread_info *tp;
d2939ba2 2037 enum btrace_thread_flag flag, cflag;
52834460 2038
987e68b1
MM
2039 DEBUG ("resume %s: %s%s", target_pid_to_str (ptid),
2040 execution_direction == EXEC_REVERSE ? "reverse-" : "",
2041 step ? "step" : "cont");
52834460 2042
0ca912df
MM
2043 /* Store the execution direction of the last resume.
2044
2045 If there is more than one to_resume call, we have to rely on infrun
2046 to not change the execution direction in-between. */
70ad5bff
MM
2047 record_btrace_resume_exec_dir = execution_direction;
2048
0ca912df 2049 /* As long as we're not replaying, just forward the request.
52834460 2050
0ca912df
MM
2051 For non-stop targets this means that no thread is replaying. In order to
2052 make progress, we may need to explicitly move replaying threads to the end
2053 of their execution history. */
a52eab48
MM
2054 if ((execution_direction != EXEC_REVERSE)
2055 && !record_btrace_is_replaying (ops, minus_one_ptid))
b2f4cfde 2056 {
e75fdfca 2057 ops = ops->beneath;
04c4fe8c
MM
2058 ops->to_resume (ops, ptid, step, signal);
2059 return;
b2f4cfde
MM
2060 }
2061
52834460 2062 /* Compute the btrace thread flag for the requested move. */
d2939ba2
MM
2063 if (execution_direction == EXEC_REVERSE)
2064 {
2065 flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;
2066 cflag = BTHR_RCONT;
2067 }
52834460 2068 else
d2939ba2
MM
2069 {
2070 flag = step == 0 ? BTHR_CONT : BTHR_STEP;
2071 cflag = BTHR_CONT;
2072 }
52834460 2073
52834460 2074 /* We just indicate the resume intent here. The actual stepping happens in
d2939ba2
MM
2075 record_btrace_wait below.
2076
2077 For all-stop targets, we only step INFERIOR_PTID and continue others. */
2078 if (!target_is_non_stop_p ())
2079 {
2080 gdb_assert (ptid_match (inferior_ptid, ptid));
2081
2082 ALL_NON_EXITED_THREADS (tp)
2083 if (ptid_match (tp->ptid, ptid))
2084 {
2085 if (ptid_match (tp->ptid, inferior_ptid))
2086 record_btrace_resume_thread (tp, flag);
2087 else
2088 record_btrace_resume_thread (tp, cflag);
2089 }
2090 }
2091 else
2092 {
2093 ALL_NON_EXITED_THREADS (tp)
2094 if (ptid_match (tp->ptid, ptid))
2095 record_btrace_resume_thread (tp, flag);
2096 }
70ad5bff
MM
2097
2098 /* Async support. */
2099 if (target_can_async_p ())
2100 {
6a3753b3 2101 target_async (1);
70ad5bff
MM
2102 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2103 }
52834460
MM
2104}
2105
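/* Illustrative mapping of the code above (not part of the original file):
   a forward "stepi" on an all-stop target requests BTHR_STEP for
   INFERIOR_PTID and BTHR_CONT for the other matching threads, while a
   "reverse-continue" requests BTHR_RCONT for every matching thread.  */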
85ad3aaf
PA
2106/* The to_commit_resume method of target record-btrace. */
2107
2108static void
2109record_btrace_commit_resume (struct target_ops *ops)
2110{
2111 if ((execution_direction != EXEC_REVERSE)
2112 && !record_btrace_is_replaying (ops, minus_one_ptid))
2113 ops->beneath->to_commit_resume (ops->beneath);
2114}
2115
987e68b1
MM
2116/* Cancel resuming TP. */
2117
2118static void
2119record_btrace_cancel_resume (struct thread_info *tp)
2120{
2121 enum btrace_thread_flag flags;
2122
2123 flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
2124 if (flags == 0)
2125 return;
2126
43792cf0
PA
2127 DEBUG ("cancel resume thread %s (%s): %x (%s)",
2128 print_thread_id (tp),
987e68b1
MM
2129 target_pid_to_str (tp->ptid), flags,
2130 btrace_thread_flag_to_str (flags));
2131
2132 tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
e3cfc1c7 2133 record_btrace_stop_replaying_at_end (tp);
52834460
MM
2134}
2135
2136/* Return a target_waitstatus indicating that we ran out of history. */
2137
2138static struct target_waitstatus
2139btrace_step_no_history (void)
2140{
2141 struct target_waitstatus status;
2142
2143 status.kind = TARGET_WAITKIND_NO_HISTORY;
2144
2145 return status;
2146}
2147
2148/* Return a target_waitstatus indicating that a step finished. */
2149
2150static struct target_waitstatus
2151btrace_step_stopped (void)
2152{
2153 struct target_waitstatus status;
2154
2155 status.kind = TARGET_WAITKIND_STOPPED;
2156 status.value.sig = GDB_SIGNAL_TRAP;
2157
2158 return status;
2159}
2160
6e4879f0
MM
2161/* Return a target_waitstatus indicating that a thread was stopped as
2162 requested. */
2163
2164static struct target_waitstatus
2165btrace_step_stopped_on_request (void)
2166{
2167 struct target_waitstatus status;
2168
2169 status.kind = TARGET_WAITKIND_STOPPED;
2170 status.value.sig = GDB_SIGNAL_0;
2171
2172 return status;
2173}
2174
d825d248
MM
2175/* Return a target_waitstatus indicating a spurious stop. */
2176
2177static struct target_waitstatus
2178btrace_step_spurious (void)
2179{
2180 struct target_waitstatus status;
2181
2182 status.kind = TARGET_WAITKIND_SPURIOUS;
2183
2184 return status;
2185}
2186
e3cfc1c7
MM
2187/* Return a target_waitstatus indicating that the thread was not resumed. */
2188
2189static struct target_waitstatus
2190btrace_step_no_resumed (void)
2191{
2192 struct target_waitstatus status;
2193
2194 status.kind = TARGET_WAITKIND_NO_RESUMED;
2195
2196 return status;
2197}
2198
2199/* Return a target_waitstatus indicating that we should wait again. */
2200
2201static struct target_waitstatus
2202btrace_step_again (void)
2203{
2204 struct target_waitstatus status;
2205
2206 status.kind = TARGET_WAITKIND_IGNORE;
2207
2208 return status;
2209}
2210
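/* Note (added annotation): the helpers above build the target_waitstatus
   values used by the stepping code below.  TARGET_WAITKIND_IGNORE means
   "keep stepping this thread", TARGET_WAITKIND_SPURIOUS means "the step
   completed without a reportable event", and the remaining kinds are
   reported back to infrun via record_btrace_wait.  */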
52834460
MM
2211/* Clear the record histories. */
2212
2213static void
2214record_btrace_clear_histories (struct btrace_thread_info *btinfo)
2215{
2216 xfree (btinfo->insn_history);
2217 xfree (btinfo->call_history);
2218
2219 btinfo->insn_history = NULL;
2220 btinfo->call_history = NULL;
2221}
2222
3c615f99
MM
2223/* Check whether TP's current replay position is at a breakpoint. */
2224
2225static int
2226record_btrace_replay_at_breakpoint (struct thread_info *tp)
2227{
2228 struct btrace_insn_iterator *replay;
2229 struct btrace_thread_info *btinfo;
2230 const struct btrace_insn *insn;
2231 struct inferior *inf;
2232
2233 btinfo = &tp->btrace;
2234 replay = btinfo->replay;
2235
2236 if (replay == NULL)
2237 return 0;
2238
2239 insn = btrace_insn_get (replay);
2240 if (insn == NULL)
2241 return 0;
2242
2243 inf = find_inferior_ptid (tp->ptid);
2244 if (inf == NULL)
2245 return 0;
2246
2247 return record_check_stopped_by_breakpoint (inf->aspace, insn->pc,
2248 &btinfo->stop_reason);
2249}
2250
d825d248 2251/* Step one instruction in forward direction. */
52834460
MM
2252
2253static struct target_waitstatus
d825d248 2254record_btrace_single_step_forward (struct thread_info *tp)
52834460 2255{
b61ce85c 2256 struct btrace_insn_iterator *replay, end, start;
52834460 2257 struct btrace_thread_info *btinfo;
52834460 2258
d825d248
MM
2259 btinfo = &tp->btrace;
2260 replay = btinfo->replay;
2261
2262 /* We're done if we're not replaying. */
2263 if (replay == NULL)
2264 return btrace_step_no_history ();
2265
011c71b6
MM
2266 /* Check if we're stepping a breakpoint. */
2267 if (record_btrace_replay_at_breakpoint (tp))
2268 return btrace_step_stopped ();
2269
b61ce85c
MM
2270 /* Skip gaps during replay. If we end up at a gap (at the end of the trace),
2271 jump back to the instruction at which we started. */
2272 start = *replay;
d825d248
MM
2273 do
2274 {
2275 unsigned int steps;
2276
e3cfc1c7
MM
2277 /* We will bail out here if we continue stepping after reaching the end
2278 of the execution history. */
d825d248
MM
2279 steps = btrace_insn_next (replay, 1);
2280 if (steps == 0)
b61ce85c
MM
2281 {
2282 *replay = start;
2283 return btrace_step_no_history ();
2284 }
d825d248
MM
2285 }
2286 while (btrace_insn_get (replay) == NULL);
2287
2288 /* Determine the end of the instruction trace. */
2289 btrace_insn_end (&end, btinfo);
2290
e3cfc1c7
MM
2291 /* The execution trace contains (and ends with) the current instruction.
2292 This instruction has not been executed, yet, so the trace really ends
2293 one instruction earlier. */
d825d248 2294 if (btrace_insn_cmp (replay, &end) == 0)
e3cfc1c7 2295 return btrace_step_no_history ();
d825d248
MM
2296
2297 return btrace_step_spurious ();
2298}
2299
2300/* Step one instruction in backward direction. */
2301
2302static struct target_waitstatus
2303record_btrace_single_step_backward (struct thread_info *tp)
2304{
b61ce85c 2305 struct btrace_insn_iterator *replay, start;
d825d248 2306 struct btrace_thread_info *btinfo;
e59fa00f 2307
52834460
MM
2308 btinfo = &tp->btrace;
2309 replay = btinfo->replay;
2310
d825d248
MM
2311 /* Start replaying if we're not already doing so. */
2312 if (replay == NULL)
2313 replay = record_btrace_start_replaying (tp);
2314
2315 /* If we can't step any further, we reached the end of the history.
b61ce85c
MM
2316 Skip gaps during replay. If we end up at a gap (at the beginning of
2317 the trace), jump back to the instruction at which we started. */
2318 start = *replay;
d825d248
MM
2319 do
2320 {
2321 unsigned int steps;
2322
2323 steps = btrace_insn_prev (replay, 1);
2324 if (steps == 0)
b61ce85c
MM
2325 {
2326 *replay = start;
2327 return btrace_step_no_history ();
2328 }
d825d248
MM
2329 }
2330 while (btrace_insn_get (replay) == NULL);
2331
011c71b6
MM
2332 /* Check if we're stepping a breakpoint.
2333
2334 For reverse-stepping, this check is after the step. There is logic in
2335 infrun.c that handles reverse-stepping separately. See, for example,
2336 proceed and adjust_pc_after_break.
2337
2338 This code assumes that for reverse-stepping, PC points to the last
2339 de-executed instruction, whereas for forward-stepping PC points to the
2340 next to-be-executed instruction. */
2341 if (record_btrace_replay_at_breakpoint (tp))
2342 return btrace_step_stopped ();
2343
d825d248
MM
2344 return btrace_step_spurious ();
2345}
2346
2347/* Step a single thread. */
2348
2349static struct target_waitstatus
2350record_btrace_step_thread (struct thread_info *tp)
2351{
2352 struct btrace_thread_info *btinfo;
2353 struct target_waitstatus status;
2354 enum btrace_thread_flag flags;
2355
2356 btinfo = &tp->btrace;
2357
6e4879f0
MM
2358 flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
2359 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
52834460 2360
43792cf0 2361 DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp),
987e68b1
MM
2362 target_pid_to_str (tp->ptid), flags,
2363 btrace_thread_flag_to_str (flags));
52834460 2364
6e4879f0
MM
2365 /* We can't step without an execution history. */
2366 if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
2367 return btrace_step_no_history ();
2368
52834460
MM
2369 switch (flags)
2370 {
2371 default:
2372 internal_error (__FILE__, __LINE__, _("invalid stepping type."));
2373
6e4879f0
MM
2374 case BTHR_STOP:
2375 return btrace_step_stopped_on_request ();
2376
52834460 2377 case BTHR_STEP:
d825d248
MM
2378 status = record_btrace_single_step_forward (tp);
2379 if (status.kind != TARGET_WAITKIND_SPURIOUS)
e3cfc1c7 2380 break;
52834460
MM
2381
2382 return btrace_step_stopped ();
2383
2384 case BTHR_RSTEP:
d825d248
MM
2385 status = record_btrace_single_step_backward (tp);
2386 if (status.kind != TARGET_WAITKIND_SPURIOUS)
e3cfc1c7 2387 break;
52834460
MM
2388
2389 return btrace_step_stopped ();
2390
2391 case BTHR_CONT:
e3cfc1c7
MM
2392 status = record_btrace_single_step_forward (tp);
2393 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2394 break;
52834460 2395
e3cfc1c7
MM
2396 btinfo->flags |= flags;
2397 return btrace_step_again ();
52834460
MM
2398
2399 case BTHR_RCONT:
e3cfc1c7
MM
2400 status = record_btrace_single_step_backward (tp);
2401 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2402 break;
52834460 2403
e3cfc1c7
MM
2404 btinfo->flags |= flags;
2405 return btrace_step_again ();
2406 }
d825d248 2407
e3cfc1c7
MM
2408 /* We keep threads moving at the end of their execution history. The to_wait
2409 method will stop the thread for whom the event is reported. */
2410 if (status.kind == TARGET_WAITKIND_NO_HISTORY)
2411 btinfo->flags |= flags;
52834460 2412
e3cfc1c7 2413 return status;
b2f4cfde
MM
2414}
2415
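/* Note (added annotation): a BTHR_CONT or BTHR_RCONT request is thus broken
   down into a sequence of single steps.  Returning TARGET_WAITKIND_IGNORE
   keeps the thread on the work list in record_btrace_wait until it hits a
   breakpoint or runs out of execution history.  */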
e3cfc1c7
MM
2416/* A vector of threads. */
2417
2418typedef struct thread_info * tp_t;
2419DEF_VEC_P (tp_t);
2420
a6b5be76
MM
2421/* Announce further events if necessary. */
2422
2423static void
2424record_btrace_maybe_mark_async_event (const VEC (tp_t) *moving,
2425 const VEC (tp_t) *no_history)
2426{
2427 int more_moving, more_no_history;
2428
2429 more_moving = !VEC_empty (tp_t, moving);
2430 more_no_history = !VEC_empty (tp_t, no_history);
2431
2432 if (!more_moving && !more_no_history)
2433 return;
2434
2435 if (more_moving)
2436 DEBUG ("movers pending");
2437
2438 if (more_no_history)
2439 DEBUG ("no-history pending");
2440
2441 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2442}
2443
b2f4cfde
MM
2444/* The to_wait method of target record-btrace. */
2445
2446static ptid_t
2447record_btrace_wait (struct target_ops *ops, ptid_t ptid,
2448 struct target_waitstatus *status, int options)
2449{
e3cfc1c7
MM
2450 VEC (tp_t) *moving, *no_history;
2451 struct thread_info *tp, *eventing;
2452 struct cleanup *cleanups = make_cleanup (null_cleanup, NULL);
52834460
MM
2453
2454 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);
2455
b2f4cfde 2456 /* As long as we're not replaying, just forward the request. */
a52eab48
MM
2457 if ((execution_direction != EXEC_REVERSE)
2458 && !record_btrace_is_replaying (ops, minus_one_ptid))
b2f4cfde 2459 {
e75fdfca
TT
2460 ops = ops->beneath;
2461 return ops->to_wait (ops, ptid, status, options);
b2f4cfde
MM
2462 }
2463
e3cfc1c7
MM
2464 moving = NULL;
2465 no_history = NULL;
2466
2467 make_cleanup (VEC_cleanup (tp_t), &moving);
2468 make_cleanup (VEC_cleanup (tp_t), &no_history);
2469
2470 /* Keep a work list of moving threads. */
2471 ALL_NON_EXITED_THREADS (tp)
2472 if (ptid_match (tp->ptid, ptid)
2473 && ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0))
2474 VEC_safe_push (tp_t, moving, tp);
2475
2476 if (VEC_empty (tp_t, moving))
52834460 2477 {
e3cfc1c7 2478 *status = btrace_step_no_resumed ();
52834460 2479
e3cfc1c7 2480 DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid),
23fdd69e 2481 target_waitstatus_to_string (status).c_str ());
e3cfc1c7
MM
2482
2483 do_cleanups (cleanups);
2484 return null_ptid;
52834460
MM
2485 }
2486
e3cfc1c7
MM
2487 /* Step moving threads one by one, one step each, until either one thread
2488 reports an event or we run out of threads to step.
2489
2490 When stepping more than one thread, chances are that some threads reach
2491 the end of their execution history earlier than others. If we reported
2492 this immediately, all-stop on top of non-stop would stop all threads and
2493 resume the same threads next time. And we would report the same thread
2494 having reached the end of its execution history again.
2495
2496 In the worst case, this would starve the other threads. But even if other
2497 threads would be allowed to make progress, this would result in far too
2498 many intermediate stops.
2499
2500 We therefore delay the reporting of "no execution history" until we have
2501 nothing else to report. By this time, all threads should have moved to
2502 either the beginning or the end of their execution history. There will
2503 be a single user-visible stop. */
2504 eventing = NULL;
2505 while ((eventing == NULL) && !VEC_empty (tp_t, moving))
2506 {
2507 unsigned int ix;
2508
2509 ix = 0;
2510 while ((eventing == NULL) && VEC_iterate (tp_t, moving, ix, tp))
2511 {
2512 *status = record_btrace_step_thread (tp);
2513
2514 switch (status->kind)
2515 {
2516 case TARGET_WAITKIND_IGNORE:
2517 ix++;
2518 break;
2519
2520 case TARGET_WAITKIND_NO_HISTORY:
2521 VEC_safe_push (tp_t, no_history,
2522 VEC_ordered_remove (tp_t, moving, ix));
2523 break;
2524
2525 default:
2526 eventing = VEC_unordered_remove (tp_t, moving, ix);
2527 break;
2528 }
2529 }
2530 }
2531
2532 if (eventing == NULL)
2533 {
2534 /* We started with at least one moving thread. This thread must have
2535 either stopped or reached the end of its execution history.
2536
2537 In the former case, EVENTING must not be NULL.
2538 In the latter case, NO_HISTORY must not be empty. */
2539 gdb_assert (!VEC_empty (tp_t, no_history));
2540
2541 /* We kept threads moving at the end of their execution history. Stop
2542 EVENTING now that we are going to report its stop. */
2543 eventing = VEC_unordered_remove (tp_t, no_history, 0);
2544 eventing->btrace.flags &= ~BTHR_MOVE;
2545
2546 *status = btrace_step_no_history ();
2547 }
2548
2549 gdb_assert (eventing != NULL);
2550
2551 /* We kept threads replaying at the end of their execution history. Stop
2552 replaying EVENTING now that we are going to report its stop. */
2553 record_btrace_stop_replaying_at_end (eventing);
52834460
MM
2554
2555 /* Stop all other threads. */
5953356c 2556 if (!target_is_non_stop_p ())
e3cfc1c7
MM
2557 ALL_NON_EXITED_THREADS (tp)
2558 record_btrace_cancel_resume (tp);
52834460 2559
a6b5be76
MM
2560 /* In async mode, we need to announce further events. */
2561 if (target_is_async_p ())
2562 record_btrace_maybe_mark_async_event (moving, no_history);
2563
52834460 2564 /* Start record histories anew from the current position. */
e3cfc1c7 2565 record_btrace_clear_histories (&eventing->btrace);
52834460
MM
2566
2567 /* We moved the replay position but did not update registers. */
e3cfc1c7
MM
2568 registers_changed_ptid (eventing->ptid);
2569
43792cf0
PA
2570 DEBUG ("wait ended by thread %s (%s): %s",
2571 print_thread_id (eventing),
e3cfc1c7 2572 target_pid_to_str (eventing->ptid),
23fdd69e 2573 target_waitstatus_to_string (status).c_str ());
52834460 2574
e3cfc1c7
MM
2575 do_cleanups (cleanups);
2576 return eventing->ptid;
52834460
MM
2577}
2578
6e4879f0
MM
2579/* The to_stop method of target record-btrace. */
2580
2581static void
2582record_btrace_stop (struct target_ops *ops, ptid_t ptid)
2583{
2584 DEBUG ("stop %s", target_pid_to_str (ptid));
2585
2586 /* As long as we're not replaying, just forward the request. */
a52eab48
MM
2587 if ((execution_direction != EXEC_REVERSE)
2588 && !record_btrace_is_replaying (ops, minus_one_ptid))
6e4879f0
MM
2589 {
2590 ops = ops->beneath;
2591 ops->to_stop (ops, ptid);
2592 }
2593 else
2594 {
2595 struct thread_info *tp;
2596
2597 ALL_NON_EXITED_THREADS (tp)
2598 if (ptid_match (tp->ptid, ptid))
2599 {
2600 tp->btrace.flags &= ~BTHR_MOVE;
2601 tp->btrace.flags |= BTHR_STOP;
2602 }
2603 }
2604}
2605
52834460
MM
2606/* The to_can_execute_reverse method of target record-btrace. */
2607
2608static int
19db3e69 2609record_btrace_can_execute_reverse (struct target_ops *self)
52834460
MM
2610{
2611 return 1;
2612}
2613
9e8915c6 2614/* The to_stopped_by_sw_breakpoint method of target record-btrace. */
52834460 2615
9e8915c6
PA
2616static int
2617record_btrace_stopped_by_sw_breakpoint (struct target_ops *ops)
52834460 2618{
a52eab48 2619 if (record_btrace_is_replaying (ops, minus_one_ptid))
9e8915c6
PA
2620 {
2621 struct thread_info *tp = inferior_thread ();
2622
2623 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2624 }
2625
2626 return ops->beneath->to_stopped_by_sw_breakpoint (ops->beneath);
2627}
2628
2629/* The to_supports_stopped_by_sw_breakpoint method of target
2630 record-btrace. */
2631
2632static int
2633record_btrace_supports_stopped_by_sw_breakpoint (struct target_ops *ops)
2634{
a52eab48 2635 if (record_btrace_is_replaying (ops, minus_one_ptid))
9e8915c6
PA
2636 return 1;
2637
2638 return ops->beneath->to_supports_stopped_by_sw_breakpoint (ops->beneath);
2639}
2640
2641 /* The to_stopped_by_hw_breakpoint method of target record-btrace. */
2642
2643static int
2644record_btrace_stopped_by_hw_breakpoint (struct target_ops *ops)
2645{
a52eab48 2646 if (record_btrace_is_replaying (ops, minus_one_ptid))
9e8915c6
PA
2647 {
2648 struct thread_info *tp = inferior_thread ();
2649
2650 return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2651 }
2652
2653 return ops->beneath->to_stopped_by_hw_breakpoint (ops->beneath);
2654}
2655
2656/* The to_supports_stopped_by_hw_breakpoint method of target
2657 record-btrace. */
2658
2659static int
2660record_btrace_supports_stopped_by_hw_breakpoint (struct target_ops *ops)
2661{
a52eab48 2662 if (record_btrace_is_replaying (ops, minus_one_ptid))
9e8915c6 2663 return 1;
52834460 2664
9e8915c6 2665 return ops->beneath->to_supports_stopped_by_hw_breakpoint (ops->beneath);
b2f4cfde
MM
2666}
2667
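/* Note (added annotation): while replaying, breakpoint hits are detected by
   record-btrace itself (see record_btrace_replay_at_breakpoint), so the four
   methods above report the cached stop reason and claim support directly
   instead of asking the target beneath.  */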
e8032dde 2668/* The to_update_thread_list method of target record-btrace. */
e2887aa3
MM
2669
2670static void
e8032dde 2671record_btrace_update_thread_list (struct target_ops *ops)
e2887aa3 2672{
e8032dde 2673 /* We don't add or remove threads during replay. */
a52eab48 2674 if (record_btrace_is_replaying (ops, minus_one_ptid))
e2887aa3
MM
2675 return;
2676
2677 /* Forward the request. */
e75fdfca 2678 ops = ops->beneath;
e8032dde 2679 ops->to_update_thread_list (ops);
e2887aa3
MM
2680}
2681
2682/* The to_thread_alive method of target record-btrace. */
2683
2684static int
2685record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
2686{
2687 /* We don't add or remove threads during replay. */
a52eab48 2688 if (record_btrace_is_replaying (ops, minus_one_ptid))
e2887aa3
MM
2689 return find_thread_ptid (ptid) != NULL;
2690
2691 /* Forward the request. */
e75fdfca
TT
2692 ops = ops->beneath;
2693 return ops->to_thread_alive (ops, ptid);
e2887aa3
MM
2694}
2695
066ce621
MM
2696/* Set the replay branch trace instruction iterator. If IT is NULL, replay
2697 is stopped. */
2698
2699static void
2700record_btrace_set_replay (struct thread_info *tp,
2701 const struct btrace_insn_iterator *it)
2702{
2703 struct btrace_thread_info *btinfo;
2704
2705 btinfo = &tp->btrace;
2706
a0f1b963 2707 if (it == NULL)
52834460 2708 record_btrace_stop_replaying (tp);
066ce621
MM
2709 else
2710 {
2711 if (btinfo->replay == NULL)
52834460 2712 record_btrace_start_replaying (tp);
066ce621
MM
2713 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
2714 return;
2715
2716 *btinfo->replay = *it;
52834460 2717 registers_changed_ptid (tp->ptid);
066ce621
MM
2718 }
2719
52834460
MM
2720 /* Start anew from the new replay position. */
2721 record_btrace_clear_histories (btinfo);
485668e5
MM
2722
2723 stop_pc = regcache_read_pc (get_current_regcache ());
2724 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
066ce621
MM
2725}
2726
2727/* The to_goto_record_begin method of target record-btrace. */
2728
2729static void
08475817 2730record_btrace_goto_begin (struct target_ops *self)
066ce621
MM
2731{
2732 struct thread_info *tp;
2733 struct btrace_insn_iterator begin;
2734
2735 tp = require_btrace_thread ();
2736
2737 btrace_insn_begin (&begin, &tp->btrace);
b61ce85c
MM
2738
2739 /* Skip gaps at the beginning of the trace. */
2740 while (btrace_insn_get (&begin) == NULL)
2741 {
2742 unsigned int steps;
2743
2744 steps = btrace_insn_next (&begin, 1);
2745 if (steps == 0)
2746 error (_("No trace."));
2747 }
2748
066ce621 2749 record_btrace_set_replay (tp, &begin);
066ce621
MM
2750}
2751
2752/* The to_goto_record_end method of target record-btrace. */
2753
2754static void
307a1b91 2755record_btrace_goto_end (struct target_ops *ops)
066ce621
MM
2756{
2757 struct thread_info *tp;
2758
2759 tp = require_btrace_thread ();
2760
2761 record_btrace_set_replay (tp, NULL);
066ce621
MM
2762}
2763
2764/* The to_goto_record method of target record-btrace. */
2765
2766static void
606183ac 2767record_btrace_goto (struct target_ops *self, ULONGEST insn)
066ce621
MM
2768{
2769 struct thread_info *tp;
2770 struct btrace_insn_iterator it;
2771 unsigned int number;
2772 int found;
2773
2774 number = insn;
2775
2776 /* Check for wrap-arounds. */
2777 if (number != insn)
2778 error (_("Instruction number out of range."));
2779
2780 tp = require_btrace_thread ();
2781
2782 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
69090cee
TW
2783
2784 /* Check if the instruction could not be found or is a gap. */
2785 if (found == 0 || btrace_insn_get (&it) == NULL)
066ce621
MM
2786 error (_("No such instruction."));
2787
2788 record_btrace_set_replay (tp, &it);
066ce621
MM
2789}
2790
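/* Note (added annotation): the three methods above implement the
   "record goto begin", "record goto end" and "record goto <insn-number>"
   commands, respectively (the command dispatching lives in record.c).  */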
797094dd
MM
2791/* The to_record_stop_replaying method of target record-btrace. */
2792
2793static void
2794record_btrace_stop_replaying_all (struct target_ops *self)
2795{
2796 struct thread_info *tp;
2797
2798 ALL_NON_EXITED_THREADS (tp)
2799 record_btrace_stop_replaying (tp);
2800}
2801
70ad5bff
MM
2802/* The to_execution_direction target method. */
2803
2804static enum exec_direction_kind
2805record_btrace_execution_direction (struct target_ops *self)
2806{
2807 return record_btrace_resume_exec_dir;
2808}
2809
aef92902
MM
2810/* The to_prepare_to_generate_core target method. */
2811
2812static void
2813record_btrace_prepare_to_generate_core (struct target_ops *self)
2814{
2815 record_btrace_generating_corefile = 1;
2816}
2817
2818/* The to_done_generating_core target method. */
2819
2820static void
2821record_btrace_done_generating_core (struct target_ops *self)
2822{
2823 record_btrace_generating_corefile = 0;
2824}
2825
afedecd3
MM
2826/* Initialize the record-btrace target ops. */
2827
2828static void
2829init_record_btrace_ops (void)
2830{
2831 struct target_ops *ops;
2832
2833 ops = &record_btrace_ops;
2834 ops->to_shortname = "record-btrace";
2835 ops->to_longname = "Branch tracing target";
2836 ops->to_doc = "Collect control-flow trace and provide the execution history.";
2837 ops->to_open = record_btrace_open;
2838 ops->to_close = record_btrace_close;
b7d2e916 2839 ops->to_async = record_btrace_async;
afedecd3 2840 ops->to_detach = record_detach;
c0272db5 2841 ops->to_disconnect = record_btrace_disconnect;
afedecd3
MM
2842 ops->to_mourn_inferior = record_mourn_inferior;
2843 ops->to_kill = record_kill;
afedecd3
MM
2844 ops->to_stop_recording = record_btrace_stop_recording;
2845 ops->to_info_record = record_btrace_info;
2846 ops->to_insn_history = record_btrace_insn_history;
2847 ops->to_insn_history_from = record_btrace_insn_history_from;
2848 ops->to_insn_history_range = record_btrace_insn_history_range;
2849 ops->to_call_history = record_btrace_call_history;
2850 ops->to_call_history_from = record_btrace_call_history_from;
2851 ops->to_call_history_range = record_btrace_call_history_range;
b158a20f 2852 ops->to_record_method = record_btrace_record_method;
07bbe694 2853 ops->to_record_is_replaying = record_btrace_is_replaying;
7ff27e9b 2854 ops->to_record_will_replay = record_btrace_will_replay;
797094dd 2855 ops->to_record_stop_replaying = record_btrace_stop_replaying_all;
633785ff
MM
2856 ops->to_xfer_partial = record_btrace_xfer_partial;
2857 ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
2858 ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
1f3ef581
MM
2859 ops->to_fetch_registers = record_btrace_fetch_registers;
2860 ops->to_store_registers = record_btrace_store_registers;
2861 ops->to_prepare_to_store = record_btrace_prepare_to_store;
ac01945b
TT
2862 ops->to_get_unwinder = &record_btrace_to_get_unwinder;
2863 ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;
b2f4cfde 2864 ops->to_resume = record_btrace_resume;
85ad3aaf 2865 ops->to_commit_resume = record_btrace_commit_resume;
b2f4cfde 2866 ops->to_wait = record_btrace_wait;
6e4879f0 2867 ops->to_stop = record_btrace_stop;
e8032dde 2868 ops->to_update_thread_list = record_btrace_update_thread_list;
e2887aa3 2869 ops->to_thread_alive = record_btrace_thread_alive;
066ce621
MM
2870 ops->to_goto_record_begin = record_btrace_goto_begin;
2871 ops->to_goto_record_end = record_btrace_goto_end;
2872 ops->to_goto_record = record_btrace_goto;
52834460 2873 ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
9e8915c6
PA
2874 ops->to_stopped_by_sw_breakpoint = record_btrace_stopped_by_sw_breakpoint;
2875 ops->to_supports_stopped_by_sw_breakpoint
2876 = record_btrace_supports_stopped_by_sw_breakpoint;
2877 ops->to_stopped_by_hw_breakpoint = record_btrace_stopped_by_hw_breakpoint;
2878 ops->to_supports_stopped_by_hw_breakpoint
2879 = record_btrace_supports_stopped_by_hw_breakpoint;
70ad5bff 2880 ops->to_execution_direction = record_btrace_execution_direction;
aef92902
MM
2881 ops->to_prepare_to_generate_core = record_btrace_prepare_to_generate_core;
2882 ops->to_done_generating_core = record_btrace_done_generating_core;
afedecd3
MM
2883 ops->to_stratum = record_stratum;
2884 ops->to_magic = OPS_MAGIC;
2885}
2886
f4abbc16
MM
2887/* Start recording in BTS format. */
2888
2889static void
2890cmd_record_btrace_bts_start (char *args, int from_tty)
2891{
f4abbc16
MM
2892 if (args != NULL && *args != 0)
2893 error (_("Invalid argument."));
2894
2895 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2896
492d29ea
PA
2897 TRY
2898 {
9b2eba3d 2899 execute_command ((char *) "target record-btrace", from_tty);
492d29ea
PA
2900 }
2901 CATCH (exception, RETURN_MASK_ALL)
f4abbc16
MM
2902 {
2903 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2904 throw_exception (exception);
2905 }
492d29ea 2906 END_CATCH
f4abbc16
MM
2907}
2908
bc504a31 2909/* Start recording in Intel Processor Trace format. */
afedecd3
MM
2910
2911static void
b20a6524 2912cmd_record_btrace_pt_start (char *args, int from_tty)
afedecd3
MM
2913{
2914 if (args != NULL && *args != 0)
2915 error (_("Invalid argument."));
2916
b20a6524 2917 record_btrace_conf.format = BTRACE_FORMAT_PT;
f4abbc16 2918
492d29ea
PA
2919 TRY
2920 {
9b2eba3d 2921 execute_command ((char *) "target record-btrace", from_tty);
492d29ea
PA
2922 }
2923 CATCH (exception, RETURN_MASK_ALL)
2924 {
2925 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2926 throw_exception (exception);
2927 }
2928 END_CATCH
afedecd3
MM
2929}
2930
b20a6524
MM
2931/* The "record btrace" command handler.  Try Intel Processor Trace first
2932 and fall back to BTS. */
2932
2933static void
2934cmd_record_btrace_start (char *args, int from_tty)
2935{
2936 if (args != NULL && *args != 0)
2937 error (_("Invalid argument."));
2938
2939 record_btrace_conf.format = BTRACE_FORMAT_PT;
2940
2941 TRY
2942 {
9b2eba3d 2943 execute_command ((char *) "target record-btrace", from_tty);
b20a6524
MM
2944 }
2945 CATCH (exception, RETURN_MASK_ALL)
2946 {
2947 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2948
2949 TRY
2950 {
9b2eba3d 2951 execute_command ((char *) "target record-btrace", from_tty);
b20a6524
MM
2952 }
2953 CATCH (exception, RETURN_MASK_ALL)
2954 {
2955 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2956 throw_exception (exception);
2957 }
2958 END_CATCH
2959 }
2960 END_CATCH
2961}
2962
67b5c0c1
MM
2963/* The "set record btrace" command. */
2964
2965static void
2966cmd_set_record_btrace (char *args, int from_tty)
2967{
2968 cmd_show_list (set_record_btrace_cmdlist, from_tty, "");
2969}
2970
2971/* The "show record btrace" command. */
2972
2973static void
2974cmd_show_record_btrace (char *args, int from_tty)
2975{
2976 cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
2977}
2978
2979/* The "show record btrace replay-memory-access" command. */
2980
2981static void
2982cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2983 struct cmd_list_element *c, const char *value)
2984{
2985 fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
2986 replay_memory_access);
2987}
2988
d33501a5
MM
2989/* The "set record btrace bts" command. */
2990
2991static void
2992cmd_set_record_btrace_bts (char *args, int from_tty)
2993{
2994 printf_unfiltered (_("\"set record btrace bts\" must be followed "
b20a6524 2995 "by an appropriate subcommand.\n"));
d33501a5
MM
2996 help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
2997 all_commands, gdb_stdout);
2998}
2999
3000/* The "show record btrace bts" command. */
3001
3002static void
3003cmd_show_record_btrace_bts (char *args, int from_tty)
3004{
3005 cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
3006}
3007
b20a6524
MM
3008/* The "set record btrace pt" command. */
3009
3010static void
3011cmd_set_record_btrace_pt (char *args, int from_tty)
3012{
3013 printf_unfiltered (_("\"set record btrace pt\" must be followed "
3014 "by an appropriate subcommand.\n"));
3015 help_list (set_record_btrace_pt_cmdlist, "set record btrace pt ",
3016 all_commands, gdb_stdout);
3017}
3018
3019/* The "show record btrace pt" command. */
3020
3021static void
3022cmd_show_record_btrace_pt (char *args, int from_tty)
3023{
3024 cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, "");
3025}
3026
3027/* The "record bts buffer-size" show value function. */
3028
3029static void
3030show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
3031 struct cmd_list_element *c,
3032 const char *value)
3033{
3034 fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
3035 value);
3036}
3037
3038/* The "record pt buffer-size" show value function. */
3039
3040static void
3041show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
3042 struct cmd_list_element *c,
3043 const char *value)
3044{
3045 fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
3046 value);
3047}
3048
afedecd3
MM
3049void _initialize_record_btrace (void);
3050
3051/* Initialize btrace commands. */
3052
3053void
3054_initialize_record_btrace (void)
3055{
f4abbc16
MM
3056 add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
3057 _("Start branch trace recording."), &record_btrace_cmdlist,
3058 "record btrace ", 0, &record_cmdlist);
afedecd3
MM
3059 add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
3060
f4abbc16
MM
3061 add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
3062 _("\
3063Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
3064The processor stores a from/to record for each branch into a cyclic buffer.\n\
3065This format may not be available on all processors."),
3066 &record_btrace_cmdlist);
3067 add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);
3068
b20a6524
MM
3069 add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
3070 _("\
bc504a31 3071Start branch trace recording in Intel Processor Trace format.\n\n\
b20a6524
MM
3072This format may not be available on all processors."),
3073 &record_btrace_cmdlist);
3074 add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);
3075
67b5c0c1
MM
3076 add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
3077 _("Set record options"), &set_record_btrace_cmdlist,
3078 "set record btrace ", 0, &set_record_cmdlist);
3079
3080 add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
3081 _("Show record options"), &show_record_btrace_cmdlist,
3082 "show record btrace ", 0, &show_record_cmdlist);
3083
3084 add_setshow_enum_cmd ("replay-memory-access", no_class,
3085 replay_memory_access_types, &replay_memory_access, _("\
3086Set what memory accesses are allowed during replay."), _("\
3087Show what memory accesses are allowed during replay."),
3088 _("Default is READ-ONLY.\n\n\
3089The btrace record target does not trace data.\n\
3090The memory therefore corresponds to the live target and not \
3091to the current replay position.\n\n\
3092When READ-ONLY, allow accesses to read-only memory during replay.\n\
3093When READ-WRITE, allow accesses to read-only and read-write memory during \
3094replay."),
3095 NULL, cmd_show_replay_memory_access,
3096 &set_record_btrace_cmdlist,
3097 &show_record_btrace_cmdlist);
3098
d33501a5
MM
3099 add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
3100 _("Set record btrace bts options"),
3101 &set_record_btrace_bts_cmdlist,
3102 "set record btrace bts ", 0, &set_record_btrace_cmdlist);
3103
3104 add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
3105 _("Show record btrace bts options"),
3106 &show_record_btrace_bts_cmdlist,
3107 "show record btrace bts ", 0, &show_record_btrace_cmdlist);
3108
3109 add_setshow_uinteger_cmd ("buffer-size", no_class,
3110 &record_btrace_conf.bts.size,
3111 _("Set the record/replay bts buffer size."),
3112 _("Show the record/replay bts buffer size."), _("\
3113When starting recording request a trace buffer of this size. \
3114The actual buffer size may differ from the requested size. \
3115Use \"info record\" to see the actual buffer size.\n\n\
3116Bigger buffers allow longer recording but also take more time to process \
3117the recorded execution trace.\n\n\
b20a6524
MM
3118The trace buffer size may not be changed while recording."), NULL,
3119 show_record_bts_buffer_size_value,
d33501a5
MM
3120 &set_record_btrace_bts_cmdlist,
3121 &show_record_btrace_bts_cmdlist);
3122
b20a6524
MM
3123 add_prefix_cmd ("pt", class_support, cmd_set_record_btrace_pt,
3124 _("Set record btrace pt options"),
3125 &set_record_btrace_pt_cmdlist,
3126 "set record btrace pt ", 0, &set_record_btrace_cmdlist);
3127
3128 add_prefix_cmd ("pt", class_support, cmd_show_record_btrace_pt,
3129 _("Show record btrace pt options"),
3130 &show_record_btrace_pt_cmdlist,
3131 "show record btrace pt ", 0, &show_record_btrace_cmdlist);
3132
3133 add_setshow_uinteger_cmd ("buffer-size", no_class,
3134 &record_btrace_conf.pt.size,
3135 _("Set the record/replay pt buffer size."),
3136 _("Show the record/replay pt buffer size."), _("\
3137Bigger buffers allow longer recording but also take more time to process \
3138the recorded execution.\n\
3139The actual buffer size may differ from the requested size. Use \"info record\" \
3140to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
3141 &set_record_btrace_pt_cmdlist,
3142 &show_record_btrace_pt_cmdlist);
3143
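/* Illustrative use of the commands registered above (example values only,
   not part of the original file):

     (gdb) record btrace bts
     (gdb) set record btrace bts buffer-size 131072
     (gdb) set record btrace replay-memory-access read-write
     (gdb) info record

   Plain "record btrace" prefers the Intel Processor Trace format and falls
   back to BTS if that fails.  */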
afedecd3
MM
3144 init_record_btrace_ops ();
3145 add_target (&record_btrace_ops);
0b722aec
MM
3146
3147 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
3148 xcalloc, xfree);
d33501a5
MM
3149
3150 record_btrace_conf.bts.size = 64 * 1024;
b20a6524 3151 record_btrace_conf.pt.size = 16 * 1024;
afedecd3 3152}