Target FP: Use target format throughout expression parsing
[deliverable/binutils-gdb.git] / gdb / record-btrace.c
CommitLineData
afedecd3
MM
1/* Branch trace support for GDB, the GNU debugger.
2
61baf725 3 Copyright (C) 2013-2017 Free Software Foundation, Inc.
afedecd3
MM
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22#include "defs.h"
23#include "record.h"
c0272db5 24#include "record-btrace.h"
afedecd3
MM
25#include "gdbthread.h"
26#include "target.h"
27#include "gdbcmd.h"
28#include "disasm.h"
29#include "observer.h"
afedecd3
MM
30#include "cli/cli-utils.h"
31#include "source.h"
32#include "ui-out.h"
33#include "symtab.h"
34#include "filenames.h"
1f3ef581 35#include "regcache.h"
cecac1ab 36#include "frame-unwind.h"
0b722aec 37#include "hashtab.h"
45741a9c 38#include "infrun.h"
70ad5bff
MM
39#include "event-loop.h"
40#include "inf-loop.h"
e3cfc1c7 41#include "vec.h"
325fac50 42#include <algorithm>
afedecd3
MM
43
44/* The target_ops of record-btrace. */
45static struct target_ops record_btrace_ops;
46
47/* A new thread observer enabling branch tracing for the new thread. */
48static struct observer *record_btrace_thread_observer;
49
67b5c0c1
MM
50/* Memory access types used in set/show record btrace replay-memory-access. */
51static const char replay_memory_access_read_only[] = "read-only";
52static const char replay_memory_access_read_write[] = "read-write";
53static const char *const replay_memory_access_types[] =
54{
55 replay_memory_access_read_only,
56 replay_memory_access_read_write,
57 NULL
58};
59
60/* The currently allowed replay memory access type. */
61static const char *replay_memory_access = replay_memory_access_read_only;
62
63/* Command lists for "set/show record btrace". */
64static struct cmd_list_element *set_record_btrace_cmdlist;
65static struct cmd_list_element *show_record_btrace_cmdlist;
633785ff 66
70ad5bff
MM
67/* The execution direction of the last resume we got. See record-full.c. */
68static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;
69
70/* The async event handler for reverse/replay execution. */
71static struct async_event_handler *record_btrace_async_inferior_event_handler;
72
aef92902
MM
73/* A flag indicating that we are currently generating a core file. */
74static int record_btrace_generating_corefile;
75
f4abbc16
MM
76/* The current branch trace configuration. */
77static struct btrace_config record_btrace_conf;
78
79/* Command list for "record btrace". */
80static struct cmd_list_element *record_btrace_cmdlist;
81
d33501a5
MM
82/* Command lists for "set/show record btrace bts". */
83static struct cmd_list_element *set_record_btrace_bts_cmdlist;
84static struct cmd_list_element *show_record_btrace_bts_cmdlist;
85
b20a6524
MM
86/* Command lists for "set/show record btrace pt". */
87static struct cmd_list_element *set_record_btrace_pt_cmdlist;
88static struct cmd_list_element *show_record_btrace_pt_cmdlist;
89
afedecd3
MM
90/* Print a record-btrace debug message. Use do ... while (0) to avoid
91 ambiguities when used in if statements. */
92
93#define DEBUG(msg, args...) \
94 do \
95 { \
96 if (record_debug != 0) \
97 fprintf_unfiltered (gdb_stdlog, \
98 "[record-btrace] " msg "\n", ##args); \
99 } \
100 while (0)
101
102
103/* Update the branch trace for the current thread and return a pointer to its
066ce621 104 thread_info.
afedecd3
MM
105
106 Throws an error if there is no thread or no trace. This function never
107 returns NULL. */
108
066ce621
MM
109static struct thread_info *
110require_btrace_thread (void)
afedecd3
MM
111{
112 struct thread_info *tp;
afedecd3
MM
113
114 DEBUG ("require");
115
116 tp = find_thread_ptid (inferior_ptid);
117 if (tp == NULL)
118 error (_("No thread."));
119
cd4007e4
MM
120 validate_registers_access ();
121
afedecd3
MM
122 btrace_fetch (tp);
123
6e07b1d2 124 if (btrace_is_empty (tp))
afedecd3
MM
125 error (_("No trace."));
126
066ce621
MM
127 return tp;
128}
129
130/* Update the branch trace for the current thread and return a pointer to its
131 branch trace information struct.
132
133 Throws an error if there is no thread or no trace. This function never
134 returns NULL. */
135
136static struct btrace_thread_info *
137require_btrace (void)
138{
139 struct thread_info *tp;
140
141 tp = require_btrace_thread ();
142
143 return &tp->btrace;
afedecd3
MM
144}
145
146/* Enable branch tracing for one thread. Warn on errors. */
147
148static void
149record_btrace_enable_warn (struct thread_info *tp)
150{
492d29ea
PA
151 TRY
152 {
153 btrace_enable (tp, &record_btrace_conf);
154 }
155 CATCH (error, RETURN_MASK_ERROR)
156 {
157 warning ("%s", error.message);
158 }
159 END_CATCH
afedecd3
MM
160}
161
162/* Callback function to disable branch tracing for one thread. */
163
164static void
165record_btrace_disable_callback (void *arg)
166{
19ba03f4 167 struct thread_info *tp = (struct thread_info *) arg;
afedecd3
MM
168
169 btrace_disable (tp);
170}
171
172/* Enable automatic tracing of new threads. */
173
174static void
175record_btrace_auto_enable (void)
176{
177 DEBUG ("attach thread observer");
178
179 record_btrace_thread_observer
180 = observer_attach_new_thread (record_btrace_enable_warn);
181}
182
183/* Disable automatic tracing of new threads. */
184
185static void
186record_btrace_auto_disable (void)
187{
188 /* The observer may have been detached, already. */
189 if (record_btrace_thread_observer == NULL)
190 return;
191
192 DEBUG ("detach thread observer");
193
194 observer_detach_new_thread (record_btrace_thread_observer);
195 record_btrace_thread_observer = NULL;
196}
197
70ad5bff
MM
198/* The record-btrace async event handler function. */
199
200static void
201record_btrace_handle_async_inferior_event (gdb_client_data data)
202{
203 inferior_event_handler (INF_REG_EVENT, NULL);
204}
205
c0272db5
TW
206/* See record-btrace.h. */
207
208void
209record_btrace_push_target (void)
210{
211 const char *format;
212
213 record_btrace_auto_enable ();
214
215 push_target (&record_btrace_ops);
216
217 record_btrace_async_inferior_event_handler
218 = create_async_event_handler (record_btrace_handle_async_inferior_event,
219 NULL);
220 record_btrace_generating_corefile = 0;
221
222 format = btrace_format_short_string (record_btrace_conf.format);
223 observer_notify_record_changed (current_inferior (), 1, "btrace", format);
224}
225
afedecd3
MM
226/* The to_open method of target record-btrace. */
227
228static void
014f9477 229record_btrace_open (const char *args, int from_tty)
afedecd3
MM
230{
231 struct cleanup *disable_chain;
232 struct thread_info *tp;
233
234 DEBUG ("open");
235
8213266a 236 record_preopen ();
afedecd3
MM
237
238 if (!target_has_execution)
239 error (_("The program is not being run."));
240
afedecd3
MM
241 gdb_assert (record_btrace_thread_observer == NULL);
242
243 disable_chain = make_cleanup (null_cleanup, NULL);
034f788c 244 ALL_NON_EXITED_THREADS (tp)
5d5658a1 245 if (args == NULL || *args == 0 || number_is_in_list (args, tp->global_num))
afedecd3 246 {
f4abbc16 247 btrace_enable (tp, &record_btrace_conf);
afedecd3
MM
248
249 make_cleanup (record_btrace_disable_callback, tp);
250 }
251
c0272db5 252 record_btrace_push_target ();
afedecd3
MM
253
254 discard_cleanups (disable_chain);
255}
256
257/* The to_stop_recording method of target record-btrace. */
258
259static void
c6cd7c02 260record_btrace_stop_recording (struct target_ops *self)
afedecd3
MM
261{
262 struct thread_info *tp;
263
264 DEBUG ("stop recording");
265
266 record_btrace_auto_disable ();
267
034f788c 268 ALL_NON_EXITED_THREADS (tp)
afedecd3
MM
269 if (tp->btrace.target != NULL)
270 btrace_disable (tp);
271}
272
c0272db5
TW
273/* The to_disconnect method of target record-btrace. */
274
275static void
276record_btrace_disconnect (struct target_ops *self, const char *args,
277 int from_tty)
278{
279 struct target_ops *beneath = self->beneath;
280
281 /* Do not stop recording, just clean up GDB side. */
282 unpush_target (self);
283
284 /* Forward disconnect. */
285 beneath->to_disconnect (beneath, args, from_tty);
286}
287
afedecd3
MM
288/* The to_close method of target record-btrace. */
289
290static void
de90e03d 291record_btrace_close (struct target_ops *self)
afedecd3 292{
568e808b
MM
293 struct thread_info *tp;
294
70ad5bff
MM
295 if (record_btrace_async_inferior_event_handler != NULL)
296 delete_async_event_handler (&record_btrace_async_inferior_event_handler);
297
99c819ee
MM
298 /* Make sure automatic recording gets disabled even if we did not stop
299 recording before closing the record-btrace target. */
300 record_btrace_auto_disable ();
301
568e808b
MM
302 /* We should have already stopped recording.
303 Tear down btrace in case we have not. */
034f788c 304 ALL_NON_EXITED_THREADS (tp)
568e808b 305 btrace_teardown (tp);
afedecd3
MM
306}
307
b7d2e916
PA
308/* The to_async method of target record-btrace. */
309
310static void
6a3753b3 311record_btrace_async (struct target_ops *ops, int enable)
b7d2e916 312{
6a3753b3 313 if (enable)
b7d2e916
PA
314 mark_async_event_handler (record_btrace_async_inferior_event_handler);
315 else
316 clear_async_event_handler (record_btrace_async_inferior_event_handler);
317
6a3753b3 318 ops->beneath->to_async (ops->beneath, enable);
b7d2e916
PA
319}
320
d33501a5
MM
321/* Adjusts the size and returns a human readable size suffix. */
322
323static const char *
324record_btrace_adjust_size (unsigned int *size)
325{
326 unsigned int sz;
327
328 sz = *size;
329
330 if ((sz & ((1u << 30) - 1)) == 0)
331 {
332 *size = sz >> 30;
333 return "GB";
334 }
335 else if ((sz & ((1u << 20) - 1)) == 0)
336 {
337 *size = sz >> 20;
338 return "MB";
339 }
340 else if ((sz & ((1u << 10) - 1)) == 0)
341 {
342 *size = sz >> 10;
343 return "kB";
344 }
345 else
346 return "";
347}
348
349/* Print a BTS configuration. */
350
351static void
352record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
353{
354 const char *suffix;
355 unsigned int size;
356
357 size = conf->size;
358 if (size > 0)
359 {
360 suffix = record_btrace_adjust_size (&size);
361 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
362 }
363}
364
bc504a31 365/* Print an Intel Processor Trace configuration. */
b20a6524
MM
366
367static void
368record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
369{
370 const char *suffix;
371 unsigned int size;
372
373 size = conf->size;
374 if (size > 0)
375 {
376 suffix = record_btrace_adjust_size (&size);
377 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
378 }
379}
380
d33501a5
MM
381/* Print a branch tracing configuration. */
382
383static void
384record_btrace_print_conf (const struct btrace_config *conf)
385{
386 printf_unfiltered (_("Recording format: %s.\n"),
387 btrace_format_string (conf->format));
388
389 switch (conf->format)
390 {
391 case BTRACE_FORMAT_NONE:
392 return;
393
394 case BTRACE_FORMAT_BTS:
395 record_btrace_print_bts_conf (&conf->bts);
396 return;
b20a6524
MM
397
398 case BTRACE_FORMAT_PT:
399 record_btrace_print_pt_conf (&conf->pt);
400 return;
d33501a5
MM
401 }
402
403 internal_error (__FILE__, __LINE__, _("Unkown branch trace format."));
404}
405
afedecd3
MM
406/* The to_info_record method of target record-btrace. */
407
408static void
630d6a4a 409record_btrace_info (struct target_ops *self)
afedecd3
MM
410{
411 struct btrace_thread_info *btinfo;
f4abbc16 412 const struct btrace_config *conf;
afedecd3 413 struct thread_info *tp;
31fd9caa 414 unsigned int insns, calls, gaps;
afedecd3
MM
415
416 DEBUG ("info");
417
418 tp = find_thread_ptid (inferior_ptid);
419 if (tp == NULL)
420 error (_("No thread."));
421
cd4007e4
MM
422 validate_registers_access ();
423
f4abbc16
MM
424 btinfo = &tp->btrace;
425
426 conf = btrace_conf (btinfo);
427 if (conf != NULL)
d33501a5 428 record_btrace_print_conf (conf);
f4abbc16 429
afedecd3
MM
430 btrace_fetch (tp);
431
23a7fe75
MM
432 insns = 0;
433 calls = 0;
31fd9caa 434 gaps = 0;
23a7fe75 435
6e07b1d2 436 if (!btrace_is_empty (tp))
23a7fe75
MM
437 {
438 struct btrace_call_iterator call;
439 struct btrace_insn_iterator insn;
440
441 btrace_call_end (&call, btinfo);
442 btrace_call_prev (&call, 1);
5de9129b 443 calls = btrace_call_number (&call);
23a7fe75
MM
444
445 btrace_insn_end (&insn, btinfo);
5de9129b 446 insns = btrace_insn_number (&insn);
31fd9caa 447
69090cee
TW
448 /* If the last instruction is not a gap, it is the current instruction
449 that is not actually part of the record. */
450 if (btrace_insn_get (&insn) != NULL)
451 insns -= 1;
31fd9caa
MM
452
453 gaps = btinfo->ngaps;
23a7fe75 454 }
afedecd3 455
31fd9caa 456 printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
43792cf0
PA
457 "for thread %s (%s).\n"), insns, calls, gaps,
458 print_thread_id (tp), target_pid_to_str (tp->ptid));
07bbe694
MM
459
460 if (btrace_is_replaying (tp))
461 printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
462 btrace_insn_number (btinfo->replay));
afedecd3
MM
463}
464
31fd9caa
MM
465/* Print a decode error. */
466
467static void
468btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
469 enum btrace_format format)
470{
508352a9 471 const char *errstr = btrace_decode_error (format, errcode);
31fd9caa 472
112e8700 473 uiout->text (_("["));
508352a9
TW
474 /* ERRCODE > 0 indicates notifications on BTRACE_FORMAT_PT. */
475 if (!(format == BTRACE_FORMAT_PT && errcode > 0))
31fd9caa 476 {
112e8700
SM
477 uiout->text (_("decode error ("));
478 uiout->field_int ("errcode", errcode);
479 uiout->text (_("): "));
31fd9caa 480 }
112e8700
SM
481 uiout->text (errstr);
482 uiout->text (_("]\n"));
31fd9caa
MM
483}
484
afedecd3
MM
485/* Print an unsigned int. */
486
487static void
488ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
489{
112e8700 490 uiout->field_fmt (fld, "%u", val);
afedecd3
MM
491}
492
f94cc897
MM
493/* A range of source lines. */
494
495struct btrace_line_range
496{
497 /* The symtab this line is from. */
498 struct symtab *symtab;
499
500 /* The first line (inclusive). */
501 int begin;
502
503 /* The last line (exclusive). */
504 int end;
505};
506
507/* Construct a line range. */
508
509static struct btrace_line_range
510btrace_mk_line_range (struct symtab *symtab, int begin, int end)
511{
512 struct btrace_line_range range;
513
514 range.symtab = symtab;
515 range.begin = begin;
516 range.end = end;
517
518 return range;
519}
520
521/* Add a line to a line range. */
522
523static struct btrace_line_range
524btrace_line_range_add (struct btrace_line_range range, int line)
525{
526 if (range.end <= range.begin)
527 {
528 /* This is the first entry. */
529 range.begin = line;
530 range.end = line + 1;
531 }
532 else if (line < range.begin)
533 range.begin = line;
534 else if (range.end < line)
535 range.end = line;
536
537 return range;
538}
539
540/* Return non-zero if RANGE is empty, zero otherwise. */
541
542static int
543btrace_line_range_is_empty (struct btrace_line_range range)
544{
545 return range.end <= range.begin;
546}
547
548/* Return non-zero if LHS contains RHS, zero otherwise. */
549
550static int
551btrace_line_range_contains_range (struct btrace_line_range lhs,
552 struct btrace_line_range rhs)
553{
554 return ((lhs.symtab == rhs.symtab)
555 && (lhs.begin <= rhs.begin)
556 && (rhs.end <= lhs.end));
557}
558
559/* Find the line range associated with PC. */
560
561static struct btrace_line_range
562btrace_find_line_range (CORE_ADDR pc)
563{
564 struct btrace_line_range range;
565 struct linetable_entry *lines;
566 struct linetable *ltable;
567 struct symtab *symtab;
568 int nlines, i;
569
570 symtab = find_pc_line_symtab (pc);
571 if (symtab == NULL)
572 return btrace_mk_line_range (NULL, 0, 0);
573
574 ltable = SYMTAB_LINETABLE (symtab);
575 if (ltable == NULL)
576 return btrace_mk_line_range (symtab, 0, 0);
577
578 nlines = ltable->nitems;
579 lines = ltable->item;
580 if (nlines <= 0)
581 return btrace_mk_line_range (symtab, 0, 0);
582
583 range = btrace_mk_line_range (symtab, 0, 0);
584 for (i = 0; i < nlines - 1; i++)
585 {
586 if ((lines[i].pc == pc) && (lines[i].line != 0))
587 range = btrace_line_range_add (range, lines[i].line);
588 }
589
590 return range;
591}
592
593/* Print source lines in LINES to UIOUT.
594
595 UI_ITEM_CHAIN is a cleanup chain for the last source line and the
596 instructions corresponding to that source line. When printing a new source
597 line, we do the cleanups for the open chain and open a new cleanup chain for
598 the new source line. If the source line range in LINES is not empty, this
599 function will leave the cleanup chain for the last printed source line open
600 so instructions can be added to it. */
601
602static void
603btrace_print_lines (struct btrace_line_range lines, struct ui_out *uiout,
604 struct cleanup **ui_item_chain, int flags)
605{
8d297bbf 606 print_source_lines_flags psl_flags;
f94cc897
MM
607 int line;
608
609 psl_flags = 0;
610 if (flags & DISASSEMBLY_FILENAME)
611 psl_flags |= PRINT_SOURCE_LINES_FILENAME;
612
613 for (line = lines.begin; line < lines.end; ++line)
614 {
615 if (*ui_item_chain != NULL)
616 do_cleanups (*ui_item_chain);
617
618 *ui_item_chain
619 = make_cleanup_ui_out_tuple_begin_end (uiout, "src_and_asm_line");
620
621 print_source_lines (lines.symtab, line, line + 1, psl_flags);
622
623 make_cleanup_ui_out_list_begin_end (uiout, "line_asm_insn");
624 }
625}
626
afedecd3
MM
627/* Disassemble a section of the recorded instruction trace. */
628
629static void
23a7fe75 630btrace_insn_history (struct ui_out *uiout,
31fd9caa 631 const struct btrace_thread_info *btinfo,
23a7fe75 632 const struct btrace_insn_iterator *begin,
9a24775b
PA
633 const struct btrace_insn_iterator *end,
634 gdb_disassembly_flags flags)
afedecd3 635{
f94cc897 636 struct cleanup *cleanups, *ui_item_chain;
afedecd3 637 struct gdbarch *gdbarch;
23a7fe75 638 struct btrace_insn_iterator it;
f94cc897 639 struct btrace_line_range last_lines;
afedecd3 640
9a24775b
PA
641 DEBUG ("itrace (0x%x): [%u; %u)", (unsigned) flags,
642 btrace_insn_number (begin), btrace_insn_number (end));
afedecd3 643
f94cc897
MM
644 flags |= DISASSEMBLY_SPECULATIVE;
645
afedecd3 646 gdbarch = target_gdbarch ();
f94cc897
MM
647 last_lines = btrace_mk_line_range (NULL, 0, 0);
648
187808b0 649 cleanups = make_cleanup_ui_out_list_begin_end (uiout, "asm_insns");
f94cc897
MM
650
651 /* UI_ITEM_CHAIN is a cleanup chain for the last source line and the
652 instructions corresponding to that line. */
653 ui_item_chain = NULL;
afedecd3 654
8b172ce7
PA
655 gdb_pretty_print_disassembler disasm (gdbarch);
656
23a7fe75 657 for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
afedecd3 658 {
23a7fe75
MM
659 const struct btrace_insn *insn;
660
661 insn = btrace_insn_get (&it);
662
31fd9caa
MM
663 /* A NULL instruction indicates a gap in the trace. */
664 if (insn == NULL)
665 {
666 const struct btrace_config *conf;
667
668 conf = btrace_conf (btinfo);
afedecd3 669
31fd9caa
MM
670 /* We have trace so we must have a configuration. */
671 gdb_assert (conf != NULL);
672
69090cee
TW
673 uiout->field_fmt ("insn-number", "%u",
674 btrace_insn_number (&it));
675 uiout->text ("\t");
676
677 btrace_ui_out_decode_error (uiout, btrace_insn_get_error (&it),
31fd9caa
MM
678 conf->format);
679 }
680 else
681 {
f94cc897 682 struct disasm_insn dinsn;
da8c46d2 683
f94cc897 684 if ((flags & DISASSEMBLY_SOURCE) != 0)
da8c46d2 685 {
f94cc897
MM
686 struct btrace_line_range lines;
687
688 lines = btrace_find_line_range (insn->pc);
689 if (!btrace_line_range_is_empty (lines)
690 && !btrace_line_range_contains_range (last_lines, lines))
691 {
692 btrace_print_lines (lines, uiout, &ui_item_chain, flags);
693 last_lines = lines;
694 }
695 else if (ui_item_chain == NULL)
696 {
697 ui_item_chain
698 = make_cleanup_ui_out_tuple_begin_end (uiout,
699 "src_and_asm_line");
700 /* No source information. */
701 make_cleanup_ui_out_list_begin_end (uiout, "line_asm_insn");
702 }
703
704 gdb_assert (ui_item_chain != NULL);
da8c46d2 705 }
da8c46d2 706
f94cc897
MM
707 memset (&dinsn, 0, sizeof (dinsn));
708 dinsn.number = btrace_insn_number (&it);
709 dinsn.addr = insn->pc;
31fd9caa 710
da8c46d2 711 if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
f94cc897 712 dinsn.is_speculative = 1;
da8c46d2 713
8b172ce7 714 disasm.pretty_print_insn (uiout, &dinsn, flags);
31fd9caa 715 }
afedecd3 716 }
f94cc897
MM
717
718 do_cleanups (cleanups);
afedecd3
MM
719}
720
721/* The to_insn_history method of target record-btrace. */
722
723static void
9a24775b
PA
724record_btrace_insn_history (struct target_ops *self, int size,
725 gdb_disassembly_flags flags)
afedecd3
MM
726{
727 struct btrace_thread_info *btinfo;
23a7fe75
MM
728 struct btrace_insn_history *history;
729 struct btrace_insn_iterator begin, end;
afedecd3 730 struct ui_out *uiout;
23a7fe75 731 unsigned int context, covered;
afedecd3
MM
732
733 uiout = current_uiout;
2e783024 734 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
afedecd3 735 context = abs (size);
afedecd3
MM
736 if (context == 0)
737 error (_("Bad record instruction-history-size."));
738
23a7fe75
MM
739 btinfo = require_btrace ();
740 history = btinfo->insn_history;
741 if (history == NULL)
afedecd3 742 {
07bbe694 743 struct btrace_insn_iterator *replay;
afedecd3 744
9a24775b 745 DEBUG ("insn-history (0x%x): %d", (unsigned) flags, size);
afedecd3 746
07bbe694
MM
747 /* If we're replaying, we start at the replay position. Otherwise, we
748 start at the tail of the trace. */
749 replay = btinfo->replay;
750 if (replay != NULL)
751 begin = *replay;
752 else
753 btrace_insn_end (&begin, btinfo);
754
755 /* We start from here and expand in the requested direction. Then we
756 expand in the other direction, as well, to fill up any remaining
757 context. */
758 end = begin;
759 if (size < 0)
760 {
761 /* We want the current position covered, as well. */
762 covered = btrace_insn_next (&end, 1);
763 covered += btrace_insn_prev (&begin, context - covered);
764 covered += btrace_insn_next (&end, context - covered);
765 }
766 else
767 {
768 covered = btrace_insn_next (&end, context);
769 covered += btrace_insn_prev (&begin, context - covered);
770 }
afedecd3
MM
771 }
772 else
773 {
23a7fe75
MM
774 begin = history->begin;
775 end = history->end;
afedecd3 776
9a24775b 777 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", (unsigned) flags, size,
23a7fe75 778 btrace_insn_number (&begin), btrace_insn_number (&end));
afedecd3 779
23a7fe75
MM
780 if (size < 0)
781 {
782 end = begin;
783 covered = btrace_insn_prev (&begin, context);
784 }
785 else
786 {
787 begin = end;
788 covered = btrace_insn_next (&end, context);
789 }
afedecd3
MM
790 }
791
23a7fe75 792 if (covered > 0)
31fd9caa 793 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
23a7fe75
MM
794 else
795 {
796 if (size < 0)
797 printf_unfiltered (_("At the start of the branch trace record.\n"));
798 else
799 printf_unfiltered (_("At the end of the branch trace record.\n"));
800 }
afedecd3 801
23a7fe75 802 btrace_set_insn_history (btinfo, &begin, &end);
afedecd3
MM
803}
804
805/* The to_insn_history_range method of target record-btrace. */
806
807static void
4e99c6b7 808record_btrace_insn_history_range (struct target_ops *self,
9a24775b
PA
809 ULONGEST from, ULONGEST to,
810 gdb_disassembly_flags flags)
afedecd3
MM
811{
812 struct btrace_thread_info *btinfo;
23a7fe75
MM
813 struct btrace_insn_history *history;
814 struct btrace_insn_iterator begin, end;
afedecd3 815 struct ui_out *uiout;
23a7fe75
MM
816 unsigned int low, high;
817 int found;
afedecd3
MM
818
819 uiout = current_uiout;
2e783024 820 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
23a7fe75
MM
821 low = from;
822 high = to;
afedecd3 823
9a24775b 824 DEBUG ("insn-history (0x%x): [%u; %u)", (unsigned) flags, low, high);
afedecd3
MM
825
826 /* Check for wrap-arounds. */
23a7fe75 827 if (low != from || high != to)
afedecd3
MM
828 error (_("Bad range."));
829
0688d04e 830 if (high < low)
afedecd3
MM
831 error (_("Bad range."));
832
23a7fe75 833 btinfo = require_btrace ();
afedecd3 834
23a7fe75
MM
835 found = btrace_find_insn_by_number (&begin, btinfo, low);
836 if (found == 0)
837 error (_("Range out of bounds."));
afedecd3 838
23a7fe75
MM
839 found = btrace_find_insn_by_number (&end, btinfo, high);
840 if (found == 0)
0688d04e
MM
841 {
842 /* Silently truncate the range. */
843 btrace_insn_end (&end, btinfo);
844 }
845 else
846 {
847 /* We want both begin and end to be inclusive. */
848 btrace_insn_next (&end, 1);
849 }
afedecd3 850
31fd9caa 851 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
23a7fe75 852 btrace_set_insn_history (btinfo, &begin, &end);
afedecd3
MM
853}
854
855/* The to_insn_history_from method of target record-btrace. */
856
857static void
9abc3ff3 858record_btrace_insn_history_from (struct target_ops *self,
9a24775b
PA
859 ULONGEST from, int size,
860 gdb_disassembly_flags flags)
afedecd3
MM
861{
862 ULONGEST begin, end, context;
863
864 context = abs (size);
0688d04e
MM
865 if (context == 0)
866 error (_("Bad record instruction-history-size."));
afedecd3
MM
867
868 if (size < 0)
869 {
870 end = from;
871
872 if (from < context)
873 begin = 0;
874 else
0688d04e 875 begin = from - context + 1;
afedecd3
MM
876 }
877 else
878 {
879 begin = from;
0688d04e 880 end = from + context - 1;
afedecd3
MM
881
882 /* Check for wrap-around. */
883 if (end < begin)
884 end = ULONGEST_MAX;
885 }
886
4e99c6b7 887 record_btrace_insn_history_range (self, begin, end, flags);
afedecd3
MM
888}
889
890/* Print the instruction number range for a function call history line. */
891
892static void
23a7fe75
MM
893btrace_call_history_insn_range (struct ui_out *uiout,
894 const struct btrace_function *bfun)
afedecd3 895{
7acbe133
MM
896 unsigned int begin, end, size;
897
0860c437 898 size = bfun->insn.size ();
7acbe133 899 gdb_assert (size > 0);
afedecd3 900
23a7fe75 901 begin = bfun->insn_offset;
7acbe133 902 end = begin + size - 1;
afedecd3 903
23a7fe75 904 ui_out_field_uint (uiout, "insn begin", begin);
112e8700 905 uiout->text (",");
23a7fe75 906 ui_out_field_uint (uiout, "insn end", end);
afedecd3
MM
907}
908
ce0dfbea
MM
909/* Compute the lowest and highest source line for the instructions in BFUN
910 and return them in PBEGIN and PEND.
911 Ignore instructions that can't be mapped to BFUN, e.g. instructions that
912 result from inlining or macro expansion. */
913
914static void
915btrace_compute_src_line_range (const struct btrace_function *bfun,
916 int *pbegin, int *pend)
917{
ce0dfbea
MM
918 struct symtab *symtab;
919 struct symbol *sym;
ce0dfbea
MM
920 int begin, end;
921
922 begin = INT_MAX;
923 end = INT_MIN;
924
925 sym = bfun->sym;
926 if (sym == NULL)
927 goto out;
928
929 symtab = symbol_symtab (sym);
930
0860c437 931 for (const btrace_insn &insn : bfun->insn)
ce0dfbea
MM
932 {
933 struct symtab_and_line sal;
934
0860c437 935 sal = find_pc_line (insn.pc, 0);
ce0dfbea
MM
936 if (sal.symtab != symtab || sal.line == 0)
937 continue;
938
325fac50
PA
939 begin = std::min (begin, sal.line);
940 end = std::max (end, sal.line);
ce0dfbea
MM
941 }
942
943 out:
944 *pbegin = begin;
945 *pend = end;
946}
947
afedecd3
MM
948/* Print the source line information for a function call history line. */
949
950static void
23a7fe75
MM
951btrace_call_history_src_line (struct ui_out *uiout,
952 const struct btrace_function *bfun)
afedecd3
MM
953{
954 struct symbol *sym;
23a7fe75 955 int begin, end;
afedecd3
MM
956
957 sym = bfun->sym;
958 if (sym == NULL)
959 return;
960
112e8700 961 uiout->field_string ("file",
08be3fe3 962 symtab_to_filename_for_display (symbol_symtab (sym)));
afedecd3 963
ce0dfbea 964 btrace_compute_src_line_range (bfun, &begin, &end);
23a7fe75 965 if (end < begin)
afedecd3
MM
966 return;
967
112e8700
SM
968 uiout->text (":");
969 uiout->field_int ("min line", begin);
afedecd3 970
23a7fe75 971 if (end == begin)
afedecd3
MM
972 return;
973
112e8700
SM
974 uiout->text (",");
975 uiout->field_int ("max line", end);
afedecd3
MM
976}
977
0b722aec
MM
978/* Get the name of a branch trace function. */
979
980static const char *
981btrace_get_bfun_name (const struct btrace_function *bfun)
982{
983 struct minimal_symbol *msym;
984 struct symbol *sym;
985
986 if (bfun == NULL)
987 return "??";
988
989 msym = bfun->msym;
990 sym = bfun->sym;
991
992 if (sym != NULL)
993 return SYMBOL_PRINT_NAME (sym);
994 else if (msym != NULL)
efd66ac6 995 return MSYMBOL_PRINT_NAME (msym);
0b722aec
MM
996 else
997 return "??";
998}
999
afedecd3
MM
1000/* Disassemble a section of the recorded function trace. */
1001
1002static void
23a7fe75 1003btrace_call_history (struct ui_out *uiout,
8710b709 1004 const struct btrace_thread_info *btinfo,
23a7fe75
MM
1005 const struct btrace_call_iterator *begin,
1006 const struct btrace_call_iterator *end,
8d297bbf 1007 int int_flags)
afedecd3 1008{
23a7fe75 1009 struct btrace_call_iterator it;
8d297bbf 1010 record_print_flags flags = (enum record_print_flag) int_flags;
afedecd3 1011
8d297bbf 1012 DEBUG ("ftrace (0x%x): [%u; %u)", int_flags, btrace_call_number (begin),
23a7fe75 1013 btrace_call_number (end));
afedecd3 1014
23a7fe75 1015 for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
afedecd3 1016 {
23a7fe75
MM
1017 const struct btrace_function *bfun;
1018 struct minimal_symbol *msym;
1019 struct symbol *sym;
1020
1021 bfun = btrace_call_get (&it);
23a7fe75 1022 sym = bfun->sym;
0b722aec 1023 msym = bfun->msym;
23a7fe75 1024
afedecd3 1025 /* Print the function index. */
23a7fe75 1026 ui_out_field_uint (uiout, "index", bfun->number);
112e8700 1027 uiout->text ("\t");
afedecd3 1028
31fd9caa
MM
1029 /* Indicate gaps in the trace. */
1030 if (bfun->errcode != 0)
1031 {
1032 const struct btrace_config *conf;
1033
1034 conf = btrace_conf (btinfo);
1035
1036 /* We have trace so we must have a configuration. */
1037 gdb_assert (conf != NULL);
1038
1039 btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);
1040
1041 continue;
1042 }
1043
8710b709
MM
1044 if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
1045 {
1046 int level = bfun->level + btinfo->level, i;
1047
1048 for (i = 0; i < level; ++i)
112e8700 1049 uiout->text (" ");
8710b709
MM
1050 }
1051
1052 if (sym != NULL)
112e8700 1053 uiout->field_string ("function", SYMBOL_PRINT_NAME (sym));
8710b709 1054 else if (msym != NULL)
112e8700
SM
1055 uiout->field_string ("function", MSYMBOL_PRINT_NAME (msym));
1056 else if (!uiout->is_mi_like_p ())
1057 uiout->field_string ("function", "??");
8710b709 1058
1e038f67 1059 if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
afedecd3 1060 {
112e8700 1061 uiout->text (_("\tinst "));
23a7fe75 1062 btrace_call_history_insn_range (uiout, bfun);
afedecd3
MM
1063 }
1064
1e038f67 1065 if ((flags & RECORD_PRINT_SRC_LINE) != 0)
afedecd3 1066 {
112e8700 1067 uiout->text (_("\tat "));
23a7fe75 1068 btrace_call_history_src_line (uiout, bfun);
afedecd3
MM
1069 }
1070
112e8700 1071 uiout->text ("\n");
afedecd3
MM
1072 }
1073}
1074
1075/* The to_call_history method of target record-btrace. */
1076
1077static void
8d297bbf 1078record_btrace_call_history (struct target_ops *self, int size, int int_flags)
afedecd3
MM
1079{
1080 struct btrace_thread_info *btinfo;
23a7fe75
MM
1081 struct btrace_call_history *history;
1082 struct btrace_call_iterator begin, end;
afedecd3 1083 struct ui_out *uiout;
23a7fe75 1084 unsigned int context, covered;
8d297bbf 1085 record_print_flags flags = (enum record_print_flag) int_flags;
afedecd3
MM
1086
1087 uiout = current_uiout;
2e783024 1088 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
afedecd3 1089 context = abs (size);
afedecd3
MM
1090 if (context == 0)
1091 error (_("Bad record function-call-history-size."));
1092
23a7fe75
MM
1093 btinfo = require_btrace ();
1094 history = btinfo->call_history;
1095 if (history == NULL)
afedecd3 1096 {
07bbe694 1097 struct btrace_insn_iterator *replay;
afedecd3 1098
8d297bbf 1099 DEBUG ("call-history (0x%x): %d", int_flags, size);
afedecd3 1100
07bbe694
MM
1101 /* If we're replaying, we start at the replay position. Otherwise, we
1102 start at the tail of the trace. */
1103 replay = btinfo->replay;
1104 if (replay != NULL)
1105 {
07bbe694 1106 begin.btinfo = btinfo;
a0f1b963 1107 begin.index = replay->call_index;
07bbe694
MM
1108 }
1109 else
1110 btrace_call_end (&begin, btinfo);
1111
1112 /* We start from here and expand in the requested direction. Then we
1113 expand in the other direction, as well, to fill up any remaining
1114 context. */
1115 end = begin;
1116 if (size < 0)
1117 {
1118 /* We want the current position covered, as well. */
1119 covered = btrace_call_next (&end, 1);
1120 covered += btrace_call_prev (&begin, context - covered);
1121 covered += btrace_call_next (&end, context - covered);
1122 }
1123 else
1124 {
1125 covered = btrace_call_next (&end, context);
1126 covered += btrace_call_prev (&begin, context- covered);
1127 }
afedecd3
MM
1128 }
1129 else
1130 {
23a7fe75
MM
1131 begin = history->begin;
1132 end = history->end;
afedecd3 1133
8d297bbf 1134 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", int_flags, size,
23a7fe75 1135 btrace_call_number (&begin), btrace_call_number (&end));
afedecd3 1136
23a7fe75
MM
1137 if (size < 0)
1138 {
1139 end = begin;
1140 covered = btrace_call_prev (&begin, context);
1141 }
1142 else
1143 {
1144 begin = end;
1145 covered = btrace_call_next (&end, context);
1146 }
afedecd3
MM
1147 }
1148
23a7fe75 1149 if (covered > 0)
8710b709 1150 btrace_call_history (uiout, btinfo, &begin, &end, flags);
23a7fe75
MM
1151 else
1152 {
1153 if (size < 0)
1154 printf_unfiltered (_("At the start of the branch trace record.\n"));
1155 else
1156 printf_unfiltered (_("At the end of the branch trace record.\n"));
1157 }
afedecd3 1158
23a7fe75 1159 btrace_set_call_history (btinfo, &begin, &end);
afedecd3
MM
1160}
1161
1162/* The to_call_history_range method of target record-btrace. */
1163
1164static void
f0d960ea 1165record_btrace_call_history_range (struct target_ops *self,
8d297bbf
PA
1166 ULONGEST from, ULONGEST to,
1167 int int_flags)
afedecd3
MM
1168{
1169 struct btrace_thread_info *btinfo;
23a7fe75
MM
1170 struct btrace_call_history *history;
1171 struct btrace_call_iterator begin, end;
afedecd3 1172 struct ui_out *uiout;
23a7fe75
MM
1173 unsigned int low, high;
1174 int found;
8d297bbf 1175 record_print_flags flags = (enum record_print_flag) int_flags;
afedecd3
MM
1176
1177 uiout = current_uiout;
2e783024 1178 ui_out_emit_tuple tuple_emitter (uiout, "func history");
23a7fe75
MM
1179 low = from;
1180 high = to;
afedecd3 1181
8d297bbf 1182 DEBUG ("call-history (0x%x): [%u; %u)", int_flags, low, high);
afedecd3
MM
1183
1184 /* Check for wrap-arounds. */
23a7fe75 1185 if (low != from || high != to)
afedecd3
MM
1186 error (_("Bad range."));
1187
0688d04e 1188 if (high < low)
afedecd3
MM
1189 error (_("Bad range."));
1190
23a7fe75 1191 btinfo = require_btrace ();
afedecd3 1192
23a7fe75
MM
1193 found = btrace_find_call_by_number (&begin, btinfo, low);
1194 if (found == 0)
1195 error (_("Range out of bounds."));
afedecd3 1196
23a7fe75
MM
1197 found = btrace_find_call_by_number (&end, btinfo, high);
1198 if (found == 0)
0688d04e
MM
1199 {
1200 /* Silently truncate the range. */
1201 btrace_call_end (&end, btinfo);
1202 }
1203 else
1204 {
1205 /* We want both begin and end to be inclusive. */
1206 btrace_call_next (&end, 1);
1207 }
afedecd3 1208
8710b709 1209 btrace_call_history (uiout, btinfo, &begin, &end, flags);
23a7fe75 1210 btrace_set_call_history (btinfo, &begin, &end);
afedecd3
MM
1211}
1212
1213/* The to_call_history_from method of target record-btrace. */
1214
1215static void
ec0aea04 1216record_btrace_call_history_from (struct target_ops *self,
8d297bbf
PA
1217 ULONGEST from, int size,
1218 int int_flags)
afedecd3
MM
1219{
1220 ULONGEST begin, end, context;
8d297bbf 1221 record_print_flags flags = (enum record_print_flag) int_flags;
afedecd3
MM
1222
1223 context = abs (size);
0688d04e
MM
1224 if (context == 0)
1225 error (_("Bad record function-call-history-size."));
afedecd3
MM
1226
1227 if (size < 0)
1228 {
1229 end = from;
1230
1231 if (from < context)
1232 begin = 0;
1233 else
0688d04e 1234 begin = from - context + 1;
afedecd3
MM
1235 }
1236 else
1237 {
1238 begin = from;
0688d04e 1239 end = from + context - 1;
afedecd3
MM
1240
1241 /* Check for wrap-around. */
1242 if (end < begin)
1243 end = ULONGEST_MAX;
1244 }
1245
f0d960ea 1246 record_btrace_call_history_range (self, begin, end, flags);
afedecd3
MM
1247}
1248
b158a20f
TW
1249/* The to_record_method method of target record-btrace. */
1250
1251static enum record_method
1252record_btrace_record_method (struct target_ops *self, ptid_t ptid)
1253{
1254 const struct btrace_config *config;
1255 struct thread_info * const tp = find_thread_ptid (ptid);
1256
1257 if (tp == NULL)
1258 error (_("No thread."));
1259
1260 if (tp->btrace.target == NULL)
1261 return RECORD_METHOD_NONE;
1262
1263 return RECORD_METHOD_BTRACE;
1264}
1265
07bbe694
MM
1266/* The to_record_is_replaying method of target record-btrace. */
1267
1268static int
a52eab48 1269record_btrace_is_replaying (struct target_ops *self, ptid_t ptid)
07bbe694
MM
1270{
1271 struct thread_info *tp;
1272
034f788c 1273 ALL_NON_EXITED_THREADS (tp)
a52eab48 1274 if (ptid_match (tp->ptid, ptid) && btrace_is_replaying (tp))
07bbe694
MM
1275 return 1;
1276
1277 return 0;
1278}
1279
7ff27e9b
MM
1280/* The to_record_will_replay method of target record-btrace. */
1281
1282static int
1283record_btrace_will_replay (struct target_ops *self, ptid_t ptid, int dir)
1284{
1285 return dir == EXEC_REVERSE || record_btrace_is_replaying (self, ptid);
1286}
1287
633785ff
MM
1288/* The to_xfer_partial method of target record-btrace. */
1289
9b409511 1290static enum target_xfer_status
633785ff
MM
1291record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
1292 const char *annex, gdb_byte *readbuf,
1293 const gdb_byte *writebuf, ULONGEST offset,
9b409511 1294 ULONGEST len, ULONGEST *xfered_len)
633785ff
MM
1295{
1296 struct target_ops *t;
1297
1298 /* Filter out requests that don't make sense during replay. */
67b5c0c1 1299 if (replay_memory_access == replay_memory_access_read_only
aef92902 1300 && !record_btrace_generating_corefile
4d10e986 1301 && record_btrace_is_replaying (ops, inferior_ptid))
633785ff
MM
1302 {
1303 switch (object)
1304 {
1305 case TARGET_OBJECT_MEMORY:
1306 {
1307 struct target_section *section;
1308
1309 /* We do not allow writing memory in general. */
1310 if (writebuf != NULL)
9b409511
YQ
1311 {
1312 *xfered_len = len;
bc113b4e 1313 return TARGET_XFER_UNAVAILABLE;
9b409511 1314 }
633785ff
MM
1315
1316 /* We allow reading readonly memory. */
1317 section = target_section_by_addr (ops, offset);
1318 if (section != NULL)
1319 {
1320 /* Check if the section we found is readonly. */
1321 if ((bfd_get_section_flags (section->the_bfd_section->owner,
1322 section->the_bfd_section)
1323 & SEC_READONLY) != 0)
1324 {
1325 /* Truncate the request to fit into this section. */
325fac50 1326 len = std::min (len, section->endaddr - offset);
633785ff
MM
1327 break;
1328 }
1329 }
1330
9b409511 1331 *xfered_len = len;
bc113b4e 1332 return TARGET_XFER_UNAVAILABLE;
633785ff
MM
1333 }
1334 }
1335 }
1336
1337 /* Forward the request. */
e75fdfca
TT
1338 ops = ops->beneath;
1339 return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
1340 offset, len, xfered_len);
633785ff
MM
1341}
1342
1343/* The to_insert_breakpoint method of target record-btrace. */
1344
1345static int
1346record_btrace_insert_breakpoint (struct target_ops *ops,
1347 struct gdbarch *gdbarch,
1348 struct bp_target_info *bp_tgt)
1349{
67b5c0c1
MM
1350 const char *old;
1351 int ret;
633785ff
MM
1352
1353 /* Inserting breakpoints requires accessing memory. Allow it for the
1354 duration of this function. */
67b5c0c1
MM
1355 old = replay_memory_access;
1356 replay_memory_access = replay_memory_access_read_write;
633785ff
MM
1357
1358 ret = 0;
492d29ea
PA
1359 TRY
1360 {
1361 ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);
1362 }
492d29ea
PA
1363 CATCH (except, RETURN_MASK_ALL)
1364 {
6c63c96a 1365 replay_memory_access = old;
492d29ea
PA
1366 throw_exception (except);
1367 }
1368 END_CATCH
6c63c96a 1369 replay_memory_access = old;
633785ff
MM
1370
1371 return ret;
1372}
1373
1374/* The to_remove_breakpoint method of target record-btrace. */
1375
1376static int
1377record_btrace_remove_breakpoint (struct target_ops *ops,
1378 struct gdbarch *gdbarch,
73971819
PA
1379 struct bp_target_info *bp_tgt,
1380 enum remove_bp_reason reason)
633785ff 1381{
67b5c0c1
MM
1382 const char *old;
1383 int ret;
633785ff
MM
1384
1385 /* Removing breakpoints requires accessing memory. Allow it for the
1386 duration of this function. */
67b5c0c1
MM
1387 old = replay_memory_access;
1388 replay_memory_access = replay_memory_access_read_write;
633785ff
MM
1389
1390 ret = 0;
492d29ea
PA
1391 TRY
1392 {
73971819
PA
1393 ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt,
1394 reason);
492d29ea 1395 }
492d29ea
PA
1396 CATCH (except, RETURN_MASK_ALL)
1397 {
6c63c96a 1398 replay_memory_access = old;
492d29ea
PA
1399 throw_exception (except);
1400 }
1401 END_CATCH
6c63c96a 1402 replay_memory_access = old;
633785ff
MM
1403
1404 return ret;
1405}
1406
1f3ef581
MM
1407/* The to_fetch_registers method of target record-btrace. */
1408
1409static void
1410record_btrace_fetch_registers (struct target_ops *ops,
1411 struct regcache *regcache, int regno)
1412{
1413 struct btrace_insn_iterator *replay;
1414 struct thread_info *tp;
1415
bcc0c096 1416 tp = find_thread_ptid (regcache_get_ptid (regcache));
1f3ef581
MM
1417 gdb_assert (tp != NULL);
1418
1419 replay = tp->btrace.replay;
aef92902 1420 if (replay != NULL && !record_btrace_generating_corefile)
1f3ef581
MM
1421 {
1422 const struct btrace_insn *insn;
1423 struct gdbarch *gdbarch;
1424 int pcreg;
1425
1426 gdbarch = get_regcache_arch (regcache);
1427 pcreg = gdbarch_pc_regnum (gdbarch);
1428 if (pcreg < 0)
1429 return;
1430
1431 /* We can only provide the PC register. */
1432 if (regno >= 0 && regno != pcreg)
1433 return;
1434
1435 insn = btrace_insn_get (replay);
1436 gdb_assert (insn != NULL);
1437
1438 regcache_raw_supply (regcache, regno, &insn->pc);
1439 }
1440 else
1441 {
e75fdfca 1442 struct target_ops *t = ops->beneath;
1f3ef581 1443
e75fdfca 1444 t->to_fetch_registers (t, regcache, regno);
1f3ef581
MM
1445 }
1446}
1447
1448/* The to_store_registers method of target record-btrace. */
1449
1450static void
1451record_btrace_store_registers (struct target_ops *ops,
1452 struct regcache *regcache, int regno)
1453{
1454 struct target_ops *t;
1455
a52eab48 1456 if (!record_btrace_generating_corefile
bcc0c096 1457 && record_btrace_is_replaying (ops, regcache_get_ptid (regcache)))
4d10e986 1458 error (_("Cannot write registers while replaying."));
1f3ef581
MM
1459
1460 gdb_assert (may_write_registers != 0);
1461
e75fdfca
TT
1462 t = ops->beneath;
1463 t->to_store_registers (t, regcache, regno);
1f3ef581
MM
1464}
1465
1466/* The to_prepare_to_store method of target record-btrace. */
1467
1468static void
1469record_btrace_prepare_to_store (struct target_ops *ops,
1470 struct regcache *regcache)
1471{
1472 struct target_ops *t;
1473
a52eab48 1474 if (!record_btrace_generating_corefile
bcc0c096 1475 && record_btrace_is_replaying (ops, regcache_get_ptid (regcache)))
1f3ef581
MM
1476 return;
1477
e75fdfca
TT
1478 t = ops->beneath;
1479 t->to_prepare_to_store (t, regcache);
1f3ef581
MM
1480}
1481
0b722aec
MM
1482/* The branch trace frame cache. */
1483
1484struct btrace_frame_cache
1485{
1486 /* The thread. */
1487 struct thread_info *tp;
1488
1489 /* The frame info. */
1490 struct frame_info *frame;
1491
1492 /* The branch trace function segment. */
1493 const struct btrace_function *bfun;
1494};
1495
1496/* A struct btrace_frame_cache hash table indexed by NEXT. */
1497
1498static htab_t bfcache;
1499
1500/* hash_f for htab_create_alloc of bfcache. */
1501
1502static hashval_t
1503bfcache_hash (const void *arg)
1504{
19ba03f4
SM
1505 const struct btrace_frame_cache *cache
1506 = (const struct btrace_frame_cache *) arg;
0b722aec
MM
1507
1508 return htab_hash_pointer (cache->frame);
1509}
1510
1511/* eq_f for htab_create_alloc of bfcache. */
1512
1513static int
1514bfcache_eq (const void *arg1, const void *arg2)
1515{
19ba03f4
SM
1516 const struct btrace_frame_cache *cache1
1517 = (const struct btrace_frame_cache *) arg1;
1518 const struct btrace_frame_cache *cache2
1519 = (const struct btrace_frame_cache *) arg2;
0b722aec
MM
1520
1521 return cache1->frame == cache2->frame;
1522}
1523
1524/* Create a new btrace frame cache. */
1525
1526static struct btrace_frame_cache *
1527bfcache_new (struct frame_info *frame)
1528{
1529 struct btrace_frame_cache *cache;
1530 void **slot;
1531
1532 cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
1533 cache->frame = frame;
1534
1535 slot = htab_find_slot (bfcache, cache, INSERT);
1536 gdb_assert (*slot == NULL);
1537 *slot = cache;
1538
1539 return cache;
1540}
1541
1542/* Extract the branch trace function from a branch trace frame. */
1543
1544static const struct btrace_function *
1545btrace_get_frame_function (struct frame_info *frame)
1546{
1547 const struct btrace_frame_cache *cache;
1548 const struct btrace_function *bfun;
1549 struct btrace_frame_cache pattern;
1550 void **slot;
1551
1552 pattern.frame = frame;
1553
1554 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1555 if (slot == NULL)
1556 return NULL;
1557
19ba03f4 1558 cache = (const struct btrace_frame_cache *) *slot;
0b722aec
MM
1559 return cache->bfun;
1560}
1561
cecac1ab
MM
1562/* Implement stop_reason method for record_btrace_frame_unwind. */
1563
1564static enum unwind_stop_reason
1565record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1566 void **this_cache)
1567{
0b722aec
MM
1568 const struct btrace_frame_cache *cache;
1569 const struct btrace_function *bfun;
1570
19ba03f4 1571 cache = (const struct btrace_frame_cache *) *this_cache;
0b722aec
MM
1572 bfun = cache->bfun;
1573 gdb_assert (bfun != NULL);
1574
42bfe59e 1575 if (bfun->up == 0)
0b722aec
MM
1576 return UNWIND_UNAVAILABLE;
1577
1578 return UNWIND_NO_REASON;
cecac1ab
MM
1579}
1580
1581/* Implement this_id method for record_btrace_frame_unwind. */
1582
1583static void
1584record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
1585 struct frame_id *this_id)
1586{
0b722aec
MM
1587 const struct btrace_frame_cache *cache;
1588 const struct btrace_function *bfun;
4aeb0dfc 1589 struct btrace_call_iterator it;
0b722aec
MM
1590 CORE_ADDR code, special;
1591
19ba03f4 1592 cache = (const struct btrace_frame_cache *) *this_cache;
0b722aec
MM
1593
1594 bfun = cache->bfun;
1595 gdb_assert (bfun != NULL);
1596
4aeb0dfc
TW
1597 while (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->prev) != 0)
1598 bfun = btrace_call_get (&it);
0b722aec
MM
1599
1600 code = get_frame_func (this_frame);
1601 special = bfun->number;
1602
1603 *this_id = frame_id_build_unavailable_stack_special (code, special);
1604
1605 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1606 btrace_get_bfun_name (cache->bfun),
1607 core_addr_to_string_nz (this_id->code_addr),
1608 core_addr_to_string_nz (this_id->special_addr));
cecac1ab
MM
1609}
1610
1611/* Implement prev_register method for record_btrace_frame_unwind. */
1612
1613static struct value *
1614record_btrace_frame_prev_register (struct frame_info *this_frame,
1615 void **this_cache,
1616 int regnum)
1617{
0b722aec
MM
1618 const struct btrace_frame_cache *cache;
1619 const struct btrace_function *bfun, *caller;
42bfe59e 1620 struct btrace_call_iterator it;
0b722aec
MM
1621 struct gdbarch *gdbarch;
1622 CORE_ADDR pc;
1623 int pcreg;
1624
1625 gdbarch = get_frame_arch (this_frame);
1626 pcreg = gdbarch_pc_regnum (gdbarch);
1627 if (pcreg < 0 || regnum != pcreg)
1628 throw_error (NOT_AVAILABLE_ERROR,
1629 _("Registers are not available in btrace record history"));
1630
19ba03f4 1631 cache = (const struct btrace_frame_cache *) *this_cache;
0b722aec
MM
1632 bfun = cache->bfun;
1633 gdb_assert (bfun != NULL);
1634
42bfe59e 1635 if (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->up) == 0)
0b722aec
MM
1636 throw_error (NOT_AVAILABLE_ERROR,
1637 _("No caller in btrace record history"));
1638
42bfe59e
TW
1639 caller = btrace_call_get (&it);
1640
0b722aec 1641 if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
0860c437 1642 pc = caller->insn.front ().pc;
0b722aec
MM
1643 else
1644 {
0860c437 1645 pc = caller->insn.back ().pc;
0b722aec
MM
1646 pc += gdb_insn_length (gdbarch, pc);
1647 }
1648
1649 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1650 btrace_get_bfun_name (bfun), bfun->level,
1651 core_addr_to_string_nz (pc));
1652
1653 return frame_unwind_got_address (this_frame, regnum, pc);
cecac1ab
MM
1654}
1655
1656/* Implement sniffer method for record_btrace_frame_unwind. */
1657
1658static int
1659record_btrace_frame_sniffer (const struct frame_unwind *self,
1660 struct frame_info *this_frame,
1661 void **this_cache)
1662{
0b722aec
MM
1663 const struct btrace_function *bfun;
1664 struct btrace_frame_cache *cache;
cecac1ab 1665 struct thread_info *tp;
0b722aec 1666 struct frame_info *next;
cecac1ab
MM
1667
1668 /* THIS_FRAME does not contain a reference to its thread. */
1669 tp = find_thread_ptid (inferior_ptid);
1670 gdb_assert (tp != NULL);
1671
0b722aec
MM
1672 bfun = NULL;
1673 next = get_next_frame (this_frame);
1674 if (next == NULL)
1675 {
1676 const struct btrace_insn_iterator *replay;
1677
1678 replay = tp->btrace.replay;
1679 if (replay != NULL)
08c3f6d2 1680 bfun = &replay->btinfo->functions[replay->call_index];
0b722aec
MM
1681 }
1682 else
1683 {
1684 const struct btrace_function *callee;
42bfe59e 1685 struct btrace_call_iterator it;
0b722aec
MM
1686
1687 callee = btrace_get_frame_function (next);
42bfe59e
TW
1688 if (callee == NULL || (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
1689 return 0;
1690
1691 if (btrace_find_call_by_number (&it, &tp->btrace, callee->up) == 0)
1692 return 0;
1693
1694 bfun = btrace_call_get (&it);
0b722aec
MM
1695 }
1696
1697 if (bfun == NULL)
1698 return 0;
1699
1700 DEBUG ("[frame] sniffed frame for %s on level %d",
1701 btrace_get_bfun_name (bfun), bfun->level);
1702
1703 /* This is our frame. Initialize the frame cache. */
1704 cache = bfcache_new (this_frame);
1705 cache->tp = tp;
1706 cache->bfun = bfun;
1707
1708 *this_cache = cache;
1709 return 1;
1710}
1711
1712/* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1713
1714static int
1715record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
1716 struct frame_info *this_frame,
1717 void **this_cache)
1718{
1719 const struct btrace_function *bfun, *callee;
1720 struct btrace_frame_cache *cache;
42bfe59e 1721 struct btrace_call_iterator it;
0b722aec 1722 struct frame_info *next;
42bfe59e 1723 struct thread_info *tinfo;
0b722aec
MM
1724
1725 next = get_next_frame (this_frame);
1726 if (next == NULL)
1727 return 0;
1728
1729 callee = btrace_get_frame_function (next);
1730 if (callee == NULL)
1731 return 0;
1732
1733 if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1734 return 0;
1735
42bfe59e
TW
1736 tinfo = find_thread_ptid (inferior_ptid);
1737 if (btrace_find_call_by_number (&it, &tinfo->btrace, callee->up) == 0)
0b722aec
MM
1738 return 0;
1739
42bfe59e
TW
1740 bfun = btrace_call_get (&it);
1741
0b722aec
MM
1742 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1743 btrace_get_bfun_name (bfun), bfun->level);
1744
1745 /* This is our frame. Initialize the frame cache. */
1746 cache = bfcache_new (this_frame);
42bfe59e 1747 cache->tp = tinfo;
0b722aec
MM
1748 cache->bfun = bfun;
1749
1750 *this_cache = cache;
1751 return 1;
1752}
1753
1754static void
1755record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
1756{
1757 struct btrace_frame_cache *cache;
1758 void **slot;
1759
19ba03f4 1760 cache = (struct btrace_frame_cache *) this_cache;
0b722aec
MM
1761
1762 slot = htab_find_slot (bfcache, cache, NO_INSERT);
1763 gdb_assert (slot != NULL);
1764
1765 htab_remove_elt (bfcache, cache);
cecac1ab
MM
1766}
1767
1768/* btrace recording does not store previous memory content, neither the stack
1769 frames content. Any unwinding would return errorneous results as the stack
1770 contents no longer matches the changed PC value restored from history.
1771 Therefore this unwinder reports any possibly unwound registers as
1772 <unavailable>. */
1773
0b722aec 1774const struct frame_unwind record_btrace_frame_unwind =
cecac1ab
MM
1775{
1776 NORMAL_FRAME,
1777 record_btrace_frame_unwind_stop_reason,
1778 record_btrace_frame_this_id,
1779 record_btrace_frame_prev_register,
1780 NULL,
0b722aec
MM
1781 record_btrace_frame_sniffer,
1782 record_btrace_frame_dealloc_cache
1783};
1784
1785const struct frame_unwind record_btrace_tailcall_frame_unwind =
1786{
1787 TAILCALL_FRAME,
1788 record_btrace_frame_unwind_stop_reason,
1789 record_btrace_frame_this_id,
1790 record_btrace_frame_prev_register,
1791 NULL,
1792 record_btrace_tailcall_frame_sniffer,
1793 record_btrace_frame_dealloc_cache
cecac1ab 1794};
b2f4cfde 1795
ac01945b
TT
1796/* Implement the to_get_unwinder method. */
1797
1798static const struct frame_unwind *
1799record_btrace_to_get_unwinder (struct target_ops *self)
1800{
1801 return &record_btrace_frame_unwind;
1802}
1803
1804/* Implement the to_get_tailcall_unwinder method. */
1805
1806static const struct frame_unwind *
1807record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
1808{
1809 return &record_btrace_tailcall_frame_unwind;
1810}
1811
987e68b1
MM
1812/* Return a human-readable string for FLAG. */
1813
1814static const char *
1815btrace_thread_flag_to_str (enum btrace_thread_flag flag)
1816{
1817 switch (flag)
1818 {
1819 case BTHR_STEP:
1820 return "step";
1821
1822 case BTHR_RSTEP:
1823 return "reverse-step";
1824
1825 case BTHR_CONT:
1826 return "cont";
1827
1828 case BTHR_RCONT:
1829 return "reverse-cont";
1830
1831 case BTHR_STOP:
1832 return "stop";
1833 }
1834
1835 return "<invalid>";
1836}
1837
52834460
MM
1838/* Indicate that TP should be resumed according to FLAG. */
1839
1840static void
1841record_btrace_resume_thread (struct thread_info *tp,
1842 enum btrace_thread_flag flag)
1843{
1844 struct btrace_thread_info *btinfo;
1845
43792cf0 1846 DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp),
987e68b1 1847 target_pid_to_str (tp->ptid), flag, btrace_thread_flag_to_str (flag));
52834460
MM
1848
1849 btinfo = &tp->btrace;
1850
52834460
MM
1851 /* Fetch the latest branch trace. */
1852 btrace_fetch (tp);
1853
0ca912df
MM
1854 /* A resume request overwrites a preceding resume or stop request. */
1855 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
52834460
MM
1856 btinfo->flags |= flag;
1857}
1858
ec71cc2f
MM
1859/* Get the current frame for TP. */
1860
1861static struct frame_info *
1862get_thread_current_frame (struct thread_info *tp)
1863{
1864 struct frame_info *frame;
1865 ptid_t old_inferior_ptid;
1866 int executing;
1867
1868 /* Set INFERIOR_PTID, which is implicitly used by get_current_frame. */
1869 old_inferior_ptid = inferior_ptid;
1870 inferior_ptid = tp->ptid;
1871
1872 /* Clear the executing flag to allow changes to the current frame.
1873 We are not actually running, yet. We just started a reverse execution
1874 command or a record goto command.
1875 For the latter, EXECUTING is false and this has no effect.
1876 For the former, EXECUTING is true and we're in to_wait, about to
1877 move the thread. Since we need to recompute the stack, we temporarily
1878 set EXECUTING to flase. */
1879 executing = is_executing (inferior_ptid);
1880 set_executing (inferior_ptid, 0);
1881
1882 frame = NULL;
1883 TRY
1884 {
1885 frame = get_current_frame ();
1886 }
1887 CATCH (except, RETURN_MASK_ALL)
1888 {
1889 /* Restore the previous execution state. */
1890 set_executing (inferior_ptid, executing);
1891
1892 /* Restore the previous inferior_ptid. */
1893 inferior_ptid = old_inferior_ptid;
1894
1895 throw_exception (except);
1896 }
1897 END_CATCH
1898
1899 /* Restore the previous execution state. */
1900 set_executing (inferior_ptid, executing);
1901
1902 /* Restore the previous inferior_ptid. */
1903 inferior_ptid = old_inferior_ptid;
1904
1905 return frame;
1906}
1907
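/* Illustrative note (not part of the original file): the save/clear/restore
   pattern in get_thread_current_frame above is needed because frame
   construction is refused while the selected thread is marked as executing.
   A minimal sketch of the same idiom, using the helpers seen above:

     ptid_t saved_ptid = inferior_ptid;
     inferior_ptid = tp->ptid;
     int was_executing = is_executing (inferior_ptid);
     set_executing (inferior_ptid, 0);
     ...use get_current_frame ()...
     set_executing (inferior_ptid, was_executing);
     inferior_ptid = saved_ptid;

   The TRY/CATCH block exists so both restore steps also run when
   get_current_frame throws.  */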
52834460
MM
1908/* Start replaying a thread. */
1909
1910static struct btrace_insn_iterator *
1911record_btrace_start_replaying (struct thread_info *tp)
1912{
52834460
MM
1913 struct btrace_insn_iterator *replay;
1914 struct btrace_thread_info *btinfo;
52834460
MM
1915
1916 btinfo = &tp->btrace;
1917 replay = NULL;
1918
1919 /* We can't start replaying without trace. */
b54b03bd 1920 if (btinfo->functions.empty ())
52834460
MM
1921 return NULL;
1922
52834460
MM
 1923 /* GDB stores the current frame_id when stepping in order to detect steps
1924 into subroutines.
1925 Since frames are computed differently when we're replaying, we need to
1926 recompute those stored frames and fix them up so we can still detect
1927 subroutines after we started replaying. */
492d29ea 1928 TRY
52834460
MM
1929 {
1930 struct frame_info *frame;
1931 struct frame_id frame_id;
1932 int upd_step_frame_id, upd_step_stack_frame_id;
1933
1934 /* The current frame without replaying - computed via normal unwind. */
ec71cc2f 1935 frame = get_thread_current_frame (tp);
52834460
MM
1936 frame_id = get_frame_id (frame);
1937
1938 /* Check if we need to update any stepping-related frame id's. */
1939 upd_step_frame_id = frame_id_eq (frame_id,
1940 tp->control.step_frame_id);
1941 upd_step_stack_frame_id = frame_id_eq (frame_id,
1942 tp->control.step_stack_frame_id);
1943
1944 /* We start replaying at the end of the branch trace. This corresponds
1945 to the current instruction. */
8d749320 1946 replay = XNEW (struct btrace_insn_iterator);
52834460
MM
1947 btrace_insn_end (replay, btinfo);
1948
31fd9caa
MM
1949 /* Skip gaps at the end of the trace. */
1950 while (btrace_insn_get (replay) == NULL)
1951 {
1952 unsigned int steps;
1953
1954 steps = btrace_insn_prev (replay, 1);
1955 if (steps == 0)
1956 error (_("No trace."));
1957 }
1958
52834460
MM
1959 /* We're not replaying, yet. */
1960 gdb_assert (btinfo->replay == NULL);
1961 btinfo->replay = replay;
1962
1963 /* Make sure we're not using any stale registers. */
1964 registers_changed_ptid (tp->ptid);
1965
1966 /* The current frame with replaying - computed via btrace unwind. */
ec71cc2f 1967 frame = get_thread_current_frame (tp);
52834460
MM
1968 frame_id = get_frame_id (frame);
1969
1970 /* Replace stepping related frames where necessary. */
1971 if (upd_step_frame_id)
1972 tp->control.step_frame_id = frame_id;
1973 if (upd_step_stack_frame_id)
1974 tp->control.step_stack_frame_id = frame_id;
1975 }
492d29ea 1976 CATCH (except, RETURN_MASK_ALL)
52834460
MM
1977 {
1978 xfree (btinfo->replay);
1979 btinfo->replay = NULL;
1980
1981 registers_changed_ptid (tp->ptid);
1982
1983 throw_exception (except);
1984 }
492d29ea 1985 END_CATCH
52834460
MM
1986
1987 return replay;
1988}
1989
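/* Illustrative sketch (not part of the original file): a caller that wants to
   move one instruction back in history on a thread TP that is not yet
   replaying would, in effect, do

     struct btrace_insn_iterator *it = record_btrace_start_replaying (tp);
     if (it != NULL && btrace_insn_prev (it, 1) != 0)
       registers_changed_ptid (tp->ptid);

   The real reverse stepping below (record_btrace_single_step_backward) also
   skips gaps in the trace and checks for breakpoints.  */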
1990/* Stop replaying a thread. */
1991
1992static void
1993record_btrace_stop_replaying (struct thread_info *tp)
1994{
1995 struct btrace_thread_info *btinfo;
1996
1997 btinfo = &tp->btrace;
1998
1999 xfree (btinfo->replay);
2000 btinfo->replay = NULL;
2001
2002 /* Make sure we're not leaving any stale registers. */
2003 registers_changed_ptid (tp->ptid);
2004}
2005
e3cfc1c7
MM
2006/* Stop replaying TP if it is at the end of its execution history. */
2007
2008static void
2009record_btrace_stop_replaying_at_end (struct thread_info *tp)
2010{
2011 struct btrace_insn_iterator *replay, end;
2012 struct btrace_thread_info *btinfo;
2013
2014 btinfo = &tp->btrace;
2015 replay = btinfo->replay;
2016
2017 if (replay == NULL)
2018 return;
2019
2020 btrace_insn_end (&end, btinfo);
2021
2022 if (btrace_insn_cmp (replay, &end) == 0)
2023 record_btrace_stop_replaying (tp);
2024}
2025
b2f4cfde
MM
2026/* The to_resume method of target record-btrace. */
2027
2028static void
2029record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
2030 enum gdb_signal signal)
2031{
0ca912df 2032 struct thread_info *tp;
d2939ba2 2033 enum btrace_thread_flag flag, cflag;
52834460 2034
987e68b1
MM
2035 DEBUG ("resume %s: %s%s", target_pid_to_str (ptid),
2036 execution_direction == EXEC_REVERSE ? "reverse-" : "",
2037 step ? "step" : "cont");
52834460 2038
0ca912df
MM
2039 /* Store the execution direction of the last resume.
2040
2041 If there is more than one to_resume call, we have to rely on infrun
2042 to not change the execution direction in-between. */
70ad5bff
MM
2043 record_btrace_resume_exec_dir = execution_direction;
2044
0ca912df 2045 /* As long as we're not replaying, just forward the request.
52834460 2046
0ca912df
MM
2047 For non-stop targets this means that no thread is replaying. In order to
2048 make progress, we may need to explicitly move replaying threads to the end
2049 of their execution history. */
a52eab48
MM
2050 if ((execution_direction != EXEC_REVERSE)
2051 && !record_btrace_is_replaying (ops, minus_one_ptid))
b2f4cfde 2052 {
e75fdfca 2053 ops = ops->beneath;
04c4fe8c
MM
2054 ops->to_resume (ops, ptid, step, signal);
2055 return;
b2f4cfde
MM
2056 }
2057
52834460 2058 /* Compute the btrace thread flag for the requested move. */
d2939ba2
MM
2059 if (execution_direction == EXEC_REVERSE)
2060 {
2061 flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;
2062 cflag = BTHR_RCONT;
2063 }
52834460 2064 else
d2939ba2
MM
2065 {
2066 flag = step == 0 ? BTHR_CONT : BTHR_STEP;
2067 cflag = BTHR_CONT;
2068 }
52834460 2069
52834460 2070 /* We just indicate the resume intent here. The actual stepping happens in
d2939ba2
MM
2071 record_btrace_wait below.
2072
2073 For all-stop targets, we only step INFERIOR_PTID and continue others. */
2074 if (!target_is_non_stop_p ())
2075 {
2076 gdb_assert (ptid_match (inferior_ptid, ptid));
2077
2078 ALL_NON_EXITED_THREADS (tp)
2079 if (ptid_match (tp->ptid, ptid))
2080 {
2081 if (ptid_match (tp->ptid, inferior_ptid))
2082 record_btrace_resume_thread (tp, flag);
2083 else
2084 record_btrace_resume_thread (tp, cflag);
2085 }
2086 }
2087 else
2088 {
2089 ALL_NON_EXITED_THREADS (tp)
2090 if (ptid_match (tp->ptid, ptid))
2091 record_btrace_resume_thread (tp, flag);
2092 }
70ad5bff
MM
2093
2094 /* Async support. */
2095 if (target_can_async_p ())
2096 {
6a3753b3 2097 target_async (1);
70ad5bff
MM
2098 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2099 }
52834460
MM
2100}
2101
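/* Summary of the request-to-flag mapping above (illustrative only):

     execution_direction  step   requested thread   other threads matching
                                                     PTID (all-stop only)
     -------------------  ----   ----------------   ----------------------
     forward              yes    BTHR_STEP          BTHR_CONT
     forward              no     BTHR_CONT          BTHR_CONT
     reverse              yes    BTHR_RSTEP         BTHR_RCONT
     reverse              no     BTHR_RCONT         BTHR_RCONT

   The flags are consumed one instruction at a time by
   record_btrace_step_thread, driven from record_btrace_wait.  */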
85ad3aaf
PA
2102/* The to_commit_resume method of target record-btrace. */
2103
2104static void
2105record_btrace_commit_resume (struct target_ops *ops)
2106{
2107 if ((execution_direction != EXEC_REVERSE)
2108 && !record_btrace_is_replaying (ops, minus_one_ptid))
2109 ops->beneath->to_commit_resume (ops->beneath);
2110}
2111
987e68b1
MM
2112/* Cancel resuming TP. */
2113
2114static void
2115record_btrace_cancel_resume (struct thread_info *tp)
2116{
2117 enum btrace_thread_flag flags;
2118
2119 flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
2120 if (flags == 0)
2121 return;
2122
43792cf0
PA
2123 DEBUG ("cancel resume thread %s (%s): %x (%s)",
2124 print_thread_id (tp),
987e68b1
MM
2125 target_pid_to_str (tp->ptid), flags,
2126 btrace_thread_flag_to_str (flags));
2127
2128 tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
e3cfc1c7 2129 record_btrace_stop_replaying_at_end (tp);
52834460
MM
2130}
2131
2132/* Return a target_waitstatus indicating that we ran out of history. */
2133
2134static struct target_waitstatus
2135btrace_step_no_history (void)
2136{
2137 struct target_waitstatus status;
2138
2139 status.kind = TARGET_WAITKIND_NO_HISTORY;
2140
2141 return status;
2142}
2143
2144/* Return a target_waitstatus indicating that a step finished. */
2145
2146static struct target_waitstatus
2147btrace_step_stopped (void)
2148{
2149 struct target_waitstatus status;
2150
2151 status.kind = TARGET_WAITKIND_STOPPED;
2152 status.value.sig = GDB_SIGNAL_TRAP;
2153
2154 return status;
2155}
2156
6e4879f0
MM
2157/* Return a target_waitstatus indicating that a thread was stopped as
2158 requested. */
2159
2160static struct target_waitstatus
2161btrace_step_stopped_on_request (void)
2162{
2163 struct target_waitstatus status;
2164
2165 status.kind = TARGET_WAITKIND_STOPPED;
2166 status.value.sig = GDB_SIGNAL_0;
2167
2168 return status;
2169}
2170
d825d248
MM
2171/* Return a target_waitstatus indicating a spurious stop. */
2172
2173static struct target_waitstatus
2174btrace_step_spurious (void)
2175{
2176 struct target_waitstatus status;
2177
2178 status.kind = TARGET_WAITKIND_SPURIOUS;
2179
2180 return status;
2181}
2182
e3cfc1c7
MM
2183/* Return a target_waitstatus indicating that the thread was not resumed. */
2184
2185static struct target_waitstatus
2186btrace_step_no_resumed (void)
2187{
2188 struct target_waitstatus status;
2189
2190 status.kind = TARGET_WAITKIND_NO_RESUMED;
2191
2192 return status;
2193}
2194
2195/* Return a target_waitstatus indicating that we should wait again. */
2196
2197static struct target_waitstatus
2198btrace_step_again (void)
2199{
2200 struct target_waitstatus status;
2201
2202 status.kind = TARGET_WAITKIND_IGNORE;
2203
2204 return status;
2205}
2206
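/* Illustrative summary (not part of the original file) of how the helper
   waitstatuses above are used by the stepping code below:

     btrace_step_spurious ()           - one instruction moved, nothing to
                                         report yet
     btrace_step_again ()              - IGNORE; record_btrace_wait keeps
                                         stepping this thread
     btrace_step_stopped ()            - report SIGTRAP (step finished or a
                                         breakpoint was hit)
     btrace_step_stopped_on_request () - report a stop requested via BTHR_STOP
     btrace_step_no_history ()         - replay hit the beginning or end of
                                         the recorded trace
     btrace_step_no_resumed ()         - no thread had a pending move or stop
                                         request  */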
52834460
MM
2207/* Clear the record histories. */
2208
2209static void
2210record_btrace_clear_histories (struct btrace_thread_info *btinfo)
2211{
2212 xfree (btinfo->insn_history);
2213 xfree (btinfo->call_history);
2214
2215 btinfo->insn_history = NULL;
2216 btinfo->call_history = NULL;
2217}
2218
3c615f99
MM
2219/* Check whether TP's current replay position is at a breakpoint. */
2220
2221static int
2222record_btrace_replay_at_breakpoint (struct thread_info *tp)
2223{
2224 struct btrace_insn_iterator *replay;
2225 struct btrace_thread_info *btinfo;
2226 const struct btrace_insn *insn;
2227 struct inferior *inf;
2228
2229 btinfo = &tp->btrace;
2230 replay = btinfo->replay;
2231
2232 if (replay == NULL)
2233 return 0;
2234
2235 insn = btrace_insn_get (replay);
2236 if (insn == NULL)
2237 return 0;
2238
2239 inf = find_inferior_ptid (tp->ptid);
2240 if (inf == NULL)
2241 return 0;
2242
2243 return record_check_stopped_by_breakpoint (inf->aspace, insn->pc,
2244 &btinfo->stop_reason);
2245}
2246
d825d248 2247/* Step one instruction in the forward direction. */
52834460
MM
2248
2249static struct target_waitstatus
d825d248 2250record_btrace_single_step_forward (struct thread_info *tp)
52834460 2251{
b61ce85c 2252 struct btrace_insn_iterator *replay, end, start;
52834460 2253 struct btrace_thread_info *btinfo;
52834460 2254
d825d248
MM
2255 btinfo = &tp->btrace;
2256 replay = btinfo->replay;
2257
2258 /* We're done if we're not replaying. */
2259 if (replay == NULL)
2260 return btrace_step_no_history ();
2261
011c71b6
MM
2262 /* Check if we're stepping a breakpoint. */
2263 if (record_btrace_replay_at_breakpoint (tp))
2264 return btrace_step_stopped ();
2265
b61ce85c
MM
2266 /* Skip gaps during replay. If we end up at a gap (at the end of the trace),
2267 jump back to the instruction at which we started. */
2268 start = *replay;
d825d248
MM
2269 do
2270 {
2271 unsigned int steps;
2272
e3cfc1c7
MM
2273 /* We will bail out here if we continue stepping after reaching the end
2274 of the execution history. */
d825d248
MM
2275 steps = btrace_insn_next (replay, 1);
2276 if (steps == 0)
b61ce85c
MM
2277 {
2278 *replay = start;
2279 return btrace_step_no_history ();
2280 }
d825d248
MM
2281 }
2282 while (btrace_insn_get (replay) == NULL);
2283
2284 /* Determine the end of the instruction trace. */
2285 btrace_insn_end (&end, btinfo);
2286
e3cfc1c7
MM
2287 /* The execution trace contains (and ends with) the current instruction.
2288 This instruction has not been executed, yet, so the trace really ends
2289 one instruction earlier. */
d825d248 2290 if (btrace_insn_cmp (replay, &end) == 0)
e3cfc1c7 2291 return btrace_step_no_history ();
d825d248
MM
2292
2293 return btrace_step_spurious ();
2294}
2295
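/* Worked example (illustrative): assume the recorded history is
   I1 I2 [gap] I3 and the replay iterator currently sits at I2.  The loop
   above advances past the gap to I3.  If I3 is also the iterator returned by
   btrace_insn_end, it is the not-yet-executed current instruction, so the
   forward step reports "no history" instead of a spurious stop.  */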
 2296/* Step one instruction in the backward direction. */
2297
2298static struct target_waitstatus
2299record_btrace_single_step_backward (struct thread_info *tp)
2300{
b61ce85c 2301 struct btrace_insn_iterator *replay, start;
d825d248 2302 struct btrace_thread_info *btinfo;
e59fa00f 2303
52834460
MM
2304 btinfo = &tp->btrace;
2305 replay = btinfo->replay;
2306
d825d248
MM
2307 /* Start replaying if we're not already doing so. */
2308 if (replay == NULL)
2309 replay = record_btrace_start_replaying (tp);
2310
2311 /* If we can't step any further, we reached the end of the history.
b61ce85c
MM
2312 Skip gaps during replay. If we end up at a gap (at the beginning of
2313 the trace), jump back to the instruction at which we started. */
2314 start = *replay;
d825d248
MM
2315 do
2316 {
2317 unsigned int steps;
2318
2319 steps = btrace_insn_prev (replay, 1);
2320 if (steps == 0)
b61ce85c
MM
2321 {
2322 *replay = start;
2323 return btrace_step_no_history ();
2324 }
d825d248
MM
2325 }
2326 while (btrace_insn_get (replay) == NULL);
2327
011c71b6
MM
2328 /* Check if we're stepping a breakpoint.
2329
2330 For reverse-stepping, this check is after the step. There is logic in
2331 infrun.c that handles reverse-stepping separately. See, for example,
2332 proceed and adjust_pc_after_break.
2333
2334 This code assumes that for reverse-stepping, PC points to the last
2335 de-executed instruction, whereas for forward-stepping PC points to the
2336 next to-be-executed instruction. */
2337 if (record_btrace_replay_at_breakpoint (tp))
2338 return btrace_step_stopped ();
2339
d825d248
MM
2340 return btrace_step_spurious ();
2341}
2342
2343/* Step a single thread. */
2344
2345static struct target_waitstatus
2346record_btrace_step_thread (struct thread_info *tp)
2347{
2348 struct btrace_thread_info *btinfo;
2349 struct target_waitstatus status;
2350 enum btrace_thread_flag flags;
2351
2352 btinfo = &tp->btrace;
2353
6e4879f0
MM
2354 flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
2355 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
52834460 2356
43792cf0 2357 DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp),
987e68b1
MM
2358 target_pid_to_str (tp->ptid), flags,
2359 btrace_thread_flag_to_str (flags));
52834460 2360
6e4879f0
MM
2361 /* We can't step without an execution history. */
2362 if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
2363 return btrace_step_no_history ();
2364
52834460
MM
2365 switch (flags)
2366 {
2367 default:
2368 internal_error (__FILE__, __LINE__, _("invalid stepping type."));
2369
6e4879f0
MM
2370 case BTHR_STOP:
2371 return btrace_step_stopped_on_request ();
2372
52834460 2373 case BTHR_STEP:
d825d248
MM
2374 status = record_btrace_single_step_forward (tp);
2375 if (status.kind != TARGET_WAITKIND_SPURIOUS)
e3cfc1c7 2376 break;
52834460
MM
2377
2378 return btrace_step_stopped ();
2379
2380 case BTHR_RSTEP:
d825d248
MM
2381 status = record_btrace_single_step_backward (tp);
2382 if (status.kind != TARGET_WAITKIND_SPURIOUS)
e3cfc1c7 2383 break;
52834460
MM
2384
2385 return btrace_step_stopped ();
2386
2387 case BTHR_CONT:
e3cfc1c7
MM
2388 status = record_btrace_single_step_forward (tp);
2389 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2390 break;
52834460 2391
e3cfc1c7
MM
2392 btinfo->flags |= flags;
2393 return btrace_step_again ();
52834460
MM
2394
2395 case BTHR_RCONT:
e3cfc1c7
MM
2396 status = record_btrace_single_step_backward (tp);
2397 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2398 break;
52834460 2399
e3cfc1c7
MM
2400 btinfo->flags |= flags;
2401 return btrace_step_again ();
2402 }
d825d248 2403
e3cfc1c7
MM
2404 /* We keep threads moving at the end of their execution history. The to_wait
 2405 method will stop the thread for which the event is reported. */
2406 if (status.kind == TARGET_WAITKIND_NO_HISTORY)
2407 btinfo->flags |= flags;
52834460 2408
e3cfc1c7 2409 return status;
b2f4cfde
MM
2410}
2411
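/* Illustrative trace (not part of the original file) of a BTHR_CONT request:

     record_btrace_step_thread (tp)   -> spurious, re-arm, step again (IGNORE)
     record_btrace_step_thread (tp)   -> spurious, re-arm, step again (IGNORE)
     ...                                 one instruction per call
     record_btrace_step_thread (tp)   -> stopped at a breakpoint, or
                                         no-history at the end of the trace

   record_btrace_wait drives this loop and reports the first real event.  */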
e3cfc1c7
MM
2412/* A vector of threads. */
2413
2414typedef struct thread_info * tp_t;
2415DEF_VEC_P (tp_t);
2416
a6b5be76
MM
2417/* Announce further events if necessary. */
2418
2419static void
2420record_btrace_maybe_mark_async_event (const VEC (tp_t) *moving,
2421 const VEC (tp_t) *no_history)
2422{
2423 int more_moving, more_no_history;
2424
2425 more_moving = !VEC_empty (tp_t, moving);
2426 more_no_history = !VEC_empty (tp_t, no_history);
2427
2428 if (!more_moving && !more_no_history)
2429 return;
2430
2431 if (more_moving)
2432 DEBUG ("movers pending");
2433
2434 if (more_no_history)
2435 DEBUG ("no-history pending");
2436
2437 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2438}
2439
b2f4cfde
MM
2440/* The to_wait method of target record-btrace. */
2441
2442static ptid_t
2443record_btrace_wait (struct target_ops *ops, ptid_t ptid,
2444 struct target_waitstatus *status, int options)
2445{
e3cfc1c7
MM
2446 VEC (tp_t) *moving, *no_history;
2447 struct thread_info *tp, *eventing;
2448 struct cleanup *cleanups = make_cleanup (null_cleanup, NULL);
52834460
MM
2449
2450 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);
2451
b2f4cfde 2452 /* As long as we're not replaying, just forward the request. */
a52eab48
MM
2453 if ((execution_direction != EXEC_REVERSE)
2454 && !record_btrace_is_replaying (ops, minus_one_ptid))
b2f4cfde 2455 {
e75fdfca
TT
2456 ops = ops->beneath;
2457 return ops->to_wait (ops, ptid, status, options);
b2f4cfde
MM
2458 }
2459
e3cfc1c7
MM
2460 moving = NULL;
2461 no_history = NULL;
2462
2463 make_cleanup (VEC_cleanup (tp_t), &moving);
2464 make_cleanup (VEC_cleanup (tp_t), &no_history);
2465
2466 /* Keep a work list of moving threads. */
2467 ALL_NON_EXITED_THREADS (tp)
2468 if (ptid_match (tp->ptid, ptid)
2469 && ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0))
2470 VEC_safe_push (tp_t, moving, tp);
2471
2472 if (VEC_empty (tp_t, moving))
52834460 2473 {
e3cfc1c7 2474 *status = btrace_step_no_resumed ();
52834460 2475
e3cfc1c7 2476 DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid),
23fdd69e 2477 target_waitstatus_to_string (status).c_str ());
e3cfc1c7
MM
2478
2479 do_cleanups (cleanups);
2480 return null_ptid;
52834460
MM
2481 }
2482
e3cfc1c7
MM
2483 /* Step moving threads one by one, one step each, until either one thread
2484 reports an event or we run out of threads to step.
2485
2486 When stepping more than one thread, chances are that some threads reach
2487 the end of their execution history earlier than others. If we reported
2488 this immediately, all-stop on top of non-stop would stop all threads and
2489 resume the same threads next time. And we would report the same thread
2490 having reached the end of its execution history again.
2491
2492 In the worst case, this would starve the other threads. But even if other
 2493 threads were allowed to make progress, this would result in far too
2494 many intermediate stops.
2495
2496 We therefore delay the reporting of "no execution history" until we have
2497 nothing else to report. By this time, all threads should have moved to
2498 either the beginning or the end of their execution history. There will
2499 be a single user-visible stop. */
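  /* Example (illustrative): with two replaying threads T1 and T2 where T1 has
     one instruction of history left and T2 has one thousand, reporting T1's
     "no history" immediately would stop T2 as well, and the next resume would
     replay the same situation.  Deferring the report lets T2 consume its
     history first and yields a single user-visible stop.  */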
2500 eventing = NULL;
2501 while ((eventing == NULL) && !VEC_empty (tp_t, moving))
2502 {
2503 unsigned int ix;
2504
2505 ix = 0;
2506 while ((eventing == NULL) && VEC_iterate (tp_t, moving, ix, tp))
2507 {
2508 *status = record_btrace_step_thread (tp);
2509
2510 switch (status->kind)
2511 {
2512 case TARGET_WAITKIND_IGNORE:
2513 ix++;
2514 break;
2515
2516 case TARGET_WAITKIND_NO_HISTORY:
2517 VEC_safe_push (tp_t, no_history,
2518 VEC_ordered_remove (tp_t, moving, ix));
2519 break;
2520
2521 default:
2522 eventing = VEC_unordered_remove (tp_t, moving, ix);
2523 break;
2524 }
2525 }
2526 }
2527
2528 if (eventing == NULL)
2529 {
2530 /* We started with at least one moving thread. This thread must have
2531 either stopped or reached the end of its execution history.
2532
2533 In the former case, EVENTING must not be NULL.
2534 In the latter case, NO_HISTORY must not be empty. */
2535 gdb_assert (!VEC_empty (tp_t, no_history));
2536
2537 /* We kept threads moving at the end of their execution history. Stop
2538 EVENTING now that we are going to report its stop. */
2539 eventing = VEC_unordered_remove (tp_t, no_history, 0);
2540 eventing->btrace.flags &= ~BTHR_MOVE;
2541
2542 *status = btrace_step_no_history ();
2543 }
2544
2545 gdb_assert (eventing != NULL);
2546
2547 /* We kept threads replaying at the end of their execution history. Stop
2548 replaying EVENTING now that we are going to report its stop. */
2549 record_btrace_stop_replaying_at_end (eventing);
52834460
MM
2550
2551 /* Stop all other threads. */
5953356c 2552 if (!target_is_non_stop_p ())
e3cfc1c7
MM
2553 ALL_NON_EXITED_THREADS (tp)
2554 record_btrace_cancel_resume (tp);
52834460 2555
a6b5be76
MM
2556 /* In async mode, we need to announce further events. */
2557 if (target_is_async_p ())
2558 record_btrace_maybe_mark_async_event (moving, no_history);
2559
52834460 2560 /* Start record histories anew from the current position. */
e3cfc1c7 2561 record_btrace_clear_histories (&eventing->btrace);
52834460
MM
2562
2563 /* We moved the replay position but did not update registers. */
e3cfc1c7
MM
2564 registers_changed_ptid (eventing->ptid);
2565
43792cf0
PA
2566 DEBUG ("wait ended by thread %s (%s): %s",
2567 print_thread_id (eventing),
e3cfc1c7 2568 target_pid_to_str (eventing->ptid),
23fdd69e 2569 target_waitstatus_to_string (status).c_str ());
52834460 2570
e3cfc1c7
MM
2571 do_cleanups (cleanups);
2572 return eventing->ptid;
52834460
MM
2573}
2574
6e4879f0
MM
2575/* The to_stop method of target record-btrace. */
2576
2577static void
2578record_btrace_stop (struct target_ops *ops, ptid_t ptid)
2579{
2580 DEBUG ("stop %s", target_pid_to_str (ptid));
2581
2582 /* As long as we're not replaying, just forward the request. */
a52eab48
MM
2583 if ((execution_direction != EXEC_REVERSE)
2584 && !record_btrace_is_replaying (ops, minus_one_ptid))
6e4879f0
MM
2585 {
2586 ops = ops->beneath;
2587 ops->to_stop (ops, ptid);
2588 }
2589 else
2590 {
2591 struct thread_info *tp;
2592
2593 ALL_NON_EXITED_THREADS (tp)
2594 if (ptid_match (tp->ptid, ptid))
2595 {
2596 tp->btrace.flags &= ~BTHR_MOVE;
2597 tp->btrace.flags |= BTHR_STOP;
2598 }
2599 }
2600 }
2601
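/* Illustrative note (not part of the original file): while replaying, a stop
   request does not reach the target beneath; it merely sets BTHR_STOP so that
   the next record_btrace_wait reports TARGET_WAITKIND_STOPPED with
   GDB_SIGNAL_0 via btrace_step_stopped_on_request.  */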
52834460
MM
2602/* The to_can_execute_reverse method of target record-btrace. */
2603
2604static int
19db3e69 2605record_btrace_can_execute_reverse (struct target_ops *self)
52834460
MM
2606{
2607 return 1;
2608}
2609
9e8915c6 2610/* The to_stopped_by_sw_breakpoint method of target record-btrace. */
52834460 2611
9e8915c6
PA
2612static int
2613record_btrace_stopped_by_sw_breakpoint (struct target_ops *ops)
52834460 2614{
a52eab48 2615 if (record_btrace_is_replaying (ops, minus_one_ptid))
9e8915c6
PA
2616 {
2617 struct thread_info *tp = inferior_thread ();
2618
2619 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2620 }
2621
2622 return ops->beneath->to_stopped_by_sw_breakpoint (ops->beneath);
2623}
2624
2625/* The to_supports_stopped_by_sw_breakpoint method of target
2626 record-btrace. */
2627
2628static int
2629record_btrace_supports_stopped_by_sw_breakpoint (struct target_ops *ops)
2630{
a52eab48 2631 if (record_btrace_is_replaying (ops, minus_one_ptid))
9e8915c6
PA
2632 return 1;
2633
2634 return ops->beneath->to_supports_stopped_by_sw_breakpoint (ops->beneath);
2635}
2636
 2637/* The to_stopped_by_hw_breakpoint method of target record-btrace. */
2638
2639static int
2640record_btrace_stopped_by_hw_breakpoint (struct target_ops *ops)
2641{
a52eab48 2642 if (record_btrace_is_replaying (ops, minus_one_ptid))
9e8915c6
PA
2643 {
2644 struct thread_info *tp = inferior_thread ();
2645
2646 return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2647 }
2648
2649 return ops->beneath->to_stopped_by_hw_breakpoint (ops->beneath);
2650}
2651
2652/* The to_supports_stopped_by_hw_breakpoint method of target
2653 record-btrace. */
2654
2655static int
2656record_btrace_supports_stopped_by_hw_breakpoint (struct target_ops *ops)
2657{
a52eab48 2658 if (record_btrace_is_replaying (ops, minus_one_ptid))
9e8915c6 2659 return 1;
52834460 2660
9e8915c6 2661 return ops->beneath->to_supports_stopped_by_hw_breakpoint (ops->beneath);
b2f4cfde
MM
2662}
2663
e8032dde 2664/* The to_update_thread_list method of target record-btrace. */
e2887aa3
MM
2665
2666static void
e8032dde 2667record_btrace_update_thread_list (struct target_ops *ops)
e2887aa3 2668{
e8032dde 2669 /* We don't add or remove threads during replay. */
a52eab48 2670 if (record_btrace_is_replaying (ops, minus_one_ptid))
e2887aa3
MM
2671 return;
2672
2673 /* Forward the request. */
e75fdfca 2674 ops = ops->beneath;
e8032dde 2675 ops->to_update_thread_list (ops);
e2887aa3
MM
2676}
2677
2678/* The to_thread_alive method of target record-btrace. */
2679
2680static int
2681record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
2682{
2683 /* We don't add or remove threads during replay. */
a52eab48 2684 if (record_btrace_is_replaying (ops, minus_one_ptid))
e2887aa3
MM
2685 return find_thread_ptid (ptid) != NULL;
2686
2687 /* Forward the request. */
e75fdfca
TT
2688 ops = ops->beneath;
2689 return ops->to_thread_alive (ops, ptid);
e2887aa3
MM
2690}
2691
066ce621
MM
2692/* Set the replay branch trace instruction iterator. If IT is NULL, replay
2693 is stopped. */
2694
2695static void
2696record_btrace_set_replay (struct thread_info *tp,
2697 const struct btrace_insn_iterator *it)
2698{
2699 struct btrace_thread_info *btinfo;
2700
2701 btinfo = &tp->btrace;
2702
a0f1b963 2703 if (it == NULL)
52834460 2704 record_btrace_stop_replaying (tp);
066ce621
MM
2705 else
2706 {
2707 if (btinfo->replay == NULL)
52834460 2708 record_btrace_start_replaying (tp);
066ce621
MM
2709 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
2710 return;
2711
2712 *btinfo->replay = *it;
52834460 2713 registers_changed_ptid (tp->ptid);
066ce621
MM
2714 }
2715
52834460
MM
2716 /* Start anew from the new replay position. */
2717 record_btrace_clear_histories (btinfo);
485668e5
MM
2718
2719 stop_pc = regcache_read_pc (get_current_regcache ());
2720 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
066ce621
MM
2721}
2722
2723/* The to_goto_record_begin method of target record-btrace. */
2724
2725static void
08475817 2726record_btrace_goto_begin (struct target_ops *self)
066ce621
MM
2727{
2728 struct thread_info *tp;
2729 struct btrace_insn_iterator begin;
2730
2731 tp = require_btrace_thread ();
2732
2733 btrace_insn_begin (&begin, &tp->btrace);
b61ce85c
MM
2734
2735 /* Skip gaps at the beginning of the trace. */
2736 while (btrace_insn_get (&begin) == NULL)
2737 {
2738 unsigned int steps;
2739
2740 steps = btrace_insn_next (&begin, 1);
2741 if (steps == 0)
2742 error (_("No trace."));
2743 }
2744
066ce621 2745 record_btrace_set_replay (tp, &begin);
066ce621
MM
2746}
2747
2748/* The to_goto_record_end method of target record-btrace. */
2749
2750static void
307a1b91 2751record_btrace_goto_end (struct target_ops *ops)
066ce621
MM
2752{
2753 struct thread_info *tp;
2754
2755 tp = require_btrace_thread ();
2756
2757 record_btrace_set_replay (tp, NULL);
066ce621
MM
2758}
2759
2760/* The to_goto_record method of target record-btrace. */
2761
2762static void
606183ac 2763record_btrace_goto (struct target_ops *self, ULONGEST insn)
066ce621
MM
2764{
2765 struct thread_info *tp;
2766 struct btrace_insn_iterator it;
2767 unsigned int number;
2768 int found;
2769
2770 number = insn;
2771
2772 /* Check for wrap-arounds. */
2773 if (number != insn)
2774 error (_("Instruction number out of range."));
2775
2776 tp = require_btrace_thread ();
2777
2778 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
69090cee
TW
2779
2780 /* Check if the instruction could not be found or is a gap. */
2781 if (found == 0 || btrace_insn_get (&it) == NULL)
066ce621
MM
2782 error (_("No such instruction."));
2783
2784 record_btrace_set_replay (tp, &it);
066ce621
MM
2785}
2786
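/* Worked example (illustrative) for the wrap-around check above: INSN is a
   ULONGEST while NUMBER is an unsigned int.  On a target where unsigned int
   is 32 bits, "record goto 4294967297" truncates NUMBER to 1, so
   NUMBER != INSN detects the overflow and the command is rejected.  */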
797094dd
MM
2787/* The to_record_stop_replaying method of target record-btrace. */
2788
2789static void
2790record_btrace_stop_replaying_all (struct target_ops *self)
2791{
2792 struct thread_info *tp;
2793
2794 ALL_NON_EXITED_THREADS (tp)
2795 record_btrace_stop_replaying (tp);
2796}
2797
70ad5bff
MM
2798/* The to_execution_direction target method. */
2799
2800static enum exec_direction_kind
2801record_btrace_execution_direction (struct target_ops *self)
2802{
2803 return record_btrace_resume_exec_dir;
2804}
2805
aef92902
MM
2806/* The to_prepare_to_generate_core target method. */
2807
2808static void
2809record_btrace_prepare_to_generate_core (struct target_ops *self)
2810{
2811 record_btrace_generating_corefile = 1;
2812}
2813
2814/* The to_done_generating_core target method. */
2815
2816static void
2817record_btrace_done_generating_core (struct target_ops *self)
2818{
2819 record_btrace_generating_corefile = 0;
2820}
2821
afedecd3
MM
2822/* Initialize the record-btrace target ops. */
2823
2824static void
2825init_record_btrace_ops (void)
2826{
2827 struct target_ops *ops;
2828
2829 ops = &record_btrace_ops;
2830 ops->to_shortname = "record-btrace";
2831 ops->to_longname = "Branch tracing target";
2832 ops->to_doc = "Collect control-flow trace and provide the execution history.";
2833 ops->to_open = record_btrace_open;
2834 ops->to_close = record_btrace_close;
b7d2e916 2835 ops->to_async = record_btrace_async;
afedecd3 2836 ops->to_detach = record_detach;
c0272db5 2837 ops->to_disconnect = record_btrace_disconnect;
afedecd3
MM
2838 ops->to_mourn_inferior = record_mourn_inferior;
2839 ops->to_kill = record_kill;
afedecd3
MM
2840 ops->to_stop_recording = record_btrace_stop_recording;
2841 ops->to_info_record = record_btrace_info;
2842 ops->to_insn_history = record_btrace_insn_history;
2843 ops->to_insn_history_from = record_btrace_insn_history_from;
2844 ops->to_insn_history_range = record_btrace_insn_history_range;
2845 ops->to_call_history = record_btrace_call_history;
2846 ops->to_call_history_from = record_btrace_call_history_from;
2847 ops->to_call_history_range = record_btrace_call_history_range;
b158a20f 2848 ops->to_record_method = record_btrace_record_method;
07bbe694 2849 ops->to_record_is_replaying = record_btrace_is_replaying;
7ff27e9b 2850 ops->to_record_will_replay = record_btrace_will_replay;
797094dd 2851 ops->to_record_stop_replaying = record_btrace_stop_replaying_all;
633785ff
MM
2852 ops->to_xfer_partial = record_btrace_xfer_partial;
2853 ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
2854 ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
1f3ef581
MM
2855 ops->to_fetch_registers = record_btrace_fetch_registers;
2856 ops->to_store_registers = record_btrace_store_registers;
2857 ops->to_prepare_to_store = record_btrace_prepare_to_store;
ac01945b
TT
2858 ops->to_get_unwinder = &record_btrace_to_get_unwinder;
2859 ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;
b2f4cfde 2860 ops->to_resume = record_btrace_resume;
85ad3aaf 2861 ops->to_commit_resume = record_btrace_commit_resume;
b2f4cfde 2862 ops->to_wait = record_btrace_wait;
6e4879f0 2863 ops->to_stop = record_btrace_stop;
e8032dde 2864 ops->to_update_thread_list = record_btrace_update_thread_list;
e2887aa3 2865 ops->to_thread_alive = record_btrace_thread_alive;
066ce621
MM
2866 ops->to_goto_record_begin = record_btrace_goto_begin;
2867 ops->to_goto_record_end = record_btrace_goto_end;
2868 ops->to_goto_record = record_btrace_goto;
52834460 2869 ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
9e8915c6
PA
2870 ops->to_stopped_by_sw_breakpoint = record_btrace_stopped_by_sw_breakpoint;
2871 ops->to_supports_stopped_by_sw_breakpoint
2872 = record_btrace_supports_stopped_by_sw_breakpoint;
2873 ops->to_stopped_by_hw_breakpoint = record_btrace_stopped_by_hw_breakpoint;
2874 ops->to_supports_stopped_by_hw_breakpoint
2875 = record_btrace_supports_stopped_by_hw_breakpoint;
70ad5bff 2876 ops->to_execution_direction = record_btrace_execution_direction;
aef92902
MM
2877 ops->to_prepare_to_generate_core = record_btrace_prepare_to_generate_core;
2878 ops->to_done_generating_core = record_btrace_done_generating_core;
afedecd3
MM
2879 ops->to_stratum = record_stratum;
2880 ops->to_magic = OPS_MAGIC;
2881}
2882
f4abbc16
MM
2883/* Start recording in BTS format. */
2884
2885static void
cdb34d4a 2886cmd_record_btrace_bts_start (const char *args, int from_tty)
f4abbc16 2887{
f4abbc16
MM
2888 if (args != NULL && *args != 0)
2889 error (_("Invalid argument."));
2890
2891 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2892
492d29ea
PA
2893 TRY
2894 {
9b2eba3d 2895 execute_command ((char *) "target record-btrace", from_tty);
492d29ea
PA
2896 }
2897 CATCH (exception, RETURN_MASK_ALL)
f4abbc16
MM
2898 {
2899 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2900 throw_exception (exception);
2901 }
492d29ea 2902 END_CATCH
f4abbc16
MM
2903}
2904
bc504a31 2905/* Start recording in Intel Processor Trace format. */
afedecd3
MM
2906
2907static void
cdb34d4a 2908cmd_record_btrace_pt_start (const char *args, int from_tty)
afedecd3
MM
2909{
2910 if (args != NULL && *args != 0)
2911 error (_("Invalid argument."));
2912
b20a6524 2913 record_btrace_conf.format = BTRACE_FORMAT_PT;
f4abbc16 2914
492d29ea
PA
2915 TRY
2916 {
9b2eba3d 2917 execute_command ((char *) "target record-btrace", from_tty);
492d29ea
PA
2918 }
2919 CATCH (exception, RETURN_MASK_ALL)
2920 {
2921 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2922 throw_exception (exception);
2923 }
2924 END_CATCH
afedecd3
MM
2925}
2926
b20a6524
MM
2927/* Alias for "target record". */
2928
2929static void
981a3fb3 2930cmd_record_btrace_start (const char *args, int from_tty)
b20a6524
MM
2931{
2932 if (args != NULL && *args != 0)
2933 error (_("Invalid argument."));
2934
2935 record_btrace_conf.format = BTRACE_FORMAT_PT;
2936
2937 TRY
2938 {
9b2eba3d 2939 execute_command ((char *) "target record-btrace", from_tty);
b20a6524
MM
2940 }
2941 CATCH (exception, RETURN_MASK_ALL)
2942 {
2943 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2944
2945 TRY
2946 {
9b2eba3d 2947 execute_command ((char *) "target record-btrace", from_tty);
b20a6524
MM
2948 }
2949 CATCH (exception, RETURN_MASK_ALL)
2950 {
2951 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2952 throw_exception (exception);
2953 }
2954 END_CATCH
2955 }
2956 END_CATCH
2957}
2958
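/* Illustrative note (not part of the original file): "record btrace" first
   tries the Intel Processor Trace format and silently falls back to BTS if
   opening the target in PT format fails; "record btrace pt" and
   "record btrace bts" request one specific format and propagate any error.  */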
67b5c0c1
MM
2959/* The "set record btrace" command. */
2960
2961static void
981a3fb3 2962cmd_set_record_btrace (const char *args, int from_tty)
67b5c0c1
MM
2963{
2964 cmd_show_list (set_record_btrace_cmdlist, from_tty, "");
2965}
2966
2967/* The "show record btrace" command. */
2968
2969static void
981a3fb3 2970cmd_show_record_btrace (const char *args, int from_tty)
67b5c0c1
MM
2971{
2972 cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
2973}
2974
2975/* The "show record btrace replay-memory-access" command. */
2976
2977static void
2978cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2979 struct cmd_list_element *c, const char *value)
2980{
2981 fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
2982 replay_memory_access);
2983}
2984
d33501a5
MM
2985/* The "set record btrace bts" command. */
2986
2987static void
981a3fb3 2988cmd_set_record_btrace_bts (const char *args, int from_tty)
d33501a5
MM
2989{
2990 printf_unfiltered (_("\"set record btrace bts\" must be followed "
b20a6524 2991 "by an appropriate subcommand.\n"));
d33501a5
MM
2992 help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
2993 all_commands, gdb_stdout);
2994}
2995
2996/* The "show record btrace bts" command. */
2997
2998static void
981a3fb3 2999cmd_show_record_btrace_bts (const char *args, int from_tty)
d33501a5
MM
3000{
3001 cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
3002}
3003
b20a6524
MM
3004/* The "set record btrace pt" command. */
3005
3006static void
981a3fb3 3007cmd_set_record_btrace_pt (const char *args, int from_tty)
b20a6524
MM
3008{
3009 printf_unfiltered (_("\"set record btrace pt\" must be followed "
3010 "by an appropriate subcommand.\n"));
3011 help_list (set_record_btrace_pt_cmdlist, "set record btrace pt ",
3012 all_commands, gdb_stdout);
3013}
3014
3015/* The "show record btrace pt" command. */
3016
3017static void
981a3fb3 3018cmd_show_record_btrace_pt (const char *args, int from_tty)
b20a6524
MM
3019{
3020 cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, "");
3021}
3022
3023/* The "record bts buffer-size" show value function. */
3024
3025static void
3026show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
3027 struct cmd_list_element *c,
3028 const char *value)
3029{
3030 fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
3031 value);
3032}
3033
3034/* The "record pt buffer-size" show value function. */
3035
3036static void
3037show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
3038 struct cmd_list_element *c,
3039 const char *value)
3040{
3041 fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
3042 value);
3043}
3044
afedecd3
MM
3045/* Initialize btrace commands. */
3046
3047void
3048_initialize_record_btrace (void)
3049{
f4abbc16
MM
3050 add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
3051 _("Start branch trace recording."), &record_btrace_cmdlist,
3052 "record btrace ", 0, &record_cmdlist);
afedecd3
MM
3053 add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
3054
f4abbc16
MM
3055 add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
3056 _("\
3057Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
3058The processor stores a from/to record for each branch into a cyclic buffer.\n\
3059This format may not be available on all processors."),
3060 &record_btrace_cmdlist);
3061 add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);
3062
b20a6524
MM
3063 add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
3064 _("\
bc504a31 3065Start branch trace recording in Intel Processor Trace format.\n\n\
b20a6524
MM
3066This format may not be available on all processors."),
3067 &record_btrace_cmdlist);
3068 add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);
3069
67b5c0c1
MM
3070 add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
3071 _("Set record options"), &set_record_btrace_cmdlist,
3072 "set record btrace ", 0, &set_record_cmdlist);
3073
3074 add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
3075 _("Show record options"), &show_record_btrace_cmdlist,
3076 "show record btrace ", 0, &show_record_cmdlist);
3077
3078 add_setshow_enum_cmd ("replay-memory-access", no_class,
3079 replay_memory_access_types, &replay_memory_access, _("\
3080Set what memory accesses are allowed during replay."), _("\
3081Show what memory accesses are allowed during replay."),
3082 _("Default is READ-ONLY.\n\n\
3083The btrace record target does not trace data.\n\
3084The memory therefore corresponds to the live target and not \
3085to the current replay position.\n\n\
3086When READ-ONLY, allow accesses to read-only memory during replay.\n\
3087When READ-WRITE, allow accesses to read-only and read-write memory during \
3088replay."),
3089 NULL, cmd_show_replay_memory_access,
3090 &set_record_btrace_cmdlist,
3091 &show_record_btrace_cmdlist);
3092
d33501a5
MM
3093 add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
3094 _("Set record btrace bts options"),
3095 &set_record_btrace_bts_cmdlist,
3096 "set record btrace bts ", 0, &set_record_btrace_cmdlist);
3097
3098 add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
3099 _("Show record btrace bts options"),
3100 &show_record_btrace_bts_cmdlist,
3101 "show record btrace bts ", 0, &show_record_btrace_cmdlist);
3102
3103 add_setshow_uinteger_cmd ("buffer-size", no_class,
3104 &record_btrace_conf.bts.size,
3105 _("Set the record/replay bts buffer size."),
3106 _("Show the record/replay bts buffer size."), _("\
 3107When starting recording, request a trace buffer of this size. \
3108The actual buffer size may differ from the requested size. \
3109Use \"info record\" to see the actual buffer size.\n\n\
3110Bigger buffers allow longer recording but also take more time to process \
3111the recorded execution trace.\n\n\
b20a6524
MM
3112The trace buffer size may not be changed while recording."), NULL,
3113 show_record_bts_buffer_size_value,
d33501a5
MM
3114 &set_record_btrace_bts_cmdlist,
3115 &show_record_btrace_bts_cmdlist);
3116
b20a6524
MM
3117 add_prefix_cmd ("pt", class_support, cmd_set_record_btrace_pt,
3118 _("Set record btrace pt options"),
3119 &set_record_btrace_pt_cmdlist,
3120 "set record btrace pt ", 0, &set_record_btrace_cmdlist);
3121
3122 add_prefix_cmd ("pt", class_support, cmd_show_record_btrace_pt,
3123 _("Show record btrace pt options"),
3124 &show_record_btrace_pt_cmdlist,
3125 "show record btrace pt ", 0, &show_record_btrace_cmdlist);
3126
3127 add_setshow_uinteger_cmd ("buffer-size", no_class,
3128 &record_btrace_conf.pt.size,
3129 _("Set the record/replay pt buffer size."),
3130 _("Show the record/replay pt buffer size."), _("\
3131Bigger buffers allow longer recording but also take more time to process \
3132the recorded execution.\n\
3133The actual buffer size may differ from the requested size. Use \"info record\" \
3134to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
3135 &set_record_btrace_pt_cmdlist,
3136 &show_record_btrace_pt_cmdlist);
3137
afedecd3
MM
3138 init_record_btrace_ops ();
3139 add_target (&record_btrace_ops);
0b722aec
MM
3140
3141 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
3142 xcalloc, xfree);
d33501a5
MM
3143
3144 record_btrace_conf.bts.size = 64 * 1024;
b20a6524 3145 record_btrace_conf.pt.size = 16 * 1024;
afedecd3 3146}
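/* Example session (illustrative only; exact output may differ) using the
   commands registered above:

     (gdb) set record btrace bts buffer-size 131072
     (gdb) record btrace bts
     (gdb) show record btrace replay-memory-access
     Replay memory access is read-only.
     (gdb) reverse-stepi
     (gdb) record goto begin
     (gdb) info record
*/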