Revise targets able to run ELF 64k section test
[deliverable/binutils-gdb.git] / gdb / record-btrace.c
CommitLineData
afedecd3
MM
1/* Branch trace support for GDB, the GNU debugger.
2
618f726f 3 Copyright (C) 2013-2016 Free Software Foundation, Inc.
afedecd3
MM
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22#include "defs.h"
23#include "record.h"
24#include "gdbthread.h"
25#include "target.h"
26#include "gdbcmd.h"
27#include "disasm.h"
28#include "observer.h"
afedecd3
MM
29#include "cli/cli-utils.h"
30#include "source.h"
31#include "ui-out.h"
32#include "symtab.h"
33#include "filenames.h"
1f3ef581 34#include "regcache.h"
cecac1ab 35#include "frame-unwind.h"
0b722aec 36#include "hashtab.h"
45741a9c 37#include "infrun.h"
70ad5bff
MM
38#include "event-loop.h"
39#include "inf-loop.h"
e3cfc1c7 40#include "vec.h"
afedecd3
MM
41
42/* The target_ops of record-btrace. */
43static struct target_ops record_btrace_ops;
44
45/* A new thread observer enabling branch tracing for the new thread. */
46static struct observer *record_btrace_thread_observer;
47
67b5c0c1
MM
48/* Memory access types used in set/show record btrace replay-memory-access. */
49static const char replay_memory_access_read_only[] = "read-only";
50static const char replay_memory_access_read_write[] = "read-write";
51static const char *const replay_memory_access_types[] =
52{
53 replay_memory_access_read_only,
54 replay_memory_access_read_write,
55 NULL
56};
57
58/* The currently allowed replay memory access type. */
59static const char *replay_memory_access = replay_memory_access_read_only;
60
61/* Command lists for "set/show record btrace". */
62static struct cmd_list_element *set_record_btrace_cmdlist;
63static struct cmd_list_element *show_record_btrace_cmdlist;
633785ff 64
70ad5bff
MM
65/* The execution direction of the last resume we got. See record-full.c. */
66static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;
67
68/* The async event handler for reverse/replay execution. */
69static struct async_event_handler *record_btrace_async_inferior_event_handler;
70
aef92902
MM
71/* A flag indicating that we are currently generating a core file. */
72static int record_btrace_generating_corefile;
73
f4abbc16
MM
74/* The current branch trace configuration. */
75static struct btrace_config record_btrace_conf;
76
77/* Command list for "record btrace". */
78static struct cmd_list_element *record_btrace_cmdlist;
79
d33501a5
MM
80/* Command lists for "set/show record btrace bts". */
81static struct cmd_list_element *set_record_btrace_bts_cmdlist;
82static struct cmd_list_element *show_record_btrace_bts_cmdlist;
83
b20a6524
MM
84/* Command lists for "set/show record btrace pt". */
85static struct cmd_list_element *set_record_btrace_pt_cmdlist;
86static struct cmd_list_element *show_record_btrace_pt_cmdlist;
87
afedecd3
MM
88/* Print a record-btrace debug message. Use do ... while (0) to avoid
89 ambiguities when used in if statements. */
90
91#define DEBUG(msg, args...) \
92 do \
93 { \
94 if (record_debug != 0) \
95 fprintf_unfiltered (gdb_stdlog, \
96 "[record-btrace] " msg "\n", ##args); \
97 } \
98 while (0)
99
100
/* Update the branch trace for the current thread and return a pointer to its
   thread_info.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct thread_info *
require_btrace_thread (void)
{
  struct thread_info *tp;

  DEBUG ("require");

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  /* Pull in any new trace data before checking whether there is any.  */
  btrace_fetch (tp);

  if (btrace_is_empty (tp))
    error (_("No trace."));

  return tp;
}
125
/* Update the branch trace for the current thread and return a pointer to its
   branch trace information struct.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct btrace_thread_info *
require_btrace (void)
{
  struct thread_info *tp;

  /* "No thread." and "No trace." errors are raised here.  */
  tp = require_btrace_thread ();

  return &tp->btrace;
}
141
/* Enable branch tracing for one thread.  Warn on errors.

   This is installed as the new-thread observer callback (see
   record_btrace_auto_enable), so failures must not throw -- they are
   downgraded to warnings.  */

static void
record_btrace_enable_warn (struct thread_info *tp)
{
  TRY
    {
      btrace_enable (tp, &record_btrace_conf);
    }
  CATCH (error, RETURN_MASK_ERROR)
    {
      warning ("%s", error.message);
    }
  END_CATCH
}
157
/* Callback function to disable branch tracing for one thread.

   ARG is the thread_info whose tracing is torn down.  Registered as a
   cleanup in record_btrace_open to undo btrace_enable on error.  */

static void
record_btrace_disable_callback (void *arg)
{
  struct thread_info *tp = (struct thread_info *) arg;

  btrace_disable (tp);
}
167
/* Enable automatic tracing of new threads.  */

static void
record_btrace_auto_enable (void)
{
  DEBUG ("attach thread observer");

  /* The observer enables branch tracing for each thread GDB learns about;
     see record_btrace_enable_warn.  */
  record_btrace_thread_observer
    = observer_attach_new_thread (record_btrace_enable_warn);
}
178
/* Disable automatic tracing of new threads.  */

static void
record_btrace_auto_disable (void)
{
  /* The observer may have been detached, already.  */
  if (record_btrace_thread_observer == NULL)
    return;

  DEBUG ("detach thread observer");

  observer_detach_new_thread (record_btrace_thread_observer);
  record_btrace_thread_observer = NULL;
}
193
70ad5bff
MM
/* The record-btrace async event handler function.

   Forwards the event to the common inferior event handling.  DATA is
   unused.  */

static void
record_btrace_handle_async_inferior_event (gdb_client_data data)
{
  inferior_event_handler (INF_REG_EVENT, NULL);
}
201
afedecd3
MM
/* The to_open method of target record-btrace.

   ARGS optionally selects the threads to trace (a thread number list);
   NULL or the empty string means all non-exited threads.  */

static void
record_btrace_open (const char *args, int from_tty)
{
  struct cleanup *disable_chain;
  struct thread_info *tp;
  const char *format;

  DEBUG ("open");

  record_preopen ();

  if (!target_has_execution)
    error (_("The program is not being run."));

  gdb_assert (record_btrace_thread_observer == NULL);

  /* If enabling fails for some thread, the cleanup chain disables tracing
     again for the threads that were already enabled.  */
  disable_chain = make_cleanup (null_cleanup, NULL);
  ALL_NON_EXITED_THREADS (tp)
    if (args == NULL || *args == 0 || number_is_in_list (args, tp->global_num))
      {
	btrace_enable (tp, &record_btrace_conf);

	make_cleanup (record_btrace_disable_callback, tp);
      }

  record_btrace_auto_enable ();

  push_target (&record_btrace_ops);

  record_btrace_async_inferior_event_handler
    = create_async_event_handler (record_btrace_handle_async_inferior_event,
				  NULL);
  record_btrace_generating_corefile = 0;

  format = btrace_format_short_string (record_btrace_conf.format);
  observer_notify_record_changed (current_inferior (), 1, "btrace", format);

  /* Everything succeeded; keep tracing enabled.  */
  discard_cleanups (disable_chain);
}
243
/* The to_stop_recording method of target record-btrace.  */

static void
record_btrace_stop_recording (struct target_ops *self)
{
  struct thread_info *tp;

  DEBUG ("stop recording");

  record_btrace_auto_disable ();

  /* Only disable threads that actually have tracing enabled.  */
  ALL_NON_EXITED_THREADS (tp)
    if (tp->btrace.target != NULL)
      btrace_disable (tp);
}
259
/* The to_close method of target record-btrace.  */

static void
record_btrace_close (struct target_ops *self)
{
  struct thread_info *tp;

  if (record_btrace_async_inferior_event_handler != NULL)
    delete_async_event_handler (&record_btrace_async_inferior_event_handler);

  /* Make sure automatic recording gets disabled even if we did not stop
     recording before closing the record-btrace target.  */
  record_btrace_auto_disable ();

  /* We should have already stopped recording.
     Tear down btrace in case we have not.  */
  ALL_NON_EXITED_THREADS (tp)
    btrace_teardown (tp);
}
279
b7d2e916
PA
/* The to_async method of target record-btrace.

   ENABLE non-zero turns async mode on, zero turns it off.  The request is
   also forwarded to the target beneath.  */

static void
record_btrace_async (struct target_ops *ops, int enable)
{
  if (enable)
    mark_async_event_handler (record_btrace_async_inferior_event_handler);
  else
    clear_async_event_handler (record_btrace_async_inferior_event_handler);

  ops->beneath->to_async (ops->beneath, enable);
}
292
d33501a5
MM
/* Scale *SIZE down to the largest binary unit that divides it evenly and
   return the matching human-readable suffix ("GB", "MB", "kB", or "" when
   no unit divides it).  *SIZE is updated in place.  */

static const char *
record_btrace_adjust_size (unsigned int *size)
{
  static const unsigned int shifts[] = { 30, 20, 10 };
  static const char *const suffixes[] = { "GB", "MB", "kB" };
  unsigned int value, i;

  value = *size;

  for (i = 0; i < 3; ++i)
    {
      unsigned int mask = (1u << shifts[i]) - 1;

      if ((value & mask) == 0)
	{
	  *size = value >> shifts[i];
	  return suffixes[i];
	}
    }

  return "";
}
320
321/* Print a BTS configuration. */
322
323static void
324record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
325{
326 const char *suffix;
327 unsigned int size;
328
329 size = conf->size;
330 if (size > 0)
331 {
332 suffix = record_btrace_adjust_size (&size);
333 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
334 }
335}
336
bc504a31 337/* Print an Intel Processor Trace configuration. */
b20a6524
MM
338
339static void
340record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
341{
342 const char *suffix;
343 unsigned int size;
344
345 size = conf->size;
346 if (size > 0)
347 {
348 suffix = record_btrace_adjust_size (&size);
349 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
350 }
351}
352
d33501a5
MM
353/* Print a branch tracing configuration. */
354
355static void
356record_btrace_print_conf (const struct btrace_config *conf)
357{
358 printf_unfiltered (_("Recording format: %s.\n"),
359 btrace_format_string (conf->format));
360
361 switch (conf->format)
362 {
363 case BTRACE_FORMAT_NONE:
364 return;
365
366 case BTRACE_FORMAT_BTS:
367 record_btrace_print_bts_conf (&conf->bts);
368 return;
b20a6524
MM
369
370 case BTRACE_FORMAT_PT:
371 record_btrace_print_pt_conf (&conf->pt);
372 return;
d33501a5
MM
373 }
374
375 internal_error (__FILE__, __LINE__, _("Unkown branch trace format."));
376}
377
afedecd3
MM
/* The to_info_record method of target record-btrace.

   Prints the trace configuration and a summary of the recorded trace
   (instruction count, function segment count, gaps) for the current
   thread, plus the replay position if replaying.  */

static void
record_btrace_info (struct target_ops *self)
{
  struct btrace_thread_info *btinfo;
  const struct btrace_config *conf;
  struct thread_info *tp;
  unsigned int insns, calls, gaps;

  DEBUG ("info");

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  btinfo = &tp->btrace;

  conf = btrace_conf (btinfo);
  if (conf != NULL)
    record_btrace_print_conf (conf);

  /* Update the trace before counting.  */
  btrace_fetch (tp);

  insns = 0;
  calls = 0;
  gaps = 0;

  if (!btrace_is_empty (tp))
    {
      struct btrace_call_iterator call;
      struct btrace_insn_iterator insn;

      btrace_call_end (&call, btinfo);
      btrace_call_prev (&call, 1);
      calls = btrace_call_number (&call);

      btrace_insn_end (&insn, btinfo);

      insns = btrace_insn_number (&insn);
      if (insns != 0)
	{
	  /* The last instruction does not really belong to the trace.  */
	  insns -= 1;
	}
      else
	{
	  unsigned int steps;

	  /* Skip gaps at the end.  */
	  do
	    {
	      steps = btrace_insn_prev (&insn, 1);
	      if (steps == 0)
		break;

	      insns = btrace_insn_number (&insn);
	    }
	  while (insns == 0);
	}

      gaps = btinfo->ngaps;
    }

  printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
		       "for thread %s (%s).\n"), insns, calls, gaps,
		     print_thread_id (tp), target_pid_to_str (tp->ptid));

  if (btrace_is_replaying (tp))
    printf_unfiltered (_("Replay in progress.  At instruction %u.\n"),
		       btrace_insn_number (btinfo->replay));
}
450
31fd9caa
MM
/* Print a decode error.

   ERRCODE is the format-specific decode error code; FORMAT selects how it
   is interpreted.  Non-error conditions (user quit, tracing disabled,
   overflow for PT) are printed without the "decode error" prefix.  */

static void
btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
			    enum btrace_format format)
{
  const char *errstr;
  int is_error;

  errstr = _("unknown");
  is_error = 1;

  switch (format)
    {
    default:
      break;

    case BTRACE_FORMAT_BTS:
      switch (errcode)
	{
	default:
	  break;

	case BDE_BTS_OVERFLOW:
	  errstr = _("instruction overflow");
	  break;

	case BDE_BTS_INSN_SIZE:
	  errstr = _("unknown instruction");
	  break;
	}
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      switch (errcode)
	{
	case BDE_PT_USER_QUIT:
	  is_error = 0;
	  errstr = _("trace decode cancelled");
	  break;

	case BDE_PT_DISABLED:
	  is_error = 0;
	  errstr = _("disabled");
	  break;

	case BDE_PT_OVERFLOW:
	  is_error = 0;
	  errstr = _("overflow");
	  break;

	default:
	  /* Negative codes come from the libipt decoder itself.  */
	  if (errcode < 0)
	    errstr = pt_errstr (pt_errcode (errcode));
	  break;
	}
      break;
#endif /* defined (HAVE_LIBIPT) */
    }

  ui_out_text (uiout, _("["));
  if (is_error)
    {
      ui_out_text (uiout, _("decode error ("));
      ui_out_field_int (uiout, "errcode", errcode);
      ui_out_text (uiout, _("): "));
    }
  ui_out_text (uiout, errstr);
  ui_out_text (uiout, _("]\n"));
}
522
afedecd3
MM
/* Print an unsigned int VAL to UIOUT under field name FLD.  */

static void
ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
{
  ui_out_field_fmt (uiout, fld, "%u", val);
}
530
f94cc897
MM
/* A range of source lines.  */

struct btrace_line_range
{
  /* The symtab this line is from.  */
  struct symtab *symtab;

  /* The first line (inclusive).  */
  int begin;

  /* The last line (exclusive).  */
  int end;
};

/* Construct a line range over SYMTAB covering [BEGIN, END).  */

static struct btrace_line_range
btrace_mk_line_range (struct symtab *symtab, int begin, int end)
{
  struct btrace_line_range range;

  range.symtab = symtab;
  range.begin = begin;
  range.end = end;

  return range;
}

/* Add a line to a line range and return the extended range.  */

static struct btrace_line_range
btrace_line_range_add (struct btrace_line_range range, int line)
{
  if (range.end <= range.begin)
    {
      /* This is the first entry.  */
      range.begin = line;
      range.end = line + 1;
    }
  else if (line < range.begin)
    range.begin = line;
  else if (line >= range.end)
    {
      /* END is exclusive, so extending the range upward must place END one
	 past LINE.  The previous code set END to LINE, which excluded the
	 line just added.  */
      range.end = line + 1;
    }

  return range;
}

/* Return non-zero if RANGE is empty, zero otherwise.  */

static int
btrace_line_range_is_empty (struct btrace_line_range range)
{
  return range.end <= range.begin;
}

/* Return non-zero if LHS contains RHS, zero otherwise.  */

static int
btrace_line_range_contains_range (struct btrace_line_range lhs,
				  struct btrace_line_range rhs)
{
  return ((lhs.symtab == rhs.symtab)
	  && (lhs.begin <= rhs.begin)
	  && (rhs.end <= lhs.end));
}
596
/* Find the line range associated with PC.

   Returns an empty range (possibly with a NULL symtab) if PC cannot be
   mapped to source lines.  */

static struct btrace_line_range
btrace_find_line_range (CORE_ADDR pc)
{
  struct btrace_line_range range;
  struct linetable_entry *lines;
  struct linetable *ltable;
  struct symtab *symtab;
  int nlines, i;

  symtab = find_pc_line_symtab (pc);
  if (symtab == NULL)
    return btrace_mk_line_range (NULL, 0, 0);

  ltable = SYMTAB_LINETABLE (symtab);
  if (ltable == NULL)
    return btrace_mk_line_range (symtab, 0, 0);

  nlines = ltable->nitems;
  lines = ltable->item;
  if (nlines <= 0)
    return btrace_mk_line_range (symtab, 0, 0);

  range = btrace_mk_line_range (symtab, 0, 0);
  /* NOTE(review): the final linetable entry is skipped -- presumably it is
     an end-of-sequence marker rather than a real source line; confirm
     against the linetable conventions.  */
  for (i = 0; i < nlines - 1; i++)
    {
      if ((lines[i].pc == pc) && (lines[i].line != 0))
	range = btrace_line_range_add (range, lines[i].line);
    }

  return range;
}
630
/* Print source lines in LINES to UIOUT.

   UI_ITEM_CHAIN is a cleanup chain for the last source line and the
   instructions corresponding to that source line.  When printing a new source
   line, we do the cleanups for the open chain and open a new cleanup chain for
   the new source line.  If the source line range in LINES is not empty, this
   function will leave the cleanup chain for the last printed source line open
   so instructions can be added to it.  */

static void
btrace_print_lines (struct btrace_line_range lines, struct ui_out *uiout,
		    struct cleanup **ui_item_chain, int flags)
{
  print_source_lines_flags psl_flags;
  int line;

  psl_flags = 0;
  if (flags & DISASSEMBLY_FILENAME)
    psl_flags |= PRINT_SOURCE_LINES_FILENAME;

  for (line = lines.begin; line < lines.end; ++line)
    {
      /* Close the tuple for the previous source line, if any.  */
      if (*ui_item_chain != NULL)
	do_cleanups (*ui_item_chain);

      *ui_item_chain
	= make_cleanup_ui_out_tuple_begin_end (uiout, "src_and_asm_line");

      print_source_lines (lines.symtab, line, line + 1, psl_flags);

      make_cleanup_ui_out_list_begin_end (uiout, "line_asm_insn");
    }
}
664
afedecd3
MM
/* Disassemble a section of the recorded instruction trace.

   Prints the instructions in [BEGIN, END) to UIOUT, interleaving source
   lines when DISASSEMBLY_SOURCE is set in FLAGS and printing decode errors
   for gaps in the trace.  */

static void
btrace_insn_history (struct ui_out *uiout,
		     const struct btrace_thread_info *btinfo,
		     const struct btrace_insn_iterator *begin,
		     const struct btrace_insn_iterator *end, int flags)
{
  struct ui_file *stb;
  struct cleanup *cleanups, *ui_item_chain;
  struct disassemble_info di;
  struct gdbarch *gdbarch;
  struct btrace_insn_iterator it;
  struct btrace_line_range last_lines;

  DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
	 btrace_insn_number (end));

  flags |= DISASSEMBLY_SPECULATIVE;

  gdbarch = target_gdbarch ();
  stb = mem_fileopen ();
  cleanups = make_cleanup_ui_file_delete (stb);
  di = gdb_disassemble_info (gdbarch, stb);
  last_lines = btrace_mk_line_range (NULL, 0, 0);

  make_cleanup_ui_out_list_begin_end (uiout, "asm_insns");

  /* UI_ITEM_CHAIN is a cleanup chain for the last source line and the
     instructions corresponding to that line.  */
  ui_item_chain = NULL;

  for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
    {
      const struct btrace_insn *insn;

      insn = btrace_insn_get (&it);

      /* A NULL instruction indicates a gap in the trace.  */
      if (insn == NULL)
	{
	  const struct btrace_config *conf;

	  conf = btrace_conf (btinfo);

	  /* We have trace so we must have a configuration.  */
	  gdb_assert (conf != NULL);

	  btrace_ui_out_decode_error (uiout, it.function->errcode,
				      conf->format);
	}
      else
	{
	  struct disasm_insn dinsn;

	  if ((flags & DISASSEMBLY_SOURCE) != 0)
	    {
	      struct btrace_line_range lines;

	      /* Print the source lines for this instruction unless they
		 were already covered by the previously printed range.  */
	      lines = btrace_find_line_range (insn->pc);
	      if (!btrace_line_range_is_empty (lines)
		  && !btrace_line_range_contains_range (last_lines, lines))
		{
		  btrace_print_lines (lines, uiout, &ui_item_chain, flags);
		  last_lines = lines;
		}
	      else if (ui_item_chain == NULL)
		{
		  ui_item_chain
		    = make_cleanup_ui_out_tuple_begin_end (uiout,
							   "src_and_asm_line");
		  /* No source information.  */
		  make_cleanup_ui_out_list_begin_end (uiout, "line_asm_insn");
		}

	      gdb_assert (ui_item_chain != NULL);
	    }

	  memset (&dinsn, 0, sizeof (dinsn));
	  dinsn.number = btrace_insn_number (&it);
	  dinsn.addr = insn->pc;

	  if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
	    dinsn.is_speculative = 1;

	  gdb_pretty_print_insn (gdbarch, uiout, &di, &dinsn, flags, stb);
	}
    }

  do_cleanups (cleanups);
}
756
/* The to_insn_history method of target record-btrace.

   SIZE gives the number of instructions to print; a negative SIZE prints
   backwards.  A repeated command continues from the previously printed
   history stored in the thread's btrace info.  */

static void
record_btrace_insn_history (struct target_ops *self, int size, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
						       "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  btinfo = require_btrace ();
  history = btinfo->insn_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("insn-history (0x%x): %d", flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
	 start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
	begin = *replay;
      else
	btrace_insn_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
	 expand in the other direction, as well, to fill up any remaining
	 context.  */
      end = begin;
      if (size < 0)
	{
	  /* We want the current position covered, as well.  */
	  covered = btrace_insn_next (&end, 1);
	  covered += btrace_insn_prev (&begin, context - covered);
	  covered += btrace_insn_next (&end, context - covered);
	}
      else
	{
	  covered = btrace_insn_next (&end, context);
	  covered += btrace_insn_prev (&begin, context - covered);
	}
    }
  else
    {
      /* Continue from the previously printed history.  */
      begin = history->begin;
      end = history->end;

      DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
	     btrace_insn_number (&begin), btrace_insn_number (&end));

      if (size < 0)
	{
	  end = begin;
	  covered = btrace_insn_prev (&begin, context);
	}
      else
	{
	  begin = end;
	  covered = btrace_insn_next (&end, context);
	}
    }

  if (covered > 0)
    btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
	printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
	printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  /* Remember what we printed for a subsequent repeated command.  */
  btrace_set_insn_history (btinfo, &begin, &end);
  do_cleanups (uiout_cleanup);
}
842
843/* The to_insn_history_range method of target record-btrace. */
844
845static void
4e99c6b7
TT
846record_btrace_insn_history_range (struct target_ops *self,
847 ULONGEST from, ULONGEST to, int flags)
afedecd3
MM
848{
849 struct btrace_thread_info *btinfo;
23a7fe75
MM
850 struct btrace_insn_history *history;
851 struct btrace_insn_iterator begin, end;
afedecd3
MM
852 struct cleanup *uiout_cleanup;
853 struct ui_out *uiout;
23a7fe75
MM
854 unsigned int low, high;
855 int found;
afedecd3
MM
856
857 uiout = current_uiout;
858 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
859 "insn history");
23a7fe75
MM
860 low = from;
861 high = to;
afedecd3 862
23a7fe75 863 DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);
afedecd3
MM
864
865 /* Check for wrap-arounds. */
23a7fe75 866 if (low != from || high != to)
afedecd3
MM
867 error (_("Bad range."));
868
0688d04e 869 if (high < low)
afedecd3
MM
870 error (_("Bad range."));
871
23a7fe75 872 btinfo = require_btrace ();
afedecd3 873
23a7fe75
MM
874 found = btrace_find_insn_by_number (&begin, btinfo, low);
875 if (found == 0)
876 error (_("Range out of bounds."));
afedecd3 877
23a7fe75
MM
878 found = btrace_find_insn_by_number (&end, btinfo, high);
879 if (found == 0)
0688d04e
MM
880 {
881 /* Silently truncate the range. */
882 btrace_insn_end (&end, btinfo);
883 }
884 else
885 {
886 /* We want both begin and end to be inclusive. */
887 btrace_insn_next (&end, 1);
888 }
afedecd3 889
31fd9caa 890 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
23a7fe75 891 btrace_set_insn_history (btinfo, &begin, &end);
afedecd3
MM
892
893 do_cleanups (uiout_cleanup);
894}
895
/* The to_insn_history_from method of target record-btrace.

   Prints SIZE instructions starting at FROM; a negative SIZE prints the
   instructions ending at FROM instead.  The computed range is clamped at
   zero on the low end and at ULONGEST_MAX on wrap-around.  */

static void
record_btrace_insn_history_from (struct target_ops *self,
				 ULONGEST from, int size, int flags)
{
  ULONGEST begin, end, context;

  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  if (size < 0)
    {
      end = from;

      if (from < context)
	begin = 0;
      else
	begin = from - context + 1;
    }
  else
    {
      begin = from;
      end = from + context - 1;

      /* Check for wrap-around.  */
      if (end < begin)
	end = ULONGEST_MAX;
    }

  record_btrace_insn_history_range (self, begin, end, flags);
}
929
/* Print the instruction number range for a function call history line.
   Both endpoints are printed inclusively.  */

static void
btrace_call_history_insn_range (struct ui_out *uiout,
				const struct btrace_function *bfun)
{
  unsigned int begin, end, size;

  size = VEC_length (btrace_insn_s, bfun->insn);
  gdb_assert (size > 0);

  begin = bfun->insn_offset;
  end = begin + size - 1;

  ui_out_field_uint (uiout, "insn begin", begin);
  ui_out_text (uiout, ",");
  ui_out_field_uint (uiout, "insn end", end);
}
948
ce0dfbea
MM
/* Compute the lowest and highest source line for the instructions in BFUN
   and return them in PBEGIN and PEND.
   Ignore instructions that can't be mapped to BFUN, e.g. instructions that
   result from inlining or macro expansion.

   If no instruction maps to BFUN's symtab, *PBEGIN stays INT_MAX and
   *PEND stays INT_MIN, i.e. the result reads as an empty (inverted)
   range.  */

static void
btrace_compute_src_line_range (const struct btrace_function *bfun,
			       int *pbegin, int *pend)
{
  struct btrace_insn *insn;
  struct symtab *symtab;
  struct symbol *sym;
  unsigned int idx;
  int begin, end;

  begin = INT_MAX;
  end = INT_MIN;

  sym = bfun->sym;
  if (sym == NULL)
    goto out;

  symtab = symbol_symtab (sym);

  for (idx = 0; VEC_iterate (btrace_insn_s, bfun->insn, idx, insn); ++idx)
    {
      struct symtab_and_line sal;

      sal = find_pc_line (insn->pc, 0);
      if (sal.symtab != symtab || sal.line == 0)
	continue;

      begin = min (begin, sal.line);
      end = max (end, sal.line);
    }

 out:
  *pbegin = begin;
  *pend = end;
}
989
afedecd3
MM
/* Print the source line information for a function call history line.

   Prints nothing without a symbol; prints only the file name when the
   computed line range is empty; collapses "begin,end" to a single line
   number when they coincide.  */

static void
btrace_call_history_src_line (struct ui_out *uiout,
			      const struct btrace_function *bfun)
{
  struct symbol *sym;
  int begin, end;

  sym = bfun->sym;
  if (sym == NULL)
    return;

  ui_out_field_string (uiout, "file",
		       symtab_to_filename_for_display (symbol_symtab (sym)));

  /* An inverted range (end < begin) means no line information.  */
  btrace_compute_src_line_range (bfun, &begin, &end);
  if (end < begin)
    return;

  ui_out_text (uiout, ":");
  ui_out_field_int (uiout, "min line", begin);

  if (end == begin)
    return;

  ui_out_text (uiout, ",");
  ui_out_field_int (uiout, "max line", end);
}
1019
0b722aec
MM
1020/* Get the name of a branch trace function. */
1021
1022static const char *
1023btrace_get_bfun_name (const struct btrace_function *bfun)
1024{
1025 struct minimal_symbol *msym;
1026 struct symbol *sym;
1027
1028 if (bfun == NULL)
1029 return "??";
1030
1031 msym = bfun->msym;
1032 sym = bfun->sym;
1033
1034 if (sym != NULL)
1035 return SYMBOL_PRINT_NAME (sym);
1036 else if (msym != NULL)
efd66ac6 1037 return MSYMBOL_PRINT_NAME (msym);
0b722aec
MM
1038 else
1039 return "??";
1040}
1041
afedecd3
MM
/* Disassemble a section of the recorded function trace.

   Prints one line per function segment in [BEGIN, END), including index,
   optional call-depth indentation, function name, and the optional
   instruction range and source line info selected by INT_FLAGS.  Gaps in
   the trace are printed as decode errors.  */

static void
btrace_call_history (struct ui_out *uiout,
		     const struct btrace_thread_info *btinfo,
		     const struct btrace_call_iterator *begin,
		     const struct btrace_call_iterator *end,
		     int int_flags)
{
  struct btrace_call_iterator it;
  record_print_flags flags = (enum record_print_flag) int_flags;

  DEBUG ("ftrace (0x%x): [%u; %u)", int_flags, btrace_call_number (begin),
	 btrace_call_number (end));

  for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
    {
      const struct btrace_function *bfun;
      struct minimal_symbol *msym;
      struct symbol *sym;

      bfun = btrace_call_get (&it);
      sym = bfun->sym;
      msym = bfun->msym;

      /* Print the function index.  */
      ui_out_field_uint (uiout, "index", bfun->number);
      ui_out_text (uiout, "\t");

      /* Indicate gaps in the trace.  */
      if (bfun->errcode != 0)
	{
	  const struct btrace_config *conf;

	  conf = btrace_conf (btinfo);

	  /* We have trace so we must have a configuration.  */
	  gdb_assert (conf != NULL);

	  btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);

	  continue;
	}

      if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
	{
	  /* Indent by the call depth; BTINFO->level normalizes the deepest
	     back-trace level to zero.  */
	  int level = bfun->level + btinfo->level, i;

	  for (i = 0; i < level; ++i)
	    ui_out_text (uiout, "  ");
	}

      if (sym != NULL)
	ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (sym));
      else if (msym != NULL)
	ui_out_field_string (uiout, "function", MSYMBOL_PRINT_NAME (msym));
      else if (!ui_out_is_mi_like_p (uiout))
	ui_out_field_string (uiout, "function", "??");

      if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
	{
	  ui_out_text (uiout, _("\tinst "));
	  btrace_call_history_insn_range (uiout, bfun);
	}

      if ((flags & RECORD_PRINT_SRC_LINE) != 0)
	{
	  ui_out_text (uiout, _("\tat "));
	  btrace_call_history_src_line (uiout, bfun);
	}

      ui_out_text (uiout, "\n");
    }
}
1116
/* The to_call_history method of target record-btrace.

   Print SIZE function-call-history entries, expanding from the current
   browsing position (or the replay/trace end when there is none).  A
   negative SIZE moves backward.  Updates the stored browsing range.  */

static void
record_btrace_call_history (struct target_ops *self, int size, int int_flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int context, covered;
  record_print_flags flags = (enum record_print_flag) int_flags;

  uiout = current_uiout;
  /* NOTE(review): the tuple id says "insn history" although this is the
     call history - presumably a copy/paste from the insn variant; MI
     consumers may depend on it, so it is left unchanged.  */
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
						       "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  btinfo = require_btrace ();
  history = btinfo->call_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("call-history (0x%x): %d", int_flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
	 start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
	{
	  begin.function = replay->function;
	  begin.btinfo = btinfo;
	}
      else
	btrace_call_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
	 expand in the other direction, as well, to fill up any remaining
	 context.  */
      end = begin;
      if (size < 0)
	{
	  /* We want the current position covered, as well.  */
	  covered = btrace_call_next (&end, 1);
	  covered += btrace_call_prev (&begin, context - covered);
	  covered += btrace_call_next (&end, context - covered);
	}
      else
	{
	  covered = btrace_call_next (&end, context);
	  covered += btrace_call_prev (&begin, context - covered);
	}
    }
  else
    {
      /* Continue from the previously shown range.  */
      begin = history->begin;
      end = history->end;

      DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", int_flags, size,
	     btrace_call_number (&begin), btrace_call_number (&end));

      if (size < 0)
	{
	  end = begin;
	  covered = btrace_call_prev (&begin, context);
	}
      else
	{
	  begin = end;
	  covered = btrace_call_next (&end, context);
	}
    }

  if (covered > 0)
    btrace_call_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      /* Nothing left to show in the requested direction.  */
      if (size < 0)
	printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
	printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  /* Remember the shown range so a subsequent call continues from here.  */
  btrace_set_call_history (btinfo, &begin, &end);
  do_cleanups (uiout_cleanup);
}
1206
1207/* The to_call_history_range method of target record-btrace. */
1208
1209static void
f0d960ea 1210record_btrace_call_history_range (struct target_ops *self,
8d297bbf
PA
1211 ULONGEST from, ULONGEST to,
1212 int int_flags)
afedecd3
MM
1213{
1214 struct btrace_thread_info *btinfo;
23a7fe75
MM
1215 struct btrace_call_history *history;
1216 struct btrace_call_iterator begin, end;
afedecd3
MM
1217 struct cleanup *uiout_cleanup;
1218 struct ui_out *uiout;
23a7fe75
MM
1219 unsigned int low, high;
1220 int found;
8d297bbf 1221 record_print_flags flags = (enum record_print_flag) int_flags;
afedecd3
MM
1222
1223 uiout = current_uiout;
1224 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
1225 "func history");
23a7fe75
MM
1226 low = from;
1227 high = to;
afedecd3 1228
8d297bbf 1229 DEBUG ("call-history (0x%x): [%u; %u)", int_flags, low, high);
afedecd3
MM
1230
1231 /* Check for wrap-arounds. */
23a7fe75 1232 if (low != from || high != to)
afedecd3
MM
1233 error (_("Bad range."));
1234
0688d04e 1235 if (high < low)
afedecd3
MM
1236 error (_("Bad range."));
1237
23a7fe75 1238 btinfo = require_btrace ();
afedecd3 1239
23a7fe75
MM
1240 found = btrace_find_call_by_number (&begin, btinfo, low);
1241 if (found == 0)
1242 error (_("Range out of bounds."));
afedecd3 1243
23a7fe75
MM
1244 found = btrace_find_call_by_number (&end, btinfo, high);
1245 if (found == 0)
0688d04e
MM
1246 {
1247 /* Silently truncate the range. */
1248 btrace_call_end (&end, btinfo);
1249 }
1250 else
1251 {
1252 /* We want both begin and end to be inclusive. */
1253 btrace_call_next (&end, 1);
1254 }
afedecd3 1255
8710b709 1256 btrace_call_history (uiout, btinfo, &begin, &end, flags);
23a7fe75 1257 btrace_set_call_history (btinfo, &begin, &end);
afedecd3
MM
1258
1259 do_cleanups (uiout_cleanup);
1260}
1261
1262/* The to_call_history_from method of target record-btrace. */
1263
1264static void
ec0aea04 1265record_btrace_call_history_from (struct target_ops *self,
8d297bbf
PA
1266 ULONGEST from, int size,
1267 int int_flags)
afedecd3
MM
1268{
1269 ULONGEST begin, end, context;
8d297bbf 1270 record_print_flags flags = (enum record_print_flag) int_flags;
afedecd3
MM
1271
1272 context = abs (size);
0688d04e
MM
1273 if (context == 0)
1274 error (_("Bad record function-call-history-size."));
afedecd3
MM
1275
1276 if (size < 0)
1277 {
1278 end = from;
1279
1280 if (from < context)
1281 begin = 0;
1282 else
0688d04e 1283 begin = from - context + 1;
afedecd3
MM
1284 }
1285 else
1286 {
1287 begin = from;
0688d04e 1288 end = from + context - 1;
afedecd3
MM
1289
1290 /* Check for wrap-around. */
1291 if (end < begin)
1292 end = ULONGEST_MAX;
1293 }
1294
f0d960ea 1295 record_btrace_call_history_range (self, begin, end, flags);
afedecd3
MM
1296}
1297
07bbe694
MM
1298/* The to_record_is_replaying method of target record-btrace. */
1299
1300static int
a52eab48 1301record_btrace_is_replaying (struct target_ops *self, ptid_t ptid)
07bbe694
MM
1302{
1303 struct thread_info *tp;
1304
034f788c 1305 ALL_NON_EXITED_THREADS (tp)
a52eab48 1306 if (ptid_match (tp->ptid, ptid) && btrace_is_replaying (tp))
07bbe694
MM
1307 return 1;
1308
1309 return 0;
1310}
1311
7ff27e9b
MM
1312/* The to_record_will_replay method of target record-btrace. */
1313
1314static int
1315record_btrace_will_replay (struct target_ops *self, ptid_t ptid, int dir)
1316{
1317 return dir == EXEC_REVERSE || record_btrace_is_replaying (self, ptid);
1318}
1319
633785ff
MM
1320/* The to_xfer_partial method of target record-btrace. */
1321
9b409511 1322static enum target_xfer_status
633785ff
MM
1323record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
1324 const char *annex, gdb_byte *readbuf,
1325 const gdb_byte *writebuf, ULONGEST offset,
9b409511 1326 ULONGEST len, ULONGEST *xfered_len)
633785ff
MM
1327{
1328 struct target_ops *t;
1329
1330 /* Filter out requests that don't make sense during replay. */
67b5c0c1 1331 if (replay_memory_access == replay_memory_access_read_only
aef92902 1332 && !record_btrace_generating_corefile
4d10e986 1333 && record_btrace_is_replaying (ops, inferior_ptid))
633785ff
MM
1334 {
1335 switch (object)
1336 {
1337 case TARGET_OBJECT_MEMORY:
1338 {
1339 struct target_section *section;
1340
1341 /* We do not allow writing memory in general. */
1342 if (writebuf != NULL)
9b409511
YQ
1343 {
1344 *xfered_len = len;
bc113b4e 1345 return TARGET_XFER_UNAVAILABLE;
9b409511 1346 }
633785ff
MM
1347
1348 /* We allow reading readonly memory. */
1349 section = target_section_by_addr (ops, offset);
1350 if (section != NULL)
1351 {
1352 /* Check if the section we found is readonly. */
1353 if ((bfd_get_section_flags (section->the_bfd_section->owner,
1354 section->the_bfd_section)
1355 & SEC_READONLY) != 0)
1356 {
1357 /* Truncate the request to fit into this section. */
1358 len = min (len, section->endaddr - offset);
1359 break;
1360 }
1361 }
1362
9b409511 1363 *xfered_len = len;
bc113b4e 1364 return TARGET_XFER_UNAVAILABLE;
633785ff
MM
1365 }
1366 }
1367 }
1368
1369 /* Forward the request. */
e75fdfca
TT
1370 ops = ops->beneath;
1371 return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
1372 offset, len, xfered_len);
633785ff
MM
1373}
1374
/* The to_insert_breakpoint method of target record-btrace.

   Temporarily switches replay memory access to read-write so the
   breakpoint instruction can be written, restoring the previous mode
   on both the success and the exception path.  */

static int
record_btrace_insert_breakpoint (struct target_ops *ops,
				 struct gdbarch *gdbarch,
				 struct bp_target_info *bp_tgt)
{
  const char *old;
  int ret;

  /* Inserting breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  TRY
    {
      ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      /* Restore the access mode before re-throwing.  */
      replay_memory_access = old;
      throw_exception (except);
    }
  END_CATCH
  replay_memory_access = old;

  return ret;
}
1405
/* The to_remove_breakpoint method of target record-btrace.

   Mirror image of record_btrace_insert_breakpoint: allow memory writes
   for the duration of the call and restore the previous replay access
   mode on all paths.  */

static int
record_btrace_remove_breakpoint (struct target_ops *ops,
				 struct gdbarch *gdbarch,
				 struct bp_target_info *bp_tgt)
{
  const char *old;
  int ret;

  /* Removing breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  TRY
    {
      ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt);
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      /* Restore the access mode before re-throwing.  */
      replay_memory_access = old;
      throw_exception (except);
    }
  END_CATCH
  replay_memory_access = old;

  return ret;
}
1436
1f3ef581
MM
1437/* The to_fetch_registers method of target record-btrace. */
1438
1439static void
1440record_btrace_fetch_registers (struct target_ops *ops,
1441 struct regcache *regcache, int regno)
1442{
1443 struct btrace_insn_iterator *replay;
1444 struct thread_info *tp;
1445
1446 tp = find_thread_ptid (inferior_ptid);
1447 gdb_assert (tp != NULL);
1448
1449 replay = tp->btrace.replay;
aef92902 1450 if (replay != NULL && !record_btrace_generating_corefile)
1f3ef581
MM
1451 {
1452 const struct btrace_insn *insn;
1453 struct gdbarch *gdbarch;
1454 int pcreg;
1455
1456 gdbarch = get_regcache_arch (regcache);
1457 pcreg = gdbarch_pc_regnum (gdbarch);
1458 if (pcreg < 0)
1459 return;
1460
1461 /* We can only provide the PC register. */
1462 if (regno >= 0 && regno != pcreg)
1463 return;
1464
1465 insn = btrace_insn_get (replay);
1466 gdb_assert (insn != NULL);
1467
1468 regcache_raw_supply (regcache, regno, &insn->pc);
1469 }
1470 else
1471 {
e75fdfca 1472 struct target_ops *t = ops->beneath;
1f3ef581 1473
e75fdfca 1474 t->to_fetch_registers (t, regcache, regno);
1f3ef581
MM
1475 }
1476}
1477
1478/* The to_store_registers method of target record-btrace. */
1479
1480static void
1481record_btrace_store_registers (struct target_ops *ops,
1482 struct regcache *regcache, int regno)
1483{
1484 struct target_ops *t;
1485
a52eab48 1486 if (!record_btrace_generating_corefile
4d10e986
MM
1487 && record_btrace_is_replaying (ops, inferior_ptid))
1488 error (_("Cannot write registers while replaying."));
1f3ef581
MM
1489
1490 gdb_assert (may_write_registers != 0);
1491
e75fdfca
TT
1492 t = ops->beneath;
1493 t->to_store_registers (t, regcache, regno);
1f3ef581
MM
1494}
1495
1496/* The to_prepare_to_store method of target record-btrace. */
1497
1498static void
1499record_btrace_prepare_to_store (struct target_ops *ops,
1500 struct regcache *regcache)
1501{
1502 struct target_ops *t;
1503
a52eab48 1504 if (!record_btrace_generating_corefile
4d10e986 1505 && record_btrace_is_replaying (ops, inferior_ptid))
1f3ef581
MM
1506 return;
1507
e75fdfca
TT
1508 t = ops->beneath;
1509 t->to_prepare_to_store (t, regcache);
1f3ef581
MM
1510}
1511
/* The branch trace frame cache.

   One instance exists per btrace frame; it is allocated on the frame
   obstack and registered in BFCACHE below, keyed by the frame info
   pointer.  */

struct btrace_frame_cache
{
  /* The thread.  */
  struct thread_info *tp;

  /* The frame info.  Used as the hash key in BFCACHE.  */
  struct frame_info *frame;

  /* The branch trace function segment this frame represents.  */
  const struct btrace_function *bfun;
};

/* A struct btrace_frame_cache hash table indexed by NEXT.  Entries are
   added in bfcache_new and removed in the frame dealloc callback.  */

static htab_t bfcache;
1529
1530/* hash_f for htab_create_alloc of bfcache. */
1531
1532static hashval_t
1533bfcache_hash (const void *arg)
1534{
19ba03f4
SM
1535 const struct btrace_frame_cache *cache
1536 = (const struct btrace_frame_cache *) arg;
0b722aec
MM
1537
1538 return htab_hash_pointer (cache->frame);
1539}
1540
1541/* eq_f for htab_create_alloc of bfcache. */
1542
1543static int
1544bfcache_eq (const void *arg1, const void *arg2)
1545{
19ba03f4
SM
1546 const struct btrace_frame_cache *cache1
1547 = (const struct btrace_frame_cache *) arg1;
1548 const struct btrace_frame_cache *cache2
1549 = (const struct btrace_frame_cache *) arg2;
0b722aec
MM
1550
1551 return cache1->frame == cache2->frame;
1552}
1553
1554/* Create a new btrace frame cache. */
1555
1556static struct btrace_frame_cache *
1557bfcache_new (struct frame_info *frame)
1558{
1559 struct btrace_frame_cache *cache;
1560 void **slot;
1561
1562 cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
1563 cache->frame = frame;
1564
1565 slot = htab_find_slot (bfcache, cache, INSERT);
1566 gdb_assert (*slot == NULL);
1567 *slot = cache;
1568
1569 return cache;
1570}
1571
1572/* Extract the branch trace function from a branch trace frame. */
1573
1574static const struct btrace_function *
1575btrace_get_frame_function (struct frame_info *frame)
1576{
1577 const struct btrace_frame_cache *cache;
1578 const struct btrace_function *bfun;
1579 struct btrace_frame_cache pattern;
1580 void **slot;
1581
1582 pattern.frame = frame;
1583
1584 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1585 if (slot == NULL)
1586 return NULL;
1587
19ba03f4 1588 cache = (const struct btrace_frame_cache *) *slot;
0b722aec
MM
1589 return cache->bfun;
1590}
1591
cecac1ab
MM
1592/* Implement stop_reason method for record_btrace_frame_unwind. */
1593
1594static enum unwind_stop_reason
1595record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1596 void **this_cache)
1597{
0b722aec
MM
1598 const struct btrace_frame_cache *cache;
1599 const struct btrace_function *bfun;
1600
19ba03f4 1601 cache = (const struct btrace_frame_cache *) *this_cache;
0b722aec
MM
1602 bfun = cache->bfun;
1603 gdb_assert (bfun != NULL);
1604
1605 if (bfun->up == NULL)
1606 return UNWIND_UNAVAILABLE;
1607
1608 return UNWIND_NO_REASON;
cecac1ab
MM
1609}
1610
/* Implement this_id method for record_btrace_frame_unwind.

   Builds an unavailable-stack frame id from the frame's function start
   address and the number of the function segment's first segment, so
   that all segments of one function instance share the same id.  */

static void
record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
			     struct frame_id *this_id)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;
  CORE_ADDR code, special;

  cache = (const struct btrace_frame_cache *) *this_cache;

  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  /* Walk back to the first segment of this function instance.  */
  while (bfun->segment.prev != NULL)
    bfun = bfun->segment.prev;

  code = get_frame_func (this_frame);
  special = bfun->number;

  *this_id = frame_id_build_unavailable_stack_special (code, special);

  DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
	 btrace_get_bfun_name (cache->bfun),
	 core_addr_to_string_nz (this_id->code_addr),
	 core_addr_to_string_nz (this_id->special_addr));
}
1639
/* Implement prev_register method for record_btrace_frame_unwind.

   Only the PC can be unwound.  For a caller reached via a return, the
   caller resumes at its first recorded instruction; otherwise (call)
   it resumes after its last recorded instruction.  */

static struct value *
record_btrace_frame_prev_register (struct frame_info *this_frame,
				   void **this_cache,
				   int regnum)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun, *caller;
  const struct btrace_insn *insn;
  struct gdbarch *gdbarch;
  CORE_ADDR pc;
  int pcreg;

  gdbarch = get_frame_arch (this_frame);
  pcreg = gdbarch_pc_regnum (gdbarch);
  if (pcreg < 0 || regnum != pcreg)
    throw_error (NOT_AVAILABLE_ERROR,
		 _("Registers are not available in btrace record history"));

  cache = (const struct btrace_frame_cache *) *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  caller = bfun->up;
  if (caller == NULL)
    throw_error (NOT_AVAILABLE_ERROR,
		 _("No caller in btrace record history"));

  if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
    {
      /* The caller segment starts after the return - its first
	 instruction is the resume PC.  */
      insn = VEC_index (btrace_insn_s, caller->insn, 0);
      pc = insn->pc;
    }
  else
    {
      /* The caller called us - resume after its last (call)
	 instruction.  */
      insn = VEC_last (btrace_insn_s, caller->insn);
      pc = insn->pc;

      pc += gdb_insn_length (gdbarch, pc);
    }

  DEBUG ("[frame] unwound PC in %s on level %d: %s",
	 btrace_get_bfun_name (bfun), bfun->level,
	 core_addr_to_string_nz (pc));

  return frame_unwind_got_address (this_frame, regnum, pc);
}
1688
/* Implement sniffer method for record_btrace_frame_unwind.

   Claims the sentinel frame when the thread is replaying (using the
   replay position's function segment) and any frame whose callee is a
   btrace frame reached via a call (not a tail call).  */

static int
record_btrace_frame_sniffer (const struct frame_unwind *self,
			     struct frame_info *this_frame,
			     void **this_cache)
{
  const struct btrace_function *bfun;
  struct btrace_frame_cache *cache;
  struct thread_info *tp;
  struct frame_info *next;

  /* THIS_FRAME does not contain a reference to its thread.  */
  tp = find_thread_ptid (inferior_ptid);
  gdb_assert (tp != NULL);

  bfun = NULL;
  next = get_next_frame (this_frame);
  if (next == NULL)
    {
      /* Innermost frame: use the replay position, if any.  */
      const struct btrace_insn_iterator *replay;

      replay = tp->btrace.replay;
      if (replay != NULL)
	bfun = replay->function;
    }
  else
    {
      /* Outer frame: derive from the callee's caller link, unless the
	 callee was reached via a tail call (handled by the tailcall
	 sniffer).  */
      const struct btrace_function *callee;

      callee = btrace_get_frame_function (next);
      if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
	bfun = callee->up;
    }

  if (bfun == NULL)
    return 0;

  DEBUG ("[frame] sniffed frame for %s on level %d",
	 btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = tp;
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}
1738
1739/* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1740
1741static int
1742record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
1743 struct frame_info *this_frame,
1744 void **this_cache)
1745{
1746 const struct btrace_function *bfun, *callee;
1747 struct btrace_frame_cache *cache;
1748 struct frame_info *next;
1749
1750 next = get_next_frame (this_frame);
1751 if (next == NULL)
1752 return 0;
1753
1754 callee = btrace_get_frame_function (next);
1755 if (callee == NULL)
1756 return 0;
1757
1758 if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1759 return 0;
1760
1761 bfun = callee->up;
1762 if (bfun == NULL)
1763 return 0;
1764
1765 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1766 btrace_get_bfun_name (bfun), bfun->level);
1767
1768 /* This is our frame. Initialize the frame cache. */
1769 cache = bfcache_new (this_frame);
1770 cache->tp = find_thread_ptid (inferior_ptid);
1771 cache->bfun = bfun;
1772
1773 *this_cache = cache;
1774 return 1;
1775}
1776
1777static void
1778record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
1779{
1780 struct btrace_frame_cache *cache;
1781 void **slot;
1782
19ba03f4 1783 cache = (struct btrace_frame_cache *) this_cache;
0b722aec
MM
1784
1785 slot = htab_find_slot (bfcache, cache, NO_INSERT);
1786 gdb_assert (slot != NULL);
1787
1788 htab_remove_elt (bfcache, cache);
cecac1ab
MM
1789}
1790
/* btrace recording does not store previous memory content, neither the stack
   frames content.  Any unwinding would return erroneous results as the stack
   contents no longer matches the changed PC value restored from history.
   Therefore this unwinder reports any possibly unwound registers as
   <unavailable>.  */

const struct frame_unwind record_btrace_frame_unwind =
{
  NORMAL_FRAME,
  record_btrace_frame_unwind_stop_reason,	/* stop_reason */
  record_btrace_frame_this_id,			/* this_id */
  record_btrace_frame_prev_register,		/* prev_register */
  NULL,						/* unwind_data */
  record_btrace_frame_sniffer,			/* sniffer */
  record_btrace_frame_dealloc_cache		/* dealloc_cache */
};
1807
/* Like record_btrace_frame_unwind, but for frames entered via a tail
   call.  */

const struct frame_unwind record_btrace_tailcall_frame_unwind =
{
  TAILCALL_FRAME,
  record_btrace_frame_unwind_stop_reason,	/* stop_reason */
  record_btrace_frame_this_id,			/* this_id */
  record_btrace_frame_prev_register,		/* prev_register */
  NULL,						/* unwind_data */
  record_btrace_tailcall_frame_sniffer,		/* sniffer */
  record_btrace_frame_dealloc_cache		/* dealloc_cache */
};
b2f4cfde 1818
/* Implement the to_get_unwinder method.  Returns the btrace frame
   unwinder so replaying threads get btrace frames.  */

static const struct frame_unwind *
record_btrace_to_get_unwinder (struct target_ops *self)
{
  return &record_btrace_frame_unwind;
}
1826
/* Implement the to_get_tailcall_unwinder method.  Returns the btrace
   tail-call frame unwinder.  */

static const struct frame_unwind *
record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
{
  return &record_btrace_tailcall_frame_unwind;
}
1834
987e68b1
MM
1835/* Return a human-readable string for FLAG. */
1836
1837static const char *
1838btrace_thread_flag_to_str (enum btrace_thread_flag flag)
1839{
1840 switch (flag)
1841 {
1842 case BTHR_STEP:
1843 return "step";
1844
1845 case BTHR_RSTEP:
1846 return "reverse-step";
1847
1848 case BTHR_CONT:
1849 return "cont";
1850
1851 case BTHR_RCONT:
1852 return "reverse-cont";
1853
1854 case BTHR_STOP:
1855 return "stop";
1856 }
1857
1858 return "<invalid>";
1859}
1860
52834460
MM
1861/* Indicate that TP should be resumed according to FLAG. */
1862
1863static void
1864record_btrace_resume_thread (struct thread_info *tp,
1865 enum btrace_thread_flag flag)
1866{
1867 struct btrace_thread_info *btinfo;
1868
43792cf0 1869 DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp),
987e68b1 1870 target_pid_to_str (tp->ptid), flag, btrace_thread_flag_to_str (flag));
52834460
MM
1871
1872 btinfo = &tp->btrace;
1873
52834460
MM
1874 /* Fetch the latest branch trace. */
1875 btrace_fetch (tp);
1876
0ca912df
MM
1877 /* A resume request overwrites a preceding resume or stop request. */
1878 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
52834460
MM
1879 btinfo->flags |= flag;
1880}
1881
/* Get the current frame for TP.

   Temporarily switches INFERIOR_PTID to TP and clears its executing
   flag so get_current_frame is willing to compute a frame; both are
   restored on all paths, including on exception.  */

static struct frame_info *
get_thread_current_frame (struct thread_info *tp)
{
  struct frame_info *frame;
  ptid_t old_inferior_ptid;
  int executing;

  /* Set INFERIOR_PTID, which is implicitly used by get_current_frame.  */
  old_inferior_ptid = inferior_ptid;
  inferior_ptid = tp->ptid;

  /* Clear the executing flag to allow changes to the current frame.
     We are not actually running, yet.  We just started a reverse execution
     command or a record goto command.
     For the latter, EXECUTING is false and this has no effect.
     For the former, EXECUTING is true and we're in to_wait, about to
     move the thread.  Since we need to recompute the stack, we temporarily
     set EXECUTING to false.  */
  executing = is_executing (inferior_ptid);
  set_executing (inferior_ptid, 0);

  frame = NULL;
  TRY
    {
      frame = get_current_frame ();
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      /* Restore the previous execution state.  */
      set_executing (inferior_ptid, executing);

      /* Restore the previous inferior_ptid.  */
      inferior_ptid = old_inferior_ptid;

      throw_exception (except);
    }
  END_CATCH

  /* Restore the previous execution state.  */
  set_executing (inferior_ptid, executing);

  /* Restore the previous inferior_ptid.  */
  inferior_ptid = old_inferior_ptid;

  return frame;
}
1930
/* Start replaying a thread.

   Positions TP's replay iterator at the end of its trace (skipping any
   trailing gaps) and fixes up the stepping-related frame ids, which are
   computed differently once btrace frames are in effect.  Returns the
   new replay iterator, or NULL if there is no trace.  */

static struct btrace_insn_iterator *
record_btrace_start_replaying (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;
  replay = NULL;

  /* We can't start replaying without trace.  */
  if (btinfo->begin == NULL)
    return NULL;

  /* GDB stores the current frame_id when stepping in order to detect steps
     into subroutines.
     Since frames are computed differently when we're replaying, we need to
     recompute those stored frames and fix them up so we can still detect
     subroutines after we started replaying.  */
  TRY
    {
      struct frame_info *frame;
      struct frame_id frame_id;
      int upd_step_frame_id, upd_step_stack_frame_id;

      /* The current frame without replaying - computed via normal unwind.  */
      frame = get_thread_current_frame (tp);
      frame_id = get_frame_id (frame);

      /* Check if we need to update any stepping-related frame id's.  */
      upd_step_frame_id = frame_id_eq (frame_id,
				       tp->control.step_frame_id);
      upd_step_stack_frame_id = frame_id_eq (frame_id,
					     tp->control.step_stack_frame_id);

      /* We start replaying at the end of the branch trace.  This corresponds
	 to the current instruction.  */
      replay = XNEW (struct btrace_insn_iterator);
      btrace_insn_end (replay, btinfo);

      /* Skip gaps at the end of the trace.  */
      while (btrace_insn_get (replay) == NULL)
	{
	  unsigned int steps;

	  steps = btrace_insn_prev (replay, 1);
	  if (steps == 0)
	    error (_("No trace."));
	}

      /* We're not replaying, yet.  */
      gdb_assert (btinfo->replay == NULL);
      btinfo->replay = replay;

      /* Make sure we're not using any stale registers.  */
      registers_changed_ptid (tp->ptid);

      /* The current frame with replaying - computed via btrace unwind.  */
      frame = get_thread_current_frame (tp);
      frame_id = get_frame_id (frame);

      /* Replace stepping related frames where necessary.  */
      if (upd_step_frame_id)
	tp->control.step_frame_id = frame_id;
      if (upd_step_stack_frame_id)
	tp->control.step_stack_frame_id = frame_id;
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      /* Undo the partial replay setup before propagating the error.  */
      xfree (btinfo->replay);
      btinfo->replay = NULL;

      registers_changed_ptid (tp->ptid);

      throw_exception (except);
    }
  END_CATCH

  return replay;
}
2012
2013/* Stop replaying a thread. */
2014
2015static void
2016record_btrace_stop_replaying (struct thread_info *tp)
2017{
2018 struct btrace_thread_info *btinfo;
2019
2020 btinfo = &tp->btrace;
2021
2022 xfree (btinfo->replay);
2023 btinfo->replay = NULL;
2024
2025 /* Make sure we're not leaving any stale registers. */
2026 registers_changed_ptid (tp->ptid);
2027}
2028
e3cfc1c7
MM
2029/* Stop replaying TP if it is at the end of its execution history. */
2030
2031static void
2032record_btrace_stop_replaying_at_end (struct thread_info *tp)
2033{
2034 struct btrace_insn_iterator *replay, end;
2035 struct btrace_thread_info *btinfo;
2036
2037 btinfo = &tp->btrace;
2038 replay = btinfo->replay;
2039
2040 if (replay == NULL)
2041 return;
2042
2043 btrace_insn_end (&end, btinfo);
2044
2045 if (btrace_insn_cmp (replay, &end) == 0)
2046 record_btrace_stop_replaying (tp);
2047}
2048
/* The to_resume method of target record-btrace.

   When nothing is replaying and we're going forward, forward the
   request to the target beneath.  Otherwise only record the resume
   intent in each matching thread's btrace flags; the actual stepping
   happens in record_btrace_wait.  */

static void
record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
		      enum gdb_signal signal)
{
  struct thread_info *tp;
  enum btrace_thread_flag flag, cflag;

  DEBUG ("resume %s: %s%s", target_pid_to_str (ptid),
	 execution_direction == EXEC_REVERSE ? "reverse-" : "",
	 step ? "step" : "cont");

  /* Store the execution direction of the last resume.

     If there is more than one to_resume call, we have to rely on infrun
     to not change the execution direction in-between.  */
  record_btrace_resume_exec_dir = execution_direction;

  /* As long as we're not replaying, just forward the request.

     For non-stop targets this means that no thread is replaying.  In order to
     make progress, we may need to explicitly move replaying threads to the end
     of their execution history.  */
  if ((execution_direction != EXEC_REVERSE)
      && !record_btrace_is_replaying (ops, minus_one_ptid))
    {
      ops = ops->beneath;
      ops->to_resume (ops, ptid, step, signal);
      return;
    }

  /* Compute the btrace thread flag for the requested move.  FLAG is for
     the stepping thread, CFLAG for the others (continue).  */
  if (execution_direction == EXEC_REVERSE)
    {
      flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;
      cflag = BTHR_RCONT;
    }
  else
    {
      flag = step == 0 ? BTHR_CONT : BTHR_STEP;
      cflag = BTHR_CONT;
    }

  /* We just indicate the resume intent here.  The actual stepping happens in
     record_btrace_wait below.

     For all-stop targets, we only step INFERIOR_PTID and continue others.  */
  if (!target_is_non_stop_p ())
    {
      gdb_assert (ptid_match (inferior_ptid, ptid));

      ALL_NON_EXITED_THREADS (tp)
	if (ptid_match (tp->ptid, ptid))
	  {
	    if (ptid_match (tp->ptid, inferior_ptid))
	      record_btrace_resume_thread (tp, flag);
	    else
	      record_btrace_resume_thread (tp, cflag);
	  }
    }
  else
    {
      ALL_NON_EXITED_THREADS (tp)
	if (ptid_match (tp->ptid, ptid))
	  record_btrace_resume_thread (tp, flag);
    }

  /* Async support.  */
  if (target_can_async_p ())
    {
      target_async (1);
      mark_async_event_handler (record_btrace_async_inferior_event_handler);
    }
}
2124
/* Cancel resuming TP: clear any pending move/stop request and, if TP
   replayed to the end of its history, stop replaying it.  */

static void
record_btrace_cancel_resume (struct thread_info *tp)
{
  enum btrace_thread_flag flags;

  flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
  if (flags == 0)
    return;

  DEBUG ("cancel resume thread %s (%s): %x (%s)",
	 print_thread_id (tp),
	 target_pid_to_str (tp->ptid), flags,
	 btrace_thread_flag_to_str (flags));

  tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
  record_btrace_stop_replaying_at_end (tp);
}
2144
2145/* Return a target_waitstatus indicating that we ran out of history. */
2146
2147static struct target_waitstatus
2148btrace_step_no_history (void)
2149{
2150 struct target_waitstatus status;
2151
2152 status.kind = TARGET_WAITKIND_NO_HISTORY;
2153
2154 return status;
2155}
2156
2157/* Return a target_waitstatus indicating that a step finished. */
2158
2159static struct target_waitstatus
2160btrace_step_stopped (void)
2161{
2162 struct target_waitstatus status;
2163
2164 status.kind = TARGET_WAITKIND_STOPPED;
2165 status.value.sig = GDB_SIGNAL_TRAP;
2166
2167 return status;
2168}
2169
6e4879f0
MM
2170/* Return a target_waitstatus indicating that a thread was stopped as
2171 requested. */
2172
2173static struct target_waitstatus
2174btrace_step_stopped_on_request (void)
2175{
2176 struct target_waitstatus status;
2177
2178 status.kind = TARGET_WAITKIND_STOPPED;
2179 status.value.sig = GDB_SIGNAL_0;
2180
2181 return status;
2182}
2183
d825d248
MM
2184/* Return a target_waitstatus indicating a spurious stop. */
2185
2186static struct target_waitstatus
2187btrace_step_spurious (void)
2188{
2189 struct target_waitstatus status;
2190
2191 status.kind = TARGET_WAITKIND_SPURIOUS;
2192
2193 return status;
2194}
2195
e3cfc1c7
MM
2196/* Return a target_waitstatus indicating that the thread was not resumed. */
2197
2198static struct target_waitstatus
2199btrace_step_no_resumed (void)
2200{
2201 struct target_waitstatus status;
2202
2203 status.kind = TARGET_WAITKIND_NO_RESUMED;
2204
2205 return status;
2206}
2207
2208/* Return a target_waitstatus indicating that we should wait again. */
2209
2210static struct target_waitstatus
2211btrace_step_again (void)
2212{
2213 struct target_waitstatus status;
2214
2215 status.kind = TARGET_WAITKIND_IGNORE;
2216
2217 return status;
2218}
2219
52834460
MM
2220/* Clear the record histories. */
2221
2222static void
2223record_btrace_clear_histories (struct btrace_thread_info *btinfo)
2224{
2225 xfree (btinfo->insn_history);
2226 xfree (btinfo->call_history);
2227
2228 btinfo->insn_history = NULL;
2229 btinfo->call_history = NULL;
2230}
2231
3c615f99
MM
2232/* Check whether TP's current replay position is at a breakpoint. */
2233
2234static int
2235record_btrace_replay_at_breakpoint (struct thread_info *tp)
2236{
2237 struct btrace_insn_iterator *replay;
2238 struct btrace_thread_info *btinfo;
2239 const struct btrace_insn *insn;
2240 struct inferior *inf;
2241
2242 btinfo = &tp->btrace;
2243 replay = btinfo->replay;
2244
2245 if (replay == NULL)
2246 return 0;
2247
2248 insn = btrace_insn_get (replay);
2249 if (insn == NULL)
2250 return 0;
2251
2252 inf = find_inferior_ptid (tp->ptid);
2253 if (inf == NULL)
2254 return 0;
2255
2256 return record_check_stopped_by_breakpoint (inf->aspace, insn->pc,
2257 &btinfo->stop_reason);
2258}
2259
/* Step one instruction in forward direction.  Returns SPURIOUS if the step
   was taken, STOPPED if a breakpoint was hit, or NO_HISTORY at the end of
   the trace.  */

static struct target_waitstatus
record_btrace_single_step_forward (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay, end;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;
  replay = btinfo->replay;

  /* We're done if we're not replaying.  */
  if (replay == NULL)
    return btrace_step_no_history ();

  /* Check if we're stepping a breakpoint.  For forward stepping this check
     happens before the step: PC points at the to-be-executed instruction.  */
  if (record_btrace_replay_at_breakpoint (tp))
    return btrace_step_stopped ();

  /* Skip gaps during replay.  */
  do
    {
      unsigned int steps;

      /* We will bail out here if we continue stepping after reaching the end
	 of the execution history.  */
      steps = btrace_insn_next (replay, 1);
      if (steps == 0)
	return btrace_step_no_history ();
    }
  while (btrace_insn_get (replay) == NULL);

  /* Determine the end of the instruction trace.  */
  btrace_insn_end (&end, btinfo);

  /* The execution trace contains (and ends with) the current instruction.
     This instruction has not been executed, yet, so the trace really ends
     one instruction earlier.  */
  if (btrace_insn_cmp (replay, &end) == 0)
    return btrace_step_no_history ();

  return btrace_step_spurious ();
}
2303
/* Step one instruction in backward direction.  Starts replaying if the
   thread was not already doing so.  */

static struct target_waitstatus
record_btrace_single_step_backward (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;
  replay = btinfo->replay;

  /* Start replaying if we're not already doing so.  */
  if (replay == NULL)
    replay = record_btrace_start_replaying (tp);

  /* If we can't step any further, we reached the end of the history.
     Skip gaps during replay.  */
  do
    {
      unsigned int steps;

      steps = btrace_insn_prev (replay, 1);
      if (steps == 0)
	return btrace_step_no_history ();
    }
  while (btrace_insn_get (replay) == NULL);

  /* Check if we're stepping a breakpoint.

     For reverse-stepping, this check is after the step.  There is logic in
     infrun.c that handles reverse-stepping separately.  See, for example,
     proceed and adjust_pc_after_break.

     This code assumes that for reverse-stepping, PC points to the last
     de-executed instruction, whereas for forward-stepping PC points to the
     next to-be-executed instruction.  */
  if (record_btrace_replay_at_breakpoint (tp))
    return btrace_step_stopped ();

  return btrace_step_spurious ();
}
2345
/* Step a single thread.  Consumes TP's pending move/stop request and
   performs at most one instruction step, translating the result into a
   target_waitstatus.  Continue requests that produced no event are re-armed
   via BTHR_CONT/BTHR_RCONT so the caller loops.  */

static struct target_waitstatus
record_btrace_step_thread (struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;
  struct target_waitstatus status;
  enum btrace_thread_flag flags;

  btinfo = &tp->btrace;

  /* Consume the request; it is re-added below where stepping continues.  */
  flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
  btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);

  DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp),
	 target_pid_to_str (tp->ptid), flags,
	 btrace_thread_flag_to_str (flags));

  /* We can't step without an execution history.  */
  if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
    return btrace_step_no_history ();

  switch (flags)
    {
    default:
      internal_error (__FILE__, __LINE__, _("invalid stepping type."));

    case BTHR_STOP:
      return btrace_step_stopped_on_request ();

    case BTHR_STEP:
      status = record_btrace_single_step_forward (tp);
      if (status.kind != TARGET_WAITKIND_SPURIOUS)
	break;

      /* The step completed without hitting anything special.  */
      return btrace_step_stopped ();

    case BTHR_RSTEP:
      status = record_btrace_single_step_backward (tp);
      if (status.kind != TARGET_WAITKIND_SPURIOUS)
	break;

      return btrace_step_stopped ();

    case BTHR_CONT:
      status = record_btrace_single_step_forward (tp);
      if (status.kind != TARGET_WAITKIND_SPURIOUS)
	break;

      /* Nothing happened; re-arm the continue request and ask the caller
	 to step us again.  */
      btinfo->flags |= flags;
      return btrace_step_again ();

    case BTHR_RCONT:
      status = record_btrace_single_step_backward (tp);
      if (status.kind != TARGET_WAITKIND_SPURIOUS)
	break;

      btinfo->flags |= flags;
      return btrace_step_again ();
    }

  /* We keep threads moving at the end of their execution history.  The to_wait
     method will stop the thread for whom the event is reported.  */
  if (status.kind == TARGET_WAITKIND_NO_HISTORY)
    btinfo->flags |= flags;

  return status;
}
2414
/* A vector of threads.  Used by record_btrace_wait to keep work lists of
   moving threads and of threads that ran out of history.  */

typedef struct thread_info * tp_t;
DEF_VEC_P (tp_t);

a6b5be76
MM
2420/* Announce further events if necessary. */
2421
2422static void
2423record_btrace_maybe_mark_async_event (const VEC (tp_t) *moving,
2424 const VEC (tp_t) *no_history)
2425{
2426 int more_moving, more_no_history;
2427
2428 more_moving = !VEC_empty (tp_t, moving);
2429 more_no_history = !VEC_empty (tp_t, no_history);
2430
2431 if (!more_moving && !more_no_history)
2432 return;
2433
2434 if (more_moving)
2435 DEBUG ("movers pending");
2436
2437 if (more_no_history)
2438 DEBUG ("no-history pending");
2439
2440 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2441}
2442
/* The to_wait method of target record-btrace.  Steps all matching moving
   threads round-robin until one reports an event, delaying "no history"
   reports until no other event is available.  */

static ptid_t
record_btrace_wait (struct target_ops *ops, ptid_t ptid,
		    struct target_waitstatus *status, int options)
{
  VEC (tp_t) *moving, *no_history;
  struct thread_info *tp, *eventing;
  struct cleanup *cleanups = make_cleanup (null_cleanup, NULL);

  DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);

  /* As long as we're not replaying, just forward the request.  */
  if ((execution_direction != EXEC_REVERSE)
      && !record_btrace_is_replaying (ops, minus_one_ptid))
    {
      ops = ops->beneath;
      return ops->to_wait (ops, ptid, status, options);
    }

  moving = NULL;
  no_history = NULL;

  make_cleanup (VEC_cleanup (tp_t), &moving);
  make_cleanup (VEC_cleanup (tp_t), &no_history);

  /* Keep a work list of moving threads.  */
  ALL_NON_EXITED_THREADS (tp)
    if (ptid_match (tp->ptid, ptid)
	&& ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0))
      VEC_safe_push (tp_t, moving, tp);

  if (VEC_empty (tp_t, moving))
    {
      *status = btrace_step_no_resumed ();

      DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid),
	     target_waitstatus_to_string (status));

      do_cleanups (cleanups);
      return null_ptid;
    }

  /* Step moving threads one by one, one step each, until either one thread
     reports an event or we run out of threads to step.

     When stepping more than one thread, chances are that some threads reach
     the end of their execution history earlier than others.  If we reported
     this immediately, all-stop on top of non-stop would stop all threads and
     resume the same threads next time.  And we would report the same thread
     having reached the end of its execution history again.

     In the worst case, this would starve the other threads.  But even if other
     threads would be allowed to make progress, this would result in far too
     many intermediate stops.

     We therefore delay the reporting of "no execution history" until we have
     nothing else to report.  By this time, all threads should have moved to
     either the beginning or the end of their execution history.  There will
     be a single user-visible stop.  */
  eventing = NULL;
  while ((eventing == NULL) && !VEC_empty (tp_t, moving))
    {
      unsigned int ix;

      ix = 0;
      while ((eventing == NULL) && VEC_iterate (tp_t, moving, ix, tp))
	{
	  *status = record_btrace_step_thread (tp);

	  switch (status->kind)
	    {
	    case TARGET_WAITKIND_IGNORE:
	      ix++;
	      break;

	    case TARGET_WAITKIND_NO_HISTORY:
	      VEC_safe_push (tp_t, no_history,
			     VEC_ordered_remove (tp_t, moving, ix));
	      break;

	    default:
	      eventing = VEC_unordered_remove (tp_t, moving, ix);
	      break;
	    }
	}
    }

  if (eventing == NULL)
    {
      /* We started with at least one moving thread.  This thread must have
	 either stopped or reached the end of its execution history.

	 In the former case, EVENTING must not be NULL.
	 In the latter case, NO_HISTORY must not be empty.  */
      gdb_assert (!VEC_empty (tp_t, no_history));

      /* We kept threads moving at the end of their execution history.  Stop
	 EVENTING now that we are going to report its stop.  */
      eventing = VEC_unordered_remove (tp_t, no_history, 0);
      eventing->btrace.flags &= ~BTHR_MOVE;

      *status = btrace_step_no_history ();
    }

  gdb_assert (eventing != NULL);

  /* We kept threads replaying at the end of their execution history.  Stop
     replaying EVENTING now that we are going to report its stop.  */
  record_btrace_stop_replaying_at_end (eventing);

  /* Stop all other threads.  */
  if (!target_is_non_stop_p ())
    ALL_NON_EXITED_THREADS (tp)
      record_btrace_cancel_resume (tp);

  /* In async mode, we need to announce further events.  */
  if (target_is_async_p ())
    record_btrace_maybe_mark_async_event (moving, no_history);

  /* Start record histories anew from the current position.  */
  record_btrace_clear_histories (&eventing->btrace);

  /* We moved the replay position but did not update registers.  */
  registers_changed_ptid (eventing->ptid);

  DEBUG ("wait ended by thread %s (%s): %s",
	 print_thread_id (eventing),
	 target_pid_to_str (eventing->ptid),
	 target_waitstatus_to_string (status));

  do_cleanups (cleanups);
  return eventing->ptid;
}
2577
6e4879f0
MM
2578/* The to_stop method of target record-btrace. */
2579
2580static void
2581record_btrace_stop (struct target_ops *ops, ptid_t ptid)
2582{
2583 DEBUG ("stop %s", target_pid_to_str (ptid));
2584
2585 /* As long as we're not replaying, just forward the request. */
a52eab48
MM
2586 if ((execution_direction != EXEC_REVERSE)
2587 && !record_btrace_is_replaying (ops, minus_one_ptid))
6e4879f0
MM
2588 {
2589 ops = ops->beneath;
2590 ops->to_stop (ops, ptid);
2591 }
2592 else
2593 {
2594 struct thread_info *tp;
2595
2596 ALL_NON_EXITED_THREADS (tp)
2597 if (ptid_match (tp->ptid, ptid))
2598 {
2599 tp->btrace.flags &= ~BTHR_MOVE;
2600 tp->btrace.flags |= BTHR_STOP;
2601 }
2602 }
2603 }
2604
/* The to_can_execute_reverse method of target record-btrace.  Reverse
   execution is always possible by replaying the recorded trace.  */

static int
record_btrace_can_execute_reverse (struct target_ops *self)
{
  return 1;
}
2612
9e8915c6 2613/* The to_stopped_by_sw_breakpoint method of target record-btrace. */
52834460 2614
9e8915c6
PA
2615static int
2616record_btrace_stopped_by_sw_breakpoint (struct target_ops *ops)
52834460 2617{
a52eab48 2618 if (record_btrace_is_replaying (ops, minus_one_ptid))
9e8915c6
PA
2619 {
2620 struct thread_info *tp = inferior_thread ();
2621
2622 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2623 }
2624
2625 return ops->beneath->to_stopped_by_sw_breakpoint (ops->beneath);
2626}
2627
2628/* The to_supports_stopped_by_sw_breakpoint method of target
2629 record-btrace. */
2630
2631static int
2632record_btrace_supports_stopped_by_sw_breakpoint (struct target_ops *ops)
2633{
a52eab48 2634 if (record_btrace_is_replaying (ops, minus_one_ptid))
9e8915c6
PA
2635 return 1;
2636
2637 return ops->beneath->to_supports_stopped_by_sw_breakpoint (ops->beneath);
2638}
2639
2640/* The to_stopped_by_sw_breakpoint method of target record-btrace. */
2641
2642static int
2643record_btrace_stopped_by_hw_breakpoint (struct target_ops *ops)
2644{
a52eab48 2645 if (record_btrace_is_replaying (ops, minus_one_ptid))
9e8915c6
PA
2646 {
2647 struct thread_info *tp = inferior_thread ();
2648
2649 return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2650 }
2651
2652 return ops->beneath->to_stopped_by_hw_breakpoint (ops->beneath);
2653}
2654
2655/* The to_supports_stopped_by_hw_breakpoint method of target
2656 record-btrace. */
2657
2658static int
2659record_btrace_supports_stopped_by_hw_breakpoint (struct target_ops *ops)
2660{
a52eab48 2661 if (record_btrace_is_replaying (ops, minus_one_ptid))
9e8915c6 2662 return 1;
52834460 2663
9e8915c6 2664 return ops->beneath->to_supports_stopped_by_hw_breakpoint (ops->beneath);
b2f4cfde
MM
2665}
2666
e8032dde 2667/* The to_update_thread_list method of target record-btrace. */
e2887aa3
MM
2668
2669static void
e8032dde 2670record_btrace_update_thread_list (struct target_ops *ops)
e2887aa3 2671{
e8032dde 2672 /* We don't add or remove threads during replay. */
a52eab48 2673 if (record_btrace_is_replaying (ops, minus_one_ptid))
e2887aa3
MM
2674 return;
2675
2676 /* Forward the request. */
e75fdfca 2677 ops = ops->beneath;
e8032dde 2678 ops->to_update_thread_list (ops);
e2887aa3
MM
2679}
2680
2681/* The to_thread_alive method of target record-btrace. */
2682
2683static int
2684record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
2685{
2686 /* We don't add or remove threads during replay. */
a52eab48 2687 if (record_btrace_is_replaying (ops, minus_one_ptid))
e2887aa3
MM
2688 return find_thread_ptid (ptid) != NULL;
2689
2690 /* Forward the request. */
e75fdfca
TT
2691 ops = ops->beneath;
2692 return ops->to_thread_alive (ops, ptid);
e2887aa3
MM
2693}
2694
/* Set the replay branch trace instruction iterator.  If IT is NULL, replay
   is stopped.  Clears the browsing histories and reprints the frame at the
   new position.  */

static void
record_btrace_set_replay (struct thread_info *tp,
			  const struct btrace_insn_iterator *it)
{
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;

  if (it == NULL || it->function == NULL)
    record_btrace_stop_replaying (tp);
  else
    {
      if (btinfo->replay == NULL)
	record_btrace_start_replaying (tp);
      else if (btrace_insn_cmp (btinfo->replay, it) == 0)
	/* Already at the requested position; nothing to do.  */
	return;

      *btinfo->replay = *it;
      /* The replay position changed; cached register values are stale.  */
      registers_changed_ptid (tp->ptid);
    }

  /* Start anew from the new replay position.  */
  record_btrace_clear_histories (btinfo);

  stop_pc = regcache_read_pc (get_current_regcache ());
  print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
}
2725
2726/* The to_goto_record_begin method of target record-btrace. */
2727
2728static void
08475817 2729record_btrace_goto_begin (struct target_ops *self)
066ce621
MM
2730{
2731 struct thread_info *tp;
2732 struct btrace_insn_iterator begin;
2733
2734 tp = require_btrace_thread ();
2735
2736 btrace_insn_begin (&begin, &tp->btrace);
2737 record_btrace_set_replay (tp, &begin);
066ce621
MM
2738}
2739
2740/* The to_goto_record_end method of target record-btrace. */
2741
2742static void
307a1b91 2743record_btrace_goto_end (struct target_ops *ops)
066ce621
MM
2744{
2745 struct thread_info *tp;
2746
2747 tp = require_btrace_thread ();
2748
2749 record_btrace_set_replay (tp, NULL);
066ce621
MM
2750}
2751
/* The to_goto_record method of target record-btrace.  Moves the replay
   position to instruction number INSN.  */

static void
record_btrace_goto (struct target_ops *self, ULONGEST insn)
{
  struct thread_info *tp;
  struct btrace_insn_iterator it;
  unsigned int number;
  int found;

  number = insn;

  /* Check for wrap-arounds: INSN is wider than the unsigned int the
     iterator lookup takes.  */
  if (number != insn)
    error (_("Instruction number out of range."));

  tp = require_btrace_thread ();

  found = btrace_find_insn_by_number (&it, &tp->btrace, number);
  if (found == 0)
    error (_("No such instruction."));

  record_btrace_set_replay (tp, &it);
}
2776
/* The to_record_stop_replaying method of target record-btrace.  Stops
   replaying for every live thread.  */

static void
record_btrace_stop_replaying_all (struct target_ops *self)
{
  struct thread_info *tp;

  ALL_NON_EXITED_THREADS (tp)
    record_btrace_stop_replaying (tp);
}
2787
/* The to_execution_direction target method.  Reports the direction of the
   last resume so the to_wait machinery knows which way we are moving.  */

static enum exec_direction_kind
record_btrace_execution_direction (struct target_ops *self)
{
  return record_btrace_resume_exec_dir;
}
2795
/* The to_prepare_to_generate_core target method.  While the flag is set,
   memory reads bypass replay restrictions so the core reflects the live
   target.  */

static void
record_btrace_prepare_to_generate_core (struct target_ops *self)
{
  record_btrace_generating_corefile = 1;
}
2803
/* The to_done_generating_core target method.  Clears the flag set by
   record_btrace_prepare_to_generate_core.  */

static void
record_btrace_done_generating_core (struct target_ops *self)
{
  record_btrace_generating_corefile = 0;
}
2811
/* Initialize the record-btrace target ops.  Fills in the target vector;
   methods not set here are delegated to the target beneath.  */

static void
init_record_btrace_ops (void)
{
  struct target_ops *ops;

  ops = &record_btrace_ops;
  ops->to_shortname = "record-btrace";
  ops->to_longname = "Branch tracing target";
  ops->to_doc = "Collect control-flow trace and provide the execution history.";
  ops->to_open = record_btrace_open;
  ops->to_close = record_btrace_close;
  ops->to_async = record_btrace_async;
  ops->to_detach = record_detach;
  ops->to_disconnect = record_disconnect;
  ops->to_mourn_inferior = record_mourn_inferior;
  ops->to_kill = record_kill;
  ops->to_stop_recording = record_btrace_stop_recording;
  ops->to_info_record = record_btrace_info;
  ops->to_insn_history = record_btrace_insn_history;
  ops->to_insn_history_from = record_btrace_insn_history_from;
  ops->to_insn_history_range = record_btrace_insn_history_range;
  ops->to_call_history = record_btrace_call_history;
  ops->to_call_history_from = record_btrace_call_history_from;
  ops->to_call_history_range = record_btrace_call_history_range;
  ops->to_record_is_replaying = record_btrace_is_replaying;
  ops->to_record_will_replay = record_btrace_will_replay;
  ops->to_record_stop_replaying = record_btrace_stop_replaying_all;
  ops->to_xfer_partial = record_btrace_xfer_partial;
  ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
  ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
  ops->to_fetch_registers = record_btrace_fetch_registers;
  ops->to_store_registers = record_btrace_store_registers;
  ops->to_prepare_to_store = record_btrace_prepare_to_store;
  ops->to_get_unwinder = &record_btrace_to_get_unwinder;
  ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;
  ops->to_resume = record_btrace_resume;
  ops->to_wait = record_btrace_wait;
  ops->to_stop = record_btrace_stop;
  ops->to_update_thread_list = record_btrace_update_thread_list;
  ops->to_thread_alive = record_btrace_thread_alive;
  ops->to_goto_record_begin = record_btrace_goto_begin;
  ops->to_goto_record_end = record_btrace_goto_end;
  ops->to_goto_record = record_btrace_goto;
  ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
  ops->to_stopped_by_sw_breakpoint = record_btrace_stopped_by_sw_breakpoint;
  ops->to_supports_stopped_by_sw_breakpoint
    = record_btrace_supports_stopped_by_sw_breakpoint;
  ops->to_stopped_by_hw_breakpoint = record_btrace_stopped_by_hw_breakpoint;
  ops->to_supports_stopped_by_hw_breakpoint
    = record_btrace_supports_stopped_by_hw_breakpoint;
  ops->to_execution_direction = record_btrace_execution_direction;
  ops->to_prepare_to_generate_core = record_btrace_prepare_to_generate_core;
  ops->to_done_generating_core = record_btrace_done_generating_core;
  ops->to_stratum = record_stratum;
  ops->to_magic = OPS_MAGIC;
}
2870
/* Start recording in BTS format.  Restores the configured format to NONE
   if starting the target fails.  */

static void
cmd_record_btrace_bts_start (char *args, int from_tty)
{
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  record_btrace_conf.format = BTRACE_FORMAT_BTS;

  TRY
    {
      execute_command ("target record-btrace", from_tty);
    }
  CATCH (exception, RETURN_MASK_ALL)
    {
      /* Undo the format selection before re-raising.  */
      record_btrace_conf.format = BTRACE_FORMAT_NONE;
      throw_exception (exception);
    }
  END_CATCH
}
2892
/* Start recording in Intel Processor Trace format.  Restores the configured
   format to NONE if starting the target fails.  */

static void
cmd_record_btrace_pt_start (char *args, int from_tty)
{
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  record_btrace_conf.format = BTRACE_FORMAT_PT;

  TRY
    {
      execute_command ("target record-btrace", from_tty);
    }
  CATCH (exception, RETURN_MASK_ALL)
    {
      /* Undo the format selection before re-raising.  */
      record_btrace_conf.format = BTRACE_FORMAT_NONE;
      throw_exception (exception);
    }
  END_CATCH
}
2914
/* Alias for "target record".  Tries PT format first and falls back to BTS
   if PT is not available; resets the format to NONE if both fail.  */

static void
cmd_record_btrace_start (char *args, int from_tty)
{
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  record_btrace_conf.format = BTRACE_FORMAT_PT;

  TRY
    {
      execute_command ("target record-btrace", from_tty);
    }
  CATCH (exception, RETURN_MASK_ALL)
    {
      /* PT failed; retry with BTS.  */
      record_btrace_conf.format = BTRACE_FORMAT_BTS;

      TRY
	{
	  execute_command ("target record-btrace", from_tty);
	}
      CATCH (exception, RETURN_MASK_ALL)
	{
	  record_btrace_conf.format = BTRACE_FORMAT_NONE;
	  throw_exception (exception);
	}
      END_CATCH
    }
  END_CATCH
}
2946
/* The "set record btrace" command.  With no subcommand, list the
   available sub-options and their values.  */

static void
cmd_set_record_btrace (char *args, int from_tty)
{
  cmd_show_list (set_record_btrace_cmdlist, from_tty, "");
}
2954
/* The "show record btrace" command.  Lists all sub-option values.  */

static void
cmd_show_record_btrace (char *args, int from_tty)
{
  cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
}
2962
2963/* The "show record btrace replay-memory-access" command. */
2964
2965static void
2966cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2967 struct cmd_list_element *c, const char *value)
2968{
2969 fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
2970 replay_memory_access);
2971}
2972
/* The "set record btrace bts" command.  Requires a subcommand; prints the
   available ones otherwise.  */

static void
cmd_set_record_btrace_bts (char *args, int from_tty)
{
  printf_unfiltered (_("\"set record btrace bts\" must be followed "
		       "by an appropriate subcommand.\n"));
  help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
	     all_commands, gdb_stdout);
}
2983
/* The "show record btrace bts" command.  Lists all bts sub-option values.  */

static void
cmd_show_record_btrace_bts (char *args, int from_tty)
{
  cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
}
2991
/* The "set record btrace pt" command.  Requires a subcommand; prints the
   available ones otherwise.  */

static void
cmd_set_record_btrace_pt (char *args, int from_tty)
{
  printf_unfiltered (_("\"set record btrace pt\" must be followed "
		       "by an appropriate subcommand.\n"));
  help_list (set_record_btrace_pt_cmdlist, "set record btrace pt ",
	     all_commands, gdb_stdout);
}
3002
/* The "show record btrace pt" command.  Lists all pt sub-option values.  */

static void
cmd_show_record_btrace_pt (char *args, int from_tty)
{
  cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, "");
}
3010
/* The "record bts buffer-size" show value function.  VALUE is the
   pre-formatted setting string supplied by the setshow machinery.  */

static void
show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
				   struct cmd_list_element *c,
				   const char *value)
{
  fprintf_filtered (file,
		    _("The record/replay bts buffer size is %s.\n"), value);
}
3021
/* The "record pt buffer-size" show value function.  VALUE is the
   pre-formatted setting string supplied by the setshow machinery.  */

static void
show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
				  struct cmd_list_element *c,
				  const char *value)
{
  fprintf_filtered (file,
		    _("The record/replay pt buffer size is %s.\n"), value);
}
3032
/* Provide a prototype to silence -Wmissing-prototypes.  */
void _initialize_record_btrace (void);

/* Initialize btrace commands.  Registers the "record btrace" command
   family, its set/show options, the target vector, and default buffer
   sizes.  */

void
_initialize_record_btrace (void)
{
  add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
		  _("Start branch trace recording."), &record_btrace_cmdlist,
		  "record btrace ", 0, &record_cmdlist);
  add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);

  add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
	   _("\
Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
The processor stores a from/to record for each branch into a cyclic buffer.\n\
This format may not be available on all processors."),
	   &record_btrace_cmdlist);
  add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);

  add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
	   _("\
Start branch trace recording in Intel Processor Trace format.\n\n\
This format may not be available on all processors."),
	   &record_btrace_cmdlist);
  add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);

  add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
		  _("Set record options"), &set_record_btrace_cmdlist,
		  "set record btrace ", 0, &set_record_cmdlist);

  add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
		  _("Show record options"), &show_record_btrace_cmdlist,
		  "show record btrace ", 0, &show_record_cmdlist);

  add_setshow_enum_cmd ("replay-memory-access", no_class,
			replay_memory_access_types, &replay_memory_access, _("\
Set what memory accesses are allowed during replay."), _("\
Show what memory accesses are allowed during replay."),
			   _("Default is READ-ONLY.\n\n\
The btrace record target does not trace data.\n\
The memory therefore corresponds to the live target and not \
to the current replay position.\n\n\
When READ-ONLY, allow accesses to read-only memory during replay.\n\
When READ-WRITE, allow accesses to read-only and read-write memory during \
replay."),
			   NULL, cmd_show_replay_memory_access,
			   &set_record_btrace_cmdlist,
			   &show_record_btrace_cmdlist);

  add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
		  _("Set record btrace bts options"),
		  &set_record_btrace_bts_cmdlist,
		  "set record btrace bts ", 0, &set_record_btrace_cmdlist);

  add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
		  _("Show record btrace bts options"),
		  &show_record_btrace_bts_cmdlist,
		  "show record btrace bts ", 0, &show_record_btrace_cmdlist);

  add_setshow_uinteger_cmd ("buffer-size", no_class,
			    &record_btrace_conf.bts.size,
			    _("Set the record/replay bts buffer size."),
			    _("Show the record/replay bts buffer size."), _("\
When starting recording request a trace buffer of this size.  \
The actual buffer size may differ from the requested size.  \
Use \"info record\" to see the actual buffer size.\n\n\
Bigger buffers allow longer recording but also take more time to process \
the recorded execution trace.\n\n\
The trace buffer size may not be changed while recording."), NULL,
			    show_record_bts_buffer_size_value,
			    &set_record_btrace_bts_cmdlist,
			    &show_record_btrace_bts_cmdlist);

  add_prefix_cmd ("pt", class_support, cmd_set_record_btrace_pt,
		  _("Set record btrace pt options"),
		  &set_record_btrace_pt_cmdlist,
		  "set record btrace pt ", 0, &set_record_btrace_cmdlist);

  add_prefix_cmd ("pt", class_support, cmd_show_record_btrace_pt,
		  _("Show record btrace pt options"),
		  &show_record_btrace_pt_cmdlist,
		  "show record btrace pt ", 0, &show_record_btrace_cmdlist);

  add_setshow_uinteger_cmd ("buffer-size", no_class,
			    &record_btrace_conf.pt.size,
			    _("Set the record/replay pt buffer size."),
			    _("Show the record/replay pt buffer size."), _("\
Bigger buffers allow longer recording but also take more time to process \
the recorded execution.\n\
The actual buffer size may differ from the requested size.  Use \"info record\" \
to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
			    &set_record_btrace_pt_cmdlist,
			    &show_record_btrace_pt_cmdlist);

  init_record_btrace_ops ();
  add_target (&record_btrace_ops);

  /* Cache for branch frame info objects, used by the unwinder.  */
  bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
			       xcalloc, xfree);

  /* Default trace buffer sizes: 64 KiB for BTS, 16 KiB for PT.  */
  record_btrace_conf.bts.size = 64 * 1024;
  record_btrace_conf.pt.size = 16 * 1024;
}
This page took 0.886889 seconds and 4 git commands to generate.