Add Python InferiorThread.inferior attribute
[deliverable/binutils-gdb.git] / gdb / record-btrace.c
CommitLineData
afedecd3
MM
1/* Branch trace support for GDB, the GNU debugger.
2
618f726f 3 Copyright (C) 2013-2016 Free Software Foundation, Inc.
afedecd3
MM
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22#include "defs.h"
23#include "record.h"
24#include "gdbthread.h"
25#include "target.h"
26#include "gdbcmd.h"
27#include "disasm.h"
28#include "observer.h"
afedecd3
MM
29#include "cli/cli-utils.h"
30#include "source.h"
31#include "ui-out.h"
32#include "symtab.h"
33#include "filenames.h"
1f3ef581 34#include "regcache.h"
cecac1ab 35#include "frame-unwind.h"
0b722aec 36#include "hashtab.h"
45741a9c 37#include "infrun.h"
70ad5bff
MM
38#include "event-loop.h"
39#include "inf-loop.h"
e3cfc1c7 40#include "vec.h"
afedecd3
MM
41
42/* The target_ops of record-btrace. */
43static struct target_ops record_btrace_ops;
44
45/* A new thread observer enabling branch tracing for the new thread. */
46static struct observer *record_btrace_thread_observer;
47
67b5c0c1
MM
48/* Memory access types used in set/show record btrace replay-memory-access. */
49static const char replay_memory_access_read_only[] = "read-only";
50static const char replay_memory_access_read_write[] = "read-write";
51static const char *const replay_memory_access_types[] =
52{
53 replay_memory_access_read_only,
54 replay_memory_access_read_write,
55 NULL
56};
57
58/* The currently allowed replay memory access type. */
59static const char *replay_memory_access = replay_memory_access_read_only;
60
61/* Command lists for "set/show record btrace". */
62static struct cmd_list_element *set_record_btrace_cmdlist;
63static struct cmd_list_element *show_record_btrace_cmdlist;
633785ff 64
70ad5bff
MM
65/* The execution direction of the last resume we got. See record-full.c. */
66static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;
67
68/* The async event handler for reverse/replay execution. */
69static struct async_event_handler *record_btrace_async_inferior_event_handler;
70
aef92902
MM
71/* A flag indicating that we are currently generating a core file. */
72static int record_btrace_generating_corefile;
73
f4abbc16
MM
74/* The current branch trace configuration. */
75static struct btrace_config record_btrace_conf;
76
77/* Command list for "record btrace". */
78static struct cmd_list_element *record_btrace_cmdlist;
79
d33501a5
MM
80/* Command lists for "set/show record btrace bts". */
81static struct cmd_list_element *set_record_btrace_bts_cmdlist;
82static struct cmd_list_element *show_record_btrace_bts_cmdlist;
83
b20a6524
MM
84/* Command lists for "set/show record btrace pt". */
85static struct cmd_list_element *set_record_btrace_pt_cmdlist;
86static struct cmd_list_element *show_record_btrace_pt_cmdlist;
87
afedecd3
MM
88/* Print a record-btrace debug message. Use do ... while (0) to avoid
89 ambiguities when used in if statements. */
90
91#define DEBUG(msg, args...) \
92 do \
93 { \
94 if (record_debug != 0) \
95 fprintf_unfiltered (gdb_stdlog, \
96 "[record-btrace] " msg "\n", ##args); \
97 } \
98 while (0)
99
100
101/* Update the branch trace for the current thread and return a pointer to its
066ce621 102 thread_info.
afedecd3
MM
103
104 Throws an error if there is no thread or no trace. This function never
105 returns NULL. */
106
066ce621
MM
107static struct thread_info *
108require_btrace_thread (void)
afedecd3
MM
109{
110 struct thread_info *tp;
afedecd3
MM
111
112 DEBUG ("require");
113
114 tp = find_thread_ptid (inferior_ptid);
115 if (tp == NULL)
116 error (_("No thread."));
117
118 btrace_fetch (tp);
119
6e07b1d2 120 if (btrace_is_empty (tp))
afedecd3
MM
121 error (_("No trace."));
122
066ce621
MM
123 return tp;
124}
125
126/* Update the branch trace for the current thread and return a pointer to its
127 branch trace information struct.
128
129 Throws an error if there is no thread or no trace. This function never
130 returns NULL. */
131
132static struct btrace_thread_info *
133require_btrace (void)
134{
135 struct thread_info *tp;
136
137 tp = require_btrace_thread ();
138
139 return &tp->btrace;
afedecd3
MM
140}
141
142/* Enable branch tracing for one thread. Warn on errors. */
143
144static void
145record_btrace_enable_warn (struct thread_info *tp)
146{
492d29ea
PA
147 TRY
148 {
149 btrace_enable (tp, &record_btrace_conf);
150 }
151 CATCH (error, RETURN_MASK_ERROR)
152 {
153 warning ("%s", error.message);
154 }
155 END_CATCH
afedecd3
MM
156}
157
/* Callback function to disable branch tracing for one thread.  */

static void
record_btrace_disable_callback (void *arg)
{
  /* The cleanup argument is the thread whose tracing we disable.  */
  struct thread_info *thread = (struct thread_info *) arg;

  btrace_disable (thread);
}
167
168/* Enable automatic tracing of new threads. */
169
170static void
171record_btrace_auto_enable (void)
172{
173 DEBUG ("attach thread observer");
174
175 record_btrace_thread_observer
176 = observer_attach_new_thread (record_btrace_enable_warn);
177}
178
179/* Disable automatic tracing of new threads. */
180
181static void
182record_btrace_auto_disable (void)
183{
184 /* The observer may have been detached, already. */
185 if (record_btrace_thread_observer == NULL)
186 return;
187
188 DEBUG ("detach thread observer");
189
190 observer_detach_new_thread (record_btrace_thread_observer);
191 record_btrace_thread_observer = NULL;
192}
193
70ad5bff
MM
194/* The record-btrace async event handler function. */
195
196static void
197record_btrace_handle_async_inferior_event (gdb_client_data data)
198{
199 inferior_event_handler (INF_REG_EVENT, NULL);
200}
201
afedecd3
MM
202/* The to_open method of target record-btrace. */
203
204static void
014f9477 205record_btrace_open (const char *args, int from_tty)
afedecd3
MM
206{
207 struct cleanup *disable_chain;
208 struct thread_info *tp;
209
210 DEBUG ("open");
211
8213266a 212 record_preopen ();
afedecd3
MM
213
214 if (!target_has_execution)
215 error (_("The program is not being run."));
216
afedecd3
MM
217 gdb_assert (record_btrace_thread_observer == NULL);
218
219 disable_chain = make_cleanup (null_cleanup, NULL);
034f788c 220 ALL_NON_EXITED_THREADS (tp)
afedecd3
MM
221 if (args == NULL || *args == 0 || number_is_in_list (args, tp->num))
222 {
f4abbc16 223 btrace_enable (tp, &record_btrace_conf);
afedecd3
MM
224
225 make_cleanup (record_btrace_disable_callback, tp);
226 }
227
228 record_btrace_auto_enable ();
229
230 push_target (&record_btrace_ops);
231
70ad5bff
MM
232 record_btrace_async_inferior_event_handler
233 = create_async_event_handler (record_btrace_handle_async_inferior_event,
234 NULL);
aef92902 235 record_btrace_generating_corefile = 0;
70ad5bff 236
afedecd3
MM
237 observer_notify_record_changed (current_inferior (), 1);
238
239 discard_cleanups (disable_chain);
240}
241
242/* The to_stop_recording method of target record-btrace. */
243
244static void
c6cd7c02 245record_btrace_stop_recording (struct target_ops *self)
afedecd3
MM
246{
247 struct thread_info *tp;
248
249 DEBUG ("stop recording");
250
251 record_btrace_auto_disable ();
252
034f788c 253 ALL_NON_EXITED_THREADS (tp)
afedecd3
MM
254 if (tp->btrace.target != NULL)
255 btrace_disable (tp);
256}
257
258/* The to_close method of target record-btrace. */
259
260static void
de90e03d 261record_btrace_close (struct target_ops *self)
afedecd3 262{
568e808b
MM
263 struct thread_info *tp;
264
70ad5bff
MM
265 if (record_btrace_async_inferior_event_handler != NULL)
266 delete_async_event_handler (&record_btrace_async_inferior_event_handler);
267
99c819ee
MM
268 /* Make sure automatic recording gets disabled even if we did not stop
269 recording before closing the record-btrace target. */
270 record_btrace_auto_disable ();
271
568e808b
MM
272 /* We should have already stopped recording.
273 Tear down btrace in case we have not. */
034f788c 274 ALL_NON_EXITED_THREADS (tp)
568e808b 275 btrace_teardown (tp);
afedecd3
MM
276}
277
b7d2e916
PA
278/* The to_async method of target record-btrace. */
279
280static void
6a3753b3 281record_btrace_async (struct target_ops *ops, int enable)
b7d2e916 282{
6a3753b3 283 if (enable)
b7d2e916
PA
284 mark_async_event_handler (record_btrace_async_inferior_event_handler);
285 else
286 clear_async_event_handler (record_btrace_async_inferior_event_handler);
287
6a3753b3 288 ops->beneath->to_async (ops->beneath, enable);
b7d2e916
PA
289}
290
/* Adjusts the size and returns a human readable size suffix.

   *SIZE is divided by the largest power-of-two unit (GB, MB, kB) that
   divides it exactly; the matching suffix is returned.  If no unit
   divides it exactly, *SIZE is left unchanged and "" is returned.  */

static const char *
record_btrace_adjust_size (unsigned int *size)
{
  unsigned int bytes = *size;

  if ((bytes & ((1u << 30) - 1)) == 0)
    {
      *size = bytes >> 30;
      return "GB";
    }

  if ((bytes & ((1u << 20) - 1)) == 0)
    {
      *size = bytes >> 20;
      return "MB";
    }

  if ((bytes & ((1u << 10) - 1)) == 0)
    {
      *size = bytes >> 10;
      return "kB";
    }

  return "";
}
318
319/* Print a BTS configuration. */
320
321static void
322record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
323{
324 const char *suffix;
325 unsigned int size;
326
327 size = conf->size;
328 if (size > 0)
329 {
330 suffix = record_btrace_adjust_size (&size);
331 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
332 }
333}
334
bc504a31 335/* Print an Intel Processor Trace configuration. */
b20a6524
MM
336
337static void
338record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
339{
340 const char *suffix;
341 unsigned int size;
342
343 size = conf->size;
344 if (size > 0)
345 {
346 suffix = record_btrace_adjust_size (&size);
347 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
348 }
349}
350
d33501a5
MM
351/* Print a branch tracing configuration. */
352
353static void
354record_btrace_print_conf (const struct btrace_config *conf)
355{
356 printf_unfiltered (_("Recording format: %s.\n"),
357 btrace_format_string (conf->format));
358
359 switch (conf->format)
360 {
361 case BTRACE_FORMAT_NONE:
362 return;
363
364 case BTRACE_FORMAT_BTS:
365 record_btrace_print_bts_conf (&conf->bts);
366 return;
b20a6524
MM
367
368 case BTRACE_FORMAT_PT:
369 record_btrace_print_pt_conf (&conf->pt);
370 return;
d33501a5
MM
371 }
372
373 internal_error (__FILE__, __LINE__, _("Unkown branch trace format."));
374}
375
afedecd3
MM
376/* The to_info_record method of target record-btrace. */
377
378static void
630d6a4a 379record_btrace_info (struct target_ops *self)
afedecd3
MM
380{
381 struct btrace_thread_info *btinfo;
f4abbc16 382 const struct btrace_config *conf;
afedecd3 383 struct thread_info *tp;
31fd9caa 384 unsigned int insns, calls, gaps;
afedecd3
MM
385
386 DEBUG ("info");
387
388 tp = find_thread_ptid (inferior_ptid);
389 if (tp == NULL)
390 error (_("No thread."));
391
f4abbc16
MM
392 btinfo = &tp->btrace;
393
394 conf = btrace_conf (btinfo);
395 if (conf != NULL)
d33501a5 396 record_btrace_print_conf (conf);
f4abbc16 397
afedecd3
MM
398 btrace_fetch (tp);
399
23a7fe75
MM
400 insns = 0;
401 calls = 0;
31fd9caa 402 gaps = 0;
23a7fe75 403
6e07b1d2 404 if (!btrace_is_empty (tp))
23a7fe75
MM
405 {
406 struct btrace_call_iterator call;
407 struct btrace_insn_iterator insn;
408
409 btrace_call_end (&call, btinfo);
410 btrace_call_prev (&call, 1);
5de9129b 411 calls = btrace_call_number (&call);
23a7fe75
MM
412
413 btrace_insn_end (&insn, btinfo);
31fd9caa 414
5de9129b 415 insns = btrace_insn_number (&insn);
31fd9caa
MM
416 if (insns != 0)
417 {
418 /* The last instruction does not really belong to the trace. */
419 insns -= 1;
420 }
421 else
422 {
423 unsigned int steps;
424
425 /* Skip gaps at the end. */
426 do
427 {
428 steps = btrace_insn_prev (&insn, 1);
429 if (steps == 0)
430 break;
431
432 insns = btrace_insn_number (&insn);
433 }
434 while (insns == 0);
435 }
436
437 gaps = btinfo->ngaps;
23a7fe75 438 }
afedecd3 439
31fd9caa
MM
440 printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
441 "for thread %d (%s).\n"), insns, calls, gaps,
442 tp->num, target_pid_to_str (tp->ptid));
07bbe694
MM
443
444 if (btrace_is_replaying (tp))
445 printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
446 btrace_insn_number (btinfo->replay));
afedecd3
MM
447}
448
31fd9caa
MM
449/* Print a decode error. */
450
451static void
452btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
453 enum btrace_format format)
454{
455 const char *errstr;
456 int is_error;
457
458 errstr = _("unknown");
459 is_error = 1;
460
461 switch (format)
462 {
463 default:
464 break;
465
466 case BTRACE_FORMAT_BTS:
467 switch (errcode)
468 {
469 default:
470 break;
471
472 case BDE_BTS_OVERFLOW:
473 errstr = _("instruction overflow");
474 break;
475
476 case BDE_BTS_INSN_SIZE:
477 errstr = _("unknown instruction");
478 break;
479 }
480 break;
b20a6524
MM
481
482#if defined (HAVE_LIBIPT)
483 case BTRACE_FORMAT_PT:
484 switch (errcode)
485 {
486 case BDE_PT_USER_QUIT:
487 is_error = 0;
488 errstr = _("trace decode cancelled");
489 break;
490
491 case BDE_PT_DISABLED:
492 is_error = 0;
493 errstr = _("disabled");
494 break;
495
496 case BDE_PT_OVERFLOW:
497 is_error = 0;
498 errstr = _("overflow");
499 break;
500
501 default:
502 if (errcode < 0)
503 errstr = pt_errstr (pt_errcode (errcode));
504 break;
505 }
506 break;
507#endif /* defined (HAVE_LIBIPT) */
31fd9caa
MM
508 }
509
510 ui_out_text (uiout, _("["));
511 if (is_error)
512 {
513 ui_out_text (uiout, _("decode error ("));
514 ui_out_field_int (uiout, "errcode", errcode);
515 ui_out_text (uiout, _("): "));
516 }
517 ui_out_text (uiout, errstr);
518 ui_out_text (uiout, _("]\n"));
519}
520
/* Print an unsigned int.  Convenience wrapper since ui-out has no
   native unsigned field printer.  */

static void
ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
{
  ui_out_field_fmt (uiout, fld, "%u", val);
}
528
/* A range of source lines within a single symtab.  */

struct btrace_line_range
{
  /* The symtab this line is from.  */
  struct symtab *symtab;

  /* The first line (inclusive).  */
  int begin;

  /* The last line (exclusive).  An empty range has end <= begin.  */
  int end;
};
542
543/* Construct a line range. */
544
545static struct btrace_line_range
546btrace_mk_line_range (struct symtab *symtab, int begin, int end)
547{
548 struct btrace_line_range range;
549
550 range.symtab = symtab;
551 range.begin = begin;
552 range.end = end;
553
554 return range;
555}
556
557/* Add a line to a line range. */
558
559static struct btrace_line_range
560btrace_line_range_add (struct btrace_line_range range, int line)
561{
562 if (range.end <= range.begin)
563 {
564 /* This is the first entry. */
565 range.begin = line;
566 range.end = line + 1;
567 }
568 else if (line < range.begin)
569 range.begin = line;
570 else if (range.end < line)
571 range.end = line;
572
573 return range;
574}
575
576/* Return non-zero if RANGE is empty, zero otherwise. */
577
578static int
579btrace_line_range_is_empty (struct btrace_line_range range)
580{
581 return range.end <= range.begin;
582}
583
584/* Return non-zero if LHS contains RHS, zero otherwise. */
585
586static int
587btrace_line_range_contains_range (struct btrace_line_range lhs,
588 struct btrace_line_range rhs)
589{
590 return ((lhs.symtab == rhs.symtab)
591 && (lhs.begin <= rhs.begin)
592 && (rhs.end <= lhs.end));
593}
594
595/* Find the line range associated with PC. */
596
597static struct btrace_line_range
598btrace_find_line_range (CORE_ADDR pc)
599{
600 struct btrace_line_range range;
601 struct linetable_entry *lines;
602 struct linetable *ltable;
603 struct symtab *symtab;
604 int nlines, i;
605
606 symtab = find_pc_line_symtab (pc);
607 if (symtab == NULL)
608 return btrace_mk_line_range (NULL, 0, 0);
609
610 ltable = SYMTAB_LINETABLE (symtab);
611 if (ltable == NULL)
612 return btrace_mk_line_range (symtab, 0, 0);
613
614 nlines = ltable->nitems;
615 lines = ltable->item;
616 if (nlines <= 0)
617 return btrace_mk_line_range (symtab, 0, 0);
618
619 range = btrace_mk_line_range (symtab, 0, 0);
620 for (i = 0; i < nlines - 1; i++)
621 {
622 if ((lines[i].pc == pc) && (lines[i].line != 0))
623 range = btrace_line_range_add (range, lines[i].line);
624 }
625
626 return range;
627}
628
629/* Print source lines in LINES to UIOUT.
630
631 UI_ITEM_CHAIN is a cleanup chain for the last source line and the
632 instructions corresponding to that source line. When printing a new source
633 line, we do the cleanups for the open chain and open a new cleanup chain for
634 the new source line. If the source line range in LINES is not empty, this
635 function will leave the cleanup chain for the last printed source line open
636 so instructions can be added to it. */
637
638static void
639btrace_print_lines (struct btrace_line_range lines, struct ui_out *uiout,
640 struct cleanup **ui_item_chain, int flags)
641{
8d297bbf 642 print_source_lines_flags psl_flags;
f94cc897
MM
643 int line;
644
645 psl_flags = 0;
646 if (flags & DISASSEMBLY_FILENAME)
647 psl_flags |= PRINT_SOURCE_LINES_FILENAME;
648
649 for (line = lines.begin; line < lines.end; ++line)
650 {
651 if (*ui_item_chain != NULL)
652 do_cleanups (*ui_item_chain);
653
654 *ui_item_chain
655 = make_cleanup_ui_out_tuple_begin_end (uiout, "src_and_asm_line");
656
657 print_source_lines (lines.symtab, line, line + 1, psl_flags);
658
659 make_cleanup_ui_out_list_begin_end (uiout, "line_asm_insn");
660 }
661}
662
afedecd3
MM
663/* Disassemble a section of the recorded instruction trace. */
664
665static void
23a7fe75 666btrace_insn_history (struct ui_out *uiout,
31fd9caa 667 const struct btrace_thread_info *btinfo,
23a7fe75
MM
668 const struct btrace_insn_iterator *begin,
669 const struct btrace_insn_iterator *end, int flags)
afedecd3 670{
f94cc897
MM
671 struct ui_file *stb;
672 struct cleanup *cleanups, *ui_item_chain;
673 struct disassemble_info di;
afedecd3 674 struct gdbarch *gdbarch;
23a7fe75 675 struct btrace_insn_iterator it;
f94cc897 676 struct btrace_line_range last_lines;
afedecd3 677
23a7fe75
MM
678 DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
679 btrace_insn_number (end));
afedecd3 680
f94cc897
MM
681 flags |= DISASSEMBLY_SPECULATIVE;
682
afedecd3 683 gdbarch = target_gdbarch ();
f94cc897
MM
684 stb = mem_fileopen ();
685 cleanups = make_cleanup_ui_file_delete (stb);
686 di = gdb_disassemble_info (gdbarch, stb);
687 last_lines = btrace_mk_line_range (NULL, 0, 0);
688
689 make_cleanup_ui_out_list_begin_end (uiout, "asm_insns");
690
691 /* UI_ITEM_CHAIN is a cleanup chain for the last source line and the
692 instructions corresponding to that line. */
693 ui_item_chain = NULL;
afedecd3 694
23a7fe75 695 for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
afedecd3 696 {
23a7fe75
MM
697 const struct btrace_insn *insn;
698
699 insn = btrace_insn_get (&it);
700
31fd9caa
MM
701 /* A NULL instruction indicates a gap in the trace. */
702 if (insn == NULL)
703 {
704 const struct btrace_config *conf;
705
706 conf = btrace_conf (btinfo);
afedecd3 707
31fd9caa
MM
708 /* We have trace so we must have a configuration. */
709 gdb_assert (conf != NULL);
710
711 btrace_ui_out_decode_error (uiout, it.function->errcode,
712 conf->format);
713 }
714 else
715 {
f94cc897 716 struct disasm_insn dinsn;
da8c46d2 717
f94cc897 718 if ((flags & DISASSEMBLY_SOURCE) != 0)
da8c46d2 719 {
f94cc897
MM
720 struct btrace_line_range lines;
721
722 lines = btrace_find_line_range (insn->pc);
723 if (!btrace_line_range_is_empty (lines)
724 && !btrace_line_range_contains_range (last_lines, lines))
725 {
726 btrace_print_lines (lines, uiout, &ui_item_chain, flags);
727 last_lines = lines;
728 }
729 else if (ui_item_chain == NULL)
730 {
731 ui_item_chain
732 = make_cleanup_ui_out_tuple_begin_end (uiout,
733 "src_and_asm_line");
734 /* No source information. */
735 make_cleanup_ui_out_list_begin_end (uiout, "line_asm_insn");
736 }
737
738 gdb_assert (ui_item_chain != NULL);
da8c46d2 739 }
da8c46d2 740
f94cc897
MM
741 memset (&dinsn, 0, sizeof (dinsn));
742 dinsn.number = btrace_insn_number (&it);
743 dinsn.addr = insn->pc;
31fd9caa 744
da8c46d2 745 if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
f94cc897 746 dinsn.is_speculative = 1;
da8c46d2 747
f94cc897 748 gdb_pretty_print_insn (gdbarch, uiout, &di, &dinsn, flags, stb);
31fd9caa 749 }
afedecd3 750 }
f94cc897
MM
751
752 do_cleanups (cleanups);
afedecd3
MM
753}
754
755/* The to_insn_history method of target record-btrace. */
756
757static void
7a6c5609 758record_btrace_insn_history (struct target_ops *self, int size, int flags)
afedecd3
MM
759{
760 struct btrace_thread_info *btinfo;
23a7fe75
MM
761 struct btrace_insn_history *history;
762 struct btrace_insn_iterator begin, end;
afedecd3
MM
763 struct cleanup *uiout_cleanup;
764 struct ui_out *uiout;
23a7fe75 765 unsigned int context, covered;
afedecd3
MM
766
767 uiout = current_uiout;
768 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
769 "insn history");
afedecd3 770 context = abs (size);
afedecd3
MM
771 if (context == 0)
772 error (_("Bad record instruction-history-size."));
773
23a7fe75
MM
774 btinfo = require_btrace ();
775 history = btinfo->insn_history;
776 if (history == NULL)
afedecd3 777 {
07bbe694 778 struct btrace_insn_iterator *replay;
afedecd3 779
23a7fe75 780 DEBUG ("insn-history (0x%x): %d", flags, size);
afedecd3 781
07bbe694
MM
782 /* If we're replaying, we start at the replay position. Otherwise, we
783 start at the tail of the trace. */
784 replay = btinfo->replay;
785 if (replay != NULL)
786 begin = *replay;
787 else
788 btrace_insn_end (&begin, btinfo);
789
790 /* We start from here and expand in the requested direction. Then we
791 expand in the other direction, as well, to fill up any remaining
792 context. */
793 end = begin;
794 if (size < 0)
795 {
796 /* We want the current position covered, as well. */
797 covered = btrace_insn_next (&end, 1);
798 covered += btrace_insn_prev (&begin, context - covered);
799 covered += btrace_insn_next (&end, context - covered);
800 }
801 else
802 {
803 covered = btrace_insn_next (&end, context);
804 covered += btrace_insn_prev (&begin, context - covered);
805 }
afedecd3
MM
806 }
807 else
808 {
23a7fe75
MM
809 begin = history->begin;
810 end = history->end;
afedecd3 811
23a7fe75
MM
812 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
813 btrace_insn_number (&begin), btrace_insn_number (&end));
afedecd3 814
23a7fe75
MM
815 if (size < 0)
816 {
817 end = begin;
818 covered = btrace_insn_prev (&begin, context);
819 }
820 else
821 {
822 begin = end;
823 covered = btrace_insn_next (&end, context);
824 }
afedecd3
MM
825 }
826
23a7fe75 827 if (covered > 0)
31fd9caa 828 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
23a7fe75
MM
829 else
830 {
831 if (size < 0)
832 printf_unfiltered (_("At the start of the branch trace record.\n"));
833 else
834 printf_unfiltered (_("At the end of the branch trace record.\n"));
835 }
afedecd3 836
23a7fe75 837 btrace_set_insn_history (btinfo, &begin, &end);
afedecd3
MM
838 do_cleanups (uiout_cleanup);
839}
840
841/* The to_insn_history_range method of target record-btrace. */
842
843static void
4e99c6b7
TT
844record_btrace_insn_history_range (struct target_ops *self,
845 ULONGEST from, ULONGEST to, int flags)
afedecd3
MM
846{
847 struct btrace_thread_info *btinfo;
23a7fe75
MM
848 struct btrace_insn_history *history;
849 struct btrace_insn_iterator begin, end;
afedecd3
MM
850 struct cleanup *uiout_cleanup;
851 struct ui_out *uiout;
23a7fe75
MM
852 unsigned int low, high;
853 int found;
afedecd3
MM
854
855 uiout = current_uiout;
856 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
857 "insn history");
23a7fe75
MM
858 low = from;
859 high = to;
afedecd3 860
23a7fe75 861 DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);
afedecd3
MM
862
863 /* Check for wrap-arounds. */
23a7fe75 864 if (low != from || high != to)
afedecd3
MM
865 error (_("Bad range."));
866
0688d04e 867 if (high < low)
afedecd3
MM
868 error (_("Bad range."));
869
23a7fe75 870 btinfo = require_btrace ();
afedecd3 871
23a7fe75
MM
872 found = btrace_find_insn_by_number (&begin, btinfo, low);
873 if (found == 0)
874 error (_("Range out of bounds."));
afedecd3 875
23a7fe75
MM
876 found = btrace_find_insn_by_number (&end, btinfo, high);
877 if (found == 0)
0688d04e
MM
878 {
879 /* Silently truncate the range. */
880 btrace_insn_end (&end, btinfo);
881 }
882 else
883 {
884 /* We want both begin and end to be inclusive. */
885 btrace_insn_next (&end, 1);
886 }
afedecd3 887
31fd9caa 888 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
23a7fe75 889 btrace_set_insn_history (btinfo, &begin, &end);
afedecd3
MM
890
891 do_cleanups (uiout_cleanup);
892}
893
894/* The to_insn_history_from method of target record-btrace. */
895
896static void
9abc3ff3
TT
897record_btrace_insn_history_from (struct target_ops *self,
898 ULONGEST from, int size, int flags)
afedecd3
MM
899{
900 ULONGEST begin, end, context;
901
902 context = abs (size);
0688d04e
MM
903 if (context == 0)
904 error (_("Bad record instruction-history-size."));
afedecd3
MM
905
906 if (size < 0)
907 {
908 end = from;
909
910 if (from < context)
911 begin = 0;
912 else
0688d04e 913 begin = from - context + 1;
afedecd3
MM
914 }
915 else
916 {
917 begin = from;
0688d04e 918 end = from + context - 1;
afedecd3
MM
919
920 /* Check for wrap-around. */
921 if (end < begin)
922 end = ULONGEST_MAX;
923 }
924
4e99c6b7 925 record_btrace_insn_history_range (self, begin, end, flags);
afedecd3
MM
926}
927
928/* Print the instruction number range for a function call history line. */
929
930static void
23a7fe75
MM
931btrace_call_history_insn_range (struct ui_out *uiout,
932 const struct btrace_function *bfun)
afedecd3 933{
7acbe133
MM
934 unsigned int begin, end, size;
935
936 size = VEC_length (btrace_insn_s, bfun->insn);
937 gdb_assert (size > 0);
afedecd3 938
23a7fe75 939 begin = bfun->insn_offset;
7acbe133 940 end = begin + size - 1;
afedecd3 941
23a7fe75 942 ui_out_field_uint (uiout, "insn begin", begin);
8710b709 943 ui_out_text (uiout, ",");
23a7fe75 944 ui_out_field_uint (uiout, "insn end", end);
afedecd3
MM
945}
946
ce0dfbea
MM
947/* Compute the lowest and highest source line for the instructions in BFUN
948 and return them in PBEGIN and PEND.
949 Ignore instructions that can't be mapped to BFUN, e.g. instructions that
950 result from inlining or macro expansion. */
951
952static void
953btrace_compute_src_line_range (const struct btrace_function *bfun,
954 int *pbegin, int *pend)
955{
956 struct btrace_insn *insn;
957 struct symtab *symtab;
958 struct symbol *sym;
959 unsigned int idx;
960 int begin, end;
961
962 begin = INT_MAX;
963 end = INT_MIN;
964
965 sym = bfun->sym;
966 if (sym == NULL)
967 goto out;
968
969 symtab = symbol_symtab (sym);
970
971 for (idx = 0; VEC_iterate (btrace_insn_s, bfun->insn, idx, insn); ++idx)
972 {
973 struct symtab_and_line sal;
974
975 sal = find_pc_line (insn->pc, 0);
976 if (sal.symtab != symtab || sal.line == 0)
977 continue;
978
979 begin = min (begin, sal.line);
980 end = max (end, sal.line);
981 }
982
983 out:
984 *pbegin = begin;
985 *pend = end;
986}
987
afedecd3
MM
988/* Print the source line information for a function call history line. */
989
990static void
23a7fe75
MM
991btrace_call_history_src_line (struct ui_out *uiout,
992 const struct btrace_function *bfun)
afedecd3
MM
993{
994 struct symbol *sym;
23a7fe75 995 int begin, end;
afedecd3
MM
996
997 sym = bfun->sym;
998 if (sym == NULL)
999 return;
1000
1001 ui_out_field_string (uiout, "file",
08be3fe3 1002 symtab_to_filename_for_display (symbol_symtab (sym)));
afedecd3 1003
ce0dfbea 1004 btrace_compute_src_line_range (bfun, &begin, &end);
23a7fe75 1005 if (end < begin)
afedecd3
MM
1006 return;
1007
1008 ui_out_text (uiout, ":");
23a7fe75 1009 ui_out_field_int (uiout, "min line", begin);
afedecd3 1010
23a7fe75 1011 if (end == begin)
afedecd3
MM
1012 return;
1013
8710b709 1014 ui_out_text (uiout, ",");
23a7fe75 1015 ui_out_field_int (uiout, "max line", end);
afedecd3
MM
1016}
1017
0b722aec
MM
1018/* Get the name of a branch trace function. */
1019
1020static const char *
1021btrace_get_bfun_name (const struct btrace_function *bfun)
1022{
1023 struct minimal_symbol *msym;
1024 struct symbol *sym;
1025
1026 if (bfun == NULL)
1027 return "??";
1028
1029 msym = bfun->msym;
1030 sym = bfun->sym;
1031
1032 if (sym != NULL)
1033 return SYMBOL_PRINT_NAME (sym);
1034 else if (msym != NULL)
efd66ac6 1035 return MSYMBOL_PRINT_NAME (msym);
0b722aec
MM
1036 else
1037 return "??";
1038}
1039
afedecd3
MM
1040/* Disassemble a section of the recorded function trace. */
1041
1042static void
23a7fe75 1043btrace_call_history (struct ui_out *uiout,
8710b709 1044 const struct btrace_thread_info *btinfo,
23a7fe75
MM
1045 const struct btrace_call_iterator *begin,
1046 const struct btrace_call_iterator *end,
8d297bbf 1047 int int_flags)
afedecd3 1048{
23a7fe75 1049 struct btrace_call_iterator it;
8d297bbf 1050 record_print_flags flags = (enum record_print_flag) int_flags;
afedecd3 1051
8d297bbf 1052 DEBUG ("ftrace (0x%x): [%u; %u)", int_flags, btrace_call_number (begin),
23a7fe75 1053 btrace_call_number (end));
afedecd3 1054
23a7fe75 1055 for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
afedecd3 1056 {
23a7fe75
MM
1057 const struct btrace_function *bfun;
1058 struct minimal_symbol *msym;
1059 struct symbol *sym;
1060
1061 bfun = btrace_call_get (&it);
23a7fe75 1062 sym = bfun->sym;
0b722aec 1063 msym = bfun->msym;
23a7fe75 1064
afedecd3 1065 /* Print the function index. */
23a7fe75 1066 ui_out_field_uint (uiout, "index", bfun->number);
afedecd3
MM
1067 ui_out_text (uiout, "\t");
1068
31fd9caa
MM
1069 /* Indicate gaps in the trace. */
1070 if (bfun->errcode != 0)
1071 {
1072 const struct btrace_config *conf;
1073
1074 conf = btrace_conf (btinfo);
1075
1076 /* We have trace so we must have a configuration. */
1077 gdb_assert (conf != NULL);
1078
1079 btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);
1080
1081 continue;
1082 }
1083
8710b709
MM
1084 if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
1085 {
1086 int level = bfun->level + btinfo->level, i;
1087
1088 for (i = 0; i < level; ++i)
1089 ui_out_text (uiout, " ");
1090 }
1091
1092 if (sym != NULL)
1093 ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (sym));
1094 else if (msym != NULL)
efd66ac6 1095 ui_out_field_string (uiout, "function", MSYMBOL_PRINT_NAME (msym));
8710b709
MM
1096 else if (!ui_out_is_mi_like_p (uiout))
1097 ui_out_field_string (uiout, "function", "??");
1098
1e038f67 1099 if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
afedecd3 1100 {
8710b709 1101 ui_out_text (uiout, _("\tinst "));
23a7fe75 1102 btrace_call_history_insn_range (uiout, bfun);
afedecd3
MM
1103 }
1104
1e038f67 1105 if ((flags & RECORD_PRINT_SRC_LINE) != 0)
afedecd3 1106 {
8710b709 1107 ui_out_text (uiout, _("\tat "));
23a7fe75 1108 btrace_call_history_src_line (uiout, bfun);
afedecd3
MM
1109 }
1110
afedecd3
MM
1111 ui_out_text (uiout, "\n");
1112 }
1113}
1114
/* The to_call_history method of target record-btrace.

   Print SIZE function call segments around the current browsing position
   (or the replay/trace-end position when no browsing history exists yet).
   A negative SIZE moves backwards, a positive SIZE forwards.  INT_FLAGS
   holds enum record_print_flag bits controlling the output format.  */

static void
record_btrace_call_history (struct target_ops *self, int size, int int_flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int context, covered;
  record_print_flags flags = (enum record_print_flag) int_flags;

  uiout = current_uiout;
  /* NOTE(review): the tuple is named "insn history" here while
     record_btrace_call_history_range below uses "func history" — looks like
     a copy/paste inconsistency, but the string is MI-visible output, so it
     is left as-is; confirm against MI consumers before changing.  */
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
						       "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  btinfo = require_btrace ();
  history = btinfo->call_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("call-history (0x%x): %d", int_flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
	 start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
	{
	  begin.function = replay->function;
	  begin.btinfo = btinfo;
	}
      else
	btrace_call_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
	 expand in the other direction, as well, to fill up any remaining
	 context.  */
      end = begin;
      if (size < 0)
	{
	  /* We want the current position covered, as well.  */
	  covered = btrace_call_next (&end, 1);
	  covered += btrace_call_prev (&begin, context - covered);
	  covered += btrace_call_next (&end, context - covered);
	}
      else
	{
	  covered = btrace_call_next (&end, context);
	  covered += btrace_call_prev (&begin, context - covered);
	}
    }
  else
    {
      /* Continue browsing from where the previous command left off.  */
      begin = history->begin;
      end = history->end;

      DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", int_flags, size,
	     btrace_call_number (&begin), btrace_call_number (&end));

      if (size < 0)
	{
	  end = begin;
	  covered = btrace_call_prev (&begin, context);
	}
      else
	{
	  begin = end;
	  covered = btrace_call_next (&end, context);
	}
    }

  if (covered > 0)
    btrace_call_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
	printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
	printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  /* Remember the printed range for the next browsing command.  */
  btrace_set_call_history (btinfo, &begin, &end);
  do_cleanups (uiout_cleanup);
}
1204
/* The to_call_history_range method of target record-btrace.

   Print the function call segments numbered [FROM; TO].  Both bounds are
   inclusive from the user's point of view; internally END is advanced by
   one so the iterator range is half-open.  Errors out on wrap-around or an
   out-of-bounds FROM; an out-of-bounds TO is silently truncated.  */

static void
record_btrace_call_history_range (struct target_ops *self,
				  ULONGEST from, ULONGEST to,
				  int int_flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;	/* NOTE(review): unused here.  */
  struct btrace_call_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;
  record_print_flags flags = (enum record_print_flag) int_flags;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
						       "func history");
  /* Call numbers are unsigned int; narrowing from ULONGEST may wrap.  */
  low = from;
  high = to;

  DEBUG ("call-history (0x%x): [%u; %u)", int_flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_call_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_call_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_call_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_call_next (&end, 1);
    }

  btrace_call_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_call_history (btinfo, &begin, &end);

  do_cleanups (uiout_cleanup);
}
1259
/* The to_call_history_from method of target record-btrace.

   Print SIZE function call segments starting at FROM; a negative SIZE
   extends backwards (ending at FROM), a positive SIZE forwards (starting
   at FROM).  Delegates to record_btrace_call_history_range after computing
   the inclusive [begin; end] bounds, clamping at 0 and ULONGEST_MAX.  */

static void
record_btrace_call_history_from (struct target_ops *self,
				 ULONGEST from, int size,
				 int int_flags)
{
  ULONGEST begin, end, context;
  record_print_flags flags = (enum record_print_flag) int_flags;

  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  if (size < 0)
    {
      end = from;

      /* Clamp at zero instead of wrapping below the start.  */
      if (from < context)
	begin = 0;
      else
	begin = from - context + 1;
    }
  else
    {
      begin = from;
      end = from + context - 1;

      /* Check for wrap-around.  */
      if (end < begin)
	end = ULONGEST_MAX;
    }

  record_btrace_call_history_range (self, begin, end, flags);
}
1295
/* The to_record_is_replaying method of target record-btrace.

   Return non-zero if any non-exited thread matching PTID is currently
   replaying its execution history.  */

static int
record_btrace_is_replaying (struct target_ops *self, ptid_t ptid)
{
  struct thread_info *tp;

  ALL_NON_EXITED_THREADS (tp)
    if (ptid_match (tp->ptid, ptid) && btrace_is_replaying (tp))
      return 1;

  return 0;
}
1309
7ff27e9b
MM
1310/* The to_record_will_replay method of target record-btrace. */
1311
1312static int
1313record_btrace_will_replay (struct target_ops *self, ptid_t ptid, int dir)
1314{
1315 return dir == EXEC_REVERSE || record_btrace_is_replaying (self, ptid);
1316}
1317
/* The to_xfer_partial method of target record-btrace.

   While replaying in read-only replay-memory-access mode (and not
   generating a core file), memory writes are rejected and memory reads are
   restricted to read-only sections; everything else is reported as
   unavailable.  All other requests are forwarded to the target beneath.  */

static enum target_xfer_status
record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
			    const char *annex, gdb_byte *readbuf,
			    const gdb_byte *writebuf, ULONGEST offset,
			    ULONGEST len, ULONGEST *xfered_len)
{
  struct target_ops *t;

  /* Filter out requests that don't make sense during replay.  */
  if (replay_memory_access == replay_memory_access_read_only
      && !record_btrace_generating_corefile
      && record_btrace_is_replaying (ops, inferior_ptid))
    {
      switch (object)
	{
	case TARGET_OBJECT_MEMORY:
	  {
	    struct target_section *section;

	    /* We do not allow writing memory in general.  */
	    if (writebuf != NULL)
	      {
		*xfered_len = len;
		return TARGET_XFER_UNAVAILABLE;
	      }

	    /* We allow reading readonly memory.  */
	    section = target_section_by_addr (ops, offset);
	    if (section != NULL)
	      {
		/* Check if the section we found is readonly.  */
		if ((bfd_get_section_flags (section->the_bfd_section->owner,
					    section->the_bfd_section)
		     & SEC_READONLY) != 0)
		  {
		    /* Truncate the request to fit into this section.  */
		    len = min (len, section->endaddr - offset);
		    break;
		  }
	      }

	    /* Not backed by a read-only section: unavailable.  */
	    *xfered_len = len;
	    return TARGET_XFER_UNAVAILABLE;
	  }
	}
    }

  /* Forward the request.  */
  ops = ops->beneath;
  return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
			       offset, len, xfered_len);
}
1372
/* The to_insert_breakpoint method of target record-btrace.

   Temporarily lifts the replay memory-access restriction so the target
   beneath can patch in the breakpoint; the previous setting is restored on
   both the normal and the exceptional path.  */

static int
record_btrace_insert_breakpoint (struct target_ops *ops,
				 struct gdbarch *gdbarch,
				 struct bp_target_info *bp_tgt)
{
  const char *old;
  int ret;

  /* Inserting breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  TRY
    {
      ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      /* Restore the global before propagating the error.  */
      replay_memory_access = old;
      throw_exception (except);
    }
  END_CATCH
  replay_memory_access = old;

  return ret;
}
1403
/* The to_remove_breakpoint method of target record-btrace.

   Mirror image of record_btrace_insert_breakpoint: temporarily allows
   memory writes during replay, restoring the previous setting on both the
   normal and the exceptional path.  */

static int
record_btrace_remove_breakpoint (struct target_ops *ops,
				 struct gdbarch *gdbarch,
				 struct bp_target_info *bp_tgt)
{
  const char *old;
  int ret;

  /* Removing breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  TRY
    {
      ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt);
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      /* Restore the global before propagating the error.  */
      replay_memory_access = old;
      throw_exception (except);
    }
  END_CATCH
  replay_memory_access = old;

  return ret;
}
1434
/* The to_fetch_registers method of target record-btrace.

   While replaying (and not generating a core file), only the PC register
   can be supplied; its value is the address of the instruction at the
   current replay position.  Otherwise the request is forwarded to the
   target beneath.  */

static void
record_btrace_fetch_registers (struct target_ops *ops,
			       struct regcache *regcache, int regno)
{
  struct btrace_insn_iterator *replay;
  struct thread_info *tp;

  tp = find_thread_ptid (inferior_ptid);
  gdb_assert (tp != NULL);

  replay = tp->btrace.replay;
  if (replay != NULL && !record_btrace_generating_corefile)
    {
      const struct btrace_insn *insn;
      struct gdbarch *gdbarch;
      int pcreg;

      gdbarch = get_regcache_arch (regcache);
      pcreg = gdbarch_pc_regnum (gdbarch);
      if (pcreg < 0)
	return;

      /* We can only provide the PC register.  */
      if (regno >= 0 && regno != pcreg)
	return;

      insn = btrace_insn_get (replay);
      gdb_assert (insn != NULL);

      regcache_raw_supply (regcache, regno, &insn->pc);
    }
  else
    {
      struct target_ops *t = ops->beneath;

      t->to_fetch_registers (t, regcache, regno);
    }
}
1475
/* The to_store_registers method of target record-btrace.

   Register writes are refused while replaying (unless we are generating a
   core file); otherwise the request is forwarded to the target beneath.  */

static void
record_btrace_store_registers (struct target_ops *ops,
			       struct regcache *regcache, int regno)
{
  struct target_ops *t;

  if (!record_btrace_generating_corefile
      && record_btrace_is_replaying (ops, inferior_ptid))
    error (_("Cannot write registers while replaying."));

  gdb_assert (may_write_registers != 0);

  t = ops->beneath;
  t->to_store_registers (t, regcache, regno);
}
1493
/* The to_prepare_to_store method of target record-btrace.

   A no-op while replaying (stores are rejected anyway); otherwise
   forwarded to the target beneath.  */

static void
record_btrace_prepare_to_store (struct target_ops *ops,
				struct regcache *regcache)
{
  struct target_ops *t;

  if (!record_btrace_generating_corefile
      && record_btrace_is_replaying (ops, inferior_ptid))
    return;

  t = ops->beneath;
  t->to_prepare_to_store (t, regcache);
}
1509
/* The branch trace frame cache.

   Associates a frame_info with the branch trace function segment it
   represents, so the unwinder callbacks can recover the segment from the
   frame alone.  */

struct btrace_frame_cache
{
  /* The thread.  */
  struct thread_info *tp;

  /* The frame info.  */
  struct frame_info *frame;

  /* The branch trace function segment.  */
  const struct btrace_function *bfun;
};

/* A struct btrace_frame_cache hash table indexed by NEXT.  */

static htab_t bfcache;

/* hash_f for htab_create_alloc of bfcache.  Hashes on the frame pointer,
   consistent with bfcache_eq below.  */

static hashval_t
bfcache_hash (const void *arg)
{
  const struct btrace_frame_cache *cache
    = (const struct btrace_frame_cache *) arg;

  return htab_hash_pointer (cache->frame);
}

/* eq_f for htab_create_alloc of bfcache.  Two entries are equal iff they
   describe the same frame.  */

static int
bfcache_eq (const void *arg1, const void *arg2)
{
  const struct btrace_frame_cache *cache1
    = (const struct btrace_frame_cache *) arg1;
  const struct btrace_frame_cache *cache2
    = (const struct btrace_frame_cache *) arg2;

  return cache1->frame == cache2->frame;
}

/* Create a new btrace frame cache for FRAME and register it in BFCACHE.
   The cache is allocated on the frame obstack; the caller fills in the
   remaining members.  Asserts that FRAME was not cached before.  */

static struct btrace_frame_cache *
bfcache_new (struct frame_info *frame)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
  cache->frame = frame;

  slot = htab_find_slot (bfcache, cache, INSERT);
  gdb_assert (*slot == NULL);
  *slot = cache;

  return cache;
}
1569
/* Extract the branch trace function from a branch trace frame.

   Returns NULL if FRAME is not a branch trace frame (i.e. has no entry in
   BFCACHE).  */

static const struct btrace_function *
btrace_get_frame_function (struct frame_info *frame)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;	/* NOTE(review): unused here.  */
  struct btrace_frame_cache pattern;
  void **slot;

  pattern.frame = frame;

  slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
  if (slot == NULL)
    return NULL;

  cache = (const struct btrace_frame_cache *) *slot;
  return cache->bfun;
}
1589
/* Implement stop_reason method for record_btrace_frame_unwind.

   Unwinding stops (UNWIND_UNAVAILABLE) once the function segment has no
   caller recorded in the trace.  */

static enum unwind_stop_reason
record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
					void **this_cache)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;

  cache = (const struct btrace_frame_cache *) *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  if (bfun->up == NULL)
    return UNWIND_UNAVAILABLE;

  return UNWIND_NO_REASON;
}
1608
/* Implement this_id method for record_btrace_frame_unwind.

   Builds an unavailable-stack frame id from the frame's function start
   address and the number of the first segment of the function, so all
   segments of one function instance share the same id.  */

static void
record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
			     struct frame_id *this_id)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;
  CORE_ADDR code, special;

  cache = (const struct btrace_frame_cache *) *this_cache;

  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  /* Walk back to the first segment of this function instance.  */
  while (bfun->segment.prev != NULL)
    bfun = bfun->segment.prev;

  code = get_frame_func (this_frame);
  special = bfun->number;

  *this_id = frame_id_build_unavailable_stack_special (code, special);

  DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
	 btrace_get_bfun_name (cache->bfun),
	 core_addr_to_string_nz (this_id->code_addr),
	 core_addr_to_string_nz (this_id->special_addr));
}
1637
/* Implement prev_register method for record_btrace_frame_unwind.

   Only the PC can be unwound: for a call it is the address after the last
   instruction of the caller segment; for a return it is the first
   instruction of the caller segment.  Any other register throws
   NOT_AVAILABLE_ERROR.  */

static struct value *
record_btrace_frame_prev_register (struct frame_info *this_frame,
				   void **this_cache,
				   int regnum)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun, *caller;
  const struct btrace_insn *insn;
  struct gdbarch *gdbarch;
  CORE_ADDR pc;
  int pcreg;

  gdbarch = get_frame_arch (this_frame);
  pcreg = gdbarch_pc_regnum (gdbarch);
  if (pcreg < 0 || regnum != pcreg)
    throw_error (NOT_AVAILABLE_ERROR,
		 _("Registers are not available in btrace record history"));

  cache = (const struct btrace_frame_cache *) *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  caller = bfun->up;
  if (caller == NULL)
    throw_error (NOT_AVAILABLE_ERROR,
		 _("No caller in btrace record history"));

  if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
    {
      /* Linked via return: resume at the caller's first instruction.  */
      insn = VEC_index (btrace_insn_s, caller->insn, 0);
      pc = insn->pc;
    }
  else
    {
      /* Linked via call: resume after the caller's last instruction.  */
      insn = VEC_last (btrace_insn_s, caller->insn);
      pc = insn->pc;

      pc += gdb_insn_length (gdbarch, pc);
    }

  DEBUG ("[frame] unwound PC in %s on level %d: %s",
	 btrace_get_bfun_name (bfun), bfun->level,
	 core_addr_to_string_nz (pc));

  return frame_unwind_got_address (this_frame, regnum, pc);
}
1686
/* Implement sniffer method for record_btrace_frame_unwind.

   Claims the frame when replaying: for the sentinel (no next frame) the
   segment at the replay position; otherwise the caller of the next
   frame's segment, unless that link is a tail call (handled by the
   tailcall sniffer below).  */

static int
record_btrace_frame_sniffer (const struct frame_unwind *self,
			     struct frame_info *this_frame,
			     void **this_cache)
{
  const struct btrace_function *bfun;
  struct btrace_frame_cache *cache;
  struct thread_info *tp;
  struct frame_info *next;

  /* THIS_FRAME does not contain a reference to its thread.  */
  tp = find_thread_ptid (inferior_ptid);
  gdb_assert (tp != NULL);

  bfun = NULL;
  next = get_next_frame (this_frame);
  if (next == NULL)
    {
      const struct btrace_insn_iterator *replay;

      replay = tp->btrace.replay;
      if (replay != NULL)
	bfun = replay->function;
    }
  else
    {
      const struct btrace_function *callee;

      callee = btrace_get_frame_function (next);
      if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
	bfun = callee->up;
    }

  if (bfun == NULL)
    return 0;

  DEBUG ("[frame] sniffed frame for %s on level %d",
	 btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = tp;
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}
1736
/* Implement sniffer method for record_btrace_tailcall_frame_unwind.

   Claims the frame only when the next frame's function segment is linked
   to its caller via a tail call.  */

static int
record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
				      struct frame_info *this_frame,
				      void **this_cache)
{
  const struct btrace_function *bfun, *callee;
  struct btrace_frame_cache *cache;
  struct frame_info *next;

  next = get_next_frame (this_frame);
  if (next == NULL)
    return 0;

  callee = btrace_get_frame_function (next);
  if (callee == NULL)
    return 0;

  if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
    return 0;

  bfun = callee->up;
  if (bfun == NULL)
    return 0;

  DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
	 btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = find_thread_ptid (inferior_ptid);
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}
1774
/* Dealloc_cache callback for the btrace frame unwinders: remove the
   frame's entry from BFCACHE.  The cache memory itself lives on the frame
   obstack and is freed with it.  */

static void
record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = (struct btrace_frame_cache *) this_cache;

  slot = htab_find_slot (bfcache, cache, NO_INSERT);
  gdb_assert (slot != NULL);

  htab_remove_elt (bfcache, cache);
}
1788
/* btrace recording does not store previous memory content, neither the stack
   frames content.  Any unwinding would return erroneous results as the stack
   contents no longer matches the changed PC value restored from history.
   Therefore this unwinder reports any possibly unwound registers as
   <unavailable>.  */

const struct frame_unwind record_btrace_frame_unwind =
{
  NORMAL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,					/* unwind_data */
  record_btrace_frame_sniffer,
  record_btrace_frame_dealloc_cache
};

/* Same callbacks, but claiming tail-call frames via its own sniffer.  */

const struct frame_unwind record_btrace_tailcall_frame_unwind =
{
  TAILCALL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,					/* unwind_data */
  record_btrace_tailcall_frame_sniffer,
  record_btrace_frame_dealloc_cache
};
b2f4cfde 1816
/* Implement the to_get_unwinder method: hand out the btrace normal-frame
   unwinder.  */

static const struct frame_unwind *
record_btrace_to_get_unwinder (struct target_ops *self)
{
  return &record_btrace_frame_unwind;
}

/* Implement the to_get_tailcall_unwinder method: hand out the btrace
   tail-call unwinder.  */

static const struct frame_unwind *
record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
{
  return &record_btrace_tailcall_frame_unwind;
}
1832
/* Return a human-readable string for FLAG.

   FLAG is expected to be a single btrace_thread_flag bit; combined flags
   fall through to "<invalid>".  Used for debug output only.  */

static const char *
btrace_thread_flag_to_str (enum btrace_thread_flag flag)
{
  switch (flag)
    {
    case BTHR_STEP:
      return "step";

    case BTHR_RSTEP:
      return "reverse-step";

    case BTHR_CONT:
      return "cont";

    case BTHR_RCONT:
      return "reverse-cont";

    case BTHR_STOP:
      return "stop";
    }

  return "<invalid>";
}
1858
/* Indicate that TP should be resumed according to FLAG.

   Only records the intent in TP's btrace flags; the actual stepping is
   performed later, in record_btrace_wait.  Also refreshes TP's branch
   trace so the move operates on up-to-date history.  */

static void
record_btrace_resume_thread (struct thread_info *tp,
			     enum btrace_thread_flag flag)
{
  struct btrace_thread_info *btinfo;

  DEBUG ("resuming thread %d (%s): %x (%s)", tp->num,
	 target_pid_to_str (tp->ptid), flag, btrace_thread_flag_to_str (flag));

  btinfo = &tp->btrace;

  /* Fetch the latest branch trace.  */
  btrace_fetch (tp);

  /* A resume request overwrites a preceding resume or stop request.  */
  btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
  btinfo->flags |= flag;
}
1879
/* Get the current frame for TP.

   Temporarily switches INFERIOR_PTID to TP and clears TP's executing flag
   so get_current_frame can compute the frame; both are restored on the
   normal and the exceptional path.  */

static struct frame_info *
get_thread_current_frame (struct thread_info *tp)
{
  struct frame_info *frame;
  ptid_t old_inferior_ptid;
  int executing;

  /* Set INFERIOR_PTID, which is implicitly used by get_current_frame.  */
  old_inferior_ptid = inferior_ptid;
  inferior_ptid = tp->ptid;

  /* Clear the executing flag to allow changes to the current frame.
     We are not actually running, yet.  We just started a reverse execution
     command or a record goto command.
     For the latter, EXECUTING is false and this has no effect.
     For the former, EXECUTING is true and we're in to_wait, about to
     move the thread.  Since we need to recompute the stack, we temporarily
     set EXECUTING to false.  */
  executing = is_executing (inferior_ptid);
  set_executing (inferior_ptid, 0);

  frame = NULL;
  TRY
    {
      frame = get_current_frame ();
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      /* Restore the previous execution state.  */
      set_executing (inferior_ptid, executing);

      /* Restore the previous inferior_ptid.  */
      inferior_ptid = old_inferior_ptid;

      throw_exception (except);
    }
  END_CATCH

  /* Restore the previous execution state.  */
  set_executing (inferior_ptid, executing);

  /* Restore the previous inferior_ptid.  */
  inferior_ptid = old_inferior_ptid;

  return frame;
}
1928
/* Start replaying a thread.

   Positions a new replay iterator at the end of TP's branch trace
   (skipping trailing gaps) and fixes up the stepping-related frame ids so
   step-into-subroutine detection keeps working under the btrace unwinder.
   On error, all partial state is rolled back before rethrowing.  Returns
   the new iterator, or NULL if TP has no trace.  */

static struct btrace_insn_iterator *
record_btrace_start_replaying (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;
  replay = NULL;

  /* We can't start replaying without trace.  */
  if (btinfo->begin == NULL)
    return NULL;

  /* GDB stores the current frame_id when stepping in order to detect steps
     into subroutines.
     Since frames are computed differently when we're replaying, we need to
     recompute those stored frames and fix them up so we can still detect
     subroutines after we started replaying.  */
  TRY
    {
      struct frame_info *frame;
      struct frame_id frame_id;
      int upd_step_frame_id, upd_step_stack_frame_id;

      /* The current frame without replaying - computed via normal unwind.  */
      frame = get_thread_current_frame (tp);
      frame_id = get_frame_id (frame);

      /* Check if we need to update any stepping-related frame id's.  */
      upd_step_frame_id = frame_id_eq (frame_id,
				       tp->control.step_frame_id);
      upd_step_stack_frame_id = frame_id_eq (frame_id,
					     tp->control.step_stack_frame_id);

      /* We start replaying at the end of the branch trace.  This corresponds
	 to the current instruction.  */
      replay = XNEW (struct btrace_insn_iterator);
      btrace_insn_end (replay, btinfo);

      /* Skip gaps at the end of the trace.  */
      while (btrace_insn_get (replay) == NULL)
	{
	  unsigned int steps;

	  steps = btrace_insn_prev (replay, 1);
	  if (steps == 0)
	    error (_("No trace."));
	}

      /* We're not replaying, yet.  */
      gdb_assert (btinfo->replay == NULL);
      btinfo->replay = replay;

      /* Make sure we're not using any stale registers.  */
      registers_changed_ptid (tp->ptid);

      /* The current frame with replaying - computed via btrace unwind.  */
      frame = get_thread_current_frame (tp);
      frame_id = get_frame_id (frame);

      /* Replace stepping related frames where necessary.  */
      if (upd_step_frame_id)
	tp->control.step_frame_id = frame_id;
      if (upd_step_stack_frame_id)
	tp->control.step_stack_frame_id = frame_id;
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      /* Roll back: discard the iterator and drop stale registers.  */
      xfree (btinfo->replay);
      btinfo->replay = NULL;

      registers_changed_ptid (tp->ptid);

      throw_exception (except);
    }
  END_CATCH

  return replay;
}
2010
/* Stop replaying a thread.

   Frees TP's replay iterator and invalidates cached registers so the live
   state is re-fetched.  Safe to call when TP is not replaying.  */

static void
record_btrace_stop_replaying (struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;

  xfree (btinfo->replay);
  btinfo->replay = NULL;

  /* Make sure we're not leaving any stale registers.  */
  registers_changed_ptid (tp->ptid);
}
2026
/* Stop replaying TP if it is at the end of its execution history.

   No-op if TP is not replaying or has not yet reached the end.  */

static void
record_btrace_stop_replaying_at_end (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay, end;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;
  replay = btinfo->replay;

  if (replay == NULL)
    return;

  btrace_insn_end (&end, btinfo);

  if (btrace_insn_cmp (replay, &end) == 0)
    record_btrace_stop_replaying (tp);
}
2046
/* The to_resume method of target record-btrace.

   When not replaying (forward direction, no replaying thread), simply
   forwards the request to the target beneath.  Otherwise records a move
   intent (step/cont, forward/reverse) in each matching thread's btrace
   flags; the actual stepping happens in record_btrace_wait.  */

static void
record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
		      enum gdb_signal signal)
{
  struct thread_info *tp;
  enum btrace_thread_flag flag, cflag;

  DEBUG ("resume %s: %s%s", target_pid_to_str (ptid),
	 execution_direction == EXEC_REVERSE ? "reverse-" : "",
	 step ? "step" : "cont");

  /* Store the execution direction of the last resume.

     If there is more than one to_resume call, we have to rely on infrun
     to not change the execution direction in-between.  */
  record_btrace_resume_exec_dir = execution_direction;

  /* As long as we're not replaying, just forward the request.

     For non-stop targets this means that no thread is replaying.  In order to
     make progress, we may need to explicitly move replaying threads to the end
     of their execution history.  */
  if ((execution_direction != EXEC_REVERSE)
      && !record_btrace_is_replaying (ops, minus_one_ptid))
    {
      ops = ops->beneath;
      ops->to_resume (ops, ptid, step, signal);
      return;
    }

  /* Compute the btrace thread flag for the requested move.  */
  if (execution_direction == EXEC_REVERSE)
    {
      flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;
      cflag = BTHR_RCONT;
    }
  else
    {
      flag = step == 0 ? BTHR_CONT : BTHR_STEP;
      cflag = BTHR_CONT;
    }

  /* We just indicate the resume intent here.  The actual stepping happens in
     record_btrace_wait below.

     For all-stop targets, we only step INFERIOR_PTID and continue others.  */
  if (!target_is_non_stop_p ())
    {
      gdb_assert (ptid_match (inferior_ptid, ptid));

      ALL_NON_EXITED_THREADS (tp)
	if (ptid_match (tp->ptid, ptid))
	  {
	    if (ptid_match (tp->ptid, inferior_ptid))
	      record_btrace_resume_thread (tp, flag);
	    else
	      record_btrace_resume_thread (tp, cflag);
	  }
    }
  else
    {
      ALL_NON_EXITED_THREADS (tp)
	if (ptid_match (tp->ptid, ptid))
	  record_btrace_resume_thread (tp, flag);
    }

  /* Async support.  */
  if (target_can_async_p ())
    {
      target_async (1);
      mark_async_event_handler (record_btrace_async_inferior_event_handler);
    }
}
2122
/* Cancel resuming TP.

   Clears any pending move/stop intent and, if TP has reached the end of
   its execution history, stops replaying it.  No-op when nothing is
   pending.  */

static void
record_btrace_cancel_resume (struct thread_info *tp)
{
  enum btrace_thread_flag flags;

  flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
  if (flags == 0)
    return;

  DEBUG ("cancel resume thread %d (%s): %x (%s)", tp->num,
	 target_pid_to_str (tp->ptid), flags,
	 btrace_thread_flag_to_str (flags));

  tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
  record_btrace_stop_replaying_at_end (tp);
}
2141
/* Return a target_waitstatus indicating that we ran out of history.  */

static struct target_waitstatus
btrace_step_no_history (void)
{
  struct target_waitstatus status;

  status.kind = TARGET_WAITKIND_NO_HISTORY;

  return status;
}

/* Return a target_waitstatus indicating that a step finished.
   Reported as a SIGTRAP stop, like a live single-step.  */

static struct target_waitstatus
btrace_step_stopped (void)
{
  struct target_waitstatus status;

  status.kind = TARGET_WAITKIND_STOPPED;
  status.value.sig = GDB_SIGNAL_TRAP;

  return status;
}

/* Return a target_waitstatus indicating that a thread was stopped as
   requested.  Uses GDB_SIGNAL_0 to distinguish from a step stop.  */

static struct target_waitstatus
btrace_step_stopped_on_request (void)
{
  struct target_waitstatus status;

  status.kind = TARGET_WAITKIND_STOPPED;
  status.value.sig = GDB_SIGNAL_0;

  return status;
}

/* Return a target_waitstatus indicating a spurious stop.  */

static struct target_waitstatus
btrace_step_spurious (void)
{
  struct target_waitstatus status;

  status.kind = TARGET_WAITKIND_SPURIOUS;

  return status;
}

/* Return a target_waitstatus indicating that the thread was not resumed.  */

static struct target_waitstatus
btrace_step_no_resumed (void)
{
  struct target_waitstatus status;

  status.kind = TARGET_WAITKIND_NO_RESUMED;

  return status;
}

/* Return a target_waitstatus indicating that we should wait again.  */

static struct target_waitstatus
btrace_step_again (void)
{
  struct target_waitstatus status;

  status.kind = TARGET_WAITKIND_IGNORE;

  return status;
}
2216
52834460
MM
2217/* Clear the record histories. */
2218
2219static void
2220record_btrace_clear_histories (struct btrace_thread_info *btinfo)
2221{
2222 xfree (btinfo->insn_history);
2223 xfree (btinfo->call_history);
2224
2225 btinfo->insn_history = NULL;
2226 btinfo->call_history = NULL;
2227}
2228
3c615f99
MM
2229/* Check whether TP's current replay position is at a breakpoint. */
2230
2231static int
2232record_btrace_replay_at_breakpoint (struct thread_info *tp)
2233{
2234 struct btrace_insn_iterator *replay;
2235 struct btrace_thread_info *btinfo;
2236 const struct btrace_insn *insn;
2237 struct inferior *inf;
2238
2239 btinfo = &tp->btrace;
2240 replay = btinfo->replay;
2241
2242 if (replay == NULL)
2243 return 0;
2244
2245 insn = btrace_insn_get (replay);
2246 if (insn == NULL)
2247 return 0;
2248
2249 inf = find_inferior_ptid (tp->ptid);
2250 if (inf == NULL)
2251 return 0;
2252
2253 return record_check_stopped_by_breakpoint (inf->aspace, insn->pc,
2254 &btinfo->stop_reason);
2255}
2256
/* Step one instruction in forward direction.

   Returns a spurious stop if the step succeeded, a stopped (SIGTRAP)
   status if we stepped onto a breakpoint, and a no-history status if we
   are not replaying or ran off the end of the trace.  */

static struct target_waitstatus
record_btrace_single_step_forward (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay, end;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;
  replay = btinfo->replay;

  /* We're done if we're not replaying.  */
  if (replay == NULL)
    return btrace_step_no_history ();

  /* Check if we're stepping a breakpoint.  */
  if (record_btrace_replay_at_breakpoint (tp))
    return btrace_step_stopped ();

  /* Skip gaps during replay.  */
  do
    {
      unsigned int steps;

      /* We will bail out here if we continue stepping after reaching the end
	 of the execution history.  */
      steps = btrace_insn_next (replay, 1);
      if (steps == 0)
	return btrace_step_no_history ();
    }
  while (btrace_insn_get (replay) == NULL);

  /* Determine the end of the instruction trace.  */
  btrace_insn_end (&end, btinfo);

  /* The execution trace contains (and ends with) the current instruction.
     This instruction has not been executed, yet, so the trace really ends
     one instruction earlier.  */
  if (btrace_insn_cmp (replay, &end) == 0)
    return btrace_step_no_history ();

  return btrace_step_spurious ();
}
2300
/* Step one instruction in backward direction.

   Starts replaying if the thread was not replaying already.  Returns a
   spurious stop on success, a stopped (SIGTRAP) status if we de-executed
   a breakpoint location, and a no-history status at the beginning of the
   trace.  */

static struct target_waitstatus
record_btrace_single_step_backward (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;
  replay = btinfo->replay;

  /* Start replaying if we're not already doing so.  */
  if (replay == NULL)
    replay = record_btrace_start_replaying (tp);

  /* If we can't step any further, we reached the end of the history.
     Skip gaps during replay.  */
  do
    {
      unsigned int steps;

      steps = btrace_insn_prev (replay, 1);
      if (steps == 0)
	return btrace_step_no_history ();
    }
  while (btrace_insn_get (replay) == NULL);

  /* Check if we're stepping a breakpoint.

     For reverse-stepping, this check is after the step.  There is logic in
     infrun.c that handles reverse-stepping separately.  See, for example,
     proceed and adjust_pc_after_break.

     This code assumes that for reverse-stepping, PC points to the last
     de-executed instruction, whereas for forward-stepping PC points to the
     next to-be-executed instruction.  */
  if (record_btrace_replay_at_breakpoint (tp))
    return btrace_step_stopped ();

  return btrace_step_spurious ();
}
2342
/* Step a single thread according to its BTHR_* request flags.

   Consumes the thread's move/stop flags up front; continue-style requests
   (BTHR_CONT/BTHR_RCONT) re-arm the flags so the thread keeps moving on
   the next iteration.  Threads that hit the end of their history also keep
   their flags set - to_wait stops them when the event is reported.  */

static struct target_waitstatus
record_btrace_step_thread (struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;
  struct target_waitstatus status;
  enum btrace_thread_flag flags;

  btinfo = &tp->btrace;

  /* Fetch and clear the pending request.  */
  flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
  btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);

  DEBUG ("stepping thread %d (%s): %x (%s)", tp->num,
	 target_pid_to_str (tp->ptid), flags,
	 btrace_thread_flag_to_str (flags));

  /* We can't step without an execution history.  */
  if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
    return btrace_step_no_history ();

  switch (flags)
    {
    default:
      internal_error (__FILE__, __LINE__, _("invalid stepping type."));

    case BTHR_STOP:
      return btrace_step_stopped_on_request ();

    case BTHR_STEP:
      status = record_btrace_single_step_forward (tp);
      if (status.kind != TARGET_WAITKIND_SPURIOUS)
	break;

      return btrace_step_stopped ();

    case BTHR_RSTEP:
      status = record_btrace_single_step_backward (tp);
      if (status.kind != TARGET_WAITKIND_SPURIOUS)
	break;

      return btrace_step_stopped ();

    case BTHR_CONT:
      status = record_btrace_single_step_forward (tp);
      if (status.kind != TARGET_WAITKIND_SPURIOUS)
	break;

      /* Re-arm the request so we keep moving this thread.  */
      btinfo->flags |= flags;
      return btrace_step_again ();

    case BTHR_RCONT:
      status = record_btrace_single_step_backward (tp);
      if (status.kind != TARGET_WAITKIND_SPURIOUS)
	break;

      /* Re-arm the request so we keep moving this thread.  */
      btinfo->flags |= flags;
      return btrace_step_again ();
    }

  /* We keep threads moving at the end of their execution history.  The to_wait
     method will stop the thread for whom the event is reported.  */
  if (status.kind == TARGET_WAITKIND_NO_HISTORY)
    btinfo->flags |= flags;

  return status;
}
2411
/* A vector of thread pointers, used by to_wait to track moving threads
   and threads that ran out of execution history.  */

typedef struct thread_info * tp_t;
DEF_VEC_P (tp_t);
2416
a6b5be76
MM
2417/* Announce further events if necessary. */
2418
2419static void
2420record_btrace_maybe_mark_async_event (const VEC (tp_t) *moving,
2421 const VEC (tp_t) *no_history)
2422{
2423 int more_moving, more_no_history;
2424
2425 more_moving = !VEC_empty (tp_t, moving);
2426 more_no_history = !VEC_empty (tp_t, no_history);
2427
2428 if (!more_moving && !more_no_history)
2429 return;
2430
2431 if (more_moving)
2432 DEBUG ("movers pending");
2433
2434 if (more_no_history)
2435 DEBUG ("no-history pending");
2436
2437 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2438}
2439
/* The to_wait method of target record-btrace.

   When not replaying, forwards to the target beneath.  Otherwise steps
   all requested threads round-robin until one of them reports an event,
   delaying "no execution history" reports until nothing else is left.  */

static ptid_t
record_btrace_wait (struct target_ops *ops, ptid_t ptid,
		    struct target_waitstatus *status, int options)
{
  VEC (tp_t) *moving, *no_history;
  struct thread_info *tp, *eventing;
  struct cleanup *cleanups = make_cleanup (null_cleanup, NULL);

  DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);

  /* As long as we're not replaying, just forward the request.  */
  if ((execution_direction != EXEC_REVERSE)
      && !record_btrace_is_replaying (ops, minus_one_ptid))
    {
      ops = ops->beneath;
      return ops->to_wait (ops, ptid, status, options);
    }

  moving = NULL;
  no_history = NULL;

  make_cleanup (VEC_cleanup (tp_t), &moving);
  make_cleanup (VEC_cleanup (tp_t), &no_history);

  /* Keep a work list of moving threads.  */
  ALL_NON_EXITED_THREADS (tp)
    if (ptid_match (tp->ptid, ptid)
	&& ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0))
      VEC_safe_push (tp_t, moving, tp);

  /* No thread had a pending request - report that nothing was resumed.  */
  if (VEC_empty (tp_t, moving))
    {
      *status = btrace_step_no_resumed ();

      DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid),
	     target_waitstatus_to_string (status));

      do_cleanups (cleanups);
      return null_ptid;
    }

  /* Step moving threads one by one, one step each, until either one thread
     reports an event or we run out of threads to step.

     When stepping more than one thread, chances are that some threads reach
     the end of their execution history earlier than others.  If we reported
     this immediately, all-stop on top of non-stop would stop all threads and
     resume the same threads next time.  And we would report the same thread
     having reached the end of its execution history again.

     In the worst case, this would starve the other threads.  But even if other
     threads would be allowed to make progress, this would result in far too
     many intermediate stops.

     We therefore delay the reporting of "no execution history" until we have
     nothing else to report.  By this time, all threads should have moved to
     either the beginning or the end of their execution history.  There will
     be a single user-visible stop.  */
  eventing = NULL;
  while ((eventing == NULL) && !VEC_empty (tp_t, moving))
    {
      unsigned int ix;

      ix = 0;
      while ((eventing == NULL) && VEC_iterate (tp_t, moving, ix, tp))
	{
	  *status = record_btrace_step_thread (tp);

	  switch (status->kind)
	    {
	    case TARGET_WAITKIND_IGNORE:
	      ix++;
	      break;

	    case TARGET_WAITKIND_NO_HISTORY:
	      VEC_safe_push (tp_t, no_history,
			     VEC_ordered_remove (tp_t, moving, ix));
	      break;

	    default:
	      eventing = VEC_unordered_remove (tp_t, moving, ix);
	      break;
	    }
	}
    }

  if (eventing == NULL)
    {
      /* We started with at least one moving thread.  This thread must have
	 either stopped or reached the end of its execution history.

	 In the former case, EVENTING must not be NULL.
	 In the latter case, NO_HISTORY must not be empty.  */
      gdb_assert (!VEC_empty (tp_t, no_history));

      /* We kept threads moving at the end of their execution history.  Stop
	 EVENTING now that we are going to report its stop.  */
      eventing = VEC_unordered_remove (tp_t, no_history, 0);
      eventing->btrace.flags &= ~BTHR_MOVE;

      *status = btrace_step_no_history ();
    }

  gdb_assert (eventing != NULL);

  /* We kept threads replaying at the end of their execution history.  Stop
     replaying EVENTING now that we are going to report its stop.  */
  record_btrace_stop_replaying_at_end (eventing);

  /* Stop all other threads. */
  if (!target_is_non_stop_p ())
    ALL_NON_EXITED_THREADS (tp)
      record_btrace_cancel_resume (tp);

  /* In async mode, we need to announce further events.  */
  if (target_is_async_p ())
    record_btrace_maybe_mark_async_event (moving, no_history);

  /* Start record histories anew from the current position.  */
  record_btrace_clear_histories (&eventing->btrace);

  /* We moved the replay position but did not update registers.  */
  registers_changed_ptid (eventing->ptid);

  DEBUG ("wait ended by thread %d (%s): %s", eventing->num,
	 target_pid_to_str (eventing->ptid),
	 target_waitstatus_to_string (status));

  do_cleanups (cleanups);
  return eventing->ptid;
}
2573
6e4879f0
MM
2574/* The to_stop method of target record-btrace. */
2575
2576static void
2577record_btrace_stop (struct target_ops *ops, ptid_t ptid)
2578{
2579 DEBUG ("stop %s", target_pid_to_str (ptid));
2580
2581 /* As long as we're not replaying, just forward the request. */
a52eab48
MM
2582 if ((execution_direction != EXEC_REVERSE)
2583 && !record_btrace_is_replaying (ops, minus_one_ptid))
6e4879f0
MM
2584 {
2585 ops = ops->beneath;
2586 ops->to_stop (ops, ptid);
2587 }
2588 else
2589 {
2590 struct thread_info *tp;
2591
2592 ALL_NON_EXITED_THREADS (tp)
2593 if (ptid_match (tp->ptid, ptid))
2594 {
2595 tp->btrace.flags &= ~BTHR_MOVE;
2596 tp->btrace.flags |= BTHR_STOP;
2597 }
2598 }
2599 }
2600
/* The to_can_execute_reverse method of target record-btrace.
   Replaying recorded execution always supports reverse execution.  */

static int
record_btrace_can_execute_reverse (struct target_ops *self)
{
  return 1;
}
2608
9e8915c6 2609/* The to_stopped_by_sw_breakpoint method of target record-btrace. */
52834460 2610
9e8915c6
PA
2611static int
2612record_btrace_stopped_by_sw_breakpoint (struct target_ops *ops)
52834460 2613{
a52eab48 2614 if (record_btrace_is_replaying (ops, minus_one_ptid))
9e8915c6
PA
2615 {
2616 struct thread_info *tp = inferior_thread ();
2617
2618 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2619 }
2620
2621 return ops->beneath->to_stopped_by_sw_breakpoint (ops->beneath);
2622}
2623
2624/* The to_supports_stopped_by_sw_breakpoint method of target
2625 record-btrace. */
2626
2627static int
2628record_btrace_supports_stopped_by_sw_breakpoint (struct target_ops *ops)
2629{
a52eab48 2630 if (record_btrace_is_replaying (ops, minus_one_ptid))
9e8915c6
PA
2631 return 1;
2632
2633 return ops->beneath->to_supports_stopped_by_sw_breakpoint (ops->beneath);
2634}
2635
/* The to_stopped_by_hw_breakpoint method of target record-btrace.
   (The original comment said "sw" - this is the hardware variant.)  */

static int
record_btrace_stopped_by_hw_breakpoint (struct target_ops *ops)
{
  if (record_btrace_is_replaying (ops, minus_one_ptid))
    {
      struct thread_info *tp = inferior_thread ();

      return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
    }

  /* Not replaying - defer to the target beneath.  */
  return ops->beneath->to_stopped_by_hw_breakpoint (ops->beneath);
}
2650
2651/* The to_supports_stopped_by_hw_breakpoint method of target
2652 record-btrace. */
2653
2654static int
2655record_btrace_supports_stopped_by_hw_breakpoint (struct target_ops *ops)
2656{
a52eab48 2657 if (record_btrace_is_replaying (ops, minus_one_ptid))
9e8915c6 2658 return 1;
52834460 2659
9e8915c6 2660 return ops->beneath->to_supports_stopped_by_hw_breakpoint (ops->beneath);
b2f4cfde
MM
2661}
2662
e8032dde 2663/* The to_update_thread_list method of target record-btrace. */
e2887aa3
MM
2664
2665static void
e8032dde 2666record_btrace_update_thread_list (struct target_ops *ops)
e2887aa3 2667{
e8032dde 2668 /* We don't add or remove threads during replay. */
a52eab48 2669 if (record_btrace_is_replaying (ops, minus_one_ptid))
e2887aa3
MM
2670 return;
2671
2672 /* Forward the request. */
e75fdfca 2673 ops = ops->beneath;
e8032dde 2674 ops->to_update_thread_list (ops);
e2887aa3
MM
2675}
2676
2677/* The to_thread_alive method of target record-btrace. */
2678
2679static int
2680record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
2681{
2682 /* We don't add or remove threads during replay. */
a52eab48 2683 if (record_btrace_is_replaying (ops, minus_one_ptid))
e2887aa3
MM
2684 return find_thread_ptid (ptid) != NULL;
2685
2686 /* Forward the request. */
e75fdfca
TT
2687 ops = ops->beneath;
2688 return ops->to_thread_alive (ops, ptid);
e2887aa3
MM
2689}
2690
/* Set the replay branch trace instruction iterator.  If IT is NULL, replay
   is stopped.  Clears the record histories and re-prints the current frame
   unless the replay position is unchanged.  */

static void
record_btrace_set_replay (struct thread_info *tp,
			  const struct btrace_insn_iterator *it)
{
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;

  if (it == NULL || it->function == NULL)
    record_btrace_stop_replaying (tp);
  else
    {
      if (btinfo->replay == NULL)
	record_btrace_start_replaying (tp);
      else if (btrace_insn_cmp (btinfo->replay, it) == 0)
	/* Already at the requested position - nothing to do.  */
	return;

      /* Move the replay iterator; registers must be re-fetched.  */
      *btinfo->replay = *it;
      registers_changed_ptid (tp->ptid);
    }

  /* Start anew from the new replay position.  */
  record_btrace_clear_histories (btinfo);

  stop_pc = regcache_read_pc (get_current_regcache ());
  print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
}
2721
2722/* The to_goto_record_begin method of target record-btrace. */
2723
2724static void
08475817 2725record_btrace_goto_begin (struct target_ops *self)
066ce621
MM
2726{
2727 struct thread_info *tp;
2728 struct btrace_insn_iterator begin;
2729
2730 tp = require_btrace_thread ();
2731
2732 btrace_insn_begin (&begin, &tp->btrace);
2733 record_btrace_set_replay (tp, &begin);
066ce621
MM
2734}
2735
2736/* The to_goto_record_end method of target record-btrace. */
2737
2738static void
307a1b91 2739record_btrace_goto_end (struct target_ops *ops)
066ce621
MM
2740{
2741 struct thread_info *tp;
2742
2743 tp = require_btrace_thread ();
2744
2745 record_btrace_set_replay (tp, NULL);
066ce621
MM
2746}
2747
2748/* The to_goto_record method of target record-btrace. */
2749
2750static void
606183ac 2751record_btrace_goto (struct target_ops *self, ULONGEST insn)
066ce621
MM
2752{
2753 struct thread_info *tp;
2754 struct btrace_insn_iterator it;
2755 unsigned int number;
2756 int found;
2757
2758 number = insn;
2759
2760 /* Check for wrap-arounds. */
2761 if (number != insn)
2762 error (_("Instruction number out of range."));
2763
2764 tp = require_btrace_thread ();
2765
2766 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
2767 if (found == 0)
2768 error (_("No such instruction."));
2769
2770 record_btrace_set_replay (tp, &it);
066ce621
MM
2771}
2772
797094dd
MM
2773/* The to_record_stop_replaying method of target record-btrace. */
2774
2775static void
2776record_btrace_stop_replaying_all (struct target_ops *self)
2777{
2778 struct thread_info *tp;
2779
2780 ALL_NON_EXITED_THREADS (tp)
2781 record_btrace_stop_replaying (tp);
2782}
2783
70ad5bff
MM
2784/* The to_execution_direction target method. */
2785
2786static enum exec_direction_kind
2787record_btrace_execution_direction (struct target_ops *self)
2788{
2789 return record_btrace_resume_exec_dir;
2790}
2791
aef92902
MM
2792/* The to_prepare_to_generate_core target method. */
2793
2794static void
2795record_btrace_prepare_to_generate_core (struct target_ops *self)
2796{
2797 record_btrace_generating_corefile = 1;
2798}
2799
2800/* The to_done_generating_core target method. */
2801
2802static void
2803record_btrace_done_generating_core (struct target_ops *self)
2804{
2805 record_btrace_generating_corefile = 0;
2806}
2807
/* Initialize the record-btrace target ops.  */

static void
init_record_btrace_ops (void)
{
  struct target_ops *ops;

  ops = &record_btrace_ops;
  /* Identification.  */
  ops->to_shortname = "record-btrace";
  ops->to_longname = "Branch tracing target";
  ops->to_doc = "Collect control-flow trace and provide the execution history.";
  /* Lifecycle.  */
  ops->to_open = record_btrace_open;
  ops->to_close = record_btrace_close;
  ops->to_async = record_btrace_async;
  ops->to_detach = record_detach;
  ops->to_disconnect = record_disconnect;
  ops->to_mourn_inferior = record_mourn_inferior;
  ops->to_kill = record_kill;
  /* Recording and history browsing.  */
  ops->to_stop_recording = record_btrace_stop_recording;
  ops->to_info_record = record_btrace_info;
  ops->to_insn_history = record_btrace_insn_history;
  ops->to_insn_history_from = record_btrace_insn_history_from;
  ops->to_insn_history_range = record_btrace_insn_history_range;
  ops->to_call_history = record_btrace_call_history;
  ops->to_call_history_from = record_btrace_call_history_from;
  ops->to_call_history_range = record_btrace_call_history_range;
  ops->to_record_is_replaying = record_btrace_is_replaying;
  ops->to_record_will_replay = record_btrace_will_replay;
  ops->to_record_stop_replaying = record_btrace_stop_replaying_all;
  /* Memory, breakpoints, and registers.  */
  ops->to_xfer_partial = record_btrace_xfer_partial;
  ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
  ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
  ops->to_fetch_registers = record_btrace_fetch_registers;
  ops->to_store_registers = record_btrace_store_registers;
  ops->to_prepare_to_store = record_btrace_prepare_to_store;
  ops->to_get_unwinder = &record_btrace_to_get_unwinder;
  ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;
  /* Execution control.  */
  ops->to_resume = record_btrace_resume;
  ops->to_wait = record_btrace_wait;
  ops->to_stop = record_btrace_stop;
  ops->to_update_thread_list = record_btrace_update_thread_list;
  ops->to_thread_alive = record_btrace_thread_alive;
  ops->to_goto_record_begin = record_btrace_goto_begin;
  ops->to_goto_record_end = record_btrace_goto_end;
  ops->to_goto_record = record_btrace_goto;
  ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
  ops->to_stopped_by_sw_breakpoint = record_btrace_stopped_by_sw_breakpoint;
  ops->to_supports_stopped_by_sw_breakpoint
    = record_btrace_supports_stopped_by_sw_breakpoint;
  ops->to_stopped_by_hw_breakpoint = record_btrace_stopped_by_hw_breakpoint;
  ops->to_supports_stopped_by_hw_breakpoint
    = record_btrace_supports_stopped_by_hw_breakpoint;
  ops->to_execution_direction = record_btrace_execution_direction;
  ops->to_prepare_to_generate_core = record_btrace_prepare_to_generate_core;
  ops->to_done_generating_core = record_btrace_done_generating_core;
  ops->to_stratum = record_stratum;
  ops->to_magic = OPS_MAGIC;
}
2866
/* Start recording in BTS format.  Restores the previous format setting
   if opening the target fails.  */

static void
cmd_record_btrace_bts_start (char *args, int from_tty)
{
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  record_btrace_conf.format = BTRACE_FORMAT_BTS;

  TRY
    {
      execute_command ("target record-btrace", from_tty);
    }
  CATCH (exception, RETURN_MASK_ALL)
    {
      /* Opening the target failed - reset the format and re-throw.  */
      record_btrace_conf.format = BTRACE_FORMAT_NONE;
      throw_exception (exception);
    }
  END_CATCH
}
2888
/* Start recording in Intel Processor Trace format.  Restores the previous
   format setting if opening the target fails.  */

static void
cmd_record_btrace_pt_start (char *args, int from_tty)
{
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  record_btrace_conf.format = BTRACE_FORMAT_PT;

  TRY
    {
      execute_command ("target record-btrace", from_tty);
    }
  CATCH (exception, RETURN_MASK_ALL)
    {
      /* Opening the target failed - reset the format and re-throw.  */
      record_btrace_conf.format = BTRACE_FORMAT_NONE;
      throw_exception (exception);
    }
  END_CATCH
}
2910
/* Alias for "target record".  Tries the PT format first and falls back to
   BTS if PT is not available; resets the format if both fail.  */

static void
cmd_record_btrace_start (char *args, int from_tty)
{
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  record_btrace_conf.format = BTRACE_FORMAT_PT;

  TRY
    {
      execute_command ("target record-btrace", from_tty);
    }
  CATCH (exception, RETURN_MASK_ALL)
    {
      /* PT failed - fall back to BTS.  */
      record_btrace_conf.format = BTRACE_FORMAT_BTS;

      TRY
	{
	  execute_command ("target record-btrace", from_tty);
	}
      CATCH (exception, RETURN_MASK_ALL)
	{
	  /* BTS failed as well - give up and re-throw.  */
	  record_btrace_conf.format = BTRACE_FORMAT_NONE;
	  throw_exception (exception);
	}
      END_CATCH
    }
  END_CATCH
}
2942
/* The "set record btrace" command.  Lists the available sub-settings.  */

static void
cmd_set_record_btrace (char *args, int from_tty)
{
  cmd_show_list (set_record_btrace_cmdlist, from_tty, "");
}
2950
/* The "show record btrace" command.  Lists the current sub-settings.  */

static void
cmd_show_record_btrace (char *args, int from_tty)
{
  cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
}
2958
2959/* The "show record btrace replay-memory-access" command. */
2960
2961static void
2962cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2963 struct cmd_list_element *c, const char *value)
2964{
2965 fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
2966 replay_memory_access);
2967}
2968
/* The "set record btrace bts" command.  Prints usage and the available
   sub-settings; it is a prefix command with no action of its own.  */

static void
cmd_set_record_btrace_bts (char *args, int from_tty)
{
  printf_unfiltered (_("\"set record btrace bts\" must be followed "
		       "by an appropriate subcommand.\n"));
  help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
	     all_commands, gdb_stdout);
}
2979
/* The "show record btrace bts" command.  Lists the current bts settings.  */

static void
cmd_show_record_btrace_bts (char *args, int from_tty)
{
  cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
}
2987
/* The "set record btrace pt" command.  Prints usage and the available
   sub-settings; it is a prefix command with no action of its own.  */

static void
cmd_set_record_btrace_pt (char *args, int from_tty)
{
  printf_unfiltered (_("\"set record btrace pt\" must be followed "
		       "by an appropriate subcommand.\n"));
  help_list (set_record_btrace_pt_cmdlist, "set record btrace pt ",
	     all_commands, gdb_stdout);
}
2998
/* The "show record btrace pt" command.  Lists the current pt settings.  */

static void
cmd_show_record_btrace_pt (char *args, int from_tty)
{
  cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, "");
}
3006
/* The "record bts buffer-size" show value function.  */

static void
show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
				   struct cmd_list_element *c,
				   const char *value)
{
  fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
		    value);
}
3017
/* The "record pt buffer-size" show value function.  */

static void
show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
				  struct cmd_list_element *c,
				  const char *value)
{
  fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
		    value);
}
3028
/* Provide a prototype to silence -Wmissing-prototypes.  */
void _initialize_record_btrace (void);

/* Initialize btrace commands.  */

void
_initialize_record_btrace (void)
{
  /* "record btrace" and its "record b" alias.  */
  add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
		  _("Start branch trace recording."), &record_btrace_cmdlist,
		  "record btrace ", 0, &record_cmdlist);
  add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);

  /* "record btrace bts" / "record bts".  */
  add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
	   _("\
Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
The processor stores a from/to record for each branch into a cyclic buffer.\n\
This format may not be available on all processors."),
	   &record_btrace_cmdlist);
  add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);

  /* "record btrace pt" / "record pt".  */
  add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
	   _("\
Start branch trace recording in Intel Processor Trace format.\n\n\
This format may not be available on all processors."),
	   &record_btrace_cmdlist);
  add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);

  /* "set/show record btrace" prefixes.  */
  add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
		  _("Set record options"), &set_record_btrace_cmdlist,
		  "set record btrace ", 0, &set_record_cmdlist);

  add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
		  _("Show record options"), &show_record_btrace_cmdlist,
		  "show record btrace ", 0, &show_record_cmdlist);

  /* "set/show record btrace replay-memory-access".  */
  add_setshow_enum_cmd ("replay-memory-access", no_class,
			replay_memory_access_types, &replay_memory_access, _("\
Set what memory accesses are allowed during replay."), _("\
Show what memory accesses are allowed during replay."),
			   _("Default is READ-ONLY.\n\n\
The btrace record target does not trace data.\n\
The memory therefore corresponds to the live target and not \
to the current replay position.\n\n\
When READ-ONLY, allow accesses to read-only memory during replay.\n\
When READ-WRITE, allow accesses to read-only and read-write memory during \
replay."),
			   NULL, cmd_show_replay_memory_access,
			   &set_record_btrace_cmdlist,
			   &show_record_btrace_cmdlist);

  /* "set/show record btrace bts" prefixes and buffer-size setting.  */
  add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
		  _("Set record btrace bts options"),
		  &set_record_btrace_bts_cmdlist,
		  "set record btrace bts ", 0, &set_record_btrace_cmdlist);

  add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
		  _("Show record btrace bts options"),
		  &show_record_btrace_bts_cmdlist,
		  "show record btrace bts ", 0, &show_record_btrace_cmdlist);

  add_setshow_uinteger_cmd ("buffer-size", no_class,
			    &record_btrace_conf.bts.size,
			    _("Set the record/replay bts buffer size."),
			    _("Show the record/replay bts buffer size."), _("\
When starting recording request a trace buffer of this size. \
The actual buffer size may differ from the requested size. \
Use \"info record\" to see the actual buffer size.\n\n\
Bigger buffers allow longer recording but also take more time to process \
the recorded execution trace.\n\n\
The trace buffer size may not be changed while recording."), NULL,
			    show_record_bts_buffer_size_value,
			    &set_record_btrace_bts_cmdlist,
			    &show_record_btrace_bts_cmdlist);

  /* "set/show record btrace pt" prefixes and buffer-size setting.  */
  add_prefix_cmd ("pt", class_support, cmd_set_record_btrace_pt,
		  _("Set record btrace pt options"),
		  &set_record_btrace_pt_cmdlist,
		  "set record btrace pt ", 0, &set_record_btrace_cmdlist);

  add_prefix_cmd ("pt", class_support, cmd_show_record_btrace_pt,
		  _("Show record btrace pt options"),
		  &show_record_btrace_pt_cmdlist,
		  "show record btrace pt ", 0, &show_record_btrace_cmdlist);

  add_setshow_uinteger_cmd ("buffer-size", no_class,
			    &record_btrace_conf.pt.size,
			    _("Set the record/replay pt buffer size."),
			    _("Show the record/replay pt buffer size."), _("\
Bigger buffers allow longer recording but also take more time to process \
the recorded execution.\n\
The actual buffer size may differ from the requested size. Use \"info record\" \
to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
			    &set_record_btrace_pt_cmdlist,
			    &show_record_btrace_pt_cmdlist);

  /* Register the target and set up the function-segment cache.  */
  init_record_btrace_ops ();
  add_target (&record_btrace_ops);

  bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
			       xcalloc, xfree);

  /* Default trace buffer sizes (bytes).  */
  record_btrace_conf.bts.size = 64 * 1024;
  record_btrace_conf.pt.size = 16 * 1024;
}
This page took 0.48178 seconds and 4 git commands to generate.