infrun: scheduler-locking replay
[deliverable/binutils-gdb.git] / gdb / record-btrace.c
CommitLineData
afedecd3
MM
1/* Branch trace support for GDB, the GNU debugger.
2
32d0add0 3 Copyright (C) 2013-2015 Free Software Foundation, Inc.
afedecd3
MM
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22#include "defs.h"
23#include "record.h"
24#include "gdbthread.h"
25#include "target.h"
26#include "gdbcmd.h"
27#include "disasm.h"
28#include "observer.h"
afedecd3
MM
29#include "cli/cli-utils.h"
30#include "source.h"
31#include "ui-out.h"
32#include "symtab.h"
33#include "filenames.h"
1f3ef581 34#include "regcache.h"
cecac1ab 35#include "frame-unwind.h"
0b722aec 36#include "hashtab.h"
45741a9c 37#include "infrun.h"
70ad5bff
MM
38#include "event-loop.h"
39#include "inf-loop.h"
e3cfc1c7 40#include "vec.h"
afedecd3
MM
41
42/* The target_ops of record-btrace. */
43static struct target_ops record_btrace_ops;
44
45/* A new thread observer enabling branch tracing for the new thread. */
46static struct observer *record_btrace_thread_observer;
47
67b5c0c1
MM
48/* Memory access types used in set/show record btrace replay-memory-access. */
49static const char replay_memory_access_read_only[] = "read-only";
50static const char replay_memory_access_read_write[] = "read-write";
51static const char *const replay_memory_access_types[] =
52{
53 replay_memory_access_read_only,
54 replay_memory_access_read_write,
55 NULL
56};
57
58/* The currently allowed replay memory access type. */
59static const char *replay_memory_access = replay_memory_access_read_only;
60
61/* Command lists for "set/show record btrace". */
62static struct cmd_list_element *set_record_btrace_cmdlist;
63static struct cmd_list_element *show_record_btrace_cmdlist;
633785ff 64
70ad5bff
MM
65/* The execution direction of the last resume we got. See record-full.c. */
66static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;
67
68/* The async event handler for reverse/replay execution. */
69static struct async_event_handler *record_btrace_async_inferior_event_handler;
70
aef92902
MM
71/* A flag indicating that we are currently generating a core file. */
72static int record_btrace_generating_corefile;
73
f4abbc16
MM
74/* The current branch trace configuration. */
75static struct btrace_config record_btrace_conf;
76
77/* Command list for "record btrace". */
78static struct cmd_list_element *record_btrace_cmdlist;
79
d33501a5
MM
80/* Command lists for "set/show record btrace bts". */
81static struct cmd_list_element *set_record_btrace_bts_cmdlist;
82static struct cmd_list_element *show_record_btrace_bts_cmdlist;
83
b20a6524
MM
84/* Command lists for "set/show record btrace pt". */
85static struct cmd_list_element *set_record_btrace_pt_cmdlist;
86static struct cmd_list_element *show_record_btrace_pt_cmdlist;
87
afedecd3
MM
88/* Print a record-btrace debug message. Use do ... while (0) to avoid
89 ambiguities when used in if statements. */
90
91#define DEBUG(msg, args...) \
92 do \
93 { \
94 if (record_debug != 0) \
95 fprintf_unfiltered (gdb_stdlog, \
96 "[record-btrace] " msg "\n", ##args); \
97 } \
98 while (0)
99
100
101/* Update the branch trace for the current thread and return a pointer to its
066ce621 102 thread_info.
afedecd3
MM
103
104 Throws an error if there is no thread or no trace. This function never
105 returns NULL. */
106
066ce621
MM
107static struct thread_info *
108require_btrace_thread (void)
afedecd3
MM
109{
110 struct thread_info *tp;
afedecd3
MM
111
112 DEBUG ("require");
113
114 tp = find_thread_ptid (inferior_ptid);
115 if (tp == NULL)
116 error (_("No thread."));
117
118 btrace_fetch (tp);
119
6e07b1d2 120 if (btrace_is_empty (tp))
afedecd3
MM
121 error (_("No trace."));
122
066ce621
MM
123 return tp;
124}
125
126/* Update the branch trace for the current thread and return a pointer to its
127 branch trace information struct.
128
129 Throws an error if there is no thread or no trace. This function never
130 returns NULL. */
131
132static struct btrace_thread_info *
133require_btrace (void)
134{
135 struct thread_info *tp;
136
137 tp = require_btrace_thread ();
138
139 return &tp->btrace;
afedecd3
MM
140}
141
142/* Enable branch tracing for one thread. Warn on errors. */
143
static void
record_btrace_enable_warn (struct thread_info *tp)
{
  /* Downgrade errors to warnings: failing to enable tracing for a newly
     appearing thread should not abort whatever created the thread.  */
  TRY
    {
      btrace_enable (tp, &record_btrace_conf);
    }
  CATCH (error, RETURN_MASK_ERROR)
    {
      warning ("%s", error.message);
    }
  END_CATCH
}
157
158/* Callback function to disable branch tracing for one thread. */
159
static void
record_btrace_disable_callback (void *arg)
{
  /* The cleanup argument is the thread whose tracing we undo.  */
  struct thread_info *tp = arg;

  btrace_disable (tp);
}
169
170/* Enable automatic tracing of new threads. */
171
static void
record_btrace_auto_enable (void)
{
  DEBUG ("attach thread observer");

  /* Remember the observer so record_btrace_auto_disable can detach it
     again.  */
  record_btrace_thread_observer
    = observer_attach_new_thread (record_btrace_enable_warn);
}
180
181/* Disable automatic tracing of new threads. */
182
183static void
184record_btrace_auto_disable (void)
185{
186 /* The observer may have been detached, already. */
187 if (record_btrace_thread_observer == NULL)
188 return;
189
190 DEBUG ("detach thread observer");
191
192 observer_detach_new_thread (record_btrace_thread_observer);
193 record_btrace_thread_observer = NULL;
194}
195
70ad5bff
MM
196/* The record-btrace async event handler function. */
197
static void
record_btrace_handle_async_inferior_event (gdb_client_data data)
{
  /* DATA is unused; simply forward to the generic inferior event
     handler.  */
  inferior_event_handler (INF_REG_EVENT, NULL);
}
203
afedecd3
MM
204/* The to_open method of target record-btrace. */
205
static void
record_btrace_open (const char *args, int from_tty)
{
  struct cleanup *disable_chain;
  struct thread_info *tp;

  DEBUG ("open");

  record_preopen ();

  if (!target_has_execution)
    error (_("The program is not being run."));

  gdb_assert (record_btrace_thread_observer == NULL);

  /* Build a cleanup chain so that tracing is disabled again for every
     thread we enabled if anything below throws.  */
  disable_chain = make_cleanup (null_cleanup, NULL);
  ALL_NON_EXITED_THREADS (tp)
    if (args == NULL || *args == 0 || number_is_in_list (args, tp->num))
      {
	btrace_enable (tp, &record_btrace_conf);

	make_cleanup (record_btrace_disable_callback, tp);
      }

  /* Also trace threads that appear later.  */
  record_btrace_auto_enable ();

  push_target (&record_btrace_ops);

  record_btrace_async_inferior_event_handler
    = create_async_event_handler (record_btrace_handle_async_inferior_event,
				  NULL);
  record_btrace_generating_corefile = 0;

  observer_notify_record_changed (current_inferior (), 1);

  /* Success - keep tracing enabled.  */
  discard_cleanups (disable_chain);
}
243
244/* The to_stop_recording method of target record-btrace. */
245
static void
record_btrace_stop_recording (struct target_ops *self)
{
  struct thread_info *tp;

  DEBUG ("stop recording");

  /* Stop tracing new threads first, then disable tracing for all threads
     that are currently being traced.  */
  record_btrace_auto_disable ();

  ALL_NON_EXITED_THREADS (tp)
    if (tp->btrace.target != NULL)
      btrace_disable (tp);
}
259
260/* The to_close method of target record-btrace. */
261
static void
record_btrace_close (struct target_ops *self)
{
  struct thread_info *tp;

  if (record_btrace_async_inferior_event_handler != NULL)
    delete_async_event_handler (&record_btrace_async_inferior_event_handler);

  /* Make sure automatic recording gets disabled even if we did not stop
     recording before closing the record-btrace target.  */
  record_btrace_auto_disable ();

  /* We should have already stopped recording.
     Tear down btrace in case we have not.  */
  ALL_NON_EXITED_THREADS (tp)
    btrace_teardown (tp);
}
279
b7d2e916
PA
280/* The to_async method of target record-btrace. */
281
282static void
6a3753b3 283record_btrace_async (struct target_ops *ops, int enable)
b7d2e916 284{
6a3753b3 285 if (enable)
b7d2e916
PA
286 mark_async_event_handler (record_btrace_async_inferior_event_handler);
287 else
288 clear_async_event_handler (record_btrace_async_inferior_event_handler);
289
6a3753b3 290 ops->beneath->to_async (ops->beneath, enable);
b7d2e916
PA
291}
292
d33501a5
MM
293/* Adjusts the size and returns a human readable size suffix. */
294
/* Scale *SIZE down by the largest binary unit (GiB, MiB, KiB) that divides
   it evenly and return the matching suffix; return "" and leave *SIZE
   unchanged if no unit divides it.  */

static const char *
record_btrace_adjust_size (unsigned int *size)
{
  unsigned int sz = *size;

  if (sz % (1u << 30) == 0)
    {
      *size = sz >> 30;
      return "GB";
    }

  if (sz % (1u << 20) == 0)
    {
      *size = sz >> 20;
      return "MB";
    }

  if (sz % (1u << 10) == 0)
    {
      *size = sz >> 10;
      return "kB";
    }

  return "";
}
320
321/* Print a BTS configuration. */
322
323static void
324record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
325{
326 const char *suffix;
327 unsigned int size;
328
329 size = conf->size;
330 if (size > 0)
331 {
332 suffix = record_btrace_adjust_size (&size);
333 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
334 }
335}
336
b20a6524
MM
337/* Print an Intel(R) Processor Trace configuration. */
338
static void
record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
{
  const char *suffix;
  unsigned int size;

  size = conf->size;
  if (size > 0)
    {
      /* Scale the size to the largest unit that divides it evenly.  */
      suffix = record_btrace_adjust_size (&size);
      printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
    }
}
352
d33501a5
MM
353/* Print a branch tracing configuration. */
354
355static void
356record_btrace_print_conf (const struct btrace_config *conf)
357{
358 printf_unfiltered (_("Recording format: %s.\n"),
359 btrace_format_string (conf->format));
360
361 switch (conf->format)
362 {
363 case BTRACE_FORMAT_NONE:
364 return;
365
366 case BTRACE_FORMAT_BTS:
367 record_btrace_print_bts_conf (&conf->bts);
368 return;
b20a6524
MM
369
370 case BTRACE_FORMAT_PT:
371 record_btrace_print_pt_conf (&conf->pt);
372 return;
d33501a5
MM
373 }
374
375 internal_error (__FILE__, __LINE__, _("Unkown branch trace format."));
376}
377
afedecd3
MM
378/* The to_info_record method of target record-btrace. */
379
static void
record_btrace_info (struct target_ops *self)
{
  struct btrace_thread_info *btinfo;
  const struct btrace_config *conf;
  struct thread_info *tp;
  unsigned int insns, calls, gaps;

  DEBUG ("info");

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  btinfo = &tp->btrace;

  conf = btrace_conf (btinfo);
  if (conf != NULL)
    record_btrace_print_conf (conf);

  btrace_fetch (tp);

  insns = 0;
  calls = 0;
  gaps = 0;

  if (!btrace_is_empty (tp))
    {
      struct btrace_call_iterator call;
      struct btrace_insn_iterator insn;

      /* Count functions from the last call segment's number.  */
      btrace_call_end (&call, btinfo);
      btrace_call_prev (&call, 1);
      calls = btrace_call_number (&call);

      btrace_insn_end (&insn, btinfo);

      insns = btrace_insn_number (&insn);
      if (insns != 0)
	{
	  /* The last instruction does not really belong to the trace.  */
	  insns -= 1;
	}
      else
	{
	  unsigned int steps;

	  /* Skip gaps at the end: walk backwards until we find an
	     iterator position with a real instruction number.  */
	  do
	    {
	      steps = btrace_insn_prev (&insn, 1);
	      if (steps == 0)
		break;

	      insns = btrace_insn_number (&insn);
	    }
	  while (insns == 0);
	}

      gaps = btinfo->ngaps;
    }

  printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
		       "for thread %d (%s).\n"), insns, calls, gaps,
		     tp->num, target_pid_to_str (tp->ptid));

  if (btrace_is_replaying (tp))
    printf_unfiltered (_("Replay in progress.  At instruction %u.\n"),
		       btrace_insn_number (btinfo->replay));
}
450
31fd9caa
MM
451/* Print a decode error. */
452
static void
btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
			    enum btrace_format format)
{
  const char *errstr;
  int is_error;

  /* Fall back to a generic message for formats/errcodes we don't know.  */
  errstr = _("unknown");
  is_error = 1;

  switch (format)
    {
    default:
      break;

    case BTRACE_FORMAT_BTS:
      switch (errcode)
	{
	default:
	  break;

	case BDE_BTS_OVERFLOW:
	  errstr = _("instruction overflow");
	  break;

	case BDE_BTS_INSN_SIZE:
	  errstr = _("unknown instruction");
	  break;
	}
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      switch (errcode)
	{
	/* These three are informational, not errors; suppress the
	   "decode error" wrapper for them.  */
	case BDE_PT_USER_QUIT:
	  is_error = 0;
	  errstr = _("trace decode cancelled");
	  break;

	case BDE_PT_DISABLED:
	  is_error = 0;
	  errstr = _("disabled");
	  break;

	case BDE_PT_OVERFLOW:
	  is_error = 0;
	  errstr = _("overflow");
	  break;

	default:
	  /* Negative codes come from libipt; translate them.  */
	  if (errcode < 0)
	    errstr = pt_errstr (pt_errcode (errcode));
	  break;
	}
      break;
#endif /* defined (HAVE_LIBIPT) */
    }

  ui_out_text (uiout, _("["));
  if (is_error)
    {
      ui_out_text (uiout, _("decode error ("));
      ui_out_field_int (uiout, "errcode", errcode);
      ui_out_text (uiout, _("): "));
    }
  ui_out_text (uiout, errstr);
  ui_out_text (uiout, _("]\n"));
}
522
afedecd3
MM
523/* Print an unsigned int. */
524
static void
ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
{
  /* ui-out has no native unsigned field printer; format it ourselves.  */
  ui_out_field_fmt (uiout, fld, "%u", val);
}
530
531/* Disassemble a section of the recorded instruction trace. */
532
static void
btrace_insn_history (struct ui_out *uiout,
		     const struct btrace_thread_info *btinfo,
		     const struct btrace_insn_iterator *begin,
		     const struct btrace_insn_iterator *end, int flags)
{
  struct gdbarch *gdbarch;
  struct btrace_insn_iterator it;

  DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
	 btrace_insn_number (end));

  gdbarch = target_gdbarch ();

  /* Iterate over [BEGIN; END), printing one line per instruction.  */
  for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
    {
      const struct btrace_insn *insn;

      insn = btrace_insn_get (&it);

      /* A NULL instruction indicates a gap in the trace.  */
      if (insn == NULL)
	{
	  const struct btrace_config *conf;

	  conf = btrace_conf (btinfo);

	  /* We have trace so we must have a configuration.  */
	  gdb_assert (conf != NULL);

	  btrace_ui_out_decode_error (uiout, it.function->errcode,
				      conf->format);
	}
      else
	{
	  char prefix[4];

	  /* We may add a speculation prefix later.  We use the same space
	     that is used for the pc prefix.  */
	  if ((flags & DISASSEMBLY_OMIT_PC) == 0)
	    strncpy (prefix, pc_prefix (insn->pc), 3);
	  else
	    {
	      prefix[0] = ' ';
	      prefix[1] = ' ';
	      prefix[2] = ' ';
	    }
	  prefix[3] = 0;

	  /* Print the instruction index.  */
	  ui_out_field_uint (uiout, "index", btrace_insn_number (&it));
	  ui_out_text (uiout, "\t");

	  /* Indicate speculative execution by a leading '?'.  */
	  if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
	    prefix[0] = '?';

	  /* Print the prefix; we tell gdb_disassembly below to omit it.  */
	  ui_out_field_fmt (uiout, "prefix", "%s", prefix);

	  /* Disassembly with '/m' flag may not produce the expected result.
	     See PR gdb/11833.  */
	  gdb_disassembly (gdbarch, uiout, NULL, flags | DISASSEMBLY_OMIT_PC,
			   1, insn->pc, insn->pc + 1);
	}
    }
}
600
601/* The to_insn_history method of target record-btrace. */
602
static void
record_btrace_insn_history (struct target_ops *self, int size, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
						       "insn history");
  /* SIZE's sign gives the direction; its magnitude the amount of
     context to show.  */
  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  btinfo = require_btrace ();
  history = btinfo->insn_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("insn-history (0x%x): %d", flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
	 start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
	begin = *replay;
      else
	btrace_insn_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
	 expand in the other direction, as well, to fill up any remaining
	 context.  */
      end = begin;
      if (size < 0)
	{
	  /* We want the current position covered, as well.  */
	  covered = btrace_insn_next (&end, 1);
	  covered += btrace_insn_prev (&begin, context - covered);
	  covered += btrace_insn_next (&end, context - covered);
	}
      else
	{
	  covered = btrace_insn_next (&end, context);
	  covered += btrace_insn_prev (&begin, context - covered);
	}
    }
  else
    {
      /* Continue from the previously shown range in the requested
	 direction.  */
      begin = history->begin;
      end = history->end;

      DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
	     btrace_insn_number (&begin), btrace_insn_number (&end));

      if (size < 0)
	{
	  end = begin;
	  covered = btrace_insn_prev (&begin, context);
	}
      else
	{
	  begin = end;
	  covered = btrace_insn_next (&end, context);
	}
    }

  if (covered > 0)
    btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
	printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
	printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  /* Remember the range for the next invocation.  */
  btrace_set_insn_history (btinfo, &begin, &end);
  do_cleanups (uiout_cleanup);
}
686
687/* The to_insn_history_range method of target record-btrace. */
688
static void
record_btrace_insn_history_range (struct target_ops *self,
				  ULONGEST from, ULONGEST to, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
						       "insn history");
  low = from;
  high = to;

  DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);

  /* Check for wrap-arounds: the ULONGEST arguments must fit into the
     unsigned int instruction numbers.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_insn_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_insn_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_insn_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_insn_next (&end, 1);
    }

  btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_insn_history (btinfo, &begin, &end);

  do_cleanups (uiout_cleanup);
}
739
740/* The to_insn_history_from method of target record-btrace. */
741
static void
record_btrace_insn_history_from (struct target_ops *self,
				 ULONGEST from, int size, int flags)
{
  ULONGEST begin, end, context;

  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  /* Translate (FROM, SIZE) into an inclusive [BEGIN; END] range and
     delegate to the range method.  */
  if (size < 0)
    {
      end = from;

      /* Clamp at zero instead of underflowing.  */
      if (from < context)
	begin = 0;
      else
	begin = from - context + 1;
    }
  else
    {
      begin = from;
      end = from + context - 1;

      /* Check for wrap-around.  */
      if (end < begin)
	end = ULONGEST_MAX;
    }

  record_btrace_insn_history_range (self, begin, end, flags);
}
773
774/* Print the instruction number range for a function call history line. */
775
static void
btrace_call_history_insn_range (struct ui_out *uiout,
				const struct btrace_function *bfun)
{
  unsigned int begin, end, size;

  size = VEC_length (btrace_insn_s, bfun->insn);
  gdb_assert (size > 0);

  /* The range is inclusive: [insn_offset; insn_offset + size - 1].  */
  begin = bfun->insn_offset;
  end = begin + size - 1;

  ui_out_field_uint (uiout, "insn begin", begin);
  ui_out_text (uiout, ",");
  ui_out_field_uint (uiout, "insn end", end);
}
792
ce0dfbea
MM
793/* Compute the lowest and highest source line for the instructions in BFUN
794 and return them in PBEGIN and PEND.
795 Ignore instructions that can't be mapped to BFUN, e.g. instructions that
796 result from inlining or macro expansion. */
797
static void
btrace_compute_src_line_range (const struct btrace_function *bfun,
			       int *pbegin, int *pend)
{
  struct btrace_insn *insn;
  struct symtab *symtab;
  struct symbol *sym;
  unsigned int idx;
  int begin, end;

  /* Sentinels: if no instruction maps to BFUN's symtab, the caller sees
     end < begin and knows the range is empty.  */
  begin = INT_MAX;
  end = INT_MIN;

  sym = bfun->sym;
  if (sym == NULL)
    goto out;

  symtab = symbol_symtab (sym);

  for (idx = 0; VEC_iterate (btrace_insn_s, bfun->insn, idx, insn); ++idx)
    {
      struct symtab_and_line sal;

      sal = find_pc_line (insn->pc, 0);
      /* Skip lines from other symtabs (inlining, macro expansion).  */
      if (sal.symtab != symtab || sal.line == 0)
	continue;

      begin = min (begin, sal.line);
      end = max (end, sal.line);
    }

 out:
  *pbegin = begin;
  *pend = end;
}
833
afedecd3
MM
834/* Print the source line information for a function call history line. */
835
static void
btrace_call_history_src_line (struct ui_out *uiout,
			      const struct btrace_function *bfun)
{
  struct symbol *sym;
  int begin, end;

  sym = bfun->sym;
  if (sym == NULL)
    return;

  ui_out_field_string (uiout, "file",
		       symtab_to_filename_for_display (symbol_symtab (sym)));

  btrace_compute_src_line_range (bfun, &begin, &end);
  /* end < begin means no line information was found.  */
  if (end < begin)
    return;

  ui_out_text (uiout, ":");
  ui_out_field_int (uiout, "min line", begin);

  /* Only print a range when it spans more than one line.  */
  if (end == begin)
    return;

  ui_out_text (uiout, ",");
  ui_out_field_int (uiout, "max line", end);
}
863
0b722aec
MM
864/* Get the name of a branch trace function. */
865
866static const char *
867btrace_get_bfun_name (const struct btrace_function *bfun)
868{
869 struct minimal_symbol *msym;
870 struct symbol *sym;
871
872 if (bfun == NULL)
873 return "??";
874
875 msym = bfun->msym;
876 sym = bfun->sym;
877
878 if (sym != NULL)
879 return SYMBOL_PRINT_NAME (sym);
880 else if (msym != NULL)
efd66ac6 881 return MSYMBOL_PRINT_NAME (msym);
0b722aec
MM
882 else
883 return "??";
884}
885
afedecd3
MM
886/* Disassemble a section of the recorded function trace. */
887
static void
btrace_call_history (struct ui_out *uiout,
		     const struct btrace_thread_info *btinfo,
		     const struct btrace_call_iterator *begin,
		     const struct btrace_call_iterator *end,
		     enum record_print_flag flags)
{
  struct btrace_call_iterator it;

  DEBUG ("ftrace (0x%x): [%u; %u)", flags, btrace_call_number (begin),
	 btrace_call_number (end));

  /* Iterate over [BEGIN; END), one line per call segment.  */
  for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
    {
      const struct btrace_function *bfun;
      struct minimal_symbol *msym;
      struct symbol *sym;

      bfun = btrace_call_get (&it);
      sym = bfun->sym;
      msym = bfun->msym;

      /* Print the function index.  */
      ui_out_field_uint (uiout, "index", bfun->number);
      ui_out_text (uiout, "\t");

      /* Indicate gaps in the trace.  */
      if (bfun->errcode != 0)
	{
	  const struct btrace_config *conf;

	  conf = btrace_conf (btinfo);

	  /* We have trace so we must have a configuration.  */
	  gdb_assert (conf != NULL);

	  btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);

	  continue;
	}

      /* Indent by call depth when requested.  */
      if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
	{
	  int level = bfun->level + btinfo->level, i;

	  for (i = 0; i < level; ++i)
	    ui_out_text (uiout, "  ");
	}

      if (sym != NULL)
	ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (sym));
      else if (msym != NULL)
	ui_out_field_string (uiout, "function", MSYMBOL_PRINT_NAME (msym));
      else if (!ui_out_is_mi_like_p (uiout))
	ui_out_field_string (uiout, "function", "??");

      if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
	{
	  ui_out_text (uiout, _("\tinst "));
	  btrace_call_history_insn_range (uiout, bfun);
	}

      if ((flags & RECORD_PRINT_SRC_LINE) != 0)
	{
	  ui_out_text (uiout, _("\tat "));
	  btrace_call_history_src_line (uiout, bfun);
	}

      ui_out_text (uiout, "\n");
    }
}
959
960/* The to_call_history method of target record-btrace. */
961
static void
record_btrace_call_history (struct target_ops *self, int size, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
						       "insn history");
  /* SIZE's sign gives the direction; its magnitude the amount of
     context to show.  */
  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  btinfo = require_btrace ();
  history = btinfo->call_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("call-history (0x%x): %d", flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
	 start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
	{
	  begin.function = replay->function;
	  begin.btinfo = btinfo;
	}
      else
	btrace_call_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
	 expand in the other direction, as well, to fill up any remaining
	 context.  */
      end = begin;
      if (size < 0)
	{
	  /* We want the current position covered, as well.  */
	  covered = btrace_call_next (&end, 1);
	  covered += btrace_call_prev (&begin, context - covered);
	  covered += btrace_call_next (&end, context - covered);
	}
      else
	{
	  covered = btrace_call_next (&end, context);
	  covered += btrace_call_prev (&begin, context - covered);
	}
    }
  else
    {
      /* Continue from the previously shown range in the requested
	 direction.  */
      begin = history->begin;
      end = history->end;

      DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", flags, size,
	     btrace_call_number (&begin), btrace_call_number (&end));

      if (size < 0)
	{
	  end = begin;
	  covered = btrace_call_prev (&begin, context);
	}
      else
	{
	  begin = end;
	  covered = btrace_call_next (&end, context);
	}
    }

  if (covered > 0)
    btrace_call_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
	printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
	printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  /* Remember the range for the next invocation.  */
  btrace_set_call_history (btinfo, &begin, &end);
  do_cleanups (uiout_cleanup);
}
1048
1049/* The to_call_history_range method of target record-btrace. */
1050
static void
record_btrace_call_history_range (struct target_ops *self,
				  ULONGEST from, ULONGEST to, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
						       "func history");
  low = from;
  high = to;

  DEBUG ("call-history (0x%x): [%u; %u)", flags, low, high);

  /* Check for wrap-arounds: the ULONGEST arguments must fit into the
     unsigned int call numbers.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_call_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_call_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_call_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_call_next (&end, 1);
    }

  btrace_call_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_call_history (btinfo, &begin, &end);

  do_cleanups (uiout_cleanup);
}
1101
1102/* The to_call_history_from method of target record-btrace. */
1103
static void
record_btrace_call_history_from (struct target_ops *self,
				 ULONGEST from, int size, int flags)
{
  ULONGEST begin, end, context;

  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  /* Translate (FROM, SIZE) into an inclusive [BEGIN; END] range and
     delegate to the range method.  */
  if (size < 0)
    {
      end = from;

      /* Clamp at zero instead of underflowing.  */
      if (from < context)
	begin = 0;
      else
	begin = from - context + 1;
    }
  else
    {
      begin = from;
      end = from + context - 1;

      /* Check for wrap-around.  */
      if (end < begin)
	end = ULONGEST_MAX;
    }

  record_btrace_call_history_range (self, begin, end, flags);
}
1135
07bbe694
MM
1136/* The to_record_is_replaying method of target record-btrace. */
1137
1138static int
a52eab48 1139record_btrace_is_replaying (struct target_ops *self, ptid_t ptid)
07bbe694
MM
1140{
1141 struct thread_info *tp;
1142
034f788c 1143 ALL_NON_EXITED_THREADS (tp)
a52eab48 1144 if (ptid_match (tp->ptid, ptid) && btrace_is_replaying (tp))
07bbe694
MM
1145 return 1;
1146
1147 return 0;
1148}
1149
7ff27e9b
MM
1150/* The to_record_will_replay method of target record-btrace. */
1151
1152static int
1153record_btrace_will_replay (struct target_ops *self, ptid_t ptid, int dir)
1154{
1155 return dir == EXEC_REVERSE || record_btrace_is_replaying (self, ptid);
1156}
1157
633785ff
MM
1158/* The to_xfer_partial method of target record-btrace. */
1159
9b409511 1160static enum target_xfer_status
633785ff
MM
1161record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
1162 const char *annex, gdb_byte *readbuf,
1163 const gdb_byte *writebuf, ULONGEST offset,
9b409511 1164 ULONGEST len, ULONGEST *xfered_len)
633785ff
MM
1165{
1166 struct target_ops *t;
1167
1168 /* Filter out requests that don't make sense during replay. */
67b5c0c1 1169 if (replay_memory_access == replay_memory_access_read_only
aef92902 1170 && !record_btrace_generating_corefile
4d10e986 1171 && record_btrace_is_replaying (ops, inferior_ptid))
633785ff
MM
1172 {
1173 switch (object)
1174 {
1175 case TARGET_OBJECT_MEMORY:
1176 {
1177 struct target_section *section;
1178
1179 /* We do not allow writing memory in general. */
1180 if (writebuf != NULL)
9b409511
YQ
1181 {
1182 *xfered_len = len;
bc113b4e 1183 return TARGET_XFER_UNAVAILABLE;
9b409511 1184 }
633785ff
MM
1185
1186 /* We allow reading readonly memory. */
1187 section = target_section_by_addr (ops, offset);
1188 if (section != NULL)
1189 {
1190 /* Check if the section we found is readonly. */
1191 if ((bfd_get_section_flags (section->the_bfd_section->owner,
1192 section->the_bfd_section)
1193 & SEC_READONLY) != 0)
1194 {
1195 /* Truncate the request to fit into this section. */
1196 len = min (len, section->endaddr - offset);
1197 break;
1198 }
1199 }
1200
9b409511 1201 *xfered_len = len;
bc113b4e 1202 return TARGET_XFER_UNAVAILABLE;
633785ff
MM
1203 }
1204 }
1205 }
1206
1207 /* Forward the request. */
e75fdfca
TT
1208 ops = ops->beneath;
1209 return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
1210 offset, len, xfered_len);
633785ff
MM
1211}
1212
1213/* The to_insert_breakpoint method of target record-btrace. */
1214
1215static int
1216record_btrace_insert_breakpoint (struct target_ops *ops,
1217 struct gdbarch *gdbarch,
1218 struct bp_target_info *bp_tgt)
1219{
67b5c0c1
MM
1220 const char *old;
1221 int ret;
633785ff
MM
1222
1223 /* Inserting breakpoints requires accessing memory. Allow it for the
1224 duration of this function. */
67b5c0c1
MM
1225 old = replay_memory_access;
1226 replay_memory_access = replay_memory_access_read_write;
633785ff
MM
1227
1228 ret = 0;
492d29ea
PA
1229 TRY
1230 {
1231 ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);
1232 }
492d29ea
PA
1233 CATCH (except, RETURN_MASK_ALL)
1234 {
6c63c96a 1235 replay_memory_access = old;
492d29ea
PA
1236 throw_exception (except);
1237 }
1238 END_CATCH
6c63c96a 1239 replay_memory_access = old;
633785ff
MM
1240
1241 return ret;
1242}
1243
1244/* The to_remove_breakpoint method of target record-btrace. */
1245
1246static int
1247record_btrace_remove_breakpoint (struct target_ops *ops,
1248 struct gdbarch *gdbarch,
1249 struct bp_target_info *bp_tgt)
1250{
67b5c0c1
MM
1251 const char *old;
1252 int ret;
633785ff
MM
1253
1254 /* Removing breakpoints requires accessing memory. Allow it for the
1255 duration of this function. */
67b5c0c1
MM
1256 old = replay_memory_access;
1257 replay_memory_access = replay_memory_access_read_write;
633785ff
MM
1258
1259 ret = 0;
492d29ea
PA
1260 TRY
1261 {
1262 ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt);
1263 }
492d29ea
PA
1264 CATCH (except, RETURN_MASK_ALL)
1265 {
6c63c96a 1266 replay_memory_access = old;
492d29ea
PA
1267 throw_exception (except);
1268 }
1269 END_CATCH
6c63c96a 1270 replay_memory_access = old;
633785ff
MM
1271
1272 return ret;
1273}
1274
1f3ef581
MM
1275/* The to_fetch_registers method of target record-btrace. */
1276
1277static void
1278record_btrace_fetch_registers (struct target_ops *ops,
1279 struct regcache *regcache, int regno)
1280{
1281 struct btrace_insn_iterator *replay;
1282 struct thread_info *tp;
1283
1284 tp = find_thread_ptid (inferior_ptid);
1285 gdb_assert (tp != NULL);
1286
1287 replay = tp->btrace.replay;
aef92902 1288 if (replay != NULL && !record_btrace_generating_corefile)
1f3ef581
MM
1289 {
1290 const struct btrace_insn *insn;
1291 struct gdbarch *gdbarch;
1292 int pcreg;
1293
1294 gdbarch = get_regcache_arch (regcache);
1295 pcreg = gdbarch_pc_regnum (gdbarch);
1296 if (pcreg < 0)
1297 return;
1298
1299 /* We can only provide the PC register. */
1300 if (regno >= 0 && regno != pcreg)
1301 return;
1302
1303 insn = btrace_insn_get (replay);
1304 gdb_assert (insn != NULL);
1305
1306 regcache_raw_supply (regcache, regno, &insn->pc);
1307 }
1308 else
1309 {
e75fdfca 1310 struct target_ops *t = ops->beneath;
1f3ef581 1311
e75fdfca 1312 t->to_fetch_registers (t, regcache, regno);
1f3ef581
MM
1313 }
1314}
1315
1316/* The to_store_registers method of target record-btrace. */
1317
1318static void
1319record_btrace_store_registers (struct target_ops *ops,
1320 struct regcache *regcache, int regno)
1321{
1322 struct target_ops *t;
1323
a52eab48 1324 if (!record_btrace_generating_corefile
4d10e986
MM
1325 && record_btrace_is_replaying (ops, inferior_ptid))
1326 error (_("Cannot write registers while replaying."));
1f3ef581
MM
1327
1328 gdb_assert (may_write_registers != 0);
1329
e75fdfca
TT
1330 t = ops->beneath;
1331 t->to_store_registers (t, regcache, regno);
1f3ef581
MM
1332}
1333
1334/* The to_prepare_to_store method of target record-btrace. */
1335
1336static void
1337record_btrace_prepare_to_store (struct target_ops *ops,
1338 struct regcache *regcache)
1339{
1340 struct target_ops *t;
1341
a52eab48 1342 if (!record_btrace_generating_corefile
4d10e986 1343 && record_btrace_is_replaying (ops, inferior_ptid))
1f3ef581
MM
1344 return;
1345
e75fdfca
TT
1346 t = ops->beneath;
1347 t->to_prepare_to_store (t, regcache);
1f3ef581
MM
1348}
1349
/* The branch trace frame cache.  */

struct btrace_frame_cache
{
  /* The thread whose execution history is unwound.  */
  struct thread_info *tp;

  /* The frame info; serves as key in the bfcache hash table.  */
  struct frame_info *frame;

  /* The branch trace function segment for this frame.  */
  const struct btrace_function *bfun;
};
1363
1364/* A struct btrace_frame_cache hash table indexed by NEXT. */
1365
1366static htab_t bfcache;
1367
1368/* hash_f for htab_create_alloc of bfcache. */
1369
1370static hashval_t
1371bfcache_hash (const void *arg)
1372{
1373 const struct btrace_frame_cache *cache = arg;
1374
1375 return htab_hash_pointer (cache->frame);
1376}
1377
1378/* eq_f for htab_create_alloc of bfcache. */
1379
1380static int
1381bfcache_eq (const void *arg1, const void *arg2)
1382{
1383 const struct btrace_frame_cache *cache1 = arg1;
1384 const struct btrace_frame_cache *cache2 = arg2;
1385
1386 return cache1->frame == cache2->frame;
1387}
1388
1389/* Create a new btrace frame cache. */
1390
1391static struct btrace_frame_cache *
1392bfcache_new (struct frame_info *frame)
1393{
1394 struct btrace_frame_cache *cache;
1395 void **slot;
1396
1397 cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
1398 cache->frame = frame;
1399
1400 slot = htab_find_slot (bfcache, cache, INSERT);
1401 gdb_assert (*slot == NULL);
1402 *slot = cache;
1403
1404 return cache;
1405}
1406
1407/* Extract the branch trace function from a branch trace frame. */
1408
1409static const struct btrace_function *
1410btrace_get_frame_function (struct frame_info *frame)
1411{
1412 const struct btrace_frame_cache *cache;
1413 const struct btrace_function *bfun;
1414 struct btrace_frame_cache pattern;
1415 void **slot;
1416
1417 pattern.frame = frame;
1418
1419 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1420 if (slot == NULL)
1421 return NULL;
1422
1423 cache = *slot;
1424 return cache->bfun;
1425}
1426
cecac1ab
MM
1427/* Implement stop_reason method for record_btrace_frame_unwind. */
1428
1429static enum unwind_stop_reason
1430record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1431 void **this_cache)
1432{
0b722aec
MM
1433 const struct btrace_frame_cache *cache;
1434 const struct btrace_function *bfun;
1435
1436 cache = *this_cache;
1437 bfun = cache->bfun;
1438 gdb_assert (bfun != NULL);
1439
1440 if (bfun->up == NULL)
1441 return UNWIND_UNAVAILABLE;
1442
1443 return UNWIND_NO_REASON;
cecac1ab
MM
1444}
1445
1446/* Implement this_id method for record_btrace_frame_unwind. */
1447
1448static void
1449record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
1450 struct frame_id *this_id)
1451{
0b722aec
MM
1452 const struct btrace_frame_cache *cache;
1453 const struct btrace_function *bfun;
1454 CORE_ADDR code, special;
1455
1456 cache = *this_cache;
1457
1458 bfun = cache->bfun;
1459 gdb_assert (bfun != NULL);
1460
1461 while (bfun->segment.prev != NULL)
1462 bfun = bfun->segment.prev;
1463
1464 code = get_frame_func (this_frame);
1465 special = bfun->number;
1466
1467 *this_id = frame_id_build_unavailable_stack_special (code, special);
1468
1469 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1470 btrace_get_bfun_name (cache->bfun),
1471 core_addr_to_string_nz (this_id->code_addr),
1472 core_addr_to_string_nz (this_id->special_addr));
cecac1ab
MM
1473}
1474
1475/* Implement prev_register method for record_btrace_frame_unwind. */
1476
1477static struct value *
1478record_btrace_frame_prev_register (struct frame_info *this_frame,
1479 void **this_cache,
1480 int regnum)
1481{
0b722aec
MM
1482 const struct btrace_frame_cache *cache;
1483 const struct btrace_function *bfun, *caller;
1484 const struct btrace_insn *insn;
1485 struct gdbarch *gdbarch;
1486 CORE_ADDR pc;
1487 int pcreg;
1488
1489 gdbarch = get_frame_arch (this_frame);
1490 pcreg = gdbarch_pc_regnum (gdbarch);
1491 if (pcreg < 0 || regnum != pcreg)
1492 throw_error (NOT_AVAILABLE_ERROR,
1493 _("Registers are not available in btrace record history"));
1494
1495 cache = *this_cache;
1496 bfun = cache->bfun;
1497 gdb_assert (bfun != NULL);
1498
1499 caller = bfun->up;
1500 if (caller == NULL)
1501 throw_error (NOT_AVAILABLE_ERROR,
1502 _("No caller in btrace record history"));
1503
1504 if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
1505 {
1506 insn = VEC_index (btrace_insn_s, caller->insn, 0);
1507 pc = insn->pc;
1508 }
1509 else
1510 {
1511 insn = VEC_last (btrace_insn_s, caller->insn);
1512 pc = insn->pc;
1513
1514 pc += gdb_insn_length (gdbarch, pc);
1515 }
1516
1517 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1518 btrace_get_bfun_name (bfun), bfun->level,
1519 core_addr_to_string_nz (pc));
1520
1521 return frame_unwind_got_address (this_frame, regnum, pc);
cecac1ab
MM
1522}
1523
1524/* Implement sniffer method for record_btrace_frame_unwind. */
1525
1526static int
1527record_btrace_frame_sniffer (const struct frame_unwind *self,
1528 struct frame_info *this_frame,
1529 void **this_cache)
1530{
0b722aec
MM
1531 const struct btrace_function *bfun;
1532 struct btrace_frame_cache *cache;
cecac1ab 1533 struct thread_info *tp;
0b722aec 1534 struct frame_info *next;
cecac1ab
MM
1535
1536 /* THIS_FRAME does not contain a reference to its thread. */
1537 tp = find_thread_ptid (inferior_ptid);
1538 gdb_assert (tp != NULL);
1539
0b722aec
MM
1540 bfun = NULL;
1541 next = get_next_frame (this_frame);
1542 if (next == NULL)
1543 {
1544 const struct btrace_insn_iterator *replay;
1545
1546 replay = tp->btrace.replay;
1547 if (replay != NULL)
1548 bfun = replay->function;
1549 }
1550 else
1551 {
1552 const struct btrace_function *callee;
1553
1554 callee = btrace_get_frame_function (next);
1555 if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1556 bfun = callee->up;
1557 }
1558
1559 if (bfun == NULL)
1560 return 0;
1561
1562 DEBUG ("[frame] sniffed frame for %s on level %d",
1563 btrace_get_bfun_name (bfun), bfun->level);
1564
1565 /* This is our frame. Initialize the frame cache. */
1566 cache = bfcache_new (this_frame);
1567 cache->tp = tp;
1568 cache->bfun = bfun;
1569
1570 *this_cache = cache;
1571 return 1;
1572}
1573
1574/* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1575
1576static int
1577record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
1578 struct frame_info *this_frame,
1579 void **this_cache)
1580{
1581 const struct btrace_function *bfun, *callee;
1582 struct btrace_frame_cache *cache;
1583 struct frame_info *next;
1584
1585 next = get_next_frame (this_frame);
1586 if (next == NULL)
1587 return 0;
1588
1589 callee = btrace_get_frame_function (next);
1590 if (callee == NULL)
1591 return 0;
1592
1593 if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1594 return 0;
1595
1596 bfun = callee->up;
1597 if (bfun == NULL)
1598 return 0;
1599
1600 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1601 btrace_get_bfun_name (bfun), bfun->level);
1602
1603 /* This is our frame. Initialize the frame cache. */
1604 cache = bfcache_new (this_frame);
1605 cache->tp = find_thread_ptid (inferior_ptid);
1606 cache->bfun = bfun;
1607
1608 *this_cache = cache;
1609 return 1;
1610}
1611
1612static void
1613record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
1614{
1615 struct btrace_frame_cache *cache;
1616 void **slot;
1617
1618 cache = this_cache;
1619
1620 slot = htab_find_slot (bfcache, cache, NO_INSERT);
1621 gdb_assert (slot != NULL);
1622
1623 htab_remove_elt (bfcache, cache);
cecac1ab
MM
1624}
1625
1626/* btrace recording does not store previous memory content, neither the stack
1627 frames content. Any unwinding would return errorneous results as the stack
1628 contents no longer matches the changed PC value restored from history.
1629 Therefore this unwinder reports any possibly unwound registers as
1630 <unavailable>. */
1631
0b722aec 1632const struct frame_unwind record_btrace_frame_unwind =
cecac1ab
MM
1633{
1634 NORMAL_FRAME,
1635 record_btrace_frame_unwind_stop_reason,
1636 record_btrace_frame_this_id,
1637 record_btrace_frame_prev_register,
1638 NULL,
0b722aec
MM
1639 record_btrace_frame_sniffer,
1640 record_btrace_frame_dealloc_cache
1641};
1642
1643const struct frame_unwind record_btrace_tailcall_frame_unwind =
1644{
1645 TAILCALL_FRAME,
1646 record_btrace_frame_unwind_stop_reason,
1647 record_btrace_frame_this_id,
1648 record_btrace_frame_prev_register,
1649 NULL,
1650 record_btrace_tailcall_frame_sniffer,
1651 record_btrace_frame_dealloc_cache
cecac1ab 1652};
b2f4cfde 1653
ac01945b
TT
1654/* Implement the to_get_unwinder method. */
1655
1656static const struct frame_unwind *
1657record_btrace_to_get_unwinder (struct target_ops *self)
1658{
1659 return &record_btrace_frame_unwind;
1660}
1661
1662/* Implement the to_get_tailcall_unwinder method. */
1663
1664static const struct frame_unwind *
1665record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
1666{
1667 return &record_btrace_tailcall_frame_unwind;
1668}
1669
987e68b1
MM
1670/* Return a human-readable string for FLAG. */
1671
1672static const char *
1673btrace_thread_flag_to_str (enum btrace_thread_flag flag)
1674{
1675 switch (flag)
1676 {
1677 case BTHR_STEP:
1678 return "step";
1679
1680 case BTHR_RSTEP:
1681 return "reverse-step";
1682
1683 case BTHR_CONT:
1684 return "cont";
1685
1686 case BTHR_RCONT:
1687 return "reverse-cont";
1688
1689 case BTHR_STOP:
1690 return "stop";
1691 }
1692
1693 return "<invalid>";
1694}
1695
52834460
MM
1696/* Indicate that TP should be resumed according to FLAG. */
1697
1698static void
1699record_btrace_resume_thread (struct thread_info *tp,
1700 enum btrace_thread_flag flag)
1701{
1702 struct btrace_thread_info *btinfo;
1703
987e68b1
MM
1704 DEBUG ("resuming thread %d (%s): %x (%s)", tp->num,
1705 target_pid_to_str (tp->ptid), flag, btrace_thread_flag_to_str (flag));
52834460
MM
1706
1707 btinfo = &tp->btrace;
1708
52834460
MM
1709 /* Fetch the latest branch trace. */
1710 btrace_fetch (tp);
1711
0ca912df
MM
1712 /* A resume request overwrites a preceding resume or stop request. */
1713 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
52834460
MM
1714 btinfo->flags |= flag;
1715}
1716
ec71cc2f
MM
1717/* Get the current frame for TP. */
1718
1719static struct frame_info *
1720get_thread_current_frame (struct thread_info *tp)
1721{
1722 struct frame_info *frame;
1723 ptid_t old_inferior_ptid;
1724 int executing;
1725
1726 /* Set INFERIOR_PTID, which is implicitly used by get_current_frame. */
1727 old_inferior_ptid = inferior_ptid;
1728 inferior_ptid = tp->ptid;
1729
1730 /* Clear the executing flag to allow changes to the current frame.
1731 We are not actually running, yet. We just started a reverse execution
1732 command or a record goto command.
1733 For the latter, EXECUTING is false and this has no effect.
1734 For the former, EXECUTING is true and we're in to_wait, about to
1735 move the thread. Since we need to recompute the stack, we temporarily
1736 set EXECUTING to flase. */
1737 executing = is_executing (inferior_ptid);
1738 set_executing (inferior_ptid, 0);
1739
1740 frame = NULL;
1741 TRY
1742 {
1743 frame = get_current_frame ();
1744 }
1745 CATCH (except, RETURN_MASK_ALL)
1746 {
1747 /* Restore the previous execution state. */
1748 set_executing (inferior_ptid, executing);
1749
1750 /* Restore the previous inferior_ptid. */
1751 inferior_ptid = old_inferior_ptid;
1752
1753 throw_exception (except);
1754 }
1755 END_CATCH
1756
1757 /* Restore the previous execution state. */
1758 set_executing (inferior_ptid, executing);
1759
1760 /* Restore the previous inferior_ptid. */
1761 inferior_ptid = old_inferior_ptid;
1762
1763 return frame;
1764}
1765
52834460
MM
1766/* Start replaying a thread. */
1767
1768static struct btrace_insn_iterator *
1769record_btrace_start_replaying (struct thread_info *tp)
1770{
52834460
MM
1771 struct btrace_insn_iterator *replay;
1772 struct btrace_thread_info *btinfo;
52834460
MM
1773
1774 btinfo = &tp->btrace;
1775 replay = NULL;
1776
1777 /* We can't start replaying without trace. */
1778 if (btinfo->begin == NULL)
1779 return NULL;
1780
52834460
MM
1781 /* GDB stores the current frame_id when stepping in order to detects steps
1782 into subroutines.
1783 Since frames are computed differently when we're replaying, we need to
1784 recompute those stored frames and fix them up so we can still detect
1785 subroutines after we started replaying. */
492d29ea 1786 TRY
52834460
MM
1787 {
1788 struct frame_info *frame;
1789 struct frame_id frame_id;
1790 int upd_step_frame_id, upd_step_stack_frame_id;
1791
1792 /* The current frame without replaying - computed via normal unwind. */
ec71cc2f 1793 frame = get_thread_current_frame (tp);
52834460
MM
1794 frame_id = get_frame_id (frame);
1795
1796 /* Check if we need to update any stepping-related frame id's. */
1797 upd_step_frame_id = frame_id_eq (frame_id,
1798 tp->control.step_frame_id);
1799 upd_step_stack_frame_id = frame_id_eq (frame_id,
1800 tp->control.step_stack_frame_id);
1801
1802 /* We start replaying at the end of the branch trace. This corresponds
1803 to the current instruction. */
8d749320 1804 replay = XNEW (struct btrace_insn_iterator);
52834460
MM
1805 btrace_insn_end (replay, btinfo);
1806
31fd9caa
MM
1807 /* Skip gaps at the end of the trace. */
1808 while (btrace_insn_get (replay) == NULL)
1809 {
1810 unsigned int steps;
1811
1812 steps = btrace_insn_prev (replay, 1);
1813 if (steps == 0)
1814 error (_("No trace."));
1815 }
1816
52834460
MM
1817 /* We're not replaying, yet. */
1818 gdb_assert (btinfo->replay == NULL);
1819 btinfo->replay = replay;
1820
1821 /* Make sure we're not using any stale registers. */
1822 registers_changed_ptid (tp->ptid);
1823
1824 /* The current frame with replaying - computed via btrace unwind. */
ec71cc2f 1825 frame = get_thread_current_frame (tp);
52834460
MM
1826 frame_id = get_frame_id (frame);
1827
1828 /* Replace stepping related frames where necessary. */
1829 if (upd_step_frame_id)
1830 tp->control.step_frame_id = frame_id;
1831 if (upd_step_stack_frame_id)
1832 tp->control.step_stack_frame_id = frame_id;
1833 }
492d29ea 1834 CATCH (except, RETURN_MASK_ALL)
52834460
MM
1835 {
1836 xfree (btinfo->replay);
1837 btinfo->replay = NULL;
1838
1839 registers_changed_ptid (tp->ptid);
1840
1841 throw_exception (except);
1842 }
492d29ea 1843 END_CATCH
52834460
MM
1844
1845 return replay;
1846}
1847
1848/* Stop replaying a thread. */
1849
1850static void
1851record_btrace_stop_replaying (struct thread_info *tp)
1852{
1853 struct btrace_thread_info *btinfo;
1854
1855 btinfo = &tp->btrace;
1856
1857 xfree (btinfo->replay);
1858 btinfo->replay = NULL;
1859
1860 /* Make sure we're not leaving any stale registers. */
1861 registers_changed_ptid (tp->ptid);
1862}
1863
e3cfc1c7
MM
1864/* Stop replaying TP if it is at the end of its execution history. */
1865
1866static void
1867record_btrace_stop_replaying_at_end (struct thread_info *tp)
1868{
1869 struct btrace_insn_iterator *replay, end;
1870 struct btrace_thread_info *btinfo;
1871
1872 btinfo = &tp->btrace;
1873 replay = btinfo->replay;
1874
1875 if (replay == NULL)
1876 return;
1877
1878 btrace_insn_end (&end, btinfo);
1879
1880 if (btrace_insn_cmp (replay, &end) == 0)
1881 record_btrace_stop_replaying (tp);
1882}
1883
b2f4cfde
MM
1884/* The to_resume method of target record-btrace. */
1885
1886static void
1887record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
1888 enum gdb_signal signal)
1889{
0ca912df 1890 struct thread_info *tp;
52834460 1891 enum btrace_thread_flag flag;
0ca912df 1892 ptid_t orig_ptid;
52834460 1893
987e68b1
MM
1894 DEBUG ("resume %s: %s%s", target_pid_to_str (ptid),
1895 execution_direction == EXEC_REVERSE ? "reverse-" : "",
1896 step ? "step" : "cont");
52834460 1897
0ca912df
MM
1898 orig_ptid = ptid;
1899
1900 /* Store the execution direction of the last resume.
1901
1902 If there is more than one to_resume call, we have to rely on infrun
1903 to not change the execution direction in-between. */
70ad5bff
MM
1904 record_btrace_resume_exec_dir = execution_direction;
1905
f2665db5
MM
1906 /* For all-stop targets we pick the current thread when asked to resume an
1907 entire process or everything. */
0ca912df
MM
1908 if (!target_is_non_stop_p ())
1909 {
0ca912df
MM
1910 if (ptid_equal (minus_one_ptid, ptid) || ptid_is_pid (ptid))
1911 ptid = inferior_ptid;
1912
1913 tp = find_thread_ptid (ptid);
1914 if (tp == NULL)
1915 error (_("Cannot find thread to resume."));
0ca912df 1916 }
52834460 1917
0ca912df 1918 /* As long as we're not replaying, just forward the request.
52834460 1919
0ca912df
MM
1920 For non-stop targets this means that no thread is replaying. In order to
1921 make progress, we may need to explicitly move replaying threads to the end
1922 of their execution history. */
a52eab48
MM
1923 if ((execution_direction != EXEC_REVERSE)
1924 && !record_btrace_is_replaying (ops, minus_one_ptid))
b2f4cfde 1925 {
e75fdfca 1926 ops = ops->beneath;
0ca912df 1927 return ops->to_resume (ops, orig_ptid, step, signal);
b2f4cfde
MM
1928 }
1929
52834460
MM
1930 /* Compute the btrace thread flag for the requested move. */
1931 if (step == 0)
1932 flag = execution_direction == EXEC_REVERSE ? BTHR_RCONT : BTHR_CONT;
1933 else
1934 flag = execution_direction == EXEC_REVERSE ? BTHR_RSTEP : BTHR_STEP;
1935
52834460
MM
1936 /* We just indicate the resume intent here. The actual stepping happens in
1937 record_btrace_wait below. */
0ca912df
MM
1938 ALL_NON_EXITED_THREADS (tp)
1939 if (ptid_match (tp->ptid, ptid))
1940 record_btrace_resume_thread (tp, flag);
70ad5bff
MM
1941
1942 /* Async support. */
1943 if (target_can_async_p ())
1944 {
6a3753b3 1945 target_async (1);
70ad5bff
MM
1946 mark_async_event_handler (record_btrace_async_inferior_event_handler);
1947 }
52834460
MM
1948}
1949
987e68b1
MM
1950/* Cancel resuming TP. */
1951
1952static void
1953record_btrace_cancel_resume (struct thread_info *tp)
1954{
1955 enum btrace_thread_flag flags;
1956
1957 flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
1958 if (flags == 0)
1959 return;
1960
1961 DEBUG ("cancel resume thread %d (%s): %x (%s)", tp->num,
1962 target_pid_to_str (tp->ptid), flags,
1963 btrace_thread_flag_to_str (flags));
1964
1965 tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
e3cfc1c7 1966 record_btrace_stop_replaying_at_end (tp);
52834460
MM
1967}
1968
1969/* Return a target_waitstatus indicating that we ran out of history. */
1970
1971static struct target_waitstatus
1972btrace_step_no_history (void)
1973{
1974 struct target_waitstatus status;
1975
1976 status.kind = TARGET_WAITKIND_NO_HISTORY;
1977
1978 return status;
1979}
1980
1981/* Return a target_waitstatus indicating that a step finished. */
1982
1983static struct target_waitstatus
1984btrace_step_stopped (void)
1985{
1986 struct target_waitstatus status;
1987
1988 status.kind = TARGET_WAITKIND_STOPPED;
1989 status.value.sig = GDB_SIGNAL_TRAP;
1990
1991 return status;
1992}
1993
6e4879f0
MM
1994/* Return a target_waitstatus indicating that a thread was stopped as
1995 requested. */
1996
1997static struct target_waitstatus
1998btrace_step_stopped_on_request (void)
1999{
2000 struct target_waitstatus status;
2001
2002 status.kind = TARGET_WAITKIND_STOPPED;
2003 status.value.sig = GDB_SIGNAL_0;
2004
2005 return status;
2006}
2007
d825d248
MM
2008/* Return a target_waitstatus indicating a spurious stop. */
2009
2010static struct target_waitstatus
2011btrace_step_spurious (void)
2012{
2013 struct target_waitstatus status;
2014
2015 status.kind = TARGET_WAITKIND_SPURIOUS;
2016
2017 return status;
2018}
2019
e3cfc1c7
MM
2020/* Return a target_waitstatus indicating that the thread was not resumed. */
2021
2022static struct target_waitstatus
2023btrace_step_no_resumed (void)
2024{
2025 struct target_waitstatus status;
2026
2027 status.kind = TARGET_WAITKIND_NO_RESUMED;
2028
2029 return status;
2030}
2031
2032/* Return a target_waitstatus indicating that we should wait again. */
2033
2034static struct target_waitstatus
2035btrace_step_again (void)
2036{
2037 struct target_waitstatus status;
2038
2039 status.kind = TARGET_WAITKIND_IGNORE;
2040
2041 return status;
2042}
2043
52834460
MM
2044/* Clear the record histories. */
2045
2046static void
2047record_btrace_clear_histories (struct btrace_thread_info *btinfo)
2048{
2049 xfree (btinfo->insn_history);
2050 xfree (btinfo->call_history);
2051
2052 btinfo->insn_history = NULL;
2053 btinfo->call_history = NULL;
2054}
2055
3c615f99
MM
2056/* Check whether TP's current replay position is at a breakpoint. */
2057
2058static int
2059record_btrace_replay_at_breakpoint (struct thread_info *tp)
2060{
2061 struct btrace_insn_iterator *replay;
2062 struct btrace_thread_info *btinfo;
2063 const struct btrace_insn *insn;
2064 struct inferior *inf;
2065
2066 btinfo = &tp->btrace;
2067 replay = btinfo->replay;
2068
2069 if (replay == NULL)
2070 return 0;
2071
2072 insn = btrace_insn_get (replay);
2073 if (insn == NULL)
2074 return 0;
2075
2076 inf = find_inferior_ptid (tp->ptid);
2077 if (inf == NULL)
2078 return 0;
2079
2080 return record_check_stopped_by_breakpoint (inf->aspace, insn->pc,
2081 &btinfo->stop_reason);
2082}
2083
d825d248 2084/* Step one instruction in forward direction. */
52834460
MM
2085
2086static struct target_waitstatus
d825d248 2087record_btrace_single_step_forward (struct thread_info *tp)
52834460
MM
2088{
2089 struct btrace_insn_iterator *replay, end;
2090 struct btrace_thread_info *btinfo;
52834460 2091
d825d248
MM
2092 btinfo = &tp->btrace;
2093 replay = btinfo->replay;
2094
2095 /* We're done if we're not replaying. */
2096 if (replay == NULL)
2097 return btrace_step_no_history ();
2098
011c71b6
MM
2099 /* Check if we're stepping a breakpoint. */
2100 if (record_btrace_replay_at_breakpoint (tp))
2101 return btrace_step_stopped ();
2102
d825d248
MM
2103 /* Skip gaps during replay. */
2104 do
2105 {
2106 unsigned int steps;
2107
e3cfc1c7
MM
2108 /* We will bail out here if we continue stepping after reaching the end
2109 of the execution history. */
d825d248
MM
2110 steps = btrace_insn_next (replay, 1);
2111 if (steps == 0)
e3cfc1c7 2112 return btrace_step_no_history ();
d825d248
MM
2113 }
2114 while (btrace_insn_get (replay) == NULL);
2115
2116 /* Determine the end of the instruction trace. */
2117 btrace_insn_end (&end, btinfo);
2118
e3cfc1c7
MM
2119 /* The execution trace contains (and ends with) the current instruction.
2120 This instruction has not been executed, yet, so the trace really ends
2121 one instruction earlier. */
d825d248 2122 if (btrace_insn_cmp (replay, &end) == 0)
e3cfc1c7 2123 return btrace_step_no_history ();
d825d248
MM
2124
2125 return btrace_step_spurious ();
2126}
2127
2128/* Step one instruction in backward direction. */
2129
2130static struct target_waitstatus
2131record_btrace_single_step_backward (struct thread_info *tp)
2132{
2133 struct btrace_insn_iterator *replay;
2134 struct btrace_thread_info *btinfo;
e59fa00f 2135
52834460
MM
2136 btinfo = &tp->btrace;
2137 replay = btinfo->replay;
2138
d825d248
MM
2139 /* Start replaying if we're not already doing so. */
2140 if (replay == NULL)
2141 replay = record_btrace_start_replaying (tp);
2142
2143 /* If we can't step any further, we reached the end of the history.
2144 Skip gaps during replay. */
2145 do
2146 {
2147 unsigned int steps;
2148
2149 steps = btrace_insn_prev (replay, 1);
2150 if (steps == 0)
2151 return btrace_step_no_history ();
2152 }
2153 while (btrace_insn_get (replay) == NULL);
2154
011c71b6
MM
2155 /* Check if we're stepping a breakpoint.
2156
2157 For reverse-stepping, this check is after the step. There is logic in
2158 infrun.c that handles reverse-stepping separately. See, for example,
2159 proceed and adjust_pc_after_break.
2160
2161 This code assumes that for reverse-stepping, PC points to the last
2162 de-executed instruction, whereas for forward-stepping PC points to the
2163 next to-be-executed instruction. */
2164 if (record_btrace_replay_at_breakpoint (tp))
2165 return btrace_step_stopped ();
2166
d825d248
MM
2167 return btrace_step_spurious ();
2168}
2169
/* Step a single thread.

   Consume TP's pending stepping request (BTHR_MOVE | BTHR_STOP) and perform
   one step in the requested direction.  Returns the resulting wait status:
   TARGET_WAITKIND_SPURIOUS means "stepped, nothing to report yet" and is
   translated per request type below.  */

static struct target_waitstatus
record_btrace_step_thread (struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;
  struct target_waitstatus status;
  enum btrace_thread_flag flags;

  btinfo = &tp->btrace;

  /* Take the request out of BTINFO; continue requests re-arm it below so
     they get stepped again on the next call.  */
  flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
  btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);

  DEBUG ("stepping thread %d (%s): %x (%s)", tp->num,
	 target_pid_to_str (tp->ptid), flags,
	 btrace_thread_flag_to_str (flags));

  /* We can't step without an execution history.  */
  if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
    return btrace_step_no_history ();

  switch (flags)
    {
    default:
      internal_error (__FILE__, __LINE__, _("invalid stepping type."));

    case BTHR_STOP:
      /* Stop request: report the stop without moving.  */
      return btrace_step_stopped_on_request ();

    case BTHR_STEP:
      /* Single step forward: anything but SPURIOUS is reported as-is;
	 a spurious step means the single step completed normally.  */
      status = record_btrace_single_step_forward (tp);
      if (status.kind != TARGET_WAITKIND_SPURIOUS)
	break;

      return btrace_step_stopped ();

    case BTHR_RSTEP:
      /* Single step backward; mirror of BTHR_STEP.  */
      status = record_btrace_single_step_backward (tp);
      if (status.kind != TARGET_WAITKIND_SPURIOUS)
	break;

      return btrace_step_stopped ();

    case BTHR_CONT:
      /* Continue forward: on a spurious step, re-arm the request and ask
	 the caller to step us again.  */
      status = record_btrace_single_step_forward (tp);
      if (status.kind != TARGET_WAITKIND_SPURIOUS)
	break;

      btinfo->flags |= flags;
      return btrace_step_again ();

    case BTHR_RCONT:
      /* Continue backward; mirror of BTHR_CONT.  */
      status = record_btrace_single_step_backward (tp);
      if (status.kind != TARGET_WAITKIND_SPURIOUS)
	break;

      btinfo->flags |= flags;
      return btrace_step_again ();
    }

  /* We keep threads moving at the end of their execution history.  The to_wait
     method will stop the thread for whom the event is reported.  */
  if (status.kind == TARGET_WAITKIND_NO_HISTORY)
    btinfo->flags |= flags;

  return status;
}
2238
e3cfc1c7
MM
/* A vector of thread_info pointers.  Used by record_btrace_wait to keep
   work lists of moving and out-of-history threads.  */

typedef struct thread_info * tp_t;
DEF_VEC_P (tp_t);
2243
a6b5be76
MM
2244/* Announce further events if necessary. */
2245
2246static void
2247record_btrace_maybe_mark_async_event (const VEC (tp_t) *moving,
2248 const VEC (tp_t) *no_history)
2249{
2250 int more_moving, more_no_history;
2251
2252 more_moving = !VEC_empty (tp_t, moving);
2253 more_no_history = !VEC_empty (tp_t, no_history);
2254
2255 if (!more_moving && !more_no_history)
2256 return;
2257
2258 if (more_moving)
2259 DEBUG ("movers pending");
2260
2261 if (more_no_history)
2262 DEBUG ("no-history pending");
2263
2264 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2265}
2266
b2f4cfde
MM
/* The to_wait method of target record-btrace.

   Steps all resumed threads matching PTID one instruction at a time until
   one of them reports an event, then stops all other threads and returns
   the eventing thread's ptid with *STATUS filled in.  */

static ptid_t
record_btrace_wait (struct target_ops *ops, ptid_t ptid,
		    struct target_waitstatus *status, int options)
{
  VEC (tp_t) *moving, *no_history;
  struct thread_info *tp, *eventing;
  struct cleanup *cleanups = make_cleanup (null_cleanup, NULL);

  DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);

  /* As long as we're not replaying, just forward the request.  */
  if ((execution_direction != EXEC_REVERSE)
      && !record_btrace_is_replaying (ops, minus_one_ptid))
    {
      ops = ops->beneath;
      return ops->to_wait (ops, ptid, status, options);
    }

  moving = NULL;
  no_history = NULL;

  make_cleanup (VEC_cleanup (tp_t), &moving);
  make_cleanup (VEC_cleanup (tp_t), &no_history);

  /* Keep a work list of moving threads.  */
  ALL_NON_EXITED_THREADS (tp)
    if (ptid_match (tp->ptid, ptid)
	&& ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0))
      VEC_safe_push (tp_t, moving, tp);

  /* No thread had a pending move or stop request: nothing to wait for.  */
  if (VEC_empty (tp_t, moving))
    {
      *status = btrace_step_no_resumed ();

      DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid),
	     target_waitstatus_to_string (status));

      do_cleanups (cleanups);
      return null_ptid;
    }

  /* Step moving threads one by one, one step each, until either one thread
     reports an event or we run out of threads to step.

     When stepping more than one thread, chances are that some threads reach
     the end of their execution history earlier than others.  If we reported
     this immediately, all-stop on top of non-stop would stop all threads and
     resume the same threads next time.  And we would report the same thread
     having reached the end of its execution history again.

     In the worst case, this would starve the other threads.  But even if other
     threads would be allowed to make progress, this would result in far too
     many intermediate stops.

     We therefore delay the reporting of "no execution history" until we have
     nothing else to report.  By this time, all threads should have moved to
     either the beginning or the end of their execution history.  There will
     be a single user-visible stop.  */
  eventing = NULL;
  while ((eventing == NULL) && !VEC_empty (tp_t, moving))
    {
      unsigned int ix;

      ix = 0;
      while ((eventing == NULL) && VEC_iterate (tp_t, moving, ix, tp))
	{
	  *status = record_btrace_step_thread (tp);

	  switch (status->kind)
	    {
	    case TARGET_WAITKIND_IGNORE:
	      /* Thread stepped without an event; move on to the next.  */
	      ix++;
	      break;

	    case TARGET_WAITKIND_NO_HISTORY:
	      /* Park this thread; its report is delayed (see above).  */
	      VEC_safe_push (tp_t, no_history,
			     VEC_ordered_remove (tp_t, moving, ix));
	      break;

	    default:
	      /* A reportable event; this ends the stepping loop.  */
	      eventing = VEC_unordered_remove (tp_t, moving, ix);
	      break;
	    }
	}
    }

  if (eventing == NULL)
    {
      /* We started with at least one moving thread.  This thread must have
	 either stopped or reached the end of its execution history.

	 In the former case, EVENTING must not be NULL.
	 In the latter case, NO_HISTORY must not be empty.  */
      gdb_assert (!VEC_empty (tp_t, no_history));

      /* We kept threads moving at the end of their execution history.  Stop
	 EVENTING now that we are going to report its stop.  */
      eventing = VEC_unordered_remove (tp_t, no_history, 0);
      eventing->btrace.flags &= ~BTHR_MOVE;

      *status = btrace_step_no_history ();
    }

  gdb_assert (eventing != NULL);

  /* We kept threads replaying at the end of their execution history.  Stop
     replaying EVENTING now that we are going to report its stop.  */
  record_btrace_stop_replaying_at_end (eventing);

  /* Stop all other threads.  */
  if (!target_is_non_stop_p ())
    ALL_NON_EXITED_THREADS (tp)
      record_btrace_cancel_resume (tp);

  /* In async mode, we need to announce further events.  */
  if (target_is_async_p ())
    record_btrace_maybe_mark_async_event (moving, no_history);

  /* Start record histories anew from the current position.  */
  record_btrace_clear_histories (&eventing->btrace);

  /* We moved the replay position but did not update registers.  */
  registers_changed_ptid (eventing->ptid);

  DEBUG ("wait ended by thread %d (%s): %s", eventing->num,
	 target_pid_to_str (eventing->ptid),
	 target_waitstatus_to_string (status));

  do_cleanups (cleanups);
  return eventing->ptid;
}
2400
6e4879f0
MM
2401/* The to_stop method of target record-btrace. */
2402
2403static void
2404record_btrace_stop (struct target_ops *ops, ptid_t ptid)
2405{
2406 DEBUG ("stop %s", target_pid_to_str (ptid));
2407
2408 /* As long as we're not replaying, just forward the request. */
a52eab48
MM
2409 if ((execution_direction != EXEC_REVERSE)
2410 && !record_btrace_is_replaying (ops, minus_one_ptid))
6e4879f0
MM
2411 {
2412 ops = ops->beneath;
2413 ops->to_stop (ops, ptid);
2414 }
2415 else
2416 {
2417 struct thread_info *tp;
2418
2419 ALL_NON_EXITED_THREADS (tp)
2420 if (ptid_match (tp->ptid, ptid))
2421 {
2422 tp->btrace.flags &= ~BTHR_MOVE;
2423 tp->btrace.flags |= BTHR_STOP;
2424 }
2425 }
2426 }
2427
52834460
MM
/* The to_can_execute_reverse method of target record-btrace.  */

static int
record_btrace_can_execute_reverse (struct target_ops *self)
{
  /* Branch tracing records the execution history, so reverse execution is
     always available.  */
  return 1;
}
2435
9e8915c6 2436/* The to_stopped_by_sw_breakpoint method of target record-btrace. */
52834460 2437
9e8915c6
PA
2438static int
2439record_btrace_stopped_by_sw_breakpoint (struct target_ops *ops)
52834460 2440{
a52eab48 2441 if (record_btrace_is_replaying (ops, minus_one_ptid))
9e8915c6
PA
2442 {
2443 struct thread_info *tp = inferior_thread ();
2444
2445 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2446 }
2447
2448 return ops->beneath->to_stopped_by_sw_breakpoint (ops->beneath);
2449}
2450
2451/* The to_supports_stopped_by_sw_breakpoint method of target
2452 record-btrace. */
2453
2454static int
2455record_btrace_supports_stopped_by_sw_breakpoint (struct target_ops *ops)
2456{
a52eab48 2457 if (record_btrace_is_replaying (ops, minus_one_ptid))
9e8915c6
PA
2458 return 1;
2459
2460 return ops->beneath->to_supports_stopped_by_sw_breakpoint (ops->beneath);
2461}
2462
/* The to_stopped_by_hw_breakpoint method of target record-btrace.  */

static int
record_btrace_stopped_by_hw_breakpoint (struct target_ops *ops)
{
  if (record_btrace_is_replaying (ops, minus_one_ptid))
    {
      struct thread_info *tp = inferior_thread ();

      /* During replay, report the stop reason recorded for the current
	 thread rather than asking the target beneath.  */
      return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
    }

  return ops->beneath->to_stopped_by_hw_breakpoint (ops->beneath);
}
2477
2478/* The to_supports_stopped_by_hw_breakpoint method of target
2479 record-btrace. */
2480
2481static int
2482record_btrace_supports_stopped_by_hw_breakpoint (struct target_ops *ops)
2483{
a52eab48 2484 if (record_btrace_is_replaying (ops, minus_one_ptid))
9e8915c6 2485 return 1;
52834460 2486
9e8915c6 2487 return ops->beneath->to_supports_stopped_by_hw_breakpoint (ops->beneath);
b2f4cfde
MM
2488}
2489
e8032dde 2490/* The to_update_thread_list method of target record-btrace. */
e2887aa3
MM
2491
2492static void
e8032dde 2493record_btrace_update_thread_list (struct target_ops *ops)
e2887aa3 2494{
e8032dde 2495 /* We don't add or remove threads during replay. */
a52eab48 2496 if (record_btrace_is_replaying (ops, minus_one_ptid))
e2887aa3
MM
2497 return;
2498
2499 /* Forward the request. */
e75fdfca 2500 ops = ops->beneath;
e8032dde 2501 ops->to_update_thread_list (ops);
e2887aa3
MM
2502}
2503
2504/* The to_thread_alive method of target record-btrace. */
2505
2506static int
2507record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
2508{
2509 /* We don't add or remove threads during replay. */
a52eab48 2510 if (record_btrace_is_replaying (ops, minus_one_ptid))
e2887aa3
MM
2511 return find_thread_ptid (ptid) != NULL;
2512
2513 /* Forward the request. */
e75fdfca
TT
2514 ops = ops->beneath;
2515 return ops->to_thread_alive (ops, ptid);
e2887aa3
MM
2516}
2517
066ce621
MM
/* Set the replay branch trace instruction iterator.  If IT is NULL, replay
   is stopped.

   Clears recorded histories, updates the cached stop PC, and re-prints the
   current frame so the user sees the new position.  */

static void
record_btrace_set_replay (struct thread_info *tp,
			  const struct btrace_insn_iterator *it)
{
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;

  /* A NULL iterator (or one without a function segment) means "go live".  */
  if (it == NULL || it->function == NULL)
    record_btrace_stop_replaying (tp);
  else
    {
      if (btinfo->replay == NULL)
	record_btrace_start_replaying (tp);
      else if (btrace_insn_cmp (btinfo->replay, it) == 0)
	/* Already at the requested position; nothing to do.  */
	return;

      *btinfo->replay = *it;
      /* The replay position changed; cached register values are stale.  */
      registers_changed_ptid (tp->ptid);
    }

  /* Start anew from the new replay position.  */
  record_btrace_clear_histories (btinfo);

  stop_pc = regcache_read_pc (get_current_regcache ());
  print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
}
2548
2549/* The to_goto_record_begin method of target record-btrace. */
2550
2551static void
08475817 2552record_btrace_goto_begin (struct target_ops *self)
066ce621
MM
2553{
2554 struct thread_info *tp;
2555 struct btrace_insn_iterator begin;
2556
2557 tp = require_btrace_thread ();
2558
2559 btrace_insn_begin (&begin, &tp->btrace);
2560 record_btrace_set_replay (tp, &begin);
066ce621
MM
2561}
2562
2563/* The to_goto_record_end method of target record-btrace. */
2564
2565static void
307a1b91 2566record_btrace_goto_end (struct target_ops *ops)
066ce621
MM
2567{
2568 struct thread_info *tp;
2569
2570 tp = require_btrace_thread ();
2571
2572 record_btrace_set_replay (tp, NULL);
066ce621
MM
2573}
2574
2575/* The to_goto_record method of target record-btrace. */
2576
2577static void
606183ac 2578record_btrace_goto (struct target_ops *self, ULONGEST insn)
066ce621
MM
2579{
2580 struct thread_info *tp;
2581 struct btrace_insn_iterator it;
2582 unsigned int number;
2583 int found;
2584
2585 number = insn;
2586
2587 /* Check for wrap-arounds. */
2588 if (number != insn)
2589 error (_("Instruction number out of range."));
2590
2591 tp = require_btrace_thread ();
2592
2593 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
2594 if (found == 0)
2595 error (_("No such instruction."));
2596
2597 record_btrace_set_replay (tp, &it);
066ce621
MM
2598}
2599
797094dd
MM
/* The to_record_stop_replaying method of target record-btrace.

   Stops replaying for every live thread.  */

static void
record_btrace_stop_replaying_all (struct target_ops *self)
{
  struct thread_info *tp;

  ALL_NON_EXITED_THREADS (tp)
    record_btrace_stop_replaying (tp);
}
2610
70ad5bff
MM
/* The to_execution_direction target method.

   Returns the direction of the last resume request, kept in
   record_btrace_resume_exec_dir.  */

static enum exec_direction_kind
record_btrace_execution_direction (struct target_ops *self)
{
  return record_btrace_resume_exec_dir;
}
2618
aef92902
MM
/* The to_prepare_to_generate_core target method.

   Sets a flag so other record-btrace methods know a core file is being
   generated.  */

static void
record_btrace_prepare_to_generate_core (struct target_ops *self)
{
  record_btrace_generating_corefile = 1;
}
2626
/* The to_done_generating_core target method.

   Clears the flag set by record_btrace_prepare_to_generate_core.  */

static void
record_btrace_done_generating_core (struct target_ops *self)
{
  record_btrace_generating_corefile = 0;
}
2634
afedecd3
MM
/* Initialize the record-btrace target ops.

   Fills in the record_btrace_ops structure with this target's methods.
   Methods not set here fall through to the target beneath.  */

static void
init_record_btrace_ops (void)
{
  struct target_ops *ops;

  ops = &record_btrace_ops;

  /* Identification.  */
  ops->to_shortname = "record-btrace";
  ops->to_longname = "Branch tracing target";
  ops->to_doc = "Collect control-flow trace and provide the execution history.";

  /* Open/close and generic record methods.  */
  ops->to_open = record_btrace_open;
  ops->to_close = record_btrace_close;
  ops->to_async = record_btrace_async;
  ops->to_detach = record_detach;
  ops->to_disconnect = record_disconnect;
  ops->to_mourn_inferior = record_mourn_inferior;
  ops->to_kill = record_kill;

  /* Recording and history browsing.  */
  ops->to_stop_recording = record_btrace_stop_recording;
  ops->to_info_record = record_btrace_info;
  ops->to_insn_history = record_btrace_insn_history;
  ops->to_insn_history_from = record_btrace_insn_history_from;
  ops->to_insn_history_range = record_btrace_insn_history_range;
  ops->to_call_history = record_btrace_call_history;
  ops->to_call_history_from = record_btrace_call_history_from;
  ops->to_call_history_range = record_btrace_call_history_range;
  ops->to_record_is_replaying = record_btrace_is_replaying;
  ops->to_record_will_replay = record_btrace_will_replay;
  ops->to_record_stop_replaying = record_btrace_stop_replaying_all;

  /* Memory, breakpoints, and registers during replay.  */
  ops->to_xfer_partial = record_btrace_xfer_partial;
  ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
  ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
  ops->to_fetch_registers = record_btrace_fetch_registers;
  ops->to_store_registers = record_btrace_store_registers;
  ops->to_prepare_to_store = record_btrace_prepare_to_store;
  ops->to_get_unwinder = &record_btrace_to_get_unwinder;
  ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;

  /* Execution control.  */
  ops->to_resume = record_btrace_resume;
  ops->to_wait = record_btrace_wait;
  ops->to_stop = record_btrace_stop;
  ops->to_update_thread_list = record_btrace_update_thread_list;
  ops->to_thread_alive = record_btrace_thread_alive;
  ops->to_goto_record_begin = record_btrace_goto_begin;
  ops->to_goto_record_end = record_btrace_goto_end;
  ops->to_goto_record = record_btrace_goto;
  ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
  ops->to_stopped_by_sw_breakpoint = record_btrace_stopped_by_sw_breakpoint;
  ops->to_supports_stopped_by_sw_breakpoint
    = record_btrace_supports_stopped_by_sw_breakpoint;
  ops->to_stopped_by_hw_breakpoint = record_btrace_stopped_by_hw_breakpoint;
  ops->to_supports_stopped_by_hw_breakpoint
    = record_btrace_supports_stopped_by_hw_breakpoint;
  ops->to_execution_direction = record_btrace_execution_direction;

  /* Core file generation.  */
  ops->to_prepare_to_generate_core = record_btrace_prepare_to_generate_core;
  ops->to_done_generating_core = record_btrace_done_generating_core;

  ops->to_stratum = record_stratum;
  ops->to_magic = OPS_MAGIC;
}
2693
f4abbc16
MM
/* Start recording in BTS format.

   Sets the requested trace format and opens the record-btrace target,
   resetting the format again if opening the target fails.  */

static void
cmd_record_btrace_bts_start (char *args, int from_tty)
{
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  record_btrace_conf.format = BTRACE_FORMAT_BTS;

  TRY
    {
      execute_command ("target record-btrace", from_tty);
    }
  CATCH (exception, RETURN_MASK_ALL)
    {
      /* Undo the format selection before re-raising.  */
      record_btrace_conf.format = BTRACE_FORMAT_NONE;
      throw_exception (exception);
    }
  END_CATCH
}
2715
/* Start recording Intel(R) Processor Trace.

   Sets the requested trace format and opens the record-btrace target,
   resetting the format again if opening the target fails.  */

static void
cmd_record_btrace_pt_start (char *args, int from_tty)
{
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  record_btrace_conf.format = BTRACE_FORMAT_PT;

  TRY
    {
      execute_command ("target record-btrace", from_tty);
    }
  CATCH (exception, RETURN_MASK_ALL)
    {
      /* Undo the format selection before re-raising.  */
      record_btrace_conf.format = BTRACE_FORMAT_NONE;
      throw_exception (exception);
    }
  END_CATCH
}
2737
b20a6524
MM
/* Alias for "target record".

   Tries the PT format first and falls back to BTS if opening the target
   in PT format fails.  If both fail, the format is reset and the last
   exception is re-raised.  */

static void
cmd_record_btrace_start (char *args, int from_tty)
{
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  record_btrace_conf.format = BTRACE_FORMAT_PT;

  TRY
    {
      execute_command ("target record-btrace", from_tty);
    }
  CATCH (exception, RETURN_MASK_ALL)
    {
      /* PT failed; retry with BTS.  */
      record_btrace_conf.format = BTRACE_FORMAT_BTS;

      TRY
	{
	  execute_command ("target record-btrace", from_tty);
	}
      CATCH (exception, RETURN_MASK_ALL)
	{
	  record_btrace_conf.format = BTRACE_FORMAT_NONE;
	  throw_exception (exception);
	}
      END_CATCH
    }
  END_CATCH
}
2769
67b5c0c1
MM
/* The "set record btrace" command.

   With no subcommand, show the current values of all settings in the
   list.  */

static void
cmd_set_record_btrace (char *args, int from_tty)
{
  cmd_show_list (set_record_btrace_cmdlist, from_tty, "");
}
2777
/* The "show record btrace" command.

   Shows all settings registered under "show record btrace".  */

static void
cmd_show_record_btrace (char *args, int from_tty)
{
  cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
}
2785
2786/* The "show record btrace replay-memory-access" command. */
2787
2788static void
2789cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2790 struct cmd_list_element *c, const char *value)
2791{
2792 fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
2793 replay_memory_access);
2794}
2795
d33501a5
MM
/* The "set record btrace bts" command.

   Prints usage and the list of available subcommands.  */

static void
cmd_set_record_btrace_bts (char *args, int from_tty)
{
  printf_unfiltered (_("\"set record btrace bts\" must be followed "
		       "by an appropriate subcommand.\n"));
  help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
	     all_commands, gdb_stdout);
}
2806
/* The "show record btrace bts" command.

   Shows all settings registered under "show record btrace bts".  */

static void
cmd_show_record_btrace_bts (char *args, int from_tty)
{
  cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
}
2814
b20a6524
MM
/* The "set record btrace pt" command.

   Prints usage and the list of available subcommands.  */

static void
cmd_set_record_btrace_pt (char *args, int from_tty)
{
  printf_unfiltered (_("\"set record btrace pt\" must be followed "
		       "by an appropriate subcommand.\n"));
  help_list (set_record_btrace_pt_cmdlist, "set record btrace pt ",
	     all_commands, gdb_stdout);
}
2825
/* The "show record btrace pt" command.

   Shows all settings registered under "show record btrace pt".  */

static void
cmd_show_record_btrace_pt (char *args, int from_tty)
{
  cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, "");
}
2833
/* The "record bts buffer-size" show value function.

   Show-value callback: prints the configured BTS buffer size to FILE.  */

static void
show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
				   struct cmd_list_element *c,
				   const char *value)
{
  fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
		    value);
}
2844
/* The "record pt buffer-size" show value function.

   Show-value callback: prints the configured PT buffer size to FILE.  */

static void
show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
				  struct cmd_list_element *c,
				  const char *value)
{
  fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
		    value);
}
2855
afedecd3
MM
void _initialize_record_btrace (void);

/* Initialize btrace commands.

   Registers the "record btrace" command family and its set/show options,
   installs the record-btrace target, creates the frame cache, and sets
   the default trace buffer sizes.  */

void
_initialize_record_btrace (void)
{
  /* "record btrace" and its "record b" alias.  */
  add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
		  _("Start branch trace recording."), &record_btrace_cmdlist,
		  "record btrace ", 0, &record_cmdlist);
  add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);

  /* Format-specific start commands and their aliases.  */
  add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
	   _("\
Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
The processor stores a from/to record for each branch into a cyclic buffer.\n\
This format may not be available on all processors."),
	   &record_btrace_cmdlist);
  add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);

  add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
	   _("\
Start branch trace recording in Intel(R) Processor Trace format.\n\n\
This format may not be available on all processors."),
	   &record_btrace_cmdlist);
  add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);

  /* "set/show record btrace" prefix commands.  */
  add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
		  _("Set record options"), &set_record_btrace_cmdlist,
		  "set record btrace ", 0, &set_record_cmdlist);

  add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
		  _("Show record options"), &show_record_btrace_cmdlist,
		  "show record btrace ", 0, &show_record_cmdlist);

  add_setshow_enum_cmd ("replay-memory-access", no_class,
			replay_memory_access_types, &replay_memory_access, _("\
Set what memory accesses are allowed during replay."), _("\
Show what memory accesses are allowed during replay."),
			   _("Default is READ-ONLY.\n\n\
The btrace record target does not trace data.\n\
The memory therefore corresponds to the live target and not \
to the current replay position.\n\n\
When READ-ONLY, allow accesses to read-only memory during replay.\n\
When READ-WRITE, allow accesses to read-only and read-write memory during \
replay."),
			   NULL, cmd_show_replay_memory_access,
			   &set_record_btrace_cmdlist,
			   &show_record_btrace_cmdlist);

  /* BTS-specific options.  */
  add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
		  _("Set record btrace bts options"),
		  &set_record_btrace_bts_cmdlist,
		  "set record btrace bts ", 0, &set_record_btrace_cmdlist);

  add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
		  _("Show record btrace bts options"),
		  &show_record_btrace_bts_cmdlist,
		  "show record btrace bts ", 0, &show_record_btrace_cmdlist);

  add_setshow_uinteger_cmd ("buffer-size", no_class,
			    &record_btrace_conf.bts.size,
			    _("Set the record/replay bts buffer size."),
			    _("Show the record/replay bts buffer size."), _("\
When starting recording request a trace buffer of this size.  \
The actual buffer size may differ from the requested size.  \
Use \"info record\" to see the actual buffer size.\n\n\
Bigger buffers allow longer recording but also take more time to process \
the recorded execution trace.\n\n\
The trace buffer size may not be changed while recording."), NULL,
			    show_record_bts_buffer_size_value,
			    &set_record_btrace_bts_cmdlist,
			    &show_record_btrace_bts_cmdlist);

  /* PT-specific options.  */
  add_prefix_cmd ("pt", class_support, cmd_set_record_btrace_pt,
		  _("Set record btrace pt options"),
		  &set_record_btrace_pt_cmdlist,
		  "set record btrace pt ", 0, &set_record_btrace_cmdlist);

  add_prefix_cmd ("pt", class_support, cmd_show_record_btrace_pt,
		  _("Show record btrace pt options"),
		  &show_record_btrace_pt_cmdlist,
		  "show record btrace pt ", 0, &show_record_btrace_cmdlist);

  add_setshow_uinteger_cmd ("buffer-size", no_class,
			    &record_btrace_conf.pt.size,
			    _("Set the record/replay pt buffer size."),
			    _("Show the record/replay pt buffer size."), _("\
Bigger buffers allow longer recording but also take more time to process \
the recorded execution.\n\
The actual buffer size may differ from the requested size.  Use \"info record\" \
to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
			    &set_record_btrace_pt_cmdlist,
			    &show_record_btrace_pt_cmdlist);

  /* Install the target itself.  */
  init_record_btrace_ops ();
  add_target (&record_btrace_ops);

  /* Cache for branch trace function call segments, used by the unwinder.
     NOTE(review): presumably keyed by function segment — confirm against
     bfcache_hash/bfcache_eq defined earlier in this file.  */
  bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
			       xcalloc, xfree);

  /* Default trace buffer sizes.  */
  record_btrace_conf.bts.size = 64 * 1024;
  record_btrace_conf.pt.size = 16 * 1024;
}
This page took 0.560233 seconds and 4 git commands to generate.