btrace: split record_btrace_step_thread
[deliverable/binutils-gdb.git] / gdb / record-btrace.c
afedecd3
MM
1/* Branch trace support for GDB, the GNU debugger.
2
32d0add0 3 Copyright (C) 2013-2015 Free Software Foundation, Inc.
afedecd3
MM
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22#include "defs.h"
23#include "record.h"
24#include "gdbthread.h"
25#include "target.h"
26#include "gdbcmd.h"
27#include "disasm.h"
28#include "observer.h"
afedecd3
MM
29#include "cli/cli-utils.h"
30#include "source.h"
31#include "ui-out.h"
32#include "symtab.h"
33#include "filenames.h"
1f3ef581 34#include "regcache.h"
cecac1ab 35#include "frame-unwind.h"
0b722aec 36#include "hashtab.h"
45741a9c 37#include "infrun.h"
70ad5bff
MM
38#include "event-loop.h"
39#include "inf-loop.h"
afedecd3
MM
40
41/* The target_ops of record-btrace. */
42static struct target_ops record_btrace_ops;
43
44/* A new thread observer enabling branch tracing for the new thread. */
45static struct observer *record_btrace_thread_observer;
46
67b5c0c1
MM
47/* Memory access types used in set/show record btrace replay-memory-access. */
48static const char replay_memory_access_read_only[] = "read-only";
49static const char replay_memory_access_read_write[] = "read-write";
50static const char *const replay_memory_access_types[] =
51{
52 replay_memory_access_read_only,
53 replay_memory_access_read_write,
54 NULL
55};
56
57/* The currently allowed replay memory access type. */
58static const char *replay_memory_access = replay_memory_access_read_only;
59
60/* Command lists for "set/show record btrace". */
61static struct cmd_list_element *set_record_btrace_cmdlist;
62static struct cmd_list_element *show_record_btrace_cmdlist;
633785ff 63
70ad5bff
MM
64/* The execution direction of the last resume we got. See record-full.c. */
65static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;
66
67/* The async event handler for reverse/replay execution. */
68static struct async_event_handler *record_btrace_async_inferior_event_handler;
69
aef92902
MM
70/* A flag indicating that we are currently generating a core file. */
71static int record_btrace_generating_corefile;
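/* A purely descriptive note (added comment): while this flag is set, the
   replay-related filtering of memory and register accesses (see
   record_btrace_xfer_partial and record_btrace_fetch_registers below) is
   bypassed and requests are forwarded to the target beneath.  */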
72
f4abbc16
MM
73/* The current branch trace configuration. */
74static struct btrace_config record_btrace_conf;
75
76/* Command list for "record btrace". */
77static struct cmd_list_element *record_btrace_cmdlist;
78
d33501a5
MM
79/* Command lists for "set/show record btrace bts". */
80static struct cmd_list_element *set_record_btrace_bts_cmdlist;
81static struct cmd_list_element *show_record_btrace_bts_cmdlist;
82
b20a6524
MM
83/* Command lists for "set/show record btrace pt". */
84static struct cmd_list_element *set_record_btrace_pt_cmdlist;
85static struct cmd_list_element *show_record_btrace_pt_cmdlist;
86
afedecd3
MM
87/* Print a record-btrace debug message. Use do ... while (0) to avoid
88 ambiguities when used in if statements. */
89
90#define DEBUG(msg, args...) \
91 do \
92 { \
93 if (record_debug != 0) \
94 fprintf_unfiltered (gdb_stdlog, \
95 "[record-btrace] " msg "\n", ##args); \
96 } \
97 while (0)
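/* Illustration (added comment): the do ... while (0) wrapper lets the macro
   be used like a single statement, e.g.
   "if (record_debug) DEBUG ("msg"); else ..." compiles as intended, whereas
   a bare braced block followed by the semicolon would break the "else".  */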
98
99
100/* Update the branch trace for the current thread and return a pointer to its
066ce621 101 thread_info.
afedecd3
MM
102
103 Throws an error if there is no thread or no trace. This function never
104 returns NULL. */
105
066ce621
MM
106static struct thread_info *
107require_btrace_thread (void)
afedecd3
MM
108{
109 struct thread_info *tp;
afedecd3
MM
110
111 DEBUG ("require");
112
113 tp = find_thread_ptid (inferior_ptid);
114 if (tp == NULL)
115 error (_("No thread."));
116
117 btrace_fetch (tp);
118
6e07b1d2 119 if (btrace_is_empty (tp))
afedecd3
MM
120 error (_("No trace."));
121
066ce621
MM
122 return tp;
123}
124
125/* Update the branch trace for the current thread and return a pointer to its
126 branch trace information struct.
127
128 Throws an error if there is no thread or no trace. This function never
129 returns NULL. */
130
131static struct btrace_thread_info *
132require_btrace (void)
133{
134 struct thread_info *tp;
135
136 tp = require_btrace_thread ();
137
138 return &tp->btrace;
afedecd3
MM
139}
140
141/* Enable branch tracing for one thread. Warn on errors. */
142
143static void
144record_btrace_enable_warn (struct thread_info *tp)
145{
492d29ea
PA
146 TRY
147 {
148 btrace_enable (tp, &record_btrace_conf);
149 }
150 CATCH (error, RETURN_MASK_ERROR)
151 {
152 warning ("%s", error.message);
153 }
154 END_CATCH
afedecd3
MM
155}
156
157/* Callback function to disable branch tracing for one thread. */
158
159static void
160record_btrace_disable_callback (void *arg)
161{
162 struct thread_info *tp;
163
164 tp = arg;
165
166 btrace_disable (tp);
167}
168
169/* Enable automatic tracing of new threads. */
170
171static void
172record_btrace_auto_enable (void)
173{
174 DEBUG ("attach thread observer");
175
176 record_btrace_thread_observer
177 = observer_attach_new_thread (record_btrace_enable_warn);
178}
179
180/* Disable automatic tracing of new threads. */
181
182static void
183record_btrace_auto_disable (void)
184{
 185 /* The observer may already have been detached. */
186 if (record_btrace_thread_observer == NULL)
187 return;
188
189 DEBUG ("detach thread observer");
190
191 observer_detach_new_thread (record_btrace_thread_observer);
192 record_btrace_thread_observer = NULL;
193}
194
70ad5bff
MM
195/* The record-btrace async event handler function. */
196
197static void
198record_btrace_handle_async_inferior_event (gdb_client_data data)
199{
200 inferior_event_handler (INF_REG_EVENT, NULL);
201}
202
afedecd3
MM
203/* The to_open method of target record-btrace. */
204
205static void
014f9477 206record_btrace_open (const char *args, int from_tty)
afedecd3
MM
207{
208 struct cleanup *disable_chain;
209 struct thread_info *tp;
210
211 DEBUG ("open");
212
8213266a 213 record_preopen ();
afedecd3
MM
214
215 if (!target_has_execution)
216 error (_("The program is not being run."));
217
52834460
MM
218 if (non_stop)
219 error (_("Record btrace can't debug inferior in non-stop mode."));
220
afedecd3
MM
221 gdb_assert (record_btrace_thread_observer == NULL);
222
223 disable_chain = make_cleanup (null_cleanup, NULL);
034f788c 224 ALL_NON_EXITED_THREADS (tp)
afedecd3
MM
225 if (args == NULL || *args == 0 || number_is_in_list (args, tp->num))
226 {
f4abbc16 227 btrace_enable (tp, &record_btrace_conf);
afedecd3
MM
228
229 make_cleanup (record_btrace_disable_callback, tp);
230 }
231
232 record_btrace_auto_enable ();
233
234 push_target (&record_btrace_ops);
235
70ad5bff
MM
236 record_btrace_async_inferior_event_handler
237 = create_async_event_handler (record_btrace_handle_async_inferior_event,
238 NULL);
aef92902 239 record_btrace_generating_corefile = 0;
70ad5bff 240
afedecd3
MM
241 observer_notify_record_changed (current_inferior (), 1);
242
243 discard_cleanups (disable_chain);
244}
245
246/* The to_stop_recording method of target record-btrace. */
247
248static void
c6cd7c02 249record_btrace_stop_recording (struct target_ops *self)
afedecd3
MM
250{
251 struct thread_info *tp;
252
253 DEBUG ("stop recording");
254
255 record_btrace_auto_disable ();
256
034f788c 257 ALL_NON_EXITED_THREADS (tp)
afedecd3
MM
258 if (tp->btrace.target != NULL)
259 btrace_disable (tp);
260}
261
262/* The to_close method of target record-btrace. */
263
264static void
de90e03d 265record_btrace_close (struct target_ops *self)
afedecd3 266{
568e808b
MM
267 struct thread_info *tp;
268
70ad5bff
MM
269 if (record_btrace_async_inferior_event_handler != NULL)
270 delete_async_event_handler (&record_btrace_async_inferior_event_handler);
271
99c819ee
MM
272 /* Make sure automatic recording gets disabled even if we did not stop
273 recording before closing the record-btrace target. */
274 record_btrace_auto_disable ();
275
568e808b
MM
276 /* We should have already stopped recording.
277 Tear down btrace in case we have not. */
034f788c 278 ALL_NON_EXITED_THREADS (tp)
568e808b 279 btrace_teardown (tp);
afedecd3
MM
280}
281
b7d2e916
PA
282/* The to_async method of target record-btrace. */
283
284static void
6a3753b3 285record_btrace_async (struct target_ops *ops, int enable)
b7d2e916 286{
6a3753b3 287 if (enable)
b7d2e916
PA
288 mark_async_event_handler (record_btrace_async_inferior_event_handler);
289 else
290 clear_async_event_handler (record_btrace_async_inferior_event_handler);
291
6a3753b3 292 ops->beneath->to_async (ops->beneath, enable);
b7d2e916
PA
293}
294
d33501a5
MM
 295/* Adjusts the size and returns a human-readable size suffix. */
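/* Example (added comment): a buffer size of 4096 is reported as "4kB" and
   2097152 as "2MB"; a size that is not an exact multiple of a binary unit is
   printed unchanged with an empty suffix.  */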
296
297static const char *
298record_btrace_adjust_size (unsigned int *size)
299{
300 unsigned int sz;
301
302 sz = *size;
303
304 if ((sz & ((1u << 30) - 1)) == 0)
305 {
306 *size = sz >> 30;
307 return "GB";
308 }
309 else if ((sz & ((1u << 20) - 1)) == 0)
310 {
311 *size = sz >> 20;
312 return "MB";
313 }
314 else if ((sz & ((1u << 10) - 1)) == 0)
315 {
316 *size = sz >> 10;
317 return "kB";
318 }
319 else
320 return "";
321}
322
323/* Print a BTS configuration. */
324
325static void
326record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
327{
328 const char *suffix;
329 unsigned int size;
330
331 size = conf->size;
332 if (size > 0)
333 {
334 suffix = record_btrace_adjust_size (&size);
335 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
336 }
337}
338
b20a6524
MM
339/* Print an Intel(R) Processor Trace configuration. */
340
341static void
342record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
343{
344 const char *suffix;
345 unsigned int size;
346
347 size = conf->size;
348 if (size > 0)
349 {
350 suffix = record_btrace_adjust_size (&size);
351 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
352 }
353}
354
d33501a5
MM
355/* Print a branch tracing configuration. */
356
357static void
358record_btrace_print_conf (const struct btrace_config *conf)
359{
360 printf_unfiltered (_("Recording format: %s.\n"),
361 btrace_format_string (conf->format));
362
363 switch (conf->format)
364 {
365 case BTRACE_FORMAT_NONE:
366 return;
367
368 case BTRACE_FORMAT_BTS:
369 record_btrace_print_bts_conf (&conf->bts);
370 return;
b20a6524
MM
371
372 case BTRACE_FORMAT_PT:
373 record_btrace_print_pt_conf (&conf->pt);
374 return;
d33501a5
MM
375 }
376
377 internal_error (__FILE__, __LINE__, _("Unkown branch trace format."));
378}
379
afedecd3
MM
380/* The to_info_record method of target record-btrace. */
381
382static void
630d6a4a 383record_btrace_info (struct target_ops *self)
afedecd3
MM
384{
385 struct btrace_thread_info *btinfo;
f4abbc16 386 const struct btrace_config *conf;
afedecd3 387 struct thread_info *tp;
31fd9caa 388 unsigned int insns, calls, gaps;
afedecd3
MM
389
390 DEBUG ("info");
391
392 tp = find_thread_ptid (inferior_ptid);
393 if (tp == NULL)
394 error (_("No thread."));
395
f4abbc16
MM
396 btinfo = &tp->btrace;
397
398 conf = btrace_conf (btinfo);
399 if (conf != NULL)
d33501a5 400 record_btrace_print_conf (conf);
f4abbc16 401
afedecd3
MM
402 btrace_fetch (tp);
403
23a7fe75
MM
404 insns = 0;
405 calls = 0;
31fd9caa 406 gaps = 0;
23a7fe75 407
6e07b1d2 408 if (!btrace_is_empty (tp))
23a7fe75
MM
409 {
410 struct btrace_call_iterator call;
411 struct btrace_insn_iterator insn;
412
413 btrace_call_end (&call, btinfo);
414 btrace_call_prev (&call, 1);
5de9129b 415 calls = btrace_call_number (&call);
23a7fe75
MM
416
417 btrace_insn_end (&insn, btinfo);
31fd9caa 418
5de9129b 419 insns = btrace_insn_number (&insn);
31fd9caa
MM
420 if (insns != 0)
421 {
422 /* The last instruction does not really belong to the trace. */
423 insns -= 1;
424 }
425 else
426 {
427 unsigned int steps;
428
429 /* Skip gaps at the end. */
430 do
431 {
432 steps = btrace_insn_prev (&insn, 1);
433 if (steps == 0)
434 break;
435
436 insns = btrace_insn_number (&insn);
437 }
438 while (insns == 0);
439 }
440
441 gaps = btinfo->ngaps;
23a7fe75 442 }
afedecd3 443
31fd9caa
MM
444 printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
445 "for thread %d (%s).\n"), insns, calls, gaps,
446 tp->num, target_pid_to_str (tp->ptid));
07bbe694
MM
447
448 if (btrace_is_replaying (tp))
449 printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
450 btrace_insn_number (btinfo->replay));
afedecd3
MM
451}
452
31fd9caa
MM
453/* Print a decode error. */
454
455static void
456btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
457 enum btrace_format format)
458{
459 const char *errstr;
460 int is_error;
461
462 errstr = _("unknown");
463 is_error = 1;
464
465 switch (format)
466 {
467 default:
468 break;
469
470 case BTRACE_FORMAT_BTS:
471 switch (errcode)
472 {
473 default:
474 break;
475
476 case BDE_BTS_OVERFLOW:
477 errstr = _("instruction overflow");
478 break;
479
480 case BDE_BTS_INSN_SIZE:
481 errstr = _("unknown instruction");
482 break;
483 }
484 break;
b20a6524
MM
485
486#if defined (HAVE_LIBIPT)
487 case BTRACE_FORMAT_PT:
488 switch (errcode)
489 {
490 case BDE_PT_USER_QUIT:
491 is_error = 0;
492 errstr = _("trace decode cancelled");
493 break;
494
495 case BDE_PT_DISABLED:
496 is_error = 0;
497 errstr = _("disabled");
498 break;
499
500 case BDE_PT_OVERFLOW:
501 is_error = 0;
502 errstr = _("overflow");
503 break;
504
505 default:
506 if (errcode < 0)
507 errstr = pt_errstr (pt_errcode (errcode));
508 break;
509 }
510 break;
511#endif /* defined (HAVE_LIBIPT) */
31fd9caa
MM
512 }
513
514 ui_out_text (uiout, _("["));
515 if (is_error)
516 {
517 ui_out_text (uiout, _("decode error ("));
518 ui_out_field_int (uiout, "errcode", errcode);
519 ui_out_text (uiout, _("): "));
520 }
521 ui_out_text (uiout, errstr);
522 ui_out_text (uiout, _("]\n"));
523}
524
afedecd3
MM
525/* Print an unsigned int. */
526
527static void
528ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
529{
530 ui_out_field_fmt (uiout, fld, "%u", val);
531}
532
533/* Disassemble a section of the recorded instruction trace. */
534
535static void
23a7fe75 536btrace_insn_history (struct ui_out *uiout,
31fd9caa 537 const struct btrace_thread_info *btinfo,
23a7fe75
MM
538 const struct btrace_insn_iterator *begin,
539 const struct btrace_insn_iterator *end, int flags)
afedecd3
MM
540{
541 struct gdbarch *gdbarch;
23a7fe75 542 struct btrace_insn_iterator it;
afedecd3 543
23a7fe75
MM
544 DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
545 btrace_insn_number (end));
afedecd3
MM
546
547 gdbarch = target_gdbarch ();
548
23a7fe75 549 for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
afedecd3 550 {
23a7fe75
MM
551 const struct btrace_insn *insn;
552
553 insn = btrace_insn_get (&it);
554
31fd9caa
MM
555 /* A NULL instruction indicates a gap in the trace. */
556 if (insn == NULL)
557 {
558 const struct btrace_config *conf;
559
560 conf = btrace_conf (btinfo);
afedecd3 561
31fd9caa
MM
562 /* We have trace so we must have a configuration. */
563 gdb_assert (conf != NULL);
564
565 btrace_ui_out_decode_error (uiout, it.function->errcode,
566 conf->format);
567 }
568 else
569 {
da8c46d2
MM
570 char prefix[4];
571
572 /* We may add a speculation prefix later. We use the same space
573 that is used for the pc prefix. */
574 if ((flags & DISASSEMBLY_OMIT_PC) == 0)
575 strncpy (prefix, pc_prefix (insn->pc), 3);
576 else
577 {
578 prefix[0] = ' ';
579 prefix[1] = ' ';
580 prefix[2] = ' ';
581 }
582 prefix[3] = 0;
583
31fd9caa
MM
584 /* Print the instruction index. */
585 ui_out_field_uint (uiout, "index", btrace_insn_number (&it));
586 ui_out_text (uiout, "\t");
587
da8c46d2
MM
588 /* Indicate speculative execution by a leading '?'. */
589 if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
590 prefix[0] = '?';
591
592 /* Print the prefix; we tell gdb_disassembly below to omit it. */
593 ui_out_field_fmt (uiout, "prefix", "%s", prefix);
594
31fd9caa
MM
595 /* Disassembly with '/m' flag may not produce the expected result.
596 See PR gdb/11833. */
da8c46d2
MM
597 gdb_disassembly (gdbarch, uiout, NULL, flags | DISASSEMBLY_OMIT_PC,
598 1, insn->pc, insn->pc + 1);
31fd9caa 599 }
afedecd3
MM
600 }
601}
602
603/* The to_insn_history method of target record-btrace. */
604
605static void
7a6c5609 606record_btrace_insn_history (struct target_ops *self, int size, int flags)
afedecd3
MM
607{
608 struct btrace_thread_info *btinfo;
23a7fe75
MM
609 struct btrace_insn_history *history;
610 struct btrace_insn_iterator begin, end;
afedecd3
MM
611 struct cleanup *uiout_cleanup;
612 struct ui_out *uiout;
23a7fe75 613 unsigned int context, covered;
afedecd3
MM
614
615 uiout = current_uiout;
616 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
617 "insn history");
afedecd3 618 context = abs (size);
afedecd3
MM
619 if (context == 0)
620 error (_("Bad record instruction-history-size."));
621
23a7fe75
MM
622 btinfo = require_btrace ();
623 history = btinfo->insn_history;
624 if (history == NULL)
afedecd3 625 {
07bbe694 626 struct btrace_insn_iterator *replay;
afedecd3 627
23a7fe75 628 DEBUG ("insn-history (0x%x): %d", flags, size);
afedecd3 629
07bbe694
MM
630 /* If we're replaying, we start at the replay position. Otherwise, we
631 start at the tail of the trace. */
632 replay = btinfo->replay;
633 if (replay != NULL)
634 begin = *replay;
635 else
636 btrace_insn_end (&begin, btinfo);
637
638 /* We start from here and expand in the requested direction. Then we
639 expand in the other direction, as well, to fill up any remaining
640 context. */
641 end = begin;
642 if (size < 0)
643 {
644 /* We want the current position covered, as well. */
645 covered = btrace_insn_next (&end, 1);
646 covered += btrace_insn_prev (&begin, context - covered);
647 covered += btrace_insn_next (&end, context - covered);
648 }
649 else
650 {
651 covered = btrace_insn_next (&end, context);
652 covered += btrace_insn_prev (&begin, context - covered);
653 }
afedecd3
MM
654 }
655 else
656 {
23a7fe75
MM
657 begin = history->begin;
658 end = history->end;
afedecd3 659
23a7fe75
MM
660 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
661 btrace_insn_number (&begin), btrace_insn_number (&end));
afedecd3 662
23a7fe75
MM
663 if (size < 0)
664 {
665 end = begin;
666 covered = btrace_insn_prev (&begin, context);
667 }
668 else
669 {
670 begin = end;
671 covered = btrace_insn_next (&end, context);
672 }
afedecd3
MM
673 }
674
23a7fe75 675 if (covered > 0)
31fd9caa 676 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
23a7fe75
MM
677 else
678 {
679 if (size < 0)
680 printf_unfiltered (_("At the start of the branch trace record.\n"));
681 else
682 printf_unfiltered (_("At the end of the branch trace record.\n"));
683 }
afedecd3 684
23a7fe75 685 btrace_set_insn_history (btinfo, &begin, &end);
afedecd3
MM
686 do_cleanups (uiout_cleanup);
687}
688
689/* The to_insn_history_range method of target record-btrace. */
690
691static void
4e99c6b7
TT
692record_btrace_insn_history_range (struct target_ops *self,
693 ULONGEST from, ULONGEST to, int flags)
afedecd3
MM
694{
695 struct btrace_thread_info *btinfo;
23a7fe75
MM
696 struct btrace_insn_history *history;
697 struct btrace_insn_iterator begin, end;
afedecd3
MM
698 struct cleanup *uiout_cleanup;
699 struct ui_out *uiout;
23a7fe75
MM
700 unsigned int low, high;
701 int found;
afedecd3
MM
702
703 uiout = current_uiout;
704 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
705 "insn history");
23a7fe75
MM
706 low = from;
707 high = to;
afedecd3 708
23a7fe75 709 DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);
afedecd3
MM
710
711 /* Check for wrap-arounds. */
23a7fe75 712 if (low != from || high != to)
afedecd3
MM
713 error (_("Bad range."));
714
0688d04e 715 if (high < low)
afedecd3
MM
716 error (_("Bad range."));
717
23a7fe75 718 btinfo = require_btrace ();
afedecd3 719
23a7fe75
MM
720 found = btrace_find_insn_by_number (&begin, btinfo, low);
721 if (found == 0)
722 error (_("Range out of bounds."));
afedecd3 723
23a7fe75
MM
724 found = btrace_find_insn_by_number (&end, btinfo, high);
725 if (found == 0)
0688d04e
MM
726 {
727 /* Silently truncate the range. */
728 btrace_insn_end (&end, btinfo);
729 }
730 else
731 {
732 /* We want both begin and end to be inclusive. */
733 btrace_insn_next (&end, 1);
734 }
afedecd3 735
31fd9caa 736 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
23a7fe75 737 btrace_set_insn_history (btinfo, &begin, &end);
afedecd3
MM
738
739 do_cleanups (uiout_cleanup);
740}
741
742/* The to_insn_history_from method of target record-btrace. */
743
744static void
9abc3ff3
TT
745record_btrace_insn_history_from (struct target_ops *self,
746 ULONGEST from, int size, int flags)
afedecd3
MM
747{
748 ULONGEST begin, end, context;
749
750 context = abs (size);
0688d04e
MM
751 if (context == 0)
752 error (_("Bad record instruction-history-size."));
afedecd3
MM
753
754 if (size < 0)
755 {
756 end = from;
757
758 if (from < context)
759 begin = 0;
760 else
0688d04e 761 begin = from - context + 1;
afedecd3
MM
762 }
763 else
764 {
765 begin = from;
0688d04e 766 end = from + context - 1;
afedecd3
MM
767
768 /* Check for wrap-around. */
769 if (end < begin)
770 end = ULONGEST_MAX;
771 }
772
4e99c6b7 773 record_btrace_insn_history_range (self, begin, end, flags);
afedecd3
MM
774}
775
776/* Print the instruction number range for a function call history line. */
777
778static void
23a7fe75
MM
779btrace_call_history_insn_range (struct ui_out *uiout,
780 const struct btrace_function *bfun)
afedecd3 781{
7acbe133
MM
782 unsigned int begin, end, size;
783
784 size = VEC_length (btrace_insn_s, bfun->insn);
785 gdb_assert (size > 0);
afedecd3 786
23a7fe75 787 begin = bfun->insn_offset;
7acbe133 788 end = begin + size - 1;
afedecd3 789
23a7fe75 790 ui_out_field_uint (uiout, "insn begin", begin);
8710b709 791 ui_out_text (uiout, ",");
23a7fe75 792 ui_out_field_uint (uiout, "insn end", end);
afedecd3
MM
793}
794
ce0dfbea
MM
795/* Compute the lowest and highest source line for the instructions in BFUN
796 and return them in PBEGIN and PEND.
797 Ignore instructions that can't be mapped to BFUN, e.g. instructions that
798 result from inlining or macro expansion. */
799
800static void
801btrace_compute_src_line_range (const struct btrace_function *bfun,
802 int *pbegin, int *pend)
803{
804 struct btrace_insn *insn;
805 struct symtab *symtab;
806 struct symbol *sym;
807 unsigned int idx;
808 int begin, end;
809
810 begin = INT_MAX;
811 end = INT_MIN;
812
813 sym = bfun->sym;
814 if (sym == NULL)
815 goto out;
816
817 symtab = symbol_symtab (sym);
818
819 for (idx = 0; VEC_iterate (btrace_insn_s, bfun->insn, idx, insn); ++idx)
820 {
821 struct symtab_and_line sal;
822
823 sal = find_pc_line (insn->pc, 0);
824 if (sal.symtab != symtab || sal.line == 0)
825 continue;
826
827 begin = min (begin, sal.line);
828 end = max (end, sal.line);
829 }
830
831 out:
832 *pbegin = begin;
833 *pend = end;
834}
835
afedecd3
MM
836/* Print the source line information for a function call history line. */
837
838static void
23a7fe75
MM
839btrace_call_history_src_line (struct ui_out *uiout,
840 const struct btrace_function *bfun)
afedecd3
MM
841{
842 struct symbol *sym;
23a7fe75 843 int begin, end;
afedecd3
MM
844
845 sym = bfun->sym;
846 if (sym == NULL)
847 return;
848
849 ui_out_field_string (uiout, "file",
08be3fe3 850 symtab_to_filename_for_display (symbol_symtab (sym)));
afedecd3 851
ce0dfbea 852 btrace_compute_src_line_range (bfun, &begin, &end);
23a7fe75 853 if (end < begin)
afedecd3
MM
854 return;
855
856 ui_out_text (uiout, ":");
23a7fe75 857 ui_out_field_int (uiout, "min line", begin);
afedecd3 858
23a7fe75 859 if (end == begin)
afedecd3
MM
860 return;
861
8710b709 862 ui_out_text (uiout, ",");
23a7fe75 863 ui_out_field_int (uiout, "max line", end);
afedecd3
MM
864}
865
0b722aec
MM
866/* Get the name of a branch trace function. */
867
868static const char *
869btrace_get_bfun_name (const struct btrace_function *bfun)
870{
871 struct minimal_symbol *msym;
872 struct symbol *sym;
873
874 if (bfun == NULL)
875 return "??";
876
877 msym = bfun->msym;
878 sym = bfun->sym;
879
880 if (sym != NULL)
881 return SYMBOL_PRINT_NAME (sym);
882 else if (msym != NULL)
efd66ac6 883 return MSYMBOL_PRINT_NAME (msym);
0b722aec
MM
884 else
885 return "??";
886}
887
afedecd3
MM
 888/* Print a section of the recorded function call trace. */
889
890static void
23a7fe75 891btrace_call_history (struct ui_out *uiout,
8710b709 892 const struct btrace_thread_info *btinfo,
23a7fe75
MM
893 const struct btrace_call_iterator *begin,
894 const struct btrace_call_iterator *end,
afedecd3
MM
895 enum record_print_flag flags)
896{
23a7fe75 897 struct btrace_call_iterator it;
afedecd3 898
23a7fe75
MM
899 DEBUG ("ftrace (0x%x): [%u; %u)", flags, btrace_call_number (begin),
900 btrace_call_number (end));
afedecd3 901
23a7fe75 902 for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
afedecd3 903 {
23a7fe75
MM
904 const struct btrace_function *bfun;
905 struct minimal_symbol *msym;
906 struct symbol *sym;
907
908 bfun = btrace_call_get (&it);
23a7fe75 909 sym = bfun->sym;
0b722aec 910 msym = bfun->msym;
23a7fe75 911
afedecd3 912 /* Print the function index. */
23a7fe75 913 ui_out_field_uint (uiout, "index", bfun->number);
afedecd3
MM
914 ui_out_text (uiout, "\t");
915
31fd9caa
MM
916 /* Indicate gaps in the trace. */
917 if (bfun->errcode != 0)
918 {
919 const struct btrace_config *conf;
920
921 conf = btrace_conf (btinfo);
922
923 /* We have trace so we must have a configuration. */
924 gdb_assert (conf != NULL);
925
926 btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);
927
928 continue;
929 }
930
8710b709
MM
931 if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
932 {
933 int level = bfun->level + btinfo->level, i;
934
935 for (i = 0; i < level; ++i)
936 ui_out_text (uiout, " ");
937 }
938
939 if (sym != NULL)
940 ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (sym));
941 else if (msym != NULL)
efd66ac6 942 ui_out_field_string (uiout, "function", MSYMBOL_PRINT_NAME (msym));
8710b709
MM
943 else if (!ui_out_is_mi_like_p (uiout))
944 ui_out_field_string (uiout, "function", "??");
945
1e038f67 946 if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
afedecd3 947 {
8710b709 948 ui_out_text (uiout, _("\tinst "));
23a7fe75 949 btrace_call_history_insn_range (uiout, bfun);
afedecd3
MM
950 }
951
1e038f67 952 if ((flags & RECORD_PRINT_SRC_LINE) != 0)
afedecd3 953 {
8710b709 954 ui_out_text (uiout, _("\tat "));
23a7fe75 955 btrace_call_history_src_line (uiout, bfun);
afedecd3
MM
956 }
957
afedecd3
MM
958 ui_out_text (uiout, "\n");
959 }
960}
961
962/* The to_call_history method of target record-btrace. */
963
964static void
5df2fcba 965record_btrace_call_history (struct target_ops *self, int size, int flags)
afedecd3
MM
966{
967 struct btrace_thread_info *btinfo;
23a7fe75
MM
968 struct btrace_call_history *history;
969 struct btrace_call_iterator begin, end;
afedecd3
MM
970 struct cleanup *uiout_cleanup;
971 struct ui_out *uiout;
23a7fe75 972 unsigned int context, covered;
afedecd3
MM
973
974 uiout = current_uiout;
975 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
976 "insn history");
afedecd3 977 context = abs (size);
afedecd3
MM
978 if (context == 0)
979 error (_("Bad record function-call-history-size."));
980
23a7fe75
MM
981 btinfo = require_btrace ();
982 history = btinfo->call_history;
983 if (history == NULL)
afedecd3 984 {
07bbe694 985 struct btrace_insn_iterator *replay;
afedecd3 986
23a7fe75 987 DEBUG ("call-history (0x%x): %d", flags, size);
afedecd3 988
07bbe694
MM
989 /* If we're replaying, we start at the replay position. Otherwise, we
990 start at the tail of the trace. */
991 replay = btinfo->replay;
992 if (replay != NULL)
993 {
994 begin.function = replay->function;
995 begin.btinfo = btinfo;
996 }
997 else
998 btrace_call_end (&begin, btinfo);
999
1000 /* We start from here and expand in the requested direction. Then we
1001 expand in the other direction, as well, to fill up any remaining
1002 context. */
1003 end = begin;
1004 if (size < 0)
1005 {
1006 /* We want the current position covered, as well. */
1007 covered = btrace_call_next (&end, 1);
1008 covered += btrace_call_prev (&begin, context - covered);
1009 covered += btrace_call_next (&end, context - covered);
1010 }
1011 else
1012 {
1013 covered = btrace_call_next (&end, context);
1014 covered += btrace_call_prev (&begin, context- covered);
1015 }
afedecd3
MM
1016 }
1017 else
1018 {
23a7fe75
MM
1019 begin = history->begin;
1020 end = history->end;
afedecd3 1021
23a7fe75
MM
1022 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", flags, size,
1023 btrace_call_number (&begin), btrace_call_number (&end));
afedecd3 1024
23a7fe75
MM
1025 if (size < 0)
1026 {
1027 end = begin;
1028 covered = btrace_call_prev (&begin, context);
1029 }
1030 else
1031 {
1032 begin = end;
1033 covered = btrace_call_next (&end, context);
1034 }
afedecd3
MM
1035 }
1036
23a7fe75 1037 if (covered > 0)
8710b709 1038 btrace_call_history (uiout, btinfo, &begin, &end, flags);
23a7fe75
MM
1039 else
1040 {
1041 if (size < 0)
1042 printf_unfiltered (_("At the start of the branch trace record.\n"));
1043 else
1044 printf_unfiltered (_("At the end of the branch trace record.\n"));
1045 }
afedecd3 1046
23a7fe75 1047 btrace_set_call_history (btinfo, &begin, &end);
afedecd3
MM
1048 do_cleanups (uiout_cleanup);
1049}
1050
1051/* The to_call_history_range method of target record-btrace. */
1052
1053static void
f0d960ea
TT
1054record_btrace_call_history_range (struct target_ops *self,
1055 ULONGEST from, ULONGEST to, int flags)
afedecd3
MM
1056{
1057 struct btrace_thread_info *btinfo;
23a7fe75
MM
1058 struct btrace_call_history *history;
1059 struct btrace_call_iterator begin, end;
afedecd3
MM
1060 struct cleanup *uiout_cleanup;
1061 struct ui_out *uiout;
23a7fe75
MM
1062 unsigned int low, high;
1063 int found;
afedecd3
MM
1064
1065 uiout = current_uiout;
1066 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
1067 "func history");
23a7fe75
MM
1068 low = from;
1069 high = to;
afedecd3 1070
23a7fe75 1071 DEBUG ("call-history (0x%x): [%u; %u)", flags, low, high);
afedecd3
MM
1072
1073 /* Check for wrap-arounds. */
23a7fe75 1074 if (low != from || high != to)
afedecd3
MM
1075 error (_("Bad range."));
1076
0688d04e 1077 if (high < low)
afedecd3
MM
1078 error (_("Bad range."));
1079
23a7fe75 1080 btinfo = require_btrace ();
afedecd3 1081
23a7fe75
MM
1082 found = btrace_find_call_by_number (&begin, btinfo, low);
1083 if (found == 0)
1084 error (_("Range out of bounds."));
afedecd3 1085
23a7fe75
MM
1086 found = btrace_find_call_by_number (&end, btinfo, high);
1087 if (found == 0)
0688d04e
MM
1088 {
1089 /* Silently truncate the range. */
1090 btrace_call_end (&end, btinfo);
1091 }
1092 else
1093 {
1094 /* We want both begin and end to be inclusive. */
1095 btrace_call_next (&end, 1);
1096 }
afedecd3 1097
8710b709 1098 btrace_call_history (uiout, btinfo, &begin, &end, flags);
23a7fe75 1099 btrace_set_call_history (btinfo, &begin, &end);
afedecd3
MM
1100
1101 do_cleanups (uiout_cleanup);
1102}
1103
1104/* The to_call_history_from method of target record-btrace. */
1105
1106static void
ec0aea04
TT
1107record_btrace_call_history_from (struct target_ops *self,
1108 ULONGEST from, int size, int flags)
afedecd3
MM
1109{
1110 ULONGEST begin, end, context;
1111
1112 context = abs (size);
0688d04e
MM
1113 if (context == 0)
1114 error (_("Bad record function-call-history-size."));
afedecd3
MM
1115
1116 if (size < 0)
1117 {
1118 end = from;
1119
1120 if (from < context)
1121 begin = 0;
1122 else
0688d04e 1123 begin = from - context + 1;
afedecd3
MM
1124 }
1125 else
1126 {
1127 begin = from;
0688d04e 1128 end = from + context - 1;
afedecd3
MM
1129
1130 /* Check for wrap-around. */
1131 if (end < begin)
1132 end = ULONGEST_MAX;
1133 }
1134
f0d960ea 1135 record_btrace_call_history_range (self, begin, end, flags);
afedecd3
MM
1136}
1137
07bbe694
MM
1138/* The to_record_is_replaying method of target record-btrace. */
1139
1140static int
1c63c994 1141record_btrace_is_replaying (struct target_ops *self)
07bbe694
MM
1142{
1143 struct thread_info *tp;
1144
034f788c 1145 ALL_NON_EXITED_THREADS (tp)
07bbe694
MM
1146 if (btrace_is_replaying (tp))
1147 return 1;
1148
1149 return 0;
1150}
1151
633785ff
MM
1152/* The to_xfer_partial method of target record-btrace. */
1153
9b409511 1154static enum target_xfer_status
633785ff
MM
1155record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
1156 const char *annex, gdb_byte *readbuf,
1157 const gdb_byte *writebuf, ULONGEST offset,
9b409511 1158 ULONGEST len, ULONGEST *xfered_len)
633785ff
MM
1159{
1160 struct target_ops *t;
1161
1162 /* Filter out requests that don't make sense during replay. */
67b5c0c1 1163 if (replay_memory_access == replay_memory_access_read_only
aef92902 1164 && !record_btrace_generating_corefile
67b5c0c1 1165 && record_btrace_is_replaying (ops))
633785ff
MM
1166 {
1167 switch (object)
1168 {
1169 case TARGET_OBJECT_MEMORY:
1170 {
1171 struct target_section *section;
1172
1173 /* We do not allow writing memory in general. */
1174 if (writebuf != NULL)
9b409511
YQ
1175 {
1176 *xfered_len = len;
bc113b4e 1177 return TARGET_XFER_UNAVAILABLE;
9b409511 1178 }
633785ff
MM
1179
1180 /* We allow reading readonly memory. */
1181 section = target_section_by_addr (ops, offset);
1182 if (section != NULL)
1183 {
1184 /* Check if the section we found is readonly. */
1185 if ((bfd_get_section_flags (section->the_bfd_section->owner,
1186 section->the_bfd_section)
1187 & SEC_READONLY) != 0)
1188 {
1189 /* Truncate the request to fit into this section. */
1190 len = min (len, section->endaddr - offset);
1191 break;
1192 }
1193 }
1194
9b409511 1195 *xfered_len = len;
bc113b4e 1196 return TARGET_XFER_UNAVAILABLE;
633785ff
MM
1197 }
1198 }
1199 }
1200
1201 /* Forward the request. */
e75fdfca
TT
1202 ops = ops->beneath;
1203 return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
1204 offset, len, xfered_len);
633785ff
MM
1205}
1206
1207/* The to_insert_breakpoint method of target record-btrace. */
1208
1209static int
1210record_btrace_insert_breakpoint (struct target_ops *ops,
1211 struct gdbarch *gdbarch,
1212 struct bp_target_info *bp_tgt)
1213{
67b5c0c1
MM
1214 const char *old;
1215 int ret;
633785ff
MM
1216
1217 /* Inserting breakpoints requires accessing memory. Allow it for the
1218 duration of this function. */
67b5c0c1
MM
1219 old = replay_memory_access;
1220 replay_memory_access = replay_memory_access_read_write;
633785ff
MM
1221
1222 ret = 0;
492d29ea
PA
1223 TRY
1224 {
1225 ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);
1226 }
492d29ea
PA
1227 CATCH (except, RETURN_MASK_ALL)
1228 {
6c63c96a 1229 replay_memory_access = old;
492d29ea
PA
1230 throw_exception (except);
1231 }
1232 END_CATCH
6c63c96a 1233 replay_memory_access = old;
633785ff
MM
1234
1235 return ret;
1236}
1237
1238/* The to_remove_breakpoint method of target record-btrace. */
1239
1240static int
1241record_btrace_remove_breakpoint (struct target_ops *ops,
1242 struct gdbarch *gdbarch,
1243 struct bp_target_info *bp_tgt)
1244{
67b5c0c1
MM
1245 const char *old;
1246 int ret;
633785ff
MM
1247
1248 /* Removing breakpoints requires accessing memory. Allow it for the
1249 duration of this function. */
67b5c0c1
MM
1250 old = replay_memory_access;
1251 replay_memory_access = replay_memory_access_read_write;
633785ff
MM
1252
1253 ret = 0;
492d29ea
PA
1254 TRY
1255 {
1256 ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt);
1257 }
492d29ea
PA
1258 CATCH (except, RETURN_MASK_ALL)
1259 {
6c63c96a 1260 replay_memory_access = old;
492d29ea
PA
1261 throw_exception (except);
1262 }
1263 END_CATCH
6c63c96a 1264 replay_memory_access = old;
633785ff
MM
1265
1266 return ret;
1267}
1268
1f3ef581
MM
1269/* The to_fetch_registers method of target record-btrace. */
1270
1271static void
1272record_btrace_fetch_registers (struct target_ops *ops,
1273 struct regcache *regcache, int regno)
1274{
1275 struct btrace_insn_iterator *replay;
1276 struct thread_info *tp;
1277
1278 tp = find_thread_ptid (inferior_ptid);
1279 gdb_assert (tp != NULL);
1280
1281 replay = tp->btrace.replay;
aef92902 1282 if (replay != NULL && !record_btrace_generating_corefile)
1f3ef581
MM
1283 {
1284 const struct btrace_insn *insn;
1285 struct gdbarch *gdbarch;
1286 int pcreg;
1287
1288 gdbarch = get_regcache_arch (regcache);
1289 pcreg = gdbarch_pc_regnum (gdbarch);
1290 if (pcreg < 0)
1291 return;
1292
1293 /* We can only provide the PC register. */
1294 if (regno >= 0 && regno != pcreg)
1295 return;
1296
1297 insn = btrace_insn_get (replay);
1298 gdb_assert (insn != NULL);
1299
1300 regcache_raw_supply (regcache, regno, &insn->pc);
1301 }
1302 else
1303 {
e75fdfca 1304 struct target_ops *t = ops->beneath;
1f3ef581 1305
e75fdfca 1306 t->to_fetch_registers (t, regcache, regno);
1f3ef581
MM
1307 }
1308}
1309
1310/* The to_store_registers method of target record-btrace. */
1311
1312static void
1313record_btrace_store_registers (struct target_ops *ops,
1314 struct regcache *regcache, int regno)
1315{
1316 struct target_ops *t;
1317
aef92902 1318 if (!record_btrace_generating_corefile && record_btrace_is_replaying (ops))
1f3ef581
MM
1319 error (_("This record target does not allow writing registers."));
1320
1321 gdb_assert (may_write_registers != 0);
1322
e75fdfca
TT
1323 t = ops->beneath;
1324 t->to_store_registers (t, regcache, regno);
1f3ef581
MM
1325}
1326
1327/* The to_prepare_to_store method of target record-btrace. */
1328
1329static void
1330record_btrace_prepare_to_store (struct target_ops *ops,
1331 struct regcache *regcache)
1332{
1333 struct target_ops *t;
1334
aef92902 1335 if (!record_btrace_generating_corefile && record_btrace_is_replaying (ops))
1f3ef581
MM
1336 return;
1337
e75fdfca
TT
1338 t = ops->beneath;
1339 t->to_prepare_to_store (t, regcache);
1f3ef581
MM
1340}
1341
0b722aec
MM
1342/* The branch trace frame cache. */
1343
1344struct btrace_frame_cache
1345{
1346 /* The thread. */
1347 struct thread_info *tp;
1348
1349 /* The frame info. */
1350 struct frame_info *frame;
1351
1352 /* The branch trace function segment. */
1353 const struct btrace_function *bfun;
1354};
1355
1356/* A struct btrace_frame_cache hash table indexed by NEXT. */
1357
1358static htab_t bfcache;
1359
1360/* hash_f for htab_create_alloc of bfcache. */
1361
1362static hashval_t
1363bfcache_hash (const void *arg)
1364{
1365 const struct btrace_frame_cache *cache = arg;
1366
1367 return htab_hash_pointer (cache->frame);
1368}
1369
1370/* eq_f for htab_create_alloc of bfcache. */
1371
1372static int
1373bfcache_eq (const void *arg1, const void *arg2)
1374{
1375 const struct btrace_frame_cache *cache1 = arg1;
1376 const struct btrace_frame_cache *cache2 = arg2;
1377
1378 return cache1->frame == cache2->frame;
1379}
1380
1381/* Create a new btrace frame cache. */
1382
1383static struct btrace_frame_cache *
1384bfcache_new (struct frame_info *frame)
1385{
1386 struct btrace_frame_cache *cache;
1387 void **slot;
1388
1389 cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
1390 cache->frame = frame;
1391
1392 slot = htab_find_slot (bfcache, cache, INSERT);
1393 gdb_assert (*slot == NULL);
1394 *slot = cache;
1395
1396 return cache;
1397}
1398
1399/* Extract the branch trace function from a branch trace frame. */
1400
1401static const struct btrace_function *
1402btrace_get_frame_function (struct frame_info *frame)
1403{
1404 const struct btrace_frame_cache *cache;
1405 const struct btrace_function *bfun;
1406 struct btrace_frame_cache pattern;
1407 void **slot;
1408
1409 pattern.frame = frame;
1410
1411 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1412 if (slot == NULL)
1413 return NULL;
1414
1415 cache = *slot;
1416 return cache->bfun;
1417}
1418
cecac1ab
MM
1419/* Implement stop_reason method for record_btrace_frame_unwind. */
1420
1421static enum unwind_stop_reason
1422record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1423 void **this_cache)
1424{
0b722aec
MM
1425 const struct btrace_frame_cache *cache;
1426 const struct btrace_function *bfun;
1427
1428 cache = *this_cache;
1429 bfun = cache->bfun;
1430 gdb_assert (bfun != NULL);
1431
1432 if (bfun->up == NULL)
1433 return UNWIND_UNAVAILABLE;
1434
1435 return UNWIND_NO_REASON;
cecac1ab
MM
1436}
1437
1438/* Implement this_id method for record_btrace_frame_unwind. */
1439
1440static void
1441record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
1442 struct frame_id *this_id)
1443{
0b722aec
MM
1444 const struct btrace_frame_cache *cache;
1445 const struct btrace_function *bfun;
1446 CORE_ADDR code, special;
1447
1448 cache = *this_cache;
1449
1450 bfun = cache->bfun;
1451 gdb_assert (bfun != NULL);
1452
1453 while (bfun->segment.prev != NULL)
1454 bfun = bfun->segment.prev;
1455
1456 code = get_frame_func (this_frame);
1457 special = bfun->number;
1458
1459 *this_id = frame_id_build_unavailable_stack_special (code, special);
1460
1461 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1462 btrace_get_bfun_name (cache->bfun),
1463 core_addr_to_string_nz (this_id->code_addr),
1464 core_addr_to_string_nz (this_id->special_addr));
cecac1ab
MM
1465}
1466
1467/* Implement prev_register method for record_btrace_frame_unwind. */
1468
1469static struct value *
1470record_btrace_frame_prev_register (struct frame_info *this_frame,
1471 void **this_cache,
1472 int regnum)
1473{
0b722aec
MM
1474 const struct btrace_frame_cache *cache;
1475 const struct btrace_function *bfun, *caller;
1476 const struct btrace_insn *insn;
1477 struct gdbarch *gdbarch;
1478 CORE_ADDR pc;
1479 int pcreg;
1480
1481 gdbarch = get_frame_arch (this_frame);
1482 pcreg = gdbarch_pc_regnum (gdbarch);
1483 if (pcreg < 0 || regnum != pcreg)
1484 throw_error (NOT_AVAILABLE_ERROR,
1485 _("Registers are not available in btrace record history"));
1486
1487 cache = *this_cache;
1488 bfun = cache->bfun;
1489 gdb_assert (bfun != NULL);
1490
1491 caller = bfun->up;
1492 if (caller == NULL)
1493 throw_error (NOT_AVAILABLE_ERROR,
1494 _("No caller in btrace record history"));
1495
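  /* Added comment: if this segment is linked to its caller via a return, the
     caller segment begins at the return address, so its first instruction is
     the PC to report.  Otherwise the caller's last instruction is the call,
     and the PC to report is the address right after it.  */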
1496 if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
1497 {
1498 insn = VEC_index (btrace_insn_s, caller->insn, 0);
1499 pc = insn->pc;
1500 }
1501 else
1502 {
1503 insn = VEC_last (btrace_insn_s, caller->insn);
1504 pc = insn->pc;
1505
1506 pc += gdb_insn_length (gdbarch, pc);
1507 }
1508
1509 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1510 btrace_get_bfun_name (bfun), bfun->level,
1511 core_addr_to_string_nz (pc));
1512
1513 return frame_unwind_got_address (this_frame, regnum, pc);
cecac1ab
MM
1514}
1515
1516/* Implement sniffer method for record_btrace_frame_unwind. */
1517
1518static int
1519record_btrace_frame_sniffer (const struct frame_unwind *self,
1520 struct frame_info *this_frame,
1521 void **this_cache)
1522{
0b722aec
MM
1523 const struct btrace_function *bfun;
1524 struct btrace_frame_cache *cache;
cecac1ab 1525 struct thread_info *tp;
0b722aec 1526 struct frame_info *next;
cecac1ab
MM
1527
1528 /* THIS_FRAME does not contain a reference to its thread. */
1529 tp = find_thread_ptid (inferior_ptid);
1530 gdb_assert (tp != NULL);
1531
0b722aec
MM
1532 bfun = NULL;
1533 next = get_next_frame (this_frame);
1534 if (next == NULL)
1535 {
1536 const struct btrace_insn_iterator *replay;
1537
1538 replay = tp->btrace.replay;
1539 if (replay != NULL)
1540 bfun = replay->function;
1541 }
1542 else
1543 {
1544 const struct btrace_function *callee;
1545
1546 callee = btrace_get_frame_function (next);
1547 if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1548 bfun = callee->up;
1549 }
1550
1551 if (bfun == NULL)
1552 return 0;
1553
1554 DEBUG ("[frame] sniffed frame for %s on level %d",
1555 btrace_get_bfun_name (bfun), bfun->level);
1556
1557 /* This is our frame. Initialize the frame cache. */
1558 cache = bfcache_new (this_frame);
1559 cache->tp = tp;
1560 cache->bfun = bfun;
1561
1562 *this_cache = cache;
1563 return 1;
1564}
1565
1566/* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1567
1568static int
1569record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
1570 struct frame_info *this_frame,
1571 void **this_cache)
1572{
1573 const struct btrace_function *bfun, *callee;
1574 struct btrace_frame_cache *cache;
1575 struct frame_info *next;
1576
1577 next = get_next_frame (this_frame);
1578 if (next == NULL)
1579 return 0;
1580
1581 callee = btrace_get_frame_function (next);
1582 if (callee == NULL)
1583 return 0;
1584
1585 if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1586 return 0;
1587
1588 bfun = callee->up;
1589 if (bfun == NULL)
1590 return 0;
1591
1592 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1593 btrace_get_bfun_name (bfun), bfun->level);
1594
1595 /* This is our frame. Initialize the frame cache. */
1596 cache = bfcache_new (this_frame);
1597 cache->tp = find_thread_ptid (inferior_ptid);
1598 cache->bfun = bfun;
1599
1600 *this_cache = cache;
1601 return 1;
1602}
1603
1604static void
1605record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
1606{
1607 struct btrace_frame_cache *cache;
1608 void **slot;
1609
1610 cache = this_cache;
1611
1612 slot = htab_find_slot (bfcache, cache, NO_INSERT);
1613 gdb_assert (slot != NULL);
1614
1615 htab_remove_elt (bfcache, cache);
cecac1ab
MM
1616}
1617
 1618/* btrace recording does not store previous memory content, nor the stack
 1619 frames' content. Any unwinding would return erroneous results as the stack
 1620 contents no longer match the changed PC value restored from history.
1621 Therefore this unwinder reports any possibly unwound registers as
1622 <unavailable>. */
1623
0b722aec 1624const struct frame_unwind record_btrace_frame_unwind =
cecac1ab
MM
1625{
1626 NORMAL_FRAME,
1627 record_btrace_frame_unwind_stop_reason,
1628 record_btrace_frame_this_id,
1629 record_btrace_frame_prev_register,
1630 NULL,
0b722aec
MM
1631 record_btrace_frame_sniffer,
1632 record_btrace_frame_dealloc_cache
1633};
1634
1635const struct frame_unwind record_btrace_tailcall_frame_unwind =
1636{
1637 TAILCALL_FRAME,
1638 record_btrace_frame_unwind_stop_reason,
1639 record_btrace_frame_this_id,
1640 record_btrace_frame_prev_register,
1641 NULL,
1642 record_btrace_tailcall_frame_sniffer,
1643 record_btrace_frame_dealloc_cache
cecac1ab 1644};
b2f4cfde 1645
ac01945b
TT
1646/* Implement the to_get_unwinder method. */
1647
1648static const struct frame_unwind *
1649record_btrace_to_get_unwinder (struct target_ops *self)
1650{
1651 return &record_btrace_frame_unwind;
1652}
1653
1654/* Implement the to_get_tailcall_unwinder method. */
1655
1656static const struct frame_unwind *
1657record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
1658{
1659 return &record_btrace_tailcall_frame_unwind;
1660}
1661
987e68b1
MM
1662/* Return a human-readable string for FLAG. */
1663
1664static const char *
1665btrace_thread_flag_to_str (enum btrace_thread_flag flag)
1666{
1667 switch (flag)
1668 {
1669 case BTHR_STEP:
1670 return "step";
1671
1672 case BTHR_RSTEP:
1673 return "reverse-step";
1674
1675 case BTHR_CONT:
1676 return "cont";
1677
1678 case BTHR_RCONT:
1679 return "reverse-cont";
1680
1681 case BTHR_STOP:
1682 return "stop";
1683 }
1684
1685 return "<invalid>";
1686}
1687
52834460
MM
1688/* Indicate that TP should be resumed according to FLAG. */
1689
1690static void
1691record_btrace_resume_thread (struct thread_info *tp,
1692 enum btrace_thread_flag flag)
1693{
1694 struct btrace_thread_info *btinfo;
1695
987e68b1
MM
1696 DEBUG ("resuming thread %d (%s): %x (%s)", tp->num,
1697 target_pid_to_str (tp->ptid), flag, btrace_thread_flag_to_str (flag));
52834460
MM
1698
1699 btinfo = &tp->btrace;
1700
1701 if ((btinfo->flags & BTHR_MOVE) != 0)
1702 error (_("Thread already moving."));
1703
1704 /* Fetch the latest branch trace. */
1705 btrace_fetch (tp);
1706
6e4879f0
MM
1707 /* A resume request overwrites a preceding stop request. */
1708 btinfo->flags &= ~BTHR_STOP;
52834460
MM
1709 btinfo->flags |= flag;
1710}
1711
1712/* Find the thread to resume given a PTID. */
1713
1714static struct thread_info *
1715record_btrace_find_resume_thread (ptid_t ptid)
1716{
1717 struct thread_info *tp;
1718
1719 /* When asked to resume everything, we pick the current thread. */
1720 if (ptid_equal (minus_one_ptid, ptid) || ptid_is_pid (ptid))
1721 ptid = inferior_ptid;
1722
1723 return find_thread_ptid (ptid);
1724}
1725
1726/* Start replaying a thread. */
1727
1728static struct btrace_insn_iterator *
1729record_btrace_start_replaying (struct thread_info *tp)
1730{
52834460
MM
1731 struct btrace_insn_iterator *replay;
1732 struct btrace_thread_info *btinfo;
1733 int executing;
1734
1735 btinfo = &tp->btrace;
1736 replay = NULL;
1737
1738 /* We can't start replaying without trace. */
1739 if (btinfo->begin == NULL)
1740 return NULL;
1741
1742 /* Clear the executing flag to allow changes to the current frame.
1743 We are not actually running, yet. We just started a reverse execution
1744 command or a record goto command.
1745 For the latter, EXECUTING is false and this has no effect.
1746 For the former, EXECUTING is true and we're in to_wait, about to
1747 move the thread. Since we need to recompute the stack, we temporarily
 1748 set EXECUTING to false. */
1749 executing = is_executing (tp->ptid);
1750 set_executing (tp->ptid, 0);
1751
 1752 /* GDB stores the current frame_id when stepping in order to detect steps
1753 into subroutines.
1754 Since frames are computed differently when we're replaying, we need to
1755 recompute those stored frames and fix them up so we can still detect
1756 subroutines after we started replaying. */
492d29ea 1757 TRY
52834460
MM
1758 {
1759 struct frame_info *frame;
1760 struct frame_id frame_id;
1761 int upd_step_frame_id, upd_step_stack_frame_id;
1762
1763 /* The current frame without replaying - computed via normal unwind. */
1764 frame = get_current_frame ();
1765 frame_id = get_frame_id (frame);
1766
1767 /* Check if we need to update any stepping-related frame id's. */
1768 upd_step_frame_id = frame_id_eq (frame_id,
1769 tp->control.step_frame_id);
1770 upd_step_stack_frame_id = frame_id_eq (frame_id,
1771 tp->control.step_stack_frame_id);
1772
1773 /* We start replaying at the end of the branch trace. This corresponds
1774 to the current instruction. */
8d749320 1775 replay = XNEW (struct btrace_insn_iterator);
52834460
MM
1776 btrace_insn_end (replay, btinfo);
1777
31fd9caa
MM
1778 /* Skip gaps at the end of the trace. */
1779 while (btrace_insn_get (replay) == NULL)
1780 {
1781 unsigned int steps;
1782
1783 steps = btrace_insn_prev (replay, 1);
1784 if (steps == 0)
1785 error (_("No trace."));
1786 }
1787
52834460
MM
1788 /* We're not replaying, yet. */
1789 gdb_assert (btinfo->replay == NULL);
1790 btinfo->replay = replay;
1791
1792 /* Make sure we're not using any stale registers. */
1793 registers_changed_ptid (tp->ptid);
1794
1795 /* The current frame with replaying - computed via btrace unwind. */
1796 frame = get_current_frame ();
1797 frame_id = get_frame_id (frame);
1798
1799 /* Replace stepping related frames where necessary. */
1800 if (upd_step_frame_id)
1801 tp->control.step_frame_id = frame_id;
1802 if (upd_step_stack_frame_id)
1803 tp->control.step_stack_frame_id = frame_id;
1804 }
492d29ea 1805 CATCH (except, RETURN_MASK_ALL)
52834460 1806 {
6c63c96a
PA
1807 /* Restore the previous execution state. */
1808 set_executing (tp->ptid, executing);
1809
52834460
MM
1810 xfree (btinfo->replay);
1811 btinfo->replay = NULL;
1812
1813 registers_changed_ptid (tp->ptid);
1814
1815 throw_exception (except);
1816 }
492d29ea 1817 END_CATCH
52834460 1818
6c63c96a
PA
1819 /* Restore the previous execution state. */
1820 set_executing (tp->ptid, executing);
1821
52834460
MM
1822 return replay;
1823}
1824
1825/* Stop replaying a thread. */
1826
1827static void
1828record_btrace_stop_replaying (struct thread_info *tp)
1829{
1830 struct btrace_thread_info *btinfo;
1831
1832 btinfo = &tp->btrace;
1833
1834 xfree (btinfo->replay);
1835 btinfo->replay = NULL;
1836
1837 /* Make sure we're not leaving any stale registers. */
1838 registers_changed_ptid (tp->ptid);
1839}
1840
b2f4cfde
MM
1841/* The to_resume method of target record-btrace. */
1842
1843static void
1844record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
1845 enum gdb_signal signal)
1846{
52834460
MM
1847 struct thread_info *tp, *other;
1848 enum btrace_thread_flag flag;
1849
987e68b1
MM
1850 DEBUG ("resume %s: %s%s", target_pid_to_str (ptid),
1851 execution_direction == EXEC_REVERSE ? "reverse-" : "",
1852 step ? "step" : "cont");
52834460 1853
70ad5bff
MM
1854 /* Store the execution direction of the last resume. */
1855 record_btrace_resume_exec_dir = execution_direction;
1856
52834460
MM
1857 tp = record_btrace_find_resume_thread (ptid);
1858 if (tp == NULL)
1859 error (_("Cannot find thread to resume."));
1860
1861 /* Stop replaying other threads if the thread to resume is not replaying. */
1862 if (!btrace_is_replaying (tp) && execution_direction != EXEC_REVERSE)
034f788c 1863 ALL_NON_EXITED_THREADS (other)
52834460
MM
1864 record_btrace_stop_replaying (other);
1865
b2f4cfde 1866 /* As long as we're not replaying, just forward the request. */
1c63c994 1867 if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
b2f4cfde 1868 {
e75fdfca
TT
1869 ops = ops->beneath;
1870 return ops->to_resume (ops, ptid, step, signal);
b2f4cfde
MM
1871 }
1872
52834460
MM
1873 /* Compute the btrace thread flag for the requested move. */
1874 if (step == 0)
1875 flag = execution_direction == EXEC_REVERSE ? BTHR_RCONT : BTHR_CONT;
1876 else
1877 flag = execution_direction == EXEC_REVERSE ? BTHR_RSTEP : BTHR_STEP;
1878
1879 /* At the moment, we only move a single thread. We could also move
1880 all threads in parallel by single-stepping each resumed thread
1881 until the first runs into an event.
1882 When we do that, we would want to continue all other threads.
1883 For now, just resume one thread to not confuse to_wait. */
1884 record_btrace_resume_thread (tp, flag);
1885
1886 /* We just indicate the resume intent here. The actual stepping happens in
1887 record_btrace_wait below. */
70ad5bff
MM
1888
1889 /* Async support. */
1890 if (target_can_async_p ())
1891 {
6a3753b3 1892 target_async (1);
70ad5bff
MM
1893 mark_async_event_handler (record_btrace_async_inferior_event_handler);
1894 }
52834460
MM
1895}
1896
987e68b1
MM
1897/* Cancel resuming TP. */
1898
1899static void
1900record_btrace_cancel_resume (struct thread_info *tp)
1901{
1902 enum btrace_thread_flag flags;
1903
1904 flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
1905 if (flags == 0)
1906 return;
1907
1908 DEBUG ("cancel resume thread %d (%s): %x (%s)", tp->num,
1909 target_pid_to_str (tp->ptid), flags,
1910 btrace_thread_flag_to_str (flags));
1911
1912 tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
1913}
1914
52834460
MM
1915/* Find a thread to move. */
1916
1917static struct thread_info *
1918record_btrace_find_thread_to_move (ptid_t ptid)
1919{
1920 struct thread_info *tp;
1921
1922 /* First check the parameter thread. */
1923 tp = find_thread_ptid (ptid);
6e4879f0 1924 if (tp != NULL && (tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0)
52834460
MM
1925 return tp;
1926
1927 /* Otherwise, find one other thread that has been resumed. */
034f788c 1928 ALL_NON_EXITED_THREADS (tp)
6e4879f0 1929 if ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0)
52834460
MM
1930 return tp;
1931
1932 return NULL;
1933}
1934
1935/* Return a target_waitstatus indicating that we ran out of history. */
1936
1937static struct target_waitstatus
1938btrace_step_no_history (void)
1939{
1940 struct target_waitstatus status;
1941
1942 status.kind = TARGET_WAITKIND_NO_HISTORY;
1943
1944 return status;
1945}
1946
1947/* Return a target_waitstatus indicating that a step finished. */
1948
1949static struct target_waitstatus
1950btrace_step_stopped (void)
1951{
1952 struct target_waitstatus status;
1953
1954 status.kind = TARGET_WAITKIND_STOPPED;
1955 status.value.sig = GDB_SIGNAL_TRAP;
1956
1957 return status;
1958}
1959
6e4879f0
MM
1960/* Return a target_waitstatus indicating that a thread was stopped as
1961 requested. */
1962
1963static struct target_waitstatus
1964btrace_step_stopped_on_request (void)
1965{
1966 struct target_waitstatus status;
1967
1968 status.kind = TARGET_WAITKIND_STOPPED;
1969 status.value.sig = GDB_SIGNAL_0;
1970
1971 return status;
1972}
1973
d825d248
MM
1974/* Return a target_waitstatus indicating a spurious stop. */
1975
1976static struct target_waitstatus
1977btrace_step_spurious (void)
1978{
1979 struct target_waitstatus status;
1980
1981 status.kind = TARGET_WAITKIND_SPURIOUS;
1982
1983 return status;
1984}
1985
52834460
MM
1986/* Clear the record histories. */
1987
1988static void
1989record_btrace_clear_histories (struct btrace_thread_info *btinfo)
1990{
1991 xfree (btinfo->insn_history);
1992 xfree (btinfo->call_history);
1993
1994 btinfo->insn_history = NULL;
1995 btinfo->call_history = NULL;
1996}
1997
3c615f99
MM
1998/* Check whether TP's current replay position is at a breakpoint. */
1999
2000static int
2001record_btrace_replay_at_breakpoint (struct thread_info *tp)
2002{
2003 struct btrace_insn_iterator *replay;
2004 struct btrace_thread_info *btinfo;
2005 const struct btrace_insn *insn;
2006 struct inferior *inf;
2007
2008 btinfo = &tp->btrace;
2009 replay = btinfo->replay;
2010
2011 if (replay == NULL)
2012 return 0;
2013
2014 insn = btrace_insn_get (replay);
2015 if (insn == NULL)
2016 return 0;
2017
2018 inf = find_inferior_ptid (tp->ptid);
2019 if (inf == NULL)
2020 return 0;
2021
2022 return record_check_stopped_by_breakpoint (inf->aspace, insn->pc,
2023 &btinfo->stop_reason);
2024}
2025
d825d248 2026/* Step one instruction in forward direction. */
52834460
MM
2027
2028static struct target_waitstatus
d825d248 2029record_btrace_single_step_forward (struct thread_info *tp)
52834460
MM
2030{
2031 struct btrace_insn_iterator *replay, end;
2032 struct btrace_thread_info *btinfo;
52834460 2033
d825d248
MM
2034 btinfo = &tp->btrace;
2035 replay = btinfo->replay;
2036
2037 /* We're done if we're not replaying. */
2038 if (replay == NULL)
2039 return btrace_step_no_history ();
2040
2041 /* Skip gaps during replay. */
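  /* btrace_insn_get returns NULL while the replay iterator points into a
     gap in the recorded trace, so keep stepping until we reach a real
     instruction.  */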
2042 do
2043 {
2044 unsigned int steps;
2045
2046 steps = btrace_insn_next (replay, 1);
2047 if (steps == 0)
2048 {
2049 record_btrace_stop_replaying (tp);
2050 return btrace_step_no_history ();
2051 }
2052 }
2053 while (btrace_insn_get (replay) == NULL);
2054
2055 /* Determine the end of the instruction trace. */
2056 btrace_insn_end (&end, btinfo);
2057
2058 /* We stop replaying if we reached the end of the trace. */
2059 if (btrace_insn_cmp (replay, &end) == 0)
2060 record_btrace_stop_replaying (tp);
2061
2062 return btrace_step_spurious ();
2063}
2064
2065/* Step one instruction in backward direction. */
2066
2067static struct target_waitstatus
2068record_btrace_single_step_backward (struct thread_info *tp)
2069{
2070 struct btrace_insn_iterator *replay;
2071 struct btrace_thread_info *btinfo;
e59fa00f 2072
52834460
MM
2073 btinfo = &tp->btrace;
2074 replay = btinfo->replay;
2075
d825d248
MM
2076 /* Start replaying if we're not already doing so. */
2077 if (replay == NULL)
2078 replay = record_btrace_start_replaying (tp);
2079
2080 /* If we can't step any further, we reached the end of the history.
2081 Skip gaps during replay. */
2082 do
2083 {
2084 unsigned int steps;
2085
2086 steps = btrace_insn_prev (replay, 1);
2087 if (steps == 0)
2088 return btrace_step_no_history ();
2089 }
2090 while (btrace_insn_get (replay) == NULL);
2091
2092 return btrace_step_spurious ();
2093}
2094
2095/* Step a single thread. */
2096
2097static struct target_waitstatus
2098record_btrace_step_thread (struct thread_info *tp)
2099{
2100 struct btrace_thread_info *btinfo;
2101 struct target_waitstatus status;
2102 enum btrace_thread_flag flags;
2103
2104 btinfo = &tp->btrace;
2105
6e4879f0
MM
2106 flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
2107 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
52834460 2108
987e68b1
MM
2109 DEBUG ("stepping thread %d (%s): %x (%s)", tp->num,
2110 target_pid_to_str (tp->ptid), flags,
2111 btrace_thread_flag_to_str (flags));
52834460 2112
6e4879f0
MM
2113 /* We can't step without an execution history. */
2114 if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
2115 return btrace_step_no_history ();
2116
52834460
MM
2117 switch (flags)
2118 {
2119 default:
2120 internal_error (__FILE__, __LINE__, _("invalid stepping type."));
2121
6e4879f0
MM
2122 case BTHR_STOP:
2123 return btrace_step_stopped_on_request ();
2124
52834460 2125 case BTHR_STEP:
d825d248
MM
2126 status = record_btrace_single_step_forward (tp);
2127 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2128 return status;
52834460
MM
2129
2130 return btrace_step_stopped ();
2131
2132 case BTHR_RSTEP:
d825d248
MM
2133 status = record_btrace_single_step_backward (tp);
2134 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2135 return status;
52834460
MM
2136
2137 return btrace_step_stopped ();
2138
2139 case BTHR_CONT:
52834460
MM
2140 for (;;)
2141 {
d825d248
MM
2142 status = record_btrace_single_step_forward (tp);
2143 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2144 return status;
52834460 2145
d825d248 2146 if (btinfo->replay != NULL)
31fd9caa 2147 {
d825d248 2148 const struct btrace_insn *insn;
31fd9caa 2149
d825d248
MM
2150 insn = btrace_insn_get (btinfo->replay);
2151 gdb_assert (insn != NULL);
52834460 2152
d825d248
MM
2153 DEBUG ("stepping %d (%s) ... %s", tp->num,
2154 target_pid_to_str (tp->ptid),
2155 core_addr_to_string_nz (insn->pc));
52834460
MM
2156 }
2157
3c615f99 2158 if (record_btrace_replay_at_breakpoint (tp))
52834460
MM
2159 return btrace_step_stopped ();
2160 }
2161
2162 case BTHR_RCONT:
52834460
MM
2163 for (;;)
2164 {
2165 const struct btrace_insn *insn;
2166
d825d248
MM
2167 status = record_btrace_single_step_backward (tp);
2168 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2169 return status;
52834460 2170
d825d248
MM
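          /* Stepping backward starts replaying if necessary (see
             record_btrace_single_step_backward), so the replay iterator
             must exist at this point.  */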
2171 gdb_assert (btinfo->replay != NULL);
2172
2173 insn = btrace_insn_get (btinfo->replay);
2174 gdb_assert (insn != NULL);
52834460
MM
2175
2176 DEBUG ("reverse-stepping %d (%s) ... %s", tp->num,
2177 target_pid_to_str (tp->ptid),
2178 core_addr_to_string_nz (insn->pc));
2179
3c615f99 2180 if (record_btrace_replay_at_breakpoint (tp))
52834460
MM
2181 return btrace_step_stopped ();
2182 }
2183 }
b2f4cfde
MM
2184}
2185
2186/* The to_wait method of target record-btrace. */
2187
2188static ptid_t
2189record_btrace_wait (struct target_ops *ops, ptid_t ptid,
2190 struct target_waitstatus *status, int options)
2191{
52834460
MM
2192 struct thread_info *tp, *other;
2193
2194 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);
2195
b2f4cfde 2196 /* As long as we're not replaying, just forward the request. */
1c63c994 2197 if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
b2f4cfde 2198 {
e75fdfca
TT
2199 ops = ops->beneath;
2200 return ops->to_wait (ops, ptid, status, options);
b2f4cfde
MM
2201 }
2202
52834460
MM
2203 /* Let's find a thread to move. */
2204 tp = record_btrace_find_thread_to_move (ptid);
2205 if (tp == NULL)
2206 {
2207 DEBUG ("wait %s: no thread", target_pid_to_str (ptid));
2208
2209 status->kind = TARGET_WAITKIND_IGNORE;
2210 return minus_one_ptid;
2211 }
2212
2213 /* We only move a single thread. We're not able to correlate threads. */
2214 *status = record_btrace_step_thread (tp);
2215
2216 /* Stop all other threads. */
5953356c 2217 if (!target_is_non_stop_p ())
034f788c 2218 ALL_NON_EXITED_THREADS (other)
987e68b1 2219 record_btrace_cancel_resume (other);
52834460
MM
2220
2221 /* Start record histories anew from the current position. */
2222 record_btrace_clear_histories (&tp->btrace);
2223
2224 /* We moved the replay position but did not update registers. */
2225 registers_changed_ptid (tp->ptid);
2226
2227 return tp->ptid;
2228}
2229
6e4879f0
MM
2230/* The to_stop method of target record-btrace. */
2231
2232static void
2233record_btrace_stop (struct target_ops *ops, ptid_t ptid)
2234{
2235 DEBUG ("stop %s", target_pid_to_str (ptid));
2236
2237 /* As long as we're not replaying, just forward the request. */
2238 if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
2239 {
2240 ops = ops->beneath;
2241 ops->to_stop (ops, ptid);
2242 }
2243 else
2244 {
2245 struct thread_info *tp;
2246
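      /* Mark each matching thread as stopped.  The request is picked up by
         record_btrace_step_thread, which reports the stop with GDB_SIGNAL_0
         via btrace_step_stopped_on_request.  */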
2247 ALL_NON_EXITED_THREADS (tp)
2248 if (ptid_match (tp->ptid, ptid))
2249 {
2250 tp->btrace.flags &= ~BTHR_MOVE;
2251 tp->btrace.flags |= BTHR_STOP;
2252 }
2253 }
2254}
2255
52834460
MM
2256/* The to_can_execute_reverse method of target record-btrace. */
2257
2258static int
19db3e69 2259record_btrace_can_execute_reverse (struct target_ops *self)
52834460
MM
2260{
2261 return 1;
2262}
2263
9e8915c6 2264/* The to_stopped_by_sw_breakpoint method of target record-btrace. */
52834460 2265
9e8915c6
PA
2266static int
2267record_btrace_stopped_by_sw_breakpoint (struct target_ops *ops)
52834460 2268{
1c63c994 2269 if (record_btrace_is_replaying (ops))
9e8915c6
PA
2270 {
2271 struct thread_info *tp = inferior_thread ();
2272
2273 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2274 }
2275
2276 return ops->beneath->to_stopped_by_sw_breakpoint (ops->beneath);
2277}
2278
2279/* The to_supports_stopped_by_sw_breakpoint method of target
2280 record-btrace. */
2281
2282static int
2283record_btrace_supports_stopped_by_sw_breakpoint (struct target_ops *ops)
2284{
2285 if (record_btrace_is_replaying (ops))
2286 return 1;
2287
2288 return ops->beneath->to_supports_stopped_by_sw_breakpoint (ops->beneath);
2289}
2290
2291 /* The to_stopped_by_hw_breakpoint method of target record-btrace. */
2292
2293static int
2294record_btrace_stopped_by_hw_breakpoint (struct target_ops *ops)
2295{
2296 if (record_btrace_is_replaying (ops))
2297 {
2298 struct thread_info *tp = inferior_thread ();
2299
2300 return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2301 }
2302
2303 return ops->beneath->to_stopped_by_hw_breakpoint (ops->beneath);
2304}
2305
2306/* The to_supports_stopped_by_hw_breakpoint method of target
2307 record-btrace. */
2308
2309static int
2310record_btrace_supports_stopped_by_hw_breakpoint (struct target_ops *ops)
2311{
2312 if (record_btrace_is_replaying (ops))
2313 return 1;
52834460 2314
9e8915c6 2315 return ops->beneath->to_supports_stopped_by_hw_breakpoint (ops->beneath);
b2f4cfde
MM
2316}
2317
e8032dde 2318/* The to_update_thread_list method of target record-btrace. */
e2887aa3
MM
2319
2320static void
e8032dde 2321record_btrace_update_thread_list (struct target_ops *ops)
e2887aa3 2322{
e8032dde 2323 /* We don't add or remove threads during replay. */
1c63c994 2324 if (record_btrace_is_replaying (ops))
e2887aa3
MM
2325 return;
2326
2327 /* Forward the request. */
e75fdfca 2328 ops = ops->beneath;
e8032dde 2329 ops->to_update_thread_list (ops);
e2887aa3
MM
2330}
2331
2332/* The to_thread_alive method of target record-btrace. */
2333
2334static int
2335record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
2336{
2337 /* We don't add or remove threads during replay. */
1c63c994 2338 if (record_btrace_is_replaying (ops))
e2887aa3
MM
2339 return find_thread_ptid (ptid) != NULL;
2340
2341 /* Forward the request. */
e75fdfca
TT
2342 ops = ops->beneath;
2343 return ops->to_thread_alive (ops, ptid);
e2887aa3
MM
2344}
2345
066ce621
MM
2346/* Set the replay branch trace instruction iterator. If IT is NULL, replay
2347 is stopped. */
2348
2349static void
2350record_btrace_set_replay (struct thread_info *tp,
2351 const struct btrace_insn_iterator *it)
2352{
2353 struct btrace_thread_info *btinfo;
2354
2355 btinfo = &tp->btrace;
2356
2357 if (it == NULL || it->function == NULL)
52834460 2358 record_btrace_stop_replaying (tp);
066ce621
MM
2359 else
2360 {
2361 if (btinfo->replay == NULL)
52834460 2362 record_btrace_start_replaying (tp);
066ce621
MM
2363 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
2364 return;
2365
2366 *btinfo->replay = *it;
52834460 2367 registers_changed_ptid (tp->ptid);
066ce621
MM
2368 }
2369
52834460
MM
2370 /* Start anew from the new replay position. */
2371 record_btrace_clear_histories (btinfo);
485668e5
MM
2372
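  /* Report the new replay position like a normal stop: update the cached
     stop PC and print the current frame.  */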
2373 stop_pc = regcache_read_pc (get_current_regcache ());
2374 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
066ce621
MM
2375}
2376
2377/* The to_goto_record_begin method of target record-btrace. */
2378
2379static void
08475817 2380record_btrace_goto_begin (struct target_ops *self)
066ce621
MM
2381{
2382 struct thread_info *tp;
2383 struct btrace_insn_iterator begin;
2384
2385 tp = require_btrace_thread ();
2386
2387 btrace_insn_begin (&begin, &tp->btrace);
2388 record_btrace_set_replay (tp, &begin);
066ce621
MM
2389}
2390
2391/* The to_goto_record_end method of target record-btrace. */
2392
2393static void
307a1b91 2394record_btrace_goto_end (struct target_ops *ops)
066ce621
MM
2395{
2396 struct thread_info *tp;
2397
2398 tp = require_btrace_thread ();
2399
2400 record_btrace_set_replay (tp, NULL);
066ce621
MM
2401}
2402
2403/* The to_goto_record method of target record-btrace. */
2404
2405static void
606183ac 2406record_btrace_goto (struct target_ops *self, ULONGEST insn)
066ce621
MM
2407{
2408 struct thread_info *tp;
2409 struct btrace_insn_iterator it;
2410 unsigned int number;
2411 int found;
2412
2413 number = insn;
2414
2415 /* Check for wrap-arounds: NUMBER is narrower than INSN, so the assignment above may have truncated the value. */
2416 if (number != insn)
2417 error (_("Instruction number out of range."));
2418
2419 tp = require_btrace_thread ();
2420
2421 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
2422 if (found == 0)
2423 error (_("No such instruction."));
2424
2425 record_btrace_set_replay (tp, &it);
066ce621
MM
2426}
2427
70ad5bff
MM
2428/* The to_execution_direction target method. */
2429
2430static enum exec_direction_kind
2431record_btrace_execution_direction (struct target_ops *self)
2432{
2433 return record_btrace_resume_exec_dir;
2434}
2435
aef92902
MM
2436/* The to_prepare_to_generate_core target method. */
2437
2438static void
2439record_btrace_prepare_to_generate_core (struct target_ops *self)
2440{
2441 record_btrace_generating_corefile = 1;
2442}
2443
2444/* The to_done_generating_core target method. */
2445
2446static void
2447record_btrace_done_generating_core (struct target_ops *self)
2448{
2449 record_btrace_generating_corefile = 0;
2450}
2451
afedecd3
MM
2452/* Initialize the record-btrace target ops. */
2453
2454static void
2455init_record_btrace_ops (void)
2456{
2457 struct target_ops *ops;
2458
2459 ops = &record_btrace_ops;
2460 ops->to_shortname = "record-btrace";
2461 ops->to_longname = "Branch tracing target";
2462 ops->to_doc = "Collect control-flow trace and provide the execution history.";
2463 ops->to_open = record_btrace_open;
2464 ops->to_close = record_btrace_close;
b7d2e916 2465 ops->to_async = record_btrace_async;
afedecd3
MM
2466 ops->to_detach = record_detach;
2467 ops->to_disconnect = record_disconnect;
2468 ops->to_mourn_inferior = record_mourn_inferior;
2469 ops->to_kill = record_kill;
afedecd3
MM
2470 ops->to_stop_recording = record_btrace_stop_recording;
2471 ops->to_info_record = record_btrace_info;
2472 ops->to_insn_history = record_btrace_insn_history;
2473 ops->to_insn_history_from = record_btrace_insn_history_from;
2474 ops->to_insn_history_range = record_btrace_insn_history_range;
2475 ops->to_call_history = record_btrace_call_history;
2476 ops->to_call_history_from = record_btrace_call_history_from;
2477 ops->to_call_history_range = record_btrace_call_history_range;
07bbe694 2478 ops->to_record_is_replaying = record_btrace_is_replaying;
633785ff
MM
2479 ops->to_xfer_partial = record_btrace_xfer_partial;
2480 ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
2481 ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
1f3ef581
MM
2482 ops->to_fetch_registers = record_btrace_fetch_registers;
2483 ops->to_store_registers = record_btrace_store_registers;
2484 ops->to_prepare_to_store = record_btrace_prepare_to_store;
ac01945b
TT
2485 ops->to_get_unwinder = &record_btrace_to_get_unwinder;
2486 ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;
b2f4cfde
MM
2487 ops->to_resume = record_btrace_resume;
2488 ops->to_wait = record_btrace_wait;
6e4879f0 2489 ops->to_stop = record_btrace_stop;
e8032dde 2490 ops->to_update_thread_list = record_btrace_update_thread_list;
e2887aa3 2491 ops->to_thread_alive = record_btrace_thread_alive;
066ce621
MM
2492 ops->to_goto_record_begin = record_btrace_goto_begin;
2493 ops->to_goto_record_end = record_btrace_goto_end;
2494 ops->to_goto_record = record_btrace_goto;
52834460 2495 ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
9e8915c6
PA
2496 ops->to_stopped_by_sw_breakpoint = record_btrace_stopped_by_sw_breakpoint;
2497 ops->to_supports_stopped_by_sw_breakpoint
2498 = record_btrace_supports_stopped_by_sw_breakpoint;
2499 ops->to_stopped_by_hw_breakpoint = record_btrace_stopped_by_hw_breakpoint;
2500 ops->to_supports_stopped_by_hw_breakpoint
2501 = record_btrace_supports_stopped_by_hw_breakpoint;
70ad5bff 2502 ops->to_execution_direction = record_btrace_execution_direction;
aef92902
MM
2503 ops->to_prepare_to_generate_core = record_btrace_prepare_to_generate_core;
2504 ops->to_done_generating_core = record_btrace_done_generating_core;
afedecd3
MM
2505 ops->to_stratum = record_stratum;
2506 ops->to_magic = OPS_MAGIC;
2507}
2508
f4abbc16
MM
2509/* Start recording in BTS format. */
2510
2511static void
2512cmd_record_btrace_bts_start (char *args, int from_tty)
2513{
f4abbc16
MM
2514 if (args != NULL && *args != 0)
2515 error (_("Invalid argument."));
2516
2517 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2518
492d29ea
PA
2519 TRY
2520 {
2521 execute_command ("target record-btrace", from_tty);
2522 }
2523 CATCH (exception, RETURN_MASK_ALL)
f4abbc16
MM
2524 {
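      /* Undo the format selection if the record-btrace target could not be
         started, then re-throw the error.  */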
2525 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2526 throw_exception (exception);
2527 }
492d29ea 2528 END_CATCH
f4abbc16
MM
2529}
2530
b20a6524 2531/* Start recording Intel(R) Processor Trace. */
afedecd3
MM
2532
2533static void
b20a6524 2534cmd_record_btrace_pt_start (char *args, int from_tty)
afedecd3
MM
2535{
2536 if (args != NULL && *args != 0)
2537 error (_("Invalid argument."));
2538
b20a6524 2539 record_btrace_conf.format = BTRACE_FORMAT_PT;
f4abbc16 2540
492d29ea
PA
2541 TRY
2542 {
2543 execute_command ("target record-btrace", from_tty);
2544 }
2545 CATCH (exception, RETURN_MASK_ALL)
2546 {
2547 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2548 throw_exception (exception);
2549 }
2550 END_CATCH
afedecd3
MM
2551}
2552
b20a6524
MM
2553/* Alias for "target record". */
2554
2555static void
2556cmd_record_btrace_start (char *args, int from_tty)
2557{
2558 if (args != NULL && *args != 0)
2559 error (_("Invalid argument."));
2560
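  /* Prefer Intel(R) Processor Trace; if starting the record-btrace target
     fails, the CATCH block below falls back to the BTS format.  */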
2561 record_btrace_conf.format = BTRACE_FORMAT_PT;
2562
2563 TRY
2564 {
2565 execute_command ("target record-btrace", from_tty);
2566 }
2567 CATCH (exception, RETURN_MASK_ALL)
2568 {
2569 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2570
2571 TRY
2572 {
2573 execute_command ("target record-btrace", from_tty);
2574 }
2575 CATCH (exception, RETURN_MASK_ALL)
2576 {
2577 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2578 throw_exception (exception);
2579 }
2580 END_CATCH
2581 }
2582 END_CATCH
2583}
2584
67b5c0c1
MM
2585/* The "set record btrace" command. */
2586
2587static void
2588cmd_set_record_btrace (char *args, int from_tty)
2589{
2590 cmd_show_list (set_record_btrace_cmdlist, from_tty, "");
2591}
2592
2593/* The "show record btrace" command. */
2594
2595static void
2596cmd_show_record_btrace (char *args, int from_tty)
2597{
2598 cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
2599}
2600
2601/* The "show record btrace replay-memory-access" command. */
2602
2603static void
2604cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2605 struct cmd_list_element *c, const char *value)
2606{
2607 fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
2608 replay_memory_access);
2609}
2610
d33501a5
MM
2611/* The "set record btrace bts" command. */
2612
2613static void
2614cmd_set_record_btrace_bts (char *args, int from_tty)
2615{
2616 printf_unfiltered (_("\"set record btrace bts\" must be followed "
b20a6524 2617 "by an appropriate subcommand.\n"));
d33501a5
MM
2618 help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
2619 all_commands, gdb_stdout);
2620}
2621
2622/* The "show record btrace bts" command. */
2623
2624static void
2625cmd_show_record_btrace_bts (char *args, int from_tty)
2626{
2627 cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
2628}
2629
b20a6524
MM
2630/* The "set record btrace pt" command. */
2631
2632static void
2633cmd_set_record_btrace_pt (char *args, int from_tty)
2634{
2635 printf_unfiltered (_("\"set record btrace pt\" must be followed "
2636 "by an appropriate subcommand.\n"));
2637 help_list (set_record_btrace_pt_cmdlist, "set record btrace pt ",
2638 all_commands, gdb_stdout);
2639}
2640
2641/* The "show record btrace pt" command. */
2642
2643static void
2644cmd_show_record_btrace_pt (char *args, int from_tty)
2645{
2646 cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, "");
2647}
2648
2649/* The "record bts buffer-size" show value function. */
2650
2651static void
2652show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
2653 struct cmd_list_element *c,
2654 const char *value)
2655{
2656 fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
2657 value);
2658}
2659
2660/* The "record pt buffer-size" show value function. */
2661
2662static void
2663show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
2664 struct cmd_list_element *c,
2665 const char *value)
2666{
2667 fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
2668 value);
2669}
2670
afedecd3
MM
2671void _initialize_record_btrace (void);
2672
2673/* Initialize btrace commands. */
2674
2675void
2676_initialize_record_btrace (void)
2677{
f4abbc16
MM
2678 add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
2679 _("Start branch trace recording."), &record_btrace_cmdlist,
2680 "record btrace ", 0, &record_cmdlist);
afedecd3
MM
2681 add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
2682
f4abbc16
MM
2683 add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
2684 _("\
2685Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
2686The processor stores a from/to record for each branch into a cyclic buffer.\n\
2687This format may not be available on all processors."),
2688 &record_btrace_cmdlist);
2689 add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);
2690
b20a6524
MM
2691 add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
2692 _("\
2693Start branch trace recording in Intel(R) Processor Trace format.\n\n\
2694This format may not be available on all processors."),
2695 &record_btrace_cmdlist);
2696 add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);
2697
67b5c0c1
MM
2698 add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
2699 _("Set record options"), &set_record_btrace_cmdlist,
2700 "set record btrace ", 0, &set_record_cmdlist);
2701
2702 add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
2703 _("Show record options"), &show_record_btrace_cmdlist,
2704 "show record btrace ", 0, &show_record_cmdlist);
2705
2706 add_setshow_enum_cmd ("replay-memory-access", no_class,
2707 replay_memory_access_types, &replay_memory_access, _("\
2708Set what memory accesses are allowed during replay."), _("\
2709Show what memory accesses are allowed during replay."),
2710 _("Default is READ-ONLY.\n\n\
2711The btrace record target does not trace data.\n\
2712The memory therefore corresponds to the live target and not \
2713to the current replay position.\n\n\
2714When READ-ONLY, allow accesses to read-only memory during replay.\n\
2715When READ-WRITE, allow accesses to read-only and read-write memory during \
2716replay."),
2717 NULL, cmd_show_replay_memory_access,
2718 &set_record_btrace_cmdlist,
2719 &show_record_btrace_cmdlist);
2720
d33501a5
MM
2721 add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
2722 _("Set record btrace bts options"),
2723 &set_record_btrace_bts_cmdlist,
2724 "set record btrace bts ", 0, &set_record_btrace_cmdlist);
2725
2726 add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
2727 _("Show record btrace bts options"),
2728 &show_record_btrace_bts_cmdlist,
2729 "show record btrace bts ", 0, &show_record_btrace_cmdlist);
2730
2731 add_setshow_uinteger_cmd ("buffer-size", no_class,
2732 &record_btrace_conf.bts.size,
2733 _("Set the record/replay bts buffer size."),
2734 _("Show the record/replay bts buffer size."), _("\
2735When starting recording, request a trace buffer of this size. \
2736The actual buffer size may differ from the requested size. \
2737Use \"info record\" to see the actual buffer size.\n\n\
2738Bigger buffers allow longer recording but also take more time to process \
2739the recorded execution trace.\n\n\
b20a6524
MM
2740The trace buffer size may not be changed while recording."), NULL,
2741 show_record_bts_buffer_size_value,
d33501a5
MM
2742 &set_record_btrace_bts_cmdlist,
2743 &show_record_btrace_bts_cmdlist);
2744
b20a6524
MM
2745 add_prefix_cmd ("pt", class_support, cmd_set_record_btrace_pt,
2746 _("Set record btrace pt options"),
2747 &set_record_btrace_pt_cmdlist,
2748 "set record btrace pt ", 0, &set_record_btrace_cmdlist);
2749
2750 add_prefix_cmd ("pt", class_support, cmd_show_record_btrace_pt,
2751 _("Show record btrace pt options"),
2752 &show_record_btrace_pt_cmdlist,
2753 "show record btrace pt ", 0, &show_record_btrace_cmdlist);
2754
2755 add_setshow_uinteger_cmd ("buffer-size", no_class,
2756 &record_btrace_conf.pt.size,
2757 _("Set the record/replay pt buffer size."),
2758 _("Show the record/replay pt buffer size."), _("\
2759Bigger buffers allow longer recording but also take more time to process \
2760the recorded execution.\n\
2761The actual buffer size may differ from the requested size. Use \"info record\" \
2762to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
2763 &set_record_btrace_pt_cmdlist,
2764 &show_record_btrace_pt_cmdlist);
2765
afedecd3
MM
2766 init_record_btrace_ops ();
2767 add_target (&record_btrace_ops);
0b722aec
MM
2768
2769 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
2770 xcalloc, xfree);
d33501a5
MM
2771
2772 record_btrace_conf.bts.size = 64 * 1024;
b20a6524 2773 record_btrace_conf.pt.size = 16 * 1024;
afedecd3 2774}
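/* Illustration only, not part of the original file: with the commands
   registered above, a session could look roughly like this (availability
   of each trace format depends on the processor and kernel):

     (gdb) set record btrace bts buffer-size 131072
     (gdb) record btrace bts
     (gdb) info record
     (gdb) set record btrace replay-memory-access read-write

   The buffer size may not be changed while recording; "info record" shows
   the buffer size that was actually granted.  */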