Eliminate target_ops::to_xclose
[deliverable/binutils-gdb.git] / gdb / record-btrace.c
CommitLineData
afedecd3
MM
1/* Branch trace support for GDB, the GNU debugger.
2
e2882c85 3 Copyright (C) 2013-2018 Free Software Foundation, Inc.
afedecd3
MM
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22#include "defs.h"
23#include "record.h"
c0272db5 24#include "record-btrace.h"
afedecd3
MM
25#include "gdbthread.h"
26#include "target.h"
27#include "gdbcmd.h"
28#include "disasm.h"
76727919 29#include "observable.h"
afedecd3
MM
30#include "cli/cli-utils.h"
31#include "source.h"
32#include "ui-out.h"
33#include "symtab.h"
34#include "filenames.h"
1f3ef581 35#include "regcache.h"
cecac1ab 36#include "frame-unwind.h"
0b722aec 37#include "hashtab.h"
45741a9c 38#include "infrun.h"
70ad5bff
MM
39#include "event-loop.h"
40#include "inf-loop.h"
e3cfc1c7 41#include "vec.h"
325fac50 42#include <algorithm>
afedecd3
MM
43
44/* The target_ops of record-btrace. */
45static struct target_ops record_btrace_ops;
46
76727919
TT
47/* Token associated with a new-thread observer enabling branch tracing
48 for the new thread. */
49static const gdb::observers::token record_btrace_thread_observer_token;
afedecd3 50
67b5c0c1
MM
51/* Memory access types used in set/show record btrace replay-memory-access. */
52static const char replay_memory_access_read_only[] = "read-only";
53static const char replay_memory_access_read_write[] = "read-write";
54static const char *const replay_memory_access_types[] =
55{
56 replay_memory_access_read_only,
57 replay_memory_access_read_write,
58 NULL
59};
60
61/* The currently allowed replay memory access type. */
62static const char *replay_memory_access = replay_memory_access_read_only;
63
4a4495d6
MM
64/* The cpu state kinds. */
65enum record_btrace_cpu_state_kind
66{
67 CS_AUTO,
68 CS_NONE,
69 CS_CPU
70};
71
72/* The current cpu state. */
73static enum record_btrace_cpu_state_kind record_btrace_cpu_state = CS_AUTO;
74
75/* The current cpu for trace decode. */
76static struct btrace_cpu record_btrace_cpu;
77
67b5c0c1
MM
78/* Command lists for "set/show record btrace". */
79static struct cmd_list_element *set_record_btrace_cmdlist;
80static struct cmd_list_element *show_record_btrace_cmdlist;
633785ff 81
70ad5bff
MM
82/* The execution direction of the last resume we got. See record-full.c. */
83static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;
84
85/* The async event handler for reverse/replay execution. */
86static struct async_event_handler *record_btrace_async_inferior_event_handler;
87
aef92902
MM
88/* A flag indicating that we are currently generating a core file. */
89static int record_btrace_generating_corefile;
90
f4abbc16
MM
91/* The current branch trace configuration. */
92static struct btrace_config record_btrace_conf;
93
94/* Command list for "record btrace". */
95static struct cmd_list_element *record_btrace_cmdlist;
96
d33501a5
MM
97/* Command lists for "set/show record btrace bts". */
98static struct cmd_list_element *set_record_btrace_bts_cmdlist;
99static struct cmd_list_element *show_record_btrace_bts_cmdlist;
100
b20a6524
MM
101/* Command lists for "set/show record btrace pt". */
102static struct cmd_list_element *set_record_btrace_pt_cmdlist;
103static struct cmd_list_element *show_record_btrace_pt_cmdlist;
104
4a4495d6
MM
105/* Command list for "set record btrace cpu". */
106static struct cmd_list_element *set_record_btrace_cpu_cmdlist;
107
afedecd3
MM
108/* Print a record-btrace debug message. Use do ... while (0) to avoid
109 ambiguities when used in if statements. */
110
111#define DEBUG(msg, args...) \
112 do \
113 { \
114 if (record_debug != 0) \
115 fprintf_unfiltered (gdb_stdlog, \
116 "[record-btrace] " msg "\n", ##args); \
117 } \
118 while (0)
119
120
4a4495d6
MM
121/* Return the cpu configured by the user. Returns NULL if the cpu was
122 configured as auto. */
123const struct btrace_cpu *
124record_btrace_get_cpu (void)
125{
126 switch (record_btrace_cpu_state)
127 {
128 case CS_AUTO:
129 return nullptr;
130
131 case CS_NONE:
132 record_btrace_cpu.vendor = CV_UNKNOWN;
133 /* Fall through. */
134 case CS_CPU:
135 return &record_btrace_cpu;
136 }
137
138 error (_("Internal error: bad record btrace cpu state."));
139}
140
afedecd3 141/* Update the branch trace for the current thread and return a pointer to its
066ce621 142 thread_info.
afedecd3
MM
143
144 Throws an error if there is no thread or no trace. This function never
145 returns NULL. */
146
066ce621
MM
147static struct thread_info *
148require_btrace_thread (void)
afedecd3
MM
149{
150 struct thread_info *tp;
afedecd3
MM
151
152 DEBUG ("require");
153
154 tp = find_thread_ptid (inferior_ptid);
155 if (tp == NULL)
156 error (_("No thread."));
157
cd4007e4
MM
158 validate_registers_access ();
159
4a4495d6 160 btrace_fetch (tp, record_btrace_get_cpu ());
afedecd3 161
6e07b1d2 162 if (btrace_is_empty (tp))
afedecd3
MM
163 error (_("No trace."));
164
066ce621
MM
165 return tp;
166}
167
168/* Update the branch trace for the current thread and return a pointer to its
169 branch trace information struct.
170
171 Throws an error if there is no thread or no trace. This function never
172 returns NULL. */
173
174static struct btrace_thread_info *
175require_btrace (void)
176{
177 struct thread_info *tp;
178
179 tp = require_btrace_thread ();
180
181 return &tp->btrace;
afedecd3
MM
182}
183
/* Enable branch tracing for one thread.  Warn on errors.

   Used as the new-thread observer callback attached by
   record_btrace_auto_enable, so a failure for one new thread must not
   abort the caller — we downgrade the error to a warning.  */

static void
record_btrace_enable_warn (struct thread_info *tp)
{
  TRY
    {
      btrace_enable (tp, &record_btrace_conf);
    }
  CATCH (error, RETURN_MASK_ERROR)
    {
      warning ("%s", error.message);
    }
  END_CATCH
}
199
afedecd3
MM
/* Enable automatic tracing of new threads.

   Attaches record_btrace_enable_warn as a new-thread observer; the token
   identifies our attachment so record_btrace_auto_disable can detach it
   again.  */

static void
record_btrace_auto_enable (void)
{
  DEBUG ("attach thread observer");

  gdb::observers::new_thread.attach (record_btrace_enable_warn,
				     record_btrace_thread_observer_token);
}
210
/* Disable automatic tracing of new threads.

   Detaches the observer registered by record_btrace_auto_enable, using the
   same token.  */

static void
record_btrace_auto_disable (void)
{
  DEBUG ("detach thread observer");

  gdb::observers::new_thread.detach (record_btrace_thread_observer_token);
}
220
70ad5bff
MM
/* The record-btrace async event handler function.

   Invoked from the event loop when the handler created in
   record_btrace_push_target is marked; forwards to the generic inferior
   event handling.  */

static void
record_btrace_handle_async_inferior_event (gdb_client_data data)
{
  inferior_event_handler (INF_REG_EVENT, NULL);
}
228
c0272db5
TW
/* See record-btrace.h.  */

void
record_btrace_push_target (void)
{
  const char *format;

  /* Start tracing newly created threads before the target is pushed so no
     thread slips through.  */
  record_btrace_auto_enable ();

  push_target (&record_btrace_ops);

  /* Set up the handler used for reverse/replay execution events; torn down
     again in record_btrace_close.  */
  record_btrace_async_inferior_event_handler
    = create_async_event_handler (record_btrace_handle_async_inferior_event,
				  NULL);
  record_btrace_generating_corefile = 0;

  /* Announce the new record target to interested observers (e.g. MI).  */
  format = btrace_format_short_string (record_btrace_conf.format);
  gdb::observers::record_changed.notify (current_inferior (), 1, "btrace", format);
}
248
228f1508
SM
/* Disable btrace on a set of threads on scope exit.

   RAII helper: threads registered via add_thread have btrace disabled in
   the destructor unless discard was called first (i.e. unless everything
   succeeded).  */

struct scoped_btrace_disable
{
  scoped_btrace_disable () = default;

  DISABLE_COPY_AND_ASSIGN (scoped_btrace_disable);

  ~scoped_btrace_disable ()
  {
    /* Roll back: disable btrace for every thread still registered.  */
    for (thread_info *tp : m_threads)
      btrace_disable (tp);
  }

  /* Register THREAD for disabling on destruction.  */
  void add_thread (thread_info *thread)
  {
    m_threads.push_front (thread);
  }

  /* Commit: drop all registered threads so the destructor is a no-op.  */
  void discard ()
  {
    m_threads.clear ();
  }

private:
  /* Threads whose btrace will be disabled unless discarded.  */
  std::forward_list<thread_info *> m_threads;
};
276
afedecd3
MM
/* The to_open method of target record-btrace.

   ARGS is an optional thread-number list restricting which threads get
   traced; an empty or NULL ARGS means all non-exited threads.  */

static void
record_btrace_open (const char *args, int from_tty)
{
  /* If we fail to enable btrace for one thread, disable it for the threads for
     which it was successfully enabled.  */
  scoped_btrace_disable btrace_disable;
  struct thread_info *tp;

  DEBUG ("open");

  record_preopen ();

  if (!target_has_execution)
    error (_("The program is not being run."));

  ALL_NON_EXITED_THREADS (tp)
    if (args == NULL || *args == 0 || number_is_in_list (args, tp->global_num))
      {
	/* btrace_enable throws on failure; the scoped disabler then rolls
	   back the threads enabled so far.  */
	btrace_enable (tp, &record_btrace_conf);

	btrace_disable.add_thread (tp);
      }

  record_btrace_push_target ();

  /* Everything succeeded; keep btrace enabled on all threads.  */
  btrace_disable.discard ();
}
306
/* The to_stop_recording method of target record-btrace.

   Stops tracing all currently traced threads and stops auto-enabling new
   ones; the record-btrace target itself stays pushed.  */

static void
record_btrace_stop_recording (struct target_ops *self)
{
  struct thread_info *tp;

  DEBUG ("stop recording");

  record_btrace_auto_disable ();

  /* Only disable threads that actually have tracing enabled (non-NULL
     btrace target).  */
  ALL_NON_EXITED_THREADS (tp)
    if (tp->btrace.target != NULL)
      btrace_disable (tp);
}
322
c0272db5
TW
/* The to_disconnect method of target record-btrace.  */

static void
record_btrace_disconnect (struct target_ops *self, const char *args,
			  int from_tty)
{
  /* Grab the target beneath before unpushing ourselves.  */
  struct target_ops *beneath = self->beneath;

  /* Do not stop recording, just clean up GDB side.  */
  unpush_target (self);

  /* Forward disconnect.  */
  beneath->to_disconnect (beneath, args, from_tty);
}
337
afedecd3
MM
/* The to_close method of target record-btrace.

   Releases the async event handler created in record_btrace_push_target
   and tears down tracing on all threads.  */

static void
record_btrace_close (struct target_ops *self)
{
  struct thread_info *tp;

  if (record_btrace_async_inferior_event_handler != NULL)
    delete_async_event_handler (&record_btrace_async_inferior_event_handler);

  /* Make sure automatic recording gets disabled even if we did not stop
     recording before closing the record-btrace target.  */
  record_btrace_auto_disable ();

  /* We should have already stopped recording.
     Tear down btrace in case we have not.  */
  ALL_NON_EXITED_THREADS (tp)
    btrace_teardown (tp);
}
357
b7d2e916
PA
358/* The to_async method of target record-btrace. */
359
360static void
6a3753b3 361record_btrace_async (struct target_ops *ops, int enable)
b7d2e916 362{
6a3753b3 363 if (enable)
b7d2e916
PA
364 mark_async_event_handler (record_btrace_async_inferior_event_handler);
365 else
366 clear_async_event_handler (record_btrace_async_inferior_event_handler);
367
6a3753b3 368 ops->beneath->to_async (ops->beneath, enable);
b7d2e916
PA
369}
370
d33501a5
MM
/* Adjusts the size and returns a human readable size suffix.

   If *SIZE is an exact multiple of a binary unit (GiB, MiB, KiB), divide it
   by that unit and return the matching suffix; otherwise leave *SIZE alone
   and return the empty string.  */

static const char *
record_btrace_adjust_size (unsigned int *size)
{
  /* Largest unit first so we prefer the biggest exact divisor.  */
  static const struct
  {
    unsigned int shift;
    const char *suffix;
  } units[] =
    {
      { 30, "GB" },
      { 20, "MB" },
      { 10, "kB" }
    };
  const unsigned int value = *size;
  size_t i;

  for (i = 0; i < sizeof (units) / sizeof (units[0]); ++i)
    if ((value & ((1u << units[i].shift) - 1)) == 0)
      {
	*size = value >> units[i].shift;
	return units[i].suffix;
      }

  return "";
}
398
399/* Print a BTS configuration. */
400
401static void
402record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
403{
404 const char *suffix;
405 unsigned int size;
406
407 size = conf->size;
408 if (size > 0)
409 {
410 suffix = record_btrace_adjust_size (&size);
411 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
412 }
413}
414
bc504a31 415/* Print an Intel Processor Trace configuration. */
b20a6524
MM
416
417static void
418record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
419{
420 const char *suffix;
421 unsigned int size;
422
423 size = conf->size;
424 if (size > 0)
425 {
426 suffix = record_btrace_adjust_size (&size);
427 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
428 }
429}
430
d33501a5
MM
431/* Print a branch tracing configuration. */
432
433static void
434record_btrace_print_conf (const struct btrace_config *conf)
435{
436 printf_unfiltered (_("Recording format: %s.\n"),
437 btrace_format_string (conf->format));
438
439 switch (conf->format)
440 {
441 case BTRACE_FORMAT_NONE:
442 return;
443
444 case BTRACE_FORMAT_BTS:
445 record_btrace_print_bts_conf (&conf->bts);
446 return;
b20a6524
MM
447
448 case BTRACE_FORMAT_PT:
449 record_btrace_print_pt_conf (&conf->pt);
450 return;
d33501a5
MM
451 }
452
453 internal_error (__FILE__, __LINE__, _("Unkown branch trace format."));
454}
455
afedecd3
MM
/* The to_info_record method of target record-btrace.

   Prints the recording configuration, the number of recorded instructions,
   functions and gaps for the current thread, and the replay position if
   replaying.  */

static void
record_btrace_info (struct target_ops *self)
{
  struct btrace_thread_info *btinfo;
  const struct btrace_config *conf;
  struct thread_info *tp;
  unsigned int insns, calls, gaps;

  DEBUG ("info");

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  validate_registers_access ();

  btinfo = &tp->btrace;

  conf = btrace_conf (btinfo);
  if (conf != NULL)
    record_btrace_print_conf (conf);

  /* Refresh the trace before counting.  */
  btrace_fetch (tp, record_btrace_get_cpu ());

  insns = 0;
  calls = 0;
  gaps = 0;

  if (!btrace_is_empty (tp))
    {
      struct btrace_call_iterator call;
      struct btrace_insn_iterator insn;

      /* The number of the last call/insn is the total count.  */
      btrace_call_end (&call, btinfo);
      btrace_call_prev (&call, 1);
      calls = btrace_call_number (&call);

      btrace_insn_end (&insn, btinfo);
      insns = btrace_insn_number (&insn);

      /* If the last instruction is not a gap, it is the current instruction
	 that is not actually part of the record.  */
      if (btrace_insn_get (&insn) != NULL)
	insns -= 1;

      gaps = btinfo->ngaps;
    }

  printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
		       "for thread %s (%s).\n"), insns, calls, gaps,
		     print_thread_id (tp), target_pid_to_str (tp->ptid));

  if (btrace_is_replaying (tp))
    printf_unfiltered (_("Replay in progress.  At instruction %u.\n"),
		       btrace_insn_number (btinfo->replay));
}
514
31fd9caa
MM
515/* Print a decode error. */
516
517static void
518btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
519 enum btrace_format format)
520{
508352a9 521 const char *errstr = btrace_decode_error (format, errcode);
31fd9caa 522
112e8700 523 uiout->text (_("["));
508352a9
TW
524 /* ERRCODE > 0 indicates notifications on BTRACE_FORMAT_PT. */
525 if (!(format == BTRACE_FORMAT_PT && errcode > 0))
31fd9caa 526 {
112e8700
SM
527 uiout->text (_("decode error ("));
528 uiout->field_int ("errcode", errcode);
529 uiout->text (_("): "));
31fd9caa 530 }
112e8700
SM
531 uiout->text (errstr);
532 uiout->text (_("]\n"));
31fd9caa
MM
533}
534
afedecd3
MM
/* Print an unsigned int.

   Helper around ui_out: emits VAL for field FLD formatted with "%u"
   (presumably because ui_out has no unsigned field method — confirm
   against ui-out.h).  */

static void
ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
{
  uiout->field_fmt (fld, "%u", val);
}
542
f94cc897
MM
/* A range of source lines.  */

struct btrace_line_range
{
  /* The symtab this line is from.  */
  struct symtab *symtab;

  /* The first line (inclusive).  */
  int begin;

  /* The last line (exclusive).  */
  int end;
};

/* Construct a line range for SYMTAB covering [BEGIN; END).  */

static struct btrace_line_range
btrace_mk_line_range (struct symtab *symtab, int begin, int end)
{
  struct btrace_line_range range;

  range.symtab = symtab;
  range.begin = begin;
  range.end = end;

  return range;
}

/* Add a line to a line range.  */

static struct btrace_line_range
btrace_line_range_add (struct btrace_line_range range, int line)
{
  if (range.end <= range.begin)
    {
      /* This is the first entry.  */
      range.begin = line;
      range.end = line + 1;
    }
  else if (line < range.begin)
    range.begin = line;
  else if (range.end <= line)
    /* END is exclusive (see struct btrace_line_range), so it must be one
       past LINE for LINE to be included.  The previous code set END to
       LINE, silently dropping the highest added line.  */
    range.end = line + 1;

  return range;
}

/* Return non-zero if RANGE is empty, zero otherwise.  */

static int
btrace_line_range_is_empty (struct btrace_line_range range)
{
  return range.end <= range.begin;
}

/* Return non-zero if LHS contains RHS, zero otherwise.  Both ranges must be
   from the same symtab.  */

static int
btrace_line_range_contains_range (struct btrace_line_range lhs,
				  struct btrace_line_range rhs)
{
  return ((lhs.symtab == rhs.symtab)
	  && (lhs.begin <= rhs.begin)
	  && (rhs.end <= lhs.end));
}
608
/* Find the line range associated with PC.

   Collects all line-table entries whose address equals PC into one range.
   Returns an empty range (possibly with a NULL symtab) if PC has no symtab,
   no line table, or no matching entries.  */

static struct btrace_line_range
btrace_find_line_range (CORE_ADDR pc)
{
  struct btrace_line_range range;
  struct linetable_entry *lines;
  struct linetable *ltable;
  struct symtab *symtab;
  int nlines, i;

  symtab = find_pc_line_symtab (pc);
  if (symtab == NULL)
    return btrace_mk_line_range (NULL, 0, 0);

  ltable = SYMTAB_LINETABLE (symtab);
  if (ltable == NULL)
    return btrace_mk_line_range (symtab, 0, 0);

  nlines = ltable->nitems;
  lines = ltable->item;
  if (nlines <= 0)
    return btrace_mk_line_range (symtab, 0, 0);

  range = btrace_mk_line_range (symtab, 0, 0);
  /* NOTE(review): the loop stops at nlines - 1, skipping the final line
     table entry — presumably the end-of-sequence marker; confirm against
     the linetable layout in symtab.h.  */
  for (i = 0; i < nlines - 1; i++)
    {
      /* Entries with line 0 mark addresses without source line.  */
      if ((lines[i].pc == pc) && (lines[i].line != 0))
	range = btrace_line_range_add (range, lines[i].line);
    }

  return range;
}
642
/* Print source lines in LINES to UIOUT.

   SRC_AND_ASM_TUPLE and ASM_LIST hold the ui-out emitters for the last
   printed source line and its instruction list.  For each line we reset the
   instruction list first (the tuple emitter must outlive it), open a fresh
   "src_and_asm_line" tuple, print the source line, and open a new
   "line_asm_insn" list.  If LINES is not empty, both emitters are left
   engaged on return so the caller can add instructions to the last line.  */

static void
btrace_print_lines (struct btrace_line_range lines, struct ui_out *uiout,
		    gdb::optional<ui_out_emit_tuple> *src_and_asm_tuple,
		    gdb::optional<ui_out_emit_list> *asm_list,
		    gdb_disassembly_flags flags)
{
  print_source_lines_flags psl_flags;

  if (flags & DISASSEMBLY_FILENAME)
    psl_flags |= PRINT_SOURCE_LINES_FILENAME;

  for (int line = lines.begin; line < lines.end; ++line)
    {
      /* Close the previous instruction list before reopening the tuple;
	 destruction order matters for well-formed ui-out nesting.  */
      asm_list->reset ();

      src_and_asm_tuple->emplace (uiout, "src_and_asm_line");

      print_source_lines (lines.symtab, line, line + 1, psl_flags);

      asm_list->emplace (uiout, "line_asm_insn");
    }
}
674
afedecd3
MM
/* Disassemble a section of the recorded instruction trace.

   Prints instructions in [BEGIN; END) to UIOUT.  Gaps in the trace (NULL
   instructions) are rendered as decode errors.  With DISASSEMBLY_SOURCE,
   instructions are grouped under their source lines.  */

static void
btrace_insn_history (struct ui_out *uiout,
		     const struct btrace_thread_info *btinfo,
		     const struct btrace_insn_iterator *begin,
		     const struct btrace_insn_iterator *end,
		     gdb_disassembly_flags flags)
{
  DEBUG ("itrace (0x%x): [%u; %u)", (unsigned) flags,
	 btrace_insn_number (begin), btrace_insn_number (end));

  /* Recorded instructions may be marked speculative; always honor that.  */
  flags |= DISASSEMBLY_SPECULATIVE;

  struct gdbarch *gdbarch = target_gdbarch ();
  /* Tracks the last source line range printed, to avoid repeating it.  */
  btrace_line_range last_lines = btrace_mk_line_range (NULL, 0, 0);

  ui_out_emit_list list_emitter (uiout, "asm_insns");

  /* Lazily engaged emitters for the current source line tuple and its
     instruction list; managed here and inside btrace_print_lines.  */
  gdb::optional<ui_out_emit_tuple> src_and_asm_tuple;
  gdb::optional<ui_out_emit_list> asm_list;

  gdb_pretty_print_disassembler disasm (gdbarch);

  for (btrace_insn_iterator it = *begin; btrace_insn_cmp (&it, end) != 0;
       btrace_insn_next (&it, 1))
    {
      const struct btrace_insn *insn;

      insn = btrace_insn_get (&it);

      /* A NULL instruction indicates a gap in the trace.  */
      if (insn == NULL)
	{
	  const struct btrace_config *conf;

	  conf = btrace_conf (btinfo);

	  /* We have trace so we must have a configuration.  */
	  gdb_assert (conf != NULL);

	  uiout->field_fmt ("insn-number", "%u",
			    btrace_insn_number (&it));
	  uiout->text ("\t");

	  btrace_ui_out_decode_error (uiout, btrace_insn_get_error (&it),
				      conf->format);
	}
      else
	{
	  struct disasm_insn dinsn;

	  if ((flags & DISASSEMBLY_SOURCE) != 0)
	    {
	      struct btrace_line_range lines;

	      lines = btrace_find_line_range (insn->pc);
	      /* Only print source lines we have not printed yet.  */
	      if (!btrace_line_range_is_empty (lines)
		  && !btrace_line_range_contains_range (last_lines, lines))
		{
		  btrace_print_lines (lines, uiout, &src_and_asm_tuple, &asm_list,
				      flags);
		  last_lines = lines;
		}
	      else if (!src_and_asm_tuple.has_value ())
		{
		  gdb_assert (!asm_list.has_value ());

		  src_and_asm_tuple.emplace (uiout, "src_and_asm_line");

		  /* No source information.  */
		  asm_list.emplace (uiout, "line_asm_insn");
		}

	      gdb_assert (src_and_asm_tuple.has_value ());
	      gdb_assert (asm_list.has_value ());
	    }

	  memset (&dinsn, 0, sizeof (dinsn));
	  dinsn.number = btrace_insn_number (&it);
	  dinsn.addr = insn->pc;

	  if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
	    dinsn.is_speculative = 1;

	  disasm.pretty_print_insn (uiout, &dinsn, flags);
	}
    }
}
764
/* The to_insn_history method of target record-btrace.

   Prints SIZE instructions, continuing from the previously printed window
   if there is one; otherwise starting at the replay position or at the tail
   of the trace.  A negative SIZE prints backwards.  */

static void
record_btrace_insn_history (struct target_ops *self, int size,
			    gdb_disassembly_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  btinfo = require_btrace ();
  history = btinfo->insn_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("insn-history (0x%x): %d", (unsigned) flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
	 start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
	begin = *replay;
      else
	btrace_insn_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
	 expand in the other direction, as well, to fill up any remaining
	 context.  */
      end = begin;
      if (size < 0)
	{
	  /* We want the current position covered, as well.  */
	  covered = btrace_insn_next (&end, 1);
	  covered += btrace_insn_prev (&begin, context - covered);
	  covered += btrace_insn_next (&end, context - covered);
	}
      else
	{
	  covered = btrace_insn_next (&end, context);
	  covered += btrace_insn_prev (&begin, context - covered);
	}
    }
  else
    {
      /* Continue from the previously printed window.  */
      begin = history->begin;
      end = history->end;

      DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", (unsigned) flags, size,
	     btrace_insn_number (&begin), btrace_insn_number (&end));

      if (size < 0)
	{
	  end = begin;
	  covered = btrace_insn_prev (&begin, context);
	}
      else
	{
	  begin = end;
	  covered = btrace_insn_next (&end, context);
	}
    }

  if (covered > 0)
    btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
	printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
	printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  /* Remember the printed window for the next invocation.  */
  btrace_set_insn_history (btinfo, &begin, &end);
}
848
/* The to_insn_history_range method of target record-btrace.

   Prints the recorded instructions numbered [FROM; TO], both inclusive.
   Errors on wrap-around or an out-of-bounds FROM; an out-of-bounds TO is
   silently truncated to the end of the trace.  */

static void
record_btrace_insn_history_range (struct target_ops *self,
				  ULONGEST from, ULONGEST to,
				  gdb_disassembly_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_iterator begin, end;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "insn history");
  /* Instruction numbers are unsigned int internally.  */
  low = from;
  high = to;

  DEBUG ("insn-history (0x%x): [%u; %u)", (unsigned) flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_insn_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_insn_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_insn_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_insn_next (&end, 1);
    }

  btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_insn_history (btinfo, &begin, &end);
}
897
898/* The to_insn_history_from method of target record-btrace. */
899
900static void
9abc3ff3 901record_btrace_insn_history_from (struct target_ops *self,
9a24775b
PA
902 ULONGEST from, int size,
903 gdb_disassembly_flags flags)
afedecd3
MM
904{
905 ULONGEST begin, end, context;
906
907 context = abs (size);
0688d04e
MM
908 if (context == 0)
909 error (_("Bad record instruction-history-size."));
afedecd3
MM
910
911 if (size < 0)
912 {
913 end = from;
914
915 if (from < context)
916 begin = 0;
917 else
0688d04e 918 begin = from - context + 1;
afedecd3
MM
919 }
920 else
921 {
922 begin = from;
0688d04e 923 end = from + context - 1;
afedecd3
MM
924
925 /* Check for wrap-around. */
926 if (end < begin)
927 end = ULONGEST_MAX;
928 }
929
4e99c6b7 930 record_btrace_insn_history_range (self, begin, end, flags);
afedecd3
MM
931}
932
/* Print the instruction number range for a function call history line.

   Prints "BEGIN,END" (both inclusive) derived from the function segment's
   instruction offset and count.  */

static void
btrace_call_history_insn_range (struct ui_out *uiout,
				const struct btrace_function *bfun)
{
  unsigned int begin, end, size;

  size = bfun->insn.size ();
  /* A function segment without instructions would have been printed as a
     gap, not as a call history line.  */
  gdb_assert (size > 0);

  begin = bfun->insn_offset;
  end = begin + size - 1;

  ui_out_field_uint (uiout, "insn begin", begin);
  uiout->text (",");
  ui_out_field_uint (uiout, "insn end", end);
}
951
ce0dfbea
MM
952/* Compute the lowest and highest source line for the instructions in BFUN
953 and return them in PBEGIN and PEND.
954 Ignore instructions that can't be mapped to BFUN, e.g. instructions that
955 result from inlining or macro expansion. */
956
957static void
958btrace_compute_src_line_range (const struct btrace_function *bfun,
959 int *pbegin, int *pend)
960{
ce0dfbea
MM
961 struct symtab *symtab;
962 struct symbol *sym;
ce0dfbea
MM
963 int begin, end;
964
965 begin = INT_MAX;
966 end = INT_MIN;
967
968 sym = bfun->sym;
969 if (sym == NULL)
970 goto out;
971
972 symtab = symbol_symtab (sym);
973
0860c437 974 for (const btrace_insn &insn : bfun->insn)
ce0dfbea
MM
975 {
976 struct symtab_and_line sal;
977
0860c437 978 sal = find_pc_line (insn.pc, 0);
ce0dfbea
MM
979 if (sal.symtab != symtab || sal.line == 0)
980 continue;
981
325fac50
PA
982 begin = std::min (begin, sal.line);
983 end = std::max (end, sal.line);
ce0dfbea
MM
984 }
985
986 out:
987 *pbegin = begin;
988 *pend = end;
989}
990
afedecd3
MM
/* Print the source line information for a function call history line.

   Prints "file[:min[,max]]"; prints nothing for functions without a symbol
   or without a valid line range.  */

static void
btrace_call_history_src_line (struct ui_out *uiout,
			      const struct btrace_function *bfun)
{
  struct symbol *sym;
  int begin, end;

  sym = bfun->sym;
  if (sym == NULL)
    return;

  uiout->field_string ("file",
		       symtab_to_filename_for_display (symbol_symtab (sym)));

  btrace_compute_src_line_range (bfun, &begin, &end);
  /* An inverted range means no instruction mapped to a source line.  */
  if (end < begin)
    return;

  uiout->text (":");
  uiout->field_int ("min line", begin);

  /* Print the range only if it spans more than one line.  */
  if (end == begin)
    return;

  uiout->text (",");
  uiout->field_int ("max line", end);
}
1020
0b722aec
MM
1021/* Get the name of a branch trace function. */
1022
1023static const char *
1024btrace_get_bfun_name (const struct btrace_function *bfun)
1025{
1026 struct minimal_symbol *msym;
1027 struct symbol *sym;
1028
1029 if (bfun == NULL)
1030 return "??";
1031
1032 msym = bfun->msym;
1033 sym = bfun->sym;
1034
1035 if (sym != NULL)
1036 return SYMBOL_PRINT_NAME (sym);
1037 else if (msym != NULL)
efd66ac6 1038 return MSYMBOL_PRINT_NAME (msym);
0b722aec
MM
1039 else
1040 return "??";
1041}
1042
afedecd3
MM
/* Disassemble a section of the recorded function trace.

   Prints one line per function segment in [BEGIN; END): index, optional
   call-depth indentation, function name, and optionally the instruction
   range and source line info, depending on INT_FLAGS.  Gaps are printed as
   decode errors.  */

static void
btrace_call_history (struct ui_out *uiout,
		     const struct btrace_thread_info *btinfo,
		     const struct btrace_call_iterator *begin,
		     const struct btrace_call_iterator *end,
		     int int_flags)
{
  struct btrace_call_iterator it;
  record_print_flags flags = (enum record_print_flag) int_flags;

  DEBUG ("ftrace (0x%x): [%u; %u)", int_flags, btrace_call_number (begin),
	 btrace_call_number (end));

  for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
    {
      const struct btrace_function *bfun;
      struct minimal_symbol *msym;
      struct symbol *sym;

      bfun = btrace_call_get (&it);
      sym = bfun->sym;
      msym = bfun->msym;

      /* Print the function index.  */
      ui_out_field_uint (uiout, "index", bfun->number);
      uiout->text ("\t");

      /* Indicate gaps in the trace.  */
      if (bfun->errcode != 0)
	{
	  const struct btrace_config *conf;

	  conf = btrace_conf (btinfo);

	  /* We have trace so we must have a configuration.  */
	  gdb_assert (conf != NULL);

	  btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);

	  continue;
	}

      if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
	{
	  /* Indent by the call depth; btinfo->level normalizes the segment
	     level so the shallowest call starts at zero.  */
	  int level = bfun->level + btinfo->level, i;

	  for (i = 0; i < level; ++i)
	    uiout->text ("  ");
	}

      if (sym != NULL)
	uiout->field_string ("function", SYMBOL_PRINT_NAME (sym));
      else if (msym != NULL)
	uiout->field_string ("function", MSYMBOL_PRINT_NAME (msym));
      else if (!uiout->is_mi_like_p ())
	uiout->field_string ("function", "??");

      if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
	{
	  uiout->text (_("\tinst "));
	  btrace_call_history_insn_range (uiout, bfun);
	}

      if ((flags & RECORD_PRINT_SRC_LINE) != 0)
	{
	  uiout->text (_("\tat "));
	  btrace_call_history_src_line (uiout, bfun);
	}

      uiout->text ("\n");
    }
}
1117
/* The to_call_history method of target record-btrace.

   Print SIZE function-call-history entries relative to the current browsing
   position; a negative SIZE moves backwards.  */

static void
record_btrace_call_history (struct target_ops *self, int size,
			    record_print_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  /* NOTE(review): the tuple id "insn history" looks copied from the
     instruction-history code; the range variant below uses "func history".
     Confirm no MI consumer depends on this id before changing it.  */
  ui_out_emit_tuple tuple_emitter (uiout, "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  btinfo = require_btrace ();
  history = btinfo->call_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("call-history (0x%x): %d", (int) flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
	 start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
	{
	  begin.btinfo = btinfo;
	  begin.index = replay->call_index;
	}
      else
	btrace_call_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
	 expand in the other direction, as well, to fill up any remaining
	 context.  */
      end = begin;
      if (size < 0)
	{
	  /* We want the current position covered, as well.  */
	  covered = btrace_call_next (&end, 1);
	  covered += btrace_call_prev (&begin, context - covered);
	  covered += btrace_call_next (&end, context - covered);
	}
      else
	{
	  covered = btrace_call_next (&end, context);
	  covered += btrace_call_prev (&begin, context - covered);
	}
    }
  else
    {
      /* Continue browsing from the previously shown range.  */
      begin = history->begin;
      end = history->end;

      DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", (int) flags, size,
	     btrace_call_number (&begin), btrace_call_number (&end));

      if (size < 0)
	{
	  end = begin;
	  covered = btrace_call_prev (&begin, context);
	}
      else
	{
	  begin = end;
	  covered = btrace_call_next (&end, context);
	}
    }

  if (covered > 0)
    btrace_call_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
	printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
	printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  /* Remember the range for the next browsing request.  */
  btrace_set_call_history (btinfo, &begin, &end);
}
1204
/* The to_call_history_range method of target record-btrace.

   Print the function-call history for the inclusive range [FROM; TO].
   Errors out on wrap-around or an out-of-bounds lower end; silently
   truncates an out-of-bounds upper end.  */

static void
record_btrace_call_history_range (struct target_ops *self,
				  ULONGEST from, ULONGEST to,
				  record_print_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_iterator begin, end;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "func history");
  low = from;
  high = to;

  DEBUG ("call-history (0x%x): [%u; %u)", (int) flags, low, high);

  /* Check for wrap-arounds.  Call numbers are unsigned int; a ULONGEST
     argument that does not round-trip was out of range.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_call_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_call_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_call_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_call_next (&end, 1);
    }

  btrace_call_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_call_history (btinfo, &begin, &end);
}
1253
1254/* The to_call_history_from method of target record-btrace. */
1255
1256static void
ec0aea04 1257record_btrace_call_history_from (struct target_ops *self,
8d297bbf 1258 ULONGEST from, int size,
0cb7c7b0 1259 record_print_flags flags)
afedecd3
MM
1260{
1261 ULONGEST begin, end, context;
1262
1263 context = abs (size);
0688d04e
MM
1264 if (context == 0)
1265 error (_("Bad record function-call-history-size."));
afedecd3
MM
1266
1267 if (size < 0)
1268 {
1269 end = from;
1270
1271 if (from < context)
1272 begin = 0;
1273 else
0688d04e 1274 begin = from - context + 1;
afedecd3
MM
1275 }
1276 else
1277 {
1278 begin = from;
0688d04e 1279 end = from + context - 1;
afedecd3
MM
1280
1281 /* Check for wrap-around. */
1282 if (end < begin)
1283 end = ULONGEST_MAX;
1284 }
1285
f0d960ea 1286 record_btrace_call_history_range (self, begin, end, flags);
afedecd3
MM
1287}
1288
b158a20f
TW
1289/* The to_record_method method of target record-btrace. */
1290
1291static enum record_method
1292record_btrace_record_method (struct target_ops *self, ptid_t ptid)
1293{
b158a20f
TW
1294 struct thread_info * const tp = find_thread_ptid (ptid);
1295
1296 if (tp == NULL)
1297 error (_("No thread."));
1298
1299 if (tp->btrace.target == NULL)
1300 return RECORD_METHOD_NONE;
1301
1302 return RECORD_METHOD_BTRACE;
1303}
1304
07bbe694
MM
/* The to_record_is_replaying method of target record-btrace.

   Return non-zero if any live thread matching PTID is currently
   replaying its execution history.  */

static int
record_btrace_is_replaying (struct target_ops *self, ptid_t ptid)
{
  struct thread_info *tp;

  ALL_NON_EXITED_THREADS (tp)
    if (ptid_match (tp->ptid, ptid) && btrace_is_replaying (tp))
      return 1;

  return 0;
}
1318
7ff27e9b
MM
1319/* The to_record_will_replay method of target record-btrace. */
1320
1321static int
1322record_btrace_will_replay (struct target_ops *self, ptid_t ptid, int dir)
1323{
1324 return dir == EXEC_REVERSE || record_btrace_is_replaying (self, ptid);
1325}
1326
633785ff
MM
/* The to_xfer_partial method of target record-btrace.

   While replaying in read-only mode, memory writes are refused and reads
   are restricted to read-only sections; everything else is forwarded to
   the target beneath.  */

static enum target_xfer_status
record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
			    const char *annex, gdb_byte *readbuf,
			    const gdb_byte *writebuf, ULONGEST offset,
			    ULONGEST len, ULONGEST *xfered_len)
{
  /* Filter out requests that don't make sense during replay.  */
  if (replay_memory_access == replay_memory_access_read_only
      && !record_btrace_generating_corefile
      && record_btrace_is_replaying (ops, inferior_ptid))
    {
      switch (object)
	{
	case TARGET_OBJECT_MEMORY:
	  {
	    struct target_section *section;

	    /* We do not allow writing memory in general.  */
	    if (writebuf != NULL)
	      {
		*xfered_len = len;
		return TARGET_XFER_UNAVAILABLE;
	      }

	    /* We allow reading readonly memory.  */
	    section = target_section_by_addr (ops, offset);
	    if (section != NULL)
	      {
		/* Check if the section we found is readonly.  */
		if ((bfd_get_section_flags (section->the_bfd_section->owner,
					    section->the_bfd_section)
		     & SEC_READONLY) != 0)
		  {
		    /* Truncate the request to fit into this section and
		       fall through to the forwarding code below.  */
		    len = std::min (len, section->endaddr - offset);
		    break;
		  }
	      }

	    /* Not a readonly section: the data is unavailable.  */
	    *xfered_len = len;
	    return TARGET_XFER_UNAVAILABLE;
	  }
	}
    }

  /* Forward the request.  */
  ops = ops->beneath;
  return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
			       offset, len, xfered_len);
}
1379
/* The to_insert_breakpoint method of target record-btrace.

   Temporarily lifts the replay read-only memory restriction so the
   target beneath can patch the breakpoint in; the previous access mode
   is restored on both the normal and the exceptional path.  */

static int
record_btrace_insert_breakpoint (struct target_ops *ops,
				 struct gdbarch *gdbarch,
				 struct bp_target_info *bp_tgt)
{
  const char *old;
  int ret;

  /* Inserting breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  TRY
    {
      ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      /* Restore the access mode before re-throwing.  */
      replay_memory_access = old;
      throw_exception (except);
    }
  END_CATCH
  replay_memory_access = old;

  return ret;
}
1410
/* The to_remove_breakpoint method of target record-btrace.

   Mirror image of record_btrace_insert_breakpoint: temporarily allow
   memory writes while the target beneath removes the breakpoint.  */

static int
record_btrace_remove_breakpoint (struct target_ops *ops,
				 struct gdbarch *gdbarch,
				 struct bp_target_info *bp_tgt,
				 enum remove_bp_reason reason)
{
  const char *old;
  int ret;

  /* Removing breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  TRY
    {
      ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt,
						reason);
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      /* Restore the access mode before re-throwing.  */
      replay_memory_access = old;
      throw_exception (except);
    }
  END_CATCH
  replay_memory_access = old;

  return ret;
}
1443
1f3ef581
MM
1444/* The to_fetch_registers method of target record-btrace. */
1445
1446static void
1447record_btrace_fetch_registers (struct target_ops *ops,
1448 struct regcache *regcache, int regno)
1449{
1450 struct btrace_insn_iterator *replay;
1451 struct thread_info *tp;
1452
bcc0c096 1453 tp = find_thread_ptid (regcache_get_ptid (regcache));
1f3ef581
MM
1454 gdb_assert (tp != NULL);
1455
1456 replay = tp->btrace.replay;
aef92902 1457 if (replay != NULL && !record_btrace_generating_corefile)
1f3ef581
MM
1458 {
1459 const struct btrace_insn *insn;
1460 struct gdbarch *gdbarch;
1461 int pcreg;
1462
ac7936df 1463 gdbarch = regcache->arch ();
1f3ef581
MM
1464 pcreg = gdbarch_pc_regnum (gdbarch);
1465 if (pcreg < 0)
1466 return;
1467
1468 /* We can only provide the PC register. */
1469 if (regno >= 0 && regno != pcreg)
1470 return;
1471
1472 insn = btrace_insn_get (replay);
1473 gdb_assert (insn != NULL);
1474
1475 regcache_raw_supply (regcache, regno, &insn->pc);
1476 }
1477 else
1478 {
e75fdfca 1479 struct target_ops *t = ops->beneath;
1f3ef581 1480
e75fdfca 1481 t->to_fetch_registers (t, regcache, regno);
1f3ef581
MM
1482 }
1483}
1484
1485/* The to_store_registers method of target record-btrace. */
1486
1487static void
1488record_btrace_store_registers (struct target_ops *ops,
1489 struct regcache *regcache, int regno)
1490{
1491 struct target_ops *t;
1492
a52eab48 1493 if (!record_btrace_generating_corefile
bcc0c096 1494 && record_btrace_is_replaying (ops, regcache_get_ptid (regcache)))
4d10e986 1495 error (_("Cannot write registers while replaying."));
1f3ef581
MM
1496
1497 gdb_assert (may_write_registers != 0);
1498
e75fdfca
TT
1499 t = ops->beneath;
1500 t->to_store_registers (t, regcache, regno);
1f3ef581
MM
1501}
1502
1503/* The to_prepare_to_store method of target record-btrace. */
1504
1505static void
1506record_btrace_prepare_to_store (struct target_ops *ops,
1507 struct regcache *regcache)
1508{
1509 struct target_ops *t;
1510
a52eab48 1511 if (!record_btrace_generating_corefile
bcc0c096 1512 && record_btrace_is_replaying (ops, regcache_get_ptid (regcache)))
1f3ef581
MM
1513 return;
1514
e75fdfca
TT
1515 t = ops->beneath;
1516 t->to_prepare_to_store (t, regcache);
1f3ef581
MM
1517}
1518
0b722aec
MM
/* The branch trace frame cache.  One instance is allocated per sniffed
   frame and registered in BFCACHE below, keyed by FRAME.  */

struct btrace_frame_cache
{
  /* The thread this frame belongs to.  */
  struct thread_info *tp;

  /* The frame info this cache entry describes (hash key).  */
  struct frame_info *frame;

  /* The branch trace function segment backing this frame.  */
  const struct btrace_function *bfun;
};
1532
1533/* A struct btrace_frame_cache hash table indexed by NEXT. */
1534
1535static htab_t bfcache;
1536
1537/* hash_f for htab_create_alloc of bfcache. */
1538
1539static hashval_t
1540bfcache_hash (const void *arg)
1541{
19ba03f4
SM
1542 const struct btrace_frame_cache *cache
1543 = (const struct btrace_frame_cache *) arg;
0b722aec
MM
1544
1545 return htab_hash_pointer (cache->frame);
1546}
1547
1548/* eq_f for htab_create_alloc of bfcache. */
1549
1550static int
1551bfcache_eq (const void *arg1, const void *arg2)
1552{
19ba03f4
SM
1553 const struct btrace_frame_cache *cache1
1554 = (const struct btrace_frame_cache *) arg1;
1555 const struct btrace_frame_cache *cache2
1556 = (const struct btrace_frame_cache *) arg2;
0b722aec
MM
1557
1558 return cache1->frame == cache2->frame;
1559}
1560
1561/* Create a new btrace frame cache. */
1562
1563static struct btrace_frame_cache *
1564bfcache_new (struct frame_info *frame)
1565{
1566 struct btrace_frame_cache *cache;
1567 void **slot;
1568
1569 cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
1570 cache->frame = frame;
1571
1572 slot = htab_find_slot (bfcache, cache, INSERT);
1573 gdb_assert (*slot == NULL);
1574 *slot = cache;
1575
1576 return cache;
1577}
1578
1579/* Extract the branch trace function from a branch trace frame. */
1580
1581static const struct btrace_function *
1582btrace_get_frame_function (struct frame_info *frame)
1583{
1584 const struct btrace_frame_cache *cache;
0b722aec
MM
1585 struct btrace_frame_cache pattern;
1586 void **slot;
1587
1588 pattern.frame = frame;
1589
1590 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1591 if (slot == NULL)
1592 return NULL;
1593
19ba03f4 1594 cache = (const struct btrace_frame_cache *) *slot;
0b722aec
MM
1595 return cache->bfun;
1596}
1597
cecac1ab
MM
1598/* Implement stop_reason method for record_btrace_frame_unwind. */
1599
1600static enum unwind_stop_reason
1601record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1602 void **this_cache)
1603{
0b722aec
MM
1604 const struct btrace_frame_cache *cache;
1605 const struct btrace_function *bfun;
1606
19ba03f4 1607 cache = (const struct btrace_frame_cache *) *this_cache;
0b722aec
MM
1608 bfun = cache->bfun;
1609 gdb_assert (bfun != NULL);
1610
42bfe59e 1611 if (bfun->up == 0)
0b722aec
MM
1612 return UNWIND_UNAVAILABLE;
1613
1614 return UNWIND_NO_REASON;
cecac1ab
MM
1615}
1616
/* Implement this_id method for record_btrace_frame_unwind.

   Builds an unavailable-stack frame id from the frame's function start
   address and the number of the first segment of this function instance
   (found by walking the PREV links back).  */

static void
record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
			     struct frame_id *this_id)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;
  struct btrace_call_iterator it;
  CORE_ADDR code, special;

  cache = (const struct btrace_frame_cache *) *this_cache;

  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  /* Rewind to the first segment of this function instance so that all
     segments of one call share the same frame id.  */
  while (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->prev) != 0)
    bfun = btrace_call_get (&it);

  code = get_frame_func (this_frame);
  special = bfun->number;

  *this_id = frame_id_build_unavailable_stack_special (code, special);

  DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
	 btrace_get_bfun_name (cache->bfun),
	 core_addr_to_string_nz (this_id->code_addr),
	 core_addr_to_string_nz (this_id->special_addr));
}
1646
/* Implement prev_register method for record_btrace_frame_unwind.

   Only the PC can be unwound: for a call it is the instruction after the
   call site in the caller's last recorded instruction; for a return-linked
   segment it is the caller's first instruction.  Everything else is
   reported unavailable.  */

static struct value *
record_btrace_frame_prev_register (struct frame_info *this_frame,
				   void **this_cache,
				   int regnum)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun, *caller;
  struct btrace_call_iterator it;
  struct gdbarch *gdbarch;
  CORE_ADDR pc;
  int pcreg;

  gdbarch = get_frame_arch (this_frame);
  pcreg = gdbarch_pc_regnum (gdbarch);
  if (pcreg < 0 || regnum != pcreg)
    throw_error (NOT_AVAILABLE_ERROR,
		 _("Registers are not available in btrace record history"));

  cache = (const struct btrace_frame_cache *) *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  if (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->up) == 0)
    throw_error (NOT_AVAILABLE_ERROR,
		 _("No caller in btrace record history"));

  caller = btrace_call_get (&it);

  if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
    /* The up link is a return: resume at the caller's first insn.  */
    pc = caller->insn.front ().pc;
  else
    {
      /* The up link is a call: resume after the call instruction.  */
      pc = caller->insn.back ().pc;
      pc += gdb_insn_length (gdbarch, pc);
    }

  DEBUG ("[frame] unwound PC in %s on level %d: %s",
	 btrace_get_bfun_name (bfun), bfun->level,
	 core_addr_to_string_nz (pc));

  return frame_unwind_got_address (this_frame, regnum, pc);
}
1691
/* Implement sniffer method for record_btrace_frame_unwind.

   Claims the sentinel frame while replaying (using the current replay
   position) and any frame whose callee we already claimed via a call
   (not tailcall) link.  */

static int
record_btrace_frame_sniffer (const struct frame_unwind *self,
			     struct frame_info *this_frame,
			     void **this_cache)
{
  const struct btrace_function *bfun;
  struct btrace_frame_cache *cache;
  struct thread_info *tp;
  struct frame_info *next;

  /* THIS_FRAME does not contain a reference to its thread.  */
  tp = find_thread_ptid (inferior_ptid);
  gdb_assert (tp != NULL);

  bfun = NULL;
  next = get_next_frame (this_frame);
  if (next == NULL)
    {
      /* Innermost frame: use the current replay position, if any.  */
      const struct btrace_insn_iterator *replay;

      replay = tp->btrace.replay;
      if (replay != NULL)
	bfun = &replay->btinfo->functions[replay->call_index];
    }
  else
    {
      /* Outer frame: follow the callee's up (caller) link.  */
      const struct btrace_function *callee;
      struct btrace_call_iterator it;

      callee = btrace_get_frame_function (next);
      if (callee == NULL || (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
	return 0;

      if (btrace_find_call_by_number (&it, &tp->btrace, callee->up) == 0)
	return 0;

      bfun = btrace_call_get (&it);
    }

  if (bfun == NULL)
    return 0;

  DEBUG ("[frame] sniffed frame for %s on level %d",
	 btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = tp;
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}
1747
/* Implement sniffer method for record_btrace_tailcall_frame_unwind.

   Claims a frame only when the next (callee) frame is ours and its up
   link is marked as a tail call.  */

static int
record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
				      struct frame_info *this_frame,
				      void **this_cache)
{
  const struct btrace_function *bfun, *callee;
  struct btrace_frame_cache *cache;
  struct btrace_call_iterator it;
  struct frame_info *next;
  struct thread_info *tinfo;

  next = get_next_frame (this_frame);
  if (next == NULL)
    return 0;

  callee = btrace_get_frame_function (next);
  if (callee == NULL)
    return 0;

  if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
    return 0;

  tinfo = find_thread_ptid (inferior_ptid);
  if (btrace_find_call_by_number (&it, &tinfo->btrace, callee->up) == 0)
    return 0;

  bfun = btrace_call_get (&it);

  DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
	 btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = tinfo;
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}
1789
1790static void
1791record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
1792{
1793 struct btrace_frame_cache *cache;
1794 void **slot;
1795
19ba03f4 1796 cache = (struct btrace_frame_cache *) this_cache;
0b722aec
MM
1797
1798 slot = htab_find_slot (bfcache, cache, NO_INSERT);
1799 gdb_assert (slot != NULL);
1800
1801 htab_remove_elt (bfcache, cache);
cecac1ab
MM
1802}
1803
/* btrace recording does not store previous memory content, neither the stack
   frames content.  Any unwinding would return erroneous results as the stack
   contents no longer matches the changed PC value restored from history.
   Therefore this unwinder reports any possibly unwound registers as
   <unavailable>.  */

const struct frame_unwind record_btrace_frame_unwind =
{
  NORMAL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,					/* unwind_data */
  record_btrace_frame_sniffer,
  record_btrace_frame_dealloc_cache
};
1820
/* Like record_btrace_frame_unwind, but for frames synthesized from
   tail-call links in the branch trace.  */

const struct frame_unwind record_btrace_tailcall_frame_unwind =
{
  TAILCALL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,					/* unwind_data */
  record_btrace_tailcall_frame_sniffer,
  record_btrace_frame_dealloc_cache
};
b2f4cfde 1831
ac01945b
TT
1832/* Implement the to_get_unwinder method. */
1833
1834static const struct frame_unwind *
1835record_btrace_to_get_unwinder (struct target_ops *self)
1836{
1837 return &record_btrace_frame_unwind;
1838}
1839
1840/* Implement the to_get_tailcall_unwinder method. */
1841
1842static const struct frame_unwind *
1843record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
1844{
1845 return &record_btrace_tailcall_frame_unwind;
1846}
1847
987e68b1
MM
1848/* Return a human-readable string for FLAG. */
1849
1850static const char *
1851btrace_thread_flag_to_str (enum btrace_thread_flag flag)
1852{
1853 switch (flag)
1854 {
1855 case BTHR_STEP:
1856 return "step";
1857
1858 case BTHR_RSTEP:
1859 return "reverse-step";
1860
1861 case BTHR_CONT:
1862 return "cont";
1863
1864 case BTHR_RCONT:
1865 return "reverse-cont";
1866
1867 case BTHR_STOP:
1868 return "stop";
1869 }
1870
1871 return "<invalid>";
1872}
1873
52834460
MM
/* Indicate that TP should be resumed according to FLAG.  The actual move
   happens later, in record_btrace_wait; here we only fetch fresh trace
   and record the intent in the thread's btrace flags.  */

static void
record_btrace_resume_thread (struct thread_info *tp,
			     enum btrace_thread_flag flag)
{
  struct btrace_thread_info *btinfo;

  DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp),
	 target_pid_to_str (tp->ptid), flag, btrace_thread_flag_to_str (flag));

  btinfo = &tp->btrace;

  /* Fetch the latest branch trace.  */
  btrace_fetch (tp, record_btrace_get_cpu ());

  /* A resume request overwrites a preceding resume or stop request.  */
  btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
  btinfo->flags |= flag;
}
1894
ec71cc2f
MM
/* Get the current frame for TP.

   Temporarily switches inferior_ptid to TP and clears the thread's
   executing flag; both are restored on the normal and the exceptional
   path before returning.  */

static struct frame_info *
get_thread_current_frame (struct thread_info *tp)
{
  struct frame_info *frame;
  ptid_t old_inferior_ptid;
  int executing;

  /* Set INFERIOR_PTID, which is implicitly used by get_current_frame.  */
  old_inferior_ptid = inferior_ptid;
  inferior_ptid = tp->ptid;

  /* Clear the executing flag to allow changes to the current frame.
     We are not actually running, yet.  We just started a reverse execution
     command or a record goto command.
     For the latter, EXECUTING is false and this has no effect.
     For the former, EXECUTING is true and we're in to_wait, about to
     move the thread.  Since we need to recompute the stack, we temporarily
     set EXECUTING to false.  */
  executing = is_executing (inferior_ptid);
  set_executing (inferior_ptid, 0);

  frame = NULL;
  TRY
    {
      frame = get_current_frame ();
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      /* Restore the previous execution state.  */
      set_executing (inferior_ptid, executing);

      /* Restore the previous inferior_ptid.  */
      inferior_ptid = old_inferior_ptid;

      throw_exception (except);
    }
  END_CATCH

  /* Restore the previous execution state.  */
  set_executing (inferior_ptid, executing);

  /* Restore the previous inferior_ptid.  */
  inferior_ptid = old_inferior_ptid;

  return frame;
}
1943
52834460
MM
/* Start replaying a thread.

   Positions a new instruction iterator at the end of TP's trace (skipping
   trailing gaps), installs it as the replay position, and fixes up any
   stepping-related frame ids that were computed before replaying started.
   Returns the iterator, or NULL if there is no trace.  On error, undoes
   the installation and re-throws.  */

static struct btrace_insn_iterator *
record_btrace_start_replaying (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;
  replay = NULL;

  /* We can't start replaying without trace.  */
  if (btinfo->functions.empty ())
    return NULL;

  /* GDB stores the current frame_id when stepping in order to detect steps
     into subroutines.
     Since frames are computed differently when we're replaying, we need to
     recompute those stored frames and fix them up so we can still detect
     subroutines after we started replaying.  */
  TRY
    {
      struct frame_info *frame;
      struct frame_id frame_id;
      int upd_step_frame_id, upd_step_stack_frame_id;

      /* The current frame without replaying - computed via normal unwind.  */
      frame = get_thread_current_frame (tp);
      frame_id = get_frame_id (frame);

      /* Check if we need to update any stepping-related frame id's.  */
      upd_step_frame_id = frame_id_eq (frame_id,
				       tp->control.step_frame_id);
      upd_step_stack_frame_id = frame_id_eq (frame_id,
					     tp->control.step_stack_frame_id);

      /* We start replaying at the end of the branch trace.  This corresponds
	 to the current instruction.  */
      replay = XNEW (struct btrace_insn_iterator);
      btrace_insn_end (replay, btinfo);

      /* Skip gaps at the end of the trace.  */
      while (btrace_insn_get (replay) == NULL)
	{
	  unsigned int steps;

	  steps = btrace_insn_prev (replay, 1);
	  if (steps == 0)
	    error (_("No trace."));
	}

      /* We're not replaying, yet.  */
      gdb_assert (btinfo->replay == NULL);
      btinfo->replay = replay;

      /* Make sure we're not using any stale registers.  */
      registers_changed_ptid (tp->ptid);

      /* The current frame with replaying - computed via btrace unwind.  */
      frame = get_thread_current_frame (tp);
      frame_id = get_frame_id (frame);

      /* Replace stepping related frames where necessary.  */
      if (upd_step_frame_id)
	tp->control.step_frame_id = frame_id;
      if (upd_step_stack_frame_id)
	tp->control.step_stack_frame_id = frame_id;
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      /* Undo the partial installation before re-throwing.  */
      xfree (btinfo->replay);
      btinfo->replay = NULL;

      registers_changed_ptid (tp->ptid);

      throw_exception (except);
    }
  END_CATCH

  return replay;
}
2025
2026/* Stop replaying a thread. */
2027
2028static void
2029record_btrace_stop_replaying (struct thread_info *tp)
2030{
2031 struct btrace_thread_info *btinfo;
2032
2033 btinfo = &tp->btrace;
2034
2035 xfree (btinfo->replay);
2036 btinfo->replay = NULL;
2037
2038 /* Make sure we're not leaving any stale registers. */
2039 registers_changed_ptid (tp->ptid);
2040}
2041
e3cfc1c7
MM
2042/* Stop replaying TP if it is at the end of its execution history. */
2043
2044static void
2045record_btrace_stop_replaying_at_end (struct thread_info *tp)
2046{
2047 struct btrace_insn_iterator *replay, end;
2048 struct btrace_thread_info *btinfo;
2049
2050 btinfo = &tp->btrace;
2051 replay = btinfo->replay;
2052
2053 if (replay == NULL)
2054 return;
2055
2056 btrace_insn_end (&end, btinfo);
2057
2058 if (btrace_insn_cmp (replay, &end) == 0)
2059 record_btrace_stop_replaying (tp);
2060}
2061
b2f4cfde
MM
/* The to_resume method of target record-btrace.

   When executing forward with no thread replaying, the request is simply
   forwarded.  Otherwise the move intent is recorded per thread; the
   actual stepping happens in record_btrace_wait.  */

static void
record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
		      enum gdb_signal signal)
{
  struct thread_info *tp;
  enum btrace_thread_flag flag, cflag;

  DEBUG ("resume %s: %s%s", target_pid_to_str (ptid),
	 execution_direction == EXEC_REVERSE ? "reverse-" : "",
	 step ? "step" : "cont");

  /* Store the execution direction of the last resume.

     If there is more than one to_resume call, we have to rely on infrun
     to not change the execution direction in-between.  */
  record_btrace_resume_exec_dir = execution_direction;

  /* As long as we're not replaying, just forward the request.

     For non-stop targets this means that no thread is replaying.  In order to
     make progress, we may need to explicitly move replaying threads to the end
     of their execution history.  */
  if ((execution_direction != EXEC_REVERSE)
      && !record_btrace_is_replaying (ops, minus_one_ptid))
    {
      ops = ops->beneath;
      ops->to_resume (ops, ptid, step, signal);
      return;
    }

  /* Compute the btrace thread flag for the requested move.  */
  if (execution_direction == EXEC_REVERSE)
    {
      flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;
      cflag = BTHR_RCONT;
    }
  else
    {
      flag = step == 0 ? BTHR_CONT : BTHR_STEP;
      cflag = BTHR_CONT;
    }

  /* We just indicate the resume intent here.  The actual stepping happens in
     record_btrace_wait below.

     For all-stop targets, we only step INFERIOR_PTID and continue others.  */
  if (!target_is_non_stop_p ())
    {
      gdb_assert (ptid_match (inferior_ptid, ptid));

      ALL_NON_EXITED_THREADS (tp)
	if (ptid_match (tp->ptid, ptid))
	  {
	    if (ptid_match (tp->ptid, inferior_ptid))
	      record_btrace_resume_thread (tp, flag);
	    else
	      record_btrace_resume_thread (tp, cflag);
	  }
    }
  else
    {
      ALL_NON_EXITED_THREADS (tp)
	if (ptid_match (tp->ptid, ptid))
	  record_btrace_resume_thread (tp, flag);
    }

  /* Async support.  */
  if (target_can_async_p ())
    {
      target_async (1);
      mark_async_event_handler (record_btrace_async_inferior_event_handler);
    }
}
2137
85ad3aaf
PA
2138/* The to_commit_resume method of target record-btrace. */
2139
2140static void
2141record_btrace_commit_resume (struct target_ops *ops)
2142{
2143 if ((execution_direction != EXEC_REVERSE)
2144 && !record_btrace_is_replaying (ops, minus_one_ptid))
2145 ops->beneath->to_commit_resume (ops->beneath);
2146}
2147
987e68b1
MM
2148/* Cancel resuming TP. */
2149
2150static void
2151record_btrace_cancel_resume (struct thread_info *tp)
2152{
2153 enum btrace_thread_flag flags;
2154
2155 flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
2156 if (flags == 0)
2157 return;
2158
43792cf0
PA
2159 DEBUG ("cancel resume thread %s (%s): %x (%s)",
2160 print_thread_id (tp),
987e68b1
MM
2161 target_pid_to_str (tp->ptid), flags,
2162 btrace_thread_flag_to_str (flags));
2163
2164 tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
e3cfc1c7 2165 record_btrace_stop_replaying_at_end (tp);
52834460
MM
2166}
2167
2168/* Return a target_waitstatus indicating that we ran out of history. */
2169
2170static struct target_waitstatus
2171btrace_step_no_history (void)
2172{
2173 struct target_waitstatus status;
2174
2175 status.kind = TARGET_WAITKIND_NO_HISTORY;
2176
2177 return status;
2178}
2179
2180/* Return a target_waitstatus indicating that a step finished. */
2181
2182static struct target_waitstatus
2183btrace_step_stopped (void)
2184{
2185 struct target_waitstatus status;
2186
2187 status.kind = TARGET_WAITKIND_STOPPED;
2188 status.value.sig = GDB_SIGNAL_TRAP;
2189
2190 return status;
2191}
2192
6e4879f0
MM
2193/* Return a target_waitstatus indicating that a thread was stopped as
2194 requested. */
2195
2196static struct target_waitstatus
2197btrace_step_stopped_on_request (void)
2198{
2199 struct target_waitstatus status;
2200
2201 status.kind = TARGET_WAITKIND_STOPPED;
2202 status.value.sig = GDB_SIGNAL_0;
2203
2204 return status;
2205}
2206
d825d248
MM
2207/* Return a target_waitstatus indicating a spurious stop. */
2208
2209static struct target_waitstatus
2210btrace_step_spurious (void)
2211{
2212 struct target_waitstatus status;
2213
2214 status.kind = TARGET_WAITKIND_SPURIOUS;
2215
2216 return status;
2217}
2218
e3cfc1c7
MM
2219/* Return a target_waitstatus indicating that the thread was not resumed. */
2220
2221static struct target_waitstatus
2222btrace_step_no_resumed (void)
2223{
2224 struct target_waitstatus status;
2225
2226 status.kind = TARGET_WAITKIND_NO_RESUMED;
2227
2228 return status;
2229}
2230
2231/* Return a target_waitstatus indicating that we should wait again. */
2232
2233static struct target_waitstatus
2234btrace_step_again (void)
2235{
2236 struct target_waitstatus status;
2237
2238 status.kind = TARGET_WAITKIND_IGNORE;
2239
2240 return status;
2241}
2242
52834460
MM
2243/* Clear the record histories. */
2244
2245static void
2246record_btrace_clear_histories (struct btrace_thread_info *btinfo)
2247{
2248 xfree (btinfo->insn_history);
2249 xfree (btinfo->call_history);
2250
2251 btinfo->insn_history = NULL;
2252 btinfo->call_history = NULL;
2253}
2254
3c615f99
MM
2255/* Check whether TP's current replay position is at a breakpoint. */
2256
2257static int
2258record_btrace_replay_at_breakpoint (struct thread_info *tp)
2259{
2260 struct btrace_insn_iterator *replay;
2261 struct btrace_thread_info *btinfo;
2262 const struct btrace_insn *insn;
2263 struct inferior *inf;
2264
2265 btinfo = &tp->btrace;
2266 replay = btinfo->replay;
2267
2268 if (replay == NULL)
2269 return 0;
2270
2271 insn = btrace_insn_get (replay);
2272 if (insn == NULL)
2273 return 0;
2274
2275 inf = find_inferior_ptid (tp->ptid);
2276 if (inf == NULL)
2277 return 0;
2278
2279 return record_check_stopped_by_breakpoint (inf->aspace, insn->pc,
2280 &btinfo->stop_reason);
2281}
2282
d825d248 2283/* Step one instruction in forward direction. */
52834460
MM
2284
2285static struct target_waitstatus
d825d248 2286record_btrace_single_step_forward (struct thread_info *tp)
52834460 2287{
b61ce85c 2288 struct btrace_insn_iterator *replay, end, start;
52834460 2289 struct btrace_thread_info *btinfo;
52834460 2290
d825d248
MM
2291 btinfo = &tp->btrace;
2292 replay = btinfo->replay;
2293
2294 /* We're done if we're not replaying. */
2295 if (replay == NULL)
2296 return btrace_step_no_history ();
2297
011c71b6
MM
2298 /* Check if we're stepping a breakpoint. */
2299 if (record_btrace_replay_at_breakpoint (tp))
2300 return btrace_step_stopped ();
2301
b61ce85c
MM
2302 /* Skip gaps during replay. If we end up at a gap (at the end of the trace),
2303 jump back to the instruction at which we started. */
2304 start = *replay;
d825d248
MM
2305 do
2306 {
2307 unsigned int steps;
2308
e3cfc1c7
MM
2309 /* We will bail out here if we continue stepping after reaching the end
2310 of the execution history. */
d825d248
MM
2311 steps = btrace_insn_next (replay, 1);
2312 if (steps == 0)
b61ce85c
MM
2313 {
2314 *replay = start;
2315 return btrace_step_no_history ();
2316 }
d825d248
MM
2317 }
2318 while (btrace_insn_get (replay) == NULL);
2319
2320 /* Determine the end of the instruction trace. */
2321 btrace_insn_end (&end, btinfo);
2322
e3cfc1c7
MM
2323 /* The execution trace contains (and ends with) the current instruction.
2324 This instruction has not been executed, yet, so the trace really ends
2325 one instruction earlier. */
d825d248 2326 if (btrace_insn_cmp (replay, &end) == 0)
e3cfc1c7 2327 return btrace_step_no_history ();
d825d248
MM
2328
2329 return btrace_step_spurious ();
2330}
2331
2332/* Step one instruction in backward direction. */
2333
2334static struct target_waitstatus
2335record_btrace_single_step_backward (struct thread_info *tp)
2336{
b61ce85c 2337 struct btrace_insn_iterator *replay, start;
d825d248 2338 struct btrace_thread_info *btinfo;
e59fa00f 2339
52834460
MM
2340 btinfo = &tp->btrace;
2341 replay = btinfo->replay;
2342
d825d248
MM
2343 /* Start replaying if we're not already doing so. */
2344 if (replay == NULL)
2345 replay = record_btrace_start_replaying (tp);
2346
2347 /* If we can't step any further, we reached the end of the history.
b61ce85c
MM
2348 Skip gaps during replay. If we end up at a gap (at the beginning of
2349 the trace), jump back to the instruction at which we started. */
2350 start = *replay;
d825d248
MM
2351 do
2352 {
2353 unsigned int steps;
2354
2355 steps = btrace_insn_prev (replay, 1);
2356 if (steps == 0)
b61ce85c
MM
2357 {
2358 *replay = start;
2359 return btrace_step_no_history ();
2360 }
d825d248
MM
2361 }
2362 while (btrace_insn_get (replay) == NULL);
2363
011c71b6
MM
2364 /* Check if we're stepping a breakpoint.
2365
2366 For reverse-stepping, this check is after the step. There is logic in
2367 infrun.c that handles reverse-stepping separately. See, for example,
2368 proceed and adjust_pc_after_break.
2369
2370 This code assumes that for reverse-stepping, PC points to the last
2371 de-executed instruction, whereas for forward-stepping PC points to the
2372 next to-be-executed instruction. */
2373 if (record_btrace_replay_at_breakpoint (tp))
2374 return btrace_step_stopped ();
2375
d825d248
MM
2376 return btrace_step_spurious ();
2377}
2378
2379/* Step a single thread. */
2380
2381static struct target_waitstatus
2382record_btrace_step_thread (struct thread_info *tp)
2383{
2384 struct btrace_thread_info *btinfo;
2385 struct target_waitstatus status;
2386 enum btrace_thread_flag flags;
2387
2388 btinfo = &tp->btrace;
2389
6e4879f0
MM
2390 flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
2391 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
52834460 2392
43792cf0 2393 DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp),
987e68b1
MM
2394 target_pid_to_str (tp->ptid), flags,
2395 btrace_thread_flag_to_str (flags));
52834460 2396
6e4879f0
MM
2397 /* We can't step without an execution history. */
2398 if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
2399 return btrace_step_no_history ();
2400
52834460
MM
2401 switch (flags)
2402 {
2403 default:
2404 internal_error (__FILE__, __LINE__, _("invalid stepping type."));
2405
6e4879f0
MM
2406 case BTHR_STOP:
2407 return btrace_step_stopped_on_request ();
2408
52834460 2409 case BTHR_STEP:
d825d248
MM
2410 status = record_btrace_single_step_forward (tp);
2411 if (status.kind != TARGET_WAITKIND_SPURIOUS)
e3cfc1c7 2412 break;
52834460
MM
2413
2414 return btrace_step_stopped ();
2415
2416 case BTHR_RSTEP:
d825d248
MM
2417 status = record_btrace_single_step_backward (tp);
2418 if (status.kind != TARGET_WAITKIND_SPURIOUS)
e3cfc1c7 2419 break;
52834460
MM
2420
2421 return btrace_step_stopped ();
2422
2423 case BTHR_CONT:
e3cfc1c7
MM
2424 status = record_btrace_single_step_forward (tp);
2425 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2426 break;
52834460 2427
e3cfc1c7
MM
2428 btinfo->flags |= flags;
2429 return btrace_step_again ();
52834460
MM
2430
2431 case BTHR_RCONT:
e3cfc1c7
MM
2432 status = record_btrace_single_step_backward (tp);
2433 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2434 break;
52834460 2435
e3cfc1c7
MM
2436 btinfo->flags |= flags;
2437 return btrace_step_again ();
2438 }
d825d248 2439
e3cfc1c7
MM
2440 /* We keep threads moving at the end of their execution history. The to_wait
2441 method will stop the thread for whom the event is reported. */
2442 if (status.kind == TARGET_WAITKIND_NO_HISTORY)
2443 btinfo->flags |= flags;
52834460 2444
e3cfc1c7 2445 return status;
b2f4cfde
MM
2446}
2447
/* A vector of threads.

   NOTE(review): record_btrace_wait below now uses std::vector<thread_info *>;
   this VEC typedef looks unused within the visible portion of this file —
   confirm no remaining VEC (tp_t) users before removing.  */

typedef struct thread_info * tp_t;
DEF_VEC_P (tp_t);
2452
a6b5be76
MM
2453/* Announce further events if necessary. */
2454
2455static void
53127008
SM
2456record_btrace_maybe_mark_async_event
2457 (const std::vector<thread_info *> &moving,
2458 const std::vector<thread_info *> &no_history)
a6b5be76 2459{
53127008
SM
2460 bool more_moving = !moving.empty ();
2461 bool more_no_history = !no_history.empty ();;
a6b5be76
MM
2462
2463 if (!more_moving && !more_no_history)
2464 return;
2465
2466 if (more_moving)
2467 DEBUG ("movers pending");
2468
2469 if (more_no_history)
2470 DEBUG ("no-history pending");
2471
2472 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2473}
2474
b2f4cfde
MM
2475/* The to_wait method of target record-btrace. */
2476
2477static ptid_t
2478record_btrace_wait (struct target_ops *ops, ptid_t ptid,
2479 struct target_waitstatus *status, int options)
2480{
53127008
SM
2481 std::vector<thread_info *> moving;
2482 std::vector<thread_info *> no_history;
52834460
MM
2483
2484 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);
2485
b2f4cfde 2486 /* As long as we're not replaying, just forward the request. */
a52eab48
MM
2487 if ((execution_direction != EXEC_REVERSE)
2488 && !record_btrace_is_replaying (ops, minus_one_ptid))
b2f4cfde 2489 {
e75fdfca
TT
2490 ops = ops->beneath;
2491 return ops->to_wait (ops, ptid, status, options);
b2f4cfde
MM
2492 }
2493
e3cfc1c7 2494 /* Keep a work list of moving threads. */
53127008
SM
2495 {
2496 thread_info *tp;
2497
2498 ALL_NON_EXITED_THREADS (tp)
2499 {
2500 if (ptid_match (tp->ptid, ptid)
2501 && ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0))
2502 moving.push_back (tp);
2503 }
2504 }
e3cfc1c7 2505
53127008 2506 if (moving.empty ())
52834460 2507 {
e3cfc1c7 2508 *status = btrace_step_no_resumed ();
52834460 2509
e3cfc1c7 2510 DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid),
23fdd69e 2511 target_waitstatus_to_string (status).c_str ());
e3cfc1c7 2512
e3cfc1c7 2513 return null_ptid;
52834460
MM
2514 }
2515
e3cfc1c7
MM
2516 /* Step moving threads one by one, one step each, until either one thread
2517 reports an event or we run out of threads to step.
2518
2519 When stepping more than one thread, chances are that some threads reach
2520 the end of their execution history earlier than others. If we reported
2521 this immediately, all-stop on top of non-stop would stop all threads and
2522 resume the same threads next time. And we would report the same thread
2523 having reached the end of its execution history again.
2524
2525 In the worst case, this would starve the other threads. But even if other
2526 threads would be allowed to make progress, this would result in far too
2527 many intermediate stops.
2528
2529 We therefore delay the reporting of "no execution history" until we have
2530 nothing else to report. By this time, all threads should have moved to
2531 either the beginning or the end of their execution history. There will
2532 be a single user-visible stop. */
53127008
SM
2533 struct thread_info *eventing = NULL;
2534 while ((eventing == NULL) && !moving.empty ())
e3cfc1c7 2535 {
53127008 2536 for (unsigned int ix = 0; eventing == NULL && ix < moving.size ();)
e3cfc1c7 2537 {
53127008
SM
2538 thread_info *tp = moving[ix];
2539
e3cfc1c7
MM
2540 *status = record_btrace_step_thread (tp);
2541
2542 switch (status->kind)
2543 {
2544 case TARGET_WAITKIND_IGNORE:
2545 ix++;
2546 break;
2547
2548 case TARGET_WAITKIND_NO_HISTORY:
53127008 2549 no_history.push_back (ordered_remove (moving, ix));
e3cfc1c7
MM
2550 break;
2551
2552 default:
53127008 2553 eventing = unordered_remove (moving, ix);
e3cfc1c7
MM
2554 break;
2555 }
2556 }
2557 }
2558
2559 if (eventing == NULL)
2560 {
2561 /* We started with at least one moving thread. This thread must have
2562 either stopped or reached the end of its execution history.
2563
2564 In the former case, EVENTING must not be NULL.
2565 In the latter case, NO_HISTORY must not be empty. */
53127008 2566 gdb_assert (!no_history.empty ());
e3cfc1c7
MM
2567
2568 /* We kept threads moving at the end of their execution history. Stop
2569 EVENTING now that we are going to report its stop. */
53127008 2570 eventing = unordered_remove (no_history, 0);
e3cfc1c7
MM
2571 eventing->btrace.flags &= ~BTHR_MOVE;
2572
2573 *status = btrace_step_no_history ();
2574 }
2575
2576 gdb_assert (eventing != NULL);
2577
2578 /* We kept threads replaying at the end of their execution history. Stop
2579 replaying EVENTING now that we are going to report its stop. */
2580 record_btrace_stop_replaying_at_end (eventing);
52834460
MM
2581
2582 /* Stop all other threads. */
5953356c 2583 if (!target_is_non_stop_p ())
53127008
SM
2584 {
2585 thread_info *tp;
2586
2587 ALL_NON_EXITED_THREADS (tp)
2588 record_btrace_cancel_resume (tp);
2589 }
52834460 2590
a6b5be76
MM
2591 /* In async mode, we need to announce further events. */
2592 if (target_is_async_p ())
2593 record_btrace_maybe_mark_async_event (moving, no_history);
2594
52834460 2595 /* Start record histories anew from the current position. */
e3cfc1c7 2596 record_btrace_clear_histories (&eventing->btrace);
52834460
MM
2597
2598 /* We moved the replay position but did not update registers. */
e3cfc1c7
MM
2599 registers_changed_ptid (eventing->ptid);
2600
43792cf0
PA
2601 DEBUG ("wait ended by thread %s (%s): %s",
2602 print_thread_id (eventing),
e3cfc1c7 2603 target_pid_to_str (eventing->ptid),
23fdd69e 2604 target_waitstatus_to_string (status).c_str ());
52834460 2605
e3cfc1c7 2606 return eventing->ptid;
52834460
MM
2607}
2608
6e4879f0
MM
2609/* The to_stop method of target record-btrace. */
2610
2611static void
2612record_btrace_stop (struct target_ops *ops, ptid_t ptid)
2613{
2614 DEBUG ("stop %s", target_pid_to_str (ptid));
2615
2616 /* As long as we're not replaying, just forward the request. */
a52eab48
MM
2617 if ((execution_direction != EXEC_REVERSE)
2618 && !record_btrace_is_replaying (ops, minus_one_ptid))
6e4879f0
MM
2619 {
2620 ops = ops->beneath;
2621 ops->to_stop (ops, ptid);
2622 }
2623 else
2624 {
2625 struct thread_info *tp;
2626
2627 ALL_NON_EXITED_THREADS (tp)
2628 if (ptid_match (tp->ptid, ptid))
2629 {
2630 tp->btrace.flags &= ~BTHR_MOVE;
2631 tp->btrace.flags |= BTHR_STOP;
2632 }
2633 }
2634 }
2635
/* The to_can_execute_reverse method of target record-btrace.
   Branch tracing always supports reverse execution.  */

static int
record_btrace_can_execute_reverse (struct target_ops *self)
{
  return 1;
}
2643
9e8915c6 2644/* The to_stopped_by_sw_breakpoint method of target record-btrace. */
52834460 2645
9e8915c6
PA
2646static int
2647record_btrace_stopped_by_sw_breakpoint (struct target_ops *ops)
52834460 2648{
a52eab48 2649 if (record_btrace_is_replaying (ops, minus_one_ptid))
9e8915c6
PA
2650 {
2651 struct thread_info *tp = inferior_thread ();
2652
2653 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2654 }
2655
2656 return ops->beneath->to_stopped_by_sw_breakpoint (ops->beneath);
2657}
2658
2659/* The to_supports_stopped_by_sw_breakpoint method of target
2660 record-btrace. */
2661
2662static int
2663record_btrace_supports_stopped_by_sw_breakpoint (struct target_ops *ops)
2664{
a52eab48 2665 if (record_btrace_is_replaying (ops, minus_one_ptid))
9e8915c6
PA
2666 return 1;
2667
2668 return ops->beneath->to_supports_stopped_by_sw_breakpoint (ops->beneath);
2669}
2670
2671/* The to_stopped_by_sw_breakpoint method of target record-btrace. */
2672
2673static int
2674record_btrace_stopped_by_hw_breakpoint (struct target_ops *ops)
2675{
a52eab48 2676 if (record_btrace_is_replaying (ops, minus_one_ptid))
9e8915c6
PA
2677 {
2678 struct thread_info *tp = inferior_thread ();
2679
2680 return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2681 }
2682
2683 return ops->beneath->to_stopped_by_hw_breakpoint (ops->beneath);
2684}
2685
2686/* The to_supports_stopped_by_hw_breakpoint method of target
2687 record-btrace. */
2688
2689static int
2690record_btrace_supports_stopped_by_hw_breakpoint (struct target_ops *ops)
2691{
a52eab48 2692 if (record_btrace_is_replaying (ops, minus_one_ptid))
9e8915c6 2693 return 1;
52834460 2694
9e8915c6 2695 return ops->beneath->to_supports_stopped_by_hw_breakpoint (ops->beneath);
b2f4cfde
MM
2696}
2697
e8032dde 2698/* The to_update_thread_list method of target record-btrace. */
e2887aa3
MM
2699
2700static void
e8032dde 2701record_btrace_update_thread_list (struct target_ops *ops)
e2887aa3 2702{
e8032dde 2703 /* We don't add or remove threads during replay. */
a52eab48 2704 if (record_btrace_is_replaying (ops, minus_one_ptid))
e2887aa3
MM
2705 return;
2706
2707 /* Forward the request. */
e75fdfca 2708 ops = ops->beneath;
e8032dde 2709 ops->to_update_thread_list (ops);
e2887aa3
MM
2710}
2711
2712/* The to_thread_alive method of target record-btrace. */
2713
2714static int
2715record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
2716{
2717 /* We don't add or remove threads during replay. */
a52eab48 2718 if (record_btrace_is_replaying (ops, minus_one_ptid))
e2887aa3
MM
2719 return find_thread_ptid (ptid) != NULL;
2720
2721 /* Forward the request. */
e75fdfca
TT
2722 ops = ops->beneath;
2723 return ops->to_thread_alive (ops, ptid);
e2887aa3
MM
2724}
2725
066ce621
MM
2726/* Set the replay branch trace instruction iterator. If IT is NULL, replay
2727 is stopped. */
2728
2729static void
2730record_btrace_set_replay (struct thread_info *tp,
2731 const struct btrace_insn_iterator *it)
2732{
2733 struct btrace_thread_info *btinfo;
2734
2735 btinfo = &tp->btrace;
2736
a0f1b963 2737 if (it == NULL)
52834460 2738 record_btrace_stop_replaying (tp);
066ce621
MM
2739 else
2740 {
2741 if (btinfo->replay == NULL)
52834460 2742 record_btrace_start_replaying (tp);
066ce621
MM
2743 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
2744 return;
2745
2746 *btinfo->replay = *it;
52834460 2747 registers_changed_ptid (tp->ptid);
066ce621
MM
2748 }
2749
52834460
MM
2750 /* Start anew from the new replay position. */
2751 record_btrace_clear_histories (btinfo);
485668e5
MM
2752
2753 stop_pc = regcache_read_pc (get_current_regcache ());
2754 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
066ce621
MM
2755}
2756
2757/* The to_goto_record_begin method of target record-btrace. */
2758
2759static void
08475817 2760record_btrace_goto_begin (struct target_ops *self)
066ce621
MM
2761{
2762 struct thread_info *tp;
2763 struct btrace_insn_iterator begin;
2764
2765 tp = require_btrace_thread ();
2766
2767 btrace_insn_begin (&begin, &tp->btrace);
b61ce85c
MM
2768
2769 /* Skip gaps at the beginning of the trace. */
2770 while (btrace_insn_get (&begin) == NULL)
2771 {
2772 unsigned int steps;
2773
2774 steps = btrace_insn_next (&begin, 1);
2775 if (steps == 0)
2776 error (_("No trace."));
2777 }
2778
066ce621 2779 record_btrace_set_replay (tp, &begin);
066ce621
MM
2780}
2781
2782/* The to_goto_record_end method of target record-btrace. */
2783
2784static void
307a1b91 2785record_btrace_goto_end (struct target_ops *ops)
066ce621
MM
2786{
2787 struct thread_info *tp;
2788
2789 tp = require_btrace_thread ();
2790
2791 record_btrace_set_replay (tp, NULL);
066ce621
MM
2792}
2793
2794/* The to_goto_record method of target record-btrace. */
2795
2796static void
606183ac 2797record_btrace_goto (struct target_ops *self, ULONGEST insn)
066ce621
MM
2798{
2799 struct thread_info *tp;
2800 struct btrace_insn_iterator it;
2801 unsigned int number;
2802 int found;
2803
2804 number = insn;
2805
2806 /* Check for wrap-arounds. */
2807 if (number != insn)
2808 error (_("Instruction number out of range."));
2809
2810 tp = require_btrace_thread ();
2811
2812 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
69090cee
TW
2813
2814 /* Check if the instruction could not be found or is a gap. */
2815 if (found == 0 || btrace_insn_get (&it) == NULL)
066ce621
MM
2816 error (_("No such instruction."));
2817
2818 record_btrace_set_replay (tp, &it);
066ce621
MM
2819}
2820
797094dd
MM
2821/* The to_record_stop_replaying method of target record-btrace. */
2822
2823static void
2824record_btrace_stop_replaying_all (struct target_ops *self)
2825{
2826 struct thread_info *tp;
2827
2828 ALL_NON_EXITED_THREADS (tp)
2829 record_btrace_stop_replaying (tp);
2830}
2831
70ad5bff
MM
2832/* The to_execution_direction target method. */
2833
2834static enum exec_direction_kind
2835record_btrace_execution_direction (struct target_ops *self)
2836{
2837 return record_btrace_resume_exec_dir;
2838}
2839
aef92902
MM
2840/* The to_prepare_to_generate_core target method. */
2841
2842static void
2843record_btrace_prepare_to_generate_core (struct target_ops *self)
2844{
2845 record_btrace_generating_corefile = 1;
2846}
2847
2848/* The to_done_generating_core target method. */
2849
2850static void
2851record_btrace_done_generating_core (struct target_ops *self)
2852{
2853 record_btrace_generating_corefile = 0;
2854}
2855
afedecd3
MM
2856/* Initialize the record-btrace target ops. */
2857
2858static void
2859init_record_btrace_ops (void)
2860{
2861 struct target_ops *ops;
2862
2863 ops = &record_btrace_ops;
2864 ops->to_shortname = "record-btrace";
2865 ops->to_longname = "Branch tracing target";
2866 ops->to_doc = "Collect control-flow trace and provide the execution history.";
2867 ops->to_open = record_btrace_open;
2868 ops->to_close = record_btrace_close;
b7d2e916 2869 ops->to_async = record_btrace_async;
afedecd3 2870 ops->to_detach = record_detach;
c0272db5 2871 ops->to_disconnect = record_btrace_disconnect;
afedecd3
MM
2872 ops->to_mourn_inferior = record_mourn_inferior;
2873 ops->to_kill = record_kill;
afedecd3
MM
2874 ops->to_stop_recording = record_btrace_stop_recording;
2875 ops->to_info_record = record_btrace_info;
2876 ops->to_insn_history = record_btrace_insn_history;
2877 ops->to_insn_history_from = record_btrace_insn_history_from;
2878 ops->to_insn_history_range = record_btrace_insn_history_range;
2879 ops->to_call_history = record_btrace_call_history;
2880 ops->to_call_history_from = record_btrace_call_history_from;
2881 ops->to_call_history_range = record_btrace_call_history_range;
b158a20f 2882 ops->to_record_method = record_btrace_record_method;
07bbe694 2883 ops->to_record_is_replaying = record_btrace_is_replaying;
7ff27e9b 2884 ops->to_record_will_replay = record_btrace_will_replay;
797094dd 2885 ops->to_record_stop_replaying = record_btrace_stop_replaying_all;
633785ff
MM
2886 ops->to_xfer_partial = record_btrace_xfer_partial;
2887 ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
2888 ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
1f3ef581
MM
2889 ops->to_fetch_registers = record_btrace_fetch_registers;
2890 ops->to_store_registers = record_btrace_store_registers;
2891 ops->to_prepare_to_store = record_btrace_prepare_to_store;
ac01945b
TT
2892 ops->to_get_unwinder = &record_btrace_to_get_unwinder;
2893 ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;
b2f4cfde 2894 ops->to_resume = record_btrace_resume;
85ad3aaf 2895 ops->to_commit_resume = record_btrace_commit_resume;
b2f4cfde 2896 ops->to_wait = record_btrace_wait;
6e4879f0 2897 ops->to_stop = record_btrace_stop;
e8032dde 2898 ops->to_update_thread_list = record_btrace_update_thread_list;
e2887aa3 2899 ops->to_thread_alive = record_btrace_thread_alive;
066ce621
MM
2900 ops->to_goto_record_begin = record_btrace_goto_begin;
2901 ops->to_goto_record_end = record_btrace_goto_end;
2902 ops->to_goto_record = record_btrace_goto;
52834460 2903 ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
9e8915c6
PA
2904 ops->to_stopped_by_sw_breakpoint = record_btrace_stopped_by_sw_breakpoint;
2905 ops->to_supports_stopped_by_sw_breakpoint
2906 = record_btrace_supports_stopped_by_sw_breakpoint;
2907 ops->to_stopped_by_hw_breakpoint = record_btrace_stopped_by_hw_breakpoint;
2908 ops->to_supports_stopped_by_hw_breakpoint
2909 = record_btrace_supports_stopped_by_hw_breakpoint;
70ad5bff 2910 ops->to_execution_direction = record_btrace_execution_direction;
aef92902
MM
2911 ops->to_prepare_to_generate_core = record_btrace_prepare_to_generate_core;
2912 ops->to_done_generating_core = record_btrace_done_generating_core;
afedecd3
MM
2913 ops->to_stratum = record_stratum;
2914 ops->to_magic = OPS_MAGIC;
2915}
2916
f4abbc16
MM
2917/* Start recording in BTS format. */
2918
2919static void
cdb34d4a 2920cmd_record_btrace_bts_start (const char *args, int from_tty)
f4abbc16 2921{
f4abbc16
MM
2922 if (args != NULL && *args != 0)
2923 error (_("Invalid argument."));
2924
2925 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2926
492d29ea
PA
2927 TRY
2928 {
95a6b0a1 2929 execute_command ("target record-btrace", from_tty);
492d29ea
PA
2930 }
2931 CATCH (exception, RETURN_MASK_ALL)
f4abbc16
MM
2932 {
2933 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2934 throw_exception (exception);
2935 }
492d29ea 2936 END_CATCH
f4abbc16
MM
2937}
2938
bc504a31 2939/* Start recording in Intel Processor Trace format. */
afedecd3
MM
2940
2941static void
cdb34d4a 2942cmd_record_btrace_pt_start (const char *args, int from_tty)
afedecd3
MM
2943{
2944 if (args != NULL && *args != 0)
2945 error (_("Invalid argument."));
2946
b20a6524 2947 record_btrace_conf.format = BTRACE_FORMAT_PT;
f4abbc16 2948
492d29ea
PA
2949 TRY
2950 {
95a6b0a1 2951 execute_command ("target record-btrace", from_tty);
492d29ea
PA
2952 }
2953 CATCH (exception, RETURN_MASK_ALL)
2954 {
2955 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2956 throw_exception (exception);
2957 }
2958 END_CATCH
afedecd3
MM
2959}
2960
b20a6524
MM
2961/* Alias for "target record". */
2962
2963static void
981a3fb3 2964cmd_record_btrace_start (const char *args, int from_tty)
b20a6524
MM
2965{
2966 if (args != NULL && *args != 0)
2967 error (_("Invalid argument."));
2968
2969 record_btrace_conf.format = BTRACE_FORMAT_PT;
2970
2971 TRY
2972 {
95a6b0a1 2973 execute_command ("target record-btrace", from_tty);
b20a6524
MM
2974 }
2975 CATCH (exception, RETURN_MASK_ALL)
2976 {
2977 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2978
2979 TRY
2980 {
95a6b0a1 2981 execute_command ("target record-btrace", from_tty);
b20a6524
MM
2982 }
2983 CATCH (exception, RETURN_MASK_ALL)
2984 {
2985 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2986 throw_exception (exception);
2987 }
2988 END_CATCH
2989 }
2990 END_CATCH
2991}
2992
67b5c0c1
MM
2993/* The "set record btrace" command. */
2994
2995static void
981a3fb3 2996cmd_set_record_btrace (const char *args, int from_tty)
67b5c0c1 2997{
b85310e1
MM
2998 printf_unfiltered (_("\"set record btrace\" must be followed "
2999 "by an appropriate subcommand.\n"));
3000 help_list (set_record_btrace_cmdlist, "set record btrace ",
3001 all_commands, gdb_stdout);
67b5c0c1
MM
3002}
3003
3004/* The "show record btrace" command. */
3005
3006static void
981a3fb3 3007cmd_show_record_btrace (const char *args, int from_tty)
67b5c0c1
MM
3008{
3009 cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
3010}
3011
3012/* The "show record btrace replay-memory-access" command. */
3013
3014static void
3015cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
3016 struct cmd_list_element *c, const char *value)
3017{
3018 fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
3019 replay_memory_access);
3020}
3021
4a4495d6
MM
3022/* The "set record btrace cpu none" command. */
3023
3024static void
3025cmd_set_record_btrace_cpu_none (const char *args, int from_tty)
3026{
3027 if (args != nullptr && *args != 0)
3028 error (_("Trailing junk: '%s'."), args);
3029
3030 record_btrace_cpu_state = CS_NONE;
3031}
3032
3033/* The "set record btrace cpu auto" command. */
3034
3035static void
3036cmd_set_record_btrace_cpu_auto (const char *args, int from_tty)
3037{
3038 if (args != nullptr && *args != 0)
3039 error (_("Trailing junk: '%s'."), args);
3040
3041 record_btrace_cpu_state = CS_AUTO;
3042}
3043
3044/* The "set record btrace cpu" command. */
3045
3046static void
3047cmd_set_record_btrace_cpu (const char *args, int from_tty)
3048{
3049 if (args == nullptr)
3050 args = "";
3051
3052 /* We use a hard-coded vendor string for now. */
3053 unsigned int family, model, stepping;
3054 int l1, l2, matches = sscanf (args, "intel: %u/%u%n/%u%n", &family,
3055 &model, &l1, &stepping, &l2);
3056 if (matches == 3)
3057 {
3058 if (strlen (args) != l2)
3059 error (_("Trailing junk: '%s'."), args + l2);
3060 }
3061 else if (matches == 2)
3062 {
3063 if (strlen (args) != l1)
3064 error (_("Trailing junk: '%s'."), args + l1);
3065
3066 stepping = 0;
3067 }
3068 else
3069 error (_("Bad format. See \"help set record btrace cpu\"."));
3070
3071 if (USHRT_MAX < family)
3072 error (_("Cpu family too big."));
3073
3074 if (UCHAR_MAX < model)
3075 error (_("Cpu model too big."));
3076
3077 if (UCHAR_MAX < stepping)
3078 error (_("Cpu stepping too big."));
3079
3080 record_btrace_cpu.vendor = CV_INTEL;
3081 record_btrace_cpu.family = family;
3082 record_btrace_cpu.model = model;
3083 record_btrace_cpu.stepping = stepping;
3084
3085 record_btrace_cpu_state = CS_CPU;
3086}
3087
3088/* The "show record btrace cpu" command. */
3089
3090static void
3091cmd_show_record_btrace_cpu (const char *args, int from_tty)
3092{
3093 const char *cpu;
3094
3095 if (args != nullptr && *args != 0)
3096 error (_("Trailing junk: '%s'."), args);
3097
3098 switch (record_btrace_cpu_state)
3099 {
3100 case CS_AUTO:
3101 printf_unfiltered (_("btrace cpu is 'auto'.\n"));
3102 return;
3103
3104 case CS_NONE:
3105 printf_unfiltered (_("btrace cpu is 'none'.\n"));
3106 return;
3107
3108 case CS_CPU:
3109 switch (record_btrace_cpu.vendor)
3110 {
3111 case CV_INTEL:
3112 if (record_btrace_cpu.stepping == 0)
3113 printf_unfiltered (_("btrace cpu is 'intel: %u/%u'.\n"),
3114 record_btrace_cpu.family,
3115 record_btrace_cpu.model);
3116 else
3117 printf_unfiltered (_("btrace cpu is 'intel: %u/%u/%u'.\n"),
3118 record_btrace_cpu.family,
3119 record_btrace_cpu.model,
3120 record_btrace_cpu.stepping);
3121 return;
3122 }
3123 }
3124
3125 error (_("Internal error: bad cpu state."));
3126}
3127
3128/* The "s record btrace bts" command. */
d33501a5
MM
3129
3130static void
981a3fb3 3131cmd_set_record_btrace_bts (const char *args, int from_tty)
d33501a5
MM
3132{
3133 printf_unfiltered (_("\"set record btrace bts\" must be followed "
b20a6524 3134 "by an appropriate subcommand.\n"));
d33501a5
MM
3135 help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
3136 all_commands, gdb_stdout);
3137}
3138
3139/* The "show record btrace bts" command. */
3140
3141static void
981a3fb3 3142cmd_show_record_btrace_bts (const char *args, int from_tty)
d33501a5
MM
3143{
3144 cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
3145}
3146
b20a6524
MM
3147/* The "set record btrace pt" command. */
3148
3149static void
981a3fb3 3150cmd_set_record_btrace_pt (const char *args, int from_tty)
b20a6524
MM
3151{
3152 printf_unfiltered (_("\"set record btrace pt\" must be followed "
3153 "by an appropriate subcommand.\n"));
3154 help_list (set_record_btrace_pt_cmdlist, "set record btrace pt ",
3155 all_commands, gdb_stdout);
3156}
3157
3158/* The "show record btrace pt" command. */
3159
3160static void
981a3fb3 3161cmd_show_record_btrace_pt (const char *args, int from_tty)
b20a6524
MM
3162{
3163 cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, "");
3164}
3165
/* The "record bts buffer-size" show value function.

   Callback used by the setshow machinery to print the current bts
   buffer size.  VALUE is the already-formatted setting string.  */

static void
show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
				   struct cmd_list_element *c,
				   const char *value)
{
  fprintf_filtered (file,
		    _("The record/replay bts buffer size is %s.\n"), value);
}
3176
/* The "record pt buffer-size" show value function.

   Callback used by the setshow machinery to print the current pt
   buffer size.  VALUE is the already-formatted setting string.  */

static void
show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
				  struct cmd_list_element *c,
				  const char *value)
{
  fprintf_filtered (file,
		    _("The record/replay pt buffer size is %s.\n"), value);
}
3187
afedecd3
MM
3188/* Initialize btrace commands. */
3189
3190void
3191_initialize_record_btrace (void)
3192{
f4abbc16
MM
3193 add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
3194 _("Start branch trace recording."), &record_btrace_cmdlist,
3195 "record btrace ", 0, &record_cmdlist);
afedecd3
MM
3196 add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
3197
f4abbc16
MM
3198 add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
3199 _("\
3200Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
3201The processor stores a from/to record for each branch into a cyclic buffer.\n\
3202This format may not be available on all processors."),
3203 &record_btrace_cmdlist);
3204 add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);
3205
b20a6524
MM
3206 add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
3207 _("\
bc504a31 3208Start branch trace recording in Intel Processor Trace format.\n\n\
b20a6524
MM
3209This format may not be available on all processors."),
3210 &record_btrace_cmdlist);
3211 add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);
3212
67b5c0c1
MM
3213 add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
3214 _("Set record options"), &set_record_btrace_cmdlist,
3215 "set record btrace ", 0, &set_record_cmdlist);
3216
3217 add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
3218 _("Show record options"), &show_record_btrace_cmdlist,
3219 "show record btrace ", 0, &show_record_cmdlist);
3220
3221 add_setshow_enum_cmd ("replay-memory-access", no_class,
3222 replay_memory_access_types, &replay_memory_access, _("\
3223Set what memory accesses are allowed during replay."), _("\
3224Show what memory accesses are allowed during replay."),
3225 _("Default is READ-ONLY.\n\n\
3226The btrace record target does not trace data.\n\
3227The memory therefore corresponds to the live target and not \
3228to the current replay position.\n\n\
3229When READ-ONLY, allow accesses to read-only memory during replay.\n\
3230When READ-WRITE, allow accesses to read-only and read-write memory during \
3231replay."),
3232 NULL, cmd_show_replay_memory_access,
3233 &set_record_btrace_cmdlist,
3234 &show_record_btrace_cmdlist);
3235
4a4495d6
MM
3236 add_prefix_cmd ("cpu", class_support, cmd_set_record_btrace_cpu,
3237 _("\
3238Set the cpu to be used for trace decode.\n\n\
3239The format is \"<vendor>:<identifier>\" or \"none\" or \"auto\" (default).\n\
3240For vendor \"intel\" the format is \"<family>/<model>[/<stepping>]\".\n\n\
3241When decoding branch trace, enable errata workarounds for the specified cpu.\n\
3242The default is \"auto\", which uses the cpu on which the trace was recorded.\n\
3243When GDB does not support that cpu, this option can be used to enable\n\
3244workarounds for a similar cpu that GDB supports.\n\n\
3245When set to \"none\", errata workarounds are disabled."),
3246 &set_record_btrace_cpu_cmdlist,
3247 _("set record btrace cpu "), 1,
3248 &set_record_btrace_cmdlist);
3249
3250 add_cmd ("auto", class_support, cmd_set_record_btrace_cpu_auto, _("\
3251Automatically determine the cpu to be used for trace decode."),
3252 &set_record_btrace_cpu_cmdlist);
3253
3254 add_cmd ("none", class_support, cmd_set_record_btrace_cpu_none, _("\
3255Do not enable errata workarounds for trace decode."),
3256 &set_record_btrace_cpu_cmdlist);
3257
3258 add_cmd ("cpu", class_support, cmd_show_record_btrace_cpu, _("\
3259Show the cpu to be used for trace decode."),
3260 &show_record_btrace_cmdlist);
3261
d33501a5
MM
3262 add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
3263 _("Set record btrace bts options"),
3264 &set_record_btrace_bts_cmdlist,
3265 "set record btrace bts ", 0, &set_record_btrace_cmdlist);
3266
3267 add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
3268 _("Show record btrace bts options"),
3269 &show_record_btrace_bts_cmdlist,
3270 "show record btrace bts ", 0, &show_record_btrace_cmdlist);
3271
3272 add_setshow_uinteger_cmd ("buffer-size", no_class,
3273 &record_btrace_conf.bts.size,
3274 _("Set the record/replay bts buffer size."),
3275 _("Show the record/replay bts buffer size."), _("\
3276When starting recording request a trace buffer of this size. \
3277The actual buffer size may differ from the requested size. \
3278Use \"info record\" to see the actual buffer size.\n\n\
3279Bigger buffers allow longer recording but also take more time to process \
3280the recorded execution trace.\n\n\
b20a6524
MM
3281The trace buffer size may not be changed while recording."), NULL,
3282 show_record_bts_buffer_size_value,
d33501a5
MM
3283 &set_record_btrace_bts_cmdlist,
3284 &show_record_btrace_bts_cmdlist);
3285
b20a6524
MM
3286 add_prefix_cmd ("pt", class_support, cmd_set_record_btrace_pt,
3287 _("Set record btrace pt options"),
3288 &set_record_btrace_pt_cmdlist,
3289 "set record btrace pt ", 0, &set_record_btrace_cmdlist);
3290
3291 add_prefix_cmd ("pt", class_support, cmd_show_record_btrace_pt,
3292 _("Show record btrace pt options"),
3293 &show_record_btrace_pt_cmdlist,
3294 "show record btrace pt ", 0, &show_record_btrace_cmdlist);
3295
3296 add_setshow_uinteger_cmd ("buffer-size", no_class,
3297 &record_btrace_conf.pt.size,
3298 _("Set the record/replay pt buffer size."),
3299 _("Show the record/replay pt buffer size."), _("\
3300Bigger buffers allow longer recording but also take more time to process \
3301the recorded execution.\n\
3302The actual buffer size may differ from the requested size. Use \"info record\" \
3303to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
3304 &set_record_btrace_pt_cmdlist,
3305 &show_record_btrace_pt_cmdlist);
3306
afedecd3
MM
3307 init_record_btrace_ops ();
3308 add_target (&record_btrace_ops);
0b722aec
MM
3309
3310 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
3311 xcalloc, xfree);
d33501a5
MM
3312
3313 record_btrace_conf.bts.size = 64 * 1024;
b20a6524 3314 record_btrace_conf.pt.size = 16 * 1024;
afedecd3 3315}
This page took 0.860478 seconds and 4 git commands to generate.