afedecd3
MM
1/* Branch trace support for GDB, the GNU debugger.
2
e2882c85 3 Copyright (C) 2013-2018 Free Software Foundation, Inc.
afedecd3
MM
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22#include "defs.h"
23#include "record.h"
c0272db5 24#include "record-btrace.h"
afedecd3
MM
25#include "gdbthread.h"
26#include "target.h"
27#include "gdbcmd.h"
28#include "disasm.h"
76727919 29#include "observable.h"
afedecd3
MM
30#include "cli/cli-utils.h"
31#include "source.h"
32#include "ui-out.h"
33#include "symtab.h"
34#include "filenames.h"
1f3ef581 35#include "regcache.h"
cecac1ab 36#include "frame-unwind.h"
0b722aec 37#include "hashtab.h"
45741a9c 38#include "infrun.h"
70ad5bff
MM
39#include "event-loop.h"
40#include "inf-loop.h"
e3cfc1c7 41#include "vec.h"
00431a78 42#include "inferior.h"
325fac50 43#include <algorithm>
afedecd3 44
d9f719f1
PA
45static const target_info record_btrace_target_info = {
46 "record-btrace",
47 N_("Branch tracing target"),
48 N_("Collect control-flow trace and provide the execution history.")
49};
50
afedecd3 51/* The target_ops of record-btrace. */
f6ac5f3d
PA
52
53class record_btrace_target final : public target_ops
54{
55public:
56 record_btrace_target ()
57 { to_stratum = record_stratum; }
58
d9f719f1
PA
59 const target_info &info () const override
60 { return record_btrace_target_info; }
f6ac5f3d 61
f6ac5f3d
PA
62 void close () override;
63 void async (int) override;
64
65 void detach (inferior *inf, int from_tty) override
66 { record_detach (this, inf, from_tty); }
67
68 void disconnect (const char *, int) override;
69
70 void mourn_inferior () override
71 { record_mourn_inferior (this); }
72
73 void kill () override
74 { record_kill (this); }
75
76 enum record_method record_method (ptid_t ptid) override;
77
78 void stop_recording () override;
79 void info_record () override;
80
81 void insn_history (int size, gdb_disassembly_flags flags) override;
82 void insn_history_from (ULONGEST from, int size,
83 gdb_disassembly_flags flags) override;
84 void insn_history_range (ULONGEST begin, ULONGEST end,
85 gdb_disassembly_flags flags) override;
86 void call_history (int size, record_print_flags flags) override;
87 void call_history_from (ULONGEST begin, int size, record_print_flags flags)
88 override;
89 void call_history_range (ULONGEST begin, ULONGEST end, record_print_flags flags)
90 override;
91
57810aa7
PA
92 bool record_is_replaying (ptid_t ptid) override;
93 bool record_will_replay (ptid_t ptid, int dir) override;
f6ac5f3d
PA
94 void record_stop_replaying () override;
95
96 enum target_xfer_status xfer_partial (enum target_object object,
97 const char *annex,
98 gdb_byte *readbuf,
99 const gdb_byte *writebuf,
100 ULONGEST offset, ULONGEST len,
101 ULONGEST *xfered_len) override;
102
103 int insert_breakpoint (struct gdbarch *,
104 struct bp_target_info *) override;
105 int remove_breakpoint (struct gdbarch *, struct bp_target_info *,
106 enum remove_bp_reason) override;
107
108 void fetch_registers (struct regcache *, int) override;
109
110 void store_registers (struct regcache *, int) override;
111 void prepare_to_store (struct regcache *) override;
112
113 const struct frame_unwind *get_unwinder () override;
114
115 const struct frame_unwind *get_tailcall_unwinder () override;
116
117 void commit_resume () override;
118 void resume (ptid_t, int, enum gdb_signal) override;
119 ptid_t wait (ptid_t, struct target_waitstatus *, int) override;
120
121 void stop (ptid_t) override;
122 void update_thread_list () override;
57810aa7 123 bool thread_alive (ptid_t ptid) override;
f6ac5f3d
PA
124 void goto_record_begin () override;
125 void goto_record_end () override;
126 void goto_record (ULONGEST insn) override;
127
57810aa7 128 bool can_execute_reverse () override;
f6ac5f3d 129
57810aa7
PA
130 bool stopped_by_sw_breakpoint () override;
131 bool supports_stopped_by_sw_breakpoint () override;
f6ac5f3d 132
57810aa7
PA
133 bool stopped_by_hw_breakpoint () override;
134 bool supports_stopped_by_hw_breakpoint () override;
f6ac5f3d
PA
135
136 enum exec_direction_kind execution_direction () override;
137 void prepare_to_generate_core () override;
138 void done_generating_core () override;
139};
140
141static record_btrace_target record_btrace_ops;
142
143/* Initialize the record-btrace target ops. */
afedecd3 144
76727919
TT
145/* Token associated with a new-thread observer enabling branch tracing
146 for the new thread. */
147static const gdb::observers::token record_btrace_thread_observer_token;
afedecd3 148
67b5c0c1
MM
149/* Memory access types used in set/show record btrace replay-memory-access. */
150static const char replay_memory_access_read_only[] = "read-only";
151static const char replay_memory_access_read_write[] = "read-write";
152static const char *const replay_memory_access_types[] =
153{
154 replay_memory_access_read_only,
155 replay_memory_access_read_write,
156 NULL
157};
158
159/* The currently allowed replay memory access type. */
160static const char *replay_memory_access = replay_memory_access_read_only;
161
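/* For example, a user who needs to patch memory while replaying can switch
   with "set record btrace replay-memory-access read-write" and inspect the
   current setting with "show record btrace replay-memory-access".  */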
4a4495d6
MM
162/* The cpu state kinds. */
163enum record_btrace_cpu_state_kind
164{
165 CS_AUTO,
166 CS_NONE,
167 CS_CPU
168};
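/* These map to the arguments of "set record btrace cpu": "auto" selects
   CS_AUTO, "none" selects CS_NONE, and an explicit identifier (for example
   something like "intel: 6/158") selects CS_CPU.  The identifier shown is
   only an illustration; see the command's online help for the exact
   accepted syntax.  */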
169
170/* The current cpu state. */
171static enum record_btrace_cpu_state_kind record_btrace_cpu_state = CS_AUTO;
172
173/* The current cpu for trace decode. */
174static struct btrace_cpu record_btrace_cpu;
175
67b5c0c1
MM
176/* Command lists for "set/show record btrace". */
177static struct cmd_list_element *set_record_btrace_cmdlist;
178static struct cmd_list_element *show_record_btrace_cmdlist;
633785ff 179
70ad5bff
MM
180/* The execution direction of the last resume we got. See record-full.c. */
181static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;
182
183/* The async event handler for reverse/replay execution. */
184static struct async_event_handler *record_btrace_async_inferior_event_handler;
185
aef92902
MM
186/* A flag indicating that we are currently generating a core file. */
187static int record_btrace_generating_corefile;
188
f4abbc16
MM
189/* The current branch trace configuration. */
190static struct btrace_config record_btrace_conf;
191
192/* Command list for "record btrace". */
193static struct cmd_list_element *record_btrace_cmdlist;
194
d33501a5
MM
195/* Command lists for "set/show record btrace bts". */
196static struct cmd_list_element *set_record_btrace_bts_cmdlist;
197static struct cmd_list_element *show_record_btrace_bts_cmdlist;
198
b20a6524
MM
199/* Command lists for "set/show record btrace pt". */
200static struct cmd_list_element *set_record_btrace_pt_cmdlist;
201static struct cmd_list_element *show_record_btrace_pt_cmdlist;
202
4a4495d6
MM
203/* Command list for "set record btrace cpu". */
204static struct cmd_list_element *set_record_btrace_cpu_cmdlist;
205
afedecd3
MM
206/* Print a record-btrace debug message. Use do ... while (0) to avoid
207 ambiguities when used in if statements. */
208
209#define DEBUG(msg, args...) \
210 do \
211 { \
212 if (record_debug != 0) \
213 fprintf_unfiltered (gdb_stdlog, \
214 "[record-btrace] " msg "\n", ##args); \
215 } \
216 while (0)
217
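/* Illustrative note: the wrapper matters when DEBUG is used as the body of
   an unbraced if/else in a hypothetical caller, e.g.

     if (some_condition)
       DEBUG ("resume");
     else
       do_something_else ();

   Had the macro expanded to a bare `if' or a braced block, the trailing
   semicolon would either orphan the `else' or attach it to the macro's own
   `if'.  Expanding to do { ... } while (0) yields exactly one statement, so
   the code above parses as intended.  */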
218
4a4495d6
MM
219/* Return the cpu configured by the user. Returns NULL if the cpu was
220 configured as auto. */
221const struct btrace_cpu *
222record_btrace_get_cpu (void)
223{
224 switch (record_btrace_cpu_state)
225 {
226 case CS_AUTO:
227 return nullptr;
228
229 case CS_NONE:
230 record_btrace_cpu.vendor = CV_UNKNOWN;
231 /* Fall through. */
232 case CS_CPU:
233 return &record_btrace_cpu;
234 }
235
236 error (_("Internal error: bad record btrace cpu state."));
237}
238
afedecd3 239/* Update the branch trace for the current thread and return a pointer to its
066ce621 240 thread_info.
afedecd3
MM
241
242 Throws an error if there is no thread or no trace. This function never
243 returns NULL. */
244
066ce621
MM
245static struct thread_info *
246require_btrace_thread (void)
afedecd3 247{
afedecd3
MM
248 DEBUG ("require");
249
00431a78 250 if (inferior_ptid == null_ptid)
afedecd3
MM
251 error (_("No thread."));
252
00431a78
PA
253 thread_info *tp = inferior_thread ();
254
cd4007e4
MM
255 validate_registers_access ();
256
4a4495d6 257 btrace_fetch (tp, record_btrace_get_cpu ());
afedecd3 258
6e07b1d2 259 if (btrace_is_empty (tp))
afedecd3
MM
260 error (_("No trace."));
261
066ce621
MM
262 return tp;
263}
264
265/* Update the branch trace for the current thread and return a pointer to its
266 branch trace information struct.
267
268 Throws an error if there is no thread or no trace. This function never
269 returns NULL. */
270
271static struct btrace_thread_info *
272require_btrace (void)
273{
274 struct thread_info *tp;
275
276 tp = require_btrace_thread ();
277
278 return &tp->btrace;
afedecd3
MM
279}
280
281/* Enable branch tracing for one thread. Warn on errors. */
282
283static void
284record_btrace_enable_warn (struct thread_info *tp)
285{
492d29ea
PA
286 TRY
287 {
288 btrace_enable (tp, &record_btrace_conf);
289 }
290 CATCH (error, RETURN_MASK_ERROR)
291 {
292 warning ("%s", error.message);
293 }
294 END_CATCH
afedecd3
MM
295}
296
afedecd3
MM
297/* Enable automatic tracing of new threads. */
298
299static void
300record_btrace_auto_enable (void)
301{
302 DEBUG ("attach thread observer");
303
76727919
TT
304 gdb::observers::new_thread.attach (record_btrace_enable_warn,
305 record_btrace_thread_observer_token);
afedecd3
MM
306}
307
308/* Disable automatic tracing of new threads. */
309
310static void
311record_btrace_auto_disable (void)
312{
afedecd3
MM
313 DEBUG ("detach thread observer");
314
76727919 315 gdb::observers::new_thread.detach (record_btrace_thread_observer_token);
afedecd3
MM
316}
317
70ad5bff
MM
318/* The record-btrace async event handler function. */
319
320static void
321record_btrace_handle_async_inferior_event (gdb_client_data data)
322{
323 inferior_event_handler (INF_REG_EVENT, NULL);
324}
325
c0272db5
TW
326/* See record-btrace.h. */
327
328void
329record_btrace_push_target (void)
330{
331 const char *format;
332
333 record_btrace_auto_enable ();
334
335 push_target (&record_btrace_ops);
336
337 record_btrace_async_inferior_event_handler
338 = create_async_event_handler (record_btrace_handle_async_inferior_event,
339 NULL);
340 record_btrace_generating_corefile = 0;
341
342 format = btrace_format_short_string (record_btrace_conf.format);
76727919 343 gdb::observers::record_changed.notify (current_inferior (), 1, "btrace", format);
c0272db5
TW
344}
345
228f1508
SM
346/* Disable btrace on a set of threads on scope exit. */
347
348struct scoped_btrace_disable
349{
350 scoped_btrace_disable () = default;
351
352 DISABLE_COPY_AND_ASSIGN (scoped_btrace_disable);
353
354 ~scoped_btrace_disable ()
355 {
356 for (thread_info *tp : m_threads)
357 btrace_disable (tp);
358 }
359
360 void add_thread (thread_info *thread)
361 {
362 m_threads.push_front (thread);
363 }
364
365 void discard ()
366 {
367 m_threads.clear ();
368 }
369
370private:
371 std::forward_list<thread_info *> m_threads;
372};
373
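/* Typical use, as in record_btrace_target_open below: enable btrace for each
   selected thread, register it via add_thread, and call discard once every
   thread succeeded so that nothing is disabled on scope exit.  */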
d9f719f1 374/* Open target record-btrace. */
afedecd3 375
d9f719f1
PA
376static void
377record_btrace_target_open (const char *args, int from_tty)
afedecd3 378{
228f1508
SM
379 /* If we fail to enable btrace for one thread, disable it for the threads for
380 which it was successfully enabled. */
381 scoped_btrace_disable btrace_disable;
afedecd3
MM
382
383 DEBUG ("open");
384
8213266a 385 record_preopen ();
afedecd3
MM
386
387 if (!target_has_execution)
388 error (_("The program is not being run."));
389
08036331 390 for (thread_info *tp : all_non_exited_threads ())
5d5658a1 391 if (args == NULL || *args == 0 || number_is_in_list (args, tp->global_num))
afedecd3 392 {
f4abbc16 393 btrace_enable (tp, &record_btrace_conf);
afedecd3 394
228f1508 395 btrace_disable.add_thread (tp);
afedecd3
MM
396 }
397
c0272db5 398 record_btrace_push_target ();
afedecd3 399
228f1508 400 btrace_disable.discard ();
afedecd3
MM
401}
402
f6ac5f3d 403/* The stop_recording method of target record-btrace. */
afedecd3 404
f6ac5f3d
PA
405void
406record_btrace_target::stop_recording ()
afedecd3 407{
afedecd3
MM
408 DEBUG ("stop recording");
409
410 record_btrace_auto_disable ();
411
08036331 412 for (thread_info *tp : all_non_exited_threads ())
afedecd3
MM
413 if (tp->btrace.target != NULL)
414 btrace_disable (tp);
415}
416
f6ac5f3d 417/* The disconnect method of target record-btrace. */
c0272db5 418
f6ac5f3d
PA
419void
420record_btrace_target::disconnect (const char *args,
421 int from_tty)
c0272db5 422{
b6a8c27b 423 struct target_ops *beneath = this->beneath ();
c0272db5
TW
424
425 /* Do not stop recording, just clean up GDB side. */
f6ac5f3d 426 unpush_target (this);
c0272db5
TW
427
428 /* Forward disconnect. */
f6ac5f3d 429 beneath->disconnect (args, from_tty);
c0272db5
TW
430}
431
f6ac5f3d 432/* The close method of target record-btrace. */
afedecd3 433
f6ac5f3d
PA
434void
435record_btrace_target::close ()
afedecd3 436{
70ad5bff
MM
437 if (record_btrace_async_inferior_event_handler != NULL)
438 delete_async_event_handler (&record_btrace_async_inferior_event_handler);
439
99c819ee
MM
440 /* Make sure automatic recording gets disabled even if we did not stop
441 recording before closing the record-btrace target. */
442 record_btrace_auto_disable ();
443
568e808b
MM
444 /* We should have already stopped recording.
445 Tear down btrace in case we have not. */
08036331 446 for (thread_info *tp : all_non_exited_threads ())
568e808b 447 btrace_teardown (tp);
afedecd3
MM
448}
449
f6ac5f3d 450/* The async method of target record-btrace. */
b7d2e916 451
f6ac5f3d
PA
452void
453record_btrace_target::async (int enable)
b7d2e916 454{
6a3753b3 455 if (enable)
b7d2e916
PA
456 mark_async_event_handler (record_btrace_async_inferior_event_handler);
457 else
458 clear_async_event_handler (record_btrace_async_inferior_event_handler);
459
b6a8c27b 460 this->beneath ()->async (enable);
b7d2e916
PA
461}
462
d33501a5
MM
463/* Adjusts the size and returns a human-readable size suffix. */
464
465static const char *
466record_btrace_adjust_size (unsigned int *size)
467{
468 unsigned int sz;
469
470 sz = *size;
471
472 if ((sz & ((1u << 30) - 1)) == 0)
473 {
474 *size = sz >> 30;
475 return "GB";
476 }
477 else if ((sz & ((1u << 20) - 1)) == 0)
478 {
479 *size = sz >> 20;
480 return "MB";
481 }
482 else if ((sz & ((1u << 10) - 1)) == 0)
483 {
484 *size = sz >> 10;
485 return "kB";
486 }
487 else
488 return "";
489}
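/* For example, a buffer size of 4194304 (4 MiB) is adjusted to 4 with suffix
   "MB", 1024 becomes 1 with suffix "kB", and a size that is not a multiple
   of 1024 is returned unchanged with an empty suffix.  */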
490
491/* Print a BTS configuration. */
492
493static void
494record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
495{
496 const char *suffix;
497 unsigned int size;
498
499 size = conf->size;
500 if (size > 0)
501 {
502 suffix = record_btrace_adjust_size (&size);
503 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
504 }
505}
506
bc504a31 507/* Print an Intel Processor Trace configuration. */
b20a6524
MM
508
509static void
510record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
511{
512 const char *suffix;
513 unsigned int size;
514
515 size = conf->size;
516 if (size > 0)
517 {
518 suffix = record_btrace_adjust_size (&size);
519 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
520 }
521}
522
d33501a5
MM
523/* Print a branch tracing configuration. */
524
525static void
526record_btrace_print_conf (const struct btrace_config *conf)
527{
528 printf_unfiltered (_("Recording format: %s.\n"),
529 btrace_format_string (conf->format));
530
531 switch (conf->format)
532 {
533 case BTRACE_FORMAT_NONE:
534 return;
535
536 case BTRACE_FORMAT_BTS:
537 record_btrace_print_bts_conf (&conf->bts);
538 return;
b20a6524
MM
539
540 case BTRACE_FORMAT_PT:
541 record_btrace_print_pt_conf (&conf->pt);
542 return;
d33501a5
MM
543 }
544
 545 internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
546}
547
f6ac5f3d 548/* The info_record method of target record-btrace. */
afedecd3 549
f6ac5f3d
PA
550void
551record_btrace_target::info_record ()
afedecd3
MM
552{
553 struct btrace_thread_info *btinfo;
f4abbc16 554 const struct btrace_config *conf;
afedecd3 555 struct thread_info *tp;
31fd9caa 556 unsigned int insns, calls, gaps;
afedecd3
MM
557
558 DEBUG ("info");
559
560 tp = find_thread_ptid (inferior_ptid);
561 if (tp == NULL)
562 error (_("No thread."));
563
cd4007e4
MM
564 validate_registers_access ();
565
f4abbc16
MM
566 btinfo = &tp->btrace;
567
f6ac5f3d 568 conf = ::btrace_conf (btinfo);
f4abbc16 569 if (conf != NULL)
d33501a5 570 record_btrace_print_conf (conf);
f4abbc16 571
4a4495d6 572 btrace_fetch (tp, record_btrace_get_cpu ());
afedecd3 573
23a7fe75
MM
574 insns = 0;
575 calls = 0;
31fd9caa 576 gaps = 0;
23a7fe75 577
6e07b1d2 578 if (!btrace_is_empty (tp))
23a7fe75
MM
579 {
580 struct btrace_call_iterator call;
581 struct btrace_insn_iterator insn;
582
583 btrace_call_end (&call, btinfo);
584 btrace_call_prev (&call, 1);
5de9129b 585 calls = btrace_call_number (&call);
23a7fe75
MM
586
587 btrace_insn_end (&insn, btinfo);
5de9129b 588 insns = btrace_insn_number (&insn);
31fd9caa 589
69090cee
TW
590 /* If the last instruction is not a gap, it is the current instruction
591 that is not actually part of the record. */
592 if (btrace_insn_get (&insn) != NULL)
593 insns -= 1;
31fd9caa
MM
594
595 gaps = btinfo->ngaps;
23a7fe75 596 }
afedecd3 597
31fd9caa 598 printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
43792cf0
PA
599 "for thread %s (%s).\n"), insns, calls, gaps,
600 print_thread_id (tp), target_pid_to_str (tp->ptid));
07bbe694
MM
601
602 if (btrace_is_replaying (tp))
603 printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
604 btrace_insn_number (btinfo->replay));
afedecd3
MM
605}
606
31fd9caa
MM
607/* Print a decode error. */
608
609static void
610btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
611 enum btrace_format format)
612{
508352a9 613 const char *errstr = btrace_decode_error (format, errcode);
31fd9caa 614
112e8700 615 uiout->text (_("["));
508352a9
TW
616 /* ERRCODE > 0 indicates notifications on BTRACE_FORMAT_PT. */
617 if (!(format == BTRACE_FORMAT_PT && errcode > 0))
31fd9caa 618 {
112e8700
SM
619 uiout->text (_("decode error ("));
620 uiout->field_int ("errcode", errcode);
621 uiout->text (_("): "));
31fd9caa 622 }
112e8700
SM
623 uiout->text (errstr);
624 uiout->text (_("]\n"));
31fd9caa
MM
625}
626
afedecd3
MM
627/* Print an unsigned int. */
628
629static void
630ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
631{
112e8700 632 uiout->field_fmt (fld, "%u", val);
afedecd3
MM
633}
634
f94cc897
MM
635/* A range of source lines. */
636
637struct btrace_line_range
638{
639 /* The symtab this line is from. */
640 struct symtab *symtab;
641
642 /* The first line (inclusive). */
643 int begin;
644
645 /* The last line (exclusive). */
646 int end;
647};
648
649/* Construct a line range. */
650
651static struct btrace_line_range
652btrace_mk_line_range (struct symtab *symtab, int begin, int end)
653{
654 struct btrace_line_range range;
655
656 range.symtab = symtab;
657 range.begin = begin;
658 range.end = end;
659
660 return range;
661}
662
663/* Add a line to a line range. */
664
665static struct btrace_line_range
666btrace_line_range_add (struct btrace_line_range range, int line)
667{
668 if (range.end <= range.begin)
669 {
670 /* This is the first entry. */
671 range.begin = line;
672 range.end = line + 1;
673 }
674 else if (line < range.begin)
675 range.begin = line;
676 else if (range.end < line)
677 range.end = line;
678
679 return range;
680}
681
682/* Return non-zero if RANGE is empty, zero otherwise. */
683
684static int
685btrace_line_range_is_empty (struct btrace_line_range range)
686{
687 return range.end <= range.begin;
688}
689
690/* Return non-zero if LHS contains RHS, zero otherwise. */
691
692static int
693btrace_line_range_contains_range (struct btrace_line_range lhs,
694 struct btrace_line_range rhs)
695{
696 return ((lhs.symtab == rhs.symtab)
697 && (lhs.begin <= rhs.begin)
698 && (rhs.end <= lhs.end));
699}
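/* For example, with both ranges in the same symtab, [10, 15) contains
   [12, 14) but not [14, 16); END is exclusive as noted above.  */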
700
701/* Find the line range associated with PC. */
702
703static struct btrace_line_range
704btrace_find_line_range (CORE_ADDR pc)
705{
706 struct btrace_line_range range;
707 struct linetable_entry *lines;
708 struct linetable *ltable;
709 struct symtab *symtab;
710 int nlines, i;
711
712 symtab = find_pc_line_symtab (pc);
713 if (symtab == NULL)
714 return btrace_mk_line_range (NULL, 0, 0);
715
716 ltable = SYMTAB_LINETABLE (symtab);
717 if (ltable == NULL)
718 return btrace_mk_line_range (symtab, 0, 0);
719
720 nlines = ltable->nitems;
721 lines = ltable->item;
722 if (nlines <= 0)
723 return btrace_mk_line_range (symtab, 0, 0);
724
725 range = btrace_mk_line_range (symtab, 0, 0);
726 for (i = 0; i < nlines - 1; i++)
727 {
728 if ((lines[i].pc == pc) && (lines[i].line != 0))
729 range = btrace_line_range_add (range, lines[i].line);
730 }
731
732 return range;
733}
734
735/* Print source lines in LINES to UIOUT.
 736
 737 SRC_AND_ASM_TUPLE and ASM_LIST hold the emitters for the last printed
 738 source line and the instructions corresponding to that source line. When
 739 printing a new source line, we reset the open emitters and open new ones
 740 for the new source line. If the source line range in LINES is not empty,
 741 this function will leave the emitters for the last printed source line open
 742 so instructions can be added to it. */
743
744static void
745btrace_print_lines (struct btrace_line_range lines, struct ui_out *uiout,
7ea78b59
SM
746 gdb::optional<ui_out_emit_tuple> *src_and_asm_tuple,
747 gdb::optional<ui_out_emit_list> *asm_list,
748 gdb_disassembly_flags flags)
f94cc897 749{
8d297bbf 750 print_source_lines_flags psl_flags;
f94cc897 751
f94cc897
MM
752 if (flags & DISASSEMBLY_FILENAME)
753 psl_flags |= PRINT_SOURCE_LINES_FILENAME;
754
7ea78b59 755 for (int line = lines.begin; line < lines.end; ++line)
f94cc897 756 {
7ea78b59 757 asm_list->reset ();
f94cc897 758
7ea78b59 759 src_and_asm_tuple->emplace (uiout, "src_and_asm_line");
f94cc897
MM
760
761 print_source_lines (lines.symtab, line, line + 1, psl_flags);
762
7ea78b59 763 asm_list->emplace (uiout, "line_asm_insn");
f94cc897
MM
764 }
765}
766
afedecd3
MM
767/* Disassemble a section of the recorded instruction trace. */
768
769static void
23a7fe75 770btrace_insn_history (struct ui_out *uiout,
31fd9caa 771 const struct btrace_thread_info *btinfo,
23a7fe75 772 const struct btrace_insn_iterator *begin,
9a24775b
PA
773 const struct btrace_insn_iterator *end,
774 gdb_disassembly_flags flags)
afedecd3 775{
9a24775b
PA
776 DEBUG ("itrace (0x%x): [%u; %u)", (unsigned) flags,
777 btrace_insn_number (begin), btrace_insn_number (end));
afedecd3 778
f94cc897
MM
779 flags |= DISASSEMBLY_SPECULATIVE;
780
7ea78b59
SM
781 struct gdbarch *gdbarch = target_gdbarch ();
782 btrace_line_range last_lines = btrace_mk_line_range (NULL, 0, 0);
f94cc897 783
7ea78b59 784 ui_out_emit_list list_emitter (uiout, "asm_insns");
f94cc897 785
7ea78b59
SM
786 gdb::optional<ui_out_emit_tuple> src_and_asm_tuple;
787 gdb::optional<ui_out_emit_list> asm_list;
afedecd3 788
8b172ce7
PA
789 gdb_pretty_print_disassembler disasm (gdbarch);
790
7ea78b59
SM
791 for (btrace_insn_iterator it = *begin; btrace_insn_cmp (&it, end) != 0;
792 btrace_insn_next (&it, 1))
afedecd3 793 {
23a7fe75
MM
794 const struct btrace_insn *insn;
795
796 insn = btrace_insn_get (&it);
797
31fd9caa
MM
798 /* A NULL instruction indicates a gap in the trace. */
799 if (insn == NULL)
800 {
801 const struct btrace_config *conf;
802
803 conf = btrace_conf (btinfo);
afedecd3 804
31fd9caa
MM
805 /* We have trace so we must have a configuration. */
806 gdb_assert (conf != NULL);
807
69090cee
TW
808 uiout->field_fmt ("insn-number", "%u",
809 btrace_insn_number (&it));
810 uiout->text ("\t");
811
812 btrace_ui_out_decode_error (uiout, btrace_insn_get_error (&it),
31fd9caa
MM
813 conf->format);
814 }
815 else
816 {
f94cc897 817 struct disasm_insn dinsn;
da8c46d2 818
f94cc897 819 if ((flags & DISASSEMBLY_SOURCE) != 0)
da8c46d2 820 {
f94cc897
MM
821 struct btrace_line_range lines;
822
823 lines = btrace_find_line_range (insn->pc);
824 if (!btrace_line_range_is_empty (lines)
825 && !btrace_line_range_contains_range (last_lines, lines))
826 {
7ea78b59
SM
827 btrace_print_lines (lines, uiout, &src_and_asm_tuple, &asm_list,
828 flags);
f94cc897
MM
829 last_lines = lines;
830 }
7ea78b59 831 else if (!src_and_asm_tuple.has_value ())
f94cc897 832 {
7ea78b59
SM
833 gdb_assert (!asm_list.has_value ());
834
835 src_and_asm_tuple.emplace (uiout, "src_and_asm_line");
836
f94cc897 837 /* No source information. */
7ea78b59 838 asm_list.emplace (uiout, "line_asm_insn");
f94cc897
MM
839 }
840
7ea78b59
SM
841 gdb_assert (src_and_asm_tuple.has_value ());
842 gdb_assert (asm_list.has_value ());
da8c46d2 843 }
da8c46d2 844
f94cc897
MM
845 memset (&dinsn, 0, sizeof (dinsn));
846 dinsn.number = btrace_insn_number (&it);
847 dinsn.addr = insn->pc;
31fd9caa 848
da8c46d2 849 if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
f94cc897 850 dinsn.is_speculative = 1;
da8c46d2 851
8b172ce7 852 disasm.pretty_print_insn (uiout, &dinsn, flags);
31fd9caa 853 }
afedecd3
MM
854 }
855}
856
f6ac5f3d 857/* The insn_history method of target record-btrace. */
afedecd3 858
f6ac5f3d
PA
859void
860record_btrace_target::insn_history (int size, gdb_disassembly_flags flags)
afedecd3
MM
861{
862 struct btrace_thread_info *btinfo;
23a7fe75
MM
863 struct btrace_insn_history *history;
864 struct btrace_insn_iterator begin, end;
afedecd3 865 struct ui_out *uiout;
23a7fe75 866 unsigned int context, covered;
afedecd3
MM
867
868 uiout = current_uiout;
2e783024 869 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
afedecd3 870 context = abs (size);
afedecd3
MM
871 if (context == 0)
872 error (_("Bad record instruction-history-size."));
873
23a7fe75
MM
874 btinfo = require_btrace ();
875 history = btinfo->insn_history;
876 if (history == NULL)
afedecd3 877 {
07bbe694 878 struct btrace_insn_iterator *replay;
afedecd3 879
9a24775b 880 DEBUG ("insn-history (0x%x): %d", (unsigned) flags, size);
afedecd3 881
07bbe694
MM
882 /* If we're replaying, we start at the replay position. Otherwise, we
883 start at the tail of the trace. */
884 replay = btinfo->replay;
885 if (replay != NULL)
886 begin = *replay;
887 else
888 btrace_insn_end (&begin, btinfo);
889
890 /* We start from here and expand in the requested direction. Then we
891 expand in the other direction, as well, to fill up any remaining
892 context. */
893 end = begin;
894 if (size < 0)
895 {
896 /* We want the current position covered, as well. */
897 covered = btrace_insn_next (&end, 1);
898 covered += btrace_insn_prev (&begin, context - covered);
899 covered += btrace_insn_next (&end, context - covered);
900 }
901 else
902 {
903 covered = btrace_insn_next (&end, context);
904 covered += btrace_insn_prev (&begin, context - covered);
905 }
afedecd3
MM
906 }
907 else
908 {
23a7fe75
MM
909 begin = history->begin;
910 end = history->end;
afedecd3 911
9a24775b 912 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", (unsigned) flags, size,
23a7fe75 913 btrace_insn_number (&begin), btrace_insn_number (&end));
afedecd3 914
23a7fe75
MM
915 if (size < 0)
916 {
917 end = begin;
918 covered = btrace_insn_prev (&begin, context);
919 }
920 else
921 {
922 begin = end;
923 covered = btrace_insn_next (&end, context);
924 }
afedecd3
MM
925 }
926
23a7fe75 927 if (covered > 0)
31fd9caa 928 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
23a7fe75
MM
929 else
930 {
931 if (size < 0)
932 printf_unfiltered (_("At the start of the branch trace record.\n"));
933 else
934 printf_unfiltered (_("At the end of the branch trace record.\n"));
935 }
afedecd3 936
23a7fe75 937 btrace_set_insn_history (btinfo, &begin, &end);
afedecd3
MM
938}
939
f6ac5f3d 940/* The insn_history_range method of target record-btrace. */
afedecd3 941
f6ac5f3d
PA
942void
943record_btrace_target::insn_history_range (ULONGEST from, ULONGEST to,
944 gdb_disassembly_flags flags)
afedecd3
MM
945{
946 struct btrace_thread_info *btinfo;
23a7fe75 947 struct btrace_insn_iterator begin, end;
afedecd3 948 struct ui_out *uiout;
23a7fe75
MM
949 unsigned int low, high;
950 int found;
afedecd3
MM
951
952 uiout = current_uiout;
2e783024 953 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
23a7fe75
MM
954 low = from;
955 high = to;
afedecd3 956
9a24775b 957 DEBUG ("insn-history (0x%x): [%u; %u)", (unsigned) flags, low, high);
afedecd3
MM
958
959 /* Check for wrap-arounds. */
23a7fe75 960 if (low != from || high != to)
afedecd3
MM
961 error (_("Bad range."));
962
0688d04e 963 if (high < low)
afedecd3
MM
964 error (_("Bad range."));
965
23a7fe75 966 btinfo = require_btrace ();
afedecd3 967
23a7fe75
MM
968 found = btrace_find_insn_by_number (&begin, btinfo, low);
969 if (found == 0)
970 error (_("Range out of bounds."));
afedecd3 971
23a7fe75
MM
972 found = btrace_find_insn_by_number (&end, btinfo, high);
973 if (found == 0)
0688d04e
MM
974 {
975 /* Silently truncate the range. */
976 btrace_insn_end (&end, btinfo);
977 }
978 else
979 {
980 /* We want both begin and end to be inclusive. */
981 btrace_insn_next (&end, 1);
982 }
afedecd3 983
31fd9caa 984 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
23a7fe75 985 btrace_set_insn_history (btinfo, &begin, &end);
afedecd3
MM
986}
987
f6ac5f3d 988/* The insn_history_from method of target record-btrace. */
afedecd3 989
f6ac5f3d
PA
990void
991record_btrace_target::insn_history_from (ULONGEST from, int size,
992 gdb_disassembly_flags flags)
afedecd3
MM
993{
994 ULONGEST begin, end, context;
995
996 context = abs (size);
0688d04e
MM
997 if (context == 0)
998 error (_("Bad record instruction-history-size."));
afedecd3
MM
999
1000 if (size < 0)
1001 {
1002 end = from;
1003
1004 if (from < context)
1005 begin = 0;
1006 else
0688d04e 1007 begin = from - context + 1;
afedecd3
MM
1008 }
1009 else
1010 {
1011 begin = from;
0688d04e 1012 end = from + context - 1;
afedecd3
MM
1013
1014 /* Check for wrap-around. */
1015 if (end < begin)
1016 end = ULONGEST_MAX;
1017 }
1018
f6ac5f3d 1019 insn_history_range (begin, end, flags);
afedecd3
MM
1020}
1021
1022/* Print the instruction number range for a function call history line. */
1023
1024static void
23a7fe75
MM
1025btrace_call_history_insn_range (struct ui_out *uiout,
1026 const struct btrace_function *bfun)
afedecd3 1027{
7acbe133
MM
1028 unsigned int begin, end, size;
1029
0860c437 1030 size = bfun->insn.size ();
7acbe133 1031 gdb_assert (size > 0);
afedecd3 1032
23a7fe75 1033 begin = bfun->insn_offset;
7acbe133 1034 end = begin + size - 1;
afedecd3 1035
23a7fe75 1036 ui_out_field_uint (uiout, "insn begin", begin);
112e8700 1037 uiout->text (",");
23a7fe75 1038 ui_out_field_uint (uiout, "insn end", end);
afedecd3
MM
1039}
1040
ce0dfbea
MM
1041/* Compute the lowest and highest source line for the instructions in BFUN
1042 and return them in PBEGIN and PEND.
1043 Ignore instructions that can't be mapped to BFUN, e.g. instructions that
1044 result from inlining or macro expansion. */
1045
1046static void
1047btrace_compute_src_line_range (const struct btrace_function *bfun,
1048 int *pbegin, int *pend)
1049{
ce0dfbea
MM
1050 struct symtab *symtab;
1051 struct symbol *sym;
ce0dfbea
MM
1052 int begin, end;
1053
1054 begin = INT_MAX;
1055 end = INT_MIN;
1056
1057 sym = bfun->sym;
1058 if (sym == NULL)
1059 goto out;
1060
1061 symtab = symbol_symtab (sym);
1062
0860c437 1063 for (const btrace_insn &insn : bfun->insn)
ce0dfbea
MM
1064 {
1065 struct symtab_and_line sal;
1066
0860c437 1067 sal = find_pc_line (insn.pc, 0);
ce0dfbea
MM
1068 if (sal.symtab != symtab || sal.line == 0)
1069 continue;
1070
325fac50
PA
1071 begin = std::min (begin, sal.line);
1072 end = std::max (end, sal.line);
ce0dfbea
MM
1073 }
1074
1075 out:
1076 *pbegin = begin;
1077 *pend = end;
1078}
1079
afedecd3
MM
1080/* Print the source line information for a function call history line. */
1081
1082static void
23a7fe75
MM
1083btrace_call_history_src_line (struct ui_out *uiout,
1084 const struct btrace_function *bfun)
afedecd3
MM
1085{
1086 struct symbol *sym;
23a7fe75 1087 int begin, end;
afedecd3
MM
1088
1089 sym = bfun->sym;
1090 if (sym == NULL)
1091 return;
1092
112e8700 1093 uiout->field_string ("file",
08be3fe3 1094 symtab_to_filename_for_display (symbol_symtab (sym)));
afedecd3 1095
ce0dfbea 1096 btrace_compute_src_line_range (bfun, &begin, &end);
23a7fe75 1097 if (end < begin)
afedecd3
MM
1098 return;
1099
112e8700
SM
1100 uiout->text (":");
1101 uiout->field_int ("min line", begin);
afedecd3 1102
23a7fe75 1103 if (end == begin)
afedecd3
MM
1104 return;
1105
112e8700
SM
1106 uiout->text (",");
1107 uiout->field_int ("max line", end);
afedecd3
MM
1108}
1109
0b722aec
MM
1110/* Get the name of a branch trace function. */
1111
1112static const char *
1113btrace_get_bfun_name (const struct btrace_function *bfun)
1114{
1115 struct minimal_symbol *msym;
1116 struct symbol *sym;
1117
1118 if (bfun == NULL)
1119 return "??";
1120
1121 msym = bfun->msym;
1122 sym = bfun->sym;
1123
1124 if (sym != NULL)
1125 return SYMBOL_PRINT_NAME (sym);
1126 else if (msym != NULL)
efd66ac6 1127 return MSYMBOL_PRINT_NAME (msym);
0b722aec
MM
1128 else
1129 return "??";
1130}
1131
afedecd3
MM
1132/* Disassemble a section of the recorded function trace. */
1133
1134static void
23a7fe75 1135btrace_call_history (struct ui_out *uiout,
8710b709 1136 const struct btrace_thread_info *btinfo,
23a7fe75
MM
1137 const struct btrace_call_iterator *begin,
1138 const struct btrace_call_iterator *end,
8d297bbf 1139 int int_flags)
afedecd3 1140{
23a7fe75 1141 struct btrace_call_iterator it;
8d297bbf 1142 record_print_flags flags = (enum record_print_flag) int_flags;
afedecd3 1143
8d297bbf 1144 DEBUG ("ftrace (0x%x): [%u; %u)", int_flags, btrace_call_number (begin),
23a7fe75 1145 btrace_call_number (end));
afedecd3 1146
23a7fe75 1147 for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
afedecd3 1148 {
23a7fe75
MM
1149 const struct btrace_function *bfun;
1150 struct minimal_symbol *msym;
1151 struct symbol *sym;
1152
1153 bfun = btrace_call_get (&it);
23a7fe75 1154 sym = bfun->sym;
0b722aec 1155 msym = bfun->msym;
23a7fe75 1156
afedecd3 1157 /* Print the function index. */
23a7fe75 1158 ui_out_field_uint (uiout, "index", bfun->number);
112e8700 1159 uiout->text ("\t");
afedecd3 1160
31fd9caa
MM
1161 /* Indicate gaps in the trace. */
1162 if (bfun->errcode != 0)
1163 {
1164 const struct btrace_config *conf;
1165
1166 conf = btrace_conf (btinfo);
1167
1168 /* We have trace so we must have a configuration. */
1169 gdb_assert (conf != NULL);
1170
1171 btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);
1172
1173 continue;
1174 }
1175
8710b709
MM
1176 if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
1177 {
1178 int level = bfun->level + btinfo->level, i;
1179
1180 for (i = 0; i < level; ++i)
112e8700 1181 uiout->text (" ");
8710b709
MM
1182 }
1183
1184 if (sym != NULL)
112e8700 1185 uiout->field_string ("function", SYMBOL_PRINT_NAME (sym));
8710b709 1186 else if (msym != NULL)
112e8700
SM
1187 uiout->field_string ("function", MSYMBOL_PRINT_NAME (msym));
1188 else if (!uiout->is_mi_like_p ())
1189 uiout->field_string ("function", "??");
8710b709 1190
1e038f67 1191 if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
afedecd3 1192 {
112e8700 1193 uiout->text (_("\tinst "));
23a7fe75 1194 btrace_call_history_insn_range (uiout, bfun);
afedecd3
MM
1195 }
1196
1e038f67 1197 if ((flags & RECORD_PRINT_SRC_LINE) != 0)
afedecd3 1198 {
112e8700 1199 uiout->text (_("\tat "));
23a7fe75 1200 btrace_call_history_src_line (uiout, bfun);
afedecd3
MM
1201 }
1202
112e8700 1203 uiout->text ("\n");
afedecd3
MM
1204 }
1205}
1206
f6ac5f3d 1207/* The call_history method of target record-btrace. */
afedecd3 1208
f6ac5f3d
PA
1209void
1210record_btrace_target::call_history (int size, record_print_flags flags)
afedecd3
MM
1211{
1212 struct btrace_thread_info *btinfo;
23a7fe75
MM
1213 struct btrace_call_history *history;
1214 struct btrace_call_iterator begin, end;
afedecd3 1215 struct ui_out *uiout;
23a7fe75 1216 unsigned int context, covered;
afedecd3
MM
1217
1218 uiout = current_uiout;
2e783024 1219 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
afedecd3 1220 context = abs (size);
afedecd3
MM
1221 if (context == 0)
1222 error (_("Bad record function-call-history-size."));
1223
23a7fe75
MM
1224 btinfo = require_btrace ();
1225 history = btinfo->call_history;
1226 if (history == NULL)
afedecd3 1227 {
07bbe694 1228 struct btrace_insn_iterator *replay;
afedecd3 1229
0cb7c7b0 1230 DEBUG ("call-history (0x%x): %d", (int) flags, size);
afedecd3 1231
07bbe694
MM
1232 /* If we're replaying, we start at the replay position. Otherwise, we
1233 start at the tail of the trace. */
1234 replay = btinfo->replay;
1235 if (replay != NULL)
1236 {
07bbe694 1237 begin.btinfo = btinfo;
a0f1b963 1238 begin.index = replay->call_index;
07bbe694
MM
1239 }
1240 else
1241 btrace_call_end (&begin, btinfo);
1242
1243 /* We start from here and expand in the requested direction. Then we
1244 expand in the other direction, as well, to fill up any remaining
1245 context. */
1246 end = begin;
1247 if (size < 0)
1248 {
1249 /* We want the current position covered, as well. */
1250 covered = btrace_call_next (&end, 1);
1251 covered += btrace_call_prev (&begin, context - covered);
1252 covered += btrace_call_next (&end, context - covered);
1253 }
1254 else
1255 {
1256 covered = btrace_call_next (&end, context);
 1257 covered = btrace_call_prev (&begin, context - covered);
1258 }
afedecd3
MM
1259 }
1260 else
1261 {
23a7fe75
MM
1262 begin = history->begin;
1263 end = history->end;
afedecd3 1264
0cb7c7b0 1265 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", (int) flags, size,
23a7fe75 1266 btrace_call_number (&begin), btrace_call_number (&end));
afedecd3 1267
23a7fe75
MM
1268 if (size < 0)
1269 {
1270 end = begin;
1271 covered = btrace_call_prev (&begin, context);
1272 }
1273 else
1274 {
1275 begin = end;
1276 covered = btrace_call_next (&end, context);
1277 }
afedecd3
MM
1278 }
1279
23a7fe75 1280 if (covered > 0)
8710b709 1281 btrace_call_history (uiout, btinfo, &begin, &end, flags);
23a7fe75
MM
1282 else
1283 {
1284 if (size < 0)
1285 printf_unfiltered (_("At the start of the branch trace record.\n"));
1286 else
1287 printf_unfiltered (_("At the end of the branch trace record.\n"));
1288 }
afedecd3 1289
23a7fe75 1290 btrace_set_call_history (btinfo, &begin, &end);
afedecd3
MM
1291}
1292
f6ac5f3d 1293/* The call_history_range method of target record-btrace. */
afedecd3 1294
f6ac5f3d
PA
1295void
1296record_btrace_target::call_history_range (ULONGEST from, ULONGEST to,
1297 record_print_flags flags)
afedecd3
MM
1298{
1299 struct btrace_thread_info *btinfo;
23a7fe75 1300 struct btrace_call_iterator begin, end;
afedecd3 1301 struct ui_out *uiout;
23a7fe75
MM
1302 unsigned int low, high;
1303 int found;
afedecd3
MM
1304
1305 uiout = current_uiout;
2e783024 1306 ui_out_emit_tuple tuple_emitter (uiout, "func history");
23a7fe75
MM
1307 low = from;
1308 high = to;
afedecd3 1309
0cb7c7b0 1310 DEBUG ("call-history (0x%x): [%u; %u)", (int) flags, low, high);
afedecd3
MM
1311
1312 /* Check for wrap-arounds. */
23a7fe75 1313 if (low != from || high != to)
afedecd3
MM
1314 error (_("Bad range."));
1315
0688d04e 1316 if (high < low)
afedecd3
MM
1317 error (_("Bad range."));
1318
23a7fe75 1319 btinfo = require_btrace ();
afedecd3 1320
23a7fe75
MM
1321 found = btrace_find_call_by_number (&begin, btinfo, low);
1322 if (found == 0)
1323 error (_("Range out of bounds."));
afedecd3 1324
23a7fe75
MM
1325 found = btrace_find_call_by_number (&end, btinfo, high);
1326 if (found == 0)
0688d04e
MM
1327 {
1328 /* Silently truncate the range. */
1329 btrace_call_end (&end, btinfo);
1330 }
1331 else
1332 {
1333 /* We want both begin and end to be inclusive. */
1334 btrace_call_next (&end, 1);
1335 }
afedecd3 1336
8710b709 1337 btrace_call_history (uiout, btinfo, &begin, &end, flags);
23a7fe75 1338 btrace_set_call_history (btinfo, &begin, &end);
afedecd3
MM
1339}
1340
f6ac5f3d 1341/* The call_history_from method of target record-btrace. */
afedecd3 1342
f6ac5f3d
PA
1343void
1344record_btrace_target::call_history_from (ULONGEST from, int size,
1345 record_print_flags flags)
afedecd3
MM
1346{
1347 ULONGEST begin, end, context;
1348
1349 context = abs (size);
0688d04e
MM
1350 if (context == 0)
1351 error (_("Bad record function-call-history-size."));
afedecd3
MM
1352
1353 if (size < 0)
1354 {
1355 end = from;
1356
1357 if (from < context)
1358 begin = 0;
1359 else
0688d04e 1360 begin = from - context + 1;
afedecd3
MM
1361 }
1362 else
1363 {
1364 begin = from;
0688d04e 1365 end = from + context - 1;
afedecd3
MM
1366
1367 /* Check for wrap-around. */
1368 if (end < begin)
1369 end = ULONGEST_MAX;
1370 }
1371
f6ac5f3d 1372 call_history_range (begin, end, flags);
afedecd3
MM
1373}
1374
f6ac5f3d 1375/* The record_method method of target record-btrace. */
b158a20f 1376
f6ac5f3d
PA
1377enum record_method
1378record_btrace_target::record_method (ptid_t ptid)
b158a20f 1379{
b158a20f
TW
1380 struct thread_info * const tp = find_thread_ptid (ptid);
1381
1382 if (tp == NULL)
1383 error (_("No thread."));
1384
1385 if (tp->btrace.target == NULL)
1386 return RECORD_METHOD_NONE;
1387
1388 return RECORD_METHOD_BTRACE;
1389}
1390
f6ac5f3d 1391/* The record_is_replaying method of target record-btrace. */
07bbe694 1392
57810aa7 1393bool
f6ac5f3d 1394record_btrace_target::record_is_replaying (ptid_t ptid)
07bbe694 1395{
08036331
PA
1396 for (thread_info *tp : all_non_exited_threads (ptid))
1397 if (btrace_is_replaying (tp))
57810aa7 1398 return true;
07bbe694 1399
57810aa7 1400 return false;
07bbe694
MM
1401}
1402
f6ac5f3d 1403/* The record_will_replay method of target record-btrace. */
7ff27e9b 1404
57810aa7 1405bool
f6ac5f3d 1406record_btrace_target::record_will_replay (ptid_t ptid, int dir)
7ff27e9b 1407{
f6ac5f3d 1408 return dir == EXEC_REVERSE || record_is_replaying (ptid);
7ff27e9b
MM
1409}
1410
f6ac5f3d 1411/* The xfer_partial method of target record-btrace. */
633785ff 1412
f6ac5f3d
PA
1413enum target_xfer_status
1414record_btrace_target::xfer_partial (enum target_object object,
1415 const char *annex, gdb_byte *readbuf,
1416 const gdb_byte *writebuf, ULONGEST offset,
1417 ULONGEST len, ULONGEST *xfered_len)
633785ff 1418{
633785ff 1419 /* Filter out requests that don't make sense during replay. */
67b5c0c1 1420 if (replay_memory_access == replay_memory_access_read_only
aef92902 1421 && !record_btrace_generating_corefile
f6ac5f3d 1422 && record_is_replaying (inferior_ptid))
633785ff
MM
1423 {
1424 switch (object)
1425 {
1426 case TARGET_OBJECT_MEMORY:
1427 {
1428 struct target_section *section;
1429
1430 /* We do not allow writing memory in general. */
1431 if (writebuf != NULL)
9b409511
YQ
1432 {
1433 *xfered_len = len;
bc113b4e 1434 return TARGET_XFER_UNAVAILABLE;
9b409511 1435 }
633785ff
MM
1436
1437 /* We allow reading readonly memory. */
f6ac5f3d 1438 section = target_section_by_addr (this, offset);
633785ff
MM
1439 if (section != NULL)
1440 {
1441 /* Check if the section we found is readonly. */
1442 if ((bfd_get_section_flags (section->the_bfd_section->owner,
1443 section->the_bfd_section)
1444 & SEC_READONLY) != 0)
1445 {
1446 /* Truncate the request to fit into this section. */
325fac50 1447 len = std::min (len, section->endaddr - offset);
633785ff
MM
1448 break;
1449 }
1450 }
1451
9b409511 1452 *xfered_len = len;
bc113b4e 1453 return TARGET_XFER_UNAVAILABLE;
633785ff
MM
1454 }
1455 }
1456 }
1457
1458 /* Forward the request. */
b6a8c27b
PA
1459 return this->beneath ()->xfer_partial (object, annex, readbuf, writebuf,
1460 offset, len, xfered_len);
633785ff
MM
1461}
1462
f6ac5f3d 1463/* The insert_breakpoint method of target record-btrace. */
633785ff 1464
f6ac5f3d
PA
1465int
1466record_btrace_target::insert_breakpoint (struct gdbarch *gdbarch,
1467 struct bp_target_info *bp_tgt)
633785ff 1468{
67b5c0c1
MM
1469 const char *old;
1470 int ret;
633785ff
MM
1471
1472 /* Inserting breakpoints requires accessing memory. Allow it for the
1473 duration of this function. */
67b5c0c1
MM
1474 old = replay_memory_access;
1475 replay_memory_access = replay_memory_access_read_write;
633785ff
MM
1476
1477 ret = 0;
492d29ea
PA
1478 TRY
1479 {
b6a8c27b 1480 ret = this->beneath ()->insert_breakpoint (gdbarch, bp_tgt);
492d29ea 1481 }
492d29ea
PA
1482 CATCH (except, RETURN_MASK_ALL)
1483 {
6c63c96a 1484 replay_memory_access = old;
492d29ea
PA
1485 throw_exception (except);
1486 }
1487 END_CATCH
6c63c96a 1488 replay_memory_access = old;
633785ff
MM
1489
1490 return ret;
1491}
1492
f6ac5f3d 1493/* The remove_breakpoint method of target record-btrace. */
633785ff 1494
f6ac5f3d
PA
1495int
1496record_btrace_target::remove_breakpoint (struct gdbarch *gdbarch,
1497 struct bp_target_info *bp_tgt,
1498 enum remove_bp_reason reason)
633785ff 1499{
67b5c0c1
MM
1500 const char *old;
1501 int ret;
633785ff
MM
1502
1503 /* Removing breakpoints requires accessing memory. Allow it for the
1504 duration of this function. */
67b5c0c1
MM
1505 old = replay_memory_access;
1506 replay_memory_access = replay_memory_access_read_write;
633785ff
MM
1507
1508 ret = 0;
492d29ea
PA
1509 TRY
1510 {
b6a8c27b 1511 ret = this->beneath ()->remove_breakpoint (gdbarch, bp_tgt, reason);
492d29ea 1512 }
492d29ea
PA
1513 CATCH (except, RETURN_MASK_ALL)
1514 {
6c63c96a 1515 replay_memory_access = old;
492d29ea
PA
1516 throw_exception (except);
1517 }
1518 END_CATCH
6c63c96a 1519 replay_memory_access = old;
633785ff
MM
1520
1521 return ret;
1522}
1523
f6ac5f3d 1524/* The fetch_registers method of target record-btrace. */
1f3ef581 1525
f6ac5f3d
PA
1526void
1527record_btrace_target::fetch_registers (struct regcache *regcache, int regno)
1f3ef581
MM
1528{
1529 struct btrace_insn_iterator *replay;
1530 struct thread_info *tp;
1531
222312d3 1532 tp = find_thread_ptid (regcache->ptid ());
1f3ef581
MM
1533 gdb_assert (tp != NULL);
1534
1535 replay = tp->btrace.replay;
aef92902 1536 if (replay != NULL && !record_btrace_generating_corefile)
1f3ef581
MM
1537 {
1538 const struct btrace_insn *insn;
1539 struct gdbarch *gdbarch;
1540 int pcreg;
1541
ac7936df 1542 gdbarch = regcache->arch ();
1f3ef581
MM
1543 pcreg = gdbarch_pc_regnum (gdbarch);
1544 if (pcreg < 0)
1545 return;
1546
1547 /* We can only provide the PC register. */
1548 if (regno >= 0 && regno != pcreg)
1549 return;
1550
1551 insn = btrace_insn_get (replay);
1552 gdb_assert (insn != NULL);
1553
73e1c03f 1554 regcache->raw_supply (regno, &insn->pc);
1f3ef581
MM
1555 }
1556 else
b6a8c27b 1557 this->beneath ()->fetch_registers (regcache, regno);
1f3ef581
MM
1558}
1559
f6ac5f3d 1560/* The store_registers method of target record-btrace. */
1f3ef581 1561
f6ac5f3d
PA
1562void
1563record_btrace_target::store_registers (struct regcache *regcache, int regno)
1f3ef581 1564{
a52eab48 1565 if (!record_btrace_generating_corefile
222312d3 1566 && record_is_replaying (regcache->ptid ()))
4d10e986 1567 error (_("Cannot write registers while replaying."));
1f3ef581
MM
1568
1569 gdb_assert (may_write_registers != 0);
1570
b6a8c27b 1571 this->beneath ()->store_registers (regcache, regno);
1f3ef581
MM
1572}
1573
f6ac5f3d 1574/* The prepare_to_store method of target record-btrace. */
1f3ef581 1575
f6ac5f3d
PA
1576void
1577record_btrace_target::prepare_to_store (struct regcache *regcache)
1f3ef581 1578{
a52eab48 1579 if (!record_btrace_generating_corefile
222312d3 1580 && record_is_replaying (regcache->ptid ()))
1f3ef581
MM
1581 return;
1582
b6a8c27b 1583 this->beneath ()->prepare_to_store (regcache);
1f3ef581
MM
1584}
1585
0b722aec
MM
1586/* The branch trace frame cache. */
1587
1588struct btrace_frame_cache
1589{
1590 /* The thread. */
1591 struct thread_info *tp;
1592
1593 /* The frame info. */
1594 struct frame_info *frame;
1595
1596 /* The branch trace function segment. */
1597 const struct btrace_function *bfun;
1598};
1599
1600/* A struct btrace_frame_cache hash table indexed by NEXT. */
1601
1602static htab_t bfcache;
1603
1604/* hash_f for htab_create_alloc of bfcache. */
1605
1606static hashval_t
1607bfcache_hash (const void *arg)
1608{
19ba03f4
SM
1609 const struct btrace_frame_cache *cache
1610 = (const struct btrace_frame_cache *) arg;
0b722aec
MM
1611
1612 return htab_hash_pointer (cache->frame);
1613}
1614
1615/* eq_f for htab_create_alloc of bfcache. */
1616
1617static int
1618bfcache_eq (const void *arg1, const void *arg2)
1619{
19ba03f4
SM
1620 const struct btrace_frame_cache *cache1
1621 = (const struct btrace_frame_cache *) arg1;
1622 const struct btrace_frame_cache *cache2
1623 = (const struct btrace_frame_cache *) arg2;
0b722aec
MM
1624
1625 return cache1->frame == cache2->frame;
1626}
1627
1628/* Create a new btrace frame cache. */
1629
1630static struct btrace_frame_cache *
1631bfcache_new (struct frame_info *frame)
1632{
1633 struct btrace_frame_cache *cache;
1634 void **slot;
1635
1636 cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
1637 cache->frame = frame;
1638
1639 slot = htab_find_slot (bfcache, cache, INSERT);
1640 gdb_assert (*slot == NULL);
1641 *slot = cache;
1642
1643 return cache;
1644}
1645
1646/* Extract the branch trace function from a branch trace frame. */
1647
1648static const struct btrace_function *
1649btrace_get_frame_function (struct frame_info *frame)
1650{
1651 const struct btrace_frame_cache *cache;
0b722aec
MM
1652 struct btrace_frame_cache pattern;
1653 void **slot;
1654
1655 pattern.frame = frame;
1656
1657 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1658 if (slot == NULL)
1659 return NULL;
1660
19ba03f4 1661 cache = (const struct btrace_frame_cache *) *slot;
0b722aec
MM
1662 return cache->bfun;
1663}
1664
cecac1ab
MM
1665/* Implement stop_reason method for record_btrace_frame_unwind. */
1666
1667static enum unwind_stop_reason
1668record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1669 void **this_cache)
1670{
0b722aec
MM
1671 const struct btrace_frame_cache *cache;
1672 const struct btrace_function *bfun;
1673
19ba03f4 1674 cache = (const struct btrace_frame_cache *) *this_cache;
0b722aec
MM
1675 bfun = cache->bfun;
1676 gdb_assert (bfun != NULL);
1677
42bfe59e 1678 if (bfun->up == 0)
0b722aec
MM
1679 return UNWIND_UNAVAILABLE;
1680
1681 return UNWIND_NO_REASON;
cecac1ab
MM
1682}
1683
1684/* Implement this_id method for record_btrace_frame_unwind. */
1685
1686static void
1687record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
1688 struct frame_id *this_id)
1689{
0b722aec
MM
1690 const struct btrace_frame_cache *cache;
1691 const struct btrace_function *bfun;
4aeb0dfc 1692 struct btrace_call_iterator it;
0b722aec
MM
1693 CORE_ADDR code, special;
1694
19ba03f4 1695 cache = (const struct btrace_frame_cache *) *this_cache;
0b722aec
MM
1696
1697 bfun = cache->bfun;
1698 gdb_assert (bfun != NULL);
1699
4aeb0dfc
TW
1700 while (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->prev) != 0)
1701 bfun = btrace_call_get (&it);
0b722aec
MM
1702
1703 code = get_frame_func (this_frame);
1704 special = bfun->number;
1705
1706 *this_id = frame_id_build_unavailable_stack_special (code, special);
1707
1708 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1709 btrace_get_bfun_name (cache->bfun),
1710 core_addr_to_string_nz (this_id->code_addr),
1711 core_addr_to_string_nz (this_id->special_addr));
cecac1ab
MM
1712}
1713
1714/* Implement prev_register method for record_btrace_frame_unwind. */
1715
1716static struct value *
1717record_btrace_frame_prev_register (struct frame_info *this_frame,
1718 void **this_cache,
1719 int regnum)
1720{
0b722aec
MM
1721 const struct btrace_frame_cache *cache;
1722 const struct btrace_function *bfun, *caller;
42bfe59e 1723 struct btrace_call_iterator it;
0b722aec
MM
1724 struct gdbarch *gdbarch;
1725 CORE_ADDR pc;
1726 int pcreg;
1727
1728 gdbarch = get_frame_arch (this_frame);
1729 pcreg = gdbarch_pc_regnum (gdbarch);
1730 if (pcreg < 0 || regnum != pcreg)
1731 throw_error (NOT_AVAILABLE_ERROR,
1732 _("Registers are not available in btrace record history"));
1733
19ba03f4 1734 cache = (const struct btrace_frame_cache *) *this_cache;
0b722aec
MM
1735 bfun = cache->bfun;
1736 gdb_assert (bfun != NULL);
1737
42bfe59e 1738 if (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->up) == 0)
0b722aec
MM
1739 throw_error (NOT_AVAILABLE_ERROR,
1740 _("No caller in btrace record history"));
1741
42bfe59e
TW
1742 caller = btrace_call_get (&it);
1743
0b722aec 1744 if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
0860c437 1745 pc = caller->insn.front ().pc;
0b722aec
MM
1746 else
1747 {
0860c437 1748 pc = caller->insn.back ().pc;
0b722aec
MM
1749 pc += gdb_insn_length (gdbarch, pc);
1750 }
1751
1752 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1753 btrace_get_bfun_name (bfun), bfun->level,
1754 core_addr_to_string_nz (pc));
1755
1756 return frame_unwind_got_address (this_frame, regnum, pc);
cecac1ab
MM
1757}
1758
1759/* Implement sniffer method for record_btrace_frame_unwind. */
1760
1761static int
1762record_btrace_frame_sniffer (const struct frame_unwind *self,
1763 struct frame_info *this_frame,
1764 void **this_cache)
1765{
0b722aec
MM
1766 const struct btrace_function *bfun;
1767 struct btrace_frame_cache *cache;
cecac1ab 1768 struct thread_info *tp;
0b722aec 1769 struct frame_info *next;
cecac1ab
MM
1770
1771 /* THIS_FRAME does not contain a reference to its thread. */
00431a78 1772 tp = inferior_thread ();
cecac1ab 1773
0b722aec
MM
1774 bfun = NULL;
1775 next = get_next_frame (this_frame);
1776 if (next == NULL)
1777 {
1778 const struct btrace_insn_iterator *replay;
1779
1780 replay = tp->btrace.replay;
1781 if (replay != NULL)
08c3f6d2 1782 bfun = &replay->btinfo->functions[replay->call_index];
0b722aec
MM
1783 }
1784 else
1785 {
1786 const struct btrace_function *callee;
42bfe59e 1787 struct btrace_call_iterator it;
0b722aec
MM
1788
1789 callee = btrace_get_frame_function (next);
42bfe59e
TW
1790 if (callee == NULL || (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
1791 return 0;
1792
1793 if (btrace_find_call_by_number (&it, &tp->btrace, callee->up) == 0)
1794 return 0;
1795
1796 bfun = btrace_call_get (&it);
0b722aec
MM
1797 }
1798
1799 if (bfun == NULL)
1800 return 0;
1801
1802 DEBUG ("[frame] sniffed frame for %s on level %d",
1803 btrace_get_bfun_name (bfun), bfun->level);
1804
1805 /* This is our frame. Initialize the frame cache. */
1806 cache = bfcache_new (this_frame);
1807 cache->tp = tp;
1808 cache->bfun = bfun;
1809
1810 *this_cache = cache;
1811 return 1;
1812}
1813
1814/* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1815
1816static int
1817record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
1818 struct frame_info *this_frame,
1819 void **this_cache)
1820{
1821 const struct btrace_function *bfun, *callee;
1822 struct btrace_frame_cache *cache;
42bfe59e 1823 struct btrace_call_iterator it;
0b722aec 1824 struct frame_info *next;
42bfe59e 1825 struct thread_info *tinfo;
0b722aec
MM
1826
1827 next = get_next_frame (this_frame);
1828 if (next == NULL)
1829 return 0;
1830
1831 callee = btrace_get_frame_function (next);
1832 if (callee == NULL)
1833 return 0;
1834
1835 if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1836 return 0;
1837
00431a78 1838 tinfo = inferior_thread ();
42bfe59e 1839 if (btrace_find_call_by_number (&it, &tinfo->btrace, callee->up) == 0)
0b722aec
MM
1840 return 0;
1841
42bfe59e
TW
1842 bfun = btrace_call_get (&it);
1843
0b722aec
MM
1844 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1845 btrace_get_bfun_name (bfun), bfun->level);
1846
1847 /* This is our frame. Initialize the frame cache. */
1848 cache = bfcache_new (this_frame);
42bfe59e 1849 cache->tp = tinfo;
0b722aec
MM
1850 cache->bfun = bfun;
1851
1852 *this_cache = cache;
1853 return 1;
1854}
1855
1856static void
1857record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
1858{
1859 struct btrace_frame_cache *cache;
1860 void **slot;
1861
19ba03f4 1862 cache = (struct btrace_frame_cache *) this_cache;
0b722aec
MM
1863
1864 slot = htab_find_slot (bfcache, cache, NO_INSERT);
1865 gdb_assert (slot != NULL);
1866
1867 htab_remove_elt (bfcache, cache);
cecac1ab
MM
1868}
1869
1870/* btrace recording does not store previous memory content, nor the stack
1871 frames' content. Any unwinding would return erroneous results as the stack
1872 contents no longer match the changed PC value restored from history.
1873 Therefore this unwinder reports any possibly unwound registers as
1874 <unavailable>. */
1875
0b722aec 1876const struct frame_unwind record_btrace_frame_unwind =
cecac1ab
MM
1877{
1878 NORMAL_FRAME,
1879 record_btrace_frame_unwind_stop_reason,
1880 record_btrace_frame_this_id,
1881 record_btrace_frame_prev_register,
1882 NULL,
0b722aec
MM
1883 record_btrace_frame_sniffer,
1884 record_btrace_frame_dealloc_cache
1885};
1886
1887const struct frame_unwind record_btrace_tailcall_frame_unwind =
1888{
1889 TAILCALL_FRAME,
1890 record_btrace_frame_unwind_stop_reason,
1891 record_btrace_frame_this_id,
1892 record_btrace_frame_prev_register,
1893 NULL,
1894 record_btrace_tailcall_frame_sniffer,
1895 record_btrace_frame_dealloc_cache
cecac1ab 1896};
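/* For illustration only (not from the GDB sources or manual): when replaying,
   these unwinders reconstruct only the PC of outer frames from the recorded
   trace, so a session might look roughly like this; the register name and
   output below are made up for the example.

     (gdb) record btrace
     (gdb) next
     (gdb) reverse-stepi
     (gdb) up
     (gdb) info registers sp
     sp             <unavailable>

   Anything the unwinder cannot reconstruct is reported as unavailable rather
   than read from live memory, which would no longer match the replayed PC.  */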
b2f4cfde 1897
f6ac5f3d 1898/* Implement the get_unwinder method. */
ac01945b 1899
f6ac5f3d
PA
1900const struct frame_unwind *
1901record_btrace_target::get_unwinder ()
ac01945b
TT
1902{
1903 return &record_btrace_frame_unwind;
1904}
1905
f6ac5f3d 1906/* Implement the get_tailcall_unwinder method. */
ac01945b 1907
f6ac5f3d
PA
1908const struct frame_unwind *
1909record_btrace_target::get_tailcall_unwinder ()
ac01945b
TT
1910{
1911 return &record_btrace_tailcall_frame_unwind;
1912}
1913
987e68b1
MM
1914/* Return a human-readable string for FLAG. */
1915
1916static const char *
1917btrace_thread_flag_to_str (enum btrace_thread_flag flag)
1918{
1919 switch (flag)
1920 {
1921 case BTHR_STEP:
1922 return "step";
1923
1924 case BTHR_RSTEP:
1925 return "reverse-step";
1926
1927 case BTHR_CONT:
1928 return "cont";
1929
1930 case BTHR_RCONT:
1931 return "reverse-cont";
1932
1933 case BTHR_STOP:
1934 return "stop";
1935 }
1936
1937 return "<invalid>";
1938}
1939
52834460
MM
1940/* Indicate that TP should be resumed according to FLAG. */
1941
1942static void
1943record_btrace_resume_thread (struct thread_info *tp,
1944 enum btrace_thread_flag flag)
1945{
1946 struct btrace_thread_info *btinfo;
1947
43792cf0 1948 DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp),
987e68b1 1949 target_pid_to_str (tp->ptid), flag, btrace_thread_flag_to_str (flag));
52834460
MM
1950
1951 btinfo = &tp->btrace;
1952
52834460 1953 /* Fetch the latest branch trace. */
4a4495d6 1954 btrace_fetch (tp, record_btrace_get_cpu ());
52834460 1955
0ca912df
MM
1956 /* A resume request overwrites a preceding resume or stop request. */
1957 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
52834460
MM
1958 btinfo->flags |= flag;
1959}
1960
ec71cc2f
MM
1961/* Get the current frame for TP. */
1962
79b8d3b0
TT
1963static struct frame_id
1964get_thread_current_frame_id (struct thread_info *tp)
ec71cc2f 1965{
79b8d3b0 1966 struct frame_id id;
ec71cc2f
MM
1967 int executing;
1968
00431a78
PA
1969 /* Set current thread, which is implicitly used by
1970 get_current_frame. */
1971 scoped_restore_current_thread restore_thread;
1972
1973 switch_to_thread (tp);
ec71cc2f
MM
1974
1975 /* Clear the executing flag to allow changes to the current frame.
1976 We are not actually running, yet. We just started a reverse execution
1977 command or a record goto command.
1978 For the latter, EXECUTING is false and this has no effect.
f6ac5f3d 1979 For the former, EXECUTING is true and we're in wait, about to
ec71cc2f
MM
1980 move the thread. Since we need to recompute the stack, we temporarily
1981 set EXECUTING to false. */
00431a78
PA
1982 executing = tp->executing;
1983 set_executing (inferior_ptid, false);
ec71cc2f 1984
79b8d3b0 1985 id = null_frame_id;
ec71cc2f
MM
1986 TRY
1987 {
79b8d3b0 1988 id = get_frame_id (get_current_frame ());
ec71cc2f
MM
1989 }
1990 CATCH (except, RETURN_MASK_ALL)
1991 {
1992 /* Restore the previous execution state. */
1993 set_executing (inferior_ptid, executing);
1994
ec71cc2f
MM
1995 throw_exception (except);
1996 }
1997 END_CATCH
1998
1999 /* Restore the previous execution state. */
2000 set_executing (inferior_ptid, executing);
2001
79b8d3b0 2002 return id;
ec71cc2f
MM
2003}
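/* Illustrative sketch (not GDB code): the save/restore-around-a-throwing-call
   pattern used above for the EXECUTING flag, written as a standalone RAII
   guard.  All names below are made up for the example.  */
#if 0
#include <utility>

template<typename T>
struct scoped_restore_value
{
  scoped_restore_value (T &var, T tmp)
    : m_var (var), m_old (var)
  { m_var = std::move (tmp); }

  /* The old value is put back even if the guarded code throws.  */
  ~scoped_restore_value () { m_var = m_old; }

  T &m_var;
  T m_old;
};

static bool executing = true;

static int
compute_something ()
{
  /* Temporarily pretend we are not executing, as above.  */
  scoped_restore_value<bool> restore (executing, false);

  /* ... code that may throw; EXECUTING is restored either way ...  */
  return 0;
}
#endif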
2004
52834460
MM
2005/* Start replaying a thread. */
2006
2007static struct btrace_insn_iterator *
2008record_btrace_start_replaying (struct thread_info *tp)
2009{
52834460
MM
2010 struct btrace_insn_iterator *replay;
2011 struct btrace_thread_info *btinfo;
52834460
MM
2012
2013 btinfo = &tp->btrace;
2014 replay = NULL;
2015
2016 /* We can't start replaying without trace. */
b54b03bd 2017 if (btinfo->functions.empty ())
52834460
MM
2018 return NULL;
2019
52834460
MM
2020 /* GDB stores the current frame_id when stepping in order to detect steps
2021 into subroutines.
2022 Since frames are computed differently when we're replaying, we need to
2023 recompute those stored frames and fix them up so we can still detect
2024 subroutines after we started replaying. */
492d29ea 2025 TRY
52834460 2026 {
52834460
MM
2027 struct frame_id frame_id;
2028 int upd_step_frame_id, upd_step_stack_frame_id;
2029
2030 /* The current frame without replaying - computed via normal unwind. */
79b8d3b0 2031 frame_id = get_thread_current_frame_id (tp);
52834460
MM
2032
2033 /* Check if we need to update any stepping-related frame id's. */
2034 upd_step_frame_id = frame_id_eq (frame_id,
2035 tp->control.step_frame_id);
2036 upd_step_stack_frame_id = frame_id_eq (frame_id,
2037 tp->control.step_stack_frame_id);
2038
2039 /* We start replaying at the end of the branch trace. This corresponds
2040 to the current instruction. */
8d749320 2041 replay = XNEW (struct btrace_insn_iterator);
52834460
MM
2042 btrace_insn_end (replay, btinfo);
2043
31fd9caa
MM
2044 /* Skip gaps at the end of the trace. */
2045 while (btrace_insn_get (replay) == NULL)
2046 {
2047 unsigned int steps;
2048
2049 steps = btrace_insn_prev (replay, 1);
2050 if (steps == 0)
2051 error (_("No trace."));
2052 }
2053
52834460
MM
2054 /* We're not replaying, yet. */
2055 gdb_assert (btinfo->replay == NULL);
2056 btinfo->replay = replay;
2057
2058 /* Make sure we're not using any stale registers. */
00431a78 2059 registers_changed_thread (tp);
52834460
MM
2060
2061 /* The current frame with replaying - computed via btrace unwind. */
79b8d3b0 2062 frame_id = get_thread_current_frame_id (tp);
52834460
MM
2063
2064 /* Replace stepping related frames where necessary. */
2065 if (upd_step_frame_id)
2066 tp->control.step_frame_id = frame_id;
2067 if (upd_step_stack_frame_id)
2068 tp->control.step_stack_frame_id = frame_id;
2069 }
492d29ea 2070 CATCH (except, RETURN_MASK_ALL)
52834460
MM
2071 {
2072 xfree (btinfo->replay);
2073 btinfo->replay = NULL;
2074
00431a78 2075 registers_changed_thread (tp);
52834460
MM
2076
2077 throw_exception (except);
2078 }
492d29ea 2079 END_CATCH
52834460
MM
2080
2081 return replay;
2082}
2083
2084/* Stop replaying a thread. */
2085
2086static void
2087record_btrace_stop_replaying (struct thread_info *tp)
2088{
2089 struct btrace_thread_info *btinfo;
2090
2091 btinfo = &tp->btrace;
2092
2093 xfree (btinfo->replay);
2094 btinfo->replay = NULL;
2095
2096 /* Make sure we're not leaving any stale registers. */
00431a78 2097 registers_changed_thread (tp);
52834460
MM
2098}
2099
e3cfc1c7
MM
2100/* Stop replaying TP if it is at the end of its execution history. */
2101
2102static void
2103record_btrace_stop_replaying_at_end (struct thread_info *tp)
2104{
2105 struct btrace_insn_iterator *replay, end;
2106 struct btrace_thread_info *btinfo;
2107
2108 btinfo = &tp->btrace;
2109 replay = btinfo->replay;
2110
2111 if (replay == NULL)
2112 return;
2113
2114 btrace_insn_end (&end, btinfo);
2115
2116 if (btrace_insn_cmp (replay, &end) == 0)
2117 record_btrace_stop_replaying (tp);
2118}
2119
f6ac5f3d 2120/* The resume method of target record-btrace. */
b2f4cfde 2121
f6ac5f3d
PA
2122void
2123record_btrace_target::resume (ptid_t ptid, int step, enum gdb_signal signal)
b2f4cfde 2124{
d2939ba2 2125 enum btrace_thread_flag flag, cflag;
52834460 2126
987e68b1 2127 DEBUG ("resume %s: %s%s", target_pid_to_str (ptid),
f6ac5f3d 2128 ::execution_direction == EXEC_REVERSE ? "reverse-" : "",
987e68b1 2129 step ? "step" : "cont");
52834460 2130
0ca912df
MM
2131 /* Store the execution direction of the last resume.
2132
f6ac5f3d 2133 If there is more than one resume call, we have to rely on infrun
0ca912df 2134 to not change the execution direction in-between. */
f6ac5f3d 2135 record_btrace_resume_exec_dir = ::execution_direction;
70ad5bff 2136
0ca912df 2137 /* As long as we're not replaying, just forward the request.
52834460 2138
0ca912df
MM
2139 For non-stop targets this means that no thread is replaying. In order to
2140 make progress, we may need to explicitly move replaying threads to the end
2141 of their execution history. */
f6ac5f3d
PA
2142 if ((::execution_direction != EXEC_REVERSE)
2143 && !record_is_replaying (minus_one_ptid))
b2f4cfde 2144 {
b6a8c27b 2145 this->beneath ()->resume (ptid, step, signal);
04c4fe8c 2146 return;
b2f4cfde
MM
2147 }
2148
52834460 2149 /* Compute the btrace thread flag for the requested move. */
f6ac5f3d 2150 if (::execution_direction == EXEC_REVERSE)
d2939ba2
MM
2151 {
2152 flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;
2153 cflag = BTHR_RCONT;
2154 }
52834460 2155 else
d2939ba2
MM
2156 {
2157 flag = step == 0 ? BTHR_CONT : BTHR_STEP;
2158 cflag = BTHR_CONT;
2159 }
52834460 2160
52834460 2161 /* We just indicate the resume intent here. The actual stepping happens in
d2939ba2
MM
2162 record_btrace_wait below.
2163
2164 For all-stop targets, we only step INFERIOR_PTID and continue others. */
2165 if (!target_is_non_stop_p ())
2166 {
26a57c92 2167 gdb_assert (inferior_ptid.matches (ptid));
d2939ba2 2168
08036331
PA
2169 for (thread_info *tp : all_non_exited_threads (ptid))
2170 {
2171 if (tp->ptid.matches (inferior_ptid))
2172 record_btrace_resume_thread (tp, flag);
2173 else
2174 record_btrace_resume_thread (tp, cflag);
2175 }
d2939ba2
MM
2176 }
2177 else
2178 {
08036331
PA
2179 for (thread_info *tp : all_non_exited_threads (ptid))
2180 record_btrace_resume_thread (tp, flag);
d2939ba2 2181 }
70ad5bff
MM
2182
2183 /* Async support. */
2184 if (target_can_async_p ())
2185 {
6a3753b3 2186 target_async (1);
70ad5bff
MM
2187 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2188 }
52834460
MM
2189}
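/* For reference (summarizing the code above, not adding to it): the resume
   request maps to btrace thread flags as follows, where "others" are the
   non-focus threads in all-stop mode.

     forward step -> BTHR_STEP  (others: BTHR_CONT)
     forward cont -> BTHR_CONT  (others: BTHR_CONT)
     reverse step -> BTHR_RSTEP (others: BTHR_RCONT)
     reverse cont -> BTHR_RCONT (others: BTHR_RCONT)  */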
2190
f6ac5f3d 2191/* The commit_resume method of target record-btrace. */
85ad3aaf 2192
f6ac5f3d
PA
2193void
2194record_btrace_target::commit_resume ()
85ad3aaf 2195{
f6ac5f3d
PA
2196 if ((::execution_direction != EXEC_REVERSE)
2197 && !record_is_replaying (minus_one_ptid))
b6a8c27b 2198 beneath ()->commit_resume ();
85ad3aaf
PA
2199}
2200
987e68b1
MM
2201/* Cancel resuming TP. */
2202
2203static void
2204record_btrace_cancel_resume (struct thread_info *tp)
2205{
2206 enum btrace_thread_flag flags;
2207
2208 flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
2209 if (flags == 0)
2210 return;
2211
43792cf0
PA
2212 DEBUG ("cancel resume thread %s (%s): %x (%s)",
2213 print_thread_id (tp),
987e68b1
MM
2214 target_pid_to_str (tp->ptid), flags,
2215 btrace_thread_flag_to_str (flags));
2216
2217 tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
e3cfc1c7 2218 record_btrace_stop_replaying_at_end (tp);
52834460
MM
2219}
2220
2221/* Return a target_waitstatus indicating that we ran out of history. */
2222
2223static struct target_waitstatus
2224btrace_step_no_history (void)
2225{
2226 struct target_waitstatus status;
2227
2228 status.kind = TARGET_WAITKIND_NO_HISTORY;
2229
2230 return status;
2231}
2232
2233/* Return a target_waitstatus indicating that a step finished. */
2234
2235static struct target_waitstatus
2236btrace_step_stopped (void)
2237{
2238 struct target_waitstatus status;
2239
2240 status.kind = TARGET_WAITKIND_STOPPED;
2241 status.value.sig = GDB_SIGNAL_TRAP;
2242
2243 return status;
2244}
2245
6e4879f0
MM
2246/* Return a target_waitstatus indicating that a thread was stopped as
2247 requested. */
2248
2249static struct target_waitstatus
2250btrace_step_stopped_on_request (void)
2251{
2252 struct target_waitstatus status;
2253
2254 status.kind = TARGET_WAITKIND_STOPPED;
2255 status.value.sig = GDB_SIGNAL_0;
2256
2257 return status;
2258}
2259
d825d248
MM
2260/* Return a target_waitstatus indicating a spurious stop. */
2261
2262static struct target_waitstatus
2263btrace_step_spurious (void)
2264{
2265 struct target_waitstatus status;
2266
2267 status.kind = TARGET_WAITKIND_SPURIOUS;
2268
2269 return status;
2270}
2271
e3cfc1c7
MM
2272/* Return a target_waitstatus indicating that the thread was not resumed. */
2273
2274static struct target_waitstatus
2275btrace_step_no_resumed (void)
2276{
2277 struct target_waitstatus status;
2278
2279 status.kind = TARGET_WAITKIND_NO_RESUMED;
2280
2281 return status;
2282}
2283
2284/* Return a target_waitstatus indicating that we should wait again. */
2285
2286static struct target_waitstatus
2287btrace_step_again (void)
2288{
2289 struct target_waitstatus status;
2290
2291 status.kind = TARGET_WAITKIND_IGNORE;
2292
2293 return status;
2294}
2295
52834460
MM
2296/* Clear the record histories. */
2297
2298static void
2299record_btrace_clear_histories (struct btrace_thread_info *btinfo)
2300{
2301 xfree (btinfo->insn_history);
2302 xfree (btinfo->call_history);
2303
2304 btinfo->insn_history = NULL;
2305 btinfo->call_history = NULL;
2306}
2307
3c615f99
MM
2308/* Check whether TP's current replay position is at a breakpoint. */
2309
2310static int
2311record_btrace_replay_at_breakpoint (struct thread_info *tp)
2312{
2313 struct btrace_insn_iterator *replay;
2314 struct btrace_thread_info *btinfo;
2315 const struct btrace_insn *insn;
3c615f99
MM
2316
2317 btinfo = &tp->btrace;
2318 replay = btinfo->replay;
2319
2320 if (replay == NULL)
2321 return 0;
2322
2323 insn = btrace_insn_get (replay);
2324 if (insn == NULL)
2325 return 0;
2326
00431a78 2327 return record_check_stopped_by_breakpoint (tp->inf->aspace, insn->pc,
3c615f99
MM
2328 &btinfo->stop_reason);
2329}
2330
d825d248 2331/* Step one instruction in forward direction. */
52834460
MM
2332
2333static struct target_waitstatus
d825d248 2334record_btrace_single_step_forward (struct thread_info *tp)
52834460 2335{
b61ce85c 2336 struct btrace_insn_iterator *replay, end, start;
52834460 2337 struct btrace_thread_info *btinfo;
52834460 2338
d825d248
MM
2339 btinfo = &tp->btrace;
2340 replay = btinfo->replay;
2341
2342 /* We're done if we're not replaying. */
2343 if (replay == NULL)
2344 return btrace_step_no_history ();
2345
011c71b6
MM
2346 /* Check if we're stepping a breakpoint. */
2347 if (record_btrace_replay_at_breakpoint (tp))
2348 return btrace_step_stopped ();
2349
b61ce85c
MM
2350 /* Skip gaps during replay. If we end up at a gap (at the end of the trace),
2351 jump back to the instruction at which we started. */
2352 start = *replay;
d825d248
MM
2353 do
2354 {
2355 unsigned int steps;
2356
e3cfc1c7
MM
2357 /* We will bail out here if we continue stepping after reaching the end
2358 of the execution history. */
d825d248
MM
2359 steps = btrace_insn_next (replay, 1);
2360 if (steps == 0)
b61ce85c
MM
2361 {
2362 *replay = start;
2363 return btrace_step_no_history ();
2364 }
d825d248
MM
2365 }
2366 while (btrace_insn_get (replay) == NULL);
2367
2368 /* Determine the end of the instruction trace. */
2369 btrace_insn_end (&end, btinfo);
2370
e3cfc1c7
MM
2371 /* The execution trace contains (and ends with) the current instruction.
2372 This instruction has not been executed, yet, so the trace really ends
2373 one instruction earlier. */
d825d248 2374 if (btrace_insn_cmp (replay, &end) == 0)
e3cfc1c7 2375 return btrace_step_no_history ();
d825d248
MM
2376
2377 return btrace_step_spurious ();
2378}
2379
2380/* Step one instruction in backward direction. */
2381
2382static struct target_waitstatus
2383record_btrace_single_step_backward (struct thread_info *tp)
2384{
b61ce85c 2385 struct btrace_insn_iterator *replay, start;
d825d248 2386 struct btrace_thread_info *btinfo;
e59fa00f 2387
52834460
MM
2388 btinfo = &tp->btrace;
2389 replay = btinfo->replay;
2390
d825d248
MM
2391 /* Start replaying if we're not already doing so. */
2392 if (replay == NULL)
2393 replay = record_btrace_start_replaying (tp);
2394
2395 /* If we can't step any further, we reached the end of the history.
b61ce85c
MM
2396 Skip gaps during replay. If we end up at a gap (at the beginning of
2397 the trace), jump back to the instruction at which we started. */
2398 start = *replay;
d825d248
MM
2399 do
2400 {
2401 unsigned int steps;
2402
2403 steps = btrace_insn_prev (replay, 1);
2404 if (steps == 0)
b61ce85c
MM
2405 {
2406 *replay = start;
2407 return btrace_step_no_history ();
2408 }
d825d248
MM
2409 }
2410 while (btrace_insn_get (replay) == NULL);
2411
011c71b6
MM
2412 /* Check if we're stepping a breakpoint.
2413
2414 For reverse-stepping, this check is after the step. There is logic in
2415 infrun.c that handles reverse-stepping separately. See, for example,
2416 proceed and adjust_pc_after_break.
2417
2418 This code assumes that for reverse-stepping, PC points to the last
2419 de-executed instruction, whereas for forward-stepping PC points to the
2420 next to-be-executed instruction. */
2421 if (record_btrace_replay_at_breakpoint (tp))
2422 return btrace_step_stopped ();
2423
d825d248
MM
2424 return btrace_step_spurious ();
2425}
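/* Illustrative sketch (not GDB code): the gap-skipping idiom used by the two
   single-step helpers above, reduced to a plain vector in which a negative
   entry stands for a gap.  Shown for the forward direction; the backward
   case is symmetric.  All names are made up for the example.  */
#if 0
#include <cstddef>
#include <optional>
#include <vector>

/* Advance IDX to the next real instruction, skipping gaps.  If we run off
   the end of TRACE, restore IDX and report "no history" via nullopt.  */
static std::optional<int>
next_insn (const std::vector<int> &trace, std::size_t &idx)
{
  std::size_t start = idx;

  do
    {
      if (idx + 1 >= trace.size ())
        {
          idx = start;
          return std::nullopt;
        }

      ++idx;
    }
  while (trace[idx] < 0);

  return trace[idx];
}
#endif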
2426
2427/* Step a single thread. */
2428
2429static struct target_waitstatus
2430record_btrace_step_thread (struct thread_info *tp)
2431{
2432 struct btrace_thread_info *btinfo;
2433 struct target_waitstatus status;
2434 enum btrace_thread_flag flags;
2435
2436 btinfo = &tp->btrace;
2437
6e4879f0
MM
2438 flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
2439 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
52834460 2440
43792cf0 2441 DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp),
987e68b1
MM
2442 target_pid_to_str (tp->ptid), flags,
2443 btrace_thread_flag_to_str (flags));
52834460 2444
6e4879f0
MM
2445 /* We can't step without an execution history. */
2446 if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
2447 return btrace_step_no_history ();
2448
52834460
MM
2449 switch (flags)
2450 {
2451 default:
2452 internal_error (__FILE__, __LINE__, _("invalid stepping type."));
2453
6e4879f0
MM
2454 case BTHR_STOP:
2455 return btrace_step_stopped_on_request ();
2456
52834460 2457 case BTHR_STEP:
d825d248
MM
2458 status = record_btrace_single_step_forward (tp);
2459 if (status.kind != TARGET_WAITKIND_SPURIOUS)
e3cfc1c7 2460 break;
52834460
MM
2461
2462 return btrace_step_stopped ();
2463
2464 case BTHR_RSTEP:
d825d248
MM
2465 status = record_btrace_single_step_backward (tp);
2466 if (status.kind != TARGET_WAITKIND_SPURIOUS)
e3cfc1c7 2467 break;
52834460
MM
2468
2469 return btrace_step_stopped ();
2470
2471 case BTHR_CONT:
e3cfc1c7
MM
2472 status = record_btrace_single_step_forward (tp);
2473 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2474 break;
52834460 2475
e3cfc1c7
MM
2476 btinfo->flags |= flags;
2477 return btrace_step_again ();
52834460
MM
2478
2479 case BTHR_RCONT:
e3cfc1c7
MM
2480 status = record_btrace_single_step_backward (tp);
2481 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2482 break;
52834460 2483
e3cfc1c7
MM
2484 btinfo->flags |= flags;
2485 return btrace_step_again ();
2486 }
d825d248 2487
f6ac5f3d 2488 /* We keep threads moving at the end of their execution history. The wait
e3cfc1c7
MM
2489 method will stop the thread for whom the event is reported. */
2490 if (status.kind == TARGET_WAITKIND_NO_HISTORY)
2491 btinfo->flags |= flags;
52834460 2492
e3cfc1c7 2493 return status;
b2f4cfde
MM
2494}
2495
a6b5be76
MM
2496/* Announce further events if necessary. */
2497
2498static void
53127008
SM
2499record_btrace_maybe_mark_async_event
2500 (const std::vector<thread_info *> &moving,
2501 const std::vector<thread_info *> &no_history)
a6b5be76 2502{
53127008
SM
2503 bool more_moving = !moving.empty ();
2504 bool more_no_history = !no_history.empty ();
a6b5be76
MM
2505
2506 if (!more_moving && !more_no_history)
2507 return;
2508
2509 if (more_moving)
2510 DEBUG ("movers pending");
2511
2512 if (more_no_history)
2513 DEBUG ("no-history pending");
2514
2515 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2516}
2517
f6ac5f3d 2518/* The wait method of target record-btrace. */
b2f4cfde 2519
f6ac5f3d
PA
2520ptid_t
2521record_btrace_target::wait (ptid_t ptid, struct target_waitstatus *status,
2522 int options)
b2f4cfde 2523{
53127008
SM
2524 std::vector<thread_info *> moving;
2525 std::vector<thread_info *> no_history;
52834460
MM
2526
2527 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);
2528
b2f4cfde 2529 /* As long as we're not replaying, just forward the request. */
f6ac5f3d
PA
2530 if ((::execution_direction != EXEC_REVERSE)
2531 && !record_is_replaying (minus_one_ptid))
b2f4cfde 2532 {
b6a8c27b 2533 return this->beneath ()->wait (ptid, status, options);
b2f4cfde
MM
2534 }
2535
e3cfc1c7 2536 /* Keep a work list of moving threads. */
08036331
PA
2537 for (thread_info *tp : all_non_exited_threads (ptid))
2538 if ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0)
2539 moving.push_back (tp);
e3cfc1c7 2540
53127008 2541 if (moving.empty ())
52834460 2542 {
e3cfc1c7 2543 *status = btrace_step_no_resumed ();
52834460 2544
e3cfc1c7 2545 DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid),
23fdd69e 2546 target_waitstatus_to_string (status).c_str ());
e3cfc1c7 2547
e3cfc1c7 2548 return null_ptid;
52834460
MM
2549 }
2550
e3cfc1c7
MM
2551 /* Step moving threads one by one, one step each, until either one thread
2552 reports an event or we run out of threads to step.
2553
2554 When stepping more than one thread, chances are that some threads reach
2555 the end of their execution history earlier than others. If we reported
2556 this immediately, all-stop on top of non-stop would stop all threads and
2557 resume the same threads next time. And we would report the same thread
2558 having reached the end of its execution history again.
2559
2560 In the worst case, this would starve the other threads. But even if other
2561 threads would be allowed to make progress, this would result in far too
2562 many intermediate stops.
2563
2564 We therefore delay the reporting of "no execution history" until we have
2565 nothing else to report. By this time, all threads should have moved to
2566 either the beginning or the end of their execution history. There will
2567 be a single user-visible stop. */
53127008
SM
2568 struct thread_info *eventing = NULL;
2569 while ((eventing == NULL) && !moving.empty ())
e3cfc1c7 2570 {
53127008 2571 for (unsigned int ix = 0; eventing == NULL && ix < moving.size ();)
e3cfc1c7 2572 {
53127008
SM
2573 thread_info *tp = moving[ix];
2574
e3cfc1c7
MM
2575 *status = record_btrace_step_thread (tp);
2576
2577 switch (status->kind)
2578 {
2579 case TARGET_WAITKIND_IGNORE:
2580 ix++;
2581 break;
2582
2583 case TARGET_WAITKIND_NO_HISTORY:
53127008 2584 no_history.push_back (ordered_remove (moving, ix));
e3cfc1c7
MM
2585 break;
2586
2587 default:
53127008 2588 eventing = unordered_remove (moving, ix);
e3cfc1c7
MM
2589 break;
2590 }
2591 }
2592 }
2593
2594 if (eventing == NULL)
2595 {
2596 /* We started with at least one moving thread. This thread must have
2597 either stopped or reached the end of its execution history.
2598
2599 In the former case, EVENTING must not be NULL.
2600 In the latter case, NO_HISTORY must not be empty. */
53127008 2601 gdb_assert (!no_history.empty ());
e3cfc1c7
MM
2602
2603 /* We kept threads moving at the end of their execution history. Stop
2604 EVENTING now that we are going to report its stop. */
53127008 2605 eventing = unordered_remove (no_history, 0);
e3cfc1c7
MM
2606 eventing->btrace.flags &= ~BTHR_MOVE;
2607
2608 *status = btrace_step_no_history ();
2609 }
2610
2611 gdb_assert (eventing != NULL);
2612
2613 /* We kept threads replaying at the end of their execution history. Stop
2614 replaying EVENTING now that we are going to report its stop. */
2615 record_btrace_stop_replaying_at_end (eventing);
52834460
MM
2616
2617 /* Stop all other threads. */
5953356c 2618 if (!target_is_non_stop_p ())
53127008 2619 {
08036331 2620 for (thread_info *tp : all_non_exited_threads ())
53127008
SM
2621 record_btrace_cancel_resume (tp);
2622 }
52834460 2623
a6b5be76
MM
2624 /* In async mode, we need to announce further events. */
2625 if (target_is_async_p ())
2626 record_btrace_maybe_mark_async_event (moving, no_history);
2627
52834460 2628 /* Start record histories anew from the current position. */
e3cfc1c7 2629 record_btrace_clear_histories (&eventing->btrace);
52834460
MM
2630
2631 /* We moved the replay position but did not update registers. */
00431a78 2632 registers_changed_thread (eventing);
e3cfc1c7 2633
43792cf0
PA
2634 DEBUG ("wait ended by thread %s (%s): %s",
2635 print_thread_id (eventing),
e3cfc1c7 2636 target_pid_to_str (eventing->ptid),
23fdd69e 2637 target_waitstatus_to_string (status).c_str ());
52834460 2638
e3cfc1c7 2639 return eventing->ptid;
52834460
MM
2640}
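/* Illustrative sketch (not GDB code): the scheduling loop of the wait method
   above, reduced to plain containers.  STEP is any callable that steps one
   thread once; threads that merely run out of history are reported only when
   nothing else is left, mirroring the delayed "no history" reporting above.
   All names are made up for the example.  */
#if 0
#include <cstddef>
#include <vector>

enum class step_result { ignore, no_history, event };

template<typename Thread, typename Step>
static Thread *
step_until_event (std::vector<Thread *> moving, Step step)
{
  std::vector<Thread *> no_history;
  Thread *eventing = nullptr;

  while (eventing == nullptr && !moving.empty ())
    for (std::size_t ix = 0; eventing == nullptr && ix < moving.size ();)
      switch (step (moving[ix]))
        {
        case step_result::ignore:
          ++ix;
          break;

        case step_result::no_history:
          no_history.push_back (moving[ix]);
          moving.erase (moving.begin () + ix);
          break;

        default:
          eventing = moving[ix];
          moving.erase (moving.begin () + ix);
          break;
        }

  /* Report end-of-history only when nothing else is left to report.  */
  if (eventing == nullptr && !no_history.empty ())
    eventing = no_history.front ();

  return eventing;
}
#endif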
2641
f6ac5f3d 2642/* The stop method of target record-btrace. */
6e4879f0 2643
f6ac5f3d
PA
2644void
2645record_btrace_target::stop (ptid_t ptid)
6e4879f0
MM
2646{
2647 DEBUG ("stop %s", target_pid_to_str (ptid));
2648
2649 /* As long as we're not replaying, just forward the request. */
f6ac5f3d
PA
2650 if ((::execution_direction != EXEC_REVERSE)
2651 && !record_is_replaying (minus_one_ptid))
6e4879f0 2652 {
b6a8c27b 2653 this->beneath ()->stop (ptid);
6e4879f0
MM
2654 }
2655 else
2656 {
08036331
PA
2657 for (thread_info *tp : all_non_exited_threads (ptid))
2658 {
2659 tp->btrace.flags &= ~BTHR_MOVE;
2660 tp->btrace.flags |= BTHR_STOP;
2661 }
6e4879f0
MM
2662 }
2663 }
2664
f6ac5f3d 2665/* The can_execute_reverse method of target record-btrace. */
52834460 2666
57810aa7 2667bool
f6ac5f3d 2668record_btrace_target::can_execute_reverse ()
52834460 2669{
57810aa7 2670 return true;
52834460
MM
2671}
2672
f6ac5f3d 2673/* The stopped_by_sw_breakpoint method of target record-btrace. */
52834460 2674
57810aa7 2675bool
f6ac5f3d 2676record_btrace_target::stopped_by_sw_breakpoint ()
52834460 2677{
f6ac5f3d 2678 if (record_is_replaying (minus_one_ptid))
9e8915c6
PA
2679 {
2680 struct thread_info *tp = inferior_thread ();
2681
2682 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2683 }
2684
b6a8c27b 2685 return this->beneath ()->stopped_by_sw_breakpoint ();
9e8915c6
PA
2686}
2687
f6ac5f3d 2688/* The supports_stopped_by_sw_breakpoint method of target
9e8915c6
PA
2689 record-btrace. */
2690
57810aa7 2691bool
f6ac5f3d 2692record_btrace_target::supports_stopped_by_sw_breakpoint ()
9e8915c6 2693{
f6ac5f3d 2694 if (record_is_replaying (minus_one_ptid))
57810aa7 2695 return true;
9e8915c6 2696
b6a8c27b 2697 return this->beneath ()->supports_stopped_by_sw_breakpoint ();
9e8915c6
PA
2698}
2699
f6ac5f3d 2700/* The stopped_by_hw_breakpoint method of target record-btrace. */
9e8915c6 2701
57810aa7 2702bool
f6ac5f3d 2703record_btrace_target::stopped_by_hw_breakpoint ()
9e8915c6 2704{
f6ac5f3d 2705 if (record_is_replaying (minus_one_ptid))
9e8915c6
PA
2706 {
2707 struct thread_info *tp = inferior_thread ();
2708
2709 return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2710 }
2711
b6a8c27b 2712 return this->beneath ()->stopped_by_hw_breakpoint ();
9e8915c6
PA
2713}
2714
f6ac5f3d 2715/* The supports_stopped_by_hw_breakpoint method of target
9e8915c6
PA
2716 record-btrace. */
2717
57810aa7 2718bool
f6ac5f3d 2719record_btrace_target::supports_stopped_by_hw_breakpoint ()
9e8915c6 2720{
f6ac5f3d 2721 if (record_is_replaying (minus_one_ptid))
57810aa7 2722 return true;
52834460 2723
b6a8c27b 2724 return this->beneath ()->supports_stopped_by_hw_breakpoint ();
b2f4cfde
MM
2725}
2726
f6ac5f3d 2727/* The update_thread_list method of target record-btrace. */
e2887aa3 2728
f6ac5f3d
PA
2729void
2730record_btrace_target::update_thread_list ()
e2887aa3 2731{
e8032dde 2732 /* We don't add or remove threads during replay. */
f6ac5f3d 2733 if (record_is_replaying (minus_one_ptid))
e2887aa3
MM
2734 return;
2735
2736 /* Forward the request. */
b6a8c27b 2737 this->beneath ()->update_thread_list ();
e2887aa3
MM
2738}
2739
f6ac5f3d 2740/* The thread_alive method of target record-btrace. */
e2887aa3 2741
57810aa7 2742bool
f6ac5f3d 2743record_btrace_target::thread_alive (ptid_t ptid)
e2887aa3
MM
2744{
2745 /* We don't add or remove threads during replay. */
f6ac5f3d 2746 if (record_is_replaying (minus_one_ptid))
00431a78 2747 return true;
e2887aa3
MM
2748
2749 /* Forward the request. */
b6a8c27b 2750 return this->beneath ()->thread_alive (ptid);
e2887aa3
MM
2751}
2752
066ce621
MM
2753/* Set the replay branch trace instruction iterator. If IT is NULL, replay
2754 is stopped. */
2755
2756static void
2757record_btrace_set_replay (struct thread_info *tp,
2758 const struct btrace_insn_iterator *it)
2759{
2760 struct btrace_thread_info *btinfo;
2761
2762 btinfo = &tp->btrace;
2763
a0f1b963 2764 if (it == NULL)
52834460 2765 record_btrace_stop_replaying (tp);
066ce621
MM
2766 else
2767 {
2768 if (btinfo->replay == NULL)
52834460 2769 record_btrace_start_replaying (tp);
066ce621
MM
2770 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
2771 return;
2772
2773 *btinfo->replay = *it;
00431a78 2774 registers_changed_thread (tp);
066ce621
MM
2775 }
2776
52834460
MM
2777 /* Start anew from the new replay position. */
2778 record_btrace_clear_histories (btinfo);
485668e5 2779
f2ffa92b
PA
2780 inferior_thread ()->suspend.stop_pc
2781 = regcache_read_pc (get_current_regcache ());
485668e5 2782 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
066ce621
MM
2783}
2784
f6ac5f3d 2785/* The goto_record_begin method of target record-btrace. */
066ce621 2786
f6ac5f3d
PA
2787void
2788record_btrace_target::goto_record_begin ()
066ce621
MM
2789{
2790 struct thread_info *tp;
2791 struct btrace_insn_iterator begin;
2792
2793 tp = require_btrace_thread ();
2794
2795 btrace_insn_begin (&begin, &tp->btrace);
b61ce85c
MM
2796
2797 /* Skip gaps at the beginning of the trace. */
2798 while (btrace_insn_get (&begin) == NULL)
2799 {
2800 unsigned int steps;
2801
2802 steps = btrace_insn_next (&begin, 1);
2803 if (steps == 0)
2804 error (_("No trace."));
2805 }
2806
066ce621 2807 record_btrace_set_replay (tp, &begin);
066ce621
MM
2808}
2809
f6ac5f3d 2810/* The goto_record_end method of target record-btrace. */
066ce621 2811
f6ac5f3d
PA
2812void
2813record_btrace_target::goto_record_end ()
066ce621
MM
2814{
2815 struct thread_info *tp;
2816
2817 tp = require_btrace_thread ();
2818
2819 record_btrace_set_replay (tp, NULL);
066ce621
MM
2820}
2821
f6ac5f3d 2822/* The goto_record method of target record-btrace. */
066ce621 2823
f6ac5f3d
PA
2824void
2825record_btrace_target::goto_record (ULONGEST insn)
066ce621
MM
2826{
2827 struct thread_info *tp;
2828 struct btrace_insn_iterator it;
2829 unsigned int number;
2830 int found;
2831
2832 number = insn;
2833
2834 /* Check for wrap-arounds. */
2835 if (number != insn)
2836 error (_("Instruction number out of range."));
2837
2838 tp = require_btrace_thread ();
2839
2840 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
69090cee
TW
2841
2842 /* Check if the instruction could not be found or is a gap. */
2843 if (found == 0 || btrace_insn_get (&it) == NULL)
066ce621
MM
2844 error (_("No such instruction."));
2845
2846 record_btrace_set_replay (tp, &it);
066ce621
MM
2847}
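/* Usage examples (illustrative; the instruction number is made up):

     (gdb) record goto begin
     (gdb) record goto end
     (gdb) record goto 42

   Instruction numbers are the ones shown by "record instruction-history";
   jumping to a gap or to a number outside the trace is rejected above.  */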
2848
f6ac5f3d 2849/* The record_stop_replaying method of target record-btrace. */
797094dd 2850
f6ac5f3d
PA
2851void
2852record_btrace_target::record_stop_replaying ()
797094dd 2853{
08036331 2854 for (thread_info *tp : all_non_exited_threads ())
797094dd
MM
2855 record_btrace_stop_replaying (tp);
2856}
2857
f6ac5f3d 2858/* The execution_direction target method. */
70ad5bff 2859
f6ac5f3d
PA
2860enum exec_direction_kind
2861record_btrace_target::execution_direction ()
70ad5bff
MM
2862{
2863 return record_btrace_resume_exec_dir;
2864}
2865
f6ac5f3d 2866/* The prepare_to_generate_core target method. */
aef92902 2867
f6ac5f3d
PA
2868void
2869record_btrace_target::prepare_to_generate_core ()
aef92902
MM
2870{
2871 record_btrace_generating_corefile = 1;
2872}
2873
f6ac5f3d 2874/* The done_generating_core target method. */
aef92902 2875
f6ac5f3d
PA
2876void
2877record_btrace_target::done_generating_core ()
aef92902
MM
2878{
2879 record_btrace_generating_corefile = 0;
2880}
2881
f4abbc16
MM
2882/* Start recording in BTS format. */
2883
2884static void
cdb34d4a 2885cmd_record_btrace_bts_start (const char *args, int from_tty)
f4abbc16 2886{
f4abbc16
MM
2887 if (args != NULL && *args != 0)
2888 error (_("Invalid argument."));
2889
2890 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2891
492d29ea
PA
2892 TRY
2893 {
95a6b0a1 2894 execute_command ("target record-btrace", from_tty);
492d29ea
PA
2895 }
2896 CATCH (exception, RETURN_MASK_ALL)
f4abbc16
MM
2897 {
2898 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2899 throw_exception (exception);
2900 }
492d29ea 2901 END_CATCH
f4abbc16
MM
2902}
2903
bc504a31 2904/* Start recording in Intel Processor Trace format. */
afedecd3
MM
2905
2906static void
cdb34d4a 2907cmd_record_btrace_pt_start (const char *args, int from_tty)
afedecd3
MM
2908{
2909 if (args != NULL && *args != 0)
2910 error (_("Invalid argument."));
2911
b20a6524 2912 record_btrace_conf.format = BTRACE_FORMAT_PT;
f4abbc16 2913
492d29ea
PA
2914 TRY
2915 {
95a6b0a1 2916 execute_command ("target record-btrace", from_tty);
492d29ea
PA
2917 }
2918 CATCH (exception, RETURN_MASK_ALL)
2919 {
2920 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2921 throw_exception (exception);
2922 }
2923 END_CATCH
afedecd3
MM
2924}
2925
b20a6524
MM
2926/* Alias for "target record". */
2927
2928static void
981a3fb3 2929cmd_record_btrace_start (const char *args, int from_tty)
b20a6524
MM
2930{
2931 if (args != NULL && *args != 0)
2932 error (_("Invalid argument."));
2933
2934 record_btrace_conf.format = BTRACE_FORMAT_PT;
2935
2936 TRY
2937 {
95a6b0a1 2938 execute_command ("target record-btrace", from_tty);
b20a6524
MM
2939 }
2940 CATCH (exception, RETURN_MASK_ALL)
2941 {
2942 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2943
2944 TRY
2945 {
95a6b0a1 2946 execute_command ("target record-btrace", from_tty);
b20a6524 2947 }
b926417a 2948 CATCH (ex, RETURN_MASK_ALL)
b20a6524
MM
2949 {
2950 record_btrace_conf.format = BTRACE_FORMAT_NONE;
b926417a 2951 throw_exception (ex);
b20a6524
MM
2952 }
2953 END_CATCH
2954 }
2955 END_CATCH
2956}
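/* Usage examples (illustrative): "record btrace" without an explicit format
   first tries Intel Processor Trace and falls back to BTS, as implemented
   above; a format can also be requested explicitly.

     (gdb) record btrace
     (gdb) record btrace bts
     (gdb) record btrace pt  */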
2957
67b5c0c1
MM
2958/* The "set record btrace" command. */
2959
2960static void
981a3fb3 2961cmd_set_record_btrace (const char *args, int from_tty)
67b5c0c1 2962{
b85310e1
MM
2963 printf_unfiltered (_("\"set record btrace\" must be followed "
2964 "by an appropriate subcommand.\n"));
2965 help_list (set_record_btrace_cmdlist, "set record btrace ",
2966 all_commands, gdb_stdout);
67b5c0c1
MM
2967}
2968
2969/* The "show record btrace" command. */
2970
2971static void
981a3fb3 2972cmd_show_record_btrace (const char *args, int from_tty)
67b5c0c1
MM
2973{
2974 cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
2975}
2976
2977/* The "show record btrace replay-memory-access" command. */
2978
2979static void
2980cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2981 struct cmd_list_element *c, const char *value)
2982{
2983 fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
2984 replay_memory_access);
2985}
2986
4a4495d6
MM
2987/* The "set record btrace cpu none" command. */
2988
2989static void
2990cmd_set_record_btrace_cpu_none (const char *args, int from_tty)
2991{
2992 if (args != nullptr && *args != 0)
2993 error (_("Trailing junk: '%s'."), args);
2994
2995 record_btrace_cpu_state = CS_NONE;
2996}
2997
2998/* The "set record btrace cpu auto" command. */
2999
3000static void
3001cmd_set_record_btrace_cpu_auto (const char *args, int from_tty)
3002{
3003 if (args != nullptr && *args != 0)
3004 error (_("Trailing junk: '%s'."), args);
3005
3006 record_btrace_cpu_state = CS_AUTO;
3007}
3008
3009/* The "set record btrace cpu" command. */
3010
3011static void
3012cmd_set_record_btrace_cpu (const char *args, int from_tty)
3013{
3014 if (args == nullptr)
3015 args = "";
3016
3017 /* We use a hard-coded vendor string for now. */
3018 unsigned int family, model, stepping;
3019 int l1, l2, matches = sscanf (args, "intel: %u/%u%n/%u%n", &family,
3020 &model, &l1, &stepping, &l2);
3021 if (matches == 3)
3022 {
3023 if (strlen (args) != l2)
3024 error (_("Trailing junk: '%s'."), args + l2);
3025 }
3026 else if (matches == 2)
3027 {
3028 if (strlen (args) != l1)
3029 error (_("Trailing junk: '%s'."), args + l1);
3030
3031 stepping = 0;
3032 }
3033 else
3034 error (_("Bad format. See \"help set record btrace cpu\"."));
3035
3036 if (USHRT_MAX < family)
3037 error (_("Cpu family too big."));
3038
3039 if (UCHAR_MAX < model)
3040 error (_("Cpu model too big."));
3041
3042 if (UCHAR_MAX < stepping)
3043 error (_("Cpu stepping too big."));
3044
3045 record_btrace_cpu.vendor = CV_INTEL;
3046 record_btrace_cpu.family = family;
3047 record_btrace_cpu.model = model;
3048 record_btrace_cpu.stepping = stepping;
3049
3050 record_btrace_cpu_state = CS_CPU;
3051}
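/* Usage examples (illustrative; the family/model/stepping values are made up
   and only the "intel" vendor is accepted above):

     (gdb) set record btrace cpu intel: 6/158
     (gdb) set record btrace cpu intel: 6/158/9
     (gdb) set record btrace cpu auto
     (gdb) set record btrace cpu none
     (gdb) show record btrace cpu
     btrace cpu is 'intel: 6/158/9'.  */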
3052
3053/* The "show record btrace cpu" command. */
3054
3055static void
3056cmd_show_record_btrace_cpu (const char *args, int from_tty)
3057{
4a4495d6
MM
3058 if (args != nullptr && *args != 0)
3059 error (_("Trailing junk: '%s'."), args);
3060
3061 switch (record_btrace_cpu_state)
3062 {
3063 case CS_AUTO:
3064 printf_unfiltered (_("btrace cpu is 'auto'.\n"));
3065 return;
3066
3067 case CS_NONE:
3068 printf_unfiltered (_("btrace cpu is 'none'.\n"));
3069 return;
3070
3071 case CS_CPU:
3072 switch (record_btrace_cpu.vendor)
3073 {
3074 case CV_INTEL:
3075 if (record_btrace_cpu.stepping == 0)
3076 printf_unfiltered (_("btrace cpu is 'intel: %u/%u'.\n"),
3077 record_btrace_cpu.family,
3078 record_btrace_cpu.model);
3079 else
3080 printf_unfiltered (_("btrace cpu is 'intel: %u/%u/%u'.\n"),
3081 record_btrace_cpu.family,
3082 record_btrace_cpu.model,
3083 record_btrace_cpu.stepping);
3084 return;
3085 }
3086 }
3087
3088 error (_("Internal error: bad cpu state."));
3089}
3090
3091/* The "s record btrace bts" command. */
d33501a5
MM
3092
3093static void
981a3fb3 3094cmd_set_record_btrace_bts (const char *args, int from_tty)
d33501a5
MM
3095{
3096 printf_unfiltered (_("\"set record btrace bts\" must be followed "
b20a6524 3097 "by an appropriate subcommand.\n"));
d33501a5
MM
3098 help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
3099 all_commands, gdb_stdout);
3100}
3101
3102/* The "show record btrace bts" command. */
3103
3104static void
981a3fb3 3105cmd_show_record_btrace_bts (const char *args, int from_tty)
d33501a5
MM
3106{
3107 cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
3108}
3109
b20a6524
MM
3110/* The "set record btrace pt" command. */
3111
3112static void
981a3fb3 3113cmd_set_record_btrace_pt (const char *args, int from_tty)
b20a6524
MM
3114{
3115 printf_unfiltered (_("\"set record btrace pt\" must be followed "
3116 "by an appropriate subcommand.\n"));
3117 help_list (set_record_btrace_pt_cmdlist, "set record btrace pt ",
3118 all_commands, gdb_stdout);
3119}
3120
3121/* The "show record btrace pt" command. */
3122
3123static void
981a3fb3 3124cmd_show_record_btrace_pt (const char *args, int from_tty)
b20a6524
MM
3125{
3126 cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, "");
3127}
3128
3129/* The "record bts buffer-size" show value function. */
3130
3131static void
3132show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
3133 struct cmd_list_element *c,
3134 const char *value)
3135{
3136 fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
3137 value);
3138}
3139
3140/* The "record pt buffer-size" show value function. */
3141
3142static void
3143show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
3144 struct cmd_list_element *c,
3145 const char *value)
3146{
3147 fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
3148 value);
3149}
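/* Usage examples (illustrative; the sizes are arbitrary):

     (gdb) set record btrace bts buffer-size 131072
     (gdb) set record btrace pt buffer-size 1048576
     (gdb) show record btrace pt buffer-size

   As the help text below notes, the size actually obtained may differ from
   the request; "info record" shows the real buffer size.  */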
3150
afedecd3
MM
3151/* Initialize btrace commands. */
3152
3153void
3154_initialize_record_btrace (void)
3155{
f4abbc16
MM
3156 add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
3157 _("Start branch trace recording."), &record_btrace_cmdlist,
3158 "record btrace ", 0, &record_cmdlist);
afedecd3
MM
3159 add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
3160
f4abbc16
MM
3161 add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
3162 _("\
3163Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
3164The processor stores a from/to record for each branch into a cyclic buffer.\n\
3165This format may not be available on all processors."),
3166 &record_btrace_cmdlist);
3167 add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);
3168
b20a6524
MM
3169 add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
3170 _("\
bc504a31 3171Start branch trace recording in Intel Processor Trace format.\n\n\
b20a6524
MM
3172This format may not be available on all processors."),
3173 &record_btrace_cmdlist);
3174 add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);
3175
67b5c0c1
MM
3176 add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
3177 _("Set record options"), &set_record_btrace_cmdlist,
3178 "set record btrace ", 0, &set_record_cmdlist);
3179
3180 add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
3181 _("Show record options"), &show_record_btrace_cmdlist,
3182 "show record btrace ", 0, &show_record_cmdlist);
3183
3184 add_setshow_enum_cmd ("replay-memory-access", no_class,
3185 replay_memory_access_types, &replay_memory_access, _("\
3186Set what memory accesses are allowed during replay."), _("\
3187Show what memory accesses are allowed during replay."),
3188 _("Default is READ-ONLY.\n\n\
3189The btrace record target does not trace data.\n\
3190The memory therefore corresponds to the live target and not \
3191to the current replay position.\n\n\
3192When READ-ONLY, allow accesses to read-only memory during replay.\n\
3193When READ-WRITE, allow accesses to read-only and read-write memory during \
3194replay."),
3195 NULL, cmd_show_replay_memory_access,
3196 &set_record_btrace_cmdlist,
3197 &show_record_btrace_cmdlist);
3198
4a4495d6
MM
3199 add_prefix_cmd ("cpu", class_support, cmd_set_record_btrace_cpu,
3200 _("\
3201Set the cpu to be used for trace decode.\n\n\
55063ddb
TT
3202The format is \"VENDOR:IDENTIFIER\" or \"none\" or \"auto\" (default).\n\
3203For vendor \"intel\" the format is \"FAMILY/MODEL[/STEPPING]\".\n\n\
4a4495d6
MM
3204When decoding branch trace, enable errata workarounds for the specified cpu.\n\
3205The default is \"auto\", which uses the cpu on which the trace was recorded.\n\
3206When GDB does not support that cpu, this option can be used to enable\n\
3207workarounds for a similar cpu that GDB supports.\n\n\
3208When set to \"none\", errata workarounds are disabled."),
3209 &set_record_btrace_cpu_cmdlist,
3210 _("set record btrace cpu "), 1,
3211 &set_record_btrace_cmdlist);
3212
3213 add_cmd ("auto", class_support, cmd_set_record_btrace_cpu_auto, _("\
3214Automatically determine the cpu to be used for trace decode."),
3215 &set_record_btrace_cpu_cmdlist);
3216
3217 add_cmd ("none", class_support, cmd_set_record_btrace_cpu_none, _("\
3218Do not enable errata workarounds for trace decode."),
3219 &set_record_btrace_cpu_cmdlist);
3220
3221 add_cmd ("cpu", class_support, cmd_show_record_btrace_cpu, _("\
3222Show the cpu to be used for trace decode."),
3223 &show_record_btrace_cmdlist);
3224
d33501a5
MM
3225 add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
3226 _("Set record btrace bts options"),
3227 &set_record_btrace_bts_cmdlist,
3228 "set record btrace bts ", 0, &set_record_btrace_cmdlist);
3229
3230 add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
3231 _("Show record btrace bts options"),
3232 &show_record_btrace_bts_cmdlist,
3233 "show record btrace bts ", 0, &show_record_btrace_cmdlist);
3234
3235 add_setshow_uinteger_cmd ("buffer-size", no_class,
3236 &record_btrace_conf.bts.size,
3237 _("Set the record/replay bts buffer size."),
3238 _("Show the record/replay bts buffer size."), _("\
3239When starting recording request a trace buffer of this size. \
3240The actual buffer size may differ from the requested size. \
3241Use \"info record\" to see the actual buffer size.\n\n\
3242Bigger buffers allow longer recording but also take more time to process \
3243the recorded execution trace.\n\n\
b20a6524
MM
3244The trace buffer size may not be changed while recording."), NULL,
3245 show_record_bts_buffer_size_value,
d33501a5
MM
3246 &set_record_btrace_bts_cmdlist,
3247 &show_record_btrace_bts_cmdlist);
3248
b20a6524
MM
3249 add_prefix_cmd ("pt", class_support, cmd_set_record_btrace_pt,
3250 _("Set record btrace pt options"),
3251 &set_record_btrace_pt_cmdlist,
3252 "set record btrace pt ", 0, &set_record_btrace_cmdlist);
3253
3254 add_prefix_cmd ("pt", class_support, cmd_show_record_btrace_pt,
3255 _("Show record btrace pt options"),
3256 &show_record_btrace_pt_cmdlist,
3257 "show record btrace pt ", 0, &show_record_btrace_cmdlist);
3258
3259 add_setshow_uinteger_cmd ("buffer-size", no_class,
3260 &record_btrace_conf.pt.size,
3261 _("Set the record/replay pt buffer size."),
3262 _("Show the record/replay pt buffer size."), _("\
3263Bigger buffers allow longer recording but also take more time to process \
3264the recorded execution.\n\
3265The actual buffer size may differ from the requested size. Use \"info record\" \
3266to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
3267 &set_record_btrace_pt_cmdlist,
3268 &show_record_btrace_pt_cmdlist);
3269
d9f719f1 3270 add_target (record_btrace_target_info, record_btrace_target_open);
0b722aec
MM
3271
3272 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
3273 xcalloc, xfree);
d33501a5
MM
3274
3275 record_btrace_conf.bts.size = 64 * 1024;
b20a6524 3276 record_btrace_conf.pt.size = 16 * 1024;
afedecd3 3277}