Use field_string in more places
[deliverable/binutils-gdb.git] / gdb / record-btrace.c
CommitLineData
afedecd3
MM
1/* Branch trace support for GDB, the GNU debugger.
2
42a4f53d 3 Copyright (C) 2013-2019 Free Software Foundation, Inc.
afedecd3
MM
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22#include "defs.h"
23#include "record.h"
c0272db5 24#include "record-btrace.h"
afedecd3
MM
25#include "gdbthread.h"
26#include "target.h"
27#include "gdbcmd.h"
28#include "disasm.h"
76727919 29#include "observable.h"
afedecd3
MM
30#include "cli/cli-utils.h"
31#include "source.h"
32#include "ui-out.h"
33#include "symtab.h"
34#include "filenames.h"
1f3ef581 35#include "regcache.h"
cecac1ab 36#include "frame-unwind.h"
0b722aec 37#include "hashtab.h"
45741a9c 38#include "infrun.h"
70ad5bff
MM
39#include "event-loop.h"
40#include "inf-loop.h"
268a13a5 41#include "gdbsupport/vec.h"
00431a78 42#include "inferior.h"
325fac50 43#include <algorithm>
0d12e84c 44#include "gdbarch.h"
afedecd3 45
d9f719f1
PA
46static const target_info record_btrace_target_info = {
47 "record-btrace",
48 N_("Branch tracing target"),
49 N_("Collect control-flow trace and provide the execution history.")
50};
51
afedecd3 52/* The target_ops of record-btrace. */
f6ac5f3d
PA
53
54class record_btrace_target final : public target_ops
55{
56public:
d9f719f1
PA
57 const target_info &info () const override
58 { return record_btrace_target_info; }
f6ac5f3d 59
66b4deae
PA
60 strata stratum () const override { return record_stratum; }
61
f6ac5f3d
PA
62 void close () override;
63 void async (int) override;
64
65 void detach (inferior *inf, int from_tty) override
66 { record_detach (this, inf, from_tty); }
67
68 void disconnect (const char *, int) override;
69
70 void mourn_inferior () override
71 { record_mourn_inferior (this); }
72
73 void kill () override
74 { record_kill (this); }
75
76 enum record_method record_method (ptid_t ptid) override;
77
78 void stop_recording () override;
79 void info_record () override;
80
81 void insn_history (int size, gdb_disassembly_flags flags) override;
82 void insn_history_from (ULONGEST from, int size,
83 gdb_disassembly_flags flags) override;
84 void insn_history_range (ULONGEST begin, ULONGEST end,
85 gdb_disassembly_flags flags) override;
86 void call_history (int size, record_print_flags flags) override;
87 void call_history_from (ULONGEST begin, int size, record_print_flags flags)
88 override;
89 void call_history_range (ULONGEST begin, ULONGEST end, record_print_flags flags)
90 override;
91
57810aa7
PA
92 bool record_is_replaying (ptid_t ptid) override;
93 bool record_will_replay (ptid_t ptid, int dir) override;
f6ac5f3d
PA
94 void record_stop_replaying () override;
95
96 enum target_xfer_status xfer_partial (enum target_object object,
97 const char *annex,
98 gdb_byte *readbuf,
99 const gdb_byte *writebuf,
100 ULONGEST offset, ULONGEST len,
101 ULONGEST *xfered_len) override;
102
103 int insert_breakpoint (struct gdbarch *,
104 struct bp_target_info *) override;
105 int remove_breakpoint (struct gdbarch *, struct bp_target_info *,
106 enum remove_bp_reason) override;
107
108 void fetch_registers (struct regcache *, int) override;
109
110 void store_registers (struct regcache *, int) override;
111 void prepare_to_store (struct regcache *) override;
112
113 const struct frame_unwind *get_unwinder () override;
114
115 const struct frame_unwind *get_tailcall_unwinder () override;
116
117 void commit_resume () override;
118 void resume (ptid_t, int, enum gdb_signal) override;
119 ptid_t wait (ptid_t, struct target_waitstatus *, int) override;
120
121 void stop (ptid_t) override;
122 void update_thread_list () override;
57810aa7 123 bool thread_alive (ptid_t ptid) override;
f6ac5f3d
PA
124 void goto_record_begin () override;
125 void goto_record_end () override;
126 void goto_record (ULONGEST insn) override;
127
57810aa7 128 bool can_execute_reverse () override;
f6ac5f3d 129
57810aa7
PA
130 bool stopped_by_sw_breakpoint () override;
131 bool supports_stopped_by_sw_breakpoint () override;
f6ac5f3d 132
57810aa7
PA
133 bool stopped_by_hw_breakpoint () override;
134 bool supports_stopped_by_hw_breakpoint () override;
f6ac5f3d
PA
135
136 enum exec_direction_kind execution_direction () override;
137 void prepare_to_generate_core () override;
138 void done_generating_core () override;
139};
140
141static record_btrace_target record_btrace_ops;
142
143/* Initialize the record-btrace target ops. */
afedecd3 144
76727919
TT
145/* Token associated with a new-thread observer enabling branch tracing
146 for the new thread. */
3dcfdc58 147static const gdb::observers::token record_btrace_thread_observer_token {};
afedecd3 148
67b5c0c1
MM
149/* Memory access types used in set/show record btrace replay-memory-access. */
150static const char replay_memory_access_read_only[] = "read-only";
151static const char replay_memory_access_read_write[] = "read-write";
152static const char *const replay_memory_access_types[] =
153{
154 replay_memory_access_read_only,
155 replay_memory_access_read_write,
156 NULL
157};
158
159/* The currently allowed replay memory access type. */
160static const char *replay_memory_access = replay_memory_access_read_only;
161
4a4495d6
MM
162/* The cpu state kinds. */
163enum record_btrace_cpu_state_kind
164{
165 CS_AUTO,
166 CS_NONE,
167 CS_CPU
168};
169
170/* The current cpu state. */
171static enum record_btrace_cpu_state_kind record_btrace_cpu_state = CS_AUTO;
172
173/* The current cpu for trace decode. */
174static struct btrace_cpu record_btrace_cpu;
175
67b5c0c1
MM
176/* Command lists for "set/show record btrace". */
177static struct cmd_list_element *set_record_btrace_cmdlist;
178static struct cmd_list_element *show_record_btrace_cmdlist;
633785ff 179
70ad5bff
MM
180/* The execution direction of the last resume we got. See record-full.c. */
181static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;
182
183/* The async event handler for reverse/replay execution. */
184static struct async_event_handler *record_btrace_async_inferior_event_handler;
185
aef92902
MM
186/* A flag indicating that we are currently generating a core file. */
187static int record_btrace_generating_corefile;
188
f4abbc16
MM
189/* The current branch trace configuration. */
190static struct btrace_config record_btrace_conf;
191
192/* Command list for "record btrace". */
193static struct cmd_list_element *record_btrace_cmdlist;
194
d33501a5
MM
195/* Command lists for "set/show record btrace bts". */
196static struct cmd_list_element *set_record_btrace_bts_cmdlist;
197static struct cmd_list_element *show_record_btrace_bts_cmdlist;
198
b20a6524
MM
199/* Command lists for "set/show record btrace pt". */
200static struct cmd_list_element *set_record_btrace_pt_cmdlist;
201static struct cmd_list_element *show_record_btrace_pt_cmdlist;
202
4a4495d6
MM
203/* Command list for "set record btrace cpu". */
204static struct cmd_list_element *set_record_btrace_cpu_cmdlist;
205
afedecd3
MM
206/* Print a record-btrace debug message. Use do ... while (0) to avoid
207 ambiguities when used in if statements. */
208
209#define DEBUG(msg, args...) \
210 do \
211 { \
212 if (record_debug != 0) \
213 fprintf_unfiltered (gdb_stdlog, \
214 "[record-btrace] " msg "\n", ##args); \
215 } \
216 while (0)
217
218
4a4495d6
MM
219/* Return the cpu configured by the user. Returns NULL if the cpu was
220 configured as auto. */
221const struct btrace_cpu *
222record_btrace_get_cpu (void)
223{
224 switch (record_btrace_cpu_state)
225 {
226 case CS_AUTO:
227 return nullptr;
228
229 case CS_NONE:
230 record_btrace_cpu.vendor = CV_UNKNOWN;
231 /* Fall through. */
232 case CS_CPU:
233 return &record_btrace_cpu;
234 }
235
236 error (_("Internal error: bad record btrace cpu state."));
237}
238
afedecd3 239/* Update the branch trace for the current thread and return a pointer to its
066ce621 240 thread_info.
afedecd3
MM
241
242 Throws an error if there is no thread or no trace. This function never
243 returns NULL. */
244
066ce621
MM
245static struct thread_info *
246require_btrace_thread (void)
afedecd3 247{
afedecd3
MM
248 DEBUG ("require");
249
00431a78 250 if (inferior_ptid == null_ptid)
afedecd3
MM
251 error (_("No thread."));
252
00431a78
PA
253 thread_info *tp = inferior_thread ();
254
cd4007e4
MM
255 validate_registers_access ();
256
4a4495d6 257 btrace_fetch (tp, record_btrace_get_cpu ());
afedecd3 258
6e07b1d2 259 if (btrace_is_empty (tp))
afedecd3
MM
260 error (_("No trace."));
261
066ce621
MM
262 return tp;
263}
264
265/* Update the branch trace for the current thread and return a pointer to its
266 branch trace information struct.
267
268 Throws an error if there is no thread or no trace. This function never
269 returns NULL. */
270
271static struct btrace_thread_info *
272require_btrace (void)
273{
274 struct thread_info *tp;
275
276 tp = require_btrace_thread ();
277
278 return &tp->btrace;
afedecd3
MM
279}
280
281/* Enable branch tracing for one thread. Warn on errors. */
282
283static void
284record_btrace_enable_warn (struct thread_info *tp)
285{
a70b8144 286 try
492d29ea
PA
287 {
288 btrace_enable (tp, &record_btrace_conf);
289 }
230d2906 290 catch (const gdb_exception_error &error)
492d29ea 291 {
3d6e9d23 292 warning ("%s", error.what ());
492d29ea 293 }
afedecd3
MM
294}
295
afedecd3
MM
296/* Enable automatic tracing of new threads. */
297
298static void
299record_btrace_auto_enable (void)
300{
301 DEBUG ("attach thread observer");
302
76727919
TT
303 gdb::observers::new_thread.attach (record_btrace_enable_warn,
304 record_btrace_thread_observer_token);
afedecd3
MM
305}
306
307/* Disable automatic tracing of new threads. */
308
309static void
310record_btrace_auto_disable (void)
311{
afedecd3
MM
312 DEBUG ("detach thread observer");
313
76727919 314 gdb::observers::new_thread.detach (record_btrace_thread_observer_token);
afedecd3
MM
315}
316
70ad5bff
MM
317/* The record-btrace async event handler function. */
318
319static void
320record_btrace_handle_async_inferior_event (gdb_client_data data)
321{
322 inferior_event_handler (INF_REG_EVENT, NULL);
323}
324
c0272db5
TW
325/* See record-btrace.h. */
326
327void
328record_btrace_push_target (void)
329{
330 const char *format;
331
332 record_btrace_auto_enable ();
333
334 push_target (&record_btrace_ops);
335
336 record_btrace_async_inferior_event_handler
337 = create_async_event_handler (record_btrace_handle_async_inferior_event,
338 NULL);
339 record_btrace_generating_corefile = 0;
340
341 format = btrace_format_short_string (record_btrace_conf.format);
76727919 342 gdb::observers::record_changed.notify (current_inferior (), 1, "btrace", format);
c0272db5
TW
343}
344
228f1508
SM
345/* Disable btrace on a set of threads on scope exit. */
346
347struct scoped_btrace_disable
348{
349 scoped_btrace_disable () = default;
350
351 DISABLE_COPY_AND_ASSIGN (scoped_btrace_disable);
352
353 ~scoped_btrace_disable ()
354 {
355 for (thread_info *tp : m_threads)
356 btrace_disable (tp);
357 }
358
359 void add_thread (thread_info *thread)
360 {
361 m_threads.push_front (thread);
362 }
363
364 void discard ()
365 {
366 m_threads.clear ();
367 }
368
369private:
370 std::forward_list<thread_info *> m_threads;
371};
372
d9f719f1 373/* Open target record-btrace. */
afedecd3 374
d9f719f1
PA
375static void
376record_btrace_target_open (const char *args, int from_tty)
afedecd3 377{
228f1508
SM
378 /* If we fail to enable btrace for one thread, disable it for the threads for
379 which it was successfully enabled. */
380 scoped_btrace_disable btrace_disable;
afedecd3
MM
381
382 DEBUG ("open");
383
8213266a 384 record_preopen ();
afedecd3
MM
385
386 if (!target_has_execution)
387 error (_("The program is not being run."));
388
08036331 389 for (thread_info *tp : all_non_exited_threads ())
5d5658a1 390 if (args == NULL || *args == 0 || number_is_in_list (args, tp->global_num))
afedecd3 391 {
f4abbc16 392 btrace_enable (tp, &record_btrace_conf);
afedecd3 393
228f1508 394 btrace_disable.add_thread (tp);
afedecd3
MM
395 }
396
c0272db5 397 record_btrace_push_target ();
afedecd3 398
228f1508 399 btrace_disable.discard ();
afedecd3
MM
400}
401
f6ac5f3d 402/* The stop_recording method of target record-btrace. */
afedecd3 403
f6ac5f3d
PA
404void
405record_btrace_target::stop_recording ()
afedecd3 406{
afedecd3
MM
407 DEBUG ("stop recording");
408
409 record_btrace_auto_disable ();
410
08036331 411 for (thread_info *tp : all_non_exited_threads ())
afedecd3
MM
412 if (tp->btrace.target != NULL)
413 btrace_disable (tp);
414}
415
f6ac5f3d 416/* The disconnect method of target record-btrace. */
c0272db5 417
f6ac5f3d
PA
418void
419record_btrace_target::disconnect (const char *args,
420 int from_tty)
c0272db5 421{
b6a8c27b 422 struct target_ops *beneath = this->beneath ();
c0272db5
TW
423
424 /* Do not stop recording, just clean up GDB side. */
f6ac5f3d 425 unpush_target (this);
c0272db5
TW
426
427 /* Forward disconnect. */
f6ac5f3d 428 beneath->disconnect (args, from_tty);
c0272db5
TW
429}
430
f6ac5f3d 431/* The close method of target record-btrace. */
afedecd3 432
f6ac5f3d
PA
433void
434record_btrace_target::close ()
afedecd3 435{
70ad5bff
MM
436 if (record_btrace_async_inferior_event_handler != NULL)
437 delete_async_event_handler (&record_btrace_async_inferior_event_handler);
438
99c819ee
MM
439 /* Make sure automatic recording gets disabled even if we did not stop
440 recording before closing the record-btrace target. */
441 record_btrace_auto_disable ();
442
568e808b
MM
443 /* We should have already stopped recording.
444 Tear down btrace in case we have not. */
08036331 445 for (thread_info *tp : all_non_exited_threads ())
568e808b 446 btrace_teardown (tp);
afedecd3
MM
447}
448
f6ac5f3d 449/* The async method of target record-btrace. */
b7d2e916 450
f6ac5f3d
PA
451void
452record_btrace_target::async (int enable)
b7d2e916 453{
6a3753b3 454 if (enable)
b7d2e916
PA
455 mark_async_event_handler (record_btrace_async_inferior_event_handler);
456 else
457 clear_async_event_handler (record_btrace_async_inferior_event_handler);
458
b6a8c27b 459 this->beneath ()->async (enable);
b7d2e916
PA
460}
461
d33501a5
MM
/* Scale *SIZE down to a whole number of GB, MB, or kB and return the
   matching suffix.  If *SIZE is not an exact multiple of any of those,
   leave it unchanged and return the empty string.  */

static const char *
record_btrace_adjust_size (unsigned int *size)
{
  /* Largest unit first so exact multiples pick the biggest suffix.  */
  static const struct
  {
    unsigned int shift;
    const char *suffix;
  } scales[] =
  {
    { 30, "GB" },
    { 20, "MB" },
    { 10, "kB" }
  };
  unsigned int val = *size;
  unsigned int i;

  for (i = 0; i < sizeof (scales) / sizeof (scales[0]); ++i)
    if ((val & ((1u << scales[i].shift) - 1)) == 0)
      {
	*size = val >> scales[i].shift;
	return scales[i].suffix;
      }

  return "";
}
489
490/* Print a BTS configuration. */
491
492static void
493record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
494{
495 const char *suffix;
496 unsigned int size;
497
498 size = conf->size;
499 if (size > 0)
500 {
501 suffix = record_btrace_adjust_size (&size);
502 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
503 }
504}
505
bc504a31 506/* Print an Intel Processor Trace configuration. */
b20a6524
MM
507
508static void
509record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
510{
511 const char *suffix;
512 unsigned int size;
513
514 size = conf->size;
515 if (size > 0)
516 {
517 suffix = record_btrace_adjust_size (&size);
518 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
519 }
520}
521
d33501a5
MM
522/* Print a branch tracing configuration. */
523
524static void
525record_btrace_print_conf (const struct btrace_config *conf)
526{
527 printf_unfiltered (_("Recording format: %s.\n"),
528 btrace_format_string (conf->format));
529
530 switch (conf->format)
531 {
532 case BTRACE_FORMAT_NONE:
533 return;
534
535 case BTRACE_FORMAT_BTS:
536 record_btrace_print_bts_conf (&conf->bts);
537 return;
b20a6524
MM
538
539 case BTRACE_FORMAT_PT:
540 record_btrace_print_pt_conf (&conf->pt);
541 return;
d33501a5
MM
542 }
543
544 internal_error (__FILE__, __LINE__, _("Unkown branch trace format."));
545}
546
f6ac5f3d 547/* The info_record method of target record-btrace. */
afedecd3 548
f6ac5f3d
PA
549void
550record_btrace_target::info_record ()
afedecd3
MM
551{
552 struct btrace_thread_info *btinfo;
f4abbc16 553 const struct btrace_config *conf;
afedecd3 554 struct thread_info *tp;
31fd9caa 555 unsigned int insns, calls, gaps;
afedecd3
MM
556
557 DEBUG ("info");
558
559 tp = find_thread_ptid (inferior_ptid);
560 if (tp == NULL)
561 error (_("No thread."));
562
cd4007e4
MM
563 validate_registers_access ();
564
f4abbc16
MM
565 btinfo = &tp->btrace;
566
f6ac5f3d 567 conf = ::btrace_conf (btinfo);
f4abbc16 568 if (conf != NULL)
d33501a5 569 record_btrace_print_conf (conf);
f4abbc16 570
4a4495d6 571 btrace_fetch (tp, record_btrace_get_cpu ());
afedecd3 572
23a7fe75
MM
573 insns = 0;
574 calls = 0;
31fd9caa 575 gaps = 0;
23a7fe75 576
6e07b1d2 577 if (!btrace_is_empty (tp))
23a7fe75
MM
578 {
579 struct btrace_call_iterator call;
580 struct btrace_insn_iterator insn;
581
582 btrace_call_end (&call, btinfo);
583 btrace_call_prev (&call, 1);
5de9129b 584 calls = btrace_call_number (&call);
23a7fe75
MM
585
586 btrace_insn_end (&insn, btinfo);
5de9129b 587 insns = btrace_insn_number (&insn);
31fd9caa 588
69090cee
TW
589 /* If the last instruction is not a gap, it is the current instruction
590 that is not actually part of the record. */
591 if (btrace_insn_get (&insn) != NULL)
592 insns -= 1;
31fd9caa
MM
593
594 gaps = btinfo->ngaps;
23a7fe75 595 }
afedecd3 596
31fd9caa 597 printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
43792cf0 598 "for thread %s (%s).\n"), insns, calls, gaps,
a068643d
TT
599 print_thread_id (tp),
600 target_pid_to_str (tp->ptid).c_str ());
07bbe694
MM
601
602 if (btrace_is_replaying (tp))
603 printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
604 btrace_insn_number (btinfo->replay));
afedecd3
MM
605}
606
31fd9caa
MM
607/* Print a decode error. */
608
609static void
610btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
611 enum btrace_format format)
612{
508352a9 613 const char *errstr = btrace_decode_error (format, errcode);
31fd9caa 614
112e8700 615 uiout->text (_("["));
508352a9
TW
616 /* ERRCODE > 0 indicates notifications on BTRACE_FORMAT_PT. */
617 if (!(format == BTRACE_FORMAT_PT && errcode > 0))
31fd9caa 618 {
112e8700
SM
619 uiout->text (_("decode error ("));
620 uiout->field_int ("errcode", errcode);
621 uiout->text (_("): "));
31fd9caa 622 }
112e8700
SM
623 uiout->text (errstr);
624 uiout->text (_("]\n"));
31fd9caa
MM
625}
626
afedecd3
MM
627/* Print an unsigned int. */
628
629static void
630ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
631{
112e8700 632 uiout->field_fmt (fld, "%u", val);
afedecd3
MM
633}
634
f94cc897
MM
/* A range of source lines.  */

struct btrace_line_range
{
  /* The symtab this line is from.  */
  struct symtab *symtab;

  /* The first line (inclusive).  */
  int begin;

  /* The last line (exclusive).  */
  int end;
};
648
649/* Construct a line range. */
650
651static struct btrace_line_range
652btrace_mk_line_range (struct symtab *symtab, int begin, int end)
653{
654 struct btrace_line_range range;
655
656 range.symtab = symtab;
657 range.begin = begin;
658 range.end = end;
659
660 return range;
661}
662
663/* Add a line to a line range. */
664
665static struct btrace_line_range
666btrace_line_range_add (struct btrace_line_range range, int line)
667{
668 if (range.end <= range.begin)
669 {
670 /* This is the first entry. */
671 range.begin = line;
672 range.end = line + 1;
673 }
674 else if (line < range.begin)
675 range.begin = line;
676 else if (range.end < line)
677 range.end = line;
678
679 return range;
680}
681
682/* Return non-zero if RANGE is empty, zero otherwise. */
683
684static int
685btrace_line_range_is_empty (struct btrace_line_range range)
686{
687 return range.end <= range.begin;
688}
689
690/* Return non-zero if LHS contains RHS, zero otherwise. */
691
692static int
693btrace_line_range_contains_range (struct btrace_line_range lhs,
694 struct btrace_line_range rhs)
695{
696 return ((lhs.symtab == rhs.symtab)
697 && (lhs.begin <= rhs.begin)
698 && (rhs.end <= lhs.end));
699}
700
701/* Find the line range associated with PC. */
702
703static struct btrace_line_range
704btrace_find_line_range (CORE_ADDR pc)
705{
706 struct btrace_line_range range;
707 struct linetable_entry *lines;
708 struct linetable *ltable;
709 struct symtab *symtab;
710 int nlines, i;
711
712 symtab = find_pc_line_symtab (pc);
713 if (symtab == NULL)
714 return btrace_mk_line_range (NULL, 0, 0);
715
716 ltable = SYMTAB_LINETABLE (symtab);
717 if (ltable == NULL)
718 return btrace_mk_line_range (symtab, 0, 0);
719
720 nlines = ltable->nitems;
721 lines = ltable->item;
722 if (nlines <= 0)
723 return btrace_mk_line_range (symtab, 0, 0);
724
725 range = btrace_mk_line_range (symtab, 0, 0);
726 for (i = 0; i < nlines - 1; i++)
727 {
728 if ((lines[i].pc == pc) && (lines[i].line != 0))
729 range = btrace_line_range_add (range, lines[i].line);
730 }
731
732 return range;
733}
734
735/* Print source lines in LINES to UIOUT.
736
737 UI_ITEM_CHAIN is a cleanup chain for the last source line and the
738 instructions corresponding to that source line. When printing a new source
739 line, we do the cleanups for the open chain and open a new cleanup chain for
740 the new source line. If the source line range in LINES is not empty, this
741 function will leave the cleanup chain for the last printed source line open
742 so instructions can be added to it. */
743
744static void
745btrace_print_lines (struct btrace_line_range lines, struct ui_out *uiout,
7ea78b59
SM
746 gdb::optional<ui_out_emit_tuple> *src_and_asm_tuple,
747 gdb::optional<ui_out_emit_list> *asm_list,
748 gdb_disassembly_flags flags)
f94cc897 749{
8d297bbf 750 print_source_lines_flags psl_flags;
f94cc897 751
f94cc897
MM
752 if (flags & DISASSEMBLY_FILENAME)
753 psl_flags |= PRINT_SOURCE_LINES_FILENAME;
754
7ea78b59 755 for (int line = lines.begin; line < lines.end; ++line)
f94cc897 756 {
7ea78b59 757 asm_list->reset ();
f94cc897 758
7ea78b59 759 src_and_asm_tuple->emplace (uiout, "src_and_asm_line");
f94cc897
MM
760
761 print_source_lines (lines.symtab, line, line + 1, psl_flags);
762
7ea78b59 763 asm_list->emplace (uiout, "line_asm_insn");
f94cc897
MM
764 }
765}
766
afedecd3
MM
767/* Disassemble a section of the recorded instruction trace. */
768
769static void
23a7fe75 770btrace_insn_history (struct ui_out *uiout,
31fd9caa 771 const struct btrace_thread_info *btinfo,
23a7fe75 772 const struct btrace_insn_iterator *begin,
9a24775b
PA
773 const struct btrace_insn_iterator *end,
774 gdb_disassembly_flags flags)
afedecd3 775{
9a24775b
PA
776 DEBUG ("itrace (0x%x): [%u; %u)", (unsigned) flags,
777 btrace_insn_number (begin), btrace_insn_number (end));
afedecd3 778
f94cc897
MM
779 flags |= DISASSEMBLY_SPECULATIVE;
780
7ea78b59
SM
781 struct gdbarch *gdbarch = target_gdbarch ();
782 btrace_line_range last_lines = btrace_mk_line_range (NULL, 0, 0);
f94cc897 783
7ea78b59 784 ui_out_emit_list list_emitter (uiout, "asm_insns");
f94cc897 785
7ea78b59
SM
786 gdb::optional<ui_out_emit_tuple> src_and_asm_tuple;
787 gdb::optional<ui_out_emit_list> asm_list;
afedecd3 788
8b172ce7
PA
789 gdb_pretty_print_disassembler disasm (gdbarch);
790
7ea78b59
SM
791 for (btrace_insn_iterator it = *begin; btrace_insn_cmp (&it, end) != 0;
792 btrace_insn_next (&it, 1))
afedecd3 793 {
23a7fe75
MM
794 const struct btrace_insn *insn;
795
796 insn = btrace_insn_get (&it);
797
31fd9caa
MM
798 /* A NULL instruction indicates a gap in the trace. */
799 if (insn == NULL)
800 {
801 const struct btrace_config *conf;
802
803 conf = btrace_conf (btinfo);
afedecd3 804
31fd9caa
MM
805 /* We have trace so we must have a configuration. */
806 gdb_assert (conf != NULL);
807
69090cee
TW
808 uiout->field_fmt ("insn-number", "%u",
809 btrace_insn_number (&it));
810 uiout->text ("\t");
811
812 btrace_ui_out_decode_error (uiout, btrace_insn_get_error (&it),
31fd9caa
MM
813 conf->format);
814 }
815 else
816 {
f94cc897 817 struct disasm_insn dinsn;
da8c46d2 818
f94cc897 819 if ((flags & DISASSEMBLY_SOURCE) != 0)
da8c46d2 820 {
f94cc897
MM
821 struct btrace_line_range lines;
822
823 lines = btrace_find_line_range (insn->pc);
824 if (!btrace_line_range_is_empty (lines)
825 && !btrace_line_range_contains_range (last_lines, lines))
826 {
7ea78b59
SM
827 btrace_print_lines (lines, uiout, &src_and_asm_tuple, &asm_list,
828 flags);
f94cc897
MM
829 last_lines = lines;
830 }
7ea78b59 831 else if (!src_and_asm_tuple.has_value ())
f94cc897 832 {
7ea78b59
SM
833 gdb_assert (!asm_list.has_value ());
834
835 src_and_asm_tuple.emplace (uiout, "src_and_asm_line");
836
f94cc897 837 /* No source information. */
7ea78b59 838 asm_list.emplace (uiout, "line_asm_insn");
f94cc897
MM
839 }
840
7ea78b59
SM
841 gdb_assert (src_and_asm_tuple.has_value ());
842 gdb_assert (asm_list.has_value ());
da8c46d2 843 }
da8c46d2 844
f94cc897
MM
845 memset (&dinsn, 0, sizeof (dinsn));
846 dinsn.number = btrace_insn_number (&it);
847 dinsn.addr = insn->pc;
31fd9caa 848
da8c46d2 849 if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
f94cc897 850 dinsn.is_speculative = 1;
da8c46d2 851
8b172ce7 852 disasm.pretty_print_insn (uiout, &dinsn, flags);
31fd9caa 853 }
afedecd3
MM
854 }
855}
856
f6ac5f3d 857/* The insn_history method of target record-btrace. */
afedecd3 858
f6ac5f3d
PA
859void
860record_btrace_target::insn_history (int size, gdb_disassembly_flags flags)
afedecd3
MM
861{
862 struct btrace_thread_info *btinfo;
23a7fe75
MM
863 struct btrace_insn_history *history;
864 struct btrace_insn_iterator begin, end;
afedecd3 865 struct ui_out *uiout;
23a7fe75 866 unsigned int context, covered;
afedecd3
MM
867
868 uiout = current_uiout;
2e783024 869 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
afedecd3 870 context = abs (size);
afedecd3
MM
871 if (context == 0)
872 error (_("Bad record instruction-history-size."));
873
23a7fe75
MM
874 btinfo = require_btrace ();
875 history = btinfo->insn_history;
876 if (history == NULL)
afedecd3 877 {
07bbe694 878 struct btrace_insn_iterator *replay;
afedecd3 879
9a24775b 880 DEBUG ("insn-history (0x%x): %d", (unsigned) flags, size);
afedecd3 881
07bbe694
MM
882 /* If we're replaying, we start at the replay position. Otherwise, we
883 start at the tail of the trace. */
884 replay = btinfo->replay;
885 if (replay != NULL)
886 begin = *replay;
887 else
888 btrace_insn_end (&begin, btinfo);
889
890 /* We start from here and expand in the requested direction. Then we
891 expand in the other direction, as well, to fill up any remaining
892 context. */
893 end = begin;
894 if (size < 0)
895 {
896 /* We want the current position covered, as well. */
897 covered = btrace_insn_next (&end, 1);
898 covered += btrace_insn_prev (&begin, context - covered);
899 covered += btrace_insn_next (&end, context - covered);
900 }
901 else
902 {
903 covered = btrace_insn_next (&end, context);
904 covered += btrace_insn_prev (&begin, context - covered);
905 }
afedecd3
MM
906 }
907 else
908 {
23a7fe75
MM
909 begin = history->begin;
910 end = history->end;
afedecd3 911
9a24775b 912 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", (unsigned) flags, size,
23a7fe75 913 btrace_insn_number (&begin), btrace_insn_number (&end));
afedecd3 914
23a7fe75
MM
915 if (size < 0)
916 {
917 end = begin;
918 covered = btrace_insn_prev (&begin, context);
919 }
920 else
921 {
922 begin = end;
923 covered = btrace_insn_next (&end, context);
924 }
afedecd3
MM
925 }
926
23a7fe75 927 if (covered > 0)
31fd9caa 928 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
23a7fe75
MM
929 else
930 {
931 if (size < 0)
932 printf_unfiltered (_("At the start of the branch trace record.\n"));
933 else
934 printf_unfiltered (_("At the end of the branch trace record.\n"));
935 }
afedecd3 936
23a7fe75 937 btrace_set_insn_history (btinfo, &begin, &end);
afedecd3
MM
938}
939
f6ac5f3d 940/* The insn_history_range method of target record-btrace. */
afedecd3 941
f6ac5f3d
PA
942void
943record_btrace_target::insn_history_range (ULONGEST from, ULONGEST to,
944 gdb_disassembly_flags flags)
afedecd3
MM
945{
946 struct btrace_thread_info *btinfo;
23a7fe75 947 struct btrace_insn_iterator begin, end;
afedecd3 948 struct ui_out *uiout;
23a7fe75
MM
949 unsigned int low, high;
950 int found;
afedecd3
MM
951
952 uiout = current_uiout;
2e783024 953 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
23a7fe75
MM
954 low = from;
955 high = to;
afedecd3 956
9a24775b 957 DEBUG ("insn-history (0x%x): [%u; %u)", (unsigned) flags, low, high);
afedecd3
MM
958
959 /* Check for wrap-arounds. */
23a7fe75 960 if (low != from || high != to)
afedecd3
MM
961 error (_("Bad range."));
962
0688d04e 963 if (high < low)
afedecd3
MM
964 error (_("Bad range."));
965
23a7fe75 966 btinfo = require_btrace ();
afedecd3 967
23a7fe75
MM
968 found = btrace_find_insn_by_number (&begin, btinfo, low);
969 if (found == 0)
970 error (_("Range out of bounds."));
afedecd3 971
23a7fe75
MM
972 found = btrace_find_insn_by_number (&end, btinfo, high);
973 if (found == 0)
0688d04e
MM
974 {
975 /* Silently truncate the range. */
976 btrace_insn_end (&end, btinfo);
977 }
978 else
979 {
980 /* We want both begin and end to be inclusive. */
981 btrace_insn_next (&end, 1);
982 }
afedecd3 983
31fd9caa 984 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
23a7fe75 985 btrace_set_insn_history (btinfo, &begin, &end);
afedecd3
MM
986}
987
f6ac5f3d 988/* The insn_history_from method of target record-btrace. */
afedecd3 989
f6ac5f3d
PA
990void
991record_btrace_target::insn_history_from (ULONGEST from, int size,
992 gdb_disassembly_flags flags)
afedecd3
MM
993{
994 ULONGEST begin, end, context;
995
996 context = abs (size);
0688d04e
MM
997 if (context == 0)
998 error (_("Bad record instruction-history-size."));
afedecd3
MM
999
1000 if (size < 0)
1001 {
1002 end = from;
1003
1004 if (from < context)
1005 begin = 0;
1006 else
0688d04e 1007 begin = from - context + 1;
afedecd3
MM
1008 }
1009 else
1010 {
1011 begin = from;
0688d04e 1012 end = from + context - 1;
afedecd3
MM
1013
1014 /* Check for wrap-around. */
1015 if (end < begin)
1016 end = ULONGEST_MAX;
1017 }
1018
f6ac5f3d 1019 insn_history_range (begin, end, flags);
afedecd3
MM
1020}
1021
1022/* Print the instruction number range for a function call history line. */
1023
1024static void
23a7fe75
MM
1025btrace_call_history_insn_range (struct ui_out *uiout,
1026 const struct btrace_function *bfun)
afedecd3 1027{
7acbe133
MM
1028 unsigned int begin, end, size;
1029
0860c437 1030 size = bfun->insn.size ();
7acbe133 1031 gdb_assert (size > 0);
afedecd3 1032
23a7fe75 1033 begin = bfun->insn_offset;
7acbe133 1034 end = begin + size - 1;
afedecd3 1035
23a7fe75 1036 ui_out_field_uint (uiout, "insn begin", begin);
112e8700 1037 uiout->text (",");
23a7fe75 1038 ui_out_field_uint (uiout, "insn end", end);
afedecd3
MM
1039}
1040
ce0dfbea
MM
1041/* Compute the lowest and highest source line for the instructions in BFUN
1042 and return them in PBEGIN and PEND.
1043 Ignore instructions that can't be mapped to BFUN, e.g. instructions that
1044 result from inlining or macro expansion. */
1045
1046static void
1047btrace_compute_src_line_range (const struct btrace_function *bfun,
1048 int *pbegin, int *pend)
1049{
ce0dfbea
MM
1050 struct symtab *symtab;
1051 struct symbol *sym;
ce0dfbea
MM
1052 int begin, end;
1053
1054 begin = INT_MAX;
1055 end = INT_MIN;
1056
1057 sym = bfun->sym;
1058 if (sym == NULL)
1059 goto out;
1060
1061 symtab = symbol_symtab (sym);
1062
0860c437 1063 for (const btrace_insn &insn : bfun->insn)
ce0dfbea
MM
1064 {
1065 struct symtab_and_line sal;
1066
0860c437 1067 sal = find_pc_line (insn.pc, 0);
ce0dfbea
MM
1068 if (sal.symtab != symtab || sal.line == 0)
1069 continue;
1070
325fac50
PA
1071 begin = std::min (begin, sal.line);
1072 end = std::max (end, sal.line);
ce0dfbea
MM
1073 }
1074
1075 out:
1076 *pbegin = begin;
1077 *pend = end;
1078}
1079
afedecd3
MM
1080/* Print the source line information for a function call history line. */
1081
1082static void
23a7fe75
MM
1083btrace_call_history_src_line (struct ui_out *uiout,
1084 const struct btrace_function *bfun)
afedecd3
MM
1085{
1086 struct symbol *sym;
23a7fe75 1087 int begin, end;
afedecd3
MM
1088
1089 sym = bfun->sym;
1090 if (sym == NULL)
1091 return;
1092
112e8700 1093 uiout->field_string ("file",
cbe56571
TT
1094 symtab_to_filename_for_display (symbol_symtab (sym)),
1095 ui_out_style_kind::FILE);
afedecd3 1096
ce0dfbea 1097 btrace_compute_src_line_range (bfun, &begin, &end);
23a7fe75 1098 if (end < begin)
afedecd3
MM
1099 return;
1100
112e8700
SM
1101 uiout->text (":");
1102 uiout->field_int ("min line", begin);
afedecd3 1103
23a7fe75 1104 if (end == begin)
afedecd3
MM
1105 return;
1106
112e8700
SM
1107 uiout->text (",");
1108 uiout->field_int ("max line", end);
afedecd3
MM
1109}
1110
0b722aec
MM
1111/* Get the name of a branch trace function. */
1112
1113static const char *
1114btrace_get_bfun_name (const struct btrace_function *bfun)
1115{
1116 struct minimal_symbol *msym;
1117 struct symbol *sym;
1118
1119 if (bfun == NULL)
1120 return "??";
1121
1122 msym = bfun->msym;
1123 sym = bfun->sym;
1124
1125 if (sym != NULL)
1126 return SYMBOL_PRINT_NAME (sym);
1127 else if (msym != NULL)
efd66ac6 1128 return MSYMBOL_PRINT_NAME (msym);
0b722aec
MM
1129 else
1130 return "??";
1131}
1132
/* Disassemble a section of the recorded function trace.

   Print one line per function segment in [BEGIN; END), consisting of the
   segment index, optional call-depth indentation, the function name, and
   (depending on INT_FLAGS) the instruction range and source line range.
   Trace gaps are printed as decoded error messages instead.  */

static void
btrace_call_history (struct ui_out *uiout,
		     const struct btrace_thread_info *btinfo,
		     const struct btrace_call_iterator *begin,
		     const struct btrace_call_iterator *end,
		     int int_flags)
{
  struct btrace_call_iterator it;
  record_print_flags flags = (enum record_print_flag) int_flags;

  DEBUG ("ftrace (0x%x): [%u; %u)", int_flags, btrace_call_number (begin),
	 btrace_call_number (end));

  for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
    {
      const struct btrace_function *bfun;
      struct minimal_symbol *msym;
      struct symbol *sym;

      bfun = btrace_call_get (&it);
      sym = bfun->sym;
      msym = bfun->msym;

      /* Print the function index.  */
      ui_out_field_uint (uiout, "index", bfun->number);
      uiout->text ("\t");

      /* Indicate gaps in the trace.  */
      if (bfun->errcode != 0)
	{
	  const struct btrace_config *conf;

	  conf = btrace_conf (btinfo);

	  /* We have trace so we must have a configuration.  */
	  gdb_assert (conf != NULL);

	  btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);

	  /* A gap carries no function information; go on to the next
	     segment.  */
	  continue;
	}

      /* Indent by the call depth, combining the segment's level with the
	 thread-wide level adjustment.  */
      if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
	{
	  int level = bfun->level + btinfo->level, i;

	  for (i = 0; i < level; ++i)
	    uiout->text ("  ");
	}

      /* Prefer the full symbol's name; fall back to the minimal symbol.
	 For CLI output only, print "??" when no symbol is known.  */
      if (sym != NULL)
	uiout->field_string ("function", SYMBOL_PRINT_NAME (sym),
			     ui_out_style_kind::FUNCTION);
      else if (msym != NULL)
	uiout->field_string ("function", MSYMBOL_PRINT_NAME (msym),
			     ui_out_style_kind::FUNCTION);
      else if (!uiout->is_mi_like_p ())
	uiout->field_string ("function", "??",
			     ui_out_style_kind::FUNCTION);

      if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
	{
	  uiout->text (_("\tinst "));
	  btrace_call_history_insn_range (uiout, bfun);
	}

      if ((flags & RECORD_PRINT_SRC_LINE) != 0)
	{
	  uiout->text (_("\tat "));
	  btrace_call_history_src_line (uiout, bfun);
	}

      uiout->text ("\n");
    }
}
1210
/* The call_history method of target record-btrace.

   Print SIZE function-call history lines, continuing from the previous
   browsing position if one exists, otherwise starting at the replay
   position (when replaying) or the end of the trace.  A negative SIZE
   browses backwards.  The resulting window is remembered for the next
   invocation.  */

void
record_btrace_target::call_history (int size, record_print_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  /* NOTE(review): the tuple id says "insn history"; looks like a
     copy/paste from the instruction-history variant — confirm before
     changing, since this is part of the MI output.  */
  ui_out_emit_tuple tuple_emitter (uiout, "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  btinfo = require_btrace ();
  history = btinfo->call_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("call-history (0x%x): %d", (int) flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
	 start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
	{
	  begin.btinfo = btinfo;
	  begin.index = replay->call_index;
	}
      else
	btrace_call_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
	 expand in the other direction, as well, to fill up any remaining
	 context.  */
      end = begin;
      if (size < 0)
	{
	  /* We want the current position covered, as well.  */
	  covered = btrace_call_next (&end, 1);
	  covered += btrace_call_prev (&begin, context - covered);
	  covered += btrace_call_next (&end, context - covered);
	}
      else
	{
	  covered = btrace_call_next (&end, context);
	  covered += btrace_call_prev (&begin, context - covered);
	}
    }
  else
    {
      /* Continue browsing from the previously shown window.  */
      begin = history->begin;
      end = history->end;

      DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", (int) flags, size,
	     btrace_call_number (&begin), btrace_call_number (&end));

      if (size < 0)
	{
	  end = begin;
	  covered = btrace_call_prev (&begin, context);
	}
      else
	{
	  begin = end;
	  covered = btrace_call_next (&end, context);
	}
    }

  if (covered > 0)
    btrace_call_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      /* Nothing left in the requested direction.  */
      if (size < 0)
	printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
	printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  /* Remember the window for the next call_history invocation.  */
  btrace_set_call_history (btinfo, &begin, &end);
}
1296
/* The call_history_range method of target record-btrace.

   Print the function-call history for the inclusive range [FROM; TO].
   Errors out on empty or out-of-range input; silently truncates a TO
   beyond the end of the trace.  */

void
record_btrace_target::call_history_range (ULONGEST from, ULONGEST to,
					  record_print_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_iterator begin, end;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "func history");
  low = from;
  high = to;

  DEBUG ("call-history (0x%x): [%u; %u)", (int) flags, low, high);

  /* Check for wrap-arounds.  Call numbers are unsigned int; a ULONGEST
     that does not survive the narrowing round-trip is out of range.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_call_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_call_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_call_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_call_next (&end, 1);
    }

  btrace_call_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_call_history (btinfo, &begin, &end);
}
1344
f6ac5f3d 1345/* The call_history_from method of target record-btrace. */
afedecd3 1346
f6ac5f3d
PA
1347void
1348record_btrace_target::call_history_from (ULONGEST from, int size,
1349 record_print_flags flags)
afedecd3
MM
1350{
1351 ULONGEST begin, end, context;
1352
1353 context = abs (size);
0688d04e
MM
1354 if (context == 0)
1355 error (_("Bad record function-call-history-size."));
afedecd3
MM
1356
1357 if (size < 0)
1358 {
1359 end = from;
1360
1361 if (from < context)
1362 begin = 0;
1363 else
0688d04e 1364 begin = from - context + 1;
afedecd3
MM
1365 }
1366 else
1367 {
1368 begin = from;
0688d04e 1369 end = from + context - 1;
afedecd3
MM
1370
1371 /* Check for wrap-around. */
1372 if (end < begin)
1373 end = ULONGEST_MAX;
1374 }
1375
f6ac5f3d 1376 call_history_range ( begin, end, flags);
afedecd3
MM
1377}
1378
f6ac5f3d 1379/* The record_method method of target record-btrace. */
b158a20f 1380
f6ac5f3d
PA
1381enum record_method
1382record_btrace_target::record_method (ptid_t ptid)
b158a20f 1383{
b158a20f
TW
1384 struct thread_info * const tp = find_thread_ptid (ptid);
1385
1386 if (tp == NULL)
1387 error (_("No thread."));
1388
1389 if (tp->btrace.target == NULL)
1390 return RECORD_METHOD_NONE;
1391
1392 return RECORD_METHOD_BTRACE;
1393}
1394
f6ac5f3d 1395/* The record_is_replaying method of target record-btrace. */
07bbe694 1396
57810aa7 1397bool
f6ac5f3d 1398record_btrace_target::record_is_replaying (ptid_t ptid)
07bbe694 1399{
08036331
PA
1400 for (thread_info *tp : all_non_exited_threads (ptid))
1401 if (btrace_is_replaying (tp))
57810aa7 1402 return true;
07bbe694 1403
57810aa7 1404 return false;
07bbe694
MM
1405}
1406
f6ac5f3d 1407/* The record_will_replay method of target record-btrace. */
7ff27e9b 1408
57810aa7 1409bool
f6ac5f3d 1410record_btrace_target::record_will_replay (ptid_t ptid, int dir)
7ff27e9b 1411{
f6ac5f3d 1412 return dir == EXEC_REVERSE || record_is_replaying (ptid);
7ff27e9b
MM
1413}
1414
/* The xfer_partial method of target record-btrace.

   While replaying with read-only memory access, reject memory writes and
   reads of non-readonly memory (the recorded trace does not contain memory
   content, so such accesses would see the wrong program state).  All other
   requests are forwarded to the target beneath.  */

enum target_xfer_status
record_btrace_target::xfer_partial (enum target_object object,
				    const char *annex, gdb_byte *readbuf,
				    const gdb_byte *writebuf, ULONGEST offset,
				    ULONGEST len, ULONGEST *xfered_len)
{
  /* Filter out requests that don't make sense during replay.  */
  if (replay_memory_access == replay_memory_access_read_only
      && !record_btrace_generating_corefile
      && record_is_replaying (inferior_ptid))
    {
      switch (object)
	{
	case TARGET_OBJECT_MEMORY:
	  {
	    struct target_section *section;

	    /* We do not allow writing memory in general.  */
	    if (writebuf != NULL)
	      {
		*xfered_len = len;
		return TARGET_XFER_UNAVAILABLE;
	      }

	    /* We allow reading readonly memory.  */
	    section = target_section_by_addr (this, offset);
	    if (section != NULL)
	      {
		/* Check if the section we found is readonly.  */
		if ((bfd_get_section_flags (section->the_bfd_section->owner,
					    section->the_bfd_section)
		     & SEC_READONLY) != 0)
		  {
		    /* Truncate the request to fit into this section.  */
		    len = std::min (len, section->endaddr - offset);
		    break;
		  }
	      }

	    /* Not readonly: report the whole request as unavailable.  */
	    *xfered_len = len;
	    return TARGET_XFER_UNAVAILABLE;
	  }
	}
    }

  /* Forward the request.  */
  return this->beneath ()->xfer_partial (object, annex, readbuf, writebuf,
					 offset, len, xfered_len);
}
1466
/* The insert_breakpoint method of target record-btrace.

   Temporarily lifts the replay memory-access restriction so the target
   beneath can write the breakpoint instruction; the previous access mode
   is restored on both the normal and the exception path.  */

int
record_btrace_target::insert_breakpoint (struct gdbarch *gdbarch,
					 struct bp_target_info *bp_tgt)
{
  const char *old;
  int ret;

  /* Inserting breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  try
    {
      ret = this->beneath ()->insert_breakpoint (gdbarch, bp_tgt);
    }
  catch (const gdb_exception &except)
    {
      /* Restore the access mode before propagating the error.  */
      replay_memory_access = old;
      throw;
    }
  replay_memory_access = old;

  return ret;
}
1495
/* The remove_breakpoint method of target record-btrace.

   Mirror image of insert_breakpoint: temporarily allows memory writes so
   the target beneath can restore the original instruction, restoring the
   previous access mode on both the normal and the exception path.  */

int
record_btrace_target::remove_breakpoint (struct gdbarch *gdbarch,
					 struct bp_target_info *bp_tgt,
					 enum remove_bp_reason reason)
{
  const char *old;
  int ret;

  /* Removing breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  try
    {
      ret = this->beneath ()->remove_breakpoint (gdbarch, bp_tgt, reason);
    }
  catch (const gdb_exception &except)
    {
      /* Restore the access mode before propagating the error.  */
      replay_memory_access = old;
      throw;
    }
  replay_memory_access = old;

  return ret;
}
1525
/* The fetch_registers method of target record-btrace.

   While replaying, only the PC is known (taken from the replay iterator's
   current instruction); all other register requests are silently left
   unsatisfied.  When not replaying, forward to the target beneath.  */

void
record_btrace_target::fetch_registers (struct regcache *regcache, int regno)
{
  struct btrace_insn_iterator *replay;
  struct thread_info *tp;

  tp = find_thread_ptid (regcache->ptid ());
  gdb_assert (tp != NULL);

  replay = tp->btrace.replay;
  if (replay != NULL && !record_btrace_generating_corefile)
    {
      const struct btrace_insn *insn;
      struct gdbarch *gdbarch;
      int pcreg;

      gdbarch = regcache->arch ();
      pcreg = gdbarch_pc_regnum (gdbarch);
      if (pcreg < 0)
	return;

      /* We can only provide the PC register.  */
      if (regno >= 0 && regno != pcreg)
	return;

      insn = btrace_insn_get (replay);
      gdb_assert (insn != NULL);

      regcache->raw_supply (regno, &insn->pc);
    }
  else
    this->beneath ()->fetch_registers (regcache, regno);
}
1561
f6ac5f3d 1562/* The store_registers method of target record-btrace. */
1f3ef581 1563
f6ac5f3d
PA
1564void
1565record_btrace_target::store_registers (struct regcache *regcache, int regno)
1f3ef581 1566{
a52eab48 1567 if (!record_btrace_generating_corefile
222312d3 1568 && record_is_replaying (regcache->ptid ()))
4d10e986 1569 error (_("Cannot write registers while replaying."));
1f3ef581
MM
1570
1571 gdb_assert (may_write_registers != 0);
1572
b6a8c27b 1573 this->beneath ()->store_registers (regcache, regno);
1f3ef581
MM
1574}
1575
f6ac5f3d 1576/* The prepare_to_store method of target record-btrace. */
1f3ef581 1577
f6ac5f3d
PA
1578void
1579record_btrace_target::prepare_to_store (struct regcache *regcache)
1f3ef581 1580{
a52eab48 1581 if (!record_btrace_generating_corefile
222312d3 1582 && record_is_replaying (regcache->ptid ()))
1f3ef581
MM
1583 return;
1584
b6a8c27b 1585 this->beneath ()->prepare_to_store (regcache);
1f3ef581
MM
1586}
1587
/* The branch trace frame cache.

   Associates a frame_info with the branch trace function segment it was
   built from, so the unwinder callbacks can recover their state.  */

struct btrace_frame_cache
{
  /* The thread.  */
  struct thread_info *tp;

  /* The frame info.  */
  struct frame_info *frame;

  /* The branch trace function segment.  */
  const struct btrace_function *bfun;
};

/* A struct btrace_frame_cache hash table indexed by NEXT.  */

static htab_t bfcache;
1605
1606/* hash_f for htab_create_alloc of bfcache. */
1607
1608static hashval_t
1609bfcache_hash (const void *arg)
1610{
19ba03f4
SM
1611 const struct btrace_frame_cache *cache
1612 = (const struct btrace_frame_cache *) arg;
0b722aec
MM
1613
1614 return htab_hash_pointer (cache->frame);
1615}
1616
1617/* eq_f for htab_create_alloc of bfcache. */
1618
1619static int
1620bfcache_eq (const void *arg1, const void *arg2)
1621{
19ba03f4
SM
1622 const struct btrace_frame_cache *cache1
1623 = (const struct btrace_frame_cache *) arg1;
1624 const struct btrace_frame_cache *cache2
1625 = (const struct btrace_frame_cache *) arg2;
0b722aec
MM
1626
1627 return cache1->frame == cache2->frame;
1628}
1629
1630/* Create a new btrace frame cache. */
1631
1632static struct btrace_frame_cache *
1633bfcache_new (struct frame_info *frame)
1634{
1635 struct btrace_frame_cache *cache;
1636 void **slot;
1637
1638 cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
1639 cache->frame = frame;
1640
1641 slot = htab_find_slot (bfcache, cache, INSERT);
1642 gdb_assert (*slot == NULL);
1643 *slot = cache;
1644
1645 return cache;
1646}
1647
1648/* Extract the branch trace function from a branch trace frame. */
1649
1650static const struct btrace_function *
1651btrace_get_frame_function (struct frame_info *frame)
1652{
1653 const struct btrace_frame_cache *cache;
0b722aec
MM
1654 struct btrace_frame_cache pattern;
1655 void **slot;
1656
1657 pattern.frame = frame;
1658
1659 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1660 if (slot == NULL)
1661 return NULL;
1662
19ba03f4 1663 cache = (const struct btrace_frame_cache *) *slot;
0b722aec
MM
1664 return cache->bfun;
1665}
1666
cecac1ab
MM
1667/* Implement stop_reason method for record_btrace_frame_unwind. */
1668
1669static enum unwind_stop_reason
1670record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1671 void **this_cache)
1672{
0b722aec
MM
1673 const struct btrace_frame_cache *cache;
1674 const struct btrace_function *bfun;
1675
19ba03f4 1676 cache = (const struct btrace_frame_cache *) *this_cache;
0b722aec
MM
1677 bfun = cache->bfun;
1678 gdb_assert (bfun != NULL);
1679
42bfe59e 1680 if (bfun->up == 0)
0b722aec
MM
1681 return UNWIND_UNAVAILABLE;
1682
1683 return UNWIND_NO_REASON;
cecac1ab
MM
1684}
1685
/* Implement this_id method for record_btrace_frame_unwind.

   Builds an unavailable-stack frame id from the frame's function start
   address and the number of the first segment of this function instance
   (found by walking the PREV links), so all segments of one function
   instance share the same id.  */

static void
record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
			     struct frame_id *this_id)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;
  struct btrace_call_iterator it;
  CORE_ADDR code, special;

  cache = (const struct btrace_frame_cache *) *this_cache;

  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  /* Walk back to the first segment of this function instance.  */
  while (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->prev) != 0)
    bfun = btrace_call_get (&it);

  code = get_frame_func (this_frame);
  special = bfun->number;

  *this_id = frame_id_build_unavailable_stack_special (code, special);

  DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
	 btrace_get_bfun_name (cache->bfun),
	 core_addr_to_string_nz (this_id->code_addr),
	 core_addr_to_string_nz (this_id->special_addr));
}
1715
/* Implement prev_register method for record_btrace_frame_unwind.

   Only the PC can be unwound from the recorded trace.  The caller's PC is
   reconstructed from the caller segment: for a link established by a
   return instruction it is the caller's first instruction, otherwise the
   instruction following the call.  Everything else is unavailable.  */

static struct value *
record_btrace_frame_prev_register (struct frame_info *this_frame,
				   void **this_cache,
				   int regnum)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun, *caller;
  struct btrace_call_iterator it;
  struct gdbarch *gdbarch;
  CORE_ADDR pc;
  int pcreg;

  gdbarch = get_frame_arch (this_frame);
  pcreg = gdbarch_pc_regnum (gdbarch);
  if (pcreg < 0 || regnum != pcreg)
    throw_error (NOT_AVAILABLE_ERROR,
		 _("Registers are not available in btrace record history"));

  cache = (const struct btrace_frame_cache *) *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  if (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->up) == 0)
    throw_error (NOT_AVAILABLE_ERROR,
		 _("No caller in btrace record history"));

  caller = btrace_call_get (&it);

  if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
    pc = caller->insn.front ().pc;
  else
    {
      /* The caller resumes right after the call instruction.  */
      pc = caller->insn.back ().pc;
      pc += gdb_insn_length (gdbarch, pc);
    }

  DEBUG ("[frame] unwound PC in %s on level %d: %s",
	 btrace_get_bfun_name (bfun), bfun->level,
	 core_addr_to_string_nz (pc));

  return frame_unwind_got_address (this_frame, regnum, pc);
}
1760
/* Implement sniffer method for record_btrace_frame_unwind.

   Claims the frame when replaying: the innermost frame maps to the replay
   iterator's current function segment; an outer frame maps to the caller
   of the next (inner) frame's segment, unless that link is a tail call
   (handled by the tailcall sniffer instead).  */

static int
record_btrace_frame_sniffer (const struct frame_unwind *self,
			     struct frame_info *this_frame,
			     void **this_cache)
{
  const struct btrace_function *bfun;
  struct btrace_frame_cache *cache;
  struct thread_info *tp;
  struct frame_info *next;

  /* THIS_FRAME does not contain a reference to its thread.  */
  tp = inferior_thread ();

  bfun = NULL;
  next = get_next_frame (this_frame);
  if (next == NULL)
    {
      /* Innermost frame: use the current replay position, if any.  */
      const struct btrace_insn_iterator *replay;

      replay = tp->btrace.replay;
      if (replay != NULL)
	bfun = &replay->btinfo->functions[replay->call_index];
    }
  else
    {
      /* Outer frame: follow the inner frame's caller link.  */
      const struct btrace_function *callee;
      struct btrace_call_iterator it;

      callee = btrace_get_frame_function (next);
      if (callee == NULL || (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
	return 0;

      if (btrace_find_call_by_number (&it, &tp->btrace, callee->up) == 0)
	return 0;

      bfun = btrace_call_get (&it);
    }

  if (bfun == NULL)
    return 0;

  DEBUG ("[frame] sniffed frame for %s on level %d",
	 btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = tp;
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}
1815
/* Implement sniffer method for record_btrace_tailcall_frame_unwind.

   Claims an outer frame whose inner frame's caller link was established
   by a tail call; the regular sniffer rejects those.  Never claims the
   innermost frame.  */

static int
record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
				      struct frame_info *this_frame,
				      void **this_cache)
{
  const struct btrace_function *bfun, *callee;
  struct btrace_frame_cache *cache;
  struct btrace_call_iterator it;
  struct frame_info *next;
  struct thread_info *tinfo;

  next = get_next_frame (this_frame);
  if (next == NULL)
    return 0;

  callee = btrace_get_frame_function (next);
  if (callee == NULL)
    return 0;

  /* Only handle tail-call links here.  */
  if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
    return 0;

  tinfo = inferior_thread ();
  if (btrace_find_call_by_number (&it, &tinfo->btrace, callee->up) == 0)
    return 0;

  bfun = btrace_call_get (&it);

  DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
	 btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = tinfo;
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}
1857
1858static void
1859record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
1860{
1861 struct btrace_frame_cache *cache;
1862 void **slot;
1863
19ba03f4 1864 cache = (struct btrace_frame_cache *) this_cache;
0b722aec
MM
1865
1866 slot = htab_find_slot (bfcache, cache, NO_INSERT);
1867 gdb_assert (slot != NULL);
1868
1869 htab_remove_elt (bfcache, cache);
cecac1ab
MM
1870}
1871
1872/* btrace recording does not store previous memory content, neither the stack
1873 frames content. Any unwinding would return errorneous results as the stack
1874 contents no longer matches the changed PC value restored from history.
1875 Therefore this unwinder reports any possibly unwound registers as
1876 <unavailable>. */
1877
0b722aec 1878const struct frame_unwind record_btrace_frame_unwind =
cecac1ab
MM
1879{
1880 NORMAL_FRAME,
1881 record_btrace_frame_unwind_stop_reason,
1882 record_btrace_frame_this_id,
1883 record_btrace_frame_prev_register,
1884 NULL,
0b722aec
MM
1885 record_btrace_frame_sniffer,
1886 record_btrace_frame_dealloc_cache
1887};
1888
1889const struct frame_unwind record_btrace_tailcall_frame_unwind =
1890{
1891 TAILCALL_FRAME,
1892 record_btrace_frame_unwind_stop_reason,
1893 record_btrace_frame_this_id,
1894 record_btrace_frame_prev_register,
1895 NULL,
1896 record_btrace_tailcall_frame_sniffer,
1897 record_btrace_frame_dealloc_cache
cecac1ab 1898};
b2f4cfde 1899
/* Implement the get_unwinder method.

   Returns the normal-frame btrace unwinder.  */

const struct frame_unwind *
record_btrace_target::get_unwinder ()
{
  return &record_btrace_frame_unwind;
}
1907
/* Implement the get_tailcall_unwinder method.

   Returns the tail-call btrace unwinder.  */

const struct frame_unwind *
record_btrace_target::get_tailcall_unwinder ()
{
  return &record_btrace_tailcall_frame_unwind;
}
1915
987e68b1
MM
1916/* Return a human-readable string for FLAG. */
1917
1918static const char *
1919btrace_thread_flag_to_str (enum btrace_thread_flag flag)
1920{
1921 switch (flag)
1922 {
1923 case BTHR_STEP:
1924 return "step";
1925
1926 case BTHR_RSTEP:
1927 return "reverse-step";
1928
1929 case BTHR_CONT:
1930 return "cont";
1931
1932 case BTHR_RCONT:
1933 return "reverse-cont";
1934
1935 case BTHR_STOP:
1936 return "stop";
1937 }
1938
1939 return "<invalid>";
1940}
1941
/* Indicate that TP should be resumed according to FLAG.

   The actual move happens later; this only records the request in the
   thread's btrace flags after refreshing the trace.  */

static void
record_btrace_resume_thread (struct thread_info *tp,
			     enum btrace_thread_flag flag)
{
  struct btrace_thread_info *btinfo;

  DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp),
	 target_pid_to_str (tp->ptid).c_str (), flag,
	 btrace_thread_flag_to_str (flag));

  btinfo = &tp->btrace;

  /* Fetch the latest branch trace.  */
  btrace_fetch (tp, record_btrace_get_cpu ());

  /* A resume request overwrites a preceding resume or stop request.  */
  btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
  btinfo->flags |= flag;
}
1963
/* Get the current frame id for TP.

   Temporarily switches to TP and clears its executing flag so the frame
   machinery will compute a fresh stack; both are restored on the normal
   and the exception path.  */

static struct frame_id
get_thread_current_frame_id (struct thread_info *tp)
{
  struct frame_id id;
  int executing;

  /* Set current thread, which is implicitly used by
     get_current_frame.  */
  scoped_restore_current_thread restore_thread;

  switch_to_thread (tp);

  /* Clear the executing flag to allow changes to the current frame.
     We are not actually running, yet.  We just started a reverse execution
     command or a record goto command.
     For the latter, EXECUTING is false and this has no effect.
     For the former, EXECUTING is true and we're in wait, about to
     move the thread.  Since we need to recompute the stack, we temporarily
     set EXECUTING to false.  */
  executing = tp->executing;
  set_executing (inferior_ptid, false);

  id = null_frame_id;
  try
    {
      id = get_frame_id (get_current_frame ());
    }
  catch (const gdb_exception &except)
    {
      /* Restore the previous execution state.  */
      set_executing (inferior_ptid, executing);

      throw;
    }

  /* Restore the previous execution state.  */
  set_executing (inferior_ptid, executing);

  return id;
}
2006
/* Start replaying a thread.

   Positions a new replay iterator at the last real instruction of TP's
   trace and installs it in TP's btrace info.  Also fixes up any stored
   stepping frame ids, which are computed differently while replaying.
   Returns the iterator, or NULL when TP has no trace.  On error, the
   iterator is torn down again before the exception propagates.  */

static struct btrace_insn_iterator *
record_btrace_start_replaying (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;
  replay = NULL;

  /* We can't start replaying without trace.  */
  if (btinfo->functions.empty ())
    return NULL;

  /* GDB stores the current frame_id when stepping in order to detect steps
     into subroutines.
     Since frames are computed differently when we're replaying, we need to
     recompute those stored frames and fix them up so we can still detect
     subroutines after we started replaying.  */
  try
    {
      struct frame_id frame_id;
      int upd_step_frame_id, upd_step_stack_frame_id;

      /* The current frame without replaying - computed via normal unwind.  */
      frame_id = get_thread_current_frame_id (tp);

      /* Check if we need to update any stepping-related frame id's.  */
      upd_step_frame_id = frame_id_eq (frame_id,
				       tp->control.step_frame_id);
      upd_step_stack_frame_id = frame_id_eq (frame_id,
					     tp->control.step_stack_frame_id);

      /* We start replaying at the end of the branch trace.  This corresponds
	 to the current instruction.  */
      replay = XNEW (struct btrace_insn_iterator);
      btrace_insn_end (replay, btinfo);

      /* Skip gaps at the end of the trace.  */
      while (btrace_insn_get (replay) == NULL)
	{
	  unsigned int steps;

	  steps = btrace_insn_prev (replay, 1);
	  if (steps == 0)
	    error (_("No trace."));
	}

      /* We're not replaying, yet.  */
      gdb_assert (btinfo->replay == NULL);
      btinfo->replay = replay;

      /* Make sure we're not using any stale registers.  */
      registers_changed_thread (tp);

      /* The current frame with replaying - computed via btrace unwind.  */
      frame_id = get_thread_current_frame_id (tp);

      /* Replace stepping related frames where necessary.  */
      if (upd_step_frame_id)
	tp->control.step_frame_id = frame_id;
      if (upd_step_stack_frame_id)
	tp->control.step_stack_frame_id = frame_id;
    }
  catch (const gdb_exception &except)
    {
      /* Undo the partial setup before propagating the error.  */
      xfree (btinfo->replay);
      btinfo->replay = NULL;

      registers_changed_thread (tp);

      throw;
    }

  return replay;
}
2084
2085/* Stop replaying a thread. */
2086
2087static void
2088record_btrace_stop_replaying (struct thread_info *tp)
2089{
2090 struct btrace_thread_info *btinfo;
2091
2092 btinfo = &tp->btrace;
2093
2094 xfree (btinfo->replay);
2095 btinfo->replay = NULL;
2096
2097 /* Make sure we're not leaving any stale registers. */
00431a78 2098 registers_changed_thread (tp);
52834460
MM
2099}
2100
e3cfc1c7
MM
2101/* Stop replaying TP if it is at the end of its execution history. */
2102
2103static void
2104record_btrace_stop_replaying_at_end (struct thread_info *tp)
2105{
2106 struct btrace_insn_iterator *replay, end;
2107 struct btrace_thread_info *btinfo;
2108
2109 btinfo = &tp->btrace;
2110 replay = btinfo->replay;
2111
2112 if (replay == NULL)
2113 return;
2114
2115 btrace_insn_end (&end, btinfo);
2116
2117 if (btrace_insn_cmp (replay, &end) == 0)
2118 record_btrace_stop_replaying (tp);
2119}
2120
f6ac5f3d 2121/* The resume method of target record-btrace. */
b2f4cfde 2122
f6ac5f3d
PA
2123void
2124record_btrace_target::resume (ptid_t ptid, int step, enum gdb_signal signal)
b2f4cfde 2125{
d2939ba2 2126 enum btrace_thread_flag flag, cflag;
52834460 2127
a068643d 2128 DEBUG ("resume %s: %s%s", target_pid_to_str (ptid).c_str (),
f6ac5f3d 2129 ::execution_direction == EXEC_REVERSE ? "reverse-" : "",
987e68b1 2130 step ? "step" : "cont");
52834460 2131
0ca912df
MM
2132 /* Store the execution direction of the last resume.
2133
f6ac5f3d 2134 If there is more than one resume call, we have to rely on infrun
0ca912df 2135 to not change the execution direction in-between. */
f6ac5f3d 2136 record_btrace_resume_exec_dir = ::execution_direction;
70ad5bff 2137
0ca912df 2138 /* As long as we're not replaying, just forward the request.
52834460 2139
0ca912df
MM
2140 For non-stop targets this means that no thread is replaying. In order to
2141 make progress, we may need to explicitly move replaying threads to the end
2142 of their execution history. */
f6ac5f3d
PA
2143 if ((::execution_direction != EXEC_REVERSE)
2144 && !record_is_replaying (minus_one_ptid))
b2f4cfde 2145 {
b6a8c27b 2146 this->beneath ()->resume (ptid, step, signal);
04c4fe8c 2147 return;
b2f4cfde
MM
2148 }
2149
52834460 2150 /* Compute the btrace thread flag for the requested move. */
f6ac5f3d 2151 if (::execution_direction == EXEC_REVERSE)
d2939ba2
MM
2152 {
2153 flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;
2154 cflag = BTHR_RCONT;
2155 }
52834460 2156 else
d2939ba2
MM
2157 {
2158 flag = step == 0 ? BTHR_CONT : BTHR_STEP;
2159 cflag = BTHR_CONT;
2160 }
52834460 2161
52834460 2162 /* We just indicate the resume intent here. The actual stepping happens in
d2939ba2
MM
2163 record_btrace_wait below.
2164
2165 For all-stop targets, we only step INFERIOR_PTID and continue others. */
2166 if (!target_is_non_stop_p ())
2167 {
26a57c92 2168 gdb_assert (inferior_ptid.matches (ptid));
d2939ba2 2169
08036331
PA
2170 for (thread_info *tp : all_non_exited_threads (ptid))
2171 {
2172 if (tp->ptid.matches (inferior_ptid))
2173 record_btrace_resume_thread (tp, flag);
2174 else
2175 record_btrace_resume_thread (tp, cflag);
2176 }
d2939ba2
MM
2177 }
2178 else
2179 {
08036331
PA
2180 for (thread_info *tp : all_non_exited_threads (ptid))
2181 record_btrace_resume_thread (tp, flag);
d2939ba2 2182 }
70ad5bff
MM
2183
2184 /* Async support. */
2185 if (target_can_async_p ())
2186 {
6a3753b3 2187 target_async (1);
70ad5bff
MM
2188 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2189 }
52834460
MM
2190}
2191
f6ac5f3d 2192/* The commit_resume method of target record-btrace. */
85ad3aaf 2193
f6ac5f3d
PA
2194void
2195record_btrace_target::commit_resume ()
85ad3aaf 2196{
f6ac5f3d
PA
2197 if ((::execution_direction != EXEC_REVERSE)
2198 && !record_is_replaying (minus_one_ptid))
b6a8c27b 2199 beneath ()->commit_resume ();
85ad3aaf
PA
2200}
2201
987e68b1
MM
2202/* Cancel resuming TP. */
2203
2204static void
2205record_btrace_cancel_resume (struct thread_info *tp)
2206{
2207 enum btrace_thread_flag flags;
2208
2209 flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
2210 if (flags == 0)
2211 return;
2212
43792cf0
PA
2213 DEBUG ("cancel resume thread %s (%s): %x (%s)",
2214 print_thread_id (tp),
a068643d 2215 target_pid_to_str (tp->ptid).c_str (), flags,
987e68b1
MM
2216 btrace_thread_flag_to_str (flags));
2217
2218 tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
e3cfc1c7 2219 record_btrace_stop_replaying_at_end (tp);
52834460
MM
2220}
2221
2222/* Return a target_waitstatus indicating that we ran out of history. */
2223
2224static struct target_waitstatus
2225btrace_step_no_history (void)
2226{
2227 struct target_waitstatus status;
2228
2229 status.kind = TARGET_WAITKIND_NO_HISTORY;
2230
2231 return status;
2232}
2233
2234/* Return a target_waitstatus indicating that a step finished. */
2235
2236static struct target_waitstatus
2237btrace_step_stopped (void)
2238{
2239 struct target_waitstatus status;
2240
2241 status.kind = TARGET_WAITKIND_STOPPED;
2242 status.value.sig = GDB_SIGNAL_TRAP;
2243
2244 return status;
2245}
2246
6e4879f0
MM
2247/* Return a target_waitstatus indicating that a thread was stopped as
2248 requested. */
2249
2250static struct target_waitstatus
2251btrace_step_stopped_on_request (void)
2252{
2253 struct target_waitstatus status;
2254
2255 status.kind = TARGET_WAITKIND_STOPPED;
2256 status.value.sig = GDB_SIGNAL_0;
2257
2258 return status;
2259}
2260
d825d248
MM
2261/* Return a target_waitstatus indicating a spurious stop. */
2262
2263static struct target_waitstatus
2264btrace_step_spurious (void)
2265{
2266 struct target_waitstatus status;
2267
2268 status.kind = TARGET_WAITKIND_SPURIOUS;
2269
2270 return status;
2271}
2272
e3cfc1c7
MM
2273/* Return a target_waitstatus indicating that the thread was not resumed. */
2274
2275static struct target_waitstatus
2276btrace_step_no_resumed (void)
2277{
2278 struct target_waitstatus status;
2279
2280 status.kind = TARGET_WAITKIND_NO_RESUMED;
2281
2282 return status;
2283}
2284
2285/* Return a target_waitstatus indicating that we should wait again. */
2286
2287static struct target_waitstatus
2288btrace_step_again (void)
2289{
2290 struct target_waitstatus status;
2291
2292 status.kind = TARGET_WAITKIND_IGNORE;
2293
2294 return status;
2295}
2296
52834460
MM
2297/* Clear the record histories. */
2298
2299static void
2300record_btrace_clear_histories (struct btrace_thread_info *btinfo)
2301{
2302 xfree (btinfo->insn_history);
2303 xfree (btinfo->call_history);
2304
2305 btinfo->insn_history = NULL;
2306 btinfo->call_history = NULL;
2307}
2308
3c615f99
MM
2309/* Check whether TP's current replay position is at a breakpoint. */
2310
2311static int
2312record_btrace_replay_at_breakpoint (struct thread_info *tp)
2313{
2314 struct btrace_insn_iterator *replay;
2315 struct btrace_thread_info *btinfo;
2316 const struct btrace_insn *insn;
3c615f99
MM
2317
2318 btinfo = &tp->btrace;
2319 replay = btinfo->replay;
2320
2321 if (replay == NULL)
2322 return 0;
2323
2324 insn = btrace_insn_get (replay);
2325 if (insn == NULL)
2326 return 0;
2327
00431a78 2328 return record_check_stopped_by_breakpoint (tp->inf->aspace, insn->pc,
3c615f99
MM
2329 &btinfo->stop_reason);
2330}
2331
d825d248 2332/* Step one instruction in forward direction. */
52834460
MM
2333
2334static struct target_waitstatus
d825d248 2335record_btrace_single_step_forward (struct thread_info *tp)
52834460 2336{
b61ce85c 2337 struct btrace_insn_iterator *replay, end, start;
52834460 2338 struct btrace_thread_info *btinfo;
52834460 2339
d825d248
MM
2340 btinfo = &tp->btrace;
2341 replay = btinfo->replay;
2342
2343 /* We're done if we're not replaying. */
2344 if (replay == NULL)
2345 return btrace_step_no_history ();
2346
011c71b6
MM
2347 /* Check if we're stepping a breakpoint. */
2348 if (record_btrace_replay_at_breakpoint (tp))
2349 return btrace_step_stopped ();
2350
b61ce85c
MM
2351 /* Skip gaps during replay. If we end up at a gap (at the end of the trace),
2352 jump back to the instruction at which we started. */
2353 start = *replay;
d825d248
MM
2354 do
2355 {
2356 unsigned int steps;
2357
e3cfc1c7
MM
2358 /* We will bail out here if we continue stepping after reaching the end
2359 of the execution history. */
d825d248
MM
2360 steps = btrace_insn_next (replay, 1);
2361 if (steps == 0)
b61ce85c
MM
2362 {
2363 *replay = start;
2364 return btrace_step_no_history ();
2365 }
d825d248
MM
2366 }
2367 while (btrace_insn_get (replay) == NULL);
2368
2369 /* Determine the end of the instruction trace. */
2370 btrace_insn_end (&end, btinfo);
2371
e3cfc1c7
MM
2372 /* The execution trace contains (and ends with) the current instruction.
2373 This instruction has not been executed, yet, so the trace really ends
2374 one instruction earlier. */
d825d248 2375 if (btrace_insn_cmp (replay, &end) == 0)
e3cfc1c7 2376 return btrace_step_no_history ();
d825d248
MM
2377
2378 return btrace_step_spurious ();
2379}
2380
2381/* Step one instruction in backward direction. */
2382
2383static struct target_waitstatus
2384record_btrace_single_step_backward (struct thread_info *tp)
2385{
b61ce85c 2386 struct btrace_insn_iterator *replay, start;
d825d248 2387 struct btrace_thread_info *btinfo;
e59fa00f 2388
52834460
MM
2389 btinfo = &tp->btrace;
2390 replay = btinfo->replay;
2391
d825d248
MM
2392 /* Start replaying if we're not already doing so. */
2393 if (replay == NULL)
2394 replay = record_btrace_start_replaying (tp);
2395
2396 /* If we can't step any further, we reached the end of the history.
b61ce85c
MM
2397 Skip gaps during replay. If we end up at a gap (at the beginning of
2398 the trace), jump back to the instruction at which we started. */
2399 start = *replay;
d825d248
MM
2400 do
2401 {
2402 unsigned int steps;
2403
2404 steps = btrace_insn_prev (replay, 1);
2405 if (steps == 0)
b61ce85c
MM
2406 {
2407 *replay = start;
2408 return btrace_step_no_history ();
2409 }
d825d248
MM
2410 }
2411 while (btrace_insn_get (replay) == NULL);
2412
011c71b6
MM
2413 /* Check if we're stepping a breakpoint.
2414
2415 For reverse-stepping, this check is after the step. There is logic in
2416 infrun.c that handles reverse-stepping separately. See, for example,
2417 proceed and adjust_pc_after_break.
2418
2419 This code assumes that for reverse-stepping, PC points to the last
2420 de-executed instruction, whereas for forward-stepping PC points to the
2421 next to-be-executed instruction. */
2422 if (record_btrace_replay_at_breakpoint (tp))
2423 return btrace_step_stopped ();
2424
d825d248
MM
2425 return btrace_step_spurious ();
2426}
2427
2428/* Step a single thread. */
2429
2430static struct target_waitstatus
2431record_btrace_step_thread (struct thread_info *tp)
2432{
2433 struct btrace_thread_info *btinfo;
2434 struct target_waitstatus status;
2435 enum btrace_thread_flag flags;
2436
2437 btinfo = &tp->btrace;
2438
6e4879f0
MM
2439 flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
2440 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
52834460 2441
43792cf0 2442 DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp),
a068643d 2443 target_pid_to_str (tp->ptid).c_str (), flags,
987e68b1 2444 btrace_thread_flag_to_str (flags));
52834460 2445
6e4879f0
MM
2446 /* We can't step without an execution history. */
2447 if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
2448 return btrace_step_no_history ();
2449
52834460
MM
2450 switch (flags)
2451 {
2452 default:
2453 internal_error (__FILE__, __LINE__, _("invalid stepping type."));
2454
6e4879f0
MM
2455 case BTHR_STOP:
2456 return btrace_step_stopped_on_request ();
2457
52834460 2458 case BTHR_STEP:
d825d248
MM
2459 status = record_btrace_single_step_forward (tp);
2460 if (status.kind != TARGET_WAITKIND_SPURIOUS)
e3cfc1c7 2461 break;
52834460
MM
2462
2463 return btrace_step_stopped ();
2464
2465 case BTHR_RSTEP:
d825d248
MM
2466 status = record_btrace_single_step_backward (tp);
2467 if (status.kind != TARGET_WAITKIND_SPURIOUS)
e3cfc1c7 2468 break;
52834460
MM
2469
2470 return btrace_step_stopped ();
2471
2472 case BTHR_CONT:
e3cfc1c7
MM
2473 status = record_btrace_single_step_forward (tp);
2474 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2475 break;
52834460 2476
e3cfc1c7
MM
2477 btinfo->flags |= flags;
2478 return btrace_step_again ();
52834460
MM
2479
2480 case BTHR_RCONT:
e3cfc1c7
MM
2481 status = record_btrace_single_step_backward (tp);
2482 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2483 break;
52834460 2484
e3cfc1c7
MM
2485 btinfo->flags |= flags;
2486 return btrace_step_again ();
2487 }
d825d248 2488
f6ac5f3d 2489 /* We keep threads moving at the end of their execution history. The wait
e3cfc1c7
MM
2490 method will stop the thread for whom the event is reported. */
2491 if (status.kind == TARGET_WAITKIND_NO_HISTORY)
2492 btinfo->flags |= flags;
52834460 2493
e3cfc1c7 2494 return status;
b2f4cfde
MM
2495}
2496
a6b5be76
MM
2497/* Announce further events if necessary. */
2498
2499static void
53127008
SM
2500record_btrace_maybe_mark_async_event
2501 (const std::vector<thread_info *> &moving,
2502 const std::vector<thread_info *> &no_history)
a6b5be76 2503{
53127008
SM
2504 bool more_moving = !moving.empty ();
2505 bool more_no_history = !no_history.empty ();;
a6b5be76
MM
2506
2507 if (!more_moving && !more_no_history)
2508 return;
2509
2510 if (more_moving)
2511 DEBUG ("movers pending");
2512
2513 if (more_no_history)
2514 DEBUG ("no-history pending");
2515
2516 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2517}
2518
f6ac5f3d 2519/* The wait method of target record-btrace. */
b2f4cfde 2520
f6ac5f3d
PA
2521ptid_t
2522record_btrace_target::wait (ptid_t ptid, struct target_waitstatus *status,
2523 int options)
b2f4cfde 2524{
53127008
SM
2525 std::vector<thread_info *> moving;
2526 std::vector<thread_info *> no_history;
52834460 2527
a068643d 2528 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid).c_str (), options);
52834460 2529
b2f4cfde 2530 /* As long as we're not replaying, just forward the request. */
f6ac5f3d
PA
2531 if ((::execution_direction != EXEC_REVERSE)
2532 && !record_is_replaying (minus_one_ptid))
b2f4cfde 2533 {
b6a8c27b 2534 return this->beneath ()->wait (ptid, status, options);
b2f4cfde
MM
2535 }
2536
e3cfc1c7 2537 /* Keep a work list of moving threads. */
08036331
PA
2538 for (thread_info *tp : all_non_exited_threads (ptid))
2539 if ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0)
2540 moving.push_back (tp);
e3cfc1c7 2541
53127008 2542 if (moving.empty ())
52834460 2543 {
e3cfc1c7 2544 *status = btrace_step_no_resumed ();
52834460 2545
a068643d 2546 DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid).c_str (),
23fdd69e 2547 target_waitstatus_to_string (status).c_str ());
e3cfc1c7 2548
e3cfc1c7 2549 return null_ptid;
52834460
MM
2550 }
2551
e3cfc1c7
MM
2552 /* Step moving threads one by one, one step each, until either one thread
2553 reports an event or we run out of threads to step.
2554
2555 When stepping more than one thread, chances are that some threads reach
2556 the end of their execution history earlier than others. If we reported
2557 this immediately, all-stop on top of non-stop would stop all threads and
2558 resume the same threads next time. And we would report the same thread
2559 having reached the end of its execution history again.
2560
2561 In the worst case, this would starve the other threads. But even if other
2562 threads would be allowed to make progress, this would result in far too
2563 many intermediate stops.
2564
2565 We therefore delay the reporting of "no execution history" until we have
2566 nothing else to report. By this time, all threads should have moved to
2567 either the beginning or the end of their execution history. There will
2568 be a single user-visible stop. */
53127008
SM
2569 struct thread_info *eventing = NULL;
2570 while ((eventing == NULL) && !moving.empty ())
e3cfc1c7 2571 {
53127008 2572 for (unsigned int ix = 0; eventing == NULL && ix < moving.size ();)
e3cfc1c7 2573 {
53127008
SM
2574 thread_info *tp = moving[ix];
2575
e3cfc1c7
MM
2576 *status = record_btrace_step_thread (tp);
2577
2578 switch (status->kind)
2579 {
2580 case TARGET_WAITKIND_IGNORE:
2581 ix++;
2582 break;
2583
2584 case TARGET_WAITKIND_NO_HISTORY:
53127008 2585 no_history.push_back (ordered_remove (moving, ix));
e3cfc1c7
MM
2586 break;
2587
2588 default:
53127008 2589 eventing = unordered_remove (moving, ix);
e3cfc1c7
MM
2590 break;
2591 }
2592 }
2593 }
2594
2595 if (eventing == NULL)
2596 {
2597 /* We started with at least one moving thread. This thread must have
2598 either stopped or reached the end of its execution history.
2599
2600 In the former case, EVENTING must not be NULL.
2601 In the latter case, NO_HISTORY must not be empty. */
53127008 2602 gdb_assert (!no_history.empty ());
e3cfc1c7
MM
2603
2604 /* We kept threads moving at the end of their execution history. Stop
2605 EVENTING now that we are going to report its stop. */
53127008 2606 eventing = unordered_remove (no_history, 0);
e3cfc1c7
MM
2607 eventing->btrace.flags &= ~BTHR_MOVE;
2608
2609 *status = btrace_step_no_history ();
2610 }
2611
2612 gdb_assert (eventing != NULL);
2613
2614 /* We kept threads replaying at the end of their execution history. Stop
2615 replaying EVENTING now that we are going to report its stop. */
2616 record_btrace_stop_replaying_at_end (eventing);
52834460
MM
2617
2618 /* Stop all other threads. */
5953356c 2619 if (!target_is_non_stop_p ())
53127008 2620 {
08036331 2621 for (thread_info *tp : all_non_exited_threads ())
53127008
SM
2622 record_btrace_cancel_resume (tp);
2623 }
52834460 2624
a6b5be76
MM
2625 /* In async mode, we need to announce further events. */
2626 if (target_is_async_p ())
2627 record_btrace_maybe_mark_async_event (moving, no_history);
2628
52834460 2629 /* Start record histories anew from the current position. */
e3cfc1c7 2630 record_btrace_clear_histories (&eventing->btrace);
52834460
MM
2631
2632 /* We moved the replay position but did not update registers. */
00431a78 2633 registers_changed_thread (eventing);
e3cfc1c7 2634
43792cf0
PA
2635 DEBUG ("wait ended by thread %s (%s): %s",
2636 print_thread_id (eventing),
a068643d 2637 target_pid_to_str (eventing->ptid).c_str (),
23fdd69e 2638 target_waitstatus_to_string (status).c_str ());
52834460 2639
e3cfc1c7 2640 return eventing->ptid;
52834460
MM
2641}
2642
f6ac5f3d 2643/* The stop method of target record-btrace. */
6e4879f0 2644
f6ac5f3d
PA
2645void
2646record_btrace_target::stop (ptid_t ptid)
6e4879f0 2647{
a068643d 2648 DEBUG ("stop %s", target_pid_to_str (ptid).c_str ());
6e4879f0
MM
2649
2650 /* As long as we're not replaying, just forward the request. */
f6ac5f3d
PA
2651 if ((::execution_direction != EXEC_REVERSE)
2652 && !record_is_replaying (minus_one_ptid))
6e4879f0 2653 {
b6a8c27b 2654 this->beneath ()->stop (ptid);
6e4879f0
MM
2655 }
2656 else
2657 {
08036331
PA
2658 for (thread_info *tp : all_non_exited_threads (ptid))
2659 {
2660 tp->btrace.flags &= ~BTHR_MOVE;
2661 tp->btrace.flags |= BTHR_STOP;
2662 }
6e4879f0
MM
2663 }
2664 }
2665
f6ac5f3d 2666/* The can_execute_reverse method of target record-btrace. */
52834460 2667
57810aa7 2668bool
f6ac5f3d 2669record_btrace_target::can_execute_reverse ()
52834460 2670{
57810aa7 2671 return true;
52834460
MM
2672}
2673
f6ac5f3d 2674/* The stopped_by_sw_breakpoint method of target record-btrace. */
52834460 2675
57810aa7 2676bool
f6ac5f3d 2677record_btrace_target::stopped_by_sw_breakpoint ()
52834460 2678{
f6ac5f3d 2679 if (record_is_replaying (minus_one_ptid))
9e8915c6
PA
2680 {
2681 struct thread_info *tp = inferior_thread ();
2682
2683 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2684 }
2685
b6a8c27b 2686 return this->beneath ()->stopped_by_sw_breakpoint ();
9e8915c6
PA
2687}
2688
f6ac5f3d 2689/* The supports_stopped_by_sw_breakpoint method of target
9e8915c6
PA
2690 record-btrace. */
2691
57810aa7 2692bool
f6ac5f3d 2693record_btrace_target::supports_stopped_by_sw_breakpoint ()
9e8915c6 2694{
f6ac5f3d 2695 if (record_is_replaying (minus_one_ptid))
57810aa7 2696 return true;
9e8915c6 2697
b6a8c27b 2698 return this->beneath ()->supports_stopped_by_sw_breakpoint ();
9e8915c6
PA
2699}
2700
f6ac5f3d 2701/* The stopped_by_sw_breakpoint method of target record-btrace. */
9e8915c6 2702
57810aa7 2703bool
f6ac5f3d 2704record_btrace_target::stopped_by_hw_breakpoint ()
9e8915c6 2705{
f6ac5f3d 2706 if (record_is_replaying (minus_one_ptid))
9e8915c6
PA
2707 {
2708 struct thread_info *tp = inferior_thread ();
2709
2710 return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2711 }
2712
b6a8c27b 2713 return this->beneath ()->stopped_by_hw_breakpoint ();
9e8915c6
PA
2714}
2715
f6ac5f3d 2716/* The supports_stopped_by_hw_breakpoint method of target
9e8915c6
PA
2717 record-btrace. */
2718
57810aa7 2719bool
f6ac5f3d 2720record_btrace_target::supports_stopped_by_hw_breakpoint ()
9e8915c6 2721{
f6ac5f3d 2722 if (record_is_replaying (minus_one_ptid))
57810aa7 2723 return true;
52834460 2724
b6a8c27b 2725 return this->beneath ()->supports_stopped_by_hw_breakpoint ();
b2f4cfde
MM
2726}
2727
f6ac5f3d 2728/* The update_thread_list method of target record-btrace. */
e2887aa3 2729
f6ac5f3d
PA
2730void
2731record_btrace_target::update_thread_list ()
e2887aa3 2732{
e8032dde 2733 /* We don't add or remove threads during replay. */
f6ac5f3d 2734 if (record_is_replaying (minus_one_ptid))
e2887aa3
MM
2735 return;
2736
2737 /* Forward the request. */
b6a8c27b 2738 this->beneath ()->update_thread_list ();
e2887aa3
MM
2739}
2740
f6ac5f3d 2741/* The thread_alive method of target record-btrace. */
e2887aa3 2742
57810aa7 2743bool
f6ac5f3d 2744record_btrace_target::thread_alive (ptid_t ptid)
e2887aa3
MM
2745{
2746 /* We don't add or remove threads during replay. */
f6ac5f3d 2747 if (record_is_replaying (minus_one_ptid))
00431a78 2748 return true;
e2887aa3
MM
2749
2750 /* Forward the request. */
b6a8c27b 2751 return this->beneath ()->thread_alive (ptid);
e2887aa3
MM
2752}
2753
066ce621
MM
2754/* Set the replay branch trace instruction iterator. If IT is NULL, replay
2755 is stopped. */
2756
2757static void
2758record_btrace_set_replay (struct thread_info *tp,
2759 const struct btrace_insn_iterator *it)
2760{
2761 struct btrace_thread_info *btinfo;
2762
2763 btinfo = &tp->btrace;
2764
a0f1b963 2765 if (it == NULL)
52834460 2766 record_btrace_stop_replaying (tp);
066ce621
MM
2767 else
2768 {
2769 if (btinfo->replay == NULL)
52834460 2770 record_btrace_start_replaying (tp);
066ce621
MM
2771 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
2772 return;
2773
2774 *btinfo->replay = *it;
00431a78 2775 registers_changed_thread (tp);
066ce621
MM
2776 }
2777
52834460
MM
2778 /* Start anew from the new replay position. */
2779 record_btrace_clear_histories (btinfo);
485668e5 2780
f2ffa92b
PA
2781 inferior_thread ()->suspend.stop_pc
2782 = regcache_read_pc (get_current_regcache ());
485668e5 2783 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
066ce621
MM
2784}
2785
f6ac5f3d 2786/* The goto_record_begin method of target record-btrace. */
066ce621 2787
f6ac5f3d
PA
2788void
2789record_btrace_target::goto_record_begin ()
066ce621
MM
2790{
2791 struct thread_info *tp;
2792 struct btrace_insn_iterator begin;
2793
2794 tp = require_btrace_thread ();
2795
2796 btrace_insn_begin (&begin, &tp->btrace);
b61ce85c
MM
2797
2798 /* Skip gaps at the beginning of the trace. */
2799 while (btrace_insn_get (&begin) == NULL)
2800 {
2801 unsigned int steps;
2802
2803 steps = btrace_insn_next (&begin, 1);
2804 if (steps == 0)
2805 error (_("No trace."));
2806 }
2807
066ce621 2808 record_btrace_set_replay (tp, &begin);
066ce621
MM
2809}
2810
f6ac5f3d 2811/* The goto_record_end method of target record-btrace. */
066ce621 2812
f6ac5f3d
PA
2813void
2814record_btrace_target::goto_record_end ()
066ce621
MM
2815{
2816 struct thread_info *tp;
2817
2818 tp = require_btrace_thread ();
2819
2820 record_btrace_set_replay (tp, NULL);
066ce621
MM
2821}
2822
f6ac5f3d 2823/* The goto_record method of target record-btrace. */
066ce621 2824
f6ac5f3d
PA
2825void
2826record_btrace_target::goto_record (ULONGEST insn)
066ce621
MM
2827{
2828 struct thread_info *tp;
2829 struct btrace_insn_iterator it;
2830 unsigned int number;
2831 int found;
2832
2833 number = insn;
2834
2835 /* Check for wrap-arounds. */
2836 if (number != insn)
2837 error (_("Instruction number out of range."));
2838
2839 tp = require_btrace_thread ();
2840
2841 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
69090cee
TW
2842
2843 /* Check if the instruction could not be found or is a gap. */
2844 if (found == 0 || btrace_insn_get (&it) == NULL)
066ce621
MM
2845 error (_("No such instruction."));
2846
2847 record_btrace_set_replay (tp, &it);
066ce621
MM
2848}
2849
f6ac5f3d 2850/* The record_stop_replaying method of target record-btrace. */
797094dd 2851
f6ac5f3d
PA
2852void
2853record_btrace_target::record_stop_replaying ()
797094dd 2854{
08036331 2855 for (thread_info *tp : all_non_exited_threads ())
797094dd
MM
2856 record_btrace_stop_replaying (tp);
2857}
2858
f6ac5f3d 2859/* The execution_direction target method. */
70ad5bff 2860
f6ac5f3d
PA
2861enum exec_direction_kind
2862record_btrace_target::execution_direction ()
70ad5bff
MM
2863{
2864 return record_btrace_resume_exec_dir;
2865}
2866
f6ac5f3d 2867/* The prepare_to_generate_core target method. */
aef92902 2868
f6ac5f3d
PA
2869void
2870record_btrace_target::prepare_to_generate_core ()
aef92902
MM
2871{
2872 record_btrace_generating_corefile = 1;
2873}
2874
f6ac5f3d 2875/* The done_generating_core target method. */
aef92902 2876
f6ac5f3d
PA
2877void
2878record_btrace_target::done_generating_core ()
aef92902
MM
2879{
2880 record_btrace_generating_corefile = 0;
2881}
2882
f4abbc16
MM
2883/* Start recording in BTS format. */
2884
2885static void
cdb34d4a 2886cmd_record_btrace_bts_start (const char *args, int from_tty)
f4abbc16 2887{
f4abbc16
MM
2888 if (args != NULL && *args != 0)
2889 error (_("Invalid argument."));
2890
2891 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2892
a70b8144 2893 try
492d29ea 2894 {
95a6b0a1 2895 execute_command ("target record-btrace", from_tty);
492d29ea 2896 }
230d2906 2897 catch (const gdb_exception &exception)
f4abbc16
MM
2898 {
2899 record_btrace_conf.format = BTRACE_FORMAT_NONE;
eedc3f4f 2900 throw;
f4abbc16
MM
2901 }
2902}
2903
bc504a31 2904/* Start recording in Intel Processor Trace format. */
afedecd3
MM
2905
2906static void
cdb34d4a 2907cmd_record_btrace_pt_start (const char *args, int from_tty)
afedecd3
MM
2908{
2909 if (args != NULL && *args != 0)
2910 error (_("Invalid argument."));
2911
b20a6524 2912 record_btrace_conf.format = BTRACE_FORMAT_PT;
f4abbc16 2913
a70b8144 2914 try
492d29ea 2915 {
95a6b0a1 2916 execute_command ("target record-btrace", from_tty);
492d29ea 2917 }
230d2906 2918 catch (const gdb_exception &exception)
492d29ea
PA
2919 {
2920 record_btrace_conf.format = BTRACE_FORMAT_NONE;
eedc3f4f 2921 throw;
492d29ea 2922 }
afedecd3
MM
2923}
2924
b20a6524
MM
2925/* Alias for "target record". */
2926
2927static void
981a3fb3 2928cmd_record_btrace_start (const char *args, int from_tty)
b20a6524
MM
2929{
2930 if (args != NULL && *args != 0)
2931 error (_("Invalid argument."));
2932
2933 record_btrace_conf.format = BTRACE_FORMAT_PT;
2934
a70b8144 2935 try
b20a6524 2936 {
95a6b0a1 2937 execute_command ("target record-btrace", from_tty);
b20a6524 2938 }
230d2906 2939 catch (const gdb_exception &exception)
b20a6524
MM
2940 {
2941 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2942
a70b8144 2943 try
b20a6524 2944 {
95a6b0a1 2945 execute_command ("target record-btrace", from_tty);
b20a6524 2946 }
230d2906 2947 catch (const gdb_exception &ex)
b20a6524
MM
2948 {
2949 record_btrace_conf.format = BTRACE_FORMAT_NONE;
eedc3f4f 2950 throw;
b20a6524 2951 }
b20a6524 2952 }
b20a6524
MM
2953}
2954
67b5c0c1
MM
2955/* The "set record btrace" command. */
2956
2957static void
981a3fb3 2958cmd_set_record_btrace (const char *args, int from_tty)
67b5c0c1 2959{
b85310e1
MM
2960 printf_unfiltered (_("\"set record btrace\" must be followed "
2961 "by an appropriate subcommand.\n"));
2962 help_list (set_record_btrace_cmdlist, "set record btrace ",
2963 all_commands, gdb_stdout);
67b5c0c1
MM
2964}
2965
2966/* The "show record btrace" command. */
2967
2968static void
981a3fb3 2969cmd_show_record_btrace (const char *args, int from_tty)
67b5c0c1
MM
2970{
2971 cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
2972}
2973
2974/* The "show record btrace replay-memory-access" command. */
2975
2976static void
2977cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2978 struct cmd_list_element *c, const char *value)
2979{
2980 fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
2981 replay_memory_access);
2982}
2983
4a4495d6
MM
2984/* The "set record btrace cpu none" command. */
2985
2986static void
2987cmd_set_record_btrace_cpu_none (const char *args, int from_tty)
2988{
2989 if (args != nullptr && *args != 0)
2990 error (_("Trailing junk: '%s'."), args);
2991
2992 record_btrace_cpu_state = CS_NONE;
2993}
2994
2995/* The "set record btrace cpu auto" command. */
2996
2997static void
2998cmd_set_record_btrace_cpu_auto (const char *args, int from_tty)
2999{
3000 if (args != nullptr && *args != 0)
3001 error (_("Trailing junk: '%s'."), args);
3002
3003 record_btrace_cpu_state = CS_AUTO;
3004}
3005
3006/* The "set record btrace cpu" command. */
3007
3008static void
3009cmd_set_record_btrace_cpu (const char *args, int from_tty)
3010{
3011 if (args == nullptr)
3012 args = "";
3013
3014 /* We use a hard-coded vendor string for now. */
3015 unsigned int family, model, stepping;
3016 int l1, l2, matches = sscanf (args, "intel: %u/%u%n/%u%n", &family,
3017 &model, &l1, &stepping, &l2);
3018 if (matches == 3)
3019 {
3020 if (strlen (args) != l2)
3021 error (_("Trailing junk: '%s'."), args + l2);
3022 }
3023 else if (matches == 2)
3024 {
3025 if (strlen (args) != l1)
3026 error (_("Trailing junk: '%s'."), args + l1);
3027
3028 stepping = 0;
3029 }
3030 else
3031 error (_("Bad format. See \"help set record btrace cpu\"."));
3032
3033 if (USHRT_MAX < family)
3034 error (_("Cpu family too big."));
3035
3036 if (UCHAR_MAX < model)
3037 error (_("Cpu model too big."));
3038
3039 if (UCHAR_MAX < stepping)
3040 error (_("Cpu stepping too big."));
3041
3042 record_btrace_cpu.vendor = CV_INTEL;
3043 record_btrace_cpu.family = family;
3044 record_btrace_cpu.model = model;
3045 record_btrace_cpu.stepping = stepping;
3046
3047 record_btrace_cpu_state = CS_CPU;
3048}
3049
3050/* The "show record btrace cpu" command. */
3051
3052static void
3053cmd_show_record_btrace_cpu (const char *args, int from_tty)
3054{
4a4495d6
MM
3055 if (args != nullptr && *args != 0)
3056 error (_("Trailing junk: '%s'."), args);
3057
3058 switch (record_btrace_cpu_state)
3059 {
3060 case CS_AUTO:
3061 printf_unfiltered (_("btrace cpu is 'auto'.\n"));
3062 return;
3063
3064 case CS_NONE:
3065 printf_unfiltered (_("btrace cpu is 'none'.\n"));
3066 return;
3067
3068 case CS_CPU:
3069 switch (record_btrace_cpu.vendor)
3070 {
3071 case CV_INTEL:
3072 if (record_btrace_cpu.stepping == 0)
3073 printf_unfiltered (_("btrace cpu is 'intel: %u/%u'.\n"),
3074 record_btrace_cpu.family,
3075 record_btrace_cpu.model);
3076 else
3077 printf_unfiltered (_("btrace cpu is 'intel: %u/%u/%u'.\n"),
3078 record_btrace_cpu.family,
3079 record_btrace_cpu.model,
3080 record_btrace_cpu.stepping);
3081 return;
3082 }
3083 }
3084
3085 error (_("Internal error: bad cpu state."));
3086}
3087
3088/* The "s record btrace bts" command. */
d33501a5
MM
3089
3090static void
981a3fb3 3091cmd_set_record_btrace_bts (const char *args, int from_tty)
d33501a5
MM
3092{
3093 printf_unfiltered (_("\"set record btrace bts\" must be followed "
b20a6524 3094 "by an appropriate subcommand.\n"));
d33501a5
MM
3095 help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
3096 all_commands, gdb_stdout);
3097}
3098
/* The "show record btrace bts" command.  */

static void
cmd_show_record_btrace_bts (const char *args, int from_tty)
{
  /* Delegate to the generic show machinery, which prints every setting
     registered under "show record btrace bts".  */
  cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
}
3106
b20a6524
MM
3107/* The "set record btrace pt" command. */
3108
3109static void
981a3fb3 3110cmd_set_record_btrace_pt (const char *args, int from_tty)
b20a6524
MM
3111{
3112 printf_unfiltered (_("\"set record btrace pt\" must be followed "
3113 "by an appropriate subcommand.\n"));
3114 help_list (set_record_btrace_pt_cmdlist, "set record btrace pt ",
3115 all_commands, gdb_stdout);
3116}
3117
/* The "show record btrace pt" command.  */

static void
cmd_show_record_btrace_pt (const char *args, int from_tty)
{
  /* Delegate to the generic show machinery, which prints every setting
     registered under "show record btrace pt".  */
  cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, "");
}
3125
/* The "record bts buffer-size" show value function.  */

static void
show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
				   struct cmd_list_element *c,
				   const char *value)
{
  /* VALUE is the already-formatted setting string; wrap it in a
     full sentence for display.  */
  fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
		    value);
}
3136
/* The "record pt buffer-size" show value function.  */

static void
show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
				  struct cmd_list_element *c,
				  const char *value)
{
  /* VALUE is the already-formatted setting string; wrap it in a
     full sentence for display.  */
  fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
		    value);
}
3147
afedecd3
MM
3148/* Initialize btrace commands. */
3149
3150void
3151_initialize_record_btrace (void)
3152{
f4abbc16
MM
3153 add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
3154 _("Start branch trace recording."), &record_btrace_cmdlist,
3155 "record btrace ", 0, &record_cmdlist);
afedecd3
MM
3156 add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
3157
f4abbc16
MM
3158 add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
3159 _("\
3160Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
3161The processor stores a from/to record for each branch into a cyclic buffer.\n\
3162This format may not be available on all processors."),
3163 &record_btrace_cmdlist);
3164 add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);
3165
b20a6524
MM
3166 add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
3167 _("\
bc504a31 3168Start branch trace recording in Intel Processor Trace format.\n\n\
b20a6524
MM
3169This format may not be available on all processors."),
3170 &record_btrace_cmdlist);
3171 add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);
3172
67b5c0c1
MM
3173 add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
3174 _("Set record options"), &set_record_btrace_cmdlist,
3175 "set record btrace ", 0, &set_record_cmdlist);
3176
3177 add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
3178 _("Show record options"), &show_record_btrace_cmdlist,
3179 "show record btrace ", 0, &show_record_cmdlist);
3180
3181 add_setshow_enum_cmd ("replay-memory-access", no_class,
3182 replay_memory_access_types, &replay_memory_access, _("\
3183Set what memory accesses are allowed during replay."), _("\
3184Show what memory accesses are allowed during replay."),
3185 _("Default is READ-ONLY.\n\n\
3186The btrace record target does not trace data.\n\
3187The memory therefore corresponds to the live target and not \
3188to the current replay position.\n\n\
3189When READ-ONLY, allow accesses to read-only memory during replay.\n\
3190When READ-WRITE, allow accesses to read-only and read-write memory during \
3191replay."),
3192 NULL, cmd_show_replay_memory_access,
3193 &set_record_btrace_cmdlist,
3194 &show_record_btrace_cmdlist);
3195
4a4495d6
MM
3196 add_prefix_cmd ("cpu", class_support, cmd_set_record_btrace_cpu,
3197 _("\
3198Set the cpu to be used for trace decode.\n\n\
55063ddb
TT
3199The format is \"VENDOR:IDENTIFIER\" or \"none\" or \"auto\" (default).\n\
3200For vendor \"intel\" the format is \"FAMILY/MODEL[/STEPPING]\".\n\n\
4a4495d6
MM
3201When decoding branch trace, enable errata workarounds for the specified cpu.\n\
3202The default is \"auto\", which uses the cpu on which the trace was recorded.\n\
3203When GDB does not support that cpu, this option can be used to enable\n\
3204workarounds for a similar cpu that GDB supports.\n\n\
3205When set to \"none\", errata workarounds are disabled."),
3206 &set_record_btrace_cpu_cmdlist,
3207 _("set record btrace cpu "), 1,
3208 &set_record_btrace_cmdlist);
3209
3210 add_cmd ("auto", class_support, cmd_set_record_btrace_cpu_auto, _("\
3211Automatically determine the cpu to be used for trace decode."),
3212 &set_record_btrace_cpu_cmdlist);
3213
3214 add_cmd ("none", class_support, cmd_set_record_btrace_cpu_none, _("\
3215Do not enable errata workarounds for trace decode."),
3216 &set_record_btrace_cpu_cmdlist);
3217
3218 add_cmd ("cpu", class_support, cmd_show_record_btrace_cpu, _("\
3219Show the cpu to be used for trace decode."),
3220 &show_record_btrace_cmdlist);
3221
d33501a5
MM
3222 add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
3223 _("Set record btrace bts options"),
3224 &set_record_btrace_bts_cmdlist,
3225 "set record btrace bts ", 0, &set_record_btrace_cmdlist);
3226
3227 add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
3228 _("Show record btrace bts options"),
3229 &show_record_btrace_bts_cmdlist,
3230 "show record btrace bts ", 0, &show_record_btrace_cmdlist);
3231
3232 add_setshow_uinteger_cmd ("buffer-size", no_class,
3233 &record_btrace_conf.bts.size,
3234 _("Set the record/replay bts buffer size."),
3235 _("Show the record/replay bts buffer size."), _("\
3236When starting recording request a trace buffer of this size. \
3237The actual buffer size may differ from the requested size. \
3238Use \"info record\" to see the actual buffer size.\n\n\
3239Bigger buffers allow longer recording but also take more time to process \
3240the recorded execution trace.\n\n\
b20a6524
MM
3241The trace buffer size may not be changed while recording."), NULL,
3242 show_record_bts_buffer_size_value,
d33501a5
MM
3243 &set_record_btrace_bts_cmdlist,
3244 &show_record_btrace_bts_cmdlist);
3245
b20a6524
MM
3246 add_prefix_cmd ("pt", class_support, cmd_set_record_btrace_pt,
3247 _("Set record btrace pt options"),
3248 &set_record_btrace_pt_cmdlist,
3249 "set record btrace pt ", 0, &set_record_btrace_cmdlist);
3250
3251 add_prefix_cmd ("pt", class_support, cmd_show_record_btrace_pt,
3252 _("Show record btrace pt options"),
3253 &show_record_btrace_pt_cmdlist,
3254 "show record btrace pt ", 0, &show_record_btrace_cmdlist);
3255
3256 add_setshow_uinteger_cmd ("buffer-size", no_class,
3257 &record_btrace_conf.pt.size,
3258 _("Set the record/replay pt buffer size."),
3259 _("Show the record/replay pt buffer size."), _("\
3260Bigger buffers allow longer recording but also take more time to process \
3261the recorded execution.\n\
3262The actual buffer size may differ from the requested size. Use \"info record\" \
3263to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
3264 &set_record_btrace_pt_cmdlist,
3265 &show_record_btrace_pt_cmdlist);
3266
d9f719f1 3267 add_target (record_btrace_target_info, record_btrace_target_open);
0b722aec
MM
3268
3269 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
3270 xcalloc, xfree);
d33501a5
MM
3271
3272 record_btrace_conf.bts.size = 64 * 1024;
b20a6524 3273 record_btrace_conf.pt.size = 16 * 1024;
afedecd3 3274}
This page took 0.887487 seconds and 4 git commands to generate.