afedecd3
MM
1/* Branch trace support for GDB, the GNU debugger.
2
42a4f53d 3 Copyright (C) 2013-2019 Free Software Foundation, Inc.
afedecd3
MM
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22#include "defs.h"
23#include "record.h"
c0272db5 24#include "record-btrace.h"
afedecd3
MM
25#include "gdbthread.h"
26#include "target.h"
27#include "gdbcmd.h"
28#include "disasm.h"
76727919 29#include "observable.h"
afedecd3
MM
30#include "cli/cli-utils.h"
31#include "source.h"
32#include "ui-out.h"
33#include "symtab.h"
34#include "filenames.h"
1f3ef581 35#include "regcache.h"
cecac1ab 36#include "frame-unwind.h"
0b722aec 37#include "hashtab.h"
45741a9c 38#include "infrun.h"
70ad5bff
MM
39#include "event-loop.h"
40#include "inf-loop.h"
0747795c 41#include "common/vec.h"
00431a78 42#include "inferior.h"
325fac50 43#include <algorithm>
afedecd3 44
d9f719f1
PA
45static const target_info record_btrace_target_info = {
46 "record-btrace",
47 N_("Branch tracing target"),
48 N_("Collect control-flow trace and provide the execution history.")
49};
50
afedecd3 51/* The target_ops of record-btrace. */
f6ac5f3d
PA
52
53class record_btrace_target final : public target_ops
54{
55public:
d9f719f1
PA
56 const target_info &info () const override
57 { return record_btrace_target_info; }
f6ac5f3d 58
66b4deae
PA
59 strata stratum () const override { return record_stratum; }
60
f6ac5f3d
PA
61 void close () override;
62 void async (int) override;
63
64 void detach (inferior *inf, int from_tty) override
65 { record_detach (this, inf, from_tty); }
66
67 void disconnect (const char *, int) override;
68
69 void mourn_inferior () override
70 { record_mourn_inferior (this); }
71
72 void kill () override
73 { record_kill (this); }
74
75 enum record_method record_method (ptid_t ptid) override;
76
77 void stop_recording () override;
78 void info_record () override;
79
80 void insn_history (int size, gdb_disassembly_flags flags) override;
81 void insn_history_from (ULONGEST from, int size,
82 gdb_disassembly_flags flags) override;
83 void insn_history_range (ULONGEST begin, ULONGEST end,
84 gdb_disassembly_flags flags) override;
85 void call_history (int size, record_print_flags flags) override;
86 void call_history_from (ULONGEST begin, int size, record_print_flags flags)
87 override;
88 void call_history_range (ULONGEST begin, ULONGEST end, record_print_flags flags)
89 override;
90
57810aa7
PA
91 bool record_is_replaying (ptid_t ptid) override;
92 bool record_will_replay (ptid_t ptid, int dir) override;
f6ac5f3d
PA
93 void record_stop_replaying () override;
94
95 enum target_xfer_status xfer_partial (enum target_object object,
96 const char *annex,
97 gdb_byte *readbuf,
98 const gdb_byte *writebuf,
99 ULONGEST offset, ULONGEST len,
100 ULONGEST *xfered_len) override;
101
102 int insert_breakpoint (struct gdbarch *,
103 struct bp_target_info *) override;
104 int remove_breakpoint (struct gdbarch *, struct bp_target_info *,
105 enum remove_bp_reason) override;
106
107 void fetch_registers (struct regcache *, int) override;
108
109 void store_registers (struct regcache *, int) override;
110 void prepare_to_store (struct regcache *) override;
111
112 const struct frame_unwind *get_unwinder () override;
113
114 const struct frame_unwind *get_tailcall_unwinder () override;
115
116 void commit_resume () override;
117 void resume (ptid_t, int, enum gdb_signal) override;
118 ptid_t wait (ptid_t, struct target_waitstatus *, int) override;
119
120 void stop (ptid_t) override;
121 void update_thread_list () override;
57810aa7 122 bool thread_alive (ptid_t ptid) override;
f6ac5f3d
PA
123 void goto_record_begin () override;
124 void goto_record_end () override;
125 void goto_record (ULONGEST insn) override;
126
57810aa7 127 bool can_execute_reverse () override;
f6ac5f3d 128
57810aa7
PA
129 bool stopped_by_sw_breakpoint () override;
130 bool supports_stopped_by_sw_breakpoint () override;
f6ac5f3d 131
57810aa7
PA
132 bool stopped_by_hw_breakpoint () override;
133 bool supports_stopped_by_hw_breakpoint () override;
f6ac5f3d
PA
134
135 enum exec_direction_kind execution_direction () override;
136 void prepare_to_generate_core () override;
137 void done_generating_core () override;
138};
139
140static record_btrace_target record_btrace_ops;
141
142/* Initialize the record-btrace target ops. */
afedecd3 143
76727919
TT
144/* Token associated with a new-thread observer enabling branch tracing
145 for the new thread. */
3dcfdc58 146static const gdb::observers::token record_btrace_thread_observer_token {};
afedecd3 147
67b5c0c1
MM
148/* Memory access types used in set/show record btrace replay-memory-access. */
149static const char replay_memory_access_read_only[] = "read-only";
150static const char replay_memory_access_read_write[] = "read-write";
151static const char *const replay_memory_access_types[] =
152{
153 replay_memory_access_read_only,
154 replay_memory_access_read_write,
155 NULL
156};
157
158/* The currently allowed replay memory access type. */
159static const char *replay_memory_access = replay_memory_access_read_only;
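/* Illustrative example (not part of the original file): the user switches
   this setting with

     (gdb) set record btrace replay-memory-access read-write

   which allows the xfer_partial method below to forward memory writes to
   the target even while replaying.  */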
160
4a4495d6
MM
161/* The cpu state kinds. */
162enum record_btrace_cpu_state_kind
163{
164 CS_AUTO,
165 CS_NONE,
166 CS_CPU
167};
168
169/* The current cpu state. */
170static enum record_btrace_cpu_state_kind record_btrace_cpu_state = CS_AUTO;
171
172/* The current cpu for trace decode. */
173static struct btrace_cpu record_btrace_cpu;
174
67b5c0c1
MM
175/* Command lists for "set/show record btrace". */
176static struct cmd_list_element *set_record_btrace_cmdlist;
177static struct cmd_list_element *show_record_btrace_cmdlist;
633785ff 178
70ad5bff
MM
179/* The execution direction of the last resume we got. See record-full.c. */
180static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;
181
182/* The async event handler for reverse/replay execution. */
183static struct async_event_handler *record_btrace_async_inferior_event_handler;
184
aef92902
MM
185/* A flag indicating that we are currently generating a core file. */
186static int record_btrace_generating_corefile;
187
f4abbc16
MM
188/* The current branch trace configuration. */
189static struct btrace_config record_btrace_conf;
190
191/* Command list for "record btrace". */
192static struct cmd_list_element *record_btrace_cmdlist;
193
d33501a5
MM
194/* Command lists for "set/show record btrace bts". */
195static struct cmd_list_element *set_record_btrace_bts_cmdlist;
196static struct cmd_list_element *show_record_btrace_bts_cmdlist;
197
b20a6524
MM
198/* Command lists for "set/show record btrace pt". */
199static struct cmd_list_element *set_record_btrace_pt_cmdlist;
200static struct cmd_list_element *show_record_btrace_pt_cmdlist;
201
4a4495d6
MM
202/* Command list for "set record btrace cpu". */
203static struct cmd_list_element *set_record_btrace_cpu_cmdlist;
204
afedecd3
MM
205/* Print a record-btrace debug message. Use do ... while (0) to avoid
206 ambiguities when used in if statements. */
207
208#define DEBUG(msg, args...) \
209 do \
210 { \
211 if (record_debug != 0) \
212 fprintf_unfiltered (gdb_stdlog, \
213 "[record-btrace] " msg "\n", ##args); \
214 } \
215 while (0)
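/* Example use (illustrative, not from the original sources):

     DEBUG ("resuming thread %s", print_thread_id (tp));

   prints "[record-btrace] resuming thread ..." to gdb_stdlog whenever record
   debugging has been enabled, e.g. with "set debug record 1".  */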
216
217
4a4495d6
MM
218/* Return the cpu configured by the user. Returns NULL if the cpu was
219 configured as auto. */
220const struct btrace_cpu *
221record_btrace_get_cpu (void)
222{
223 switch (record_btrace_cpu_state)
224 {
225 case CS_AUTO:
226 return nullptr;
227
228 case CS_NONE:
229 record_btrace_cpu.vendor = CV_UNKNOWN;
230 /* Fall through. */
231 case CS_CPU:
232 return &record_btrace_cpu;
233 }
234
235 error (_("Internal error: bad record btrace cpu state."));
236}
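/* Illustrative note (assumption, not from the original file): the state
   above tracks the "set record btrace cpu" setting; CS_AUTO leaves cpu
   detection to the trace decoder, CS_NONE disables cpu-specific decode
   workarounds, and CS_CPU means an explicitly specified processor is held
   in record_btrace_cpu.  */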
237
afedecd3 238/* Update the branch trace for the current thread and return a pointer to its
066ce621 239 thread_info.
afedecd3
MM
240
241 Throws an error if there is no thread or no trace. This function never
242 returns NULL. */
243
066ce621
MM
244static struct thread_info *
245require_btrace_thread (void)
afedecd3 246{
afedecd3
MM
247 DEBUG ("require");
248
00431a78 249 if (inferior_ptid == null_ptid)
afedecd3
MM
250 error (_("No thread."));
251
00431a78
PA
252 thread_info *tp = inferior_thread ();
253
cd4007e4
MM
254 validate_registers_access ();
255
4a4495d6 256 btrace_fetch (tp, record_btrace_get_cpu ());
afedecd3 257
6e07b1d2 258 if (btrace_is_empty (tp))
afedecd3
MM
259 error (_("No trace."));
260
066ce621
MM
261 return tp;
262}
263
264/* Update the branch trace for the current thread and return a pointer to its
265 branch trace information struct.
266
267 Throws an error if there is no thread or no trace. This function never
268 returns NULL. */
269
270static struct btrace_thread_info *
271require_btrace (void)
272{
273 struct thread_info *tp;
274
275 tp = require_btrace_thread ();
276
277 return &tp->btrace;
afedecd3
MM
278}
279
280/* Enable branch tracing for one thread. Warn on errors. */
281
282static void
283record_btrace_enable_warn (struct thread_info *tp)
284{
492d29ea
PA
285 TRY
286 {
287 btrace_enable (tp, &record_btrace_conf);
288 }
289 CATCH (error, RETURN_MASK_ERROR)
290 {
291 warning ("%s", error.message);
292 }
293 END_CATCH
afedecd3
MM
294}
295
afedecd3
MM
296/* Enable automatic tracing of new threads. */
297
298static void
299record_btrace_auto_enable (void)
300{
301 DEBUG ("attach thread observer");
302
76727919
TT
303 gdb::observers::new_thread.attach (record_btrace_enable_warn,
304 record_btrace_thread_observer_token);
afedecd3
MM
305}
306
307/* Disable automatic tracing of new threads. */
308
309static void
310record_btrace_auto_disable (void)
311{
afedecd3
MM
312 DEBUG ("detach thread observer");
313
76727919 314 gdb::observers::new_thread.detach (record_btrace_thread_observer_token);
afedecd3
MM
315}
316
70ad5bff
MM
317/* The record-btrace async event handler function. */
318
319static void
320record_btrace_handle_async_inferior_event (gdb_client_data data)
321{
322 inferior_event_handler (INF_REG_EVENT, NULL);
323}
324
c0272db5
TW
325/* See record-btrace.h. */
326
327void
328record_btrace_push_target (void)
329{
330 const char *format;
331
332 record_btrace_auto_enable ();
333
334 push_target (&record_btrace_ops);
335
336 record_btrace_async_inferior_event_handler
337 = create_async_event_handler (record_btrace_handle_async_inferior_event,
338 NULL);
339 record_btrace_generating_corefile = 0;
340
341 format = btrace_format_short_string (record_btrace_conf.format);
76727919 342 gdb::observers::record_changed.notify (current_inferior (), 1, "btrace", format);
c0272db5
TW
343}
344
228f1508
SM
345/* Disable btrace on a set of threads on scope exit. */
346
347struct scoped_btrace_disable
348{
349 scoped_btrace_disable () = default;
350
351 DISABLE_COPY_AND_ASSIGN (scoped_btrace_disable);
352
353 ~scoped_btrace_disable ()
354 {
355 for (thread_info *tp : m_threads)
356 btrace_disable (tp);
357 }
358
359 void add_thread (thread_info *thread)
360 {
361 m_threads.push_front (thread);
362 }
363
364 void discard ()
365 {
366 m_threads.clear ();
367 }
368
369private:
370 std::forward_list<thread_info *> m_threads;
371};
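/* Illustrative note (not in the original sources): the intended use, visible
   in record_btrace_target_open below, is

     scoped_btrace_disable btrace_disable;
     // for each selected thread tp:
     //   btrace_enable (tp, &record_btrace_conf);
     //   btrace_disable.add_thread (tp);
     // ...push the record-btrace target...
     btrace_disable.discard ();

   so that an error thrown anywhere before discard () disables btrace again
   for every thread on which it had already been enabled.  */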
372
d9f719f1 373/* Open target record-btrace. */
afedecd3 374
d9f719f1
PA
375static void
376record_btrace_target_open (const char *args, int from_tty)
afedecd3 377{
228f1508
SM
378 /* If we fail to enable btrace for one thread, disable it for the threads for
379 which it was successfully enabled. */
380 scoped_btrace_disable btrace_disable;
afedecd3
MM
381
382 DEBUG ("open");
383
8213266a 384 record_preopen ();
afedecd3
MM
385
386 if (!target_has_execution)
387 error (_("The program is not being run."));
388
08036331 389 for (thread_info *tp : all_non_exited_threads ())
5d5658a1 390 if (args == NULL || *args == 0 || number_is_in_list (args, tp->global_num))
afedecd3 391 {
f4abbc16 392 btrace_enable (tp, &record_btrace_conf);
afedecd3 393
228f1508 394 btrace_disable.add_thread (tp);
afedecd3
MM
395 }
396
c0272db5 397 record_btrace_push_target ();
afedecd3 398
228f1508 399 btrace_disable.discard ();
afedecd3
MM
400}
401
f6ac5f3d 402/* The stop_recording method of target record-btrace. */
afedecd3 403
f6ac5f3d
PA
404void
405record_btrace_target::stop_recording ()
afedecd3 406{
afedecd3
MM
407 DEBUG ("stop recording");
408
409 record_btrace_auto_disable ();
410
08036331 411 for (thread_info *tp : all_non_exited_threads ())
afedecd3
MM
412 if (tp->btrace.target != NULL)
413 btrace_disable (tp);
414}
415
f6ac5f3d 416/* The disconnect method of target record-btrace. */
c0272db5 417
f6ac5f3d
PA
418void
419record_btrace_target::disconnect (const char *args,
420 int from_tty)
c0272db5 421{
b6a8c27b 422 struct target_ops *beneath = this->beneath ();
c0272db5
TW
423
424 /* Do not stop recording, just clean up GDB side. */
f6ac5f3d 425 unpush_target (this);
c0272db5
TW
426
427 /* Forward disconnect. */
f6ac5f3d 428 beneath->disconnect (args, from_tty);
c0272db5
TW
429}
430
f6ac5f3d 431/* The close method of target record-btrace. */
afedecd3 432
f6ac5f3d
PA
433void
434record_btrace_target::close ()
afedecd3 435{
70ad5bff
MM
436 if (record_btrace_async_inferior_event_handler != NULL)
437 delete_async_event_handler (&record_btrace_async_inferior_event_handler);
438
99c819ee
MM
439 /* Make sure automatic recording gets disabled even if we did not stop
440 recording before closing the record-btrace target. */
441 record_btrace_auto_disable ();
442
568e808b
MM
443 /* We should have already stopped recording.
444 Tear down btrace in case we have not. */
08036331 445 for (thread_info *tp : all_non_exited_threads ())
568e808b 446 btrace_teardown (tp);
afedecd3
MM
447}
448
f6ac5f3d 449/* The async method of target record-btrace. */
b7d2e916 450
f6ac5f3d
PA
451void
452record_btrace_target::async (int enable)
b7d2e916 453{
6a3753b3 454 if (enable)
b7d2e916
PA
455 mark_async_event_handler (record_btrace_async_inferior_event_handler);
456 else
457 clear_async_event_handler (record_btrace_async_inferior_event_handler);
458
b6a8c27b 459 this->beneath ()->async (enable);
b7d2e916
PA
460}
461
d33501a5
MM
 462/* Adjusts the size and returns a human-readable size suffix. */
463
464static const char *
465record_btrace_adjust_size (unsigned int *size)
466{
467 unsigned int sz;
468
469 sz = *size;
470
471 if ((sz & ((1u << 30) - 1)) == 0)
472 {
473 *size = sz >> 30;
474 return "GB";
475 }
476 else if ((sz & ((1u << 20) - 1)) == 0)
477 {
478 *size = sz >> 20;
479 return "MB";
480 }
481 else if ((sz & ((1u << 10) - 1)) == 0)
482 {
483 *size = sz >> 10;
484 return "kB";
485 }
486 else
487 return "";
488}
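/* Worked example (illustrative, not from the original sources): a buffer
   size of 2097152 bytes has its low 20 bits clear, so *size becomes 2 and
   "MB" is returned; 65536 yields 64 and "kB"; a size that is not a multiple
   of 1024 is left unchanged and the empty suffix is returned.  */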
489
490/* Print a BTS configuration. */
491
492static void
493record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
494{
495 const char *suffix;
496 unsigned int size;
497
498 size = conf->size;
499 if (size > 0)
500 {
501 suffix = record_btrace_adjust_size (&size);
502 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
503 }
504}
505
bc504a31 506/* Print an Intel Processor Trace configuration. */
b20a6524
MM
507
508static void
509record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
510{
511 const char *suffix;
512 unsigned int size;
513
514 size = conf->size;
515 if (size > 0)
516 {
517 suffix = record_btrace_adjust_size (&size);
518 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
519 }
520}
521
d33501a5
MM
522/* Print a branch tracing configuration. */
523
524static void
525record_btrace_print_conf (const struct btrace_config *conf)
526{
527 printf_unfiltered (_("Recording format: %s.\n"),
528 btrace_format_string (conf->format));
529
530 switch (conf->format)
531 {
532 case BTRACE_FORMAT_NONE:
533 return;
534
535 case BTRACE_FORMAT_BTS:
536 record_btrace_print_bts_conf (&conf->bts);
537 return;
b20a6524
MM
538
539 case BTRACE_FORMAT_PT:
540 record_btrace_print_pt_conf (&conf->pt);
541 return;
d33501a5
MM
542 }
543
 544 internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
545}
546
f6ac5f3d 547/* The info_record method of target record-btrace. */
afedecd3 548
f6ac5f3d
PA
549void
550record_btrace_target::info_record ()
afedecd3
MM
551{
552 struct btrace_thread_info *btinfo;
f4abbc16 553 const struct btrace_config *conf;
afedecd3 554 struct thread_info *tp;
31fd9caa 555 unsigned int insns, calls, gaps;
afedecd3
MM
556
557 DEBUG ("info");
558
559 tp = find_thread_ptid (inferior_ptid);
560 if (tp == NULL)
561 error (_("No thread."));
562
cd4007e4
MM
563 validate_registers_access ();
564
f4abbc16
MM
565 btinfo = &tp->btrace;
566
f6ac5f3d 567 conf = ::btrace_conf (btinfo);
f4abbc16 568 if (conf != NULL)
d33501a5 569 record_btrace_print_conf (conf);
f4abbc16 570
4a4495d6 571 btrace_fetch (tp, record_btrace_get_cpu ());
afedecd3 572
23a7fe75
MM
573 insns = 0;
574 calls = 0;
31fd9caa 575 gaps = 0;
23a7fe75 576
6e07b1d2 577 if (!btrace_is_empty (tp))
23a7fe75
MM
578 {
579 struct btrace_call_iterator call;
580 struct btrace_insn_iterator insn;
581
582 btrace_call_end (&call, btinfo);
583 btrace_call_prev (&call, 1);
5de9129b 584 calls = btrace_call_number (&call);
23a7fe75
MM
585
586 btrace_insn_end (&insn, btinfo);
5de9129b 587 insns = btrace_insn_number (&insn);
31fd9caa 588
69090cee
TW
589 /* If the last instruction is not a gap, it is the current instruction
590 that is not actually part of the record. */
591 if (btrace_insn_get (&insn) != NULL)
592 insns -= 1;
31fd9caa
MM
593
594 gaps = btinfo->ngaps;
23a7fe75 595 }
afedecd3 596
31fd9caa 597 printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
43792cf0 598 "for thread %s (%s).\n"), insns, calls, gaps,
a068643d
TT
599 print_thread_id (tp),
600 target_pid_to_str (tp->ptid).c_str ());
07bbe694
MM
601
602 if (btrace_is_replaying (tp))
603 printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
604 btrace_insn_number (btinfo->replay));
afedecd3
MM
605}
606
31fd9caa
MM
607/* Print a decode error. */
608
609static void
610btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
611 enum btrace_format format)
612{
508352a9 613 const char *errstr = btrace_decode_error (format, errcode);
31fd9caa 614
112e8700 615 uiout->text (_("["));
508352a9
TW
616 /* ERRCODE > 0 indicates notifications on BTRACE_FORMAT_PT. */
617 if (!(format == BTRACE_FORMAT_PT && errcode > 0))
31fd9caa 618 {
112e8700
SM
619 uiout->text (_("decode error ("));
620 uiout->field_int ("errcode", errcode);
621 uiout->text (_("): "));
31fd9caa 622 }
112e8700
SM
623 uiout->text (errstr);
624 uiout->text (_("]\n"));
31fd9caa
MM
625}
626
afedecd3
MM
627/* Print an unsigned int. */
628
629static void
630ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
631{
112e8700 632 uiout->field_fmt (fld, "%u", val);
afedecd3
MM
633}
634
f94cc897
MM
635/* A range of source lines. */
636
637struct btrace_line_range
638{
639 /* The symtab this line is from. */
640 struct symtab *symtab;
641
642 /* The first line (inclusive). */
643 int begin;
644
645 /* The last line (exclusive). */
646 int end;
647};
648
649/* Construct a line range. */
650
651static struct btrace_line_range
652btrace_mk_line_range (struct symtab *symtab, int begin, int end)
653{
654 struct btrace_line_range range;
655
656 range.symtab = symtab;
657 range.begin = begin;
658 range.end = end;
659
660 return range;
661}
662
663/* Add a line to a line range. */
664
665static struct btrace_line_range
666btrace_line_range_add (struct btrace_line_range range, int line)
667{
668 if (range.end <= range.begin)
669 {
670 /* This is the first entry. */
671 range.begin = line;
672 range.end = line + 1;
673 }
674 else if (line < range.begin)
675 range.begin = line;
676 else if (range.end < line)
677 range.end = line;
678
679 return range;
680}
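/* Example (illustrative): starting from btrace_mk_line_range (symtab, 0, 0),
   adding line 10 yields the range [10, 11), and adding line 7 afterwards
   extends it to [7, 11).  */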
681
682/* Return non-zero if RANGE is empty, zero otherwise. */
683
684static int
685btrace_line_range_is_empty (struct btrace_line_range range)
686{
687 return range.end <= range.begin;
688}
689
690/* Return non-zero if LHS contains RHS, zero otherwise. */
691
692static int
693btrace_line_range_contains_range (struct btrace_line_range lhs,
694 struct btrace_line_range rhs)
695{
696 return ((lhs.symtab == rhs.symtab)
697 && (lhs.begin <= rhs.begin)
698 && (rhs.end <= lhs.end));
699}
700
701/* Find the line range associated with PC. */
702
703static struct btrace_line_range
704btrace_find_line_range (CORE_ADDR pc)
705{
706 struct btrace_line_range range;
707 struct linetable_entry *lines;
708 struct linetable *ltable;
709 struct symtab *symtab;
710 int nlines, i;
711
712 symtab = find_pc_line_symtab (pc);
713 if (symtab == NULL)
714 return btrace_mk_line_range (NULL, 0, 0);
715
716 ltable = SYMTAB_LINETABLE (symtab);
717 if (ltable == NULL)
718 return btrace_mk_line_range (symtab, 0, 0);
719
720 nlines = ltable->nitems;
721 lines = ltable->item;
722 if (nlines <= 0)
723 return btrace_mk_line_range (symtab, 0, 0);
724
725 range = btrace_mk_line_range (symtab, 0, 0);
726 for (i = 0; i < nlines - 1; i++)
727 {
728 if ((lines[i].pc == pc) && (lines[i].line != 0))
729 range = btrace_line_range_add (range, lines[i].line);
730 }
731
732 return range;
733}
734
735/* Print source lines in LINES to UIOUT.
736
 737 SRC_AND_ASM_TUPLE and ASM_LIST hold the emitters for the last source line
 738 and the instructions corresponding to that source line. When printing a
 739 new source line, we reset the open emitters and open new ones for the new
 740 source line. If the source line range in LINES is not empty, this
 741 function will leave the emitters for the last printed source line open
 742 so instructions can be added to them. */
743
744static void
745btrace_print_lines (struct btrace_line_range lines, struct ui_out *uiout,
7ea78b59
SM
746 gdb::optional<ui_out_emit_tuple> *src_and_asm_tuple,
747 gdb::optional<ui_out_emit_list> *asm_list,
748 gdb_disassembly_flags flags)
f94cc897 749{
8d297bbf 750 print_source_lines_flags psl_flags;
f94cc897 751
f94cc897
MM
752 if (flags & DISASSEMBLY_FILENAME)
753 psl_flags |= PRINT_SOURCE_LINES_FILENAME;
754
7ea78b59 755 for (int line = lines.begin; line < lines.end; ++line)
f94cc897 756 {
7ea78b59 757 asm_list->reset ();
f94cc897 758
7ea78b59 759 src_and_asm_tuple->emplace (uiout, "src_and_asm_line");
f94cc897
MM
760
761 print_source_lines (lines.symtab, line, line + 1, psl_flags);
762
7ea78b59 763 asm_list->emplace (uiout, "line_asm_insn");
f94cc897
MM
764 }
765}
766
afedecd3
MM
767/* Disassemble a section of the recorded instruction trace. */
768
769static void
23a7fe75 770btrace_insn_history (struct ui_out *uiout,
31fd9caa 771 const struct btrace_thread_info *btinfo,
23a7fe75 772 const struct btrace_insn_iterator *begin,
9a24775b
PA
773 const struct btrace_insn_iterator *end,
774 gdb_disassembly_flags flags)
afedecd3 775{
9a24775b
PA
776 DEBUG ("itrace (0x%x): [%u; %u)", (unsigned) flags,
777 btrace_insn_number (begin), btrace_insn_number (end));
afedecd3 778
f94cc897
MM
779 flags |= DISASSEMBLY_SPECULATIVE;
780
7ea78b59
SM
781 struct gdbarch *gdbarch = target_gdbarch ();
782 btrace_line_range last_lines = btrace_mk_line_range (NULL, 0, 0);
f94cc897 783
7ea78b59 784 ui_out_emit_list list_emitter (uiout, "asm_insns");
f94cc897 785
7ea78b59
SM
786 gdb::optional<ui_out_emit_tuple> src_and_asm_tuple;
787 gdb::optional<ui_out_emit_list> asm_list;
afedecd3 788
8b172ce7
PA
789 gdb_pretty_print_disassembler disasm (gdbarch);
790
7ea78b59
SM
791 for (btrace_insn_iterator it = *begin; btrace_insn_cmp (&it, end) != 0;
792 btrace_insn_next (&it, 1))
afedecd3 793 {
23a7fe75
MM
794 const struct btrace_insn *insn;
795
796 insn = btrace_insn_get (&it);
797
31fd9caa
MM
798 /* A NULL instruction indicates a gap in the trace. */
799 if (insn == NULL)
800 {
801 const struct btrace_config *conf;
802
803 conf = btrace_conf (btinfo);
afedecd3 804
31fd9caa
MM
805 /* We have trace so we must have a configuration. */
806 gdb_assert (conf != NULL);
807
69090cee
TW
808 uiout->field_fmt ("insn-number", "%u",
809 btrace_insn_number (&it));
810 uiout->text ("\t");
811
812 btrace_ui_out_decode_error (uiout, btrace_insn_get_error (&it),
31fd9caa
MM
813 conf->format);
814 }
815 else
816 {
f94cc897 817 struct disasm_insn dinsn;
da8c46d2 818
f94cc897 819 if ((flags & DISASSEMBLY_SOURCE) != 0)
da8c46d2 820 {
f94cc897
MM
821 struct btrace_line_range lines;
822
823 lines = btrace_find_line_range (insn->pc);
824 if (!btrace_line_range_is_empty (lines)
825 && !btrace_line_range_contains_range (last_lines, lines))
826 {
7ea78b59
SM
827 btrace_print_lines (lines, uiout, &src_and_asm_tuple, &asm_list,
828 flags);
f94cc897
MM
829 last_lines = lines;
830 }
7ea78b59 831 else if (!src_and_asm_tuple.has_value ())
f94cc897 832 {
7ea78b59
SM
833 gdb_assert (!asm_list.has_value ());
834
835 src_and_asm_tuple.emplace (uiout, "src_and_asm_line");
836
f94cc897 837 /* No source information. */
7ea78b59 838 asm_list.emplace (uiout, "line_asm_insn");
f94cc897
MM
839 }
840
7ea78b59
SM
841 gdb_assert (src_and_asm_tuple.has_value ());
842 gdb_assert (asm_list.has_value ());
da8c46d2 843 }
da8c46d2 844
f94cc897
MM
845 memset (&dinsn, 0, sizeof (dinsn));
846 dinsn.number = btrace_insn_number (&it);
847 dinsn.addr = insn->pc;
31fd9caa 848
da8c46d2 849 if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
f94cc897 850 dinsn.is_speculative = 1;
da8c46d2 851
8b172ce7 852 disasm.pretty_print_insn (uiout, &dinsn, flags);
31fd9caa 853 }
afedecd3
MM
854 }
855}
856
f6ac5f3d 857/* The insn_history method of target record-btrace. */
afedecd3 858
f6ac5f3d
PA
859void
860record_btrace_target::insn_history (int size, gdb_disassembly_flags flags)
afedecd3
MM
861{
862 struct btrace_thread_info *btinfo;
23a7fe75
MM
863 struct btrace_insn_history *history;
864 struct btrace_insn_iterator begin, end;
afedecd3 865 struct ui_out *uiout;
23a7fe75 866 unsigned int context, covered;
afedecd3
MM
867
868 uiout = current_uiout;
2e783024 869 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
afedecd3 870 context = abs (size);
afedecd3
MM
871 if (context == 0)
872 error (_("Bad record instruction-history-size."));
873
23a7fe75
MM
874 btinfo = require_btrace ();
875 history = btinfo->insn_history;
876 if (history == NULL)
afedecd3 877 {
07bbe694 878 struct btrace_insn_iterator *replay;
afedecd3 879
9a24775b 880 DEBUG ("insn-history (0x%x): %d", (unsigned) flags, size);
afedecd3 881
07bbe694
MM
882 /* If we're replaying, we start at the replay position. Otherwise, we
883 start at the tail of the trace. */
884 replay = btinfo->replay;
885 if (replay != NULL)
886 begin = *replay;
887 else
888 btrace_insn_end (&begin, btinfo);
889
890 /* We start from here and expand in the requested direction. Then we
891 expand in the other direction, as well, to fill up any remaining
892 context. */
893 end = begin;
894 if (size < 0)
895 {
896 /* We want the current position covered, as well. */
897 covered = btrace_insn_next (&end, 1);
898 covered += btrace_insn_prev (&begin, context - covered);
899 covered += btrace_insn_next (&end, context - covered);
900 }
901 else
902 {
903 covered = btrace_insn_next (&end, context);
904 covered += btrace_insn_prev (&begin, context - covered);
905 }
afedecd3
MM
906 }
907 else
908 {
23a7fe75
MM
909 begin = history->begin;
910 end = history->end;
afedecd3 911
9a24775b 912 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", (unsigned) flags, size,
23a7fe75 913 btrace_insn_number (&begin), btrace_insn_number (&end));
afedecd3 914
23a7fe75
MM
915 if (size < 0)
916 {
917 end = begin;
918 covered = btrace_insn_prev (&begin, context);
919 }
920 else
921 {
922 begin = end;
923 covered = btrace_insn_next (&end, context);
924 }
afedecd3
MM
925 }
926
23a7fe75 927 if (covered > 0)
31fd9caa 928 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
23a7fe75
MM
929 else
930 {
931 if (size < 0)
932 printf_unfiltered (_("At the start of the branch trace record.\n"));
933 else
934 printf_unfiltered (_("At the end of the branch trace record.\n"));
935 }
afedecd3 936
23a7fe75 937 btrace_set_insn_history (btinfo, &begin, &end);
afedecd3
MM
938}
939
f6ac5f3d 940/* The insn_history_range method of target record-btrace. */
afedecd3 941
f6ac5f3d
PA
942void
943record_btrace_target::insn_history_range (ULONGEST from, ULONGEST to,
944 gdb_disassembly_flags flags)
afedecd3
MM
945{
946 struct btrace_thread_info *btinfo;
23a7fe75 947 struct btrace_insn_iterator begin, end;
afedecd3 948 struct ui_out *uiout;
23a7fe75
MM
949 unsigned int low, high;
950 int found;
afedecd3
MM
951
952 uiout = current_uiout;
2e783024 953 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
23a7fe75
MM
954 low = from;
955 high = to;
afedecd3 956
9a24775b 957 DEBUG ("insn-history (0x%x): [%u; %u)", (unsigned) flags, low, high);
afedecd3
MM
958
959 /* Check for wrap-arounds. */
23a7fe75 960 if (low != from || high != to)
afedecd3
MM
961 error (_("Bad range."));
962
0688d04e 963 if (high < low)
afedecd3
MM
964 error (_("Bad range."));
965
23a7fe75 966 btinfo = require_btrace ();
afedecd3 967
23a7fe75
MM
968 found = btrace_find_insn_by_number (&begin, btinfo, low);
969 if (found == 0)
970 error (_("Range out of bounds."));
afedecd3 971
23a7fe75
MM
972 found = btrace_find_insn_by_number (&end, btinfo, high);
973 if (found == 0)
0688d04e
MM
974 {
975 /* Silently truncate the range. */
976 btrace_insn_end (&end, btinfo);
977 }
978 else
979 {
980 /* We want both begin and end to be inclusive. */
981 btrace_insn_next (&end, 1);
982 }
afedecd3 983
31fd9caa 984 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
23a7fe75 985 btrace_set_insn_history (btinfo, &begin, &end);
afedecd3
MM
986}
987
f6ac5f3d 988/* The insn_history_from method of target record-btrace. */
afedecd3 989
f6ac5f3d
PA
990void
991record_btrace_target::insn_history_from (ULONGEST from, int size,
992 gdb_disassembly_flags flags)
afedecd3
MM
993{
994 ULONGEST begin, end, context;
995
996 context = abs (size);
0688d04e
MM
997 if (context == 0)
998 error (_("Bad record instruction-history-size."));
afedecd3
MM
999
1000 if (size < 0)
1001 {
1002 end = from;
1003
1004 if (from < context)
1005 begin = 0;
1006 else
0688d04e 1007 begin = from - context + 1;
afedecd3
MM
1008 }
1009 else
1010 {
1011 begin = from;
0688d04e 1012 end = from + context - 1;
afedecd3
MM
1013
1014 /* Check for wrap-around. */
1015 if (end < begin)
1016 end = ULONGEST_MAX;
1017 }
1018
f6ac5f3d 1019 insn_history_range (begin, end, flags);
afedecd3
MM
1020}
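/* Worked example (illustrative, not from the original sources): with
   FROM = 10 and SIZE = -3, the code above computes BEGIN = 8 and END = 10,
   i.e. the three instructions 8, 9, and 10 ending at FROM.  */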
1021
1022/* Print the instruction number range for a function call history line. */
1023
1024static void
23a7fe75
MM
1025btrace_call_history_insn_range (struct ui_out *uiout,
1026 const struct btrace_function *bfun)
afedecd3 1027{
7acbe133
MM
1028 unsigned int begin, end, size;
1029
0860c437 1030 size = bfun->insn.size ();
7acbe133 1031 gdb_assert (size > 0);
afedecd3 1032
23a7fe75 1033 begin = bfun->insn_offset;
7acbe133 1034 end = begin + size - 1;
afedecd3 1035
23a7fe75 1036 ui_out_field_uint (uiout, "insn begin", begin);
112e8700 1037 uiout->text (",");
23a7fe75 1038 ui_out_field_uint (uiout, "insn end", end);
afedecd3
MM
1039}
1040
ce0dfbea
MM
1041/* Compute the lowest and highest source line for the instructions in BFUN
1042 and return them in PBEGIN and PEND.
1043 Ignore instructions that can't be mapped to BFUN, e.g. instructions that
1044 result from inlining or macro expansion. */
1045
1046static void
1047btrace_compute_src_line_range (const struct btrace_function *bfun,
1048 int *pbegin, int *pend)
1049{
ce0dfbea
MM
1050 struct symtab *symtab;
1051 struct symbol *sym;
ce0dfbea
MM
1052 int begin, end;
1053
1054 begin = INT_MAX;
1055 end = INT_MIN;
1056
1057 sym = bfun->sym;
1058 if (sym == NULL)
1059 goto out;
1060
1061 symtab = symbol_symtab (sym);
1062
0860c437 1063 for (const btrace_insn &insn : bfun->insn)
ce0dfbea
MM
1064 {
1065 struct symtab_and_line sal;
1066
0860c437 1067 sal = find_pc_line (insn.pc, 0);
ce0dfbea
MM
1068 if (sal.symtab != symtab || sal.line == 0)
1069 continue;
1070
325fac50
PA
1071 begin = std::min (begin, sal.line);
1072 end = std::max (end, sal.line);
ce0dfbea
MM
1073 }
1074
1075 out:
1076 *pbegin = begin;
1077 *pend = end;
1078}
1079
afedecd3
MM
1080/* Print the source line information for a function call history line. */
1081
1082static void
23a7fe75
MM
1083btrace_call_history_src_line (struct ui_out *uiout,
1084 const struct btrace_function *bfun)
afedecd3
MM
1085{
1086 struct symbol *sym;
23a7fe75 1087 int begin, end;
afedecd3
MM
1088
1089 sym = bfun->sym;
1090 if (sym == NULL)
1091 return;
1092
112e8700 1093 uiout->field_string ("file",
cbe56571
TT
1094 symtab_to_filename_for_display (symbol_symtab (sym)),
1095 ui_out_style_kind::FILE);
afedecd3 1096
ce0dfbea 1097 btrace_compute_src_line_range (bfun, &begin, &end);
23a7fe75 1098 if (end < begin)
afedecd3
MM
1099 return;
1100
112e8700
SM
1101 uiout->text (":");
1102 uiout->field_int ("min line", begin);
afedecd3 1103
23a7fe75 1104 if (end == begin)
afedecd3
MM
1105 return;
1106
112e8700
SM
1107 uiout->text (",");
1108 uiout->field_int ("max line", end);
afedecd3
MM
1109}
1110
0b722aec
MM
1111/* Get the name of a branch trace function. */
1112
1113static const char *
1114btrace_get_bfun_name (const struct btrace_function *bfun)
1115{
1116 struct minimal_symbol *msym;
1117 struct symbol *sym;
1118
1119 if (bfun == NULL)
1120 return "??";
1121
1122 msym = bfun->msym;
1123 sym = bfun->sym;
1124
1125 if (sym != NULL)
1126 return SYMBOL_PRINT_NAME (sym);
1127 else if (msym != NULL)
efd66ac6 1128 return MSYMBOL_PRINT_NAME (msym);
0b722aec
MM
1129 else
1130 return "??";
1131}
1132
afedecd3
MM
 1133/* Print a section of the recorded function call trace. */
1134
1135static void
23a7fe75 1136btrace_call_history (struct ui_out *uiout,
8710b709 1137 const struct btrace_thread_info *btinfo,
23a7fe75
MM
1138 const struct btrace_call_iterator *begin,
1139 const struct btrace_call_iterator *end,
8d297bbf 1140 int int_flags)
afedecd3 1141{
23a7fe75 1142 struct btrace_call_iterator it;
8d297bbf 1143 record_print_flags flags = (enum record_print_flag) int_flags;
afedecd3 1144
8d297bbf 1145 DEBUG ("ftrace (0x%x): [%u; %u)", int_flags, btrace_call_number (begin),
23a7fe75 1146 btrace_call_number (end));
afedecd3 1147
23a7fe75 1148 for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
afedecd3 1149 {
23a7fe75
MM
1150 const struct btrace_function *bfun;
1151 struct minimal_symbol *msym;
1152 struct symbol *sym;
1153
1154 bfun = btrace_call_get (&it);
23a7fe75 1155 sym = bfun->sym;
0b722aec 1156 msym = bfun->msym;
23a7fe75 1157
afedecd3 1158 /* Print the function index. */
23a7fe75 1159 ui_out_field_uint (uiout, "index", bfun->number);
112e8700 1160 uiout->text ("\t");
afedecd3 1161
31fd9caa
MM
1162 /* Indicate gaps in the trace. */
1163 if (bfun->errcode != 0)
1164 {
1165 const struct btrace_config *conf;
1166
1167 conf = btrace_conf (btinfo);
1168
1169 /* We have trace so we must have a configuration. */
1170 gdb_assert (conf != NULL);
1171
1172 btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);
1173
1174 continue;
1175 }
1176
8710b709
MM
1177 if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
1178 {
1179 int level = bfun->level + btinfo->level, i;
1180
1181 for (i = 0; i < level; ++i)
112e8700 1182 uiout->text (" ");
8710b709
MM
1183 }
1184
1185 if (sym != NULL)
cbe56571
TT
1186 uiout->field_string ("function", SYMBOL_PRINT_NAME (sym),
1187 ui_out_style_kind::FUNCTION);
8710b709 1188 else if (msym != NULL)
cbe56571
TT
1189 uiout->field_string ("function", MSYMBOL_PRINT_NAME (msym),
1190 ui_out_style_kind::FUNCTION);
112e8700 1191 else if (!uiout->is_mi_like_p ())
cbe56571
TT
1192 uiout->field_string ("function", "??",
1193 ui_out_style_kind::FUNCTION);
8710b709 1194
1e038f67 1195 if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
afedecd3 1196 {
112e8700 1197 uiout->text (_("\tinst "));
23a7fe75 1198 btrace_call_history_insn_range (uiout, bfun);
afedecd3
MM
1199 }
1200
1e038f67 1201 if ((flags & RECORD_PRINT_SRC_LINE) != 0)
afedecd3 1202 {
112e8700 1203 uiout->text (_("\tat "));
23a7fe75 1204 btrace_call_history_src_line (uiout, bfun);
afedecd3
MM
1205 }
1206
112e8700 1207 uiout->text ("\n");
afedecd3
MM
1208 }
1209}
1210
f6ac5f3d 1211/* The call_history method of target record-btrace. */
afedecd3 1212
f6ac5f3d
PA
1213void
1214record_btrace_target::call_history (int size, record_print_flags flags)
afedecd3
MM
1215{
1216 struct btrace_thread_info *btinfo;
23a7fe75
MM
1217 struct btrace_call_history *history;
1218 struct btrace_call_iterator begin, end;
afedecd3 1219 struct ui_out *uiout;
23a7fe75 1220 unsigned int context, covered;
afedecd3
MM
1221
1222 uiout = current_uiout;
2e783024 1223 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
afedecd3 1224 context = abs (size);
afedecd3
MM
1225 if (context == 0)
1226 error (_("Bad record function-call-history-size."));
1227
23a7fe75
MM
1228 btinfo = require_btrace ();
1229 history = btinfo->call_history;
1230 if (history == NULL)
afedecd3 1231 {
07bbe694 1232 struct btrace_insn_iterator *replay;
afedecd3 1233
0cb7c7b0 1234 DEBUG ("call-history (0x%x): %d", (int) flags, size);
afedecd3 1235
07bbe694
MM
1236 /* If we're replaying, we start at the replay position. Otherwise, we
1237 start at the tail of the trace. */
1238 replay = btinfo->replay;
1239 if (replay != NULL)
1240 {
07bbe694 1241 begin.btinfo = btinfo;
a0f1b963 1242 begin.index = replay->call_index;
07bbe694
MM
1243 }
1244 else
1245 btrace_call_end (&begin, btinfo);
1246
1247 /* We start from here and expand in the requested direction. Then we
1248 expand in the other direction, as well, to fill up any remaining
1249 context. */
1250 end = begin;
1251 if (size < 0)
1252 {
1253 /* We want the current position covered, as well. */
1254 covered = btrace_call_next (&end, 1);
1255 covered += btrace_call_prev (&begin, context - covered);
1256 covered += btrace_call_next (&end, context - covered);
1257 }
1258 else
1259 {
1260 covered = btrace_call_next (&end, context);
 1261 covered += btrace_call_prev (&begin, context - covered);
1262 }
afedecd3
MM
1263 }
1264 else
1265 {
23a7fe75
MM
1266 begin = history->begin;
1267 end = history->end;
afedecd3 1268
0cb7c7b0 1269 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", (int) flags, size,
23a7fe75 1270 btrace_call_number (&begin), btrace_call_number (&end));
afedecd3 1271
23a7fe75
MM
1272 if (size < 0)
1273 {
1274 end = begin;
1275 covered = btrace_call_prev (&begin, context);
1276 }
1277 else
1278 {
1279 begin = end;
1280 covered = btrace_call_next (&end, context);
1281 }
afedecd3
MM
1282 }
1283
23a7fe75 1284 if (covered > 0)
8710b709 1285 btrace_call_history (uiout, btinfo, &begin, &end, flags);
23a7fe75
MM
1286 else
1287 {
1288 if (size < 0)
1289 printf_unfiltered (_("At the start of the branch trace record.\n"));
1290 else
1291 printf_unfiltered (_("At the end of the branch trace record.\n"));
1292 }
afedecd3 1293
23a7fe75 1294 btrace_set_call_history (btinfo, &begin, &end);
afedecd3
MM
1295}
1296
f6ac5f3d 1297/* The call_history_range method of target record-btrace. */
afedecd3 1298
f6ac5f3d
PA
1299void
1300record_btrace_target::call_history_range (ULONGEST from, ULONGEST to,
1301 record_print_flags flags)
afedecd3
MM
1302{
1303 struct btrace_thread_info *btinfo;
23a7fe75 1304 struct btrace_call_iterator begin, end;
afedecd3 1305 struct ui_out *uiout;
23a7fe75
MM
1306 unsigned int low, high;
1307 int found;
afedecd3
MM
1308
1309 uiout = current_uiout;
2e783024 1310 ui_out_emit_tuple tuple_emitter (uiout, "func history");
23a7fe75
MM
1311 low = from;
1312 high = to;
afedecd3 1313
0cb7c7b0 1314 DEBUG ("call-history (0x%x): [%u; %u)", (int) flags, low, high);
afedecd3
MM
1315
1316 /* Check for wrap-arounds. */
23a7fe75 1317 if (low != from || high != to)
afedecd3
MM
1318 error (_("Bad range."));
1319
0688d04e 1320 if (high < low)
afedecd3
MM
1321 error (_("Bad range."));
1322
23a7fe75 1323 btinfo = require_btrace ();
afedecd3 1324
23a7fe75
MM
1325 found = btrace_find_call_by_number (&begin, btinfo, low);
1326 if (found == 0)
1327 error (_("Range out of bounds."));
afedecd3 1328
23a7fe75
MM
1329 found = btrace_find_call_by_number (&end, btinfo, high);
1330 if (found == 0)
0688d04e
MM
1331 {
1332 /* Silently truncate the range. */
1333 btrace_call_end (&end, btinfo);
1334 }
1335 else
1336 {
1337 /* We want both begin and end to be inclusive. */
1338 btrace_call_next (&end, 1);
1339 }
afedecd3 1340
8710b709 1341 btrace_call_history (uiout, btinfo, &begin, &end, flags);
23a7fe75 1342 btrace_set_call_history (btinfo, &begin, &end);
afedecd3
MM
1343}
1344
f6ac5f3d 1345/* The call_history_from method of target record-btrace. */
afedecd3 1346
f6ac5f3d
PA
1347void
1348record_btrace_target::call_history_from (ULONGEST from, int size,
1349 record_print_flags flags)
afedecd3
MM
1350{
1351 ULONGEST begin, end, context;
1352
1353 context = abs (size);
0688d04e
MM
1354 if (context == 0)
1355 error (_("Bad record function-call-history-size."));
afedecd3
MM
1356
1357 if (size < 0)
1358 {
1359 end = from;
1360
1361 if (from < context)
1362 begin = 0;
1363 else
0688d04e 1364 begin = from - context + 1;
afedecd3
MM
1365 }
1366 else
1367 {
1368 begin = from;
0688d04e 1369 end = from + context - 1;
afedecd3
MM
1370
1371 /* Check for wrap-around. */
1372 if (end < begin)
1373 end = ULONGEST_MAX;
1374 }
1375
f6ac5f3d 1376 call_history_range ( begin, end, flags);
afedecd3
MM
1377}
1378
f6ac5f3d 1379/* The record_method method of target record-btrace. */
b158a20f 1380
f6ac5f3d
PA
1381enum record_method
1382record_btrace_target::record_method (ptid_t ptid)
b158a20f 1383{
b158a20f
TW
1384 struct thread_info * const tp = find_thread_ptid (ptid);
1385
1386 if (tp == NULL)
1387 error (_("No thread."));
1388
1389 if (tp->btrace.target == NULL)
1390 return RECORD_METHOD_NONE;
1391
1392 return RECORD_METHOD_BTRACE;
1393}
1394
f6ac5f3d 1395/* The record_is_replaying method of target record-btrace. */
07bbe694 1396
57810aa7 1397bool
f6ac5f3d 1398record_btrace_target::record_is_replaying (ptid_t ptid)
07bbe694 1399{
08036331
PA
1400 for (thread_info *tp : all_non_exited_threads (ptid))
1401 if (btrace_is_replaying (tp))
57810aa7 1402 return true;
07bbe694 1403
57810aa7 1404 return false;
07bbe694
MM
1405}
1406
f6ac5f3d 1407/* The record_will_replay method of target record-btrace. */
7ff27e9b 1408
57810aa7 1409bool
f6ac5f3d 1410record_btrace_target::record_will_replay (ptid_t ptid, int dir)
7ff27e9b 1411{
f6ac5f3d 1412 return dir == EXEC_REVERSE || record_is_replaying (ptid);
7ff27e9b
MM
1413}
1414
f6ac5f3d 1415/* The xfer_partial method of target record-btrace. */
633785ff 1416
f6ac5f3d
PA
1417enum target_xfer_status
1418record_btrace_target::xfer_partial (enum target_object object,
1419 const char *annex, gdb_byte *readbuf,
1420 const gdb_byte *writebuf, ULONGEST offset,
1421 ULONGEST len, ULONGEST *xfered_len)
633785ff 1422{
633785ff 1423 /* Filter out requests that don't make sense during replay. */
67b5c0c1 1424 if (replay_memory_access == replay_memory_access_read_only
aef92902 1425 && !record_btrace_generating_corefile
f6ac5f3d 1426 && record_is_replaying (inferior_ptid))
633785ff
MM
1427 {
1428 switch (object)
1429 {
1430 case TARGET_OBJECT_MEMORY:
1431 {
1432 struct target_section *section;
1433
1434 /* We do not allow writing memory in general. */
1435 if (writebuf != NULL)
9b409511
YQ
1436 {
1437 *xfered_len = len;
bc113b4e 1438 return TARGET_XFER_UNAVAILABLE;
9b409511 1439 }
633785ff
MM
1440
1441 /* We allow reading readonly memory. */
f6ac5f3d 1442 section = target_section_by_addr (this, offset);
633785ff
MM
1443 if (section != NULL)
1444 {
1445 /* Check if the section we found is readonly. */
1446 if ((bfd_get_section_flags (section->the_bfd_section->owner,
1447 section->the_bfd_section)
1448 & SEC_READONLY) != 0)
1449 {
1450 /* Truncate the request to fit into this section. */
325fac50 1451 len = std::min (len, section->endaddr - offset);
633785ff
MM
1452 break;
1453 }
1454 }
1455
9b409511 1456 *xfered_len = len;
bc113b4e 1457 return TARGET_XFER_UNAVAILABLE;
633785ff
MM
1458 }
1459 }
1460 }
1461
1462 /* Forward the request. */
b6a8c27b
PA
1463 return this->beneath ()->xfer_partial (object, annex, readbuf, writebuf,
1464 offset, len, xfered_len);
633785ff
MM
1465}
1466
f6ac5f3d 1467/* The insert_breakpoint method of target record-btrace. */
633785ff 1468
f6ac5f3d
PA
1469int
1470record_btrace_target::insert_breakpoint (struct gdbarch *gdbarch,
1471 struct bp_target_info *bp_tgt)
633785ff 1472{
67b5c0c1
MM
1473 const char *old;
1474 int ret;
633785ff
MM
1475
1476 /* Inserting breakpoints requires accessing memory. Allow it for the
1477 duration of this function. */
67b5c0c1
MM
1478 old = replay_memory_access;
1479 replay_memory_access = replay_memory_access_read_write;
633785ff
MM
1480
1481 ret = 0;
492d29ea
PA
1482 TRY
1483 {
b6a8c27b 1484 ret = this->beneath ()->insert_breakpoint (gdbarch, bp_tgt);
492d29ea 1485 }
492d29ea
PA
1486 CATCH (except, RETURN_MASK_ALL)
1487 {
6c63c96a 1488 replay_memory_access = old;
492d29ea
PA
1489 throw_exception (except);
1490 }
1491 END_CATCH
6c63c96a 1492 replay_memory_access = old;
633785ff
MM
1493
1494 return ret;
1495}
1496
f6ac5f3d 1497/* The remove_breakpoint method of target record-btrace. */
633785ff 1498
f6ac5f3d
PA
1499int
1500record_btrace_target::remove_breakpoint (struct gdbarch *gdbarch,
1501 struct bp_target_info *bp_tgt,
1502 enum remove_bp_reason reason)
633785ff 1503{
67b5c0c1
MM
1504 const char *old;
1505 int ret;
633785ff
MM
1506
1507 /* Removing breakpoints requires accessing memory. Allow it for the
1508 duration of this function. */
67b5c0c1
MM
1509 old = replay_memory_access;
1510 replay_memory_access = replay_memory_access_read_write;
633785ff
MM
1511
1512 ret = 0;
492d29ea
PA
1513 TRY
1514 {
b6a8c27b 1515 ret = this->beneath ()->remove_breakpoint (gdbarch, bp_tgt, reason);
492d29ea 1516 }
492d29ea
PA
1517 CATCH (except, RETURN_MASK_ALL)
1518 {
6c63c96a 1519 replay_memory_access = old;
492d29ea
PA
1520 throw_exception (except);
1521 }
1522 END_CATCH
6c63c96a 1523 replay_memory_access = old;
633785ff
MM
1524
1525 return ret;
1526}
1527
f6ac5f3d 1528/* The fetch_registers method of target record-btrace. */
1f3ef581 1529
f6ac5f3d
PA
1530void
1531record_btrace_target::fetch_registers (struct regcache *regcache, int regno)
1f3ef581
MM
1532{
1533 struct btrace_insn_iterator *replay;
1534 struct thread_info *tp;
1535
222312d3 1536 tp = find_thread_ptid (regcache->ptid ());
1f3ef581
MM
1537 gdb_assert (tp != NULL);
1538
1539 replay = tp->btrace.replay;
aef92902 1540 if (replay != NULL && !record_btrace_generating_corefile)
1f3ef581
MM
1541 {
1542 const struct btrace_insn *insn;
1543 struct gdbarch *gdbarch;
1544 int pcreg;
1545
ac7936df 1546 gdbarch = regcache->arch ();
1f3ef581
MM
1547 pcreg = gdbarch_pc_regnum (gdbarch);
1548 if (pcreg < 0)
1549 return;
1550
1551 /* We can only provide the PC register. */
1552 if (regno >= 0 && regno != pcreg)
1553 return;
1554
1555 insn = btrace_insn_get (replay);
1556 gdb_assert (insn != NULL);
1557
73e1c03f 1558 regcache->raw_supply (regno, &insn->pc);
1f3ef581
MM
1559 }
1560 else
b6a8c27b 1561 this->beneath ()->fetch_registers (regcache, regno);
1f3ef581
MM
1562}
1563
f6ac5f3d 1564/* The store_registers method of target record-btrace. */
1f3ef581 1565
f6ac5f3d
PA
1566void
1567record_btrace_target::store_registers (struct regcache *regcache, int regno)
1f3ef581 1568{
a52eab48 1569 if (!record_btrace_generating_corefile
222312d3 1570 && record_is_replaying (regcache->ptid ()))
4d10e986 1571 error (_("Cannot write registers while replaying."));
1f3ef581
MM
1572
1573 gdb_assert (may_write_registers != 0);
1574
b6a8c27b 1575 this->beneath ()->store_registers (regcache, regno);
1f3ef581
MM
1576}
1577
f6ac5f3d 1578/* The prepare_to_store method of target record-btrace. */
1f3ef581 1579
f6ac5f3d
PA
1580void
1581record_btrace_target::prepare_to_store (struct regcache *regcache)
1f3ef581 1582{
a52eab48 1583 if (!record_btrace_generating_corefile
222312d3 1584 && record_is_replaying (regcache->ptid ()))
1f3ef581
MM
1585 return;
1586
b6a8c27b 1587 this->beneath ()->prepare_to_store (regcache);
1f3ef581
MM
1588}
1589
0b722aec
MM
1590/* The branch trace frame cache. */
1591
1592struct btrace_frame_cache
1593{
1594 /* The thread. */
1595 struct thread_info *tp;
1596
1597 /* The frame info. */
1598 struct frame_info *frame;
1599
1600 /* The branch trace function segment. */
1601 const struct btrace_function *bfun;
1602};
1603
1604/* A struct btrace_frame_cache hash table indexed by NEXT. */
1605
1606static htab_t bfcache;
1607
1608/* hash_f for htab_create_alloc of bfcache. */
1609
1610static hashval_t
1611bfcache_hash (const void *arg)
1612{
19ba03f4
SM
1613 const struct btrace_frame_cache *cache
1614 = (const struct btrace_frame_cache *) arg;
0b722aec
MM
1615
1616 return htab_hash_pointer (cache->frame);
1617}
1618
1619/* eq_f for htab_create_alloc of bfcache. */
1620
1621static int
1622bfcache_eq (const void *arg1, const void *arg2)
1623{
19ba03f4
SM
1624 const struct btrace_frame_cache *cache1
1625 = (const struct btrace_frame_cache *) arg1;
1626 const struct btrace_frame_cache *cache2
1627 = (const struct btrace_frame_cache *) arg2;
0b722aec
MM
1628
1629 return cache1->frame == cache2->frame;
1630}
1631
1632/* Create a new btrace frame cache. */
1633
1634static struct btrace_frame_cache *
1635bfcache_new (struct frame_info *frame)
1636{
1637 struct btrace_frame_cache *cache;
1638 void **slot;
1639
1640 cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
1641 cache->frame = frame;
1642
1643 slot = htab_find_slot (bfcache, cache, INSERT);
1644 gdb_assert (*slot == NULL);
1645 *slot = cache;
1646
1647 return cache;
1648}
1649
1650/* Extract the branch trace function from a branch trace frame. */
1651
1652static const struct btrace_function *
1653btrace_get_frame_function (struct frame_info *frame)
1654{
1655 const struct btrace_frame_cache *cache;
0b722aec
MM
1656 struct btrace_frame_cache pattern;
1657 void **slot;
1658
1659 pattern.frame = frame;
1660
1661 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1662 if (slot == NULL)
1663 return NULL;
1664
19ba03f4 1665 cache = (const struct btrace_frame_cache *) *slot;
0b722aec
MM
1666 return cache->bfun;
1667}
1668
cecac1ab
MM
1669/* Implement stop_reason method for record_btrace_frame_unwind. */
1670
1671static enum unwind_stop_reason
1672record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1673 void **this_cache)
1674{
0b722aec
MM
1675 const struct btrace_frame_cache *cache;
1676 const struct btrace_function *bfun;
1677
19ba03f4 1678 cache = (const struct btrace_frame_cache *) *this_cache;
0b722aec
MM
1679 bfun = cache->bfun;
1680 gdb_assert (bfun != NULL);
1681
42bfe59e 1682 if (bfun->up == 0)
0b722aec
MM
1683 return UNWIND_UNAVAILABLE;
1684
1685 return UNWIND_NO_REASON;
cecac1ab
MM
1686}
1687
1688/* Implement this_id method for record_btrace_frame_unwind. */
1689
1690static void
1691record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
1692 struct frame_id *this_id)
1693{
0b722aec
MM
1694 const struct btrace_frame_cache *cache;
1695 const struct btrace_function *bfun;
4aeb0dfc 1696 struct btrace_call_iterator it;
0b722aec
MM
1697 CORE_ADDR code, special;
1698
19ba03f4 1699 cache = (const struct btrace_frame_cache *) *this_cache;
0b722aec
MM
1700
1701 bfun = cache->bfun;
1702 gdb_assert (bfun != NULL);
1703
4aeb0dfc
TW
1704 while (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->prev) != 0)
1705 bfun = btrace_call_get (&it);
0b722aec
MM
1706
1707 code = get_frame_func (this_frame);
1708 special = bfun->number;
1709
1710 *this_id = frame_id_build_unavailable_stack_special (code, special);
1711
1712 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1713 btrace_get_bfun_name (cache->bfun),
1714 core_addr_to_string_nz (this_id->code_addr),
1715 core_addr_to_string_nz (this_id->special_addr));
cecac1ab
MM
1716}
1717
1718/* Implement prev_register method for record_btrace_frame_unwind. */
1719
1720static struct value *
1721record_btrace_frame_prev_register (struct frame_info *this_frame,
1722 void **this_cache,
1723 int regnum)
1724{
0b722aec
MM
1725 const struct btrace_frame_cache *cache;
1726 const struct btrace_function *bfun, *caller;
42bfe59e 1727 struct btrace_call_iterator it;
0b722aec
MM
1728 struct gdbarch *gdbarch;
1729 CORE_ADDR pc;
1730 int pcreg;
1731
1732 gdbarch = get_frame_arch (this_frame);
1733 pcreg = gdbarch_pc_regnum (gdbarch);
1734 if (pcreg < 0 || regnum != pcreg)
1735 throw_error (NOT_AVAILABLE_ERROR,
1736 _("Registers are not available in btrace record history"));
1737
19ba03f4 1738 cache = (const struct btrace_frame_cache *) *this_cache;
0b722aec
MM
1739 bfun = cache->bfun;
1740 gdb_assert (bfun != NULL);
1741
42bfe59e 1742 if (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->up) == 0)
0b722aec
MM
1743 throw_error (NOT_AVAILABLE_ERROR,
1744 _("No caller in btrace record history"));
1745
42bfe59e
TW
1746 caller = btrace_call_get (&it);
1747
0b722aec 1748 if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
0860c437 1749 pc = caller->insn.front ().pc;
0b722aec
MM
1750 else
1751 {
0860c437 1752 pc = caller->insn.back ().pc;
0b722aec
MM
1753 pc += gdb_insn_length (gdbarch, pc);
1754 }
1755
1756 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1757 btrace_get_bfun_name (bfun), bfun->level,
1758 core_addr_to_string_nz (pc));
1759
1760 return frame_unwind_got_address (this_frame, regnum, pc);
cecac1ab
MM
1761}
1762
1763/* Implement sniffer method for record_btrace_frame_unwind. */
1764
1765static int
1766record_btrace_frame_sniffer (const struct frame_unwind *self,
1767 struct frame_info *this_frame,
1768 void **this_cache)
1769{
0b722aec
MM
1770 const struct btrace_function *bfun;
1771 struct btrace_frame_cache *cache;
cecac1ab 1772 struct thread_info *tp;
0b722aec 1773 struct frame_info *next;
cecac1ab
MM
1774
1775 /* THIS_FRAME does not contain a reference to its thread. */
00431a78 1776 tp = inferior_thread ();
cecac1ab 1777
0b722aec
MM
1778 bfun = NULL;
1779 next = get_next_frame (this_frame);
1780 if (next == NULL)
1781 {
1782 const struct btrace_insn_iterator *replay;
1783
1784 replay = tp->btrace.replay;
1785 if (replay != NULL)
08c3f6d2 1786 bfun = &replay->btinfo->functions[replay->call_index];
0b722aec
MM
1787 }
1788 else
1789 {
1790 const struct btrace_function *callee;
42bfe59e 1791 struct btrace_call_iterator it;
0b722aec
MM
1792
1793 callee = btrace_get_frame_function (next);
42bfe59e
TW
1794 if (callee == NULL || (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
1795 return 0;
1796
1797 if (btrace_find_call_by_number (&it, &tp->btrace, callee->up) == 0)
1798 return 0;
1799
1800 bfun = btrace_call_get (&it);
0b722aec
MM
1801 }
1802
1803 if (bfun == NULL)
1804 return 0;
1805
1806 DEBUG ("[frame] sniffed frame for %s on level %d",
1807 btrace_get_bfun_name (bfun), bfun->level);
1808
1809 /* This is our frame. Initialize the frame cache. */
1810 cache = bfcache_new (this_frame);
1811 cache->tp = tp;
1812 cache->bfun = bfun;
1813
1814 *this_cache = cache;
1815 return 1;
1816}
1817
1818/* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1819
1820static int
1821record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
1822 struct frame_info *this_frame,
1823 void **this_cache)
1824{
1825 const struct btrace_function *bfun, *callee;
1826 struct btrace_frame_cache *cache;
42bfe59e 1827 struct btrace_call_iterator it;
0b722aec 1828 struct frame_info *next;
42bfe59e 1829 struct thread_info *tinfo;
0b722aec
MM
1830
1831 next = get_next_frame (this_frame);
1832 if (next == NULL)
1833 return 0;
1834
1835 callee = btrace_get_frame_function (next);
1836 if (callee == NULL)
1837 return 0;
1838
1839 if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1840 return 0;
1841
00431a78 1842 tinfo = inferior_thread ();
42bfe59e 1843 if (btrace_find_call_by_number (&it, &tinfo->btrace, callee->up) == 0)
0b722aec
MM
1844 return 0;
1845
42bfe59e
TW
1846 bfun = btrace_call_get (&it);
1847
0b722aec
MM
1848 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1849 btrace_get_bfun_name (bfun), bfun->level);
1850
1851 /* This is our frame. Initialize the frame cache. */
1852 cache = bfcache_new (this_frame);
42bfe59e 1853 cache->tp = tinfo;
0b722aec
MM
1854 cache->bfun = bfun;
1855
1856 *this_cache = cache;
1857 return 1;
1858}
1859
1860static void
1861record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
1862{
1863 struct btrace_frame_cache *cache;
1864 void **slot;
1865
19ba03f4 1866 cache = (struct btrace_frame_cache *) this_cache;
0b722aec
MM
1867
1868 slot = htab_find_slot (bfcache, cache, NO_INSERT);
1869 gdb_assert (slot != NULL);
1870
1871 htab_remove_elt (bfcache, cache);
cecac1ab
MM
1872}
1873
1874/* btrace recording does not store previous memory contents, nor the stack
 1875 frame contents.  Any unwinding would return erroneous results as the stack
 1876 contents no longer match the changed PC value restored from history.
1877 Therefore this unwinder reports any possibly unwound registers as
1878 <unavailable>. */
1879
0b722aec 1880const struct frame_unwind record_btrace_frame_unwind =
cecac1ab
MM
1881{
1882 NORMAL_FRAME,
1883 record_btrace_frame_unwind_stop_reason,
1884 record_btrace_frame_this_id,
1885 record_btrace_frame_prev_register,
1886 NULL,
0b722aec
MM
1887 record_btrace_frame_sniffer,
1888 record_btrace_frame_dealloc_cache
1889};
1890
1891const struct frame_unwind record_btrace_tailcall_frame_unwind =
1892{
1893 TAILCALL_FRAME,
1894 record_btrace_frame_unwind_stop_reason,
1895 record_btrace_frame_this_id,
1896 record_btrace_frame_prev_register,
1897 NULL,
1898 record_btrace_tailcall_frame_sniffer,
1899 record_btrace_frame_dealloc_cache
cecac1ab 1900};
b2f4cfde 1901
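/* Both unwinders above share the stop-reason, this-id, prev-register, and
   dealloc-cache callbacks; they differ only in the frame type (NORMAL_FRAME
   vs. TAILCALL_FRAME) and in the sniffer that claims the frame.  Neither
   sniffer claims a frame unless the thread is replaying its branch trace.  */
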
f6ac5f3d 1902/* Implement the get_unwinder method. */
ac01945b 1903
f6ac5f3d
PA
1904const struct frame_unwind *
1905record_btrace_target::get_unwinder ()
ac01945b
TT
1906{
1907 return &record_btrace_frame_unwind;
1908}
1909
f6ac5f3d 1910/* Implement the get_tailcall_unwinder method. */
ac01945b 1911
f6ac5f3d
PA
1912const struct frame_unwind *
1913record_btrace_target::get_tailcall_unwinder ()
ac01945b
TT
1914{
1915 return &record_btrace_tailcall_frame_unwind;
1916}
1917
987e68b1
MM
1918/* Return a human-readable string for FLAG. */
1919
1920static const char *
1921btrace_thread_flag_to_str (enum btrace_thread_flag flag)
1922{
1923 switch (flag)
1924 {
1925 case BTHR_STEP:
1926 return "step";
1927
1928 case BTHR_RSTEP:
1929 return "reverse-step";
1930
1931 case BTHR_CONT:
1932 return "cont";
1933
1934 case BTHR_RCONT:
1935 return "reverse-cont";
1936
1937 case BTHR_STOP:
1938 return "stop";
1939 }
1940
1941 return "<invalid>";
1942}
1943
52834460
MM
1944/* Indicate that TP should be resumed according to FLAG. */
1945
1946static void
1947record_btrace_resume_thread (struct thread_info *tp,
1948 enum btrace_thread_flag flag)
1949{
1950 struct btrace_thread_info *btinfo;
1951
43792cf0 1952 DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp),
a068643d
TT
1953 target_pid_to_str (tp->ptid).c_str (), flag,
1954 btrace_thread_flag_to_str (flag));
52834460
MM
1955
1956 btinfo = &tp->btrace;
1957
52834460 1958 /* Fetch the latest branch trace. */
4a4495d6 1959 btrace_fetch (tp, record_btrace_get_cpu ());
52834460 1960
0ca912df
MM
1961 /* A resume request overwrites a preceding resume or stop request. */
1962 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
52834460
MM
1963 btinfo->flags |= flag;
1964}
1965
ec71cc2f
MM
1966/* Get the current frame for TP. */
1967
79b8d3b0
TT
1968static struct frame_id
1969get_thread_current_frame_id (struct thread_info *tp)
ec71cc2f 1970{
79b8d3b0 1971 struct frame_id id;
ec71cc2f
MM
1972 int executing;
1973
00431a78
PA
1974 /* Set current thread, which is implicitly used by
1975 get_current_frame. */
1976 scoped_restore_current_thread restore_thread;
1977
1978 switch_to_thread (tp);
ec71cc2f
MM
1979
1980 /* Clear the executing flag to allow changes to the current frame.
1981 We are not actually running, yet. We just started a reverse execution
1982 command or a record goto command.
1983 For the latter, EXECUTING is false and this has no effect.
f6ac5f3d 1984 For the former, EXECUTING is true and we're in wait, about to
ec71cc2f
MM
1985 move the thread. Since we need to recompute the stack, we temporarily
1986 set EXECUTING to false. */
00431a78
PA
1987 executing = tp->executing;
1988 set_executing (inferior_ptid, false);
ec71cc2f 1989
79b8d3b0 1990 id = null_frame_id;
ec71cc2f
MM
1991 TRY
1992 {
79b8d3b0 1993 id = get_frame_id (get_current_frame ());
ec71cc2f
MM
1994 }
1995 CATCH (except, RETURN_MASK_ALL)
1996 {
1997 /* Restore the previous execution state. */
1998 set_executing (inferior_ptid, executing);
1999
ec71cc2f
MM
2000 throw_exception (except);
2001 }
2002 END_CATCH
2003
2004 /* Restore the previous execution state. */
2005 set_executing (inferior_ptid, executing);
2006
79b8d3b0 2007 return id;
ec71cc2f
MM
2008}
2009
52834460
MM
2010/* Start replaying a thread. */
2011
2012static struct btrace_insn_iterator *
2013record_btrace_start_replaying (struct thread_info *tp)
2014{
52834460
MM
2015 struct btrace_insn_iterator *replay;
2016 struct btrace_thread_info *btinfo;
52834460
MM
2017
2018 btinfo = &tp->btrace;
2019 replay = NULL;
2020
2021 /* We can't start replaying without trace. */
b54b03bd 2022 if (btinfo->functions.empty ())
52834460
MM
2023 return NULL;
2024
52834460
MM
2025 /* GDB stores the current frame_id when stepping in order to detect steps
2026 into subroutines.
2027 Since frames are computed differently when we're replaying, we need to
2028 recompute those stored frames and fix them up so we can still detect
2029 subroutines after we started replaying. */
492d29ea 2030 TRY
52834460 2031 {
52834460
MM
2032 struct frame_id frame_id;
2033 int upd_step_frame_id, upd_step_stack_frame_id;
2034
2035 /* The current frame without replaying - computed via normal unwind. */
79b8d3b0 2036 frame_id = get_thread_current_frame_id (tp);
52834460
MM
2037
2038 /* Check if we need to update any stepping-related frame id's. */
2039 upd_step_frame_id = frame_id_eq (frame_id,
2040 tp->control.step_frame_id);
2041 upd_step_stack_frame_id = frame_id_eq (frame_id,
2042 tp->control.step_stack_frame_id);
2043
2044 /* We start replaying at the end of the branch trace. This corresponds
2045 to the current instruction. */
8d749320 2046 replay = XNEW (struct btrace_insn_iterator);
52834460
MM
2047 btrace_insn_end (replay, btinfo);
2048
31fd9caa
MM
2049 /* Skip gaps at the end of the trace. */
2050 while (btrace_insn_get (replay) == NULL)
2051 {
2052 unsigned int steps;
2053
2054 steps = btrace_insn_prev (replay, 1);
2055 if (steps == 0)
2056 error (_("No trace."));
2057 }
2058
52834460
MM
2059 /* We're not replaying, yet. */
2060 gdb_assert (btinfo->replay == NULL);
2061 btinfo->replay = replay;
2062
2063 /* Make sure we're not using any stale registers. */
00431a78 2064 registers_changed_thread (tp);
52834460
MM
2065
2066 /* The current frame with replaying - computed via btrace unwind. */
79b8d3b0 2067 frame_id = get_thread_current_frame_id (tp);
52834460
MM
2068
2069 /* Replace stepping related frames where necessary. */
2070 if (upd_step_frame_id)
2071 tp->control.step_frame_id = frame_id;
2072 if (upd_step_stack_frame_id)
2073 tp->control.step_stack_frame_id = frame_id;
2074 }
492d29ea 2075 CATCH (except, RETURN_MASK_ALL)
52834460
MM
2076 {
2077 xfree (btinfo->replay);
2078 btinfo->replay = NULL;
2079
00431a78 2080 registers_changed_thread (tp);
52834460
MM
2081
2082 throw_exception (except);
2083 }
492d29ea 2084 END_CATCH
52834460
MM
2085
2086 return replay;
2087}
2088
2089/* Stop replaying a thread. */
2090
2091static void
2092record_btrace_stop_replaying (struct thread_info *tp)
2093{
2094 struct btrace_thread_info *btinfo;
2095
2096 btinfo = &tp->btrace;
2097
2098 xfree (btinfo->replay);
2099 btinfo->replay = NULL;
2100
2101 /* Make sure we're not leaving any stale registers. */
00431a78 2102 registers_changed_thread (tp);
52834460
MM
2103}
2104
e3cfc1c7
MM
2105/* Stop replaying TP if it is at the end of its execution history. */
2106
2107static void
2108record_btrace_stop_replaying_at_end (struct thread_info *tp)
2109{
2110 struct btrace_insn_iterator *replay, end;
2111 struct btrace_thread_info *btinfo;
2112
2113 btinfo = &tp->btrace;
2114 replay = btinfo->replay;
2115
2116 if (replay == NULL)
2117 return;
2118
2119 btrace_insn_end (&end, btinfo);
2120
2121 if (btrace_insn_cmp (replay, &end) == 0)
2122 record_btrace_stop_replaying (tp);
2123}
2124
f6ac5f3d 2125/* The resume method of target record-btrace. */
b2f4cfde 2126
f6ac5f3d
PA
2127void
2128record_btrace_target::resume (ptid_t ptid, int step, enum gdb_signal signal)
b2f4cfde 2129{
d2939ba2 2130 enum btrace_thread_flag flag, cflag;
52834460 2131
a068643d 2132 DEBUG ("resume %s: %s%s", target_pid_to_str (ptid).c_str (),
f6ac5f3d 2133 ::execution_direction == EXEC_REVERSE ? "reverse-" : "",
987e68b1 2134 step ? "step" : "cont");
52834460 2135
0ca912df
MM
2136 /* Store the execution direction of the last resume.
2137
f6ac5f3d 2138 If there is more than one resume call, we have to rely on infrun
0ca912df 2139 to not change the execution direction in-between. */
f6ac5f3d 2140 record_btrace_resume_exec_dir = ::execution_direction;
70ad5bff 2141
0ca912df 2142 /* As long as we're not replaying, just forward the request.
52834460 2143
0ca912df
MM
2144 For non-stop targets this means that no thread is replaying. In order to
2145 make progress, we may need to explicitly move replaying threads to the end
2146 of their execution history. */
f6ac5f3d
PA
2147 if ((::execution_direction != EXEC_REVERSE)
2148 && !record_is_replaying (minus_one_ptid))
b2f4cfde 2149 {
b6a8c27b 2150 this->beneath ()->resume (ptid, step, signal);
04c4fe8c 2151 return;
b2f4cfde
MM
2152 }
2153
52834460 2154 /* Compute the btrace thread flag for the requested move. */
f6ac5f3d 2155 if (::execution_direction == EXEC_REVERSE)
d2939ba2
MM
2156 {
2157 flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;
2158 cflag = BTHR_RCONT;
2159 }
52834460 2160 else
d2939ba2
MM
2161 {
2162 flag = step == 0 ? BTHR_CONT : BTHR_STEP;
2163 cflag = BTHR_CONT;
2164 }
52834460 2165
52834460 2166 /* We just indicate the resume intent here. The actual stepping happens in
d2939ba2
MM
2167 record_btrace_wait below.
2168
2169 For all-stop targets, we only step INFERIOR_PTID and continue others. */
2170 if (!target_is_non_stop_p ())
2171 {
26a57c92 2172 gdb_assert (inferior_ptid.matches (ptid));
d2939ba2 2173
08036331
PA
2174 for (thread_info *tp : all_non_exited_threads (ptid))
2175 {
2176 if (tp->ptid.matches (inferior_ptid))
2177 record_btrace_resume_thread (tp, flag);
2178 else
2179 record_btrace_resume_thread (tp, cflag);
2180 }
d2939ba2
MM
2181 }
2182 else
2183 {
08036331
PA
2184 for (thread_info *tp : all_non_exited_threads (ptid))
2185 record_btrace_resume_thread (tp, flag);
d2939ba2 2186 }
70ad5bff
MM
2187
2188 /* Async support. */
2189 if (target_can_async_p ())
2190 {
6a3753b3 2191 target_async (1);
70ad5bff
MM
2192 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2193 }
52834460
MM
2194}
2195
f6ac5f3d 2196/* The commit_resume method of target record-btrace. */
85ad3aaf 2197
f6ac5f3d
PA
2198void
2199record_btrace_target::commit_resume ()
85ad3aaf 2200{
f6ac5f3d
PA
2201 if ((::execution_direction != EXEC_REVERSE)
2202 && !record_is_replaying (minus_one_ptid))
b6a8c27b 2203 beneath ()->commit_resume ();
85ad3aaf
PA
2204}
2205
987e68b1
MM
2206/* Cancel resuming TP. */
2207
2208static void
2209record_btrace_cancel_resume (struct thread_info *tp)
2210{
2211 enum btrace_thread_flag flags;
2212
2213 flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
2214 if (flags == 0)
2215 return;
2216
43792cf0
PA
2217 DEBUG ("cancel resume thread %s (%s): %x (%s)",
2218 print_thread_id (tp),
a068643d 2219 target_pid_to_str (tp->ptid).c_str (), flags,
987e68b1
MM
2220 btrace_thread_flag_to_str (flags));
2221
2222 tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
e3cfc1c7 2223 record_btrace_stop_replaying_at_end (tp);
52834460
MM
2224}
2225
2226/* Return a target_waitstatus indicating that we ran out of history. */
2227
2228static struct target_waitstatus
2229btrace_step_no_history (void)
2230{
2231 struct target_waitstatus status;
2232
2233 status.kind = TARGET_WAITKIND_NO_HISTORY;
2234
2235 return status;
2236}
2237
2238/* Return a target_waitstatus indicating that a step finished. */
2239
2240static struct target_waitstatus
2241btrace_step_stopped (void)
2242{
2243 struct target_waitstatus status;
2244
2245 status.kind = TARGET_WAITKIND_STOPPED;
2246 status.value.sig = GDB_SIGNAL_TRAP;
2247
2248 return status;
2249}
2250
6e4879f0
MM
2251/* Return a target_waitstatus indicating that a thread was stopped as
2252 requested. */
2253
2254static struct target_waitstatus
2255btrace_step_stopped_on_request (void)
2256{
2257 struct target_waitstatus status;
2258
2259 status.kind = TARGET_WAITKIND_STOPPED;
2260 status.value.sig = GDB_SIGNAL_0;
2261
2262 return status;
2263}
2264
d825d248
MM
2265/* Return a target_waitstatus indicating a spurious stop. */
2266
2267static struct target_waitstatus
2268btrace_step_spurious (void)
2269{
2270 struct target_waitstatus status;
2271
2272 status.kind = TARGET_WAITKIND_SPURIOUS;
2273
2274 return status;
2275}
2276
e3cfc1c7
MM
2277/* Return a target_waitstatus indicating that the thread was not resumed. */
2278
2279static struct target_waitstatus
2280btrace_step_no_resumed (void)
2281{
2282 struct target_waitstatus status;
2283
2284 status.kind = TARGET_WAITKIND_NO_RESUMED;
2285
2286 return status;
2287}
2288
2289/* Return a target_waitstatus indicating that we should wait again. */
2290
2291static struct target_waitstatus
2292btrace_step_again (void)
2293{
2294 struct target_waitstatus status;
2295
2296 status.kind = TARGET_WAITKIND_IGNORE;
2297
2298 return status;
2299}
2300
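/* The helpers above package the TARGET_WAITKIND_* results reported by
   record_btrace_step_thread and record_btrace_target::wait below: out of
   history, stopped with SIGTRAP, stopped on request (signal 0), spurious,
   no thread resumed, and "ignore", which means step again.  */
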
52834460
MM
2301/* Clear the record histories. */
2302
2303static void
2304record_btrace_clear_histories (struct btrace_thread_info *btinfo)
2305{
2306 xfree (btinfo->insn_history);
2307 xfree (btinfo->call_history);
2308
2309 btinfo->insn_history = NULL;
2310 btinfo->call_history = NULL;
2311}
2312
3c615f99
MM
2313/* Check whether TP's current replay position is at a breakpoint. */
2314
2315static int
2316record_btrace_replay_at_breakpoint (struct thread_info *tp)
2317{
2318 struct btrace_insn_iterator *replay;
2319 struct btrace_thread_info *btinfo;
2320 const struct btrace_insn *insn;
3c615f99
MM
2321
2322 btinfo = &tp->btrace;
2323 replay = btinfo->replay;
2324
2325 if (replay == NULL)
2326 return 0;
2327
2328 insn = btrace_insn_get (replay);
2329 if (insn == NULL)
2330 return 0;
2331
00431a78 2332 return record_check_stopped_by_breakpoint (tp->inf->aspace, insn->pc,
3c615f99
MM
2333 &btinfo->stop_reason);
2334}
2335
d825d248 2336/* Step one instruction in forward direction. */
52834460
MM
2337
2338static struct target_waitstatus
d825d248 2339record_btrace_single_step_forward (struct thread_info *tp)
52834460 2340{
b61ce85c 2341 struct btrace_insn_iterator *replay, end, start;
52834460 2342 struct btrace_thread_info *btinfo;
52834460 2343
d825d248
MM
2344 btinfo = &tp->btrace;
2345 replay = btinfo->replay;
2346
2347 /* We're done if we're not replaying. */
2348 if (replay == NULL)
2349 return btrace_step_no_history ();
2350
011c71b6
MM
2351 /* Check if we're stepping a breakpoint. */
2352 if (record_btrace_replay_at_breakpoint (tp))
2353 return btrace_step_stopped ();
2354
b61ce85c
MM
2355 /* Skip gaps during replay. If we end up at a gap (at the end of the trace),
2356 jump back to the instruction at which we started. */
2357 start = *replay;
d825d248
MM
2358 do
2359 {
2360 unsigned int steps;
2361
e3cfc1c7
MM
2362 /* We will bail out here if we continue stepping after reaching the end
2363 of the execution history. */
d825d248
MM
2364 steps = btrace_insn_next (replay, 1);
2365 if (steps == 0)
b61ce85c
MM
2366 {
2367 *replay = start;
2368 return btrace_step_no_history ();
2369 }
d825d248
MM
2370 }
2371 while (btrace_insn_get (replay) == NULL);
2372
2373 /* Determine the end of the instruction trace. */
2374 btrace_insn_end (&end, btinfo);
2375
e3cfc1c7
MM
2376 /* The execution trace contains (and ends with) the current instruction.
2377 This instruction has not been executed, yet, so the trace really ends
2378 one instruction earlier. */
d825d248 2379 if (btrace_insn_cmp (replay, &end) == 0)
e3cfc1c7 2380 return btrace_step_no_history ();
d825d248
MM
2381
2382 return btrace_step_spurious ();
2383}
2384
2385/* Step one instruction in backward direction. */
2386
2387static struct target_waitstatus
2388record_btrace_single_step_backward (struct thread_info *tp)
2389{
b61ce85c 2390 struct btrace_insn_iterator *replay, start;
d825d248 2391 struct btrace_thread_info *btinfo;
e59fa00f 2392
52834460
MM
2393 btinfo = &tp->btrace;
2394 replay = btinfo->replay;
2395
d825d248
MM
2396 /* Start replaying if we're not already doing so. */
2397 if (replay == NULL)
2398 replay = record_btrace_start_replaying (tp);
2399
2400 /* If we can't step any further, we reached the end of the history.
b61ce85c
MM
2401 Skip gaps during replay. If we end up at a gap (at the beginning of
2402 the trace), jump back to the instruction at which we started. */
2403 start = *replay;
d825d248
MM
2404 do
2405 {
2406 unsigned int steps;
2407
2408 steps = btrace_insn_prev (replay, 1);
2409 if (steps == 0)
b61ce85c
MM
2410 {
2411 *replay = start;
2412 return btrace_step_no_history ();
2413 }
d825d248
MM
2414 }
2415 while (btrace_insn_get (replay) == NULL);
2416
011c71b6
MM
2417 /* Check if we're stepping a breakpoint.
2418
2419 For reverse-stepping, this check is after the step. There is logic in
2420 infrun.c that handles reverse-stepping separately. See, for example,
2421 proceed and adjust_pc_after_break.
2422
2423 This code assumes that for reverse-stepping, PC points to the last
2424 de-executed instruction, whereas for forward-stepping PC points to the
2425 next to-be-executed instruction. */
2426 if (record_btrace_replay_at_breakpoint (tp))
2427 return btrace_step_stopped ();
2428
d825d248
MM
2429 return btrace_step_spurious ();
2430}
2431
2432/* Step a single thread. */
2433
2434static struct target_waitstatus
2435record_btrace_step_thread (struct thread_info *tp)
2436{
2437 struct btrace_thread_info *btinfo;
2438 struct target_waitstatus status;
2439 enum btrace_thread_flag flags;
2440
2441 btinfo = &tp->btrace;
2442
6e4879f0
MM
2443 flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
2444 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
52834460 2445
43792cf0 2446 DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp),
a068643d 2447 target_pid_to_str (tp->ptid).c_str (), flags,
987e68b1 2448 btrace_thread_flag_to_str (flags));
52834460 2449
6e4879f0
MM
2450 /* We can't step without an execution history. */
2451 if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
2452 return btrace_step_no_history ();
2453
52834460
MM
2454 switch (flags)
2455 {
2456 default:
2457 internal_error (__FILE__, __LINE__, _("invalid stepping type."));
2458
6e4879f0
MM
2459 case BTHR_STOP:
2460 return btrace_step_stopped_on_request ();
2461
52834460 2462 case BTHR_STEP:
d825d248
MM
2463 status = record_btrace_single_step_forward (tp);
2464 if (status.kind != TARGET_WAITKIND_SPURIOUS)
e3cfc1c7 2465 break;
52834460
MM
2466
2467 return btrace_step_stopped ();
2468
2469 case BTHR_RSTEP:
d825d248
MM
2470 status = record_btrace_single_step_backward (tp);
2471 if (status.kind != TARGET_WAITKIND_SPURIOUS)
e3cfc1c7 2472 break;
52834460
MM
2473
2474 return btrace_step_stopped ();
2475
2476 case BTHR_CONT:
e3cfc1c7
MM
2477 status = record_btrace_single_step_forward (tp);
2478 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2479 break;
52834460 2480
e3cfc1c7
MM
2481 btinfo->flags |= flags;
2482 return btrace_step_again ();
52834460
MM
2483
2484 case BTHR_RCONT:
e3cfc1c7
MM
2485 status = record_btrace_single_step_backward (tp);
2486 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2487 break;
52834460 2488
e3cfc1c7
MM
2489 btinfo->flags |= flags;
2490 return btrace_step_again ();
2491 }
d825d248 2492
f6ac5f3d 2493 /* We keep threads moving at the end of their execution history. The wait
e3cfc1c7
MM
2494 method will stop the thread for which the event is reported. */
2495 if (status.kind == TARGET_WAITKIND_NO_HISTORY)
2496 btinfo->flags |= flags;
52834460 2497
e3cfc1c7 2498 return status;
b2f4cfde
MM
2499}
2500
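/* Informal example of the flow above: for BTHR_RCONT, every backward step
   that comes back as TARGET_WAITKIND_SPURIOUS re-arms the flag and returns
   btrace_step_again, so the wait loop below keeps stepping this thread
   until a step reports a real stop (e.g. a breakpoint) or the thread runs
   out of execution history.  */
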
a6b5be76
MM
2501/* Announce further events if necessary. */
2502
2503static void
53127008
SM
2504record_btrace_maybe_mark_async_event
2505 (const std::vector<thread_info *> &moving,
2506 const std::vector<thread_info *> &no_history)
a6b5be76 2507{
53127008
SM
2508 bool more_moving = !moving.empty ();
2509 bool more_no_history = !no_history.empty ();
a6b5be76
MM
2510
2511 if (!more_moving && !more_no_history)
2512 return;
2513
2514 if (more_moving)
2515 DEBUG ("movers pending");
2516
2517 if (more_no_history)
2518 DEBUG ("no-history pending");
2519
2520 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2521}
2522
f6ac5f3d 2523/* The wait method of target record-btrace. */
b2f4cfde 2524
f6ac5f3d
PA
2525ptid_t
2526record_btrace_target::wait (ptid_t ptid, struct target_waitstatus *status,
2527 int options)
b2f4cfde 2528{
53127008
SM
2529 std::vector<thread_info *> moving;
2530 std::vector<thread_info *> no_history;
52834460 2531
a068643d 2532 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid).c_str (), options);
52834460 2533
b2f4cfde 2534 /* As long as we're not replaying, just forward the request. */
f6ac5f3d
PA
2535 if ((::execution_direction != EXEC_REVERSE)
2536 && !record_is_replaying (minus_one_ptid))
b2f4cfde 2537 {
b6a8c27b 2538 return this->beneath ()->wait (ptid, status, options);
b2f4cfde
MM
2539 }
2540
e3cfc1c7 2541 /* Keep a work list of moving threads. */
08036331
PA
2542 for (thread_info *tp : all_non_exited_threads (ptid))
2543 if ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0)
2544 moving.push_back (tp);
e3cfc1c7 2545
53127008 2546 if (moving.empty ())
52834460 2547 {
e3cfc1c7 2548 *status = btrace_step_no_resumed ();
52834460 2549
a068643d 2550 DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid).c_str (),
23fdd69e 2551 target_waitstatus_to_string (status).c_str ());
e3cfc1c7 2552
e3cfc1c7 2553 return null_ptid;
52834460
MM
2554 }
2555
e3cfc1c7
MM
2556 /* Step moving threads one by one, one step each, until either one thread
2557 reports an event or we run out of threads to step.
2558
2559 When stepping more than one thread, chances are that some threads reach
2560 the end of their execution history earlier than others. If we reported
2561 this immediately, all-stop on top of non-stop would stop all threads and
2562 resume the same threads next time. And we would report the same thread
2563 having reached the end of its execution history again.
2564
2565 In the worst case, this would starve the other threads. But even if other
2566 threads would be allowed to make progress, this would result in far too
2567 many intermediate stops.
2568
2569 We therefore delay the reporting of "no execution history" until we have
2570 nothing else to report. By this time, all threads should have moved to
2571 either the beginning or the end of their execution history. There will
2572 be a single user-visible stop. */
53127008
SM
2573 struct thread_info *eventing = NULL;
2574 while ((eventing == NULL) && !moving.empty ())
e3cfc1c7 2575 {
53127008 2576 for (unsigned int ix = 0; eventing == NULL && ix < moving.size ();)
e3cfc1c7 2577 {
53127008
SM
2578 thread_info *tp = moving[ix];
2579
e3cfc1c7
MM
2580 *status = record_btrace_step_thread (tp);
2581
2582 switch (status->kind)
2583 {
2584 case TARGET_WAITKIND_IGNORE:
2585 ix++;
2586 break;
2587
2588 case TARGET_WAITKIND_NO_HISTORY:
53127008 2589 no_history.push_back (ordered_remove (moving, ix));
e3cfc1c7
MM
2590 break;
2591
2592 default:
53127008 2593 eventing = unordered_remove (moving, ix);
e3cfc1c7
MM
2594 break;
2595 }
2596 }
2597 }
2598
2599 if (eventing == NULL)
2600 {
2601 /* We started with at least one moving thread. This thread must have
2602 either stopped or reached the end of its execution history.
2603
2604 In the former case, EVENTING must not be NULL.
2605 In the latter case, NO_HISTORY must not be empty. */
53127008 2606 gdb_assert (!no_history.empty ());
e3cfc1c7
MM
2607
2608 /* We kept threads moving at the end of their execution history. Stop
2609 EVENTING now that we are going to report its stop. */
53127008 2610 eventing = unordered_remove (no_history, 0);
e3cfc1c7
MM
2611 eventing->btrace.flags &= ~BTHR_MOVE;
2612
2613 *status = btrace_step_no_history ();
2614 }
2615
2616 gdb_assert (eventing != NULL);
2617
2618 /* We kept threads replaying at the end of their execution history. Stop
2619 replaying EVENTING now that we are going to report its stop. */
2620 record_btrace_stop_replaying_at_end (eventing);
52834460
MM
2621
2622 /* Stop all other threads. */
5953356c 2623 if (!target_is_non_stop_p ())
53127008 2624 {
08036331 2625 for (thread_info *tp : all_non_exited_threads ())
53127008
SM
2626 record_btrace_cancel_resume (tp);
2627 }
52834460 2628
a6b5be76
MM
2629 /* In async mode, we need to announce further events. */
2630 if (target_is_async_p ())
2631 record_btrace_maybe_mark_async_event (moving, no_history);
2632
52834460 2633 /* Start record histories anew from the current position. */
e3cfc1c7 2634 record_btrace_clear_histories (&eventing->btrace);
52834460
MM
2635
2636 /* We moved the replay position but did not update registers. */
00431a78 2637 registers_changed_thread (eventing);
e3cfc1c7 2638
43792cf0
PA
2639 DEBUG ("wait ended by thread %s (%s): %s",
2640 print_thread_id (eventing),
a068643d 2641 target_pid_to_str (eventing->ptid).c_str (),
23fdd69e 2642 target_waitstatus_to_string (status).c_str ());
52834460 2643
e3cfc1c7 2644 return eventing->ptid;
52834460
MM
2645}
2646
f6ac5f3d 2647/* The stop method of target record-btrace. */
6e4879f0 2648
f6ac5f3d
PA
2649void
2650record_btrace_target::stop (ptid_t ptid)
6e4879f0 2651{
a068643d 2652 DEBUG ("stop %s", target_pid_to_str (ptid).c_str ());
6e4879f0
MM
2653
2654 /* As long as we're not replaying, just forward the request. */
f6ac5f3d
PA
2655 if ((::execution_direction != EXEC_REVERSE)
2656 && !record_is_replaying (minus_one_ptid))
6e4879f0 2657 {
b6a8c27b 2658 this->beneath ()->stop (ptid);
6e4879f0
MM
2659 }
2660 else
2661 {
08036331
PA
2662 for (thread_info *tp : all_non_exited_threads (ptid))
2663 {
2664 tp->btrace.flags &= ~BTHR_MOVE;
2665 tp->btrace.flags |= BTHR_STOP;
2666 }
6e4879f0
MM
2667 }
2668}
2669
f6ac5f3d 2670/* The can_execute_reverse method of target record-btrace. */
52834460 2671
57810aa7 2672bool
f6ac5f3d 2673record_btrace_target::can_execute_reverse ()
52834460 2674{
57810aa7 2675 return true;
52834460
MM
2676}
2677
f6ac5f3d 2678/* The stopped_by_sw_breakpoint method of target record-btrace. */
52834460 2679
57810aa7 2680bool
f6ac5f3d 2681record_btrace_target::stopped_by_sw_breakpoint ()
52834460 2682{
f6ac5f3d 2683 if (record_is_replaying (minus_one_ptid))
9e8915c6
PA
2684 {
2685 struct thread_info *tp = inferior_thread ();
2686
2687 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2688 }
2689
b6a8c27b 2690 return this->beneath ()->stopped_by_sw_breakpoint ();
9e8915c6
PA
2691}
2692
f6ac5f3d 2693/* The supports_stopped_by_sw_breakpoint method of target
9e8915c6
PA
2694 record-btrace. */
2695
57810aa7 2696bool
f6ac5f3d 2697record_btrace_target::supports_stopped_by_sw_breakpoint ()
9e8915c6 2698{
f6ac5f3d 2699 if (record_is_replaying (minus_one_ptid))
57810aa7 2700 return true;
9e8915c6 2701
b6a8c27b 2702 return this->beneath ()->supports_stopped_by_sw_breakpoint ();
9e8915c6
PA
2703}
2704
f6ac5f3d 2705/* The stopped_by_hw_breakpoint method of target record-btrace. */
9e8915c6 2706
57810aa7 2707bool
f6ac5f3d 2708record_btrace_target::stopped_by_hw_breakpoint ()
9e8915c6 2709{
f6ac5f3d 2710 if (record_is_replaying (minus_one_ptid))
9e8915c6
PA
2711 {
2712 struct thread_info *tp = inferior_thread ();
2713
2714 return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2715 }
2716
b6a8c27b 2717 return this->beneath ()->stopped_by_hw_breakpoint ();
9e8915c6
PA
2718}
2719
f6ac5f3d 2720/* The supports_stopped_by_hw_breakpoint method of target
9e8915c6
PA
2721 record-btrace. */
2722
57810aa7 2723bool
f6ac5f3d 2724record_btrace_target::supports_stopped_by_hw_breakpoint ()
9e8915c6 2725{
f6ac5f3d 2726 if (record_is_replaying (minus_one_ptid))
57810aa7 2727 return true;
52834460 2728
b6a8c27b 2729 return this->beneath ()->supports_stopped_by_hw_breakpoint ();
b2f4cfde
MM
2730}
2731
f6ac5f3d 2732/* The update_thread_list method of target record-btrace. */
e2887aa3 2733
f6ac5f3d
PA
2734void
2735record_btrace_target::update_thread_list ()
e2887aa3 2736{
e8032dde 2737 /* We don't add or remove threads during replay. */
f6ac5f3d 2738 if (record_is_replaying (minus_one_ptid))
e2887aa3
MM
2739 return;
2740
2741 /* Forward the request. */
b6a8c27b 2742 this->beneath ()->update_thread_list ();
e2887aa3
MM
2743}
2744
f6ac5f3d 2745/* The thread_alive method of target record-btrace. */
e2887aa3 2746
57810aa7 2747bool
f6ac5f3d 2748record_btrace_target::thread_alive (ptid_t ptid)
e2887aa3
MM
2749{
2750 /* We don't add or remove threads during replay. */
f6ac5f3d 2751 if (record_is_replaying (minus_one_ptid))
00431a78 2752 return true;
e2887aa3
MM
2753
2754 /* Forward the request. */
b6a8c27b 2755 return this->beneath ()->thread_alive (ptid);
e2887aa3
MM
2756}
2757
066ce621
MM
2758/* Set the replay branch trace instruction iterator. If IT is NULL, replay
2759 is stopped. */
2760
2761static void
2762record_btrace_set_replay (struct thread_info *tp,
2763 const struct btrace_insn_iterator *it)
2764{
2765 struct btrace_thread_info *btinfo;
2766
2767 btinfo = &tp->btrace;
2768
a0f1b963 2769 if (it == NULL)
52834460 2770 record_btrace_stop_replaying (tp);
066ce621
MM
2771 else
2772 {
2773 if (btinfo->replay == NULL)
52834460 2774 record_btrace_start_replaying (tp);
066ce621
MM
2775 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
2776 return;
2777
2778 *btinfo->replay = *it;
00431a78 2779 registers_changed_thread (tp);
066ce621
MM
2780 }
2781
52834460
MM
2782 /* Start anew from the new replay position. */
2783 record_btrace_clear_histories (btinfo);
485668e5 2784
f2ffa92b
PA
2785 inferior_thread ()->suspend.stop_pc
2786 = regcache_read_pc (get_current_regcache ());
485668e5 2787 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
066ce621
MM
2788}
2789
f6ac5f3d 2790/* The goto_record_begin method of target record-btrace. */
066ce621 2791
f6ac5f3d
PA
2792void
2793record_btrace_target::goto_record_begin ()
066ce621
MM
2794{
2795 struct thread_info *tp;
2796 struct btrace_insn_iterator begin;
2797
2798 tp = require_btrace_thread ();
2799
2800 btrace_insn_begin (&begin, &tp->btrace);
b61ce85c
MM
2801
2802 /* Skip gaps at the beginning of the trace. */
2803 while (btrace_insn_get (&begin) == NULL)
2804 {
2805 unsigned int steps;
2806
2807 steps = btrace_insn_next (&begin, 1);
2808 if (steps == 0)
2809 error (_("No trace."));
2810 }
2811
066ce621 2812 record_btrace_set_replay (tp, &begin);
066ce621
MM
2813}
2814
f6ac5f3d 2815/* The goto_record_end method of target record-btrace. */
066ce621 2816
f6ac5f3d
PA
2817void
2818record_btrace_target::goto_record_end ()
066ce621
MM
2819{
2820 struct thread_info *tp;
2821
2822 tp = require_btrace_thread ();
2823
2824 record_btrace_set_replay (tp, NULL);
066ce621
MM
2825}
2826
f6ac5f3d 2827/* The goto_record method of target record-btrace. */
066ce621 2828
f6ac5f3d
PA
2829void
2830record_btrace_target::goto_record (ULONGEST insn)
066ce621
MM
2831{
2832 struct thread_info *tp;
2833 struct btrace_insn_iterator it;
2834 unsigned int number;
2835 int found;
2836
2837 number = insn;
2838
2839 /* Check for wrap-arounds. */
2840 if (number != insn)
2841 error (_("Instruction number out of range."));
2842
2843 tp = require_btrace_thread ();
2844
2845 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
69090cee
TW
2846
2847 /* Check if the instruction could not be found or is a gap. */
2848 if (found == 0 || btrace_insn_get (&it) == NULL)
066ce621
MM
2849 error (_("No such instruction."));
2850
2851 record_btrace_set_replay (tp, &it);
066ce621
MM
2852}
2853
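/* This method is reached via the "record goto <insn>" CLI command; INSN is
   an instruction number as printed by the instruction history.  */
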
f6ac5f3d 2854/* The record_stop_replaying method of target record-btrace. */
797094dd 2855
f6ac5f3d
PA
2856void
2857record_btrace_target::record_stop_replaying ()
797094dd 2858{
08036331 2859 for (thread_info *tp : all_non_exited_threads ())
797094dd
MM
2860 record_btrace_stop_replaying (tp);
2861}
2862
f6ac5f3d 2863/* The execution_direction target method. */
70ad5bff 2864
f6ac5f3d
PA
2865enum exec_direction_kind
2866record_btrace_target::execution_direction ()
70ad5bff
MM
2867{
2868 return record_btrace_resume_exec_dir;
2869}
2870
f6ac5f3d 2871/* The prepare_to_generate_core target method. */
aef92902 2872
f6ac5f3d
PA
2873void
2874record_btrace_target::prepare_to_generate_core ()
aef92902
MM
2875{
2876 record_btrace_generating_corefile = 1;
2877}
2878
f6ac5f3d 2879/* The done_generating_core target method. */
aef92902 2880
f6ac5f3d
PA
2881void
2882record_btrace_target::done_generating_core ()
aef92902
MM
2883{
2884 record_btrace_generating_corefile = 0;
2885}
2886
f4abbc16
MM
2887/* Start recording in BTS format. */
2888
2889static void
cdb34d4a 2890cmd_record_btrace_bts_start (const char *args, int from_tty)
f4abbc16 2891{
f4abbc16
MM
2892 if (args != NULL && *args != 0)
2893 error (_("Invalid argument."));
2894
2895 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2896
492d29ea
PA
2897 TRY
2898 {
95a6b0a1 2899 execute_command ("target record-btrace", from_tty);
492d29ea
PA
2900 }
2901 CATCH (exception, RETURN_MASK_ALL)
f4abbc16
MM
2902 {
2903 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2904 throw_exception (exception);
2905 }
492d29ea 2906 END_CATCH
f4abbc16
MM
2907}
2908
bc504a31 2909/* Start recording in Intel Processor Trace format. */
afedecd3
MM
2910
2911static void
cdb34d4a 2912cmd_record_btrace_pt_start (const char *args, int from_tty)
afedecd3
MM
2913{
2914 if (args != NULL && *args != 0)
2915 error (_("Invalid argument."));
2916
b20a6524 2917 record_btrace_conf.format = BTRACE_FORMAT_PT;
f4abbc16 2918
492d29ea
PA
2919 TRY
2920 {
95a6b0a1 2921 execute_command ("target record-btrace", from_tty);
492d29ea
PA
2922 }
2923 CATCH (exception, RETURN_MASK_ALL)
2924 {
2925 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2926 throw_exception (exception);
2927 }
2928 END_CATCH
afedecd3
MM
2929}
2930
b20a6524
MM
2931/* Alias for "target record". */
2932
2933static void
981a3fb3 2934cmd_record_btrace_start (const char *args, int from_tty)
b20a6524
MM
2935{
2936 if (args != NULL && *args != 0)
2937 error (_("Invalid argument."));
2938
2939 record_btrace_conf.format = BTRACE_FORMAT_PT;
2940
2941 TRY
2942 {
95a6b0a1 2943 execute_command ("target record-btrace", from_tty);
b20a6524
MM
2944 }
2945 CATCH (exception, RETURN_MASK_ALL)
2946 {
2947 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2948
2949 TRY
2950 {
95a6b0a1 2951 execute_command ("target record-btrace", from_tty);
b20a6524 2952 }
b926417a 2953 CATCH (ex, RETURN_MASK_ALL)
b20a6524
MM
2954 {
2955 record_btrace_conf.format = BTRACE_FORMAT_NONE;
b926417a 2956 throw_exception (ex);
b20a6524
MM
2957 }
2958 END_CATCH
2959 }
2960 END_CATCH
2961}
2962
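/* The three start commands above are reached via "record btrace bts",
   "record btrace pt", and plain "record btrace" (alias "record b"), as
   registered in _initialize_record_btrace below.  The plain variant tries
   the Intel Processor Trace format first and falls back to BTS on error.  */
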
67b5c0c1
MM
2963/* The "set record btrace" command. */
2964
2965static void
981a3fb3 2966cmd_set_record_btrace (const char *args, int from_tty)
67b5c0c1 2967{
b85310e1
MM
2968 printf_unfiltered (_("\"set record btrace\" must be followed "
2969 "by an appropriate subcommand.\n"));
2970 help_list (set_record_btrace_cmdlist, "set record btrace ",
2971 all_commands, gdb_stdout);
67b5c0c1
MM
2972}
2973
2974/* The "show record btrace" command. */
2975
2976static void
981a3fb3 2977cmd_show_record_btrace (const char *args, int from_tty)
67b5c0c1
MM
2978{
2979 cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
2980}
2981
2982/* The "show record btrace replay-memory-access" command. */
2983
2984static void
2985cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2986 struct cmd_list_element *c, const char *value)
2987{
2988 fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
2989 replay_memory_access);
2990}
2991
4a4495d6
MM
2992/* The "set record btrace cpu none" command. */
2993
2994static void
2995cmd_set_record_btrace_cpu_none (const char *args, int from_tty)
2996{
2997 if (args != nullptr && *args != 0)
2998 error (_("Trailing junk: '%s'."), args);
2999
3000 record_btrace_cpu_state = CS_NONE;
3001}
3002
3003/* The "set record btrace cpu auto" command. */
3004
3005static void
3006cmd_set_record_btrace_cpu_auto (const char *args, int from_tty)
3007{
3008 if (args != nullptr && *args != 0)
3009 error (_("Trailing junk: '%s'."), args);
3010
3011 record_btrace_cpu_state = CS_AUTO;
3012}
3013
3014/* The "set record btrace cpu" command. */
3015
3016static void
3017cmd_set_record_btrace_cpu (const char *args, int from_tty)
3018{
3019 if (args == nullptr)
3020 args = "";
3021
3022 /* We use a hard-coded vendor string for now. */
3023 unsigned int family, model, stepping;
3024 int l1, l2, matches = sscanf (args, "intel: %u/%u%n/%u%n", &family,
3025 &model, &l1, &stepping, &l2);
3026 if (matches == 3)
3027 {
3028 if (strlen (args) != l2)
3029 error (_("Trailing junk: '%s'."), args + l2);
3030 }
3031 else if (matches == 2)
3032 {
3033 if (strlen (args) != l1)
3034 error (_("Trailing junk: '%s'."), args + l1);
3035
3036 stepping = 0;
3037 }
3038 else
3039 error (_("Bad format. See \"help set record btrace cpu\"."));
3040
3041 if (USHRT_MAX < family)
3042 error (_("Cpu family too big."));
3043
3044 if (UCHAR_MAX < model)
3045 error (_("Cpu model too big."));
3046
3047 if (UCHAR_MAX < stepping)
3048 error (_("Cpu stepping too big."));
3049
3050 record_btrace_cpu.vendor = CV_INTEL;
3051 record_btrace_cpu.family = family;
3052 record_btrace_cpu.model = model;
3053 record_btrace_cpu.stepping = stepping;
3054
3055 record_btrace_cpu_state = CS_CPU;
3056}
3057
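/* Example usage (hypothetical values): "set record btrace cpu intel: 6/62"
   selects family 6, model 62, stepping 0, while "intel: 6/62/4" also sets
   stepping 4.

   A minimal standalone sketch of the sscanf/%n trailing-junk check used
   above, with a hypothetical helper name that is not part of this file:

     static int
     parse_family_model (const char *args, unsigned int *family,
                         unsigned int *model)
     {
       int len = 0;

       if (sscanf (args, "intel: %u/%u%n", family, model, &len) != 2)
         return 0;

       return args[len] == '\0';   // Reject trailing junk.
     }
*/
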
3058/* The "show record btrace cpu" command. */
3059
3060static void
3061cmd_show_record_btrace_cpu (const char *args, int from_tty)
3062{
4a4495d6
MM
3063 if (args != nullptr && *args != 0)
3064 error (_("Trailing junk: '%s'."), args);
3065
3066 switch (record_btrace_cpu_state)
3067 {
3068 case CS_AUTO:
3069 printf_unfiltered (_("btrace cpu is 'auto'.\n"));
3070 return;
3071
3072 case CS_NONE:
3073 printf_unfiltered (_("btrace cpu is 'none'.\n"));
3074 return;
3075
3076 case CS_CPU:
3077 switch (record_btrace_cpu.vendor)
3078 {
3079 case CV_INTEL:
3080 if (record_btrace_cpu.stepping == 0)
3081 printf_unfiltered (_("btrace cpu is 'intel: %u/%u'.\n"),
3082 record_btrace_cpu.family,
3083 record_btrace_cpu.model);
3084 else
3085 printf_unfiltered (_("btrace cpu is 'intel: %u/%u/%u'.\n"),
3086 record_btrace_cpu.family,
3087 record_btrace_cpu.model,
3088 record_btrace_cpu.stepping);
3089 return;
3090 }
3091 }
3092
3093 error (_("Internal error: bad cpu state."));
3094}
3095
3096/* The "set record btrace bts" command. */
d33501a5
MM
3097
3098static void
981a3fb3 3099cmd_set_record_btrace_bts (const char *args, int from_tty)
d33501a5
MM
3100{
3101 printf_unfiltered (_("\"set record btrace bts\" must be followed "
b20a6524 3102 "by an appropriate subcommand.\n"));
d33501a5
MM
3103 help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
3104 all_commands, gdb_stdout);
3105}
3106
3107/* The "show record btrace bts" command. */
3108
3109static void
981a3fb3 3110cmd_show_record_btrace_bts (const char *args, int from_tty)
d33501a5
MM
3111{
3112 cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
3113}
3114
b20a6524
MM
3115/* The "set record btrace pt" command. */
3116
3117static void
981a3fb3 3118cmd_set_record_btrace_pt (const char *args, int from_tty)
b20a6524
MM
3119{
3120 printf_unfiltered (_("\"set record btrace pt\" must be followed "
3121 "by an appropriate subcommand.\n"));
3122 help_list (set_record_btrace_pt_cmdlist, "set record btrace pt ",
3123 all_commands, gdb_stdout);
3124}
3125
3126/* The "show record btrace pt" command. */
3127
3128static void
981a3fb3 3129cmd_show_record_btrace_pt (const char *args, int from_tty)
b20a6524
MM
3130{
3131 cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, "");
3132}
3133
3134/* The "record bts buffer-size" show value function. */
3135
3136static void
3137show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
3138 struct cmd_list_element *c,
3139 const char *value)
3140{
3141 fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
3142 value);
3143}
3144
3145/* The "record pt buffer-size" show value function. */
3146
3147static void
3148show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
3149 struct cmd_list_element *c,
3150 const char *value)
3151{
3152 fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
3153 value);
3154}
3155
afedecd3
MM
3156/* Initialize btrace commands. */
3157
3158void
3159_initialize_record_btrace (void)
3160{
f4abbc16
MM
3161 add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
3162 _("Start branch trace recording."), &record_btrace_cmdlist,
3163 "record btrace ", 0, &record_cmdlist);
afedecd3
MM
3164 add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
3165
f4abbc16
MM
3166 add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
3167 _("\
3168Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
3169The processor stores a from/to record for each branch into a cyclic buffer.\n\
3170This format may not be available on all processors."),
3171 &record_btrace_cmdlist);
3172 add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);
3173
b20a6524
MM
3174 add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
3175 _("\
bc504a31 3176Start branch trace recording in Intel Processor Trace format.\n\n\
b20a6524
MM
3177This format may not be available on all processors."),
3178 &record_btrace_cmdlist);
3179 add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);
3180
67b5c0c1
MM
3181 add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
3182 _("Set record options"), &set_record_btrace_cmdlist,
3183 "set record btrace ", 0, &set_record_cmdlist);
3184
3185 add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
3186 _("Show record options"), &show_record_btrace_cmdlist,
3187 "show record btrace ", 0, &show_record_cmdlist);
3188
3189 add_setshow_enum_cmd ("replay-memory-access", no_class,
3190 replay_memory_access_types, &replay_memory_access, _("\
3191Set what memory accesses are allowed during replay."), _("\
3192Show what memory accesses are allowed during replay."),
3193 _("Default is READ-ONLY.\n\n\
3194The btrace record target does not trace data.\n\
3195The memory therefore corresponds to the live target and not \
3196to the current replay position.\n\n\
3197When READ-ONLY, allow accesses to read-only memory during replay.\n\
3198When READ-WRITE, allow accesses to read-only and read-write memory during \
3199replay."),
3200 NULL, cmd_show_replay_memory_access,
3201 &set_record_btrace_cmdlist,
3202 &show_record_btrace_cmdlist);
3203
4a4495d6
MM
3204 add_prefix_cmd ("cpu", class_support, cmd_set_record_btrace_cpu,
3205 _("\
3206Set the cpu to be used for trace decode.\n\n\
55063ddb
TT
3207The format is \"VENDOR:IDENTIFIER\" or \"none\" or \"auto\" (default).\n\
3208For vendor \"intel\" the format is \"FAMILY/MODEL[/STEPPING]\".\n\n\
4a4495d6
MM
3209When decoding branch trace, enable errata workarounds for the specified cpu.\n\
3210The default is \"auto\", which uses the cpu on which the trace was recorded.\n\
3211When GDB does not support that cpu, this option can be used to enable\n\
3212workarounds for a similar cpu that GDB supports.\n\n\
3213When set to \"none\", errata workarounds are disabled."),
3214 &set_record_btrace_cpu_cmdlist,
3215 _("set record btrace cpu "), 1,
3216 &set_record_btrace_cmdlist);
3217
3218 add_cmd ("auto", class_support, cmd_set_record_btrace_cpu_auto, _("\
3219Automatically determine the cpu to be used for trace decode."),
3220 &set_record_btrace_cpu_cmdlist);
3221
3222 add_cmd ("none", class_support, cmd_set_record_btrace_cpu_none, _("\
3223Do not enable errata workarounds for trace decode."),
3224 &set_record_btrace_cpu_cmdlist);
3225
3226 add_cmd ("cpu", class_support, cmd_show_record_btrace_cpu, _("\
3227Show the cpu to be used for trace decode."),
3228 &show_record_btrace_cmdlist);
3229
d33501a5
MM
3230 add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
3231 _("Set record btrace bts options"),
3232 &set_record_btrace_bts_cmdlist,
3233 "set record btrace bts ", 0, &set_record_btrace_cmdlist);
3234
3235 add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
3236 _("Show record btrace bts options"),
3237 &show_record_btrace_bts_cmdlist,
3238 "show record btrace bts ", 0, &show_record_btrace_cmdlist);
3239
3240 add_setshow_uinteger_cmd ("buffer-size", no_class,
3241 &record_btrace_conf.bts.size,
3242 _("Set the record/replay bts buffer size."),
3243 _("Show the record/replay bts buffer size."), _("\
3244When starting recording request a trace buffer of this size. \
3245The actual buffer size may differ from the requested size. \
3246Use \"info record\" to see the actual buffer size.\n\n\
3247Bigger buffers allow longer recording but also take more time to process \
3248the recorded execution trace.\n\n\
b20a6524
MM
3249The trace buffer size may not be changed while recording."), NULL,
3250 show_record_bts_buffer_size_value,
d33501a5
MM
3251 &set_record_btrace_bts_cmdlist,
3252 &show_record_btrace_bts_cmdlist);
3253
b20a6524
MM
3254 add_prefix_cmd ("pt", class_support, cmd_set_record_btrace_pt,
3255 _("Set record btrace pt options"),
3256 &set_record_btrace_pt_cmdlist,
3257 "set record btrace pt ", 0, &set_record_btrace_cmdlist);
3258
3259 add_prefix_cmd ("pt", class_support, cmd_show_record_btrace_pt,
3260 _("Show record btrace pt options"),
3261 &show_record_btrace_pt_cmdlist,
3262 "show record btrace pt ", 0, &show_record_btrace_cmdlist);
3263
3264 add_setshow_uinteger_cmd ("buffer-size", no_class,
3265 &record_btrace_conf.pt.size,
3266 _("Set the record/replay pt buffer size."),
3267 _("Show the record/replay pt buffer size."), _("\
3268Bigger buffers allow longer recording but also take more time to process \
3269the recorded execution.\n\
3270The actual buffer size may differ from the requested size. Use \"info record\" \
3271to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
3272 &set_record_btrace_pt_cmdlist,
3273 &show_record_btrace_pt_cmdlist);
3274
d9f719f1 3275 add_target (record_btrace_target_info, record_btrace_target_open);
0b722aec
MM
3276
3277 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
3278 xcalloc, xfree);
d33501a5
MM
3279
3280 record_btrace_conf.bts.size = 64 * 1024;
b20a6524 3281 record_btrace_conf.pt.size = 16 * 1024;
afedecd3 3282}