gdb: add target_ops::supports_displaced_step
[deliverable/binutils-gdb.git] / gdb / record-btrace.c
CommitLineData
afedecd3
MM
1/* Branch trace support for GDB, the GNU debugger.
2
b811d2c2 3 Copyright (C) 2013-2020 Free Software Foundation, Inc.
afedecd3
MM
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22#include "defs.h"
23#include "record.h"
c0272db5 24#include "record-btrace.h"
afedecd3
MM
25#include "gdbthread.h"
26#include "target.h"
27#include "gdbcmd.h"
28#include "disasm.h"
76727919 29#include "observable.h"
afedecd3
MM
30#include "cli/cli-utils.h"
31#include "source.h"
32#include "ui-out.h"
33#include "symtab.h"
34#include "filenames.h"
1f3ef581 35#include "regcache.h"
cecac1ab 36#include "frame-unwind.h"
0b722aec 37#include "hashtab.h"
45741a9c 38#include "infrun.h"
400b5eca 39#include "gdbsupport/event-loop.h"
70ad5bff 40#include "inf-loop.h"
00431a78 41#include "inferior.h"
325fac50 42#include <algorithm>
0d12e84c 43#include "gdbarch.h"
e43b10e1 44#include "cli/cli-style.h"
93b54c8e 45#include "async-event.h"
afedecd3 46
d9f719f1
PA
/* Static metadata describing the record-btrace target: its short name
   (as used in "target record-btrace") plus one-line and long
   human-readable descriptions.  */
static const target_info record_btrace_target_info = {
  "record-btrace",
  N_("Branch tracing target"),
  N_("Collect control-flow trace and provide the execution history.")
};
52
/* The target_ops of record-btrace.  This target sits at the record stratum
   on top of the execution target and serves trace data, replay stepping,
   and history browsing.  */

class record_btrace_target final : public target_ops
{
public:
  const target_info &info () const override
  { return record_btrace_target_info; }

  strata stratum () const override { return record_stratum; }

  void close () override;
  void async (int) override;

  /* Generic record-target behavior is delegated to the record_* helpers.  */
  void detach (inferior *inf, int from_tty) override
  { record_detach (this, inf, from_tty); }

  void disconnect (const char *, int) override;

  void mourn_inferior () override
  { record_mourn_inferior (this); }

  void kill () override
  { record_kill (this); }

  enum record_method record_method (ptid_t ptid) override;

  void stop_recording () override;
  void info_record () override;

  /* Instruction and call history browsing.  */
  void insn_history (int size, gdb_disassembly_flags flags) override;
  void insn_history_from (ULONGEST from, int size,
			  gdb_disassembly_flags flags) override;
  void insn_history_range (ULONGEST begin, ULONGEST end,
			   gdb_disassembly_flags flags) override;
  void call_history (int size, record_print_flags flags) override;
  void call_history_from (ULONGEST begin, int size, record_print_flags flags)
    override;
  void call_history_range (ULONGEST begin, ULONGEST end, record_print_flags flags)
    override;

  /* Replay state queries and control.  */
  bool record_is_replaying (ptid_t ptid) override;
  bool record_will_replay (ptid_t ptid, int dir) override;
  void record_stop_replaying () override;

  enum target_xfer_status xfer_partial (enum target_object object,
					const char *annex,
					gdb_byte *readbuf,
					const gdb_byte *writebuf,
					ULONGEST offset, ULONGEST len,
					ULONGEST *xfered_len) override;

  int insert_breakpoint (struct gdbarch *,
			 struct bp_target_info *) override;
  int remove_breakpoint (struct gdbarch *, struct bp_target_info *,
			 enum remove_bp_reason) override;

  void fetch_registers (struct regcache *, int) override;

  void store_registers (struct regcache *, int) override;
  void prepare_to_store (struct regcache *) override;

  /* Frame unwinders used while replaying from the recorded trace.  */
  const struct frame_unwind *get_unwinder () override;

  const struct frame_unwind *get_tailcall_unwinder () override;

  void commit_resume () override;
  void resume (ptid_t, int, enum gdb_signal) override;
  ptid_t wait (ptid_t, struct target_waitstatus *, int) override;

  void stop (ptid_t) override;
  void update_thread_list () override;
  bool thread_alive (ptid_t ptid) override;

  /* Navigation within the recorded execution history.  */
  void goto_record_begin () override;
  void goto_record_end () override;
  void goto_record (ULONGEST insn) override;

  bool can_execute_reverse () override;

  bool stopped_by_sw_breakpoint () override;
  bool supports_stopped_by_sw_breakpoint () override;

  bool stopped_by_hw_breakpoint () override;
  bool supports_stopped_by_hw_breakpoint () override;

  enum exec_direction_kind execution_direction () override;
  void prepare_to_generate_core () override;
  void done_generating_core () override;
};

/* The single instance pushed onto the target stack.  */
static record_btrace_target record_btrace_ops;
143
/* Initialize the record-btrace target ops.  */

/* Token associated with a new-thread observer enabling branch tracing
   for the new thread.  */
static const gdb::observers::token record_btrace_thread_observer_token {};

/* Memory access types used in set/show record btrace replay-memory-access.  */
static const char replay_memory_access_read_only[] = "read-only";
static const char replay_memory_access_read_write[] = "read-write";
static const char *const replay_memory_access_types[] =
{
  replay_memory_access_read_only,
  replay_memory_access_read_write,
  NULL
};

/* The currently allowed replay memory access type.  */
static const char *replay_memory_access = replay_memory_access_read_only;

/* The cpu state kinds for "set record btrace cpu".  */
enum record_btrace_cpu_state_kind
{
  CS_AUTO,	/* Use the cpu of the inferior (default).  */
  CS_NONE,	/* Do not apply cpu-specific errata workarounds.  */
  CS_CPU	/* Use the explicitly configured cpu below.  */
};

/* The current cpu state.  */
static enum record_btrace_cpu_state_kind record_btrace_cpu_state = CS_AUTO;

/* The current cpu for trace decode.  */
static struct btrace_cpu record_btrace_cpu;

/* Command lists for "set/show record btrace".  */
static struct cmd_list_element *set_record_btrace_cmdlist;
static struct cmd_list_element *show_record_btrace_cmdlist;

/* The execution direction of the last resume we got.  See record-full.c.  */
static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;

/* The async event handler for reverse/replay execution.  */
static struct async_event_handler *record_btrace_async_inferior_event_handler;

/* A flag indicating that we are currently generating a core file.  */
static int record_btrace_generating_corefile;

/* The current branch trace configuration.  */
static struct btrace_config record_btrace_conf;

/* Command list for "record btrace".  */
static struct cmd_list_element *record_btrace_cmdlist;

/* Command lists for "set/show record btrace bts".  */
static struct cmd_list_element *set_record_btrace_bts_cmdlist;
static struct cmd_list_element *show_record_btrace_bts_cmdlist;

/* Command lists for "set/show record btrace pt".  */
static struct cmd_list_element *set_record_btrace_pt_cmdlist;
static struct cmd_list_element *show_record_btrace_pt_cmdlist;

/* Command list for "set record btrace cpu".  */
static struct cmd_list_element *set_record_btrace_cpu_cmdlist;
206
afedecd3
MM
/* Print a record-btrace debug message.  Use do ... while (0) to avoid
   ambiguities when used in if statements.  Output is only produced when
   record debugging is enabled (record_debug != 0).  */

#define DEBUG(msg, args...) \
  do \
    { \
      if (record_debug != 0) \
	fprintf_unfiltered (gdb_stdlog, \
			    "[record-btrace] " msg "\n", ##args); \
    } \
  while (0)
218
219
4a4495d6
MM
220/* Return the cpu configured by the user. Returns NULL if the cpu was
221 configured as auto. */
222const struct btrace_cpu *
223record_btrace_get_cpu (void)
224{
225 switch (record_btrace_cpu_state)
226 {
227 case CS_AUTO:
228 return nullptr;
229
230 case CS_NONE:
231 record_btrace_cpu.vendor = CV_UNKNOWN;
232 /* Fall through. */
233 case CS_CPU:
234 return &record_btrace_cpu;
235 }
236
237 error (_("Internal error: bad record btrace cpu state."));
238}
239
afedecd3 240/* Update the branch trace for the current thread and return a pointer to its
066ce621 241 thread_info.
afedecd3
MM
242
243 Throws an error if there is no thread or no trace. This function never
244 returns NULL. */
245
066ce621
MM
246static struct thread_info *
247require_btrace_thread (void)
afedecd3 248{
afedecd3
MM
249 DEBUG ("require");
250
00431a78 251 if (inferior_ptid == null_ptid)
afedecd3
MM
252 error (_("No thread."));
253
00431a78
PA
254 thread_info *tp = inferior_thread ();
255
cd4007e4
MM
256 validate_registers_access ();
257
4a4495d6 258 btrace_fetch (tp, record_btrace_get_cpu ());
afedecd3 259
6e07b1d2 260 if (btrace_is_empty (tp))
afedecd3
MM
261 error (_("No trace."));
262
066ce621
MM
263 return tp;
264}
265
266/* Update the branch trace for the current thread and return a pointer to its
267 branch trace information struct.
268
269 Throws an error if there is no thread or no trace. This function never
270 returns NULL. */
271
272static struct btrace_thread_info *
273require_btrace (void)
274{
275 struct thread_info *tp;
276
277 tp = require_btrace_thread ();
278
279 return &tp->btrace;
afedecd3
MM
280}
281
/* Enable branch tracing for one thread.  Warn on errors.  */

static void
record_btrace_enable_warn (struct thread_info *tp)
{
  /* Ignore this thread if its inferior is not recorded by us.  */
  target_ops *rec = tp->inf->target_at (record_stratum);
  if (rec != &record_btrace_ops)
    return;

  try
    {
      btrace_enable (tp, &record_btrace_conf);
    }
  catch (const gdb_exception_error &error)
    {
      /* Do not let a tracing failure abort thread creation; report it
	 and carry on untraced.  */
      warning ("%s", error.what ());
    }
}
301
afedecd3
MM
/* Enable automatic tracing of new threads.  Attaches the new-thread
   observer that calls record_btrace_enable_warn for each thread that
   appears.  */

static void
record_btrace_auto_enable (void)
{
  DEBUG ("attach thread observer");

  gdb::observers::new_thread.attach (record_btrace_enable_warn,
				     record_btrace_thread_observer_token);
}
312
/* Disable automatic tracing of new threads.  Detaches the observer
   attached by record_btrace_auto_enable, identified by its token.  */

static void
record_btrace_auto_disable (void)
{
  DEBUG ("detach thread observer");

  gdb::observers::new_thread.detach (record_btrace_thread_observer_token);
}
322
70ad5bff
MM
/* The record-btrace async event handler function.  Forwards the event to
   the regular inferior event handling.  */

static void
record_btrace_handle_async_inferior_event (gdb_client_data data)
{
  inferior_event_handler (INF_REG_EVENT, NULL);
}
330
c0272db5
TW
331/* See record-btrace.h. */
332
333void
334record_btrace_push_target (void)
335{
336 const char *format;
337
338 record_btrace_auto_enable ();
339
340 push_target (&record_btrace_ops);
341
342 record_btrace_async_inferior_event_handler
343 = create_async_event_handler (record_btrace_handle_async_inferior_event,
344 NULL);
345 record_btrace_generating_corefile = 0;
346
347 format = btrace_format_short_string (record_btrace_conf.format);
76727919 348 gdb::observers::record_changed.notify (current_inferior (), 1, "btrace", format);
c0272db5
TW
349}
350
228f1508
SM
/* Disable btrace on a set of threads on scope exit.  Used to roll back
   partially-enabled tracing if enabling fails for a later thread; call
   discard () once every thread has been enabled successfully.  */

struct scoped_btrace_disable
{
  scoped_btrace_disable () = default;

  DISABLE_COPY_AND_ASSIGN (scoped_btrace_disable);

  /* Disable btrace for every thread still registered.  */
  ~scoped_btrace_disable ()
  {
    for (thread_info *tp : m_threads)
      btrace_disable (tp);
  }

  /* Register THREAD to be disabled on scope exit.  */
  void add_thread (thread_info *thread)
  {
    m_threads.push_front (thread);
  }

  /* Commit: forget all registered threads so nothing is disabled.  */
  void discard ()
  {
    m_threads.clear ();
  }

private:
  /* The threads whose tracing is rolled back on scope exit.  */
  std::forward_list<thread_info *> m_threads;
};
378
/* Open target record-btrace.  ARGS optionally selects the threads to
   trace by global thread number; an empty list means all threads.  */

static void
record_btrace_target_open (const char *args, int from_tty)
{
  /* If we fail to enable btrace for one thread, disable it for the threads for
     which it was successfully enabled.  */
  scoped_btrace_disable btrace_disable;

  DEBUG ("open");

  record_preopen ();

  if (!target_has_execution)
    error (_("The program is not being run."));

  for (thread_info *tp : current_inferior ()->non_exited_threads ())
    if (args == NULL || *args == 0 || number_is_in_list (args, tp->global_num))
      {
	/* btrace_enable throws on failure, triggering the rollback
	   of all previously enabled threads.  */
	btrace_enable (tp, &record_btrace_conf);

	btrace_disable.add_thread (tp);
      }

  record_btrace_push_target ();

  /* All threads enabled; keep tracing active past this scope.  */
  btrace_disable.discard ();
}
407
f6ac5f3d 408/* The stop_recording method of target record-btrace. */
afedecd3 409
f6ac5f3d
PA
410void
411record_btrace_target::stop_recording ()
afedecd3 412{
afedecd3
MM
413 DEBUG ("stop recording");
414
415 record_btrace_auto_disable ();
416
d89edf9b 417 for (thread_info *tp : current_inferior ()->non_exited_threads ())
afedecd3
MM
418 if (tp->btrace.target != NULL)
419 btrace_disable (tp);
420}
421
f6ac5f3d 422/* The disconnect method of target record-btrace. */
c0272db5 423
f6ac5f3d
PA
424void
425record_btrace_target::disconnect (const char *args,
426 int from_tty)
c0272db5 427{
b6a8c27b 428 struct target_ops *beneath = this->beneath ();
c0272db5
TW
429
430 /* Do not stop recording, just clean up GDB side. */
f6ac5f3d 431 unpush_target (this);
c0272db5
TW
432
433 /* Forward disconnect. */
f6ac5f3d 434 beneath->disconnect (args, from_tty);
c0272db5
TW
435}
436
/* The close method of target record-btrace.  */

void
record_btrace_target::close ()
{
  /* Tear down the async event plumbing first.  */
  if (record_btrace_async_inferior_event_handler != NULL)
    delete_async_event_handler (&record_btrace_async_inferior_event_handler);

  /* Make sure automatic recording gets disabled even if we did not stop
     recording before closing the record-btrace target.  */
  record_btrace_auto_disable ();

  /* We should have already stopped recording.
     Tear down btrace in case we have not.  */
  for (thread_info *tp : current_inferior ()->non_exited_threads ())
    btrace_teardown (tp);
}
454
f6ac5f3d 455/* The async method of target record-btrace. */
b7d2e916 456
f6ac5f3d
PA
457void
458record_btrace_target::async (int enable)
b7d2e916 459{
6a3753b3 460 if (enable)
b7d2e916
PA
461 mark_async_event_handler (record_btrace_async_inferior_event_handler);
462 else
463 clear_async_event_handler (record_btrace_async_inferior_event_handler);
464
b6a8c27b 465 this->beneath ()->async (enable);
b7d2e916
PA
466}
467
d33501a5
MM
/* Adjusts the size and returns a human readable size suffix.

   If *SIZE is an exact multiple of a binary unit (GiB, MiB, KiB — largest
   first), *SIZE is scaled down to that unit and the unit's suffix is
   returned; otherwise *SIZE is left unchanged and "" is returned.  */

static const char *
record_btrace_adjust_size (unsigned int *size)
{
  /* Binary units, largest first; the first that divides *SIZE evenly
     wins (so 1 GiB is reported as "1GB", not "1024MB").  */
  static const struct
  {
    unsigned int shift;
    const char *suffix;
  } units[] =
  {
    { 30, "GB" },
    { 20, "MB" },
    { 10, "kB" }
  };

  const unsigned int sz = *size;

  for (size_t i = 0; i < sizeof (units) / sizeof (units[0]); ++i)
    if ((sz & ((1u << units[i].shift) - 1)) == 0)
      {
	*size = sz >> units[i].shift;
	return units[i].suffix;
      }

  return "";
}
495
496/* Print a BTS configuration. */
497
498static void
499record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
500{
501 const char *suffix;
502 unsigned int size;
503
504 size = conf->size;
505 if (size > 0)
506 {
507 suffix = record_btrace_adjust_size (&size);
508 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
509 }
510}
511
bc504a31 512/* Print an Intel Processor Trace configuration. */
b20a6524
MM
513
514static void
515record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
516{
517 const char *suffix;
518 unsigned int size;
519
520 size = conf->size;
521 if (size > 0)
522 {
523 suffix = record_btrace_adjust_size (&size);
524 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
525 }
526}
527
d33501a5
MM
528/* Print a branch tracing configuration. */
529
530static void
531record_btrace_print_conf (const struct btrace_config *conf)
532{
533 printf_unfiltered (_("Recording format: %s.\n"),
534 btrace_format_string (conf->format));
535
536 switch (conf->format)
537 {
538 case BTRACE_FORMAT_NONE:
539 return;
540
541 case BTRACE_FORMAT_BTS:
542 record_btrace_print_bts_conf (&conf->bts);
543 return;
b20a6524
MM
544
545 case BTRACE_FORMAT_PT:
546 record_btrace_print_pt_conf (&conf->pt);
547 return;
d33501a5
MM
548 }
549
40c94099 550 internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
d33501a5
MM
551}
552
/* The info_record method of target record-btrace.  Prints the recording
   configuration, the number of recorded instructions/functions/gaps, and
   the replay position if replaying.  */

void
record_btrace_target::info_record ()
{
  struct btrace_thread_info *btinfo;
  const struct btrace_config *conf;
  struct thread_info *tp;
  unsigned int insns, calls, gaps;

  DEBUG ("info");

  if (inferior_ptid == null_ptid)
    error (_("No thread."));

  tp = inferior_thread ();

  validate_registers_access ();

  btinfo = &tp->btrace;

  /* "::" disambiguates the free function from this class's member.  */
  conf = ::btrace_conf (btinfo);
  if (conf != NULL)
    record_btrace_print_conf (conf);

  btrace_fetch (tp, record_btrace_get_cpu ());

  insns = 0;
  calls = 0;
  gaps = 0;

  if (!btrace_is_empty (tp))
    {
      struct btrace_call_iterator call;
      struct btrace_insn_iterator insn;

      /* Count functions by stepping back from one-past-the-end.  */
      btrace_call_end (&call, btinfo);
      btrace_call_prev (&call, 1);
      calls = btrace_call_number (&call);

      btrace_insn_end (&insn, btinfo);
      insns = btrace_insn_number (&insn);

      /* If the last instruction is not a gap, it is the current instruction
	 that is not actually part of the record.  */
      if (btrace_insn_get (&insn) != NULL)
	insns -= 1;

      gaps = btinfo->ngaps;
    }

  printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
		       "for thread %s (%s).\n"), insns, calls, gaps,
		     print_thread_id (tp),
		     target_pid_to_str (tp->ptid).c_str ());

  if (btrace_is_replaying (tp))
    printf_unfiltered (_("Replay in progress.  At instruction %u.\n"),
		       btrace_insn_number (btinfo->replay));
}
613
31fd9caa
MM
614/* Print a decode error. */
615
616static void
617btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
618 enum btrace_format format)
619{
508352a9 620 const char *errstr = btrace_decode_error (format, errcode);
31fd9caa 621
112e8700 622 uiout->text (_("["));
508352a9
TW
623 /* ERRCODE > 0 indicates notifications on BTRACE_FORMAT_PT. */
624 if (!(format == BTRACE_FORMAT_PT && errcode > 0))
31fd9caa 625 {
112e8700 626 uiout->text (_("decode error ("));
381befee 627 uiout->field_signed ("errcode", errcode);
112e8700 628 uiout->text (_("): "));
31fd9caa 629 }
112e8700
SM
630 uiout->text (errstr);
631 uiout->text (_("]\n"));
31fd9caa
MM
632}
633
f94cc897
MM
/* A range of source lines.  */

struct btrace_line_range
{
  /* The symtab this line is from.  */
  struct symtab *symtab;

  /* The first line (inclusive).  */
  int begin;

  /* The last line (exclusive).  */
  int end;
};

/* Construct a line range covering [BEGIN; END) in SYMTAB.  */

static struct btrace_line_range
btrace_mk_line_range (struct symtab *symtab, int begin, int end)
{
  struct btrace_line_range range;

  range.symtab = symtab;
  range.begin = begin;
  range.end = end;

  return range;
}

/* Add a line to a line range so that LINE is covered afterwards.  */

static struct btrace_line_range
btrace_line_range_add (struct btrace_line_range range, int line)
{
  if (range.end <= range.begin)
    {
      /* This is the first entry.  */
      range.begin = line;
      range.end = line + 1;
    }
  else if (line < range.begin)
    range.begin = line;
  else if (line >= range.end)
    /* END is exclusive, so it must be one past LINE.  Setting END to LINE
       (as the previous code did) left the largest added line outside the
       range and thus unprinted by btrace_print_lines.  */
    range.end = line + 1;

  return range;
}

/* Return non-zero if RANGE is empty, zero otherwise.  */

static int
btrace_line_range_is_empty (struct btrace_line_range range)
{
  return range.end <= range.begin;
}

/* Return non-zero if LHS contains RHS, zero otherwise.  */

static int
btrace_line_range_contains_range (struct btrace_line_range lhs,
				  struct btrace_line_range rhs)
{
  return ((lhs.symtab == rhs.symtab)
	  && (lhs.begin <= rhs.begin)
	  && (rhs.end <= lhs.end));
}
699
/* Find the line range associated with PC: all source lines that linetable
   entries at exactly PC map to.  Returns an empty range if PC has no
   symtab, no linetable, or no matching entries.  */

static struct btrace_line_range
btrace_find_line_range (CORE_ADDR pc)
{
  struct btrace_line_range range;
  struct linetable_entry *lines;
  struct linetable *ltable;
  struct symtab *symtab;
  int nlines, i;

  symtab = find_pc_line_symtab (pc);
  if (symtab == NULL)
    return btrace_mk_line_range (NULL, 0, 0);

  ltable = SYMTAB_LINETABLE (symtab);
  if (ltable == NULL)
    return btrace_mk_line_range (symtab, 0, 0);

  nlines = ltable->nitems;
  lines = ltable->item;
  if (nlines <= 0)
    return btrace_mk_line_range (symtab, 0, 0);

  range = btrace_mk_line_range (symtab, 0, 0);
  /* NOTE(review): the loop bound NLINES - 1 skips the final linetable
     entry — presumably an end-of-sequence marker rather than a real
     line; confirm against the linetable layout.  */
  for (i = 0; i < nlines - 1; i++)
    {
      /* The test of is_stmt here was added when the is_stmt field was
	 introduced to the 'struct linetable_entry' structure.  This
	 ensured that this loop maintained the same behaviour as before we
	 introduced is_stmt.  That said, it might be that we would be
	 better off not checking is_stmt here, this would lead to us
	 possibly adding more line numbers to the range.  At the time this
	 change was made I was unsure how to test this so chose to go with
	 maintaining the existing experience.  */
      if ((lines[i].pc == pc) && (lines[i].line != 0)
	  && (lines[i].is_stmt == 1))
	range = btrace_line_range_add (range, lines[i].line);
    }

  return range;
}
742
/* Print source lines in LINES to UIOUT.

   SRC_AND_ASM_TUPLE and ASM_LIST are the ui-out emitters for the last
   printed source line and the instructions belonging to it.  When printing
   a new source line, we close the emitters of the previous line and open
   new ones.  If the source line range in LINES is not empty, this function
   leaves the emitters for the last printed source line open so the caller
   can add instructions to it.  */

static void
btrace_print_lines (struct btrace_line_range lines, struct ui_out *uiout,
		    gdb::optional<ui_out_emit_tuple> *src_and_asm_tuple,
		    gdb::optional<ui_out_emit_list> *asm_list,
		    gdb_disassembly_flags flags)
{
  print_source_lines_flags psl_flags;

  if (flags & DISASSEMBLY_FILENAME)
    psl_flags |= PRINT_SOURCE_LINES_FILENAME;

  for (int line = lines.begin; line < lines.end; ++line)
    {
      /* Close the previous line's instruction list before opening the
	 tuple for this line.  */
      asm_list->reset ();

      src_and_asm_tuple->emplace (uiout, "src_and_asm_line");

      print_source_lines (lines.symtab, line, line + 1, psl_flags);

      /* Open the instruction list; the caller adds the instructions.  */
      asm_list->emplace (uiout, "line_asm_insn");
    }
}
774
afedecd3
MM
/* Disassemble a section of the recorded instruction trace.

   UIOUT receives the output; BTINFO is the trace of the current thread;
   [BEGIN; END) is the iterator range to print; FLAGS controls the
   disassembly style.  Gaps in the trace are printed as decode errors.  */

static void
btrace_insn_history (struct ui_out *uiout,
		     const struct btrace_thread_info *btinfo,
		     const struct btrace_insn_iterator *begin,
		     const struct btrace_insn_iterator *end,
		     gdb_disassembly_flags flags)
{
  DEBUG ("itrace (0x%x): [%u; %u)", (unsigned) flags,
	 btrace_insn_number (begin), btrace_insn_number (end));

  /* Always mark speculatively executed instructions.  */
  flags |= DISASSEMBLY_SPECULATIVE;

  struct gdbarch *gdbarch = target_gdbarch ();
  btrace_line_range last_lines = btrace_mk_line_range (NULL, 0, 0);

  ui_out_emit_list list_emitter (uiout, "asm_insns");

  /* Emitters for the current source line and its instruction list; left
     open across loop iterations until the source line changes.  */
  gdb::optional<ui_out_emit_tuple> src_and_asm_tuple;
  gdb::optional<ui_out_emit_list> asm_list;

  gdb_pretty_print_disassembler disasm (gdbarch, uiout);

  for (btrace_insn_iterator it = *begin; btrace_insn_cmp (&it, end) != 0;
       btrace_insn_next (&it, 1))
    {
      const struct btrace_insn *insn;

      insn = btrace_insn_get (&it);

      /* A NULL instruction indicates a gap in the trace.  */
      if (insn == NULL)
	{
	  const struct btrace_config *conf;

	  conf = btrace_conf (btinfo);

	  /* We have trace so we must have a configuration.  */
	  gdb_assert (conf != NULL);

	  uiout->field_fmt ("insn-number", "%u",
			    btrace_insn_number (&it));
	  uiout->text ("\t");

	  btrace_ui_out_decode_error (uiout, btrace_insn_get_error (&it),
				      conf->format);
	}
      else
	{
	  struct disasm_insn dinsn;

	  if ((flags & DISASSEMBLY_SOURCE) != 0)
	    {
	      struct btrace_line_range lines;

	      /* Print the source lines for this instruction only when
		 they are not already covered by the previous output.  */
	      lines = btrace_find_line_range (insn->pc);
	      if (!btrace_line_range_is_empty (lines)
		  && !btrace_line_range_contains_range (last_lines, lines))
		{
		  btrace_print_lines (lines, uiout, &src_and_asm_tuple, &asm_list,
				      flags);
		  last_lines = lines;
		}
	      else if (!src_and_asm_tuple.has_value ())
		{
		  gdb_assert (!asm_list.has_value ());

		  src_and_asm_tuple.emplace (uiout, "src_and_asm_line");

		  /* No source information.  */
		  asm_list.emplace (uiout, "line_asm_insn");
		}

	      gdb_assert (src_and_asm_tuple.has_value ());
	      gdb_assert (asm_list.has_value ());
	    }

	  memset (&dinsn, 0, sizeof (dinsn));
	  dinsn.number = btrace_insn_number (&it);
	  dinsn.addr = insn->pc;

	  if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
	    dinsn.is_speculative = 1;

	  disasm.pretty_print_insn (&dinsn, flags);
	}
    }
}
864
/* The insn_history method of target record-btrace.

   SIZE is the number of instructions to print; its sign selects the
   direction: negative prints backwards from (and including) the current
   position, positive prints forwards.  Successive calls continue from
   where the previous request ended.  */

void
record_btrace_target::insn_history (int size, gdb_disassembly_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  btinfo = require_btrace ();
  history = btinfo->insn_history;
  if (history == NULL)
    {
      /* First request: no previous history window to continue from.  */
      struct btrace_insn_iterator *replay;

      DEBUG ("insn-history (0x%x): %d", (unsigned) flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
	 start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
	begin = *replay;
      else
	btrace_insn_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
	 expand in the other direction, as well, to fill up any remaining
	 context.  */
      end = begin;
      if (size < 0)
	{
	  /* We want the current position covered, as well.  */
	  covered = btrace_insn_next (&end, 1);
	  covered += btrace_insn_prev (&begin, context - covered);
	  covered += btrace_insn_next (&end, context - covered);
	}
      else
	{
	  covered = btrace_insn_next (&end, context);
	  covered += btrace_insn_prev (&begin, context - covered);
	}
    }
  else
    {
      /* Continue from the previous request's window.  */
      begin = history->begin;
      end = history->end;

      DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", (unsigned) flags, size,
	     btrace_insn_number (&begin), btrace_insn_number (&end));

      if (size < 0)
	{
	  end = begin;
	  covered = btrace_insn_prev (&begin, context);
	}
      else
	{
	  begin = end;
	  covered = btrace_insn_next (&end, context);
	}
    }

  if (covered > 0)
    btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
	printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
	printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  /* Remember the printed window for the next request.  */
  btrace_set_insn_history (btinfo, &begin, &end);
}
947
/* The insn_history_range method of target record-btrace.  Prints the
   recorded instructions numbered FROM to TO, both inclusive.  */

void
record_btrace_target::insn_history_range (ULONGEST from, ULONGEST to,
					  gdb_disassembly_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_iterator begin, end;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "insn history");
  low = from;
  high = to;

  DEBUG ("insn-history (0x%x): [%u; %u)", (unsigned) flags, low, high);

  /* Check for wrap-arounds.  Instruction numbers are unsigned int; reject
     requests that do not survive the narrowing from ULONGEST.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_insn_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_insn_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_insn_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_insn_next (&end, 1);
    }

  btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_insn_history (btinfo, &begin, &end);
}
995
f6ac5f3d 996/* The insn_history_from method of target record-btrace. */
afedecd3 997
f6ac5f3d
PA
998void
999record_btrace_target::insn_history_from (ULONGEST from, int size,
1000 gdb_disassembly_flags flags)
afedecd3
MM
1001{
1002 ULONGEST begin, end, context;
1003
1004 context = abs (size);
0688d04e
MM
1005 if (context == 0)
1006 error (_("Bad record instruction-history-size."));
afedecd3
MM
1007
1008 if (size < 0)
1009 {
1010 end = from;
1011
1012 if (from < context)
1013 begin = 0;
1014 else
0688d04e 1015 begin = from - context + 1;
afedecd3
MM
1016 }
1017 else
1018 {
1019 begin = from;
0688d04e 1020 end = from + context - 1;
afedecd3
MM
1021
1022 /* Check for wrap-around. */
1023 if (end < begin)
1024 end = ULONGEST_MAX;
1025 }
1026
f6ac5f3d 1027 insn_history_range (begin, end, flags);
afedecd3
MM
1028}
1029
1030/* Print the instruction number range for a function call history line. */
1031
1032static void
23a7fe75
MM
1033btrace_call_history_insn_range (struct ui_out *uiout,
1034 const struct btrace_function *bfun)
afedecd3 1035{
7acbe133
MM
1036 unsigned int begin, end, size;
1037
0860c437 1038 size = bfun->insn.size ();
7acbe133 1039 gdb_assert (size > 0);
afedecd3 1040
23a7fe75 1041 begin = bfun->insn_offset;
7acbe133 1042 end = begin + size - 1;
afedecd3 1043
1f77b012 1044 uiout->field_unsigned ("insn begin", begin);
112e8700 1045 uiout->text (",");
1f77b012 1046 uiout->field_unsigned ("insn end", end);
afedecd3
MM
1047}
1048
ce0dfbea
MM
1049/* Compute the lowest and highest source line for the instructions in BFUN
1050 and return them in PBEGIN and PEND.
1051 Ignore instructions that can't be mapped to BFUN, e.g. instructions that
1052 result from inlining or macro expansion. */
1053
1054static void
1055btrace_compute_src_line_range (const struct btrace_function *bfun,
1056 int *pbegin, int *pend)
1057{
ce0dfbea
MM
1058 struct symtab *symtab;
1059 struct symbol *sym;
ce0dfbea
MM
1060 int begin, end;
1061
1062 begin = INT_MAX;
1063 end = INT_MIN;
1064
1065 sym = bfun->sym;
1066 if (sym == NULL)
1067 goto out;
1068
1069 symtab = symbol_symtab (sym);
1070
0860c437 1071 for (const btrace_insn &insn : bfun->insn)
ce0dfbea
MM
1072 {
1073 struct symtab_and_line sal;
1074
0860c437 1075 sal = find_pc_line (insn.pc, 0);
ce0dfbea
MM
1076 if (sal.symtab != symtab || sal.line == 0)
1077 continue;
1078
325fac50
PA
1079 begin = std::min (begin, sal.line);
1080 end = std::max (end, sal.line);
ce0dfbea
MM
1081 }
1082
1083 out:
1084 *pbegin = begin;
1085 *pend = end;
1086}
1087
afedecd3
MM
1088/* Print the source line information for a function call history line. */
1089
1090static void
23a7fe75
MM
1091btrace_call_history_src_line (struct ui_out *uiout,
1092 const struct btrace_function *bfun)
afedecd3
MM
1093{
1094 struct symbol *sym;
23a7fe75 1095 int begin, end;
afedecd3
MM
1096
1097 sym = bfun->sym;
1098 if (sym == NULL)
1099 return;
1100
112e8700 1101 uiout->field_string ("file",
cbe56571 1102 symtab_to_filename_for_display (symbol_symtab (sym)),
e43b10e1 1103 file_name_style.style ());
afedecd3 1104
ce0dfbea 1105 btrace_compute_src_line_range (bfun, &begin, &end);
23a7fe75 1106 if (end < begin)
afedecd3
MM
1107 return;
1108
112e8700 1109 uiout->text (":");
381befee 1110 uiout->field_signed ("min line", begin);
afedecd3 1111
23a7fe75 1112 if (end == begin)
afedecd3
MM
1113 return;
1114
112e8700 1115 uiout->text (",");
381befee 1116 uiout->field_signed ("max line", end);
afedecd3
MM
1117}
1118
0b722aec
MM
1119/* Get the name of a branch trace function. */
1120
1121static const char *
1122btrace_get_bfun_name (const struct btrace_function *bfun)
1123{
1124 struct minimal_symbol *msym;
1125 struct symbol *sym;
1126
1127 if (bfun == NULL)
1128 return "??";
1129
1130 msym = bfun->msym;
1131 sym = bfun->sym;
1132
1133 if (sym != NULL)
987012b8 1134 return sym->print_name ();
0b722aec 1135 else if (msym != NULL)
c9d95fa3 1136 return msym->print_name ();
0b722aec
MM
1137 else
1138 return "??";
1139}
1140
/* Disassemble a section of the recorded function trace.
   Prints one line per function segment in [BEGIN; END), consisting of the
   segment index, optional call-depth indentation, the function name, and
   optionally the instruction range and source line range, depending on
   INT_FLAGS (a record_print_flag mask).  */

static void
btrace_call_history (struct ui_out *uiout,
		     const struct btrace_thread_info *btinfo,
		     const struct btrace_call_iterator *begin,
		     const struct btrace_call_iterator *end,
		     int int_flags)
{
  struct btrace_call_iterator it;
  record_print_flags flags = (enum record_print_flag) int_flags;

  DEBUG ("ftrace (0x%x): [%u; %u)", int_flags, btrace_call_number (begin),
	 btrace_call_number (end));

  for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
    {
      const struct btrace_function *bfun;
      struct minimal_symbol *msym;
      struct symbol *sym;

      bfun = btrace_call_get (&it);
      sym = bfun->sym;
      msym = bfun->msym;

      /* Print the function index.  */
      uiout->field_unsigned ("index", bfun->number);
      uiout->text ("\t");

      /* Indicate gaps in the trace.  A segment with a non-zero error code
	 represents a decode gap rather than a function; print the error
	 and skip the rest of the line.  */
      if (bfun->errcode != 0)
	{
	  const struct btrace_config *conf;

	  conf = btrace_conf (btinfo);

	  /* We have trace so we must have a configuration.  */
	  gdb_assert (conf != NULL);

	  btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);

	  continue;
	}

      if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
	{
	  /* Indent by the call depth; BTINFO->LEVEL normalizes the segment
	     levels so the shallowest frame prints at column zero.  */
	  int level = bfun->level + btinfo->level, i;

	  for (i = 0; i < level; ++i)
	    uiout->text ("  ");
	}

      if (sym != NULL)
	uiout->field_string ("function", sym->print_name (),
			     function_name_style.style ());
      else if (msym != NULL)
	uiout->field_string ("function", msym->print_name (),
			     function_name_style.style ());
      else if (!uiout->is_mi_like_p ())
	/* Only CLI output gets the "??" placeholder; MI omits the field.  */
	uiout->field_string ("function", "??",
			     function_name_style.style ());

      if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
	{
	  uiout->text (_("\tinst "));
	  btrace_call_history_insn_range (uiout, bfun);
	}

      if ((flags & RECORD_PRINT_SRC_LINE) != 0)
	{
	  uiout->text (_("\tat "));
	  btrace_call_history_src_line (uiout, bfun);
	}

      uiout->text ("\n");
    }
}
1218
/* The call_history method of target record-btrace.
   Print SIZE function-call history lines: continuing backwards from the
   previous request for a negative SIZE, forwards otherwise.  The printed
   window is remembered via btrace_set_call_history so a subsequent request
   continues from it.  */

void
record_btrace_target::call_history (int size, record_print_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  /* NOTE(review): the tuple name "insn history" looks copy-pasted from the
     instruction-history code; call_history_range uses "func history".  The
     string is visible in MI output, so confirm no consumer relies on it
     before changing.  */
  ui_out_emit_tuple tuple_emitter (uiout, "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  btinfo = require_btrace ();
  history = btinfo->call_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("call-history (0x%x): %d", (int) flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
	 start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
	{
	  begin.btinfo = btinfo;
	  begin.index = replay->call_index;
	}
      else
	btrace_call_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
	 expand in the other direction, as well, to fill up any remaining
	 context.  */
      end = begin;
      if (size < 0)
	{
	  /* We want the current position covered, as well.  */
	  covered = btrace_call_next (&end, 1);
	  covered += btrace_call_prev (&begin, context - covered);
	  covered += btrace_call_next (&end, context - covered);
	}
      else
	{
	  covered = btrace_call_next (&end, context);
	  covered += btrace_call_prev (&begin, context- covered);
	}
    }
  else
    {
      /* Continue from the previously printed window.  */
      begin = history->begin;
      end = history->end;

      DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", (int) flags, size,
	     btrace_call_number (&begin), btrace_call_number (&end));

      if (size < 0)
	{
	  end = begin;
	  covered = btrace_call_prev (&begin, context);
	}
      else
	{
	  begin = end;
	  covered = btrace_call_next (&end, context);
	}
    }

  if (covered > 0)
    btrace_call_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      /* Nothing left to print in the requested direction.  */
      if (size < 0)
	printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
	printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  btrace_set_call_history (btinfo, &begin, &end);
}
1304
/* The call_history_range method of target record-btrace.
   Print the function-call history for the inclusive range [FROM; TO].
   Errors out on an empty/inverted range or if FROM is out of bounds;
   a TO past the end of the trace is silently truncated.  */

void
record_btrace_target::call_history_range (ULONGEST from, ULONGEST to,
					  record_print_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_iterator begin, end;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "func history");
  low = from;
  high = to;

  DEBUG ("call-history (0x%x): [%u; %u)", (int) flags, low, high);

  /* Check for wrap-arounds.  LOW/HIGH are unsigned int; if the ULONGEST
     arguments didn't survive the narrowing, reject the range.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_call_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_call_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_call_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_call_next (&end, 1);
    }

  btrace_call_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_call_history (btinfo, &begin, &end);
}
1352
f6ac5f3d 1353/* The call_history_from method of target record-btrace. */
afedecd3 1354
f6ac5f3d
PA
1355void
1356record_btrace_target::call_history_from (ULONGEST from, int size,
1357 record_print_flags flags)
afedecd3
MM
1358{
1359 ULONGEST begin, end, context;
1360
1361 context = abs (size);
0688d04e
MM
1362 if (context == 0)
1363 error (_("Bad record function-call-history-size."));
afedecd3
MM
1364
1365 if (size < 0)
1366 {
1367 end = from;
1368
1369 if (from < context)
1370 begin = 0;
1371 else
0688d04e 1372 begin = from - context + 1;
afedecd3
MM
1373 }
1374 else
1375 {
1376 begin = from;
0688d04e 1377 end = from + context - 1;
afedecd3
MM
1378
1379 /* Check for wrap-around. */
1380 if (end < begin)
1381 end = ULONGEST_MAX;
1382 }
1383
f6ac5f3d 1384 call_history_range ( begin, end, flags);
afedecd3
MM
1385}
1386
f6ac5f3d 1387/* The record_method method of target record-btrace. */
b158a20f 1388
f6ac5f3d
PA
1389enum record_method
1390record_btrace_target::record_method (ptid_t ptid)
b158a20f 1391{
5b6d1e4f
PA
1392 process_stratum_target *proc_target = current_inferior ()->process_target ();
1393 thread_info *const tp = find_thread_ptid (proc_target, ptid);
b158a20f
TW
1394
1395 if (tp == NULL)
1396 error (_("No thread."));
1397
1398 if (tp->btrace.target == NULL)
1399 return RECORD_METHOD_NONE;
1400
1401 return RECORD_METHOD_BTRACE;
1402}
1403
f6ac5f3d 1404/* The record_is_replaying method of target record-btrace. */
07bbe694 1405
57810aa7 1406bool
f6ac5f3d 1407record_btrace_target::record_is_replaying (ptid_t ptid)
07bbe694 1408{
5b6d1e4f
PA
1409 process_stratum_target *proc_target = current_inferior ()->process_target ();
1410 for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
08036331 1411 if (btrace_is_replaying (tp))
57810aa7 1412 return true;
07bbe694 1413
57810aa7 1414 return false;
07bbe694
MM
1415}
1416
f6ac5f3d 1417/* The record_will_replay method of target record-btrace. */
7ff27e9b 1418
57810aa7 1419bool
f6ac5f3d 1420record_btrace_target::record_will_replay (ptid_t ptid, int dir)
7ff27e9b 1421{
f6ac5f3d 1422 return dir == EXEC_REVERSE || record_is_replaying (ptid);
7ff27e9b
MM
1423}
1424
/* The xfer_partial method of target record-btrace.
   While replaying in read-only mode, memory writes are refused and memory
   reads are restricted to read-only sections; everything else is forwarded
   to the target beneath.  */

enum target_xfer_status
record_btrace_target::xfer_partial (enum target_object object,
				    const char *annex, gdb_byte *readbuf,
				    const gdb_byte *writebuf, ULONGEST offset,
				    ULONGEST len, ULONGEST *xfered_len)
{
  /* Filter out requests that don't make sense during replay.  */
  if (replay_memory_access == replay_memory_access_read_only
      && !record_btrace_generating_corefile
      && record_is_replaying (inferior_ptid))
    {
      switch (object)
	{
	case TARGET_OBJECT_MEMORY:
	  {
	    struct target_section *section;

	    /* We do not allow writing memory in general.  */
	    if (writebuf != NULL)
	      {
		*xfered_len = len;
		return TARGET_XFER_UNAVAILABLE;
	      }

	    /* We allow reading readonly memory.  */
	    section = target_section_by_addr (this, offset);
	    if (section != NULL)
	      {
		/* Check if the section we found is readonly.  */
		if ((bfd_section_flags (section->the_bfd_section)
		     & SEC_READONLY) != 0)
		  {
		    /* Truncate the request to fit into this section.
		       The BREAK falls through to the forwarding below.  */
		    len = std::min (len, section->endaddr - offset);
		    break;
		  }
	      }

	    /* Not a read-only section: unavailable during replay.  */
	    *xfered_len = len;
	    return TARGET_XFER_UNAVAILABLE;
	  }
	}
    }

  /* Forward the request.  */
  return this->beneath ()->xfer_partial (object, annex, readbuf, writebuf,
					 offset, len, xfered_len);
}
1475
f6ac5f3d 1476/* The insert_breakpoint method of target record-btrace. */
633785ff 1477
f6ac5f3d
PA
1478int
1479record_btrace_target::insert_breakpoint (struct gdbarch *gdbarch,
1480 struct bp_target_info *bp_tgt)
633785ff 1481{
67b5c0c1
MM
1482 const char *old;
1483 int ret;
633785ff
MM
1484
1485 /* Inserting breakpoints requires accessing memory. Allow it for the
1486 duration of this function. */
67b5c0c1
MM
1487 old = replay_memory_access;
1488 replay_memory_access = replay_memory_access_read_write;
633785ff
MM
1489
1490 ret = 0;
a70b8144 1491 try
492d29ea 1492 {
b6a8c27b 1493 ret = this->beneath ()->insert_breakpoint (gdbarch, bp_tgt);
492d29ea 1494 }
230d2906 1495 catch (const gdb_exception &except)
492d29ea 1496 {
6c63c96a 1497 replay_memory_access = old;
eedc3f4f 1498 throw;
492d29ea 1499 }
6c63c96a 1500 replay_memory_access = old;
633785ff
MM
1501
1502 return ret;
1503}
1504
f6ac5f3d 1505/* The remove_breakpoint method of target record-btrace. */
633785ff 1506
f6ac5f3d
PA
1507int
1508record_btrace_target::remove_breakpoint (struct gdbarch *gdbarch,
1509 struct bp_target_info *bp_tgt,
1510 enum remove_bp_reason reason)
633785ff 1511{
67b5c0c1
MM
1512 const char *old;
1513 int ret;
633785ff
MM
1514
1515 /* Removing breakpoints requires accessing memory. Allow it for the
1516 duration of this function. */
67b5c0c1
MM
1517 old = replay_memory_access;
1518 replay_memory_access = replay_memory_access_read_write;
633785ff
MM
1519
1520 ret = 0;
a70b8144 1521 try
492d29ea 1522 {
b6a8c27b 1523 ret = this->beneath ()->remove_breakpoint (gdbarch, bp_tgt, reason);
492d29ea 1524 }
230d2906 1525 catch (const gdb_exception &except)
492d29ea 1526 {
6c63c96a 1527 replay_memory_access = old;
eedc3f4f 1528 throw;
492d29ea 1529 }
6c63c96a 1530 replay_memory_access = old;
633785ff
MM
1531
1532 return ret;
1533}
1534
/* The fetch_registers method of target record-btrace.
   While replaying, only the PC register is available; it is supplied from
   the current replay position.  Otherwise the request is forwarded to the
   target beneath.  */

void
record_btrace_target::fetch_registers (struct regcache *regcache, int regno)
{
  btrace_insn_iterator *replay = nullptr;

  /* Thread-db may ask for a thread's registers before GDB knows about the
     thread.  We forward the request to the target beneath in this
     case.  */
  thread_info *tp = find_thread_ptid (regcache->target (), regcache->ptid ());
  if (tp != nullptr)
    replay = tp->btrace.replay;

  if (replay != nullptr && !record_btrace_generating_corefile)
    {
      const struct btrace_insn *insn;
      struct gdbarch *gdbarch;
      int pcreg;

      gdbarch = regcache->arch ();
      pcreg = gdbarch_pc_regnum (gdbarch);
      if (pcreg < 0)
	return;

      /* We can only provide the PC register.  */
      if (regno >= 0 && regno != pcreg)
	return;

      /* Supply the PC of the instruction at the replay position.  */
      insn = btrace_insn_get (replay);
      gdb_assert (insn != NULL);

      regcache->raw_supply (regno, &insn->pc);
    }
  else
    this->beneath ()->fetch_registers (regcache, regno);
}
1572
f6ac5f3d 1573/* The store_registers method of target record-btrace. */
1f3ef581 1574
f6ac5f3d
PA
1575void
1576record_btrace_target::store_registers (struct regcache *regcache, int regno)
1f3ef581 1577{
a52eab48 1578 if (!record_btrace_generating_corefile
222312d3 1579 && record_is_replaying (regcache->ptid ()))
4d10e986 1580 error (_("Cannot write registers while replaying."));
1f3ef581 1581
491144b5 1582 gdb_assert (may_write_registers);
1f3ef581 1583
b6a8c27b 1584 this->beneath ()->store_registers (regcache, regno);
1f3ef581
MM
1585}
1586
f6ac5f3d 1587/* The prepare_to_store method of target record-btrace. */
1f3ef581 1588
f6ac5f3d
PA
1589void
1590record_btrace_target::prepare_to_store (struct regcache *regcache)
1f3ef581 1591{
a52eab48 1592 if (!record_btrace_generating_corefile
222312d3 1593 && record_is_replaying (regcache->ptid ()))
1f3ef581
MM
1594 return;
1595
b6a8c27b 1596 this->beneath ()->prepare_to_store (regcache);
1f3ef581
MM
1597}
1598
/* The branch trace frame cache.  Associates a frame_info with the branch
   trace function segment it represents while unwinding recorded
   history.  */

struct btrace_frame_cache
{
  /* The thread.  */
  struct thread_info *tp;

  /* The frame info.  Used as the hash table key (hashed/compared by
     pointer identity).  */
  struct frame_info *frame;

  /* The branch trace function segment.  */
  const struct btrace_function *bfun;
};

/* A struct btrace_frame_cache hash table indexed by NEXT.  */

static htab_t bfcache;
1616
1617/* hash_f for htab_create_alloc of bfcache. */
1618
1619static hashval_t
1620bfcache_hash (const void *arg)
1621{
19ba03f4
SM
1622 const struct btrace_frame_cache *cache
1623 = (const struct btrace_frame_cache *) arg;
0b722aec
MM
1624
1625 return htab_hash_pointer (cache->frame);
1626}
1627
1628/* eq_f for htab_create_alloc of bfcache. */
1629
1630static int
1631bfcache_eq (const void *arg1, const void *arg2)
1632{
19ba03f4
SM
1633 const struct btrace_frame_cache *cache1
1634 = (const struct btrace_frame_cache *) arg1;
1635 const struct btrace_frame_cache *cache2
1636 = (const struct btrace_frame_cache *) arg2;
0b722aec
MM
1637
1638 return cache1->frame == cache2->frame;
1639}
1640
1641/* Create a new btrace frame cache. */
1642
1643static struct btrace_frame_cache *
1644bfcache_new (struct frame_info *frame)
1645{
1646 struct btrace_frame_cache *cache;
1647 void **slot;
1648
1649 cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
1650 cache->frame = frame;
1651
1652 slot = htab_find_slot (bfcache, cache, INSERT);
1653 gdb_assert (*slot == NULL);
1654 *slot = cache;
1655
1656 return cache;
1657}
1658
1659/* Extract the branch trace function from a branch trace frame. */
1660
1661static const struct btrace_function *
1662btrace_get_frame_function (struct frame_info *frame)
1663{
1664 const struct btrace_frame_cache *cache;
0b722aec
MM
1665 struct btrace_frame_cache pattern;
1666 void **slot;
1667
1668 pattern.frame = frame;
1669
1670 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1671 if (slot == NULL)
1672 return NULL;
1673
19ba03f4 1674 cache = (const struct btrace_frame_cache *) *slot;
0b722aec
MM
1675 return cache->bfun;
1676}
1677
cecac1ab
MM
1678/* Implement stop_reason method for record_btrace_frame_unwind. */
1679
1680static enum unwind_stop_reason
1681record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1682 void **this_cache)
1683{
0b722aec
MM
1684 const struct btrace_frame_cache *cache;
1685 const struct btrace_function *bfun;
1686
19ba03f4 1687 cache = (const struct btrace_frame_cache *) *this_cache;
0b722aec
MM
1688 bfun = cache->bfun;
1689 gdb_assert (bfun != NULL);
1690
42bfe59e 1691 if (bfun->up == 0)
0b722aec
MM
1692 return UNWIND_UNAVAILABLE;
1693
1694 return UNWIND_NO_REASON;
cecac1ab
MM
1695}
1696
/* Implement this_id method for record_btrace_frame_unwind.
   The stack is not available in recorded history, so build an
   unavailable-stack frame id from the frame's function address and the
   number of the first segment of this function invocation.  */

static void
record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
			     struct frame_id *this_id)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;
  struct btrace_call_iterator it;
  CORE_ADDR code, special;

  cache = (const struct btrace_frame_cache *) *this_cache;

  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  /* Walk PREV links back to the first segment of this function invocation
     so all segments of one invocation share the same frame id.  */
  while (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->prev) != 0)
    bfun = btrace_call_get (&it);

  code = get_frame_func (this_frame);
  /* The segment number disambiguates recursive invocations.  */
  special = bfun->number;

  *this_id = frame_id_build_unavailable_stack_special (code, special);

  DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
	 btrace_get_bfun_name (cache->bfun),
	 core_addr_to_string_nz (this_id->code_addr),
	 core_addr_to_string_nz (this_id->special_addr));
}
1726
/* Implement prev_register method for record_btrace_frame_unwind.
   Only the PC can be unwound from recorded history; it is derived from
   the caller segment's instructions.  Throws NOT_AVAILABLE_ERROR for any
   other register or when there is no recorded caller.  */

static struct value *
record_btrace_frame_prev_register (struct frame_info *this_frame,
				   void **this_cache,
				   int regnum)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun, *caller;
  struct btrace_call_iterator it;
  struct gdbarch *gdbarch;
  CORE_ADDR pc;
  int pcreg;

  gdbarch = get_frame_arch (this_frame);
  pcreg = gdbarch_pc_regnum (gdbarch);
  if (pcreg < 0 || regnum != pcreg)
    throw_error (NOT_AVAILABLE_ERROR,
		 _("Registers are not available in btrace record history"));

  cache = (const struct btrace_frame_cache *) *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  if (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->up) == 0)
    throw_error (NOT_AVAILABLE_ERROR,
		 _("No caller in btrace record history"));

  caller = btrace_call_get (&it);

  if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
    /* The up link goes to the return segment; its first instruction is
       the return address.  */
    pc = caller->insn.front ().pc;
  else
    {
      /* The up link goes to the calling segment; the return address is
	 the instruction following its last (the call) instruction.  */
      pc = caller->insn.back ().pc;
      pc += gdb_insn_length (gdbarch, pc);
    }

  DEBUG ("[frame] unwound PC in %s on level %d: %s",
	 btrace_get_bfun_name (bfun), bfun->level,
	 core_addr_to_string_nz (pc));

  return frame_unwind_got_address (this_frame, regnum, pc);
}
1771
/* Implement sniffer method for record_btrace_frame_unwind.
   Claims the innermost frame while replaying (segment at the replay
   position) and outer frames whose callee was claimed by this unwinder
   via a regular (non-tailcall) up link.  */

static int
record_btrace_frame_sniffer (const struct frame_unwind *self,
			     struct frame_info *this_frame,
			     void **this_cache)
{
  const struct btrace_function *bfun;
  struct btrace_frame_cache *cache;
  struct thread_info *tp;
  struct frame_info *next;

  /* THIS_FRAME does not contain a reference to its thread.  */
  tp = inferior_thread ();

  bfun = NULL;
  next = get_next_frame (this_frame);
  if (next == NULL)
    {
      /* Innermost frame: use the segment at the replay position, if
	 replaying.  */
      const struct btrace_insn_iterator *replay;

      replay = tp->btrace.replay;
      if (replay != NULL)
	bfun = &replay->btinfo->functions[replay->call_index];
    }
  else
    {
      /* Outer frame: follow the callee's regular up link.  */
      const struct btrace_function *callee;
      struct btrace_call_iterator it;

      callee = btrace_get_frame_function (next);
      if (callee == NULL || (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
	return 0;

      if (btrace_find_call_by_number (&it, &tp->btrace, callee->up) == 0)
	return 0;

      bfun = btrace_call_get (&it);
    }

  if (bfun == NULL)
    return 0;

  DEBUG ("[frame] sniffed frame for %s on level %d",
	 btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = tp;
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}
1826
/* Implement sniffer method for record_btrace_tailcall_frame_unwind.
   Claims a frame only when the callee frame was claimed by this unwinder
   and its up link marks a tail call.  */

static int
record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
				      struct frame_info *this_frame,
				      void **this_cache)
{
  const struct btrace_function *bfun, *callee;
  struct btrace_frame_cache *cache;
  struct btrace_call_iterator it;
  struct frame_info *next;
  struct thread_info *tinfo;

  /* Tailcall frames are never innermost.  */
  next = get_next_frame (this_frame);
  if (next == NULL)
    return 0;

  callee = btrace_get_frame_function (next);
  if (callee == NULL)
    return 0;

  if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
    return 0;

  tinfo = inferior_thread ();
  if (btrace_find_call_by_number (&it, &tinfo->btrace, callee->up) == 0)
    return 0;

  bfun = btrace_call_get (&it);

  DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
	 btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = tinfo;
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}
1868
1869static void
1870record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
1871{
1872 struct btrace_frame_cache *cache;
1873 void **slot;
1874
19ba03f4 1875 cache = (struct btrace_frame_cache *) this_cache;
0b722aec
MM
1876
1877 slot = htab_find_slot (bfcache, cache, NO_INSERT);
1878 gdb_assert (slot != NULL);
1879
1880 htab_remove_elt (bfcache, cache);
cecac1ab
MM
1881}

/* btrace recording does not store previous memory content, neither the stack
   frames content.  Any unwinding would return erroneous results as the stack
   contents no longer matches the changed PC value restored from history.
   Therefore this unwinder reports any possibly unwound registers as
   <unavailable>.  */

/* Unwinder for normal frames in recorded history.  Fields are positional:
   type, stop_reason, this_id, prev_register, unwind_data, sniffer,
   dealloc_cache.  */

const struct frame_unwind record_btrace_frame_unwind =
{
  NORMAL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_frame_sniffer,
  record_btrace_frame_dealloc_cache
};

/* Unwinder for tail-call frames in recorded history; shares all callbacks
   with the normal-frame unwinder except the sniffer.  */

const struct frame_unwind record_btrace_tailcall_frame_unwind =
{
  TAILCALL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_tailcall_frame_sniffer,
  record_btrace_frame_dealloc_cache
};
b2f4cfde 1910
/* Implement the get_unwinder method.  Returns the btrace unwinder for
   normal frames; it takes precedence over the architecture unwinders
   while this target is pushed.  */

const struct frame_unwind *
record_btrace_target::get_unwinder ()
{
  return &record_btrace_frame_unwind;
}
1918
/* Implement the get_tailcall_unwinder method.  Returns the btrace
   unwinder for tail-call frames.  */

const struct frame_unwind *
record_btrace_target::get_tailcall_unwinder ()
{
  return &record_btrace_tailcall_frame_unwind;
}
1926
/* Return a human-readable string for FLAG.  Used for debug output only;
   returns "<invalid>" for unknown or combined flag values.  */

static const char *
btrace_thread_flag_to_str (enum btrace_thread_flag flag)
{
  switch (flag)
    {
    case BTHR_STEP:
      return "step";

    case BTHR_RSTEP:
      return "reverse-step";

    case BTHR_CONT:
      return "cont";

    case BTHR_RCONT:
      return "reverse-cont";

    case BTHR_STOP:
      return "stop";
    }

  return "<invalid>";
}
1952
/* Indicate that TP should be resumed according to FLAG.  This only
   records the request in TP's btrace flags; the actual stepping happens
   later in the target's wait/resume machinery.  */

static void
record_btrace_resume_thread (struct thread_info *tp,
			     enum btrace_thread_flag flag)
{
  struct btrace_thread_info *btinfo;

  DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp),
	 target_pid_to_str (tp->ptid).c_str (), flag,
	 btrace_thread_flag_to_str (flag));

  btinfo = &tp->btrace;

  /* Fetch the latest branch trace.  */
  btrace_fetch (tp, record_btrace_get_cpu ());

  /* A resume request overwrites a preceding resume or stop request.  */
  btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
  btinfo->flags |= flag;
}
1974
/* Get the current frame id for TP.
   Temporarily switches to TP and clears its EXECUTING flag so the frame
   machinery is willing to compute a frame; both are restored before
   returning, also on the exception path.  */

static struct frame_id
get_thread_current_frame_id (struct thread_info *tp)
{
  struct frame_id id;
  bool executing;

  /* Set current thread, which is implicitly used by
     get_current_frame.  */
  scoped_restore_current_thread restore_thread;

  switch_to_thread (tp);

  process_stratum_target *proc_target = tp->inf->process_target ();

  /* Clear the executing flag to allow changes to the current frame.
     We are not actually running, yet.  We just started a reverse execution
     command or a record goto command.
     For the latter, EXECUTING is false and this has no effect.
     For the former, EXECUTING is true and we're in wait, about to
     move the thread.  Since we need to recompute the stack, we temporarily
     set EXECUTING to false.  */
  executing = tp->executing;
  set_executing (proc_target, inferior_ptid, false);

  id = null_frame_id;
  try
    {
      id = get_frame_id (get_current_frame ());
    }
  catch (const gdb_exception &except)
    {
      /* Restore the previous execution state.  */
      set_executing (proc_target, inferior_ptid, executing);

      throw;
    }

  /* Restore the previous execution state.  */
  set_executing (proc_target, inferior_ptid, executing);

  return id;
}
/* Start replaying a thread.
   Creates TP's replay iterator positioned at the last traced instruction
   and returns it, or NULL when TP has no trace.  On error, any partially
   installed replay state is torn down before re-throwing.  */

static struct btrace_insn_iterator *
record_btrace_start_replaying (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;
  replay = NULL;

  /* We can't start replaying without trace.  */
  if (btinfo->functions.empty ())
    return NULL;

  /* GDB stores the current frame_id when stepping in order to detects steps
     into subroutines.
     Since frames are computed differently when we're replaying, we need to
     recompute those stored frames and fix them up so we can still detect
     subroutines after we started replaying.  */
  try
    {
      struct frame_id frame_id;
      int upd_step_frame_id, upd_step_stack_frame_id;

      /* The current frame without replaying - computed via normal unwind.  */
      frame_id = get_thread_current_frame_id (tp);

      /* Check if we need to update any stepping-related frame id's.  */
      upd_step_frame_id = frame_id_eq (frame_id,
				       tp->control.step_frame_id);
      upd_step_stack_frame_id = frame_id_eq (frame_id,
					     tp->control.step_stack_frame_id);

      /* We start replaying at the end of the branch trace.  This corresponds
	 to the current instruction.  */
      replay = XNEW (struct btrace_insn_iterator);
      btrace_insn_end (replay, btinfo);

      /* Skip gaps at the end of the trace.  */
      while (btrace_insn_get (replay) == NULL)
	{
	  unsigned int steps;

	  steps = btrace_insn_prev (replay, 1);
	  if (steps == 0)
	    error (_("No trace."));
	}

      /* We're not replaying, yet.  */
      gdb_assert (btinfo->replay == NULL);
      btinfo->replay = replay;

      /* Make sure we're not using any stale registers.  */
      registers_changed_thread (tp);

      /* The current frame with replaying - computed via btrace unwind.  */
      frame_id = get_thread_current_frame_id (tp);

      /* Replace stepping related frames where necessary.  */
      if (upd_step_frame_id)
	tp->control.step_frame_id = frame_id;
      if (upd_step_stack_frame_id)
	tp->control.step_stack_frame_id = frame_id;
    }
  catch (const gdb_exception &except)
    {
      /* Undo any partial replay setup before re-throwing.  */
      xfree (btinfo->replay);
      btinfo->replay = NULL;

      registers_changed_thread (tp);

      throw;
    }

  return replay;
}
2097
2098/* Stop replaying a thread. */
2099
2100static void
2101record_btrace_stop_replaying (struct thread_info *tp)
2102{
2103 struct btrace_thread_info *btinfo;
2104
2105 btinfo = &tp->btrace;
2106
2107 xfree (btinfo->replay);
2108 btinfo->replay = NULL;
2109
2110 /* Make sure we're not leaving any stale registers. */
00431a78 2111 registers_changed_thread (tp);
52834460
MM
2112}
2113
e3cfc1c7
MM
2114/* Stop replaying TP if it is at the end of its execution history. */
2115
2116static void
2117record_btrace_stop_replaying_at_end (struct thread_info *tp)
2118{
2119 struct btrace_insn_iterator *replay, end;
2120 struct btrace_thread_info *btinfo;
2121
2122 btinfo = &tp->btrace;
2123 replay = btinfo->replay;
2124
2125 if (replay == NULL)
2126 return;
2127
2128 btrace_insn_end (&end, btinfo);
2129
2130 if (btrace_insn_cmp (replay, &end) == 0)
2131 record_btrace_stop_replaying (tp);
2132}
2133
/* The resume method of target record-btrace.

   While recording (forward direction, nobody replaying), the request is
   simply forwarded to the target beneath.  Otherwise we only record the
   resume intent per-thread in btrace flags; the actual stepping happens in
   record_btrace_target::wait.  */

void
record_btrace_target::resume (ptid_t ptid, int step, enum gdb_signal signal)
{
  enum btrace_thread_flag flag, cflag;

  DEBUG ("resume %s: %s%s", target_pid_to_str (ptid).c_str (),
	 ::execution_direction == EXEC_REVERSE ? "reverse-" : "",
	 step ? "step" : "cont");

  /* Store the execution direction of the last resume.

     If there is more than one resume call, we have to rely on infrun
     to not change the execution direction in-between. */
  record_btrace_resume_exec_dir = ::execution_direction;

  /* As long as we're not replaying, just forward the request.

     For non-stop targets this means that no thread is replaying.  In order to
     make progress, we may need to explicitly move replaying threads to the end
     of their execution history. */
  if ((::execution_direction != EXEC_REVERSE)
      && !record_is_replaying (minus_one_ptid))
    {
      this->beneath ()->resume (ptid, step, signal);
      return;
    }

  /* Compute the btrace thread flag for the requested move.  FLAG is used
     for the stepping thread, CFLAG for threads that are merely continued.  */
  if (::execution_direction == EXEC_REVERSE)
    {
      flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;
      cflag = BTHR_RCONT;
    }
  else
    {
      flag = step == 0 ? BTHR_CONT : BTHR_STEP;
      cflag = BTHR_CONT;
    }

  /* We just indicate the resume intent here.  The actual stepping happens in
     record_btrace_wait below.

     For all-stop targets, we only step INFERIOR_PTID and continue others. */

  process_stratum_target *proc_target = current_inferior ()->process_target ();

  if (!target_is_non_stop_p ())
    {
      gdb_assert (inferior_ptid.matches (ptid));

      for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
	{
	  if (tp->ptid.matches (inferior_ptid))
	    record_btrace_resume_thread (tp, flag);
	  else
	    record_btrace_resume_thread (tp, cflag);
	}
    }
  else
    {
      /* Non-stop: every matching thread gets the full resume request.  */
      for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
	record_btrace_resume_thread (tp, flag);
    }

  /* Async support.  Arrange for our event handler to be called so wait
     gets a chance to move the threads.  */
  if (target_can_async_p ())
    {
      target_async (1);
      mark_async_event_handler (record_btrace_async_inferior_event_handler);
    }
}
2207
f6ac5f3d 2208/* The commit_resume method of target record-btrace. */
85ad3aaf 2209
f6ac5f3d
PA
2210void
2211record_btrace_target::commit_resume ()
85ad3aaf 2212{
f6ac5f3d
PA
2213 if ((::execution_direction != EXEC_REVERSE)
2214 && !record_is_replaying (minus_one_ptid))
b6a8c27b 2215 beneath ()->commit_resume ();
85ad3aaf
PA
2216}
2217
987e68b1
MM
2218/* Cancel resuming TP. */
2219
2220static void
2221record_btrace_cancel_resume (struct thread_info *tp)
2222{
2223 enum btrace_thread_flag flags;
2224
2225 flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
2226 if (flags == 0)
2227 return;
2228
43792cf0
PA
2229 DEBUG ("cancel resume thread %s (%s): %x (%s)",
2230 print_thread_id (tp),
a068643d 2231 target_pid_to_str (tp->ptid).c_str (), flags,
987e68b1
MM
2232 btrace_thread_flag_to_str (flags));
2233
2234 tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
e3cfc1c7 2235 record_btrace_stop_replaying_at_end (tp);
52834460
MM
2236}
2237
2238/* Return a target_waitstatus indicating that we ran out of history. */
2239
2240static struct target_waitstatus
2241btrace_step_no_history (void)
2242{
2243 struct target_waitstatus status;
2244
2245 status.kind = TARGET_WAITKIND_NO_HISTORY;
2246
2247 return status;
2248}
2249
2250/* Return a target_waitstatus indicating that a step finished. */
2251
2252static struct target_waitstatus
2253btrace_step_stopped (void)
2254{
2255 struct target_waitstatus status;
2256
2257 status.kind = TARGET_WAITKIND_STOPPED;
2258 status.value.sig = GDB_SIGNAL_TRAP;
2259
2260 return status;
2261}
2262
6e4879f0
MM
2263/* Return a target_waitstatus indicating that a thread was stopped as
2264 requested. */
2265
2266static struct target_waitstatus
2267btrace_step_stopped_on_request (void)
2268{
2269 struct target_waitstatus status;
2270
2271 status.kind = TARGET_WAITKIND_STOPPED;
2272 status.value.sig = GDB_SIGNAL_0;
2273
2274 return status;
2275}
2276
d825d248
MM
2277/* Return a target_waitstatus indicating a spurious stop. */
2278
2279static struct target_waitstatus
2280btrace_step_spurious (void)
2281{
2282 struct target_waitstatus status;
2283
2284 status.kind = TARGET_WAITKIND_SPURIOUS;
2285
2286 return status;
2287}
2288
e3cfc1c7
MM
2289/* Return a target_waitstatus indicating that the thread was not resumed. */
2290
2291static struct target_waitstatus
2292btrace_step_no_resumed (void)
2293{
2294 struct target_waitstatus status;
2295
2296 status.kind = TARGET_WAITKIND_NO_RESUMED;
2297
2298 return status;
2299}
2300
2301/* Return a target_waitstatus indicating that we should wait again. */
2302
2303static struct target_waitstatus
2304btrace_step_again (void)
2305{
2306 struct target_waitstatus status;
2307
2308 status.kind = TARGET_WAITKIND_IGNORE;
2309
2310 return status;
2311}
2312
52834460
MM
2313/* Clear the record histories. */
2314
2315static void
2316record_btrace_clear_histories (struct btrace_thread_info *btinfo)
2317{
2318 xfree (btinfo->insn_history);
2319 xfree (btinfo->call_history);
2320
2321 btinfo->insn_history = NULL;
2322 btinfo->call_history = NULL;
2323}
2324
3c615f99
MM
2325/* Check whether TP's current replay position is at a breakpoint. */
2326
2327static int
2328record_btrace_replay_at_breakpoint (struct thread_info *tp)
2329{
2330 struct btrace_insn_iterator *replay;
2331 struct btrace_thread_info *btinfo;
2332 const struct btrace_insn *insn;
3c615f99
MM
2333
2334 btinfo = &tp->btrace;
2335 replay = btinfo->replay;
2336
2337 if (replay == NULL)
2338 return 0;
2339
2340 insn = btrace_insn_get (replay);
2341 if (insn == NULL)
2342 return 0;
2343
00431a78 2344 return record_check_stopped_by_breakpoint (tp->inf->aspace, insn->pc,
3c615f99
MM
2345 &btinfo->stop_reason);
2346}
2347
/* Step one instruction in forward direction.

   Returns STOPPED when a breakpoint is hit, NO_HISTORY when the end of the
   trace is reached (leaving the replay position unchanged in that case),
   and SPURIOUS when the step succeeded without an event.  */

static struct target_waitstatus
record_btrace_single_step_forward (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay, end, start;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;
  replay = btinfo->replay;

  /* We're done if we're not replaying. */
  if (replay == NULL)
    return btrace_step_no_history ();

  /* Check if we're stepping a breakpoint.  For forward stepping this check
     happens before moving; PC points at the next to-be-executed insn.  */
  if (record_btrace_replay_at_breakpoint (tp))
    return btrace_step_stopped ();

  /* Skip gaps during replay.  If we end up at a gap (at the end of the trace),
     jump back to the instruction at which we started. */
  start = *replay;
  do
    {
      unsigned int steps;

      /* We will bail out here if we continue stepping after reaching the end
	 of the execution history. */
      steps = btrace_insn_next (replay, 1);
      if (steps == 0)
	{
	  /* Restore the starting position so a failed step is a no-op.  */
	  *replay = start;
	  return btrace_step_no_history ();
	}
    }
  while (btrace_insn_get (replay) == NULL);

  /* Determine the end of the instruction trace. */
  btrace_insn_end (&end, btinfo);

  /* The execution trace contains (and ends with) the current instruction.
     This instruction has not been executed, yet, so the trace really ends
     one instruction earlier. */
  if (btrace_insn_cmp (replay, &end) == 0)
    return btrace_step_no_history ();

  return btrace_step_spurious ();
}
2396
/* Step one instruction in backward direction.

   Starts replaying if TP isn't replaying yet.  Returns NO_HISTORY at the
   beginning of the trace (position restored), STOPPED at a breakpoint,
   SPURIOUS otherwise.  */

static struct target_waitstatus
record_btrace_single_step_backward (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay, start;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;
  replay = btinfo->replay;

  /* Start replaying if we're not already doing so. */
  if (replay == NULL)
    replay = record_btrace_start_replaying (tp);

  /* If we can't step any further, we reached the end of the history.
     Skip gaps during replay.  If we end up at a gap (at the beginning of
     the trace), jump back to the instruction at which we started. */
  start = *replay;
  do
    {
      unsigned int steps;

      steps = btrace_insn_prev (replay, 1);
      if (steps == 0)
	{
	  /* Restore the starting position so a failed step is a no-op.  */
	  *replay = start;
	  return btrace_step_no_history ();
	}
    }
  while (btrace_insn_get (replay) == NULL);

  /* Check if we're stepping a breakpoint.

     For reverse-stepping, this check is after the step.  There is logic in
     infrun.c that handles reverse-stepping separately.  See, for example,
     proceed and adjust_pc_after_break.

     This code assumes that for reverse-stepping, PC points to the last
     de-executed instruction, whereas for forward-stepping PC points to the
     next to-be-executed instruction. */
  if (record_btrace_replay_at_breakpoint (tp))
    return btrace_step_stopped ();

  return btrace_step_spurious ();
}
2443
/* Step a single thread.

   Consumes TP's pending MOVE/STOP request, performs at most one single
   step, and translates the outcome into a target_waitstatus.  For CONT
   and RCONT requests the flags are re-armed and IGNORE is returned so the
   caller keeps stepping this thread.  */

static struct target_waitstatus
record_btrace_step_thread (struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;
  struct target_waitstatus status;
  enum btrace_thread_flag flags;

  btinfo = &tp->btrace;

  /* Fetch and clear the pending request; it is consumed by this step.  */
  flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
  btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);

  DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp),
	 target_pid_to_str (tp->ptid).c_str (), flags,
	 btrace_thread_flag_to_str (flags));

  /* We can't step without an execution history. */
  if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
    return btrace_step_no_history ();

  switch (flags)
    {
    default:
      internal_error (__FILE__, __LINE__, _("invalid stepping type."));

    case BTHR_STOP:
      return btrace_step_stopped_on_request ();

    case BTHR_STEP:
      /* A single step either completes (-> stopped) or reports an event
	 such as NO_HISTORY, handled below.  */
      status = record_btrace_single_step_forward (tp);
      if (status.kind != TARGET_WAITKIND_SPURIOUS)
	break;

      return btrace_step_stopped ();

    case BTHR_RSTEP:
      status = record_btrace_single_step_backward (tp);
      if (status.kind != TARGET_WAITKIND_SPURIOUS)
	break;

      return btrace_step_stopped ();

    case BTHR_CONT:
      status = record_btrace_single_step_forward (tp);
      if (status.kind != TARGET_WAITKIND_SPURIOUS)
	break;

      /* Re-arm the continue request; the caller will step us again.  */
      btinfo->flags |= flags;
      return btrace_step_again ();

    case BTHR_RCONT:
      status = record_btrace_single_step_backward (tp);
      if (status.kind != TARGET_WAITKIND_SPURIOUS)
	break;

      btinfo->flags |= flags;
      return btrace_step_again ();
    }

  /* We keep threads moving at the end of their execution history.  The wait
     method will stop the thread for whom the event is reported. */
  if (status.kind == TARGET_WAITKIND_NO_HISTORY)
    btinfo->flags |= flags;

  return status;
}
2512
a6b5be76
MM
2513/* Announce further events if necessary. */
2514
2515static void
53127008
SM
2516record_btrace_maybe_mark_async_event
2517 (const std::vector<thread_info *> &moving,
2518 const std::vector<thread_info *> &no_history)
a6b5be76 2519{
53127008
SM
2520 bool more_moving = !moving.empty ();
2521 bool more_no_history = !no_history.empty ();;
a6b5be76
MM
2522
2523 if (!more_moving && !more_no_history)
2524 return;
2525
2526 if (more_moving)
2527 DEBUG ("movers pending");
2528
2529 if (more_no_history)
2530 DEBUG ("no-history pending");
2531
2532 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2533}
2534
/* The wait method of target record-btrace.

   While recording, forwards to the target beneath.  While replaying,
   round-robins all threads with pending MOVE/STOP requests one step at a
   time until one of them reports an event, then cancels the others (in
   all-stop) and returns the eventing thread.  */

ptid_t
record_btrace_target::wait (ptid_t ptid, struct target_waitstatus *status,
			    int options)
{
  std::vector<thread_info *> moving;
  std::vector<thread_info *> no_history;

  DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid).c_str (), options);

  /* As long as we're not replaying, just forward the request. */
  if ((::execution_direction != EXEC_REVERSE)
      && !record_is_replaying (minus_one_ptid))
    {
      return this->beneath ()->wait (ptid, status, options);
    }

  /* Keep a work list of moving threads. */
  process_stratum_target *proc_target = current_inferior ()->process_target ();
  for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
    if ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0)
      moving.push_back (tp);

  /* No thread has a pending request: nothing to wait for.  */
  if (moving.empty ())
    {
      *status = btrace_step_no_resumed ();

      DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid).c_str (),
	     target_waitstatus_to_string (status).c_str ());

      return null_ptid;
    }

  /* Step moving threads one by one, one step each, until either one thread
     reports an event or we run out of threads to step.

     When stepping more than one thread, chances are that some threads reach
     the end of their execution history earlier than others.  If we reported
     this immediately, all-stop on top of non-stop would stop all threads and
     resume the same threads next time.  And we would report the same thread
     having reached the end of its execution history again.

     In the worst case, this would starve the other threads.  But even if other
     threads would be allowed to make progress, this would result in far too
     many intermediate stops.

     We therefore delay the reporting of "no execution history" until we have
     nothing else to report.  By this time, all threads should have moved to
     either the beginning or the end of their execution history.  There will
     be a single user-visible stop. */
  struct thread_info *eventing = NULL;
  while ((eventing == NULL) && !moving.empty ())
    {
      for (unsigned int ix = 0; eventing == NULL && ix < moving.size ();)
	{
	  thread_info *tp = moving[ix];

	  *status = record_btrace_step_thread (tp);

	  switch (status->kind)
	    {
	    case TARGET_WAITKIND_IGNORE:
	      /* Thread keeps moving; advance to the next one.  */
	      ix++;
	      break;

	    case TARGET_WAITKIND_NO_HISTORY:
	      /* Park this thread; its report is delayed (see above).  */
	      no_history.push_back (ordered_remove (moving, ix));
	      break;

	    default:
	      /* A real event; this thread will be reported.  */
	      eventing = unordered_remove (moving, ix);
	      break;
	    }
	}
    }

  if (eventing == NULL)
    {
      /* We started with at least one moving thread.  This thread must have
	 either stopped or reached the end of its execution history.

	 In the former case, EVENTING must not be NULL.
	 In the latter case, NO_HISTORY must not be empty. */
      gdb_assert (!no_history.empty ());

      /* We kept threads moving at the end of their execution history.  Stop
	 EVENTING now that we are going to report its stop. */
      eventing = unordered_remove (no_history, 0);
      eventing->btrace.flags &= ~BTHR_MOVE;

      *status = btrace_step_no_history ();
    }

  gdb_assert (eventing != NULL);

  /* We kept threads replaying at the end of their execution history.  Stop
     replaying EVENTING now that we are going to report its stop. */
  record_btrace_stop_replaying_at_end (eventing);

  /* Stop all other threads. */
  if (!target_is_non_stop_p ())
    {
      for (thread_info *tp : current_inferior ()->non_exited_threads ())
	record_btrace_cancel_resume (tp);
    }

  /* In async mode, we need to announce further events. */
  if (target_is_async_p ())
    record_btrace_maybe_mark_async_event (moving, no_history);

  /* Start record histories anew from the current position. */
  record_btrace_clear_histories (&eventing->btrace);

  /* We moved the replay position but did not update registers. */
  registers_changed_thread (eventing);

  DEBUG ("wait ended by thread %s (%s): %s",
	 print_thread_id (eventing),
	 target_pid_to_str (eventing->ptid).c_str (),
	 target_waitstatus_to_string (status).c_str ());

  return eventing->ptid;
}
2659
f6ac5f3d 2660/* The stop method of target record-btrace. */
6e4879f0 2661
f6ac5f3d
PA
2662void
2663record_btrace_target::stop (ptid_t ptid)
6e4879f0 2664{
a068643d 2665 DEBUG ("stop %s", target_pid_to_str (ptid).c_str ());
6e4879f0
MM
2666
2667 /* As long as we're not replaying, just forward the request. */
f6ac5f3d
PA
2668 if ((::execution_direction != EXEC_REVERSE)
2669 && !record_is_replaying (minus_one_ptid))
6e4879f0 2670 {
b6a8c27b 2671 this->beneath ()->stop (ptid);
6e4879f0
MM
2672 }
2673 else
2674 {
5b6d1e4f
PA
2675 process_stratum_target *proc_target
2676 = current_inferior ()->process_target ();
2677
2678 for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
08036331
PA
2679 {
2680 tp->btrace.flags &= ~BTHR_MOVE;
2681 tp->btrace.flags |= BTHR_STOP;
2682 }
6e4879f0
MM
2683 }
2684 }
2685
f6ac5f3d 2686/* The can_execute_reverse method of target record-btrace. */
52834460 2687
57810aa7 2688bool
f6ac5f3d 2689record_btrace_target::can_execute_reverse ()
52834460 2690{
57810aa7 2691 return true;
52834460
MM
2692}
2693
f6ac5f3d 2694/* The stopped_by_sw_breakpoint method of target record-btrace. */
52834460 2695
57810aa7 2696bool
f6ac5f3d 2697record_btrace_target::stopped_by_sw_breakpoint ()
52834460 2698{
f6ac5f3d 2699 if (record_is_replaying (minus_one_ptid))
9e8915c6
PA
2700 {
2701 struct thread_info *tp = inferior_thread ();
2702
2703 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2704 }
2705
b6a8c27b 2706 return this->beneath ()->stopped_by_sw_breakpoint ();
9e8915c6
PA
2707}
2708
f6ac5f3d 2709/* The supports_stopped_by_sw_breakpoint method of target
9e8915c6
PA
2710 record-btrace. */
2711
57810aa7 2712bool
f6ac5f3d 2713record_btrace_target::supports_stopped_by_sw_breakpoint ()
9e8915c6 2714{
f6ac5f3d 2715 if (record_is_replaying (minus_one_ptid))
57810aa7 2716 return true;
9e8915c6 2717
b6a8c27b 2718 return this->beneath ()->supports_stopped_by_sw_breakpoint ();
9e8915c6
PA
2719}
2720
/* The stopped_by_hw_breakpoint method of target record-btrace.
   (The comment previously said "stopped_by_sw_breakpoint" - a copy-paste
   error; this is the hardware-breakpoint variant.)  */

bool
record_btrace_target::stopped_by_hw_breakpoint ()
{
  if (record_is_replaying (minus_one_ptid))
    {
      struct thread_info *tp = inferior_thread ();

      /* While replaying, report the stop reason recorded for the current
	 thread's replay position.  */
      return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
    }

  /* While not replaying, defer to the target beneath.  */
  return this->beneath ()->stopped_by_hw_breakpoint ();
}
2735
f6ac5f3d 2736/* The supports_stopped_by_hw_breakpoint method of target
9e8915c6
PA
2737 record-btrace. */
2738
57810aa7 2739bool
f6ac5f3d 2740record_btrace_target::supports_stopped_by_hw_breakpoint ()
9e8915c6 2741{
f6ac5f3d 2742 if (record_is_replaying (minus_one_ptid))
57810aa7 2743 return true;
52834460 2744
b6a8c27b 2745 return this->beneath ()->supports_stopped_by_hw_breakpoint ();
b2f4cfde
MM
2746}
2747
f6ac5f3d 2748/* The update_thread_list method of target record-btrace. */
e2887aa3 2749
f6ac5f3d
PA
2750void
2751record_btrace_target::update_thread_list ()
e2887aa3 2752{
e8032dde 2753 /* We don't add or remove threads during replay. */
f6ac5f3d 2754 if (record_is_replaying (minus_one_ptid))
e2887aa3
MM
2755 return;
2756
2757 /* Forward the request. */
b6a8c27b 2758 this->beneath ()->update_thread_list ();
e2887aa3
MM
2759}
2760
f6ac5f3d 2761/* The thread_alive method of target record-btrace. */
e2887aa3 2762
57810aa7 2763bool
f6ac5f3d 2764record_btrace_target::thread_alive (ptid_t ptid)
e2887aa3
MM
2765{
2766 /* We don't add or remove threads during replay. */
f6ac5f3d 2767 if (record_is_replaying (minus_one_ptid))
00431a78 2768 return true;
e2887aa3
MM
2769
2770 /* Forward the request. */
b6a8c27b 2771 return this->beneath ()->thread_alive (ptid);
e2887aa3
MM
2772}
2773
066ce621
MM
/* Set the replay branch trace instruction iterator.  If IT is NULL, replay
   is stopped.

   Clears the insn/call histories, updates the cached stop PC, and reprints
   the current frame so the user sees the new position.  */

static void
record_btrace_set_replay (struct thread_info *tp,
			  const struct btrace_insn_iterator *it)
{
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;

  if (it == NULL)
    record_btrace_stop_replaying (tp);
  else
    {
      /* Lazily start replaying on the first position change.  */
      if (btinfo->replay == NULL)
	record_btrace_start_replaying (tp);
      else if (btrace_insn_cmp (btinfo->replay, it) == 0)
	return;		/* Already at the requested position.  */

      *btinfo->replay = *it;
      /* The replay position changed; cached registers are stale.  */
      registers_changed_thread (tp);
    }

  /* Start anew from the new replay position. */
  record_btrace_clear_histories (btinfo);

  inferior_thread ()->suspend.stop_pc
    = regcache_read_pc (get_current_regcache ());
  print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
}
2805
f6ac5f3d 2806/* The goto_record_begin method of target record-btrace. */
066ce621 2807
f6ac5f3d
PA
2808void
2809record_btrace_target::goto_record_begin ()
066ce621
MM
2810{
2811 struct thread_info *tp;
2812 struct btrace_insn_iterator begin;
2813
2814 tp = require_btrace_thread ();
2815
2816 btrace_insn_begin (&begin, &tp->btrace);
b61ce85c
MM
2817
2818 /* Skip gaps at the beginning of the trace. */
2819 while (btrace_insn_get (&begin) == NULL)
2820 {
2821 unsigned int steps;
2822
2823 steps = btrace_insn_next (&begin, 1);
2824 if (steps == 0)
2825 error (_("No trace."));
2826 }
2827
066ce621 2828 record_btrace_set_replay (tp, &begin);
066ce621
MM
2829}
2830
f6ac5f3d 2831/* The goto_record_end method of target record-btrace. */
066ce621 2832
f6ac5f3d
PA
2833void
2834record_btrace_target::goto_record_end ()
066ce621
MM
2835{
2836 struct thread_info *tp;
2837
2838 tp = require_btrace_thread ();
2839
2840 record_btrace_set_replay (tp, NULL);
066ce621
MM
2841}
2842
f6ac5f3d 2843/* The goto_record method of target record-btrace. */
066ce621 2844
f6ac5f3d
PA
2845void
2846record_btrace_target::goto_record (ULONGEST insn)
066ce621
MM
2847{
2848 struct thread_info *tp;
2849 struct btrace_insn_iterator it;
2850 unsigned int number;
2851 int found;
2852
2853 number = insn;
2854
2855 /* Check for wrap-arounds. */
2856 if (number != insn)
2857 error (_("Instruction number out of range."));
2858
2859 tp = require_btrace_thread ();
2860
2861 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
69090cee
TW
2862
2863 /* Check if the instruction could not be found or is a gap. */
2864 if (found == 0 || btrace_insn_get (&it) == NULL)
066ce621
MM
2865 error (_("No such instruction."));
2866
2867 record_btrace_set_replay (tp, &it);
066ce621
MM
2868}
2869
f6ac5f3d 2870/* The record_stop_replaying method of target record-btrace. */
797094dd 2871
f6ac5f3d
PA
2872void
2873record_btrace_target::record_stop_replaying ()
797094dd 2874{
d89edf9b 2875 for (thread_info *tp : current_inferior ()->non_exited_threads ())
797094dd
MM
2876 record_btrace_stop_replaying (tp);
2877}
2878
f6ac5f3d 2879/* The execution_direction target method. */
70ad5bff 2880
f6ac5f3d
PA
2881enum exec_direction_kind
2882record_btrace_target::execution_direction ()
70ad5bff
MM
2883{
2884 return record_btrace_resume_exec_dir;
2885}
2886
f6ac5f3d 2887/* The prepare_to_generate_core target method. */
aef92902 2888
f6ac5f3d
PA
2889void
2890record_btrace_target::prepare_to_generate_core ()
aef92902
MM
2891{
2892 record_btrace_generating_corefile = 1;
2893}
2894
f6ac5f3d 2895/* The done_generating_core target method. */
aef92902 2896
f6ac5f3d
PA
2897void
2898record_btrace_target::done_generating_core ()
aef92902
MM
2899{
2900 record_btrace_generating_corefile = 0;
2901}
2902
f4abbc16
MM
2903/* Start recording in BTS format. */
2904
2905static void
cdb34d4a 2906cmd_record_btrace_bts_start (const char *args, int from_tty)
f4abbc16 2907{
f4abbc16
MM
2908 if (args != NULL && *args != 0)
2909 error (_("Invalid argument."));
2910
2911 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2912
a70b8144 2913 try
492d29ea 2914 {
95a6b0a1 2915 execute_command ("target record-btrace", from_tty);
492d29ea 2916 }
230d2906 2917 catch (const gdb_exception &exception)
f4abbc16
MM
2918 {
2919 record_btrace_conf.format = BTRACE_FORMAT_NONE;
eedc3f4f 2920 throw;
f4abbc16
MM
2921 }
2922}
2923
bc504a31 2924/* Start recording in Intel Processor Trace format. */
afedecd3
MM
2925
2926static void
cdb34d4a 2927cmd_record_btrace_pt_start (const char *args, int from_tty)
afedecd3
MM
2928{
2929 if (args != NULL && *args != 0)
2930 error (_("Invalid argument."));
2931
b20a6524 2932 record_btrace_conf.format = BTRACE_FORMAT_PT;
f4abbc16 2933
a70b8144 2934 try
492d29ea 2935 {
95a6b0a1 2936 execute_command ("target record-btrace", from_tty);
492d29ea 2937 }
230d2906 2938 catch (const gdb_exception &exception)
492d29ea
PA
2939 {
2940 record_btrace_conf.format = BTRACE_FORMAT_NONE;
eedc3f4f 2941 throw;
492d29ea 2942 }
afedecd3
MM
2943}
2944
b20a6524
MM
/* Alias for "target record".

   Tries Intel PT first; if that fails, falls back to BTS.  If both fail,
   the format is reset and the (second) exception propagates.  */

static void
cmd_record_btrace_start (const char *args, int from_tty)
{
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  /* Prefer the richer PT format.  */
  record_btrace_conf.format = BTRACE_FORMAT_PT;

  try
    {
      execute_command ("target record-btrace", from_tty);
    }
  catch (const gdb_exception &exception)
    {
      /* PT failed; fall back to BTS.  */
      record_btrace_conf.format = BTRACE_FORMAT_BTS;

      try
	{
	  execute_command ("target record-btrace", from_tty);
	}
      catch (const gdb_exception &ex)
	{
	  /* Both formats failed; reset and report the BTS failure.  */
	  record_btrace_conf.format = BTRACE_FORMAT_NONE;
	  throw;
	}
    }
}
2974
67b5c0c1
MM
2975/* The "show record btrace replay-memory-access" command. */
2976
2977static void
2978cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2979 struct cmd_list_element *c, const char *value)
2980{
2981 fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
2982 replay_memory_access);
2983}
2984
4a4495d6
MM
2985/* The "set record btrace cpu none" command. */
2986
2987static void
2988cmd_set_record_btrace_cpu_none (const char *args, int from_tty)
2989{
2990 if (args != nullptr && *args != 0)
2991 error (_("Trailing junk: '%s'."), args);
2992
2993 record_btrace_cpu_state = CS_NONE;
2994}
2995
2996/* The "set record btrace cpu auto" command. */
2997
2998static void
2999cmd_set_record_btrace_cpu_auto (const char *args, int from_tty)
3000{
3001 if (args != nullptr && *args != 0)
3002 error (_("Trailing junk: '%s'."), args);
3003
3004 record_btrace_cpu_state = CS_AUTO;
3005}
3006
/* The "set record btrace cpu" command.

   Parses "intel: FAMILY/MODEL[/STEPPING]" and stores the result in
   record_btrace_cpu, switching the cpu state to CS_CPU.  */

static void
cmd_set_record_btrace_cpu (const char *args, int from_tty)
{
  if (args == nullptr)
    args = "";

  /* We use a hard-coded vendor string for now.  The two %n conversions
     record how many characters were consumed after the two- and
     three-field forms, so trailing junk can be detected.  */
  unsigned int family, model, stepping;
  int l1, l2, matches = sscanf (args, "intel: %u/%u%n/%u%n", &family,
				&model, &l1, &stepping, &l2);
  if (matches == 3)
    {
      /* family/model/stepping were all given; L2 marks the parse end.  */
      if (strlen (args) != l2)
	error (_("Trailing junk: '%s'."), args + l2);
    }
  else if (matches == 2)
    {
      /* Only family/model were given; L1 marks the parse end.  */
      if (strlen (args) != l1)
	error (_("Trailing junk: '%s'."), args + l1);

      stepping = 0;
    }
  else
    error (_("Bad format.  See \"help set record btrace cpu\"."));

  /* Range-check against the widths of the record_btrace_cpu fields.  */
  if (USHRT_MAX < family)
    error (_("Cpu family too big."));

  if (UCHAR_MAX < model)
    error (_("Cpu model too big."));

  if (UCHAR_MAX < stepping)
    error (_("Cpu stepping too big."));

  record_btrace_cpu.vendor = CV_INTEL;
  record_btrace_cpu.family = family;
  record_btrace_cpu.model = model;
  record_btrace_cpu.stepping = stepping;

  record_btrace_cpu_state = CS_CPU;
}
3050
3051/* The "show record btrace cpu" command. */
3052
3053static void
3054cmd_show_record_btrace_cpu (const char *args, int from_tty)
3055{
4a4495d6
MM
3056 if (args != nullptr && *args != 0)
3057 error (_("Trailing junk: '%s'."), args);
3058
3059 switch (record_btrace_cpu_state)
3060 {
3061 case CS_AUTO:
3062 printf_unfiltered (_("btrace cpu is 'auto'.\n"));
3063 return;
3064
3065 case CS_NONE:
3066 printf_unfiltered (_("btrace cpu is 'none'.\n"));
3067 return;
3068
3069 case CS_CPU:
3070 switch (record_btrace_cpu.vendor)
3071 {
3072 case CV_INTEL:
3073 if (record_btrace_cpu.stepping == 0)
3074 printf_unfiltered (_("btrace cpu is 'intel: %u/%u'.\n"),
3075 record_btrace_cpu.family,
3076 record_btrace_cpu.model);
3077 else
3078 printf_unfiltered (_("btrace cpu is 'intel: %u/%u/%u'.\n"),
3079 record_btrace_cpu.family,
3080 record_btrace_cpu.model,
3081 record_btrace_cpu.stepping);
3082 return;
3083 }
3084 }
3085
3086 error (_("Internal error: bad cpu state."));
3087}
3088
b20a6524
MM
3089/* The "record bts buffer-size" show value function. */
3090
3091static void
3092show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
3093 struct cmd_list_element *c,
3094 const char *value)
3095{
3096 fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
3097 value);
3098}
3099
3100/* The "record pt buffer-size" show value function. */
3101
3102static void
3103show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
3104 struct cmd_list_element *c,
3105 const char *value)
3106{
3107 fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
3108 value);
3109}
3110
afedecd3
MM
3111/* Initialize btrace commands. */
3112
6c265988 3113void _initialize_record_btrace ();
afedecd3 3114void
6c265988 3115_initialize_record_btrace ()
afedecd3 3116{
f4abbc16
MM
  /* "record btrace" starts recording; also reachable as "record b".  */
 3117 add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
 3118 _("Start branch trace recording."), &record_btrace_cmdlist,
 3119 "record btrace ", 0, &record_cmdlist);
afedecd3
MM
 3120 add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
 3121
f4abbc16
MM
  /* Format sub-command: "record btrace bts" (aliased "record bts").  */
 3122 add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
 3123 _("\
 3124Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
 3125The processor stores a from/to record for each branch into a cyclic buffer.\n\
 3126This format may not be available on all processors."),
 3127 &record_btrace_cmdlist);
 3128 add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);
 3129
b20a6524
MM
  /* Format sub-command: "record btrace pt" (aliased "record pt").  */
 3130 add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
 3131 _("\
bc504a31 3132Start branch trace recording in Intel Processor Trace format.\n\n\
b20a6524
MM
 3133This format may not be available on all processors."),
 3134 &record_btrace_cmdlist);
 3135 add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);
 3136
0743fc83
TT
  /* "set/show record btrace" option prefixes.  */
 3137 add_basic_prefix_cmd ("btrace", class_support,
 3138 _("Set record options."), &set_record_btrace_cmdlist,
 3139 "set record btrace ", 0, &set_record_cmdlist);
67b5c0c1 3140
0743fc83
TT
 3141 add_show_prefix_cmd ("btrace", class_support,
 3142 _("Show record options."), &show_record_btrace_cmdlist,
 3143 "show record btrace ", 0, &show_record_cmdlist);
67b5c0c1
MM
 3144
  /* "set/show record btrace replay-memory-access" — controls whether
     writable memory may be accessed while replaying.  */
 3145 add_setshow_enum_cmd ("replay-memory-access", no_class,
 3146 replay_memory_access_types, &replay_memory_access, _("\
 3147Set what memory accesses are allowed during replay."), _("\
 3148Show what memory accesses are allowed during replay."),
 3149 _("Default is READ-ONLY.\n\n\
 3150The btrace record target does not trace data.\n\
 3151The memory therefore corresponds to the live target and not \
 3152to the current replay position.\n\n\
 3153When READ-ONLY, allow accesses to read-only memory during replay.\n\
 3154When READ-WRITE, allow accesses to read-only and read-write memory during \
 3155replay."),
 3156 NULL, cmd_show_replay_memory_access,
 3157 &set_record_btrace_cmdlist,
 3158 &show_record_btrace_cmdlist);
 3159
4a4495d6
MM
  /* "set/show record btrace cpu" — selects the cpu for which trace-decode
     errata workarounds are enabled (handlers defined above).  */
 3160 add_prefix_cmd ("cpu", class_support, cmd_set_record_btrace_cpu,
 3161 _("\
 3162Set the cpu to be used for trace decode.\n\n\
55063ddb
TT
 3163The format is \"VENDOR:IDENTIFIER\" or \"none\" or \"auto\" (default).\n\
 3164For vendor \"intel\" the format is \"FAMILY/MODEL[/STEPPING]\".\n\n\
4a4495d6
MM
 3165When decoding branch trace, enable errata workarounds for the specified cpu.\n\
 3166The default is \"auto\", which uses the cpu on which the trace was recorded.\n\
 3167When GDB does not support that cpu, this option can be used to enable\n\
 3168workarounds for a similar cpu that GDB supports.\n\n\
 3169When set to \"none\", errata workarounds are disabled."),
 3170 &set_record_btrace_cpu_cmdlist,
590042fc 3171 "set record btrace cpu ", 1,
4a4495d6
MM
 3172 &set_record_btrace_cmdlist);
 3173
 3174 add_cmd ("auto", class_support, cmd_set_record_btrace_cpu_auto, _("\
 3175Automatically determine the cpu to be used for trace decode."),
 3176 &set_record_btrace_cpu_cmdlist);
 3177
 3178 add_cmd ("none", class_support, cmd_set_record_btrace_cpu_none, _("\
 3179Do not enable errata workarounds for trace decode."),
 3180 &set_record_btrace_cpu_cmdlist);
 3181
 3182 add_cmd ("cpu", class_support, cmd_show_record_btrace_cpu, _("\
 3183Show the cpu to be used for trace decode."),
 3184 &show_record_btrace_cmdlist);
 3185
0743fc83
TT
  /* "set/show record btrace bts" options, including buffer-size.  */
 3186 add_basic_prefix_cmd ("bts", class_support,
 3187 _("Set record btrace bts options."),
 3188 &set_record_btrace_bts_cmdlist,
 3189 "set record btrace bts ", 0,
 3190 &set_record_btrace_cmdlist);
d33501a5 3191
0743fc83
TT
 3192 add_show_prefix_cmd ("bts", class_support,
 3193 _("Show record btrace bts options."),
 3194 &show_record_btrace_bts_cmdlist,
 3195 "show record btrace bts ", 0,
 3196 &show_record_btrace_cmdlist);
d33501a5
MM
 3197
 3198 add_setshow_uinteger_cmd ("buffer-size", no_class,
 3199 &record_btrace_conf.bts.size,
 3200 _("Set the record/replay bts buffer size."),
 3201 _("Show the record/replay bts buffer size."), _("\
 3202When starting recording request a trace buffer of this size. \
 3203The actual buffer size may differ from the requested size. \
 3204Use \"info record\" to see the actual buffer size.\n\n\
 3205Bigger buffers allow longer recording but also take more time to process \
 3206the recorded execution trace.\n\n\
b20a6524
MM
 3207The trace buffer size may not be changed while recording."), NULL,
 3208 show_record_bts_buffer_size_value,
d33501a5
MM
 3209 &set_record_btrace_bts_cmdlist,
 3210 &show_record_btrace_bts_cmdlist);
 3211
0743fc83
TT
  /* "set/show record btrace pt" options, including buffer-size.  */
 3212 add_basic_prefix_cmd ("pt", class_support,
 3213 _("Set record btrace pt options."),
 3214 &set_record_btrace_pt_cmdlist,
 3215 "set record btrace pt ", 0,
 3216 &set_record_btrace_cmdlist);
 3217
 3218 add_show_prefix_cmd ("pt", class_support,
 3219 _("Show record btrace pt options."),
 3220 &show_record_btrace_pt_cmdlist,
 3221 "show record btrace pt ", 0,
 3222 &show_record_btrace_cmdlist);
b20a6524
MM
 3223
 3224 add_setshow_uinteger_cmd ("buffer-size", no_class,
 3225 &record_btrace_conf.pt.size,
 3226 _("Set the record/replay pt buffer size."),
 3227 _("Show the record/replay pt buffer size."), _("\
 3228Bigger buffers allow longer recording but also take more time to process \
 3229the recorded execution.\n\
 3230The actual buffer size may differ from the requested size. Use \"info record\" \
 3231to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
 3232 &set_record_btrace_pt_cmdlist,
 3233 &show_record_btrace_pt_cmdlist);
 3234
  /* Register the record-btrace target itself.  */
d9f719f1 3235 add_target (record_btrace_target_info, record_btrace_target_open);
0b722aec
MM
 3236
  /* Cache used by the btrace frame unwinder (see bfcache_hash/bfcache_eq
     defined elsewhere in this file).  */
 3237 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
 3238 xcalloc, xfree);
d33501a5
MM
 3239
  /* Default requested buffer sizes: 64 KiB for BTS, 16 KiB for Intel PT.  */
 3240 record_btrace_conf.bts.size = 64 * 1024;
b20a6524 3241 record_btrace_conf.pt.size = 16 * 1024;
afedecd3 3242}
This page took 1.014609 seconds and 4 git commands to generate.