/* Branch trace support for GDB, the GNU debugger.

   Copyright (C) 2013-2018 Free Software Foundation, Inc.

   Contributed by Intel Corp. <markus.t.metzger@intel.com>

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "record.h"
#include "record-btrace.h"
#include "gdbthread.h"
#include "target.h"
#include "gdbcmd.h"
#include "disasm.h"
#include "observable.h"
#include "cli/cli-utils.h"
#include "source.h"
#include "ui-out.h"
#include "symtab.h"
#include "filenames.h"
#include "regcache.h"
#include "frame-unwind.h"
#include "hashtab.h"
#include "infrun.h"
#include "event-loop.h"
#include "inf-loop.h"
#include "vec.h"
#include "inferior.h"
#include <algorithm>

static const target_info record_btrace_target_info = {
  "record-btrace",
  N_("Branch tracing target"),
  N_("Collect control-flow trace and provide the execution history.")
};

/* The target_ops of record-btrace.  */

class record_btrace_target final : public target_ops
{
public:
  const target_info &info () const override
  { return record_btrace_target_info; }

  strata stratum () const override { return record_stratum; }

  void close () override;
  void async (int) override;

  void detach (inferior *inf, int from_tty) override
  { record_detach (this, inf, from_tty); }

  void disconnect (const char *, int) override;

  void mourn_inferior () override
  { record_mourn_inferior (this); }

  void kill () override
  { record_kill (this); }

  enum record_method record_method (ptid_t ptid) override;

  void stop_recording () override;
  void info_record () override;

  void insn_history (int size, gdb_disassembly_flags flags) override;
  void insn_history_from (ULONGEST from, int size,
			  gdb_disassembly_flags flags) override;
  void insn_history_range (ULONGEST begin, ULONGEST end,
			   gdb_disassembly_flags flags) override;
  void call_history (int size, record_print_flags flags) override;
  void call_history_from (ULONGEST begin, int size, record_print_flags flags)
    override;
  void call_history_range (ULONGEST begin, ULONGEST end,
			   record_print_flags flags) override;

  bool record_is_replaying (ptid_t ptid) override;
  bool record_will_replay (ptid_t ptid, int dir) override;
  void record_stop_replaying () override;

  enum target_xfer_status xfer_partial (enum target_object object,
					const char *annex,
					gdb_byte *readbuf,
					const gdb_byte *writebuf,
					ULONGEST offset, ULONGEST len,
					ULONGEST *xfered_len) override;

  int insert_breakpoint (struct gdbarch *,
			 struct bp_target_info *) override;
  int remove_breakpoint (struct gdbarch *, struct bp_target_info *,
			 enum remove_bp_reason) override;

  void fetch_registers (struct regcache *, int) override;

  void store_registers (struct regcache *, int) override;
  void prepare_to_store (struct regcache *) override;

  const struct frame_unwind *get_unwinder () override;

  const struct frame_unwind *get_tailcall_unwinder () override;

  void commit_resume () override;
  void resume (ptid_t, int, enum gdb_signal) override;
  ptid_t wait (ptid_t, struct target_waitstatus *, int) override;

  void stop (ptid_t) override;
  void update_thread_list () override;
  bool thread_alive (ptid_t ptid) override;
  void goto_record_begin () override;
  void goto_record_end () override;
  void goto_record (ULONGEST insn) override;

  bool can_execute_reverse () override;

  bool stopped_by_sw_breakpoint () override;
  bool supports_stopped_by_sw_breakpoint () override;

  bool stopped_by_hw_breakpoint () override;
  bool supports_stopped_by_hw_breakpoint () override;

  enum exec_direction_kind execution_direction () override;
  void prepare_to_generate_core () override;
  void done_generating_core () override;
};

static record_btrace_target record_btrace_ops;

/* Initialize the record-btrace target ops.  */

/* Token associated with a new-thread observer enabling branch tracing
   for the new thread.  */
static const gdb::observers::token record_btrace_thread_observer_token {};

/* Memory access types used in set/show record btrace replay-memory-access.  */
static const char replay_memory_access_read_only[] = "read-only";
static const char replay_memory_access_read_write[] = "read-write";
static const char *const replay_memory_access_types[] =
{
  replay_memory_access_read_only,
  replay_memory_access_read_write,
  NULL
};

/* The currently allowed replay memory access type.  */
static const char *replay_memory_access = replay_memory_access_read_only;

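/* Illustrative note (not part of the original source): these strings back
   the "set record btrace replay-memory-access" setting.  With the default
   "read-only" value, a replaying thread may only read memory that lives in
   read-only sections; switching to "read-write" relaxes that, e.g.

     (gdb) set record btrace replay-memory-access read-write

   The exact user-visible command syntax is assumed from the setting names
   above.  */
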
/* The cpu state kinds.  */
enum record_btrace_cpu_state_kind
{
  CS_AUTO,
  CS_NONE,
  CS_CPU
};

/* The current cpu state.  */
static enum record_btrace_cpu_state_kind record_btrace_cpu_state = CS_AUTO;

/* The current cpu for trace decode.  */
static struct btrace_cpu record_btrace_cpu;

/* Command lists for "set/show record btrace".  */
static struct cmd_list_element *set_record_btrace_cmdlist;
static struct cmd_list_element *show_record_btrace_cmdlist;

/* The execution direction of the last resume we got.  See record-full.c.  */
static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;

/* The async event handler for reverse/replay execution.  */
static struct async_event_handler *record_btrace_async_inferior_event_handler;

/* A flag indicating that we are currently generating a core file.  */
static int record_btrace_generating_corefile;

/* The current branch trace configuration.  */
static struct btrace_config record_btrace_conf;

/* Command list for "record btrace".  */
static struct cmd_list_element *record_btrace_cmdlist;

/* Command lists for "set/show record btrace bts".  */
static struct cmd_list_element *set_record_btrace_bts_cmdlist;
static struct cmd_list_element *show_record_btrace_bts_cmdlist;

/* Command lists for "set/show record btrace pt".  */
static struct cmd_list_element *set_record_btrace_pt_cmdlist;
static struct cmd_list_element *show_record_btrace_pt_cmdlist;

/* Command list for "set record btrace cpu".  */
static struct cmd_list_element *set_record_btrace_cpu_cmdlist;

/* Print a record-btrace debug message.  Use do ... while (0) to avoid
   ambiguities when used in if statements.  */

#define DEBUG(msg, args...)						\
  do									\
    {									\
      if (record_debug != 0)						\
        fprintf_unfiltered (gdb_stdlog,					\
                            "[record-btrace] " msg "\n", ##args);	\
    }									\
  while (0)

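/* Illustrative example (not part of the original source): because DEBUG
   expands to a single do ... while (0) statement, it nests safely under an
   unbraced if/else, e.g.

     if (some_condition)
       DEBUG ("resume %s", target_pid_to_str (ptid));
     else
       do_something_else ();

   Without the wrapper, the macro's trailing semicolon would end the if
   statement and the else branch would no longer parse.  */
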
/* Return the cpu configured by the user.  Returns NULL if the cpu was
   configured as auto.  */
const struct btrace_cpu *
record_btrace_get_cpu (void)
{
  switch (record_btrace_cpu_state)
    {
    case CS_AUTO:
      return nullptr;

    case CS_NONE:
      record_btrace_cpu.vendor = CV_UNKNOWN;
      /* Fall through.  */
    case CS_CPU:
      return &record_btrace_cpu;
    }

  error (_("Internal error: bad record btrace cpu state."));
}

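/* Illustrative mapping (assumption, not part of the original source): the
   three states correspond to the "set record btrace cpu" variants:

     auto          -> CS_AUTO, record_btrace_get_cpu () returns NULL
     none          -> CS_NONE, vendor forced to CV_UNKNOWN
     <identifier>  -> CS_CPU,  record_btrace_cpu holds the user's cpu

   The returned pointer is passed to btrace_fetch () to steer trace
   decode.  */
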
/* Update the branch trace for the current thread and return a pointer to its
   thread_info.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct thread_info *
require_btrace_thread (void)
{
  DEBUG ("require");

  if (inferior_ptid == null_ptid)
    error (_("No thread."));

  thread_info *tp = inferior_thread ();

  validate_registers_access ();

  btrace_fetch (tp, record_btrace_get_cpu ());

  if (btrace_is_empty (tp))
    error (_("No trace."));

  return tp;
}

/* Update the branch trace for the current thread and return a pointer to its
   branch trace information struct.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct btrace_thread_info *
require_btrace (void)
{
  struct thread_info *tp;

  tp = require_btrace_thread ();

  return &tp->btrace;
}

/* Enable branch tracing for one thread.  Warn on errors.  */

static void
record_btrace_enable_warn (struct thread_info *tp)
{
  TRY
    {
      btrace_enable (tp, &record_btrace_conf);
    }
  CATCH (error, RETURN_MASK_ERROR)
    {
      warning ("%s", error.message);
    }
  END_CATCH
}

/* Enable automatic tracing of new threads.  */

static void
record_btrace_auto_enable (void)
{
  DEBUG ("attach thread observer");

  gdb::observers::new_thread.attach (record_btrace_enable_warn,
				     record_btrace_thread_observer_token);
}

/* Disable automatic tracing of new threads.  */

static void
record_btrace_auto_disable (void)
{
  DEBUG ("detach thread observer");

  gdb::observers::new_thread.detach (record_btrace_thread_observer_token);
}

/* The record-btrace async event handler function.  */

static void
record_btrace_handle_async_inferior_event (gdb_client_data data)
{
  inferior_event_handler (INF_REG_EVENT, NULL);
}

/* See record-btrace.h.  */

void
record_btrace_push_target (void)
{
  const char *format;

  record_btrace_auto_enable ();

  push_target (&record_btrace_ops);

  record_btrace_async_inferior_event_handler
    = create_async_event_handler (record_btrace_handle_async_inferior_event,
				  NULL);
  record_btrace_generating_corefile = 0;

  format = btrace_format_short_string (record_btrace_conf.format);
  gdb::observers::record_changed.notify (current_inferior (), 1, "btrace",
					 format);
}

/* Disable btrace on a set of threads on scope exit.  */

struct scoped_btrace_disable
{
  scoped_btrace_disable () = default;

  DISABLE_COPY_AND_ASSIGN (scoped_btrace_disable);

  ~scoped_btrace_disable ()
  {
    for (thread_info *tp : m_threads)
      btrace_disable (tp);
  }

  void add_thread (thread_info *thread)
  {
    m_threads.push_front (thread);
  }

  void discard ()
  {
    m_threads.clear ();
  }

private:
  std::forward_list<thread_info *> m_threads;
};

/* Open target record-btrace.  */

static void
record_btrace_target_open (const char *args, int from_tty)
{
  /* If we fail to enable btrace for one thread, disable it for the threads for
     which it was successfully enabled.  */
  scoped_btrace_disable btrace_disable;

  DEBUG ("open");

  record_preopen ();

  if (!target_has_execution)
    error (_("The program is not being run."));

  for (thread_info *tp : all_non_exited_threads ())
    if (args == NULL || *args == 0 || number_is_in_list (args, tp->global_num))
      {
	btrace_enable (tp, &record_btrace_conf);

	btrace_disable.add_thread (tp);
      }

  record_btrace_push_target ();

  btrace_disable.discard ();
}

/* The stop_recording method of target record-btrace.  */

void
record_btrace_target::stop_recording ()
{
  DEBUG ("stop recording");

  record_btrace_auto_disable ();

  for (thread_info *tp : all_non_exited_threads ())
    if (tp->btrace.target != NULL)
      btrace_disable (tp);
}

/* The disconnect method of target record-btrace.  */

void
record_btrace_target::disconnect (const char *args,
				  int from_tty)
{
  struct target_ops *beneath = this->beneath ();

  /* Do not stop recording, just clean up GDB side.  */
  unpush_target (this);

  /* Forward disconnect.  */
  beneath->disconnect (args, from_tty);
}

/* The close method of target record-btrace.  */

void
record_btrace_target::close ()
{
  if (record_btrace_async_inferior_event_handler != NULL)
    delete_async_event_handler (&record_btrace_async_inferior_event_handler);

  /* Make sure automatic recording gets disabled even if we did not stop
     recording before closing the record-btrace target.  */
  record_btrace_auto_disable ();

  /* We should have already stopped recording.
     Tear down btrace in case we have not.  */
  for (thread_info *tp : all_non_exited_threads ())
    btrace_teardown (tp);
}

/* The async method of target record-btrace.  */

void
record_btrace_target::async (int enable)
{
  if (enable)
    mark_async_event_handler (record_btrace_async_inferior_event_handler);
  else
    clear_async_event_handler (record_btrace_async_inferior_event_handler);

  this->beneath ()->async (enable);
}

/* Adjusts the size and returns a human readable size suffix.  */

static const char *
record_btrace_adjust_size (unsigned int *size)
{
  unsigned int sz;

  sz = *size;

  if ((sz & ((1u << 30) - 1)) == 0)
    {
      *size = sz >> 30;
      return "GB";
    }
  else if ((sz & ((1u << 20) - 1)) == 0)
    {
      *size = sz >> 20;
      return "MB";
    }
  else if ((sz & ((1u << 10) - 1)) == 0)
    {
      *size = sz >> 10;
      return "kB";
    }
  else
    return "";
}

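/* Worked example (illustrative, not part of the original source):

     unsigned int size = 16384;
     const char *suffix = record_btrace_adjust_size (&size);

   afterwards size == 16 and suffix == "kB".  A value that is not a multiple
   of 1024, e.g. 16385, is left unchanged and gets the empty suffix.  */
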
/* Print a BTS configuration.  */

static void
record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
{
  const char *suffix;
  unsigned int size;

  size = conf->size;
  if (size > 0)
    {
      suffix = record_btrace_adjust_size (&size);
      printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
    }
}

/* Print an Intel Processor Trace configuration.  */

static void
record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
{
  const char *suffix;
  unsigned int size;

  size = conf->size;
  if (size > 0)
    {
      suffix = record_btrace_adjust_size (&size);
      printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
    }
}

/* Print a branch tracing configuration.  */

static void
record_btrace_print_conf (const struct btrace_config *conf)
{
  printf_unfiltered (_("Recording format: %s.\n"),
		     btrace_format_string (conf->format));

  switch (conf->format)
    {
    case BTRACE_FORMAT_NONE:
      return;

    case BTRACE_FORMAT_BTS:
      record_btrace_print_bts_conf (&conf->bts);
      return;

    case BTRACE_FORMAT_PT:
      record_btrace_print_pt_conf (&conf->pt);
      return;
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
}

/* The info_record method of target record-btrace.  */

void
record_btrace_target::info_record ()
{
  struct btrace_thread_info *btinfo;
  const struct btrace_config *conf;
  struct thread_info *tp;
  unsigned int insns, calls, gaps;

  DEBUG ("info");

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  validate_registers_access ();

  btinfo = &tp->btrace;

  conf = ::btrace_conf (btinfo);
  if (conf != NULL)
    record_btrace_print_conf (conf);

  btrace_fetch (tp, record_btrace_get_cpu ());

  insns = 0;
  calls = 0;
  gaps = 0;

  if (!btrace_is_empty (tp))
    {
      struct btrace_call_iterator call;
      struct btrace_insn_iterator insn;

      btrace_call_end (&call, btinfo);
      btrace_call_prev (&call, 1);
      calls = btrace_call_number (&call);

      btrace_insn_end (&insn, btinfo);
      insns = btrace_insn_number (&insn);

      /* If the last instruction is not a gap, it is the current instruction
	 that is not actually part of the record.  */
      if (btrace_insn_get (&insn) != NULL)
	insns -= 1;

      gaps = btinfo->ngaps;
    }

  printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
		       "for thread %s (%s).\n"), insns, calls, gaps,
		     print_thread_id (tp), target_pid_to_str (tp->ptid));

  if (btrace_is_replaying (tp))
    printf_unfiltered (_("Replay in progress.  At instruction %u.\n"),
		       btrace_insn_number (btinfo->replay));
}

/* Print a decode error.  */

static void
btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
			    enum btrace_format format)
{
  const char *errstr = btrace_decode_error (format, errcode);

  uiout->text (_("["));
  /* ERRCODE > 0 indicates notifications on BTRACE_FORMAT_PT.  */
  if (!(format == BTRACE_FORMAT_PT && errcode > 0))
    {
      uiout->text (_("decode error ("));
      uiout->field_int ("errcode", errcode);
      uiout->text (_("): "));
    }
  uiout->text (errstr);
  uiout->text (_("]\n"));
}

/* Print an unsigned int.  */

static void
ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
{
  uiout->field_fmt (fld, "%u", val);
}

/* A range of source lines.  */

struct btrace_line_range
{
  /* The symtab this line is from.  */
  struct symtab *symtab;

  /* The first line (inclusive).  */
  int begin;

  /* The last line (exclusive).  */
  int end;
};

/* Construct a line range.  */

static struct btrace_line_range
btrace_mk_line_range (struct symtab *symtab, int begin, int end)
{
  struct btrace_line_range range;

  range.symtab = symtab;
  range.begin = begin;
  range.end = end;

  return range;
}

/* Add a line to a line range.  */

static struct btrace_line_range
btrace_line_range_add (struct btrace_line_range range, int line)
{
  if (range.end <= range.begin)
    {
      /* This is the first entry.  */
      range.begin = line;
      range.end = line + 1;
    }
  else if (line < range.begin)
    range.begin = line;
  else if (range.end < line)
    range.end = line;

  return range;
}

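/* Illustrative example (not part of the original source): a freshly
   constructed range such as btrace_mk_line_range (symtab, 0, 0) is empty
   (end <= begin).  Adding line 42 to it yields begin == 42 and end == 43,
   i.e. the half-open range [42, 43) covering just line 42.  */
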
/* Return non-zero if RANGE is empty, zero otherwise.  */

static int
btrace_line_range_is_empty (struct btrace_line_range range)
{
  return range.end <= range.begin;
}

/* Return non-zero if LHS contains RHS, zero otherwise.  */

static int
btrace_line_range_contains_range (struct btrace_line_range lhs,
				  struct btrace_line_range rhs)
{
  return ((lhs.symtab == rhs.symtab)
	  && (lhs.begin <= rhs.begin)
	  && (rhs.end <= lhs.end));
}

/* Find the line range associated with PC.  */

static struct btrace_line_range
btrace_find_line_range (CORE_ADDR pc)
{
  struct btrace_line_range range;
  struct linetable_entry *lines;
  struct linetable *ltable;
  struct symtab *symtab;
  int nlines, i;

  symtab = find_pc_line_symtab (pc);
  if (symtab == NULL)
    return btrace_mk_line_range (NULL, 0, 0);

  ltable = SYMTAB_LINETABLE (symtab);
  if (ltable == NULL)
    return btrace_mk_line_range (symtab, 0, 0);

  nlines = ltable->nitems;
  lines = ltable->item;
  if (nlines <= 0)
    return btrace_mk_line_range (symtab, 0, 0);

  range = btrace_mk_line_range (symtab, 0, 0);
  for (i = 0; i < nlines - 1; i++)
    {
      if ((lines[i].pc == pc) && (lines[i].line != 0))
	range = btrace_line_range_add (range, lines[i].line);
    }

  return range;
}

/* Print source lines in LINES to UIOUT.

   SRC_AND_ASM_TUPLE and ASM_LIST hold the "src_and_asm_line" tuple and the
   "line_asm_insn" list emitters for the last printed source line.  When
   printing a new source line, the old emitters are closed and new ones are
   opened so the following instructions can be added to that line.  If the
   source line range in LINES is not empty, this function leaves the
   emitters for the last printed source line open so instructions can be
   added to it.  */

static void
btrace_print_lines (struct btrace_line_range lines, struct ui_out *uiout,
		    gdb::optional<ui_out_emit_tuple> *src_and_asm_tuple,
		    gdb::optional<ui_out_emit_list> *asm_list,
		    gdb_disassembly_flags flags)
{
  print_source_lines_flags psl_flags;

  if (flags & DISASSEMBLY_FILENAME)
    psl_flags |= PRINT_SOURCE_LINES_FILENAME;

  for (int line = lines.begin; line < lines.end; ++line)
    {
      asm_list->reset ();

      src_and_asm_tuple->emplace (uiout, "src_and_asm_line");

      print_source_lines (lines.symtab, line, line + 1, psl_flags);

      asm_list->emplace (uiout, "line_asm_insn");
    }
}

/* Disassemble a section of the recorded instruction trace.  */

static void
btrace_insn_history (struct ui_out *uiout,
		     const struct btrace_thread_info *btinfo,
		     const struct btrace_insn_iterator *begin,
		     const struct btrace_insn_iterator *end,
		     gdb_disassembly_flags flags)
{
  DEBUG ("itrace (0x%x): [%u; %u)", (unsigned) flags,
	 btrace_insn_number (begin), btrace_insn_number (end));

  flags |= DISASSEMBLY_SPECULATIVE;

  struct gdbarch *gdbarch = target_gdbarch ();
  btrace_line_range last_lines = btrace_mk_line_range (NULL, 0, 0);

  ui_out_emit_list list_emitter (uiout, "asm_insns");

  gdb::optional<ui_out_emit_tuple> src_and_asm_tuple;
  gdb::optional<ui_out_emit_list> asm_list;

  gdb_pretty_print_disassembler disasm (gdbarch);

  for (btrace_insn_iterator it = *begin; btrace_insn_cmp (&it, end) != 0;
       btrace_insn_next (&it, 1))
    {
      const struct btrace_insn *insn;

      insn = btrace_insn_get (&it);

      /* A NULL instruction indicates a gap in the trace.  */
      if (insn == NULL)
	{
	  const struct btrace_config *conf;

	  conf = btrace_conf (btinfo);

	  /* We have trace so we must have a configuration.  */
	  gdb_assert (conf != NULL);

	  uiout->field_fmt ("insn-number", "%u",
			    btrace_insn_number (&it));
	  uiout->text ("\t");

	  btrace_ui_out_decode_error (uiout, btrace_insn_get_error (&it),
				      conf->format);
	}
      else
	{
	  struct disasm_insn dinsn;

	  if ((flags & DISASSEMBLY_SOURCE) != 0)
	    {
	      struct btrace_line_range lines;

	      lines = btrace_find_line_range (insn->pc);
	      if (!btrace_line_range_is_empty (lines)
		  && !btrace_line_range_contains_range (last_lines, lines))
		{
		  btrace_print_lines (lines, uiout, &src_and_asm_tuple,
				      &asm_list, flags);
		  last_lines = lines;
		}
	      else if (!src_and_asm_tuple.has_value ())
		{
		  gdb_assert (!asm_list.has_value ());

		  src_and_asm_tuple.emplace (uiout, "src_and_asm_line");

		  /* No source information.  */
		  asm_list.emplace (uiout, "line_asm_insn");
		}

	      gdb_assert (src_and_asm_tuple.has_value ());
	      gdb_assert (asm_list.has_value ());
	    }

	  memset (&dinsn, 0, sizeof (dinsn));
	  dinsn.number = btrace_insn_number (&it);
	  dinsn.addr = insn->pc;

	  if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
	    dinsn.is_speculative = 1;

	  disasm.pretty_print_insn (uiout, &dinsn, flags);
	}
    }
}

/* The insn_history method of target record-btrace.  */

void
record_btrace_target::insn_history (int size, gdb_disassembly_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  btinfo = require_btrace ();
  history = btinfo->insn_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("insn-history (0x%x): %d", (unsigned) flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
	 start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
	begin = *replay;
      else
	btrace_insn_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
	 expand in the other direction, as well, to fill up any remaining
	 context.  */
      end = begin;
      if (size < 0)
	{
	  /* We want the current position covered, as well.  */
	  covered = btrace_insn_next (&end, 1);
	  covered += btrace_insn_prev (&begin, context - covered);
	  covered += btrace_insn_next (&end, context - covered);
	}
      else
	{
	  covered = btrace_insn_next (&end, context);
	  covered += btrace_insn_prev (&begin, context - covered);
	}
    }
  else
    {
      begin = history->begin;
      end = history->end;

      DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", (unsigned) flags, size,
	     btrace_insn_number (&begin), btrace_insn_number (&end));

      if (size < 0)
	{
	  end = begin;
	  covered = btrace_insn_prev (&begin, context);
	}
      else
	{
	  begin = end;
	  covered = btrace_insn_next (&end, context);
	}
    }

  if (covered > 0)
    btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
	printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
	printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  btrace_set_insn_history (btinfo, &begin, &end);
}

/* The insn_history_range method of target record-btrace.  */

void
record_btrace_target::insn_history_range (ULONGEST from, ULONGEST to,
					  gdb_disassembly_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_iterator begin, end;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "insn history");
  low = from;
  high = to;

  DEBUG ("insn-history (0x%x): [%u; %u)", (unsigned) flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_insn_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_insn_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_insn_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_insn_next (&end, 1);
    }

  btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_insn_history (btinfo, &begin, &end);
}

/* The insn_history_from method of target record-btrace.  */

void
record_btrace_target::insn_history_from (ULONGEST from, int size,
					 gdb_disassembly_flags flags)
{
  ULONGEST begin, end, context;

  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  if (size < 0)
    {
      end = from;

      if (from < context)
	begin = 0;
      else
	begin = from - context + 1;
    }
  else
    {
      begin = from;
      end = from + context - 1;

      /* Check for wrap-around.  */
      if (end < begin)
	end = ULONGEST_MAX;
    }

  insn_history_range (begin, end, flags);
}

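/* Worked example (illustrative, not part of the original source): with
   FROM == 10 and SIZE == -3, insn_history_from computes the inclusive range
   [8, 10]; with FROM == 10 and SIZE == 3 it computes [10, 12].  Both are
   then handed to insn_history_range, which treats its bounds as
   inclusive.  */
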
/* Print the instruction number range for a function call history line.  */

static void
btrace_call_history_insn_range (struct ui_out *uiout,
				const struct btrace_function *bfun)
{
  unsigned int begin, end, size;

  size = bfun->insn.size ();
  gdb_assert (size > 0);

  begin = bfun->insn_offset;
  end = begin + size - 1;

  ui_out_field_uint (uiout, "insn begin", begin);
  uiout->text (",");
  ui_out_field_uint (uiout, "insn end", end);
}

/* Compute the lowest and highest source line for the instructions in BFUN
   and return them in PBEGIN and PEND.
   Ignore instructions that can't be mapped to BFUN, e.g. instructions that
   result from inlining or macro expansion.  */

static void
btrace_compute_src_line_range (const struct btrace_function *bfun,
			       int *pbegin, int *pend)
{
  struct symtab *symtab;
  struct symbol *sym;
  int begin, end;

  begin = INT_MAX;
  end = INT_MIN;

  sym = bfun->sym;
  if (sym == NULL)
    goto out;

  symtab = symbol_symtab (sym);

  for (const btrace_insn &insn : bfun->insn)
    {
      struct symtab_and_line sal;

      sal = find_pc_line (insn.pc, 0);
      if (sal.symtab != symtab || sal.line == 0)
	continue;

      begin = std::min (begin, sal.line);
      end = std::max (end, sal.line);
    }

 out:
  *pbegin = begin;
  *pend = end;
}

/* Print the source line information for a function call history line.  */

static void
btrace_call_history_src_line (struct ui_out *uiout,
			      const struct btrace_function *bfun)
{
  struct symbol *sym;
  int begin, end;

  sym = bfun->sym;
  if (sym == NULL)
    return;

  uiout->field_string ("file",
		       symtab_to_filename_for_display (symbol_symtab (sym)));

  btrace_compute_src_line_range (bfun, &begin, &end);
  if (end < begin)
    return;

  uiout->text (":");
  uiout->field_int ("min line", begin);

  if (end == begin)
    return;

  uiout->text (",");
  uiout->field_int ("max line", end);
}

/* Get the name of a branch trace function.  */

static const char *
btrace_get_bfun_name (const struct btrace_function *bfun)
{
  struct minimal_symbol *msym;
  struct symbol *sym;

  if (bfun == NULL)
    return "??";

  msym = bfun->msym;
  sym = bfun->sym;

  if (sym != NULL)
    return SYMBOL_PRINT_NAME (sym);
  else if (msym != NULL)
    return MSYMBOL_PRINT_NAME (msym);
  else
    return "??";
}

/* Disassemble a section of the recorded function trace.  */

static void
btrace_call_history (struct ui_out *uiout,
		     const struct btrace_thread_info *btinfo,
		     const struct btrace_call_iterator *begin,
		     const struct btrace_call_iterator *end,
		     int int_flags)
{
  struct btrace_call_iterator it;
  record_print_flags flags = (enum record_print_flag) int_flags;

  DEBUG ("ftrace (0x%x): [%u; %u)", int_flags, btrace_call_number (begin),
	 btrace_call_number (end));

  for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
    {
      const struct btrace_function *bfun;
      struct minimal_symbol *msym;
      struct symbol *sym;

      bfun = btrace_call_get (&it);
      sym = bfun->sym;
      msym = bfun->msym;

      /* Print the function index.  */
      ui_out_field_uint (uiout, "index", bfun->number);
      uiout->text ("\t");

      /* Indicate gaps in the trace.  */
      if (bfun->errcode != 0)
	{
	  const struct btrace_config *conf;

	  conf = btrace_conf (btinfo);

	  /* We have trace so we must have a configuration.  */
	  gdb_assert (conf != NULL);

	  btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);

	  continue;
	}

      if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
	{
	  int level = bfun->level + btinfo->level, i;

	  for (i = 0; i < level; ++i)
	    uiout->text ("  ");
	}

      if (sym != NULL)
	uiout->field_string ("function", SYMBOL_PRINT_NAME (sym));
      else if (msym != NULL)
	uiout->field_string ("function", MSYMBOL_PRINT_NAME (msym));
      else if (!uiout->is_mi_like_p ())
	uiout->field_string ("function", "??");

      if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
	{
	  uiout->text (_("\tinst "));
	  btrace_call_history_insn_range (uiout, bfun);
	}

      if ((flags & RECORD_PRINT_SRC_LINE) != 0)
	{
	  uiout->text (_("\tat "));
	  btrace_call_history_src_line (uiout, bfun);
	}

      uiout->text ("\n");
    }
}

/* The call_history method of target record-btrace.  */

void
record_btrace_target::call_history (int size, record_print_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  btinfo = require_btrace ();
  history = btinfo->call_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("call-history (0x%x): %d", (int) flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
	 start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
	{
	  begin.btinfo = btinfo;
	  begin.index = replay->call_index;
	}
      else
	btrace_call_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
	 expand in the other direction, as well, to fill up any remaining
	 context.  */
      end = begin;
      if (size < 0)
	{
	  /* We want the current position covered, as well.  */
	  covered = btrace_call_next (&end, 1);
	  covered += btrace_call_prev (&begin, context - covered);
	  covered += btrace_call_next (&end, context - covered);
	}
      else
	{
	  covered = btrace_call_next (&end, context);
	  covered += btrace_call_prev (&begin, context - covered);
	}
    }
  else
    {
      begin = history->begin;
      end = history->end;

      DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", (int) flags, size,
	     btrace_call_number (&begin), btrace_call_number (&end));

      if (size < 0)
	{
	  end = begin;
	  covered = btrace_call_prev (&begin, context);
	}
      else
	{
	  begin = end;
	  covered = btrace_call_next (&end, context);
	}
    }

  if (covered > 0)
    btrace_call_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
	printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
	printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  btrace_set_call_history (btinfo, &begin, &end);
}

/* The call_history_range method of target record-btrace.  */

void
record_btrace_target::call_history_range (ULONGEST from, ULONGEST to,
					  record_print_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_iterator begin, end;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "func history");
  low = from;
  high = to;

  DEBUG ("call-history (0x%x): [%u; %u)", (int) flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_call_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_call_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_call_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_call_next (&end, 1);
    }

  btrace_call_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_call_history (btinfo, &begin, &end);
}

/* The call_history_from method of target record-btrace.  */

void
record_btrace_target::call_history_from (ULONGEST from, int size,
					 record_print_flags flags)
{
  ULONGEST begin, end, context;

  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  if (size < 0)
    {
      end = from;

      if (from < context)
	begin = 0;
      else
	begin = from - context + 1;
    }
  else
    {
      begin = from;
      end = from + context - 1;

      /* Check for wrap-around.  */
      if (end < begin)
	end = ULONGEST_MAX;
    }

  call_history_range (begin, end, flags);
}

/* The record_method method of target record-btrace.  */

enum record_method
record_btrace_target::record_method (ptid_t ptid)
{
  struct thread_info * const tp = find_thread_ptid (ptid);

  if (tp == NULL)
    error (_("No thread."));

  if (tp->btrace.target == NULL)
    return RECORD_METHOD_NONE;

  return RECORD_METHOD_BTRACE;
}

/* The record_is_replaying method of target record-btrace.  */

bool
record_btrace_target::record_is_replaying (ptid_t ptid)
{
  for (thread_info *tp : all_non_exited_threads (ptid))
    if (btrace_is_replaying (tp))
      return true;

  return false;
}

/* The record_will_replay method of target record-btrace.  */

bool
record_btrace_target::record_will_replay (ptid_t ptid, int dir)
{
  return dir == EXEC_REVERSE || record_is_replaying (ptid);
}

/* The xfer_partial method of target record-btrace.  */

enum target_xfer_status
record_btrace_target::xfer_partial (enum target_object object,
				    const char *annex, gdb_byte *readbuf,
				    const gdb_byte *writebuf, ULONGEST offset,
				    ULONGEST len, ULONGEST *xfered_len)
{
  /* Filter out requests that don't make sense during replay.  */
  if (replay_memory_access == replay_memory_access_read_only
      && !record_btrace_generating_corefile
      && record_is_replaying (inferior_ptid))
    {
      switch (object)
	{
	case TARGET_OBJECT_MEMORY:
	  {
	    struct target_section *section;

	    /* We do not allow writing memory in general.  */
	    if (writebuf != NULL)
	      {
		*xfered_len = len;
		return TARGET_XFER_UNAVAILABLE;
	      }

	    /* We allow reading readonly memory.  */
	    section = target_section_by_addr (this, offset);
	    if (section != NULL)
	      {
		/* Check if the section we found is readonly.  */
		if ((bfd_get_section_flags (section->the_bfd_section->owner,
					    section->the_bfd_section)
		     & SEC_READONLY) != 0)
		  {
		    /* Truncate the request to fit into this section.  */
		    len = std::min (len, section->endaddr - offset);
		    break;
		  }
	      }

	    *xfered_len = len;
	    return TARGET_XFER_UNAVAILABLE;
	  }
	}
    }

  /* Forward the request.  */
  return this->beneath ()->xfer_partial (object, annex, readbuf, writebuf,
					 offset, len, xfered_len);
}

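/* Illustrative summary (not part of the original source): while replaying
   with the default "read-only" replay-memory-access setting,

     x/4xb $pc        reads a read-only code section and is forwarded,
     print some_var   reads writable data and returns TARGET_XFER_UNAVAILABLE,
     set var i = 1    writes memory and returns TARGET_XFER_UNAVAILABLE.

   Only reads from SEC_READONLY sections are passed to the target beneath;
   everything else is reported as unavailable.  */
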
/* The insert_breakpoint method of target record-btrace.  */

int
record_btrace_target::insert_breakpoint (struct gdbarch *gdbarch,
					 struct bp_target_info *bp_tgt)
{
  const char *old;
  int ret;

  /* Inserting breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  TRY
    {
      ret = this->beneath ()->insert_breakpoint (gdbarch, bp_tgt);
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      replay_memory_access = old;
      throw_exception (except);
    }
  END_CATCH
  replay_memory_access = old;

  return ret;
}

/* The remove_breakpoint method of target record-btrace.  */

int
record_btrace_target::remove_breakpoint (struct gdbarch *gdbarch,
					 struct bp_target_info *bp_tgt,
					 enum remove_bp_reason reason)
{
  const char *old;
  int ret;

  /* Removing breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  TRY
    {
      ret = this->beneath ()->remove_breakpoint (gdbarch, bp_tgt, reason);
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      replay_memory_access = old;
      throw_exception (except);
    }
  END_CATCH
  replay_memory_access = old;

  return ret;
}

/* The fetch_registers method of target record-btrace.  */

void
record_btrace_target::fetch_registers (struct regcache *regcache, int regno)
{
  struct btrace_insn_iterator *replay;
  struct thread_info *tp;

  tp = find_thread_ptid (regcache->ptid ());
  gdb_assert (tp != NULL);

  replay = tp->btrace.replay;
  if (replay != NULL && !record_btrace_generating_corefile)
    {
      const struct btrace_insn *insn;
      struct gdbarch *gdbarch;
      int pcreg;

      gdbarch = regcache->arch ();
      pcreg = gdbarch_pc_regnum (gdbarch);
      if (pcreg < 0)
	return;

      /* We can only provide the PC register.  */
      if (regno >= 0 && regno != pcreg)
	return;

      insn = btrace_insn_get (replay);
      gdb_assert (insn != NULL);

      regcache->raw_supply (regno, &insn->pc);
    }
  else
    this->beneath ()->fetch_registers (regcache, regno);
}

/* The store_registers method of target record-btrace.  */

void
record_btrace_target::store_registers (struct regcache *regcache, int regno)
{
  if (!record_btrace_generating_corefile
      && record_is_replaying (regcache->ptid ()))
    error (_("Cannot write registers while replaying."));

  gdb_assert (may_write_registers != 0);

  this->beneath ()->store_registers (regcache, regno);
}

/* The prepare_to_store method of target record-btrace.  */

void
record_btrace_target::prepare_to_store (struct regcache *regcache)
{
  if (!record_btrace_generating_corefile
      && record_is_replaying (regcache->ptid ()))
    return;

  this->beneath ()->prepare_to_store (regcache);
}

1585/* The branch trace frame cache. */
1586
1587struct btrace_frame_cache
1588{
1589 /* The thread. */
1590 struct thread_info *tp;
1591
1592 /* The frame info. */
1593 struct frame_info *frame;
1594
1595 /* The branch trace function segment. */
1596 const struct btrace_function *bfun;
1597};
1598
1599/* A struct btrace_frame_cache hash table indexed by NEXT. */
1600
1601static htab_t bfcache;
1602
1603/* hash_f for htab_create_alloc of bfcache. */
1604
1605static hashval_t
1606bfcache_hash (const void *arg)
1607{
19ba03f4
SM
1608 const struct btrace_frame_cache *cache
1609 = (const struct btrace_frame_cache *) arg;
0b722aec
MM
1610
1611 return htab_hash_pointer (cache->frame);
1612}
1613
1614/* eq_f for htab_create_alloc of bfcache. */
1615
1616static int
1617bfcache_eq (const void *arg1, const void *arg2)
1618{
19ba03f4
SM
1619 const struct btrace_frame_cache *cache1
1620 = (const struct btrace_frame_cache *) arg1;
1621 const struct btrace_frame_cache *cache2
1622 = (const struct btrace_frame_cache *) arg2;
0b722aec
MM
1623
1624 return cache1->frame == cache2->frame;
1625}
1626
1627/* Create a new btrace frame cache. */
1628
1629static struct btrace_frame_cache *
1630bfcache_new (struct frame_info *frame)
1631{
1632 struct btrace_frame_cache *cache;
1633 void **slot;
1634
1635 cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
1636 cache->frame = frame;
1637
1638 slot = htab_find_slot (bfcache, cache, INSERT);
1639 gdb_assert (*slot == NULL);
1640 *slot = cache;
1641
1642 return cache;
1643}
1644
1645/* Extract the branch trace function from a branch trace frame. */
1646
1647static const struct btrace_function *
1648btrace_get_frame_function (struct frame_info *frame)
1649{
1650 const struct btrace_frame_cache *cache;
0b722aec
MM
1651 struct btrace_frame_cache pattern;
1652 void **slot;
1653
1654 pattern.frame = frame;
1655
1656 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1657 if (slot == NULL)
1658 return NULL;
1659
19ba03f4 1660 cache = (const struct btrace_frame_cache *) *slot;
0b722aec
MM
1661 return cache->bfun;
1662}
1663
cecac1ab
MM
1664/* Implement stop_reason method for record_btrace_frame_unwind. */
1665
1666static enum unwind_stop_reason
1667record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1668 void **this_cache)
1669{
0b722aec
MM
1670 const struct btrace_frame_cache *cache;
1671 const struct btrace_function *bfun;
1672
19ba03f4 1673 cache = (const struct btrace_frame_cache *) *this_cache;
0b722aec
MM
1674 bfun = cache->bfun;
1675 gdb_assert (bfun != NULL);
1676
42bfe59e 1677 if (bfun->up == 0)
0b722aec
MM
1678 return UNWIND_UNAVAILABLE;
1679
1680 return UNWIND_NO_REASON;
cecac1ab
MM
1681}
1682
1683/* Implement this_id method for record_btrace_frame_unwind. */
1684
1685static void
1686record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
1687 struct frame_id *this_id)
1688{
0b722aec
MM
1689 const struct btrace_frame_cache *cache;
1690 const struct btrace_function *bfun;
4aeb0dfc 1691 struct btrace_call_iterator it;
0b722aec
MM
1692 CORE_ADDR code, special;
1693
19ba03f4 1694 cache = (const struct btrace_frame_cache *) *this_cache;
0b722aec
MM
1695
1696 bfun = cache->bfun;
1697 gdb_assert (bfun != NULL);
1698
4aeb0dfc
TW
1699 while (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->prev) != 0)
1700 bfun = btrace_call_get (&it);
0b722aec
MM
1701
1702 code = get_frame_func (this_frame);
1703 special = bfun->number;
1704
1705 *this_id = frame_id_build_unavailable_stack_special (code, special);
1706
1707 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1708 btrace_get_bfun_name (cache->bfun),
1709 core_addr_to_string_nz (this_id->code_addr),
1710 core_addr_to_string_nz (this_id->special_addr));
cecac1ab
MM
1711}
1712
1713/* Implement prev_register method for record_btrace_frame_unwind. */
1714
1715static struct value *
1716record_btrace_frame_prev_register (struct frame_info *this_frame,
1717 void **this_cache,
1718 int regnum)
1719{
0b722aec
MM
1720 const struct btrace_frame_cache *cache;
1721 const struct btrace_function *bfun, *caller;
42bfe59e 1722 struct btrace_call_iterator it;
0b722aec
MM
1723 struct gdbarch *gdbarch;
1724 CORE_ADDR pc;
1725 int pcreg;
1726
1727 gdbarch = get_frame_arch (this_frame);
1728 pcreg = gdbarch_pc_regnum (gdbarch);
1729 if (pcreg < 0 || regnum != pcreg)
1730 throw_error (NOT_AVAILABLE_ERROR,
1731 _("Registers are not available in btrace record history"));
1732
19ba03f4 1733 cache = (const struct btrace_frame_cache *) *this_cache;
0b722aec
MM
1734 bfun = cache->bfun;
1735 gdb_assert (bfun != NULL);
1736
42bfe59e 1737 if (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->up) == 0)
0b722aec
MM
1738 throw_error (NOT_AVAILABLE_ERROR,
1739 _("No caller in btrace record history"));
1740
42bfe59e
TW
1741 caller = btrace_call_get (&it);
1742
0b722aec 1743 if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
0860c437 1744 pc = caller->insn.front ().pc;
0b722aec
MM
1745 else
1746 {
0860c437 1747 pc = caller->insn.back ().pc;
0b722aec
MM
1748 pc += gdb_insn_length (gdbarch, pc);
1749 }
1750
1751 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1752 btrace_get_bfun_name (bfun), bfun->level,
1753 core_addr_to_string_nz (pc));
1754
1755 return frame_unwind_got_address (this_frame, regnum, pc);
cecac1ab
MM
1756}
1757
1758/* Implement sniffer method for record_btrace_frame_unwind. */
1759
1760static int
1761record_btrace_frame_sniffer (const struct frame_unwind *self,
1762 struct frame_info *this_frame,
1763 void **this_cache)
1764{
0b722aec
MM
1765 const struct btrace_function *bfun;
1766 struct btrace_frame_cache *cache;
cecac1ab 1767 struct thread_info *tp;
0b722aec 1768 struct frame_info *next;
cecac1ab
MM
1769
1770 /* THIS_FRAME does not contain a reference to its thread. */
00431a78 1771 tp = inferior_thread ();
cecac1ab 1772
0b722aec
MM
1773 bfun = NULL;
1774 next = get_next_frame (this_frame);
1775 if (next == NULL)
1776 {
1777 const struct btrace_insn_iterator *replay;
1778
1779 replay = tp->btrace.replay;
1780 if (replay != NULL)
08c3f6d2 1781 bfun = &replay->btinfo->functions[replay->call_index];
0b722aec
MM
1782 }
1783 else
1784 {
1785 const struct btrace_function *callee;
42bfe59e 1786 struct btrace_call_iterator it;
0b722aec
MM
1787
1788 callee = btrace_get_frame_function (next);
42bfe59e
TW
1789 if (callee == NULL || (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
1790 return 0;
1791
1792 if (btrace_find_call_by_number (&it, &tp->btrace, callee->up) == 0)
1793 return 0;
1794
1795 bfun = btrace_call_get (&it);
0b722aec
MM
1796 }
1797
1798 if (bfun == NULL)
1799 return 0;
1800
1801 DEBUG ("[frame] sniffed frame for %s on level %d",
1802 btrace_get_bfun_name (bfun), bfun->level);
1803
1804 /* This is our frame. Initialize the frame cache. */
1805 cache = bfcache_new (this_frame);
1806 cache->tp = tp;
1807 cache->bfun = bfun;
1808
1809 *this_cache = cache;
1810 return 1;
1811}
1812
1813/* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1814
1815static int
1816record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
1817 struct frame_info *this_frame,
1818 void **this_cache)
1819{
1820 const struct btrace_function *bfun, *callee;
1821 struct btrace_frame_cache *cache;
42bfe59e 1822 struct btrace_call_iterator it;
0b722aec 1823 struct frame_info *next;
42bfe59e 1824 struct thread_info *tinfo;
0b722aec
MM
1825
1826 next = get_next_frame (this_frame);
1827 if (next == NULL)
1828 return 0;
1829
1830 callee = btrace_get_frame_function (next);
1831 if (callee == NULL)
1832 return 0;
1833
1834 if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1835 return 0;
1836
00431a78 1837 tinfo = inferior_thread ();
42bfe59e 1838 if (btrace_find_call_by_number (&it, &tinfo->btrace, callee->up) == 0)
0b722aec
MM
1839 return 0;
1840
42bfe59e
TW
1841 bfun = btrace_call_get (&it);
1842
0b722aec
MM
1843 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1844 btrace_get_bfun_name (bfun), bfun->level);
1845
1846 /* This is our frame. Initialize the frame cache. */
1847 cache = bfcache_new (this_frame);
42bfe59e 1848 cache->tp = tinfo;
0b722aec
MM
1849 cache->bfun = bfun;
1850
1851 *this_cache = cache;
1852 return 1;
1853}
1854
1855static void
1856record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
1857{
1858 struct btrace_frame_cache *cache;
1859 void **slot;
1860
19ba03f4 1861 cache = (struct btrace_frame_cache *) this_cache;
0b722aec
MM
1862
1863 slot = htab_find_slot (bfcache, cache, NO_INSERT);
1864 gdb_assert (slot != NULL);
1865
1866 htab_remove_elt (bfcache, cache);
cecac1ab
MM
1867}
1868
 1869/* btrace recording does not store previous memory content, nor the stack
 1870 frames' content.  Any unwinding would return erroneous results as the stack
 1871 contents no longer match the changed PC value restored from history.
1872 Therefore this unwinder reports any possibly unwound registers as
1873 <unavailable>. */
1874
0b722aec 1875const struct frame_unwind record_btrace_frame_unwind =
cecac1ab
MM
1876{
1877 NORMAL_FRAME,
1878 record_btrace_frame_unwind_stop_reason,
1879 record_btrace_frame_this_id,
1880 record_btrace_frame_prev_register,
1881 NULL,
0b722aec
MM
1882 record_btrace_frame_sniffer,
1883 record_btrace_frame_dealloc_cache
1884};
1885
1886const struct frame_unwind record_btrace_tailcall_frame_unwind =
1887{
1888 TAILCALL_FRAME,
1889 record_btrace_frame_unwind_stop_reason,
1890 record_btrace_frame_this_id,
1891 record_btrace_frame_prev_register,
1892 NULL,
1893 record_btrace_tailcall_frame_sniffer,
1894 record_btrace_frame_dealloc_cache
cecac1ab 1895};
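/* Illustration only (a hypothetical replay session, not verbatim output):
   while replaying, the unwinders above rebuild the call stack from the
   recorded branch trace, so backtraces work, but register values other than
   the PC were never recorded and therefore read as unavailable, e.g.

     (gdb) record goto 100
     (gdb) backtrace          # frames reconstructed from the branch trace
     (gdb) info registers     # most registers reported as unavailable

   The exact output depends on the target; this is only a sketch of the
   behavior described in the comment above.  */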
b2f4cfde 1896
f6ac5f3d 1897/* Implement the get_unwinder method. */
ac01945b 1898
f6ac5f3d
PA
1899const struct frame_unwind *
1900record_btrace_target::get_unwinder ()
ac01945b
TT
1901{
1902 return &record_btrace_frame_unwind;
1903}
1904
f6ac5f3d 1905/* Implement the get_tailcall_unwinder method. */
ac01945b 1906
f6ac5f3d
PA
1907const struct frame_unwind *
1908record_btrace_target::get_tailcall_unwinder ()
ac01945b
TT
1909{
1910 return &record_btrace_tailcall_frame_unwind;
1911}
1912
987e68b1
MM
1913/* Return a human-readable string for FLAG. */
1914
1915static const char *
1916btrace_thread_flag_to_str (enum btrace_thread_flag flag)
1917{
1918 switch (flag)
1919 {
1920 case BTHR_STEP:
1921 return "step";
1922
1923 case BTHR_RSTEP:
1924 return "reverse-step";
1925
1926 case BTHR_CONT:
1927 return "cont";
1928
1929 case BTHR_RCONT:
1930 return "reverse-cont";
1931
1932 case BTHR_STOP:
1933 return "stop";
1934 }
1935
1936 return "<invalid>";
1937}
1938
52834460
MM
1939/* Indicate that TP should be resumed according to FLAG. */
1940
1941static void
1942record_btrace_resume_thread (struct thread_info *tp,
1943 enum btrace_thread_flag flag)
1944{
1945 struct btrace_thread_info *btinfo;
1946
43792cf0 1947 DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp),
987e68b1 1948 target_pid_to_str (tp->ptid), flag, btrace_thread_flag_to_str (flag));
52834460
MM
1949
1950 btinfo = &tp->btrace;
1951
52834460 1952 /* Fetch the latest branch trace. */
4a4495d6 1953 btrace_fetch (tp, record_btrace_get_cpu ());
52834460 1954
0ca912df
MM
1955 /* A resume request overwrites a preceding resume or stop request. */
1956 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
52834460
MM
1957 btinfo->flags |= flag;
1958}
1959
ec71cc2f
MM
1960/* Get the current frame for TP. */
1961
79b8d3b0
TT
1962static struct frame_id
1963get_thread_current_frame_id (struct thread_info *tp)
ec71cc2f 1964{
79b8d3b0 1965 struct frame_id id;
ec71cc2f
MM
1966 int executing;
1967
00431a78
PA
1968 /* Set current thread, which is implicitly used by
1969 get_current_frame. */
1970 scoped_restore_current_thread restore_thread;
1971
1972 switch_to_thread (tp);
ec71cc2f
MM
1973
1974 /* Clear the executing flag to allow changes to the current frame.
1975 We are not actually running, yet. We just started a reverse execution
1976 command or a record goto command.
1977 For the latter, EXECUTING is false and this has no effect.
f6ac5f3d 1978 For the former, EXECUTING is true and we're in wait, about to
ec71cc2f
MM
1979 move the thread. Since we need to recompute the stack, we temporarily
 1980 set EXECUTING to false. */
00431a78
PA
1981 executing = tp->executing;
1982 set_executing (inferior_ptid, false);
ec71cc2f 1983
79b8d3b0 1984 id = null_frame_id;
ec71cc2f
MM
1985 TRY
1986 {
79b8d3b0 1987 id = get_frame_id (get_current_frame ());
ec71cc2f
MM
1988 }
1989 CATCH (except, RETURN_MASK_ALL)
1990 {
1991 /* Restore the previous execution state. */
1992 set_executing (inferior_ptid, executing);
1993
ec71cc2f
MM
1994 throw_exception (except);
1995 }
1996 END_CATCH
1997
1998 /* Restore the previous execution state. */
1999 set_executing (inferior_ptid, executing);
2000
79b8d3b0 2001 return id;
ec71cc2f
MM
2002}
2003
52834460
MM
2004/* Start replaying a thread. */
2005
2006static struct btrace_insn_iterator *
2007record_btrace_start_replaying (struct thread_info *tp)
2008{
52834460
MM
2009 struct btrace_insn_iterator *replay;
2010 struct btrace_thread_info *btinfo;
52834460
MM
2011
2012 btinfo = &tp->btrace;
2013 replay = NULL;
2014
2015 /* We can't start replaying without trace. */
b54b03bd 2016 if (btinfo->functions.empty ())
52834460
MM
2017 return NULL;
2018
52834460
MM
 2019 /* GDB stores the current frame_id when stepping in order to detect steps
2020 into subroutines.
2021 Since frames are computed differently when we're replaying, we need to
2022 recompute those stored frames and fix them up so we can still detect
2023 subroutines after we started replaying. */
492d29ea 2024 TRY
52834460 2025 {
52834460
MM
2026 struct frame_id frame_id;
2027 int upd_step_frame_id, upd_step_stack_frame_id;
2028
2029 /* The current frame without replaying - computed via normal unwind. */
79b8d3b0 2030 frame_id = get_thread_current_frame_id (tp);
52834460
MM
2031
2032 /* Check if we need to update any stepping-related frame id's. */
2033 upd_step_frame_id = frame_id_eq (frame_id,
2034 tp->control.step_frame_id);
2035 upd_step_stack_frame_id = frame_id_eq (frame_id,
2036 tp->control.step_stack_frame_id);
2037
2038 /* We start replaying at the end of the branch trace. This corresponds
2039 to the current instruction. */
8d749320 2040 replay = XNEW (struct btrace_insn_iterator);
52834460
MM
2041 btrace_insn_end (replay, btinfo);
2042
31fd9caa
MM
2043 /* Skip gaps at the end of the trace. */
2044 while (btrace_insn_get (replay) == NULL)
2045 {
2046 unsigned int steps;
2047
2048 steps = btrace_insn_prev (replay, 1);
2049 if (steps == 0)
2050 error (_("No trace."));
2051 }
2052
52834460
MM
2053 /* We're not replaying, yet. */
2054 gdb_assert (btinfo->replay == NULL);
2055 btinfo->replay = replay;
2056
2057 /* Make sure we're not using any stale registers. */
00431a78 2058 registers_changed_thread (tp);
52834460
MM
2059
2060 /* The current frame with replaying - computed via btrace unwind. */
79b8d3b0 2061 frame_id = get_thread_current_frame_id (tp);
52834460
MM
2062
2063 /* Replace stepping related frames where necessary. */
2064 if (upd_step_frame_id)
2065 tp->control.step_frame_id = frame_id;
2066 if (upd_step_stack_frame_id)
2067 tp->control.step_stack_frame_id = frame_id;
2068 }
492d29ea 2069 CATCH (except, RETURN_MASK_ALL)
52834460
MM
2070 {
2071 xfree (btinfo->replay);
2072 btinfo->replay = NULL;
2073
00431a78 2074 registers_changed_thread (tp);
52834460
MM
2075
2076 throw_exception (except);
2077 }
492d29ea 2078 END_CATCH
52834460
MM
2079
2080 return replay;
2081}
2082
2083/* Stop replaying a thread. */
2084
2085static void
2086record_btrace_stop_replaying (struct thread_info *tp)
2087{
2088 struct btrace_thread_info *btinfo;
2089
2090 btinfo = &tp->btrace;
2091
2092 xfree (btinfo->replay);
2093 btinfo->replay = NULL;
2094
2095 /* Make sure we're not leaving any stale registers. */
00431a78 2096 registers_changed_thread (tp);
52834460
MM
2097}
2098
e3cfc1c7
MM
2099/* Stop replaying TP if it is at the end of its execution history. */
2100
2101static void
2102record_btrace_stop_replaying_at_end (struct thread_info *tp)
2103{
2104 struct btrace_insn_iterator *replay, end;
2105 struct btrace_thread_info *btinfo;
2106
2107 btinfo = &tp->btrace;
2108 replay = btinfo->replay;
2109
2110 if (replay == NULL)
2111 return;
2112
2113 btrace_insn_end (&end, btinfo);
2114
2115 if (btrace_insn_cmp (replay, &end) == 0)
2116 record_btrace_stop_replaying (tp);
2117}
2118
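/* Illustration (a sketch only): a thread starts replaying on its first
   backward-motion or "record goto" command and stops replaying again once
   its replay position has caught up with the end of the recorded history;
   from then on, resume requests are simply forwarded to the target beneath.
   record_btrace_stop_replaying_at_end above handles that final
   transition.  */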
f6ac5f3d 2119/* The resume method of target record-btrace. */
b2f4cfde 2120
f6ac5f3d
PA
2121void
2122record_btrace_target::resume (ptid_t ptid, int step, enum gdb_signal signal)
b2f4cfde 2123{
d2939ba2 2124 enum btrace_thread_flag flag, cflag;
52834460 2125
987e68b1 2126 DEBUG ("resume %s: %s%s", target_pid_to_str (ptid),
f6ac5f3d 2127 ::execution_direction == EXEC_REVERSE ? "reverse-" : "",
987e68b1 2128 step ? "step" : "cont");
52834460 2129
0ca912df
MM
2130 /* Store the execution direction of the last resume.
2131
f6ac5f3d 2132 If there is more than one resume call, we have to rely on infrun
0ca912df 2133 to not change the execution direction in-between. */
f6ac5f3d 2134 record_btrace_resume_exec_dir = ::execution_direction;
70ad5bff 2135
0ca912df 2136 /* As long as we're not replaying, just forward the request.
52834460 2137
0ca912df
MM
2138 For non-stop targets this means that no thread is replaying. In order to
2139 make progress, we may need to explicitly move replaying threads to the end
2140 of their execution history. */
f6ac5f3d
PA
2141 if ((::execution_direction != EXEC_REVERSE)
2142 && !record_is_replaying (minus_one_ptid))
b2f4cfde 2143 {
b6a8c27b 2144 this->beneath ()->resume (ptid, step, signal);
04c4fe8c 2145 return;
b2f4cfde
MM
2146 }
2147
52834460 2148 /* Compute the btrace thread flag for the requested move. */
f6ac5f3d 2149 if (::execution_direction == EXEC_REVERSE)
d2939ba2
MM
2150 {
2151 flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;
2152 cflag = BTHR_RCONT;
2153 }
52834460 2154 else
d2939ba2
MM
2155 {
2156 flag = step == 0 ? BTHR_CONT : BTHR_STEP;
2157 cflag = BTHR_CONT;
2158 }
52834460 2159
52834460 2160 /* We just indicate the resume intent here. The actual stepping happens in
d2939ba2
MM
2161 record_btrace_wait below.
2162
2163 For all-stop targets, we only step INFERIOR_PTID and continue others. */
2164 if (!target_is_non_stop_p ())
2165 {
26a57c92 2166 gdb_assert (inferior_ptid.matches (ptid));
d2939ba2 2167
08036331
PA
2168 for (thread_info *tp : all_non_exited_threads (ptid))
2169 {
2170 if (tp->ptid.matches (inferior_ptid))
2171 record_btrace_resume_thread (tp, flag);
2172 else
2173 record_btrace_resume_thread (tp, cflag);
2174 }
d2939ba2
MM
2175 }
2176 else
2177 {
08036331
PA
2178 for (thread_info *tp : all_non_exited_threads (ptid))
2179 record_btrace_resume_thread (tp, flag);
d2939ba2 2180 }
70ad5bff
MM
2181
2182 /* Async support. */
2183 if (target_can_async_p ())
2184 {
6a3753b3 2185 target_async (1);
70ad5bff
MM
2186 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2187 }
52834460
MM
2188}
2189
f6ac5f3d 2190/* The commit_resume method of target record-btrace. */
85ad3aaf 2191
f6ac5f3d
PA
2192void
2193record_btrace_target::commit_resume ()
85ad3aaf 2194{
f6ac5f3d
PA
2195 if ((::execution_direction != EXEC_REVERSE)
2196 && !record_is_replaying (minus_one_ptid))
b6a8c27b 2197 beneath ()->commit_resume ();
85ad3aaf
PA
2198}
2199
987e68b1
MM
2200/* Cancel resuming TP. */
2201
2202static void
2203record_btrace_cancel_resume (struct thread_info *tp)
2204{
2205 enum btrace_thread_flag flags;
2206
2207 flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
2208 if (flags == 0)
2209 return;
2210
43792cf0
PA
2211 DEBUG ("cancel resume thread %s (%s): %x (%s)",
2212 print_thread_id (tp),
987e68b1
MM
2213 target_pid_to_str (tp->ptid), flags,
2214 btrace_thread_flag_to_str (flags));
2215
2216 tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
e3cfc1c7 2217 record_btrace_stop_replaying_at_end (tp);
52834460
MM
2218}
2219
2220/* Return a target_waitstatus indicating that we ran out of history. */
2221
2222static struct target_waitstatus
2223btrace_step_no_history (void)
2224{
2225 struct target_waitstatus status;
2226
2227 status.kind = TARGET_WAITKIND_NO_HISTORY;
2228
2229 return status;
2230}
2231
2232/* Return a target_waitstatus indicating that a step finished. */
2233
2234static struct target_waitstatus
2235btrace_step_stopped (void)
2236{
2237 struct target_waitstatus status;
2238
2239 status.kind = TARGET_WAITKIND_STOPPED;
2240 status.value.sig = GDB_SIGNAL_TRAP;
2241
2242 return status;
2243}
2244
6e4879f0
MM
2245/* Return a target_waitstatus indicating that a thread was stopped as
2246 requested. */
2247
2248static struct target_waitstatus
2249btrace_step_stopped_on_request (void)
2250{
2251 struct target_waitstatus status;
2252
2253 status.kind = TARGET_WAITKIND_STOPPED;
2254 status.value.sig = GDB_SIGNAL_0;
2255
2256 return status;
2257}
2258
d825d248
MM
2259/* Return a target_waitstatus indicating a spurious stop. */
2260
2261static struct target_waitstatus
2262btrace_step_spurious (void)
2263{
2264 struct target_waitstatus status;
2265
2266 status.kind = TARGET_WAITKIND_SPURIOUS;
2267
2268 return status;
2269}
2270
e3cfc1c7
MM
2271/* Return a target_waitstatus indicating that the thread was not resumed. */
2272
2273static struct target_waitstatus
2274btrace_step_no_resumed (void)
2275{
2276 struct target_waitstatus status;
2277
2278 status.kind = TARGET_WAITKIND_NO_RESUMED;
2279
2280 return status;
2281}
2282
2283/* Return a target_waitstatus indicating that we should wait again. */
2284
2285static struct target_waitstatus
2286btrace_step_again (void)
2287{
2288 struct target_waitstatus status;
2289
2290 status.kind = TARGET_WAITKIND_IGNORE;
2291
2292 return status;
2293}
2294
52834460
MM
2295/* Clear the record histories. */
2296
2297static void
2298record_btrace_clear_histories (struct btrace_thread_info *btinfo)
2299{
2300 xfree (btinfo->insn_history);
2301 xfree (btinfo->call_history);
2302
2303 btinfo->insn_history = NULL;
2304 btinfo->call_history = NULL;
2305}
2306
3c615f99
MM
2307/* Check whether TP's current replay position is at a breakpoint. */
2308
2309static int
2310record_btrace_replay_at_breakpoint (struct thread_info *tp)
2311{
2312 struct btrace_insn_iterator *replay;
2313 struct btrace_thread_info *btinfo;
2314 const struct btrace_insn *insn;
3c615f99
MM
2315
2316 btinfo = &tp->btrace;
2317 replay = btinfo->replay;
2318
2319 if (replay == NULL)
2320 return 0;
2321
2322 insn = btrace_insn_get (replay);
2323 if (insn == NULL)
2324 return 0;
2325
00431a78 2326 return record_check_stopped_by_breakpoint (tp->inf->aspace, insn->pc,
3c615f99
MM
2327 &btinfo->stop_reason);
2328}
2329
d825d248 2330/* Step one instruction in forward direction. */
52834460
MM
2331
2332static struct target_waitstatus
d825d248 2333record_btrace_single_step_forward (struct thread_info *tp)
52834460 2334{
b61ce85c 2335 struct btrace_insn_iterator *replay, end, start;
52834460 2336 struct btrace_thread_info *btinfo;
52834460 2337
d825d248
MM
2338 btinfo = &tp->btrace;
2339 replay = btinfo->replay;
2340
2341 /* We're done if we're not replaying. */
2342 if (replay == NULL)
2343 return btrace_step_no_history ();
2344
011c71b6
MM
2345 /* Check if we're stepping a breakpoint. */
2346 if (record_btrace_replay_at_breakpoint (tp))
2347 return btrace_step_stopped ();
2348
b61ce85c
MM
2349 /* Skip gaps during replay. If we end up at a gap (at the end of the trace),
2350 jump back to the instruction at which we started. */
2351 start = *replay;
d825d248
MM
2352 do
2353 {
2354 unsigned int steps;
2355
e3cfc1c7
MM
2356 /* We will bail out here if we continue stepping after reaching the end
2357 of the execution history. */
d825d248
MM
2358 steps = btrace_insn_next (replay, 1);
2359 if (steps == 0)
b61ce85c
MM
2360 {
2361 *replay = start;
2362 return btrace_step_no_history ();
2363 }
d825d248
MM
2364 }
2365 while (btrace_insn_get (replay) == NULL);
2366
2367 /* Determine the end of the instruction trace. */
2368 btrace_insn_end (&end, btinfo);
2369
e3cfc1c7
MM
2370 /* The execution trace contains (and ends with) the current instruction.
2371 This instruction has not been executed, yet, so the trace really ends
2372 one instruction earlier. */
d825d248 2373 if (btrace_insn_cmp (replay, &end) == 0)
e3cfc1c7 2374 return btrace_step_no_history ();
d825d248
MM
2375
2376 return btrace_step_spurious ();
2377}
2378
2379/* Step one instruction in backward direction. */
2380
2381static struct target_waitstatus
2382record_btrace_single_step_backward (struct thread_info *tp)
2383{
b61ce85c 2384 struct btrace_insn_iterator *replay, start;
d825d248 2385 struct btrace_thread_info *btinfo;
e59fa00f 2386
52834460
MM
2387 btinfo = &tp->btrace;
2388 replay = btinfo->replay;
2389
d825d248
MM
2390 /* Start replaying if we're not already doing so. */
2391 if (replay == NULL)
2392 replay = record_btrace_start_replaying (tp);
2393
2394 /* If we can't step any further, we reached the end of the history.
b61ce85c
MM
2395 Skip gaps during replay. If we end up at a gap (at the beginning of
2396 the trace), jump back to the instruction at which we started. */
2397 start = *replay;
d825d248
MM
2398 do
2399 {
2400 unsigned int steps;
2401
2402 steps = btrace_insn_prev (replay, 1);
2403 if (steps == 0)
b61ce85c
MM
2404 {
2405 *replay = start;
2406 return btrace_step_no_history ();
2407 }
d825d248
MM
2408 }
2409 while (btrace_insn_get (replay) == NULL);
2410
011c71b6
MM
2411 /* Check if we're stepping a breakpoint.
2412
2413 For reverse-stepping, this check is after the step. There is logic in
2414 infrun.c that handles reverse-stepping separately. See, for example,
2415 proceed and adjust_pc_after_break.
2416
2417 This code assumes that for reverse-stepping, PC points to the last
2418 de-executed instruction, whereas for forward-stepping PC points to the
2419 next to-be-executed instruction. */
2420 if (record_btrace_replay_at_breakpoint (tp))
2421 return btrace_step_stopped ();
2422
d825d248
MM
2423 return btrace_step_spurious ();
2424}
2425
2426/* Step a single thread. */
2427
2428static struct target_waitstatus
2429record_btrace_step_thread (struct thread_info *tp)
2430{
2431 struct btrace_thread_info *btinfo;
2432 struct target_waitstatus status;
2433 enum btrace_thread_flag flags;
2434
2435 btinfo = &tp->btrace;
2436
6e4879f0
MM
2437 flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
2438 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
52834460 2439
43792cf0 2440 DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp),
987e68b1
MM
2441 target_pid_to_str (tp->ptid), flags,
2442 btrace_thread_flag_to_str (flags));
52834460 2443
6e4879f0
MM
2444 /* We can't step without an execution history. */
2445 if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
2446 return btrace_step_no_history ();
2447
52834460
MM
2448 switch (flags)
2449 {
2450 default:
2451 internal_error (__FILE__, __LINE__, _("invalid stepping type."));
2452
6e4879f0
MM
2453 case BTHR_STOP:
2454 return btrace_step_stopped_on_request ();
2455
52834460 2456 case BTHR_STEP:
d825d248
MM
2457 status = record_btrace_single_step_forward (tp);
2458 if (status.kind != TARGET_WAITKIND_SPURIOUS)
e3cfc1c7 2459 break;
52834460
MM
2460
2461 return btrace_step_stopped ();
2462
2463 case BTHR_RSTEP:
d825d248
MM
2464 status = record_btrace_single_step_backward (tp);
2465 if (status.kind != TARGET_WAITKIND_SPURIOUS)
e3cfc1c7 2466 break;
52834460
MM
2467
2468 return btrace_step_stopped ();
2469
2470 case BTHR_CONT:
e3cfc1c7
MM
2471 status = record_btrace_single_step_forward (tp);
2472 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2473 break;
52834460 2474
e3cfc1c7
MM
2475 btinfo->flags |= flags;
2476 return btrace_step_again ();
52834460
MM
2477
2478 case BTHR_RCONT:
e3cfc1c7
MM
2479 status = record_btrace_single_step_backward (tp);
2480 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2481 break;
52834460 2482
e3cfc1c7
MM
2483 btinfo->flags |= flags;
2484 return btrace_step_again ();
2485 }
d825d248 2486
f6ac5f3d 2487 /* We keep threads moving at the end of their execution history. The wait
e3cfc1c7
MM
 2488 method will stop the thread for which the event is reported. */
2489 if (status.kind == TARGET_WAITKIND_NO_HISTORY)
2490 btinfo->flags |= flags;
52834460 2491
e3cfc1c7 2492 return status;
b2f4cfde
MM
2493}
2494
a6b5be76
MM
2495/* Announce further events if necessary. */
2496
2497static void
53127008
SM
2498record_btrace_maybe_mark_async_event
2499 (const std::vector<thread_info *> &moving,
2500 const std::vector<thread_info *> &no_history)
a6b5be76 2501{
53127008
SM
2502 bool more_moving = !moving.empty ();
 2503 bool more_no_history = !no_history.empty ();
a6b5be76
MM
2504
2505 if (!more_moving && !more_no_history)
2506 return;
2507
2508 if (more_moving)
2509 DEBUG ("movers pending");
2510
2511 if (more_no_history)
2512 DEBUG ("no-history pending");
2513
2514 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2515}
2516
f6ac5f3d 2517/* The wait method of target record-btrace. */
b2f4cfde 2518
f6ac5f3d
PA
2519ptid_t
2520record_btrace_target::wait (ptid_t ptid, struct target_waitstatus *status,
2521 int options)
b2f4cfde 2522{
53127008
SM
2523 std::vector<thread_info *> moving;
2524 std::vector<thread_info *> no_history;
52834460
MM
2525
2526 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);
2527
b2f4cfde 2528 /* As long as we're not replaying, just forward the request. */
f6ac5f3d
PA
2529 if ((::execution_direction != EXEC_REVERSE)
2530 && !record_is_replaying (minus_one_ptid))
b2f4cfde 2531 {
b6a8c27b 2532 return this->beneath ()->wait (ptid, status, options);
b2f4cfde
MM
2533 }
2534
e3cfc1c7 2535 /* Keep a work list of moving threads. */
08036331
PA
2536 for (thread_info *tp : all_non_exited_threads (ptid))
2537 if ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0)
2538 moving.push_back (tp);
e3cfc1c7 2539
53127008 2540 if (moving.empty ())
52834460 2541 {
e3cfc1c7 2542 *status = btrace_step_no_resumed ();
52834460 2543
e3cfc1c7 2544 DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid),
23fdd69e 2545 target_waitstatus_to_string (status).c_str ());
e3cfc1c7 2546
e3cfc1c7 2547 return null_ptid;
52834460
MM
2548 }
2549
e3cfc1c7
MM
2550 /* Step moving threads one by one, one step each, until either one thread
2551 reports an event or we run out of threads to step.
2552
2553 When stepping more than one thread, chances are that some threads reach
2554 the end of their execution history earlier than others. If we reported
2555 this immediately, all-stop on top of non-stop would stop all threads and
2556 resume the same threads next time. And we would report the same thread
2557 having reached the end of its execution history again.
2558
2559 In the worst case, this would starve the other threads. But even if other
2560 threads would be allowed to make progress, this would result in far too
2561 many intermediate stops.
2562
2563 We therefore delay the reporting of "no execution history" until we have
2564 nothing else to report. By this time, all threads should have moved to
2565 either the beginning or the end of their execution history. There will
2566 be a single user-visible stop. */
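  /* Worked example (hypothetical): threads A and B are both replaying and
     were resumed with BTHR_RCONT.  If A reaches the beginning of its history
     first, it is moved to NO_HISTORY while B keeps being stepped.  Only when
     B reports some other event, or also runs out of history, do we stop
     stepping; if nothing else happened, a single no-history stop is then
     reported for one of the threads in NO_HISTORY.  */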
53127008
SM
2567 struct thread_info *eventing = NULL;
2568 while ((eventing == NULL) && !moving.empty ())
e3cfc1c7 2569 {
53127008 2570 for (unsigned int ix = 0; eventing == NULL && ix < moving.size ();)
e3cfc1c7 2571 {
53127008
SM
2572 thread_info *tp = moving[ix];
2573
e3cfc1c7
MM
2574 *status = record_btrace_step_thread (tp);
2575
2576 switch (status->kind)
2577 {
2578 case TARGET_WAITKIND_IGNORE:
2579 ix++;
2580 break;
2581
2582 case TARGET_WAITKIND_NO_HISTORY:
53127008 2583 no_history.push_back (ordered_remove (moving, ix));
e3cfc1c7
MM
2584 break;
2585
2586 default:
53127008 2587 eventing = unordered_remove (moving, ix);
e3cfc1c7
MM
2588 break;
2589 }
2590 }
2591 }
2592
2593 if (eventing == NULL)
2594 {
2595 /* We started with at least one moving thread. This thread must have
2596 either stopped or reached the end of its execution history.
2597
2598 In the former case, EVENTING must not be NULL.
2599 In the latter case, NO_HISTORY must not be empty. */
53127008 2600 gdb_assert (!no_history.empty ());
e3cfc1c7
MM
2601
2602 /* We kept threads moving at the end of their execution history. Stop
2603 EVENTING now that we are going to report its stop. */
53127008 2604 eventing = unordered_remove (no_history, 0);
e3cfc1c7
MM
2605 eventing->btrace.flags &= ~BTHR_MOVE;
2606
2607 *status = btrace_step_no_history ();
2608 }
2609
2610 gdb_assert (eventing != NULL);
2611
2612 /* We kept threads replaying at the end of their execution history. Stop
2613 replaying EVENTING now that we are going to report its stop. */
2614 record_btrace_stop_replaying_at_end (eventing);
52834460
MM
2615
2616 /* Stop all other threads. */
5953356c 2617 if (!target_is_non_stop_p ())
53127008 2618 {
08036331 2619 for (thread_info *tp : all_non_exited_threads ())
53127008
SM
2620 record_btrace_cancel_resume (tp);
2621 }
52834460 2622
a6b5be76
MM
2623 /* In async mode, we need to announce further events. */
2624 if (target_is_async_p ())
2625 record_btrace_maybe_mark_async_event (moving, no_history);
2626
52834460 2627 /* Start record histories anew from the current position. */
e3cfc1c7 2628 record_btrace_clear_histories (&eventing->btrace);
52834460
MM
2629
2630 /* We moved the replay position but did not update registers. */
00431a78 2631 registers_changed_thread (eventing);
e3cfc1c7 2632
43792cf0
PA
2633 DEBUG ("wait ended by thread %s (%s): %s",
2634 print_thread_id (eventing),
e3cfc1c7 2635 target_pid_to_str (eventing->ptid),
23fdd69e 2636 target_waitstatus_to_string (status).c_str ());
52834460 2637
e3cfc1c7 2638 return eventing->ptid;
52834460
MM
2639}
2640
f6ac5f3d 2641/* The stop method of target record-btrace. */
6e4879f0 2642
f6ac5f3d
PA
2643void
2644record_btrace_target::stop (ptid_t ptid)
6e4879f0
MM
2645{
2646 DEBUG ("stop %s", target_pid_to_str (ptid));
2647
2648 /* As long as we're not replaying, just forward the request. */
f6ac5f3d
PA
2649 if ((::execution_direction != EXEC_REVERSE)
2650 && !record_is_replaying (minus_one_ptid))
6e4879f0 2651 {
b6a8c27b 2652 this->beneath ()->stop (ptid);
6e4879f0
MM
2653 }
2654 else
2655 {
08036331
PA
2656 for (thread_info *tp : all_non_exited_threads (ptid))
2657 {
2658 tp->btrace.flags &= ~BTHR_MOVE;
2659 tp->btrace.flags |= BTHR_STOP;
2660 }
6e4879f0
MM
2661 }
2662 }
2663
f6ac5f3d 2664/* The can_execute_reverse method of target record-btrace. */
52834460 2665
57810aa7 2666bool
f6ac5f3d 2667record_btrace_target::can_execute_reverse ()
52834460 2668{
57810aa7 2669 return true;
52834460
MM
2670}
2671
f6ac5f3d 2672/* The stopped_by_sw_breakpoint method of target record-btrace. */
52834460 2673
57810aa7 2674bool
f6ac5f3d 2675record_btrace_target::stopped_by_sw_breakpoint ()
52834460 2676{
f6ac5f3d 2677 if (record_is_replaying (minus_one_ptid))
9e8915c6
PA
2678 {
2679 struct thread_info *tp = inferior_thread ();
2680
2681 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2682 }
2683
b6a8c27b 2684 return this->beneath ()->stopped_by_sw_breakpoint ();
9e8915c6
PA
2685}
2686
f6ac5f3d 2687/* The supports_stopped_by_sw_breakpoint method of target
9e8915c6
PA
2688 record-btrace. */
2689
57810aa7 2690bool
f6ac5f3d 2691record_btrace_target::supports_stopped_by_sw_breakpoint ()
9e8915c6 2692{
f6ac5f3d 2693 if (record_is_replaying (minus_one_ptid))
57810aa7 2694 return true;
9e8915c6 2695
b6a8c27b 2696 return this->beneath ()->supports_stopped_by_sw_breakpoint ();
9e8915c6
PA
2697}
2698
f6ac5f3d 2699/* The stopped_by_hw_breakpoint method of target record-btrace. */
9e8915c6 2700
57810aa7 2701bool
f6ac5f3d 2702record_btrace_target::stopped_by_hw_breakpoint ()
9e8915c6 2703{
f6ac5f3d 2704 if (record_is_replaying (minus_one_ptid))
9e8915c6
PA
2705 {
2706 struct thread_info *tp = inferior_thread ();
2707
2708 return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2709 }
2710
b6a8c27b 2711 return this->beneath ()->stopped_by_hw_breakpoint ();
9e8915c6
PA
2712}
2713
f6ac5f3d 2714/* The supports_stopped_by_hw_breakpoint method of target
9e8915c6
PA
2715 record-btrace. */
2716
57810aa7 2717bool
f6ac5f3d 2718record_btrace_target::supports_stopped_by_hw_breakpoint ()
9e8915c6 2719{
f6ac5f3d 2720 if (record_is_replaying (minus_one_ptid))
57810aa7 2721 return true;
52834460 2722
b6a8c27b 2723 return this->beneath ()->supports_stopped_by_hw_breakpoint ();
b2f4cfde
MM
2724}
2725
f6ac5f3d 2726/* The update_thread_list method of target record-btrace. */
e2887aa3 2727
f6ac5f3d
PA
2728void
2729record_btrace_target::update_thread_list ()
e2887aa3 2730{
e8032dde 2731 /* We don't add or remove threads during replay. */
f6ac5f3d 2732 if (record_is_replaying (minus_one_ptid))
e2887aa3
MM
2733 return;
2734
2735 /* Forward the request. */
b6a8c27b 2736 this->beneath ()->update_thread_list ();
e2887aa3
MM
2737}
2738
f6ac5f3d 2739/* The thread_alive method of target record-btrace. */
e2887aa3 2740
57810aa7 2741bool
f6ac5f3d 2742record_btrace_target::thread_alive (ptid_t ptid)
e2887aa3
MM
2743{
2744 /* We don't add or remove threads during replay. */
f6ac5f3d 2745 if (record_is_replaying (minus_one_ptid))
00431a78 2746 return true;
e2887aa3
MM
2747
2748 /* Forward the request. */
b6a8c27b 2749 return this->beneath ()->thread_alive (ptid);
e2887aa3
MM
2750}
2751
066ce621
MM
2752/* Set the replay branch trace instruction iterator. If IT is NULL, replay
2753 is stopped. */
2754
2755static void
2756record_btrace_set_replay (struct thread_info *tp,
2757 const struct btrace_insn_iterator *it)
2758{
2759 struct btrace_thread_info *btinfo;
2760
2761 btinfo = &tp->btrace;
2762
a0f1b963 2763 if (it == NULL)
52834460 2764 record_btrace_stop_replaying (tp);
066ce621
MM
2765 else
2766 {
2767 if (btinfo->replay == NULL)
52834460 2768 record_btrace_start_replaying (tp);
066ce621
MM
2769 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
2770 return;
2771
2772 *btinfo->replay = *it;
00431a78 2773 registers_changed_thread (tp);
066ce621
MM
2774 }
2775
52834460
MM
2776 /* Start anew from the new replay position. */
2777 record_btrace_clear_histories (btinfo);
485668e5 2778
f2ffa92b
PA
2779 inferior_thread ()->suspend.stop_pc
2780 = regcache_read_pc (get_current_regcache ());
485668e5 2781 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
066ce621
MM
2782}
2783
f6ac5f3d 2784/* The goto_record_begin method of target record-btrace. */
066ce621 2785
f6ac5f3d
PA
2786void
2787record_btrace_target::goto_record_begin ()
066ce621
MM
2788{
2789 struct thread_info *tp;
2790 struct btrace_insn_iterator begin;
2791
2792 tp = require_btrace_thread ();
2793
2794 btrace_insn_begin (&begin, &tp->btrace);
b61ce85c
MM
2795
2796 /* Skip gaps at the beginning of the trace. */
2797 while (btrace_insn_get (&begin) == NULL)
2798 {
2799 unsigned int steps;
2800
2801 steps = btrace_insn_next (&begin, 1);
2802 if (steps == 0)
2803 error (_("No trace."));
2804 }
2805
066ce621 2806 record_btrace_set_replay (tp, &begin);
066ce621
MM
2807}
2808
f6ac5f3d 2809/* The goto_record_end method of target record-btrace. */
066ce621 2810
f6ac5f3d
PA
2811void
2812record_btrace_target::goto_record_end ()
066ce621
MM
2813{
2814 struct thread_info *tp;
2815
2816 tp = require_btrace_thread ();
2817
2818 record_btrace_set_replay (tp, NULL);
066ce621
MM
2819}
2820
f6ac5f3d 2821/* The goto_record method of target record-btrace. */
066ce621 2822
f6ac5f3d
PA
2823void
2824record_btrace_target::goto_record (ULONGEST insn)
066ce621
MM
2825{
2826 struct thread_info *tp;
2827 struct btrace_insn_iterator it;
2828 unsigned int number;
2829 int found;
2830
2831 number = insn;
2832
2833 /* Check for wrap-arounds. */
2834 if (number != insn)
2835 error (_("Instruction number out of range."));
2836
2837 tp = require_btrace_thread ();
2838
2839 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
69090cee
TW
2840
2841 /* Check if the instruction could not be found or is a gap. */
2842 if (found == 0 || btrace_insn_get (&it) == NULL)
066ce621
MM
2843 error (_("No such instruction."));
2844
2845 record_btrace_set_replay (tp, &it);
066ce621
MM
2846}
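/* Illustration (hypothetical session): the instruction numbers accepted by
   goto_record are the ones shown by "record instruction-history", e.g.

     (gdb) record instruction-history
     (gdb) record goto 42       # replay from instruction 42
     (gdb) record goto begin
     (gdb) record goto end

   Numbers that do not fit into an unsigned int, are not in the trace, or
   name a gap are rejected above.  */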
2847
f6ac5f3d 2848/* The record_stop_replaying method of target record-btrace. */
797094dd 2849
f6ac5f3d
PA
2850void
2851record_btrace_target::record_stop_replaying ()
797094dd 2852{
08036331 2853 for (thread_info *tp : all_non_exited_threads ())
797094dd
MM
2854 record_btrace_stop_replaying (tp);
2855}
2856
f6ac5f3d 2857/* The execution_direction target method. */
70ad5bff 2858
f6ac5f3d
PA
2859enum exec_direction_kind
2860record_btrace_target::execution_direction ()
70ad5bff
MM
2861{
2862 return record_btrace_resume_exec_dir;
2863}
2864
f6ac5f3d 2865/* The prepare_to_generate_core target method. */
aef92902 2866
f6ac5f3d
PA
2867void
2868record_btrace_target::prepare_to_generate_core ()
aef92902
MM
2869{
2870 record_btrace_generating_corefile = 1;
2871}
2872
f6ac5f3d 2873/* The done_generating_core target method. */
aef92902 2874
f6ac5f3d
PA
2875void
2876record_btrace_target::done_generating_core ()
aef92902
MM
2877{
2878 record_btrace_generating_corefile = 0;
2879}
2880
f4abbc16
MM
2881/* Start recording in BTS format. */
2882
2883static void
cdb34d4a 2884cmd_record_btrace_bts_start (const char *args, int from_tty)
f4abbc16 2885{
f4abbc16
MM
2886 if (args != NULL && *args != 0)
2887 error (_("Invalid argument."));
2888
2889 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2890
492d29ea
PA
2891 TRY
2892 {
95a6b0a1 2893 execute_command ("target record-btrace", from_tty);
492d29ea
PA
2894 }
2895 CATCH (exception, RETURN_MASK_ALL)
f4abbc16
MM
2896 {
2897 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2898 throw_exception (exception);
2899 }
492d29ea 2900 END_CATCH
f4abbc16
MM
2901}
2902
bc504a31 2903/* Start recording in Intel Processor Trace format. */
afedecd3
MM
2904
2905static void
cdb34d4a 2906cmd_record_btrace_pt_start (const char *args, int from_tty)
afedecd3
MM
2907{
2908 if (args != NULL && *args != 0)
2909 error (_("Invalid argument."));
2910
b20a6524 2911 record_btrace_conf.format = BTRACE_FORMAT_PT;
f4abbc16 2912
492d29ea
PA
2913 TRY
2914 {
95a6b0a1 2915 execute_command ("target record-btrace", from_tty);
492d29ea
PA
2916 }
2917 CATCH (exception, RETURN_MASK_ALL)
2918 {
2919 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2920 throw_exception (exception);
2921 }
2922 END_CATCH
afedecd3
MM
2923}
2924
b20a6524
MM
2925/* Alias for "target record". */
2926
2927static void
981a3fb3 2928cmd_record_btrace_start (const char *args, int from_tty)
b20a6524
MM
2929{
2930 if (args != NULL && *args != 0)
2931 error (_("Invalid argument."));
2932
2933 record_btrace_conf.format = BTRACE_FORMAT_PT;
2934
2935 TRY
2936 {
95a6b0a1 2937 execute_command ("target record-btrace", from_tty);
b20a6524
MM
2938 }
2939 CATCH (exception, RETURN_MASK_ALL)
2940 {
2941 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2942
2943 TRY
2944 {
95a6b0a1 2945 execute_command ("target record-btrace", from_tty);
b20a6524 2946 }
b926417a 2947 CATCH (ex, RETURN_MASK_ALL)
b20a6524
MM
2948 {
2949 record_btrace_conf.format = BTRACE_FORMAT_NONE;
b926417a 2950 throw_exception (ex);
b20a6524
MM
2951 }
2952 END_CATCH
2953 }
2954 END_CATCH
2955}
2956
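/* Illustration (hypothetical session, a sketch only): "record btrace" above
   first tries the Intel Processor Trace format and falls back to BTS if
   enabling PT fails; the format-specific commands do not fall back:

     (gdb) record btrace        # try pt, fall back to bts
     (gdb) record btrace pt     # pt only
     (gdb) record btrace bts    # bts only  */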
67b5c0c1
MM
2957/* The "set record btrace" command. */
2958
2959static void
981a3fb3 2960cmd_set_record_btrace (const char *args, int from_tty)
67b5c0c1 2961{
b85310e1
MM
2962 printf_unfiltered (_("\"set record btrace\" must be followed "
2963 "by an appropriate subcommand.\n"));
2964 help_list (set_record_btrace_cmdlist, "set record btrace ",
2965 all_commands, gdb_stdout);
67b5c0c1
MM
2966}
2967
2968/* The "show record btrace" command. */
2969
2970static void
981a3fb3 2971cmd_show_record_btrace (const char *args, int from_tty)
67b5c0c1
MM
2972{
2973 cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
2974}
2975
2976/* The "show record btrace replay-memory-access" command. */
2977
2978static void
2979cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2980 struct cmd_list_element *c, const char *value)
2981{
2982 fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
2983 replay_memory_access);
2984}
2985
4a4495d6
MM
2986/* The "set record btrace cpu none" command. */
2987
2988static void
2989cmd_set_record_btrace_cpu_none (const char *args, int from_tty)
2990{
2991 if (args != nullptr && *args != 0)
2992 error (_("Trailing junk: '%s'."), args);
2993
2994 record_btrace_cpu_state = CS_NONE;
2995}
2996
2997/* The "set record btrace cpu auto" command. */
2998
2999static void
3000cmd_set_record_btrace_cpu_auto (const char *args, int from_tty)
3001{
3002 if (args != nullptr && *args != 0)
3003 error (_("Trailing junk: '%s'."), args);
3004
3005 record_btrace_cpu_state = CS_AUTO;
3006}
3007
3008/* The "set record btrace cpu" command. */
3009
3010static void
3011cmd_set_record_btrace_cpu (const char *args, int from_tty)
3012{
3013 if (args == nullptr)
3014 args = "";
3015
3016 /* We use a hard-coded vendor string for now. */
3017 unsigned int family, model, stepping;
3018 int l1, l2, matches = sscanf (args, "intel: %u/%u%n/%u%n", &family,
3019 &model, &l1, &stepping, &l2);
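  /* Examples of input accepted by the format above (values illustrative
     only):
       "intel: 6/62"     -> family 6, model 62, stepping defaults to 0
       "intel: 6/62/4"   -> family 6, model 62, stepping 4
     Anything else, including trailing characters after the match, is
     rejected below.  */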
3020 if (matches == 3)
3021 {
3022 if (strlen (args) != l2)
3023 error (_("Trailing junk: '%s'."), args + l2);
3024 }
3025 else if (matches == 2)
3026 {
3027 if (strlen (args) != l1)
3028 error (_("Trailing junk: '%s'."), args + l1);
3029
3030 stepping = 0;
3031 }
3032 else
3033 error (_("Bad format. See \"help set record btrace cpu\"."));
3034
3035 if (USHRT_MAX < family)
3036 error (_("Cpu family too big."));
3037
3038 if (UCHAR_MAX < model)
3039 error (_("Cpu model too big."));
3040
3041 if (UCHAR_MAX < stepping)
3042 error (_("Cpu stepping too big."));
3043
3044 record_btrace_cpu.vendor = CV_INTEL;
3045 record_btrace_cpu.family = family;
3046 record_btrace_cpu.model = model;
3047 record_btrace_cpu.stepping = stepping;
3048
3049 record_btrace_cpu_state = CS_CPU;
3050}
3051
3052/* The "show record btrace cpu" command. */
3053
3054static void
3055cmd_show_record_btrace_cpu (const char *args, int from_tty)
3056{
4a4495d6
MM
3057 if (args != nullptr && *args != 0)
3058 error (_("Trailing junk: '%s'."), args);
3059
3060 switch (record_btrace_cpu_state)
3061 {
3062 case CS_AUTO:
3063 printf_unfiltered (_("btrace cpu is 'auto'.\n"));
3064 return;
3065
3066 case CS_NONE:
3067 printf_unfiltered (_("btrace cpu is 'none'.\n"));
3068 return;
3069
3070 case CS_CPU:
3071 switch (record_btrace_cpu.vendor)
3072 {
3073 case CV_INTEL:
3074 if (record_btrace_cpu.stepping == 0)
3075 printf_unfiltered (_("btrace cpu is 'intel: %u/%u'.\n"),
3076 record_btrace_cpu.family,
3077 record_btrace_cpu.model);
3078 else
3079 printf_unfiltered (_("btrace cpu is 'intel: %u/%u/%u'.\n"),
3080 record_btrace_cpu.family,
3081 record_btrace_cpu.model,
3082 record_btrace_cpu.stepping);
3083 return;
3084 }
3085 }
3086
3087 error (_("Internal error: bad cpu state."));
3088}
3089
 3090/* The "set record btrace bts" command. */
d33501a5
MM
3091
3092static void
981a3fb3 3093cmd_set_record_btrace_bts (const char *args, int from_tty)
d33501a5
MM
3094{
3095 printf_unfiltered (_("\"set record btrace bts\" must be followed "
b20a6524 3096 "by an appropriate subcommand.\n"));
d33501a5
MM
3097 help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
3098 all_commands, gdb_stdout);
3099}
3100
3101/* The "show record btrace bts" command. */
3102
3103static void
981a3fb3 3104cmd_show_record_btrace_bts (const char *args, int from_tty)
d33501a5
MM
3105{
3106 cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
3107}
3108
b20a6524
MM
3109/* The "set record btrace pt" command. */
3110
3111static void
981a3fb3 3112cmd_set_record_btrace_pt (const char *args, int from_tty)
b20a6524
MM
3113{
3114 printf_unfiltered (_("\"set record btrace pt\" must be followed "
3115 "by an appropriate subcommand.\n"));
3116 help_list (set_record_btrace_pt_cmdlist, "set record btrace pt ",
3117 all_commands, gdb_stdout);
3118}
3119
3120/* The "show record btrace pt" command. */
3121
3122static void
981a3fb3 3123cmd_show_record_btrace_pt (const char *args, int from_tty)
b20a6524
MM
3124{
3125 cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, "");
3126}
3127
3128/* The "record bts buffer-size" show value function. */
3129
3130static void
3131show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
3132 struct cmd_list_element *c,
3133 const char *value)
3134{
3135 fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
3136 value);
3137}
3138
3139/* The "record pt buffer-size" show value function. */
3140
3141static void
3142show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
3143 struct cmd_list_element *c,
3144 const char *value)
3145{
3146 fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
3147 value);
3148}
3149
afedecd3
MM
3150/* Initialize btrace commands. */
3151
3152void
3153_initialize_record_btrace (void)
3154{
f4abbc16
MM
3155 add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
3156 _("Start branch trace recording."), &record_btrace_cmdlist,
3157 "record btrace ", 0, &record_cmdlist);
afedecd3
MM
3158 add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
3159
f4abbc16
MM
3160 add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
3161 _("\
3162Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
3163The processor stores a from/to record for each branch into a cyclic buffer.\n\
3164This format may not be available on all processors."),
3165 &record_btrace_cmdlist);
3166 add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);
3167
b20a6524
MM
3168 add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
3169 _("\
bc504a31 3170Start branch trace recording in Intel Processor Trace format.\n\n\
b20a6524
MM
3171This format may not be available on all processors."),
3172 &record_btrace_cmdlist);
3173 add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);
3174
67b5c0c1
MM
3175 add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
3176 _("Set record options"), &set_record_btrace_cmdlist,
3177 "set record btrace ", 0, &set_record_cmdlist);
3178
3179 add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
3180 _("Show record options"), &show_record_btrace_cmdlist,
3181 "show record btrace ", 0, &show_record_cmdlist);
3182
3183 add_setshow_enum_cmd ("replay-memory-access", no_class,
3184 replay_memory_access_types, &replay_memory_access, _("\
3185Set what memory accesses are allowed during replay."), _("\
3186Show what memory accesses are allowed during replay."),
3187 _("Default is READ-ONLY.\n\n\
3188The btrace record target does not trace data.\n\
3189The memory therefore corresponds to the live target and not \
3190to the current replay position.\n\n\
3191When READ-ONLY, allow accesses to read-only memory during replay.\n\
3192When READ-WRITE, allow accesses to read-only and read-write memory during \
3193replay."),
3194 NULL, cmd_show_replay_memory_access,
3195 &set_record_btrace_cmdlist,
3196 &show_record_btrace_cmdlist);
3197
4a4495d6
MM
3198 add_prefix_cmd ("cpu", class_support, cmd_set_record_btrace_cpu,
3199 _("\
3200Set the cpu to be used for trace decode.\n\n\
55063ddb
TT
3201The format is \"VENDOR:IDENTIFIER\" or \"none\" or \"auto\" (default).\n\
3202For vendor \"intel\" the format is \"FAMILY/MODEL[/STEPPING]\".\n\n\
4a4495d6
MM
3203When decoding branch trace, enable errata workarounds for the specified cpu.\n\
3204The default is \"auto\", which uses the cpu on which the trace was recorded.\n\
3205When GDB does not support that cpu, this option can be used to enable\n\
3206workarounds for a similar cpu that GDB supports.\n\n\
3207When set to \"none\", errata workarounds are disabled."),
3208 &set_record_btrace_cpu_cmdlist,
3209 _("set record btrace cpu "), 1,
3210 &set_record_btrace_cmdlist);
3211
3212 add_cmd ("auto", class_support, cmd_set_record_btrace_cpu_auto, _("\
3213Automatically determine the cpu to be used for trace decode."),
3214 &set_record_btrace_cpu_cmdlist);
3215
3216 add_cmd ("none", class_support, cmd_set_record_btrace_cpu_none, _("\
3217Do not enable errata workarounds for trace decode."),
3218 &set_record_btrace_cpu_cmdlist);
3219
3220 add_cmd ("cpu", class_support, cmd_show_record_btrace_cpu, _("\
3221Show the cpu to be used for trace decode."),
3222 &show_record_btrace_cmdlist);
3223
d33501a5
MM
3224 add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
3225 _("Set record btrace bts options"),
3226 &set_record_btrace_bts_cmdlist,
3227 "set record btrace bts ", 0, &set_record_btrace_cmdlist);
3228
3229 add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
3230 _("Show record btrace bts options"),
3231 &show_record_btrace_bts_cmdlist,
3232 "show record btrace bts ", 0, &show_record_btrace_cmdlist);
3233
3234 add_setshow_uinteger_cmd ("buffer-size", no_class,
3235 &record_btrace_conf.bts.size,
3236 _("Set the record/replay bts buffer size."),
3237 _("Show the record/replay bts buffer size."), _("\
3238When starting recording request a trace buffer of this size. \
3239The actual buffer size may differ from the requested size. \
3240Use \"info record\" to see the actual buffer size.\n\n\
3241Bigger buffers allow longer recording but also take more time to process \
3242the recorded execution trace.\n\n\
b20a6524
MM
3243The trace buffer size may not be changed while recording."), NULL,
3244 show_record_bts_buffer_size_value,
d33501a5
MM
3245 &set_record_btrace_bts_cmdlist,
3246 &show_record_btrace_bts_cmdlist);
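  /* Illustration (hypothetical session): the buffer-size setting registered
     above takes an unsigned integer number of bytes and must be set before
     recording starts, e.g.

       (gdb) set record btrace bts buffer-size 131072
       (gdb) record btrace bts
       (gdb) info record        # shows the buffer size actually obtained

     This mirrors the help text above; the values are examples only.  */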
3247
b20a6524
MM
3248 add_prefix_cmd ("pt", class_support, cmd_set_record_btrace_pt,
3249 _("Set record btrace pt options"),
3250 &set_record_btrace_pt_cmdlist,
3251 "set record btrace pt ", 0, &set_record_btrace_cmdlist);
3252
3253 add_prefix_cmd ("pt", class_support, cmd_show_record_btrace_pt,
3254 _("Show record btrace pt options"),
3255 &show_record_btrace_pt_cmdlist,
3256 "show record btrace pt ", 0, &show_record_btrace_cmdlist);
3257
3258 add_setshow_uinteger_cmd ("buffer-size", no_class,
3259 &record_btrace_conf.pt.size,
3260 _("Set the record/replay pt buffer size."),
3261 _("Show the record/replay pt buffer size."), _("\
3262Bigger buffers allow longer recording but also take more time to process \
3263the recorded execution.\n\
3264The actual buffer size may differ from the requested size. Use \"info record\" \
3265to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
3266 &set_record_btrace_pt_cmdlist,
3267 &show_record_btrace_pt_cmdlist);
3268
d9f719f1 3269 add_target (record_btrace_target_info, record_btrace_target_open);
0b722aec
MM
3270
3271 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
3272 xcalloc, xfree);
d33501a5
MM
3273
3274 record_btrace_conf.bts.size = 64 * 1024;
b20a6524 3275 record_btrace_conf.pt.size = 16 * 1024;
afedecd3 3276}