linux_nat_target: More low methods
[deliverable/binutils-gdb.git] / gdb / record-btrace.c
CommitLineData
afedecd3
MM
1/* Branch trace support for GDB, the GNU debugger.
2
e2882c85 3 Copyright (C) 2013-2018 Free Software Foundation, Inc.
afedecd3
MM
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22#include "defs.h"
23#include "record.h"
c0272db5 24#include "record-btrace.h"
afedecd3
MM
25#include "gdbthread.h"
26#include "target.h"
27#include "gdbcmd.h"
28#include "disasm.h"
76727919 29#include "observable.h"
afedecd3
MM
30#include "cli/cli-utils.h"
31#include "source.h"
32#include "ui-out.h"
33#include "symtab.h"
34#include "filenames.h"
1f3ef581 35#include "regcache.h"
cecac1ab 36#include "frame-unwind.h"
0b722aec 37#include "hashtab.h"
45741a9c 38#include "infrun.h"
70ad5bff
MM
39#include "event-loop.h"
40#include "inf-loop.h"
e3cfc1c7 41#include "vec.h"
325fac50 42#include <algorithm>
afedecd3
MM
43
44/* The target_ops of record-btrace. */
f6ac5f3d
PA
45
46class record_btrace_target final : public target_ops
47{
48public:
49 record_btrace_target ()
50 { to_stratum = record_stratum; }
51
52 const char *shortname () override
53 { return "record-btrace"; }
54
55 const char *longname () override
56 { return _("Branch tracing target"); }
57
58 const char *doc () override
59 { return _("Collect control-flow trace and provide the execution history."); }
60
61 void open (const char *, int) override;
62 void close () override;
63 void async (int) override;
64
65 void detach (inferior *inf, int from_tty) override
66 { record_detach (this, inf, from_tty); }
67
68 void disconnect (const char *, int) override;
69
70 void mourn_inferior () override
71 { record_mourn_inferior (this); }
72
73 void kill () override
74 { record_kill (this); }
75
76 enum record_method record_method (ptid_t ptid) override;
77
78 void stop_recording () override;
79 void info_record () override;
80
81 void insn_history (int size, gdb_disassembly_flags flags) override;
82 void insn_history_from (ULONGEST from, int size,
83 gdb_disassembly_flags flags) override;
84 void insn_history_range (ULONGEST begin, ULONGEST end,
85 gdb_disassembly_flags flags) override;
86 void call_history (int size, record_print_flags flags) override;
87 void call_history_from (ULONGEST begin, int size, record_print_flags flags)
88 override;
89 void call_history_range (ULONGEST begin, ULONGEST end, record_print_flags flags)
90 override;
91
57810aa7
PA
92 bool record_is_replaying (ptid_t ptid) override;
93 bool record_will_replay (ptid_t ptid, int dir) override;
f6ac5f3d
PA
94 void record_stop_replaying () override;
95
96 enum target_xfer_status xfer_partial (enum target_object object,
97 const char *annex,
98 gdb_byte *readbuf,
99 const gdb_byte *writebuf,
100 ULONGEST offset, ULONGEST len,
101 ULONGEST *xfered_len) override;
102
103 int insert_breakpoint (struct gdbarch *,
104 struct bp_target_info *) override;
105 int remove_breakpoint (struct gdbarch *, struct bp_target_info *,
106 enum remove_bp_reason) override;
107
108 void fetch_registers (struct regcache *, int) override;
109
110 void store_registers (struct regcache *, int) override;
111 void prepare_to_store (struct regcache *) override;
112
113 const struct frame_unwind *get_unwinder () override;
114
115 const struct frame_unwind *get_tailcall_unwinder () override;
116
117 void commit_resume () override;
118 void resume (ptid_t, int, enum gdb_signal) override;
119 ptid_t wait (ptid_t, struct target_waitstatus *, int) override;
120
121 void stop (ptid_t) override;
122 void update_thread_list () override;
57810aa7 123 bool thread_alive (ptid_t ptid) override;
f6ac5f3d
PA
124 void goto_record_begin () override;
125 void goto_record_end () override;
126 void goto_record (ULONGEST insn) override;
127
57810aa7 128 bool can_execute_reverse () override;
f6ac5f3d 129
57810aa7
PA
130 bool stopped_by_sw_breakpoint () override;
131 bool supports_stopped_by_sw_breakpoint () override;
f6ac5f3d 132
57810aa7
PA
133 bool stopped_by_hw_breakpoint () override;
134 bool supports_stopped_by_hw_breakpoint () override;
f6ac5f3d
PA
135
136 enum exec_direction_kind execution_direction () override;
137 void prepare_to_generate_core () override;
138 void done_generating_core () override;
139};
140
141static record_btrace_target record_btrace_ops;
142
143/* Initialize the record-btrace target ops. */
afedecd3 144
76727919
TT
145/* Token associated with a new-thread observer enabling branch tracing
146 for the new thread. */
147static const gdb::observers::token record_btrace_thread_observer_token;
afedecd3 148
67b5c0c1
MM
149/* Memory access types used in set/show record btrace replay-memory-access. */
150static const char replay_memory_access_read_only[] = "read-only";
151static const char replay_memory_access_read_write[] = "read-write";
152static const char *const replay_memory_access_types[] =
153{
154 replay_memory_access_read_only,
155 replay_memory_access_read_write,
156 NULL
157};
158
159/* The currently allowed replay memory access type. */
160static const char *replay_memory_access = replay_memory_access_read_only;
161
4a4495d6
MM
162/* The cpu state kinds. */
163enum record_btrace_cpu_state_kind
164{
165 CS_AUTO,
166 CS_NONE,
167 CS_CPU
168};
169
170/* The current cpu state. */
171static enum record_btrace_cpu_state_kind record_btrace_cpu_state = CS_AUTO;
172
173/* The current cpu for trace decode. */
174static struct btrace_cpu record_btrace_cpu;
175
67b5c0c1
MM
176/* Command lists for "set/show record btrace". */
177static struct cmd_list_element *set_record_btrace_cmdlist;
178static struct cmd_list_element *show_record_btrace_cmdlist;
633785ff 179
70ad5bff
MM
180/* The execution direction of the last resume we got. See record-full.c. */
181static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;
182
183/* The async event handler for reverse/replay execution. */
184static struct async_event_handler *record_btrace_async_inferior_event_handler;
185
aef92902
MM
186/* A flag indicating that we are currently generating a core file. */
187static int record_btrace_generating_corefile;
188
f4abbc16
MM
189/* The current branch trace configuration. */
190static struct btrace_config record_btrace_conf;
191
192/* Command list for "record btrace". */
193static struct cmd_list_element *record_btrace_cmdlist;
194
d33501a5
MM
195/* Command lists for "set/show record btrace bts". */
196static struct cmd_list_element *set_record_btrace_bts_cmdlist;
197static struct cmd_list_element *show_record_btrace_bts_cmdlist;
198
b20a6524
MM
199/* Command lists for "set/show record btrace pt". */
200static struct cmd_list_element *set_record_btrace_pt_cmdlist;
201static struct cmd_list_element *show_record_btrace_pt_cmdlist;
202
4a4495d6
MM
203/* Command list for "set record btrace cpu". */
204static struct cmd_list_element *set_record_btrace_cpu_cmdlist;
205
afedecd3
MM
206/* Print a record-btrace debug message. Use do ... while (0) to avoid
207 ambiguities when used in if statements. */
208
209#define DEBUG(msg, args...) \
210 do \
211 { \
212 if (record_debug != 0) \
213 fprintf_unfiltered (gdb_stdlog, \
214 "[record-btrace] " msg "\n", ##args); \
215 } \
216 while (0)
217
218
4a4495d6
MM
219/* Return the cpu configured by the user. Returns NULL if the cpu was
220 configured as auto. */
221const struct btrace_cpu *
222record_btrace_get_cpu (void)
223{
224 switch (record_btrace_cpu_state)
225 {
226 case CS_AUTO:
227 return nullptr;
228
229 case CS_NONE:
230 record_btrace_cpu.vendor = CV_UNKNOWN;
231 /* Fall through. */
232 case CS_CPU:
233 return &record_btrace_cpu;
234 }
235
236 error (_("Internal error: bad record btrace cpu state."));
237}
238
afedecd3 239/* Update the branch trace for the current thread and return a pointer to its
066ce621 240 thread_info.
afedecd3
MM
241
242 Throws an error if there is no thread or no trace. This function never
243 returns NULL. */
244
066ce621
MM
245static struct thread_info *
246require_btrace_thread (void)
afedecd3
MM
247{
248 struct thread_info *tp;
afedecd3
MM
249
250 DEBUG ("require");
251
252 tp = find_thread_ptid (inferior_ptid);
253 if (tp == NULL)
254 error (_("No thread."));
255
cd4007e4
MM
256 validate_registers_access ();
257
4a4495d6 258 btrace_fetch (tp, record_btrace_get_cpu ());
afedecd3 259
6e07b1d2 260 if (btrace_is_empty (tp))
afedecd3
MM
261 error (_("No trace."));
262
066ce621
MM
263 return tp;
264}
265
266/* Update the branch trace for the current thread and return a pointer to its
267 branch trace information struct.
268
269 Throws an error if there is no thread or no trace. This function never
270 returns NULL. */
271
272static struct btrace_thread_info *
273require_btrace (void)
274{
275 struct thread_info *tp;
276
277 tp = require_btrace_thread ();
278
279 return &tp->btrace;
afedecd3
MM
280}
281
282/* Enable branch tracing for one thread. Warn on errors. */
283
284static void
285record_btrace_enable_warn (struct thread_info *tp)
286{
492d29ea
PA
287 TRY
288 {
289 btrace_enable (tp, &record_btrace_conf);
290 }
291 CATCH (error, RETURN_MASK_ERROR)
292 {
293 warning ("%s", error.message);
294 }
295 END_CATCH
afedecd3
MM
296}
297
afedecd3
MM
298/* Enable automatic tracing of new threads. */
299
300static void
301record_btrace_auto_enable (void)
302{
303 DEBUG ("attach thread observer");
304
76727919
TT
305 gdb::observers::new_thread.attach (record_btrace_enable_warn,
306 record_btrace_thread_observer_token);
afedecd3
MM
307}
308
309/* Disable automatic tracing of new threads. */
310
311static void
312record_btrace_auto_disable (void)
313{
afedecd3
MM
314 DEBUG ("detach thread observer");
315
76727919 316 gdb::observers::new_thread.detach (record_btrace_thread_observer_token);
afedecd3
MM
317}
318
70ad5bff
MM
319/* The record-btrace async event handler function. */
320
321static void
322record_btrace_handle_async_inferior_event (gdb_client_data data)
323{
324 inferior_event_handler (INF_REG_EVENT, NULL);
325}
326
c0272db5
TW
327/* See record-btrace.h. */
328
329void
330record_btrace_push_target (void)
331{
332 const char *format;
333
334 record_btrace_auto_enable ();
335
336 push_target (&record_btrace_ops);
337
338 record_btrace_async_inferior_event_handler
339 = create_async_event_handler (record_btrace_handle_async_inferior_event,
340 NULL);
341 record_btrace_generating_corefile = 0;
342
343 format = btrace_format_short_string (record_btrace_conf.format);
76727919 344 gdb::observers::record_changed.notify (current_inferior (), 1, "btrace", format);
c0272db5
TW
345}
346
228f1508
SM
347/* Disable btrace on a set of threads on scope exit. */
348
349struct scoped_btrace_disable
350{
351 scoped_btrace_disable () = default;
352
353 DISABLE_COPY_AND_ASSIGN (scoped_btrace_disable);
354
355 ~scoped_btrace_disable ()
356 {
357 for (thread_info *tp : m_threads)
358 btrace_disable (tp);
359 }
360
361 void add_thread (thread_info *thread)
362 {
363 m_threads.push_front (thread);
364 }
365
366 void discard ()
367 {
368 m_threads.clear ();
369 }
370
371private:
372 std::forward_list<thread_info *> m_threads;
373};
374
f6ac5f3d 375/* The open method of target record-btrace. */
afedecd3 376
f6ac5f3d
PA
377void
378record_btrace_target::open (const char *args, int from_tty)
afedecd3 379{
228f1508
SM
380 /* If we fail to enable btrace for one thread, disable it for the threads for
381 which it was successfully enabled. */
382 scoped_btrace_disable btrace_disable;
afedecd3
MM
383 struct thread_info *tp;
384
385 DEBUG ("open");
386
8213266a 387 record_preopen ();
afedecd3
MM
388
389 if (!target_has_execution)
390 error (_("The program is not being run."));
391
034f788c 392 ALL_NON_EXITED_THREADS (tp)
5d5658a1 393 if (args == NULL || *args == 0 || number_is_in_list (args, tp->global_num))
afedecd3 394 {
f4abbc16 395 btrace_enable (tp, &record_btrace_conf);
afedecd3 396
228f1508 397 btrace_disable.add_thread (tp);
afedecd3
MM
398 }
399
c0272db5 400 record_btrace_push_target ();
afedecd3 401
228f1508 402 btrace_disable.discard ();
afedecd3
MM
403}
404
f6ac5f3d 405/* The stop_recording method of target record-btrace. */
afedecd3 406
f6ac5f3d
PA
407void
408record_btrace_target::stop_recording ()
afedecd3
MM
409{
410 struct thread_info *tp;
411
412 DEBUG ("stop recording");
413
414 record_btrace_auto_disable ();
415
034f788c 416 ALL_NON_EXITED_THREADS (tp)
afedecd3
MM
417 if (tp->btrace.target != NULL)
418 btrace_disable (tp);
419}
420
f6ac5f3d 421/* The disconnect method of target record-btrace. */
c0272db5 422
f6ac5f3d
PA
423void
424record_btrace_target::disconnect (const char *args,
425 int from_tty)
c0272db5 426{
f6ac5f3d 427 struct target_ops *beneath = this->beneath;
c0272db5
TW
428
429 /* Do not stop recording, just clean up GDB side. */
f6ac5f3d 430 unpush_target (this);
c0272db5
TW
431
432 /* Forward disconnect. */
f6ac5f3d 433 beneath->disconnect (args, from_tty);
c0272db5
TW
434}
435
f6ac5f3d 436/* The close method of target record-btrace. */
afedecd3 437
f6ac5f3d
PA
438void
439record_btrace_target::close ()
afedecd3 440{
568e808b
MM
441 struct thread_info *tp;
442
70ad5bff
MM
443 if (record_btrace_async_inferior_event_handler != NULL)
444 delete_async_event_handler (&record_btrace_async_inferior_event_handler);
445
99c819ee
MM
446 /* Make sure automatic recording gets disabled even if we did not stop
447 recording before closing the record-btrace target. */
448 record_btrace_auto_disable ();
449
568e808b
MM
450 /* We should have already stopped recording.
451 Tear down btrace in case we have not. */
034f788c 452 ALL_NON_EXITED_THREADS (tp)
568e808b 453 btrace_teardown (tp);
afedecd3
MM
454}
455
f6ac5f3d 456/* The async method of target record-btrace. */
b7d2e916 457
f6ac5f3d
PA
458void
459record_btrace_target::async (int enable)
b7d2e916 460{
6a3753b3 461 if (enable)
b7d2e916
PA
462 mark_async_event_handler (record_btrace_async_inferior_event_handler);
463 else
464 clear_async_event_handler (record_btrace_async_inferior_event_handler);
465
f6ac5f3d 466 this->beneath->async (enable);
b7d2e916
PA
467}
468
d33501a5
MM
469/* Adjusts the size and returns a human readable size suffix. */
470
471static const char *
472record_btrace_adjust_size (unsigned int *size)
473{
474 unsigned int sz;
475
476 sz = *size;
477
478 if ((sz & ((1u << 30) - 1)) == 0)
479 {
480 *size = sz >> 30;
481 return "GB";
482 }
483 else if ((sz & ((1u << 20) - 1)) == 0)
484 {
485 *size = sz >> 20;
486 return "MB";
487 }
488 else if ((sz & ((1u << 10) - 1)) == 0)
489 {
490 *size = sz >> 10;
491 return "kB";
492 }
493 else
494 return "";
495}
496
497/* Print a BTS configuration. */
498
499static void
500record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
501{
502 const char *suffix;
503 unsigned int size;
504
505 size = conf->size;
506 if (size > 0)
507 {
508 suffix = record_btrace_adjust_size (&size);
509 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
510 }
511}
512
bc504a31 513/* Print an Intel Processor Trace configuration. */
b20a6524
MM
514
515static void
516record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
517{
518 const char *suffix;
519 unsigned int size;
520
521 size = conf->size;
522 if (size > 0)
523 {
524 suffix = record_btrace_adjust_size (&size);
525 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
526 }
527}
528
d33501a5
MM
529/* Print a branch tracing configuration. */
530
531static void
532record_btrace_print_conf (const struct btrace_config *conf)
533{
534 printf_unfiltered (_("Recording format: %s.\n"),
535 btrace_format_string (conf->format));
536
537 switch (conf->format)
538 {
539 case BTRACE_FORMAT_NONE:
540 return;
541
542 case BTRACE_FORMAT_BTS:
543 record_btrace_print_bts_conf (&conf->bts);
544 return;
b20a6524
MM
545
546 case BTRACE_FORMAT_PT:
547 record_btrace_print_pt_conf (&conf->pt);
548 return;
d33501a5
MM
549 }
550
551 internal_error (__FILE__, __LINE__, _("Unkown branch trace format."));
552}
553
f6ac5f3d 554/* The info_record method of target record-btrace. */
afedecd3 555
f6ac5f3d
PA
556void
557record_btrace_target::info_record ()
afedecd3
MM
558{
559 struct btrace_thread_info *btinfo;
f4abbc16 560 const struct btrace_config *conf;
afedecd3 561 struct thread_info *tp;
31fd9caa 562 unsigned int insns, calls, gaps;
afedecd3
MM
563
564 DEBUG ("info");
565
566 tp = find_thread_ptid (inferior_ptid);
567 if (tp == NULL)
568 error (_("No thread."));
569
cd4007e4
MM
570 validate_registers_access ();
571
f4abbc16
MM
572 btinfo = &tp->btrace;
573
f6ac5f3d 574 conf = ::btrace_conf (btinfo);
f4abbc16 575 if (conf != NULL)
d33501a5 576 record_btrace_print_conf (conf);
f4abbc16 577
4a4495d6 578 btrace_fetch (tp, record_btrace_get_cpu ());
afedecd3 579
23a7fe75
MM
580 insns = 0;
581 calls = 0;
31fd9caa 582 gaps = 0;
23a7fe75 583
6e07b1d2 584 if (!btrace_is_empty (tp))
23a7fe75
MM
585 {
586 struct btrace_call_iterator call;
587 struct btrace_insn_iterator insn;
588
589 btrace_call_end (&call, btinfo);
590 btrace_call_prev (&call, 1);
5de9129b 591 calls = btrace_call_number (&call);
23a7fe75
MM
592
593 btrace_insn_end (&insn, btinfo);
5de9129b 594 insns = btrace_insn_number (&insn);
31fd9caa 595
69090cee
TW
596 /* If the last instruction is not a gap, it is the current instruction
597 that is not actually part of the record. */
598 if (btrace_insn_get (&insn) != NULL)
599 insns -= 1;
31fd9caa
MM
600
601 gaps = btinfo->ngaps;
23a7fe75 602 }
afedecd3 603
31fd9caa 604 printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
43792cf0
PA
605 "for thread %s (%s).\n"), insns, calls, gaps,
606 print_thread_id (tp), target_pid_to_str (tp->ptid));
07bbe694
MM
607
608 if (btrace_is_replaying (tp))
609 printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
610 btrace_insn_number (btinfo->replay));
afedecd3
MM
611}
612
31fd9caa
MM
613/* Print a decode error. */
614
615static void
616btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
617 enum btrace_format format)
618{
508352a9 619 const char *errstr = btrace_decode_error (format, errcode);
31fd9caa 620
112e8700 621 uiout->text (_("["));
508352a9
TW
622 /* ERRCODE > 0 indicates notifications on BTRACE_FORMAT_PT. */
623 if (!(format == BTRACE_FORMAT_PT && errcode > 0))
31fd9caa 624 {
112e8700
SM
625 uiout->text (_("decode error ("));
626 uiout->field_int ("errcode", errcode);
627 uiout->text (_("): "));
31fd9caa 628 }
112e8700
SM
629 uiout->text (errstr);
630 uiout->text (_("]\n"));
31fd9caa
MM
631}
632
afedecd3
MM
633/* Print an unsigned int. */
634
635static void
636ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
637{
112e8700 638 uiout->field_fmt (fld, "%u", val);
afedecd3
MM
639}
640
f94cc897
MM
641/* A range of source lines. */
642
643struct btrace_line_range
644{
645 /* The symtab this line is from. */
646 struct symtab *symtab;
647
648 /* The first line (inclusive). */
649 int begin;
650
651 /* The last line (exclusive). */
652 int end;
653};
654
655/* Construct a line range. */
656
657static struct btrace_line_range
658btrace_mk_line_range (struct symtab *symtab, int begin, int end)
659{
660 struct btrace_line_range range;
661
662 range.symtab = symtab;
663 range.begin = begin;
664 range.end = end;
665
666 return range;
667}
668
669/* Add a line to a line range. */
670
671static struct btrace_line_range
672btrace_line_range_add (struct btrace_line_range range, int line)
673{
674 if (range.end <= range.begin)
675 {
676 /* This is the first entry. */
677 range.begin = line;
678 range.end = line + 1;
679 }
680 else if (line < range.begin)
681 range.begin = line;
682 else if (range.end < line)
683 range.end = line;
684
685 return range;
686}
687
688/* Return non-zero if RANGE is empty, zero otherwise. */
689
690static int
691btrace_line_range_is_empty (struct btrace_line_range range)
692{
693 return range.end <= range.begin;
694}
695
696/* Return non-zero if LHS contains RHS, zero otherwise. */
697
698static int
699btrace_line_range_contains_range (struct btrace_line_range lhs,
700 struct btrace_line_range rhs)
701{
702 return ((lhs.symtab == rhs.symtab)
703 && (lhs.begin <= rhs.begin)
704 && (rhs.end <= lhs.end));
705}
706
707/* Find the line range associated with PC. */
708
709static struct btrace_line_range
710btrace_find_line_range (CORE_ADDR pc)
711{
712 struct btrace_line_range range;
713 struct linetable_entry *lines;
714 struct linetable *ltable;
715 struct symtab *symtab;
716 int nlines, i;
717
718 symtab = find_pc_line_symtab (pc);
719 if (symtab == NULL)
720 return btrace_mk_line_range (NULL, 0, 0);
721
722 ltable = SYMTAB_LINETABLE (symtab);
723 if (ltable == NULL)
724 return btrace_mk_line_range (symtab, 0, 0);
725
726 nlines = ltable->nitems;
727 lines = ltable->item;
728 if (nlines <= 0)
729 return btrace_mk_line_range (symtab, 0, 0);
730
731 range = btrace_mk_line_range (symtab, 0, 0);
732 for (i = 0; i < nlines - 1; i++)
733 {
734 if ((lines[i].pc == pc) && (lines[i].line != 0))
735 range = btrace_line_range_add (range, lines[i].line);
736 }
737
738 return range;
739}
740
741/* Print source lines in LINES to UIOUT.
742
743 UI_ITEM_CHAIN is a cleanup chain for the last source line and the
744 instructions corresponding to that source line. When printing a new source
745 line, we do the cleanups for the open chain and open a new cleanup chain for
746 the new source line. If the source line range in LINES is not empty, this
747 function will leave the cleanup chain for the last printed source line open
748 so instructions can be added to it. */
749
750static void
751btrace_print_lines (struct btrace_line_range lines, struct ui_out *uiout,
7ea78b59
SM
752 gdb::optional<ui_out_emit_tuple> *src_and_asm_tuple,
753 gdb::optional<ui_out_emit_list> *asm_list,
754 gdb_disassembly_flags flags)
f94cc897 755{
8d297bbf 756 print_source_lines_flags psl_flags;
f94cc897 757
f94cc897
MM
758 if (flags & DISASSEMBLY_FILENAME)
759 psl_flags |= PRINT_SOURCE_LINES_FILENAME;
760
7ea78b59 761 for (int line = lines.begin; line < lines.end; ++line)
f94cc897 762 {
7ea78b59 763 asm_list->reset ();
f94cc897 764
7ea78b59 765 src_and_asm_tuple->emplace (uiout, "src_and_asm_line");
f94cc897
MM
766
767 print_source_lines (lines.symtab, line, line + 1, psl_flags);
768
7ea78b59 769 asm_list->emplace (uiout, "line_asm_insn");
f94cc897
MM
770 }
771}
772
afedecd3
MM
773/* Disassemble a section of the recorded instruction trace. */
774
775static void
23a7fe75 776btrace_insn_history (struct ui_out *uiout,
31fd9caa 777 const struct btrace_thread_info *btinfo,
23a7fe75 778 const struct btrace_insn_iterator *begin,
9a24775b
PA
779 const struct btrace_insn_iterator *end,
780 gdb_disassembly_flags flags)
afedecd3 781{
9a24775b
PA
782 DEBUG ("itrace (0x%x): [%u; %u)", (unsigned) flags,
783 btrace_insn_number (begin), btrace_insn_number (end));
afedecd3 784
f94cc897
MM
785 flags |= DISASSEMBLY_SPECULATIVE;
786
7ea78b59
SM
787 struct gdbarch *gdbarch = target_gdbarch ();
788 btrace_line_range last_lines = btrace_mk_line_range (NULL, 0, 0);
f94cc897 789
7ea78b59 790 ui_out_emit_list list_emitter (uiout, "asm_insns");
f94cc897 791
7ea78b59
SM
792 gdb::optional<ui_out_emit_tuple> src_and_asm_tuple;
793 gdb::optional<ui_out_emit_list> asm_list;
afedecd3 794
8b172ce7
PA
795 gdb_pretty_print_disassembler disasm (gdbarch);
796
7ea78b59
SM
797 for (btrace_insn_iterator it = *begin; btrace_insn_cmp (&it, end) != 0;
798 btrace_insn_next (&it, 1))
afedecd3 799 {
23a7fe75
MM
800 const struct btrace_insn *insn;
801
802 insn = btrace_insn_get (&it);
803
31fd9caa
MM
804 /* A NULL instruction indicates a gap in the trace. */
805 if (insn == NULL)
806 {
807 const struct btrace_config *conf;
808
809 conf = btrace_conf (btinfo);
afedecd3 810
31fd9caa
MM
811 /* We have trace so we must have a configuration. */
812 gdb_assert (conf != NULL);
813
69090cee
TW
814 uiout->field_fmt ("insn-number", "%u",
815 btrace_insn_number (&it));
816 uiout->text ("\t");
817
818 btrace_ui_out_decode_error (uiout, btrace_insn_get_error (&it),
31fd9caa
MM
819 conf->format);
820 }
821 else
822 {
f94cc897 823 struct disasm_insn dinsn;
da8c46d2 824
f94cc897 825 if ((flags & DISASSEMBLY_SOURCE) != 0)
da8c46d2 826 {
f94cc897
MM
827 struct btrace_line_range lines;
828
829 lines = btrace_find_line_range (insn->pc);
830 if (!btrace_line_range_is_empty (lines)
831 && !btrace_line_range_contains_range (last_lines, lines))
832 {
7ea78b59
SM
833 btrace_print_lines (lines, uiout, &src_and_asm_tuple, &asm_list,
834 flags);
f94cc897
MM
835 last_lines = lines;
836 }
7ea78b59 837 else if (!src_and_asm_tuple.has_value ())
f94cc897 838 {
7ea78b59
SM
839 gdb_assert (!asm_list.has_value ());
840
841 src_and_asm_tuple.emplace (uiout, "src_and_asm_line");
842
f94cc897 843 /* No source information. */
7ea78b59 844 asm_list.emplace (uiout, "line_asm_insn");
f94cc897
MM
845 }
846
7ea78b59
SM
847 gdb_assert (src_and_asm_tuple.has_value ());
848 gdb_assert (asm_list.has_value ());
da8c46d2 849 }
da8c46d2 850
f94cc897
MM
851 memset (&dinsn, 0, sizeof (dinsn));
852 dinsn.number = btrace_insn_number (&it);
853 dinsn.addr = insn->pc;
31fd9caa 854
da8c46d2 855 if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
f94cc897 856 dinsn.is_speculative = 1;
da8c46d2 857
8b172ce7 858 disasm.pretty_print_insn (uiout, &dinsn, flags);
31fd9caa 859 }
afedecd3
MM
860 }
861}
862
f6ac5f3d 863/* The insn_history method of target record-btrace. */
afedecd3 864
f6ac5f3d
PA
865void
866record_btrace_target::insn_history (int size, gdb_disassembly_flags flags)
afedecd3
MM
867{
868 struct btrace_thread_info *btinfo;
23a7fe75
MM
869 struct btrace_insn_history *history;
870 struct btrace_insn_iterator begin, end;
afedecd3 871 struct ui_out *uiout;
23a7fe75 872 unsigned int context, covered;
afedecd3
MM
873
874 uiout = current_uiout;
2e783024 875 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
afedecd3 876 context = abs (size);
afedecd3
MM
877 if (context == 0)
878 error (_("Bad record instruction-history-size."));
879
23a7fe75
MM
880 btinfo = require_btrace ();
881 history = btinfo->insn_history;
882 if (history == NULL)
afedecd3 883 {
07bbe694 884 struct btrace_insn_iterator *replay;
afedecd3 885
9a24775b 886 DEBUG ("insn-history (0x%x): %d", (unsigned) flags, size);
afedecd3 887
07bbe694
MM
888 /* If we're replaying, we start at the replay position. Otherwise, we
889 start at the tail of the trace. */
890 replay = btinfo->replay;
891 if (replay != NULL)
892 begin = *replay;
893 else
894 btrace_insn_end (&begin, btinfo);
895
896 /* We start from here and expand in the requested direction. Then we
897 expand in the other direction, as well, to fill up any remaining
898 context. */
899 end = begin;
900 if (size < 0)
901 {
902 /* We want the current position covered, as well. */
903 covered = btrace_insn_next (&end, 1);
904 covered += btrace_insn_prev (&begin, context - covered);
905 covered += btrace_insn_next (&end, context - covered);
906 }
907 else
908 {
909 covered = btrace_insn_next (&end, context);
910 covered += btrace_insn_prev (&begin, context - covered);
911 }
afedecd3
MM
912 }
913 else
914 {
23a7fe75
MM
915 begin = history->begin;
916 end = history->end;
afedecd3 917
9a24775b 918 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", (unsigned) flags, size,
23a7fe75 919 btrace_insn_number (&begin), btrace_insn_number (&end));
afedecd3 920
23a7fe75
MM
921 if (size < 0)
922 {
923 end = begin;
924 covered = btrace_insn_prev (&begin, context);
925 }
926 else
927 {
928 begin = end;
929 covered = btrace_insn_next (&end, context);
930 }
afedecd3
MM
931 }
932
23a7fe75 933 if (covered > 0)
31fd9caa 934 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
23a7fe75
MM
935 else
936 {
937 if (size < 0)
938 printf_unfiltered (_("At the start of the branch trace record.\n"));
939 else
940 printf_unfiltered (_("At the end of the branch trace record.\n"));
941 }
afedecd3 942
23a7fe75 943 btrace_set_insn_history (btinfo, &begin, &end);
afedecd3
MM
944}
945
f6ac5f3d 946/* The insn_history_range method of target record-btrace. */
afedecd3 947
f6ac5f3d
PA
948void
949record_btrace_target::insn_history_range (ULONGEST from, ULONGEST to,
950 gdb_disassembly_flags flags)
afedecd3
MM
951{
952 struct btrace_thread_info *btinfo;
23a7fe75 953 struct btrace_insn_iterator begin, end;
afedecd3 954 struct ui_out *uiout;
23a7fe75
MM
955 unsigned int low, high;
956 int found;
afedecd3
MM
957
958 uiout = current_uiout;
2e783024 959 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
23a7fe75
MM
960 low = from;
961 high = to;
afedecd3 962
9a24775b 963 DEBUG ("insn-history (0x%x): [%u; %u)", (unsigned) flags, low, high);
afedecd3
MM
964
965 /* Check for wrap-arounds. */
23a7fe75 966 if (low != from || high != to)
afedecd3
MM
967 error (_("Bad range."));
968
0688d04e 969 if (high < low)
afedecd3
MM
970 error (_("Bad range."));
971
23a7fe75 972 btinfo = require_btrace ();
afedecd3 973
23a7fe75
MM
974 found = btrace_find_insn_by_number (&begin, btinfo, low);
975 if (found == 0)
976 error (_("Range out of bounds."));
afedecd3 977
23a7fe75
MM
978 found = btrace_find_insn_by_number (&end, btinfo, high);
979 if (found == 0)
0688d04e
MM
980 {
981 /* Silently truncate the range. */
982 btrace_insn_end (&end, btinfo);
983 }
984 else
985 {
986 /* We want both begin and end to be inclusive. */
987 btrace_insn_next (&end, 1);
988 }
afedecd3 989
31fd9caa 990 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
23a7fe75 991 btrace_set_insn_history (btinfo, &begin, &end);
afedecd3
MM
992}
993
f6ac5f3d 994/* The insn_history_from method of target record-btrace. */
afedecd3 995
f6ac5f3d
PA
996void
997record_btrace_target::insn_history_from (ULONGEST from, int size,
998 gdb_disassembly_flags flags)
afedecd3
MM
999{
1000 ULONGEST begin, end, context;
1001
1002 context = abs (size);
0688d04e
MM
1003 if (context == 0)
1004 error (_("Bad record instruction-history-size."));
afedecd3
MM
1005
1006 if (size < 0)
1007 {
1008 end = from;
1009
1010 if (from < context)
1011 begin = 0;
1012 else
0688d04e 1013 begin = from - context + 1;
afedecd3
MM
1014 }
1015 else
1016 {
1017 begin = from;
0688d04e 1018 end = from + context - 1;
afedecd3
MM
1019
1020 /* Check for wrap-around. */
1021 if (end < begin)
1022 end = ULONGEST_MAX;
1023 }
1024
f6ac5f3d 1025 insn_history_range (begin, end, flags);
afedecd3
MM
1026}
1027
1028/* Print the instruction number range for a function call history line. */
1029
1030static void
23a7fe75
MM
1031btrace_call_history_insn_range (struct ui_out *uiout,
1032 const struct btrace_function *bfun)
afedecd3 1033{
7acbe133
MM
1034 unsigned int begin, end, size;
1035
0860c437 1036 size = bfun->insn.size ();
7acbe133 1037 gdb_assert (size > 0);
afedecd3 1038
23a7fe75 1039 begin = bfun->insn_offset;
7acbe133 1040 end = begin + size - 1;
afedecd3 1041
23a7fe75 1042 ui_out_field_uint (uiout, "insn begin", begin);
112e8700 1043 uiout->text (",");
23a7fe75 1044 ui_out_field_uint (uiout, "insn end", end);
afedecd3
MM
1045}
1046
ce0dfbea
MM
1047/* Compute the lowest and highest source line for the instructions in BFUN
1048 and return them in PBEGIN and PEND.
1049 Ignore instructions that can't be mapped to BFUN, e.g. instructions that
1050 result from inlining or macro expansion. */
1051
1052static void
1053btrace_compute_src_line_range (const struct btrace_function *bfun,
1054 int *pbegin, int *pend)
1055{
ce0dfbea
MM
1056 struct symtab *symtab;
1057 struct symbol *sym;
ce0dfbea
MM
1058 int begin, end;
1059
1060 begin = INT_MAX;
1061 end = INT_MIN;
1062
1063 sym = bfun->sym;
1064 if (sym == NULL)
1065 goto out;
1066
1067 symtab = symbol_symtab (sym);
1068
0860c437 1069 for (const btrace_insn &insn : bfun->insn)
ce0dfbea
MM
1070 {
1071 struct symtab_and_line sal;
1072
0860c437 1073 sal = find_pc_line (insn.pc, 0);
ce0dfbea
MM
1074 if (sal.symtab != symtab || sal.line == 0)
1075 continue;
1076
325fac50
PA
1077 begin = std::min (begin, sal.line);
1078 end = std::max (end, sal.line);
ce0dfbea
MM
1079 }
1080
1081 out:
1082 *pbegin = begin;
1083 *pend = end;
1084}
1085
afedecd3
MM
1086/* Print the source line information for a function call history line. */
1087
1088static void
23a7fe75
MM
1089btrace_call_history_src_line (struct ui_out *uiout,
1090 const struct btrace_function *bfun)
afedecd3
MM
1091{
1092 struct symbol *sym;
23a7fe75 1093 int begin, end;
afedecd3
MM
1094
1095 sym = bfun->sym;
1096 if (sym == NULL)
1097 return;
1098
112e8700 1099 uiout->field_string ("file",
08be3fe3 1100 symtab_to_filename_for_display (symbol_symtab (sym)));
afedecd3 1101
ce0dfbea 1102 btrace_compute_src_line_range (bfun, &begin, &end);
23a7fe75 1103 if (end < begin)
afedecd3
MM
1104 return;
1105
112e8700
SM
1106 uiout->text (":");
1107 uiout->field_int ("min line", begin);
afedecd3 1108
23a7fe75 1109 if (end == begin)
afedecd3
MM
1110 return;
1111
112e8700
SM
1112 uiout->text (",");
1113 uiout->field_int ("max line", end);
afedecd3
MM
1114}
1115
0b722aec
MM
1116/* Get the name of a branch trace function. */
1117
1118static const char *
1119btrace_get_bfun_name (const struct btrace_function *bfun)
1120{
1121 struct minimal_symbol *msym;
1122 struct symbol *sym;
1123
1124 if (bfun == NULL)
1125 return "??";
1126
1127 msym = bfun->msym;
1128 sym = bfun->sym;
1129
1130 if (sym != NULL)
1131 return SYMBOL_PRINT_NAME (sym);
1132 else if (msym != NULL)
efd66ac6 1133 return MSYMBOL_PRINT_NAME (msym);
0b722aec
MM
1134 else
1135 return "??";
1136}
1137
afedecd3
MM
1138/* Disassemble a section of the recorded function trace. */
1139
1140static void
23a7fe75 1141btrace_call_history (struct ui_out *uiout,
8710b709 1142 const struct btrace_thread_info *btinfo,
23a7fe75
MM
1143 const struct btrace_call_iterator *begin,
1144 const struct btrace_call_iterator *end,
8d297bbf 1145 int int_flags)
afedecd3 1146{
23a7fe75 1147 struct btrace_call_iterator it;
8d297bbf 1148 record_print_flags flags = (enum record_print_flag) int_flags;
afedecd3 1149
8d297bbf 1150 DEBUG ("ftrace (0x%x): [%u; %u)", int_flags, btrace_call_number (begin),
23a7fe75 1151 btrace_call_number (end));
afedecd3 1152
23a7fe75 1153 for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
afedecd3 1154 {
23a7fe75
MM
1155 const struct btrace_function *bfun;
1156 struct minimal_symbol *msym;
1157 struct symbol *sym;
1158
1159 bfun = btrace_call_get (&it);
23a7fe75 1160 sym = bfun->sym;
0b722aec 1161 msym = bfun->msym;
23a7fe75 1162
afedecd3 1163 /* Print the function index. */
23a7fe75 1164 ui_out_field_uint (uiout, "index", bfun->number);
112e8700 1165 uiout->text ("\t");
afedecd3 1166
31fd9caa
MM
1167 /* Indicate gaps in the trace. */
1168 if (bfun->errcode != 0)
1169 {
1170 const struct btrace_config *conf;
1171
1172 conf = btrace_conf (btinfo);
1173
1174 /* We have trace so we must have a configuration. */
1175 gdb_assert (conf != NULL);
1176
1177 btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);
1178
1179 continue;
1180 }
1181
8710b709
MM
1182 if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
1183 {
1184 int level = bfun->level + btinfo->level, i;
1185
1186 for (i = 0; i < level; ++i)
112e8700 1187 uiout->text (" ");
8710b709
MM
1188 }
1189
1190 if (sym != NULL)
112e8700 1191 uiout->field_string ("function", SYMBOL_PRINT_NAME (sym));
8710b709 1192 else if (msym != NULL)
112e8700
SM
1193 uiout->field_string ("function", MSYMBOL_PRINT_NAME (msym));
1194 else if (!uiout->is_mi_like_p ())
1195 uiout->field_string ("function", "??");
8710b709 1196
1e038f67 1197 if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
afedecd3 1198 {
112e8700 1199 uiout->text (_("\tinst "));
23a7fe75 1200 btrace_call_history_insn_range (uiout, bfun);
afedecd3
MM
1201 }
1202
1e038f67 1203 if ((flags & RECORD_PRINT_SRC_LINE) != 0)
afedecd3 1204 {
112e8700 1205 uiout->text (_("\tat "));
23a7fe75 1206 btrace_call_history_src_line (uiout, bfun);
afedecd3
MM
1207 }
1208
112e8700 1209 uiout->text ("\n");
afedecd3
MM
1210 }
1211}
1212
f6ac5f3d 1213/* The call_history method of target record-btrace. */
afedecd3 1214
f6ac5f3d
PA
1215void
1216record_btrace_target::call_history (int size, record_print_flags flags)
afedecd3
MM
1217{
1218 struct btrace_thread_info *btinfo;
23a7fe75
MM
1219 struct btrace_call_history *history;
1220 struct btrace_call_iterator begin, end;
afedecd3 1221 struct ui_out *uiout;
23a7fe75 1222 unsigned int context, covered;
afedecd3
MM
1223
1224 uiout = current_uiout;
2e783024 1225 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
afedecd3 1226 context = abs (size);
afedecd3
MM
1227 if (context == 0)
1228 error (_("Bad record function-call-history-size."));
1229
23a7fe75
MM
1230 btinfo = require_btrace ();
1231 history = btinfo->call_history;
1232 if (history == NULL)
afedecd3 1233 {
07bbe694 1234 struct btrace_insn_iterator *replay;
afedecd3 1235
0cb7c7b0 1236 DEBUG ("call-history (0x%x): %d", (int) flags, size);
afedecd3 1237
07bbe694
MM
1238 /* If we're replaying, we start at the replay position. Otherwise, we
1239 start at the tail of the trace. */
1240 replay = btinfo->replay;
1241 if (replay != NULL)
1242 {
07bbe694 1243 begin.btinfo = btinfo;
a0f1b963 1244 begin.index = replay->call_index;
07bbe694
MM
1245 }
1246 else
1247 btrace_call_end (&begin, btinfo);
1248
1249 /* We start from here and expand in the requested direction. Then we
1250 expand in the other direction, as well, to fill up any remaining
1251 context. */
1252 end = begin;
1253 if (size < 0)
1254 {
1255 /* We want the current position covered, as well. */
1256 covered = btrace_call_next (&end, 1);
1257 covered += btrace_call_prev (&begin, context - covered);
1258 covered += btrace_call_next (&end, context - covered);
1259 }
1260 else
1261 {
1262 covered = btrace_call_next (&end, context);
1263 covered += btrace_call_prev (&begin, context- covered);
1264 }
afedecd3
MM
1265 }
1266 else
1267 {
23a7fe75
MM
1268 begin = history->begin;
1269 end = history->end;
afedecd3 1270
0cb7c7b0 1271 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", (int) flags, size,
23a7fe75 1272 btrace_call_number (&begin), btrace_call_number (&end));
afedecd3 1273
23a7fe75
MM
1274 if (size < 0)
1275 {
1276 end = begin;
1277 covered = btrace_call_prev (&begin, context);
1278 }
1279 else
1280 {
1281 begin = end;
1282 covered = btrace_call_next (&end, context);
1283 }
afedecd3
MM
1284 }
1285
23a7fe75 1286 if (covered > 0)
8710b709 1287 btrace_call_history (uiout, btinfo, &begin, &end, flags);
23a7fe75
MM
1288 else
1289 {
1290 if (size < 0)
1291 printf_unfiltered (_("At the start of the branch trace record.\n"));
1292 else
1293 printf_unfiltered (_("At the end of the branch trace record.\n"));
1294 }
afedecd3 1295
23a7fe75 1296 btrace_set_call_history (btinfo, &begin, &end);
afedecd3
MM
1297}
1298
f6ac5f3d 1299/* The call_history_range method of target record-btrace. */
afedecd3 1300
f6ac5f3d
PA
1301void
1302record_btrace_target::call_history_range (ULONGEST from, ULONGEST to,
1303 record_print_flags flags)
afedecd3
MM
1304{
1305 struct btrace_thread_info *btinfo;
23a7fe75 1306 struct btrace_call_iterator begin, end;
afedecd3 1307 struct ui_out *uiout;
23a7fe75
MM
1308 unsigned int low, high;
1309 int found;
afedecd3
MM
1310
1311 uiout = current_uiout;
2e783024 1312 ui_out_emit_tuple tuple_emitter (uiout, "func history");
23a7fe75
MM
1313 low = from;
1314 high = to;
afedecd3 1315
0cb7c7b0 1316 DEBUG ("call-history (0x%x): [%u; %u)", (int) flags, low, high);
afedecd3
MM
1317
1318 /* Check for wrap-arounds. */
23a7fe75 1319 if (low != from || high != to)
afedecd3
MM
1320 error (_("Bad range."));
1321
0688d04e 1322 if (high < low)
afedecd3
MM
1323 error (_("Bad range."));
1324
23a7fe75 1325 btinfo = require_btrace ();
afedecd3 1326
23a7fe75
MM
1327 found = btrace_find_call_by_number (&begin, btinfo, low);
1328 if (found == 0)
1329 error (_("Range out of bounds."));
afedecd3 1330
23a7fe75
MM
1331 found = btrace_find_call_by_number (&end, btinfo, high);
1332 if (found == 0)
0688d04e
MM
1333 {
1334 /* Silently truncate the range. */
1335 btrace_call_end (&end, btinfo);
1336 }
1337 else
1338 {
1339 /* We want both begin and end to be inclusive. */
1340 btrace_call_next (&end, 1);
1341 }
afedecd3 1342
8710b709 1343 btrace_call_history (uiout, btinfo, &begin, &end, flags);
23a7fe75 1344 btrace_set_call_history (btinfo, &begin, &end);
afedecd3
MM
1345}
1346
f6ac5f3d 1347/* The call_history_from method of target record-btrace. */
afedecd3 1348
f6ac5f3d
PA
1349void
1350record_btrace_target::call_history_from (ULONGEST from, int size,
1351 record_print_flags flags)
afedecd3
MM
1352{
1353 ULONGEST begin, end, context;
1354
1355 context = abs (size);
0688d04e
MM
1356 if (context == 0)
1357 error (_("Bad record function-call-history-size."));
afedecd3
MM
1358
1359 if (size < 0)
1360 {
1361 end = from;
1362
1363 if (from < context)
1364 begin = 0;
1365 else
0688d04e 1366 begin = from - context + 1;
afedecd3
MM
1367 }
1368 else
1369 {
1370 begin = from;
0688d04e 1371 end = from + context - 1;
afedecd3
MM
1372
1373 /* Check for wrap-around. */
1374 if (end < begin)
1375 end = ULONGEST_MAX;
1376 }
1377
f6ac5f3d 1378 call_history_range ( begin, end, flags);
afedecd3
MM
1379}
1380
f6ac5f3d 1381/* The record_method method of target record-btrace. */
b158a20f 1382
f6ac5f3d
PA
1383enum record_method
1384record_btrace_target::record_method (ptid_t ptid)
b158a20f 1385{
b158a20f
TW
1386 struct thread_info * const tp = find_thread_ptid (ptid);
1387
1388 if (tp == NULL)
1389 error (_("No thread."));
1390
1391 if (tp->btrace.target == NULL)
1392 return RECORD_METHOD_NONE;
1393
1394 return RECORD_METHOD_BTRACE;
1395}
1396
f6ac5f3d 1397/* The record_is_replaying method of target record-btrace. */
07bbe694 1398
57810aa7 1399bool
f6ac5f3d 1400record_btrace_target::record_is_replaying (ptid_t ptid)
07bbe694
MM
1401{
1402 struct thread_info *tp;
1403
034f788c 1404 ALL_NON_EXITED_THREADS (tp)
a52eab48 1405 if (ptid_match (tp->ptid, ptid) && btrace_is_replaying (tp))
57810aa7 1406 return true;
07bbe694 1407
57810aa7 1408 return false;
07bbe694
MM
1409}
1410
f6ac5f3d 1411/* The record_will_replay method of target record-btrace. */
7ff27e9b 1412
57810aa7 1413bool
f6ac5f3d 1414record_btrace_target::record_will_replay (ptid_t ptid, int dir)
7ff27e9b 1415{
f6ac5f3d 1416 return dir == EXEC_REVERSE || record_is_replaying (ptid);
7ff27e9b
MM
1417}
1418
f6ac5f3d 1419/* The xfer_partial method of target record-btrace. */
633785ff 1420
f6ac5f3d
PA
1421enum target_xfer_status
1422record_btrace_target::xfer_partial (enum target_object object,
1423 const char *annex, gdb_byte *readbuf,
1424 const gdb_byte *writebuf, ULONGEST offset,
1425 ULONGEST len, ULONGEST *xfered_len)
633785ff 1426{
633785ff 1427 /* Filter out requests that don't make sense during replay. */
67b5c0c1 1428 if (replay_memory_access == replay_memory_access_read_only
aef92902 1429 && !record_btrace_generating_corefile
f6ac5f3d 1430 && record_is_replaying (inferior_ptid))
633785ff
MM
1431 {
1432 switch (object)
1433 {
1434 case TARGET_OBJECT_MEMORY:
1435 {
1436 struct target_section *section;
1437
1438 /* We do not allow writing memory in general. */
1439 if (writebuf != NULL)
9b409511
YQ
1440 {
1441 *xfered_len = len;
bc113b4e 1442 return TARGET_XFER_UNAVAILABLE;
9b409511 1443 }
633785ff
MM
1444
1445 /* We allow reading readonly memory. */
f6ac5f3d 1446 section = target_section_by_addr (this, offset);
633785ff
MM
1447 if (section != NULL)
1448 {
1449 /* Check if the section we found is readonly. */
1450 if ((bfd_get_section_flags (section->the_bfd_section->owner,
1451 section->the_bfd_section)
1452 & SEC_READONLY) != 0)
1453 {
1454 /* Truncate the request to fit into this section. */
325fac50 1455 len = std::min (len, section->endaddr - offset);
633785ff
MM
1456 break;
1457 }
1458 }
1459
9b409511 1460 *xfered_len = len;
bc113b4e 1461 return TARGET_XFER_UNAVAILABLE;
633785ff
MM
1462 }
1463 }
1464 }
1465
1466 /* Forward the request. */
f6ac5f3d
PA
1467 return this->beneath->xfer_partial (object, annex, readbuf, writebuf,
1468 offset, len, xfered_len);
633785ff
MM
1469}
1470
f6ac5f3d 1471/* The insert_breakpoint method of target record-btrace. */
633785ff 1472
f6ac5f3d
PA
1473int
1474record_btrace_target::insert_breakpoint (struct gdbarch *gdbarch,
1475 struct bp_target_info *bp_tgt)
633785ff 1476{
67b5c0c1
MM
1477 const char *old;
1478 int ret;
633785ff
MM
1479
1480 /* Inserting breakpoints requires accessing memory. Allow it for the
1481 duration of this function. */
67b5c0c1
MM
1482 old = replay_memory_access;
1483 replay_memory_access = replay_memory_access_read_write;
633785ff
MM
1484
1485 ret = 0;
492d29ea
PA
1486 TRY
1487 {
f6ac5f3d 1488 ret = this->beneath->insert_breakpoint (gdbarch, bp_tgt);
492d29ea 1489 }
492d29ea
PA
1490 CATCH (except, RETURN_MASK_ALL)
1491 {
6c63c96a 1492 replay_memory_access = old;
492d29ea
PA
1493 throw_exception (except);
1494 }
1495 END_CATCH
6c63c96a 1496 replay_memory_access = old;
633785ff
MM
1497
1498 return ret;
1499}
1500
f6ac5f3d 1501/* The remove_breakpoint method of target record-btrace. */
633785ff 1502
f6ac5f3d
PA
1503int
1504record_btrace_target::remove_breakpoint (struct gdbarch *gdbarch,
1505 struct bp_target_info *bp_tgt,
1506 enum remove_bp_reason reason)
633785ff 1507{
67b5c0c1
MM
1508 const char *old;
1509 int ret;
633785ff
MM
1510
1511 /* Removing breakpoints requires accessing memory. Allow it for the
1512 duration of this function. */
67b5c0c1
MM
1513 old = replay_memory_access;
1514 replay_memory_access = replay_memory_access_read_write;
633785ff
MM
1515
1516 ret = 0;
492d29ea
PA
1517 TRY
1518 {
f6ac5f3d 1519 ret = this->beneath->remove_breakpoint (gdbarch, bp_tgt, reason);
492d29ea 1520 }
492d29ea
PA
1521 CATCH (except, RETURN_MASK_ALL)
1522 {
6c63c96a 1523 replay_memory_access = old;
492d29ea
PA
1524 throw_exception (except);
1525 }
1526 END_CATCH
6c63c96a 1527 replay_memory_access = old;
633785ff
MM
1528
1529 return ret;
1530}
1531
f6ac5f3d 1532/* The fetch_registers method of target record-btrace. */
1f3ef581 1533
f6ac5f3d
PA
1534void
1535record_btrace_target::fetch_registers (struct regcache *regcache, int regno)
1f3ef581
MM
1536{
1537 struct btrace_insn_iterator *replay;
1538 struct thread_info *tp;
1539
bcc0c096 1540 tp = find_thread_ptid (regcache_get_ptid (regcache));
1f3ef581
MM
1541 gdb_assert (tp != NULL);
1542
1543 replay = tp->btrace.replay;
aef92902 1544 if (replay != NULL && !record_btrace_generating_corefile)
1f3ef581
MM
1545 {
1546 const struct btrace_insn *insn;
1547 struct gdbarch *gdbarch;
1548 int pcreg;
1549
ac7936df 1550 gdbarch = regcache->arch ();
1f3ef581
MM
1551 pcreg = gdbarch_pc_regnum (gdbarch);
1552 if (pcreg < 0)
1553 return;
1554
1555 /* We can only provide the PC register. */
1556 if (regno >= 0 && regno != pcreg)
1557 return;
1558
1559 insn = btrace_insn_get (replay);
1560 gdb_assert (insn != NULL);
1561
1562 regcache_raw_supply (regcache, regno, &insn->pc);
1563 }
1564 else
f6ac5f3d 1565 this->beneath->fetch_registers (regcache, regno);
1f3ef581
MM
1566}
1567
f6ac5f3d 1568/* The store_registers method of target record-btrace. */
1f3ef581 1569
f6ac5f3d
PA
1570void
1571record_btrace_target::store_registers (struct regcache *regcache, int regno)
1f3ef581
MM
1572{
1573 struct target_ops *t;
1574
a52eab48 1575 if (!record_btrace_generating_corefile
f6ac5f3d 1576 && record_is_replaying (regcache_get_ptid (regcache)))
4d10e986 1577 error (_("Cannot write registers while replaying."));
1f3ef581
MM
1578
1579 gdb_assert (may_write_registers != 0);
1580
f6ac5f3d 1581 this->beneath->store_registers (regcache, regno);
1f3ef581
MM
1582}
1583
f6ac5f3d 1584/* The prepare_to_store method of target record-btrace. */
1f3ef581 1585
f6ac5f3d
PA
1586void
1587record_btrace_target::prepare_to_store (struct regcache *regcache)
1f3ef581 1588{
a52eab48 1589 if (!record_btrace_generating_corefile
f6ac5f3d 1590 && record_is_replaying (regcache_get_ptid (regcache)))
1f3ef581
MM
1591 return;
1592
f6ac5f3d 1593 this->beneath->prepare_to_store (regcache);
1f3ef581
MM
1594}
1595
0b722aec
MM
1596/* The branch trace frame cache. */
1597
1598struct btrace_frame_cache
1599{
1600 /* The thread. */
1601 struct thread_info *tp;
1602
1603 /* The frame info. */
1604 struct frame_info *frame;
1605
1606 /* The branch trace function segment. */
1607 const struct btrace_function *bfun;
1608};
1609
1610/* A struct btrace_frame_cache hash table indexed by NEXT. */
1611
1612static htab_t bfcache;
1613
1614/* hash_f for htab_create_alloc of bfcache. */
1615
1616static hashval_t
1617bfcache_hash (const void *arg)
1618{
19ba03f4
SM
1619 const struct btrace_frame_cache *cache
1620 = (const struct btrace_frame_cache *) arg;
0b722aec
MM
1621
1622 return htab_hash_pointer (cache->frame);
1623}
1624
1625/* eq_f for htab_create_alloc of bfcache. */
1626
1627static int
1628bfcache_eq (const void *arg1, const void *arg2)
1629{
19ba03f4
SM
1630 const struct btrace_frame_cache *cache1
1631 = (const struct btrace_frame_cache *) arg1;
1632 const struct btrace_frame_cache *cache2
1633 = (const struct btrace_frame_cache *) arg2;
0b722aec
MM
1634
1635 return cache1->frame == cache2->frame;
1636}
1637
1638/* Create a new btrace frame cache. */
1639
1640static struct btrace_frame_cache *
1641bfcache_new (struct frame_info *frame)
1642{
1643 struct btrace_frame_cache *cache;
1644 void **slot;
1645
1646 cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
1647 cache->frame = frame;
1648
1649 slot = htab_find_slot (bfcache, cache, INSERT);
1650 gdb_assert (*slot == NULL);
1651 *slot = cache;
1652
1653 return cache;
1654}
1655
1656/* Extract the branch trace function from a branch trace frame. */
1657
1658static const struct btrace_function *
1659btrace_get_frame_function (struct frame_info *frame)
1660{
1661 const struct btrace_frame_cache *cache;
0b722aec
MM
1662 struct btrace_frame_cache pattern;
1663 void **slot;
1664
1665 pattern.frame = frame;
1666
1667 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1668 if (slot == NULL)
1669 return NULL;
1670
19ba03f4 1671 cache = (const struct btrace_frame_cache *) *slot;
0b722aec
MM
1672 return cache->bfun;
1673}
1674
cecac1ab
MM
1675/* Implement stop_reason method for record_btrace_frame_unwind. */
1676
1677static enum unwind_stop_reason
1678record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1679 void **this_cache)
1680{
0b722aec
MM
1681 const struct btrace_frame_cache *cache;
1682 const struct btrace_function *bfun;
1683
19ba03f4 1684 cache = (const struct btrace_frame_cache *) *this_cache;
0b722aec
MM
1685 bfun = cache->bfun;
1686 gdb_assert (bfun != NULL);
1687
42bfe59e 1688 if (bfun->up == 0)
0b722aec
MM
1689 return UNWIND_UNAVAILABLE;
1690
1691 return UNWIND_NO_REASON;
cecac1ab
MM
1692}
1693
1694/* Implement this_id method for record_btrace_frame_unwind. */
1695
1696static void
1697record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
1698 struct frame_id *this_id)
1699{
0b722aec
MM
1700 const struct btrace_frame_cache *cache;
1701 const struct btrace_function *bfun;
4aeb0dfc 1702 struct btrace_call_iterator it;
0b722aec
MM
1703 CORE_ADDR code, special;
1704
19ba03f4 1705 cache = (const struct btrace_frame_cache *) *this_cache;
0b722aec
MM
1706
1707 bfun = cache->bfun;
1708 gdb_assert (bfun != NULL);
1709
4aeb0dfc
TW
1710 while (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->prev) != 0)
1711 bfun = btrace_call_get (&it);
0b722aec
MM
1712
1713 code = get_frame_func (this_frame);
1714 special = bfun->number;
1715
1716 *this_id = frame_id_build_unavailable_stack_special (code, special);
1717
1718 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1719 btrace_get_bfun_name (cache->bfun),
1720 core_addr_to_string_nz (this_id->code_addr),
1721 core_addr_to_string_nz (this_id->special_addr));
cecac1ab
MM
1722}
1723
1724/* Implement prev_register method for record_btrace_frame_unwind. */
1725
1726static struct value *
1727record_btrace_frame_prev_register (struct frame_info *this_frame,
1728 void **this_cache,
1729 int regnum)
1730{
0b722aec
MM
1731 const struct btrace_frame_cache *cache;
1732 const struct btrace_function *bfun, *caller;
42bfe59e 1733 struct btrace_call_iterator it;
0b722aec
MM
1734 struct gdbarch *gdbarch;
1735 CORE_ADDR pc;
1736 int pcreg;
1737
1738 gdbarch = get_frame_arch (this_frame);
1739 pcreg = gdbarch_pc_regnum (gdbarch);
1740 if (pcreg < 0 || regnum != pcreg)
1741 throw_error (NOT_AVAILABLE_ERROR,
1742 _("Registers are not available in btrace record history"));
1743
19ba03f4 1744 cache = (const struct btrace_frame_cache *) *this_cache;
0b722aec
MM
1745 bfun = cache->bfun;
1746 gdb_assert (bfun != NULL);
1747
42bfe59e 1748 if (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->up) == 0)
0b722aec
MM
1749 throw_error (NOT_AVAILABLE_ERROR,
1750 _("No caller in btrace record history"));
1751
42bfe59e
TW
1752 caller = btrace_call_get (&it);
1753
0b722aec 1754 if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
0860c437 1755 pc = caller->insn.front ().pc;
0b722aec
MM
1756 else
1757 {
0860c437 1758 pc = caller->insn.back ().pc;
0b722aec
MM
1759 pc += gdb_insn_length (gdbarch, pc);
1760 }
1761
1762 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1763 btrace_get_bfun_name (bfun), bfun->level,
1764 core_addr_to_string_nz (pc));
1765
1766 return frame_unwind_got_address (this_frame, regnum, pc);
cecac1ab
MM
1767}
1768
1769/* Implement sniffer method for record_btrace_frame_unwind. */
1770
1771static int
1772record_btrace_frame_sniffer (const struct frame_unwind *self,
1773 struct frame_info *this_frame,
1774 void **this_cache)
1775{
0b722aec
MM
1776 const struct btrace_function *bfun;
1777 struct btrace_frame_cache *cache;
cecac1ab 1778 struct thread_info *tp;
0b722aec 1779 struct frame_info *next;
cecac1ab
MM
1780
1781 /* THIS_FRAME does not contain a reference to its thread. */
1782 tp = find_thread_ptid (inferior_ptid);
1783 gdb_assert (tp != NULL);
1784
0b722aec
MM
1785 bfun = NULL;
1786 next = get_next_frame (this_frame);
1787 if (next == NULL)
1788 {
1789 const struct btrace_insn_iterator *replay;
1790
1791 replay = tp->btrace.replay;
1792 if (replay != NULL)
08c3f6d2 1793 bfun = &replay->btinfo->functions[replay->call_index];
0b722aec
MM
1794 }
1795 else
1796 {
1797 const struct btrace_function *callee;
42bfe59e 1798 struct btrace_call_iterator it;
0b722aec
MM
1799
1800 callee = btrace_get_frame_function (next);
42bfe59e
TW
1801 if (callee == NULL || (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
1802 return 0;
1803
1804 if (btrace_find_call_by_number (&it, &tp->btrace, callee->up) == 0)
1805 return 0;
1806
1807 bfun = btrace_call_get (&it);
0b722aec
MM
1808 }
1809
1810 if (bfun == NULL)
1811 return 0;
1812
1813 DEBUG ("[frame] sniffed frame for %s on level %d",
1814 btrace_get_bfun_name (bfun), bfun->level);
1815
1816 /* This is our frame. Initialize the frame cache. */
1817 cache = bfcache_new (this_frame);
1818 cache->tp = tp;
1819 cache->bfun = bfun;
1820
1821 *this_cache = cache;
1822 return 1;
1823}
1824
1825/* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1826
1827static int
1828record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
1829 struct frame_info *this_frame,
1830 void **this_cache)
1831{
1832 const struct btrace_function *bfun, *callee;
1833 struct btrace_frame_cache *cache;
42bfe59e 1834 struct btrace_call_iterator it;
0b722aec 1835 struct frame_info *next;
42bfe59e 1836 struct thread_info *tinfo;
0b722aec
MM
1837
1838 next = get_next_frame (this_frame);
1839 if (next == NULL)
1840 return 0;
1841
1842 callee = btrace_get_frame_function (next);
1843 if (callee == NULL)
1844 return 0;
1845
1846 if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1847 return 0;
1848
42bfe59e
TW
1849 tinfo = find_thread_ptid (inferior_ptid);
1850 if (btrace_find_call_by_number (&it, &tinfo->btrace, callee->up) == 0)
0b722aec
MM
1851 return 0;
1852
42bfe59e
TW
1853 bfun = btrace_call_get (&it);
1854
0b722aec
MM
1855 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1856 btrace_get_bfun_name (bfun), bfun->level);
1857
1858 /* This is our frame. Initialize the frame cache. */
1859 cache = bfcache_new (this_frame);
42bfe59e 1860 cache->tp = tinfo;
0b722aec
MM
1861 cache->bfun = bfun;
1862
1863 *this_cache = cache;
1864 return 1;
1865}
1866
1867static void
1868record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
1869{
1870 struct btrace_frame_cache *cache;
1871 void **slot;
1872
19ba03f4 1873 cache = (struct btrace_frame_cache *) this_cache;
0b722aec
MM
1874
1875 slot = htab_find_slot (bfcache, cache, NO_INSERT);
1876 gdb_assert (slot != NULL);
1877
1878 htab_remove_elt (bfcache, cache);
cecac1ab
MM
1879}
1880
1881/* btrace recording does not store previous memory contents, nor the stack
 1882 frame contents.  Any unwinding would return erroneous results as the stack
 1883 contents no longer match the changed PC value restored from history.
1884 Therefore this unwinder reports any possibly unwound registers as
1885 <unavailable>. */
1886
0b722aec 1887const struct frame_unwind record_btrace_frame_unwind =
cecac1ab
MM
1888{
1889 NORMAL_FRAME,
1890 record_btrace_frame_unwind_stop_reason,
1891 record_btrace_frame_this_id,
1892 record_btrace_frame_prev_register,
1893 NULL,
0b722aec
MM
1894 record_btrace_frame_sniffer,
1895 record_btrace_frame_dealloc_cache
1896};
1897
1898const struct frame_unwind record_btrace_tailcall_frame_unwind =
1899{
1900 TAILCALL_FRAME,
1901 record_btrace_frame_unwind_stop_reason,
1902 record_btrace_frame_this_id,
1903 record_btrace_frame_prev_register,
1904 NULL,
1905 record_btrace_tailcall_frame_sniffer,
1906 record_btrace_frame_dealloc_cache
cecac1ab 1907};
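/* A note on the two initializers above: the entries correspond, in order, to
   the type, stop_reason, this_id, prev_register, unwind_data, sniffer and
   dealloc_cache members of struct frame_unwind (field names assumed from the
   frame-unwind.h of this era); the NULL slot is the unused unwind_data.  */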
b2f4cfde 1908
f6ac5f3d 1909/* Implement the get_unwinder method. */
ac01945b 1910
f6ac5f3d
PA
1911const struct frame_unwind *
1912record_btrace_target::get_unwinder ()
ac01945b
TT
1913{
1914 return &record_btrace_frame_unwind;
1915}
1916
f6ac5f3d 1917/* Implement the get_tailcall_unwinder method. */
ac01945b 1918
f6ac5f3d
PA
1919const struct frame_unwind *
1920record_btrace_target::get_tailcall_unwinder ()
ac01945b
TT
1921{
1922 return &record_btrace_tailcall_frame_unwind;
1923}
1924
987e68b1
MM
1925/* Return a human-readable string for FLAG. */
1926
1927static const char *
1928btrace_thread_flag_to_str (enum btrace_thread_flag flag)
1929{
1930 switch (flag)
1931 {
1932 case BTHR_STEP:
1933 return "step";
1934
1935 case BTHR_RSTEP:
1936 return "reverse-step";
1937
1938 case BTHR_CONT:
1939 return "cont";
1940
1941 case BTHR_RCONT:
1942 return "reverse-cont";
1943
1944 case BTHR_STOP:
1945 return "stop";
1946 }
1947
1948 return "<invalid>";
1949}
1950
52834460
MM
1951/* Indicate that TP should be resumed according to FLAG. */
1952
1953static void
1954record_btrace_resume_thread (struct thread_info *tp,
1955 enum btrace_thread_flag flag)
1956{
1957 struct btrace_thread_info *btinfo;
1958
43792cf0 1959 DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp),
987e68b1 1960 target_pid_to_str (tp->ptid), flag, btrace_thread_flag_to_str (flag));
52834460
MM
1961
1962 btinfo = &tp->btrace;
1963
52834460 1964 /* Fetch the latest branch trace. */
4a4495d6 1965 btrace_fetch (tp, record_btrace_get_cpu ());
52834460 1966
0ca912df
MM
1967 /* A resume request overwrites a preceding resume or stop request. */
1968 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
52834460
MM
1969 btinfo->flags |= flag;
1970}
1971
ec71cc2f
MM
1972/* Get the current frame for TP. */
1973
1974static struct frame_info *
1975get_thread_current_frame (struct thread_info *tp)
1976{
1977 struct frame_info *frame;
1978 ptid_t old_inferior_ptid;
1979 int executing;
1980
1981 /* Set INFERIOR_PTID, which is implicitly used by get_current_frame. */
1982 old_inferior_ptid = inferior_ptid;
1983 inferior_ptid = tp->ptid;
1984
1985 /* Clear the executing flag to allow changes to the current frame.
1986 We are not actually running, yet. We just started a reverse execution
1987 command or a record goto command.
1988 For the latter, EXECUTING is false and this has no effect.
f6ac5f3d 1989 For the former, EXECUTING is true and we're in wait, about to
ec71cc2f
MM
1990 move the thread. Since we need to recompute the stack, we temporarily
 1991 set EXECUTING to false.  */
1992 executing = is_executing (inferior_ptid);
1993 set_executing (inferior_ptid, 0);
1994
1995 frame = NULL;
1996 TRY
1997 {
1998 frame = get_current_frame ();
1999 }
2000 CATCH (except, RETURN_MASK_ALL)
2001 {
2002 /* Restore the previous execution state. */
2003 set_executing (inferior_ptid, executing);
2004
2005 /* Restore the previous inferior_ptid. */
2006 inferior_ptid = old_inferior_ptid;
2007
2008 throw_exception (except);
2009 }
2010 END_CATCH
2011
2012 /* Restore the previous execution state. */
2013 set_executing (inferior_ptid, executing);
2014
2015 /* Restore the previous inferior_ptid. */
2016 inferior_ptid = old_inferior_ptid;
2017
2018 return frame;
2019}
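/* Note that the TRY/CATCH above guarantees that both the executing flag and
   inferior_ptid are restored even when get_current_frame throws, so callers
   never observe the temporary state.  */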
2020
52834460
MM
2021/* Start replaying a thread. */
2022
2023static struct btrace_insn_iterator *
2024record_btrace_start_replaying (struct thread_info *tp)
2025{
52834460
MM
2026 struct btrace_insn_iterator *replay;
2027 struct btrace_thread_info *btinfo;
52834460
MM
2028
2029 btinfo = &tp->btrace;
2030 replay = NULL;
2031
2032 /* We can't start replaying without trace. */
b54b03bd 2033 if (btinfo->functions.empty ())
52834460
MM
2034 return NULL;
2035
52834460
MM
 2036 /* GDB stores the current frame_id when stepping in order to detect steps
2037 into subroutines.
2038 Since frames are computed differently when we're replaying, we need to
2039 recompute those stored frames and fix them up so we can still detect
2040 subroutines after we started replaying. */
492d29ea 2041 TRY
52834460
MM
2042 {
2043 struct frame_info *frame;
2044 struct frame_id frame_id;
2045 int upd_step_frame_id, upd_step_stack_frame_id;
2046
2047 /* The current frame without replaying - computed via normal unwind. */
ec71cc2f 2048 frame = get_thread_current_frame (tp);
52834460
MM
2049 frame_id = get_frame_id (frame);
2050
2051 /* Check if we need to update any stepping-related frame id's. */
2052 upd_step_frame_id = frame_id_eq (frame_id,
2053 tp->control.step_frame_id);
2054 upd_step_stack_frame_id = frame_id_eq (frame_id,
2055 tp->control.step_stack_frame_id);
2056
2057 /* We start replaying at the end of the branch trace. This corresponds
2058 to the current instruction. */
8d749320 2059 replay = XNEW (struct btrace_insn_iterator);
52834460
MM
2060 btrace_insn_end (replay, btinfo);
2061
31fd9caa
MM
2062 /* Skip gaps at the end of the trace. */
2063 while (btrace_insn_get (replay) == NULL)
2064 {
2065 unsigned int steps;
2066
2067 steps = btrace_insn_prev (replay, 1);
2068 if (steps == 0)
2069 error (_("No trace."));
2070 }
2071
52834460
MM
2072 /* We're not replaying, yet. */
2073 gdb_assert (btinfo->replay == NULL);
2074 btinfo->replay = replay;
2075
2076 /* Make sure we're not using any stale registers. */
2077 registers_changed_ptid (tp->ptid);
2078
2079 /* The current frame with replaying - computed via btrace unwind. */
ec71cc2f 2080 frame = get_thread_current_frame (tp);
52834460
MM
2081 frame_id = get_frame_id (frame);
2082
2083 /* Replace stepping related frames where necessary. */
2084 if (upd_step_frame_id)
2085 tp->control.step_frame_id = frame_id;
2086 if (upd_step_stack_frame_id)
2087 tp->control.step_stack_frame_id = frame_id;
2088 }
492d29ea 2089 CATCH (except, RETURN_MASK_ALL)
52834460
MM
2090 {
2091 xfree (btinfo->replay);
2092 btinfo->replay = NULL;
2093
2094 registers_changed_ptid (tp->ptid);
2095
2096 throw_exception (except);
2097 }
492d29ea 2098 END_CATCH
52834460
MM
2099
2100 return replay;
2101}
2102
2103/* Stop replaying a thread. */
2104
2105static void
2106record_btrace_stop_replaying (struct thread_info *tp)
2107{
2108 struct btrace_thread_info *btinfo;
2109
2110 btinfo = &tp->btrace;
2111
2112 xfree (btinfo->replay);
2113 btinfo->replay = NULL;
2114
2115 /* Make sure we're not leaving any stale registers. */
2116 registers_changed_ptid (tp->ptid);
2117}
2118
e3cfc1c7
MM
2119/* Stop replaying TP if it is at the end of its execution history. */
2120
2121static void
2122record_btrace_stop_replaying_at_end (struct thread_info *tp)
2123{
2124 struct btrace_insn_iterator *replay, end;
2125 struct btrace_thread_info *btinfo;
2126
2127 btinfo = &tp->btrace;
2128 replay = btinfo->replay;
2129
2130 if (replay == NULL)
2131 return;
2132
2133 btrace_insn_end (&end, btinfo);
2134
2135 if (btrace_insn_cmp (replay, &end) == 0)
2136 record_btrace_stop_replaying (tp);
2137}
2138
f6ac5f3d 2139/* The resume method of target record-btrace. */
b2f4cfde 2140
f6ac5f3d
PA
2141void
2142record_btrace_target::resume (ptid_t ptid, int step, enum gdb_signal signal)
b2f4cfde 2143{
0ca912df 2144 struct thread_info *tp;
d2939ba2 2145 enum btrace_thread_flag flag, cflag;
52834460 2146
987e68b1 2147 DEBUG ("resume %s: %s%s", target_pid_to_str (ptid),
f6ac5f3d 2148 ::execution_direction == EXEC_REVERSE ? "reverse-" : "",
987e68b1 2149 step ? "step" : "cont");
52834460 2150
0ca912df
MM
2151 /* Store the execution direction of the last resume.
2152
f6ac5f3d 2153 If there is more than one resume call, we have to rely on infrun
0ca912df 2154 to not change the execution direction in-between. */
f6ac5f3d 2155 record_btrace_resume_exec_dir = ::execution_direction;
70ad5bff 2156
0ca912df 2157 /* As long as we're not replaying, just forward the request.
52834460 2158
0ca912df
MM
2159 For non-stop targets this means that no thread is replaying. In order to
2160 make progress, we may need to explicitly move replaying threads to the end
2161 of their execution history. */
f6ac5f3d
PA
2162 if ((::execution_direction != EXEC_REVERSE)
2163 && !record_is_replaying (minus_one_ptid))
b2f4cfde 2164 {
f6ac5f3d 2165 this->beneath->resume (ptid, step, signal);
04c4fe8c 2166 return;
b2f4cfde
MM
2167 }
2168
52834460 2169 /* Compute the btrace thread flag for the requested move. */
f6ac5f3d 2170 if (::execution_direction == EXEC_REVERSE)
d2939ba2
MM
2171 {
2172 flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;
2173 cflag = BTHR_RCONT;
2174 }
52834460 2175 else
d2939ba2
MM
2176 {
2177 flag = step == 0 ? BTHR_CONT : BTHR_STEP;
2178 cflag = BTHR_CONT;
2179 }
52834460 2180
52834460 2181 /* We just indicate the resume intent here. The actual stepping happens in
d2939ba2
MM
2182 record_btrace_wait below.
2183
2184 For all-stop targets, we only step INFERIOR_PTID and continue others. */
2185 if (!target_is_non_stop_p ())
2186 {
2187 gdb_assert (ptid_match (inferior_ptid, ptid));
2188
2189 ALL_NON_EXITED_THREADS (tp)
2190 if (ptid_match (tp->ptid, ptid))
2191 {
2192 if (ptid_match (tp->ptid, inferior_ptid))
2193 record_btrace_resume_thread (tp, flag);
2194 else
2195 record_btrace_resume_thread (tp, cflag);
2196 }
2197 }
2198 else
2199 {
2200 ALL_NON_EXITED_THREADS (tp)
2201 if (ptid_match (tp->ptid, ptid))
2202 record_btrace_resume_thread (tp, flag);
2203 }
70ad5bff
MM
2204
2205 /* Async support. */
2206 if (target_can_async_p ())
2207 {
6a3753b3 2208 target_async (1);
70ad5bff
MM
2209 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2210 }
52834460
MM
2211}
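/* For example (assuming an all-stop session), a "reverse-stepi" on the
   current thread resolves to BTHR_RSTEP for that thread and BTHR_RCONT for
   every other resumed thread; the flags are only acted upon later, in
   record_btrace_target::wait.  */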
2212
f6ac5f3d 2213/* The commit_resume method of target record-btrace. */
85ad3aaf 2214
f6ac5f3d
PA
2215void
2216record_btrace_target::commit_resume ()
85ad3aaf 2217{
f6ac5f3d
PA
2218 if ((::execution_direction != EXEC_REVERSE)
2219 && !record_is_replaying (minus_one_ptid))
2220 beneath->commit_resume ();
85ad3aaf
PA
2221}
2222
987e68b1
MM
2223/* Cancel resuming TP. */
2224
2225static void
2226record_btrace_cancel_resume (struct thread_info *tp)
2227{
2228 enum btrace_thread_flag flags;
2229
2230 flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
2231 if (flags == 0)
2232 return;
2233
43792cf0
PA
2234 DEBUG ("cancel resume thread %s (%s): %x (%s)",
2235 print_thread_id (tp),
987e68b1
MM
2236 target_pid_to_str (tp->ptid), flags,
2237 btrace_thread_flag_to_str (flags));
2238
2239 tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
e3cfc1c7 2240 record_btrace_stop_replaying_at_end (tp);
52834460
MM
2241}
2242
2243/* Return a target_waitstatus indicating that we ran out of history. */
2244
2245static struct target_waitstatus
2246btrace_step_no_history (void)
2247{
2248 struct target_waitstatus status;
2249
2250 status.kind = TARGET_WAITKIND_NO_HISTORY;
2251
2252 return status;
2253}
2254
2255/* Return a target_waitstatus indicating that a step finished. */
2256
2257static struct target_waitstatus
2258btrace_step_stopped (void)
2259{
2260 struct target_waitstatus status;
2261
2262 status.kind = TARGET_WAITKIND_STOPPED;
2263 status.value.sig = GDB_SIGNAL_TRAP;
2264
2265 return status;
2266}
2267
6e4879f0
MM
2268/* Return a target_waitstatus indicating that a thread was stopped as
2269 requested. */
2270
2271static struct target_waitstatus
2272btrace_step_stopped_on_request (void)
2273{
2274 struct target_waitstatus status;
2275
2276 status.kind = TARGET_WAITKIND_STOPPED;
2277 status.value.sig = GDB_SIGNAL_0;
2278
2279 return status;
2280}
2281
d825d248
MM
2282/* Return a target_waitstatus indicating a spurious stop. */
2283
2284static struct target_waitstatus
2285btrace_step_spurious (void)
2286{
2287 struct target_waitstatus status;
2288
2289 status.kind = TARGET_WAITKIND_SPURIOUS;
2290
2291 return status;
2292}
2293
e3cfc1c7
MM
2294/* Return a target_waitstatus indicating that the thread was not resumed. */
2295
2296static struct target_waitstatus
2297btrace_step_no_resumed (void)
2298{
2299 struct target_waitstatus status;
2300
2301 status.kind = TARGET_WAITKIND_NO_RESUMED;
2302
2303 return status;
2304}
2305
2306/* Return a target_waitstatus indicating that we should wait again. */
2307
2308static struct target_waitstatus
2309btrace_step_again (void)
2310{
2311 struct target_waitstatus status;
2312
2313 status.kind = TARGET_WAITKIND_IGNORE;
2314
2315 return status;
2316}
2317
52834460
MM
2318/* Clear the record histories. */
2319
2320static void
2321record_btrace_clear_histories (struct btrace_thread_info *btinfo)
2322{
2323 xfree (btinfo->insn_history);
2324 xfree (btinfo->call_history);
2325
2326 btinfo->insn_history = NULL;
2327 btinfo->call_history = NULL;
2328}
2329
3c615f99
MM
2330/* Check whether TP's current replay position is at a breakpoint. */
2331
2332static int
2333record_btrace_replay_at_breakpoint (struct thread_info *tp)
2334{
2335 struct btrace_insn_iterator *replay;
2336 struct btrace_thread_info *btinfo;
2337 const struct btrace_insn *insn;
2338 struct inferior *inf;
2339
2340 btinfo = &tp->btrace;
2341 replay = btinfo->replay;
2342
2343 if (replay == NULL)
2344 return 0;
2345
2346 insn = btrace_insn_get (replay);
2347 if (insn == NULL)
2348 return 0;
2349
2350 inf = find_inferior_ptid (tp->ptid);
2351 if (inf == NULL)
2352 return 0;
2353
2354 return record_check_stopped_by_breakpoint (inf->aspace, insn->pc,
2355 &btinfo->stop_reason);
2356}
2357
d825d248 2358/* Step one instruction in forward direction. */
52834460
MM
2359
2360static struct target_waitstatus
d825d248 2361record_btrace_single_step_forward (struct thread_info *tp)
52834460 2362{
b61ce85c 2363 struct btrace_insn_iterator *replay, end, start;
52834460 2364 struct btrace_thread_info *btinfo;
52834460 2365
d825d248
MM
2366 btinfo = &tp->btrace;
2367 replay = btinfo->replay;
2368
2369 /* We're done if we're not replaying. */
2370 if (replay == NULL)
2371 return btrace_step_no_history ();
2372
011c71b6
MM
2373 /* Check if we're stepping a breakpoint. */
2374 if (record_btrace_replay_at_breakpoint (tp))
2375 return btrace_step_stopped ();
2376
b61ce85c
MM
2377 /* Skip gaps during replay. If we end up at a gap (at the end of the trace),
2378 jump back to the instruction at which we started. */
2379 start = *replay;
d825d248
MM
2380 do
2381 {
2382 unsigned int steps;
2383
e3cfc1c7
MM
2384 /* We will bail out here if we continue stepping after reaching the end
2385 of the execution history. */
d825d248
MM
2386 steps = btrace_insn_next (replay, 1);
2387 if (steps == 0)
b61ce85c
MM
2388 {
2389 *replay = start;
2390 return btrace_step_no_history ();
2391 }
d825d248
MM
2392 }
2393 while (btrace_insn_get (replay) == NULL);
2394
2395 /* Determine the end of the instruction trace. */
2396 btrace_insn_end (&end, btinfo);
2397
e3cfc1c7
MM
2398 /* The execution trace contains (and ends with) the current instruction.
2399 This instruction has not been executed, yet, so the trace really ends
2400 one instruction earlier. */
d825d248 2401 if (btrace_insn_cmp (replay, &end) == 0)
e3cfc1c7 2402 return btrace_step_no_history ();
d825d248
MM
2403
2404 return btrace_step_spurious ();
2405}
2406
2407/* Step one instruction in backward direction. */
2408
2409static struct target_waitstatus
2410record_btrace_single_step_backward (struct thread_info *tp)
2411{
b61ce85c 2412 struct btrace_insn_iterator *replay, start;
d825d248 2413 struct btrace_thread_info *btinfo;
e59fa00f 2414
52834460
MM
2415 btinfo = &tp->btrace;
2416 replay = btinfo->replay;
2417
d825d248
MM
2418 /* Start replaying if we're not already doing so. */
2419 if (replay == NULL)
2420 replay = record_btrace_start_replaying (tp);
2421
2422 /* If we can't step any further, we reached the end of the history.
b61ce85c
MM
2423 Skip gaps during replay. If we end up at a gap (at the beginning of
2424 the trace), jump back to the instruction at which we started. */
2425 start = *replay;
d825d248
MM
2426 do
2427 {
2428 unsigned int steps;
2429
2430 steps = btrace_insn_prev (replay, 1);
2431 if (steps == 0)
b61ce85c
MM
2432 {
2433 *replay = start;
2434 return btrace_step_no_history ();
2435 }
d825d248
MM
2436 }
2437 while (btrace_insn_get (replay) == NULL);
2438
011c71b6
MM
2439 /* Check if we're stepping a breakpoint.
2440
2441 For reverse-stepping, this check is after the step. There is logic in
2442 infrun.c that handles reverse-stepping separately. See, for example,
2443 proceed and adjust_pc_after_break.
2444
2445 This code assumes that for reverse-stepping, PC points to the last
2446 de-executed instruction, whereas for forward-stepping PC points to the
2447 next to-be-executed instruction. */
2448 if (record_btrace_replay_at_breakpoint (tp))
2449 return btrace_step_stopped ();
2450
d825d248
MM
2451 return btrace_step_spurious ();
2452}
2453
2454/* Step a single thread. */
2455
2456static struct target_waitstatus
2457record_btrace_step_thread (struct thread_info *tp)
2458{
2459 struct btrace_thread_info *btinfo;
2460 struct target_waitstatus status;
2461 enum btrace_thread_flag flags;
2462
2463 btinfo = &tp->btrace;
2464
6e4879f0
MM
2465 flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
2466 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
52834460 2467
43792cf0 2468 DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp),
987e68b1
MM
2469 target_pid_to_str (tp->ptid), flags,
2470 btrace_thread_flag_to_str (flags));
52834460 2471
6e4879f0
MM
2472 /* We can't step without an execution history. */
2473 if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
2474 return btrace_step_no_history ();
2475
52834460
MM
2476 switch (flags)
2477 {
2478 default:
2479 internal_error (__FILE__, __LINE__, _("invalid stepping type."));
2480
6e4879f0
MM
2481 case BTHR_STOP:
2482 return btrace_step_stopped_on_request ();
2483
52834460 2484 case BTHR_STEP:
d825d248
MM
2485 status = record_btrace_single_step_forward (tp);
2486 if (status.kind != TARGET_WAITKIND_SPURIOUS)
e3cfc1c7 2487 break;
52834460
MM
2488
2489 return btrace_step_stopped ();
2490
2491 case BTHR_RSTEP:
d825d248
MM
2492 status = record_btrace_single_step_backward (tp);
2493 if (status.kind != TARGET_WAITKIND_SPURIOUS)
e3cfc1c7 2494 break;
52834460
MM
2495
2496 return btrace_step_stopped ();
2497
2498 case BTHR_CONT:
e3cfc1c7
MM
2499 status = record_btrace_single_step_forward (tp);
2500 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2501 break;
52834460 2502
e3cfc1c7
MM
2503 btinfo->flags |= flags;
2504 return btrace_step_again ();
52834460
MM
2505
2506 case BTHR_RCONT:
e3cfc1c7
MM
2507 status = record_btrace_single_step_backward (tp);
2508 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2509 break;
52834460 2510
e3cfc1c7
MM
2511 btinfo->flags |= flags;
2512 return btrace_step_again ();
2513 }
d825d248 2514
f6ac5f3d 2515 /* We keep threads moving at the end of their execution history. The wait
e3cfc1c7
MM
2516 method will stop the thread for whom the event is reported. */
2517 if (status.kind == TARGET_WAITKIND_NO_HISTORY)
2518 btinfo->flags |= flags;
52834460 2519
e3cfc1c7 2520 return status;
b2f4cfde
MM
2521}
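/* For BTHR_CONT and BTHR_RCONT above, the flag is put back and
   TARGET_WAITKIND_IGNORE is returned, so the caller keeps stepping the thread
   one instruction at a time until it hits a breakpoint, a stop request, or
   the end of its execution history.  */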
2522
e3cfc1c7
MM
2523/* A vector of threads. */
2524
2525typedef struct thread_info * tp_t;
2526DEF_VEC_P (tp_t);
2527
a6b5be76
MM
2528/* Announce further events if necessary. */
2529
2530static void
53127008
SM
2531record_btrace_maybe_mark_async_event
2532 (const std::vector<thread_info *> &moving,
2533 const std::vector<thread_info *> &no_history)
a6b5be76 2534{
53127008
SM
2535 bool more_moving = !moving.empty ();
 2536 bool more_no_history = !no_history.empty ();
a6b5be76
MM
2537
2538 if (!more_moving && !more_no_history)
2539 return;
2540
2541 if (more_moving)
2542 DEBUG ("movers pending");
2543
2544 if (more_no_history)
2545 DEBUG ("no-history pending");
2546
2547 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2548}
2549
f6ac5f3d 2550/* The wait method of target record-btrace. */
b2f4cfde 2551
f6ac5f3d
PA
2552ptid_t
2553record_btrace_target::wait (ptid_t ptid, struct target_waitstatus *status,
2554 int options)
b2f4cfde 2555{
53127008
SM
2556 std::vector<thread_info *> moving;
2557 std::vector<thread_info *> no_history;
52834460
MM
2558
2559 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);
2560
b2f4cfde 2561 /* As long as we're not replaying, just forward the request. */
f6ac5f3d
PA
2562 if ((::execution_direction != EXEC_REVERSE)
2563 && !record_is_replaying (minus_one_ptid))
b2f4cfde 2564 {
f6ac5f3d 2565 return this->beneath->wait (ptid, status, options);
b2f4cfde
MM
2566 }
2567
e3cfc1c7 2568 /* Keep a work list of moving threads. */
53127008
SM
2569 {
2570 thread_info *tp;
2571
2572 ALL_NON_EXITED_THREADS (tp)
2573 {
2574 if (ptid_match (tp->ptid, ptid)
2575 && ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0))
2576 moving.push_back (tp);
2577 }
2578 }
e3cfc1c7 2579
53127008 2580 if (moving.empty ())
52834460 2581 {
e3cfc1c7 2582 *status = btrace_step_no_resumed ();
52834460 2583
e3cfc1c7 2584 DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid),
23fdd69e 2585 target_waitstatus_to_string (status).c_str ());
e3cfc1c7 2586
e3cfc1c7 2587 return null_ptid;
52834460
MM
2588 }
2589
e3cfc1c7
MM
2590 /* Step moving threads one by one, one step each, until either one thread
2591 reports an event or we run out of threads to step.
2592
2593 When stepping more than one thread, chances are that some threads reach
2594 the end of their execution history earlier than others. If we reported
2595 this immediately, all-stop on top of non-stop would stop all threads and
2596 resume the same threads next time. And we would report the same thread
2597 having reached the end of its execution history again.
2598
2599 In the worst case, this would starve the other threads. But even if other
2600 threads would be allowed to make progress, this would result in far too
2601 many intermediate stops.
2602
2603 We therefore delay the reporting of "no execution history" until we have
2604 nothing else to report. By this time, all threads should have moved to
2605 either the beginning or the end of their execution history. There will
2606 be a single user-visible stop. */
53127008
SM
2607 struct thread_info *eventing = NULL;
2608 while ((eventing == NULL) && !moving.empty ())
e3cfc1c7 2609 {
53127008 2610 for (unsigned int ix = 0; eventing == NULL && ix < moving.size ();)
e3cfc1c7 2611 {
53127008
SM
2612 thread_info *tp = moving[ix];
2613
e3cfc1c7
MM
2614 *status = record_btrace_step_thread (tp);
2615
2616 switch (status->kind)
2617 {
2618 case TARGET_WAITKIND_IGNORE:
2619 ix++;
2620 break;
2621
2622 case TARGET_WAITKIND_NO_HISTORY:
53127008 2623 no_history.push_back (ordered_remove (moving, ix));
e3cfc1c7
MM
2624 break;
2625
2626 default:
53127008 2627 eventing = unordered_remove (moving, ix);
e3cfc1c7
MM
2628 break;
2629 }
2630 }
2631 }
2632
2633 if (eventing == NULL)
2634 {
2635 /* We started with at least one moving thread. This thread must have
2636 either stopped or reached the end of its execution history.
2637
2638 In the former case, EVENTING must not be NULL.
2639 In the latter case, NO_HISTORY must not be empty. */
53127008 2640 gdb_assert (!no_history.empty ());
e3cfc1c7
MM
2641
2642 /* We kept threads moving at the end of their execution history. Stop
2643 EVENTING now that we are going to report its stop. */
53127008 2644 eventing = unordered_remove (no_history, 0);
e3cfc1c7
MM
2645 eventing->btrace.flags &= ~BTHR_MOVE;
2646
2647 *status = btrace_step_no_history ();
2648 }
2649
2650 gdb_assert (eventing != NULL);
2651
2652 /* We kept threads replaying at the end of their execution history. Stop
2653 replaying EVENTING now that we are going to report its stop. */
2654 record_btrace_stop_replaying_at_end (eventing);
52834460
MM
2655
2656 /* Stop all other threads. */
5953356c 2657 if (!target_is_non_stop_p ())
53127008
SM
2658 {
2659 thread_info *tp;
2660
2661 ALL_NON_EXITED_THREADS (tp)
2662 record_btrace_cancel_resume (tp);
2663 }
52834460 2664
a6b5be76
MM
2665 /* In async mode, we need to announce further events. */
2666 if (target_is_async_p ())
2667 record_btrace_maybe_mark_async_event (moving, no_history);
2668
52834460 2669 /* Start record histories anew from the current position. */
e3cfc1c7 2670 record_btrace_clear_histories (&eventing->btrace);
52834460
MM
2671
2672 /* We moved the replay position but did not update registers. */
e3cfc1c7
MM
2673 registers_changed_ptid (eventing->ptid);
2674
43792cf0
PA
2675 DEBUG ("wait ended by thread %s (%s): %s",
2676 print_thread_id (eventing),
e3cfc1c7 2677 target_pid_to_str (eventing->ptid),
23fdd69e 2678 target_waitstatus_to_string (status).c_str ());
52834460 2679
e3cfc1c7 2680 return eventing->ptid;
52834460
MM
2681}
2682
f6ac5f3d 2683/* The stop method of target record-btrace. */
6e4879f0 2684
f6ac5f3d
PA
2685void
2686record_btrace_target::stop (ptid_t ptid)
6e4879f0
MM
2687{
2688 DEBUG ("stop %s", target_pid_to_str (ptid));
2689
2690 /* As long as we're not replaying, just forward the request. */
f6ac5f3d
PA
2691 if ((::execution_direction != EXEC_REVERSE)
2692 && !record_is_replaying (minus_one_ptid))
6e4879f0 2693 {
f6ac5f3d 2694 this->beneath->stop (ptid);
6e4879f0
MM
2695 }
2696 else
2697 {
2698 struct thread_info *tp;
2699
2700 ALL_NON_EXITED_THREADS (tp)
2701 if (ptid_match (tp->ptid, ptid))
2702 {
2703 tp->btrace.flags &= ~BTHR_MOVE;
2704 tp->btrace.flags |= BTHR_STOP;
2705 }
2706 }
2707 }
2708
f6ac5f3d 2709/* The can_execute_reverse method of target record-btrace. */
52834460 2710
57810aa7 2711bool
f6ac5f3d 2712record_btrace_target::can_execute_reverse ()
52834460 2713{
57810aa7 2714 return true;
52834460
MM
2715}
2716
f6ac5f3d 2717/* The stopped_by_sw_breakpoint method of target record-btrace. */
52834460 2718
57810aa7 2719bool
f6ac5f3d 2720record_btrace_target::stopped_by_sw_breakpoint ()
52834460 2721{
f6ac5f3d 2722 if (record_is_replaying (minus_one_ptid))
9e8915c6
PA
2723 {
2724 struct thread_info *tp = inferior_thread ();
2725
2726 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2727 }
2728
f6ac5f3d 2729 return this->beneath->stopped_by_sw_breakpoint ();
9e8915c6
PA
2730}
2731
f6ac5f3d 2732/* The supports_stopped_by_sw_breakpoint method of target
9e8915c6
PA
2733 record-btrace. */
2734
57810aa7 2735bool
f6ac5f3d 2736record_btrace_target::supports_stopped_by_sw_breakpoint ()
9e8915c6 2737{
f6ac5f3d 2738 if (record_is_replaying (minus_one_ptid))
57810aa7 2739 return true;
9e8915c6 2740
f6ac5f3d 2741 return this->beneath->supports_stopped_by_sw_breakpoint ();
9e8915c6
PA
2742}
2743
f6ac5f3d 2744/* The stopped_by_hw_breakpoint method of target record-btrace. */
9e8915c6 2745
57810aa7 2746bool
f6ac5f3d 2747record_btrace_target::stopped_by_hw_breakpoint ()
9e8915c6 2748{
f6ac5f3d 2749 if (record_is_replaying (minus_one_ptid))
9e8915c6
PA
2750 {
2751 struct thread_info *tp = inferior_thread ();
2752
2753 return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2754 }
2755
f6ac5f3d 2756 return this->beneath->stopped_by_hw_breakpoint ();
9e8915c6
PA
2757}
2758
f6ac5f3d 2759/* The supports_stopped_by_hw_breakpoint method of target
9e8915c6
PA
2760 record-btrace. */
2761
57810aa7 2762bool
f6ac5f3d 2763record_btrace_target::supports_stopped_by_hw_breakpoint ()
9e8915c6 2764{
f6ac5f3d 2765 if (record_is_replaying (minus_one_ptid))
57810aa7 2766 return true;
52834460 2767
f6ac5f3d 2768 return this->beneath->supports_stopped_by_hw_breakpoint ();
b2f4cfde
MM
2769}
2770
f6ac5f3d 2771/* The update_thread_list method of target record-btrace. */
e2887aa3 2772
f6ac5f3d
PA
2773void
2774record_btrace_target::update_thread_list ()
e2887aa3 2775{
e8032dde 2776 /* We don't add or remove threads during replay. */
f6ac5f3d 2777 if (record_is_replaying (minus_one_ptid))
e2887aa3
MM
2778 return;
2779
2780 /* Forward the request. */
f6ac5f3d 2781 this->beneath->update_thread_list ();
e2887aa3
MM
2782}
2783
f6ac5f3d 2784/* The thread_alive method of target record-btrace. */
e2887aa3 2785
57810aa7 2786bool
f6ac5f3d 2787record_btrace_target::thread_alive (ptid_t ptid)
e2887aa3
MM
2788{
2789 /* We don't add or remove threads during replay. */
f6ac5f3d 2790 if (record_is_replaying (minus_one_ptid))
e2887aa3
MM
2791 return find_thread_ptid (ptid) != NULL;
2792
2793 /* Forward the request. */
f6ac5f3d 2794 return this->beneath->thread_alive (ptid);
e2887aa3
MM
2795}
2796
066ce621
MM
2797/* Set the replay branch trace instruction iterator. If IT is NULL, replay
2798 is stopped. */
2799
2800static void
2801record_btrace_set_replay (struct thread_info *tp,
2802 const struct btrace_insn_iterator *it)
2803{
2804 struct btrace_thread_info *btinfo;
2805
2806 btinfo = &tp->btrace;
2807
a0f1b963 2808 if (it == NULL)
52834460 2809 record_btrace_stop_replaying (tp);
066ce621
MM
2810 else
2811 {
2812 if (btinfo->replay == NULL)
52834460 2813 record_btrace_start_replaying (tp);
066ce621
MM
2814 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
2815 return;
2816
2817 *btinfo->replay = *it;
52834460 2818 registers_changed_ptid (tp->ptid);
066ce621
MM
2819 }
2820
52834460
MM
2821 /* Start anew from the new replay position. */
2822 record_btrace_clear_histories (btinfo);
485668e5
MM
2823
2824 stop_pc = regcache_read_pc (get_current_regcache ());
2825 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
066ce621
MM
2826}
2827
f6ac5f3d 2828/* The goto_record_begin method of target record-btrace. */
066ce621 2829
f6ac5f3d
PA
2830void
2831record_btrace_target::goto_record_begin ()
066ce621
MM
2832{
2833 struct thread_info *tp;
2834 struct btrace_insn_iterator begin;
2835
2836 tp = require_btrace_thread ();
2837
2838 btrace_insn_begin (&begin, &tp->btrace);
b61ce85c
MM
2839
2840 /* Skip gaps at the beginning of the trace. */
2841 while (btrace_insn_get (&begin) == NULL)
2842 {
2843 unsigned int steps;
2844
2845 steps = btrace_insn_next (&begin, 1);
2846 if (steps == 0)
2847 error (_("No trace."));
2848 }
2849
066ce621 2850 record_btrace_set_replay (tp, &begin);
066ce621
MM
2851}
2852
f6ac5f3d 2853/* The goto_record_end method of target record-btrace. */
066ce621 2854
f6ac5f3d
PA
2855void
2856record_btrace_target::goto_record_end ()
066ce621
MM
2857{
2858 struct thread_info *tp;
2859
2860 tp = require_btrace_thread ();
2861
2862 record_btrace_set_replay (tp, NULL);
066ce621
MM
2863}
2864
f6ac5f3d 2865/* The goto_record method of target record-btrace. */
066ce621 2866
f6ac5f3d
PA
2867void
2868record_btrace_target::goto_record (ULONGEST insn)
066ce621
MM
2869{
2870 struct thread_info *tp;
2871 struct btrace_insn_iterator it;
2872 unsigned int number;
2873 int found;
2874
2875 number = insn;
2876
2877 /* Check for wrap-arounds. */
2878 if (number != insn)
2879 error (_("Instruction number out of range."));
2880
2881 tp = require_btrace_thread ();
2882
2883 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
69090cee
TW
2884
2885 /* Check if the instruction could not be found or is a gap. */
2886 if (found == 0 || btrace_insn_get (&it) == NULL)
066ce621
MM
2887 error (_("No such instruction."));
2888
2889 record_btrace_set_replay (tp, &it);
066ce621
MM
2890}
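/* Hypothetical usage example: the instruction numbers accepted here are the
   ones printed by "record instruction-history", e.g.

     (gdb) record goto begin
     (gdb) record goto 42
     (gdb) record goto end

   which map onto the goto_record_begin, goto_record and goto_record_end
   methods above.  */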
2891
f6ac5f3d 2892/* The record_stop_replaying method of target record-btrace. */
797094dd 2893
f6ac5f3d
PA
2894void
2895record_btrace_target::record_stop_replaying ()
797094dd
MM
2896{
2897 struct thread_info *tp;
2898
2899 ALL_NON_EXITED_THREADS (tp)
2900 record_btrace_stop_replaying (tp);
2901}
2902
f6ac5f3d 2903/* The execution_direction target method. */
70ad5bff 2904
f6ac5f3d
PA
2905enum exec_direction_kind
2906record_btrace_target::execution_direction ()
70ad5bff
MM
2907{
2908 return record_btrace_resume_exec_dir;
2909}
2910
f6ac5f3d 2911/* The prepare_to_generate_core target method. */
aef92902 2912
f6ac5f3d
PA
2913void
2914record_btrace_target::prepare_to_generate_core ()
aef92902
MM
2915{
2916 record_btrace_generating_corefile = 1;
2917}
2918
f6ac5f3d 2919/* The done_generating_core target method. */
aef92902 2920
f6ac5f3d
PA
2921void
2922record_btrace_target::done_generating_core ()
aef92902
MM
2923{
2924 record_btrace_generating_corefile = 0;
2925}
2926
f4abbc16
MM
2927/* Start recording in BTS format. */
2928
2929static void
cdb34d4a 2930cmd_record_btrace_bts_start (const char *args, int from_tty)
f4abbc16 2931{
f4abbc16
MM
2932 if (args != NULL && *args != 0)
2933 error (_("Invalid argument."));
2934
2935 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2936
492d29ea
PA
2937 TRY
2938 {
95a6b0a1 2939 execute_command ("target record-btrace", from_tty);
492d29ea
PA
2940 }
2941 CATCH (exception, RETURN_MASK_ALL)
f4abbc16
MM
2942 {
2943 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2944 throw_exception (exception);
2945 }
492d29ea 2946 END_CATCH
f4abbc16
MM
2947}
2948
bc504a31 2949/* Start recording in Intel Processor Trace format. */
afedecd3
MM
2950
2951static void
cdb34d4a 2952cmd_record_btrace_pt_start (const char *args, int from_tty)
afedecd3
MM
2953{
2954 if (args != NULL && *args != 0)
2955 error (_("Invalid argument."));
2956
b20a6524 2957 record_btrace_conf.format = BTRACE_FORMAT_PT;
f4abbc16 2958
492d29ea
PA
2959 TRY
2960 {
95a6b0a1 2961 execute_command ("target record-btrace", from_tty);
492d29ea
PA
2962 }
2963 CATCH (exception, RETURN_MASK_ALL)
2964 {
2965 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2966 throw_exception (exception);
2967 }
2968 END_CATCH
afedecd3
MM
2969}
2970
b20a6524
MM
2971/* Alias for "target record". */
2972
2973static void
981a3fb3 2974cmd_record_btrace_start (const char *args, int from_tty)
b20a6524
MM
2975{
2976 if (args != NULL && *args != 0)
2977 error (_("Invalid argument."));
2978
2979 record_btrace_conf.format = BTRACE_FORMAT_PT;
2980
2981 TRY
2982 {
95a6b0a1 2983 execute_command ("target record-btrace", from_tty);
b20a6524
MM
2984 }
2985 CATCH (exception, RETURN_MASK_ALL)
2986 {
2987 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2988
2989 TRY
2990 {
95a6b0a1 2991 execute_command ("target record-btrace", from_tty);
b20a6524
MM
2992 }
2993 CATCH (exception, RETURN_MASK_ALL)
2994 {
2995 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2996 throw_exception (exception);
2997 }
2998 END_CATCH
2999 }
3000 END_CATCH
3001}
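/* A plain "record btrace" thus tries Intel Processor Trace first and falls
   back to BTS if starting in PT format fails; if BTS fails as well, the
   format is reset and the exception is re-thrown.  */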
3002
67b5c0c1
MM
3003/* The "set record btrace" command. */
3004
3005static void
981a3fb3 3006cmd_set_record_btrace (const char *args, int from_tty)
67b5c0c1 3007{
b85310e1
MM
3008 printf_unfiltered (_("\"set record btrace\" must be followed "
3009 "by an appropriate subcommand.\n"));
3010 help_list (set_record_btrace_cmdlist, "set record btrace ",
3011 all_commands, gdb_stdout);
67b5c0c1
MM
3012}
3013
3014/* The "show record btrace" command. */
3015
3016static void
981a3fb3 3017cmd_show_record_btrace (const char *args, int from_tty)
67b5c0c1
MM
3018{
3019 cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
3020}
3021
3022/* The "show record btrace replay-memory-access" command. */
3023
3024static void
3025cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
3026 struct cmd_list_element *c, const char *value)
3027{
3028 fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
3029 replay_memory_access);
3030}
3031
4a4495d6
MM
3032/* The "set record btrace cpu none" command. */
3033
3034static void
3035cmd_set_record_btrace_cpu_none (const char *args, int from_tty)
3036{
3037 if (args != nullptr && *args != 0)
3038 error (_("Trailing junk: '%s'."), args);
3039
3040 record_btrace_cpu_state = CS_NONE;
3041}
3042
3043/* The "set record btrace cpu auto" command. */
3044
3045static void
3046cmd_set_record_btrace_cpu_auto (const char *args, int from_tty)
3047{
3048 if (args != nullptr && *args != 0)
3049 error (_("Trailing junk: '%s'."), args);
3050
3051 record_btrace_cpu_state = CS_AUTO;
3052}
3053
3054/* The "set record btrace cpu" command. */
3055
3056static void
3057cmd_set_record_btrace_cpu (const char *args, int from_tty)
3058{
3059 if (args == nullptr)
3060 args = "";
3061
3062 /* We use a hard-coded vendor string for now. */
3063 unsigned int family, model, stepping;
3064 int l1, l2, matches = sscanf (args, "intel: %u/%u%n/%u%n", &family,
3065 &model, &l1, &stepping, &l2);
3066 if (matches == 3)
3067 {
3068 if (strlen (args) != l2)
3069 error (_("Trailing junk: '%s'."), args + l2);
3070 }
3071 else if (matches == 2)
3072 {
3073 if (strlen (args) != l1)
3074 error (_("Trailing junk: '%s'."), args + l1);
3075
3076 stepping = 0;
3077 }
3078 else
3079 error (_("Bad format. See \"help set record btrace cpu\"."));
3080
3081 if (USHRT_MAX < family)
3082 error (_("Cpu family too big."));
3083
3084 if (UCHAR_MAX < model)
3085 error (_("Cpu model too big."));
3086
3087 if (UCHAR_MAX < stepping)
3088 error (_("Cpu stepping too big."));
3089
3090 record_btrace_cpu.vendor = CV_INTEL;
3091 record_btrace_cpu.family = family;
3092 record_btrace_cpu.model = model;
3093 record_btrace_cpu.stepping = stepping;
3094
3095 record_btrace_cpu_state = CS_CPU;
3096}
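/* Example arguments accepted by the sscanf format above (the numbers are
   merely illustrative):

     set record btrace cpu intel: 6/62      family/model
     set record btrace cpu intel: 6/62/4    family/model/stepping

   "auto" and "none" are handled by the dedicated subcommands above.  */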
3097
3098/* The "show record btrace cpu" command. */
3099
3100static void
3101cmd_show_record_btrace_cpu (const char *args, int from_tty)
3102{
3103 const char *cpu;
3104
3105 if (args != nullptr && *args != 0)
3106 error (_("Trailing junk: '%s'."), args);
3107
3108 switch (record_btrace_cpu_state)
3109 {
3110 case CS_AUTO:
3111 printf_unfiltered (_("btrace cpu is 'auto'.\n"));
3112 return;
3113
3114 case CS_NONE:
3115 printf_unfiltered (_("btrace cpu is 'none'.\n"));
3116 return;
3117
3118 case CS_CPU:
3119 switch (record_btrace_cpu.vendor)
3120 {
3121 case CV_INTEL:
3122 if (record_btrace_cpu.stepping == 0)
3123 printf_unfiltered (_("btrace cpu is 'intel: %u/%u'.\n"),
3124 record_btrace_cpu.family,
3125 record_btrace_cpu.model);
3126 else
3127 printf_unfiltered (_("btrace cpu is 'intel: %u/%u/%u'.\n"),
3128 record_btrace_cpu.family,
3129 record_btrace_cpu.model,
3130 record_btrace_cpu.stepping);
3131 return;
3132 }
3133 }
3134
3135 error (_("Internal error: bad cpu state."));
3136}
3137
3138/* The "s record btrace bts" command. */
d33501a5
MM
3139
3140static void
981a3fb3 3141cmd_set_record_btrace_bts (const char *args, int from_tty)
d33501a5
MM
3142{
3143 printf_unfiltered (_("\"set record btrace bts\" must be followed "
b20a6524 3144 "by an appropriate subcommand.\n"));
d33501a5
MM
3145 help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
3146 all_commands, gdb_stdout);
3147}
3148
3149/* The "show record btrace bts" command. */
3150
3151static void
981a3fb3 3152cmd_show_record_btrace_bts (const char *args, int from_tty)
d33501a5
MM
3153{
3154 cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
3155}
3156
b20a6524
MM
3157/* The "set record btrace pt" command. */
3158
3159static void
981a3fb3 3160cmd_set_record_btrace_pt (const char *args, int from_tty)
b20a6524
MM
3161{
3162 printf_unfiltered (_("\"set record btrace pt\" must be followed "
3163 "by an appropriate subcommand.\n"));
3164 help_list (set_record_btrace_pt_cmdlist, "set record btrace pt ",
3165 all_commands, gdb_stdout);
3166}
3167
3168/* The "show record btrace pt" command. */
3169
3170static void
981a3fb3 3171cmd_show_record_btrace_pt (const char *args, int from_tty)
b20a6524
MM
3172{
3173 cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, "");
3174}
3175
3176/* The "record bts buffer-size" show value function. */
3177
3178static void
3179show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
3180 struct cmd_list_element *c,
3181 const char *value)
3182{
3183 fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
3184 value);
3185}
3186
3187/* The "record pt buffer-size" show value function. */
3188
3189static void
3190show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
3191 struct cmd_list_element *c,
3192 const char *value)
3193{
3194 fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
3195 value);
3196}
3197
afedecd3
MM
3198/* Initialize btrace commands. */
3199
3200void
3201_initialize_record_btrace (void)
3202{
f4abbc16
MM
3203 add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
3204 _("Start branch trace recording."), &record_btrace_cmdlist,
3205 "record btrace ", 0, &record_cmdlist);
afedecd3
MM
3206 add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
3207
f4abbc16
MM
3208 add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
3209 _("\
3210Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
3211The processor stores a from/to record for each branch into a cyclic buffer.\n\
3212This format may not be available on all processors."),
3213 &record_btrace_cmdlist);
3214 add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);
3215
b20a6524
MM
3216 add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
3217 _("\
bc504a31 3218Start branch trace recording in Intel Processor Trace format.\n\n\
b20a6524
MM
3219This format may not be available on all processors."),
3220 &record_btrace_cmdlist);
3221 add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);
3222
67b5c0c1
MM
3223 add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
3224 _("Set record options"), &set_record_btrace_cmdlist,
3225 "set record btrace ", 0, &set_record_cmdlist);
3226
3227 add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
3228 _("Show record options"), &show_record_btrace_cmdlist,
3229 "show record btrace ", 0, &show_record_cmdlist);
3230
3231 add_setshow_enum_cmd ("replay-memory-access", no_class,
3232 replay_memory_access_types, &replay_memory_access, _("\
3233Set what memory accesses are allowed during replay."), _("\
3234Show what memory accesses are allowed during replay."),
3235 _("Default is READ-ONLY.\n\n\
3236The btrace record target does not trace data.\n\
3237The memory therefore corresponds to the live target and not \
3238to the current replay position.\n\n\
3239When READ-ONLY, allow accesses to read-only memory during replay.\n\
3240When READ-WRITE, allow accesses to read-only and read-write memory during \
3241replay."),
3242 NULL, cmd_show_replay_memory_access,
3243 &set_record_btrace_cmdlist,
3244 &show_record_btrace_cmdlist);
3245
4a4495d6
MM
3246 add_prefix_cmd ("cpu", class_support, cmd_set_record_btrace_cpu,
3247 _("\
3248Set the cpu to be used for trace decode.\n\n\
3249The format is \"<vendor>:<identifier>\" or \"none\" or \"auto\" (default).\n\
3250For vendor \"intel\" the format is \"<family>/<model>[/<stepping>]\".\n\n\
3251When decoding branch trace, enable errata workarounds for the specified cpu.\n\
3252The default is \"auto\", which uses the cpu on which the trace was recorded.\n\
3253When GDB does not support that cpu, this option can be used to enable\n\
3254workarounds for a similar cpu that GDB supports.\n\n\
3255When set to \"none\", errata workarounds are disabled."),
3256 &set_record_btrace_cpu_cmdlist,
3257 _("set record btrace cpu "), 1,
3258 &set_record_btrace_cmdlist);
3259
3260 add_cmd ("auto", class_support, cmd_set_record_btrace_cpu_auto, _("\
3261Automatically determine the cpu to be used for trace decode."),
3262 &set_record_btrace_cpu_cmdlist);
3263
3264 add_cmd ("none", class_support, cmd_set_record_btrace_cpu_none, _("\
3265Do not enable errata workarounds for trace decode."),
3266 &set_record_btrace_cpu_cmdlist);
3267
3268 add_cmd ("cpu", class_support, cmd_show_record_btrace_cpu, _("\
3269Show the cpu to be used for trace decode."),
3270 &show_record_btrace_cmdlist);
3271
d33501a5
MM
3272 add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
3273 _("Set record btrace bts options"),
3274 &set_record_btrace_bts_cmdlist,
3275 "set record btrace bts ", 0, &set_record_btrace_cmdlist);
3276
3277 add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
3278 _("Show record btrace bts options"),
3279 &show_record_btrace_bts_cmdlist,
3280 "show record btrace bts ", 0, &show_record_btrace_cmdlist);
3281
3282 add_setshow_uinteger_cmd ("buffer-size", no_class,
3283 &record_btrace_conf.bts.size,
3284 _("Set the record/replay bts buffer size."),
3285 _("Show the record/replay bts buffer size."), _("\
3286When starting recording request a trace buffer of this size. \
3287The actual buffer size may differ from the requested size. \
3288Use \"info record\" to see the actual buffer size.\n\n\
3289Bigger buffers allow longer recording but also take more time to process \
3290the recorded execution trace.\n\n\
b20a6524
MM
3291The trace buffer size may not be changed while recording."), NULL,
3292 show_record_bts_buffer_size_value,
d33501a5
MM
3293 &set_record_btrace_bts_cmdlist,
3294 &show_record_btrace_bts_cmdlist);
3295
b20a6524
MM
3296 add_prefix_cmd ("pt", class_support, cmd_set_record_btrace_pt,
3297 _("Set record btrace pt options"),
3298 &set_record_btrace_pt_cmdlist,
3299 "set record btrace pt ", 0, &set_record_btrace_cmdlist);
3300
3301 add_prefix_cmd ("pt", class_support, cmd_show_record_btrace_pt,
3302 _("Show record btrace pt options"),
3303 &show_record_btrace_pt_cmdlist,
3304 "show record btrace pt ", 0, &show_record_btrace_cmdlist);
3305
3306 add_setshow_uinteger_cmd ("buffer-size", no_class,
3307 &record_btrace_conf.pt.size,
3308 _("Set the record/replay pt buffer size."),
3309 _("Show the record/replay pt buffer size."), _("\
3310Bigger buffers allow longer recording but also take more time to process \
3311the recorded execution.\n\
3312The actual buffer size may differ from the requested size. Use \"info record\" \
3313to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
3314 &set_record_btrace_pt_cmdlist,
3315 &show_record_btrace_pt_cmdlist);
3316
afedecd3 3317 add_target (&record_btrace_ops);
0b722aec
MM
3318
3319 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
3320 xcalloc, xfree);
d33501a5
MM
3321
3322 record_btrace_conf.bts.size = 64 * 1024;
b20a6524 3323 record_btrace_conf.pt.size = 16 * 1024;
afedecd3 3324}