/* Branch trace support for GDB, the GNU debugger.

   Copyright (C) 2013-2018 Free Software Foundation, Inc.

   Contributed by Intel Corp. <markus.t.metzger@intel.com>

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "record.h"
#include "record-btrace.h"
#include "gdbthread.h"
#include "target.h"
#include "gdbcmd.h"
#include "disasm.h"
#include "observable.h"
#include "cli/cli-utils.h"
#include "source.h"
#include "ui-out.h"
#include "symtab.h"
#include "filenames.h"
#include "regcache.h"
#include "frame-unwind.h"
#include "hashtab.h"
#include "infrun.h"
#include "event-loop.h"
#include "inf-loop.h"
#include "vec.h"
#include <algorithm>

/* The target_ops of record-btrace.  */
static struct target_ops record_btrace_ops;

/* Token associated with a new-thread observer enabling branch tracing
   for the new thread.  */
static const gdb::observers::token record_btrace_thread_observer_token;

/* Memory access types used in set/show record btrace replay-memory-access.  */
static const char replay_memory_access_read_only[] = "read-only";
static const char replay_memory_access_read_write[] = "read-write";
static const char *const replay_memory_access_types[] =
{
  replay_memory_access_read_only,
  replay_memory_access_read_write,
  NULL
};

/* The currently allowed replay memory access type.  */
static const char *replay_memory_access = replay_memory_access_read_only;
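
/* For illustration: "set record btrace replay-memory-access read-write"
   permits memory writes while replaying; the default "read-only" restricts
   accesses to read-only memory.  See record_btrace_xfer_partial below.  */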

/* The cpu state kinds.  */
enum record_btrace_cpu_state_kind
{
  CS_AUTO,
  CS_NONE,
  CS_CPU
};

/* The current cpu state.  */
static enum record_btrace_cpu_state_kind record_btrace_cpu_state = CS_AUTO;

/* The current cpu for trace decode.  */
static struct btrace_cpu record_btrace_cpu;

/* Command lists for "set/show record btrace".  */
static struct cmd_list_element *set_record_btrace_cmdlist;
static struct cmd_list_element *show_record_btrace_cmdlist;

/* The execution direction of the last resume we got.  See record-full.c.  */
static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;

/* The async event handler for reverse/replay execution.  */
static struct async_event_handler *record_btrace_async_inferior_event_handler;

/* A flag indicating that we are currently generating a core file.  */
static int record_btrace_generating_corefile;

/* The current branch trace configuration.  */
static struct btrace_config record_btrace_conf;

/* Command list for "record btrace".  */
static struct cmd_list_element *record_btrace_cmdlist;

/* Command lists for "set/show record btrace bts".  */
static struct cmd_list_element *set_record_btrace_bts_cmdlist;
static struct cmd_list_element *show_record_btrace_bts_cmdlist;

/* Command lists for "set/show record btrace pt".  */
static struct cmd_list_element *set_record_btrace_pt_cmdlist;
static struct cmd_list_element *show_record_btrace_pt_cmdlist;

/* Command list for "set record btrace cpu".  */
static struct cmd_list_element *set_record_btrace_cpu_cmdlist;

/* Print a record-btrace debug message.  Use do ... while (0) to avoid
   ambiguities when used in if statements.  */

#define DEBUG(msg, args...)                                             \
  do                                                                    \
    {                                                                   \
      if (record_debug != 0)                                            \
        fprintf_unfiltered (gdb_stdlog,                                 \
                            "[record-btrace] " msg "\n", ##args);       \
    }                                                                   \
  while (0)
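
/* A minimal usage sketch (hypothetical call, for illustration):

     DEBUG ("resume %s", target_pid_to_str (inferior_ptid));

   With "set debug record 1", this prints a "[record-btrace] "-prefixed
   line to gdb_stdlog; with record_debug at zero, it prints nothing.  */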


/* Return the cpu configured by the user.  Returns NULL if the cpu was
   configured as auto.  */
const struct btrace_cpu *
record_btrace_get_cpu (void)
{
  switch (record_btrace_cpu_state)
    {
    case CS_AUTO:
      return nullptr;

    case CS_NONE:
      record_btrace_cpu.vendor = CV_UNKNOWN;
      /* Fall through.  */
    case CS_CPU:
      return &record_btrace_cpu;
    }

  error (_("Internal error: bad record btrace cpu state."));
}
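
/* For illustration: "set record btrace cpu none" selects CS_NONE, so the
   returned cpu has vendor CV_UNKNOWN and trace decode skips cpu-specific
   errata workarounds; "set record btrace cpu auto" selects CS_AUTO and
   this function returns nullptr.  */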

/* Update the branch trace for the current thread and return a pointer to its
   thread_info.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct thread_info *
require_btrace_thread (void)
{
  struct thread_info *tp;

  DEBUG ("require");

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  validate_registers_access ();

  btrace_fetch (tp, record_btrace_get_cpu ());

  if (btrace_is_empty (tp))
    error (_("No trace."));

  return tp;
}

/* Update the branch trace for the current thread and return a pointer to its
   branch trace information struct.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct btrace_thread_info *
require_btrace (void)
{
  struct thread_info *tp;

  tp = require_btrace_thread ();

  return &tp->btrace;
}

/* Enable branch tracing for one thread.  Warn on errors.  */

static void
record_btrace_enable_warn (struct thread_info *tp)
{
  TRY
    {
      btrace_enable (tp, &record_btrace_conf);
    }
  CATCH (error, RETURN_MASK_ERROR)
    {
      warning ("%s", error.message);
    }
  END_CATCH
}

/* Enable automatic tracing of new threads.  */

static void
record_btrace_auto_enable (void)
{
  DEBUG ("attach thread observer");

  gdb::observers::new_thread.attach (record_btrace_enable_warn,
                                     record_btrace_thread_observer_token);
}

/* Disable automatic tracing of new threads.  */

static void
record_btrace_auto_disable (void)
{
  DEBUG ("detach thread observer");

  gdb::observers::new_thread.detach (record_btrace_thread_observer_token);
}

/* The record-btrace async event handler function.  */

static void
record_btrace_handle_async_inferior_event (gdb_client_data data)
{
  inferior_event_handler (INF_REG_EVENT, NULL);
}

/* See record-btrace.h.  */

void
record_btrace_push_target (void)
{
  const char *format;

  record_btrace_auto_enable ();

  push_target (&record_btrace_ops);

  record_btrace_async_inferior_event_handler
    = create_async_event_handler (record_btrace_handle_async_inferior_event,
                                  NULL);
  record_btrace_generating_corefile = 0;

  format = btrace_format_short_string (record_btrace_conf.format);
  gdb::observers::record_changed.notify (current_inferior (), 1, "btrace", format);
}

/* Disable btrace on a set of threads on scope exit.  */

struct scoped_btrace_disable
{
  scoped_btrace_disable () = default;

  DISABLE_COPY_AND_ASSIGN (scoped_btrace_disable);

  ~scoped_btrace_disable ()
  {
    for (thread_info *tp : m_threads)
      btrace_disable (tp);
  }

  void add_thread (thread_info *thread)
  {
    m_threads.push_front (thread);
  }

  void discard ()
  {
    m_threads.clear ();
  }

private:
  std::forward_list<thread_info *> m_threads;
};

/* The to_open method of target record-btrace.  */

static void
record_btrace_open (const char *args, int from_tty)
{
  /* If we fail to enable btrace for one thread, disable it for the threads
     for which it was successfully enabled.  */
  scoped_btrace_disable btrace_disable;
  struct thread_info *tp;

  DEBUG ("open");

  record_preopen ();

  if (!target_has_execution)
    error (_("The program is not being run."));

  ALL_NON_EXITED_THREADS (tp)
    if (args == NULL || *args == 0 || number_is_in_list (args, tp->global_num))
      {
        btrace_enable (tp, &record_btrace_conf);

        btrace_disable.add_thread (tp);
      }

  record_btrace_push_target ();

  btrace_disable.discard ();
}

/* The to_stop_recording method of target record-btrace.  */

static void
record_btrace_stop_recording (struct target_ops *self)
{
  struct thread_info *tp;

  DEBUG ("stop recording");

  record_btrace_auto_disable ();

  ALL_NON_EXITED_THREADS (tp)
    if (tp->btrace.target != NULL)
      btrace_disable (tp);
}

/* The to_disconnect method of target record-btrace.  */

static void
record_btrace_disconnect (struct target_ops *self, const char *args,
                          int from_tty)
{
  struct target_ops *beneath = self->beneath;

  /* Do not stop recording, just clean up GDB side.  */
  unpush_target (self);

  /* Forward disconnect.  */
  beneath->to_disconnect (beneath, args, from_tty);
}

/* The to_close method of target record-btrace.  */

static void
record_btrace_close (struct target_ops *self)
{
  struct thread_info *tp;

  if (record_btrace_async_inferior_event_handler != NULL)
    delete_async_event_handler (&record_btrace_async_inferior_event_handler);

  /* Make sure automatic recording gets disabled even if we did not stop
     recording before closing the record-btrace target.  */
  record_btrace_auto_disable ();

  /* We should have already stopped recording.
     Tear down btrace in case we have not.  */
  ALL_NON_EXITED_THREADS (tp)
    btrace_teardown (tp);
}

/* The to_async method of target record-btrace.  */

static void
record_btrace_async (struct target_ops *ops, int enable)
{
  if (enable)
    mark_async_event_handler (record_btrace_async_inferior_event_handler);
  else
    clear_async_event_handler (record_btrace_async_inferior_event_handler);

  ops->beneath->to_async (ops->beneath, enable);
}
/* Adjusts the size and returns a human-readable size suffix.  */

static const char *
record_btrace_adjust_size (unsigned int *size)
{
  unsigned int sz;

  sz = *size;

  if ((sz & ((1u << 30) - 1)) == 0)
    {
      *size = sz >> 30;
      return "GB";
    }
  else if ((sz & ((1u << 20) - 1)) == 0)
    {
      *size = sz >> 20;
      return "MB";
    }
  else if ((sz & ((1u << 10) - 1)) == 0)
    {
      *size = sz >> 10;
      return "kB";
    }
  else
    return "";
}
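
/* For example: a *SIZE of 2097152 (2 << 20) becomes 2 with suffix "MB",
   4096 becomes 4 with suffix "kB", and a value that is not a multiple of
   1024, such as 1000, is left unchanged with an empty suffix.  */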

/* Print a BTS configuration.  */

static void
record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
{
  const char *suffix;
  unsigned int size;

  size = conf->size;
  if (size > 0)
    {
      suffix = record_btrace_adjust_size (&size);
      printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
    }
}

/* Print an Intel Processor Trace configuration.  */

static void
record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
{
  const char *suffix;
  unsigned int size;

  size = conf->size;
  if (size > 0)
    {
      suffix = record_btrace_adjust_size (&size);
      printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
    }
}

/* Print a branch tracing configuration.  */

static void
record_btrace_print_conf (const struct btrace_config *conf)
{
  printf_unfiltered (_("Recording format: %s.\n"),
                     btrace_format_string (conf->format));

  switch (conf->format)
    {
    case BTRACE_FORMAT_NONE:
      return;

    case BTRACE_FORMAT_BTS:
      record_btrace_print_bts_conf (&conf->bts);
      return;

    case BTRACE_FORMAT_PT:
      record_btrace_print_pt_conf (&conf->pt);
      return;
    }
  internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
}

/* The to_info_record method of target record-btrace.  */

static void
record_btrace_info (struct target_ops *self)
{
  struct btrace_thread_info *btinfo;
  const struct btrace_config *conf;
  struct thread_info *tp;
  unsigned int insns, calls, gaps;

  DEBUG ("info");

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  validate_registers_access ();

  btinfo = &tp->btrace;

  conf = btrace_conf (btinfo);
  if (conf != NULL)
    record_btrace_print_conf (conf);

  btrace_fetch (tp, record_btrace_get_cpu ());

  insns = 0;
  calls = 0;
  gaps = 0;

  if (!btrace_is_empty (tp))
    {
      struct btrace_call_iterator call;
      struct btrace_insn_iterator insn;

      btrace_call_end (&call, btinfo);
      btrace_call_prev (&call, 1);
      calls = btrace_call_number (&call);

      btrace_insn_end (&insn, btinfo);
      insns = btrace_insn_number (&insn);

      /* If the last instruction is not a gap, it is the current instruction
         that is not actually part of the record.  */
      if (btrace_insn_get (&insn) != NULL)
        insns -= 1;

      gaps = btinfo->ngaps;
    }

  printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
                       "for thread %s (%s).\n"), insns, calls, gaps,
                     print_thread_id (tp), target_pid_to_str (tp->ptid));

  if (btrace_is_replaying (tp))
    printf_unfiltered (_("Replay in progress.  At instruction %u.\n"),
                       btrace_insn_number (btinfo->replay));
}

/* Print a decode error.  */

static void
btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
                            enum btrace_format format)
{
  const char *errstr = btrace_decode_error (format, errcode);

  uiout->text (_("["));
  /* ERRCODE > 0 indicates notifications on BTRACE_FORMAT_PT.  */
  if (!(format == BTRACE_FORMAT_PT && errcode > 0))
    {
      uiout->text (_("decode error ("));
      uiout->field_int ("errcode", errcode);
      uiout->text (_("): "));
    }
  uiout->text (errstr);
  uiout->text (_("]\n"));
}

/* Print an unsigned int.  */

static void
ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
{
  uiout->field_fmt (fld, "%u", val);
}

/* A range of source lines.  */

struct btrace_line_range
{
  /* The symtab this line is from.  */
  struct symtab *symtab;

  /* The first line (inclusive).  */
  int begin;

  /* The last line (exclusive).  */
  int end;
};

/* Construct a line range.  */

static struct btrace_line_range
btrace_mk_line_range (struct symtab *symtab, int begin, int end)
{
  struct btrace_line_range range;

  range.symtab = symtab;
  range.begin = begin;
  range.end = end;

  return range;
}

/* Add a line to a line range.  */

static struct btrace_line_range
btrace_line_range_add (struct btrace_line_range range, int line)
{
  if (range.end <= range.begin)
    {
      /* This is the first entry.  */
      range.begin = line;
      range.end = line + 1;
    }
  else if (line < range.begin)
    range.begin = line;
  else if (range.end < line)
    range.end = line;

  return range;
}
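
/* For example: starting from the empty range [0; 0), adding line 42 yields
   [42; 43); adding line 40 to that yields [40; 43).  */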

/* Return non-zero if RANGE is empty, zero otherwise.  */

static int
btrace_line_range_is_empty (struct btrace_line_range range)
{
  return range.end <= range.begin;
}

/* Return non-zero if LHS contains RHS, zero otherwise.  */

static int
btrace_line_range_contains_range (struct btrace_line_range lhs,
                                  struct btrace_line_range rhs)
{
  return ((lhs.symtab == rhs.symtab)
          && (lhs.begin <= rhs.begin)
          && (rhs.end <= lhs.end));
}

/* Find the line range associated with PC.  */

static struct btrace_line_range
btrace_find_line_range (CORE_ADDR pc)
{
  struct btrace_line_range range;
  struct linetable_entry *lines;
  struct linetable *ltable;
  struct symtab *symtab;
  int nlines, i;

  symtab = find_pc_line_symtab (pc);
  if (symtab == NULL)
    return btrace_mk_line_range (NULL, 0, 0);

  ltable = SYMTAB_LINETABLE (symtab);
  if (ltable == NULL)
    return btrace_mk_line_range (symtab, 0, 0);

  nlines = ltable->nitems;
  lines = ltable->item;
  if (nlines <= 0)
    return btrace_mk_line_range (symtab, 0, 0);

  range = btrace_mk_line_range (symtab, 0, 0);
  for (i = 0; i < nlines - 1; i++)
    {
      if ((lines[i].pc == pc) && (lines[i].line != 0))
        range = btrace_line_range_add (range, lines[i].line);
    }

  return range;
}
/* Print source lines in LINES to UIOUT.

   SRC_AND_ASM_TUPLE and ASM_LIST hold the emitters for the last printed
   source line and the instructions corresponding to that source line.  When
   printing a new source line, we close the open emitters and open new ones
   for the new source line.  If the source line range in LINES is not empty,
   this function will leave the emitters for the last printed source line
   open so instructions can be added to them.  */

static void
btrace_print_lines (struct btrace_line_range lines, struct ui_out *uiout,
                    gdb::optional<ui_out_emit_tuple> *src_and_asm_tuple,
                    gdb::optional<ui_out_emit_list> *asm_list,
                    gdb_disassembly_flags flags)
{
  print_source_lines_flags psl_flags;

  if (flags & DISASSEMBLY_FILENAME)
    psl_flags |= PRINT_SOURCE_LINES_FILENAME;

  for (int line = lines.begin; line < lines.end; ++line)
    {
      asm_list->reset ();

      src_and_asm_tuple->emplace (uiout, "src_and_asm_line");

      print_source_lines (lines.symtab, line, line + 1, psl_flags);

      asm_list->emplace (uiout, "line_asm_insn");
    }
}

/* Disassemble a section of the recorded instruction trace.  */

static void
btrace_insn_history (struct ui_out *uiout,
                     const struct btrace_thread_info *btinfo,
                     const struct btrace_insn_iterator *begin,
                     const struct btrace_insn_iterator *end,
                     gdb_disassembly_flags flags)
{
  DEBUG ("itrace (0x%x): [%u; %u)", (unsigned) flags,
         btrace_insn_number (begin), btrace_insn_number (end));

  flags |= DISASSEMBLY_SPECULATIVE;

  struct gdbarch *gdbarch = target_gdbarch ();
  btrace_line_range last_lines = btrace_mk_line_range (NULL, 0, 0);

  ui_out_emit_list list_emitter (uiout, "asm_insns");

  gdb::optional<ui_out_emit_tuple> src_and_asm_tuple;
  gdb::optional<ui_out_emit_list> asm_list;

  gdb_pretty_print_disassembler disasm (gdbarch);

  for (btrace_insn_iterator it = *begin; btrace_insn_cmp (&it, end) != 0;
       btrace_insn_next (&it, 1))
    {
      const struct btrace_insn *insn;

      insn = btrace_insn_get (&it);

      /* A NULL instruction indicates a gap in the trace.  */
      if (insn == NULL)
        {
          const struct btrace_config *conf;

          conf = btrace_conf (btinfo);

          /* We have trace so we must have a configuration.  */
          gdb_assert (conf != NULL);

          uiout->field_fmt ("insn-number", "%u",
                            btrace_insn_number (&it));
          uiout->text ("\t");

          btrace_ui_out_decode_error (uiout, btrace_insn_get_error (&it),
                                      conf->format);
        }
      else
        {
          struct disasm_insn dinsn;

          if ((flags & DISASSEMBLY_SOURCE) != 0)
            {
              struct btrace_line_range lines;

              lines = btrace_find_line_range (insn->pc);
              if (!btrace_line_range_is_empty (lines)
                  && !btrace_line_range_contains_range (last_lines, lines))
                {
                  btrace_print_lines (lines, uiout, &src_and_asm_tuple,
                                      &asm_list, flags);
                  last_lines = lines;
                }
              else if (!src_and_asm_tuple.has_value ())
                {
                  gdb_assert (!asm_list.has_value ());

                  src_and_asm_tuple.emplace (uiout, "src_and_asm_line");

                  /* No source information.  */
                  asm_list.emplace (uiout, "line_asm_insn");
                }

              gdb_assert (src_and_asm_tuple.has_value ());
              gdb_assert (asm_list.has_value ());
            }

          memset (&dinsn, 0, sizeof (dinsn));
          dinsn.number = btrace_insn_number (&it);
          dinsn.addr = insn->pc;

          if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
            dinsn.is_speculative = 1;

          disasm.pretty_print_insn (uiout, &dinsn, flags);
        }
    }
}

/* The to_insn_history method of target record-btrace.  */

static void
record_btrace_insn_history (struct target_ops *self, int size,
                            gdb_disassembly_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  btinfo = require_btrace ();
  history = btinfo->insn_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("insn-history (0x%x): %d", (unsigned) flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
         start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
        begin = *replay;
      else
        btrace_insn_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
         expand in the other direction, as well, to fill up any remaining
         context.  */
      end = begin;
      if (size < 0)
        {
          /* We want the current position covered, as well.  */
          covered = btrace_insn_next (&end, 1);
          covered += btrace_insn_prev (&begin, context - covered);
          covered += btrace_insn_next (&end, context - covered);
        }
      else
        {
          covered = btrace_insn_next (&end, context);
          covered += btrace_insn_prev (&begin, context - covered);
        }
    }
  else
    {
      begin = history->begin;
      end = history->end;

      DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", (unsigned) flags, size,
             btrace_insn_number (&begin), btrace_insn_number (&end));

      if (size < 0)
        {
          end = begin;
          covered = btrace_insn_prev (&begin, context);
        }
      else
        {
          begin = end;
          covered = btrace_insn_next (&end, context);
        }
    }

  if (covered > 0)
    btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
        printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
        printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  btrace_set_insn_history (btinfo, &begin, &end);
}

/* The to_insn_history_range method of target record-btrace.  */

static void
record_btrace_insn_history_range (struct target_ops *self,
                                  ULONGEST from, ULONGEST to,
                                  gdb_disassembly_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_iterator begin, end;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "insn history");
  low = from;
  high = to;

  DEBUG ("insn-history (0x%x): [%u; %u)", (unsigned) flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_insn_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_insn_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_insn_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_insn_next (&end, 1);
    }

  btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_insn_history (btinfo, &begin, &end);
}

/* The to_insn_history_from method of target record-btrace.  */

static void
record_btrace_insn_history_from (struct target_ops *self,
                                 ULONGEST from, int size,
                                 gdb_disassembly_flags flags)
{
  ULONGEST begin, end, context;

  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  if (size < 0)
    {
      end = from;

      if (from < context)
        begin = 0;
      else
        begin = from - context + 1;
    }
  else
    {
      begin = from;
      end = from + context - 1;

      /* Check for wrap-around.  */
      if (end < begin)
        end = ULONGEST_MAX;
    }

  record_btrace_insn_history_range (self, begin, end, flags);
}
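
/* For example: FROM = 100 with SIZE = -10 requests the inclusive range
   [91; 100], i.e. the ten instructions up to and including 100; FROM = 100
   with SIZE = 10 requests [100; 109].  */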

/* Print the instruction number range for a function call history line.  */

static void
btrace_call_history_insn_range (struct ui_out *uiout,
                                const struct btrace_function *bfun)
{
  unsigned int begin, end, size;

  size = bfun->insn.size ();
  gdb_assert (size > 0);

  begin = bfun->insn_offset;
  end = begin + size - 1;

  ui_out_field_uint (uiout, "insn begin", begin);
  uiout->text (",");
  ui_out_field_uint (uiout, "insn end", end);
}

/* Compute the lowest and highest source line for the instructions in BFUN
   and return them in PBEGIN and PEND.
   Ignore instructions that can't be mapped to BFUN, e.g. instructions that
   result from inlining or macro expansion.  */

static void
btrace_compute_src_line_range (const struct btrace_function *bfun,
                               int *pbegin, int *pend)
{
  struct symtab *symtab;
  struct symbol *sym;
  int begin, end;

  begin = INT_MAX;
  end = INT_MIN;

  sym = bfun->sym;
  if (sym == NULL)
    goto out;

  symtab = symbol_symtab (sym);

  for (const btrace_insn &insn : bfun->insn)
    {
      struct symtab_and_line sal;

      sal = find_pc_line (insn.pc, 0);
      if (sal.symtab != symtab || sal.line == 0)
        continue;

      begin = std::min (begin, sal.line);
      end = std::max (end, sal.line);
    }

 out:
  *pbegin = begin;
  *pend = end;
}

/* Print the source line information for a function call history line.  */

static void
btrace_call_history_src_line (struct ui_out *uiout,
                              const struct btrace_function *bfun)
{
  struct symbol *sym;
  int begin, end;

  sym = bfun->sym;
  if (sym == NULL)
    return;

  uiout->field_string ("file",
                       symtab_to_filename_for_display (symbol_symtab (sym)));

  btrace_compute_src_line_range (bfun, &begin, &end);
  if (end < begin)
    return;

  uiout->text (":");
  uiout->field_int ("min line", begin);

  if (end == begin)
    return;

  uiout->text (",");
  uiout->field_int ("max line", end);
}

/* Get the name of a branch trace function.  */

static const char *
btrace_get_bfun_name (const struct btrace_function *bfun)
{
  struct minimal_symbol *msym;
  struct symbol *sym;

  if (bfun == NULL)
    return "??";

  msym = bfun->msym;
  sym = bfun->sym;

  if (sym != NULL)
    return SYMBOL_PRINT_NAME (sym);
  else if (msym != NULL)
    return MSYMBOL_PRINT_NAME (msym);
  else
    return "??";
}

/* Disassemble a section of the recorded function trace.  */

static void
btrace_call_history (struct ui_out *uiout,
                     const struct btrace_thread_info *btinfo,
                     const struct btrace_call_iterator *begin,
                     const struct btrace_call_iterator *end,
                     int int_flags)
{
  struct btrace_call_iterator it;
  record_print_flags flags = (enum record_print_flag) int_flags;

  DEBUG ("ftrace (0x%x): [%u; %u)", int_flags, btrace_call_number (begin),
         btrace_call_number (end));

  for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
    {
      const struct btrace_function *bfun;
      struct minimal_symbol *msym;
      struct symbol *sym;

      bfun = btrace_call_get (&it);
      sym = bfun->sym;
      msym = bfun->msym;

      /* Print the function index.  */
      ui_out_field_uint (uiout, "index", bfun->number);
      uiout->text ("\t");

      /* Indicate gaps in the trace.  */
      if (bfun->errcode != 0)
        {
          const struct btrace_config *conf;

          conf = btrace_conf (btinfo);

          /* We have trace so we must have a configuration.  */
          gdb_assert (conf != NULL);

          btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);

          continue;
        }

      if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
        {
          int level = bfun->level + btinfo->level, i;

          for (i = 0; i < level; ++i)
            uiout->text ("  ");
        }

      if (sym != NULL)
        uiout->field_string ("function", SYMBOL_PRINT_NAME (sym));
      else if (msym != NULL)
        uiout->field_string ("function", MSYMBOL_PRINT_NAME (msym));
      else if (!uiout->is_mi_like_p ())
        uiout->field_string ("function", "??");

      if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
        {
          uiout->text (_("\tinst "));
          btrace_call_history_insn_range (uiout, bfun);
        }

      if ((flags & RECORD_PRINT_SRC_LINE) != 0)
        {
          uiout->text (_("\tat "));
          btrace_call_history_src_line (uiout, bfun);
        }

      uiout->text ("\n");
    }
}

/* The to_call_history method of target record-btrace.  */

static void
record_btrace_call_history (struct target_ops *self, int size,
                            record_print_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  btinfo = require_btrace ();
  history = btinfo->call_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("call-history (0x%x): %d", (int) flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
         start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
        {
          begin.btinfo = btinfo;
          begin.index = replay->call_index;
        }
      else
        btrace_call_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
         expand in the other direction, as well, to fill up any remaining
         context.  */
      end = begin;
      if (size < 0)
        {
          /* We want the current position covered, as well.  */
          covered = btrace_call_next (&end, 1);
          covered += btrace_call_prev (&begin, context - covered);
          covered += btrace_call_next (&end, context - covered);
        }
      else
        {
          covered = btrace_call_next (&end, context);
          covered += btrace_call_prev (&begin, context - covered);
        }
    }
  else
    {
      begin = history->begin;
      end = history->end;

      DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", (int) flags, size,
             btrace_call_number (&begin), btrace_call_number (&end));

      if (size < 0)
        {
          end = begin;
          covered = btrace_call_prev (&begin, context);
        }
      else
        {
          begin = end;
          covered = btrace_call_next (&end, context);
        }
    }

  if (covered > 0)
    btrace_call_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
        printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
        printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  btrace_set_call_history (btinfo, &begin, &end);
}

/* The to_call_history_range method of target record-btrace.  */

static void
record_btrace_call_history_range (struct target_ops *self,
                                  ULONGEST from, ULONGEST to,
                                  record_print_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_iterator begin, end;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "func history");
  low = from;
  high = to;

  DEBUG ("call-history (0x%x): [%u; %u)", (int) flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_call_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_call_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_call_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_call_next (&end, 1);
    }

  btrace_call_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_call_history (btinfo, &begin, &end);
}

/* The to_call_history_from method of target record-btrace.  */

static void
record_btrace_call_history_from (struct target_ops *self,
                                 ULONGEST from, int size,
                                 record_print_flags flags)
{
  ULONGEST begin, end, context;

  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  if (size < 0)
    {
      end = from;

      if (from < context)
        begin = 0;
      else
        begin = from - context + 1;
    }
  else
    {
      begin = from;
      end = from + context - 1;

      /* Check for wrap-around.  */
      if (end < begin)
        end = ULONGEST_MAX;
    }

  record_btrace_call_history_range (self, begin, end, flags);
}

/* The to_record_method method of target record-btrace.  */

static enum record_method
record_btrace_record_method (struct target_ops *self, ptid_t ptid)
{
  struct thread_info * const tp = find_thread_ptid (ptid);

  if (tp == NULL)
    error (_("No thread."));

  if (tp->btrace.target == NULL)
    return RECORD_METHOD_NONE;

  return RECORD_METHOD_BTRACE;
}

/* The to_record_is_replaying method of target record-btrace.  */

static int
record_btrace_is_replaying (struct target_ops *self, ptid_t ptid)
{
  struct thread_info *tp;

  ALL_NON_EXITED_THREADS (tp)
    if (ptid_match (tp->ptid, ptid) && btrace_is_replaying (tp))
      return 1;

  return 0;
}

/* The to_record_will_replay method of target record-btrace.  */

static int
record_btrace_will_replay (struct target_ops *self, ptid_t ptid, int dir)
{
  return dir == EXEC_REVERSE || record_btrace_is_replaying (self, ptid);
}

/* The to_xfer_partial method of target record-btrace.  */

static enum target_xfer_status
record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
                            const char *annex, gdb_byte *readbuf,
                            const gdb_byte *writebuf, ULONGEST offset,
                            ULONGEST len, ULONGEST *xfered_len)
{
  /* Filter out requests that don't make sense during replay.  */
  if (replay_memory_access == replay_memory_access_read_only
      && !record_btrace_generating_corefile
      && record_btrace_is_replaying (ops, inferior_ptid))
    {
      switch (object)
        {
        case TARGET_OBJECT_MEMORY:
          {
            struct target_section *section;

            /* We do not allow writing memory in general.  */
            if (writebuf != NULL)
              {
                *xfered_len = len;
                return TARGET_XFER_UNAVAILABLE;
              }

            /* We allow reading readonly memory.  */
            section = target_section_by_addr (ops, offset);
            if (section != NULL)
              {
                /* Check if the section we found is readonly.  */
                if ((bfd_get_section_flags (section->the_bfd_section->owner,
                                            section->the_bfd_section)
                     & SEC_READONLY) != 0)
                  {
                    /* Truncate the request to fit into this section.  */
                    len = std::min (len, section->endaddr - offset);
                    break;
                  }
              }

            *xfered_len = len;
            return TARGET_XFER_UNAVAILABLE;
          }
        }
    }

  /* Forward the request.  */
  ops = ops->beneath;
  return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
                               offset, len, xfered_len);
}
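
/* For illustration: while replaying with the default "read-only" setting,
   reads from read-only sections (e.g. code in .text) are forwarded to the
   target beneath, truncated to the containing section; all other reads and
   all writes are answered with TARGET_XFER_UNAVAILABLE.  */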

/* The to_insert_breakpoint method of target record-btrace.  */

static int
record_btrace_insert_breakpoint (struct target_ops *ops,
                                 struct gdbarch *gdbarch,
                                 struct bp_target_info *bp_tgt)
{
  const char *old;
  int ret;

  /* Inserting breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  TRY
    {
      ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      replay_memory_access = old;
      throw_exception (except);
    }
  END_CATCH
  replay_memory_access = old;

  return ret;
}

/* The to_remove_breakpoint method of target record-btrace.  */

static int
record_btrace_remove_breakpoint (struct target_ops *ops,
                                 struct gdbarch *gdbarch,
                                 struct bp_target_info *bp_tgt,
                                 enum remove_bp_reason reason)
{
  const char *old;
  int ret;

  /* Removing breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  TRY
    {
      ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt,
                                                reason);
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      replay_memory_access = old;
      throw_exception (except);
    }
  END_CATCH
  replay_memory_access = old;

  return ret;
}

/* The to_fetch_registers method of target record-btrace.  */

static void
record_btrace_fetch_registers (struct target_ops *ops,
                               struct regcache *regcache, int regno)
{
  struct btrace_insn_iterator *replay;
  struct thread_info *tp;

  tp = find_thread_ptid (regcache_get_ptid (regcache));
  gdb_assert (tp != NULL);

  replay = tp->btrace.replay;
  if (replay != NULL && !record_btrace_generating_corefile)
    {
      const struct btrace_insn *insn;
      struct gdbarch *gdbarch;
      int pcreg;

      gdbarch = regcache->arch ();
      pcreg = gdbarch_pc_regnum (gdbarch);
      if (pcreg < 0)
        return;

      /* We can only provide the PC register.  */
      if (regno >= 0 && regno != pcreg)
        return;

      insn = btrace_insn_get (replay);
      gdb_assert (insn != NULL);

      regcache_raw_supply (regcache, regno, &insn->pc);
    }
  else
    {
      struct target_ops *t = ops->beneath;

      t->to_fetch_registers (t, regcache, regno);
    }
}

/* The to_store_registers method of target record-btrace.  */

static void
record_btrace_store_registers (struct target_ops *ops,
                               struct regcache *regcache, int regno)
{
  struct target_ops *t;

  if (!record_btrace_generating_corefile
      && record_btrace_is_replaying (ops, regcache_get_ptid (regcache)))
    error (_("Cannot write registers while replaying."));

  gdb_assert (may_write_registers != 0);

  t = ops->beneath;
  t->to_store_registers (t, regcache, regno);
}

/* The to_prepare_to_store method of target record-btrace.  */

static void
record_btrace_prepare_to_store (struct target_ops *ops,
                                struct regcache *regcache)
{
  struct target_ops *t;

  if (!record_btrace_generating_corefile
      && record_btrace_is_replaying (ops, regcache_get_ptid (regcache)))
    return;

  t = ops->beneath;
  t->to_prepare_to_store (t, regcache);
}

/* The branch trace frame cache.  */

struct btrace_frame_cache
{
  /* The thread.  */
  struct thread_info *tp;

  /* The frame info.  */
  struct frame_info *frame;

  /* The branch trace function segment.  */
  const struct btrace_function *bfun;
};

/* A struct btrace_frame_cache hash table indexed by NEXT.  */

static htab_t bfcache;

/* hash_f for htab_create_alloc of bfcache.  */

static hashval_t
bfcache_hash (const void *arg)
{
  const struct btrace_frame_cache *cache
    = (const struct btrace_frame_cache *) arg;

  return htab_hash_pointer (cache->frame);
}

/* eq_f for htab_create_alloc of bfcache.  */

static int
bfcache_eq (const void *arg1, const void *arg2)
{
  const struct btrace_frame_cache *cache1
    = (const struct btrace_frame_cache *) arg1;
  const struct btrace_frame_cache *cache2
    = (const struct btrace_frame_cache *) arg2;

  return cache1->frame == cache2->frame;
}
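
/* Both functions key a cache entry on its FRAME pointer alone, so a lookup
   only needs a pattern entry with FRAME filled in, for example:

     struct btrace_frame_cache pattern;
     pattern.frame = frame;
     slot = htab_find_slot (bfcache, &pattern, NO_INSERT);

   as done in btrace_get_frame_function below.  */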

/* Create a new btrace frame cache.  */

static struct btrace_frame_cache *
bfcache_new (struct frame_info *frame)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
  cache->frame = frame;

  slot = htab_find_slot (bfcache, cache, INSERT);
  gdb_assert (*slot == NULL);
  *slot = cache;

  return cache;
}

/* Extract the branch trace function from a branch trace frame.  */

static const struct btrace_function *
btrace_get_frame_function (struct frame_info *frame)
{
  const struct btrace_frame_cache *cache;
  struct btrace_frame_cache pattern;
  void **slot;

  pattern.frame = frame;

  slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
  if (slot == NULL)
    return NULL;

  cache = (const struct btrace_frame_cache *) *slot;
  return cache->bfun;
}

/* Implement stop_reason method for record_btrace_frame_unwind.  */

static enum unwind_stop_reason
record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
                                        void **this_cache)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;

  cache = (const struct btrace_frame_cache *) *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  if (bfun->up == 0)
    return UNWIND_UNAVAILABLE;

  return UNWIND_NO_REASON;
}

/* Implement this_id method for record_btrace_frame_unwind.  */

static void
record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
                             struct frame_id *this_id)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;
  struct btrace_call_iterator it;
  CORE_ADDR code, special;

  cache = (const struct btrace_frame_cache *) *this_cache;

  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  while (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->prev) != 0)
    bfun = btrace_call_get (&it);

  code = get_frame_func (this_frame);
  special = bfun->number;

  *this_id = frame_id_build_unavailable_stack_special (code, special);

  DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
         btrace_get_bfun_name (cache->bfun),
         core_addr_to_string_nz (this_id->code_addr),
         core_addr_to_string_nz (this_id->special_addr));
}

/* Implement prev_register method for record_btrace_frame_unwind.  */

static struct value *
record_btrace_frame_prev_register (struct frame_info *this_frame,
                                   void **this_cache,
                                   int regnum)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun, *caller;
  struct btrace_call_iterator it;
  struct gdbarch *gdbarch;
  CORE_ADDR pc;
  int pcreg;

  gdbarch = get_frame_arch (this_frame);
  pcreg = gdbarch_pc_regnum (gdbarch);
  if (pcreg < 0 || regnum != pcreg)
    throw_error (NOT_AVAILABLE_ERROR,
                 _("Registers are not available in btrace record history"));

  cache = (const struct btrace_frame_cache *) *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  if (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->up) == 0)
    throw_error (NOT_AVAILABLE_ERROR,
                 _("No caller in btrace record history"));

  caller = btrace_call_get (&it);

  if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
    pc = caller->insn.front ().pc;
  else
    {
      pc = caller->insn.back ().pc;
      pc += gdb_insn_length (gdbarch, pc);
    }

  DEBUG ("[frame] unwound PC in %s on level %d: %s",
         btrace_get_bfun_name (bfun), bfun->level,
         core_addr_to_string_nz (pc));

  return frame_unwind_got_address (this_frame, regnum, pc);
}

/* Implement sniffer method for record_btrace_frame_unwind.  */

static int
record_btrace_frame_sniffer (const struct frame_unwind *self,
                             struct frame_info *this_frame,
                             void **this_cache)
{
  const struct btrace_function *bfun;
  struct btrace_frame_cache *cache;
  struct thread_info *tp;
  struct frame_info *next;

  /* THIS_FRAME does not contain a reference to its thread.  */
  tp = find_thread_ptid (inferior_ptid);
  gdb_assert (tp != NULL);

  bfun = NULL;
  next = get_next_frame (this_frame);
  if (next == NULL)
    {
      const struct btrace_insn_iterator *replay;

      replay = tp->btrace.replay;
      if (replay != NULL)
        bfun = &replay->btinfo->functions[replay->call_index];
    }
  else
    {
      const struct btrace_function *callee;
      struct btrace_call_iterator it;

      callee = btrace_get_frame_function (next);
      if (callee == NULL || (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
        return 0;

      if (btrace_find_call_by_number (&it, &tp->btrace, callee->up) == 0)
        return 0;

      bfun = btrace_call_get (&it);
    }

  if (bfun == NULL)
    return 0;

  DEBUG ("[frame] sniffed frame for %s on level %d",
         btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = tp;
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}

/* Implement sniffer method for record_btrace_tailcall_frame_unwind.  */

static int
record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
                                      struct frame_info *this_frame,
                                      void **this_cache)
{
  const struct btrace_function *bfun, *callee;
  struct btrace_frame_cache *cache;
  struct btrace_call_iterator it;
  struct frame_info *next;
  struct thread_info *tinfo;

  next = get_next_frame (this_frame);
  if (next == NULL)
    return 0;

  callee = btrace_get_frame_function (next);
  if (callee == NULL)
    return 0;

  if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
    return 0;

  tinfo = find_thread_ptid (inferior_ptid);
  if (btrace_find_call_by_number (&it, &tinfo->btrace, callee->up) == 0)
    return 0;

  bfun = btrace_call_get (&it);

  DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
         btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = tinfo;
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}

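/* Implement the dealloc_cache method for record_btrace_frame_unwind.  */
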
static void
record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = (struct btrace_frame_cache *) this_cache;

  slot = htab_find_slot (bfcache, cache, NO_INSERT);
  gdb_assert (slot != NULL);

  htab_remove_elt (bfcache, cache);
}

/* btrace recording does not store previous memory content, nor the content
   of the stack frames.  Any unwinding would return erroneous results as the
   stack contents no longer match the changed PC value restored from history.
   Therefore this unwinder reports any possibly unwound registers as
   <unavailable>.  */

const struct frame_unwind record_btrace_frame_unwind =
{
  NORMAL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_frame_sniffer,
  record_btrace_frame_dealloc_cache
};

const struct frame_unwind record_btrace_tailcall_frame_unwind =
{
  TAILCALL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_tailcall_frame_sniffer,
  record_btrace_frame_dealloc_cache
};

/* Implement the to_get_unwinder method.  */

static const struct frame_unwind *
record_btrace_to_get_unwinder (struct target_ops *self)
{
  return &record_btrace_frame_unwind;
}

/* Implement the to_get_tailcall_unwinder method.  */

static const struct frame_unwind *
record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
{
  return &record_btrace_tailcall_frame_unwind;
}

/* Return a human-readable string for FLAG.  */

static const char *
btrace_thread_flag_to_str (enum btrace_thread_flag flag)
{
  switch (flag)
    {
    case BTHR_STEP:
      return "step";

    case BTHR_RSTEP:
      return "reverse-step";

    case BTHR_CONT:
      return "cont";

    case BTHR_RCONT:
      return "reverse-cont";

    case BTHR_STOP:
      return "stop";
    }

  return "<invalid>";
}

/* Indicate that TP should be resumed according to FLAG.  */

static void
record_btrace_resume_thread (struct thread_info *tp,
                             enum btrace_thread_flag flag)
{
  struct btrace_thread_info *btinfo;

  DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp),
         target_pid_to_str (tp->ptid), flag, btrace_thread_flag_to_str (flag));

  btinfo = &tp->btrace;

  /* Fetch the latest branch trace.  */
  btrace_fetch (tp, record_btrace_get_cpu ());

  /* A resume request overwrites a preceding resume or stop request.  */
  btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
  btinfo->flags |= flag;
}

/* Get the current frame for TP.  */

static struct frame_info *
get_thread_current_frame (struct thread_info *tp)
{
  struct frame_info *frame;
  ptid_t old_inferior_ptid;
  int executing;

  /* Set INFERIOR_PTID, which is implicitly used by get_current_frame.  */
  old_inferior_ptid = inferior_ptid;
  inferior_ptid = tp->ptid;

  /* Clear the executing flag to allow changes to the current frame.
     We are not actually running, yet.  We just started a reverse execution
     command or a record goto command.
     For the latter, EXECUTING is false and this has no effect.
     For the former, EXECUTING is true and we're in to_wait, about to
     move the thread.  Since we need to recompute the stack, we temporarily
     set EXECUTING to false.  */
  executing = is_executing (inferior_ptid);
  set_executing (inferior_ptid, 0);

  frame = NULL;
  TRY
    {
      frame = get_current_frame ();
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      /* Restore the previous execution state.  */
      set_executing (inferior_ptid, executing);

      /* Restore the previous inferior_ptid.  */
      inferior_ptid = old_inferior_ptid;

      throw_exception (except);
    }
  END_CATCH

  /* Restore the previous execution state.  */
  set_executing (inferior_ptid, executing);

  /* Restore the previous inferior_ptid.  */
  inferior_ptid = old_inferior_ptid;

  return frame;
}

/* Start replaying a thread.  */

static struct btrace_insn_iterator *
record_btrace_start_replaying (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;
  replay = NULL;

  /* We can't start replaying without trace.  */
  if (btinfo->functions.empty ())
    return NULL;

  /* GDB stores the current frame_id when stepping in order to detect steps
     into subroutines.
     Since frames are computed differently when we're replaying, we need to
     recompute those stored frames and fix them up so we can still detect
     subroutines after we started replaying.  */
1964 TRY
1965 {
1966 struct frame_info *frame;
1967 struct frame_id frame_id;
1968 int upd_step_frame_id, upd_step_stack_frame_id;
1969
1970 /* The current frame without replaying - computed via normal unwind. */
1971 frame = get_thread_current_frame (tp);
1972 frame_id = get_frame_id (frame);
1973
1974 /* Check if we need to update any stepping-related frame id's. */
1975 upd_step_frame_id = frame_id_eq (frame_id,
1976 tp->control.step_frame_id);
1977 upd_step_stack_frame_id = frame_id_eq (frame_id,
1978 tp->control.step_stack_frame_id);
1979
1980 /* We start replaying at the end of the branch trace. This corresponds
1981 to the current instruction. */
1982 replay = XNEW (struct btrace_insn_iterator);
1983 btrace_insn_end (replay, btinfo);
1984
1985 /* Skip gaps at the end of the trace. */
1986 while (btrace_insn_get (replay) == NULL)
1987 {
1988 unsigned int steps;
1989
1990 steps = btrace_insn_prev (replay, 1);
1991 if (steps == 0)
1992 error (_("No trace."));
1993 }
1994
1995 /* We're not replaying, yet. */
1996 gdb_assert (btinfo->replay == NULL);
1997 btinfo->replay = replay;
1998
1999 /* Make sure we're not using any stale registers. */
2000 registers_changed_ptid (tp->ptid);
2001
2002 /* The current frame with replaying - computed via btrace unwind. */
2003 frame = get_thread_current_frame (tp);
2004 frame_id = get_frame_id (frame);
2005
2006 /* Replace stepping related frames where necessary. */
2007 if (upd_step_frame_id)
2008 tp->control.step_frame_id = frame_id;
2009 if (upd_step_stack_frame_id)
2010 tp->control.step_stack_frame_id = frame_id;
2011 }
2012 CATCH (except, RETURN_MASK_ALL)
2013 {
2014 xfree (btinfo->replay);
2015 btinfo->replay = NULL;
2016
2017 registers_changed_ptid (tp->ptid);
2018
2019 throw_exception (except);
2020 }
2021 END_CATCH
2022
2023 return replay;
2024 }
2025
2026 /* Stop replaying a thread. */
2027
2028 static void
2029 record_btrace_stop_replaying (struct thread_info *tp)
2030 {
2031 struct btrace_thread_info *btinfo;
2032
2033 btinfo = &tp->btrace;
2034
2035 xfree (btinfo->replay);
2036 btinfo->replay = NULL;
2037
2038 /* Make sure we're not leaving any stale registers. */
2039 registers_changed_ptid (tp->ptid);
2040 }
2041
2042 /* Stop replaying TP if it is at the end of its execution history. */
2043
2044 static void
2045 record_btrace_stop_replaying_at_end (struct thread_info *tp)
2046 {
2047 struct btrace_insn_iterator *replay, end;
2048 struct btrace_thread_info *btinfo;
2049
2050 btinfo = &tp->btrace;
2051 replay = btinfo->replay;
2052
2053 if (replay == NULL)
2054 return;
2055
2056 btrace_insn_end (&end, btinfo);
2057
2058 if (btrace_insn_cmp (replay, &end) == 0)
2059 record_btrace_stop_replaying (tp);
2060 }
2061
2062 /* The to_resume method of target record-btrace. */
2063
2064 static void
2065 record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
2066 enum gdb_signal signal)
2067 {
2068 struct thread_info *tp;
2069 enum btrace_thread_flag flag, cflag;
2070
2071 DEBUG ("resume %s: %s%s", target_pid_to_str (ptid),
2072 execution_direction == EXEC_REVERSE ? "reverse-" : "",
2073 step ? "step" : "cont");
2074
2075 /* Store the execution direction of the last resume.
2076
2077 If there is more than one to_resume call, we have to rely on infrun
2078 to not change the execution direction in-between. */
2079 record_btrace_resume_exec_dir = execution_direction;
2080
2081 /* As long as we're not replaying, just forward the request.
2082
2083 For non-stop targets this means that no thread is replaying. In order to
2084 make progress, we may need to explicitly move replaying threads to the end
2085 of their execution history. */
2086 if ((execution_direction != EXEC_REVERSE)
2087 && !record_btrace_is_replaying (ops, minus_one_ptid))
2088 {
2089 ops = ops->beneath;
2090 ops->to_resume (ops, ptid, step, signal);
2091 return;
2092 }
2093
2094 /* Compute the btrace thread flag for the requested move. */
2095 if (execution_direction == EXEC_REVERSE)
2096 {
2097 flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;
2098 cflag = BTHR_RCONT;
2099 }
2100 else
2101 {
2102 flag = step == 0 ? BTHR_CONT : BTHR_STEP;
2103 cflag = BTHR_CONT;
2104 }
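
  /* For illustration, the mapping just computed is:

       direction  step   FLAG        CFLAG
       reverse    yes    BTHR_RSTEP  BTHR_RCONT
       reverse    no     BTHR_RCONT  BTHR_RCONT
       forward    yes    BTHR_STEP   BTHR_CONT
       forward    no     BTHR_CONT   BTHR_CONT

     FLAG is applied to the thread(s) we were asked to move; CFLAG is the
     continue variant used for the other resumed threads in all-stop mode
     below.  */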
2105
2106 /* We just indicate the resume intent here. The actual stepping happens in
2107 record_btrace_wait below.
2108
2109 For all-stop targets, we only step INFERIOR_PTID and continue others. */
2110 if (!target_is_non_stop_p ())
2111 {
2112 gdb_assert (ptid_match (inferior_ptid, ptid));
2113
2114 ALL_NON_EXITED_THREADS (tp)
2115 if (ptid_match (tp->ptid, ptid))
2116 {
2117 if (ptid_match (tp->ptid, inferior_ptid))
2118 record_btrace_resume_thread (tp, flag);
2119 else
2120 record_btrace_resume_thread (tp, cflag);
2121 }
2122 }
2123 else
2124 {
2125 ALL_NON_EXITED_THREADS (tp)
2126 if (ptid_match (tp->ptid, ptid))
2127 record_btrace_resume_thread (tp, flag);
2128 }
2129
2130 /* Async support. */
2131 if (target_can_async_p ())
2132 {
2133 target_async (1);
2134 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2135 }
2136 }
2137
2138 /* The to_commit_resume method of target record-btrace. */
2139
2140 static void
2141 record_btrace_commit_resume (struct target_ops *ops)
2142 {
2143 if ((execution_direction != EXEC_REVERSE)
2144 && !record_btrace_is_replaying (ops, minus_one_ptid))
2145 ops->beneath->to_commit_resume (ops->beneath);
2146 }
2147
2148 /* Cancel resuming TP. */
2149
2150 static void
2151 record_btrace_cancel_resume (struct thread_info *tp)
2152 {
2153 enum btrace_thread_flag flags;
2154
2155 flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
2156 if (flags == 0)
2157 return;
2158
2159 DEBUG ("cancel resume thread %s (%s): %x (%s)",
2160 print_thread_id (tp),
2161 target_pid_to_str (tp->ptid), flags,
2162 btrace_thread_flag_to_str (flags));
2163
2164 tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
2165 record_btrace_stop_replaying_at_end (tp);
2166 }
2167
2168 /* Return a target_waitstatus indicating that we ran out of history. */
2169
2170 static struct target_waitstatus
2171 btrace_step_no_history (void)
2172 {
2173 struct target_waitstatus status;
2174
2175 status.kind = TARGET_WAITKIND_NO_HISTORY;
2176
2177 return status;
2178 }
2179
2180 /* Return a target_waitstatus indicating that a step finished. */
2181
2182 static struct target_waitstatus
2183 btrace_step_stopped (void)
2184 {
2185 struct target_waitstatus status;
2186
2187 status.kind = TARGET_WAITKIND_STOPPED;
2188 status.value.sig = GDB_SIGNAL_TRAP;
2189
2190 return status;
2191 }
2192
2193 /* Return a target_waitstatus indicating that a thread was stopped as
2194 requested. */
2195
2196 static struct target_waitstatus
2197 btrace_step_stopped_on_request (void)
2198 {
2199 struct target_waitstatus status;
2200
2201 status.kind = TARGET_WAITKIND_STOPPED;
2202 status.value.sig = GDB_SIGNAL_0;
2203
2204 return status;
2205 }
2206
2207 /* Return a target_waitstatus indicating a spurious stop. */
2208
2209 static struct target_waitstatus
2210 btrace_step_spurious (void)
2211 {
2212 struct target_waitstatus status;
2213
2214 status.kind = TARGET_WAITKIND_SPURIOUS;
2215
2216 return status;
2217 }
2218
2219 /* Return a target_waitstatus indicating that the thread was not resumed. */
2220
2221 static struct target_waitstatus
2222 btrace_step_no_resumed (void)
2223 {
2224 struct target_waitstatus status;
2225
2226 status.kind = TARGET_WAITKIND_NO_RESUMED;
2227
2228 return status;
2229 }
2230
2231 /* Return a target_waitstatus indicating that we should wait again. */
2232
2233 static struct target_waitstatus
2234 btrace_step_again (void)
2235 {
2236 struct target_waitstatus status;
2237
2238 status.kind = TARGET_WAITKIND_IGNORE;
2239
2240 return status;
2241 }
2242
2243 /* Clear the record histories. */
2244
2245 static void
2246 record_btrace_clear_histories (struct btrace_thread_info *btinfo)
2247 {
2248 xfree (btinfo->insn_history);
2249 xfree (btinfo->call_history);
2250
2251 btinfo->insn_history = NULL;
2252 btinfo->call_history = NULL;
2253 }
2254
2255 /* Check whether TP's current replay position is at a breakpoint. */
2256
2257 static int
2258 record_btrace_replay_at_breakpoint (struct thread_info *tp)
2259 {
2260 struct btrace_insn_iterator *replay;
2261 struct btrace_thread_info *btinfo;
2262 const struct btrace_insn *insn;
2263 struct inferior *inf;
2264
2265 btinfo = &tp->btrace;
2266 replay = btinfo->replay;
2267
2268 if (replay == NULL)
2269 return 0;
2270
2271 insn = btrace_insn_get (replay);
2272 if (insn == NULL)
2273 return 0;
2274
2275 inf = find_inferior_ptid (tp->ptid);
2276 if (inf == NULL)
2277 return 0;
2278
2279 return record_check_stopped_by_breakpoint (inf->aspace, insn->pc,
2280 &btinfo->stop_reason);
2281 }
2282
2283 /* Step one instruction in forward direction. */
2284
2285 static struct target_waitstatus
2286 record_btrace_single_step_forward (struct thread_info *tp)
2287 {
2288 struct btrace_insn_iterator *replay, end, start;
2289 struct btrace_thread_info *btinfo;
2290
2291 btinfo = &tp->btrace;
2292 replay = btinfo->replay;
2293
2294 /* We're done if we're not replaying. */
2295 if (replay == NULL)
2296 return btrace_step_no_history ();
2297
2298 /* Check if we're stepping a breakpoint. */
2299 if (record_btrace_replay_at_breakpoint (tp))
2300 return btrace_step_stopped ();
2301
2302 /* Skip gaps during replay. If we end up at a gap (at the end of the trace),
2303 jump back to the instruction at which we started. */
2304 start = *replay;
2305 do
2306 {
2307 unsigned int steps;
2308
2309 /* We will bail out here if we continue stepping after reaching the end
2310 of the execution history. */
2311 steps = btrace_insn_next (replay, 1);
2312 if (steps == 0)
2313 {
2314 *replay = start;
2315 return btrace_step_no_history ();
2316 }
2317 }
2318 while (btrace_insn_get (replay) == NULL);
2319
2320 /* Determine the end of the instruction trace. */
2321 btrace_insn_end (&end, btinfo);
2322
2323 /* The execution trace contains (and ends with) the current instruction.
2324 This instruction has not been executed, yet, so the trace really ends
2325 one instruction earlier. */
2326 if (btrace_insn_cmp (replay, &end) == 0)
2327 return btrace_step_no_history ();
2328
2329 return btrace_step_spurious ();
2330 }
2331
2332 /* Step one instruction in backward direction. */
2333
2334 static struct target_waitstatus
2335 record_btrace_single_step_backward (struct thread_info *tp)
2336 {
2337 struct btrace_insn_iterator *replay, start;
2338 struct btrace_thread_info *btinfo;
2339
2340 btinfo = &tp->btrace;
2341 replay = btinfo->replay;
2342
2343 /* Start replaying if we're not already doing so. */
2344 if (replay == NULL)
2345 replay = record_btrace_start_replaying (tp);
2346
2347 /* If we can't step any further, we reached the end of the history.
2348 Skip gaps during replay. If we end up at a gap (at the beginning of
2349 the trace), jump back to the instruction at which we started. */
2350 start = *replay;
2351 do
2352 {
2353 unsigned int steps;
2354
2355 steps = btrace_insn_prev (replay, 1);
2356 if (steps == 0)
2357 {
2358 *replay = start;
2359 return btrace_step_no_history ();
2360 }
2361 }
2362 while (btrace_insn_get (replay) == NULL);
2363
2364 /* Check if we're stepping a breakpoint.
2365
2366 For reverse-stepping, this check is after the step. There is logic in
2367 infrun.c that handles reverse-stepping separately. See, for example,
2368 proceed and adjust_pc_after_break.
2369
2370 This code assumes that for reverse-stepping, PC points to the last
2371 de-executed instruction, whereas for forward-stepping PC points to the
2372 next to-be-executed instruction. */
2373 if (record_btrace_replay_at_breakpoint (tp))
2374 return btrace_step_stopped ();
2375
2376 return btrace_step_spurious ();
2377 }
2378
2379 /* Step a single thread. */
2380
2381 static struct target_waitstatus
2382 record_btrace_step_thread (struct thread_info *tp)
2383 {
2384 struct btrace_thread_info *btinfo;
2385 struct target_waitstatus status;
2386 enum btrace_thread_flag flags;
2387
2388 btinfo = &tp->btrace;
2389
2390 flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
2391 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
2392
2393 DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp),
2394 target_pid_to_str (tp->ptid), flags,
2395 btrace_thread_flag_to_str (flags));
2396
2397 /* We can't step without an execution history. */
2398 if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
2399 return btrace_step_no_history ();
2400
2401 switch (flags)
2402 {
2403 default:
2404 internal_error (__FILE__, __LINE__, _("invalid stepping type."));
2405
2406 case BTHR_STOP:
2407 return btrace_step_stopped_on_request ();
2408
2409 case BTHR_STEP:
2410 status = record_btrace_single_step_forward (tp);
2411 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2412 break;
2413
2414 return btrace_step_stopped ();
2415
2416 case BTHR_RSTEP:
2417 status = record_btrace_single_step_backward (tp);
2418 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2419 break;
2420
2421 return btrace_step_stopped ();
2422
2423 case BTHR_CONT:
2424 status = record_btrace_single_step_forward (tp);
2425 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2426 break;
2427
2428 btinfo->flags |= flags;
2429 return btrace_step_again ();
2430
2431 case BTHR_RCONT:
2432 status = record_btrace_single_step_backward (tp);
2433 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2434 break;
2435
2436 btinfo->flags |= flags;
2437 return btrace_step_again ();
2438 }
2439
2440 /* We keep threads moving at the end of their execution history. The to_wait
2441 method will stop the thread for whom the event is reported. */
2442 if (status.kind == TARGET_WAITKIND_NO_HISTORY)
2443 btinfo->flags |= flags;
2444
2445 return status;
2446 }
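
/* For illustration, the flag handling above maps to wait kinds roughly as:

     BTHR_STOP   -> TARGET_WAITKIND_STOPPED with GDB_SIGNAL_0
     BTHR_STEP   -> one forward step, then STOPPED with GDB_SIGNAL_TRAP
     BTHR_RSTEP  -> one backward step, then STOPPED with GDB_SIGNAL_TRAP
     BTHR_CONT   -> step forward; a spurious step yields IGNORE and another
                    round via btrace_step_again
     BTHR_RCONT  -> likewise, stepping backward

   A NO_HISTORY result from the single-step helpers is passed through with
   the thread's move flags kept set, as noted above.  */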
2447
2448 /* A vector of threads. */
2449
2450 typedef struct thread_info * tp_t;
2451 DEF_VEC_P (tp_t);
2452
2453 /* Announce further events if necessary. */
2454
2455 static void
2456 record_btrace_maybe_mark_async_event
2457 (const std::vector<thread_info *> &moving,
2458 const std::vector<thread_info *> &no_history)
2459 {
2460 bool more_moving = !moving.empty ();
2461 bool more_no_history = !no_history.empty ();
2462
2463 if (!more_moving && !more_no_history)
2464 return;
2465
2466 if (more_moving)
2467 DEBUG ("movers pending");
2468
2469 if (more_no_history)
2470 DEBUG ("no-history pending");
2471
2472 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2473 }
2474
2475 /* The to_wait method of target record-btrace. */
2476
2477 static ptid_t
2478 record_btrace_wait (struct target_ops *ops, ptid_t ptid,
2479 struct target_waitstatus *status, int options)
2480 {
2481 std::vector<thread_info *> moving;
2482 std::vector<thread_info *> no_history;
2483
2484 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);
2485
2486 /* As long as we're not replaying, just forward the request. */
2487 if ((execution_direction != EXEC_REVERSE)
2488 && !record_btrace_is_replaying (ops, minus_one_ptid))
2489 {
2490 ops = ops->beneath;
2491 return ops->to_wait (ops, ptid, status, options);
2492 }
2493
2494 /* Keep a work list of moving threads. */
2495 {
2496 thread_info *tp;
2497
2498 ALL_NON_EXITED_THREADS (tp)
2499 {
2500 if (ptid_match (tp->ptid, ptid)
2501 && ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0))
2502 moving.push_back (tp);
2503 }
2504 }
2505
2506 if (moving.empty ())
2507 {
2508 *status = btrace_step_no_resumed ();
2509
2510 DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid),
2511 target_waitstatus_to_string (status).c_str ());
2512
2513 return null_ptid;
2514 }
2515
2516 /* Step moving threads one by one, one step each, until either one thread
2517 reports an event or we run out of threads to step.
2518
2519 When stepping more than one thread, chances are that some threads reach
2520 the end of their execution history earlier than others. If we reported
2521 this immediately, all-stop on top of non-stop would stop all threads and
2522 resume the same threads next time. And we would report the same thread
2523 having reached the end of its execution history again.
2524
2525 In the worst case, this would starve the other threads. But even if other
2526 threads would be allowed to make progress, this would result in far too
2527 many intermediate stops.
2528
2529 We therefore delay the reporting of "no execution history" until we have
2530 nothing else to report. By this time, all threads should have moved to
2531 either the beginning or the end of their execution history. There will
2532 be a single user-visible stop. */
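
  /* For example, if threads A and B are both continuing in reverse and A
     reaches the beginning of its history first, A is parked in NO_HISTORY
     while stepping continues with B alone; only once B stops (or also runs
     out of history) is a single event reported.  */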
2533 struct thread_info *eventing = NULL;
2534 while ((eventing == NULL) && !moving.empty ())
2535 {
2536 for (unsigned int ix = 0; eventing == NULL && ix < moving.size ();)
2537 {
2538 thread_info *tp = moving[ix];
2539
2540 *status = record_btrace_step_thread (tp);
2541
2542 switch (status->kind)
2543 {
2544 case TARGET_WAITKIND_IGNORE:
2545 ix++;
2546 break;
2547
2548 case TARGET_WAITKIND_NO_HISTORY:
2549 no_history.push_back (ordered_remove (moving, ix));
2550 break;
2551
2552 default:
2553 eventing = unordered_remove (moving, ix);
2554 break;
2555 }
2556 }
2557 }
2558
2559 if (eventing == NULL)
2560 {
2561 /* We started with at least one moving thread. This thread must have
2562 either stopped or reached the end of its execution history.
2563
2564 In the former case, EVENTING must not be NULL.
2565 In the latter case, NO_HISTORY must not be empty. */
2566 gdb_assert (!no_history.empty ());
2567
2568 /* We kept threads moving at the end of their execution history. Stop
2569 EVENTING now that we are going to report its stop. */
2570 eventing = unordered_remove (no_history, 0);
2571 eventing->btrace.flags &= ~BTHR_MOVE;
2572
2573 *status = btrace_step_no_history ();
2574 }
2575
2576 gdb_assert (eventing != NULL);
2577
2578 /* We kept threads replaying at the end of their execution history. Stop
2579 replaying EVENTING now that we are going to report its stop. */
2580 record_btrace_stop_replaying_at_end (eventing);
2581
2582 /* Stop all other threads. */
2583 if (!target_is_non_stop_p ())
2584 {
2585 thread_info *tp;
2586
2587 ALL_NON_EXITED_THREADS (tp)
2588 record_btrace_cancel_resume (tp);
2589 }
2590
2591 /* In async mode, we need to announce further events. */
2592 if (target_is_async_p ())
2593 record_btrace_maybe_mark_async_event (moving, no_history);
2594
2595 /* Start record histories anew from the current position. */
2596 record_btrace_clear_histories (&eventing->btrace);
2597
2598 /* We moved the replay position but did not update registers. */
2599 registers_changed_ptid (eventing->ptid);
2600
2601 DEBUG ("wait ended by thread %s (%s): %s",
2602 print_thread_id (eventing),
2603 target_pid_to_str (eventing->ptid),
2604 target_waitstatus_to_string (status).c_str ());
2605
2606 return eventing->ptid;
2607 }
2608
2609 /* The to_stop method of target record-btrace. */
2610
2611 static void
2612 record_btrace_stop (struct target_ops *ops, ptid_t ptid)
2613 {
2614 DEBUG ("stop %s", target_pid_to_str (ptid));
2615
2616 /* As long as we're not replaying, just forward the request. */
2617 if ((execution_direction != EXEC_REVERSE)
2618 && !record_btrace_is_replaying (ops, minus_one_ptid))
2619 {
2620 ops = ops->beneath;
2621 ops->to_stop (ops, ptid);
2622 }
2623 else
2624 {
2625 struct thread_info *tp;
2626
2627 ALL_NON_EXITED_THREADS (tp)
2628 if (ptid_match (tp->ptid, ptid))
2629 {
2630 tp->btrace.flags &= ~BTHR_MOVE;
2631 tp->btrace.flags |= BTHR_STOP;
2632 }
2633 }
2634 }
2635
2636 /* The to_can_execute_reverse method of target record-btrace. */
2637
2638 static int
2639 record_btrace_can_execute_reverse (struct target_ops *self)
2640 {
2641 return 1;
2642 }
2643
2644 /* The to_stopped_by_sw_breakpoint method of target record-btrace. */
2645
2646 static int
2647 record_btrace_stopped_by_sw_breakpoint (struct target_ops *ops)
2648 {
2649 if (record_btrace_is_replaying (ops, minus_one_ptid))
2650 {
2651 struct thread_info *tp = inferior_thread ();
2652
2653 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2654 }
2655
2656 return ops->beneath->to_stopped_by_sw_breakpoint (ops->beneath);
2657 }
2658
2659 /* The to_supports_stopped_by_sw_breakpoint method of target
2660 record-btrace. */
2661
2662 static int
2663 record_btrace_supports_stopped_by_sw_breakpoint (struct target_ops *ops)
2664 {
2665 if (record_btrace_is_replaying (ops, minus_one_ptid))
2666 return 1;
2667
2668 return ops->beneath->to_supports_stopped_by_sw_breakpoint (ops->beneath);
2669 }
2670
2671 /* The to_stopped_by_hw_breakpoint method of target record-btrace. */
2672
2673 static int
2674 record_btrace_stopped_by_hw_breakpoint (struct target_ops *ops)
2675 {
2676 if (record_btrace_is_replaying (ops, minus_one_ptid))
2677 {
2678 struct thread_info *tp = inferior_thread ();
2679
2680 return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2681 }
2682
2683 return ops->beneath->to_stopped_by_hw_breakpoint (ops->beneath);
2684 }
2685
2686 /* The to_supports_stopped_by_hw_breakpoint method of target
2687 record-btrace. */
2688
2689 static int
2690 record_btrace_supports_stopped_by_hw_breakpoint (struct target_ops *ops)
2691 {
2692 if (record_btrace_is_replaying (ops, minus_one_ptid))
2693 return 1;
2694
2695 return ops->beneath->to_supports_stopped_by_hw_breakpoint (ops->beneath);
2696 }
2697
2698 /* The to_update_thread_list method of target record-btrace. */
2699
2700 static void
2701 record_btrace_update_thread_list (struct target_ops *ops)
2702 {
2703 /* We don't add or remove threads during replay. */
2704 if (record_btrace_is_replaying (ops, minus_one_ptid))
2705 return;
2706
2707 /* Forward the request. */
2708 ops = ops->beneath;
2709 ops->to_update_thread_list (ops);
2710 }
2711
2712 /* The to_thread_alive method of target record-btrace. */
2713
2714 static int
2715 record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
2716 {
2717 /* We don't add or remove threads during replay. */
2718 if (record_btrace_is_replaying (ops, minus_one_ptid))
2719 return find_thread_ptid (ptid) != NULL;
2720
2721 /* Forward the request. */
2722 ops = ops->beneath;
2723 return ops->to_thread_alive (ops, ptid);
2724 }
2725
2726 /* Set the replay branch trace instruction iterator. If IT is NULL, replay
2727 is stopped. */
2728
2729 static void
2730 record_btrace_set_replay (struct thread_info *tp,
2731 const struct btrace_insn_iterator *it)
2732 {
2733 struct btrace_thread_info *btinfo;
2734
2735 btinfo = &tp->btrace;
2736
2737 if (it == NULL)
2738 record_btrace_stop_replaying (tp);
2739 else
2740 {
2741 if (btinfo->replay == NULL)
2742 record_btrace_start_replaying (tp);
2743 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
2744 return;
2745
2746 *btinfo->replay = *it;
2747 registers_changed_ptid (tp->ptid);
2748 }
2749
2750 /* Start anew from the new replay position. */
2751 record_btrace_clear_histories (btinfo);
2752
2753 stop_pc = regcache_read_pc (get_current_regcache ());
2754 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
2755 }
2756
2757 /* The to_goto_record_begin method of target record-btrace. */
2758
2759 static void
2760 record_btrace_goto_begin (struct target_ops *self)
2761 {
2762 struct thread_info *tp;
2763 struct btrace_insn_iterator begin;
2764
2765 tp = require_btrace_thread ();
2766
2767 btrace_insn_begin (&begin, &tp->btrace);
2768
2769 /* Skip gaps at the beginning of the trace. */
2770 while (btrace_insn_get (&begin) == NULL)
2771 {
2772 unsigned int steps;
2773
2774 steps = btrace_insn_next (&begin, 1);
2775 if (steps == 0)
2776 error (_("No trace."));
2777 }
2778
2779 record_btrace_set_replay (tp, &begin);
2780 }
2781
2782 /* The to_goto_record_end method of target record-btrace. */
2783
2784 static void
2785 record_btrace_goto_end (struct target_ops *ops)
2786 {
2787 struct thread_info *tp;
2788
2789 tp = require_btrace_thread ();
2790
2791 record_btrace_set_replay (tp, NULL);
2792 }
2793
2794 /* The to_goto_record method of target record-btrace. */
2795
2796 static void
2797 record_btrace_goto (struct target_ops *self, ULONGEST insn)
2798 {
2799 struct thread_info *tp;
2800 struct btrace_insn_iterator it;
2801 unsigned int number;
2802 int found;
2803
2804 number = insn;
2805
2806 /* Check for wrap-arounds. */
2807 if (number != insn)
2808 error (_("Instruction number out of range."));
2809
2810 tp = require_btrace_thread ();
2811
2812 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
2813
2814 /* Check if the instruction could not be found or is a gap. */
2815 if (found == 0 || btrace_insn_get (&it) == NULL)
2816 error (_("No such instruction."));
2817
2818 record_btrace_set_replay (tp, &it);
2819 }
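
/* For illustration, the three goto methods above back the CLI commands:

     (gdb) record goto begin   # record_btrace_goto_begin
     (gdb) record goto end     # record_btrace_goto_end
     (gdb) record goto 42      # record_btrace_goto with INSN == 42  */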
2820
2821 /* The to_record_stop_replaying method of target record-btrace. */
2822
2823 static void
2824 record_btrace_stop_replaying_all (struct target_ops *self)
2825 {
2826 struct thread_info *tp;
2827
2828 ALL_NON_EXITED_THREADS (tp)
2829 record_btrace_stop_replaying (tp);
2830 }
2831
2832 /* The to_execution_direction target method. */
2833
2834 static enum exec_direction_kind
2835 record_btrace_execution_direction (struct target_ops *self)
2836 {
2837 return record_btrace_resume_exec_dir;
2838 }
2839
2840 /* The to_prepare_to_generate_core target method. */
2841
2842 static void
2843 record_btrace_prepare_to_generate_core (struct target_ops *self)
2844 {
2845 record_btrace_generating_corefile = 1;
2846 }
2847
2848 /* The to_done_generating_core target method. */
2849
2850 static void
2851 record_btrace_done_generating_core (struct target_ops *self)
2852 {
2853 record_btrace_generating_corefile = 0;
2854 }
2855
2856 /* Initialize the record-btrace target ops. */
2857
2858 static void
2859 init_record_btrace_ops (void)
2860 {
2861 struct target_ops *ops;
2862
2863 ops = &record_btrace_ops;
2864 ops->to_shortname = "record-btrace";
2865 ops->to_longname = "Branch tracing target";
2866 ops->to_doc = "Collect control-flow trace and provide the execution history.";
2867 ops->to_open = record_btrace_open;
2868 ops->to_close = record_btrace_close;
2869 ops->to_async = record_btrace_async;
2870 ops->to_detach = record_detach;
2871 ops->to_disconnect = record_btrace_disconnect;
2872 ops->to_mourn_inferior = record_mourn_inferior;
2873 ops->to_kill = record_kill;
2874 ops->to_stop_recording = record_btrace_stop_recording;
2875 ops->to_info_record = record_btrace_info;
2876 ops->to_insn_history = record_btrace_insn_history;
2877 ops->to_insn_history_from = record_btrace_insn_history_from;
2878 ops->to_insn_history_range = record_btrace_insn_history_range;
2879 ops->to_call_history = record_btrace_call_history;
2880 ops->to_call_history_from = record_btrace_call_history_from;
2881 ops->to_call_history_range = record_btrace_call_history_range;
2882 ops->to_record_method = record_btrace_record_method;
2883 ops->to_record_is_replaying = record_btrace_is_replaying;
2884 ops->to_record_will_replay = record_btrace_will_replay;
2885 ops->to_record_stop_replaying = record_btrace_stop_replaying_all;
2886 ops->to_xfer_partial = record_btrace_xfer_partial;
2887 ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
2888 ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
2889 ops->to_fetch_registers = record_btrace_fetch_registers;
2890 ops->to_store_registers = record_btrace_store_registers;
2891 ops->to_prepare_to_store = record_btrace_prepare_to_store;
2892 ops->to_get_unwinder = &record_btrace_to_get_unwinder;
2893 ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;
2894 ops->to_resume = record_btrace_resume;
2895 ops->to_commit_resume = record_btrace_commit_resume;
2896 ops->to_wait = record_btrace_wait;
2897 ops->to_stop = record_btrace_stop;
2898 ops->to_update_thread_list = record_btrace_update_thread_list;
2899 ops->to_thread_alive = record_btrace_thread_alive;
2900 ops->to_goto_record_begin = record_btrace_goto_begin;
2901 ops->to_goto_record_end = record_btrace_goto_end;
2902 ops->to_goto_record = record_btrace_goto;
2903 ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
2904 ops->to_stopped_by_sw_breakpoint = record_btrace_stopped_by_sw_breakpoint;
2905 ops->to_supports_stopped_by_sw_breakpoint
2906 = record_btrace_supports_stopped_by_sw_breakpoint;
2907 ops->to_stopped_by_hw_breakpoint = record_btrace_stopped_by_hw_breakpoint;
2908 ops->to_supports_stopped_by_hw_breakpoint
2909 = record_btrace_supports_stopped_by_hw_breakpoint;
2910 ops->to_execution_direction = record_btrace_execution_direction;
2911 ops->to_prepare_to_generate_core = record_btrace_prepare_to_generate_core;
2912 ops->to_done_generating_core = record_btrace_done_generating_core;
2913 ops->to_stratum = record_stratum;
2914 ops->to_magic = OPS_MAGIC;
2915 }
2916
2917 /* Start recording in BTS format. */
2918
2919 static void
2920 cmd_record_btrace_bts_start (const char *args, int from_tty)
2921 {
2922 if (args != NULL && *args != 0)
2923 error (_("Invalid argument."));
2924
2925 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2926
2927 TRY
2928 {
2929 execute_command ("target record-btrace", from_tty);
2930 }
2931 CATCH (exception, RETURN_MASK_ALL)
2932 {
2933 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2934 throw_exception (exception);
2935 }
2936 END_CATCH
2937 }
2938
2939 /* Start recording in Intel Processor Trace format. */
2940
2941 static void
2942 cmd_record_btrace_pt_start (const char *args, int from_tty)
2943 {
2944 if (args != NULL && *args != 0)
2945 error (_("Invalid argument."));
2946
2947 record_btrace_conf.format = BTRACE_FORMAT_PT;
2948
2949 TRY
2950 {
2951 execute_command ("target record-btrace", from_tty);
2952 }
2953 CATCH (exception, RETURN_MASK_ALL)
2954 {
2955 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2956 throw_exception (exception);
2957 }
2958 END_CATCH
2959 }
2960
2961 /* The "record btrace" command: alias for "target record-btrace". */
2962
2963 static void
2964 cmd_record_btrace_start (const char *args, int from_tty)
2965 {
2966 if (args != NULL && *args != 0)
2967 error (_("Invalid argument."));
2968
2969 record_btrace_conf.format = BTRACE_FORMAT_PT;
2970
2971 TRY
2972 {
2973 execute_command ("target record-btrace", from_tty);
2974 }
2975 CATCH (exception, RETURN_MASK_ALL)
2976 {
2977 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2978
2979 TRY
2980 {
2981 execute_command ("target record-btrace", from_tty);
2982 }
2983 CATCH (exception, RETURN_MASK_ALL)
2984 {
2985 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2986 throw_exception (exception);
2987 }
2988 END_CATCH
2989 }
2990 END_CATCH
2991 }
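
/* For illustration, plain "record btrace" thus attempts the trace formats
   in order of preference:

     (gdb) record btrace       # try pt first, fall back to bts
     (gdb) record btrace pt    # pt only
     (gdb) record btrace bts   # bts only  */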
2992
2993 /* The "set record btrace" command. */
2994
2995 static void
2996 cmd_set_record_btrace (const char *args, int from_tty)
2997 {
2998 printf_unfiltered (_("\"set record btrace\" must be followed "
2999 "by an appropriate subcommand.\n"));
3000 help_list (set_record_btrace_cmdlist, "set record btrace ",
3001 all_commands, gdb_stdout);
3002 }
3003
3004 /* The "show record btrace" command. */
3005
3006 static void
3007 cmd_show_record_btrace (const char *args, int from_tty)
3008 {
3009 cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
3010 }
3011
3012 /* The "show record btrace replay-memory-access" command. */
3013
3014 static void
3015 cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
3016 struct cmd_list_element *c, const char *value)
3017 {
3018 fprintf_filtered (file, _("Replay memory access is %s.\n"),
3019 replay_memory_access);
3020 }
3021
3022 /* The "set record btrace cpu none" command. */
3023
3024 static void
3025 cmd_set_record_btrace_cpu_none (const char *args, int from_tty)
3026 {
3027 if (args != nullptr && *args != 0)
3028 error (_("Trailing junk: '%s'."), args);
3029
3030 record_btrace_cpu_state = CS_NONE;
3031 }
3032
3033 /* The "set record btrace cpu auto" command. */
3034
3035 static void
3036 cmd_set_record_btrace_cpu_auto (const char *args, int from_tty)
3037 {
3038 if (args != nullptr && *args != 0)
3039 error (_("Trailing junk: '%s'."), args);
3040
3041 record_btrace_cpu_state = CS_AUTO;
3042 }
3043
3044 /* The "set record btrace cpu" command. */
3045
3046 static void
3047 cmd_set_record_btrace_cpu (const char *args, int from_tty)
3048 {
3049 if (args == nullptr)
3050 args = "";
3051
3052 /* We use a hard-coded vendor string for now. */
3053 unsigned int family, model, stepping;
3054 int l1, l2, matches = sscanf (args, "intel: %u/%u%n/%u%n", &family,
3055 &model, &l1, &stepping, &l2);
3056 if (matches == 3)
3057 {
3058 if (strlen (args) != l2)
3059 error (_("Trailing junk: '%s'."), args + l2);
3060 }
3061 else if (matches == 2)
3062 {
3063 if (strlen (args) != l1)
3064 error (_("Trailing junk: '%s'."), args + l1);
3065
3066 stepping = 0;
3067 }
3068 else
3069 error (_("Bad format. See \"help set record btrace cpu\"."));
3070
3071 if (USHRT_MAX < family)
3072 error (_("Cpu family too big."));
3073
3074 if (UCHAR_MAX < model)
3075 error (_("Cpu model too big."));
3076
3077 if (UCHAR_MAX < stepping)
3078 error (_("Cpu stepping too big."));
3079
3080 record_btrace_cpu.vendor = CV_INTEL;
3081 record_btrace_cpu.family = family;
3082 record_btrace_cpu.model = model;
3083 record_btrace_cpu.stepping = stepping;
3084
3085 record_btrace_cpu_state = CS_CPU;
3086 }
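
/* For illustration, the accepted forms of the command above are:

     (gdb) set record btrace cpu intel: 6/62/4   # family/model/stepping
     (gdb) set record btrace cpu intel: 6/62     # stepping defaults to 0
     (gdb) set record btrace cpu auto
     (gdb) set record btrace cpu none  */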
3087
3088 /* The "show record btrace cpu" command. */
3089
3090 static void
3091 cmd_show_record_btrace_cpu (const char *args, int from_tty)
3092 {
3093 const char *cpu;
3094
3095 if (args != nullptr && *args != 0)
3096 error (_("Trailing junk: '%s'."), args);
3097
3098 switch (record_btrace_cpu_state)
3099 {
3100 case CS_AUTO:
3101 printf_unfiltered (_("btrace cpu is 'auto'.\n"));
3102 return;
3103
3104 case CS_NONE:
3105 printf_unfiltered (_("btrace cpu is 'none'.\n"));
3106 return;
3107
3108 case CS_CPU:
3109 switch (record_btrace_cpu.vendor)
3110 {
3111 case CV_INTEL:
3112 if (record_btrace_cpu.stepping == 0)
3113 printf_unfiltered (_("btrace cpu is 'intel: %u/%u'.\n"),
3114 record_btrace_cpu.family,
3115 record_btrace_cpu.model);
3116 else
3117 printf_unfiltered (_("btrace cpu is 'intel: %u/%u/%u'.\n"),
3118 record_btrace_cpu.family,
3119 record_btrace_cpu.model,
3120 record_btrace_cpu.stepping);
3121 return;
3122 }
3123 }
3124
3125 error (_("Internal error: bad cpu state."));
3126 }
3127
3128 /* The "s record btrace bts" command. */
3129
3130 static void
3131 cmd_set_record_btrace_bts (const char *args, int from_tty)
3132 {
3133 printf_unfiltered (_("\"set record btrace bts\" must be followed "
3134 "by an appropriate subcommand.\n"));
3135 help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
3136 all_commands, gdb_stdout);
3137 }
3138
3139 /* The "show record btrace bts" command. */
3140
3141 static void
3142 cmd_show_record_btrace_bts (const char *args, int from_tty)
3143 {
3144 cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
3145 }
3146
3147 /* The "set record btrace pt" command. */
3148
3149 static void
3150 cmd_set_record_btrace_pt (const char *args, int from_tty)
3151 {
3152 printf_unfiltered (_("\"set record btrace pt\" must be followed "
3153 "by an appropriate subcommand.\n"));
3154 help_list (set_record_btrace_pt_cmdlist, "set record btrace pt ",
3155 all_commands, gdb_stdout);
3156 }
3157
3158 /* The "show record btrace pt" command. */
3159
3160 static void
3161 cmd_show_record_btrace_pt (const char *args, int from_tty)
3162 {
3163 cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, "");
3164 }
3165
3166 /* The "record bts buffer-size" show value function. */
3167
3168 static void
3169 show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
3170 struct cmd_list_element *c,
3171 const char *value)
3172 {
3173 fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
3174 value);
3175 }
3176
3177 /* The "record pt buffer-size" show value function. */
3178
3179 static void
3180 show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
3181 struct cmd_list_element *c,
3182 const char *value)
3183 {
3184 fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
3185 value);
3186 }
3187
3188 /* Initialize btrace commands. */
3189
3190 void
3191 _initialize_record_btrace (void)
3192 {
3193 add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
3194 _("Start branch trace recording."), &record_btrace_cmdlist,
3195 "record btrace ", 0, &record_cmdlist);
3196 add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
3197
3198 add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
3199 _("\
3200 Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
3201 The processor stores a from/to record for each branch into a cyclic buffer.\n\
3202 This format may not be available on all processors."),
3203 &record_btrace_cmdlist);
3204 add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);
3205
3206 add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
3207 _("\
3208 Start branch trace recording in Intel Processor Trace format.\n\n\
3209 This format may not be available on all processors."),
3210 &record_btrace_cmdlist);
3211 add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);
3212
3213 add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
3214 _("Set record options"), &set_record_btrace_cmdlist,
3215 "set record btrace ", 0, &set_record_cmdlist);
3216
3217 add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
3218 _("Show record options"), &show_record_btrace_cmdlist,
3219 "show record btrace ", 0, &show_record_cmdlist);
3220
3221 add_setshow_enum_cmd ("replay-memory-access", no_class,
3222 replay_memory_access_types, &replay_memory_access, _("\
3223 Set what memory accesses are allowed during replay."), _("\
3224 Show what memory accesses are allowed during replay."),
3225 _("Default is READ-ONLY.\n\n\
3226 The btrace record target does not trace data.\n\
3227 The memory therefore corresponds to the live target and not \
3228 to the current replay position.\n\n\
3229 When READ-ONLY, allow accesses to read-only memory during replay.\n\
3230 When READ-WRITE, allow accesses to read-only and read-write memory during \
3231 replay."),
3232 NULL, cmd_show_replay_memory_access,
3233 &set_record_btrace_cmdlist,
3234 &show_record_btrace_cmdlist);
3235
3236 add_prefix_cmd ("cpu", class_support, cmd_set_record_btrace_cpu,
3237 _("\
3238 Set the cpu to be used for trace decode.\n\n\
3239 The format is \"<vendor>:<identifier>\" or \"none\" or \"auto\" (default).\n\
3240 For vendor \"intel\" the format is \"<family>/<model>[/<stepping>]\".\n\n\
3241 When decoding branch trace, enable errata workarounds for the specified cpu.\n\
3242 The default is \"auto\", which uses the cpu on which the trace was recorded.\n\
3243 When GDB does not support that cpu, this option can be used to enable\n\
3244 workarounds for a similar cpu that GDB supports.\n\n\
3245 When set to \"none\", errata workarounds are disabled."),
3246 &set_record_btrace_cpu_cmdlist,
3247 _("set record btrace cpu "), 1,
3248 &set_record_btrace_cmdlist);
3249
3250 add_cmd ("auto", class_support, cmd_set_record_btrace_cpu_auto, _("\
3251 Automatically determine the cpu to be used for trace decode."),
3252 &set_record_btrace_cpu_cmdlist);
3253
3254 add_cmd ("none", class_support, cmd_set_record_btrace_cpu_none, _("\
3255 Do not enable errata workarounds for trace decode."),
3256 &set_record_btrace_cpu_cmdlist);
3257
3258 add_cmd ("cpu", class_support, cmd_show_record_btrace_cpu, _("\
3259 Show the cpu to be used for trace decode."),
3260 &show_record_btrace_cmdlist);
3261
3262 add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
3263 _("Set record btrace bts options"),
3264 &set_record_btrace_bts_cmdlist,
3265 "set record btrace bts ", 0, &set_record_btrace_cmdlist);
3266
3267 add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
3268 _("Show record btrace bts options"),
3269 &show_record_btrace_bts_cmdlist,
3270 "show record btrace bts ", 0, &show_record_btrace_cmdlist);
3271
3272 add_setshow_uinteger_cmd ("buffer-size", no_class,
3273 &record_btrace_conf.bts.size,
3274 _("Set the record/replay bts buffer size."),
3275 _("Show the record/replay bts buffer size."), _("\
3276 When starting recording, request a trace buffer of this size. \
3277 The actual buffer size may differ from the requested size. \
3278 Use \"info record\" to see the actual buffer size.\n\n\
3279 Bigger buffers allow longer recording but also take more time to process \
3280 the recorded execution trace.\n\n\
3281 The trace buffer size may not be changed while recording."), NULL,
3282 show_record_bts_buffer_size_value,
3283 &set_record_btrace_bts_cmdlist,
3284 &show_record_btrace_bts_cmdlist);
3285
3286 add_prefix_cmd ("pt", class_support, cmd_set_record_btrace_pt,
3287 _("Set record btrace pt options"),
3288 &set_record_btrace_pt_cmdlist,
3289 "set record btrace pt ", 0, &set_record_btrace_cmdlist);
3290
3291 add_prefix_cmd ("pt", class_support, cmd_show_record_btrace_pt,
3292 _("Show record btrace pt options"),
3293 &show_record_btrace_pt_cmdlist,
3294 "show record btrace pt ", 0, &show_record_btrace_cmdlist);
3295
3296 add_setshow_uinteger_cmd ("buffer-size", no_class,
3297 &record_btrace_conf.pt.size,
3298 _("Set the record/replay pt buffer size."),
3299 _("Show the record/replay pt buffer size."), _("\
3300 Bigger buffers allow longer recording but also take more time to process \
3301 the recorded execution.\n\
3302 The actual buffer size may differ from the requested size. Use \"info record\" \
3303 to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
3304 &set_record_btrace_pt_cmdlist,
3305 &show_record_btrace_pt_cmdlist);
3306
3307 init_record_btrace_ops ();
3308 add_target (&record_btrace_ops);
3309
3310 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
3311 xcalloc, xfree);
3312
3313 record_btrace_conf.bts.size = 64 * 1024;
3314 record_btrace_conf.pt.size = 16 * 1024;
3315 }