configure: check for perf_event.h version
[deliverable/binutils-gdb.git] / gdb / record-btrace.c
... / ...
CommitLineData
1/* Branch trace support for GDB, the GNU debugger.
2
3 Copyright (C) 2013-2015 Free Software Foundation, Inc.
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22#include "defs.h"
23#include "record.h"
24#include "gdbthread.h"
25#include "target.h"
26#include "gdbcmd.h"
27#include "disasm.h"
28#include "observer.h"
29#include "cli/cli-utils.h"
30#include "source.h"
31#include "ui-out.h"
32#include "symtab.h"
33#include "filenames.h"
34#include "regcache.h"
35#include "frame-unwind.h"
36#include "hashtab.h"
37#include "infrun.h"
38#include "event-loop.h"
39#include "inf-loop.h"
40
/* The target_ops of record-btrace.  */
static struct target_ops record_btrace_ops;

/* A new thread observer enabling branch tracing for the new thread.
   NULL while automatic tracing of new threads is disabled.  */
static struct observer *record_btrace_thread_observer;

/* Memory access types used in set/show record btrace replay-memory-access.  */
static const char replay_memory_access_read_only[] = "read-only";
static const char replay_memory_access_read_write[] = "read-write";
static const char *const replay_memory_access_types[] =
{
  replay_memory_access_read_only,
  replay_memory_access_read_write,
  NULL
};

/* The currently allowed replay memory access type.  Compared by pointer
   identity against the constants above (see record_btrace_xfer_partial).  */
static const char *replay_memory_access = replay_memory_access_read_only;

/* Command lists for "set/show record btrace".  */
static struct cmd_list_element *set_record_btrace_cmdlist;
static struct cmd_list_element *show_record_btrace_cmdlist;

/* The execution direction of the last resume we got.  See record-full.c.  */
static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;

/* The async event handler for reverse/replay execution.  */
static struct async_event_handler *record_btrace_async_inferior_event_handler;

/* A flag indicating that we are currently generating a core file.  While
   set, the replay-time restrictions on memory and register access are
   lifted (see record_btrace_xfer_partial, record_btrace_fetch_registers).  */
static int record_btrace_generating_corefile;

/* The current branch trace configuration.  */
static struct btrace_config record_btrace_conf;

/* Command list for "record btrace".  */
static struct cmd_list_element *record_btrace_cmdlist;

/* Command lists for "set/show record btrace bts".  */
static struct cmd_list_element *set_record_btrace_bts_cmdlist;
static struct cmd_list_element *show_record_btrace_bts_cmdlist;

/* Command lists for "set/show record btrace pt".  */
static struct cmd_list_element *set_record_btrace_pt_cmdlist;
static struct cmd_list_element *show_record_btrace_pt_cmdlist;

/* Print a record-btrace debug message.  Use do ... while (0) to avoid
   ambiguities when used in if statements.  */

#define DEBUG(msg, args...)						\
  do									\
    {									\
      if (record_debug != 0)						\
	fprintf_unfiltered (gdb_stdlog,					\
			    "[record-btrace] " msg "\n", ##args);	\
    }									\
  while (0)
99
100/* Update the branch trace for the current thread and return a pointer to its
101 thread_info.
102
103 Throws an error if there is no thread or no trace. This function never
104 returns NULL. */
105
106static struct thread_info *
107require_btrace_thread (void)
108{
109 struct thread_info *tp;
110
111 DEBUG ("require");
112
113 tp = find_thread_ptid (inferior_ptid);
114 if (tp == NULL)
115 error (_("No thread."));
116
117 btrace_fetch (tp);
118
119 if (btrace_is_empty (tp))
120 error (_("No trace."));
121
122 return tp;
123}
124
125/* Update the branch trace for the current thread and return a pointer to its
126 branch trace information struct.
127
128 Throws an error if there is no thread or no trace. This function never
129 returns NULL. */
130
131static struct btrace_thread_info *
132require_btrace (void)
133{
134 struct thread_info *tp;
135
136 tp = require_btrace_thread ();
137
138 return &tp->btrace;
139}
140
/* Enable branch tracing for one thread.  Warn on errors.

   Used as the new-thread observer callback: tracing a new thread may
   legitimately fail (e.g. missing kernel support), in which case we
   report the failure but do not abort handling the new thread.  */

static void
record_btrace_enable_warn (struct thread_info *tp)
{
  TRY
    {
      btrace_enable (tp, &record_btrace_conf);
    }
  CATCH (error, RETURN_MASK_ERROR)
    {
      /* Downgrade the error to a warning.  */
      warning ("%s", error.message);
    }
  END_CATCH
}
156
/* Cleanup callback: disable branch tracing for the thread ARG.  */

static void
record_btrace_disable_callback (void *arg)
{
  struct thread_info *thread = arg;

  btrace_disable (thread);
}
168
/* Enable automatic tracing of new threads by attaching a new-thread
   observer.  The observer is remembered in
   record_btrace_thread_observer so record_btrace_auto_disable can
   detach it again.  */

static void
record_btrace_auto_enable (void)
{
  DEBUG ("attach thread observer");

  record_btrace_thread_observer
    = observer_attach_new_thread (record_btrace_enable_warn);
}
179
180/* Disable automatic tracing of new threads. */
181
182static void
183record_btrace_auto_disable (void)
184{
185 /* The observer may have been detached, already. */
186 if (record_btrace_thread_observer == NULL)
187 return;
188
189 DEBUG ("detach thread observer");
190
191 observer_detach_new_thread (record_btrace_thread_observer);
192 record_btrace_thread_observer = NULL;
193}
194
/* The record-btrace async event handler function.

   Invoked by the event loop once the handler has been marked (see
   record_btrace_async); forwards to the regular inferior event
   handling.  */

static void
record_btrace_handle_async_inferior_event (gdb_client_data data)
{
  inferior_event_handler (INF_REG_EVENT, NULL);
}
202
/* The to_open method of target record-btrace.

   ARGS optionally names the threads to trace (a thread-number list); an
   empty list means all non-exited threads.  Requires a running inferior
   and all-stop mode.  */

static void
record_btrace_open (const char *args, int from_tty)
{
  struct cleanup *disable_chain;
  struct thread_info *tp;

  DEBUG ("open");

  record_preopen ();

  if (!target_has_execution)
    error (_("The program is not being run."));

  if (non_stop)
    error (_("Record btrace can't debug inferior in non-stop mode."));

  gdb_assert (record_btrace_thread_observer == NULL);

  /* Enable tracing per selected thread; the cleanup chain disables the
     already-enabled threads again should a later btrace_enable throw.  */
  disable_chain = make_cleanup (null_cleanup, NULL);
  ALL_NON_EXITED_THREADS (tp)
    if (args == NULL || *args == 0 || number_is_in_list (args, tp->num))
      {
	btrace_enable (tp, &record_btrace_conf);

	make_cleanup (record_btrace_disable_callback, tp);
      }

  /* Also trace threads that come into existence later on.  */
  record_btrace_auto_enable ();

  push_target (&record_btrace_ops);

  record_btrace_async_inferior_event_handler
    = create_async_event_handler (record_btrace_handle_async_inferior_event,
				  NULL);
  record_btrace_generating_corefile = 0;

  observer_notify_record_changed (current_inferior (), 1);

  /* Success - keep tracing enabled.  */
  discard_cleanups (disable_chain);
}
245
246/* The to_stop_recording method of target record-btrace. */
247
248static void
249record_btrace_stop_recording (struct target_ops *self)
250{
251 struct thread_info *tp;
252
253 DEBUG ("stop recording");
254
255 record_btrace_auto_disable ();
256
257 ALL_NON_EXITED_THREADS (tp)
258 if (tp->btrace.target != NULL)
259 btrace_disable (tp);
260}
261
/* The to_close method of target record-btrace.

   Deletes the async event handler, detaches the new-thread observer, and
   tears down tracing for all remaining threads.  */

static void
record_btrace_close (struct target_ops *self)
{
  struct thread_info *tp;

  if (record_btrace_async_inferior_event_handler != NULL)
    delete_async_event_handler (&record_btrace_async_inferior_event_handler);

  /* Make sure automatic recording gets disabled even if we did not stop
     recording before closing the record-btrace target.  */
  record_btrace_auto_disable ();

  /* We should have already stopped recording.
     Tear down btrace in case we have not.  */
  ALL_NON_EXITED_THREADS (tp)
    btrace_teardown (tp);
}
281
282/* The to_async method of target record-btrace. */
283
284static void
285record_btrace_async (struct target_ops *ops, int enable)
286{
287 if (enable)
288 mark_async_event_handler (record_btrace_async_inferior_event_handler);
289 else
290 clear_async_event_handler (record_btrace_async_inferior_event_handler);
291
292 ops->beneath->to_async (ops->beneath, enable);
293}
294
/* Scale *SIZE down to the largest binary unit that divides it evenly and
   return the matching unit suffix ("GB", "MB", "kB", or "" when the
   value is not a multiple of 1024).  */

static const char *
record_btrace_adjust_size (unsigned int *size)
{
  unsigned int value = *size;

  if ((value & ((1u << 30) - 1)) == 0)
    {
      *size = value >> 30;
      return "GB";
    }

  if ((value & ((1u << 20) - 1)) == 0)
    {
      *size = value >> 20;
      return "MB";
    }

  if ((value & ((1u << 10) - 1)) == 0)
    {
      *size = value >> 10;
      return "kB";
    }

  return "";
}
322
323/* Print a BTS configuration. */
324
325static void
326record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
327{
328 const char *suffix;
329 unsigned int size;
330
331 size = conf->size;
332 if (size > 0)
333 {
334 suffix = record_btrace_adjust_size (&size);
335 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
336 }
337}
338
339/* Print an Intel(R) Processor Trace configuration. */
340
341static void
342record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
343{
344 const char *suffix;
345 unsigned int size;
346
347 size = conf->size;
348 if (size > 0)
349 {
350 suffix = record_btrace_adjust_size (&size);
351 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
352 }
353}
354
355/* Print a branch tracing configuration. */
356
357static void
358record_btrace_print_conf (const struct btrace_config *conf)
359{
360 printf_unfiltered (_("Recording format: %s.\n"),
361 btrace_format_string (conf->format));
362
363 switch (conf->format)
364 {
365 case BTRACE_FORMAT_NONE:
366 return;
367
368 case BTRACE_FORMAT_BTS:
369 record_btrace_print_bts_conf (&conf->bts);
370 return;
371
372 case BTRACE_FORMAT_PT:
373 record_btrace_print_pt_conf (&conf->pt);
374 return;
375 }
376
377 internal_error (__FILE__, __LINE__, _("Unkown branch trace format."));
378}
379
/* The to_info_record method of target record-btrace.

   Prints the recording configuration, the number of recorded
   instructions, functions, and trace gaps for the current thread, and
   the replay position if a replay is in progress.  */

static void
record_btrace_info (struct target_ops *self)
{
  struct btrace_thread_info *btinfo;
  const struct btrace_config *conf;
  struct thread_info *tp;
  unsigned int insns, calls, gaps;

  DEBUG ("info");

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  btinfo = &tp->btrace;

  conf = btrace_conf (btinfo);
  if (conf != NULL)
    record_btrace_print_conf (conf);

  /* Bring the trace up to date before counting.  */
  btrace_fetch (tp);

  insns = 0;
  calls = 0;
  gaps = 0;

  if (!btrace_is_empty (tp))
    {
      struct btrace_call_iterator call;
      struct btrace_insn_iterator insn;

      /* The number of the last call in the trace is the call count.  */
      btrace_call_end (&call, btinfo);
      btrace_call_prev (&call, 1);
      calls = btrace_call_number (&call);

      btrace_insn_end (&insn, btinfo);

      insns = btrace_insn_number (&insn);
      if (insns != 0)
	{
	  /* The last instruction does not really belong to the trace.  */
	  insns -= 1;
	}
      else
	{
	  unsigned int steps;

	  /* Skip gaps at the end.  */
	  do
	    {
	      steps = btrace_insn_prev (&insn, 1);
	      if (steps == 0)
		break;

	      insns = btrace_insn_number (&insn);
	    }
	  while (insns == 0);
	}

      gaps = btinfo->ngaps;
    }

  printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
		       "for thread %d (%s).\n"), insns, calls, gaps,
		     tp->num, target_pid_to_str (tp->ptid));

  if (btrace_is_replaying (tp))
    printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
		       btrace_insn_number (btinfo->replay));
}
452
/* Print a decode error.

   Prints "[decode error (<errcode>): <message>]" for actual errors, and
   just "[<message>]" for benign trace events (e.g. a user-requested
   decode cancellation in the PT format).  ERRCODE is interpreted
   according to FORMAT; unrecognized codes print as "unknown".  */

static void
btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
			    enum btrace_format format)
{
  const char *errstr;
  int is_error;

  errstr = _("unknown");
  is_error = 1;

  switch (format)
    {
    default:
      break;

    case BTRACE_FORMAT_BTS:
      switch (errcode)
	{
	default:
	  break;

	case BDE_BTS_OVERFLOW:
	  errstr = _("instruction overflow");
	  break;

	case BDE_BTS_INSN_SIZE:
	  errstr = _("unknown instruction");
	  break;
	}
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      switch (errcode)
	{
	case BDE_PT_USER_QUIT:
	  is_error = 0;
	  errstr = _("trace decode cancelled");
	  break;

	case BDE_PT_DISABLED:
	  is_error = 0;
	  errstr = _("disabled");
	  break;

	case BDE_PT_OVERFLOW:
	  is_error = 0;
	  errstr = _("overflow");
	  break;

	default:
	  /* Negative codes come straight from the libipt decoder.  */
	  if (errcode < 0)
	    errstr = pt_errstr (pt_errcode (errcode));
	  break;
	}
      break;
#endif /* defined (HAVE_LIBIPT) */
    }

  ui_out_text (uiout, _("["));
  if (is_error)
    {
      ui_out_text (uiout, _("decode error ("));
      ui_out_field_int (uiout, "errcode", errcode);
      ui_out_text (uiout, _("): "));
    }
  ui_out_text (uiout, errstr);
  ui_out_text (uiout, _("]\n"));
}
524
/* Print an unsigned int VAL into ui_out field FLD.

   Helper because ui_out has no native unsigned field printer.  */

static void
ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
{
  ui_out_field_fmt (uiout, fld, "%u", val);
}
532
/* Disassemble a section of the recorded instruction trace.

   Prints the half-open range [BEGIN; END) to UIOUT, one line per
   instruction: the instruction index followed by its disassembly.
   Gaps in the trace are rendered as decode errors.  */

static void
btrace_insn_history (struct ui_out *uiout,
		     const struct btrace_thread_info *btinfo,
		     const struct btrace_insn_iterator *begin,
		     const struct btrace_insn_iterator *end, int flags)
{
  struct gdbarch *gdbarch;
  struct btrace_insn_iterator it;

  DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
	 btrace_insn_number (end));

  gdbarch = target_gdbarch ();

  for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
    {
      const struct btrace_insn *insn;

      insn = btrace_insn_get (&it);

      /* A NULL instruction indicates a gap in the trace.  */
      if (insn == NULL)
	{
	  const struct btrace_config *conf;

	  conf = btrace_conf (btinfo);

	  /* We have trace so we must have a configuration.  */
	  gdb_assert (conf != NULL);

	  btrace_ui_out_decode_error (uiout, it.function->errcode,
				      conf->format);
	}
      else
	{
	  /* Print the instruction index.  */
	  ui_out_field_uint (uiout, "index", btrace_insn_number (&it));
	  ui_out_text (uiout, "\t");

	  /* Disassembly with '/m' flag may not produce the expected result.
	     See PR gdb/11833.  */
	  gdb_disassembly (gdbarch, uiout, NULL, flags, 1, insn->pc,
			   insn->pc + 1);
	}
    }
}
581
/* The to_insn_history method of target record-btrace.

   Prints |SIZE| instructions, continuing from the previous instruction
   history request if there was one.  A negative SIZE moves backwards in
   the trace.  The printed window is remembered for the next request.  */

static void
record_btrace_insn_history (struct target_ops *self, int size, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
						       "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  btinfo = require_btrace ();
  history = btinfo->insn_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("insn-history (0x%x): %d", flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
	 start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
	begin = *replay;
      else
	btrace_insn_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
	 expand in the other direction, as well, to fill up any remaining
	 context.  */
      end = begin;
      if (size < 0)
	{
	  /* We want the current position covered, as well.  */
	  covered = btrace_insn_next (&end, 1);
	  covered += btrace_insn_prev (&begin, context - covered);
	  covered += btrace_insn_next (&end, context - covered);
	}
      else
	{
	  covered = btrace_insn_next (&end, context);
	  covered += btrace_insn_prev (&begin, context - covered);
	}
    }
  else
    {
      /* Continue from the previous request: backwards from its begin or
	 forwards from its end.  */
      begin = history->begin;
      end = history->end;

      DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
	     btrace_insn_number (&begin), btrace_insn_number (&end));

      if (size < 0)
	{
	  end = begin;
	  covered = btrace_insn_prev (&begin, context);
	}
      else
	{
	  begin = end;
	  covered = btrace_insn_next (&end, context);
	}
    }

  if (covered > 0)
    btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
	printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
	printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  btrace_set_insn_history (btinfo, &begin, &end);
  do_cleanups (uiout_cleanup);
}
667
668/* The to_insn_history_range method of target record-btrace. */
669
670static void
671record_btrace_insn_history_range (struct target_ops *self,
672 ULONGEST from, ULONGEST to, int flags)
673{
674 struct btrace_thread_info *btinfo;
675 struct btrace_insn_history *history;
676 struct btrace_insn_iterator begin, end;
677 struct cleanup *uiout_cleanup;
678 struct ui_out *uiout;
679 unsigned int low, high;
680 int found;
681
682 uiout = current_uiout;
683 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
684 "insn history");
685 low = from;
686 high = to;
687
688 DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);
689
690 /* Check for wrap-arounds. */
691 if (low != from || high != to)
692 error (_("Bad range."));
693
694 if (high < low)
695 error (_("Bad range."));
696
697 btinfo = require_btrace ();
698
699 found = btrace_find_insn_by_number (&begin, btinfo, low);
700 if (found == 0)
701 error (_("Range out of bounds."));
702
703 found = btrace_find_insn_by_number (&end, btinfo, high);
704 if (found == 0)
705 {
706 /* Silently truncate the range. */
707 btrace_insn_end (&end, btinfo);
708 }
709 else
710 {
711 /* We want both begin and end to be inclusive. */
712 btrace_insn_next (&end, 1);
713 }
714
715 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
716 btrace_set_insn_history (btinfo, &begin, &end);
717
718 do_cleanups (uiout_cleanup);
719}
720
721/* The to_insn_history_from method of target record-btrace. */
722
723static void
724record_btrace_insn_history_from (struct target_ops *self,
725 ULONGEST from, int size, int flags)
726{
727 ULONGEST begin, end, context;
728
729 context = abs (size);
730 if (context == 0)
731 error (_("Bad record instruction-history-size."));
732
733 if (size < 0)
734 {
735 end = from;
736
737 if (from < context)
738 begin = 0;
739 else
740 begin = from - context + 1;
741 }
742 else
743 {
744 begin = from;
745 end = from + context - 1;
746
747 /* Check for wrap-around. */
748 if (end < begin)
749 end = ULONGEST_MAX;
750 }
751
752 record_btrace_insn_history_range (self, begin, end, flags);
753}
754
/* Print the instruction number range for a function call history line.

   Prints "begin,end", both inclusive.  BFUN must contain at least one
   instruction.  */

static void
btrace_call_history_insn_range (struct ui_out *uiout,
				const struct btrace_function *bfun)
{
  unsigned int begin, end, size;

  size = VEC_length (btrace_insn_s, bfun->insn);
  gdb_assert (size > 0);

  begin = bfun->insn_offset;
  end = begin + size - 1;

  ui_out_field_uint (uiout, "insn begin", begin);
  ui_out_text (uiout, ",");
  ui_out_field_uint (uiout, "insn end", end);
}
773
/* Compute the lowest and highest source line for the instructions in BFUN
   and return them in PBEGIN and PEND.
   Ignore instructions that can't be mapped to BFUN, e.g. instructions that
   result from inlining or macro expansion.

   If no instruction maps to BFUN's symtab, *PEND ends up below *PBEGIN
   (INT_MIN < INT_MAX); callers use that to detect "no line range".  */

static void
btrace_compute_src_line_range (const struct btrace_function *bfun,
			       int *pbegin, int *pend)
{
  struct btrace_insn *insn;
  struct symtab *symtab;
  struct symbol *sym;
  unsigned int idx;
  int begin, end;

  begin = INT_MAX;
  end = INT_MIN;

  sym = bfun->sym;
  if (sym == NULL)
    goto out;

  symtab = symbol_symtab (sym);

  for (idx = 0; VEC_iterate (btrace_insn_s, bfun->insn, idx, insn); ++idx)
    {
      struct symtab_and_line sal;

      sal = find_pc_line (insn->pc, 0);
      /* Skip lines from other symtabs (inlining, macro expansion) and
	 instructions without line information.  */
      if (sal.symtab != symtab || sal.line == 0)
	continue;

      begin = min (begin, sal.line);
      end = max (end, sal.line);
    }

 out:
  *pbegin = begin;
  *pend = end;
}
814
/* Print the source line information for a function call history line.

   Prints "file", "file:line", or "file:min,max" depending on the line
   range computed for BFUN.  Prints nothing when BFUN has no symbol.  */

static void
btrace_call_history_src_line (struct ui_out *uiout,
			      const struct btrace_function *bfun)
{
  struct symbol *sym;
  int begin, end;

  sym = bfun->sym;
  if (sym == NULL)
    return;

  ui_out_field_string (uiout, "file",
		       symtab_to_filename_for_display (symbol_symtab (sym)));

  btrace_compute_src_line_range (bfun, &begin, &end);
  /* end < begin means no instruction mapped to a source line.  */
  if (end < begin)
    return;

  ui_out_text (uiout, ":");
  ui_out_field_int (uiout, "min line", begin);

  if (end == begin)
    return;

  ui_out_text (uiout, ",");
  ui_out_field_int (uiout, "max line", end);
}
844
845/* Get the name of a branch trace function. */
846
847static const char *
848btrace_get_bfun_name (const struct btrace_function *bfun)
849{
850 struct minimal_symbol *msym;
851 struct symbol *sym;
852
853 if (bfun == NULL)
854 return "??";
855
856 msym = bfun->msym;
857 sym = bfun->sym;
858
859 if (sym != NULL)
860 return SYMBOL_PRINT_NAME (sym);
861 else if (msym != NULL)
862 return MSYMBOL_PRINT_NAME (msym);
863 else
864 return "??";
865}
866
/* Disassemble a section of the recorded function trace.

   Prints the half-open range [BEGIN; END) to UIOUT, one line per
   function: its index, optional call-depth indentation, its name, and -
   depending on FLAGS - the instruction range and source lines it covers.
   Trace gaps are rendered as decode errors.  */

static void
btrace_call_history (struct ui_out *uiout,
		     const struct btrace_thread_info *btinfo,
		     const struct btrace_call_iterator *begin,
		     const struct btrace_call_iterator *end,
		     enum record_print_flag flags)
{
  struct btrace_call_iterator it;

  DEBUG ("ftrace (0x%x): [%u; %u)", flags, btrace_call_number (begin),
	 btrace_call_number (end));

  for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
    {
      const struct btrace_function *bfun;
      struct minimal_symbol *msym;
      struct symbol *sym;

      bfun = btrace_call_get (&it);
      sym = bfun->sym;
      msym = bfun->msym;

      /* Print the function index.  */
      ui_out_field_uint (uiout, "index", bfun->number);
      ui_out_text (uiout, "\t");

      /* Indicate gaps in the trace.  */
      if (bfun->errcode != 0)
	{
	  const struct btrace_config *conf;

	  conf = btrace_conf (btinfo);

	  /* We have trace so we must have a configuration.  */
	  gdb_assert (conf != NULL);

	  btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);

	  continue;
	}

      if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
	{
	  /* Indent by the call depth, adjusted by the global level
	     offset.  */
	  int level = bfun->level + btinfo->level, i;

	  for (i = 0; i < level; ++i)
	    ui_out_text (uiout, "  ");
	}

      if (sym != NULL)
	ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (sym));
      else if (msym != NULL)
	ui_out_field_string (uiout, "function", MSYMBOL_PRINT_NAME (msym));
      else if (!ui_out_is_mi_like_p (uiout))
	ui_out_field_string (uiout, "function", "??");

      if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
	{
	  ui_out_text (uiout, _("\tinst "));
	  btrace_call_history_insn_range (uiout, bfun);
	}

      if ((flags & RECORD_PRINT_SRC_LINE) != 0)
	{
	  ui_out_text (uiout, _("\tat "));
	  btrace_call_history_src_line (uiout, bfun);
	}

      ui_out_text (uiout, "\n");
    }
}
940
/* The to_call_history method of target record-btrace.

   Prints |SIZE| function call history lines, continuing from the
   previous request if there was one.  A negative SIZE moves backwards in
   the trace.  The printed window is remembered for the next request.  */

static void
record_btrace_call_history (struct target_ops *self, int size, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  /* NOTE(review): the tuple id "insn history" looks like a copy/paste
     from record_btrace_insn_history; record_btrace_call_history_range
     uses "func history".  Confirm against MI consumers before
     changing.  */
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
						       "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  btinfo = require_btrace ();
  history = btinfo->call_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("call-history (0x%x): %d", flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
	 start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
	{
	  begin.function = replay->function;
	  begin.btinfo = btinfo;
	}
      else
	btrace_call_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
	 expand in the other direction, as well, to fill up any remaining
	 context.  */
      end = begin;
      if (size < 0)
	{
	  /* We want the current position covered, as well.  */
	  covered = btrace_call_next (&end, 1);
	  covered += btrace_call_prev (&begin, context - covered);
	  covered += btrace_call_next (&end, context - covered);
	}
      else
	{
	  covered = btrace_call_next (&end, context);
	  covered += btrace_call_prev (&begin, context- covered);
	}
    }
  else
    {
      /* Continue from the previous request: backwards from its begin or
	 forwards from its end.  */
      begin = history->begin;
      end = history->end;

      DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", flags, size,
	     btrace_call_number (&begin), btrace_call_number (&end));

      if (size < 0)
	{
	  end = begin;
	  covered = btrace_call_prev (&begin, context);
	}
      else
	{
	  begin = end;
	  covered = btrace_call_next (&end, context);
	}
    }

  if (covered > 0)
    btrace_call_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
	printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
	printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  btrace_set_call_history (btinfo, &begin, &end);
  do_cleanups (uiout_cleanup);
}
1029
1030/* The to_call_history_range method of target record-btrace. */
1031
1032static void
1033record_btrace_call_history_range (struct target_ops *self,
1034 ULONGEST from, ULONGEST to, int flags)
1035{
1036 struct btrace_thread_info *btinfo;
1037 struct btrace_call_history *history;
1038 struct btrace_call_iterator begin, end;
1039 struct cleanup *uiout_cleanup;
1040 struct ui_out *uiout;
1041 unsigned int low, high;
1042 int found;
1043
1044 uiout = current_uiout;
1045 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
1046 "func history");
1047 low = from;
1048 high = to;
1049
1050 DEBUG ("call-history (0x%x): [%u; %u)", flags, low, high);
1051
1052 /* Check for wrap-arounds. */
1053 if (low != from || high != to)
1054 error (_("Bad range."));
1055
1056 if (high < low)
1057 error (_("Bad range."));
1058
1059 btinfo = require_btrace ();
1060
1061 found = btrace_find_call_by_number (&begin, btinfo, low);
1062 if (found == 0)
1063 error (_("Range out of bounds."));
1064
1065 found = btrace_find_call_by_number (&end, btinfo, high);
1066 if (found == 0)
1067 {
1068 /* Silently truncate the range. */
1069 btrace_call_end (&end, btinfo);
1070 }
1071 else
1072 {
1073 /* We want both begin and end to be inclusive. */
1074 btrace_call_next (&end, 1);
1075 }
1076
1077 btrace_call_history (uiout, btinfo, &begin, &end, flags);
1078 btrace_set_call_history (btinfo, &begin, &end);
1079
1080 do_cleanups (uiout_cleanup);
1081}
1082
1083/* The to_call_history_from method of target record-btrace. */
1084
1085static void
1086record_btrace_call_history_from (struct target_ops *self,
1087 ULONGEST from, int size, int flags)
1088{
1089 ULONGEST begin, end, context;
1090
1091 context = abs (size);
1092 if (context == 0)
1093 error (_("Bad record function-call-history-size."));
1094
1095 if (size < 0)
1096 {
1097 end = from;
1098
1099 if (from < context)
1100 begin = 0;
1101 else
1102 begin = from - context + 1;
1103 }
1104 else
1105 {
1106 begin = from;
1107 end = from + context - 1;
1108
1109 /* Check for wrap-around. */
1110 if (end < begin)
1111 end = ULONGEST_MAX;
1112 }
1113
1114 record_btrace_call_history_range (self, begin, end, flags);
1115}
1116
/* The to_record_is_replaying method of target record-btrace.

   Returns non-zero if any non-exited thread is currently replaying,
   zero otherwise.  */

static int
record_btrace_is_replaying (struct target_ops *self)
{
  struct thread_info *tp;

  ALL_NON_EXITED_THREADS (tp)
    if (btrace_is_replaying (tp))
      return 1;

  return 0;
}
1130
1131/* The to_xfer_partial method of target record-btrace. */
1132
1133static enum target_xfer_status
1134record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
1135 const char *annex, gdb_byte *readbuf,
1136 const gdb_byte *writebuf, ULONGEST offset,
1137 ULONGEST len, ULONGEST *xfered_len)
1138{
1139 struct target_ops *t;
1140
1141 /* Filter out requests that don't make sense during replay. */
1142 if (replay_memory_access == replay_memory_access_read_only
1143 && !record_btrace_generating_corefile
1144 && record_btrace_is_replaying (ops))
1145 {
1146 switch (object)
1147 {
1148 case TARGET_OBJECT_MEMORY:
1149 {
1150 struct target_section *section;
1151
1152 /* We do not allow writing memory in general. */
1153 if (writebuf != NULL)
1154 {
1155 *xfered_len = len;
1156 return TARGET_XFER_UNAVAILABLE;
1157 }
1158
1159 /* We allow reading readonly memory. */
1160 section = target_section_by_addr (ops, offset);
1161 if (section != NULL)
1162 {
1163 /* Check if the section we found is readonly. */
1164 if ((bfd_get_section_flags (section->the_bfd_section->owner,
1165 section->the_bfd_section)
1166 & SEC_READONLY) != 0)
1167 {
1168 /* Truncate the request to fit into this section. */
1169 len = min (len, section->endaddr - offset);
1170 break;
1171 }
1172 }
1173
1174 *xfered_len = len;
1175 return TARGET_XFER_UNAVAILABLE;
1176 }
1177 }
1178 }
1179
1180 /* Forward the request. */
1181 ops = ops->beneath;
1182 return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
1183 offset, len, xfered_len);
1184}
1185
/* The to_insert_breakpoint method of target record-btrace.

   Inserting a breakpoint writes target memory, which is normally
   forbidden while replaying; temporarily switch to read-write access for
   the duration of this call, restoring the previous mode even if the
   target beneath throws.  */

static int
record_btrace_insert_breakpoint (struct target_ops *ops,
				 struct gdbarch *gdbarch,
				 struct bp_target_info *bp_tgt)
{
  const char *old;
  int ret;

  /* Inserting breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  TRY
    {
      ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      /* Restore the access mode before propagating the exception.  */
      replay_memory_access = old;
      throw_exception (except);
    }
  END_CATCH
  replay_memory_access = old;

  return ret;
}
1216
/* The to_remove_breakpoint method of target record-btrace.

   Removing a breakpoint writes target memory, which is normally
   forbidden while replaying; temporarily switch to read-write access for
   the duration of this call, restoring the previous mode even if the
   target beneath throws.  */

static int
record_btrace_remove_breakpoint (struct target_ops *ops,
				 struct gdbarch *gdbarch,
				 struct bp_target_info *bp_tgt)
{
  const char *old;
  int ret;

  /* Removing breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  TRY
    {
      ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt);
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      /* Restore the access mode before propagating the exception.  */
      replay_memory_access = old;
      throw_exception (except);
    }
  END_CATCH
  replay_memory_access = old;

  return ret;
}
1247
/* The to_fetch_registers method of target record-btrace.

   While replaying (and not writing a core file), only the PC register is
   available; it is supplied from the recorded instruction at the replay
   position.  Otherwise the request is forwarded to the target
   beneath.  */

static void
record_btrace_fetch_registers (struct target_ops *ops,
			       struct regcache *regcache, int regno)
{
  struct btrace_insn_iterator *replay;
  struct thread_info *tp;

  tp = find_thread_ptid (inferior_ptid);
  gdb_assert (tp != NULL);

  replay = tp->btrace.replay;
  if (replay != NULL && !record_btrace_generating_corefile)
    {
      const struct btrace_insn *insn;
      struct gdbarch *gdbarch;
      int pcreg;

      gdbarch = get_regcache_arch (regcache);
      pcreg = gdbarch_pc_regnum (gdbarch);
      if (pcreg < 0)
	return;

      /* We can only provide the PC register.  */
      if (regno >= 0 && regno != pcreg)
	return;

      insn = btrace_insn_get (replay);
      gdb_assert (insn != NULL);

      regcache_raw_supply (regcache, regno, &insn->pc);
    }
  else
    {
      struct target_ops *t = ops->beneath;

      t->to_fetch_registers (t, regcache, regno);
    }
}
1288
1289/* The to_store_registers method of target record-btrace. */
1290
1291static void
1292record_btrace_store_registers (struct target_ops *ops,
1293 struct regcache *regcache, int regno)
1294{
1295 struct target_ops *t;
1296
1297 if (!record_btrace_generating_corefile && record_btrace_is_replaying (ops))
1298 error (_("This record target does not allow writing registers."));
1299
1300 gdb_assert (may_write_registers != 0);
1301
1302 t = ops->beneath;
1303 t->to_store_registers (t, regcache, regno);
1304}
1305
1306/* The to_prepare_to_store method of target record-btrace. */
1307
1308static void
1309record_btrace_prepare_to_store (struct target_ops *ops,
1310 struct regcache *regcache)
1311{
1312 struct target_ops *t;
1313
1314 if (!record_btrace_generating_corefile && record_btrace_is_replaying (ops))
1315 return;
1316
1317 t = ops->beneath;
1318 t->to_prepare_to_store (t, regcache);
1319}
1320
/* The branch trace frame cache.

   Associates a frame_info with the btrace function segment it was built
   from; instances live in the BFCACHE hash table below and are keyed by
   the FRAME member.  */

struct btrace_frame_cache
{
  /* The thread.  */
  struct thread_info *tp;

  /* The frame info.  Hash key - see bfcache_hash and bfcache_eq.  */
  struct frame_info *frame;

  /* The branch trace function segment.  */
  const struct btrace_function *bfun;
};
1334
/* A struct btrace_frame_cache hash table indexed by NEXT.  Entries are
   hashed and compared by their FRAME pointer (see bfcache_hash and
   bfcache_eq); entries are removed again in
   record_btrace_frame_dealloc_cache.  */

static htab_t bfcache;
1338
1339/* hash_f for htab_create_alloc of bfcache. */
1340
1341static hashval_t
1342bfcache_hash (const void *arg)
1343{
1344 const struct btrace_frame_cache *cache = arg;
1345
1346 return htab_hash_pointer (cache->frame);
1347}
1348
1349/* eq_f for htab_create_alloc of bfcache. */
1350
1351static int
1352bfcache_eq (const void *arg1, const void *arg2)
1353{
1354 const struct btrace_frame_cache *cache1 = arg1;
1355 const struct btrace_frame_cache *cache2 = arg2;
1356
1357 return cache1->frame == cache2->frame;
1358}
1359
1360/* Create a new btrace frame cache. */
1361
1362static struct btrace_frame_cache *
1363bfcache_new (struct frame_info *frame)
1364{
1365 struct btrace_frame_cache *cache;
1366 void **slot;
1367
1368 cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
1369 cache->frame = frame;
1370
1371 slot = htab_find_slot (bfcache, cache, INSERT);
1372 gdb_assert (*slot == NULL);
1373 *slot = cache;
1374
1375 return cache;
1376}
1377
1378/* Extract the branch trace function from a branch trace frame. */
1379
1380static const struct btrace_function *
1381btrace_get_frame_function (struct frame_info *frame)
1382{
1383 const struct btrace_frame_cache *cache;
1384 const struct btrace_function *bfun;
1385 struct btrace_frame_cache pattern;
1386 void **slot;
1387
1388 pattern.frame = frame;
1389
1390 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1391 if (slot == NULL)
1392 return NULL;
1393
1394 cache = *slot;
1395 return cache->bfun;
1396}
1397
1398/* Implement stop_reason method for record_btrace_frame_unwind. */
1399
1400static enum unwind_stop_reason
1401record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1402 void **this_cache)
1403{
1404 const struct btrace_frame_cache *cache;
1405 const struct btrace_function *bfun;
1406
1407 cache = *this_cache;
1408 bfun = cache->bfun;
1409 gdb_assert (bfun != NULL);
1410
1411 if (bfun->up == NULL)
1412 return UNWIND_UNAVAILABLE;
1413
1414 return UNWIND_NO_REASON;
1415}
1416
/* Implement this_id method for record_btrace_frame_unwind.

   Builds an "unavailable stack" frame id from the frame's function
   start address plus the number of the first segment of the traced
   function, so different replayed activations get distinct ids.  */

static void
record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
			     struct frame_id *this_id)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;
  CORE_ADDR code, special;

  cache = *this_cache;

  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  /* Walk back to the first segment of this function so all segments of
     the same function invocation share one frame id.  */
  while (bfun->segment.prev != NULL)
    bfun = bfun->segment.prev;

  code = get_frame_func (this_frame);
  special = bfun->number;

  *this_id = frame_id_build_unavailable_stack_special (code, special);

  DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
	 btrace_get_bfun_name (cache->bfun),
	 core_addr_to_string_nz (this_id->code_addr),
	 core_addr_to_string_nz (this_id->special_addr));
}
1445
/* Implement prev_register method for record_btrace_frame_unwind.

   Branch tracing does not record register or memory contents, so the
   only register we can unwind is the PC, reconstructed from the caller
   segment in the trace.  Everything else is reported unavailable.  */

static struct value *
record_btrace_frame_prev_register (struct frame_info *this_frame,
				   void **this_cache,
				   int regnum)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun, *caller;
  const struct btrace_insn *insn;
  struct gdbarch *gdbarch;
  CORE_ADDR pc;
  int pcreg;

  gdbarch = get_frame_arch (this_frame);
  pcreg = gdbarch_pc_regnum (gdbarch);
  if (pcreg < 0 || regnum != pcreg)
    throw_error (NOT_AVAILABLE_ERROR,
		 _("Registers are not available in btrace record history"));

  cache = *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  caller = bfun->up;
  if (caller == NULL)
    throw_error (NOT_AVAILABLE_ERROR,
		 _("No caller in btrace record history"));

  if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
    {
      /* The up link goes to the return address: the caller resumes at
	 its first recorded instruction.  */
      insn = VEC_index (btrace_insn_s, caller->insn, 0);
      pc = insn->pc;
    }
  else
    {
      /* The up link goes to the call site: the caller resumes after its
	 last recorded instruction, i.e. past the call.  */
      insn = VEC_last (btrace_insn_s, caller->insn);
      pc = insn->pc;

      pc += gdb_insn_length (gdbarch, pc);
    }

  DEBUG ("[frame] unwound PC in %s on level %d: %s",
	 btrace_get_bfun_name (bfun), bfun->level,
	 core_addr_to_string_nz (pc));

  return frame_unwind_got_address (this_frame, regnum, pc);
}
1494
/* Implement sniffer method for record_btrace_frame_unwind.

   Claims the frame if we are replaying: the innermost frame comes from
   the replay iterator, outer frames from the callee's up link (unless
   the callee was reached via tail call - that case is handled by
   record_btrace_tailcall_frame_sniffer).  Returns non-zero and fills in
   THIS_CACHE on success.  */

static int
record_btrace_frame_sniffer (const struct frame_unwind *self,
			     struct frame_info *this_frame,
			     void **this_cache)
{
  const struct btrace_function *bfun;
  struct btrace_frame_cache *cache;
  struct thread_info *tp;
  struct frame_info *next;

  /* THIS_FRAME does not contain a reference to its thread.  */
  tp = find_thread_ptid (inferior_ptid);
  gdb_assert (tp != NULL);

  bfun = NULL;
  next = get_next_frame (this_frame);
  if (next == NULL)
    {
      /* Innermost frame: use the current replay position, if any.  */
      const struct btrace_insn_iterator *replay;

      replay = tp->btrace.replay;
      if (replay != NULL)
	bfun = replay->function;
    }
  else
    {
      /* Outer frame: follow the callee's caller link, but leave
	 tail-call links to the tailcall sniffer.  */
      const struct btrace_function *callee;

      callee = btrace_get_frame_function (next);
      if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
	bfun = callee->up;
    }

  if (bfun == NULL)
    return 0;

  DEBUG ("[frame] sniffed frame for %s on level %d",
	 btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = tp;
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}
1544
1545/* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1546
1547static int
1548record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
1549 struct frame_info *this_frame,
1550 void **this_cache)
1551{
1552 const struct btrace_function *bfun, *callee;
1553 struct btrace_frame_cache *cache;
1554 struct frame_info *next;
1555
1556 next = get_next_frame (this_frame);
1557 if (next == NULL)
1558 return 0;
1559
1560 callee = btrace_get_frame_function (next);
1561 if (callee == NULL)
1562 return 0;
1563
1564 if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1565 return 0;
1566
1567 bfun = callee->up;
1568 if (bfun == NULL)
1569 return 0;
1570
1571 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1572 btrace_get_bfun_name (bfun), bfun->level);
1573
1574 /* This is our frame. Initialize the frame cache. */
1575 cache = bfcache_new (this_frame);
1576 cache->tp = find_thread_ptid (inferior_ptid);
1577 cache->bfun = bfun;
1578
1579 *this_cache = cache;
1580 return 1;
1581}
1582
/* Implement dealloc_cache method for record_btrace_frame_unwind and
   record_btrace_tailcall_frame_unwind.  Removes the frame's entry from
   BFCACHE; the entry itself lives on the frame obstack and is freed
   with it.  */

static void
record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = this_cache;

  /* The entry must be present - it was inserted by bfcache_new.  */
  slot = htab_find_slot (bfcache, cache, NO_INSERT);
  gdb_assert (slot != NULL);

  htab_remove_elt (bfcache, cache);
}
1596
/* btrace recording does not store previous memory content, nor the
   contents of the stack frames.  Any unwinding would return erroneous
   results as the stack contents no longer match the changed PC value
   restored from history.  Therefore this unwinder reports any possibly
   unwound registers as <unavailable>.  */

const struct frame_unwind record_btrace_frame_unwind =
{
  NORMAL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,				/* unwind_data */
  record_btrace_frame_sniffer,
  record_btrace_frame_dealloc_cache
};
1613
/* Like record_btrace_frame_unwind, but for frames reached through a
   tail call recorded in the branch trace.  */

const struct frame_unwind record_btrace_tailcall_frame_unwind =
{
  TAILCALL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,				/* unwind_data */
  record_btrace_tailcall_frame_sniffer,
  record_btrace_frame_dealloc_cache
};
1624
/* Implement the to_get_unwinder method.  Returns the btrace unwinder,
   which is tried before the regular unwinders.  */

static const struct frame_unwind *
record_btrace_to_get_unwinder (struct target_ops *self)
{
  return &record_btrace_frame_unwind;
}
1632
/* Implement the to_get_tailcall_unwinder method.  Returns the btrace
   tail-call unwinder.  */

static const struct frame_unwind *
record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
{
  return &record_btrace_tailcall_frame_unwind;
}
1640
/* Indicate that TP should be resumed according to FLAG.

   The actual stepping happens later, in record_btrace_step_thread,
   which consumes the flag; here we only record the intent and make
   sure the trace is up to date.  Errors out if TP already has a
   pending move.  */

static void
record_btrace_resume_thread (struct thread_info *tp,
			     enum btrace_thread_flag flag)
{
  struct btrace_thread_info *btinfo;

  DEBUG ("resuming %d (%s): %u", tp->num, target_pid_to_str (tp->ptid), flag);

  btinfo = &tp->btrace;

  /* A thread may only have one pending move at a time.  */
  if ((btinfo->flags & BTHR_MOVE) != 0)
    error (_("Thread already moving."));

  /* Fetch the latest branch trace.  */
  btrace_fetch (tp);

  btinfo->flags |= flag;
}
1661
1662/* Find the thread to resume given a PTID. */
1663
1664static struct thread_info *
1665record_btrace_find_resume_thread (ptid_t ptid)
1666{
1667 struct thread_info *tp;
1668
1669 /* When asked to resume everything, we pick the current thread. */
1670 if (ptid_equal (minus_one_ptid, ptid) || ptid_is_pid (ptid))
1671 ptid = inferior_ptid;
1672
1673 return find_thread_ptid (ptid);
1674}
1675
/* Start replaying a thread.

   Positions a fresh replay iterator at the end of TP's branch trace
   (skipping trailing gaps), installs it as TP->btrace.replay, and fixes
   up the stepping-related frame ids that were computed with the normal
   unwinder so stepping keeps working under the btrace unwinder.
   Returns the new iterator, or NULL if TP has no trace.  On error, the
   previous state is restored and the exception re-thrown.  */

static struct btrace_insn_iterator *
record_btrace_start_replaying (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay;
  struct btrace_thread_info *btinfo;
  int executing;

  btinfo = &tp->btrace;
  replay = NULL;

  /* We can't start replaying without trace.  */
  if (btinfo->begin == NULL)
    return NULL;

  /* Clear the executing flag to allow changes to the current frame.
     We are not actually running, yet.  We just started a reverse execution
     command or a record goto command.
     For the latter, EXECUTING is false and this has no effect.
     For the former, EXECUTING is true and we're in to_wait, about to
     move the thread.  Since we need to recompute the stack, we temporarily
     set EXECUTING to false.  */
  executing = is_executing (tp->ptid);
  set_executing (tp->ptid, 0);

  /* GDB stores the current frame_id when stepping in order to detect steps
     into subroutines.
     Since frames are computed differently when we're replaying, we need to
     recompute those stored frames and fix them up so we can still detect
     subroutines after we started replaying.  */
  TRY
    {
      struct frame_info *frame;
      struct frame_id frame_id;
      int upd_step_frame_id, upd_step_stack_frame_id;

      /* The current frame without replaying - computed via normal unwind.  */
      frame = get_current_frame ();
      frame_id = get_frame_id (frame);

      /* Check if we need to update any stepping-related frame id's.  */
      upd_step_frame_id = frame_id_eq (frame_id,
				       tp->control.step_frame_id);
      upd_step_stack_frame_id = frame_id_eq (frame_id,
					     tp->control.step_stack_frame_id);

      /* We start replaying at the end of the branch trace.  This corresponds
	 to the current instruction.  */
      replay = xmalloc (sizeof (*replay));
      btrace_insn_end (replay, btinfo);

      /* Skip gaps at the end of the trace.  */
      while (btrace_insn_get (replay) == NULL)
	{
	  unsigned int steps;

	  steps = btrace_insn_prev (replay, 1);
	  if (steps == 0)
	    error (_("No trace."));
	}

      /* We're not replaying, yet.  */
      gdb_assert (btinfo->replay == NULL);
      btinfo->replay = replay;

      /* Make sure we're not using any stale registers.  */
      registers_changed_ptid (tp->ptid);

      /* The current frame with replaying - computed via btrace unwind.  */
      frame = get_current_frame ();
      frame_id = get_frame_id (frame);

      /* Replace stepping related frames where necessary.  */
      if (upd_step_frame_id)
	tp->control.step_frame_id = frame_id;
      if (upd_step_stack_frame_id)
	tp->control.step_stack_frame_id = frame_id;
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      /* Restore the previous execution state.  */
      set_executing (tp->ptid, executing);

      /* Undo the partial replay setup before re-throwing.  */
      xfree (btinfo->replay);
      btinfo->replay = NULL;

      registers_changed_ptid (tp->ptid);

      throw_exception (except);
    }
  END_CATCH

  /* Restore the previous execution state.  */
  set_executing (tp->ptid, executing);

  return replay;
}
1774
1775/* Stop replaying a thread. */
1776
1777static void
1778record_btrace_stop_replaying (struct thread_info *tp)
1779{
1780 struct btrace_thread_info *btinfo;
1781
1782 btinfo = &tp->btrace;
1783
1784 xfree (btinfo->replay);
1785 btinfo->replay = NULL;
1786
1787 /* Make sure we're not leaving any stale registers. */
1788 registers_changed_ptid (tp->ptid);
1789}
1790
1791/* The to_resume method of target record-btrace. */
1792
1793static void
1794record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
1795 enum gdb_signal signal)
1796{
1797 struct thread_info *tp, *other;
1798 enum btrace_thread_flag flag;
1799
1800 DEBUG ("resume %s: %s", target_pid_to_str (ptid), step ? "step" : "cont");
1801
1802 /* Store the execution direction of the last resume. */
1803 record_btrace_resume_exec_dir = execution_direction;
1804
1805 tp = record_btrace_find_resume_thread (ptid);
1806 if (tp == NULL)
1807 error (_("Cannot find thread to resume."));
1808
1809 /* Stop replaying other threads if the thread to resume is not replaying. */
1810 if (!btrace_is_replaying (tp) && execution_direction != EXEC_REVERSE)
1811 ALL_NON_EXITED_THREADS (other)
1812 record_btrace_stop_replaying (other);
1813
1814 /* As long as we're not replaying, just forward the request. */
1815 if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
1816 {
1817 ops = ops->beneath;
1818 return ops->to_resume (ops, ptid, step, signal);
1819 }
1820
1821 /* Compute the btrace thread flag for the requested move. */
1822 if (step == 0)
1823 flag = execution_direction == EXEC_REVERSE ? BTHR_RCONT : BTHR_CONT;
1824 else
1825 flag = execution_direction == EXEC_REVERSE ? BTHR_RSTEP : BTHR_STEP;
1826
1827 /* At the moment, we only move a single thread. We could also move
1828 all threads in parallel by single-stepping each resumed thread
1829 until the first runs into an event.
1830 When we do that, we would want to continue all other threads.
1831 For now, just resume one thread to not confuse to_wait. */
1832 record_btrace_resume_thread (tp, flag);
1833
1834 /* We just indicate the resume intent here. The actual stepping happens in
1835 record_btrace_wait below. */
1836
1837 /* Async support. */
1838 if (target_can_async_p ())
1839 {
1840 target_async (1);
1841 mark_async_event_handler (record_btrace_async_inferior_event_handler);
1842 }
1843}
1844
/* Find a thread to move.

   Prefers the thread identified by PTID if it has a pending move;
   otherwise returns any non-exited thread with a pending move, or NULL
   if there is none.  */

static struct thread_info *
record_btrace_find_thread_to_move (ptid_t ptid)
{
  struct thread_info *tp;

  /* First check the parameter thread.  */
  tp = find_thread_ptid (ptid);
  if (tp != NULL && (tp->btrace.flags & BTHR_MOVE) != 0)
    return tp;

  /* Otherwise, find one other thread that has been resumed.  */
  ALL_NON_EXITED_THREADS (tp)
    if ((tp->btrace.flags & BTHR_MOVE) != 0)
      return tp;

  return NULL;
}
1864
1865/* Return a target_waitstatus indicating that we ran out of history. */
1866
1867static struct target_waitstatus
1868btrace_step_no_history (void)
1869{
1870 struct target_waitstatus status;
1871
1872 status.kind = TARGET_WAITKIND_NO_HISTORY;
1873
1874 return status;
1875}
1876
1877/* Return a target_waitstatus indicating that a step finished. */
1878
1879static struct target_waitstatus
1880btrace_step_stopped (void)
1881{
1882 struct target_waitstatus status;
1883
1884 status.kind = TARGET_WAITKIND_STOPPED;
1885 status.value.sig = GDB_SIGNAL_TRAP;
1886
1887 return status;
1888}
1889
1890/* Clear the record histories. */
1891
1892static void
1893record_btrace_clear_histories (struct btrace_thread_info *btinfo)
1894{
1895 xfree (btinfo->insn_history);
1896 xfree (btinfo->call_history);
1897
1898 btinfo->insn_history = NULL;
1899 btinfo->call_history = NULL;
1900}
1901
/* Step a single thread.

   Consumes TP's pending move flag (set in record_btrace_resume_thread)
   and performs the requested motion on the replay iterator:

     BTHR_STEP/BTHR_RSTEP - one instruction forward/backward;
     BTHR_CONT/BTHR_RCONT - forward/backward until a breakpoint is hit
			    or the history runs out.

   Gaps in the trace are skipped in all cases.  Replaying stops
   automatically when the iterator reaches the end of the trace.
   Returns the wait status to report.  */

static struct target_waitstatus
record_btrace_step_thread (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay, end;
  struct btrace_thread_info *btinfo;
  struct address_space *aspace;
  struct inferior *inf;
  enum btrace_thread_flag flags;
  unsigned int steps;

  /* We can't step without an execution history.  */
  if (btrace_is_empty (tp))
    return btrace_step_no_history ();

  btinfo = &tp->btrace;
  replay = btinfo->replay;

  /* Consume the move flag so the move happens only once.  */
  flags = btinfo->flags & BTHR_MOVE;
  btinfo->flags &= ~BTHR_MOVE;

  DEBUG ("stepping %d (%s): %u", tp->num, target_pid_to_str (tp->ptid), flags);

  switch (flags)
    {
    default:
      internal_error (__FILE__, __LINE__, _("invalid stepping type."));

    case BTHR_STEP:
      /* We're done if we're not replaying.  */
      if (replay == NULL)
	return btrace_step_no_history ();

      /* Skip gaps during replay.  */
      do
	{
	  steps = btrace_insn_next (replay, 1);
	  if (steps == 0)
	    {
	      record_btrace_stop_replaying (tp);
	      return btrace_step_no_history ();
	    }
	}
      while (btrace_insn_get (replay) == NULL);

      /* Determine the end of the instruction trace.  */
      btrace_insn_end (&end, btinfo);

      /* We stop replaying if we reached the end of the trace.  */
      if (btrace_insn_cmp (replay, &end) == 0)
	record_btrace_stop_replaying (tp);

      return btrace_step_stopped ();

    case BTHR_RSTEP:
      /* Start replaying if we're not already doing so.  */
      if (replay == NULL)
	replay = record_btrace_start_replaying (tp);

      /* If we can't step any further, we reached the end of the history.
	 Skip gaps during replay.  */
      do
	{
	  steps = btrace_insn_prev (replay, 1);
	  if (steps == 0)
	    return btrace_step_no_history ();

	}
      while (btrace_insn_get (replay) == NULL);

      return btrace_step_stopped ();

    case BTHR_CONT:
      /* We're done if we're not replaying.  */
      if (replay == NULL)
	return btrace_step_no_history ();

      /* The address space is needed for breakpoint checks below.  */
      inf = find_inferior_ptid (tp->ptid);
      aspace = inf->aspace;

      /* Determine the end of the instruction trace.  */
      btrace_insn_end (&end, btinfo);

      for (;;)
	{
	  const struct btrace_insn *insn;

	  /* Skip gaps during replay.  */
	  do
	    {
	      steps = btrace_insn_next (replay, 1);
	      if (steps == 0)
		{
		  record_btrace_stop_replaying (tp);
		  return btrace_step_no_history ();
		}

	      insn = btrace_insn_get (replay);
	    }
	  while (insn == NULL);

	  /* We stop replaying if we reached the end of the trace.  */
	  if (btrace_insn_cmp (replay, &end) == 0)
	    {
	      record_btrace_stop_replaying (tp);
	      return btrace_step_no_history ();
	    }

	  DEBUG ("stepping %d (%s) ... %s", tp->num,
		 target_pid_to_str (tp->ptid),
		 core_addr_to_string_nz (insn->pc));

	  /* Stop at breakpoints, like real execution would.  */
	  if (record_check_stopped_by_breakpoint (aspace, insn->pc,
						  &btinfo->stop_reason))
	    return btrace_step_stopped ();
	}

    case BTHR_RCONT:
      /* Start replaying if we're not already doing so.  */
      if (replay == NULL)
	replay = record_btrace_start_replaying (tp);

      inf = find_inferior_ptid (tp->ptid);
      aspace = inf->aspace;

      for (;;)
	{
	  const struct btrace_insn *insn;

	  /* If we can't step any further, we reached the end of the history.
	     Skip gaps during replay.  */
	  do
	    {
	      steps = btrace_insn_prev (replay, 1);
	      if (steps == 0)
		return btrace_step_no_history ();

	      insn = btrace_insn_get (replay);
	    }
	  while (insn == NULL);

	  DEBUG ("reverse-stepping %d (%s) ... %s", tp->num,
		 target_pid_to_str (tp->ptid),
		 core_addr_to_string_nz (insn->pc));

	  /* Stop at breakpoints, like real execution would.  */
	  if (record_check_stopped_by_breakpoint (aspace, insn->pc,
						  &btinfo->stop_reason))
	    return btrace_step_stopped ();
	}
    }
}
2054
/* The to_wait method of target record-btrace.

   Forwarded to the target beneath unless we are replaying or running
   in reverse.  Otherwise picks one thread with a pending move, steps
   it via record_btrace_step_thread, and cancels the pending moves of
   all other threads (all-stop only).  */

static ptid_t
record_btrace_wait (struct target_ops *ops, ptid_t ptid,
		    struct target_waitstatus *status, int options)
{
  struct thread_info *tp, *other;

  DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);

  /* As long as we're not replaying, just forward the request.  */
  if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
    {
      ops = ops->beneath;
      return ops->to_wait (ops, ptid, status, options);
    }

  /* Let's find a thread to move.  */
  tp = record_btrace_find_thread_to_move (ptid);
  if (tp == NULL)
    {
      DEBUG ("wait %s: no thread", target_pid_to_str (ptid));

      /* Nothing to do - report an ignored event.  */
      status->kind = TARGET_WAITKIND_IGNORE;
      return minus_one_ptid;
    }

  /* We only move a single thread.  We're not able to correlate threads.  */
  *status = record_btrace_step_thread (tp);

  /* Stop all other threads.  */
  if (!non_stop)
    ALL_NON_EXITED_THREADS (other)
      other->btrace.flags &= ~BTHR_MOVE;

  /* Start record histories anew from the current position.  */
  record_btrace_clear_histories (&tp->btrace);

  /* We moved the replay position but did not update registers.  */
  registers_changed_ptid (tp->ptid);

  return tp->ptid;
}
2098
/* The to_can_execute_reverse method of target record-btrace.
   Reverse execution is the point of this target, so always true.  */

static int
record_btrace_can_execute_reverse (struct target_ops *self)
{
  return 1;
}
2106
2107/* The to_stopped_by_sw_breakpoint method of target record-btrace. */
2108
2109static int
2110record_btrace_stopped_by_sw_breakpoint (struct target_ops *ops)
2111{
2112 if (record_btrace_is_replaying (ops))
2113 {
2114 struct thread_info *tp = inferior_thread ();
2115
2116 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2117 }
2118
2119 return ops->beneath->to_stopped_by_sw_breakpoint (ops->beneath);
2120}
2121
2122/* The to_supports_stopped_by_sw_breakpoint method of target
2123 record-btrace. */
2124
2125static int
2126record_btrace_supports_stopped_by_sw_breakpoint (struct target_ops *ops)
2127{
2128 if (record_btrace_is_replaying (ops))
2129 return 1;
2130
2131 return ops->beneath->to_supports_stopped_by_sw_breakpoint (ops->beneath);
2132}
2133
/* The to_stopped_by_hw_breakpoint method of target record-btrace.
   (The original comment said "sw"; this function handles the hardware
   breakpoint case.)  */

static int
record_btrace_stopped_by_hw_breakpoint (struct target_ops *ops)
{
  if (record_btrace_is_replaying (ops))
    {
      struct thread_info *tp = inferior_thread ();

      /* While replaying, the stop reason recorded by
	 record_btrace_step_thread is authoritative.  */
      return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
    }

  return ops->beneath->to_stopped_by_hw_breakpoint (ops->beneath);
}
2148
2149/* The to_supports_stopped_by_hw_breakpoint method of target
2150 record-btrace. */
2151
2152static int
2153record_btrace_supports_stopped_by_hw_breakpoint (struct target_ops *ops)
2154{
2155 if (record_btrace_is_replaying (ops))
2156 return 1;
2157
2158 return ops->beneath->to_supports_stopped_by_hw_breakpoint (ops->beneath);
2159}
2160
2161/* The to_update_thread_list method of target record-btrace. */
2162
2163static void
2164record_btrace_update_thread_list (struct target_ops *ops)
2165{
2166 /* We don't add or remove threads during replay. */
2167 if (record_btrace_is_replaying (ops))
2168 return;
2169
2170 /* Forward the request. */
2171 ops = ops->beneath;
2172 ops->to_update_thread_list (ops);
2173}
2174
2175/* The to_thread_alive method of target record-btrace. */
2176
2177static int
2178record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
2179{
2180 /* We don't add or remove threads during replay. */
2181 if (record_btrace_is_replaying (ops))
2182 return find_thread_ptid (ptid) != NULL;
2183
2184 /* Forward the request. */
2185 ops = ops->beneath;
2186 return ops->to_thread_alive (ops, ptid);
2187}
2188
/* Set the replay branch trace instruction iterator.  If IT is NULL, replay
   is stopped.

   On success the record histories are reset and the new stop location
   is printed.  Note that the iterator owned by BTINFO is overwritten
   with *IT; record_btrace_start_replaying only allocates it.  */

static void
record_btrace_set_replay (struct thread_info *tp,
			  const struct btrace_insn_iterator *it)
{
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;

  if (it == NULL || it->function == NULL)
    record_btrace_stop_replaying (tp);
  else
    {
      if (btinfo->replay == NULL)
	record_btrace_start_replaying (tp);
      else if (btrace_insn_cmp (btinfo->replay, it) == 0)
	/* Already at the requested position - nothing to do.  */
	return;

      /* Move the (possibly freshly allocated) iterator to IT.  */
      *btinfo->replay = *it;
      registers_changed_ptid (tp->ptid);
    }

  /* Start anew from the new replay position.  */
  record_btrace_clear_histories (btinfo);

  stop_pc = regcache_read_pc (get_current_regcache ());
  print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
}
2219
2220/* The to_goto_record_begin method of target record-btrace. */
2221
2222static void
2223record_btrace_goto_begin (struct target_ops *self)
2224{
2225 struct thread_info *tp;
2226 struct btrace_insn_iterator begin;
2227
2228 tp = require_btrace_thread ();
2229
2230 btrace_insn_begin (&begin, &tp->btrace);
2231 record_btrace_set_replay (tp, &begin);
2232}
2233
2234/* The to_goto_record_end method of target record-btrace. */
2235
2236static void
2237record_btrace_goto_end (struct target_ops *ops)
2238{
2239 struct thread_info *tp;
2240
2241 tp = require_btrace_thread ();
2242
2243 record_btrace_set_replay (tp, NULL);
2244}
2245
/* The to_goto_record method of target record-btrace.
   Moves the replay position to instruction number INSN.  */

static void
record_btrace_goto (struct target_ops *self, ULONGEST insn)
{
  struct thread_info *tp;
  struct btrace_insn_iterator it;
  unsigned int number;
  int found;

  number = insn;

  /* Check for wrap-arounds.  INSN may be wider than unsigned int, so a
     value that does not round-trip through NUMBER was truncated.  */
  if (number != insn)
    error (_("Instruction number out of range."));

  tp = require_btrace_thread ();

  found = btrace_find_insn_by_number (&it, &tp->btrace, number);
  if (found == 0)
    error (_("No such instruction."));

  record_btrace_set_replay (tp, &it);
}
2270
/* The to_execution_direction target method.  Reports the direction of
   the last resume (stored in record_btrace_resume).  */

static enum exec_direction_kind
record_btrace_execution_direction (struct target_ops *self)
{
  return record_btrace_resume_exec_dir;
}
2278
/* The to_prepare_to_generate_core target method.  Sets the flag that
   lifts replay restrictions (memory/register access) while a core
   file is being written.  */

static void
record_btrace_prepare_to_generate_core (struct target_ops *self)
{
  record_btrace_generating_corefile = 1;
}
2286
/* The to_done_generating_core target method.  Clears the flag set by
   record_btrace_prepare_to_generate_core.  */

static void
record_btrace_done_generating_core (struct target_ops *self)
{
  record_btrace_generating_corefile = 0;
}
2294
/* Initialize the record-btrace target ops.  Called once at startup;
   fills in the method vector for the record-btrace target.  */

static void
init_record_btrace_ops (void)
{
  struct target_ops *ops;

  ops = &record_btrace_ops;
  ops->to_shortname = "record-btrace";
  ops->to_longname = "Branch tracing target";
  ops->to_doc = "Collect control-flow trace and provide the execution history.";
  ops->to_open = record_btrace_open;
  ops->to_close = record_btrace_close;
  ops->to_async = record_btrace_async;
  /* Generic record methods are shared with the full record target.  */
  ops->to_detach = record_detach;
  ops->to_disconnect = record_disconnect;
  ops->to_mourn_inferior = record_mourn_inferior;
  ops->to_kill = record_kill;
  ops->to_stop_recording = record_btrace_stop_recording;
  ops->to_info_record = record_btrace_info;
  ops->to_insn_history = record_btrace_insn_history;
  ops->to_insn_history_from = record_btrace_insn_history_from;
  ops->to_insn_history_range = record_btrace_insn_history_range;
  ops->to_call_history = record_btrace_call_history;
  ops->to_call_history_from = record_btrace_call_history_from;
  ops->to_call_history_range = record_btrace_call_history_range;
  ops->to_record_is_replaying = record_btrace_is_replaying;
  ops->to_xfer_partial = record_btrace_xfer_partial;
  ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
  ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
  ops->to_fetch_registers = record_btrace_fetch_registers;
  ops->to_store_registers = record_btrace_store_registers;
  ops->to_prepare_to_store = record_btrace_prepare_to_store;
  ops->to_get_unwinder = &record_btrace_to_get_unwinder;
  ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;
  ops->to_resume = record_btrace_resume;
  ops->to_wait = record_btrace_wait;
  ops->to_update_thread_list = record_btrace_update_thread_list;
  ops->to_thread_alive = record_btrace_thread_alive;
  ops->to_goto_record_begin = record_btrace_goto_begin;
  ops->to_goto_record_end = record_btrace_goto_end;
  ops->to_goto_record = record_btrace_goto;
  ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
  ops->to_stopped_by_sw_breakpoint = record_btrace_stopped_by_sw_breakpoint;
  ops->to_supports_stopped_by_sw_breakpoint
    = record_btrace_supports_stopped_by_sw_breakpoint;
  ops->to_stopped_by_hw_breakpoint = record_btrace_stopped_by_hw_breakpoint;
  ops->to_supports_stopped_by_hw_breakpoint
    = record_btrace_supports_stopped_by_hw_breakpoint;
  ops->to_execution_direction = record_btrace_execution_direction;
  ops->to_prepare_to_generate_core = record_btrace_prepare_to_generate_core;
  ops->to_done_generating_core = record_btrace_done_generating_core;
  ops->to_stratum = record_stratum;
  ops->to_magic = OPS_MAGIC;
}
2350
2351/* Start recording in BTS format. */
2352
2353static void
2354cmd_record_btrace_bts_start (char *args, int from_tty)
2355{
2356 if (args != NULL && *args != 0)
2357 error (_("Invalid argument."));
2358
2359 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2360
2361 TRY
2362 {
2363 execute_command ("target record-btrace", from_tty);
2364 }
2365 CATCH (exception, RETURN_MASK_ALL)
2366 {
2367 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2368 throw_exception (exception);
2369 }
2370 END_CATCH
2371}
2372
2373/* Start recording Intel(R) Processor Trace. */
2374
2375static void
2376cmd_record_btrace_pt_start (char *args, int from_tty)
2377{
2378 if (args != NULL && *args != 0)
2379 error (_("Invalid argument."));
2380
2381 record_btrace_conf.format = BTRACE_FORMAT_PT;
2382
2383 TRY
2384 {
2385 execute_command ("target record-btrace", from_tty);
2386 }
2387 CATCH (exception, RETURN_MASK_ALL)
2388 {
2389 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2390 throw_exception (exception);
2391 }
2392 END_CATCH
2393}
2394
2395/* Alias for "target record". */
2396
2397static void
2398cmd_record_btrace_start (char *args, int from_tty)
2399{
2400 if (args != NULL && *args != 0)
2401 error (_("Invalid argument."));
2402
2403 record_btrace_conf.format = BTRACE_FORMAT_PT;
2404
2405 TRY
2406 {
2407 execute_command ("target record-btrace", from_tty);
2408 }
2409 CATCH (exception, RETURN_MASK_ALL)
2410 {
2411 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2412
2413 TRY
2414 {
2415 execute_command ("target record-btrace", from_tty);
2416 }
2417 CATCH (exception, RETURN_MASK_ALL)
2418 {
2419 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2420 throw_exception (exception);
2421 }
2422 END_CATCH
2423 }
2424 END_CATCH
2425}
2426
/* The "set record btrace" prefix command.

   Invoked without a subcommand, it lists the current values of all
   "set record btrace" settings, mirroring "show record btrace".  */

static void
cmd_set_record_btrace (char *args, int from_tty)
{
  cmd_show_list (set_record_btrace_cmdlist, from_tty, "");
}
2434
/* The "show record btrace" prefix command.  Lists the current values
   of all "record btrace" settings.  */

static void
cmd_show_record_btrace (char *args, int from_tty)
{
  cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
}
2442
2443/* The "show record btrace replay-memory-access" command. */
2444
2445static void
2446cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2447 struct cmd_list_element *c, const char *value)
2448{
2449 fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
2450 replay_memory_access);
2451}
2452
/* The "set record btrace bts" prefix command.

   Invoked without a subcommand, it explains that a subcommand is
   required and lists the available BTS settings.  */

static void
cmd_set_record_btrace_bts (char *args, int from_tty)
{
  printf_unfiltered (_("\"set record btrace bts\" must be followed "
		       "by an appropriate subcommand.\n"));
  help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
	     all_commands, gdb_stdout);
}
2463
/* The "show record btrace bts" prefix command.  Lists the current
   values of all BTS-specific settings.  */

static void
cmd_show_record_btrace_bts (char *args, int from_tty)
{
  cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
}
2471
/* The "set record btrace pt" prefix command.

   Invoked without a subcommand, it explains that a subcommand is
   required and lists the available Intel(R) Processor Trace settings.  */

static void
cmd_set_record_btrace_pt (char *args, int from_tty)
{
  printf_unfiltered (_("\"set record btrace pt\" must be followed "
		       "by an appropriate subcommand.\n"));
  help_list (set_record_btrace_pt_cmdlist, "set record btrace pt ",
	     all_commands, gdb_stdout);
}
2482
/* The "show record btrace pt" prefix command.  Lists the current
   values of all Intel(R) Processor Trace settings.  */

static void
cmd_show_record_btrace_pt (char *args, int from_tty)
{
  cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, "");
}
2490
/* The "record bts buffer-size" show value function.

   VALUE is the current buffer size rendered as a string; print it to
   FILE as required for show-value callbacks.  */

static void
show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
				   struct cmd_list_element *c,
				   const char *value)
{
  fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
		    value);
}
2501
/* The "record pt buffer-size" show value function.

   VALUE is the current buffer size rendered as a string; print it to
   FILE as required for show-value callbacks.  */

static void
show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
				  struct cmd_list_element *c,
				  const char *value)
{
  fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
		    value);
}
2512
2513void _initialize_record_btrace (void);
2514
/* Initialize btrace commands.

   Registers the "record btrace" command family, its set/show options,
   and the record-btrace target itself.  The registration order matters:
   each alias and sub-prefix must be added after its parent command.  */

void
_initialize_record_btrace (void)
{
  /* "record btrace" starts recording; "record b" is an alias.  */
  add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
		  _("Start branch trace recording."), &record_btrace_cmdlist,
		  "record btrace ", 0, &record_cmdlist);
  add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);

  /* "record btrace bts" (alias "record bts") selects the BTS format
     explicitly.  */
  add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
	   _("\
Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
The processor stores a from/to record for each branch into a cyclic buffer.\n\
This format may not be available on all processors."),
	   &record_btrace_cmdlist);
  add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);

  /* "record btrace pt" (alias "record pt") selects the Intel(R)
     Processor Trace format explicitly.  */
  add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
	   _("\
Start branch trace recording in Intel(R) Processor Trace format.\n\n\
This format may not be available on all processors."),
	   &record_btrace_cmdlist);
  add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);

  /* "set/show record btrace" prefix commands.  */
  add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
		  _("Set record options"), &set_record_btrace_cmdlist,
		  "set record btrace ", 0, &set_record_cmdlist);

  add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
		  _("Show record options"), &show_record_btrace_cmdlist,
		  "show record btrace ", 0, &show_record_cmdlist);

  /* "set/show record btrace replay-memory-access".  */
  add_setshow_enum_cmd ("replay-memory-access", no_class,
			replay_memory_access_types, &replay_memory_access, _("\
Set what memory accesses are allowed during replay."), _("\
Show what memory accesses are allowed during replay."),
			_("Default is READ-ONLY.\n\n\
The btrace record target does not trace data.\n\
The memory therefore corresponds to the live target and not \
to the current replay position.\n\n\
When READ-ONLY, allow accesses to read-only memory during replay.\n\
When READ-WRITE, allow accesses to read-only and read-write memory during \
replay."),
			NULL, cmd_show_replay_memory_access,
			&set_record_btrace_cmdlist,
			&show_record_btrace_cmdlist);

  /* "set/show record btrace bts" prefix commands and the BTS
     buffer-size setting.  */
  add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
		  _("Set record btrace bts options"),
		  &set_record_btrace_bts_cmdlist,
		  "set record btrace bts ", 0, &set_record_btrace_cmdlist);

  add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
		  _("Show record btrace bts options"),
		  &show_record_btrace_bts_cmdlist,
		  "show record btrace bts ", 0, &show_record_btrace_cmdlist);

  add_setshow_uinteger_cmd ("buffer-size", no_class,
			    &record_btrace_conf.bts.size,
			    _("Set the record/replay bts buffer size."),
			    _("Show the record/replay bts buffer size."), _("\
When starting recording request a trace buffer of this size.  \
The actual buffer size may differ from the requested size.  \
Use \"info record\" to see the actual buffer size.\n\n\
Bigger buffers allow longer recording but also take more time to process \
the recorded execution trace.\n\n\
The trace buffer size may not be changed while recording."), NULL,
			    show_record_bts_buffer_size_value,
			    &set_record_btrace_bts_cmdlist,
			    &show_record_btrace_bts_cmdlist);

  /* "set/show record btrace pt" prefix commands and the PT
     buffer-size setting.  */
  add_prefix_cmd ("pt", class_support, cmd_set_record_btrace_pt,
		  _("Set record btrace pt options"),
		  &set_record_btrace_pt_cmdlist,
		  "set record btrace pt ", 0, &set_record_btrace_cmdlist);

  add_prefix_cmd ("pt", class_support, cmd_show_record_btrace_pt,
		  _("Show record btrace pt options"),
		  &show_record_btrace_pt_cmdlist,
		  "show record btrace pt ", 0, &show_record_btrace_cmdlist);

  add_setshow_uinteger_cmd ("buffer-size", no_class,
			    &record_btrace_conf.pt.size,
			    _("Set the record/replay pt buffer size."),
			    _("Show the record/replay pt buffer size."), _("\
Bigger buffers allow longer recording but also take more time to process \
the recorded execution.\n\
The actual buffer size may differ from the requested size.  Use \"info record\" \
to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
			    &set_record_btrace_pt_cmdlist,
			    &show_record_btrace_pt_cmdlist);

  /* Register the record-btrace target.  */
  init_record_btrace_ops ();
  add_target (&record_btrace_ops);

  /* Allocate the bfcache hash table with 50 initial slots.
     NOTE(review): presumably caches per-function data for the btrace
     frame unwinder -- confirm against the bfcache users earlier in
     this file.  */
  bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
			       xcalloc, xfree);

  /* Default trace buffer sizes: 64 KiB for BTS, 16 KiB for PT.  */
  record_btrace_conf.bts.size = 64 * 1024;
  record_btrace_conf.pt.size = 16 * 1024;
}