gdbarch.h: include regcache.h
[deliverable/binutils-gdb.git] / gdb / record-btrace.c
CommitLineData
afedecd3
MM
1/* Branch trace support for GDB, the GNU debugger.
2
32d0add0 3 Copyright (C) 2013-2015 Free Software Foundation, Inc.
afedecd3
MM
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22#include "defs.h"
23#include "record.h"
24#include "gdbthread.h"
25#include "target.h"
26#include "gdbcmd.h"
27#include "disasm.h"
28#include "observer.h"
afedecd3
MM
29#include "cli/cli-utils.h"
30#include "source.h"
31#include "ui-out.h"
32#include "symtab.h"
33#include "filenames.h"
1f3ef581 34#include "regcache.h"
cecac1ab 35#include "frame-unwind.h"
0b722aec 36#include "hashtab.h"
45741a9c 37#include "infrun.h"
70ad5bff
MM
38#include "event-loop.h"
39#include "inf-loop.h"
afedecd3
MM
40
41/* The target_ops of record-btrace. */
42static struct target_ops record_btrace_ops;
43
44/* A new thread observer enabling branch tracing for the new thread. */
45static struct observer *record_btrace_thread_observer;
46
67b5c0c1
MM
47/* Memory access types used in set/show record btrace replay-memory-access. */
48static const char replay_memory_access_read_only[] = "read-only";
49static const char replay_memory_access_read_write[] = "read-write";
50static const char *const replay_memory_access_types[] =
51{
52 replay_memory_access_read_only,
53 replay_memory_access_read_write,
54 NULL
55};
56
57/* The currently allowed replay memory access type. */
58static const char *replay_memory_access = replay_memory_access_read_only;
59
60/* Command lists for "set/show record btrace". */
61static struct cmd_list_element *set_record_btrace_cmdlist;
62static struct cmd_list_element *show_record_btrace_cmdlist;
633785ff 63
70ad5bff
MM
64/* The execution direction of the last resume we got. See record-full.c. */
65static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;
66
67/* The async event handler for reverse/replay execution. */
68static struct async_event_handler *record_btrace_async_inferior_event_handler;
69
aef92902
MM
70/* A flag indicating that we are currently generating a core file. */
71static int record_btrace_generating_corefile;
72
f4abbc16
MM
73/* The current branch trace configuration. */
74static struct btrace_config record_btrace_conf;
75
76/* Command list for "record btrace". */
77static struct cmd_list_element *record_btrace_cmdlist;
78
d33501a5
MM
79/* Command lists for "set/show record btrace bts". */
80static struct cmd_list_element *set_record_btrace_bts_cmdlist;
81static struct cmd_list_element *show_record_btrace_bts_cmdlist;
82
afedecd3
MM
83/* Print a record-btrace debug message. Use do ... while (0) to avoid
84 ambiguities when used in if statements. */
85
86#define DEBUG(msg, args...) \
87 do \
88 { \
89 if (record_debug != 0) \
90 fprintf_unfiltered (gdb_stdlog, \
91 "[record-btrace] " msg "\n", ##args); \
92 } \
93 while (0)
94
95
96/* Update the branch trace for the current thread and return a pointer to its
066ce621 97 thread_info.
afedecd3
MM
98
99 Throws an error if there is no thread or no trace. This function never
100 returns NULL. */
101
066ce621
MM
102static struct thread_info *
103require_btrace_thread (void)
afedecd3
MM
104{
105 struct thread_info *tp;
afedecd3
MM
106
107 DEBUG ("require");
108
109 tp = find_thread_ptid (inferior_ptid);
110 if (tp == NULL)
111 error (_("No thread."));
112
113 btrace_fetch (tp);
114
6e07b1d2 115 if (btrace_is_empty (tp))
afedecd3
MM
116 error (_("No trace."));
117
066ce621
MM
118 return tp;
119}
120
121/* Update the branch trace for the current thread and return a pointer to its
122 branch trace information struct.
123
124 Throws an error if there is no thread or no trace. This function never
125 returns NULL. */
126
127static struct btrace_thread_info *
128require_btrace (void)
129{
130 struct thread_info *tp;
131
132 tp = require_btrace_thread ();
133
134 return &tp->btrace;
afedecd3
MM
135}
136
137/* Enable branch tracing for one thread. Warn on errors. */
138
139static void
140record_btrace_enable_warn (struct thread_info *tp)
141{
142 volatile struct gdb_exception error;
143
144 TRY_CATCH (error, RETURN_MASK_ERROR)
f4abbc16 145 btrace_enable (tp, &record_btrace_conf);
afedecd3
MM
146
147 if (error.message != NULL)
148 warning ("%s", error.message);
149}
150
/* Callback function to disable branch tracing for one thread.
   ARG is the struct thread_info to operate on (cleanup signature).  */

static void
record_btrace_disable_callback (void *arg)
{
  btrace_disable ((struct thread_info *) arg);
}
162
163/* Enable automatic tracing of new threads. */
164
165static void
166record_btrace_auto_enable (void)
167{
168 DEBUG ("attach thread observer");
169
170 record_btrace_thread_observer
171 = observer_attach_new_thread (record_btrace_enable_warn);
172}
173
174/* Disable automatic tracing of new threads. */
175
176static void
177record_btrace_auto_disable (void)
178{
179 /* The observer may have been detached, already. */
180 if (record_btrace_thread_observer == NULL)
181 return;
182
183 DEBUG ("detach thread observer");
184
185 observer_detach_new_thread (record_btrace_thread_observer);
186 record_btrace_thread_observer = NULL;
187}
188
70ad5bff
MM
189/* The record-btrace async event handler function. */
190
191static void
192record_btrace_handle_async_inferior_event (gdb_client_data data)
193{
194 inferior_event_handler (INF_REG_EVENT, NULL);
195}
196
afedecd3
MM
197/* The to_open method of target record-btrace. */
198
199static void
014f9477 200record_btrace_open (const char *args, int from_tty)
afedecd3
MM
201{
202 struct cleanup *disable_chain;
203 struct thread_info *tp;
204
205 DEBUG ("open");
206
8213266a 207 record_preopen ();
afedecd3
MM
208
209 if (!target_has_execution)
210 error (_("The program is not being run."));
211
52834460
MM
212 if (non_stop)
213 error (_("Record btrace can't debug inferior in non-stop mode."));
214
afedecd3
MM
215 gdb_assert (record_btrace_thread_observer == NULL);
216
217 disable_chain = make_cleanup (null_cleanup, NULL);
034f788c 218 ALL_NON_EXITED_THREADS (tp)
afedecd3
MM
219 if (args == NULL || *args == 0 || number_is_in_list (args, tp->num))
220 {
f4abbc16 221 btrace_enable (tp, &record_btrace_conf);
afedecd3
MM
222
223 make_cleanup (record_btrace_disable_callback, tp);
224 }
225
226 record_btrace_auto_enable ();
227
228 push_target (&record_btrace_ops);
229
70ad5bff
MM
230 record_btrace_async_inferior_event_handler
231 = create_async_event_handler (record_btrace_handle_async_inferior_event,
232 NULL);
aef92902 233 record_btrace_generating_corefile = 0;
70ad5bff 234
afedecd3
MM
235 observer_notify_record_changed (current_inferior (), 1);
236
237 discard_cleanups (disable_chain);
238}
239
240/* The to_stop_recording method of target record-btrace. */
241
242static void
c6cd7c02 243record_btrace_stop_recording (struct target_ops *self)
afedecd3
MM
244{
245 struct thread_info *tp;
246
247 DEBUG ("stop recording");
248
249 record_btrace_auto_disable ();
250
034f788c 251 ALL_NON_EXITED_THREADS (tp)
afedecd3
MM
252 if (tp->btrace.target != NULL)
253 btrace_disable (tp);
254}
255
256/* The to_close method of target record-btrace. */
257
258static void
de90e03d 259record_btrace_close (struct target_ops *self)
afedecd3 260{
568e808b
MM
261 struct thread_info *tp;
262
70ad5bff
MM
263 if (record_btrace_async_inferior_event_handler != NULL)
264 delete_async_event_handler (&record_btrace_async_inferior_event_handler);
265
99c819ee
MM
266 /* Make sure automatic recording gets disabled even if we did not stop
267 recording before closing the record-btrace target. */
268 record_btrace_auto_disable ();
269
568e808b
MM
270 /* We should have already stopped recording.
271 Tear down btrace in case we have not. */
034f788c 272 ALL_NON_EXITED_THREADS (tp)
568e808b 273 btrace_teardown (tp);
afedecd3
MM
274}
275
b7d2e916
PA
276/* The to_async method of target record-btrace. */
277
278static void
279record_btrace_async (struct target_ops *ops,
280 void (*callback) (enum inferior_event_type event_type,
281 void *context),
282 void *context)
283{
284 if (callback != NULL)
285 mark_async_event_handler (record_btrace_async_inferior_event_handler);
286 else
287 clear_async_event_handler (record_btrace_async_inferior_event_handler);
288
289 ops->beneath->to_async (ops->beneath, callback, context);
290}
291
d33501a5
MM
/* Adjusts the size and returns a human readable size suffix.
   *SIZE is scaled down only when it is an exact multiple of the unit, so no
   precision is ever lost; otherwise it is left untouched and "" returned.  */

static const char *
record_btrace_adjust_size (unsigned int *size)
{
  /* Scale units, largest first.  */
  static const struct
  {
    unsigned int shift;
    const char *suffix;
  } units[] =
    {
      { 30, "GB" },
      { 20, "MB" },
      { 10, "kB" }
    };
  unsigned int sz, i;

  sz = *size;

  for (i = 0; i < 3; ++i)
    if ((sz & ((1u << units[i].shift) - 1)) == 0)
      {
	*size = sz >> units[i].shift;
	return units[i].suffix;
      }

  return "";
}
319
320/* Print a BTS configuration. */
321
322static void
323record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
324{
325 const char *suffix;
326 unsigned int size;
327
328 size = conf->size;
329 if (size > 0)
330 {
331 suffix = record_btrace_adjust_size (&size);
332 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
333 }
334}
335
336/* Print a branch tracing configuration. */
337
338static void
339record_btrace_print_conf (const struct btrace_config *conf)
340{
341 printf_unfiltered (_("Recording format: %s.\n"),
342 btrace_format_string (conf->format));
343
344 switch (conf->format)
345 {
346 case BTRACE_FORMAT_NONE:
347 return;
348
349 case BTRACE_FORMAT_BTS:
350 record_btrace_print_bts_conf (&conf->bts);
351 return;
352 }
353
354 internal_error (__FILE__, __LINE__, _("Unkown branch trace format."));
355}
356
afedecd3
MM
357/* The to_info_record method of target record-btrace. */
358
359static void
630d6a4a 360record_btrace_info (struct target_ops *self)
afedecd3
MM
361{
362 struct btrace_thread_info *btinfo;
f4abbc16 363 const struct btrace_config *conf;
afedecd3 364 struct thread_info *tp;
31fd9caa 365 unsigned int insns, calls, gaps;
afedecd3
MM
366
367 DEBUG ("info");
368
369 tp = find_thread_ptid (inferior_ptid);
370 if (tp == NULL)
371 error (_("No thread."));
372
f4abbc16
MM
373 btinfo = &tp->btrace;
374
375 conf = btrace_conf (btinfo);
376 if (conf != NULL)
d33501a5 377 record_btrace_print_conf (conf);
f4abbc16 378
afedecd3
MM
379 btrace_fetch (tp);
380
23a7fe75
MM
381 insns = 0;
382 calls = 0;
31fd9caa 383 gaps = 0;
23a7fe75 384
6e07b1d2 385 if (!btrace_is_empty (tp))
23a7fe75
MM
386 {
387 struct btrace_call_iterator call;
388 struct btrace_insn_iterator insn;
389
390 btrace_call_end (&call, btinfo);
391 btrace_call_prev (&call, 1);
5de9129b 392 calls = btrace_call_number (&call);
23a7fe75
MM
393
394 btrace_insn_end (&insn, btinfo);
31fd9caa 395
5de9129b 396 insns = btrace_insn_number (&insn);
31fd9caa
MM
397 if (insns != 0)
398 {
399 /* The last instruction does not really belong to the trace. */
400 insns -= 1;
401 }
402 else
403 {
404 unsigned int steps;
405
406 /* Skip gaps at the end. */
407 do
408 {
409 steps = btrace_insn_prev (&insn, 1);
410 if (steps == 0)
411 break;
412
413 insns = btrace_insn_number (&insn);
414 }
415 while (insns == 0);
416 }
417
418 gaps = btinfo->ngaps;
23a7fe75 419 }
afedecd3 420
31fd9caa
MM
421 printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
422 "for thread %d (%s).\n"), insns, calls, gaps,
423 tp->num, target_pid_to_str (tp->ptid));
07bbe694
MM
424
425 if (btrace_is_replaying (tp))
426 printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
427 btrace_insn_number (btinfo->replay));
afedecd3
MM
428}
429
31fd9caa
MM
430/* Print a decode error. */
431
432static void
433btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
434 enum btrace_format format)
435{
436 const char *errstr;
437 int is_error;
438
439 errstr = _("unknown");
440 is_error = 1;
441
442 switch (format)
443 {
444 default:
445 break;
446
447 case BTRACE_FORMAT_BTS:
448 switch (errcode)
449 {
450 default:
451 break;
452
453 case BDE_BTS_OVERFLOW:
454 errstr = _("instruction overflow");
455 break;
456
457 case BDE_BTS_INSN_SIZE:
458 errstr = _("unknown instruction");
459 break;
460 }
461 break;
462 }
463
464 ui_out_text (uiout, _("["));
465 if (is_error)
466 {
467 ui_out_text (uiout, _("decode error ("));
468 ui_out_field_int (uiout, "errcode", errcode);
469 ui_out_text (uiout, _("): "));
470 }
471 ui_out_text (uiout, errstr);
472 ui_out_text (uiout, _("]\n"));
473}
474
afedecd3
MM
/* Print an unsigned int VAL under field name FLD.  */

static void
ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
{
  ui_out_field_fmt (uiout, fld, "%u", val);
}
482
483/* Disassemble a section of the recorded instruction trace. */
484
485static void
23a7fe75 486btrace_insn_history (struct ui_out *uiout,
31fd9caa 487 const struct btrace_thread_info *btinfo,
23a7fe75
MM
488 const struct btrace_insn_iterator *begin,
489 const struct btrace_insn_iterator *end, int flags)
afedecd3
MM
490{
491 struct gdbarch *gdbarch;
23a7fe75 492 struct btrace_insn_iterator it;
afedecd3 493
23a7fe75
MM
494 DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
495 btrace_insn_number (end));
afedecd3
MM
496
497 gdbarch = target_gdbarch ();
498
23a7fe75 499 for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
afedecd3 500 {
23a7fe75
MM
501 const struct btrace_insn *insn;
502
503 insn = btrace_insn_get (&it);
504
31fd9caa
MM
505 /* A NULL instruction indicates a gap in the trace. */
506 if (insn == NULL)
507 {
508 const struct btrace_config *conf;
509
510 conf = btrace_conf (btinfo);
afedecd3 511
31fd9caa
MM
512 /* We have trace so we must have a configuration. */
513 gdb_assert (conf != NULL);
514
515 btrace_ui_out_decode_error (uiout, it.function->errcode,
516 conf->format);
517 }
518 else
519 {
520 /* Print the instruction index. */
521 ui_out_field_uint (uiout, "index", btrace_insn_number (&it));
522 ui_out_text (uiout, "\t");
523
524 /* Disassembly with '/m' flag may not produce the expected result.
525 See PR gdb/11833. */
526 gdb_disassembly (gdbarch, uiout, NULL, flags, 1, insn->pc,
527 insn->pc + 1);
528 }
afedecd3
MM
529 }
530}
531
532/* The to_insn_history method of target record-btrace. */
533
534static void
7a6c5609 535record_btrace_insn_history (struct target_ops *self, int size, int flags)
afedecd3
MM
536{
537 struct btrace_thread_info *btinfo;
23a7fe75
MM
538 struct btrace_insn_history *history;
539 struct btrace_insn_iterator begin, end;
afedecd3
MM
540 struct cleanup *uiout_cleanup;
541 struct ui_out *uiout;
23a7fe75 542 unsigned int context, covered;
afedecd3
MM
543
544 uiout = current_uiout;
545 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
546 "insn history");
afedecd3 547 context = abs (size);
afedecd3
MM
548 if (context == 0)
549 error (_("Bad record instruction-history-size."));
550
23a7fe75
MM
551 btinfo = require_btrace ();
552 history = btinfo->insn_history;
553 if (history == NULL)
afedecd3 554 {
07bbe694 555 struct btrace_insn_iterator *replay;
afedecd3 556
23a7fe75 557 DEBUG ("insn-history (0x%x): %d", flags, size);
afedecd3 558
07bbe694
MM
559 /* If we're replaying, we start at the replay position. Otherwise, we
560 start at the tail of the trace. */
561 replay = btinfo->replay;
562 if (replay != NULL)
563 begin = *replay;
564 else
565 btrace_insn_end (&begin, btinfo);
566
567 /* We start from here and expand in the requested direction. Then we
568 expand in the other direction, as well, to fill up any remaining
569 context. */
570 end = begin;
571 if (size < 0)
572 {
573 /* We want the current position covered, as well. */
574 covered = btrace_insn_next (&end, 1);
575 covered += btrace_insn_prev (&begin, context - covered);
576 covered += btrace_insn_next (&end, context - covered);
577 }
578 else
579 {
580 covered = btrace_insn_next (&end, context);
581 covered += btrace_insn_prev (&begin, context - covered);
582 }
afedecd3
MM
583 }
584 else
585 {
23a7fe75
MM
586 begin = history->begin;
587 end = history->end;
afedecd3 588
23a7fe75
MM
589 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
590 btrace_insn_number (&begin), btrace_insn_number (&end));
afedecd3 591
23a7fe75
MM
592 if (size < 0)
593 {
594 end = begin;
595 covered = btrace_insn_prev (&begin, context);
596 }
597 else
598 {
599 begin = end;
600 covered = btrace_insn_next (&end, context);
601 }
afedecd3
MM
602 }
603
23a7fe75 604 if (covered > 0)
31fd9caa 605 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
23a7fe75
MM
606 else
607 {
608 if (size < 0)
609 printf_unfiltered (_("At the start of the branch trace record.\n"));
610 else
611 printf_unfiltered (_("At the end of the branch trace record.\n"));
612 }
afedecd3 613
23a7fe75 614 btrace_set_insn_history (btinfo, &begin, &end);
afedecd3
MM
615 do_cleanups (uiout_cleanup);
616}
617
618/* The to_insn_history_range method of target record-btrace. */
619
620static void
4e99c6b7
TT
621record_btrace_insn_history_range (struct target_ops *self,
622 ULONGEST from, ULONGEST to, int flags)
afedecd3
MM
623{
624 struct btrace_thread_info *btinfo;
23a7fe75
MM
625 struct btrace_insn_history *history;
626 struct btrace_insn_iterator begin, end;
afedecd3
MM
627 struct cleanup *uiout_cleanup;
628 struct ui_out *uiout;
23a7fe75
MM
629 unsigned int low, high;
630 int found;
afedecd3
MM
631
632 uiout = current_uiout;
633 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
634 "insn history");
23a7fe75
MM
635 low = from;
636 high = to;
afedecd3 637
23a7fe75 638 DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);
afedecd3
MM
639
640 /* Check for wrap-arounds. */
23a7fe75 641 if (low != from || high != to)
afedecd3
MM
642 error (_("Bad range."));
643
0688d04e 644 if (high < low)
afedecd3
MM
645 error (_("Bad range."));
646
23a7fe75 647 btinfo = require_btrace ();
afedecd3 648
23a7fe75
MM
649 found = btrace_find_insn_by_number (&begin, btinfo, low);
650 if (found == 0)
651 error (_("Range out of bounds."));
afedecd3 652
23a7fe75
MM
653 found = btrace_find_insn_by_number (&end, btinfo, high);
654 if (found == 0)
0688d04e
MM
655 {
656 /* Silently truncate the range. */
657 btrace_insn_end (&end, btinfo);
658 }
659 else
660 {
661 /* We want both begin and end to be inclusive. */
662 btrace_insn_next (&end, 1);
663 }
afedecd3 664
31fd9caa 665 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
23a7fe75 666 btrace_set_insn_history (btinfo, &begin, &end);
afedecd3
MM
667
668 do_cleanups (uiout_cleanup);
669}
670
671/* The to_insn_history_from method of target record-btrace. */
672
673static void
9abc3ff3
TT
674record_btrace_insn_history_from (struct target_ops *self,
675 ULONGEST from, int size, int flags)
afedecd3
MM
676{
677 ULONGEST begin, end, context;
678
679 context = abs (size);
0688d04e
MM
680 if (context == 0)
681 error (_("Bad record instruction-history-size."));
afedecd3
MM
682
683 if (size < 0)
684 {
685 end = from;
686
687 if (from < context)
688 begin = 0;
689 else
0688d04e 690 begin = from - context + 1;
afedecd3
MM
691 }
692 else
693 {
694 begin = from;
0688d04e 695 end = from + context - 1;
afedecd3
MM
696
697 /* Check for wrap-around. */
698 if (end < begin)
699 end = ULONGEST_MAX;
700 }
701
4e99c6b7 702 record_btrace_insn_history_range (self, begin, end, flags);
afedecd3
MM
703}
704
705/* Print the instruction number range for a function call history line. */
706
707static void
23a7fe75
MM
708btrace_call_history_insn_range (struct ui_out *uiout,
709 const struct btrace_function *bfun)
afedecd3 710{
7acbe133
MM
711 unsigned int begin, end, size;
712
713 size = VEC_length (btrace_insn_s, bfun->insn);
714 gdb_assert (size > 0);
afedecd3 715
23a7fe75 716 begin = bfun->insn_offset;
7acbe133 717 end = begin + size - 1;
afedecd3 718
23a7fe75 719 ui_out_field_uint (uiout, "insn begin", begin);
8710b709 720 ui_out_text (uiout, ",");
23a7fe75 721 ui_out_field_uint (uiout, "insn end", end);
afedecd3
MM
722}
723
724/* Print the source line information for a function call history line. */
725
726static void
23a7fe75
MM
727btrace_call_history_src_line (struct ui_out *uiout,
728 const struct btrace_function *bfun)
afedecd3
MM
729{
730 struct symbol *sym;
23a7fe75 731 int begin, end;
afedecd3
MM
732
733 sym = bfun->sym;
734 if (sym == NULL)
735 return;
736
737 ui_out_field_string (uiout, "file",
08be3fe3 738 symtab_to_filename_for_display (symbol_symtab (sym)));
afedecd3 739
23a7fe75
MM
740 begin = bfun->lbegin;
741 end = bfun->lend;
742
743 if (end < begin)
afedecd3
MM
744 return;
745
746 ui_out_text (uiout, ":");
23a7fe75 747 ui_out_field_int (uiout, "min line", begin);
afedecd3 748
23a7fe75 749 if (end == begin)
afedecd3
MM
750 return;
751
8710b709 752 ui_out_text (uiout, ",");
23a7fe75 753 ui_out_field_int (uiout, "max line", end);
afedecd3
MM
754}
755
0b722aec
MM
756/* Get the name of a branch trace function. */
757
758static const char *
759btrace_get_bfun_name (const struct btrace_function *bfun)
760{
761 struct minimal_symbol *msym;
762 struct symbol *sym;
763
764 if (bfun == NULL)
765 return "??";
766
767 msym = bfun->msym;
768 sym = bfun->sym;
769
770 if (sym != NULL)
771 return SYMBOL_PRINT_NAME (sym);
772 else if (msym != NULL)
efd66ac6 773 return MSYMBOL_PRINT_NAME (msym);
0b722aec
MM
774 else
775 return "??";
776}
777
afedecd3
MM
778/* Disassemble a section of the recorded function trace. */
779
780static void
23a7fe75 781btrace_call_history (struct ui_out *uiout,
8710b709 782 const struct btrace_thread_info *btinfo,
23a7fe75
MM
783 const struct btrace_call_iterator *begin,
784 const struct btrace_call_iterator *end,
afedecd3
MM
785 enum record_print_flag flags)
786{
23a7fe75 787 struct btrace_call_iterator it;
afedecd3 788
23a7fe75
MM
789 DEBUG ("ftrace (0x%x): [%u; %u)", flags, btrace_call_number (begin),
790 btrace_call_number (end));
afedecd3 791
23a7fe75 792 for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
afedecd3 793 {
23a7fe75
MM
794 const struct btrace_function *bfun;
795 struct minimal_symbol *msym;
796 struct symbol *sym;
797
798 bfun = btrace_call_get (&it);
23a7fe75 799 sym = bfun->sym;
0b722aec 800 msym = bfun->msym;
23a7fe75 801
afedecd3 802 /* Print the function index. */
23a7fe75 803 ui_out_field_uint (uiout, "index", bfun->number);
afedecd3
MM
804 ui_out_text (uiout, "\t");
805
31fd9caa
MM
806 /* Indicate gaps in the trace. */
807 if (bfun->errcode != 0)
808 {
809 const struct btrace_config *conf;
810
811 conf = btrace_conf (btinfo);
812
813 /* We have trace so we must have a configuration. */
814 gdb_assert (conf != NULL);
815
816 btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);
817
818 continue;
819 }
820
8710b709
MM
821 if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
822 {
823 int level = bfun->level + btinfo->level, i;
824
825 for (i = 0; i < level; ++i)
826 ui_out_text (uiout, " ");
827 }
828
829 if (sym != NULL)
830 ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (sym));
831 else if (msym != NULL)
efd66ac6 832 ui_out_field_string (uiout, "function", MSYMBOL_PRINT_NAME (msym));
8710b709
MM
833 else if (!ui_out_is_mi_like_p (uiout))
834 ui_out_field_string (uiout, "function", "??");
835
1e038f67 836 if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
afedecd3 837 {
8710b709 838 ui_out_text (uiout, _("\tinst "));
23a7fe75 839 btrace_call_history_insn_range (uiout, bfun);
afedecd3
MM
840 }
841
1e038f67 842 if ((flags & RECORD_PRINT_SRC_LINE) != 0)
afedecd3 843 {
8710b709 844 ui_out_text (uiout, _("\tat "));
23a7fe75 845 btrace_call_history_src_line (uiout, bfun);
afedecd3
MM
846 }
847
afedecd3
MM
848 ui_out_text (uiout, "\n");
849 }
850}
851
852/* The to_call_history method of target record-btrace. */
853
854static void
5df2fcba 855record_btrace_call_history (struct target_ops *self, int size, int flags)
afedecd3
MM
856{
857 struct btrace_thread_info *btinfo;
23a7fe75
MM
858 struct btrace_call_history *history;
859 struct btrace_call_iterator begin, end;
afedecd3
MM
860 struct cleanup *uiout_cleanup;
861 struct ui_out *uiout;
23a7fe75 862 unsigned int context, covered;
afedecd3
MM
863
864 uiout = current_uiout;
865 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
866 "insn history");
afedecd3 867 context = abs (size);
afedecd3
MM
868 if (context == 0)
869 error (_("Bad record function-call-history-size."));
870
23a7fe75
MM
871 btinfo = require_btrace ();
872 history = btinfo->call_history;
873 if (history == NULL)
afedecd3 874 {
07bbe694 875 struct btrace_insn_iterator *replay;
afedecd3 876
23a7fe75 877 DEBUG ("call-history (0x%x): %d", flags, size);
afedecd3 878
07bbe694
MM
879 /* If we're replaying, we start at the replay position. Otherwise, we
880 start at the tail of the trace. */
881 replay = btinfo->replay;
882 if (replay != NULL)
883 {
884 begin.function = replay->function;
885 begin.btinfo = btinfo;
886 }
887 else
888 btrace_call_end (&begin, btinfo);
889
890 /* We start from here and expand in the requested direction. Then we
891 expand in the other direction, as well, to fill up any remaining
892 context. */
893 end = begin;
894 if (size < 0)
895 {
896 /* We want the current position covered, as well. */
897 covered = btrace_call_next (&end, 1);
898 covered += btrace_call_prev (&begin, context - covered);
899 covered += btrace_call_next (&end, context - covered);
900 }
901 else
902 {
903 covered = btrace_call_next (&end, context);
904 covered += btrace_call_prev (&begin, context- covered);
905 }
afedecd3
MM
906 }
907 else
908 {
23a7fe75
MM
909 begin = history->begin;
910 end = history->end;
afedecd3 911
23a7fe75
MM
912 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", flags, size,
913 btrace_call_number (&begin), btrace_call_number (&end));
afedecd3 914
23a7fe75
MM
915 if (size < 0)
916 {
917 end = begin;
918 covered = btrace_call_prev (&begin, context);
919 }
920 else
921 {
922 begin = end;
923 covered = btrace_call_next (&end, context);
924 }
afedecd3
MM
925 }
926
23a7fe75 927 if (covered > 0)
8710b709 928 btrace_call_history (uiout, btinfo, &begin, &end, flags);
23a7fe75
MM
929 else
930 {
931 if (size < 0)
932 printf_unfiltered (_("At the start of the branch trace record.\n"));
933 else
934 printf_unfiltered (_("At the end of the branch trace record.\n"));
935 }
afedecd3 936
23a7fe75 937 btrace_set_call_history (btinfo, &begin, &end);
afedecd3
MM
938 do_cleanups (uiout_cleanup);
939}
940
941/* The to_call_history_range method of target record-btrace. */
942
943static void
f0d960ea
TT
944record_btrace_call_history_range (struct target_ops *self,
945 ULONGEST from, ULONGEST to, int flags)
afedecd3
MM
946{
947 struct btrace_thread_info *btinfo;
23a7fe75
MM
948 struct btrace_call_history *history;
949 struct btrace_call_iterator begin, end;
afedecd3
MM
950 struct cleanup *uiout_cleanup;
951 struct ui_out *uiout;
23a7fe75
MM
952 unsigned int low, high;
953 int found;
afedecd3
MM
954
955 uiout = current_uiout;
956 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
957 "func history");
23a7fe75
MM
958 low = from;
959 high = to;
afedecd3 960
23a7fe75 961 DEBUG ("call-history (0x%x): [%u; %u)", flags, low, high);
afedecd3
MM
962
963 /* Check for wrap-arounds. */
23a7fe75 964 if (low != from || high != to)
afedecd3
MM
965 error (_("Bad range."));
966
0688d04e 967 if (high < low)
afedecd3
MM
968 error (_("Bad range."));
969
23a7fe75 970 btinfo = require_btrace ();
afedecd3 971
23a7fe75
MM
972 found = btrace_find_call_by_number (&begin, btinfo, low);
973 if (found == 0)
974 error (_("Range out of bounds."));
afedecd3 975
23a7fe75
MM
976 found = btrace_find_call_by_number (&end, btinfo, high);
977 if (found == 0)
0688d04e
MM
978 {
979 /* Silently truncate the range. */
980 btrace_call_end (&end, btinfo);
981 }
982 else
983 {
984 /* We want both begin and end to be inclusive. */
985 btrace_call_next (&end, 1);
986 }
afedecd3 987
8710b709 988 btrace_call_history (uiout, btinfo, &begin, &end, flags);
23a7fe75 989 btrace_set_call_history (btinfo, &begin, &end);
afedecd3
MM
990
991 do_cleanups (uiout_cleanup);
992}
993
994/* The to_call_history_from method of target record-btrace. */
995
996static void
ec0aea04
TT
997record_btrace_call_history_from (struct target_ops *self,
998 ULONGEST from, int size, int flags)
afedecd3
MM
999{
1000 ULONGEST begin, end, context;
1001
1002 context = abs (size);
0688d04e
MM
1003 if (context == 0)
1004 error (_("Bad record function-call-history-size."));
afedecd3
MM
1005
1006 if (size < 0)
1007 {
1008 end = from;
1009
1010 if (from < context)
1011 begin = 0;
1012 else
0688d04e 1013 begin = from - context + 1;
afedecd3
MM
1014 }
1015 else
1016 {
1017 begin = from;
0688d04e 1018 end = from + context - 1;
afedecd3
MM
1019
1020 /* Check for wrap-around. */
1021 if (end < begin)
1022 end = ULONGEST_MAX;
1023 }
1024
f0d960ea 1025 record_btrace_call_history_range (self, begin, end, flags);
afedecd3
MM
1026}
1027
07bbe694
MM
1028/* The to_record_is_replaying method of target record-btrace. */
1029
1030static int
1c63c994 1031record_btrace_is_replaying (struct target_ops *self)
07bbe694
MM
1032{
1033 struct thread_info *tp;
1034
034f788c 1035 ALL_NON_EXITED_THREADS (tp)
07bbe694
MM
1036 if (btrace_is_replaying (tp))
1037 return 1;
1038
1039 return 0;
1040}
1041
633785ff
MM
1042/* The to_xfer_partial method of target record-btrace. */
1043
9b409511 1044static enum target_xfer_status
633785ff
MM
1045record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
1046 const char *annex, gdb_byte *readbuf,
1047 const gdb_byte *writebuf, ULONGEST offset,
9b409511 1048 ULONGEST len, ULONGEST *xfered_len)
633785ff
MM
1049{
1050 struct target_ops *t;
1051
1052 /* Filter out requests that don't make sense during replay. */
67b5c0c1 1053 if (replay_memory_access == replay_memory_access_read_only
aef92902 1054 && !record_btrace_generating_corefile
67b5c0c1 1055 && record_btrace_is_replaying (ops))
633785ff
MM
1056 {
1057 switch (object)
1058 {
1059 case TARGET_OBJECT_MEMORY:
1060 {
1061 struct target_section *section;
1062
1063 /* We do not allow writing memory in general. */
1064 if (writebuf != NULL)
9b409511
YQ
1065 {
1066 *xfered_len = len;
bc113b4e 1067 return TARGET_XFER_UNAVAILABLE;
9b409511 1068 }
633785ff
MM
1069
1070 /* We allow reading readonly memory. */
1071 section = target_section_by_addr (ops, offset);
1072 if (section != NULL)
1073 {
1074 /* Check if the section we found is readonly. */
1075 if ((bfd_get_section_flags (section->the_bfd_section->owner,
1076 section->the_bfd_section)
1077 & SEC_READONLY) != 0)
1078 {
1079 /* Truncate the request to fit into this section. */
1080 len = min (len, section->endaddr - offset);
1081 break;
1082 }
1083 }
1084
9b409511 1085 *xfered_len = len;
bc113b4e 1086 return TARGET_XFER_UNAVAILABLE;
633785ff
MM
1087 }
1088 }
1089 }
1090
1091 /* Forward the request. */
e75fdfca
TT
1092 ops = ops->beneath;
1093 return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
1094 offset, len, xfered_len);
633785ff
MM
1095}
1096
1097/* The to_insert_breakpoint method of target record-btrace. */
1098
1099static int
1100record_btrace_insert_breakpoint (struct target_ops *ops,
1101 struct gdbarch *gdbarch,
1102 struct bp_target_info *bp_tgt)
1103{
1104 volatile struct gdb_exception except;
67b5c0c1
MM
1105 const char *old;
1106 int ret;
633785ff
MM
1107
1108 /* Inserting breakpoints requires accessing memory. Allow it for the
1109 duration of this function. */
67b5c0c1
MM
1110 old = replay_memory_access;
1111 replay_memory_access = replay_memory_access_read_write;
633785ff
MM
1112
1113 ret = 0;
1114 TRY_CATCH (except, RETURN_MASK_ALL)
6b84065d 1115 ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);
633785ff 1116
67b5c0c1 1117 replay_memory_access = old;
633785ff
MM
1118
1119 if (except.reason < 0)
1120 throw_exception (except);
1121
1122 return ret;
1123}
1124
1125/* The to_remove_breakpoint method of target record-btrace. */
1126
1127static int
1128record_btrace_remove_breakpoint (struct target_ops *ops,
1129 struct gdbarch *gdbarch,
1130 struct bp_target_info *bp_tgt)
1131{
1132 volatile struct gdb_exception except;
67b5c0c1
MM
1133 const char *old;
1134 int ret;
633785ff
MM
1135
1136 /* Removing breakpoints requires accessing memory. Allow it for the
1137 duration of this function. */
67b5c0c1
MM
1138 old = replay_memory_access;
1139 replay_memory_access = replay_memory_access_read_write;
633785ff
MM
1140
1141 ret = 0;
1142 TRY_CATCH (except, RETURN_MASK_ALL)
6b84065d 1143 ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt);
633785ff 1144
67b5c0c1 1145 replay_memory_access = old;
633785ff
MM
1146
1147 if (except.reason < 0)
1148 throw_exception (except);
1149
1150 return ret;
1151}
1152
1f3ef581
MM
1153/* The to_fetch_registers method of target record-btrace. */
1154
1155static void
1156record_btrace_fetch_registers (struct target_ops *ops,
1157 struct regcache *regcache, int regno)
1158{
1159 struct btrace_insn_iterator *replay;
1160 struct thread_info *tp;
1161
1162 tp = find_thread_ptid (inferior_ptid);
1163 gdb_assert (tp != NULL);
1164
1165 replay = tp->btrace.replay;
aef92902 1166 if (replay != NULL && !record_btrace_generating_corefile)
1f3ef581
MM
1167 {
1168 const struct btrace_insn *insn;
1169 struct gdbarch *gdbarch;
1170 int pcreg;
1171
1172 gdbarch = get_regcache_arch (regcache);
1173 pcreg = gdbarch_pc_regnum (gdbarch);
1174 if (pcreg < 0)
1175 return;
1176
1177 /* We can only provide the PC register. */
1178 if (regno >= 0 && regno != pcreg)
1179 return;
1180
1181 insn = btrace_insn_get (replay);
1182 gdb_assert (insn != NULL);
1183
1184 regcache_raw_supply (regcache, regno, &insn->pc);
1185 }
1186 else
1187 {
e75fdfca 1188 struct target_ops *t = ops->beneath;
1f3ef581 1189
e75fdfca 1190 t->to_fetch_registers (t, regcache, regno);
1f3ef581
MM
1191 }
1192}
1193
1194/* The to_store_registers method of target record-btrace. */
1195
1196static void
1197record_btrace_store_registers (struct target_ops *ops,
1198 struct regcache *regcache, int regno)
1199{
1200 struct target_ops *t;
1201
aef92902 1202 if (!record_btrace_generating_corefile && record_btrace_is_replaying (ops))
1f3ef581
MM
1203 error (_("This record target does not allow writing registers."));
1204
1205 gdb_assert (may_write_registers != 0);
1206
e75fdfca
TT
1207 t = ops->beneath;
1208 t->to_store_registers (t, regcache, regno);
1f3ef581
MM
1209}
1210
1211/* The to_prepare_to_store method of target record-btrace. */
1212
1213static void
1214record_btrace_prepare_to_store (struct target_ops *ops,
1215 struct regcache *regcache)
1216{
1217 struct target_ops *t;
1218
aef92902 1219 if (!record_btrace_generating_corefile && record_btrace_is_replaying (ops))
1f3ef581
MM
1220 return;
1221
e75fdfca
TT
1222 t = ops->beneath;
1223 t->to_prepare_to_store (t, regcache);
1f3ef581
MM
1224}
1225
/* The branch trace frame cache.

   Associates a synthesized replay frame with the branch trace function
   segment it was built from.  Entries live in the BFCACHE hash table and
   are keyed on the FRAME pointer (see bfcache_hash / bfcache_eq).  */

struct btrace_frame_cache
{
  /* The thread.  */
  struct thread_info *tp;

  /* The frame info.  */
  struct frame_info *frame;

  /* The branch trace function segment.  */
  const struct btrace_function *bfun;
};
1239
/* A struct btrace_frame_cache hash table indexed by NEXT.
   Entries hash and compare on their FRAME pointer; see bfcache_hash and
   bfcache_eq below.  */

static htab_t bfcache;
1243
1244/* hash_f for htab_create_alloc of bfcache. */
1245
1246static hashval_t
1247bfcache_hash (const void *arg)
1248{
1249 const struct btrace_frame_cache *cache = arg;
1250
1251 return htab_hash_pointer (cache->frame);
1252}
1253
1254/* eq_f for htab_create_alloc of bfcache. */
1255
1256static int
1257bfcache_eq (const void *arg1, const void *arg2)
1258{
1259 const struct btrace_frame_cache *cache1 = arg1;
1260 const struct btrace_frame_cache *cache2 = arg2;
1261
1262 return cache1->frame == cache2->frame;
1263}
1264
1265/* Create a new btrace frame cache. */
1266
1267static struct btrace_frame_cache *
1268bfcache_new (struct frame_info *frame)
1269{
1270 struct btrace_frame_cache *cache;
1271 void **slot;
1272
1273 cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
1274 cache->frame = frame;
1275
1276 slot = htab_find_slot (bfcache, cache, INSERT);
1277 gdb_assert (*slot == NULL);
1278 *slot = cache;
1279
1280 return cache;
1281}
1282
1283/* Extract the branch trace function from a branch trace frame. */
1284
1285static const struct btrace_function *
1286btrace_get_frame_function (struct frame_info *frame)
1287{
1288 const struct btrace_frame_cache *cache;
1289 const struct btrace_function *bfun;
1290 struct btrace_frame_cache pattern;
1291 void **slot;
1292
1293 pattern.frame = frame;
1294
1295 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1296 if (slot == NULL)
1297 return NULL;
1298
1299 cache = *slot;
1300 return cache->bfun;
1301}
1302
cecac1ab
MM
1303/* Implement stop_reason method for record_btrace_frame_unwind. */
1304
1305static enum unwind_stop_reason
1306record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1307 void **this_cache)
1308{
0b722aec
MM
1309 const struct btrace_frame_cache *cache;
1310 const struct btrace_function *bfun;
1311
1312 cache = *this_cache;
1313 bfun = cache->bfun;
1314 gdb_assert (bfun != NULL);
1315
1316 if (bfun->up == NULL)
1317 return UNWIND_UNAVAILABLE;
1318
1319 return UNWIND_NO_REASON;
cecac1ab
MM
1320}
1321
1322/* Implement this_id method for record_btrace_frame_unwind. */
1323
1324static void
1325record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
1326 struct frame_id *this_id)
1327{
0b722aec
MM
1328 const struct btrace_frame_cache *cache;
1329 const struct btrace_function *bfun;
1330 CORE_ADDR code, special;
1331
1332 cache = *this_cache;
1333
1334 bfun = cache->bfun;
1335 gdb_assert (bfun != NULL);
1336
1337 while (bfun->segment.prev != NULL)
1338 bfun = bfun->segment.prev;
1339
1340 code = get_frame_func (this_frame);
1341 special = bfun->number;
1342
1343 *this_id = frame_id_build_unavailable_stack_special (code, special);
1344
1345 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1346 btrace_get_bfun_name (cache->bfun),
1347 core_addr_to_string_nz (this_id->code_addr),
1348 core_addr_to_string_nz (this_id->special_addr));
cecac1ab
MM
1349}
1350
/* Implement prev_register method for record_btrace_frame_unwind.

   Only the PC can be reconstructed from the branch trace; every other
   register is reported unavailable.  The caller's PC is derived from the
   caller function segment recorded in the trace.  */

static struct value *
record_btrace_frame_prev_register (struct frame_info *this_frame,
				   void **this_cache,
				   int regnum)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun, *caller;
  const struct btrace_insn *insn;
  struct gdbarch *gdbarch;
  CORE_ADDR pc;
  int pcreg;

  /* Anything but the PC register is unavailable in the record history.  */
  gdbarch = get_frame_arch (this_frame);
  pcreg = gdbarch_pc_regnum (gdbarch);
  if (pcreg < 0 || regnum != pcreg)
    throw_error (NOT_AVAILABLE_ERROR,
		 _("Registers are not available in btrace record history"));

  cache = *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  caller = bfun->up;
  if (caller == NULL)
    throw_error (NOT_AVAILABLE_ERROR,
		 _("No caller in btrace record history"));

  if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
    {
      /* We got here via a return: resume at the caller segment's first
	 instruction.  */
      insn = VEC_index (btrace_insn_s, caller->insn, 0);
      pc = insn->pc;
    }
  else
    {
      /* We got here via a call: the caller resumes after its last
	 recorded instruction, i.e. after the call instruction.  */
      insn = VEC_last (btrace_insn_s, caller->insn);
      pc = insn->pc;

      pc += gdb_insn_length (gdbarch, pc);
    }

  DEBUG ("[frame] unwound PC in %s on level %d: %s",
	 btrace_get_bfun_name (bfun), bfun->level,
	 core_addr_to_string_nz (pc));

  return frame_unwind_got_address (this_frame, regnum, pc);
}
1399
/* Implement sniffer method for record_btrace_frame_unwind.

   Claims the frame when the thread is replaying: the innermost frame is
   taken from the replay iterator's current function segment; outer
   frames are derived from the callee frame's cached segment (unless the
   callee was reached via tail call, which the tailcall sniffer
   handles).  Returns non-zero and initializes *THIS_CACHE on
   success.  */

static int
record_btrace_frame_sniffer (const struct frame_unwind *self,
			     struct frame_info *this_frame,
			     void **this_cache)
{
  const struct btrace_function *bfun;
  struct btrace_frame_cache *cache;
  struct thread_info *tp;
  struct frame_info *next;

  /* THIS_FRAME does not contain a reference to its thread.  */
  tp = find_thread_ptid (inferior_ptid);
  gdb_assert (tp != NULL);

  bfun = NULL;
  next = get_next_frame (this_frame);
  if (next == NULL)
    {
      /* Innermost frame: use the replay position, if any.  */
      const struct btrace_insn_iterator *replay;

      replay = tp->btrace.replay;
      if (replay != NULL)
	bfun = replay->function;
    }
  else
    {
      /* Outer frame: take the caller of the callee's segment, unless the
	 callee was entered via tail call.  */
      const struct btrace_function *callee;

      callee = btrace_get_frame_function (next);
      if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
	bfun = callee->up;
    }

  if (bfun == NULL)
    return 0;

  DEBUG ("[frame] sniffed frame for %s on level %d",
	 btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = tp;
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}
1449
/* Implement sniffer method for record_btrace_tailcall_frame_unwind.

   Claims the frame only when the callee frame's segment was reached via
   a tail call (BFUN_UP_LINKS_TO_TAILCALL); the regular sniffer above
   handles ordinary calls.  Returns non-zero and initializes *THIS_CACHE
   on success.  */

static int
record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
				      struct frame_info *this_frame,
				      void **this_cache)
{
  const struct btrace_function *bfun, *callee;
  struct btrace_frame_cache *cache;
  struct frame_info *next;

  /* A tailcall frame is never innermost.  */
  next = get_next_frame (this_frame);
  if (next == NULL)
    return 0;

  callee = btrace_get_frame_function (next);
  if (callee == NULL)
    return 0;

  if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
    return 0;

  bfun = callee->up;
  if (bfun == NULL)
    return 0;

  DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
	 btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = find_thread_ptid (inferior_ptid);
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}
1487
1488static void
1489record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
1490{
1491 struct btrace_frame_cache *cache;
1492 void **slot;
1493
1494 cache = this_cache;
1495
1496 slot = htab_find_slot (bfcache, cache, NO_INSERT);
1497 gdb_assert (slot != NULL);
1498
1499 htab_remove_elt (bfcache, cache);
cecac1ab
MM
1500}
1501
/* btrace recording does not store previous memory content, neither the stack
   frames content.  Any unwinding would return erroneous results as the stack
   contents no longer matches the changed PC value restored from history.
   Therefore this unwinder reports any possibly unwound registers as
   <unavailable>.  */

const struct frame_unwind record_btrace_frame_unwind =
{
  NORMAL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,					/* unwind_data */
  record_btrace_frame_sniffer,
  record_btrace_frame_dealloc_cache
};
1518
/* Like record_btrace_frame_unwind, but for frames reached via tail
   calls; only the sniffer differs.  */

const struct frame_unwind record_btrace_tailcall_frame_unwind =
{
  TAILCALL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,					/* unwind_data */
  record_btrace_tailcall_frame_sniffer,
  record_btrace_frame_dealloc_cache
};
b2f4cfde 1529
ac01945b
TT
1530/* Implement the to_get_unwinder method. */
1531
1532static const struct frame_unwind *
1533record_btrace_to_get_unwinder (struct target_ops *self)
1534{
1535 return &record_btrace_frame_unwind;
1536}
1537
1538/* Implement the to_get_tailcall_unwinder method. */
1539
1540static const struct frame_unwind *
1541record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
1542{
1543 return &record_btrace_tailcall_frame_unwind;
1544}
1545
52834460
MM
1546/* Indicate that TP should be resumed according to FLAG. */
1547
1548static void
1549record_btrace_resume_thread (struct thread_info *tp,
1550 enum btrace_thread_flag flag)
1551{
1552 struct btrace_thread_info *btinfo;
1553
1554 DEBUG ("resuming %d (%s): %u", tp->num, target_pid_to_str (tp->ptid), flag);
1555
1556 btinfo = &tp->btrace;
1557
1558 if ((btinfo->flags & BTHR_MOVE) != 0)
1559 error (_("Thread already moving."));
1560
1561 /* Fetch the latest branch trace. */
1562 btrace_fetch (tp);
1563
1564 btinfo->flags |= flag;
1565}
1566
1567/* Find the thread to resume given a PTID. */
1568
1569static struct thread_info *
1570record_btrace_find_resume_thread (ptid_t ptid)
1571{
1572 struct thread_info *tp;
1573
1574 /* When asked to resume everything, we pick the current thread. */
1575 if (ptid_equal (minus_one_ptid, ptid) || ptid_is_pid (ptid))
1576 ptid = inferior_ptid;
1577
1578 return find_thread_ptid (ptid);
1579}
1580
/* Start replaying a thread.

   Positions a new replay iterator at the end of TP's branch trace
   (skipping trailing gaps), installs it in TP's btrace info and fixes up
   the stepping-related frame ids so step commands keep working after the
   switch to replay frames.  Returns the new iterator, or NULL if there
   is no trace.  On error, replay state is rolled back and the exception
   re-thrown.  */

static struct btrace_insn_iterator *
record_btrace_start_replaying (struct thread_info *tp)
{
  volatile struct gdb_exception except;
  struct btrace_insn_iterator *replay;
  struct btrace_thread_info *btinfo;
  int executing;

  btinfo = &tp->btrace;
  replay = NULL;

  /* We can't start replaying without trace.  */
  if (btinfo->begin == NULL)
    return NULL;

  /* Clear the executing flag to allow changes to the current frame.
     We are not actually running, yet.  We just started a reverse execution
     command or a record goto command.
     For the latter, EXECUTING is false and this has no effect.
     For the former, EXECUTING is true and we're in to_wait, about to
     move the thread.  Since we need to recompute the stack, we temporarily
     set EXECUTING to false.  */
  executing = is_executing (tp->ptid);
  set_executing (tp->ptid, 0);

  /* GDB stores the current frame_id when stepping in order to detect steps
     into subroutines.
     Since frames are computed differently when we're replaying, we need to
     recompute those stored frames and fix them up so we can still detect
     subroutines after we started replaying.  */
  TRY_CATCH (except, RETURN_MASK_ALL)
    {
      struct frame_info *frame;
      struct frame_id frame_id;
      int upd_step_frame_id, upd_step_stack_frame_id;

      /* The current frame without replaying - computed via normal unwind.  */
      frame = get_current_frame ();
      frame_id = get_frame_id (frame);

      /* Check if we need to update any stepping-related frame id's.  */
      upd_step_frame_id = frame_id_eq (frame_id,
				       tp->control.step_frame_id);
      upd_step_stack_frame_id = frame_id_eq (frame_id,
					     tp->control.step_stack_frame_id);

      /* We start replaying at the end of the branch trace.  This corresponds
	 to the current instruction.  */
      replay = xmalloc (sizeof (*replay));
      btrace_insn_end (replay, btinfo);

      /* Skip gaps at the end of the trace.  */
      while (btrace_insn_get (replay) == NULL)
	{
	  unsigned int steps;

	  steps = btrace_insn_prev (replay, 1);
	  if (steps == 0)
	    error (_("No trace."));
	}

      /* We're not replaying, yet.  */
      gdb_assert (btinfo->replay == NULL);
      btinfo->replay = replay;

      /* Make sure we're not using any stale registers.  */
      registers_changed_ptid (tp->ptid);

      /* The current frame with replaying - computed via btrace unwind.  */
      frame = get_current_frame ();
      frame_id = get_frame_id (frame);

      /* Replace stepping related frames where necessary.  */
      if (upd_step_frame_id)
	tp->control.step_frame_id = frame_id;
      if (upd_step_stack_frame_id)
	tp->control.step_stack_frame_id = frame_id;
    }

  /* Restore the previous execution state.  */
  set_executing (tp->ptid, executing);

  if (except.reason < 0)
    {
      /* Roll back: discard the replay iterator and invalidate any
	 registers we may have supplied from it.  */
      xfree (btinfo->replay);
      btinfo->replay = NULL;

      registers_changed_ptid (tp->ptid);

      throw_exception (except);
    }

  return replay;
}
1677
1678/* Stop replaying a thread. */
1679
1680static void
1681record_btrace_stop_replaying (struct thread_info *tp)
1682{
1683 struct btrace_thread_info *btinfo;
1684
1685 btinfo = &tp->btrace;
1686
1687 xfree (btinfo->replay);
1688 btinfo->replay = NULL;
1689
1690 /* Make sure we're not leaving any stale registers. */
1691 registers_changed_ptid (tp->ptid);
1692}
1693
/* The to_resume method of target record-btrace.

   Outside of replay and forward execution, merely records the intent to
   move one thread (BTHR_*); the actual stepping happens in
   record_btrace_wait.  Forwards the request to the target beneath when
   not replaying and moving forward.  */

static void
record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
		      enum gdb_signal signal)
{
  struct thread_info *tp, *other;
  enum btrace_thread_flag flag;

  DEBUG ("resume %s: %s", target_pid_to_str (ptid), step ? "step" : "cont");

  /* Store the execution direction of the last resume.  */
  record_btrace_resume_exec_dir = execution_direction;

  tp = record_btrace_find_resume_thread (ptid);
  if (tp == NULL)
    error (_("Cannot find thread to resume."));

  /* Stop replaying other threads if the thread to resume is not replaying.  */
  if (!btrace_is_replaying (tp) && execution_direction != EXEC_REVERSE)
    ALL_NON_EXITED_THREADS (other)
      record_btrace_stop_replaying (other);

  /* As long as we're not replaying, just forward the request.  */
  if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
    {
      ops = ops->beneath;
      return ops->to_resume (ops, ptid, step, signal);
    }

  /* Compute the btrace thread flag for the requested move.  */
  if (step == 0)
    flag = execution_direction == EXEC_REVERSE ? BTHR_RCONT : BTHR_CONT;
  else
    flag = execution_direction == EXEC_REVERSE ? BTHR_RSTEP : BTHR_STEP;

  /* At the moment, we only move a single thread.  We could also move
     all threads in parallel by single-stepping each resumed thread
     until the first runs into an event.
     When we do that, we would want to continue all other threads.
     For now, just resume one thread to not confuse to_wait.  */
  record_btrace_resume_thread (tp, flag);

  /* We just indicate the resume intent here.  The actual stepping happens in
     record_btrace_wait below.  */

  /* Async support.  */
  if (target_can_async_p ())
    {
      target_async (inferior_event_handler, 0);
      mark_async_event_handler (record_btrace_async_inferior_event_handler);
    }
}
1747
1748/* Find a thread to move. */
1749
1750static struct thread_info *
1751record_btrace_find_thread_to_move (ptid_t ptid)
1752{
1753 struct thread_info *tp;
1754
1755 /* First check the parameter thread. */
1756 tp = find_thread_ptid (ptid);
1757 if (tp != NULL && (tp->btrace.flags & BTHR_MOVE) != 0)
1758 return tp;
1759
1760 /* Otherwise, find one other thread that has been resumed. */
034f788c 1761 ALL_NON_EXITED_THREADS (tp)
52834460
MM
1762 if ((tp->btrace.flags & BTHR_MOVE) != 0)
1763 return tp;
1764
1765 return NULL;
1766}
1767
1768/* Return a target_waitstatus indicating that we ran out of history. */
1769
1770static struct target_waitstatus
1771btrace_step_no_history (void)
1772{
1773 struct target_waitstatus status;
1774
1775 status.kind = TARGET_WAITKIND_NO_HISTORY;
1776
1777 return status;
1778}
1779
1780/* Return a target_waitstatus indicating that a step finished. */
1781
1782static struct target_waitstatus
1783btrace_step_stopped (void)
1784{
1785 struct target_waitstatus status;
1786
1787 status.kind = TARGET_WAITKIND_STOPPED;
1788 status.value.sig = GDB_SIGNAL_TRAP;
1789
1790 return status;
1791}
1792
1793/* Clear the record histories. */
1794
1795static void
1796record_btrace_clear_histories (struct btrace_thread_info *btinfo)
1797{
1798 xfree (btinfo->insn_history);
1799 xfree (btinfo->call_history);
1800
1801 btinfo->insn_history = NULL;
1802 btinfo->call_history = NULL;
1803}
1804
/* Step a single thread.

   Consumes TP's pending BTHR_MOVE flag and performs the corresponding
   move on the replay iterator: single step or continue, forward or
   reverse.  Gaps in the trace are skipped.  Returns the resulting wait
   status: stopped (SIGTRAP) on a completed step or breakpoint hit, or
   no-history when the end of the trace is reached.  */

static struct target_waitstatus
record_btrace_step_thread (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay, end;
  struct btrace_thread_info *btinfo;
  struct address_space *aspace;
  struct inferior *inf;
  enum btrace_thread_flag flags;
  unsigned int steps;

  /* We can't step without an execution history.  */
  if (btrace_is_empty (tp))
    return btrace_step_no_history ();

  btinfo = &tp->btrace;
  replay = btinfo->replay;

  /* Consume the pending move request.  */
  flags = btinfo->flags & BTHR_MOVE;
  btinfo->flags &= ~BTHR_MOVE;

  DEBUG ("stepping %d (%s): %u", tp->num, target_pid_to_str (tp->ptid), flags);

  switch (flags)
    {
    default:
      internal_error (__FILE__, __LINE__, _("invalid stepping type."));

    case BTHR_STEP:
      /* We're done if we're not replaying.  */
      if (replay == NULL)
	return btrace_step_no_history ();

      /* Skip gaps during replay.  */
      do
	{
	  steps = btrace_insn_next (replay, 1);
	  if (steps == 0)
	    {
	      record_btrace_stop_replaying (tp);
	      return btrace_step_no_history ();
	    }
	}
      while (btrace_insn_get (replay) == NULL);

      /* Determine the end of the instruction trace.  */
      btrace_insn_end (&end, btinfo);

      /* We stop replaying if we reached the end of the trace.  */
      if (btrace_insn_cmp (replay, &end) == 0)
	record_btrace_stop_replaying (tp);

      return btrace_step_stopped ();

    case BTHR_RSTEP:
      /* Start replaying if we're not already doing so.  */
      if (replay == NULL)
	replay = record_btrace_start_replaying (tp);

      /* If we can't step any further, we reached the end of the history.
	 Skip gaps during replay.  */
      do
	{
	  steps = btrace_insn_prev (replay, 1);
	  if (steps == 0)
	    return btrace_step_no_history ();

	}
      while (btrace_insn_get (replay) == NULL);

      return btrace_step_stopped ();

    case BTHR_CONT:
      /* We're done if we're not replaying.  */
      if (replay == NULL)
	return btrace_step_no_history ();

      inf = find_inferior_ptid (tp->ptid);
      aspace = inf->aspace;

      /* Determine the end of the instruction trace.  */
      btrace_insn_end (&end, btinfo);

      /* Step forward until a breakpoint or the end of the trace.  */
      for (;;)
	{
	  const struct btrace_insn *insn;

	  /* Skip gaps during replay.  */
	  do
	    {
	      steps = btrace_insn_next (replay, 1);
	      if (steps == 0)
		{
		  record_btrace_stop_replaying (tp);
		  return btrace_step_no_history ();
		}

	      insn = btrace_insn_get (replay);
	    }
	  while (insn == NULL);

	  /* We stop replaying if we reached the end of the trace.  */
	  if (btrace_insn_cmp (replay, &end) == 0)
	    {
	      record_btrace_stop_replaying (tp);
	      return btrace_step_no_history ();
	    }

	  DEBUG ("stepping %d (%s) ... %s", tp->num,
		 target_pid_to_str (tp->ptid),
		 core_addr_to_string_nz (insn->pc));

	  if (breakpoint_here_p (aspace, insn->pc))
	    return btrace_step_stopped ();
	}

    case BTHR_RCONT:
      /* Start replaying if we're not already doing so.  */
      if (replay == NULL)
	replay = record_btrace_start_replaying (tp);

      inf = find_inferior_ptid (tp->ptid);
      aspace = inf->aspace;

      /* Step backward until a breakpoint or the start of the trace.  */
      for (;;)
	{
	  const struct btrace_insn *insn;

	  /* If we can't step any further, we reached the end of the history.
	     Skip gaps during replay.  */
	  do
	    {
	      steps = btrace_insn_prev (replay, 1);
	      if (steps == 0)
		return btrace_step_no_history ();

	      insn = btrace_insn_get (replay);
	    }
	  while (insn == NULL);

	  DEBUG ("reverse-stepping %d (%s) ... %s", tp->num,
		 target_pid_to_str (tp->ptid),
		 core_addr_to_string_nz (insn->pc));

	  if (breakpoint_here_p (aspace, insn->pc))
	    return btrace_step_stopped ();
	}
    }
}
1955
/* The to_wait method of target record-btrace.

   Performs the move requested by a preceding record_btrace_resume on a
   single thread and reports the result.  Forwards the request when not
   replaying and moving forward.  */

static ptid_t
record_btrace_wait (struct target_ops *ops, ptid_t ptid,
		    struct target_waitstatus *status, int options)
{
  struct thread_info *tp, *other;

  DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);

  /* As long as we're not replaying, just forward the request.  */
  if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
    {
      ops = ops->beneath;
      return ops->to_wait (ops, ptid, status, options);
    }

  /* Let's find a thread to move.  */
  tp = record_btrace_find_thread_to_move (ptid);
  if (tp == NULL)
    {
      DEBUG ("wait %s: no thread", target_pid_to_str (ptid));

      status->kind = TARGET_WAITKIND_IGNORE;
      return minus_one_ptid;
    }

  /* We only move a single thread.  We're not able to correlate threads.  */
  *status = record_btrace_step_thread (tp);

  /* Stop all other threads.  */
  if (!non_stop)
    ALL_NON_EXITED_THREADS (other)
      other->btrace.flags &= ~BTHR_MOVE;

  /* Start record histories anew from the current position.  */
  record_btrace_clear_histories (&tp->btrace);

  /* We moved the replay position but did not update registers.  */
  registers_changed_ptid (tp->ptid);

  return tp->ptid;
}
1999
/* The to_can_execute_reverse method of target record-btrace.

   The recorded branch trace always allows reverse execution.  */

static int
record_btrace_can_execute_reverse (struct target_ops *self)
{
  return 1;
}
2007
2008/* The to_decr_pc_after_break method of target record-btrace. */
2009
2010static CORE_ADDR
2011record_btrace_decr_pc_after_break (struct target_ops *ops,
2012 struct gdbarch *gdbarch)
2013{
2014 /* When replaying, we do not actually execute the breakpoint instruction
2015 so there is no need to adjust the PC after hitting a breakpoint. */
1c63c994 2016 if (record_btrace_is_replaying (ops))
52834460
MM
2017 return 0;
2018
c0eca49f 2019 return ops->beneath->to_decr_pc_after_break (ops->beneath, gdbarch);
b2f4cfde
MM
2020}
2021
e8032dde 2022/* The to_update_thread_list method of target record-btrace. */
e2887aa3
MM
2023
2024static void
e8032dde 2025record_btrace_update_thread_list (struct target_ops *ops)
e2887aa3 2026{
e8032dde 2027 /* We don't add or remove threads during replay. */
1c63c994 2028 if (record_btrace_is_replaying (ops))
e2887aa3
MM
2029 return;
2030
2031 /* Forward the request. */
e75fdfca 2032 ops = ops->beneath;
e8032dde 2033 ops->to_update_thread_list (ops);
e2887aa3
MM
2034}
2035
2036/* The to_thread_alive method of target record-btrace. */
2037
2038static int
2039record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
2040{
2041 /* We don't add or remove threads during replay. */
1c63c994 2042 if (record_btrace_is_replaying (ops))
e2887aa3
MM
2043 return find_thread_ptid (ptid) != NULL;
2044
2045 /* Forward the request. */
e75fdfca
TT
2046 ops = ops->beneath;
2047 return ops->to_thread_alive (ops, ptid);
e2887aa3
MM
2048}
2049
/* Set the replay branch trace instruction iterator.  If IT is NULL, replay
   is stopped.

   Starts replaying if needed, copies IT into the thread's replay
   iterator, invalidates cached registers and clears the record
   histories so they restart from the new position.  Does nothing if IT
   already equals the current replay position.  */

static void
record_btrace_set_replay (struct thread_info *tp,
			  const struct btrace_insn_iterator *it)
{
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;

  if (it == NULL || it->function == NULL)
    record_btrace_stop_replaying (tp);
  else
    {
      if (btinfo->replay == NULL)
	record_btrace_start_replaying (tp);
      else if (btrace_insn_cmp (btinfo->replay, it) == 0)
	return;		/* Already at the requested position.  */

      *btinfo->replay = *it;
      /* The replay position changed; cached registers are stale.  */
      registers_changed_ptid (tp->ptid);
    }

  /* Start anew from the new replay position.  */
  record_btrace_clear_histories (btinfo);
}
2077
2078/* The to_goto_record_begin method of target record-btrace. */
2079
2080static void
08475817 2081record_btrace_goto_begin (struct target_ops *self)
066ce621
MM
2082{
2083 struct thread_info *tp;
2084 struct btrace_insn_iterator begin;
2085
2086 tp = require_btrace_thread ();
2087
2088 btrace_insn_begin (&begin, &tp->btrace);
2089 record_btrace_set_replay (tp, &begin);
2090
2091 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
2092}
2093
2094/* The to_goto_record_end method of target record-btrace. */
2095
2096static void
307a1b91 2097record_btrace_goto_end (struct target_ops *ops)
066ce621
MM
2098{
2099 struct thread_info *tp;
2100
2101 tp = require_btrace_thread ();
2102
2103 record_btrace_set_replay (tp, NULL);
2104
2105 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
2106}
2107
2108/* The to_goto_record method of target record-btrace. */
2109
2110static void
606183ac 2111record_btrace_goto (struct target_ops *self, ULONGEST insn)
066ce621
MM
2112{
2113 struct thread_info *tp;
2114 struct btrace_insn_iterator it;
2115 unsigned int number;
2116 int found;
2117
2118 number = insn;
2119
2120 /* Check for wrap-arounds. */
2121 if (number != insn)
2122 error (_("Instruction number out of range."));
2123
2124 tp = require_btrace_thread ();
2125
2126 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
2127 if (found == 0)
2128 error (_("No such instruction."));
2129
2130 record_btrace_set_replay (tp, &it);
2131
2132 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
2133}
2134
70ad5bff
MM
2135/* The to_execution_direction target method. */
2136
2137static enum exec_direction_kind
2138record_btrace_execution_direction (struct target_ops *self)
2139{
2140 return record_btrace_resume_exec_dir;
2141}
2142
aef92902
MM
2143/* The to_prepare_to_generate_core target method. */
2144
2145static void
2146record_btrace_prepare_to_generate_core (struct target_ops *self)
2147{
2148 record_btrace_generating_corefile = 1;
2149}
2150
2151/* The to_done_generating_core target method. */
2152
2153static void
2154record_btrace_done_generating_core (struct target_ops *self)
2155{
2156 record_btrace_generating_corefile = 0;
2157}
2158
/* Initialize the record-btrace target ops.  */

static void
init_record_btrace_ops (void)
{
  struct target_ops *ops;

  ops = &record_btrace_ops;

  /* Identification.  */
  ops->to_shortname = "record-btrace";
  ops->to_longname = "Branch tracing target";
  ops->to_doc = "Collect control-flow trace and provide the execution history.";

  /* Lifetime.  */
  ops->to_open = record_btrace_open;
  ops->to_close = record_btrace_close;
  ops->to_async = record_btrace_async;
  ops->to_detach = record_detach;
  ops->to_disconnect = record_disconnect;
  ops->to_mourn_inferior = record_mourn_inferior;
  ops->to_kill = record_kill;

  /* Recording and history browsing.  */
  ops->to_stop_recording = record_btrace_stop_recording;
  ops->to_info_record = record_btrace_info;
  ops->to_insn_history = record_btrace_insn_history;
  ops->to_insn_history_from = record_btrace_insn_history_from;
  ops->to_insn_history_range = record_btrace_insn_history_range;
  ops->to_call_history = record_btrace_call_history;
  ops->to_call_history_from = record_btrace_call_history_from;
  ops->to_call_history_range = record_btrace_call_history_range;
  ops->to_record_is_replaying = record_btrace_is_replaying;

  /* Memory, breakpoints and registers.  */
  ops->to_xfer_partial = record_btrace_xfer_partial;
  ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
  ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
  ops->to_fetch_registers = record_btrace_fetch_registers;
  ops->to_store_registers = record_btrace_store_registers;
  ops->to_prepare_to_store = record_btrace_prepare_to_store;

  /* Unwinding replay frames.  */
  ops->to_get_unwinder = &record_btrace_to_get_unwinder;
  ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;

  /* Execution control.  */
  ops->to_resume = record_btrace_resume;
  ops->to_wait = record_btrace_wait;
  ops->to_update_thread_list = record_btrace_update_thread_list;
  ops->to_thread_alive = record_btrace_thread_alive;
  ops->to_goto_record_begin = record_btrace_goto_begin;
  ops->to_goto_record_end = record_btrace_goto_end;
  ops->to_goto_record = record_btrace_goto;
  ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
  ops->to_decr_pc_after_break = record_btrace_decr_pc_after_break;
  ops->to_execution_direction = record_btrace_execution_direction;

  /* Core file generation.  */
  ops->to_prepare_to_generate_core = record_btrace_prepare_to_generate_core;
  ops->to_done_generating_core = record_btrace_done_generating_core;

  ops->to_stratum = record_stratum;
  ops->to_magic = OPS_MAGIC;
}
2209
f4abbc16
MM
2210/* Start recording in BTS format. */
2211
2212static void
2213cmd_record_btrace_bts_start (char *args, int from_tty)
2214{
2215 volatile struct gdb_exception exception;
2216
2217 if (args != NULL && *args != 0)
2218 error (_("Invalid argument."));
2219
2220 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2221
2222 TRY_CATCH (exception, RETURN_MASK_ALL)
2223 execute_command ("target record-btrace", from_tty);
2224
2225 if (exception.error != 0)
2226 {
2227 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2228 throw_exception (exception);
2229 }
2230}
2231
afedecd3
MM
/* Alias for "target record".

   Starts recording in the default format, which currently is BTS.
   This used to duplicate cmd_record_btrace_bts_start verbatim (same
   argument check, same format selection, same rollback on failure);
   delegate to it instead so the default-format start logic lives in
   exactly one place.  Behavior is unchanged: an argument raises an
   error, and on failure record_btrace_conf.format is reset to
   BTRACE_FORMAT_NONE before the exception propagates.  */

static void
cmd_record_btrace_start (char *args, int from_tty)
{
  /* BTS is the default (and only) recording format.  */
  cmd_record_btrace_bts_start (args, from_tty);
}
2253
67b5c0c1
MM
/* The "set record btrace" command.

   Invoked when "set record btrace" is typed without a subcommand;
   lists the current values of all "set record btrace" settings.
   ARGS is unused; FROM_TTY is the usual interactivity flag.  */

static void
cmd_set_record_btrace (char *args, int from_tty)
{
  cmd_show_list (set_record_btrace_cmdlist, from_tty, "");
}
2261
/* The "show record btrace" command.

   Invoked when "show record btrace" is typed without a subcommand;
   lists the current values of all "show record btrace" settings.
   ARGS is unused; FROM_TTY is the usual interactivity flag.  */

static void
cmd_show_record_btrace (char *args, int from_tty)
{
  cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
}
2269
2270/* The "show record btrace replay-memory-access" command. */
2271
2272static void
2273cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2274 struct cmd_list_element *c, const char *value)
2275{
2276 fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
2277 replay_memory_access);
2278}
2279
d33501a5
MM
2280/* The "set record btrace bts" command. */
2281
2282static void
2283cmd_set_record_btrace_bts (char *args, int from_tty)
2284{
2285 printf_unfiltered (_("\"set record btrace bts\" must be followed "
2286 "by an apporpriate subcommand.\n"));
2287 help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
2288 all_commands, gdb_stdout);
2289}
2290
/* The "show record btrace bts" command.

   Invoked when "show record btrace bts" is typed without a subcommand;
   lists the current values of all "show record btrace bts" settings.
   ARGS is unused; FROM_TTY is the usual interactivity flag.  */

static void
cmd_show_record_btrace_bts (char *args, int from_tty)
{
  cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
}
2298
afedecd3
MM
/* Forward declaration to provide a prototype for the module
   initializer below.  */
void _initialize_record_btrace (void);

/* Initialize btrace commands.

   Module initializer, run once at GDB startup: registers the
   "record btrace" command family, the "set/show record btrace"
   option trees, and the record-btrace target itself.  */

void
_initialize_record_btrace (void)
{
  /* "record btrace" starts recording; "record b" is an alias.  */
  add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
		  _("Start branch trace recording."), &record_btrace_cmdlist,
		  "record btrace ", 0, &record_cmdlist);
  add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);

  /* "record btrace bts" selects the BTS format explicitly;
     "record bts" is an alias.  */
  add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
	   _("\
Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
The processor stores a from/to record for each branch into a cyclic buffer.\n\
This format may not be available on all processors."),
	   &record_btrace_cmdlist);
  add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);

  /* Prefix commands for the "set/show record btrace" option trees.  */
  add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
		  _("Set record options"), &set_record_btrace_cmdlist,
		  "set record btrace ", 0, &set_record_cmdlist);

  add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
		  _("Show record options"), &show_record_btrace_cmdlist,
		  "show record btrace ", 0, &show_record_cmdlist);

  /* "set record btrace replay-memory-access": chooses between the
     read-only and read-write access policies declared at the top of
     this file; the setting is stored in replay_memory_access.  */
  add_setshow_enum_cmd ("replay-memory-access", no_class,
			replay_memory_access_types, &replay_memory_access, _("\
Set what memory accesses are allowed during replay."), _("\
Show what memory accesses are allowed during replay."),
			   _("Default is READ-ONLY.\n\n\
The btrace record target does not trace data.\n\
The memory therefore corresponds to the live target and not \
to the current replay position.\n\n\
When READ-ONLY, allow accesses to read-only memory during replay.\n\
When READ-WRITE, allow accesses to read-only and read-write memory during \
replay."),
			   NULL, cmd_show_replay_memory_access,
			   &set_record_btrace_cmdlist,
			   &show_record_btrace_cmdlist);

  /* Prefix commands for the BTS-specific option sub-trees.  */
  add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
		  _("Set record btrace bts options"),
		  &set_record_btrace_bts_cmdlist,
		  "set record btrace bts ", 0, &set_record_btrace_cmdlist);

  add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
		  _("Show record btrace bts options"),
		  &show_record_btrace_bts_cmdlist,
		  "show record btrace bts ", 0, &show_record_btrace_cmdlist);

  /* "set record btrace bts buffer-size": the requested trace buffer
     size, written directly into record_btrace_conf.bts.size.  */
  add_setshow_uinteger_cmd ("buffer-size", no_class,
			    &record_btrace_conf.bts.size,
			    _("Set the record/replay bts buffer size."),
			    _("Show the record/replay bts buffer size."), _("\
When starting recording request a trace buffer of this size. \
The actual buffer size may differ from the requested size. \
Use \"info record\" to see the actual buffer size.\n\n\
Bigger buffers allow longer recording but also take more time to process \
the recorded execution trace.\n\n\
The trace buffer size may not be changed while recording."), NULL, NULL,
			    &set_record_btrace_bts_cmdlist,
			    &show_record_btrace_bts_cmdlist);

  /* Register the record-btrace target with GDB's target machinery.  */
  init_record_btrace_ops ();
  add_target (&record_btrace_ops);

  /* Allocate the bfcache hash table (keyed via bfcache_hash/bfcache_eq;
     presumably caches per-frame data for the btrace unwinder — its
     users are not visible in this chunk).  */
  bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
			       xcalloc, xfree);

  /* Default BTS buffer size: 64 KiB.  */
  record_btrace_conf.bts.size = 64 * 1024;
}
This page took 0.522576 seconds and 4 git commands to generate.