Add .refsym to msp430 backend
[deliverable/binutils-gdb.git] / gdb / record-btrace.c
CommitLineData
afedecd3
MM
1/* Branch trace support for GDB, the GNU debugger.
2
ecd75fc8 3 Copyright (C) 2013-2014 Free Software Foundation, Inc.
afedecd3
MM
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22#include "defs.h"
23#include "record.h"
24#include "gdbthread.h"
25#include "target.h"
26#include "gdbcmd.h"
27#include "disasm.h"
28#include "observer.h"
29#include "exceptions.h"
30#include "cli/cli-utils.h"
31#include "source.h"
32#include "ui-out.h"
33#include "symtab.h"
34#include "filenames.h"
1f3ef581 35#include "regcache.h"
cecac1ab 36#include "frame-unwind.h"
0b722aec 37#include "hashtab.h"
afedecd3
MM
38
/* The target_ops of record-btrace.  Filled in and registered at
   initialization time; pushed in record_btrace_open.  */
static struct target_ops record_btrace_ops;

/* A new thread observer enabling branch tracing for the new thread.
   Non-NULL exactly while automatic tracing of new threads is active
   (see record_btrace_auto_enable / record_btrace_auto_disable).  */
static struct observer *record_btrace_thread_observer;

/* Temporarily allow memory accesses while replaying.  Set around the
   breakpoint insert/remove methods, checked in to_xfer_partial.  */
static int record_btrace_allow_memory_access;
47
afedecd3
MM
/* Print a record-btrace debug message to gdb_stdlog when "set debug record"
   is non-zero.  Use do ... while (0) to avoid ambiguities when used in if
   statements.  */

#define DEBUG(msg, args...) \
  do \
    { \
      if (record_debug != 0) \
        fprintf_unfiltered (gdb_stdlog, \
                            "[record-btrace] " msg "\n", ##args); \
    } \
  while (0)
59
60
/* Update the branch trace for the current thread and return a pointer to its
   thread_info.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct thread_info *
require_btrace_thread (void)
{
  struct thread_info *tp;

  DEBUG ("require");

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  /* Pull in any new trace data before checking for emptiness.  */
  btrace_fetch (tp);

  if (btrace_is_empty (tp))
    error (_("No trace."));

  return tp;
}
85
86/* Update the branch trace for the current thread and return a pointer to its
87 branch trace information struct.
88
89 Throws an error if there is no thread or no trace. This function never
90 returns NULL. */
91
92static struct btrace_thread_info *
93require_btrace (void)
94{
95 struct thread_info *tp;
96
97 tp = require_btrace_thread ();
98
99 return &tp->btrace;
afedecd3
MM
100}
101
/* Enable branch tracing for one thread.  Warn on errors.

   Installed as the new-thread observer (see record_btrace_auto_enable), so
   it must not propagate exceptions; errors are downgraded to warnings.  */

static void
record_btrace_enable_warn (struct thread_info *tp)
{
  volatile struct gdb_exception error;

  TRY_CATCH (error, RETURN_MASK_ERROR)
    btrace_enable (tp);

  if (error.message != NULL)
    warning ("%s", error.message);
}
115
/* Cleanup callback disabling branch tracing for one thread.  ARG is the
   struct thread_info to stop tracing.  */

static void
record_btrace_disable_callback (void *arg)
{
  struct thread_info *thread = arg;

  btrace_disable (thread);
}
127
/* Enable automatic tracing of new threads by attaching a new-thread
   observer.  The observer handle is kept in
   record_btrace_thread_observer so it can be detached later.  */

static void
record_btrace_auto_enable (void)
{
  DEBUG ("attach thread observer");

  record_btrace_thread_observer
    = observer_attach_new_thread (record_btrace_enable_warn);
}
138
/* Disable automatic tracing of new threads.  Safe to call more than once;
   does nothing if the observer is not attached.  */

static void
record_btrace_auto_disable (void)
{
  /* The observer may have been detached, already.  */
  if (record_btrace_thread_observer == NULL)
    return;

  DEBUG ("detach thread observer");

  observer_detach_new_thread (record_btrace_thread_observer);
  record_btrace_thread_observer = NULL;
}
153
/* The to_open method of target record-btrace.

   ARGS, if non-empty, is a thread number list restricting which threads get
   branch tracing enabled.  On any error before completion, the cleanup chain
   disables tracing again for every thread already enabled.  */

static void
record_btrace_open (char *args, int from_tty)
{
  struct cleanup *disable_chain;
  struct thread_info *tp;

  DEBUG ("open");

  record_preopen ();

  if (!target_has_execution)
    error (_("The program is not being run."));

  if (!target_supports_btrace ())
    error (_("Target does not support branch tracing."));

  if (non_stop)
    error (_("Record btrace can't debug inferior in non-stop mode."));

  gdb_assert (record_btrace_thread_observer == NULL);

  disable_chain = make_cleanup (null_cleanup, NULL);
  ALL_THREADS (tp)
    if (args == NULL || *args == 0 || number_is_in_list (args, tp->num))
      {
        btrace_enable (tp);

        /* Undo on error: one disable cleanup per enabled thread.  */
        make_cleanup (record_btrace_disable_callback, tp);
      }

  record_btrace_auto_enable ();

  push_target (&record_btrace_ops);

  observer_notify_record_changed (current_inferior (), 1);

  /* Success: keep tracing enabled.  */
  discard_cleanups (disable_chain);
}
194
/* The to_stop_recording method of target record-btrace.

   Stops automatic tracing of new threads, then disables tracing for every
   thread that currently has it enabled (tp->btrace.target != NULL).  */

static void
record_btrace_stop_recording (void)
{
  struct thread_info *tp;

  DEBUG ("stop recording");

  record_btrace_auto_disable ();

  ALL_THREADS (tp)
    if (tp->btrace.target != NULL)
      btrace_disable (tp);
}
210
/* The to_close method of target record-btrace.  */

static void
record_btrace_close (void)
{
  /* Make sure automatic recording gets disabled even if we did not stop
     recording before closing the record-btrace target.  */
  record_btrace_auto_disable ();

  /* We already stopped recording.  */
}
222
/* The to_info_record method of target record-btrace.

   Prints the number of recorded instructions and function segments for the
   current thread, and the replay position if replaying.  The counts are the
   numbers of the last entries in the trace (iterate to the end, step back
   one, read the number).  */

static void
record_btrace_info (void)
{
  struct btrace_thread_info *btinfo;
  struct thread_info *tp;
  unsigned int insns, calls;

  DEBUG ("info");

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  btrace_fetch (tp);

  insns = 0;
  calls = 0;

  btinfo = &tp->btrace;

  if (!btrace_is_empty (tp))
    {
      struct btrace_call_iterator call;
      struct btrace_insn_iterator insn;

      btrace_call_end (&call, btinfo);
      btrace_call_prev (&call, 1);
      calls = btrace_call_number (&call);

      btrace_insn_end (&insn, btinfo);
      btrace_insn_prev (&insn, 1);
      insns = btrace_insn_number (&insn);
    }

  printf_unfiltered (_("Recorded %u instructions in %u functions for thread "
                       "%d (%s).\n"), insns, calls, tp->num,
                     target_pid_to_str (tp->ptid));

  if (btrace_is_replaying (tp))
    printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
                       btrace_insn_number (btinfo->replay));
}
267
/* Print an unsigned int VAL as ui-out field FLD.  */

static void
ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
{
  ui_out_field_fmt (uiout, fld, "%u", val);
}
275
/* Disassemble a section of the recorded instruction trace.

   Prints each instruction in [BEGIN; END) as its trace index followed by
   its disassembly.  FLAGS are the record_print_flags controlling the
   disassembly output.  */

static void
btrace_insn_history (struct ui_out *uiout,
                     const struct btrace_insn_iterator *begin,
                     const struct btrace_insn_iterator *end, int flags)
{
  struct gdbarch *gdbarch;
  struct btrace_insn_iterator it;

  DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
         btrace_insn_number (end));

  gdbarch = target_gdbarch ();

  for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
    {
      const struct btrace_insn *insn;

      insn = btrace_insn_get (&it);

      /* Print the instruction index.  */
      ui_out_field_uint (uiout, "index", btrace_insn_number (&it));
      ui_out_text (uiout, "\t");

      /* Disassembly with '/m' flag may not produce the expected result.
         See PR gdb/11833.  */
      gdb_disassembly (gdbarch, uiout, NULL, flags, 1, insn->pc, insn->pc + 1);
    }
}
306
/* The to_insn_history method of target record-btrace.

   Prints abs (SIZE) instructions: on the first request, centered around the
   replay position (or the trace tail when not replaying); on subsequent
   requests, continuing backward (SIZE < 0) or forward (SIZE > 0) from the
   previously printed window.  The new window is stored back via
   btrace_set_insn_history for the next continuation.  */

static void
record_btrace_insn_history (int size, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
                                                       "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  btinfo = require_btrace ();
  history = btinfo->insn_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("insn-history (0x%x): %d", flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
         start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
        begin = *replay;
      else
        btrace_insn_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
         expand in the other direction, as well, to fill up any remaining
         context.  */
      end = begin;
      if (size < 0)
        {
          /* We want the current position covered, as well.  */
          covered = btrace_insn_next (&end, 1);
          covered += btrace_insn_prev (&begin, context - covered);
          covered += btrace_insn_next (&end, context - covered);
        }
      else
        {
          covered = btrace_insn_next (&end, context);
          covered += btrace_insn_prev (&begin, context - covered);
        }
    }
  else
    {
      /* Continue from the previously printed window.  */
      begin = history->begin;
      end = history->end;

      DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
             btrace_insn_number (&begin), btrace_insn_number (&end));

      if (size < 0)
        {
          end = begin;
          covered = btrace_insn_prev (&begin, context);
        }
      else
        {
          begin = end;
          covered = btrace_insn_next (&end, context);
        }
    }

  if (covered > 0)
    btrace_insn_history (uiout, &begin, &end, flags);
  else
    {
      if (size < 0)
        printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
        printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  btrace_set_insn_history (btinfo, &begin, &end);
  do_cleanups (uiout_cleanup);
}
392
/* The to_insn_history_range method of target record-btrace.

   Prints the recorded instructions with numbers in [FROM; TO], both ends
   inclusive.  Errors out if the range wraps when narrowed to unsigned int,
   is reversed, or starts past the end of the trace; a TO past the end is
   silently truncated.  */

static void
record_btrace_insn_history_range (ULONGEST from, ULONGEST to, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
                                                       "insn history");
  low = from;
  high = to;

  DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_insn_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_insn_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_insn_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_insn_next (&end, 1);
    }

  btrace_insn_history (uiout, &begin, &end, flags);
  btrace_set_insn_history (btinfo, &begin, &end);

  do_cleanups (uiout_cleanup);
}
444
445/* The to_insn_history_from method of target record-btrace. */
446
447static void
448record_btrace_insn_history_from (ULONGEST from, int size, int flags)
449{
450 ULONGEST begin, end, context;
451
452 context = abs (size);
0688d04e
MM
453 if (context == 0)
454 error (_("Bad record instruction-history-size."));
afedecd3
MM
455
456 if (size < 0)
457 {
458 end = from;
459
460 if (from < context)
461 begin = 0;
462 else
0688d04e 463 begin = from - context + 1;
afedecd3
MM
464 }
465 else
466 {
467 begin = from;
0688d04e 468 end = from + context - 1;
afedecd3
MM
469
470 /* Check for wrap-around. */
471 if (end < begin)
472 end = ULONGEST_MAX;
473 }
474
475 record_btrace_insn_history_range (begin, end, flags);
476}
477
/* Print the instruction number range for a function call history line.

   Prints "begin,end", both inclusive, derived from the function segment's
   instruction offset and the length of its instruction vector (asserted
   non-empty).  */

static void
btrace_call_history_insn_range (struct ui_out *uiout,
                                const struct btrace_function *bfun)
{
  unsigned int begin, end, size;

  size = VEC_length (btrace_insn_s, bfun->insn);
  gdb_assert (size > 0);

  begin = bfun->insn_offset;
  end = begin + size - 1;

  ui_out_field_uint (uiout, "insn begin", begin);
  ui_out_text (uiout, ",");
  ui_out_field_uint (uiout, "insn end", end);
}
496
/* Print the source line information for a function call history line.

   Prints "file:min" or "file:min,max" for BFUN's source line range; prints
   nothing when there is no symbol or the range is empty (lend < lbegin).  */

static void
btrace_call_history_src_line (struct ui_out *uiout,
                              const struct btrace_function *bfun)
{
  struct symbol *sym;
  int begin, end;

  sym = bfun->sym;
  if (sym == NULL)
    return;

  ui_out_field_string (uiout, "file",
                       symtab_to_filename_for_display (sym->symtab));

  begin = bfun->lbegin;
  end = bfun->lend;

  if (end < begin)
    return;

  ui_out_text (uiout, ":");
  ui_out_field_int (uiout, "min line", begin);

  /* Single-line range: print only the one line number.  */
  if (end == begin)
    return;

  ui_out_text (uiout, ",");
  ui_out_field_int (uiout, "max line", end);
}
528
0b722aec
MM
529/* Get the name of a branch trace function. */
530
531static const char *
532btrace_get_bfun_name (const struct btrace_function *bfun)
533{
534 struct minimal_symbol *msym;
535 struct symbol *sym;
536
537 if (bfun == NULL)
538 return "??";
539
540 msym = bfun->msym;
541 sym = bfun->sym;
542
543 if (sym != NULL)
544 return SYMBOL_PRINT_NAME (sym);
545 else if (msym != NULL)
546 return SYMBOL_PRINT_NAME (msym);
547 else
548 return "??";
549}
550
afedecd3
MM
/* Disassemble a section of the recorded function trace.

   Prints one line per function segment in [BEGIN; END): the segment index,
   optional call-depth indentation, the function name, and - depending on
   FLAGS - the instruction number range and source line range.  */

static void
btrace_call_history (struct ui_out *uiout,
                     const struct btrace_thread_info *btinfo,
                     const struct btrace_call_iterator *begin,
                     const struct btrace_call_iterator *end,
                     enum record_print_flag flags)
{
  struct btrace_call_iterator it;

  DEBUG ("ftrace (0x%x): [%u; %u)", flags, btrace_call_number (begin),
         btrace_call_number (end));

  for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
    {
      const struct btrace_function *bfun;
      struct minimal_symbol *msym;
      struct symbol *sym;

      bfun = btrace_call_get (&it);
      sym = bfun->sym;
      msym = bfun->msym;

      /* Print the function index.  */
      ui_out_field_uint (uiout, "index", bfun->number);
      ui_out_text (uiout, "\t");

      if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
        {
          /* Indent by the call depth: the segment's relative level plus the
             thread-wide level offset.  */
          int level = bfun->level + btinfo->level, i;

          for (i = 0; i < level; ++i)
            ui_out_text (uiout, " ");
        }

      if (sym != NULL)
        ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (sym));
      else if (msym != NULL)
        ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (msym));
      else if (!ui_out_is_mi_like_p (uiout))
        ui_out_field_string (uiout, "function", "??");

      if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
        {
          ui_out_text (uiout, _("\tinst "));
          btrace_call_history_insn_range (uiout, bfun);
        }

      if ((flags & RECORD_PRINT_SRC_LINE) != 0)
        {
          ui_out_text (uiout, _("\tat "));
          btrace_call_history_src_line (uiout, bfun);
        }

      ui_out_text (uiout, "\n");
    }
}
609
/* The to_call_history method of target record-btrace.

   Prints abs (SIZE) function segments: on the first request, centered around
   the replay position (or the trace tail when not replaying); on subsequent
   requests, continuing backward (SIZE < 0) or forward (SIZE > 0) from the
   previously printed window.  The new window is stored back via
   btrace_set_call_history for the next continuation.  */

static void
record_btrace_call_history (int size, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
                                                       "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  btinfo = require_btrace ();
  history = btinfo->call_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("call-history (0x%x): %d", flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
         start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
        {
          /* Build a call iterator for the replayed function segment.  */
          begin.function = replay->function;
          begin.btinfo = btinfo;
        }
      else
        btrace_call_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
         expand in the other direction, as well, to fill up any remaining
         context.  */
      end = begin;
      if (size < 0)
        {
          /* We want the current position covered, as well.  */
          covered = btrace_call_next (&end, 1);
          covered += btrace_call_prev (&begin, context - covered);
          covered += btrace_call_next (&end, context - covered);
        }
      else
        {
          covered = btrace_call_next (&end, context);
          covered += btrace_call_prev (&begin, context - covered);
        }
    }
  else
    {
      /* Continue from the previously printed window.  */
      begin = history->begin;
      end = history->end;

      DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", flags, size,
             btrace_call_number (&begin), btrace_call_number (&end));

      if (size < 0)
        {
          end = begin;
          covered = btrace_call_prev (&begin, context);
        }
      else
        {
          begin = end;
          covered = btrace_call_next (&end, context);
        }
    }

  if (covered > 0)
    btrace_call_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
        printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
        printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  btrace_set_call_history (btinfo, &begin, &end);
  do_cleanups (uiout_cleanup);
}
698
/* The to_call_history_range method of target record-btrace.

   Prints the recorded function segments numbered [FROM; TO], both ends
   inclusive.  Errors out if the range wraps when narrowed to unsigned int,
   is reversed, or starts past the end of the trace; a TO past the end is
   silently truncated.  */

static void
record_btrace_call_history_range (ULONGEST from, ULONGEST to, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
                                                       "func history");
  low = from;
  high = to;

  DEBUG ("call-history (0x%x): [%u; %u)", flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_call_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_call_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_call_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_call_next (&end, 1);
    }

  btrace_call_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_call_history (btinfo, &begin, &end);

  do_cleanups (uiout_cleanup);
}
750
751/* The to_call_history_from method of target record-btrace. */
752
753static void
754record_btrace_call_history_from (ULONGEST from, int size, int flags)
755{
756 ULONGEST begin, end, context;
757
758 context = abs (size);
0688d04e
MM
759 if (context == 0)
760 error (_("Bad record function-call-history-size."));
afedecd3
MM
761
762 if (size < 0)
763 {
764 end = from;
765
766 if (from < context)
767 begin = 0;
768 else
0688d04e 769 begin = from - context + 1;
afedecd3
MM
770 }
771 else
772 {
773 begin = from;
0688d04e 774 end = from + context - 1;
afedecd3
MM
775
776 /* Check for wrap-around. */
777 if (end < begin)
778 end = ULONGEST_MAX;
779 }
780
781 record_btrace_call_history_range (begin, end, flags);
782}
783
07bbe694
MM
/* The to_record_is_replaying method of target record-btrace.

   Returns non-zero iff any thread is currently replaying its trace.  */

static int
record_btrace_is_replaying (void)
{
  struct thread_info *tp;

  ALL_THREADS (tp)
    if (btrace_is_replaying (tp))
      return 1;

  return 0;
}
797
633785ff
MM
798/* The to_xfer_partial method of target record-btrace. */
799
800static LONGEST
801record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
802 const char *annex, gdb_byte *readbuf,
803 const gdb_byte *writebuf, ULONGEST offset,
804 ULONGEST len)
805{
806 struct target_ops *t;
807
808 /* Filter out requests that don't make sense during replay. */
809 if (!record_btrace_allow_memory_access && record_btrace_is_replaying ())
810 {
811 switch (object)
812 {
813 case TARGET_OBJECT_MEMORY:
814 {
815 struct target_section *section;
816
817 /* We do not allow writing memory in general. */
818 if (writebuf != NULL)
819 return TARGET_XFER_E_UNAVAILABLE;
820
821 /* We allow reading readonly memory. */
822 section = target_section_by_addr (ops, offset);
823 if (section != NULL)
824 {
825 /* Check if the section we found is readonly. */
826 if ((bfd_get_section_flags (section->the_bfd_section->owner,
827 section->the_bfd_section)
828 & SEC_READONLY) != 0)
829 {
830 /* Truncate the request to fit into this section. */
831 len = min (len, section->endaddr - offset);
832 break;
833 }
834 }
835
836 return TARGET_XFER_E_UNAVAILABLE;
837 }
838 }
839 }
840
841 /* Forward the request. */
842 for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
843 if (ops->to_xfer_partial != NULL)
844 return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
845 offset, len);
846
847 return TARGET_XFER_E_UNAVAILABLE;
848}
849
/* The to_insert_breakpoint method of target record-btrace.

   Temporarily lifts the replay memory-access restriction (see
   record_btrace_xfer_partial) so the target beneath can write the
   breakpoint, restoring the old setting even if an exception is thrown.  */

static int
record_btrace_insert_breakpoint (struct target_ops *ops,
                                 struct gdbarch *gdbarch,
                                 struct bp_target_info *bp_tgt)
{
  volatile struct gdb_exception except;
  int old, ret;

  /* Inserting breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = record_btrace_allow_memory_access;
  record_btrace_allow_memory_access = 1;

  ret = 0;
  TRY_CATCH (except, RETURN_MASK_ALL)
    ret = forward_target_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);

  record_btrace_allow_memory_access = old;

  /* Re-throw only after the flag has been restored.  */
  if (except.reason < 0)
    throw_exception (except);

  return ret;
}
876
/* The to_remove_breakpoint method of target record-btrace.

   Mirror image of record_btrace_insert_breakpoint: lifts the replay
   memory-access restriction around the forwarded removal, restoring it
   even on exceptions.  */

static int
record_btrace_remove_breakpoint (struct target_ops *ops,
                                 struct gdbarch *gdbarch,
                                 struct bp_target_info *bp_tgt)
{
  volatile struct gdb_exception except;
  int old, ret;

  /* Removing breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = record_btrace_allow_memory_access;
  record_btrace_allow_memory_access = 1;

  ret = 0;
  TRY_CATCH (except, RETURN_MASK_ALL)
    ret = forward_target_remove_breakpoint (ops->beneath, gdbarch, bp_tgt);

  record_btrace_allow_memory_access = old;

  /* Re-throw only after the flag has been restored.  */
  if (except.reason < 0)
    throw_exception (except);

  return ret;
}
903
1f3ef581
MM
/* The to_fetch_registers method of target record-btrace.

   While replaying, only the PC register can be supplied - its value is the
   address of the current replay instruction; requests for other registers
   are silently ignored.  When not replaying, the request is forwarded to
   the target beneath.  */

static void
record_btrace_fetch_registers (struct target_ops *ops,
                               struct regcache *regcache, int regno)
{
  struct btrace_insn_iterator *replay;
  struct thread_info *tp;

  tp = find_thread_ptid (inferior_ptid);
  gdb_assert (tp != NULL);

  replay = tp->btrace.replay;
  if (replay != NULL)
    {
      const struct btrace_insn *insn;
      struct gdbarch *gdbarch;
      int pcreg;

      gdbarch = get_regcache_arch (regcache);
      pcreg = gdbarch_pc_regnum (gdbarch);
      if (pcreg < 0)
        return;

      /* We can only provide the PC register.  */
      if (regno >= 0 && regno != pcreg)
        return;

      insn = btrace_insn_get (replay);
      gdb_assert (insn != NULL);

      regcache_raw_supply (regcache, regno, &insn->pc);
    }
  else
    {
      struct target_ops *t;

      for (t = ops->beneath; t != NULL; t = t->beneath)
        if (t->to_fetch_registers != NULL)
          {
            t->to_fetch_registers (t, regcache, regno);
            break;
          }
    }
}
949
950/* The to_store_registers method of target record-btrace. */
951
952static void
953record_btrace_store_registers (struct target_ops *ops,
954 struct regcache *regcache, int regno)
955{
956 struct target_ops *t;
957
958 if (record_btrace_is_replaying ())
959 error (_("This record target does not allow writing registers."));
960
961 gdb_assert (may_write_registers != 0);
962
963 for (t = ops->beneath; t != NULL; t = t->beneath)
964 if (t->to_store_registers != NULL)
965 {
966 t->to_store_registers (t, regcache, regno);
967 return;
968 }
969
970 noprocess ();
971}
972
973/* The to_prepare_to_store method of target record-btrace. */
974
975static void
976record_btrace_prepare_to_store (struct target_ops *ops,
977 struct regcache *regcache)
978{
979 struct target_ops *t;
980
981 if (record_btrace_is_replaying ())
982 return;
983
984 for (t = ops->beneath; t != NULL; t = t->beneath)
985 if (t->to_prepare_to_store != NULL)
986 {
987 t->to_prepare_to_store (t, regcache);
988 return;
989 }
990}
991
0b722aec
MM
/* The branch trace frame cache.  One instance per btrace frame, allocated
   on the frame obstack and registered in the bfcache hash table.  */

struct btrace_frame_cache
{
  /* The thread.  */
  struct thread_info *tp;

  /* The frame info.  */
  struct frame_info *frame;

  /* The branch trace function segment.  */
  const struct btrace_function *bfun;
};

/* A struct btrace_frame_cache hash table indexed by NEXT.
   Entries are hashed and compared on their FRAME pointer; see
   bfcache_hash and bfcache_eq.  */

static htab_t bfcache;
1009
1010/* hash_f for htab_create_alloc of bfcache. */
1011
1012static hashval_t
1013bfcache_hash (const void *arg)
1014{
1015 const struct btrace_frame_cache *cache = arg;
1016
1017 return htab_hash_pointer (cache->frame);
1018}
1019
1020/* eq_f for htab_create_alloc of bfcache. */
1021
1022static int
1023bfcache_eq (const void *arg1, const void *arg2)
1024{
1025 const struct btrace_frame_cache *cache1 = arg1;
1026 const struct btrace_frame_cache *cache2 = arg2;
1027
1028 return cache1->frame == cache2->frame;
1029}
1030
/* Create a new btrace frame cache for FRAME.

   The cache is zero-allocated on the frame obstack and inserted into the
   bfcache hash table; FRAME must not already have an entry (asserted).  */

static struct btrace_frame_cache *
bfcache_new (struct frame_info *frame)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
  cache->frame = frame;

  slot = htab_find_slot (bfcache, cache, INSERT);
  gdb_assert (*slot == NULL);
  *slot = cache;

  return cache;
}
1048
1049/* Extract the branch trace function from a branch trace frame. */
1050
1051static const struct btrace_function *
1052btrace_get_frame_function (struct frame_info *frame)
1053{
1054 const struct btrace_frame_cache *cache;
1055 const struct btrace_function *bfun;
1056 struct btrace_frame_cache pattern;
1057 void **slot;
1058
1059 pattern.frame = frame;
1060
1061 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1062 if (slot == NULL)
1063 return NULL;
1064
1065 cache = *slot;
1066 return cache->bfun;
1067}
1068
cecac1ab
MM
/* Implement stop_reason method for record_btrace_frame_unwind.

   Unwinding stops (UNWIND_UNAVAILABLE) when the cached function segment has
   no caller link; otherwise unwinding may continue.  */

static enum unwind_stop_reason
record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
                                        void **this_cache)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;

  cache = *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  if (bfun->up == NULL)
    return UNWIND_UNAVAILABLE;

  return UNWIND_NO_REASON;
}
1087
/* Implement this_id method for record_btrace_frame_unwind.

   Builds an unavailable-stack frame id from the frame's function address
   and the number of the first segment of this function instance (found by
   walking segment.prev to the front), so all segments of one instance
   share one frame id.  */

static void
record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
                             struct frame_id *this_id)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;
  CORE_ADDR code, special;

  cache = *this_cache;

  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  /* Walk back to the first segment of this function instance.  */
  while (bfun->segment.prev != NULL)
    bfun = bfun->segment.prev;

  code = get_frame_func (this_frame);
  special = bfun->number;

  *this_id = frame_id_build_unavailable_stack_special (code, special);

  DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
         btrace_get_bfun_name (cache->bfun),
         core_addr_to_string_nz (this_id->code_addr),
         core_addr_to_string_nz (this_id->special_addr));
}
1116
/* Implement prev_register method for record_btrace_frame_unwind.

   Only the PC can be unwound from the trace.  If the caller link records a
   return (BFUN_UP_LINKS_TO_RET), the unwound PC is the first instruction of
   the caller segment; otherwise it is the address after the caller's last
   recorded instruction (the call site's successor).  */

static struct value *
record_btrace_frame_prev_register (struct frame_info *this_frame,
                                   void **this_cache,
                                   int regnum)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun, *caller;
  const struct btrace_insn *insn;
  struct gdbarch *gdbarch;
  CORE_ADDR pc;
  int pcreg;

  gdbarch = get_frame_arch (this_frame);
  pcreg = gdbarch_pc_regnum (gdbarch);
  if (pcreg < 0 || regnum != pcreg)
    throw_error (NOT_AVAILABLE_ERROR,
                 _("Registers are not available in btrace record history"));

  cache = *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  caller = bfun->up;
  if (caller == NULL)
    throw_error (NOT_AVAILABLE_ERROR,
                 _("No caller in btrace record history"));

  if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
    {
      insn = VEC_index (btrace_insn_s, caller->insn, 0);
      pc = insn->pc;
    }
  else
    {
      insn = VEC_last (btrace_insn_s, caller->insn);
      pc = insn->pc;

      /* Step past the call instruction.  */
      pc += gdb_insn_length (gdbarch, pc);
    }

  DEBUG ("[frame] unwound PC in %s on level %d: %s",
         btrace_get_bfun_name (bfun), bfun->level,
         core_addr_to_string_nz (pc));

  return frame_unwind_got_address (this_frame, regnum, pc);
}
1165
1166/* Implement sniffer method for record_btrace_frame_unwind. */
1167
1168static int
1169record_btrace_frame_sniffer (const struct frame_unwind *self,
1170 struct frame_info *this_frame,
1171 void **this_cache)
1172{
0b722aec
MM
1173 const struct btrace_function *bfun;
1174 struct btrace_frame_cache *cache;
cecac1ab 1175 struct thread_info *tp;
0b722aec 1176 struct frame_info *next;
cecac1ab
MM
1177
1178 /* THIS_FRAME does not contain a reference to its thread. */
1179 tp = find_thread_ptid (inferior_ptid);
1180 gdb_assert (tp != NULL);
1181
0b722aec
MM
1182 bfun = NULL;
1183 next = get_next_frame (this_frame);
1184 if (next == NULL)
1185 {
1186 const struct btrace_insn_iterator *replay;
1187
1188 replay = tp->btrace.replay;
1189 if (replay != NULL)
1190 bfun = replay->function;
1191 }
1192 else
1193 {
1194 const struct btrace_function *callee;
1195
1196 callee = btrace_get_frame_function (next);
1197 if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1198 bfun = callee->up;
1199 }
1200
1201 if (bfun == NULL)
1202 return 0;
1203
1204 DEBUG ("[frame] sniffed frame for %s on level %d",
1205 btrace_get_bfun_name (bfun), bfun->level);
1206
1207 /* This is our frame. Initialize the frame cache. */
1208 cache = bfcache_new (this_frame);
1209 cache->tp = tp;
1210 cache->bfun = bfun;
1211
1212 *this_cache = cache;
1213 return 1;
1214}
1215
1216/* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1217
1218static int
1219record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
1220 struct frame_info *this_frame,
1221 void **this_cache)
1222{
1223 const struct btrace_function *bfun, *callee;
1224 struct btrace_frame_cache *cache;
1225 struct frame_info *next;
1226
1227 next = get_next_frame (this_frame);
1228 if (next == NULL)
1229 return 0;
1230
1231 callee = btrace_get_frame_function (next);
1232 if (callee == NULL)
1233 return 0;
1234
1235 if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1236 return 0;
1237
1238 bfun = callee->up;
1239 if (bfun == NULL)
1240 return 0;
1241
1242 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1243 btrace_get_bfun_name (bfun), bfun->level);
1244
1245 /* This is our frame. Initialize the frame cache. */
1246 cache = bfcache_new (this_frame);
1247 cache->tp = find_thread_ptid (inferior_ptid);
1248 cache->bfun = bfun;
1249
1250 *this_cache = cache;
1251 return 1;
1252}
1253
1254static void
1255record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
1256{
1257 struct btrace_frame_cache *cache;
1258 void **slot;
1259
1260 cache = this_cache;
1261
1262 slot = htab_find_slot (bfcache, cache, NO_INSERT);
1263 gdb_assert (slot != NULL);
1264
1265 htab_remove_elt (bfcache, cache);
cecac1ab
MM
1266}
1267
/* btrace recording does not store previous memory content, nor the stack
   frames content.  Any unwinding would return erroneous results as the stack
   contents no longer match the changed PC value restored from history.
   Therefore this unwinder reports any possibly unwound registers as
   <unavailable>.  */

const struct frame_unwind record_btrace_frame_unwind =
{
  NORMAL_FRAME,                              /* type */
  record_btrace_frame_unwind_stop_reason,    /* stop_reason */
  record_btrace_frame_this_id,               /* this_id */
  record_btrace_frame_prev_register,         /* prev_register */
  NULL,                                      /* unwind_data */
  record_btrace_frame_sniffer,               /* sniffer */
  record_btrace_frame_dealloc_cache          /* dealloc_cache */
};
1284
/* Unwinder for frames entered via a tail call while replaying.  Like
   record_btrace_frame_unwind, it reports registers as <unavailable>.  */

const struct frame_unwind record_btrace_tailcall_frame_unwind =
{
  TAILCALL_FRAME,                            /* type */
  record_btrace_frame_unwind_stop_reason,    /* stop_reason */
  record_btrace_frame_this_id,               /* this_id */
  record_btrace_frame_prev_register,         /* prev_register */
  NULL,                                      /* unwind_data */
  record_btrace_tailcall_frame_sniffer,      /* sniffer */
  record_btrace_frame_dealloc_cache          /* dealloc_cache */
};
b2f4cfde 1295
52834460
MM
1296/* Indicate that TP should be resumed according to FLAG. */
1297
1298static void
1299record_btrace_resume_thread (struct thread_info *tp,
1300 enum btrace_thread_flag flag)
1301{
1302 struct btrace_thread_info *btinfo;
1303
1304 DEBUG ("resuming %d (%s): %u", tp->num, target_pid_to_str (tp->ptid), flag);
1305
1306 btinfo = &tp->btrace;
1307
1308 if ((btinfo->flags & BTHR_MOVE) != 0)
1309 error (_("Thread already moving."));
1310
1311 /* Fetch the latest branch trace. */
1312 btrace_fetch (tp);
1313
1314 btinfo->flags |= flag;
1315}
1316
1317/* Find the thread to resume given a PTID. */
1318
1319static struct thread_info *
1320record_btrace_find_resume_thread (ptid_t ptid)
1321{
1322 struct thread_info *tp;
1323
1324 /* When asked to resume everything, we pick the current thread. */
1325 if (ptid_equal (minus_one_ptid, ptid) || ptid_is_pid (ptid))
1326 ptid = inferior_ptid;
1327
1328 return find_thread_ptid (ptid);
1329}
1330
/* Start replaying a thread.  Returns the new replay iterator (also stored
   in TP's btrace info), or NULL if TP has no trace.  Throws on failure,
   after undoing any partial state changes.  */

static struct btrace_insn_iterator *
record_btrace_start_replaying (struct thread_info *tp)
{
  volatile struct gdb_exception except;
  struct btrace_insn_iterator *replay;
  struct btrace_thread_info *btinfo;
  int executing;

  btinfo = &tp->btrace;
  replay = NULL;

  /* We can't start replaying without trace.  */
  if (btinfo->begin == NULL)
    return NULL;

  /* Clear the executing flag to allow changes to the current frame.
     We are not actually running, yet.  We just started a reverse execution
     command or a record goto command.
     For the latter, EXECUTING is false and this has no effect.
     For the former, EXECUTING is true and we're in to_wait, about to
     move the thread.  Since we need to recompute the stack, we temporarily
     set EXECUTING to false.  */
  executing = is_executing (tp->ptid);
  set_executing (tp->ptid, 0);

  /* GDB stores the current frame_id when stepping in order to detect steps
     into subroutines.
     Since frames are computed differently when we're replaying, we need to
     recompute those stored frames and fix them up so we can still detect
     subroutines after we started replaying.  */
  TRY_CATCH (except, RETURN_MASK_ALL)
    {
      struct frame_info *frame;
      struct frame_id frame_id;
      int upd_step_frame_id, upd_step_stack_frame_id;

      /* The current frame without replaying - computed via normal unwind.  */
      frame = get_current_frame ();
      frame_id = get_frame_id (frame);

      /* Check if we need to update any stepping-related frame id's.  */
      upd_step_frame_id = frame_id_eq (frame_id,
				       tp->control.step_frame_id);
      upd_step_stack_frame_id = frame_id_eq (frame_id,
					     tp->control.step_stack_frame_id);

      /* We start replaying at the end of the branch trace.  This corresponds
	 to the current instruction.  */
      replay = xmalloc (sizeof (*replay));
      btrace_insn_end (replay, btinfo);

      /* We're not replaying, yet.  */
      gdb_assert (btinfo->replay == NULL);
      btinfo->replay = replay;

      /* Make sure we're not using any stale registers.  */
      registers_changed_ptid (tp->ptid);

      /* The current frame with replaying - computed via btrace unwind.  */
      frame = get_current_frame ();
      frame_id = get_frame_id (frame);

      /* Replace stepping related frames where necessary.  */
      if (upd_step_frame_id)
	tp->control.step_frame_id = frame_id;
      if (upd_step_stack_frame_id)
	tp->control.step_stack_frame_id = frame_id;
    }

  /* Restore the previous execution state.  */
  set_executing (tp->ptid, executing);

  /* On error, undo the replay setup before re-throwing.  */
  if (except.reason < 0)
    {
      xfree (btinfo->replay);
      btinfo->replay = NULL;

      registers_changed_ptid (tp->ptid);

      throw_exception (except);
    }

  return replay;
}
1417
1418/* Stop replaying a thread. */
1419
1420static void
1421record_btrace_stop_replaying (struct thread_info *tp)
1422{
1423 struct btrace_thread_info *btinfo;
1424
1425 btinfo = &tp->btrace;
1426
1427 xfree (btinfo->replay);
1428 btinfo->replay = NULL;
1429
1430 /* Make sure we're not leaving any stale registers. */
1431 registers_changed_ptid (tp->ptid);
1432}
1433
b2f4cfde
MM
1434/* The to_resume method of target record-btrace. */
1435
1436static void
1437record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
1438 enum gdb_signal signal)
1439{
52834460
MM
1440 struct thread_info *tp, *other;
1441 enum btrace_thread_flag flag;
1442
1443 DEBUG ("resume %s: %s", target_pid_to_str (ptid), step ? "step" : "cont");
1444
1445 tp = record_btrace_find_resume_thread (ptid);
1446 if (tp == NULL)
1447 error (_("Cannot find thread to resume."));
1448
1449 /* Stop replaying other threads if the thread to resume is not replaying. */
1450 if (!btrace_is_replaying (tp) && execution_direction != EXEC_REVERSE)
1451 ALL_THREADS (other)
1452 record_btrace_stop_replaying (other);
1453
b2f4cfde 1454 /* As long as we're not replaying, just forward the request. */
52834460 1455 if (!record_btrace_is_replaying () && execution_direction != EXEC_REVERSE)
b2f4cfde
MM
1456 {
1457 for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
1458 if (ops->to_resume != NULL)
1459 return ops->to_resume (ops, ptid, step, signal);
1460
1461 error (_("Cannot find target for stepping."));
1462 }
1463
52834460
MM
1464 /* Compute the btrace thread flag for the requested move. */
1465 if (step == 0)
1466 flag = execution_direction == EXEC_REVERSE ? BTHR_RCONT : BTHR_CONT;
1467 else
1468 flag = execution_direction == EXEC_REVERSE ? BTHR_RSTEP : BTHR_STEP;
1469
1470 /* At the moment, we only move a single thread. We could also move
1471 all threads in parallel by single-stepping each resumed thread
1472 until the first runs into an event.
1473 When we do that, we would want to continue all other threads.
1474 For now, just resume one thread to not confuse to_wait. */
1475 record_btrace_resume_thread (tp, flag);
1476
1477 /* We just indicate the resume intent here. The actual stepping happens in
1478 record_btrace_wait below. */
1479}
1480
1481/* Find a thread to move. */
1482
1483static struct thread_info *
1484record_btrace_find_thread_to_move (ptid_t ptid)
1485{
1486 struct thread_info *tp;
1487
1488 /* First check the parameter thread. */
1489 tp = find_thread_ptid (ptid);
1490 if (tp != NULL && (tp->btrace.flags & BTHR_MOVE) != 0)
1491 return tp;
1492
1493 /* Otherwise, find one other thread that has been resumed. */
1494 ALL_THREADS (tp)
1495 if ((tp->btrace.flags & BTHR_MOVE) != 0)
1496 return tp;
1497
1498 return NULL;
1499}
1500
1501/* Return a target_waitstatus indicating that we ran out of history. */
1502
1503static struct target_waitstatus
1504btrace_step_no_history (void)
1505{
1506 struct target_waitstatus status;
1507
1508 status.kind = TARGET_WAITKIND_NO_HISTORY;
1509
1510 return status;
1511}
1512
1513/* Return a target_waitstatus indicating that a step finished. */
1514
1515static struct target_waitstatus
1516btrace_step_stopped (void)
1517{
1518 struct target_waitstatus status;
1519
1520 status.kind = TARGET_WAITKIND_STOPPED;
1521 status.value.sig = GDB_SIGNAL_TRAP;
1522
1523 return status;
1524}
1525
1526/* Clear the record histories. */
1527
1528static void
1529record_btrace_clear_histories (struct btrace_thread_info *btinfo)
1530{
1531 xfree (btinfo->insn_history);
1532 xfree (btinfo->call_history);
1533
1534 btinfo->insn_history = NULL;
1535 btinfo->call_history = NULL;
1536}
1537
/* Step a single thread according to its pending move request (BTHR_*
   flags) and return the resulting wait status.  The move request is
   consumed.  */

static struct target_waitstatus
record_btrace_step_thread (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay, end;
  struct btrace_thread_info *btinfo;
  struct address_space *aspace;
  struct inferior *inf;
  enum btrace_thread_flag flags;
  unsigned int steps;

  btinfo = &tp->btrace;
  replay = btinfo->replay;

  /* Extract and clear the move request - it is handled right here.  */
  flags = btinfo->flags & BTHR_MOVE;
  btinfo->flags &= ~BTHR_MOVE;

  DEBUG ("stepping %d (%s): %u", tp->num, target_pid_to_str (tp->ptid), flags);

  switch (flags)
    {
    default:
      internal_error (__FILE__, __LINE__, _("invalid stepping type."));

    case BTHR_STEP:
      /* We're done if we're not replaying.  */
      if (replay == NULL)
	return btrace_step_no_history ();

      /* We are always able to step at least once.  */
      steps = btrace_insn_next (replay, 1);
      gdb_assert (steps == 1);

      /* Determine the end of the instruction trace.  */
      btrace_insn_end (&end, btinfo);

      /* We stop replaying if we reached the end of the trace.  */
      if (btrace_insn_cmp (replay, &end) == 0)
	record_btrace_stop_replaying (tp);

      return btrace_step_stopped ();

    case BTHR_RSTEP:
      /* Start replaying if we're not already doing so.  */
      if (replay == NULL)
	replay = record_btrace_start_replaying (tp);

      /* If we can't step any further, we reached the end of the history.  */
      steps = btrace_insn_prev (replay, 1);
      if (steps == 0)
	return btrace_step_no_history ();

      return btrace_step_stopped ();

    case BTHR_CONT:
      /* We're done if we're not replaying.  */
      if (replay == NULL)
	return btrace_step_no_history ();

      inf = find_inferior_pid (ptid_get_pid (tp->ptid));
      aspace = inf->aspace;

      /* Determine the end of the instruction trace.  */
      btrace_insn_end (&end, btinfo);

      /* Step forward one instruction at a time until we either reach the
	 end of the trace or hit a breakpoint.  */
      for (;;)
	{
	  const struct btrace_insn *insn;

	  /* We are always able to step at least once.  */
	  steps = btrace_insn_next (replay, 1);
	  gdb_assert (steps == 1);

	  /* We stop replaying if we reached the end of the trace.  */
	  if (btrace_insn_cmp (replay, &end) == 0)
	    {
	      record_btrace_stop_replaying (tp);
	      return btrace_step_no_history ();
	    }

	  insn = btrace_insn_get (replay);
	  gdb_assert (insn);

	  DEBUG ("stepping %d (%s) ... %s", tp->num,
		 target_pid_to_str (tp->ptid),
		 core_addr_to_string_nz (insn->pc));

	  if (breakpoint_here_p (aspace, insn->pc))
	    return btrace_step_stopped ();
	}

    case BTHR_RCONT:
      /* Start replaying if we're not already doing so.  */
      if (replay == NULL)
	replay = record_btrace_start_replaying (tp);

      inf = find_inferior_pid (ptid_get_pid (tp->ptid));
      aspace = inf->aspace;

      /* Step backward one instruction at a time until we either run out of
	 history or hit a breakpoint.  */
      for (;;)
	{
	  const struct btrace_insn *insn;

	  /* If we can't step any further, we're done.  */
	  steps = btrace_insn_prev (replay, 1);
	  if (steps == 0)
	    return btrace_step_no_history ();

	  insn = btrace_insn_get (replay);
	  gdb_assert (insn);

	  DEBUG ("reverse-stepping %d (%s) ... %s", tp->num,
		 target_pid_to_str (tp->ptid),
		 core_addr_to_string_nz (insn->pc));

	  if (breakpoint_here_p (aspace, insn->pc))
	    return btrace_step_stopped ();
	}
    }
}
1659
1660/* The to_wait method of target record-btrace. */
1661
1662static ptid_t
1663record_btrace_wait (struct target_ops *ops, ptid_t ptid,
1664 struct target_waitstatus *status, int options)
1665{
52834460
MM
1666 struct thread_info *tp, *other;
1667
1668 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);
1669
b2f4cfde 1670 /* As long as we're not replaying, just forward the request. */
52834460 1671 if (!record_btrace_is_replaying () && execution_direction != EXEC_REVERSE)
b2f4cfde
MM
1672 {
1673 for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
1674 if (ops->to_wait != NULL)
1675 return ops->to_wait (ops, ptid, status, options);
1676
1677 error (_("Cannot find target for waiting."));
1678 }
1679
52834460
MM
1680 /* Let's find a thread to move. */
1681 tp = record_btrace_find_thread_to_move (ptid);
1682 if (tp == NULL)
1683 {
1684 DEBUG ("wait %s: no thread", target_pid_to_str (ptid));
1685
1686 status->kind = TARGET_WAITKIND_IGNORE;
1687 return minus_one_ptid;
1688 }
1689
1690 /* We only move a single thread. We're not able to correlate threads. */
1691 *status = record_btrace_step_thread (tp);
1692
1693 /* Stop all other threads. */
1694 if (!non_stop)
1695 ALL_THREADS (other)
1696 other->btrace.flags &= ~BTHR_MOVE;
1697
1698 /* Start record histories anew from the current position. */
1699 record_btrace_clear_histories (&tp->btrace);
1700
1701 /* We moved the replay position but did not update registers. */
1702 registers_changed_ptid (tp->ptid);
1703
1704 return tp->ptid;
1705}
1706
/* The to_can_execute_reverse method of target record-btrace.  */

static int
record_btrace_can_execute_reverse (void)
{
  /* Replaying recorded branch trace always supports reverse execution.  */
  return 1;
}
1714
1715/* The to_decr_pc_after_break method of target record-btrace. */
1716
1717static CORE_ADDR
1718record_btrace_decr_pc_after_break (struct target_ops *ops,
1719 struct gdbarch *gdbarch)
1720{
1721 /* When replaying, we do not actually execute the breakpoint instruction
1722 so there is no need to adjust the PC after hitting a breakpoint. */
1723 if (record_btrace_is_replaying ())
1724 return 0;
1725
1726 return forward_target_decr_pc_after_break (ops->beneath, gdbarch);
b2f4cfde
MM
1727}
1728
e2887aa3
MM
1729/* The to_find_new_threads method of target record-btrace. */
1730
1731static void
1732record_btrace_find_new_threads (struct target_ops *ops)
1733{
1734 /* Don't expect new threads if we're replaying. */
1735 if (record_btrace_is_replaying ())
1736 return;
1737
1738 /* Forward the request. */
1739 for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
1740 if (ops->to_find_new_threads != NULL)
1741 {
1742 ops->to_find_new_threads (ops);
1743 break;
1744 }
1745}
1746
1747/* The to_thread_alive method of target record-btrace. */
1748
1749static int
1750record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
1751{
1752 /* We don't add or remove threads during replay. */
1753 if (record_btrace_is_replaying ())
1754 return find_thread_ptid (ptid) != NULL;
1755
1756 /* Forward the request. */
1757 for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
1758 if (ops->to_thread_alive != NULL)
1759 return ops->to_thread_alive (ops, ptid);
1760
1761 return 0;
1762}
1763
066ce621
MM
1764/* Set the replay branch trace instruction iterator. If IT is NULL, replay
1765 is stopped. */
1766
1767static void
1768record_btrace_set_replay (struct thread_info *tp,
1769 const struct btrace_insn_iterator *it)
1770{
1771 struct btrace_thread_info *btinfo;
1772
1773 btinfo = &tp->btrace;
1774
1775 if (it == NULL || it->function == NULL)
52834460 1776 record_btrace_stop_replaying (tp);
066ce621
MM
1777 else
1778 {
1779 if (btinfo->replay == NULL)
52834460 1780 record_btrace_start_replaying (tp);
066ce621
MM
1781 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
1782 return;
1783
1784 *btinfo->replay = *it;
52834460 1785 registers_changed_ptid (tp->ptid);
066ce621
MM
1786 }
1787
52834460
MM
1788 /* Start anew from the new replay position. */
1789 record_btrace_clear_histories (btinfo);
066ce621
MM
1790}
1791
1792/* The to_goto_record_begin method of target record-btrace. */
1793
1794static void
1795record_btrace_goto_begin (void)
1796{
1797 struct thread_info *tp;
1798 struct btrace_insn_iterator begin;
1799
1800 tp = require_btrace_thread ();
1801
1802 btrace_insn_begin (&begin, &tp->btrace);
1803 record_btrace_set_replay (tp, &begin);
1804
1805 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
1806}
1807
1808/* The to_goto_record_end method of target record-btrace. */
1809
1810static void
1811record_btrace_goto_end (void)
1812{
1813 struct thread_info *tp;
1814
1815 tp = require_btrace_thread ();
1816
1817 record_btrace_set_replay (tp, NULL);
1818
1819 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
1820}
1821
1822/* The to_goto_record method of target record-btrace. */
1823
1824static void
1825record_btrace_goto (ULONGEST insn)
1826{
1827 struct thread_info *tp;
1828 struct btrace_insn_iterator it;
1829 unsigned int number;
1830 int found;
1831
1832 number = insn;
1833
1834 /* Check for wrap-arounds. */
1835 if (number != insn)
1836 error (_("Instruction number out of range."));
1837
1838 tp = require_btrace_thread ();
1839
1840 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
1841 if (found == 0)
1842 error (_("No such instruction."));
1843
1844 record_btrace_set_replay (tp, &it);
1845
1846 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
1847}
1848
afedecd3
MM
/* Initialize the record-btrace target ops.  */

static void
init_record_btrace_ops (void)
{
  struct target_ops *ops;

  ops = &record_btrace_ops;

  /* Identification.  */
  ops->to_shortname = "record-btrace";
  ops->to_longname = "Branch tracing target";
  ops->to_doc = "Collect control-flow trace and provide the execution history.";

  /* Target life cycle.  */
  ops->to_open = record_btrace_open;
  ops->to_close = record_btrace_close;
  ops->to_detach = record_detach;
  ops->to_disconnect = record_disconnect;
  ops->to_mourn_inferior = record_mourn_inferior;
  ops->to_kill = record_kill;
  ops->to_create_inferior = find_default_create_inferior;

  /* Recording and history browsing.  */
  ops->to_stop_recording = record_btrace_stop_recording;
  ops->to_info_record = record_btrace_info;
  ops->to_insn_history = record_btrace_insn_history;
  ops->to_insn_history_from = record_btrace_insn_history_from;
  ops->to_insn_history_range = record_btrace_insn_history_range;
  ops->to_call_history = record_btrace_call_history;
  ops->to_call_history_from = record_btrace_call_history_from;
  ops->to_call_history_range = record_btrace_call_history_range;
  ops->to_record_is_replaying = record_btrace_is_replaying;

  /* Memory and breakpoints during replay.  */
  ops->to_xfer_partial = record_btrace_xfer_partial;
  ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
  ops->to_insert_breakpoint = record_btrace_insert_breakpoint;

  /* Registers and unwinding during replay.  */
  ops->to_fetch_registers = record_btrace_fetch_registers;
  ops->to_store_registers = record_btrace_store_registers;
  ops->to_prepare_to_store = record_btrace_prepare_to_store;
  ops->to_get_unwinder = &record_btrace_frame_unwind;
  ops->to_get_tailcall_unwinder = &record_btrace_tailcall_frame_unwind;

  /* Execution control.  */
  ops->to_resume = record_btrace_resume;
  ops->to_wait = record_btrace_wait;
  ops->to_find_new_threads = record_btrace_find_new_threads;
  ops->to_thread_alive = record_btrace_thread_alive;
  ops->to_goto_record_begin = record_btrace_goto_begin;
  ops->to_goto_record_end = record_btrace_goto_end;
  ops->to_goto_record = record_btrace_goto;
  ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
  ops->to_decr_pc_after_break = record_btrace_decr_pc_after_break;

  ops->to_stratum = record_stratum;
  ops->to_magic = OPS_MAGIC;
}
1896
1897/* Alias for "target record". */
1898
1899static void
1900cmd_record_btrace_start (char *args, int from_tty)
1901{
1902 if (args != NULL && *args != 0)
1903 error (_("Invalid argument."));
1904
1905 execute_command ("target record-btrace", from_tty);
1906}
1907
1908void _initialize_record_btrace (void);
1909
1910/* Initialize btrace commands. */
1911
1912void
1913_initialize_record_btrace (void)
1914{
1915 add_cmd ("btrace", class_obscure, cmd_record_btrace_start,
1916 _("Start branch trace recording."),
1917 &record_cmdlist);
1918 add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
1919
1920 init_record_btrace_ops ();
1921 add_target (&record_btrace_ops);
0b722aec
MM
1922
1923 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
1924 xcalloc, xfree);
afedecd3 1925}
This page took 0.232306 seconds and 4 git commands to generate.