gdb/record-btrace.c
1/* Branch trace support for GDB, the GNU debugger.
2
 3 Copyright (C) 2013-2014 Free Software Foundation, Inc.
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22#include "defs.h"
23#include "record.h"
24#include "gdbthread.h"
25#include "target.h"
26#include "gdbcmd.h"
27#include "disasm.h"
28#include "observer.h"
29#include "exceptions.h"
30#include "cli/cli-utils.h"
31#include "source.h"
32#include "ui-out.h"
33#include "symtab.h"
34#include "filenames.h"
 35#include "regcache.h"
 36#include "frame-unwind.h"
 37#include "hashtab.h"
38
39/* The target_ops of record-btrace. */
40static struct target_ops record_btrace_ops;
41
42/* A new thread observer enabling branch tracing for the new thread. */
43static struct observer *record_btrace_thread_observer;
44
45/* Temporarily allow memory accesses. */
46static int record_btrace_allow_memory_access;
47
48/* Print a record-btrace debug message. Use do ... while (0) to avoid
49 ambiguities when used in if statements. */
50
51#define DEBUG(msg, args...) \
52 do \
53 { \
54 if (record_debug != 0) \
55 fprintf_unfiltered (gdb_stdlog, \
56 "[record-btrace] " msg "\n", ##args); \
57 } \
58 while (0)
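/* Editorial note, not part of the original source: a minimal sketch of the
   ambiguity the do ... while (0) wrapper avoids.  With a bare braced block,
   the semicolon the caller writes after the macro would terminate the `if'
   early and detach the `else':

     if (record_debug)
       DEBUG ("enabled");
     else
       warning (_("record debugging is off"));

   Wrapped in do ... while (0), the expansion is a single statement and the
   `else' still binds to the intended `if'.  */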
59
60
61/* Update the branch trace for the current thread and return a pointer to its
 62 thread_info.
63
64 Throws an error if there is no thread or no trace. This function never
65 returns NULL. */
66
67static struct thread_info *
68require_btrace_thread (void)
69{
70 struct thread_info *tp;
71
72 DEBUG ("require");
73
74 tp = find_thread_ptid (inferior_ptid);
75 if (tp == NULL)
76 error (_("No thread."));
77
78 btrace_fetch (tp);
79
 80 if (btrace_is_empty (tp))
81 error (_("No trace."));
82
83 return tp;
84}
85
86/* Update the branch trace for the current thread and return a pointer to its
87 branch trace information struct.
88
89 Throws an error if there is no thread or no trace. This function never
90 returns NULL. */
91
92static struct btrace_thread_info *
93require_btrace (void)
94{
95 struct thread_info *tp;
96
97 tp = require_btrace_thread ();
98
99 return &tp->btrace;
100}
101
102/* Enable branch tracing for one thread. Warn on errors. */
103
104static void
105record_btrace_enable_warn (struct thread_info *tp)
106{
107 volatile struct gdb_exception error;
108
109 TRY_CATCH (error, RETURN_MASK_ERROR)
110 btrace_enable (tp);
111
112 if (error.message != NULL)
113 warning ("%s", error.message);
114}
115
116/* Callback function to disable branch tracing for one thread. */
117
118static void
119record_btrace_disable_callback (void *arg)
120{
121 struct thread_info *tp;
122
123 tp = arg;
124
125 btrace_disable (tp);
126}
127
128/* Enable automatic tracing of new threads. */
129
130static void
131record_btrace_auto_enable (void)
132{
133 DEBUG ("attach thread observer");
134
135 record_btrace_thread_observer
136 = observer_attach_new_thread (record_btrace_enable_warn);
137}
138
139/* Disable automatic tracing of new threads. */
140
141static void
142record_btrace_auto_disable (void)
143{
 144 /* The observer may already have been detached. */
145 if (record_btrace_thread_observer == NULL)
146 return;
147
148 DEBUG ("detach thread observer");
149
150 observer_detach_new_thread (record_btrace_thread_observer);
151 record_btrace_thread_observer = NULL;
152}
153
154/* The to_open method of target record-btrace. */
155
156static void
157record_btrace_open (char *args, int from_tty)
158{
159 struct cleanup *disable_chain;
160 struct thread_info *tp;
161
162 DEBUG ("open");
163
 164 record_preopen ();
165
166 if (!target_has_execution)
167 error (_("The program is not being run."));
168
169 if (!target_supports_btrace ())
170 error (_("Target does not support branch tracing."));
171
172 if (non_stop)
173 error (_("Record btrace can't debug inferior in non-stop mode."));
174
175 gdb_assert (record_btrace_thread_observer == NULL);
176
177 disable_chain = make_cleanup (null_cleanup, NULL);
178 ALL_THREADS (tp)
179 if (args == NULL || *args == 0 || number_is_in_list (args, tp->num))
180 {
181 btrace_enable (tp);
182
183 make_cleanup (record_btrace_disable_callback, tp);
184 }
185
186 record_btrace_auto_enable ();
187
188 push_target (&record_btrace_ops);
189
190 observer_notify_record_changed (current_inferior (), 1);
191
192 discard_cleanups (disable_chain);
193}
194
195/* The to_stop_recording method of target record-btrace. */
196
197static void
198record_btrace_stop_recording (void)
199{
200 struct thread_info *tp;
201
202 DEBUG ("stop recording");
203
204 record_btrace_auto_disable ();
205
206 ALL_THREADS (tp)
207 if (tp->btrace.target != NULL)
208 btrace_disable (tp);
209}
210
211/* The to_close method of target record-btrace. */
212
213static void
 214record_btrace_close (void)
 215{
216 struct thread_info *tp;
217
218 /* Make sure automatic recording gets disabled even if we did not stop
219 recording before closing the record-btrace target. */
220 record_btrace_auto_disable ();
221
222 /* We should have already stopped recording.
223 Tear down btrace in case we have not. */
224 ALL_THREADS (tp)
225 btrace_teardown (tp);
226}
227
228/* The to_info_record method of target record-btrace. */
229
230static void
231record_btrace_info (void)
232{
233 struct btrace_thread_info *btinfo;
234 struct thread_info *tp;
 235 unsigned int insns, calls;
236
237 DEBUG ("info");
238
239 tp = find_thread_ptid (inferior_ptid);
240 if (tp == NULL)
241 error (_("No thread."));
242
243 btrace_fetch (tp);
244
245 insns = 0;
246 calls = 0;
247
 248 btinfo = &tp->btrace;
249
250 if (!btrace_is_empty (tp))
251 {
252 struct btrace_call_iterator call;
253 struct btrace_insn_iterator insn;
254
255 btrace_call_end (&call, btinfo);
256 btrace_call_prev (&call, 1);
 257 calls = btrace_call_number (&call);
258
259 btrace_insn_end (&insn, btinfo);
260 btrace_insn_prev (&insn, 1);
 261 insns = btrace_insn_number (&insn);
 262 }
263
264 printf_unfiltered (_("Recorded %u instructions in %u functions for thread "
 265 "%d (%s).\n"), insns, calls, tp->num,
 266 target_pid_to_str (tp->ptid));
267
268 if (btrace_is_replaying (tp))
269 printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
270 btrace_insn_number (btinfo->replay));
271}
272
273/* Print an unsigned int. */
274
275static void
276ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
277{
278 ui_out_field_fmt (uiout, fld, "%u", val);
279}
280
281/* Disassemble a section of the recorded instruction trace. */
282
283static void
284btrace_insn_history (struct ui_out *uiout,
285 const struct btrace_insn_iterator *begin,
286 const struct btrace_insn_iterator *end, int flags)
287{
288 struct gdbarch *gdbarch;
 289 struct btrace_insn_iterator it;
 290
291 DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
292 btrace_insn_number (end));
293
294 gdbarch = target_gdbarch ();
295
 296 for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
 297 {
298 const struct btrace_insn *insn;
299
300 insn = btrace_insn_get (&it);
301
 302 /* Print the instruction index. */
 303 ui_out_field_uint (uiout, "index", btrace_insn_number (&it));
304 ui_out_text (uiout, "\t");
305
306 /* Disassembly with '/m' flag may not produce the expected result.
307 See PR gdb/11833. */
 308 gdb_disassembly (gdbarch, uiout, NULL, flags, 1, insn->pc, insn->pc + 1);
309 }
310}
311
312/* The to_insn_history method of target record-btrace. */
313
314static void
315record_btrace_insn_history (int size, int flags)
316{
317 struct btrace_thread_info *btinfo;
318 struct btrace_insn_history *history;
319 struct btrace_insn_iterator begin, end;
320 struct cleanup *uiout_cleanup;
321 struct ui_out *uiout;
 322 unsigned int context, covered;
323
324 uiout = current_uiout;
325 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
326 "insn history");
 327 context = abs (size);
328 if (context == 0)
329 error (_("Bad record instruction-history-size."));
330
331 btinfo = require_btrace ();
332 history = btinfo->insn_history;
333 if (history == NULL)
 334 {
 335 struct btrace_insn_iterator *replay;
 336
 337 DEBUG ("insn-history (0x%x): %d", flags, size);
 338
339 /* If we're replaying, we start at the replay position. Otherwise, we
340 start at the tail of the trace. */
341 replay = btinfo->replay;
342 if (replay != NULL)
343 begin = *replay;
344 else
345 btrace_insn_end (&begin, btinfo);
346
347 /* We start from here and expand in the requested direction. Then we
348 expand in the other direction, as well, to fill up any remaining
349 context. */
350 end = begin;
351 if (size < 0)
352 {
353 /* We want the current position covered, as well. */
354 covered = btrace_insn_next (&end, 1);
355 covered += btrace_insn_prev (&begin, context - covered);
356 covered += btrace_insn_next (&end, context - covered);
357 }
358 else
359 {
360 covered = btrace_insn_next (&end, context);
361 covered += btrace_insn_prev (&begin, context - covered);
362 }
363 }
364 else
365 {
366 begin = history->begin;
367 end = history->end;
 368
369 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
370 btrace_insn_number (&begin), btrace_insn_number (&end));
 371
372 if (size < 0)
373 {
374 end = begin;
375 covered = btrace_insn_prev (&begin, context);
376 }
377 else
378 {
379 begin = end;
380 covered = btrace_insn_next (&end, context);
381 }
382 }
383
384 if (covered > 0)
385 btrace_insn_history (uiout, &begin, &end, flags);
386 else
387 {
388 if (size < 0)
389 printf_unfiltered (_("At the start of the branch trace record.\n"));
390 else
391 printf_unfiltered (_("At the end of the branch trace record.\n"));
392 }
 393
 394 btrace_set_insn_history (btinfo, &begin, &end);
395 do_cleanups (uiout_cleanup);
396}
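/* Editorial note, not part of the original source: a worked example of the
   expansion above, assuming a fresh backward request with a size of 10.
   BEGIN/END start at the replay position (or the trace end);
   btrace_insn_next first covers the current instruction, then
   btrace_insn_prev tries to move BEGIN back by the remaining 9.  If only 3
   older instructions exist, the final btrace_insn_next grows END forward by
   the 6 that could not be covered, so the printed window still holds up to
   10 instructions.  */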
397
398/* The to_insn_history_range method of target record-btrace. */
399
400static void
401record_btrace_insn_history_range (ULONGEST from, ULONGEST to, int flags)
402{
403 struct btrace_thread_info *btinfo;
404 struct btrace_insn_history *history;
405 struct btrace_insn_iterator begin, end;
406 struct cleanup *uiout_cleanup;
407 struct ui_out *uiout;
408 unsigned int low, high;
409 int found;
410
411 uiout = current_uiout;
412 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
413 "insn history");
414 low = from;
415 high = to;
 416
 417 DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);
418
419 /* Check for wrap-arounds. */
 420 if (low != from || high != to)
421 error (_("Bad range."));
422
 423 if (high < low)
424 error (_("Bad range."));
425
 426 btinfo = require_btrace ();
 427
428 found = btrace_find_insn_by_number (&begin, btinfo, low);
429 if (found == 0)
430 error (_("Range out of bounds."));
 431
432 found = btrace_find_insn_by_number (&end, btinfo, high);
433 if (found == 0)
434 {
435 /* Silently truncate the range. */
436 btrace_insn_end (&end, btinfo);
437 }
438 else
439 {
440 /* We want both begin and end to be inclusive. */
441 btrace_insn_next (&end, 1);
442 }
 443
444 btrace_insn_history (uiout, &begin, &end, flags);
445 btrace_set_insn_history (btinfo, &begin, &end);
446
447 do_cleanups (uiout_cleanup);
448}
449
450/* The to_insn_history_from method of target record-btrace. */
451
452static void
453record_btrace_insn_history_from (ULONGEST from, int size, int flags)
454{
455 ULONGEST begin, end, context;
456
457 context = abs (size);
458 if (context == 0)
459 error (_("Bad record instruction-history-size."));
460
461 if (size < 0)
462 {
463 end = from;
464
465 if (from < context)
466 begin = 0;
467 else
 468 begin = from - context + 1;
469 }
470 else
471 {
472 begin = from;
 473 end = from + context - 1;
474
475 /* Check for wrap-around. */
476 if (end < begin)
477 end = ULONGEST_MAX;
478 }
479
480 record_btrace_insn_history_range (begin, end, flags);
481}
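/* Editorial note, not part of the original source: a worked example of the
   range computation above.  Assuming FROM = 100 and a history size of 10,
   SIZE = -10 gives CONTEXT = 10 and the inclusive range
   [100 - 10 + 1, 100] = [91, 100], while SIZE = +10 gives [100, 109].
   The begin is clamped to 0 and the end saturates at ULONGEST_MAX on
   wrap-around before record_btrace_insn_history_range is called.  */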
482
483/* Print the instruction number range for a function call history line. */
484
485static void
486btrace_call_history_insn_range (struct ui_out *uiout,
487 const struct btrace_function *bfun)
 488 {
489 unsigned int begin, end, size;
490
491 size = VEC_length (btrace_insn_s, bfun->insn);
492 gdb_assert (size > 0);
 493
 494 begin = bfun->insn_offset;
 495 end = begin + size - 1;
 496
 497 ui_out_field_uint (uiout, "insn begin", begin);
 498 ui_out_text (uiout, ",");
 499 ui_out_field_uint (uiout, "insn end", end);
500}
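/* Editorial note, not part of the original source: for a function segment
   whose first instruction has the global number bfun->insn_offset = 10 and
   which contains 5 instructions, the code above prints "10,14", i.e. an
   inclusive instruction number range.  */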
501
502/* Print the source line information for a function call history line. */
503
504static void
505btrace_call_history_src_line (struct ui_out *uiout,
506 const struct btrace_function *bfun)
507{
508 struct symbol *sym;
 509 int begin, end;
510
511 sym = bfun->sym;
512 if (sym == NULL)
513 return;
514
515 ui_out_field_string (uiout, "file",
516 symtab_to_filename_for_display (sym->symtab));
517
518 begin = bfun->lbegin;
519 end = bfun->lend;
520
521 if (end < begin)
522 return;
523
524 ui_out_text (uiout, ":");
 525 ui_out_field_int (uiout, "min line", begin);
 526
 527 if (end == begin)
528 return;
529
 530 ui_out_text (uiout, ",");
 531 ui_out_field_int (uiout, "max line", end);
532}
533
534/* Get the name of a branch trace function. */
535
536static const char *
537btrace_get_bfun_name (const struct btrace_function *bfun)
538{
539 struct minimal_symbol *msym;
540 struct symbol *sym;
541
542 if (bfun == NULL)
543 return "??";
544
545 msym = bfun->msym;
546 sym = bfun->sym;
547
548 if (sym != NULL)
549 return SYMBOL_PRINT_NAME (sym);
550 else if (msym != NULL)
551 return SYMBOL_PRINT_NAME (msym);
552 else
553 return "??";
554}
555
 556/* Print a section of the recorded function call trace. */
557
558static void
 559btrace_call_history (struct ui_out *uiout,
 560 const struct btrace_thread_info *btinfo,
561 const struct btrace_call_iterator *begin,
562 const struct btrace_call_iterator *end,
563 enum record_print_flag flags)
564{
 565 struct btrace_call_iterator it;
 566
567 DEBUG ("ftrace (0x%x): [%u; %u)", flags, btrace_call_number (begin),
568 btrace_call_number (end));
 569
 570 for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
 571 {
572 const struct btrace_function *bfun;
573 struct minimal_symbol *msym;
574 struct symbol *sym;
575
576 bfun = btrace_call_get (&it);
 577 sym = bfun->sym;
 578 msym = bfun->msym;
 579
 580 /* Print the function index. */
 581 ui_out_field_uint (uiout, "index", bfun->number);
582 ui_out_text (uiout, "\t");
583
584 if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
585 {
586 int level = bfun->level + btinfo->level, i;
587
588 for (i = 0; i < level; ++i)
589 ui_out_text (uiout, " ");
590 }
591
592 if (sym != NULL)
593 ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (sym));
594 else if (msym != NULL)
595 ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (msym));
596 else if (!ui_out_is_mi_like_p (uiout))
597 ui_out_field_string (uiout, "function", "??");
598
 599 if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
 600 {
 601 ui_out_text (uiout, _("\tinst "));
 602 btrace_call_history_insn_range (uiout, bfun);
603 }
604
 605 if ((flags & RECORD_PRINT_SRC_LINE) != 0)
 606 {
 607 ui_out_text (uiout, _("\tat "));
 608 btrace_call_history_src_line (uiout, bfun);
609 }
610
611 ui_out_text (uiout, "\n");
612 }
613}
614
615/* The to_call_history method of target record-btrace. */
616
617static void
618record_btrace_call_history (int size, int flags)
619{
620 struct btrace_thread_info *btinfo;
621 struct btrace_call_history *history;
622 struct btrace_call_iterator begin, end;
623 struct cleanup *uiout_cleanup;
624 struct ui_out *uiout;
 625 unsigned int context, covered;
626
627 uiout = current_uiout;
628 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
 629 "func history");
 630 context = abs (size);
631 if (context == 0)
632 error (_("Bad record function-call-history-size."));
633
634 btinfo = require_btrace ();
635 history = btinfo->call_history;
636 if (history == NULL)
 637 {
 638 struct btrace_insn_iterator *replay;
 639
 640 DEBUG ("call-history (0x%x): %d", flags, size);
 641
642 /* If we're replaying, we start at the replay position. Otherwise, we
643 start at the tail of the trace. */
644 replay = btinfo->replay;
645 if (replay != NULL)
646 {
647 begin.function = replay->function;
648 begin.btinfo = btinfo;
649 }
650 else
651 btrace_call_end (&begin, btinfo);
652
653 /* We start from here and expand in the requested direction. Then we
654 expand in the other direction, as well, to fill up any remaining
655 context. */
656 end = begin;
657 if (size < 0)
658 {
659 /* We want the current position covered, as well. */
660 covered = btrace_call_next (&end, 1);
661 covered += btrace_call_prev (&begin, context - covered);
662 covered += btrace_call_next (&end, context - covered);
663 }
664 else
665 {
666 covered = btrace_call_next (&end, context);
 667 covered += btrace_call_prev (&begin, context - covered);
668 }
669 }
670 else
671 {
672 begin = history->begin;
673 end = history->end;
 674
675 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", flags, size,
676 btrace_call_number (&begin), btrace_call_number (&end));
 677
678 if (size < 0)
679 {
680 end = begin;
681 covered = btrace_call_prev (&begin, context);
682 }
683 else
684 {
685 begin = end;
686 covered = btrace_call_next (&end, context);
687 }
688 }
689
 690 if (covered > 0)
 691 btrace_call_history (uiout, btinfo, &begin, &end, flags);
692 else
693 {
694 if (size < 0)
695 printf_unfiltered (_("At the start of the branch trace record.\n"));
696 else
697 printf_unfiltered (_("At the end of the branch trace record.\n"));
698 }
 699
 700 btrace_set_call_history (btinfo, &begin, &end);
701 do_cleanups (uiout_cleanup);
702}
703
704/* The to_call_history_range method of target record-btrace. */
705
706static void
707record_btrace_call_history_range (ULONGEST from, ULONGEST to, int flags)
708{
709 struct btrace_thread_info *btinfo;
710 struct btrace_call_history *history;
711 struct btrace_call_iterator begin, end;
712 struct cleanup *uiout_cleanup;
713 struct ui_out *uiout;
714 unsigned int low, high;
715 int found;
716
717 uiout = current_uiout;
718 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
719 "func history");
720 low = from;
721 high = to;
 722
 723 DEBUG ("call-history (0x%x): [%u; %u)", flags, low, high);
724
725 /* Check for wrap-arounds. */
 726 if (low != from || high != to)
727 error (_("Bad range."));
728
 729 if (high < low)
730 error (_("Bad range."));
731
 732 btinfo = require_btrace ();
 733
734 found = btrace_find_call_by_number (&begin, btinfo, low);
735 if (found == 0)
736 error (_("Range out of bounds."));
 737
738 found = btrace_find_call_by_number (&end, btinfo, high);
739 if (found == 0)
740 {
741 /* Silently truncate the range. */
742 btrace_call_end (&end, btinfo);
743 }
744 else
745 {
746 /* We want both begin and end to be inclusive. */
747 btrace_call_next (&end, 1);
748 }
 749
 750 btrace_call_history (uiout, btinfo, &begin, &end, flags);
 751 btrace_set_call_history (btinfo, &begin, &end);
752
753 do_cleanups (uiout_cleanup);
754}
755
756/* The to_call_history_from method of target record-btrace. */
757
758static void
759record_btrace_call_history_from (ULONGEST from, int size, int flags)
760{
761 ULONGEST begin, end, context;
762
763 context = abs (size);
764 if (context == 0)
765 error (_("Bad record function-call-history-size."));
766
767 if (size < 0)
768 {
769 end = from;
770
771 if (from < context)
772 begin = 0;
773 else
 774 begin = from - context + 1;
775 }
776 else
777 {
778 begin = from;
 779 end = from + context - 1;
780
781 /* Check for wrap-around. */
782 if (end < begin)
783 end = ULONGEST_MAX;
784 }
785
786 record_btrace_call_history_range (begin, end, flags);
787}
788
789/* The to_record_is_replaying method of target record-btrace. */
790
791static int
792record_btrace_is_replaying (void)
793{
794 struct thread_info *tp;
795
796 ALL_THREADS (tp)
797 if (btrace_is_replaying (tp))
798 return 1;
799
800 return 0;
801}
802
803/* The to_xfer_partial method of target record-btrace. */
804
805static LONGEST
806record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
807 const char *annex, gdb_byte *readbuf,
808 const gdb_byte *writebuf, ULONGEST offset,
809 ULONGEST len)
810{
811 struct target_ops *t;
812
813 /* Filter out requests that don't make sense during replay. */
814 if (!record_btrace_allow_memory_access && record_btrace_is_replaying ())
815 {
816 switch (object)
817 {
818 case TARGET_OBJECT_MEMORY:
819 {
820 struct target_section *section;
821
822 /* We do not allow writing memory in general. */
823 if (writebuf != NULL)
824 return TARGET_XFER_E_UNAVAILABLE;
825
826 /* We allow reading readonly memory. */
827 section = target_section_by_addr (ops, offset);
828 if (section != NULL)
829 {
830 /* Check if the section we found is readonly. */
831 if ((bfd_get_section_flags (section->the_bfd_section->owner,
832 section->the_bfd_section)
833 & SEC_READONLY) != 0)
834 {
835 /* Truncate the request to fit into this section. */
836 len = min (len, section->endaddr - offset);
837 break;
838 }
839 }
840
841 return TARGET_XFER_E_UNAVAILABLE;
842 }
843 }
844 }
845
846 /* Forward the request. */
847 for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
848 if (ops->to_xfer_partial != NULL)
849 return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
850 offset, len);
851
852 return TARGET_XFER_E_UNAVAILABLE;
853}
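/* Editorial note, not part of the original source: the net effect of the
   filter above while replaying is that writes are always rejected and reads
   are only satisfied from read-only sections such as .text or .rodata.
   For example, disassembling at the replayed PC works, whereas reading a
   (hypothetical) global in .data or a stack slot yields
   TARGET_XFER_E_UNAVAILABLE.  Breakpoint insertion still works because
   record_btrace_insert_breakpoint below temporarily sets
   record_btrace_allow_memory_access around the forwarded request.  */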
854
855/* The to_insert_breakpoint method of target record-btrace. */
856
857static int
858record_btrace_insert_breakpoint (struct target_ops *ops,
859 struct gdbarch *gdbarch,
860 struct bp_target_info *bp_tgt)
861{
862 volatile struct gdb_exception except;
863 int old, ret;
864
865 /* Inserting breakpoints requires accessing memory. Allow it for the
866 duration of this function. */
867 old = record_btrace_allow_memory_access;
868 record_btrace_allow_memory_access = 1;
869
870 ret = 0;
871 TRY_CATCH (except, RETURN_MASK_ALL)
872 ret = forward_target_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);
873
874 record_btrace_allow_memory_access = old;
875
876 if (except.reason < 0)
877 throw_exception (except);
878
879 return ret;
880}
881
882/* The to_remove_breakpoint method of target record-btrace. */
883
884static int
885record_btrace_remove_breakpoint (struct target_ops *ops,
886 struct gdbarch *gdbarch,
887 struct bp_target_info *bp_tgt)
888{
889 volatile struct gdb_exception except;
890 int old, ret;
891
892 /* Removing breakpoints requires accessing memory. Allow it for the
893 duration of this function. */
894 old = record_btrace_allow_memory_access;
895 record_btrace_allow_memory_access = 1;
896
897 ret = 0;
898 TRY_CATCH (except, RETURN_MASK_ALL)
899 ret = forward_target_remove_breakpoint (ops->beneath, gdbarch, bp_tgt);
900
901 record_btrace_allow_memory_access = old;
902
903 if (except.reason < 0)
904 throw_exception (except);
905
906 return ret;
907}
908
909/* The to_fetch_registers method of target record-btrace. */
910
911static void
912record_btrace_fetch_registers (struct target_ops *ops,
913 struct regcache *regcache, int regno)
914{
915 struct btrace_insn_iterator *replay;
916 struct thread_info *tp;
917
918 tp = find_thread_ptid (inferior_ptid);
919 gdb_assert (tp != NULL);
920
921 replay = tp->btrace.replay;
922 if (replay != NULL)
923 {
924 const struct btrace_insn *insn;
925 struct gdbarch *gdbarch;
926 int pcreg;
927
928 gdbarch = get_regcache_arch (regcache);
929 pcreg = gdbarch_pc_regnum (gdbarch);
930 if (pcreg < 0)
931 return;
932
933 /* We can only provide the PC register. */
934 if (regno >= 0 && regno != pcreg)
935 return;
936
937 insn = btrace_insn_get (replay);
938 gdb_assert (insn != NULL);
939
940 regcache_raw_supply (regcache, regno, &insn->pc);
941 }
942 else
943 {
944 struct target_ops *t;
945
946 for (t = ops->beneath; t != NULL; t = t->beneath)
947 if (t->to_fetch_registers != NULL)
948 {
949 t->to_fetch_registers (t, regcache, regno);
950 break;
951 }
952 }
953}
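/* Editorial note, not part of the original source: while replaying, only the
   PC register is supplied here, taken from the recorded instruction at the
   replay position.  Requests for other registers return without supplying a
   value; at frame level they are reported as <unavailable> by
   record_btrace_frame_prev_register below.  */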
954
955/* The to_store_registers method of target record-btrace. */
956
957static void
958record_btrace_store_registers (struct target_ops *ops,
959 struct regcache *regcache, int regno)
960{
961 struct target_ops *t;
962
963 if (record_btrace_is_replaying ())
964 error (_("This record target does not allow writing registers."));
965
966 gdb_assert (may_write_registers != 0);
967
968 for (t = ops->beneath; t != NULL; t = t->beneath)
969 if (t->to_store_registers != NULL)
970 {
971 t->to_store_registers (t, regcache, regno);
972 return;
973 }
974
975 noprocess ();
976}
977
978/* The to_prepare_to_store method of target record-btrace. */
979
980static void
981record_btrace_prepare_to_store (struct target_ops *ops,
982 struct regcache *regcache)
983{
984 struct target_ops *t;
985
986 if (record_btrace_is_replaying ())
987 return;
988
989 for (t = ops->beneath; t != NULL; t = t->beneath)
990 if (t->to_prepare_to_store != NULL)
991 {
992 t->to_prepare_to_store (t, regcache);
993 return;
994 }
995}
996
997/* The branch trace frame cache. */
998
999struct btrace_frame_cache
1000{
1001 /* The thread. */
1002 struct thread_info *tp;
1003
1004 /* The frame info. */
1005 struct frame_info *frame;
1006
1007 /* The branch trace function segment. */
1008 const struct btrace_function *bfun;
1009};
1010
1011/* A struct btrace_frame_cache hash table indexed by NEXT. */
1012
1013static htab_t bfcache;
1014
1015/* hash_f for htab_create_alloc of bfcache. */
1016
1017static hashval_t
1018bfcache_hash (const void *arg)
1019{
1020 const struct btrace_frame_cache *cache = arg;
1021
1022 return htab_hash_pointer (cache->frame);
1023}
1024
1025/* eq_f for htab_create_alloc of bfcache. */
1026
1027static int
1028bfcache_eq (const void *arg1, const void *arg2)
1029{
1030 const struct btrace_frame_cache *cache1 = arg1;
1031 const struct btrace_frame_cache *cache2 = arg2;
1032
1033 return cache1->frame == cache2->frame;
1034}
1035
1036/* Create a new btrace frame cache. */
1037
1038static struct btrace_frame_cache *
1039bfcache_new (struct frame_info *frame)
1040{
1041 struct btrace_frame_cache *cache;
1042 void **slot;
1043
1044 cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
1045 cache->frame = frame;
1046
1047 slot = htab_find_slot (bfcache, cache, INSERT);
1048 gdb_assert (*slot == NULL);
1049 *slot = cache;
1050
1051 return cache;
1052}
1053
1054/* Extract the branch trace function from a branch trace frame. */
1055
1056static const struct btrace_function *
1057btrace_get_frame_function (struct frame_info *frame)
1058{
1059 const struct btrace_frame_cache *cache;
1060 const struct btrace_function *bfun;
1061 struct btrace_frame_cache pattern;
1062 void **slot;
1063
1064 pattern.frame = frame;
1065
1066 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1067 if (slot == NULL)
1068 return NULL;
1069
1070 cache = *slot;
1071 return cache->bfun;
1072}
1073
1074/* Implement stop_reason method for record_btrace_frame_unwind. */
1075
1076static enum unwind_stop_reason
1077record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1078 void **this_cache)
1079{
1080 const struct btrace_frame_cache *cache;
1081 const struct btrace_function *bfun;
1082
1083 cache = *this_cache;
1084 bfun = cache->bfun;
1085 gdb_assert (bfun != NULL);
1086
1087 if (bfun->up == NULL)
1088 return UNWIND_UNAVAILABLE;
1089
1090 return UNWIND_NO_REASON;
1091}
1092
1093/* Implement this_id method for record_btrace_frame_unwind. */
1094
1095static void
1096record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
1097 struct frame_id *this_id)
1098{
1099 const struct btrace_frame_cache *cache;
1100 const struct btrace_function *bfun;
1101 CORE_ADDR code, special;
1102
1103 cache = *this_cache;
1104
1105 bfun = cache->bfun;
1106 gdb_assert (bfun != NULL);
1107
1108 while (bfun->segment.prev != NULL)
1109 bfun = bfun->segment.prev;
1110
1111 code = get_frame_func (this_frame);
1112 special = bfun->number;
1113
1114 *this_id = frame_id_build_unavailable_stack_special (code, special);
1115
1116 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1117 btrace_get_bfun_name (cache->bfun),
1118 core_addr_to_string_nz (this_id->code_addr),
1119 core_addr_to_string_nz (this_id->special_addr));
1120}
1121
1122/* Implement prev_register method for record_btrace_frame_unwind. */
1123
1124static struct value *
1125record_btrace_frame_prev_register (struct frame_info *this_frame,
1126 void **this_cache,
1127 int regnum)
1128{
1129 const struct btrace_frame_cache *cache;
1130 const struct btrace_function *bfun, *caller;
1131 const struct btrace_insn *insn;
1132 struct gdbarch *gdbarch;
1133 CORE_ADDR pc;
1134 int pcreg;
1135
1136 gdbarch = get_frame_arch (this_frame);
1137 pcreg = gdbarch_pc_regnum (gdbarch);
1138 if (pcreg < 0 || regnum != pcreg)
1139 throw_error (NOT_AVAILABLE_ERROR,
1140 _("Registers are not available in btrace record history"));
1141
1142 cache = *this_cache;
1143 bfun = cache->bfun;
1144 gdb_assert (bfun != NULL);
1145
1146 caller = bfun->up;
1147 if (caller == NULL)
1148 throw_error (NOT_AVAILABLE_ERROR,
1149 _("No caller in btrace record history"));
1150
1151 if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
1152 {
1153 insn = VEC_index (btrace_insn_s, caller->insn, 0);
1154 pc = insn->pc;
1155 }
1156 else
1157 {
1158 insn = VEC_last (btrace_insn_s, caller->insn);
1159 pc = insn->pc;
1160
1161 pc += gdb_insn_length (gdbarch, pc);
1162 }
1163
1164 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1165 btrace_get_bfun_name (bfun), bfun->level,
1166 core_addr_to_string_nz (pc));
1167
1168 return frame_unwind_got_address (this_frame, regnum, pc);
1169}
1170
1171/* Implement sniffer method for record_btrace_frame_unwind. */
1172
1173static int
1174record_btrace_frame_sniffer (const struct frame_unwind *self,
1175 struct frame_info *this_frame,
1176 void **this_cache)
1177{
1178 const struct btrace_function *bfun;
1179 struct btrace_frame_cache *cache;
 1180 struct thread_info *tp;
 1181 struct frame_info *next;
1182
1183 /* THIS_FRAME does not contain a reference to its thread. */
1184 tp = find_thread_ptid (inferior_ptid);
1185 gdb_assert (tp != NULL);
1186
1187 bfun = NULL;
1188 next = get_next_frame (this_frame);
1189 if (next == NULL)
1190 {
1191 const struct btrace_insn_iterator *replay;
1192
1193 replay = tp->btrace.replay;
1194 if (replay != NULL)
1195 bfun = replay->function;
1196 }
1197 else
1198 {
1199 const struct btrace_function *callee;
1200
1201 callee = btrace_get_frame_function (next);
1202 if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1203 bfun = callee->up;
1204 }
1205
1206 if (bfun == NULL)
1207 return 0;
1208
1209 DEBUG ("[frame] sniffed frame for %s on level %d",
1210 btrace_get_bfun_name (bfun), bfun->level);
1211
1212 /* This is our frame. Initialize the frame cache. */
1213 cache = bfcache_new (this_frame);
1214 cache->tp = tp;
1215 cache->bfun = bfun;
1216
1217 *this_cache = cache;
1218 return 1;
1219}
1220
1221/* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1222
1223static int
1224record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
1225 struct frame_info *this_frame,
1226 void **this_cache)
1227{
1228 const struct btrace_function *bfun, *callee;
1229 struct btrace_frame_cache *cache;
1230 struct frame_info *next;
1231
1232 next = get_next_frame (this_frame);
1233 if (next == NULL)
1234 return 0;
1235
1236 callee = btrace_get_frame_function (next);
1237 if (callee == NULL)
1238 return 0;
1239
1240 if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1241 return 0;
1242
1243 bfun = callee->up;
1244 if (bfun == NULL)
1245 return 0;
1246
1247 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1248 btrace_get_bfun_name (bfun), bfun->level);
1249
1250 /* This is our frame. Initialize the frame cache. */
1251 cache = bfcache_new (this_frame);
1252 cache->tp = find_thread_ptid (inferior_ptid);
1253 cache->bfun = bfun;
1254
1255 *this_cache = cache;
1256 return 1;
1257}
1258
1259static void
1260record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
1261{
1262 struct btrace_frame_cache *cache;
1263 void **slot;
1264
1265 cache = this_cache;
1266
1267 slot = htab_find_slot (bfcache, cache, NO_INSERT);
1268 gdb_assert (slot != NULL);
1269
1270 htab_remove_elt (bfcache, cache);
1271}
1272
 1273/* btrace recording does not store previous memory content, nor the
 1274 contents of the stack frames. Any unwinding would return erroneous
 1275 results as the stack contents no longer match the changed PC value
 1276 restored from history. Therefore this unwinder reports any possibly
 1277 unwound registers as <unavailable>. */
1278
 1279const struct frame_unwind record_btrace_frame_unwind =
1280{
1281 NORMAL_FRAME,
1282 record_btrace_frame_unwind_stop_reason,
1283 record_btrace_frame_this_id,
1284 record_btrace_frame_prev_register,
1285 NULL,
1286 record_btrace_frame_sniffer,
1287 record_btrace_frame_dealloc_cache
1288};
1289
1290const struct frame_unwind record_btrace_tailcall_frame_unwind =
1291{
1292 TAILCALL_FRAME,
1293 record_btrace_frame_unwind_stop_reason,
1294 record_btrace_frame_this_id,
1295 record_btrace_frame_prev_register,
1296 NULL,
1297 record_btrace_tailcall_frame_sniffer,
1298 record_btrace_frame_dealloc_cache
 1299};
 1300
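/* Editorial note, not part of the original source: both unwinders above are
   handed to the frame machinery via the to_get_unwinder and
   to_get_tailcall_unwinder hooks set in init_record_btrace_ops below.  Their
   sniffers only claim a frame while replaying, i.e. when the thread has a
   replay iterator or the next (inner) frame already carries a btrace frame
   cache, so live debugging keeps using the regular unwinders.  */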
1301/* Indicate that TP should be resumed according to FLAG. */
1302
1303static void
1304record_btrace_resume_thread (struct thread_info *tp,
1305 enum btrace_thread_flag flag)
1306{
1307 struct btrace_thread_info *btinfo;
1308
1309 DEBUG ("resuming %d (%s): %u", tp->num, target_pid_to_str (tp->ptid), flag);
1310
1311 btinfo = &tp->btrace;
1312
1313 if ((btinfo->flags & BTHR_MOVE) != 0)
1314 error (_("Thread already moving."));
1315
1316 /* Fetch the latest branch trace. */
1317 btrace_fetch (tp);
1318
1319 btinfo->flags |= flag;
1320}
1321
1322/* Find the thread to resume given a PTID. */
1323
1324static struct thread_info *
1325record_btrace_find_resume_thread (ptid_t ptid)
1326{
1327 struct thread_info *tp;
1328
1329 /* When asked to resume everything, we pick the current thread. */
1330 if (ptid_equal (minus_one_ptid, ptid) || ptid_is_pid (ptid))
1331 ptid = inferior_ptid;
1332
1333 return find_thread_ptid (ptid);
1334}
1335
1336/* Start replaying a thread. */
1337
1338static struct btrace_insn_iterator *
1339record_btrace_start_replaying (struct thread_info *tp)
1340{
1341 volatile struct gdb_exception except;
1342 struct btrace_insn_iterator *replay;
1343 struct btrace_thread_info *btinfo;
1344 int executing;
1345
1346 btinfo = &tp->btrace;
1347 replay = NULL;
1348
1349 /* We can't start replaying without trace. */
1350 if (btinfo->begin == NULL)
1351 return NULL;
1352
1353 /* Clear the executing flag to allow changes to the current frame.
1354 We are not actually running, yet. We just started a reverse execution
1355 command or a record goto command.
1356 For the latter, EXECUTING is false and this has no effect.
1357 For the former, EXECUTING is true and we're in to_wait, about to
1358 move the thread. Since we need to recompute the stack, we temporarily
 1359 set EXECUTING to false. */
1360 executing = is_executing (tp->ptid);
1361 set_executing (tp->ptid, 0);
1362
 1363 /* GDB stores the current frame_id when stepping in order to detect steps
1364 into subroutines.
1365 Since frames are computed differently when we're replaying, we need to
1366 recompute those stored frames and fix them up so we can still detect
1367 subroutines after we started replaying. */
1368 TRY_CATCH (except, RETURN_MASK_ALL)
1369 {
1370 struct frame_info *frame;
1371 struct frame_id frame_id;
1372 int upd_step_frame_id, upd_step_stack_frame_id;
1373
1374 /* The current frame without replaying - computed via normal unwind. */
1375 frame = get_current_frame ();
1376 frame_id = get_frame_id (frame);
1377
1378 /* Check if we need to update any stepping-related frame id's. */
1379 upd_step_frame_id = frame_id_eq (frame_id,
1380 tp->control.step_frame_id);
1381 upd_step_stack_frame_id = frame_id_eq (frame_id,
1382 tp->control.step_stack_frame_id);
1383
1384 /* We start replaying at the end of the branch trace. This corresponds
1385 to the current instruction. */
1386 replay = xmalloc (sizeof (*replay));
1387 btrace_insn_end (replay, btinfo);
1388
1389 /* We're not replaying, yet. */
1390 gdb_assert (btinfo->replay == NULL);
1391 btinfo->replay = replay;
1392
1393 /* Make sure we're not using any stale registers. */
1394 registers_changed_ptid (tp->ptid);
1395
1396 /* The current frame with replaying - computed via btrace unwind. */
1397 frame = get_current_frame ();
1398 frame_id = get_frame_id (frame);
1399
1400 /* Replace stepping related frames where necessary. */
1401 if (upd_step_frame_id)
1402 tp->control.step_frame_id = frame_id;
1403 if (upd_step_stack_frame_id)
1404 tp->control.step_stack_frame_id = frame_id;
1405 }
1406
1407 /* Restore the previous execution state. */
1408 set_executing (tp->ptid, executing);
1409
1410 if (except.reason < 0)
1411 {
1412 xfree (btinfo->replay);
1413 btinfo->replay = NULL;
1414
1415 registers_changed_ptid (tp->ptid);
1416
1417 throw_exception (except);
1418 }
1419
1420 return replay;
1421}
1422
1423/* Stop replaying a thread. */
1424
1425static void
1426record_btrace_stop_replaying (struct thread_info *tp)
1427{
1428 struct btrace_thread_info *btinfo;
1429
1430 btinfo = &tp->btrace;
1431
1432 xfree (btinfo->replay);
1433 btinfo->replay = NULL;
1434
1435 /* Make sure we're not leaving any stale registers. */
1436 registers_changed_ptid (tp->ptid);
1437}
1438
1439/* The to_resume method of target record-btrace. */
1440
1441static void
1442record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
1443 enum gdb_signal signal)
1444{
1445 struct thread_info *tp, *other;
1446 enum btrace_thread_flag flag;
1447
1448 DEBUG ("resume %s: %s", target_pid_to_str (ptid), step ? "step" : "cont");
1449
1450 tp = record_btrace_find_resume_thread (ptid);
1451 if (tp == NULL)
1452 error (_("Cannot find thread to resume."));
1453
1454 /* Stop replaying other threads if the thread to resume is not replaying. */
1455 if (!btrace_is_replaying (tp) && execution_direction != EXEC_REVERSE)
1456 ALL_THREADS (other)
1457 record_btrace_stop_replaying (other);
1458
 1459 /* As long as we're not replaying, just forward the request. */
 1460 if (!record_btrace_is_replaying () && execution_direction != EXEC_REVERSE)
1461 {
1462 for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
1463 if (ops->to_resume != NULL)
1464 return ops->to_resume (ops, ptid, step, signal);
1465
1466 error (_("Cannot find target for stepping."));
1467 }
1468
1469 /* Compute the btrace thread flag for the requested move. */
1470 if (step == 0)
1471 flag = execution_direction == EXEC_REVERSE ? BTHR_RCONT : BTHR_CONT;
1472 else
1473 flag = execution_direction == EXEC_REVERSE ? BTHR_RSTEP : BTHR_STEP;
1474
1475 /* At the moment, we only move a single thread. We could also move
1476 all threads in parallel by single-stepping each resumed thread
1477 until the first runs into an event.
1478 When we do that, we would want to continue all other threads.
1479 For now, just resume one thread to not confuse to_wait. */
1480 record_btrace_resume_thread (tp, flag);
1481
1482 /* We just indicate the resume intent here. The actual stepping happens in
1483 record_btrace_wait below. */
1484}
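/* Editorial note, not part of the original source: the (step, direction)
   combinations above map onto the btrace thread flags as

     continue, forward  -> BTHR_CONT     continue, reverse -> BTHR_RCONT
     step, forward      -> BTHR_STEP     step, reverse     -> BTHR_RSTEP

   and record_btrace_step_thread later acts on exactly one of these flags
   per call to record_btrace_wait.  */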
1485
1486/* Find a thread to move. */
1487
1488static struct thread_info *
1489record_btrace_find_thread_to_move (ptid_t ptid)
1490{
1491 struct thread_info *tp;
1492
1493 /* First check the parameter thread. */
1494 tp = find_thread_ptid (ptid);
1495 if (tp != NULL && (tp->btrace.flags & BTHR_MOVE) != 0)
1496 return tp;
1497
1498 /* Otherwise, find one other thread that has been resumed. */
1499 ALL_THREADS (tp)
1500 if ((tp->btrace.flags & BTHR_MOVE) != 0)
1501 return tp;
1502
1503 return NULL;
1504}
1505
1506/* Return a target_waitstatus indicating that we ran out of history. */
1507
1508static struct target_waitstatus
1509btrace_step_no_history (void)
1510{
1511 struct target_waitstatus status;
1512
1513 status.kind = TARGET_WAITKIND_NO_HISTORY;
1514
1515 return status;
1516}
1517
1518/* Return a target_waitstatus indicating that a step finished. */
1519
1520static struct target_waitstatus
1521btrace_step_stopped (void)
1522{
1523 struct target_waitstatus status;
1524
1525 status.kind = TARGET_WAITKIND_STOPPED;
1526 status.value.sig = GDB_SIGNAL_TRAP;
1527
1528 return status;
1529}
1530
1531/* Clear the record histories. */
1532
1533static void
1534record_btrace_clear_histories (struct btrace_thread_info *btinfo)
1535{
1536 xfree (btinfo->insn_history);
1537 xfree (btinfo->call_history);
1538
1539 btinfo->insn_history = NULL;
1540 btinfo->call_history = NULL;
1541}
1542
1543/* Step a single thread. */
1544
1545static struct target_waitstatus
1546record_btrace_step_thread (struct thread_info *tp)
1547{
1548 struct btrace_insn_iterator *replay, end;
1549 struct btrace_thread_info *btinfo;
1550 struct address_space *aspace;
1551 struct inferior *inf;
1552 enum btrace_thread_flag flags;
1553 unsigned int steps;
1554
1555 btinfo = &tp->btrace;
1556 replay = btinfo->replay;
1557
1558 flags = btinfo->flags & BTHR_MOVE;
1559 btinfo->flags &= ~BTHR_MOVE;
1560
1561 DEBUG ("stepping %d (%s): %u", tp->num, target_pid_to_str (tp->ptid), flags);
1562
1563 switch (flags)
1564 {
1565 default:
1566 internal_error (__FILE__, __LINE__, _("invalid stepping type."));
1567
1568 case BTHR_STEP:
1569 /* We're done if we're not replaying. */
1570 if (replay == NULL)
1571 return btrace_step_no_history ();
1572
1573 /* We are always able to step at least once. */
1574 steps = btrace_insn_next (replay, 1);
1575 gdb_assert (steps == 1);
1576
1577 /* Determine the end of the instruction trace. */
1578 btrace_insn_end (&end, btinfo);
1579
1580 /* We stop replaying if we reached the end of the trace. */
1581 if (btrace_insn_cmp (replay, &end) == 0)
1582 record_btrace_stop_replaying (tp);
1583
1584 return btrace_step_stopped ();
1585
1586 case BTHR_RSTEP:
1587 /* Start replaying if we're not already doing so. */
1588 if (replay == NULL)
1589 replay = record_btrace_start_replaying (tp);
1590
1591 /* If we can't step any further, we reached the end of the history. */
1592 steps = btrace_insn_prev (replay, 1);
1593 if (steps == 0)
1594 return btrace_step_no_history ();
1595
1596 return btrace_step_stopped ();
1597
1598 case BTHR_CONT:
1599 /* We're done if we're not replaying. */
1600 if (replay == NULL)
1601 return btrace_step_no_history ();
1602
1603 inf = find_inferior_pid (ptid_get_pid (tp->ptid));
1604 aspace = inf->aspace;
1605
1606 /* Determine the end of the instruction trace. */
1607 btrace_insn_end (&end, btinfo);
1608
1609 for (;;)
1610 {
1611 const struct btrace_insn *insn;
1612
1613 /* We are always able to step at least once. */
1614 steps = btrace_insn_next (replay, 1);
1615 gdb_assert (steps == 1);
1616
1617 /* We stop replaying if we reached the end of the trace. */
1618 if (btrace_insn_cmp (replay, &end) == 0)
1619 {
1620 record_btrace_stop_replaying (tp);
1621 return btrace_step_no_history ();
1622 }
1623
1624 insn = btrace_insn_get (replay);
1625 gdb_assert (insn);
1626
1627 DEBUG ("stepping %d (%s) ... %s", tp->num,
1628 target_pid_to_str (tp->ptid),
1629 core_addr_to_string_nz (insn->pc));
1630
1631 if (breakpoint_here_p (aspace, insn->pc))
1632 return btrace_step_stopped ();
1633 }
1634
1635 case BTHR_RCONT:
1636 /* Start replaying if we're not already doing so. */
1637 if (replay == NULL)
1638 replay = record_btrace_start_replaying (tp);
1639
1640 inf = find_inferior_pid (ptid_get_pid (tp->ptid));
1641 aspace = inf->aspace;
1642
1643 for (;;)
1644 {
1645 const struct btrace_insn *insn;
1646
1647 /* If we can't step any further, we're done. */
1648 steps = btrace_insn_prev (replay, 1);
1649 if (steps == 0)
1650 return btrace_step_no_history ();
1651
1652 insn = btrace_insn_get (replay);
1653 gdb_assert (insn);
1654
1655 DEBUG ("reverse-stepping %d (%s) ... %s", tp->num,
1656 target_pid_to_str (tp->ptid),
1657 core_addr_to_string_nz (insn->pc));
1658
1659 if (breakpoint_here_p (aspace, insn->pc))
1660 return btrace_step_stopped ();
1661 }
1662 }
 1662 }
1663}
1664
1665/* The to_wait method of target record-btrace. */
1666
1667static ptid_t
1668record_btrace_wait (struct target_ops *ops, ptid_t ptid,
1669 struct target_waitstatus *status, int options)
1670{
1671 struct thread_info *tp, *other;
1672
1673 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);
1674
 1675 /* As long as we're not replaying, just forward the request. */
 1676 if (!record_btrace_is_replaying () && execution_direction != EXEC_REVERSE)
1677 {
1678 for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
1679 if (ops->to_wait != NULL)
1680 return ops->to_wait (ops, ptid, status, options);
1681
1682 error (_("Cannot find target for waiting."));
1683 }
1684
1685 /* Let's find a thread to move. */
1686 tp = record_btrace_find_thread_to_move (ptid);
1687 if (tp == NULL)
1688 {
1689 DEBUG ("wait %s: no thread", target_pid_to_str (ptid));
1690
1691 status->kind = TARGET_WAITKIND_IGNORE;
1692 return minus_one_ptid;
1693 }
1694
1695 /* We only move a single thread. We're not able to correlate threads. */
1696 *status = record_btrace_step_thread (tp);
1697
1698 /* Stop all other threads. */
1699 if (!non_stop)
1700 ALL_THREADS (other)
1701 other->btrace.flags &= ~BTHR_MOVE;
1702
1703 /* Start record histories anew from the current position. */
1704 record_btrace_clear_histories (&tp->btrace);
1705
1706 /* We moved the replay position but did not update registers. */
1707 registers_changed_ptid (tp->ptid);
1708
1709 return tp->ptid;
1710}
1711
1712/* The to_can_execute_reverse method of target record-btrace. */
1713
1714static int
1715record_btrace_can_execute_reverse (void)
1716{
1717 return 1;
1718}
1719
1720/* The to_decr_pc_after_break method of target record-btrace. */
1721
1722static CORE_ADDR
1723record_btrace_decr_pc_after_break (struct target_ops *ops,
1724 struct gdbarch *gdbarch)
1725{
1726 /* When replaying, we do not actually execute the breakpoint instruction
1727 so there is no need to adjust the PC after hitting a breakpoint. */
1728 if (record_btrace_is_replaying ())
1729 return 0;
1730
1731 return forward_target_decr_pc_after_break (ops->beneath, gdbarch);
1732}
1733
1734/* The to_find_new_threads method of target record-btrace. */
1735
1736static void
1737record_btrace_find_new_threads (struct target_ops *ops)
1738{
1739 /* Don't expect new threads if we're replaying. */
1740 if (record_btrace_is_replaying ())
1741 return;
1742
1743 /* Forward the request. */
1744 for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
1745 if (ops->to_find_new_threads != NULL)
1746 {
1747 ops->to_find_new_threads (ops);
1748 break;
1749 }
1750}
1751
1752/* The to_thread_alive method of target record-btrace. */
1753
1754static int
1755record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
1756{
1757 /* We don't add or remove threads during replay. */
1758 if (record_btrace_is_replaying ())
1759 return find_thread_ptid (ptid) != NULL;
1760
1761 /* Forward the request. */
1762 for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
1763 if (ops->to_thread_alive != NULL)
1764 return ops->to_thread_alive (ops, ptid);
1765
1766 return 0;
1767}
1768
1769/* Set the replay branch trace instruction iterator. If IT is NULL, replay
1770 is stopped. */
1771
1772static void
1773record_btrace_set_replay (struct thread_info *tp,
1774 const struct btrace_insn_iterator *it)
1775{
1776 struct btrace_thread_info *btinfo;
1777
1778 btinfo = &tp->btrace;
1779
1780 if (it == NULL || it->function == NULL)
 1781 record_btrace_stop_replaying (tp);
1782 else
1783 {
1784 if (btinfo->replay == NULL)
 1785 record_btrace_start_replaying (tp);
1786 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
1787 return;
1788
1789 *btinfo->replay = *it;
 1790 registers_changed_ptid (tp->ptid);
1791 }
1792
1793 /* Start anew from the new replay position. */
1794 record_btrace_clear_histories (btinfo);
1795}
1796
1797/* The to_goto_record_begin method of target record-btrace. */
1798
1799static void
1800record_btrace_goto_begin (void)
1801{
1802 struct thread_info *tp;
1803 struct btrace_insn_iterator begin;
1804
1805 tp = require_btrace_thread ();
1806
1807 btrace_insn_begin (&begin, &tp->btrace);
1808 record_btrace_set_replay (tp, &begin);
1809
1810 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
1811}
1812
1813/* The to_goto_record_end method of target record-btrace. */
1814
1815static void
1816record_btrace_goto_end (void)
1817{
1818 struct thread_info *tp;
1819
1820 tp = require_btrace_thread ();
1821
1822 record_btrace_set_replay (tp, NULL);
1823
1824 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
1825}
1826
1827/* The to_goto_record method of target record-btrace. */
1828
1829static void
1830record_btrace_goto (ULONGEST insn)
1831{
1832 struct thread_info *tp;
1833 struct btrace_insn_iterator it;
1834 unsigned int number;
1835 int found;
1836
1837 number = insn;
1838
1839 /* Check for wrap-arounds. */
1840 if (number != insn)
1841 error (_("Instruction number out of range."));
1842
1843 tp = require_btrace_thread ();
1844
1845 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
1846 if (found == 0)
1847 error (_("No such instruction."));
1848
1849 record_btrace_set_replay (tp, &it);
1850
1851 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
1852}
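/* Editorial note, not part of the original source: the three goto methods
   above back the "record goto" commands, e.g.

     (gdb) record goto begin
     (gdb) record goto end
     (gdb) record goto 42

   where the numeric form resolves the instruction number via
   btrace_find_insn_by_number before moving the replay position.  */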
1853
1854/* Initialize the record-btrace target ops. */
1855
1856static void
1857init_record_btrace_ops (void)
1858{
1859 struct target_ops *ops;
1860
1861 ops = &record_btrace_ops;
1862 ops->to_shortname = "record-btrace";
1863 ops->to_longname = "Branch tracing target";
1864 ops->to_doc = "Collect control-flow trace and provide the execution history.";
1865 ops->to_open = record_btrace_open;
1866 ops->to_close = record_btrace_close;
1867 ops->to_detach = record_detach;
1868 ops->to_disconnect = record_disconnect;
1869 ops->to_mourn_inferior = record_mourn_inferior;
1870 ops->to_kill = record_kill;
1871 ops->to_create_inferior = find_default_create_inferior;
1872 ops->to_stop_recording = record_btrace_stop_recording;
1873 ops->to_info_record = record_btrace_info;
1874 ops->to_insn_history = record_btrace_insn_history;
1875 ops->to_insn_history_from = record_btrace_insn_history_from;
1876 ops->to_insn_history_range = record_btrace_insn_history_range;
1877 ops->to_call_history = record_btrace_call_history;
1878 ops->to_call_history_from = record_btrace_call_history_from;
1879 ops->to_call_history_range = record_btrace_call_history_range;
 1880 ops->to_record_is_replaying = record_btrace_is_replaying;
1881 ops->to_xfer_partial = record_btrace_xfer_partial;
1882 ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
1883 ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
1884 ops->to_fetch_registers = record_btrace_fetch_registers;
1885 ops->to_store_registers = record_btrace_store_registers;
1886 ops->to_prepare_to_store = record_btrace_prepare_to_store;
 1887 ops->to_get_unwinder = &record_btrace_frame_unwind;
 1888 ops->to_get_tailcall_unwinder = &record_btrace_tailcall_frame_unwind;
1889 ops->to_resume = record_btrace_resume;
1890 ops->to_wait = record_btrace_wait;
1891 ops->to_find_new_threads = record_btrace_find_new_threads;
1892 ops->to_thread_alive = record_btrace_thread_alive;
1893 ops->to_goto_record_begin = record_btrace_goto_begin;
1894 ops->to_goto_record_end = record_btrace_goto_end;
1895 ops->to_goto_record = record_btrace_goto;
1896 ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
1897 ops->to_decr_pc_after_break = record_btrace_decr_pc_after_break;
1898 ops->to_stratum = record_stratum;
1899 ops->to_magic = OPS_MAGIC;
1900}
1901
1902/* Alias for "target record". */
1903
1904static void
1905cmd_record_btrace_start (char *args, int from_tty)
1906{
1907 if (args != NULL && *args != 0)
1908 error (_("Invalid argument."));
1909
1910 execute_command ("target record-btrace", from_tty);
1911}
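/* Editorial note, not part of the original source: once registered below,
   branch trace recording can be started either as

     (gdb) record btrace
     (gdb) target record-btrace

   both of which end up in record_btrace_open; the former is just the alias
   implemented by cmd_record_btrace_start above.  */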
1912
1913void _initialize_record_btrace (void);
1914
1915/* Initialize btrace commands. */
1916
1917void
1918_initialize_record_btrace (void)
1919{
1920 add_cmd ("btrace", class_obscure, cmd_record_btrace_start,
1921 _("Start branch trace recording."),
1922 &record_cmdlist);
1923 add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
1924
1925 init_record_btrace_ops ();
1926 add_target (&record_btrace_ops);
1927
1928 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
1929 xcalloc, xfree);
 1930}