btrace: add format argument to supports_btrace
[deliverable/binutils-gdb.git] / gdb / record-btrace.c
CommitLineData
afedecd3
MM
1/* Branch trace support for GDB, the GNU debugger.
2
32d0add0 3 Copyright (C) 2013-2015 Free Software Foundation, Inc.
afedecd3
MM
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22#include "defs.h"
23#include "record.h"
24#include "gdbthread.h"
25#include "target.h"
26#include "gdbcmd.h"
27#include "disasm.h"
28#include "observer.h"
afedecd3
MM
29#include "cli/cli-utils.h"
30#include "source.h"
31#include "ui-out.h"
32#include "symtab.h"
33#include "filenames.h"
1f3ef581 34#include "regcache.h"
cecac1ab 35#include "frame-unwind.h"
0b722aec 36#include "hashtab.h"
45741a9c 37#include "infrun.h"
70ad5bff
MM
38#include "event-loop.h"
39#include "inf-loop.h"
afedecd3
MM
40
/* The target_ops of record-btrace.  */
static struct target_ops record_btrace_ops;

/* A new thread observer enabling branch tracing for the new thread.  */
static struct observer *record_btrace_thread_observer;

/* Memory access types used in set/show record btrace replay-memory-access.  */
static const char replay_memory_access_read_only[] = "read-only";
static const char replay_memory_access_read_write[] = "read-write";
/* NULL-terminated list of valid values for the "set record btrace
   replay-memory-access" command.  */
static const char *const replay_memory_access_types[] =
{
  replay_memory_access_read_only,
  replay_memory_access_read_write,
  NULL
};

/* The currently allowed replay memory access type.  Compared by pointer
   identity against the constants above, not by string contents.  */
static const char *replay_memory_access = replay_memory_access_read_only;

/* Command lists for "set/show record btrace".  */
static struct cmd_list_element *set_record_btrace_cmdlist;
static struct cmd_list_element *show_record_btrace_cmdlist;

/* The execution direction of the last resume we got.  See record-full.c.  */
static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;

/* The async event handler for reverse/replay execution.  */
static struct async_event_handler *record_btrace_async_inferior_event_handler;

/* A flag indicating that we are currently generating a core file.
   While set, replay filtering of memory/register accesses is bypassed.  */
static int record_btrace_generating_corefile;

/* Print a record-btrace debug message.  Use do ... while (0) to avoid
   ambiguities when used in if statements.  */

#define DEBUG(msg, args...) \
  do \
    { \
      if (record_debug != 0) \
        fprintf_unfiltered (gdb_stdlog, \
                            "[record-btrace] " msg "\n", ##args); \
    } \
  while (0)
84
85
/* Update the branch trace for the current thread and return a pointer to its
   thread_info.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct thread_info *
require_btrace_thread (void)
{
  struct thread_info *tp;

  DEBUG ("require");

  /* Look up the currently selected thread.  */
  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  /* Bring the thread's branch trace up to date before checking it.  */
  btrace_fetch (tp);

  if (btrace_is_empty (tp))
    error (_("No trace."));

  return tp;
}
110
111/* Update the branch trace for the current thread and return a pointer to its
112 branch trace information struct.
113
114 Throws an error if there is no thread or no trace. This function never
115 returns NULL. */
116
117static struct btrace_thread_info *
118require_btrace (void)
119{
120 struct thread_info *tp;
121
122 tp = require_btrace_thread ();
123
124 return &tp->btrace;
afedecd3
MM
125}
126
/* Enable branch tracing for one thread.  Warn on errors.

   Used as a new-thread observer callback so a tracing failure for one
   thread does not abort whatever operation created the thread.  */

static void
record_btrace_enable_warn (struct thread_info *tp)
{
  volatile struct gdb_exception error;

  /* Catch errors; a failure to enable tracing is downgraded to a warning.  */
  TRY_CATCH (error, RETURN_MASK_ERROR)
    btrace_enable (tp);

  if (error.message != NULL)
    warning ("%s", error.message);
}
140
/* Callback function to disable branch tracing for one thread.

   ARG is the thread_info to disable; the void * signature matches the
   cleanup callback interface.  */

static void
record_btrace_disable_callback (void *arg)
{
  struct thread_info *thread = arg;

  btrace_disable (thread);
}
152
153/* Enable automatic tracing of new threads. */
154
155static void
156record_btrace_auto_enable (void)
157{
158 DEBUG ("attach thread observer");
159
160 record_btrace_thread_observer
161 = observer_attach_new_thread (record_btrace_enable_warn);
162}
163
164/* Disable automatic tracing of new threads. */
165
166static void
167record_btrace_auto_disable (void)
168{
169 /* The observer may have been detached, already. */
170 if (record_btrace_thread_observer == NULL)
171 return;
172
173 DEBUG ("detach thread observer");
174
175 observer_detach_new_thread (record_btrace_thread_observer);
176 record_btrace_thread_observer = NULL;
177}
178
70ad5bff
MM
/* The record-btrace async event handler function.

   Invoked from the event loop when the async event handler is marked;
   forwards to the generic inferior event handler.  */

static void
record_btrace_handle_async_inferior_event (gdb_client_data data)
{
  inferior_event_handler (INF_REG_EVENT, NULL);
}
186
afedecd3
MM
/* The to_open method of target record-btrace.

   Enables branch tracing for the threads selected by ARGS (all non-exited
   threads if ARGS is empty) and pushes the record-btrace target.  On error,
   tracing is rolled back for any threads already enabled via the cleanup
   chain.  */

static void
record_btrace_open (const char *args, int from_tty)
{
  struct cleanup *disable_chain;
  struct thread_info *tp;

  DEBUG ("open");

  record_preopen ();

  if (!target_has_execution)
    error (_("The program is not being run."));

  if (non_stop)
    error (_("Record btrace can't debug inferior in non-stop mode."));

  gdb_assert (record_btrace_thread_observer == NULL);

  /* Anchor for the per-thread disable cleanups registered below.  */
  disable_chain = make_cleanup (null_cleanup, NULL);
  ALL_NON_EXITED_THREADS (tp)
    if (args == NULL || *args == 0 || number_is_in_list (args, tp->num))
      {
        btrace_enable (tp);

        make_cleanup (record_btrace_disable_callback, tp);
      }

  record_btrace_auto_enable ();

  push_target (&record_btrace_ops);

  record_btrace_async_inferior_event_handler
    = create_async_event_handler (record_btrace_handle_async_inferior_event,
                                  NULL);
  record_btrace_generating_corefile = 0;

  observer_notify_record_changed (current_inferior (), 1);

  /* Success - keep tracing enabled; drop the rollback cleanups.  */
  discard_cleanups (disable_chain);
}
229
/* The to_stop_recording method of target record-btrace.

   Stops tracing new threads and disables branch tracing for every thread
   that currently has it enabled.  */

static void
record_btrace_stop_recording (struct target_ops *self)
{
  struct thread_info *tp;

  DEBUG ("stop recording");

  record_btrace_auto_disable ();

  ALL_NON_EXITED_THREADS (tp)
    /* Only threads whose btrace target is set are actually being traced.  */
    if (tp->btrace.target != NULL)
      btrace_disable (tp);
}
245
/* The to_close method of target record-btrace.

   Tears down everything record_btrace_open set up: the async event
   handler, the new-thread observer, and any remaining per-thread branch
   trace state.  */

static void
record_btrace_close (struct target_ops *self)
{
  struct thread_info *tp;

  if (record_btrace_async_inferior_event_handler != NULL)
    delete_async_event_handler (&record_btrace_async_inferior_event_handler);

  /* Make sure automatic recording gets disabled even if we did not stop
     recording before closing the record-btrace target.  */
  record_btrace_auto_disable ();

  /* We should have already stopped recording.
     Tear down btrace in case we have not.  */
  ALL_NON_EXITED_THREADS (tp)
    btrace_teardown (tp);
}
265
b7d2e916
PA
/* The to_async method of target record-btrace.

   A non-NULL CALLBACK turns async mode on (mark our event handler so
   pending replay events get processed); NULL turns it off.  The request
   is also forwarded to the target beneath.  */

static void
record_btrace_async (struct target_ops *ops,
                     void (*callback) (enum inferior_event_type event_type,
                                       void *context),
                     void *context)
{
  if (callback != NULL)
    mark_async_event_handler (record_btrace_async_inferior_event_handler);
  else
    clear_async_event_handler (record_btrace_async_inferior_event_handler);

  ops->beneath->to_async (ops->beneath, callback, context);
}
281
afedecd3
MM
/* The to_info_record method of target record-btrace.

   Prints the number of recorded instructions and function segments for the
   current thread, plus the replay position if replaying.  */

static void
record_btrace_info (struct target_ops *self)
{
  struct btrace_thread_info *btinfo;
  struct thread_info *tp;
  unsigned int insns, calls;

  DEBUG ("info");

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  btrace_fetch (tp);

  insns = 0;
  calls = 0;

  btinfo = &tp->btrace;

  if (!btrace_is_empty (tp))
    {
      struct btrace_call_iterator call;
      struct btrace_insn_iterator insn;

      /* The number of the last call/insn is the total count, since
         numbering starts at one.  */
      btrace_call_end (&call, btinfo);
      btrace_call_prev (&call, 1);
      calls = btrace_call_number (&call);

      btrace_insn_end (&insn, btinfo);
      btrace_insn_prev (&insn, 1);
      insns = btrace_insn_number (&insn);
    }

  printf_unfiltered (_("Recorded %u instructions in %u functions for thread "
                       "%d (%s).\n"), insns, calls, tp->num,
                     target_pid_to_str (tp->ptid));

  if (btrace_is_replaying (tp))
    printf_unfiltered (_("Replay in progress.  At instruction %u.\n"),
                       btrace_insn_number (btinfo->replay));
}
326
/* Print an unsigned int.

   Convenience wrapper: emit VAL into ui_out field FLD using "%u".  */

static void
ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
{
  ui_out_field_fmt (uiout, fld, "%u", val);
}
334
/* Disassemble a section of the recorded instruction trace.

   Iterates over the half-open iterator range [BEGIN; END), printing each
   instruction's trace index followed by its disassembly.  FLAGS are
   record_print_flags forwarded to the disassembler.  */

static void
btrace_insn_history (struct ui_out *uiout,
                     const struct btrace_insn_iterator *begin,
                     const struct btrace_insn_iterator *end, int flags)
{
  struct gdbarch *gdbarch;
  struct btrace_insn_iterator it;

  DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
         btrace_insn_number (end));

  gdbarch = target_gdbarch ();

  for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
    {
      const struct btrace_insn *insn;

      insn = btrace_insn_get (&it);

      /* Print the instruction index.  */
      ui_out_field_uint (uiout, "index", btrace_insn_number (&it));
      ui_out_text (uiout, "\t");

      /* Disassembly with '/m' flag may not produce the expected result.
         See PR gdb/11833.  */
      gdb_disassembly (gdbarch, uiout, NULL, flags, 1, insn->pc, insn->pc + 1);
    }
}
365
/* The to_insn_history method of target record-btrace.

   Shows abs (SIZE) instructions around the current browsing position:
   continuing forward from the previous request for positive SIZE, backward
   for negative SIZE.  With no previous request, starts at the replay
   position (if replaying) or at the end of the trace.  */

static void
record_btrace_insn_history (struct target_ops *self, int size, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
                                                       "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  btinfo = require_btrace ();
  history = btinfo->insn_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("insn-history (0x%x): %d", flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
         start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
        begin = *replay;
      else
        btrace_insn_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
         expand in the other direction, as well, to fill up any remaining
         context.  */
      end = begin;
      if (size < 0)
        {
          /* We want the current position covered, as well.  */
          covered = btrace_insn_next (&end, 1);
          covered += btrace_insn_prev (&begin, context - covered);
          covered += btrace_insn_next (&end, context - covered);
        }
      else
        {
          covered = btrace_insn_next (&end, context);
          covered += btrace_insn_prev (&begin, context - covered);
        }
    }
  else
    {
      /* Continue from where the previous request stopped.  */
      begin = history->begin;
      end = history->end;

      DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
             btrace_insn_number (&begin), btrace_insn_number (&end));

      if (size < 0)
        {
          end = begin;
          covered = btrace_insn_prev (&begin, context);
        }
      else
        {
          begin = end;
          covered = btrace_insn_next (&end, context);
        }
    }

  if (covered > 0)
    btrace_insn_history (uiout, &begin, &end, flags);
  else
    {
      if (size < 0)
        printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
        printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  /* Remember the printed range for the next relative request.  */
  btrace_set_insn_history (btinfo, &begin, &end);
  do_cleanups (uiout_cleanup);
}
451
452/* The to_insn_history_range method of target record-btrace. */
453
454static void
4e99c6b7
TT
455record_btrace_insn_history_range (struct target_ops *self,
456 ULONGEST from, ULONGEST to, int flags)
afedecd3
MM
457{
458 struct btrace_thread_info *btinfo;
23a7fe75
MM
459 struct btrace_insn_history *history;
460 struct btrace_insn_iterator begin, end;
afedecd3
MM
461 struct cleanup *uiout_cleanup;
462 struct ui_out *uiout;
23a7fe75
MM
463 unsigned int low, high;
464 int found;
afedecd3
MM
465
466 uiout = current_uiout;
467 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
468 "insn history");
23a7fe75
MM
469 low = from;
470 high = to;
afedecd3 471
23a7fe75 472 DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);
afedecd3
MM
473
474 /* Check for wrap-arounds. */
23a7fe75 475 if (low != from || high != to)
afedecd3
MM
476 error (_("Bad range."));
477
0688d04e 478 if (high < low)
afedecd3
MM
479 error (_("Bad range."));
480
23a7fe75 481 btinfo = require_btrace ();
afedecd3 482
23a7fe75
MM
483 found = btrace_find_insn_by_number (&begin, btinfo, low);
484 if (found == 0)
485 error (_("Range out of bounds."));
afedecd3 486
23a7fe75
MM
487 found = btrace_find_insn_by_number (&end, btinfo, high);
488 if (found == 0)
0688d04e
MM
489 {
490 /* Silently truncate the range. */
491 btrace_insn_end (&end, btinfo);
492 }
493 else
494 {
495 /* We want both begin and end to be inclusive. */
496 btrace_insn_next (&end, 1);
497 }
afedecd3 498
23a7fe75
MM
499 btrace_insn_history (uiout, &begin, &end, flags);
500 btrace_set_insn_history (btinfo, &begin, &end);
afedecd3
MM
501
502 do_cleanups (uiout_cleanup);
503}
504
/* The to_insn_history_from method of target record-btrace.

   Shows abs (SIZE) instructions ending at (negative SIZE) or starting at
   (positive SIZE) instruction number FROM, both inclusive of FROM.
   Clamps at instruction number 0 / ULONGEST_MAX instead of wrapping.  */

static void
record_btrace_insn_history_from (struct target_ops *self,
                                 ULONGEST from, int size, int flags)
{
  ULONGEST begin, end, context;

  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  if (size < 0)
    {
      end = from;

      /* Clamp at the start of the trace rather than wrapping below 0.  */
      if (from < context)
        begin = 0;
      else
        begin = from - context + 1;
    }
  else
    {
      begin = from;
      end = from + context - 1;

      /* Check for wrap-around.  */
      if (end < begin)
        end = ULONGEST_MAX;
    }

  record_btrace_insn_history_range (self, begin, end, flags);
}
538
539/* Print the instruction number range for a function call history line. */
540
541static void
23a7fe75
MM
542btrace_call_history_insn_range (struct ui_out *uiout,
543 const struct btrace_function *bfun)
afedecd3 544{
7acbe133
MM
545 unsigned int begin, end, size;
546
547 size = VEC_length (btrace_insn_s, bfun->insn);
548 gdb_assert (size > 0);
afedecd3 549
23a7fe75 550 begin = bfun->insn_offset;
7acbe133 551 end = begin + size - 1;
afedecd3 552
23a7fe75 553 ui_out_field_uint (uiout, "insn begin", begin);
8710b709 554 ui_out_text (uiout, ",");
23a7fe75 555 ui_out_field_uint (uiout, "insn end", end);
afedecd3
MM
556}
557
558/* Print the source line information for a function call history line. */
559
560static void
23a7fe75
MM
561btrace_call_history_src_line (struct ui_out *uiout,
562 const struct btrace_function *bfun)
afedecd3
MM
563{
564 struct symbol *sym;
23a7fe75 565 int begin, end;
afedecd3
MM
566
567 sym = bfun->sym;
568 if (sym == NULL)
569 return;
570
571 ui_out_field_string (uiout, "file",
08be3fe3 572 symtab_to_filename_for_display (symbol_symtab (sym)));
afedecd3 573
23a7fe75
MM
574 begin = bfun->lbegin;
575 end = bfun->lend;
576
577 if (end < begin)
afedecd3
MM
578 return;
579
580 ui_out_text (uiout, ":");
23a7fe75 581 ui_out_field_int (uiout, "min line", begin);
afedecd3 582
23a7fe75 583 if (end == begin)
afedecd3
MM
584 return;
585
8710b709 586 ui_out_text (uiout, ",");
23a7fe75 587 ui_out_field_int (uiout, "max line", end);
afedecd3
MM
588}
589
0b722aec
MM
590/* Get the name of a branch trace function. */
591
592static const char *
593btrace_get_bfun_name (const struct btrace_function *bfun)
594{
595 struct minimal_symbol *msym;
596 struct symbol *sym;
597
598 if (bfun == NULL)
599 return "??";
600
601 msym = bfun->msym;
602 sym = bfun->sym;
603
604 if (sym != NULL)
605 return SYMBOL_PRINT_NAME (sym);
606 else if (msym != NULL)
efd66ac6 607 return MSYMBOL_PRINT_NAME (msym);
0b722aec
MM
608 else
609 return "??";
610}
611
afedecd3
MM
/* Disassemble a section of the recorded function trace.

   Iterates over the call iterator range [BEGIN; END), printing one line
   per function segment: index, optional call-depth indentation, function
   name, and - depending on FLAGS - instruction range and source lines.  */

static void
btrace_call_history (struct ui_out *uiout,
                     const struct btrace_thread_info *btinfo,
                     const struct btrace_call_iterator *begin,
                     const struct btrace_call_iterator *end,
                     enum record_print_flag flags)
{
  struct btrace_call_iterator it;

  DEBUG ("ftrace (0x%x): [%u; %u)", flags, btrace_call_number (begin),
         btrace_call_number (end));

  for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
    {
      const struct btrace_function *bfun;
      struct minimal_symbol *msym;
      struct symbol *sym;

      bfun = btrace_call_get (&it);
      sym = bfun->sym;
      msym = bfun->msym;

      /* Print the function index.  */
      ui_out_field_uint (uiout, "index", bfun->number);
      ui_out_text (uiout, "\t");

      if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
        {
          /* Indent proportionally to the call depth; btinfo->level
             normalizes the segment's relative level.  */
          int level = bfun->level + btinfo->level, i;

          for (i = 0; i < level; ++i)
            ui_out_text (uiout, "  ");
        }

      /* Prefer the full symbol; fall back to the minimal symbol.  In MI
         mode, omit the field entirely when the name is unknown.  */
      if (sym != NULL)
        ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (sym));
      else if (msym != NULL)
        ui_out_field_string (uiout, "function", MSYMBOL_PRINT_NAME (msym));
      else if (!ui_out_is_mi_like_p (uiout))
        ui_out_field_string (uiout, "function", "??");

      if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
        {
          ui_out_text (uiout, _("\tinst "));
          btrace_call_history_insn_range (uiout, bfun);
        }

      if ((flags & RECORD_PRINT_SRC_LINE) != 0)
        {
          ui_out_text (uiout, _("\tat "));
          btrace_call_history_src_line (uiout, bfun);
        }

      ui_out_text (uiout, "\n");
    }
}
670
/* The to_call_history method of target record-btrace.

   Shows abs (SIZE) function segments around the current browsing position:
   continuing forward from the previous request for positive SIZE, backward
   for negative SIZE.  With no previous request, starts at the replay
   position (if replaying) or at the end of the trace.  */

static void
record_btrace_call_history (struct target_ops *self, int size, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  /* NOTE(review): the tuple id "insn history" looks inherited from the
     instruction-history variant; the range variant uses "func history".
     Confirm which id MI consumers expect before changing it.  */
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
                                                       "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  btinfo = require_btrace ();
  history = btinfo->call_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("call-history (0x%x): %d", flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
         start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
        {
          begin.function = replay->function;
          begin.btinfo = btinfo;
        }
      else
        btrace_call_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
         expand in the other direction, as well, to fill up any remaining
         context.  */
      end = begin;
      if (size < 0)
        {
          /* We want the current position covered, as well.  */
          covered = btrace_call_next (&end, 1);
          covered += btrace_call_prev (&begin, context - covered);
          covered += btrace_call_next (&end, context - covered);
        }
      else
        {
          covered = btrace_call_next (&end, context);
          covered += btrace_call_prev (&begin, context - covered);
        }
    }
  else
    {
      /* Continue from where the previous request stopped.  */
      begin = history->begin;
      end = history->end;

      DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", flags, size,
             btrace_call_number (&begin), btrace_call_number (&end));

      if (size < 0)
        {
          end = begin;
          covered = btrace_call_prev (&begin, context);
        }
      else
        {
          begin = end;
          covered = btrace_call_next (&end, context);
        }
    }

  if (covered > 0)
    btrace_call_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
        printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
        printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  /* Remember the printed range for the next relative request.  */
  btrace_set_call_history (btinfo, &begin, &end);
  do_cleanups (uiout_cleanup);
}
759
760/* The to_call_history_range method of target record-btrace. */
761
762static void
f0d960ea
TT
763record_btrace_call_history_range (struct target_ops *self,
764 ULONGEST from, ULONGEST to, int flags)
afedecd3
MM
765{
766 struct btrace_thread_info *btinfo;
23a7fe75
MM
767 struct btrace_call_history *history;
768 struct btrace_call_iterator begin, end;
afedecd3
MM
769 struct cleanup *uiout_cleanup;
770 struct ui_out *uiout;
23a7fe75
MM
771 unsigned int low, high;
772 int found;
afedecd3
MM
773
774 uiout = current_uiout;
775 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
776 "func history");
23a7fe75
MM
777 low = from;
778 high = to;
afedecd3 779
23a7fe75 780 DEBUG ("call-history (0x%x): [%u; %u)", flags, low, high);
afedecd3
MM
781
782 /* Check for wrap-arounds. */
23a7fe75 783 if (low != from || high != to)
afedecd3
MM
784 error (_("Bad range."));
785
0688d04e 786 if (high < low)
afedecd3
MM
787 error (_("Bad range."));
788
23a7fe75 789 btinfo = require_btrace ();
afedecd3 790
23a7fe75
MM
791 found = btrace_find_call_by_number (&begin, btinfo, low);
792 if (found == 0)
793 error (_("Range out of bounds."));
afedecd3 794
23a7fe75
MM
795 found = btrace_find_call_by_number (&end, btinfo, high);
796 if (found == 0)
0688d04e
MM
797 {
798 /* Silently truncate the range. */
799 btrace_call_end (&end, btinfo);
800 }
801 else
802 {
803 /* We want both begin and end to be inclusive. */
804 btrace_call_next (&end, 1);
805 }
afedecd3 806
8710b709 807 btrace_call_history (uiout, btinfo, &begin, &end, flags);
23a7fe75 808 btrace_set_call_history (btinfo, &begin, &end);
afedecd3
MM
809
810 do_cleanups (uiout_cleanup);
811}
812
/* The to_call_history_from method of target record-btrace.

   Shows abs (SIZE) function segments ending at (negative SIZE) or starting
   at (positive SIZE) segment number FROM, both inclusive of FROM.  Clamps
   at 0 / ULONGEST_MAX instead of wrapping.  */

static void
record_btrace_call_history_from (struct target_ops *self,
                                 ULONGEST from, int size, int flags)
{
  ULONGEST begin, end, context;

  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  if (size < 0)
    {
      end = from;

      /* Clamp at the start of the trace rather than wrapping below 0.  */
      if (from < context)
        begin = 0;
      else
        begin = from - context + 1;
    }
  else
    {
      begin = from;
      end = from + context - 1;

      /* Check for wrap-around.  */
      if (end < begin)
        end = ULONGEST_MAX;
    }

  record_btrace_call_history_range (self, begin, end, flags);
}
846
07bbe694
MM
847/* The to_record_is_replaying method of target record-btrace. */
848
849static int
1c63c994 850record_btrace_is_replaying (struct target_ops *self)
07bbe694
MM
851{
852 struct thread_info *tp;
853
034f788c 854 ALL_NON_EXITED_THREADS (tp)
07bbe694
MM
855 if (btrace_is_replaying (tp))
856 return 1;
857
858 return 0;
859}
860
633785ff
MM
861/* The to_xfer_partial method of target record-btrace. */
862
9b409511 863static enum target_xfer_status
633785ff
MM
864record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
865 const char *annex, gdb_byte *readbuf,
866 const gdb_byte *writebuf, ULONGEST offset,
9b409511 867 ULONGEST len, ULONGEST *xfered_len)
633785ff
MM
868{
869 struct target_ops *t;
870
871 /* Filter out requests that don't make sense during replay. */
67b5c0c1 872 if (replay_memory_access == replay_memory_access_read_only
aef92902 873 && !record_btrace_generating_corefile
67b5c0c1 874 && record_btrace_is_replaying (ops))
633785ff
MM
875 {
876 switch (object)
877 {
878 case TARGET_OBJECT_MEMORY:
879 {
880 struct target_section *section;
881
882 /* We do not allow writing memory in general. */
883 if (writebuf != NULL)
9b409511
YQ
884 {
885 *xfered_len = len;
bc113b4e 886 return TARGET_XFER_UNAVAILABLE;
9b409511 887 }
633785ff
MM
888
889 /* We allow reading readonly memory. */
890 section = target_section_by_addr (ops, offset);
891 if (section != NULL)
892 {
893 /* Check if the section we found is readonly. */
894 if ((bfd_get_section_flags (section->the_bfd_section->owner,
895 section->the_bfd_section)
896 & SEC_READONLY) != 0)
897 {
898 /* Truncate the request to fit into this section. */
899 len = min (len, section->endaddr - offset);
900 break;
901 }
902 }
903
9b409511 904 *xfered_len = len;
bc113b4e 905 return TARGET_XFER_UNAVAILABLE;
633785ff
MM
906 }
907 }
908 }
909
910 /* Forward the request. */
e75fdfca
TT
911 ops = ops->beneath;
912 return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
913 offset, len, xfered_len);
633785ff
MM
914}
915
/* The to_insert_breakpoint method of target record-btrace.

   Temporarily lifts the replay memory-access restriction so the target
   beneath can write the breakpoint, restoring it even if the insertion
   throws.  */

static int
record_btrace_insert_breakpoint (struct target_ops *ops,
                                 struct gdbarch *gdbarch,
                                 struct bp_target_info *bp_tgt)
{
  volatile struct gdb_exception except;
  const char *old;
  int ret;

  /* Inserting breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  TRY_CATCH (except, RETURN_MASK_ALL)
    ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);

  /* Restore the access mode before possibly re-throwing.  */
  replay_memory_access = old;

  if (except.reason < 0)
    throw_exception (except);

  return ret;
}
943
/* The to_remove_breakpoint method of target record-btrace.

   Temporarily lifts the replay memory-access restriction so the target
   beneath can restore the original memory, restoring the mode even if the
   removal throws.  */

static int
record_btrace_remove_breakpoint (struct target_ops *ops,
                                 struct gdbarch *gdbarch,
                                 struct bp_target_info *bp_tgt)
{
  volatile struct gdb_exception except;
  const char *old;
  int ret;

  /* Removing breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  TRY_CATCH (except, RETURN_MASK_ALL)
    ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt);

  /* Restore the access mode before possibly re-throwing.  */
  replay_memory_access = old;

  if (except.reason < 0)
    throw_exception (except);

  return ret;
}
971
1f3ef581
MM
/* The to_fetch_registers method of target record-btrace.

   While replaying, only the PC register can be supplied - it comes from
   the replay position in the recorded trace.  Outside of replay (or while
   generating a core file) the request is forwarded to the target
   beneath.  */

static void
record_btrace_fetch_registers (struct target_ops *ops,
                               struct regcache *regcache, int regno)
{
  struct btrace_insn_iterator *replay;
  struct thread_info *tp;

  tp = find_thread_ptid (inferior_ptid);
  gdb_assert (tp != NULL);

  replay = tp->btrace.replay;
  if (replay != NULL && !record_btrace_generating_corefile)
    {
      const struct btrace_insn *insn;
      struct gdbarch *gdbarch;
      int pcreg;

      gdbarch = get_regcache_arch (regcache);
      pcreg = gdbarch_pc_regnum (gdbarch);
      if (pcreg < 0)
        return;

      /* We can only provide the PC register.  */
      if (regno >= 0 && regno != pcreg)
        return;

      insn = btrace_insn_get (replay);
      gdb_assert (insn != NULL);

      regcache_raw_supply (regcache, regno, &insn->pc);
    }
  else
    {
      struct target_ops *t = ops->beneath;

      t->to_fetch_registers (t, regcache, regno);
    }
}
1012
1013/* The to_store_registers method of target record-btrace. */
1014
1015static void
1016record_btrace_store_registers (struct target_ops *ops,
1017 struct regcache *regcache, int regno)
1018{
1019 struct target_ops *t;
1020
aef92902 1021 if (!record_btrace_generating_corefile && record_btrace_is_replaying (ops))
1f3ef581
MM
1022 error (_("This record target does not allow writing registers."));
1023
1024 gdb_assert (may_write_registers != 0);
1025
e75fdfca
TT
1026 t = ops->beneath;
1027 t->to_store_registers (t, regcache, regno);
1f3ef581
MM
1028}
1029
1030/* The to_prepare_to_store method of target record-btrace. */
1031
1032static void
1033record_btrace_prepare_to_store (struct target_ops *ops,
1034 struct regcache *regcache)
1035{
1036 struct target_ops *t;
1037
aef92902 1038 if (!record_btrace_generating_corefile && record_btrace_is_replaying (ops))
1f3ef581
MM
1039 return;
1040
e75fdfca
TT
1041 t = ops->beneath;
1042 t->to_prepare_to_store (t, regcache);
1f3ef581
MM
1043}
1044
0b722aec
MM
/* The branch trace frame cache.

   Associates a frame_info with the branch trace function segment it
   represents during replay unwinding.  */

struct btrace_frame_cache
{
  /* The thread.  */
  struct thread_info *tp;

  /* The frame info.  */
  struct frame_info *frame;

  /* The branch trace function segment.  */
  const struct btrace_function *bfun;
};

/* A struct btrace_frame_cache hash table indexed by NEXT.  */

static htab_t bfcache;
1062
1063/* hash_f for htab_create_alloc of bfcache. */
1064
1065static hashval_t
1066bfcache_hash (const void *arg)
1067{
1068 const struct btrace_frame_cache *cache = arg;
1069
1070 return htab_hash_pointer (cache->frame);
1071}
1072
1073/* eq_f for htab_create_alloc of bfcache. */
1074
1075static int
1076bfcache_eq (const void *arg1, const void *arg2)
1077{
1078 const struct btrace_frame_cache *cache1 = arg1;
1079 const struct btrace_frame_cache *cache2 = arg2;
1080
1081 return cache1->frame == cache2->frame;
1082}
1083
/* Create a new btrace frame cache for FRAME and enter it into BFCACHE.

   The cache is obstack-allocated on the frame obstack, so its lifetime
   is tied to the frame; FRAME must not already have an entry (asserted
   via the empty INSERT slot).  */

static struct btrace_frame_cache *
bfcache_new (struct frame_info *frame)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
  cache->frame = frame;

  /* INSERT reserves a slot; it must be empty or we would leak/clobber.  */
  slot = htab_find_slot (bfcache, cache, INSERT);
  gdb_assert (*slot == NULL);
  *slot = cache;

  return cache;
}
1101
1102/* Extract the branch trace function from a branch trace frame. */
1103
1104static const struct btrace_function *
1105btrace_get_frame_function (struct frame_info *frame)
1106{
1107 const struct btrace_frame_cache *cache;
1108 const struct btrace_function *bfun;
1109 struct btrace_frame_cache pattern;
1110 void **slot;
1111
1112 pattern.frame = frame;
1113
1114 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1115 if (slot == NULL)
1116 return NULL;
1117
1118 cache = *slot;
1119 return cache->bfun;
1120}
1121
cecac1ab
MM
1122/* Implement stop_reason method for record_btrace_frame_unwind. */
1123
1124static enum unwind_stop_reason
1125record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1126 void **this_cache)
1127{
0b722aec
MM
1128 const struct btrace_frame_cache *cache;
1129 const struct btrace_function *bfun;
1130
1131 cache = *this_cache;
1132 bfun = cache->bfun;
1133 gdb_assert (bfun != NULL);
1134
1135 if (bfun->up == NULL)
1136 return UNWIND_UNAVAILABLE;
1137
1138 return UNWIND_NO_REASON;
cecac1ab
MM
1139}
1140
/* Implement this_id method for record_btrace_frame_unwind.

   The id is built from the frame's function address and the number of
   the first segment of the function instance, so all segments of one
   instance map to the same id.  The stack address is unavailable since
   btrace does not record memory.  */

static void
record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
			     struct frame_id *this_id)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;
  CORE_ADDR code, special;

  cache = *this_cache;

  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  /* Walk back to the first segment of this function instance so the id
     stays stable no matter which segment we cached.  */
  while (bfun->segment.prev != NULL)
    bfun = bfun->segment.prev;

  code = get_frame_func (this_frame);
  special = bfun->number;

  *this_id = frame_id_build_unavailable_stack_special (code, special);

  DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
	 btrace_get_bfun_name (cache->bfun),
	 core_addr_to_string_nz (this_id->code_addr),
	 core_addr_to_string_nz (this_id->special_addr));
}
1169
/* Implement prev_register method for record_btrace_frame_unwind.

   Only the PC can be unwound: btrace records control flow, not register
   or memory state.  Any other register request throws NOT_AVAILABLE_ERROR.  */

static struct value *
record_btrace_frame_prev_register (struct frame_info *this_frame,
				   void **this_cache,
				   int regnum)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun, *caller;
  const struct btrace_insn *insn;
  struct gdbarch *gdbarch;
  CORE_ADDR pc;
  int pcreg;

  gdbarch = get_frame_arch (this_frame);
  pcreg = gdbarch_pc_regnum (gdbarch);
  if (pcreg < 0 || regnum != pcreg)
    throw_error (NOT_AVAILABLE_ERROR,
		 _("Registers are not available in btrace record history"));

  cache = *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  caller = bfun->up;
  if (caller == NULL)
    throw_error (NOT_AVAILABLE_ERROR,
		 _("No caller in btrace record history"));

  if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
    {
      /* The up-link is a return link: the caller resumes at the first
	 instruction of its own segment.  */
      insn = VEC_index (btrace_insn_s, caller->insn, 0);
      pc = insn->pc;
    }
  else
    {
      /* The up-link is a call link: the caller resumes after its last
	 recorded instruction, i.e. after the call.  */
      insn = VEC_last (btrace_insn_s, caller->insn);
      pc = insn->pc;

      pc += gdb_insn_length (gdbarch, pc);
    }

  DEBUG ("[frame] unwound PC in %s on level %d: %s",
	 btrace_get_bfun_name (bfun), bfun->level,
	 core_addr_to_string_nz (pc));

  return frame_unwind_got_address (this_frame, regnum, pc);
}
1218
/* Implement sniffer method for record_btrace_frame_unwind.

   Claims THIS_FRAME when replaying: the innermost frame comes from the
   replay iterator's current function segment; outer frames come from
   the up-link of the (already sniffed) callee frame, except across
   tail calls, which the tailcall sniffer handles.  */

static int
record_btrace_frame_sniffer (const struct frame_unwind *self,
			     struct frame_info *this_frame,
			     void **this_cache)
{
  const struct btrace_function *bfun;
  struct btrace_frame_cache *cache;
  struct thread_info *tp;
  struct frame_info *next;

  /* THIS_FRAME does not contain a reference to its thread.  */
  tp = find_thread_ptid (inferior_ptid);
  gdb_assert (tp != NULL);

  bfun = NULL;
  next = get_next_frame (this_frame);
  if (next == NULL)
    {
      /* Innermost frame: use the replay position, if any.  */
      const struct btrace_insn_iterator *replay;

      replay = tp->btrace.replay;
      if (replay != NULL)
	bfun = replay->function;
    }
  else
    {
      /* Outer frame: follow the callee's up-link unless it is a tail
	 call (handled by record_btrace_tailcall_frame_sniffer).  */
      const struct btrace_function *callee;

      callee = btrace_get_frame_function (next);
      if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
	bfun = callee->up;
    }

  if (bfun == NULL)
    return 0;

  DEBUG ("[frame] sniffed frame for %s on level %d",
	 btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = tp;
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}
1268
1269/* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1270
1271static int
1272record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
1273 struct frame_info *this_frame,
1274 void **this_cache)
1275{
1276 const struct btrace_function *bfun, *callee;
1277 struct btrace_frame_cache *cache;
1278 struct frame_info *next;
1279
1280 next = get_next_frame (this_frame);
1281 if (next == NULL)
1282 return 0;
1283
1284 callee = btrace_get_frame_function (next);
1285 if (callee == NULL)
1286 return 0;
1287
1288 if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1289 return 0;
1290
1291 bfun = callee->up;
1292 if (bfun == NULL)
1293 return 0;
1294
1295 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1296 btrace_get_bfun_name (bfun), bfun->level);
1297
1298 /* This is our frame. Initialize the frame cache. */
1299 cache = bfcache_new (this_frame);
1300 cache->tp = find_thread_ptid (inferior_ptid);
1301 cache->bfun = bfun;
1302
1303 *this_cache = cache;
1304 return 1;
1305}
1306
1307static void
1308record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
1309{
1310 struct btrace_frame_cache *cache;
1311 void **slot;
1312
1313 cache = this_cache;
1314
1315 slot = htab_find_slot (bfcache, cache, NO_INSERT);
1316 gdb_assert (slot != NULL);
1317
1318 htab_remove_elt (bfcache, cache);
cecac1ab
MM
1319}
1320
/* btrace recording does not store previous memory content, nor the stack
   frames content.  Any unwinding would return erroneous results as the
   stack contents no longer matches the changed PC value restored from
   history.  Therefore this unwinder reports any possibly unwound
   registers as <unavailable>.  */

const struct frame_unwind record_btrace_frame_unwind =
{
  NORMAL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_frame_sniffer,
  record_btrace_frame_dealloc_cache
};
1337
/* The btrace unwinder for tail call frames.  Shares stop_reason,
   this_id, prev_register and dealloc_cache with the normal btrace
   unwinder; only the sniffer differs.  */

const struct frame_unwind record_btrace_tailcall_frame_unwind =
{
  TAILCALL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_tailcall_frame_sniffer,
  record_btrace_frame_dealloc_cache
};
b2f4cfde 1348
/* Implement the to_get_unwinder method.
   Returns the btrace unwinder for normal frames.  */

static const struct frame_unwind *
record_btrace_to_get_unwinder (struct target_ops *self)
{
  return &record_btrace_frame_unwind;
}
1356
/* Implement the to_get_tailcall_unwinder method.
   Returns the btrace unwinder for tail call frames.  */

static const struct frame_unwind *
record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
{
  return &record_btrace_tailcall_frame_unwind;
}
1364
52834460
MM
1365/* Indicate that TP should be resumed according to FLAG. */
1366
1367static void
1368record_btrace_resume_thread (struct thread_info *tp,
1369 enum btrace_thread_flag flag)
1370{
1371 struct btrace_thread_info *btinfo;
1372
1373 DEBUG ("resuming %d (%s): %u", tp->num, target_pid_to_str (tp->ptid), flag);
1374
1375 btinfo = &tp->btrace;
1376
1377 if ((btinfo->flags & BTHR_MOVE) != 0)
1378 error (_("Thread already moving."));
1379
1380 /* Fetch the latest branch trace. */
1381 btrace_fetch (tp);
1382
1383 btinfo->flags |= flag;
1384}
1385
1386/* Find the thread to resume given a PTID. */
1387
1388static struct thread_info *
1389record_btrace_find_resume_thread (ptid_t ptid)
1390{
1391 struct thread_info *tp;
1392
1393 /* When asked to resume everything, we pick the current thread. */
1394 if (ptid_equal (minus_one_ptid, ptid) || ptid_is_pid (ptid))
1395 ptid = inferior_ptid;
1396
1397 return find_thread_ptid (ptid);
1398}
1399
/* Start replaying a thread.

   Positions the replay iterator at the end of TP's trace (i.e. at the
   current instruction) and fixes up GDB's stepping-related frame ids,
   which must be recomputed with the btrace unwinder.  Returns the new
   iterator, or NULL if TP has no trace.  On error, replay state is
   rolled back and the exception is re-thrown.  */

static struct btrace_insn_iterator *
record_btrace_start_replaying (struct thread_info *tp)
{
  volatile struct gdb_exception except;
  struct btrace_insn_iterator *replay;
  struct btrace_thread_info *btinfo;
  int executing;

  btinfo = &tp->btrace;
  replay = NULL;

  /* We can't start replaying without trace.  */
  if (btinfo->begin == NULL)
    return NULL;

  /* Clear the executing flag to allow changes to the current frame.
     We are not actually running, yet.  We just started a reverse execution
     command or a record goto command.
     For the latter, EXECUTING is false and this has no effect.
     For the former, EXECUTING is true and we're in to_wait, about to
     move the thread.  Since we need to recompute the stack, we temporarily
     set EXECUTING to false.  */
  executing = is_executing (tp->ptid);
  set_executing (tp->ptid, 0);

  /* GDB stores the current frame_id when stepping in order to detect steps
     into subroutines.
     Since frames are computed differently when we're replaying, we need to
     recompute those stored frames and fix them up so we can still detect
     subroutines after we started replaying.  */
  TRY_CATCH (except, RETURN_MASK_ALL)
    {
      struct frame_info *frame;
      struct frame_id frame_id;
      int upd_step_frame_id, upd_step_stack_frame_id;

      /* The current frame without replaying - computed via normal unwind.  */
      frame = get_current_frame ();
      frame_id = get_frame_id (frame);

      /* Check if we need to update any stepping-related frame id's.  */
      upd_step_frame_id = frame_id_eq (frame_id,
				       tp->control.step_frame_id);
      upd_step_stack_frame_id = frame_id_eq (frame_id,
					     tp->control.step_stack_frame_id);

      /* We start replaying at the end of the branch trace.  This corresponds
	 to the current instruction.  */
      replay = xmalloc (sizeof (*replay));
      btrace_insn_end (replay, btinfo);

      /* We're not replaying, yet.  */
      gdb_assert (btinfo->replay == NULL);
      btinfo->replay = replay;

      /* Make sure we're not using any stale registers.  */
      registers_changed_ptid (tp->ptid);

      /* The current frame with replaying - computed via btrace unwind.  */
      frame = get_current_frame ();
      frame_id = get_frame_id (frame);

      /* Replace stepping related frames where necessary.  */
      if (upd_step_frame_id)
	tp->control.step_frame_id = frame_id;
      if (upd_step_stack_frame_id)
	tp->control.step_stack_frame_id = frame_id;
    }

  /* Restore the previous execution state.  */
  set_executing (tp->ptid, executing);

  if (except.reason < 0)
    {
      /* Roll back: drop the replay iterator and invalidate any registers
	 computed from it before re-throwing.  */
      xfree (btinfo->replay);
      btinfo->replay = NULL;

      registers_changed_ptid (tp->ptid);

      throw_exception (except);
    }

  return replay;
}
1486
1487/* Stop replaying a thread. */
1488
1489static void
1490record_btrace_stop_replaying (struct thread_info *tp)
1491{
1492 struct btrace_thread_info *btinfo;
1493
1494 btinfo = &tp->btrace;
1495
1496 xfree (btinfo->replay);
1497 btinfo->replay = NULL;
1498
1499 /* Make sure we're not leaving any stale registers. */
1500 registers_changed_ptid (tp->ptid);
1501}
1502
/* The to_resume method of target record-btrace.

   Forward execution on a live, non-replaying thread is forwarded to the
   target beneath.  Otherwise we only record the move intent (step/cont,
   forward/reverse) in the thread's btrace flags; the actual stepping
   happens in record_btrace_wait.  */

static void
record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
		      enum gdb_signal signal)
{
  struct thread_info *tp, *other;
  enum btrace_thread_flag flag;

  DEBUG ("resume %s: %s", target_pid_to_str (ptid), step ? "step" : "cont");

  /* Store the execution direction of the last resume.  */
  record_btrace_resume_exec_dir = execution_direction;

  tp = record_btrace_find_resume_thread (ptid);
  if (tp == NULL)
    error (_("Cannot find thread to resume."));

  /* Stop replaying other threads if the thread to resume is not replaying.  */
  if (!btrace_is_replaying (tp) && execution_direction != EXEC_REVERSE)
    ALL_NON_EXITED_THREADS (other)
      record_btrace_stop_replaying (other);

  /* As long as we're not replaying, just forward the request.  */
  if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
    {
      ops = ops->beneath;
      return ops->to_resume (ops, ptid, step, signal);
    }

  /* Compute the btrace thread flag for the requested move.  */
  if (step == 0)
    flag = execution_direction == EXEC_REVERSE ? BTHR_RCONT : BTHR_CONT;
  else
    flag = execution_direction == EXEC_REVERSE ? BTHR_RSTEP : BTHR_STEP;

  /* At the moment, we only move a single thread.  We could also move
     all threads in parallel by single-stepping each resumed thread
     until the first runs into an event.
     When we do that, we would want to continue all other threads.
     For now, just resume one thread to not confuse to_wait.  */
  record_btrace_resume_thread (tp, flag);

  /* We just indicate the resume intent here.  The actual stepping happens in
     record_btrace_wait below.  */

  /* Async support.  */
  if (target_can_async_p ())
    {
      target_async (inferior_event_handler, 0);
      mark_async_event_handler (record_btrace_async_inferior_event_handler);
    }
}
1556
/* Find a thread to move.

   Prefers the thread with ptid PTID if it has a pending move request;
   otherwise returns any resumed (BTHR_MOVE) thread, or NULL.  */

static struct thread_info *
record_btrace_find_thread_to_move (ptid_t ptid)
{
  struct thread_info *tp;

  /* First check the parameter thread.  */
  tp = find_thread_ptid (ptid);
  if (tp != NULL && (tp->btrace.flags & BTHR_MOVE) != 0)
    return tp;

  /* Otherwise, find one other thread that has been resumed.  */
  ALL_NON_EXITED_THREADS (tp)
    if ((tp->btrace.flags & BTHR_MOVE) != 0)
      return tp;

  return NULL;
}
1576
1577/* Return a target_waitstatus indicating that we ran out of history. */
1578
1579static struct target_waitstatus
1580btrace_step_no_history (void)
1581{
1582 struct target_waitstatus status;
1583
1584 status.kind = TARGET_WAITKIND_NO_HISTORY;
1585
1586 return status;
1587}
1588
1589/* Return a target_waitstatus indicating that a step finished. */
1590
1591static struct target_waitstatus
1592btrace_step_stopped (void)
1593{
1594 struct target_waitstatus status;
1595
1596 status.kind = TARGET_WAITKIND_STOPPED;
1597 status.value.sig = GDB_SIGNAL_TRAP;
1598
1599 return status;
1600}
1601
1602/* Clear the record histories. */
1603
1604static void
1605record_btrace_clear_histories (struct btrace_thread_info *btinfo)
1606{
1607 xfree (btinfo->insn_history);
1608 xfree (btinfo->call_history);
1609
1610 btinfo->insn_history = NULL;
1611 btinfo->call_history = NULL;
1612}
1613
/* Step a single thread.

   Consumes TP's pending move request (BTHR_STEP/RSTEP/CONT/RCONT) and
   moves the replay iterator accordingly, returning the resulting wait
   status.  Reaching either end of the trace yields NO_HISTORY; stepping
   forward off the end of the trace also ends replay.  Continue variants
   stop at breakpoints.  */

static struct target_waitstatus
record_btrace_step_thread (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay, end;
  struct btrace_thread_info *btinfo;
  struct address_space *aspace;
  struct inferior *inf;
  enum btrace_thread_flag flags;
  unsigned int steps;

  /* We can't step without an execution history.  */
  if (btrace_is_empty (tp))
    return btrace_step_no_history ();

  btinfo = &tp->btrace;
  replay = btinfo->replay;

  /* Consume the move request so it is not processed twice.  */
  flags = btinfo->flags & BTHR_MOVE;
  btinfo->flags &= ~BTHR_MOVE;

  DEBUG ("stepping %d (%s): %u", tp->num, target_pid_to_str (tp->ptid), flags);

  switch (flags)
    {
    default:
      internal_error (__FILE__, __LINE__, _("invalid stepping type."));

    case BTHR_STEP:
      /* We're done if we're not replaying.  */
      if (replay == NULL)
	return btrace_step_no_history ();

      /* We are always able to step at least once.  */
      steps = btrace_insn_next (replay, 1);
      gdb_assert (steps == 1);

      /* Determine the end of the instruction trace.  */
      btrace_insn_end (&end, btinfo);

      /* We stop replaying if we reached the end of the trace.  */
      if (btrace_insn_cmp (replay, &end) == 0)
	record_btrace_stop_replaying (tp);

      return btrace_step_stopped ();

    case BTHR_RSTEP:
      /* Start replaying if we're not already doing so.  */
      if (replay == NULL)
	replay = record_btrace_start_replaying (tp);

      /* If we can't step any further, we reached the end of the history.  */
      steps = btrace_insn_prev (replay, 1);
      if (steps == 0)
	return btrace_step_no_history ();

      return btrace_step_stopped ();

    case BTHR_CONT:
      /* We're done if we're not replaying.  */
      if (replay == NULL)
	return btrace_step_no_history ();

      inf = find_inferior_ptid (tp->ptid);
      aspace = inf->aspace;

      /* Determine the end of the instruction trace.  */
      btrace_insn_end (&end, btinfo);

      /* Step forward one instruction at a time until we hit the end of
	 the trace or a breakpoint.  */
      for (;;)
	{
	  const struct btrace_insn *insn;

	  /* We are always able to step at least once.  */
	  steps = btrace_insn_next (replay, 1);
	  gdb_assert (steps == 1);

	  /* We stop replaying if we reached the end of the trace.  */
	  if (btrace_insn_cmp (replay, &end) == 0)
	    {
	      record_btrace_stop_replaying (tp);
	      return btrace_step_no_history ();
	    }

	  insn = btrace_insn_get (replay);
	  gdb_assert (insn);

	  DEBUG ("stepping %d (%s) ... %s", tp->num,
		 target_pid_to_str (tp->ptid),
		 core_addr_to_string_nz (insn->pc));

	  if (breakpoint_here_p (aspace, insn->pc))
	    return btrace_step_stopped ();
	}

    case BTHR_RCONT:
      /* Start replaying if we're not already doing so.  */
      if (replay == NULL)
	replay = record_btrace_start_replaying (tp);

      inf = find_inferior_ptid (tp->ptid);
      aspace = inf->aspace;

      /* Step backward one instruction at a time until we hit the start
	 of the trace or a breakpoint.  */
      for (;;)
	{
	  const struct btrace_insn *insn;

	  /* If we can't step any further, we're done.  */
	  steps = btrace_insn_prev (replay, 1);
	  if (steps == 0)
	    return btrace_step_no_history ();

	  insn = btrace_insn_get (replay);
	  gdb_assert (insn);

	  DEBUG ("reverse-stepping %d (%s) ... %s", tp->num,
		 target_pid_to_str (tp->ptid),
		 core_addr_to_string_nz (insn->pc));

	  if (breakpoint_here_p (aspace, insn->pc))
	    return btrace_step_stopped ();
	}
    }
}
1739
/* The to_wait method of target record-btrace.

   Forwarded to the target beneath while not replaying.  Otherwise picks
   one thread with a pending move request, steps it, cancels all other
   move requests (all-stop), and resets the record histories and cached
   registers for the moved thread.  */

static ptid_t
record_btrace_wait (struct target_ops *ops, ptid_t ptid,
		    struct target_waitstatus *status, int options)
{
  struct thread_info *tp, *other;

  DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);

  /* As long as we're not replaying, just forward the request.  */
  if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
    {
      ops = ops->beneath;
      return ops->to_wait (ops, ptid, status, options);
    }

  /* Let's find a thread to move.  */
  tp = record_btrace_find_thread_to_move (ptid);
  if (tp == NULL)
    {
      DEBUG ("wait %s: no thread", target_pid_to_str (ptid));

      status->kind = TARGET_WAITKIND_IGNORE;
      return minus_one_ptid;
    }

  /* We only move a single thread.  We're not able to correlate threads.  */
  *status = record_btrace_step_thread (tp);

  /* Stop all other threads.  */
  if (!non_stop)
    ALL_NON_EXITED_THREADS (other)
      other->btrace.flags &= ~BTHR_MOVE;

  /* Start record histories anew from the current position.  */
  record_btrace_clear_histories (&tp->btrace);

  /* We moved the replay position but did not update registers.  */
  registers_changed_ptid (tp->ptid);

  return tp->ptid;
}
1783
/* The to_can_execute_reverse method of target record-btrace.
   Reverse execution is always possible via replay.  */

static int
record_btrace_can_execute_reverse (struct target_ops *self)
{
  return 1;
}
1791
1792/* The to_decr_pc_after_break method of target record-btrace. */
1793
1794static CORE_ADDR
1795record_btrace_decr_pc_after_break (struct target_ops *ops,
1796 struct gdbarch *gdbarch)
1797{
1798 /* When replaying, we do not actually execute the breakpoint instruction
1799 so there is no need to adjust the PC after hitting a breakpoint. */
1c63c994 1800 if (record_btrace_is_replaying (ops))
52834460
MM
1801 return 0;
1802
c0eca49f 1803 return ops->beneath->to_decr_pc_after_break (ops->beneath, gdbarch);
b2f4cfde
MM
1804}
1805
e8032dde 1806/* The to_update_thread_list method of target record-btrace. */
e2887aa3
MM
1807
1808static void
e8032dde 1809record_btrace_update_thread_list (struct target_ops *ops)
e2887aa3 1810{
e8032dde 1811 /* We don't add or remove threads during replay. */
1c63c994 1812 if (record_btrace_is_replaying (ops))
e2887aa3
MM
1813 return;
1814
1815 /* Forward the request. */
e75fdfca 1816 ops = ops->beneath;
e8032dde 1817 ops->to_update_thread_list (ops);
e2887aa3
MM
1818}
1819
1820/* The to_thread_alive method of target record-btrace. */
1821
1822static int
1823record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
1824{
1825 /* We don't add or remove threads during replay. */
1c63c994 1826 if (record_btrace_is_replaying (ops))
e2887aa3
MM
1827 return find_thread_ptid (ptid) != NULL;
1828
1829 /* Forward the request. */
e75fdfca
TT
1830 ops = ops->beneath;
1831 return ops->to_thread_alive (ops, ptid);
e2887aa3
MM
1832}
1833
/* Set the replay branch trace instruction iterator.  If IT is NULL, replay
   is stopped.

   Moving the replay position invalidates cached registers and resets
   the record histories.  Setting the iterator to the current replay
   position is a no-op.  */

static void
record_btrace_set_replay (struct thread_info *tp,
			  const struct btrace_insn_iterator *it)
{
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;

  if (it == NULL || it->function == NULL)
    record_btrace_stop_replaying (tp);
  else
    {
      /* Lazily allocate the replay iterator (owned by BTINFO), then
	 copy the requested position into it.  */
      if (btinfo->replay == NULL)
	record_btrace_start_replaying (tp);
      else if (btrace_insn_cmp (btinfo->replay, it) == 0)
	return;

      *btinfo->replay = *it;
      registers_changed_ptid (tp->ptid);
    }

  /* Start anew from the new replay position.  */
  record_btrace_clear_histories (btinfo);
}
1861
/* The to_goto_record_begin method of target record-btrace.
   Moves the replay position to the start of the trace and reprints
   the (new) current frame.  */

static void
record_btrace_goto_begin (struct target_ops *self)
{
  struct thread_info *tp;
  struct btrace_insn_iterator begin;

  tp = require_btrace_thread ();

  btrace_insn_begin (&begin, &tp->btrace);
  record_btrace_set_replay (tp, &begin);

  print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
}
1877
/* The to_goto_record_end method of target record-btrace.
   Stops replaying (the end of the trace is the live position) and
   reprints the current frame.  */

static void
record_btrace_goto_end (struct target_ops *ops)
{
  struct thread_info *tp;

  tp = require_btrace_thread ();

  record_btrace_set_replay (tp, NULL);

  print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
}
1891
/* The to_goto_record method of target record-btrace.
   Moves the replay position to instruction number INSN.  */

static void
record_btrace_goto (struct target_ops *self, ULONGEST insn)
{
  struct thread_info *tp;
  struct btrace_insn_iterator it;
  unsigned int number;
  int found;

  number = insn;

  /* Check for wrap-arounds: INSN is wider than NUMBER, so the
     truncating assignment above loses bits for out-of-range values.  */
  if (number != insn)
    error (_("Instruction number out of range."));

  tp = require_btrace_thread ();

  found = btrace_find_insn_by_number (&it, &tp->btrace, number);
  if (found == 0)
    error (_("No such instruction."));

  record_btrace_set_replay (tp, &it);

  print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
}
1918
/* The to_execution_direction target method.
   Reports the direction stored by the last record_btrace_resume.  */

static enum exec_direction_kind
record_btrace_execution_direction (struct target_ops *self)
{
  return record_btrace_resume_exec_dir;
}
1926
/* The to_prepare_to_generate_core target method.
   While the flag is set, replay checks are bypassed so core generation
   can read registers and memory from the live target.  */

static void
record_btrace_prepare_to_generate_core (struct target_ops *self)
{
  record_btrace_generating_corefile = 1;
}
1934
/* The to_done_generating_core target method.
   Re-enables the replay checks disabled by prepare_to_generate_core.  */

static void
record_btrace_done_generating_core (struct target_ops *self)
{
  record_btrace_generating_corefile = 0;
}
1942
/* Initialize the record-btrace target ops.
   Fills in record_btrace_ops; methods not set here fall through to the
   target beneath via the target vector's delegation mechanism.  */

static void
init_record_btrace_ops (void)
{
  struct target_ops *ops;

  ops = &record_btrace_ops;
  ops->to_shortname = "record-btrace";
  ops->to_longname = "Branch tracing target";
  ops->to_doc = "Collect control-flow trace and provide the execution history.";
  ops->to_open = record_btrace_open;
  ops->to_close = record_btrace_close;
  ops->to_async = record_btrace_async;
  ops->to_detach = record_detach;
  ops->to_disconnect = record_disconnect;
  ops->to_mourn_inferior = record_mourn_inferior;
  ops->to_kill = record_kill;
  ops->to_stop_recording = record_btrace_stop_recording;
  ops->to_info_record = record_btrace_info;
  ops->to_insn_history = record_btrace_insn_history;
  ops->to_insn_history_from = record_btrace_insn_history_from;
  ops->to_insn_history_range = record_btrace_insn_history_range;
  ops->to_call_history = record_btrace_call_history;
  ops->to_call_history_from = record_btrace_call_history_from;
  ops->to_call_history_range = record_btrace_call_history_range;
  ops->to_record_is_replaying = record_btrace_is_replaying;
  ops->to_xfer_partial = record_btrace_xfer_partial;
  ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
  ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
  ops->to_fetch_registers = record_btrace_fetch_registers;
  ops->to_store_registers = record_btrace_store_registers;
  ops->to_prepare_to_store = record_btrace_prepare_to_store;
  ops->to_get_unwinder = &record_btrace_to_get_unwinder;
  ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;
  ops->to_resume = record_btrace_resume;
  ops->to_wait = record_btrace_wait;
  ops->to_update_thread_list = record_btrace_update_thread_list;
  ops->to_thread_alive = record_btrace_thread_alive;
  ops->to_goto_record_begin = record_btrace_goto_begin;
  ops->to_goto_record_end = record_btrace_goto_end;
  ops->to_goto_record = record_btrace_goto;
  ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
  ops->to_decr_pc_after_break = record_btrace_decr_pc_after_break;
  ops->to_execution_direction = record_btrace_execution_direction;
  ops->to_prepare_to_generate_core = record_btrace_prepare_to_generate_core;
  ops->to_done_generating_core = record_btrace_done_generating_core;
  ops->to_stratum = record_stratum;
  ops->to_magic = OPS_MAGIC;
}
1993
1994/* Alias for "target record". */
1995
1996static void
1997cmd_record_btrace_start (char *args, int from_tty)
1998{
1999 if (args != NULL && *args != 0)
2000 error (_("Invalid argument."));
2001
2002 execute_command ("target record-btrace", from_tty);
2003}
2004
/* The "set record btrace" command.  With no sub-command, list the
   available settings and their values.  */

static void
cmd_set_record_btrace (char *args, int from_tty)
{
  cmd_show_list (set_record_btrace_cmdlist, from_tty, "");
}
2012
/* The "show record btrace" command.  Lists all record btrace settings.  */

static void
cmd_show_record_btrace (char *args, int from_tty)
{
  cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
}
2020
2021/* The "show record btrace replay-memory-access" command. */
2022
2023static void
2024cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2025 struct cmd_list_element *c, const char *value)
2026{
2027 fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
2028 replay_memory_access);
2029}
2030
/* Provide a prototype to silence -Wmissing-prototypes.  */
void _initialize_record_btrace (void);

/* Initialize btrace commands.  Registers the "record btrace" command,
   its set/show sub-commands, the target itself, and the frame cache.  */

void
_initialize_record_btrace (void)
{
  add_cmd ("btrace", class_obscure, cmd_record_btrace_start,
	   _("Start branch trace recording."),
	   &record_cmdlist);
  add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);

  add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
		  _("Set record options"), &set_record_btrace_cmdlist,
		  "set record btrace ", 0, &set_record_cmdlist);

  add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
		  _("Show record options"), &show_record_btrace_cmdlist,
		  "show record btrace ", 0, &show_record_cmdlist);

  add_setshow_enum_cmd ("replay-memory-access", no_class,
			replay_memory_access_types, &replay_memory_access, _("\
Set what memory accesses are allowed during replay."), _("\
Show what memory accesses are allowed during replay."),
			   _("Default is READ-ONLY.\n\n\
The btrace record target does not trace data.\n\
The memory therefore corresponds to the live target and not \
to the current replay position.\n\n\
When READ-ONLY, allow accesses to read-only memory during replay.\n\
When READ-WRITE, allow accesses to read-only and read-write memory during \
replay."),
			   NULL, cmd_show_replay_memory_access,
			   &set_record_btrace_cmdlist,
			   &show_record_btrace_cmdlist);

  init_record_btrace_ops ();
  add_target (&record_btrace_ops);

  bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
			       xcalloc, xfree);
}
This page took 0.389828 seconds and 4 git commands to generate.