Merge remote thread listing methods
[deliverable/binutils-gdb.git] / gdb / record-btrace.c
CommitLineData
afedecd3
MM
1/* Branch trace support for GDB, the GNU debugger.
2
ecd75fc8 3 Copyright (C) 2013-2014 Free Software Foundation, Inc.
afedecd3
MM
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22#include "defs.h"
23#include "record.h"
24#include "gdbthread.h"
25#include "target.h"
26#include "gdbcmd.h"
27#include "disasm.h"
28#include "observer.h"
afedecd3
MM
29#include "cli/cli-utils.h"
30#include "source.h"
31#include "ui-out.h"
32#include "symtab.h"
33#include "filenames.h"
1f3ef581 34#include "regcache.h"
cecac1ab 35#include "frame-unwind.h"
0b722aec 36#include "hashtab.h"
45741a9c 37#include "infrun.h"
70ad5bff
MM
38#include "event-loop.h"
39#include "inf-loop.h"
afedecd3
MM
40
/* The target_ops of record-btrace.  */
static struct target_ops record_btrace_ops;

/* A new thread observer enabling branch tracing for the new thread.
   NULL while automatic tracing is disabled; see record_btrace_auto_enable
   and record_btrace_auto_disable.  */
static struct observer *record_btrace_thread_observer;

/* Memory access types used in set/show record btrace replay-memory-access.  */
static const char replay_memory_access_read_only[] = "read-only";
static const char replay_memory_access_read_write[] = "read-write";
static const char *const replay_memory_access_types[] =
{
  replay_memory_access_read_only,
  replay_memory_access_read_write,
  NULL
};

/* The currently allowed replay memory access type.  Compared by pointer
   identity against the constants above (see record_btrace_xfer_partial).  */
static const char *replay_memory_access = replay_memory_access_read_only;

/* Command lists for "set/show record btrace".  */
static struct cmd_list_element *set_record_btrace_cmdlist;
static struct cmd_list_element *show_record_btrace_cmdlist;

/* The execution direction of the last resume we got.  See record-full.c.  */
static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;

/* The async event handler for reverse/replay execution.  */
static struct async_event_handler *record_btrace_async_inferior_event_handler;

/* A flag indicating that we are currently generating a core file.  While
   set, replay-mode restrictions on memory and register access are
   lifted.  */
static int record_btrace_generating_corefile;
/* Print a record-btrace debug message.  Use do ... while (0) to avoid
   ambiguities when used in if statements.  Only prints when record
   debugging has been enabled (record_debug != 0).  */

#define DEBUG(msg, args...) \
  do \
    { \
      if (record_debug != 0) \
        fprintf_unfiltered (gdb_stdlog, \
			    "[record-btrace] " msg "\n", ##args); \
    } \
  while (0)
85
/* Update the branch trace for the current thread and return a pointer to its
   thread_info.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct thread_info *
require_btrace_thread (void)
{
  struct thread_info *tp;

  DEBUG ("require");

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  /* Pull in any new trace data before checking whether the trace is
     empty.  */
  btrace_fetch (tp);

  if (btrace_is_empty (tp))
    error (_("No trace."));

  return tp;
}
110
111/* Update the branch trace for the current thread and return a pointer to its
112 branch trace information struct.
113
114 Throws an error if there is no thread or no trace. This function never
115 returns NULL. */
116
117static struct btrace_thread_info *
118require_btrace (void)
119{
120 struct thread_info *tp;
121
122 tp = require_btrace_thread ();
123
124 return &tp->btrace;
afedecd3
MM
125}
126
/* Enable branch tracing for one thread.  Warn on errors.

   This is installed as a new-thread observer; it must not throw into the
   observer chain, so any error from btrace_enable is downgraded to a
   warning.  */

static void
record_btrace_enable_warn (struct thread_info *tp)
{
  volatile struct gdb_exception error;

  TRY_CATCH (error, RETURN_MASK_ERROR)
    btrace_enable (tp);

  if (error.message != NULL)
    warning ("%s", error.message);
}
140
/* Cleanup callback disabling branch tracing for one thread.  ARG is the
   thread_info whose tracing is torn down.  */

static void
record_btrace_disable_callback (void *arg)
{
  struct thread_info *tp = arg;

  btrace_disable (tp);
}
152
/* Enable automatic tracing of new threads by attaching a new-thread
   observer.  The observer handle is kept in
   record_btrace_thread_observer for later detach.  */

static void
record_btrace_auto_enable (void)
{
  DEBUG ("attach thread observer");

  record_btrace_thread_observer
    = observer_attach_new_thread (record_btrace_enable_warn);
}
163
164/* Disable automatic tracing of new threads. */
165
166static void
167record_btrace_auto_disable (void)
168{
169 /* The observer may have been detached, already. */
170 if (record_btrace_thread_observer == NULL)
171 return;
172
173 DEBUG ("detach thread observer");
174
175 observer_detach_new_thread (record_btrace_thread_observer);
176 record_btrace_thread_observer = NULL;
177}
178
/* The record-btrace async event handler function.  Forwards the event to
   the common inferior event handling code.  DATA is unused.  */

static void
record_btrace_handle_async_inferior_event (gdb_client_data data)
{
  inferior_event_handler (INF_REG_EVENT, NULL);
}
186
/* The to_open method of target record-btrace.

   ARGS is an optional list of thread numbers; when empty or NULL, branch
   tracing is enabled for all non-exited threads.  */

static void
record_btrace_open (const char *args, int from_tty)
{
  struct cleanup *disable_chain;
  struct thread_info *tp;

  DEBUG ("open");

  record_preopen ();

  if (!target_has_execution)
    error (_("The program is not being run."));

  if (!target_supports_btrace ())
    error (_("Target does not support branch tracing."));

  if (non_stop)
    error (_("Record btrace can't debug inferior in non-stop mode."));

  gdb_assert (record_btrace_thread_observer == NULL);

  /* Chain a disable cleanup per enabled thread so that tracing is undone
     if anything below throws; the chain is discarded on success.  */
  disable_chain = make_cleanup (null_cleanup, NULL);
  ALL_NON_EXITED_THREADS (tp)
    if (args == NULL || *args == 0 || number_is_in_list (args, tp->num))
      {
	btrace_enable (tp);

	make_cleanup (record_btrace_disable_callback, tp);
      }

  record_btrace_auto_enable ();

  push_target (&record_btrace_ops);

  record_btrace_async_inferior_event_handler
    = create_async_event_handler (record_btrace_handle_async_inferior_event,
				  NULL);
  record_btrace_generating_corefile = 0;

  observer_notify_record_changed (current_inferior (),  1);

  discard_cleanups (disable_chain);
}
232
/* The to_stop_recording method of target record-btrace.  */

static void
record_btrace_stop_recording (struct target_ops *self)
{
  struct thread_info *tp;

  DEBUG ("stop recording");

  /* Stop tracing new threads first, then disable tracing for every
     thread that is currently traced (tp->btrace.target != NULL).  */
  record_btrace_auto_disable ();

  ALL_NON_EXITED_THREADS (tp)
    if (tp->btrace.target != NULL)
      btrace_disable (tp);
}
248
/* The to_close method of target record-btrace.  */

static void
record_btrace_close (struct target_ops *self)
{
  struct thread_info *tp;

  if (record_btrace_async_inferior_event_handler != NULL)
    delete_async_event_handler (&record_btrace_async_inferior_event_handler);

  /* Make sure automatic recording gets disabled even if we did not stop
     recording before closing the record-btrace target.  */
  record_btrace_auto_disable ();

  /* We should have already stopped recording.
     Tear down btrace in case we have not.  */
  ALL_NON_EXITED_THREADS (tp)
    btrace_teardown (tp);
}
268
/* The to_info_record method of target record-btrace.  Prints the number
   of recorded instructions and function segments for the current thread
   and, while replaying, the current replay position.  */

static void
record_btrace_info (struct target_ops *self)
{
  struct btrace_thread_info *btinfo;
  struct thread_info *tp;
  unsigned int insns, calls;

  DEBUG ("info");

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  btrace_fetch (tp);

  insns = 0;
  calls = 0;

  btinfo = &tp->btrace;

  if (!btrace_is_empty (tp))
    {
      struct btrace_call_iterator call;
      struct btrace_insn_iterator insn;

      /* The number of the last call/instruction gives the total count;
	 step back from the end iterator to reach it.  */
      btrace_call_end (&call, btinfo);
      btrace_call_prev (&call, 1);
      calls = btrace_call_number (&call);

      btrace_insn_end (&insn, btinfo);
      btrace_insn_prev (&insn, 1);
      insns = btrace_insn_number (&insn);
    }

  printf_unfiltered (_("Recorded %u instructions in %u functions for thread "
		       "%d (%s).\n"), insns, calls, tp->num,
		     target_pid_to_str (tp->ptid));

  if (btrace_is_replaying (tp))
    printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
		       btrace_insn_number (btinfo->replay));
}
313
/* Print an unsigned int.  Emits VAL under the field name FLD on UIOUT.  */

static void
ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
{
  ui_out_field_fmt (uiout, fld, "%u", val);
}
321
/* Disassemble a section of the recorded instruction trace.  The range is
   [BEGIN; END), i.e. END is exclusive.  FLAGS are disassembly flags
   forwarded to gdb_disassembly.  */

static void
btrace_insn_history (struct ui_out *uiout,
		     const struct btrace_insn_iterator *begin,
		     const struct btrace_insn_iterator *end, int flags)
{
  struct gdbarch *gdbarch;
  struct btrace_insn_iterator it;

  DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
	 btrace_insn_number (end));

  gdbarch = target_gdbarch ();

  for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
    {
      const struct btrace_insn *insn;

      insn = btrace_insn_get (&it);

      /* Print the instruction index.  */
      ui_out_field_uint (uiout, "index", btrace_insn_number (&it));
      ui_out_text (uiout, "\t");

      /* Disassembly with '/m' flag may not produce the expected result.
	 See PR gdb/11833.  */
      gdb_disassembly (gdbarch, uiout, NULL, flags, 1, insn->pc, insn->pc + 1);
    }
}
352
/* The to_insn_history method of target record-btrace.

   Browse the instruction history SIZE instructions at a time, continuing
   from the previously shown range if there is one.  A negative SIZE
   browses backwards.  */

static void
record_btrace_insn_history (struct target_ops *self, int size, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
						       "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  btinfo = require_btrace ();
  history = btinfo->insn_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("insn-history (0x%x): %d", flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
	 start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
	begin = *replay;
      else
	btrace_insn_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
	 expand in the other direction, as well, to fill up any remaining
	 context.  */
      end = begin;
      if (size < 0)
	{
	  /* We want the current position covered, as well.  */
	  covered = btrace_insn_next (&end, 1);
	  covered += btrace_insn_prev (&begin, context - covered);
	  covered += btrace_insn_next (&end, context - covered);
	}
      else
	{
	  covered = btrace_insn_next (&end, context);
	  covered += btrace_insn_prev (&begin, context - covered);
	}
    }
  else
    {
      begin = history->begin;
      end = history->end;

      DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
	     btrace_insn_number (&begin), btrace_insn_number (&end));

      /* Continue from the previously shown range in the requested
	 direction.  */
      if (size < 0)
	{
	  end = begin;
	  covered = btrace_insn_prev (&begin, context);
	}
      else
	{
	  begin = end;
	  covered = btrace_insn_next (&end, context);
	}
    }

  if (covered > 0)
    btrace_insn_history (uiout, &begin, &end, flags);
  else
    {
      if (size < 0)
	printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
	printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  /* Remember the printed range so a subsequent call continues from it.  */
  btrace_set_insn_history (btinfo, &begin, &end);
  do_cleanups (uiout_cleanup);
}
438
439/* The to_insn_history_range method of target record-btrace. */
440
441static void
4e99c6b7
TT
442record_btrace_insn_history_range (struct target_ops *self,
443 ULONGEST from, ULONGEST to, int flags)
afedecd3
MM
444{
445 struct btrace_thread_info *btinfo;
23a7fe75
MM
446 struct btrace_insn_history *history;
447 struct btrace_insn_iterator begin, end;
afedecd3
MM
448 struct cleanup *uiout_cleanup;
449 struct ui_out *uiout;
23a7fe75
MM
450 unsigned int low, high;
451 int found;
afedecd3
MM
452
453 uiout = current_uiout;
454 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
455 "insn history");
23a7fe75
MM
456 low = from;
457 high = to;
afedecd3 458
23a7fe75 459 DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);
afedecd3
MM
460
461 /* Check for wrap-arounds. */
23a7fe75 462 if (low != from || high != to)
afedecd3
MM
463 error (_("Bad range."));
464
0688d04e 465 if (high < low)
afedecd3
MM
466 error (_("Bad range."));
467
23a7fe75 468 btinfo = require_btrace ();
afedecd3 469
23a7fe75
MM
470 found = btrace_find_insn_by_number (&begin, btinfo, low);
471 if (found == 0)
472 error (_("Range out of bounds."));
afedecd3 473
23a7fe75
MM
474 found = btrace_find_insn_by_number (&end, btinfo, high);
475 if (found == 0)
0688d04e
MM
476 {
477 /* Silently truncate the range. */
478 btrace_insn_end (&end, btinfo);
479 }
480 else
481 {
482 /* We want both begin and end to be inclusive. */
483 btrace_insn_next (&end, 1);
484 }
afedecd3 485
23a7fe75
MM
486 btrace_insn_history (uiout, &begin, &end, flags);
487 btrace_set_insn_history (btinfo, &begin, &end);
afedecd3
MM
488
489 do_cleanups (uiout_cleanup);
490}
491
/* The to_insn_history_from method of target record-btrace.

   Computes the inclusive range of SIZE instructions ending (SIZE < 0) or
   starting (SIZE > 0) at instruction FROM, clamping at the trace
   boundaries, and delegates to the range method.  */

static void
record_btrace_insn_history_from (struct target_ops *self,
				 ULONGEST from, int size, int flags)
{
  ULONGEST begin, end, context;

  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  if (size < 0)
    {
      end = from;

      /* Clamp at the start of the trace instead of wrapping around.  */
      if (from < context)
	begin = 0;
      else
	begin = from - context + 1;
    }
  else
    {
      begin = from;
      end = from + context - 1;

      /* Check for wrap-around.  */
      if (end < begin)
	end = ULONGEST_MAX;
    }

  record_btrace_insn_history_range (self, begin, end, flags);
}
525
526/* Print the instruction number range for a function call history line. */
527
528static void
23a7fe75
MM
529btrace_call_history_insn_range (struct ui_out *uiout,
530 const struct btrace_function *bfun)
afedecd3 531{
7acbe133
MM
532 unsigned int begin, end, size;
533
534 size = VEC_length (btrace_insn_s, bfun->insn);
535 gdb_assert (size > 0);
afedecd3 536
23a7fe75 537 begin = bfun->insn_offset;
7acbe133 538 end = begin + size - 1;
afedecd3 539
23a7fe75 540 ui_out_field_uint (uiout, "insn begin", begin);
8710b709 541 ui_out_text (uiout, ",");
23a7fe75 542 ui_out_field_uint (uiout, "insn end", end);
afedecd3
MM
543}
544
/* Print the source line information for a function call history line.
   Prints nothing when no debug symbol is available; prints a single line
   number when the segment covers only one source line.  */

static void
btrace_call_history_src_line (struct ui_out *uiout,
			      const struct btrace_function *bfun)
{
  struct symbol *sym;
  int begin, end;

  sym = bfun->sym;
  if (sym == NULL)
    return;

  ui_out_field_string (uiout, "file",
		       symtab_to_filename_for_display (sym->symtab));

  /* The source line range covered by this function segment.  */
  begin = bfun->lbegin;
  end = bfun->lend;

  if (end < begin)
    return;

  ui_out_text (uiout, ":");
  ui_out_field_int (uiout, "min line", begin);

  if (end == begin)
    return;

  ui_out_text (uiout, ",");
  ui_out_field_int (uiout, "max line", end);
}
576
0b722aec
MM
577/* Get the name of a branch trace function. */
578
579static const char *
580btrace_get_bfun_name (const struct btrace_function *bfun)
581{
582 struct minimal_symbol *msym;
583 struct symbol *sym;
584
585 if (bfun == NULL)
586 return "??";
587
588 msym = bfun->msym;
589 sym = bfun->sym;
590
591 if (sym != NULL)
592 return SYMBOL_PRINT_NAME (sym);
593 else if (msym != NULL)
efd66ac6 594 return MSYMBOL_PRINT_NAME (msym);
0b722aec
MM
595 else
596 return "??";
597}
598
/* Disassemble a section of the recorded function trace.  The range is
   [BEGIN; END), i.e. END is exclusive.  FLAGS selects optional columns
   (instruction range, source line) and call-depth indentation.  */

static void
btrace_call_history (struct ui_out *uiout,
		     const struct btrace_thread_info *btinfo,
		     const struct btrace_call_iterator *begin,
		     const struct btrace_call_iterator *end,
		     enum record_print_flag flags)
{
  struct btrace_call_iterator it;

  DEBUG ("ftrace (0x%x): [%u; %u)", flags, btrace_call_number (begin),
	 btrace_call_number (end));

  for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
    {
      const struct btrace_function *bfun;
      struct minimal_symbol *msym;
      struct symbol *sym;

      bfun = btrace_call_get (&it);
      sym = bfun->sym;
      msym = bfun->msym;

      /* Print the function index.  */
      ui_out_field_uint (uiout, "index", bfun->number);
      ui_out_text (uiout, "\t");

      /* Indent the function name according to its call depth, combining
	 the segment's relative level with the trace-wide base level.  */
      if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
	{
	  int level = bfun->level + btinfo->level, i;

	  for (i = 0; i < level; ++i)
	    ui_out_text (uiout, " ");
	}

      /* Prefer the full symbol over the minimal symbol; "??" is omitted
	 for MI consumers.  */
      if (sym != NULL)
	ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (sym));
      else if (msym != NULL)
	ui_out_field_string (uiout, "function", MSYMBOL_PRINT_NAME (msym));
      else if (!ui_out_is_mi_like_p (uiout))
	ui_out_field_string (uiout, "function", "??");

      if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
	{
	  ui_out_text (uiout, _("\tinst "));
	  btrace_call_history_insn_range (uiout, bfun);
	}

      if ((flags & RECORD_PRINT_SRC_LINE) != 0)
	{
	  ui_out_text (uiout, _("\tat "));
	  btrace_call_history_src_line (uiout, bfun);
	}

      ui_out_text (uiout, "\n");
    }
}
657
658/* The to_call_history method of target record-btrace. */
659
660static void
5df2fcba 661record_btrace_call_history (struct target_ops *self, int size, int flags)
afedecd3
MM
662{
663 struct btrace_thread_info *btinfo;
23a7fe75
MM
664 struct btrace_call_history *history;
665 struct btrace_call_iterator begin, end;
afedecd3
MM
666 struct cleanup *uiout_cleanup;
667 struct ui_out *uiout;
23a7fe75 668 unsigned int context, covered;
afedecd3
MM
669
670 uiout = current_uiout;
671 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
672 "insn history");
afedecd3 673 context = abs (size);
afedecd3
MM
674 if (context == 0)
675 error (_("Bad record function-call-history-size."));
676
23a7fe75
MM
677 btinfo = require_btrace ();
678 history = btinfo->call_history;
679 if (history == NULL)
afedecd3 680 {
07bbe694 681 struct btrace_insn_iterator *replay;
afedecd3 682
23a7fe75 683 DEBUG ("call-history (0x%x): %d", flags, size);
afedecd3 684
07bbe694
MM
685 /* If we're replaying, we start at the replay position. Otherwise, we
686 start at the tail of the trace. */
687 replay = btinfo->replay;
688 if (replay != NULL)
689 {
690 begin.function = replay->function;
691 begin.btinfo = btinfo;
692 }
693 else
694 btrace_call_end (&begin, btinfo);
695
696 /* We start from here and expand in the requested direction. Then we
697 expand in the other direction, as well, to fill up any remaining
698 context. */
699 end = begin;
700 if (size < 0)
701 {
702 /* We want the current position covered, as well. */
703 covered = btrace_call_next (&end, 1);
704 covered += btrace_call_prev (&begin, context - covered);
705 covered += btrace_call_next (&end, context - covered);
706 }
707 else
708 {
709 covered = btrace_call_next (&end, context);
710 covered += btrace_call_prev (&begin, context- covered);
711 }
afedecd3
MM
712 }
713 else
714 {
23a7fe75
MM
715 begin = history->begin;
716 end = history->end;
afedecd3 717
23a7fe75
MM
718 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", flags, size,
719 btrace_call_number (&begin), btrace_call_number (&end));
afedecd3 720
23a7fe75
MM
721 if (size < 0)
722 {
723 end = begin;
724 covered = btrace_call_prev (&begin, context);
725 }
726 else
727 {
728 begin = end;
729 covered = btrace_call_next (&end, context);
730 }
afedecd3
MM
731 }
732
23a7fe75 733 if (covered > 0)
8710b709 734 btrace_call_history (uiout, btinfo, &begin, &end, flags);
23a7fe75
MM
735 else
736 {
737 if (size < 0)
738 printf_unfiltered (_("At the start of the branch trace record.\n"));
739 else
740 printf_unfiltered (_("At the end of the branch trace record.\n"));
741 }
afedecd3 742
23a7fe75 743 btrace_set_call_history (btinfo, &begin, &end);
afedecd3
MM
744 do_cleanups (uiout_cleanup);
745}
746
747/* The to_call_history_range method of target record-btrace. */
748
749static void
f0d960ea
TT
750record_btrace_call_history_range (struct target_ops *self,
751 ULONGEST from, ULONGEST to, int flags)
afedecd3
MM
752{
753 struct btrace_thread_info *btinfo;
23a7fe75
MM
754 struct btrace_call_history *history;
755 struct btrace_call_iterator begin, end;
afedecd3
MM
756 struct cleanup *uiout_cleanup;
757 struct ui_out *uiout;
23a7fe75
MM
758 unsigned int low, high;
759 int found;
afedecd3
MM
760
761 uiout = current_uiout;
762 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
763 "func history");
23a7fe75
MM
764 low = from;
765 high = to;
afedecd3 766
23a7fe75 767 DEBUG ("call-history (0x%x): [%u; %u)", flags, low, high);
afedecd3
MM
768
769 /* Check for wrap-arounds. */
23a7fe75 770 if (low != from || high != to)
afedecd3
MM
771 error (_("Bad range."));
772
0688d04e 773 if (high < low)
afedecd3
MM
774 error (_("Bad range."));
775
23a7fe75 776 btinfo = require_btrace ();
afedecd3 777
23a7fe75
MM
778 found = btrace_find_call_by_number (&begin, btinfo, low);
779 if (found == 0)
780 error (_("Range out of bounds."));
afedecd3 781
23a7fe75
MM
782 found = btrace_find_call_by_number (&end, btinfo, high);
783 if (found == 0)
0688d04e
MM
784 {
785 /* Silently truncate the range. */
786 btrace_call_end (&end, btinfo);
787 }
788 else
789 {
790 /* We want both begin and end to be inclusive. */
791 btrace_call_next (&end, 1);
792 }
afedecd3 793
8710b709 794 btrace_call_history (uiout, btinfo, &begin, &end, flags);
23a7fe75 795 btrace_set_call_history (btinfo, &begin, &end);
afedecd3
MM
796
797 do_cleanups (uiout_cleanup);
798}
799
/* The to_call_history_from method of target record-btrace.

   Computes the inclusive range of SIZE function segments ending
   (SIZE < 0) or starting (SIZE > 0) at segment FROM, clamping at the
   trace boundaries, and delegates to the range method.  */

static void
record_btrace_call_history_from (struct target_ops *self,
				 ULONGEST from, int size, int flags)
{
  ULONGEST begin, end, context;

  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  if (size < 0)
    {
      end = from;

      /* Clamp at the start of the trace instead of wrapping around.  */
      if (from < context)
	begin = 0;
      else
	begin = from - context + 1;
    }
  else
    {
      begin = from;
      end = from + context - 1;

      /* Check for wrap-around.  */
      if (end < begin)
	end = ULONGEST_MAX;
    }

  record_btrace_call_history_range (self, begin, end, flags);
}
833
/* The to_record_is_replaying method of target record-btrace.

   Returns non-zero if any non-exited thread is currently replaying its
   recorded execution.  */

static int
record_btrace_is_replaying (struct target_ops *self)
{
  struct thread_info *tp;

  ALL_NON_EXITED_THREADS (tp)
    if (btrace_is_replaying (tp))
      return 1;

  return 0;
}
847
633785ff
MM
848/* The to_xfer_partial method of target record-btrace. */
849
9b409511 850static enum target_xfer_status
633785ff
MM
851record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
852 const char *annex, gdb_byte *readbuf,
853 const gdb_byte *writebuf, ULONGEST offset,
9b409511 854 ULONGEST len, ULONGEST *xfered_len)
633785ff
MM
855{
856 struct target_ops *t;
857
858 /* Filter out requests that don't make sense during replay. */
67b5c0c1 859 if (replay_memory_access == replay_memory_access_read_only
aef92902 860 && !record_btrace_generating_corefile
67b5c0c1 861 && record_btrace_is_replaying (ops))
633785ff
MM
862 {
863 switch (object)
864 {
865 case TARGET_OBJECT_MEMORY:
866 {
867 struct target_section *section;
868
869 /* We do not allow writing memory in general. */
870 if (writebuf != NULL)
9b409511
YQ
871 {
872 *xfered_len = len;
bc113b4e 873 return TARGET_XFER_UNAVAILABLE;
9b409511 874 }
633785ff
MM
875
876 /* We allow reading readonly memory. */
877 section = target_section_by_addr (ops, offset);
878 if (section != NULL)
879 {
880 /* Check if the section we found is readonly. */
881 if ((bfd_get_section_flags (section->the_bfd_section->owner,
882 section->the_bfd_section)
883 & SEC_READONLY) != 0)
884 {
885 /* Truncate the request to fit into this section. */
886 len = min (len, section->endaddr - offset);
887 break;
888 }
889 }
890
9b409511 891 *xfered_len = len;
bc113b4e 892 return TARGET_XFER_UNAVAILABLE;
633785ff
MM
893 }
894 }
895 }
896
897 /* Forward the request. */
e75fdfca
TT
898 ops = ops->beneath;
899 return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
900 offset, len, xfered_len);
633785ff
MM
901}
902
/* The to_insert_breakpoint method of target record-btrace.  */

static int
record_btrace_insert_breakpoint (struct target_ops *ops,
				 struct gdbarch *gdbarch,
				 struct bp_target_info *bp_tgt)
{
  volatile struct gdb_exception except;
  const char *old;
  int ret;

  /* Inserting breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  TRY_CATCH (except, RETURN_MASK_ALL)
    ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);

  /* Restore the memory access setting before possibly re-throwing.  */
  replay_memory_access = old;

  if (except.reason < 0)
    throw_exception (except);

  return ret;
}
930
/* The to_remove_breakpoint method of target record-btrace.  */

static int
record_btrace_remove_breakpoint (struct target_ops *ops,
				 struct gdbarch *gdbarch,
				 struct bp_target_info *bp_tgt)
{
  volatile struct gdb_exception except;
  const char *old;
  int ret;

  /* Removing breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  TRY_CATCH (except, RETURN_MASK_ALL)
    ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt);

  /* Restore the memory access setting before possibly re-throwing.  */
  replay_memory_access = old;

  if (except.reason < 0)
    throw_exception (except);

  return ret;
}
958
/* The to_fetch_registers method of target record-btrace.

   While replaying (and not generating a core file), only the PC register
   is available; it is supplied from the current replay instruction.
   Otherwise the request is forwarded to the target beneath.  */

static void
record_btrace_fetch_registers (struct target_ops *ops,
			       struct regcache *regcache, int regno)
{
  struct btrace_insn_iterator *replay;
  struct thread_info *tp;

  tp = find_thread_ptid (inferior_ptid);
  gdb_assert (tp != NULL);

  replay = tp->btrace.replay;
  if (replay != NULL && !record_btrace_generating_corefile)
    {
      const struct btrace_insn *insn;
      struct gdbarch *gdbarch;
      int pcreg;

      gdbarch = get_regcache_arch (regcache);
      pcreg = gdbarch_pc_regnum (gdbarch);
      if (pcreg < 0)
	return;

      /* We can only provide the PC register.  */
      if (regno >= 0 && regno != pcreg)
	return;

      insn = btrace_insn_get (replay);
      gdb_assert (insn != NULL);

      /* NOTE(review): REGNO may still be -1 (all registers) here;
	 presumably the regcache treats that as PCREG or callers only
	 request PCREG -- confirm.  */
      regcache_raw_supply (regcache, regno, &insn->pc);
    }
  else
    {
      struct target_ops *t = ops->beneath;

      t->to_fetch_registers (t, regcache, regno);
    }
}
999
1000/* The to_store_registers method of target record-btrace. */
1001
1002static void
1003record_btrace_store_registers (struct target_ops *ops,
1004 struct regcache *regcache, int regno)
1005{
1006 struct target_ops *t;
1007
aef92902 1008 if (!record_btrace_generating_corefile && record_btrace_is_replaying (ops))
1f3ef581
MM
1009 error (_("This record target does not allow writing registers."));
1010
1011 gdb_assert (may_write_registers != 0);
1012
e75fdfca
TT
1013 t = ops->beneath;
1014 t->to_store_registers (t, regcache, regno);
1f3ef581
MM
1015}
1016
1017/* The to_prepare_to_store method of target record-btrace. */
1018
1019static void
1020record_btrace_prepare_to_store (struct target_ops *ops,
1021 struct regcache *regcache)
1022{
1023 struct target_ops *t;
1024
aef92902 1025 if (!record_btrace_generating_corefile && record_btrace_is_replaying (ops))
1f3ef581
MM
1026 return;
1027
e75fdfca
TT
1028 t = ops->beneath;
1029 t->to_prepare_to_store (t, regcache);
1f3ef581
MM
1030}
1031
/* The branch trace frame cache.  Associates a frame with the branch
   trace function segment it was built from.  */

struct btrace_frame_cache
{
  /* The thread.  */
  struct thread_info *tp;

  /* The frame info.  */
  struct frame_info *frame;

  /* The branch trace function segment.  */
  const struct btrace_function *bfun;
};

/* A struct btrace_frame_cache hash table indexed by NEXT.  Entries are
   hashed and compared on their frame pointer; see bfcache_hash and
   bfcache_eq.  */

static htab_t bfcache;
1049
1050/* hash_f for htab_create_alloc of bfcache. */
1051
1052static hashval_t
1053bfcache_hash (const void *arg)
1054{
1055 const struct btrace_frame_cache *cache = arg;
1056
1057 return htab_hash_pointer (cache->frame);
1058}
1059
1060/* eq_f for htab_create_alloc of bfcache. */
1061
1062static int
1063bfcache_eq (const void *arg1, const void *arg2)
1064{
1065 const struct btrace_frame_cache *cache1 = arg1;
1066 const struct btrace_frame_cache *cache2 = arg2;
1067
1068 return cache1->frame == cache2->frame;
1069}
1070
/* Create a new btrace frame cache for FRAME.

   The cache is allocated on the frame obstack and registered in the
   bfcache hash table; FRAME must not already have an entry.  */

static struct btrace_frame_cache *
bfcache_new (struct frame_info *frame)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
  cache->frame = frame;

  slot = htab_find_slot (bfcache, cache, INSERT);
  gdb_assert (*slot == NULL);
  *slot = cache;

  return cache;
}
1088
1089/* Extract the branch trace function from a branch trace frame. */
1090
1091static const struct btrace_function *
1092btrace_get_frame_function (struct frame_info *frame)
1093{
1094 const struct btrace_frame_cache *cache;
1095 const struct btrace_function *bfun;
1096 struct btrace_frame_cache pattern;
1097 void **slot;
1098
1099 pattern.frame = frame;
1100
1101 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1102 if (slot == NULL)
1103 return NULL;
1104
1105 cache = *slot;
1106 return cache->bfun;
1107}
1108
/* Implement stop_reason method for record_btrace_frame_unwind.  */

static enum unwind_stop_reason
record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
					void **this_cache)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;

  cache = *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  /* Without a caller segment in the recorded trace, we cannot unwind
     any further.  */
  if (bfun->up == NULL)
    return UNWIND_UNAVAILABLE;

  return UNWIND_NO_REASON;
}
1127
/* Implement this_id method for record_btrace_frame_unwind.  */

static void
record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
			     struct frame_id *this_id)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;
  CORE_ADDR code, special;

  cache = *this_cache;

  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  /* Walk back to the first segment of this function so that all frames
     belonging to the same function invocation share one id.  */
  while (bfun->segment.prev != NULL)
    bfun = bfun->segment.prev;

  code = get_frame_func (this_frame);
  /* The segment number distinguishes different (e.g. recursive)
     invocations of the same function in the trace.  */
  special = bfun->number;

  /* The recorded trace contains no stack contents, so build an
     unavailable-stack frame id.  */
  *this_id = frame_id_build_unavailable_stack_special (code, special);

  DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
	 btrace_get_bfun_name (cache->bfun),
	 core_addr_to_string_nz (this_id->code_addr),
	 core_addr_to_string_nz (this_id->special_addr));
}
1156
/* Implement prev_register method for record_btrace_frame_unwind.
   Only the PC can be unwound; branch tracing records control flow, not
   register or memory contents.  */

static struct value *
record_btrace_frame_prev_register (struct frame_info *this_frame,
				   void **this_cache,
				   int regnum)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun, *caller;
  const struct btrace_insn *insn;
  struct gdbarch *gdbarch;
  CORE_ADDR pc;
  int pcreg;

  gdbarch = get_frame_arch (this_frame);
  pcreg = gdbarch_pc_regnum (gdbarch);
  if (pcreg < 0 || regnum != pcreg)
    throw_error (NOT_AVAILABLE_ERROR,
		 _("Registers are not available in btrace record history"));

  cache = *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  caller = bfun->up;
  if (caller == NULL)
    throw_error (NOT_AVAILABLE_ERROR,
		 _("No caller in btrace record history"));

  if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
    {
      /* The up link goes to the return point: use the first instruction
	 of the caller segment.  */
      insn = VEC_index (btrace_insn_s, caller->insn, 0);
      pc = insn->pc;
    }
  else
    {
      /* Otherwise use the address following the caller's last
	 instruction.  */
      insn = VEC_last (btrace_insn_s, caller->insn);
      pc = insn->pc;

      pc += gdb_insn_length (gdbarch, pc);
    }

  DEBUG ("[frame] unwound PC in %s on level %d: %s",
	 btrace_get_bfun_name (bfun), bfun->level,
	 core_addr_to_string_nz (pc));

  return frame_unwind_got_address (this_frame, regnum, pc);
}
1205
/* Implement sniffer method for record_btrace_frame_unwind.  */

static int
record_btrace_frame_sniffer (const struct frame_unwind *self,
			     struct frame_info *this_frame,
			     void **this_cache)
{
  const struct btrace_function *bfun;
  struct btrace_frame_cache *cache;
  struct thread_info *tp;
  struct frame_info *next;

  /* THIS_FRAME does not contain a reference to its thread.  */
  tp = find_thread_ptid (inferior_ptid);
  gdb_assert (tp != NULL);

  bfun = NULL;
  next = get_next_frame (this_frame);
  if (next == NULL)
    {
      /* Innermost frame: use the current replay position, if any.  */
      const struct btrace_insn_iterator *replay;

      replay = tp->btrace.replay;
      if (replay != NULL)
	bfun = replay->function;
    }
  else
    {
      /* Otherwise this frame is the caller of NEXT's function segment.
	 Tail calls are handled by the tailcall unwinder below.  */
      const struct btrace_function *callee;

      callee = btrace_get_frame_function (next);
      if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
	bfun = callee->up;
    }

  if (bfun == NULL)
    return 0;

  DEBUG ("[frame] sniffed frame for %s on level %d",
	 btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = tp;
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}
1255
/* Implement sniffer method for record_btrace_tailcall_frame_unwind.
   Claims the frame only when the next (inner) frame's function segment
   was entered via a tail call.  */

static int
record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
				      struct frame_info *this_frame,
				      void **this_cache)
{
  const struct btrace_function *bfun, *callee;
  struct btrace_frame_cache *cache;
  struct frame_info *next;

  /* A tail call cannot be the innermost frame.  */
  next = get_next_frame (this_frame);
  if (next == NULL)
    return 0;

  callee = btrace_get_frame_function (next);
  if (callee == NULL)
    return 0;

  if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
    return 0;

  bfun = callee->up;
  if (bfun == NULL)
    return 0;

  DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
	 btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = find_thread_ptid (inferior_ptid);
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}
1293
/* Implement dealloc_cache method for record_btrace_frame_unwind.
   Remove the entry for SELF from the BFCACHE hash table; the cache
   memory itself is on the frame obstack and freed with the frame.  */

static void
record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = this_cache;

  /* The lookup only serves to assert that the entry exists.  */
  slot = htab_find_slot (bfcache, cache, NO_INSERT);
  gdb_assert (slot != NULL);

  htab_remove_elt (bfcache, cache);
}
1307
/* btrace recording does not store previous memory content, nor the stack
   frames' content.  Any unwinding would return erroneous results as the
   stack contents no longer match the changed PC value restored from
   history.  Therefore this unwinder reports any possibly unwound
   registers as <unavailable>.  */

const struct frame_unwind record_btrace_frame_unwind =
{
  NORMAL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_frame_sniffer,
  record_btrace_frame_dealloc_cache
};
1324
/* The btrace unwinder for tail-call frames.  Shares all methods with
   record_btrace_frame_unwind except the sniffer.  */

const struct frame_unwind record_btrace_tailcall_frame_unwind =
{
  TAILCALL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_tailcall_frame_sniffer,
  record_btrace_frame_dealloc_cache
};
b2f4cfde 1335
ac01945b
TT
1336/* Implement the to_get_unwinder method. */
1337
1338static const struct frame_unwind *
1339record_btrace_to_get_unwinder (struct target_ops *self)
1340{
1341 return &record_btrace_frame_unwind;
1342}
1343
1344/* Implement the to_get_tailcall_unwinder method. */
1345
1346static const struct frame_unwind *
1347record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
1348{
1349 return &record_btrace_tailcall_frame_unwind;
1350}
1351
/* Indicate that TP should be resumed according to FLAG.  */

static void
record_btrace_resume_thread (struct thread_info *tp,
			     enum btrace_thread_flag flag)
{
  struct btrace_thread_info *btinfo;

  DEBUG ("resuming %d (%s): %u", tp->num, target_pid_to_str (tp->ptid), flag);

  btinfo = &tp->btrace;

  /* A thread may only have one pending move request at a time.  */
  if ((btinfo->flags & BTHR_MOVE) != 0)
    error (_("Thread already moving."));

  /* Fetch the latest branch trace.  */
  btrace_fetch (tp);

  /* We only record the intent here; the actual move happens in
     record_btrace_wait.  */
  btinfo->flags |= flag;
}
1372
1373/* Find the thread to resume given a PTID. */
1374
1375static struct thread_info *
1376record_btrace_find_resume_thread (ptid_t ptid)
1377{
1378 struct thread_info *tp;
1379
1380 /* When asked to resume everything, we pick the current thread. */
1381 if (ptid_equal (minus_one_ptid, ptid) || ptid_is_pid (ptid))
1382 ptid = inferior_ptid;
1383
1384 return find_thread_ptid (ptid);
1385}
1386
/* Start replaying a thread.  Returns the new replay iterator (also
   stored in TP's btrace info), or NULL if TP has no trace.  */

static struct btrace_insn_iterator *
record_btrace_start_replaying (struct thread_info *tp)
{
  volatile struct gdb_exception except;
  struct btrace_insn_iterator *replay;
  struct btrace_thread_info *btinfo;
  int executing;

  btinfo = &tp->btrace;
  replay = NULL;

  /* We can't start replaying without trace.  */
  if (btinfo->begin == NULL)
    return NULL;

  /* Clear the executing flag to allow changes to the current frame.
     We are not actually running, yet.  We just started a reverse execution
     command or a record goto command.
     For the latter, EXECUTING is false and this has no effect.
     For the former, EXECUTING is true and we're in to_wait, about to
     move the thread.  Since we need to recompute the stack, we temporarily
     set EXECUTING to false.  */
  executing = is_executing (tp->ptid);
  set_executing (tp->ptid, 0);

  /* GDB stores the current frame_id when stepping in order to detect steps
     into subroutines.
     Since frames are computed differently when we're replaying, we need to
     recompute those stored frames and fix them up so we can still detect
     subroutines after we started replaying.  */
  TRY_CATCH (except, RETURN_MASK_ALL)
    {
      struct frame_info *frame;
      struct frame_id frame_id;
      int upd_step_frame_id, upd_step_stack_frame_id;

      /* The current frame without replaying - computed via normal unwind.  */
      frame = get_current_frame ();
      frame_id = get_frame_id (frame);

      /* Check if we need to update any stepping-related frame id's.  */
      upd_step_frame_id = frame_id_eq (frame_id,
				       tp->control.step_frame_id);
      upd_step_stack_frame_id = frame_id_eq (frame_id,
					     tp->control.step_stack_frame_id);

      /* We start replaying at the end of the branch trace.  This corresponds
	 to the current instruction.  */
      replay = xmalloc (sizeof (*replay));
      btrace_insn_end (replay, btinfo);

      /* We're not replaying, yet.  */
      gdb_assert (btinfo->replay == NULL);
      btinfo->replay = replay;

      /* Make sure we're not using any stale registers.  */
      registers_changed_ptid (tp->ptid);

      /* The current frame with replaying - computed via btrace unwind.  */
      frame = get_current_frame ();
      frame_id = get_frame_id (frame);

      /* Replace stepping related frames where necessary.  */
      if (upd_step_frame_id)
	tp->control.step_frame_id = frame_id;
      if (upd_step_stack_frame_id)
	tp->control.step_stack_frame_id = frame_id;
    }

  /* Restore the previous execution state.  */
  set_executing (tp->ptid, executing);

  /* On error, undo the partial setup above and re-throw.  */
  if (except.reason < 0)
    {
      xfree (btinfo->replay);
      btinfo->replay = NULL;

      registers_changed_ptid (tp->ptid);

      throw_exception (except);
    }

  return replay;
}
1473
1474/* Stop replaying a thread. */
1475
1476static void
1477record_btrace_stop_replaying (struct thread_info *tp)
1478{
1479 struct btrace_thread_info *btinfo;
1480
1481 btinfo = &tp->btrace;
1482
1483 xfree (btinfo->replay);
1484 btinfo->replay = NULL;
1485
1486 /* Make sure we're not leaving any stale registers. */
1487 registers_changed_ptid (tp->ptid);
1488}
1489
/* The to_resume method of target record-btrace.  */

static void
record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
		      enum gdb_signal signal)
{
  struct thread_info *tp, *other;
  enum btrace_thread_flag flag;

  DEBUG ("resume %s: %s", target_pid_to_str (ptid), step ? "step" : "cont");

  /* Store the execution direction of the last resume.  */
  record_btrace_resume_exec_dir = execution_direction;

  tp = record_btrace_find_resume_thread (ptid);
  if (tp == NULL)
    error (_("Cannot find thread to resume."));

  /* Stop replaying other threads if the thread to resume is not replaying.  */
  if (!btrace_is_replaying (tp) && execution_direction != EXEC_REVERSE)
    ALL_NON_EXITED_THREADS (other)
      record_btrace_stop_replaying (other);

  /* As long as we're not replaying, just forward the request.  */
  if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
    {
      ops = ops->beneath;
      return ops->to_resume (ops, ptid, step, signal);
    }

  /* Compute the btrace thread flag for the requested move.  */
  if (step == 0)
    flag = execution_direction == EXEC_REVERSE ? BTHR_RCONT : BTHR_CONT;
  else
    flag = execution_direction == EXEC_REVERSE ? BTHR_RSTEP : BTHR_STEP;

  /* At the moment, we only move a single thread.  We could also move
     all threads in parallel by single-stepping each resumed thread
     until the first runs into an event.
     When we do that, we would want to continue all other threads.
     For now, just resume one thread to not confuse to_wait.  */
  record_btrace_resume_thread (tp, flag);

  /* We just indicate the resume intent here.  The actual stepping happens in
     record_btrace_wait below.  */

  /* Async support.  */
  if (target_can_async_p ())
    {
      target_async (inferior_event_handler, 0);
      mark_async_event_handler (record_btrace_async_inferior_event_handler);
    }
}
1543
/* Find a thread to move.  Prefer the thread for PTID; otherwise pick any
   live thread with a pending move request.  Returns NULL if there is
   none.  */

static struct thread_info *
record_btrace_find_thread_to_move (ptid_t ptid)
{
  struct thread_info *tp;

  /* First check the parameter thread.  */
  tp = find_thread_ptid (ptid);
  if (tp != NULL && (tp->btrace.flags & BTHR_MOVE) != 0)
    return tp;

  /* Otherwise, find one other thread that has been resumed.  */
  ALL_NON_EXITED_THREADS (tp)
    if ((tp->btrace.flags & BTHR_MOVE) != 0)
      return tp;

  return NULL;
}
1563
1564/* Return a target_waitstatus indicating that we ran out of history. */
1565
1566static struct target_waitstatus
1567btrace_step_no_history (void)
1568{
1569 struct target_waitstatus status;
1570
1571 status.kind = TARGET_WAITKIND_NO_HISTORY;
1572
1573 return status;
1574}
1575
1576/* Return a target_waitstatus indicating that a step finished. */
1577
1578static struct target_waitstatus
1579btrace_step_stopped (void)
1580{
1581 struct target_waitstatus status;
1582
1583 status.kind = TARGET_WAITKIND_STOPPED;
1584 status.value.sig = GDB_SIGNAL_TRAP;
1585
1586 return status;
1587}
1588
1589/* Clear the record histories. */
1590
1591static void
1592record_btrace_clear_histories (struct btrace_thread_info *btinfo)
1593{
1594 xfree (btinfo->insn_history);
1595 xfree (btinfo->call_history);
1596
1597 btinfo->insn_history = NULL;
1598 btinfo->call_history = NULL;
1599}
1600
/* Step a single thread.  Consumes TP's pending move request (BTHR_*)
   and returns the resulting wait status.  */

static struct target_waitstatus
record_btrace_step_thread (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay, end;
  struct btrace_thread_info *btinfo;
  struct address_space *aspace;
  struct inferior *inf;
  enum btrace_thread_flag flags;
  unsigned int steps;

  /* We can't step without an execution history.  */
  if (btrace_is_empty (tp))
    return btrace_step_no_history ();

  btinfo = &tp->btrace;
  replay = btinfo->replay;

  /* Consume the move request so it is not processed twice.  */
  flags = btinfo->flags & BTHR_MOVE;
  btinfo->flags &= ~BTHR_MOVE;

  DEBUG ("stepping %d (%s): %u", tp->num, target_pid_to_str (tp->ptid), flags);

  switch (flags)
    {
    default:
      internal_error (__FILE__, __LINE__, _("invalid stepping type."));

    case BTHR_STEP:
      /* We're done if we're not replaying.  */
      if (replay == NULL)
	return btrace_step_no_history ();

      /* We are always able to step at least once.  */
      steps = btrace_insn_next (replay, 1);
      gdb_assert (steps == 1);

      /* Determine the end of the instruction trace.  */
      btrace_insn_end (&end, btinfo);

      /* We stop replaying if we reached the end of the trace.  */
      if (btrace_insn_cmp (replay, &end) == 0)
	record_btrace_stop_replaying (tp);

      return btrace_step_stopped ();

    case BTHR_RSTEP:
      /* Start replaying if we're not already doing so.  */
      if (replay == NULL)
	replay = record_btrace_start_replaying (tp);

      /* If we can't step any further, we reached the end of the history.  */
      steps = btrace_insn_prev (replay, 1);
      if (steps == 0)
	return btrace_step_no_history ();

      return btrace_step_stopped ();

    case BTHR_CONT:
      /* We're done if we're not replaying.  */
      if (replay == NULL)
	return btrace_step_no_history ();

      inf = find_inferior_pid (ptid_get_pid (tp->ptid));
      aspace = inf->aspace;

      /* Determine the end of the instruction trace.  */
      btrace_insn_end (&end, btinfo);

      /* Step forward until we hit a breakpoint or run out of trace.  */
      for (;;)
	{
	  const struct btrace_insn *insn;

	  /* We are always able to step at least once.  */
	  steps = btrace_insn_next (replay, 1);
	  gdb_assert (steps == 1);

	  /* We stop replaying if we reached the end of the trace.  */
	  if (btrace_insn_cmp (replay, &end) == 0)
	    {
	      record_btrace_stop_replaying (tp);
	      return btrace_step_no_history ();
	    }

	  insn = btrace_insn_get (replay);
	  gdb_assert (insn);

	  DEBUG ("stepping %d (%s) ... %s", tp->num,
		 target_pid_to_str (tp->ptid),
		 core_addr_to_string_nz (insn->pc));

	  if (breakpoint_here_p (aspace, insn->pc))
	    return btrace_step_stopped ();
	}

    case BTHR_RCONT:
      /* Start replaying if we're not already doing so.  */
      if (replay == NULL)
	replay = record_btrace_start_replaying (tp);

      inf = find_inferior_pid (ptid_get_pid (tp->ptid));
      aspace = inf->aspace;

      /* Step backward until we hit a breakpoint or run out of history.  */
      for (;;)
	{
	  const struct btrace_insn *insn;

	  /* If we can't step any further, we're done.  */
	  steps = btrace_insn_prev (replay, 1);
	  if (steps == 0)
	    return btrace_step_no_history ();

	  insn = btrace_insn_get (replay);
	  gdb_assert (insn);

	  DEBUG ("reverse-stepping %d (%s) ... %s", tp->num,
		 target_pid_to_str (tp->ptid),
		 core_addr_to_string_nz (insn->pc));

	  if (breakpoint_here_p (aspace, insn->pc))
	    return btrace_step_stopped ();
	}
    }
}
1726
/* The to_wait method of target record-btrace.  */

static ptid_t
record_btrace_wait (struct target_ops *ops, ptid_t ptid,
		    struct target_waitstatus *status, int options)
{
  struct thread_info *tp, *other;

  DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);

  /* As long as we're not replaying, just forward the request.  */
  if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
    {
      ops = ops->beneath;
      return ops->to_wait (ops, ptid, status, options);
    }

  /* Let's find a thread to move.  */
  tp = record_btrace_find_thread_to_move (ptid);
  if (tp == NULL)
    {
      DEBUG ("wait %s: no thread", target_pid_to_str (ptid));

      status->kind = TARGET_WAITKIND_IGNORE;
      return minus_one_ptid;
    }

  /* We only move a single thread.  We're not able to correlate threads.  */
  *status = record_btrace_step_thread (tp);

  /* Stop all other threads by clearing their pending move requests.  */
  if (!non_stop)
    ALL_NON_EXITED_THREADS (other)
      other->btrace.flags &= ~BTHR_MOVE;

  /* Start record histories anew from the current position.  */
  record_btrace_clear_histories (&tp->btrace);

  /* We moved the replay position but did not update registers.  */
  registers_changed_ptid (tp->ptid);

  return tp->ptid;
}
1770
/* The to_can_execute_reverse method of target record-btrace.  The btrace
   record target always supports reverse execution.  */

static int
record_btrace_can_execute_reverse (struct target_ops *self)
{
  return 1;
}
1778
1779/* The to_decr_pc_after_break method of target record-btrace. */
1780
1781static CORE_ADDR
1782record_btrace_decr_pc_after_break (struct target_ops *ops,
1783 struct gdbarch *gdbarch)
1784{
1785 /* When replaying, we do not actually execute the breakpoint instruction
1786 so there is no need to adjust the PC after hitting a breakpoint. */
1c63c994 1787 if (record_btrace_is_replaying (ops))
52834460
MM
1788 return 0;
1789
c0eca49f 1790 return ops->beneath->to_decr_pc_after_break (ops->beneath, gdbarch);
b2f4cfde
MM
1791}
1792
e2887aa3
MM
1793/* The to_find_new_threads method of target record-btrace. */
1794
1795static void
1796record_btrace_find_new_threads (struct target_ops *ops)
1797{
1798 /* Don't expect new threads if we're replaying. */
1c63c994 1799 if (record_btrace_is_replaying (ops))
e2887aa3
MM
1800 return;
1801
1802 /* Forward the request. */
e75fdfca
TT
1803 ops = ops->beneath;
1804 ops->to_find_new_threads (ops);
e2887aa3
MM
1805}
1806
1807/* The to_thread_alive method of target record-btrace. */
1808
1809static int
1810record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
1811{
1812 /* We don't add or remove threads during replay. */
1c63c994 1813 if (record_btrace_is_replaying (ops))
e2887aa3
MM
1814 return find_thread_ptid (ptid) != NULL;
1815
1816 /* Forward the request. */
e75fdfca
TT
1817 ops = ops->beneath;
1818 return ops->to_thread_alive (ops, ptid);
e2887aa3
MM
1819}
1820
/* Set the replay branch trace instruction iterator.  If IT is NULL, replay
   is stopped.  */

static void
record_btrace_set_replay (struct thread_info *tp,
			  const struct btrace_insn_iterator *it)
{
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;

  if (it == NULL || it->function == NULL)
    record_btrace_stop_replaying (tp);
  else
    {
      if (btinfo->replay == NULL)
	record_btrace_start_replaying (tp);
      else if (btrace_insn_cmp (btinfo->replay, it) == 0)
	/* The replay position is unchanged; keep the histories intact.  */
	return;

      *btinfo->replay = *it;

      /* The replay position changed; registers are stale.  */
      registers_changed_ptid (tp->ptid);
    }

  /* Start anew from the new replay position.  */
  record_btrace_clear_histories (btinfo);
}
1848
1849/* The to_goto_record_begin method of target record-btrace. */
1850
1851static void
08475817 1852record_btrace_goto_begin (struct target_ops *self)
066ce621
MM
1853{
1854 struct thread_info *tp;
1855 struct btrace_insn_iterator begin;
1856
1857 tp = require_btrace_thread ();
1858
1859 btrace_insn_begin (&begin, &tp->btrace);
1860 record_btrace_set_replay (tp, &begin);
1861
1862 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
1863}
1864
1865/* The to_goto_record_end method of target record-btrace. */
1866
1867static void
307a1b91 1868record_btrace_goto_end (struct target_ops *ops)
066ce621
MM
1869{
1870 struct thread_info *tp;
1871
1872 tp = require_btrace_thread ();
1873
1874 record_btrace_set_replay (tp, NULL);
1875
1876 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
1877}
1878
/* The to_goto_record method of target record-btrace.  Move the replay
   position to instruction number INSN.  */

static void
record_btrace_goto (struct target_ops *self, ULONGEST insn)
{
  struct thread_info *tp;
  struct btrace_insn_iterator it;
  unsigned int number;
  int found;

  number = insn;

  /* Check for wrap-arounds: INSN was truncated to unsigned int above;
     reject values that do not round-trip.  */
  if (number != insn)
    error (_("Instruction number out of range."));

  tp = require_btrace_thread ();

  found = btrace_find_insn_by_number (&it, &tp->btrace, number);
  if (found == 0)
    error (_("No such instruction."));

  record_btrace_set_replay (tp, &it);

  print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
}
1905
70ad5bff
MM
1906/* The to_execution_direction target method. */
1907
1908static enum exec_direction_kind
1909record_btrace_execution_direction (struct target_ops *self)
1910{
1911 return record_btrace_resume_exec_dir;
1912}
1913
aef92902
MM
1914/* The to_prepare_to_generate_core target method. */
1915
1916static void
1917record_btrace_prepare_to_generate_core (struct target_ops *self)
1918{
1919 record_btrace_generating_corefile = 1;
1920}
1921
1922/* The to_done_generating_core target method. */
1923
1924static void
1925record_btrace_done_generating_core (struct target_ops *self)
1926{
1927 record_btrace_generating_corefile = 0;
1928}
1929
/* Initialize the record-btrace target ops.  */

static void
init_record_btrace_ops (void)
{
  struct target_ops *ops;

  ops = &record_btrace_ops;
  ops->to_shortname = "record-btrace";
  ops->to_longname = "Branch tracing target";
  ops->to_doc = "Collect control-flow trace and provide the execution history.";
  ops->to_open = record_btrace_open;
  ops->to_close = record_btrace_close;
  /* Generic record methods shared with other record targets.  */
  ops->to_detach = record_detach;
  ops->to_disconnect = record_disconnect;
  ops->to_mourn_inferior = record_mourn_inferior;
  ops->to_kill = record_kill;
  /* History browsing.  */
  ops->to_stop_recording = record_btrace_stop_recording;
  ops->to_info_record = record_btrace_info;
  ops->to_insn_history = record_btrace_insn_history;
  ops->to_insn_history_from = record_btrace_insn_history_from;
  ops->to_insn_history_range = record_btrace_insn_history_range;
  ops->to_call_history = record_btrace_call_history;
  ops->to_call_history_from = record_btrace_call_history_from;
  ops->to_call_history_range = record_btrace_call_history_range;
  ops->to_record_is_replaying = record_btrace_is_replaying;
  /* Memory and breakpoint access during replay.  */
  ops->to_xfer_partial = record_btrace_xfer_partial;
  ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
  ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
  /* Register access during replay.  */
  ops->to_fetch_registers = record_btrace_fetch_registers;
  ops->to_store_registers = record_btrace_store_registers;
  ops->to_prepare_to_store = record_btrace_prepare_to_store;
  /* Frame unwinding while replaying.  */
  ops->to_get_unwinder = &record_btrace_to_get_unwinder;
  ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;
  /* Execution control.  */
  ops->to_resume = record_btrace_resume;
  ops->to_wait = record_btrace_wait;
  ops->to_find_new_threads = record_btrace_find_new_threads;
  ops->to_thread_alive = record_btrace_thread_alive;
  /* Replay navigation.  */
  ops->to_goto_record_begin = record_btrace_goto_begin;
  ops->to_goto_record_end = record_btrace_goto_end;
  ops->to_goto_record = record_btrace_goto;
  ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
  ops->to_decr_pc_after_break = record_btrace_decr_pc_after_break;
  ops->to_execution_direction = record_btrace_execution_direction;
  /* Core file generation.  */
  ops->to_prepare_to_generate_core = record_btrace_prepare_to_generate_core;
  ops->to_done_generating_core = record_btrace_done_generating_core;
  ops->to_stratum = record_stratum;
  ops->to_magic = OPS_MAGIC;
}
1979
1980/* Alias for "target record". */
1981
1982static void
1983cmd_record_btrace_start (char *args, int from_tty)
1984{
1985 if (args != NULL && *args != 0)
1986 error (_("Invalid argument."));
1987
1988 execute_command ("target record-btrace", from_tty);
1989}
1990
67b5c0c1
MM
1991/* The "set record btrace" command. */
1992
1993static void
1994cmd_set_record_btrace (char *args, int from_tty)
1995{
1996 cmd_show_list (set_record_btrace_cmdlist, from_tty, "");
1997}
1998
1999/* The "show record btrace" command. */
2000
2001static void
2002cmd_show_record_btrace (char *args, int from_tty)
2003{
2004 cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
2005}
2006
2007/* The "show record btrace replay-memory-access" command. */
2008
2009static void
2010cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2011 struct cmd_list_element *c, const char *value)
2012{
2013 fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
2014 replay_memory_access);
2015}
2016
void _initialize_record_btrace (void);

/* Initialize btrace commands.  */

void
_initialize_record_btrace (void)
{
  /* "record btrace" starts recording; "record b" is an alias.  */
  add_cmd ("btrace", class_obscure, cmd_record_btrace_start,
	   _("Start branch trace recording."),
	   &record_cmdlist);
  add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);

  /* "set/show record btrace" option prefixes.  */
  add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
		  _("Set record options"), &set_record_btrace_cmdlist,
		  "set record btrace ", 0, &set_record_cmdlist);

  add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
		  _("Show record options"), &show_record_btrace_cmdlist,
		  "show record btrace ", 0, &show_record_cmdlist);

  /* The replay-memory-access setting controls what memory may be read
     while replaying (see replay_memory_access_types).  */
  add_setshow_enum_cmd ("replay-memory-access", no_class,
			replay_memory_access_types, &replay_memory_access, _("\
Set what memory accesses are allowed during replay."), _("\
Show what memory accesses are allowed during replay."),
			   _("Default is READ-ONLY.\n\n\
The btrace record target does not trace data.\n\
The memory therefore corresponds to the live target and not \
to the current replay position.\n\n\
When READ-ONLY, allow accesses to read-only memory during replay.\n\
When READ-WRITE, allow accesses to read-only and read-write memory during \
replay."),
			NULL, cmd_show_replay_memory_access,
			&set_record_btrace_cmdlist,
			&show_record_btrace_cmdlist);

  init_record_btrace_ops ();
  add_target (&record_btrace_ops);

  /* The frame cache maps frames to btrace function segments; see
     bfcache_hash / bfcache_eq above.  */
  bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
			       xcalloc, xfree);
}
This page took 0.283389 seconds and 4 git commands to generate.