1/* Branch trace support for GDB, the GNU debugger.
2
3 Copyright (C) 2013-2015 Free Software Foundation, Inc.
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22#include "defs.h"
23#include "record.h"
24#include "gdbthread.h"
25#include "target.h"
26#include "gdbcmd.h"
27#include "disasm.h"
28#include "observer.h"
29#include "cli/cli-utils.h"
30#include "source.h"
31#include "ui-out.h"
32#include "symtab.h"
33#include "filenames.h"
34#include "regcache.h"
35#include "frame-unwind.h"
36#include "hashtab.h"
37#include "infrun.h"
38#include "event-loop.h"
39#include "inf-loop.h"
40
41/* The target_ops of record-btrace. */
42static struct target_ops record_btrace_ops;
43
44/* A new thread observer enabling branch tracing for the new thread. */
45static struct observer *record_btrace_thread_observer;
46
47/* Memory access types used in set/show record btrace replay-memory-access. */
48static const char replay_memory_access_read_only[] = "read-only";
49static const char replay_memory_access_read_write[] = "read-write";
50static const char *const replay_memory_access_types[] =
51{
52 replay_memory_access_read_only,
53 replay_memory_access_read_write,
54 NULL
55};
56
57/* The currently allowed replay memory access type. */
58static const char *replay_memory_access = replay_memory_access_read_only;
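/* This is changed with "set record btrace replay-memory-access"; e.g.
   "set record btrace replay-memory-access read-write" allows writes to
   target memory while replaying.  */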
59
60/* Command lists for "set/show record btrace". */
61static struct cmd_list_element *set_record_btrace_cmdlist;
62static struct cmd_list_element *show_record_btrace_cmdlist;
63
64/* The execution direction of the last resume we got. See record-full.c. */
65static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;
66
67/* The async event handler for reverse/replay execution. */
68static struct async_event_handler *record_btrace_async_inferior_event_handler;
69
70/* A flag indicating that we are currently generating a core file. */
71static int record_btrace_generating_corefile;
72
73/* The current branch trace configuration. */
74static struct btrace_config record_btrace_conf;
75
76/* Command list for "record btrace". */
77static struct cmd_list_element *record_btrace_cmdlist;
78
79/* Command lists for "set/show record btrace bts". */
80static struct cmd_list_element *set_record_btrace_bts_cmdlist;
81static struct cmd_list_element *show_record_btrace_bts_cmdlist;
82
83/* Print a record-btrace debug message. Use do ... while (0) to avoid
84 ambiguities when used in if statements. */
85
86#define DEBUG(msg, args...) \
87 do \
88 { \
89 if (record_debug != 0) \
90 fprintf_unfiltered (gdb_stdlog, \
91 "[record-btrace] " msg "\n", ##args); \
92 } \
93 while (0)
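/* For example, DEBUG ("open") prints "[record-btrace] open" to gdb_stdlog
   once debugging output has been enabled with "set debug record 1".  */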
94
95
96/* Update the branch trace for the current thread and return a pointer to its
97 thread_info.
98
99 Throws an error if there is no thread or no trace. This function never
100 returns NULL. */
101
102static struct thread_info *
103require_btrace_thread (void)
104{
105 struct thread_info *tp;
106
107 DEBUG ("require");
108
109 tp = find_thread_ptid (inferior_ptid);
110 if (tp == NULL)
111 error (_("No thread."));
112
113 btrace_fetch (tp);
114
115 if (btrace_is_empty (tp))
116 error (_("No trace."));
117
118 return tp;
119}
120
121/* Update the branch trace for the current thread and return a pointer to its
122 branch trace information struct.
123
124 Throws an error if there is no thread or no trace. This function never
125 returns NULL. */
126
127static struct btrace_thread_info *
128require_btrace (void)
129{
130 struct thread_info *tp;
131
132 tp = require_btrace_thread ();
133
134 return &tp->btrace;
135}
136
137/* Enable branch tracing for one thread. Warn on errors. */
138
139static void
140record_btrace_enable_warn (struct thread_info *tp)
141{
142 volatile struct gdb_exception error;
143
144 TRY_CATCH (error, RETURN_MASK_ERROR)
145 btrace_enable (tp, &record_btrace_conf);
146
147 if (error.message != NULL)
148 warning ("%s", error.message);
149}
150
151/* Callback function to disable branch tracing for one thread. */
152
153static void
154record_btrace_disable_callback (void *arg)
155{
156 struct thread_info *tp;
157
158 tp = arg;
159
160 btrace_disable (tp);
161}
162
163/* Enable automatic tracing of new threads. */
164
165static void
166record_btrace_auto_enable (void)
167{
168 DEBUG ("attach thread observer");
169
170 record_btrace_thread_observer
171 = observer_attach_new_thread (record_btrace_enable_warn);
172}
173
174/* Disable automatic tracing of new threads. */
175
176static void
177record_btrace_auto_disable (void)
178{
179 /* The observer may have been detached, already. */
180 if (record_btrace_thread_observer == NULL)
181 return;
182
183 DEBUG ("detach thread observer");
184
185 observer_detach_new_thread (record_btrace_thread_observer);
186 record_btrace_thread_observer = NULL;
187}
188
189/* The record-btrace async event handler function. */
190
191static void
192record_btrace_handle_async_inferior_event (gdb_client_data data)
193{
194 inferior_event_handler (INF_REG_EVENT, NULL);
195}
196
197/* The to_open method of target record-btrace. */
198
199static void
200 record_btrace_open (const char *args, int from_tty)
201{
202 struct cleanup *disable_chain;
203 struct thread_info *tp;
204
205 DEBUG ("open");
206
207 record_preopen ();
208
209 if (!target_has_execution)
210 error (_("The program is not being run."));
211
212 if (non_stop)
213 error (_("Record btrace can't debug inferior in non-stop mode."));
214
215 gdb_assert (record_btrace_thread_observer == NULL);
216
217 disable_chain = make_cleanup (null_cleanup, NULL);
218 ALL_NON_EXITED_THREADS (tp)
219 if (args == NULL || *args == 0 || number_is_in_list (args, tp->num))
220 {
221 btrace_enable (tp, &record_btrace_conf);
222
223 make_cleanup (record_btrace_disable_callback, tp);
224 }
225
226 record_btrace_auto_enable ();
227
228 push_target (&record_btrace_ops);
229
230 record_btrace_async_inferior_event_handler
231 = create_async_event_handler (record_btrace_handle_async_inferior_event,
232 NULL);
233 record_btrace_generating_corefile = 0;
234
235 observer_notify_record_changed (current_inferior (), 1);
236
237 discard_cleanups (disable_chain);
238}
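/* record_btrace_open is reached via the "record btrace" command.  It enables
   branch tracing for all existing (non-exited) threads and, through
   record_btrace_auto_enable, for threads created later.  */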
239
240/* The to_stop_recording method of target record-btrace. */
241
242static void
243 record_btrace_stop_recording (struct target_ops *self)
244{
245 struct thread_info *tp;
246
247 DEBUG ("stop recording");
248
249 record_btrace_auto_disable ();
250
251 ALL_NON_EXITED_THREADS (tp)
252 if (tp->btrace.target != NULL)
253 btrace_disable (tp);
254}
255
256/* The to_close method of target record-btrace. */
257
258static void
259 record_btrace_close (struct target_ops *self)
260{
261 struct thread_info *tp;
262
263 if (record_btrace_async_inferior_event_handler != NULL)
264 delete_async_event_handler (&record_btrace_async_inferior_event_handler);
265
266 /* Make sure automatic recording gets disabled even if we did not stop
267 recording before closing the record-btrace target. */
268 record_btrace_auto_disable ();
269
270 /* We should have already stopped recording.
271 Tear down btrace in case we have not. */
272 ALL_NON_EXITED_THREADS (tp)
273 btrace_teardown (tp);
274}
275
276/* The to_async method of target record-btrace. */
277
278static void
279record_btrace_async (struct target_ops *ops,
280 void (*callback) (enum inferior_event_type event_type,
281 void *context),
282 void *context)
283{
284 if (callback != NULL)
285 mark_async_event_handler (record_btrace_async_inferior_event_handler);
286 else
287 clear_async_event_handler (record_btrace_async_inferior_event_handler);
288
289 ops->beneath->to_async (ops->beneath, callback, context);
290}
291
292/* Scale down *SIZE to an exact binary unit, if possible, and return the corresponding human readable size suffix. */
293
294static const char *
295record_btrace_adjust_size (unsigned int *size)
296{
297 unsigned int sz;
298
299 sz = *size;
300
301 if ((sz & ((1u << 30) - 1)) == 0)
302 {
303 *size = sz >> 30;
304 return "GB";
305 }
306 else if ((sz & ((1u << 20) - 1)) == 0)
307 {
308 *size = sz >> 20;
309 return "MB";
310 }
311 else if ((sz & ((1u << 10) - 1)) == 0)
312 {
313 *size = sz >> 10;
314 return "kB";
315 }
316 else
317 return "";
318}
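/* For example, a buffer size of 5 * 1024 * 1024 is adjusted to 5 with suffix
   "MB", while a size that is not an exact multiple of a binary unit is left
   unchanged and gets an empty suffix, i.e. it is printed in bytes.  */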
319
320/* Print a BTS configuration. */
321
322static void
323record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
324{
325 const char *suffix;
326 unsigned int size;
327
328 size = conf->size;
329 if (size > 0)
330 {
331 suffix = record_btrace_adjust_size (&size);
332 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
333 }
334}
335
336/* Print a branch tracing configuration. */
337
338static void
339record_btrace_print_conf (const struct btrace_config *conf)
340{
341 printf_unfiltered (_("Recording format: %s.\n"),
342 btrace_format_string (conf->format));
343
344 switch (conf->format)
345 {
346 case BTRACE_FORMAT_NONE:
347 return;
348
349 case BTRACE_FORMAT_BTS:
350 record_btrace_print_bts_conf (&conf->bts);
351 return;
352 }
353
354 internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
355}
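/* With the BTS format, "info record" thus shows the recording format line
   followed by a line such as "Buffer size: 64kB.".  */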
356
357/* The to_info_record method of target record-btrace. */
358
359static void
360 record_btrace_info (struct target_ops *self)
361{
362 struct btrace_thread_info *btinfo;
363 const struct btrace_config *conf;
364 struct thread_info *tp;
365 unsigned int insns, calls, gaps;
366
367 DEBUG ("info");
368
369 tp = find_thread_ptid (inferior_ptid);
370 if (tp == NULL)
371 error (_("No thread."));
372
373 btinfo = &tp->btrace;
374
375 conf = btrace_conf (btinfo);
376 if (conf != NULL)
377 record_btrace_print_conf (conf);
378
379 btrace_fetch (tp);
380
381 insns = 0;
382 calls = 0;
383 gaps = 0;
384
385 if (!btrace_is_empty (tp))
386 {
387 struct btrace_call_iterator call;
388 struct btrace_insn_iterator insn;
389
390 btrace_call_end (&call, btinfo);
391 btrace_call_prev (&call, 1);
392 calls = btrace_call_number (&call);
393
394 btrace_insn_end (&insn, btinfo);
395
396 insns = btrace_insn_number (&insn);
397 if (insns != 0)
398 {
399 /* The last instruction does not really belong to the trace. */
400 insns -= 1;
401 }
402 else
403 {
404 unsigned int steps;
405
406 /* Skip gaps at the end. */
407 do
408 {
409 steps = btrace_insn_prev (&insn, 1);
410 if (steps == 0)
411 break;
412
413 insns = btrace_insn_number (&insn);
414 }
415 while (insns == 0);
416 }
417
418 gaps = btinfo->ngaps;
419 }
420
421 printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
422 "for thread %d (%s).\n"), insns, calls, gaps,
423 tp->num, target_pid_to_str (tp->ptid));
424
425 if (btrace_is_replaying (tp))
426 printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
427 btrace_insn_number (btinfo->replay));
428}
429
430/* Print a decode error. */
431
432static void
433btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
434 enum btrace_format format)
435{
436 const char *errstr;
437 int is_error;
438
439 errstr = _("unknown");
440 is_error = 1;
441
442 switch (format)
443 {
444 default:
445 break;
446
447 case BTRACE_FORMAT_BTS:
448 switch (errcode)
449 {
450 default:
451 break;
452
453 case BDE_BTS_OVERFLOW:
454 errstr = _("instruction overflow");
455 break;
456
457 case BDE_BTS_INSN_SIZE:
458 errstr = _("unknown instruction");
459 break;
460 }
461 break;
462 }
463
464 ui_out_text (uiout, _("["));
465 if (is_error)
466 {
467 ui_out_text (uiout, _("decode error ("));
468 ui_out_field_int (uiout, "errcode", errcode);
469 ui_out_text (uiout, _("): "));
470 }
471 ui_out_text (uiout, errstr);
472 ui_out_text (uiout, _("]\n"));
473}
474
475/* Print an unsigned int. */
476
477static void
478ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
479{
480 ui_out_field_fmt (uiout, fld, "%u", val);
481}
482
483/* Disassemble a section of the recorded instruction trace. */
484
485static void
486btrace_insn_history (struct ui_out *uiout,
487 const struct btrace_thread_info *btinfo,
488 const struct btrace_insn_iterator *begin,
489 const struct btrace_insn_iterator *end, int flags)
490{
491 struct gdbarch *gdbarch;
492 struct btrace_insn_iterator it;
493
494 DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
495 btrace_insn_number (end));
496
497 gdbarch = target_gdbarch ();
498
499 for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
500 {
501 const struct btrace_insn *insn;
502
503 insn = btrace_insn_get (&it);
504
505 /* A NULL instruction indicates a gap in the trace. */
506 if (insn == NULL)
507 {
508 const struct btrace_config *conf;
509
510 conf = btrace_conf (btinfo);
511
512 /* We have trace so we must have a configuration. */
513 gdb_assert (conf != NULL);
514
515 btrace_ui_out_decode_error (uiout, it.function->errcode,
516 conf->format);
517 }
518 else
519 {
520 /* Print the instruction index. */
521 ui_out_field_uint (uiout, "index", btrace_insn_number (&it));
522 ui_out_text (uiout, "\t");
523
524 /* Disassembly with '/m' flag may not produce the expected result.
525 See PR gdb/11833. */
526 gdb_disassembly (gdbarch, uiout, NULL, flags, 1, insn->pc,
527 insn->pc + 1);
528 }
529 }
530}
531
532/* The to_insn_history method of target record-btrace. */
533
534static void
535 record_btrace_insn_history (struct target_ops *self, int size, int flags)
536{
537 struct btrace_thread_info *btinfo;
538 struct btrace_insn_history *history;
539 struct btrace_insn_iterator begin, end;
540 struct cleanup *uiout_cleanup;
541 struct ui_out *uiout;
23a7fe75 542 unsigned int context, covered;
543
544 uiout = current_uiout;
545 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
546 "insn history");
547 context = abs (size);
548 if (context == 0)
549 error (_("Bad record instruction-history-size."));
550
551 btinfo = require_btrace ();
552 history = btinfo->insn_history;
553 if (history == NULL)
554 {
555 struct btrace_insn_iterator *replay;
556
557 DEBUG ("insn-history (0x%x): %d", flags, size);
558
559 /* If we're replaying, we start at the replay position. Otherwise, we
560 start at the tail of the trace. */
561 replay = btinfo->replay;
562 if (replay != NULL)
563 begin = *replay;
564 else
565 btrace_insn_end (&begin, btinfo);
566
567 /* We start from here and expand in the requested direction. Then we
568 expand in the other direction, as well, to fill up any remaining
569 context. */
570 end = begin;
571 if (size < 0)
572 {
573 /* We want the current position covered, as well. */
574 covered = btrace_insn_next (&end, 1);
575 covered += btrace_insn_prev (&begin, context - covered);
576 covered += btrace_insn_next (&end, context - covered);
577 }
578 else
579 {
580 covered = btrace_insn_next (&end, context);
581 covered += btrace_insn_prev (&begin, context - covered);
582 }
583 }
584 else
585 {
586 begin = history->begin;
587 end = history->end;
588
589 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
590 btrace_insn_number (&begin), btrace_insn_number (&end));
591
592 if (size < 0)
593 {
594 end = begin;
595 covered = btrace_insn_prev (&begin, context);
596 }
597 else
598 {
599 begin = end;
600 covered = btrace_insn_next (&end, context);
601 }
602 }
603
604 if (covered > 0)
605 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
606 else
607 {
608 if (size < 0)
609 printf_unfiltered (_("At the start of the branch trace record.\n"));
610 else
611 printf_unfiltered (_("At the end of the branch trace record.\n"));
612 }
613
614 btrace_set_insn_history (btinfo, &begin, &end);
615 do_cleanups (uiout_cleanup);
616}
617
618/* The to_insn_history_range method of target record-btrace. */
619
620static void
621record_btrace_insn_history_range (struct target_ops *self,
622 ULONGEST from, ULONGEST to, int flags)
623{
624 struct btrace_thread_info *btinfo;
625 struct btrace_insn_history *history;
626 struct btrace_insn_iterator begin, end;
627 struct cleanup *uiout_cleanup;
628 struct ui_out *uiout;
629 unsigned int low, high;
630 int found;
631
632 uiout = current_uiout;
633 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
634 "insn history");
635 low = from;
636 high = to;
637
638 DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);
639
640 /* Check for wrap-arounds. */
641 if (low != from || high != to)
642 error (_("Bad range."));
643
644 if (high < low)
645 error (_("Bad range."));
646
647 btinfo = require_btrace ();
648
649 found = btrace_find_insn_by_number (&begin, btinfo, low);
650 if (found == 0)
651 error (_("Range out of bounds."));
652
653 found = btrace_find_insn_by_number (&end, btinfo, high);
654 if (found == 0)
655 {
656 /* Silently truncate the range. */
657 btrace_insn_end (&end, btinfo);
658 }
659 else
660 {
661 /* We want both begin and end to be inclusive. */
662 btrace_insn_next (&end, 1);
663 }
664
665 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
666 btrace_set_insn_history (btinfo, &begin, &end);
667
668 do_cleanups (uiout_cleanup);
669}
670
671/* The to_insn_history_from method of target record-btrace. */
672
673static void
674record_btrace_insn_history_from (struct target_ops *self,
675 ULONGEST from, int size, int flags)
676{
677 ULONGEST begin, end, context;
678
679 context = abs (size);
680 if (context == 0)
681 error (_("Bad record instruction-history-size."));
682
683 if (size < 0)
684 {
685 end = from;
686
687 if (from < context)
688 begin = 0;
689 else
690 begin = from - context + 1;
691 }
692 else
693 {
694 begin = from;
695 end = from + context - 1;
696
697 /* Check for wrap-around. */
698 if (end < begin)
699 end = ULONGEST_MAX;
700 }
701
702 record_btrace_insn_history_range (self, begin, end, flags);
703}
704
705/* Print the instruction number range for a function call history line. */
706
707static void
708btrace_call_history_insn_range (struct ui_out *uiout,
709 const struct btrace_function *bfun)
710{
711 unsigned int begin, end, size;
712
713 size = VEC_length (btrace_insn_s, bfun->insn);
714 gdb_assert (size > 0);
715
716 begin = bfun->insn_offset;
717 end = begin + size - 1;
718
719 ui_out_field_uint (uiout, "insn begin", begin);
720 ui_out_text (uiout, ",");
721 ui_out_field_uint (uiout, "insn end", end);
722}
723
724/* Compute the lowest and highest source line for the instructions in BFUN
725 and return them in PBEGIN and PEND.
726 Ignore instructions that can't be mapped to BFUN, e.g. instructions that
727 result from inlining or macro expansion. */
728
729static void
730btrace_compute_src_line_range (const struct btrace_function *bfun,
731 int *pbegin, int *pend)
732{
733 struct btrace_insn *insn;
734 struct symtab *symtab;
735 struct symbol *sym;
736 unsigned int idx;
737 int begin, end;
738
739 begin = INT_MAX;
740 end = INT_MIN;
741
742 sym = bfun->sym;
743 if (sym == NULL)
744 goto out;
745
746 symtab = symbol_symtab (sym);
747
748 for (idx = 0; VEC_iterate (btrace_insn_s, bfun->insn, idx, insn); ++idx)
749 {
750 struct symtab_and_line sal;
751
752 sal = find_pc_line (insn->pc, 0);
753 if (sal.symtab != symtab || sal.line == 0)
754 continue;
755
756 begin = min (begin, sal.line);
757 end = max (end, sal.line);
758 }
759
760 out:
761 *pbegin = begin;
762 *pend = end;
763}
764
765/* Print the source line information for a function call history line. */
766
767static void
768btrace_call_history_src_line (struct ui_out *uiout,
769 const struct btrace_function *bfun)
770{
771 struct symbol *sym;
23a7fe75 772 int begin, end;
afedecd3
MM
773
774 sym = bfun->sym;
775 if (sym == NULL)
776 return;
777
778 ui_out_field_string (uiout, "file",
779 symtab_to_filename_for_display (symbol_symtab (sym)));
780
781 btrace_compute_src_line_range (bfun, &begin, &end);
782 if (end < begin)
783 return;
784
785 ui_out_text (uiout, ":");
786 ui_out_field_int (uiout, "min line", begin);
787
788 if (end == begin)
789 return;
790
791 ui_out_text (uiout, ",");
792 ui_out_field_int (uiout, "max line", end);
793}
794
795/* Get the name of a branch trace function. */
796
797static const char *
798btrace_get_bfun_name (const struct btrace_function *bfun)
799{
800 struct minimal_symbol *msym;
801 struct symbol *sym;
802
803 if (bfun == NULL)
804 return "??";
805
806 msym = bfun->msym;
807 sym = bfun->sym;
808
809 if (sym != NULL)
810 return SYMBOL_PRINT_NAME (sym);
811 else if (msym != NULL)
812 return MSYMBOL_PRINT_NAME (msym);
813 else
814 return "??";
815}
816
817/* Disassemble a section of the recorded function trace. */
818
819static void
820 btrace_call_history (struct ui_out *uiout,
821 const struct btrace_thread_info *btinfo,
822 const struct btrace_call_iterator *begin,
823 const struct btrace_call_iterator *end,
824 enum record_print_flag flags)
825{
826 struct btrace_call_iterator it;
827
828 DEBUG ("ftrace (0x%x): [%u; %u)", flags, btrace_call_number (begin),
829 btrace_call_number (end));
830
831 for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
832 {
833 const struct btrace_function *bfun;
834 struct minimal_symbol *msym;
835 struct symbol *sym;
836
837 bfun = btrace_call_get (&it);
838 sym = bfun->sym;
839 msym = bfun->msym;
840
841 /* Print the function index. */
842 ui_out_field_uint (uiout, "index", bfun->number);
843 ui_out_text (uiout, "\t");
844
845 /* Indicate gaps in the trace. */
846 if (bfun->errcode != 0)
847 {
848 const struct btrace_config *conf;
849
850 conf = btrace_conf (btinfo);
851
852 /* We have trace so we must have a configuration. */
853 gdb_assert (conf != NULL);
854
855 btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);
856
857 continue;
858 }
859
860 if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
861 {
862 int level = bfun->level + btinfo->level, i;
863
864 for (i = 0; i < level; ++i)
865 ui_out_text (uiout, " ");
866 }
867
868 if (sym != NULL)
869 ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (sym));
870 else if (msym != NULL)
871 ui_out_field_string (uiout, "function", MSYMBOL_PRINT_NAME (msym));
872 else if (!ui_out_is_mi_like_p (uiout))
873 ui_out_field_string (uiout, "function", "??");
874
875 if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
876 {
877 ui_out_text (uiout, _("\tinst "));
878 btrace_call_history_insn_range (uiout, bfun);
879 }
880
881 if ((flags & RECORD_PRINT_SRC_LINE) != 0)
882 {
883 ui_out_text (uiout, _("\tat "));
884 btrace_call_history_src_line (uiout, bfun);
885 }
886
887 ui_out_text (uiout, "\n");
888 }
889}
890
891/* The to_call_history method of target record-btrace. */
892
893static void
894 record_btrace_call_history (struct target_ops *self, int size, int flags)
895{
896 struct btrace_thread_info *btinfo;
897 struct btrace_call_history *history;
898 struct btrace_call_iterator begin, end;
899 struct cleanup *uiout_cleanup;
900 struct ui_out *uiout;
23a7fe75 901 unsigned int context, covered;
902
903 uiout = current_uiout;
904 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
905 "insn history");
906 context = abs (size);
907 if (context == 0)
908 error (_("Bad record function-call-history-size."));
909
910 btinfo = require_btrace ();
911 history = btinfo->call_history;
912 if (history == NULL)
913 {
914 struct btrace_insn_iterator *replay;
915
916 DEBUG ("call-history (0x%x): %d", flags, size);
917
918 /* If we're replaying, we start at the replay position. Otherwise, we
919 start at the tail of the trace. */
920 replay = btinfo->replay;
921 if (replay != NULL)
922 {
923 begin.function = replay->function;
924 begin.btinfo = btinfo;
925 }
926 else
927 btrace_call_end (&begin, btinfo);
928
929 /* We start from here and expand in the requested direction. Then we
930 expand in the other direction, as well, to fill up any remaining
931 context. */
932 end = begin;
933 if (size < 0)
934 {
935 /* We want the current position covered, as well. */
936 covered = btrace_call_next (&end, 1);
937 covered += btrace_call_prev (&begin, context - covered);
938 covered += btrace_call_next (&end, context - covered);
939 }
940 else
941 {
942 covered = btrace_call_next (&end, context);
943 covered += btrace_call_prev (&begin, context - covered);
944 }
945 }
946 else
947 {
948 begin = history->begin;
949 end = history->end;
950
951 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", flags, size,
952 btrace_call_number (&begin), btrace_call_number (&end));
953
954 if (size < 0)
955 {
956 end = begin;
957 covered = btrace_call_prev (&begin, context);
958 }
959 else
960 {
961 begin = end;
962 covered = btrace_call_next (&end, context);
963 }
964 }
965
966 if (covered > 0)
967 btrace_call_history (uiout, btinfo, &begin, &end, flags);
968 else
969 {
970 if (size < 0)
971 printf_unfiltered (_("At the start of the branch trace record.\n"));
972 else
973 printf_unfiltered (_("At the end of the branch trace record.\n"));
974 }
975
976 btrace_set_call_history (btinfo, &begin, &end);
977 do_cleanups (uiout_cleanup);
978}
979
980/* The to_call_history_range method of target record-btrace. */
981
982static void
983record_btrace_call_history_range (struct target_ops *self,
984 ULONGEST from, ULONGEST to, int flags)
985{
986 struct btrace_thread_info *btinfo;
987 struct btrace_call_history *history;
988 struct btrace_call_iterator begin, end;
989 struct cleanup *uiout_cleanup;
990 struct ui_out *uiout;
991 unsigned int low, high;
992 int found;
993
994 uiout = current_uiout;
995 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
996 "func history");
997 low = from;
998 high = to;
999
1000 DEBUG ("call-history (0x%x): [%u; %u)", flags, low, high);
1001
1002 /* Check for wrap-arounds. */
1003 if (low != from || high != to)
1004 error (_("Bad range."));
1005
1006 if (high < low)
1007 error (_("Bad range."));
1008
1009 btinfo = require_btrace ();
1010
1011 found = btrace_find_call_by_number (&begin, btinfo, low);
1012 if (found == 0)
1013 error (_("Range out of bounds."));
1014
1015 found = btrace_find_call_by_number (&end, btinfo, high);
1016 if (found == 0)
1017 {
1018 /* Silently truncate the range. */
1019 btrace_call_end (&end, btinfo);
1020 }
1021 else
1022 {
1023 /* We want both begin and end to be inclusive. */
1024 btrace_call_next (&end, 1);
1025 }
1026
1027 btrace_call_history (uiout, btinfo, &begin, &end, flags);
1028 btrace_set_call_history (btinfo, &begin, &end);
1029
1030 do_cleanups (uiout_cleanup);
1031}
1032
1033/* The to_call_history_from method of target record-btrace. */
1034
1035static void
1036record_btrace_call_history_from (struct target_ops *self,
1037 ULONGEST from, int size, int flags)
1038{
1039 ULONGEST begin, end, context;
1040
1041 context = abs (size);
1042 if (context == 0)
1043 error (_("Bad record function-call-history-size."));
1044
1045 if (size < 0)
1046 {
1047 end = from;
1048
1049 if (from < context)
1050 begin = 0;
1051 else
1052 begin = from - context + 1;
1053 }
1054 else
1055 {
1056 begin = from;
1057 end = from + context - 1;
1058
1059 /* Check for wrap-around. */
1060 if (end < begin)
1061 end = ULONGEST_MAX;
1062 }
1063
1064 record_btrace_call_history_range (self, begin, end, flags);
1065}
1066
1067/* The to_record_is_replaying method of target record-btrace. */
1068
1069static int
1070 record_btrace_is_replaying (struct target_ops *self)
1071{
1072 struct thread_info *tp;
1073
1074 ALL_NON_EXITED_THREADS (tp)
1075 if (btrace_is_replaying (tp))
1076 return 1;
1077
1078 return 0;
1079}
1080
1081/* The to_xfer_partial method of target record-btrace. */
1082
1083 static enum target_xfer_status
1084record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
1085 const char *annex, gdb_byte *readbuf,
1086 const gdb_byte *writebuf, ULONGEST offset,
1087 ULONGEST len, ULONGEST *xfered_len)
1088{
1089 struct target_ops *t;
1090
1091 /* Filter out requests that don't make sense during replay. */
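/* While replaying, memory writes are rejected and memory reads are only
   forwarded if they target a read-only section (typically code); other memory
   requests are reported as TARGET_XFER_UNAVAILABLE so the inspected state
   stays consistent with the recorded execution.  */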
1092 if (replay_memory_access == replay_memory_access_read_only
1093 && !record_btrace_generating_corefile
1094 && record_btrace_is_replaying (ops))
1095 {
1096 switch (object)
1097 {
1098 case TARGET_OBJECT_MEMORY:
1099 {
1100 struct target_section *section;
1101
1102 /* We do not allow writing memory in general. */
1103 if (writebuf != NULL)
1104 {
1105 *xfered_len = len;
1106 return TARGET_XFER_UNAVAILABLE;
1107 }
1108
1109 /* We allow reading readonly memory. */
1110 section = target_section_by_addr (ops, offset);
1111 if (section != NULL)
1112 {
1113 /* Check if the section we found is readonly. */
1114 if ((bfd_get_section_flags (section->the_bfd_section->owner,
1115 section->the_bfd_section)
1116 & SEC_READONLY) != 0)
1117 {
1118 /* Truncate the request to fit into this section. */
1119 len = min (len, section->endaddr - offset);
1120 break;
1121 }
1122 }
1123
9b409511 1124 *xfered_len = len;
bc113b4e 1125 return TARGET_XFER_UNAVAILABLE;
633785ff
MM
1126 }
1127 }
1128 }
1129
1130 /* Forward the request. */
1131 ops = ops->beneath;
1132 return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
1133 offset, len, xfered_len);
1134}
1135
1136/* The to_insert_breakpoint method of target record-btrace. */
1137
1138static int
1139record_btrace_insert_breakpoint (struct target_ops *ops,
1140 struct gdbarch *gdbarch,
1141 struct bp_target_info *bp_tgt)
1142{
1143 volatile struct gdb_exception except;
1144 const char *old;
1145 int ret;
1146
1147 /* Inserting breakpoints requires accessing memory. Allow it for the
1148 duration of this function. */
1149 old = replay_memory_access;
1150 replay_memory_access = replay_memory_access_read_write;
1151
1152 ret = 0;
1153 TRY_CATCH (except, RETURN_MASK_ALL)
1154 ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);
1155
1156 replay_memory_access = old;
1157
1158 if (except.reason < 0)
1159 throw_exception (except);
1160
1161 return ret;
1162}
1163
1164/* The to_remove_breakpoint method of target record-btrace. */
1165
1166static int
1167record_btrace_remove_breakpoint (struct target_ops *ops,
1168 struct gdbarch *gdbarch,
1169 struct bp_target_info *bp_tgt)
1170{
1171 volatile struct gdb_exception except;
67b5c0c1
MM
1172 const char *old;
1173 int ret;
1174
1175 /* Removing breakpoints requires accessing memory. Allow it for the
1176 duration of this function. */
1177 old = replay_memory_access;
1178 replay_memory_access = replay_memory_access_read_write;
1179
1180 ret = 0;
1181 TRY_CATCH (except, RETURN_MASK_ALL)
1182 ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt);
1183
1184 replay_memory_access = old;
1185
1186 if (except.reason < 0)
1187 throw_exception (except);
1188
1189 return ret;
1190}
1191
1192/* The to_fetch_registers method of target record-btrace. */
1193
1194static void
1195record_btrace_fetch_registers (struct target_ops *ops,
1196 struct regcache *regcache, int regno)
1197{
1198 struct btrace_insn_iterator *replay;
1199 struct thread_info *tp;
1200
1201 tp = find_thread_ptid (inferior_ptid);
1202 gdb_assert (tp != NULL);
1203
1204 replay = tp->btrace.replay;
1205 if (replay != NULL && !record_btrace_generating_corefile)
1206 {
1207 const struct btrace_insn *insn;
1208 struct gdbarch *gdbarch;
1209 int pcreg;
1210
1211 gdbarch = get_regcache_arch (regcache);
1212 pcreg = gdbarch_pc_regnum (gdbarch);
1213 if (pcreg < 0)
1214 return;
1215
1216 /* We can only provide the PC register. */
1217 if (regno >= 0 && regno != pcreg)
1218 return;
1219
1220 insn = btrace_insn_get (replay);
1221 gdb_assert (insn != NULL);
1222
1223 regcache_raw_supply (regcache, regno, &insn->pc);
1224 }
1225 else
1226 {
1227 struct target_ops *t = ops->beneath;
1228
1229 t->to_fetch_registers (t, regcache, regno);
1230 }
1231}
1232
1233/* The to_store_registers method of target record-btrace. */
1234
1235static void
1236record_btrace_store_registers (struct target_ops *ops,
1237 struct regcache *regcache, int regno)
1238{
1239 struct target_ops *t;
1240
1241 if (!record_btrace_generating_corefile && record_btrace_is_replaying (ops))
1242 error (_("This record target does not allow writing registers."));
1243
1244 gdb_assert (may_write_registers != 0);
1245
1246 t = ops->beneath;
1247 t->to_store_registers (t, regcache, regno);
1248}
1249
1250/* The to_prepare_to_store method of target record-btrace. */
1251
1252static void
1253record_btrace_prepare_to_store (struct target_ops *ops,
1254 struct regcache *regcache)
1255{
1256 struct target_ops *t;
1257
1258 if (!record_btrace_generating_corefile && record_btrace_is_replaying (ops))
1259 return;
1260
1261 t = ops->beneath;
1262 t->to_prepare_to_store (t, regcache);
1263}
1264
1265/* The branch trace frame cache. */
1266
1267struct btrace_frame_cache
1268{
1269 /* The thread. */
1270 struct thread_info *tp;
1271
1272 /* The frame info. */
1273 struct frame_info *frame;
1274
1275 /* The branch trace function segment. */
1276 const struct btrace_function *bfun;
1277};
1278
1279/* A struct btrace_frame_cache hash table indexed by NEXT. */
1280
1281static htab_t bfcache;
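/* Entries are created by the frame sniffers below via bfcache_new and removed
   again in record_btrace_frame_dealloc_cache.  */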
1282
1283/* hash_f for htab_create_alloc of bfcache. */
1284
1285static hashval_t
1286bfcache_hash (const void *arg)
1287{
1288 const struct btrace_frame_cache *cache = arg;
1289
1290 return htab_hash_pointer (cache->frame);
1291}
1292
1293/* eq_f for htab_create_alloc of bfcache. */
1294
1295static int
1296bfcache_eq (const void *arg1, const void *arg2)
1297{
1298 const struct btrace_frame_cache *cache1 = arg1;
1299 const struct btrace_frame_cache *cache2 = arg2;
1300
1301 return cache1->frame == cache2->frame;
1302}
1303
1304/* Create a new btrace frame cache. */
1305
1306static struct btrace_frame_cache *
1307bfcache_new (struct frame_info *frame)
1308{
1309 struct btrace_frame_cache *cache;
1310 void **slot;
1311
1312 cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
1313 cache->frame = frame;
1314
1315 slot = htab_find_slot (bfcache, cache, INSERT);
1316 gdb_assert (*slot == NULL);
1317 *slot = cache;
1318
1319 return cache;
1320}
1321
1322/* Extract the branch trace function from a branch trace frame. */
1323
1324static const struct btrace_function *
1325btrace_get_frame_function (struct frame_info *frame)
1326{
1327 const struct btrace_frame_cache *cache;
1328 const struct btrace_function *bfun;
1329 struct btrace_frame_cache pattern;
1330 void **slot;
1331
1332 pattern.frame = frame;
1333
1334 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1335 if (slot == NULL)
1336 return NULL;
1337
1338 cache = *slot;
1339 return cache->bfun;
1340}
1341
1342/* Implement stop_reason method for record_btrace_frame_unwind. */
1343
1344static enum unwind_stop_reason
1345record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1346 void **this_cache)
1347{
1348 const struct btrace_frame_cache *cache;
1349 const struct btrace_function *bfun;
1350
1351 cache = *this_cache;
1352 bfun = cache->bfun;
1353 gdb_assert (bfun != NULL);
1354
1355 if (bfun->up == NULL)
1356 return UNWIND_UNAVAILABLE;
1357
1358 return UNWIND_NO_REASON;
1359}
1360
1361/* Implement this_id method for record_btrace_frame_unwind. */
1362
1363static void
1364record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
1365 struct frame_id *this_id)
1366{
1367 const struct btrace_frame_cache *cache;
1368 const struct btrace_function *bfun;
1369 CORE_ADDR code, special;
1370
1371 cache = *this_cache;
1372
1373 bfun = cache->bfun;
1374 gdb_assert (bfun != NULL);
1375
1376 while (bfun->segment.prev != NULL)
1377 bfun = bfun->segment.prev;
1378
1379 code = get_frame_func (this_frame);
1380 special = bfun->number;
1381
1382 *this_id = frame_id_build_unavailable_stack_special (code, special);
1383
1384 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1385 btrace_get_bfun_name (cache->bfun),
1386 core_addr_to_string_nz (this_id->code_addr),
1387 core_addr_to_string_nz (this_id->special_addr));
1388}
1389
1390/* Implement prev_register method for record_btrace_frame_unwind. */
1391
1392static struct value *
1393record_btrace_frame_prev_register (struct frame_info *this_frame,
1394 void **this_cache,
1395 int regnum)
1396{
1397 const struct btrace_frame_cache *cache;
1398 const struct btrace_function *bfun, *caller;
1399 const struct btrace_insn *insn;
1400 struct gdbarch *gdbarch;
1401 CORE_ADDR pc;
1402 int pcreg;
1403
1404 gdbarch = get_frame_arch (this_frame);
1405 pcreg = gdbarch_pc_regnum (gdbarch);
1406 if (pcreg < 0 || regnum != pcreg)
1407 throw_error (NOT_AVAILABLE_ERROR,
1408 _("Registers are not available in btrace record history"));
1409
1410 cache = *this_cache;
1411 bfun = cache->bfun;
1412 gdb_assert (bfun != NULL);
1413
1414 caller = bfun->up;
1415 if (caller == NULL)
1416 throw_error (NOT_AVAILABLE_ERROR,
1417 _("No caller in btrace record history"));
1418
1419 if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
1420 {
1421 insn = VEC_index (btrace_insn_s, caller->insn, 0);
1422 pc = insn->pc;
1423 }
1424 else
1425 {
1426 insn = VEC_last (btrace_insn_s, caller->insn);
1427 pc = insn->pc;
1428
1429 pc += gdb_insn_length (gdbarch, pc);
1430 }
1431
1432 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1433 btrace_get_bfun_name (bfun), bfun->level,
1434 core_addr_to_string_nz (pc));
1435
1436 return frame_unwind_got_address (this_frame, regnum, pc);
1437}
1438
1439/* Implement sniffer method for record_btrace_frame_unwind. */
1440
1441static int
1442record_btrace_frame_sniffer (const struct frame_unwind *self,
1443 struct frame_info *this_frame,
1444 void **this_cache)
1445{
1446 const struct btrace_function *bfun;
1447 struct btrace_frame_cache *cache;
1448 struct thread_info *tp;
1449 struct frame_info *next;
1450
1451 /* THIS_FRAME does not contain a reference to its thread. */
1452 tp = find_thread_ptid (inferior_ptid);
1453 gdb_assert (tp != NULL);
1454
0b722aec
MM
1455 bfun = NULL;
1456 next = get_next_frame (this_frame);
1457 if (next == NULL)
1458 {
1459 const struct btrace_insn_iterator *replay;
1460
1461 replay = tp->btrace.replay;
1462 if (replay != NULL)
1463 bfun = replay->function;
1464 }
1465 else
1466 {
1467 const struct btrace_function *callee;
1468
1469 callee = btrace_get_frame_function (next);
1470 if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1471 bfun = callee->up;
1472 }
1473
1474 if (bfun == NULL)
1475 return 0;
1476
1477 DEBUG ("[frame] sniffed frame for %s on level %d",
1478 btrace_get_bfun_name (bfun), bfun->level);
1479
1480 /* This is our frame. Initialize the frame cache. */
1481 cache = bfcache_new (this_frame);
1482 cache->tp = tp;
1483 cache->bfun = bfun;
1484
1485 *this_cache = cache;
1486 return 1;
1487}
1488
1489/* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1490
1491static int
1492record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
1493 struct frame_info *this_frame,
1494 void **this_cache)
1495{
1496 const struct btrace_function *bfun, *callee;
1497 struct btrace_frame_cache *cache;
1498 struct frame_info *next;
1499
1500 next = get_next_frame (this_frame);
1501 if (next == NULL)
1502 return 0;
1503
1504 callee = btrace_get_frame_function (next);
1505 if (callee == NULL)
1506 return 0;
1507
1508 if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1509 return 0;
1510
1511 bfun = callee->up;
1512 if (bfun == NULL)
1513 return 0;
1514
1515 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1516 btrace_get_bfun_name (bfun), bfun->level);
1517
1518 /* This is our frame. Initialize the frame cache. */
1519 cache = bfcache_new (this_frame);
1520 cache->tp = find_thread_ptid (inferior_ptid);
1521 cache->bfun = bfun;
1522
1523 *this_cache = cache;
1524 return 1;
1525}
1526
1527static void
1528record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
1529{
1530 struct btrace_frame_cache *cache;
1531 void **slot;
1532
1533 cache = this_cache;
1534
1535 slot = htab_find_slot (bfcache, cache, NO_INSERT);
1536 gdb_assert (slot != NULL);
1537
1538 htab_remove_elt (bfcache, cache);
1539}
1540
1541/* btrace recording does not store previous memory content, nor the stack
1542 frames' content. Any unwinding would return erroneous results as the stack
1543 contents no longer match the changed PC value restored from history.
1544 Therefore this unwinder reports any possibly unwound registers as
1545 <unavailable>. */
1546
1547 const struct frame_unwind record_btrace_frame_unwind =
1548{
1549 NORMAL_FRAME,
1550 record_btrace_frame_unwind_stop_reason,
1551 record_btrace_frame_this_id,
1552 record_btrace_frame_prev_register,
1553 NULL,
1554 record_btrace_frame_sniffer,
1555 record_btrace_frame_dealloc_cache
1556};
1557
1558const struct frame_unwind record_btrace_tailcall_frame_unwind =
1559{
1560 TAILCALL_FRAME,
1561 record_btrace_frame_unwind_stop_reason,
1562 record_btrace_frame_this_id,
1563 record_btrace_frame_prev_register,
1564 NULL,
1565 record_btrace_tailcall_frame_sniffer,
1566 record_btrace_frame_dealloc_cache
1567};
1568
1569/* Implement the to_get_unwinder method. */
1570
1571static const struct frame_unwind *
1572record_btrace_to_get_unwinder (struct target_ops *self)
1573{
1574 return &record_btrace_frame_unwind;
1575}
1576
1577/* Implement the to_get_tailcall_unwinder method. */
1578
1579static const struct frame_unwind *
1580record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
1581{
1582 return &record_btrace_tailcall_frame_unwind;
1583}
1584
1585/* Indicate that TP should be resumed according to FLAG. */
1586
1587static void
1588record_btrace_resume_thread (struct thread_info *tp,
1589 enum btrace_thread_flag flag)
1590{
1591 struct btrace_thread_info *btinfo;
1592
1593 DEBUG ("resuming %d (%s): %u", tp->num, target_pid_to_str (tp->ptid), flag);
1594
1595 btinfo = &tp->btrace;
1596
1597 if ((btinfo->flags & BTHR_MOVE) != 0)
1598 error (_("Thread already moving."));
1599
1600 /* Fetch the latest branch trace. */
1601 btrace_fetch (tp);
1602
1603 btinfo->flags |= flag;
1604}
1605
1606/* Find the thread to resume given a PTID. */
1607
1608static struct thread_info *
1609record_btrace_find_resume_thread (ptid_t ptid)
1610{
1611 struct thread_info *tp;
1612
1613 /* When asked to resume everything, we pick the current thread. */
1614 if (ptid_equal (minus_one_ptid, ptid) || ptid_is_pid (ptid))
1615 ptid = inferior_ptid;
1616
1617 return find_thread_ptid (ptid);
1618}
1619
1620/* Start replaying a thread. */
1621
1622static struct btrace_insn_iterator *
1623record_btrace_start_replaying (struct thread_info *tp)
1624{
1625 volatile struct gdb_exception except;
1626 struct btrace_insn_iterator *replay;
1627 struct btrace_thread_info *btinfo;
1628 int executing;
1629
1630 btinfo = &tp->btrace;
1631 replay = NULL;
1632
1633 /* We can't start replaying without trace. */
1634 if (btinfo->begin == NULL)
1635 return NULL;
1636
1637 /* Clear the executing flag to allow changes to the current frame.
1638 We are not actually running, yet. We just started a reverse execution
1639 command or a record goto command.
1640 For the latter, EXECUTING is false and this has no effect.
1641 For the former, EXECUTING is true and we're in to_wait, about to
1642 move the thread. Since we need to recompute the stack, we temporarily
1643 set EXECUTING to false. */
1644 executing = is_executing (tp->ptid);
1645 set_executing (tp->ptid, 0);
1646
1647 /* GDB stores the current frame_id when stepping in order to detect steps
1648 into subroutines.
1649 Since frames are computed differently when we're replaying, we need to
1650 recompute those stored frames and fix them up so we can still detect
1651 subroutines after we started replaying. */
1652 TRY_CATCH (except, RETURN_MASK_ALL)
1653 {
1654 struct frame_info *frame;
1655 struct frame_id frame_id;
1656 int upd_step_frame_id, upd_step_stack_frame_id;
1657
1658 /* The current frame without replaying - computed via normal unwind. */
1659 frame = get_current_frame ();
1660 frame_id = get_frame_id (frame);
1661
1662 /* Check if we need to update any stepping-related frame id's. */
1663 upd_step_frame_id = frame_id_eq (frame_id,
1664 tp->control.step_frame_id);
1665 upd_step_stack_frame_id = frame_id_eq (frame_id,
1666 tp->control.step_stack_frame_id);
1667
1668 /* We start replaying at the end of the branch trace. This corresponds
1669 to the current instruction. */
1670 replay = xmalloc (sizeof (*replay));
1671 btrace_insn_end (replay, btinfo);
1672
1673 /* Skip gaps at the end of the trace. */
1674 while (btrace_insn_get (replay) == NULL)
1675 {
1676 unsigned int steps;
1677
1678 steps = btrace_insn_prev (replay, 1);
1679 if (steps == 0)
1680 error (_("No trace."));
1681 }
1682
1683 /* We're not replaying, yet. */
1684 gdb_assert (btinfo->replay == NULL);
1685 btinfo->replay = replay;
1686
1687 /* Make sure we're not using any stale registers. */
1688 registers_changed_ptid (tp->ptid);
1689
1690 /* The current frame with replaying - computed via btrace unwind. */
1691 frame = get_current_frame ();
1692 frame_id = get_frame_id (frame);
1693
1694 /* Replace stepping related frames where necessary. */
1695 if (upd_step_frame_id)
1696 tp->control.step_frame_id = frame_id;
1697 if (upd_step_stack_frame_id)
1698 tp->control.step_stack_frame_id = frame_id;
1699 }
1700
1701 /* Restore the previous execution state. */
1702 set_executing (tp->ptid, executing);
1703
1704 if (except.reason < 0)
1705 {
1706 xfree (btinfo->replay);
1707 btinfo->replay = NULL;
1708
1709 registers_changed_ptid (tp->ptid);
1710
1711 throw_exception (except);
1712 }
1713
1714 return replay;
1715}
1716
1717/* Stop replaying a thread. */
1718
1719static void
1720record_btrace_stop_replaying (struct thread_info *tp)
1721{
1722 struct btrace_thread_info *btinfo;
1723
1724 btinfo = &tp->btrace;
1725
1726 xfree (btinfo->replay);
1727 btinfo->replay = NULL;
1728
1729 /* Make sure we're not leaving any stale registers. */
1730 registers_changed_ptid (tp->ptid);
1731}
1732
1733/* The to_resume method of target record-btrace. */
1734
1735static void
1736record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
1737 enum gdb_signal signal)
1738{
1739 struct thread_info *tp, *other;
1740 enum btrace_thread_flag flag;
1741
1742 DEBUG ("resume %s: %s", target_pid_to_str (ptid), step ? "step" : "cont");
1743
1744 /* Store the execution direction of the last resume. */
1745 record_btrace_resume_exec_dir = execution_direction;
1746
1747 tp = record_btrace_find_resume_thread (ptid);
1748 if (tp == NULL)
1749 error (_("Cannot find thread to resume."));
1750
1751 /* Stop replaying other threads if the thread to resume is not replaying. */
1752 if (!btrace_is_replaying (tp) && execution_direction != EXEC_REVERSE)
1753 ALL_NON_EXITED_THREADS (other)
1754 record_btrace_stop_replaying (other);
1755
1756 /* As long as we're not replaying, just forward the request. */
1757 if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
1758 {
1759 ops = ops->beneath;
1760 return ops->to_resume (ops, ptid, step, signal);
1761 }
1762
1763 /* Compute the btrace thread flag for the requested move. */
1764 if (step == 0)
1765 flag = execution_direction == EXEC_REVERSE ? BTHR_RCONT : BTHR_CONT;
1766 else
1767 flag = execution_direction == EXEC_REVERSE ? BTHR_RSTEP : BTHR_STEP;
1768
1769 /* At the moment, we only move a single thread. We could also move
1770 all threads in parallel by single-stepping each resumed thread
1771 until the first runs into an event.
1772 When we do that, we would want to continue all other threads.
1773 For now, just resume one thread to not confuse to_wait. */
1774 record_btrace_resume_thread (tp, flag);
1775
1776 /* We just indicate the resume intent here. The actual stepping happens in
1777 record_btrace_wait below. */
1778
1779 /* Async support. */
1780 if (target_can_async_p ())
1781 {
1782 target_async (inferior_event_handler, 0);
1783 mark_async_event_handler (record_btrace_async_inferior_event_handler);
1784 }
1785}
1786
1787/* Find a thread to move. */
1788
1789static struct thread_info *
1790record_btrace_find_thread_to_move (ptid_t ptid)
1791{
1792 struct thread_info *tp;
1793
1794 /* First check the parameter thread. */
1795 tp = find_thread_ptid (ptid);
1796 if (tp != NULL && (tp->btrace.flags & BTHR_MOVE) != 0)
1797 return tp;
1798
1799 /* Otherwise, find one other thread that has been resumed. */
1800 ALL_NON_EXITED_THREADS (tp)
1801 if ((tp->btrace.flags & BTHR_MOVE) != 0)
1802 return tp;
1803
1804 return NULL;
1805}
1806
1807/* Return a target_waitstatus indicating that we ran out of history. */
1808
1809static struct target_waitstatus
1810btrace_step_no_history (void)
1811{
1812 struct target_waitstatus status;
1813
1814 status.kind = TARGET_WAITKIND_NO_HISTORY;
1815
1816 return status;
1817}
1818
1819/* Return a target_waitstatus indicating that a step finished. */
1820
1821static struct target_waitstatus
1822btrace_step_stopped (void)
1823{
1824 struct target_waitstatus status;
1825
1826 status.kind = TARGET_WAITKIND_STOPPED;
1827 status.value.sig = GDB_SIGNAL_TRAP;
1828
1829 return status;
1830}
1831
1832/* Clear the record histories. */
1833
1834static void
1835record_btrace_clear_histories (struct btrace_thread_info *btinfo)
1836{
1837 xfree (btinfo->insn_history);
1838 xfree (btinfo->call_history);
1839
1840 btinfo->insn_history = NULL;
1841 btinfo->call_history = NULL;
1842}
1843
1844/* Step a single thread. */
1845
1846static struct target_waitstatus
1847record_btrace_step_thread (struct thread_info *tp)
1848{
1849 struct btrace_insn_iterator *replay, end;
1850 struct btrace_thread_info *btinfo;
1851 struct address_space *aspace;
1852 struct inferior *inf;
1853 enum btrace_thread_flag flags;
1854 unsigned int steps;
1855
1856 /* We can't step without an execution history. */
1857 if (btrace_is_empty (tp))
1858 return btrace_step_no_history ();
1859
1860 btinfo = &tp->btrace;
1861 replay = btinfo->replay;
1862
1863 flags = btinfo->flags & BTHR_MOVE;
1864 btinfo->flags &= ~BTHR_MOVE;
1865
1866 DEBUG ("stepping %d (%s): %u", tp->num, target_pid_to_str (tp->ptid), flags);
1867
1868 switch (flags)
1869 {
1870 default:
1871 internal_error (__FILE__, __LINE__, _("invalid stepping type."));
1872
1873 case BTHR_STEP:
1874 /* We're done if we're not replaying. */
1875 if (replay == NULL)
1876 return btrace_step_no_history ();
1877
1878 /* Skip gaps during replay. */
1879 do
1880 {
1881 steps = btrace_insn_next (replay, 1);
1882 if (steps == 0)
1883 {
1884 record_btrace_stop_replaying (tp);
1885 return btrace_step_no_history ();
1886 }
1887 }
1888 while (btrace_insn_get (replay) == NULL);
1889
1890 /* Determine the end of the instruction trace. */
1891 btrace_insn_end (&end, btinfo);
1892
1893 /* We stop replaying if we reached the end of the trace. */
1894 if (btrace_insn_cmp (replay, &end) == 0)
1895 record_btrace_stop_replaying (tp);
1896
1897 return btrace_step_stopped ();
1898
1899 case BTHR_RSTEP:
1900 /* Start replaying if we're not already doing so. */
1901 if (replay == NULL)
1902 replay = record_btrace_start_replaying (tp);
1903
1904 /* If we can't step any further, we reached the end of the history.
1905 Skip gaps during replay. */
1906 do
1907 {
1908 steps = btrace_insn_prev (replay, 1);
1909 if (steps == 0)
1910 return btrace_step_no_history ();
1911
1912 }
1913 while (btrace_insn_get (replay) == NULL);
1914
1915 return btrace_step_stopped ();
1916
1917 case BTHR_CONT:
1918 /* We're done if we're not replaying. */
1919 if (replay == NULL)
1920 return btrace_step_no_history ();
1921
1922 inf = find_inferior_ptid (tp->ptid);
1923 aspace = inf->aspace;
1924
1925 /* Determine the end of the instruction trace. */
1926 btrace_insn_end (&end, btinfo);
1927
1928 for (;;)
1929 {
1930 const struct btrace_insn *insn;
1931
1932 /* Skip gaps during replay. */
1933 do
1934 {
1935 steps = btrace_insn_next (replay, 1);
1936 if (steps == 0)
1937 {
1938 record_btrace_stop_replaying (tp);
1939 return btrace_step_no_history ();
1940 }
1941
1942 insn = btrace_insn_get (replay);
1943 }
1944 while (insn == NULL);
1945
1946 /* We stop replaying if we reached the end of the trace. */
1947 if (btrace_insn_cmp (replay, &end) == 0)
1948 {
1949 record_btrace_stop_replaying (tp);
1950 return btrace_step_no_history ();
1951 }
1952
1953 DEBUG ("stepping %d (%s) ... %s", tp->num,
1954 target_pid_to_str (tp->ptid),
1955 core_addr_to_string_nz (insn->pc));
1956
1957 if (breakpoint_here_p (aspace, insn->pc))
1958 return btrace_step_stopped ();
1959 }
1960
1961 case BTHR_RCONT:
1962 /* Start replaying if we're not already doing so. */
1963 if (replay == NULL)
1964 replay = record_btrace_start_replaying (tp);
1965
c9657e70 1966 inf = find_inferior_ptid (tp->ptid);
52834460
MM
1967 aspace = inf->aspace;
1968
1969 for (;;)
1970 {
1971 const struct btrace_insn *insn;
1972
31fd9caa
MM
1973 /* If we can't step any further, we reached the end of the history.
1974 Skip gaps during replay. */
1975 do
1976 {
1977 steps = btrace_insn_prev (replay, 1);
1978 if (steps == 0)
1979 return btrace_step_no_history ();
52834460 1980
31fd9caa
MM
1981 insn = btrace_insn_get (replay);
1982 }
1983 while (insn == NULL);
52834460
MM
1984
1985 DEBUG ("reverse-stepping %d (%s) ... %s", tp->num,
1986 target_pid_to_str (tp->ptid),
1987 core_addr_to_string_nz (insn->pc));
1988
1989 if (breakpoint_here_p (aspace, insn->pc))
1990 return btrace_step_stopped ();
1991 }
1992 }
b2f4cfde
MM
1993}
1994
1995/* The to_wait method of target record-btrace. */
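/* While we are neither replaying nor executing in reverse, the request is
   simply forwarded to the target beneath.  Otherwise we pick a single
   thread with a pending move request, step it via
   record_btrace_step_thread, and (in all-stop mode) clear the move flag on
   all other threads, since threads cannot be correlated during replay.  */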
1996
1997static ptid_t
1998record_btrace_wait (struct target_ops *ops, ptid_t ptid,
1999 struct target_waitstatus *status, int options)
2000{
52834460
MM
2001 struct thread_info *tp, *other;
2002
2003 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);
2004
b2f4cfde 2005 /* As long as we're not replaying, just forward the request. */
1c63c994 2006 if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
b2f4cfde 2007 {
e75fdfca
TT
2008 ops = ops->beneath;
2009 return ops->to_wait (ops, ptid, status, options);
b2f4cfde
MM
2010 }
2011
52834460
MM
2012 /* Let's find a thread to move. */
2013 tp = record_btrace_find_thread_to_move (ptid);
2014 if (tp == NULL)
2015 {
2016 DEBUG ("wait %s: no thread", target_pid_to_str (ptid));
2017
2018 status->kind = TARGET_WAITKIND_IGNORE;
2019 return minus_one_ptid;
2020 }
2021
2022 /* We only move a single thread. We're not able to correlate threads. */
2023 *status = record_btrace_step_thread (tp);
2024
2025 /* Stop all other threads. */
2026 if (!non_stop)
034f788c 2027 ALL_NON_EXITED_THREADS (other)
52834460
MM
2028 other->btrace.flags &= ~BTHR_MOVE;
2029
2030 /* Start record histories anew from the current position. */
2031 record_btrace_clear_histories (&tp->btrace);
2032
2033 /* We moved the replay position but did not update registers. */
2034 registers_changed_ptid (tp->ptid);
2035
2036 return tp->ptid;
2037}
2038
2039/* The to_can_execute_reverse method of target record-btrace. */
2040
2041static int
19db3e69 2042record_btrace_can_execute_reverse (struct target_ops *self)
52834460
MM
2043{
2044 return 1;
2045}
2046
2047/* The to_decr_pc_after_break method of target record-btrace. */
2048
2049static CORE_ADDR
2050record_btrace_decr_pc_after_break (struct target_ops *ops,
2051 struct gdbarch *gdbarch)
2052{
2053 /* When replaying, we do not actually execute the breakpoint instruction
2054 so there is no need to adjust the PC after hitting a breakpoint. */
1c63c994 2055 if (record_btrace_is_replaying (ops))
52834460
MM
2056 return 0;
2057
c0eca49f 2058 return ops->beneath->to_decr_pc_after_break (ops->beneath, gdbarch);
b2f4cfde
MM
2059}
2060
e8032dde 2061/* The to_update_thread_list method of target record-btrace. */
e2887aa3
MM
2062
2063static void
e8032dde 2064record_btrace_update_thread_list (struct target_ops *ops)
e2887aa3 2065{
e8032dde 2066 /* We don't add or remove threads during replay. */
1c63c994 2067 if (record_btrace_is_replaying (ops))
e2887aa3
MM
2068 return;
2069
2070 /* Forward the request. */
e75fdfca 2071 ops = ops->beneath;
e8032dde 2072 ops->to_update_thread_list (ops);
e2887aa3
MM
2073}
2074
2075/* The to_thread_alive method of target record-btrace. */
2076
2077static int
2078record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
2079{
2080 /* We don't add or remove threads during replay. */
1c63c994 2081 if (record_btrace_is_replaying (ops))
e2887aa3
MM
2082 return find_thread_ptid (ptid) != NULL;
2083
2084 /* Forward the request. */
e75fdfca
TT
2085 ops = ops->beneath;
2086 return ops->to_thread_alive (ops, ptid);
e2887aa3
MM
2087}
2088
066ce621
MM
2089/* Set the replay branch trace instruction iterator. If IT is NULL, replay
2090 is stopped. */
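/* Whenever the replay position actually changes, the thread's register
   cache is flushed and the recorded histories are cleared, so that
   "record instruction-history" and "record function-call-history" start
   anew from the new position.  */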
2091
2092static void
2093record_btrace_set_replay (struct thread_info *tp,
2094 const struct btrace_insn_iterator *it)
2095{
2096 struct btrace_thread_info *btinfo;
2097
2098 btinfo = &tp->btrace;
2099
2100 if (it == NULL || it->function == NULL)
52834460 2101 record_btrace_stop_replaying (tp);
066ce621
MM
2102 else
2103 {
2104 if (btinfo->replay == NULL)
52834460 2105 record_btrace_start_replaying (tp);
066ce621
MM
2106 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
2107 return;
2108
2109 *btinfo->replay = *it;
52834460 2110 registers_changed_ptid (tp->ptid);
066ce621
MM
2111 }
2112
52834460
MM
2113 /* Start anew from the new replay position. */
2114 record_btrace_clear_histories (btinfo);
066ce621
MM
2115}
2116
2117/* The to_goto_record_begin method of target record-btrace. */
2118
2119static void
08475817 2120record_btrace_goto_begin (struct target_ops *self)
066ce621
MM
2121{
2122 struct thread_info *tp;
2123 struct btrace_insn_iterator begin;
2124
2125 tp = require_btrace_thread ();
2126
2127 btrace_insn_begin (&begin, &tp->btrace);
2128 record_btrace_set_replay (tp, &begin);
2129
2130 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
2131}
2132
2133/* The to_goto_record_end method of target record-btrace. */
2134
2135static void
307a1b91 2136record_btrace_goto_end (struct target_ops *ops)
066ce621
MM
2137{
2138 struct thread_info *tp;
2139
2140 tp = require_btrace_thread ();
2141
2142 record_btrace_set_replay (tp, NULL);
2143
2144 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
2145}
2146
2147/* The to_goto_record method of target record-btrace. */
2148
2149static void
606183ac 2150record_btrace_goto (struct target_ops *self, ULONGEST insn)
066ce621
MM
2151{
2152 struct thread_info *tp;
2153 struct btrace_insn_iterator it;
2154 unsigned int number;
2155 int found;
2156
2157 number = insn;
2158
2159 /* Check for wrap-arounds: NUMBER is an unsigned int, so the assignment from the ULONGEST INSN may have truncated the value. */
2160 if (number != insn)
2161 error (_("Instruction number out of range."));
2162
2163 tp = require_btrace_thread ();
2164
2165 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
2166 if (found == 0)
2167 error (_("No such instruction."));
2168
2169 record_btrace_set_replay (tp, &it);
2170
2171 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
2172}
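/* For illustration, the three methods above back the "record goto" CLI
   command (session sketch, assuming recording is active):

     (gdb) record goto begin
     (gdb) record goto 42
     (gdb) record goto end
*/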
2173
70ad5bff
MM
2174/* The to_execution_direction target method. */
2175
2176static enum exec_direction_kind
2177record_btrace_execution_direction (struct target_ops *self)
2178{
2179 return record_btrace_resume_exec_dir;
2180}
2181
aef92902
MM
2182/* The to_prepare_to_generate_core target method. */
2183
2184static void
2185record_btrace_prepare_to_generate_core (struct target_ops *self)
2186{
2187 record_btrace_generating_corefile = 1;
2188}
2189
2190/* The to_done_generating_core target method. */
2191
2192static void
2193record_btrace_done_generating_core (struct target_ops *self)
2194{
2195 record_btrace_generating_corefile = 0;
2196}
2197
afedecd3
MM
2198/* Initialize the record-btrace target ops. */
2199
2200static void
2201init_record_btrace_ops (void)
2202{
2203 struct target_ops *ops;
2204
2205 ops = &record_btrace_ops;
2206 ops->to_shortname = "record-btrace";
2207 ops->to_longname = "Branch tracing target";
2208 ops->to_doc = "Collect control-flow trace and provide the execution history.";
2209 ops->to_open = record_btrace_open;
2210 ops->to_close = record_btrace_close;
b7d2e916 2211 ops->to_async = record_btrace_async;
afedecd3
MM
2212 ops->to_detach = record_detach;
2213 ops->to_disconnect = record_disconnect;
2214 ops->to_mourn_inferior = record_mourn_inferior;
2215 ops->to_kill = record_kill;
afedecd3
MM
2216 ops->to_stop_recording = record_btrace_stop_recording;
2217 ops->to_info_record = record_btrace_info;
2218 ops->to_insn_history = record_btrace_insn_history;
2219 ops->to_insn_history_from = record_btrace_insn_history_from;
2220 ops->to_insn_history_range = record_btrace_insn_history_range;
2221 ops->to_call_history = record_btrace_call_history;
2222 ops->to_call_history_from = record_btrace_call_history_from;
2223 ops->to_call_history_range = record_btrace_call_history_range;
07bbe694 2224 ops->to_record_is_replaying = record_btrace_is_replaying;
633785ff
MM
2225 ops->to_xfer_partial = record_btrace_xfer_partial;
2226 ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
2227 ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
1f3ef581
MM
2228 ops->to_fetch_registers = record_btrace_fetch_registers;
2229 ops->to_store_registers = record_btrace_store_registers;
2230 ops->to_prepare_to_store = record_btrace_prepare_to_store;
ac01945b
TT
2231 ops->to_get_unwinder = &record_btrace_to_get_unwinder;
2232 ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;
b2f4cfde
MM
2233 ops->to_resume = record_btrace_resume;
2234 ops->to_wait = record_btrace_wait;
e8032dde 2235 ops->to_update_thread_list = record_btrace_update_thread_list;
e2887aa3 2236 ops->to_thread_alive = record_btrace_thread_alive;
066ce621
MM
2237 ops->to_goto_record_begin = record_btrace_goto_begin;
2238 ops->to_goto_record_end = record_btrace_goto_end;
2239 ops->to_goto_record = record_btrace_goto;
52834460
MM
2240 ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
2241 ops->to_decr_pc_after_break = record_btrace_decr_pc_after_break;
70ad5bff 2242 ops->to_execution_direction = record_btrace_execution_direction;
aef92902
MM
2243 ops->to_prepare_to_generate_core = record_btrace_prepare_to_generate_core;
2244 ops->to_done_generating_core = record_btrace_done_generating_core;
afedecd3
MM
2245 ops->to_stratum = record_stratum;
2246 ops->to_magic = OPS_MAGIC;
2247}
2248
f4abbc16
MM
2249/* Start recording in BTS format. */
2250
2251static void
2252cmd_record_btrace_bts_start (char *args, int from_tty)
2253{
2254 volatile struct gdb_exception exception;
2255
2256 if (args != NULL && *args != 0)
2257 error (_("Invalid argument."));
2258
2259 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2260
2261 TRY_CATCH (exception, RETURN_MASK_ALL)
2262 execute_command ("target record-btrace", from_tty);
2263
2264 if (exception.error != 0)
2265 {
2266 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2267 throw_exception (exception);
2268 }
2269}
2270
afedecd3
MM
2271/* Alias for "target record". */
2272
2273static void
2274cmd_record_btrace_start (char *args, int from_tty)
2275{
f4abbc16
MM
2276 volatile struct gdb_exception exception;
2277
afedecd3
MM
2278 if (args != NULL && *args != 0)
2279 error (_("Invalid argument."));
2280
f4abbc16
MM
2281 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2282
2283 TRY_CATCH (exception, RETURN_MASK_ALL)
2284 execute_command ("target record-btrace", from_tty);
2285
2286 if (exception.error == 0)
2287 return;
2288
2289 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2290 throw_exception (exception);
afedecd3
MM
2291}
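/* For illustration, a typical session exercising this target (a sketch;
   output omitted):

     (gdb) record btrace
     (gdb) info record
     (gdb) reverse-stepi
     (gdb) record goto begin

   Both start commands above select the BTS format; if "target
   record-btrace" throws, the configured format is reset to
   BTRACE_FORMAT_NONE before the exception is re-thrown.  */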
2292
67b5c0c1
MM
2293/* The "set record btrace" command. */
2294
2295static void
2296cmd_set_record_btrace (char *args, int from_tty)
2297{
2298 cmd_show_list (set_record_btrace_cmdlist, from_tty, "");
2299}
2300
2301/* The "show record btrace" command. */
2302
2303static void
2304cmd_show_record_btrace (char *args, int from_tty)
2305{
2306 cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
2307}
2308
2309/* The "show record btrace replay-memory-access" command. */
2310
2311static void
2312cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2313 struct cmd_list_element *c, const char *value)
2314{
2315 fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
2316 replay_memory_access);
2317}
2318
d33501a5
MM
2319/* The "set record btrace bts" command. */
2320
2321static void
2322cmd_set_record_btrace_bts (char *args, int from_tty)
2323{
2324 printf_unfiltered (_("\"set record btrace bts\" must be followed "
2325 "by an apporpriate subcommand.\n"));
2326 help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
2327 all_commands, gdb_stdout);
2328}
2329
2330/* The "show record btrace bts" command. */
2331
2332static void
2333cmd_show_record_btrace_bts (char *args, int from_tty)
2334{
2335 cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
2336}
2337
afedecd3
MM
2338void _initialize_record_btrace (void);
2339
2340/* Initialize btrace commands. */
2341
2342void
2343_initialize_record_btrace (void)
2344{
f4abbc16
MM
2345 add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
2346 _("Start branch trace recording."), &record_btrace_cmdlist,
2347 "record btrace ", 0, &record_cmdlist);
afedecd3
MM
2348 add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
2349
f4abbc16
MM
2350 add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
2351 _("\
2352Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
2353The processor stores a from/to record for each branch into a cyclic buffer.\n\
2354This format may not be available on all processors."),
2355 &record_btrace_cmdlist);
2356 add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);
2357
67b5c0c1
MM
2358 add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
2359 _("Set record options"), &set_record_btrace_cmdlist,
2360 "set record btrace ", 0, &set_record_cmdlist);
2361
2362 add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
2363 _("Show record options"), &show_record_btrace_cmdlist,
2364 "show record btrace ", 0, &show_record_cmdlist);
2365
2366 add_setshow_enum_cmd ("replay-memory-access", no_class,
2367 replay_memory_access_types, &replay_memory_access, _("\
2368Set what memory accesses are allowed during replay."), _("\
2369Show what memory accesses are allowed during replay."),
2370 _("Default is READ-ONLY.\n\n\
2371The btrace record target does not trace data.\n\
2372The memory therefore corresponds to the live target and not \
2373to the current replay position.\n\n\
2374When READ-ONLY, allow accesses to read-only memory during replay.\n\
2375When READ-WRITE, allow accesses to read-only and read-write memory during \
2376replay."),
2377 NULL, cmd_show_replay_memory_access,
2378 &set_record_btrace_cmdlist,
2379 &show_record_btrace_cmdlist);
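/* Example: "set record btrace replay-memory-access read-write" also allows
   accesses to read-write memory during replay; the default, "read-only",
   restricts replay-time accesses to read-only memory.  */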
2380
d33501a5
MM
2381 add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
2382 _("Set record btrace bts options"),
2383 &set_record_btrace_bts_cmdlist,
2384 "set record btrace bts ", 0, &set_record_btrace_cmdlist);
2385
2386 add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
2387 _("Show record btrace bts options"),
2388 &show_record_btrace_bts_cmdlist,
2389 "show record btrace bts ", 0, &show_record_btrace_cmdlist);
2390
2391 add_setshow_uinteger_cmd ("buffer-size", no_class,
2392 &record_btrace_conf.bts.size,
2393 _("Set the record/replay bts buffer size."),
2394 _("Show the record/replay bts buffer size."), _("\
2395When starting recording, request a trace buffer of this size. \
2396The actual buffer size may differ from the requested size. \
2397Use \"info record\" to see the actual buffer size.\n\n\
2398Bigger buffers allow longer recording but also take more time to process \
2399the recorded execution trace.\n\n\
2400The trace buffer size may not be changed while recording."), NULL, NULL,
2401 &set_record_btrace_bts_cmdlist,
2402 &show_record_btrace_bts_cmdlist);
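/* Example: "set record btrace bts buffer-size 131072" requests a 128 KiB
   trace buffer the next time recording is started; "info record" shows the
   buffer size actually obtained.  */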
2403
afedecd3
MM
2404 init_record_btrace_ops ();
2405 add_target (&record_btrace_ops);
0b722aec
MM
2406
2407 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
2408 xcalloc, xfree);
d33501a5
MM
2409
2410 record_btrace_conf.bts.size = 64 * 1024;
afedecd3 2411}