/* Branch trace support for GDB, the GNU debugger.

   Copyright (C) 2013-2015 Free Software Foundation, Inc.

   Contributed by Intel Corp. <markus.t.metzger@intel.com>

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "record.h"
#include "gdbthread.h"
#include "target.h"
#include "gdbcmd.h"
#include "disasm.h"
#include "observer.h"
#include "cli/cli-utils.h"
#include "source.h"
#include "ui-out.h"
#include "symtab.h"
#include "filenames.h"
#include "regcache.h"
#include "frame-unwind.h"
#include "hashtab.h"
#include "infrun.h"
#include "event-loop.h"
#include "inf-loop.h"
#include "vec.h"

/* The target_ops of record-btrace.  */
static struct target_ops record_btrace_ops;

/* A new thread observer enabling branch tracing for the new thread.  */
static struct observer *record_btrace_thread_observer;

/* Memory access types used in set/show record btrace replay-memory-access.  */
static const char replay_memory_access_read_only[] = "read-only";
static const char replay_memory_access_read_write[] = "read-write";
static const char *const replay_memory_access_types[] =
{
  replay_memory_access_read_only,
  replay_memory_access_read_write,
  NULL
};
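
/* An illustrative usage note: "set record btrace replay-memory-access
   read-write" selects the second entry above; the default is
   "read-only".  */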

/* The currently allowed replay memory access type.  */
static const char *replay_memory_access = replay_memory_access_read_only;

/* Command lists for "set/show record btrace".  */
static struct cmd_list_element *set_record_btrace_cmdlist;
static struct cmd_list_element *show_record_btrace_cmdlist;

/* The execution direction of the last resume we got.  See record-full.c.  */
static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;

/* The async event handler for reverse/replay execution.  */
static struct async_event_handler *record_btrace_async_inferior_event_handler;

/* A flag indicating that we are currently generating a core file.  */
static int record_btrace_generating_corefile;

/* The current branch trace configuration.  */
static struct btrace_config record_btrace_conf;

/* Command list for "record btrace".  */
static struct cmd_list_element *record_btrace_cmdlist;

/* Command lists for "set/show record btrace bts".  */
static struct cmd_list_element *set_record_btrace_bts_cmdlist;
static struct cmd_list_element *show_record_btrace_bts_cmdlist;

/* Command lists for "set/show record btrace pt".  */
static struct cmd_list_element *set_record_btrace_pt_cmdlist;
static struct cmd_list_element *show_record_btrace_pt_cmdlist;

/* Print a record-btrace debug message.  Use do ... while (0) to avoid
   ambiguities when used in if statements.  */

#define DEBUG(msg, args...) \
  do \
    { \
      if (record_debug != 0) \
        fprintf_unfiltered (gdb_stdlog, \
                            "[record-btrace] " msg "\n", ##args); \
    } \
  while (0)
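
/* For example, DEBUG ("resume %s", target_pid_to_str (ptid)) would print
   "[record-btrace] resume <ptid>" to gdb_stdlog, provided record debugging
   has been enabled with "set debug record 1".  */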


/* Update the branch trace for the current thread and return a pointer to its
   thread_info.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct thread_info *
require_btrace_thread (void)
{
  struct thread_info *tp;

  DEBUG ("require");

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  btrace_fetch (tp);

  if (btrace_is_empty (tp))
    error (_("No trace."));

  return tp;
}

/* Update the branch trace for the current thread and return a pointer to its
   branch trace information struct.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct btrace_thread_info *
require_btrace (void)
{
  struct thread_info *tp;

  tp = require_btrace_thread ();

  return &tp->btrace;
}

/* Enable branch tracing for one thread.  Warn on errors.  */

static void
record_btrace_enable_warn (struct thread_info *tp)
{
  TRY
    {
      btrace_enable (tp, &record_btrace_conf);
    }
  CATCH (error, RETURN_MASK_ERROR)
    {
      warning ("%s", error.message);
    }
  END_CATCH
}

/* Callback function to disable branch tracing for one thread.  */

static void
record_btrace_disable_callback (void *arg)
{
  struct thread_info *tp = (struct thread_info *) arg;

  btrace_disable (tp);
}

/* Enable automatic tracing of new threads.  */

static void
record_btrace_auto_enable (void)
{
  DEBUG ("attach thread observer");

  record_btrace_thread_observer
    = observer_attach_new_thread (record_btrace_enable_warn);
}

/* Disable automatic tracing of new threads.  */

static void
record_btrace_auto_disable (void)
{
  /* The observer may have been detached, already.  */
  if (record_btrace_thread_observer == NULL)
    return;

  DEBUG ("detach thread observer");

  observer_detach_new_thread (record_btrace_thread_observer);
  record_btrace_thread_observer = NULL;
}

/* The record-btrace async event handler function.  */

static void
record_btrace_handle_async_inferior_event (gdb_client_data data)
{
  inferior_event_handler (INF_REG_EVENT, NULL);
}

/* The to_open method of target record-btrace.  */

static void
record_btrace_open (const char *args, int from_tty)
{
  struct cleanup *disable_chain;
  struct thread_info *tp;

  DEBUG ("open");

  record_preopen ();

  if (!target_has_execution)
    error (_("The program is not being run."));

  gdb_assert (record_btrace_thread_observer == NULL);

  disable_chain = make_cleanup (null_cleanup, NULL);
  ALL_NON_EXITED_THREADS (tp)
    if (args == NULL || *args == 0 || number_is_in_list (args, tp->num))
      {
        btrace_enable (tp, &record_btrace_conf);

        make_cleanup (record_btrace_disable_callback, tp);
      }

  record_btrace_auto_enable ();

  push_target (&record_btrace_ops);

  record_btrace_async_inferior_event_handler
    = create_async_event_handler (record_btrace_handle_async_inferior_event,
                                  NULL);
  record_btrace_generating_corefile = 0;

  observer_notify_record_changed (current_inferior (), 1);

  discard_cleanups (disable_chain);
}

/* The to_stop_recording method of target record-btrace.  */

static void
record_btrace_stop_recording (struct target_ops *self)
{
  struct thread_info *tp;

  DEBUG ("stop recording");

  record_btrace_auto_disable ();

  ALL_NON_EXITED_THREADS (tp)
    if (tp->btrace.target != NULL)
      btrace_disable (tp);
}

/* The to_close method of target record-btrace.  */

static void
record_btrace_close (struct target_ops *self)
{
  struct thread_info *tp;

  if (record_btrace_async_inferior_event_handler != NULL)
    delete_async_event_handler (&record_btrace_async_inferior_event_handler);

  /* Make sure automatic recording gets disabled even if we did not stop
     recording before closing the record-btrace target.  */
  record_btrace_auto_disable ();

  /* We should have already stopped recording.
     Tear down btrace in case we have not.  */
  ALL_NON_EXITED_THREADS (tp)
    btrace_teardown (tp);
}

/* The to_async method of target record-btrace.  */

static void
record_btrace_async (struct target_ops *ops, int enable)
{
  if (enable)
    mark_async_event_handler (record_btrace_async_inferior_event_handler);
  else
    clear_async_event_handler (record_btrace_async_inferior_event_handler);

  ops->beneath->to_async (ops->beneath, enable);
}

/* Adjusts the size and returns a human readable size suffix.  */

static const char *
record_btrace_adjust_size (unsigned int *size)
{
  unsigned int sz;

  sz = *size;

  if ((sz & ((1u << 30) - 1)) == 0)
    {
      *size = sz >> 30;
      return "GB";
    }
  else if ((sz & ((1u << 20) - 1)) == 0)
    {
      *size = sz >> 20;
      return "MB";
    }
  else if ((sz & ((1u << 10) - 1)) == 0)
    {
      *size = sz >> 10;
      return "kB";
    }
  else
    return "";
}
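
/* For example, a buffer size of 2097152 (2 << 20) bytes is adjusted to 2 and
   "MB" is returned, so the caller prints "2MB"; a size that is not a
   multiple of 1024 is returned unchanged with an empty suffix.  */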

/* Print a BTS configuration.  */

static void
record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
{
  const char *suffix;
  unsigned int size;

  size = conf->size;
  if (size > 0)
    {
      suffix = record_btrace_adjust_size (&size);
      printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
    }
}

/* Print an Intel(R) Processor Trace configuration.  */

static void
record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
{
  const char *suffix;
  unsigned int size;

  size = conf->size;
  if (size > 0)
    {
      suffix = record_btrace_adjust_size (&size);
      printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
    }
}

/* Print a branch tracing configuration.  */

static void
record_btrace_print_conf (const struct btrace_config *conf)
{
  printf_unfiltered (_("Recording format: %s.\n"),
                     btrace_format_string (conf->format));

  switch (conf->format)
    {
    case BTRACE_FORMAT_NONE:
      return;

    case BTRACE_FORMAT_BTS:
      record_btrace_print_bts_conf (&conf->bts);
      return;

    case BTRACE_FORMAT_PT:
      record_btrace_print_pt_conf (&conf->pt);
      return;
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
}

/* The to_info_record method of target record-btrace.  */

static void
record_btrace_info (struct target_ops *self)
{
  struct btrace_thread_info *btinfo;
  const struct btrace_config *conf;
  struct thread_info *tp;
  unsigned int insns, calls, gaps;

  DEBUG ("info");

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  btinfo = &tp->btrace;

  conf = btrace_conf (btinfo);
  if (conf != NULL)
    record_btrace_print_conf (conf);

  btrace_fetch (tp);

  insns = 0;
  calls = 0;
  gaps = 0;

  if (!btrace_is_empty (tp))
    {
      struct btrace_call_iterator call;
      struct btrace_insn_iterator insn;

      btrace_call_end (&call, btinfo);
      btrace_call_prev (&call, 1);
      calls = btrace_call_number (&call);

      btrace_insn_end (&insn, btinfo);

      insns = btrace_insn_number (&insn);
      if (insns != 0)
        {
          /* The last instruction does not really belong to the trace.  */
          insns -= 1;
        }
      else
        {
          unsigned int steps;

          /* Skip gaps at the end.  */
          do
            {
              steps = btrace_insn_prev (&insn, 1);
              if (steps == 0)
                break;

              insns = btrace_insn_number (&insn);
            }
          while (insns == 0);
        }

      gaps = btinfo->ngaps;
    }

  printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
                       "for thread %d (%s).\n"), insns, calls, gaps,
                     tp->num, target_pid_to_str (tp->ptid));

  if (btrace_is_replaying (tp))
    printf_unfiltered (_("Replay in progress.  At instruction %u.\n"),
                       btrace_insn_number (btinfo->replay));
}

/* Print a decode error.  */

static void
btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
                            enum btrace_format format)
{
  const char *errstr;
  int is_error;

  errstr = _("unknown");
  is_error = 1;

  switch (format)
    {
    default:
      break;

    case BTRACE_FORMAT_BTS:
      switch (errcode)
        {
        default:
          break;

        case BDE_BTS_OVERFLOW:
          errstr = _("instruction overflow");
          break;

        case BDE_BTS_INSN_SIZE:
          errstr = _("unknown instruction");
          break;
        }
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      switch (errcode)
        {
        case BDE_PT_USER_QUIT:
          is_error = 0;
          errstr = _("trace decode cancelled");
          break;

        case BDE_PT_DISABLED:
          is_error = 0;
          errstr = _("disabled");
          break;

        case BDE_PT_OVERFLOW:
          is_error = 0;
          errstr = _("overflow");
          break;

        default:
          if (errcode < 0)
            errstr = pt_errstr (pt_errcode (errcode));
          break;
        }
      break;
#endif /* defined (HAVE_LIBIPT) */
    }

  ui_out_text (uiout, _("["));
  if (is_error)
    {
      ui_out_text (uiout, _("decode error ("));
      ui_out_field_int (uiout, "errcode", errcode);
      ui_out_text (uiout, _("): "));
    }
  ui_out_text (uiout, errstr);
  ui_out_text (uiout, _("]\n"));
}
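
/* For example, a BDE_BTS_INSN_SIZE error in a BTS trace is printed as
   "[decode error (<errcode>): unknown instruction]", while a non-error such
   as a cancelled Intel PT decode is printed as "[trace decode cancelled]".  */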

/* Print an unsigned int.  */

static void
ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
{
  ui_out_field_fmt (uiout, fld, "%u", val);
}

/* Disassemble a section of the recorded instruction trace.  */

static void
btrace_insn_history (struct ui_out *uiout,
                     const struct btrace_thread_info *btinfo,
                     const struct btrace_insn_iterator *begin,
                     const struct btrace_insn_iterator *end, int flags)
{
  struct gdbarch *gdbarch;
  struct btrace_insn_iterator it;

  DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
         btrace_insn_number (end));

  gdbarch = target_gdbarch ();

  for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
    {
      const struct btrace_insn *insn;

      insn = btrace_insn_get (&it);

      /* A NULL instruction indicates a gap in the trace.  */
      if (insn == NULL)
        {
          const struct btrace_config *conf;

          conf = btrace_conf (btinfo);

          /* We have trace so we must have a configuration.  */
          gdb_assert (conf != NULL);

          btrace_ui_out_decode_error (uiout, it.function->errcode,
                                      conf->format);
        }
      else
        {
          char prefix[4];

          /* We may add a speculation prefix later.  We use the same space
             that is used for the pc prefix.  */
          if ((flags & DISASSEMBLY_OMIT_PC) == 0)
            strncpy (prefix, pc_prefix (insn->pc), 3);
          else
            {
              prefix[0] = ' ';
              prefix[1] = ' ';
              prefix[2] = ' ';
            }
          prefix[3] = 0;

          /* Print the instruction index.  */
          ui_out_field_uint (uiout, "index", btrace_insn_number (&it));
          ui_out_text (uiout, "\t");

          /* Indicate speculative execution by a leading '?'.  */
          if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
            prefix[0] = '?';

          /* Print the prefix; we tell gdb_disassembly below to omit it.  */
          ui_out_field_fmt (uiout, "prefix", "%s", prefix);

          /* Disassembly with '/m' flag may not produce the expected result.
             See PR gdb/11833.  */
          gdb_disassembly (gdbarch, uiout, NULL, flags | DISASSEMBLY_OMIT_PC,
                           1, insn->pc, insn->pc + 1);
        }
    }
}

/* The to_insn_history method of target record-btrace.  */

static void
record_btrace_insn_history (struct target_ops *self, int size, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
                                                       "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  btinfo = require_btrace ();
  history = btinfo->insn_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("insn-history (0x%x): %d", flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
         start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
        begin = *replay;
      else
        btrace_insn_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
         expand in the other direction, as well, to fill up any remaining
         context.  */
      end = begin;
      if (size < 0)
        {
          /* We want the current position covered, as well.  */
          covered = btrace_insn_next (&end, 1);
          covered += btrace_insn_prev (&begin, context - covered);
          covered += btrace_insn_next (&end, context - covered);
        }
      else
        {
          covered = btrace_insn_next (&end, context);
          covered += btrace_insn_prev (&begin, context - covered);
        }
    }
  else
    {
      begin = history->begin;
      end = history->end;

      DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
             btrace_insn_number (&begin), btrace_insn_number (&end));

      if (size < 0)
        {
          end = begin;
          covered = btrace_insn_prev (&begin, context);
        }
      else
        {
          begin = end;
          covered = btrace_insn_next (&end, context);
        }
    }

  if (covered > 0)
    btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
        printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
        printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  btrace_set_insn_history (btinfo, &begin, &end);
  do_cleanups (uiout_cleanup);
}

/* The to_insn_history_range method of target record-btrace.  */

static void
record_btrace_insn_history_range (struct target_ops *self,
                                  ULONGEST from, ULONGEST to, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
                                                       "insn history");
  low = from;
  high = to;

  DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_insn_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_insn_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_insn_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_insn_next (&end, 1);
    }

  btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_insn_history (btinfo, &begin, &end);

  do_cleanups (uiout_cleanup);
}

/* The to_insn_history_from method of target record-btrace.  */

static void
record_btrace_insn_history_from (struct target_ops *self,
                                 ULONGEST from, int size, int flags)
{
  ULONGEST begin, end, context;

  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  if (size < 0)
    {
      end = from;

      if (from < context)
        begin = 0;
      else
        begin = from - context + 1;
    }
  else
    {
      begin = from;
      end = from + context - 1;

      /* Check for wrap-around.  */
      if (end < begin)
        end = ULONGEST_MAX;
    }

  record_btrace_insn_history_range (self, begin, end, flags);
}
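
/* For example, FROM = 10 with SIZE = 10 requests the inclusive range
   [10; 19]; FROM = 10 with SIZE = -10 requests [1; 10], clamping the lower
   bound at 0 if FROM is smaller than the requested context.  */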

/* Print the instruction number range for a function call history line.  */

static void
btrace_call_history_insn_range (struct ui_out *uiout,
                                const struct btrace_function *bfun)
{
  unsigned int begin, end, size;

  size = VEC_length (btrace_insn_s, bfun->insn);
  gdb_assert (size > 0);

  begin = bfun->insn_offset;
  end = begin + size - 1;

  ui_out_field_uint (uiout, "insn begin", begin);
  ui_out_text (uiout, ",");
  ui_out_field_uint (uiout, "insn end", end);
}
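
/* For example, a function segment whose first instruction has number 5
   (insn_offset == 5) and that contains three instructions is printed as
   "5,7".  */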

/* Compute the lowest and highest source line for the instructions in BFUN
   and return them in PBEGIN and PEND.
   Ignore instructions that can't be mapped to BFUN, e.g. instructions that
   result from inlining or macro expansion.  */

static void
btrace_compute_src_line_range (const struct btrace_function *bfun,
                               int *pbegin, int *pend)
{
  struct btrace_insn *insn;
  struct symtab *symtab;
  struct symbol *sym;
  unsigned int idx;
  int begin, end;

  begin = INT_MAX;
  end = INT_MIN;

  sym = bfun->sym;
  if (sym == NULL)
    goto out;

  symtab = symbol_symtab (sym);

  for (idx = 0; VEC_iterate (btrace_insn_s, bfun->insn, idx, insn); ++idx)
    {
      struct symtab_and_line sal;

      sal = find_pc_line (insn->pc, 0);
      if (sal.symtab != symtab || sal.line == 0)
        continue;

      begin = min (begin, sal.line);
      end = max (end, sal.line);
    }

 out:
  *pbegin = begin;
  *pend = end;
}
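
/* For example, if BFUN's instructions map to source lines 10 through 14 of
   its symtab, *PBEGIN is 10 and *PEND is 14.  If no instruction maps to
   BFUN's symtab, *PBEGIN stays at INT_MAX and *PEND at INT_MIN, so callers
   can detect the empty range via *PEND < *PBEGIN.  */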

/* Print the source line information for a function call history line.  */

static void
btrace_call_history_src_line (struct ui_out *uiout,
                              const struct btrace_function *bfun)
{
  struct symbol *sym;
  int begin, end;

  sym = bfun->sym;
  if (sym == NULL)
    return;

  ui_out_field_string (uiout, "file",
                       symtab_to_filename_for_display (symbol_symtab (sym)));

  btrace_compute_src_line_range (bfun, &begin, &end);
  if (end < begin)
    return;

  ui_out_text (uiout, ":");
  ui_out_field_int (uiout, "min line", begin);

  if (end == begin)
    return;

  ui_out_text (uiout, ",");
  ui_out_field_int (uiout, "max line", end);
}

/* Get the name of a branch trace function.  */

static const char *
btrace_get_bfun_name (const struct btrace_function *bfun)
{
  struct minimal_symbol *msym;
  struct symbol *sym;

  if (bfun == NULL)
    return "??";

  msym = bfun->msym;
  sym = bfun->sym;

  if (sym != NULL)
    return SYMBOL_PRINT_NAME (sym);
  else if (msym != NULL)
    return MSYMBOL_PRINT_NAME (msym);
  else
    return "??";
}

/* Disassemble a section of the recorded function trace.  */

static void
btrace_call_history (struct ui_out *uiout,
                     const struct btrace_thread_info *btinfo,
                     const struct btrace_call_iterator *begin,
                     const struct btrace_call_iterator *end,
                     enum record_print_flag flags)
{
  struct btrace_call_iterator it;

  DEBUG ("ftrace (0x%x): [%u; %u)", flags, btrace_call_number (begin),
         btrace_call_number (end));

  for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
    {
      const struct btrace_function *bfun;
      struct minimal_symbol *msym;
      struct symbol *sym;

      bfun = btrace_call_get (&it);
      sym = bfun->sym;
      msym = bfun->msym;

      /* Print the function index.  */
      ui_out_field_uint (uiout, "index", bfun->number);
      ui_out_text (uiout, "\t");

      /* Indicate gaps in the trace.  */
      if (bfun->errcode != 0)
        {
          const struct btrace_config *conf;

          conf = btrace_conf (btinfo);

          /* We have trace so we must have a configuration.  */
          gdb_assert (conf != NULL);

          btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);

          continue;
        }

      if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
        {
          int level = bfun->level + btinfo->level, i;

          for (i = 0; i < level; ++i)
            ui_out_text (uiout, "  ");
        }

      if (sym != NULL)
        ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (sym));
      else if (msym != NULL)
        ui_out_field_string (uiout, "function", MSYMBOL_PRINT_NAME (msym));
      else if (!ui_out_is_mi_like_p (uiout))
        ui_out_field_string (uiout, "function", "??");

      if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
        {
          ui_out_text (uiout, _("\tinst "));
          btrace_call_history_insn_range (uiout, bfun);
        }

      if ((flags & RECORD_PRINT_SRC_LINE) != 0)
        {
          ui_out_text (uiout, _("\tat "));
          btrace_call_history_src_line (uiout, bfun);
        }

      ui_out_text (uiout, "\n");
    }
}

/* The to_call_history method of target record-btrace.  */

static void
record_btrace_call_history (struct target_ops *self, int size, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
                                                       "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  btinfo = require_btrace ();
  history = btinfo->call_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("call-history (0x%x): %d", flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
         start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
        {
          begin.function = replay->function;
          begin.btinfo = btinfo;
        }
      else
        btrace_call_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
         expand in the other direction, as well, to fill up any remaining
         context.  */
      end = begin;
      if (size < 0)
        {
          /* We want the current position covered, as well.  */
          covered = btrace_call_next (&end, 1);
          covered += btrace_call_prev (&begin, context - covered);
          covered += btrace_call_next (&end, context - covered);
        }
      else
        {
          covered = btrace_call_next (&end, context);
          covered += btrace_call_prev (&begin, context - covered);
        }
    }
  else
    {
      begin = history->begin;
      end = history->end;

      DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", flags, size,
             btrace_call_number (&begin), btrace_call_number (&end));

      if (size < 0)
        {
          end = begin;
          covered = btrace_call_prev (&begin, context);
        }
      else
        {
          begin = end;
          covered = btrace_call_next (&end, context);
        }
    }

  if (covered > 0)
    btrace_call_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
        printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
        printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  btrace_set_call_history (btinfo, &begin, &end);
  do_cleanups (uiout_cleanup);
}

/* The to_call_history_range method of target record-btrace.  */

static void
record_btrace_call_history_range (struct target_ops *self,
                                  ULONGEST from, ULONGEST to, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
                                                       "func history");
  low = from;
  high = to;

  DEBUG ("call-history (0x%x): [%u; %u)", flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_call_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_call_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_call_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_call_next (&end, 1);
    }

  btrace_call_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_call_history (btinfo, &begin, &end);

  do_cleanups (uiout_cleanup);
}

/* The to_call_history_from method of target record-btrace.  */

static void
record_btrace_call_history_from (struct target_ops *self,
                                 ULONGEST from, int size, int flags)
{
  ULONGEST begin, end, context;

  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  if (size < 0)
    {
      end = from;

      if (from < context)
        begin = 0;
      else
        begin = from - context + 1;
    }
  else
    {
      begin = from;
      end = from + context - 1;

      /* Check for wrap-around.  */
      if (end < begin)
        end = ULONGEST_MAX;
    }

  record_btrace_call_history_range (self, begin, end, flags);
}

/* The to_record_is_replaying method of target record-btrace.  */

static int
record_btrace_is_replaying (struct target_ops *self, ptid_t ptid)
{
  struct thread_info *tp;

  ALL_NON_EXITED_THREADS (tp)
    if (ptid_match (tp->ptid, ptid) && btrace_is_replaying (tp))
      return 1;

  return 0;
}

/* The to_record_will_replay method of target record-btrace.  */

static int
record_btrace_will_replay (struct target_ops *self, ptid_t ptid, int dir)
{
  return dir == EXEC_REVERSE || record_btrace_is_replaying (self, ptid);
}

/* The to_xfer_partial method of target record-btrace.  */

static enum target_xfer_status
record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
                            const char *annex, gdb_byte *readbuf,
                            const gdb_byte *writebuf, ULONGEST offset,
                            ULONGEST len, ULONGEST *xfered_len)
{
  struct target_ops *t;

  /* Filter out requests that don't make sense during replay.  */
  if (replay_memory_access == replay_memory_access_read_only
      && !record_btrace_generating_corefile
      && record_btrace_is_replaying (ops, inferior_ptid))
    {
      switch (object)
        {
        case TARGET_OBJECT_MEMORY:
          {
            struct target_section *section;

            /* We do not allow writing memory in general.  */
            if (writebuf != NULL)
              {
                *xfered_len = len;
                return TARGET_XFER_UNAVAILABLE;
              }

            /* We allow reading readonly memory.  */
            section = target_section_by_addr (ops, offset);
            if (section != NULL)
              {
                /* Check if the section we found is readonly.  */
                if ((bfd_get_section_flags (section->the_bfd_section->owner,
                                            section->the_bfd_section)
                     & SEC_READONLY) != 0)
                  {
                    /* Truncate the request to fit into this section.  */
                    len = min (len, section->endaddr - offset);
                    break;
                  }
              }

            *xfered_len = len;
            return TARGET_XFER_UNAVAILABLE;
          }
        }
    }

  /* Forward the request.  */
  ops = ops->beneath;
  return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
                               offset, len, xfered_len);
}

/* The to_insert_breakpoint method of target record-btrace.  */

static int
record_btrace_insert_breakpoint (struct target_ops *ops,
                                 struct gdbarch *gdbarch,
                                 struct bp_target_info *bp_tgt)
{
  const char *old;
  int ret;

  /* Inserting breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  TRY
    {
      ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      replay_memory_access = old;
      throw_exception (except);
    }
  END_CATCH
  replay_memory_access = old;

  return ret;
}

/* The to_remove_breakpoint method of target record-btrace.  */

static int
record_btrace_remove_breakpoint (struct target_ops *ops,
                                 struct gdbarch *gdbarch,
                                 struct bp_target_info *bp_tgt)
{
  const char *old;
  int ret;

  /* Removing breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  TRY
    {
      ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt);
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      replay_memory_access = old;
      throw_exception (except);
    }
  END_CATCH
  replay_memory_access = old;

  return ret;
}

/* The to_fetch_registers method of target record-btrace.  */

static void
record_btrace_fetch_registers (struct target_ops *ops,
                               struct regcache *regcache, int regno)
{
  struct btrace_insn_iterator *replay;
  struct thread_info *tp;

  tp = find_thread_ptid (inferior_ptid);
  gdb_assert (tp != NULL);

  replay = tp->btrace.replay;
  if (replay != NULL && !record_btrace_generating_corefile)
    {
      const struct btrace_insn *insn;
      struct gdbarch *gdbarch;
      int pcreg;

      gdbarch = get_regcache_arch (regcache);
      pcreg = gdbarch_pc_regnum (gdbarch);
      if (pcreg < 0)
        return;

      /* We can only provide the PC register.  */
      if (regno >= 0 && regno != pcreg)
        return;

      insn = btrace_insn_get (replay);
      gdb_assert (insn != NULL);

      regcache_raw_supply (regcache, regno, &insn->pc);
    }
  else
    {
      struct target_ops *t = ops->beneath;

      t->to_fetch_registers (t, regcache, regno);
    }
}

/* The to_store_registers method of target record-btrace.  */

static void
record_btrace_store_registers (struct target_ops *ops,
                               struct regcache *regcache, int regno)
{
  struct target_ops *t;

  if (!record_btrace_generating_corefile
      && record_btrace_is_replaying (ops, inferior_ptid))
    error (_("Cannot write registers while replaying."));

  gdb_assert (may_write_registers != 0);

  t = ops->beneath;
  t->to_store_registers (t, regcache, regno);
}

/* The to_prepare_to_store method of target record-btrace.  */

static void
record_btrace_prepare_to_store (struct target_ops *ops,
                                struct regcache *regcache)
{
  struct target_ops *t;

  if (!record_btrace_generating_corefile
      && record_btrace_is_replaying (ops, inferior_ptid))
    return;

  t = ops->beneath;
  t->to_prepare_to_store (t, regcache);
}

/* The branch trace frame cache.  */

struct btrace_frame_cache
{
  /* The thread.  */
  struct thread_info *tp;

  /* The frame info.  */
  struct frame_info *frame;

  /* The branch trace function segment.  */
  const struct btrace_function *bfun;
};

/* A struct btrace_frame_cache hash table indexed by NEXT.  */

static htab_t bfcache;

/* hash_f for htab_create_alloc of bfcache.  */

static hashval_t
bfcache_hash (const void *arg)
{
  const struct btrace_frame_cache *cache
    = (const struct btrace_frame_cache *) arg;

  return htab_hash_pointer (cache->frame);
}

/* eq_f for htab_create_alloc of bfcache.  */

static int
bfcache_eq (const void *arg1, const void *arg2)
{
  const struct btrace_frame_cache *cache1
    = (const struct btrace_frame_cache *) arg1;
  const struct btrace_frame_cache *cache2
    = (const struct btrace_frame_cache *) arg2;

  return cache1->frame == cache2->frame;
}

/* Create a new btrace frame cache.  */

static struct btrace_frame_cache *
bfcache_new (struct frame_info *frame)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
  cache->frame = frame;

  slot = htab_find_slot (bfcache, cache, INSERT);
  gdb_assert (*slot == NULL);
  *slot = cache;

  return cache;
}

/* Extract the branch trace function from a branch trace frame.  */

static const struct btrace_function *
btrace_get_frame_function (struct frame_info *frame)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;
  struct btrace_frame_cache pattern;
  void **slot;

  pattern.frame = frame;

  slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
  if (slot == NULL)
    return NULL;

  cache = (const struct btrace_frame_cache *) *slot;
  return cache->bfun;
}

/* Implement stop_reason method for record_btrace_frame_unwind.  */

static enum unwind_stop_reason
record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
                                        void **this_cache)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;

  cache = (const struct btrace_frame_cache *) *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  if (bfun->up == NULL)
    return UNWIND_UNAVAILABLE;

  return UNWIND_NO_REASON;
}

/* Implement this_id method for record_btrace_frame_unwind.  */

static void
record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
                             struct frame_id *this_id)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;
  CORE_ADDR code, special;

  cache = (const struct btrace_frame_cache *) *this_cache;

  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  while (bfun->segment.prev != NULL)
    bfun = bfun->segment.prev;

  code = get_frame_func (this_frame);
  special = bfun->number;

  *this_id = frame_id_build_unavailable_stack_special (code, special);

  DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
         btrace_get_bfun_name (cache->bfun),
         core_addr_to_string_nz (this_id->code_addr),
         core_addr_to_string_nz (this_id->special_addr));
}

/* Implement prev_register method for record_btrace_frame_unwind.  */

static struct value *
record_btrace_frame_prev_register (struct frame_info *this_frame,
                                   void **this_cache,
                                   int regnum)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun, *caller;
  const struct btrace_insn *insn;
  struct gdbarch *gdbarch;
  CORE_ADDR pc;
  int pcreg;

  gdbarch = get_frame_arch (this_frame);
  pcreg = gdbarch_pc_regnum (gdbarch);
  if (pcreg < 0 || regnum != pcreg)
    throw_error (NOT_AVAILABLE_ERROR,
                 _("Registers are not available in btrace record history"));

  cache = (const struct btrace_frame_cache *) *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  caller = bfun->up;
  if (caller == NULL)
    throw_error (NOT_AVAILABLE_ERROR,
                 _("No caller in btrace record history"));

  if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
    {
      insn = VEC_index (btrace_insn_s, caller->insn, 0);
      pc = insn->pc;
    }
  else
    {
      insn = VEC_last (btrace_insn_s, caller->insn);
      pc = insn->pc;

      pc += gdb_insn_length (gdbarch, pc);
    }

  DEBUG ("[frame] unwound PC in %s on level %d: %s",
         btrace_get_bfun_name (bfun), bfun->level,
         core_addr_to_string_nz (pc));

  return frame_unwind_got_address (this_frame, regnum, pc);
}

/* Implement sniffer method for record_btrace_frame_unwind.  */

static int
record_btrace_frame_sniffer (const struct frame_unwind *self,
                             struct frame_info *this_frame,
                             void **this_cache)
{
  const struct btrace_function *bfun;
  struct btrace_frame_cache *cache;
  struct thread_info *tp;
  struct frame_info *next;

  /* THIS_FRAME does not contain a reference to its thread.  */
  tp = find_thread_ptid (inferior_ptid);
  gdb_assert (tp != NULL);

  bfun = NULL;
  next = get_next_frame (this_frame);
  if (next == NULL)
    {
      const struct btrace_insn_iterator *replay;

      replay = tp->btrace.replay;
      if (replay != NULL)
        bfun = replay->function;
    }
  else
    {
      const struct btrace_function *callee;

      callee = btrace_get_frame_function (next);
      if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
        bfun = callee->up;
    }

  if (bfun == NULL)
    return 0;

  DEBUG ("[frame] sniffed frame for %s on level %d",
         btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = tp;
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}

/* Implement sniffer method for record_btrace_tailcall_frame_unwind.  */

static int
record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
                                      struct frame_info *this_frame,
                                      void **this_cache)
{
  const struct btrace_function *bfun, *callee;
  struct btrace_frame_cache *cache;
  struct frame_info *next;

  next = get_next_frame (this_frame);
  if (next == NULL)
    return 0;

  callee = btrace_get_frame_function (next);
  if (callee == NULL)
    return 0;

  if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
    return 0;

  bfun = callee->up;
  if (bfun == NULL)
    return 0;

  DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
         btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = find_thread_ptid (inferior_ptid);
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}

static void
record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = (struct btrace_frame_cache *) this_cache;

  slot = htab_find_slot (bfcache, cache, NO_INSERT);
  gdb_assert (slot != NULL);

  htab_remove_elt (bfcache, cache);
}

/* btrace recording does not store previous memory content, nor the stack
   frames' content.  Any unwinding would return erroneous results as the
   stack contents no longer match the changed PC value restored from history.
   Therefore this unwinder reports any possibly unwound registers as
   <unavailable>.  */

const struct frame_unwind record_btrace_frame_unwind =
{
  NORMAL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_frame_sniffer,
  record_btrace_frame_dealloc_cache
};

const struct frame_unwind record_btrace_tailcall_frame_unwind =
{
  TAILCALL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_tailcall_frame_sniffer,
  record_btrace_frame_dealloc_cache
};

/* Implement the to_get_unwinder method.  */

static const struct frame_unwind *
record_btrace_to_get_unwinder (struct target_ops *self)
{
  return &record_btrace_frame_unwind;
}

/* Implement the to_get_tailcall_unwinder method.  */

static const struct frame_unwind *
record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
{
  return &record_btrace_tailcall_frame_unwind;
}

/* Return a human-readable string for FLAG.  */

static const char *
btrace_thread_flag_to_str (enum btrace_thread_flag flag)
{
  switch (flag)
    {
    case BTHR_STEP:
      return "step";

    case BTHR_RSTEP:
      return "reverse-step";

    case BTHR_CONT:
      return "cont";

    case BTHR_RCONT:
      return "reverse-cont";

    case BTHR_STOP:
      return "stop";
    }

  return "<invalid>";
}

/* Indicate that TP should be resumed according to FLAG.  */

static void
record_btrace_resume_thread (struct thread_info *tp,
                             enum btrace_thread_flag flag)
{
  struct btrace_thread_info *btinfo;

  DEBUG ("resuming thread %d (%s): %x (%s)", tp->num,
         target_pid_to_str (tp->ptid), flag, btrace_thread_flag_to_str (flag));

  btinfo = &tp->btrace;

  /* Fetch the latest branch trace.  */
  btrace_fetch (tp);

  /* A resume request overwrites a preceding resume or stop request.  */
  btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
  btinfo->flags |= flag;
}

/* Get the current frame for TP.  */

static struct frame_info *
get_thread_current_frame (struct thread_info *tp)
{
  struct frame_info *frame;
  ptid_t old_inferior_ptid;
  int executing;

  /* Set INFERIOR_PTID, which is implicitly used by get_current_frame.  */
  old_inferior_ptid = inferior_ptid;
  inferior_ptid = tp->ptid;

  /* Clear the executing flag to allow changes to the current frame.
     We are not actually running, yet.  We just started a reverse execution
     command or a record goto command.
     For the latter, EXECUTING is false and this has no effect.
     For the former, EXECUTING is true and we're in to_wait, about to
     move the thread.  Since we need to recompute the stack, we temporarily
     set EXECUTING to false.  */
  executing = is_executing (inferior_ptid);
  set_executing (inferior_ptid, 0);

  frame = NULL;
  TRY
    {
      frame = get_current_frame ();
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      /* Restore the previous execution state.  */
      set_executing (inferior_ptid, executing);

      /* Restore the previous inferior_ptid.  */
      inferior_ptid = old_inferior_ptid;

      throw_exception (except);
    }
  END_CATCH

  /* Restore the previous execution state.  */
  set_executing (inferior_ptid, executing);

  /* Restore the previous inferior_ptid.  */
  inferior_ptid = old_inferior_ptid;

  return frame;
}

52834460
MM
1767/* Start replaying a thread. */
1768
1769static struct btrace_insn_iterator *
1770record_btrace_start_replaying (struct thread_info *tp)
1771{
52834460
MM
1772 struct btrace_insn_iterator *replay;
1773 struct btrace_thread_info *btinfo;
52834460
MM
1774
1775 btinfo = &tp->btrace;
1776 replay = NULL;
1777
1778 /* We can't start replaying without trace. */
1779 if (btinfo->begin == NULL)
1780 return NULL;
1781
52834460
MM
1782 /* GDB stores the current frame_id when stepping in order to detects steps
1783 into subroutines.
1784 Since frames are computed differently when we're replaying, we need to
1785 recompute those stored frames and fix them up so we can still detect
1786 subroutines after we started replaying. */
492d29ea 1787 TRY
52834460
MM
1788 {
1789 struct frame_info *frame;
1790 struct frame_id frame_id;
1791 int upd_step_frame_id, upd_step_stack_frame_id;
1792
1793 /* The current frame without replaying - computed via normal unwind. */
ec71cc2f 1794 frame = get_thread_current_frame (tp);
52834460
MM
1795 frame_id = get_frame_id (frame);
1796
1797 /* Check if we need to update any stepping-related frame id's. */
1798 upd_step_frame_id = frame_id_eq (frame_id,
1799 tp->control.step_frame_id);
1800 upd_step_stack_frame_id = frame_id_eq (frame_id,
1801 tp->control.step_stack_frame_id);
1802
1803 /* We start replaying at the end of the branch trace. This corresponds
1804 to the current instruction. */
8d749320 1805 replay = XNEW (struct btrace_insn_iterator);
52834460
MM
1806 btrace_insn_end (replay, btinfo);
1807
31fd9caa
MM
1808 /* Skip gaps at the end of the trace. */
1809 while (btrace_insn_get (replay) == NULL)
1810 {
1811 unsigned int steps;
1812
1813 steps = btrace_insn_prev (replay, 1);
1814 if (steps == 0)
1815 error (_("No trace."));
1816 }
1817
52834460
MM
1818 /* We're not replaying, yet. */
1819 gdb_assert (btinfo->replay == NULL);
1820 btinfo->replay = replay;
1821
1822 /* Make sure we're not using any stale registers. */
1823 registers_changed_ptid (tp->ptid);
1824
1825 /* The current frame with replaying - computed via btrace unwind. */
ec71cc2f 1826 frame = get_thread_current_frame (tp);
52834460
MM
1827 frame_id = get_frame_id (frame);
1828
1829 /* Replace stepping related frames where necessary. */
1830 if (upd_step_frame_id)
1831 tp->control.step_frame_id = frame_id;
1832 if (upd_step_stack_frame_id)
1833 tp->control.step_stack_frame_id = frame_id;
1834 }
492d29ea 1835 CATCH (except, RETURN_MASK_ALL)
52834460
MM
1836 {
1837 xfree (btinfo->replay);
1838 btinfo->replay = NULL;
1839
1840 registers_changed_ptid (tp->ptid);
1841
1842 throw_exception (except);
1843 }
492d29ea 1844 END_CATCH
52834460
MM
1845
1846 return replay;
1847}
1848
1849/* Stop replaying a thread. */
1850
1851static void
1852record_btrace_stop_replaying (struct thread_info *tp)
1853{
1854 struct btrace_thread_info *btinfo;
1855
1856 btinfo = &tp->btrace;
1857
1858 xfree (btinfo->replay);
1859 btinfo->replay = NULL;
1860
1861 /* Make sure we're not leaving any stale registers. */
1862 registers_changed_ptid (tp->ptid);
1863}
1864
e3cfc1c7
MM
1865/* Stop replaying TP if it is at the end of its execution history. */
1866
1867static void
1868record_btrace_stop_replaying_at_end (struct thread_info *tp)
1869{
1870 struct btrace_insn_iterator *replay, end;
1871 struct btrace_thread_info *btinfo;
1872
1873 btinfo = &tp->btrace;
1874 replay = btinfo->replay;
1875
1876 if (replay == NULL)
1877 return;
1878
1879 btrace_insn_end (&end, btinfo);
1880
1881 if (btrace_insn_cmp (replay, &end) == 0)
1882 record_btrace_stop_replaying (tp);
1883}
1884
b2f4cfde
MM
1885/* The to_resume method of target record-btrace. */
1886
1887static void
1888record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
1889 enum gdb_signal signal)
1890{
0ca912df 1891 struct thread_info *tp;
d2939ba2 1892 enum btrace_thread_flag flag, cflag;
52834460 1893
987e68b1
MM
1894 DEBUG ("resume %s: %s%s", target_pid_to_str (ptid),
1895 execution_direction == EXEC_REVERSE ? "reverse-" : "",
1896 step ? "step" : "cont");
52834460 1897
0ca912df
MM
1898 /* Store the execution direction of the last resume.
1899
1900 If there is more than one to_resume call, we have to rely on infrun
1901 to not change the execution direction in-between. */
70ad5bff
MM
1902 record_btrace_resume_exec_dir = execution_direction;
1903
0ca912df 1904 /* As long as we're not replaying, just forward the request.
52834460 1905
0ca912df
MM
1906 For non-stop targets this means that no thread is replaying. In order to
1907 make progress, we may need to explicitly move replaying threads to the end
1908 of their execution history. */
a52eab48
MM
1909 if ((execution_direction != EXEC_REVERSE)
1910 && !record_btrace_is_replaying (ops, minus_one_ptid))
b2f4cfde 1911 {
e75fdfca 1912 ops = ops->beneath;
04c4fe8c
MM
1913 ops->to_resume (ops, ptid, step, signal);
1914 return;
b2f4cfde
MM
1915 }
1916
52834460 1917 /* Compute the btrace thread flag for the requested move. */
d2939ba2
MM
1918 if (execution_direction == EXEC_REVERSE)
1919 {
1920 flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;
1921 cflag = BTHR_RCONT;
1922 }
52834460 1923 else
d2939ba2
MM
1924 {
1925 flag = step == 0 ? BTHR_CONT : BTHR_STEP;
1926 cflag = BTHR_CONT;
1927 }
52834460 1928
52834460 1929 /* We just indicate the resume intent here. The actual stepping happens in
d2939ba2
MM
1930 record_btrace_wait below.
1931
1932 For all-stop targets, we only step INFERIOR_PTID and continue others. */
1933 if (!target_is_non_stop_p ())
1934 {
1935 gdb_assert (ptid_match (inferior_ptid, ptid));
1936
1937 ALL_NON_EXITED_THREADS (tp)
1938 if (ptid_match (tp->ptid, ptid))
1939 {
1940 if (ptid_match (tp->ptid, inferior_ptid))
1941 record_btrace_resume_thread (tp, flag);
1942 else
1943 record_btrace_resume_thread (tp, cflag);
1944 }
1945 }
1946 else
1947 {
1948 ALL_NON_EXITED_THREADS (tp)
1949 if (ptid_match (tp->ptid, ptid))
1950 record_btrace_resume_thread (tp, flag);
1951 }
70ad5bff
MM
1952
1953 /* Async support. */
1954 if (target_can_async_p ())
1955 {
6a3753b3 1956 target_async (1);
70ad5bff
MM
1957 mark_async_event_handler (record_btrace_async_inferior_event_handler);
1958 }
52834460
MM
1959}
1960
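/* A rough illustration of how user commands map onto the flags computed
   above, assuming a replay position has already been established:

     (gdb) reverse-stepi      # step == 1, EXEC_REVERSE -> BTHR_RSTEP
     (gdb) reverse-continue   # step == 0, EXEC_REVERSE -> BTHR_RCONT
     (gdb) stepi              # step == 1, EXEC_FORWARD -> BTHR_STEP
     (gdb) continue           # step == 0, EXEC_FORWARD -> BTHR_CONT

   Only the intent is recorded here; the actual stepping happens in
   record_btrace_wait below.  */
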
987e68b1
MM
1961/* Cancel resuming TP. */
1962
1963static void
1964record_btrace_cancel_resume (struct thread_info *tp)
1965{
1966 enum btrace_thread_flag flags;
1967
1968 flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
1969 if (flags == 0)
1970 return;
1971
1972 DEBUG ("cancel resume thread %d (%s): %x (%s)", tp->num,
1973 target_pid_to_str (tp->ptid), flags,
1974 btrace_thread_flag_to_str (flags));
1975
1976 tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
e3cfc1c7 1977 record_btrace_stop_replaying_at_end (tp);
52834460
MM
1978}
1979
1980/* Return a target_waitstatus indicating that we ran out of history. */
1981
1982static struct target_waitstatus
1983btrace_step_no_history (void)
1984{
1985 struct target_waitstatus status;
1986
1987 status.kind = TARGET_WAITKIND_NO_HISTORY;
1988
1989 return status;
1990}
1991
1992/* Return a target_waitstatus indicating that a step finished. */
1993
1994static struct target_waitstatus
1995btrace_step_stopped (void)
1996{
1997 struct target_waitstatus status;
1998
1999 status.kind = TARGET_WAITKIND_STOPPED;
2000 status.value.sig = GDB_SIGNAL_TRAP;
2001
2002 return status;
2003}
2004
6e4879f0
MM
2005/* Return a target_waitstatus indicating that a thread was stopped as
2006 requested. */
2007
2008static struct target_waitstatus
2009btrace_step_stopped_on_request (void)
2010{
2011 struct target_waitstatus status;
2012
2013 status.kind = TARGET_WAITKIND_STOPPED;
2014 status.value.sig = GDB_SIGNAL_0;
2015
2016 return status;
2017}
2018
d825d248
MM
2019/* Return a target_waitstatus indicating a spurious stop. */
2020
2021static struct target_waitstatus
2022btrace_step_spurious (void)
2023{
2024 struct target_waitstatus status;
2025
2026 status.kind = TARGET_WAITKIND_SPURIOUS;
2027
2028 return status;
2029}
2030
e3cfc1c7
MM
2031/* Return a target_waitstatus indicating that the thread was not resumed. */
2032
2033static struct target_waitstatus
2034btrace_step_no_resumed (void)
2035{
2036 struct target_waitstatus status;
2037
2038 status.kind = TARGET_WAITKIND_NO_RESUMED;
2039
2040 return status;
2041}
2042
2043/* Return a target_waitstatus indicating that we should wait again. */
2044
2045static struct target_waitstatus
2046btrace_step_again (void)
2047{
2048 struct target_waitstatus status;
2049
2050 status.kind = TARGET_WAITKIND_IGNORE;
2051
2052 return status;
2053}
2054
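/* The helpers above build the wait statuses used by the stepping and wait
   code below:

     btrace_step_no_history         - TARGET_WAITKIND_NO_HISTORY
     btrace_step_stopped            - TARGET_WAITKIND_STOPPED, GDB_SIGNAL_TRAP
     btrace_step_stopped_on_request - TARGET_WAITKIND_STOPPED, GDB_SIGNAL_0
     btrace_step_spurious           - TARGET_WAITKIND_SPURIOUS
     btrace_step_no_resumed         - TARGET_WAITKIND_NO_RESUMED
     btrace_step_again              - TARGET_WAITKIND_IGNORE  */
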
52834460
MM
2055/* Clear the record histories. */
2056
2057static void
2058record_btrace_clear_histories (struct btrace_thread_info *btinfo)
2059{
2060 xfree (btinfo->insn_history);
2061 xfree (btinfo->call_history);
2062
2063 btinfo->insn_history = NULL;
2064 btinfo->call_history = NULL;
2065}
2066
3c615f99
MM
2067/* Check whether TP's current replay position is at a breakpoint. */
2068
2069static int
2070record_btrace_replay_at_breakpoint (struct thread_info *tp)
2071{
2072 struct btrace_insn_iterator *replay;
2073 struct btrace_thread_info *btinfo;
2074 const struct btrace_insn *insn;
2075 struct inferior *inf;
2076
2077 btinfo = &tp->btrace;
2078 replay = btinfo->replay;
2079
2080 if (replay == NULL)
2081 return 0;
2082
2083 insn = btrace_insn_get (replay);
2084 if (insn == NULL)
2085 return 0;
2086
2087 inf = find_inferior_ptid (tp->ptid);
2088 if (inf == NULL)
2089 return 0;
2090
2091 return record_check_stopped_by_breakpoint (inf->aspace, insn->pc,
2092 &btinfo->stop_reason);
2093}
2094
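/* Besides answering whether we are at a breakpoint,
   record_check_stopped_by_breakpoint records the stop reason in
   BTINFO->stop_reason; the to_stopped_by_sw_breakpoint and
   to_stopped_by_hw_breakpoint methods further down report it back while
   replaying.  */
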
d825d248 2095/* Step one instruction in forward direction. */
52834460
MM
2096
2097static struct target_waitstatus
d825d248 2098record_btrace_single_step_forward (struct thread_info *tp)
52834460
MM
2099{
2100 struct btrace_insn_iterator *replay, end;
2101 struct btrace_thread_info *btinfo;
52834460 2102
d825d248
MM
2103 btinfo = &tp->btrace;
2104 replay = btinfo->replay;
2105
2106 /* We're done if we're not replaying. */
2107 if (replay == NULL)
2108 return btrace_step_no_history ();
2109
011c71b6
MM
2110 /* Check if we're stepping a breakpoint. */
2111 if (record_btrace_replay_at_breakpoint (tp))
2112 return btrace_step_stopped ();
2113
d825d248
MM
2114 /* Skip gaps during replay. */
2115 do
2116 {
2117 unsigned int steps;
2118
e3cfc1c7
MM
2119 /* We will bail out here if we continue stepping after reaching the end
2120 of the execution history. */
d825d248
MM
2121 steps = btrace_insn_next (replay, 1);
2122 if (steps == 0)
e3cfc1c7 2123 return btrace_step_no_history ();
d825d248
MM
2124 }
2125 while (btrace_insn_get (replay) == NULL);
2126
2127 /* Determine the end of the instruction trace. */
2128 btrace_insn_end (&end, btinfo);
2129
e3cfc1c7
MM
2130 /* The execution trace contains (and ends with) the current instruction.
2131 This instruction has not been executed, yet, so the trace really ends
2132 one instruction earlier. */
d825d248 2133 if (btrace_insn_cmp (replay, &end) == 0)
e3cfc1c7 2134 return btrace_step_no_history ();
d825d248
MM
2135
2136 return btrace_step_spurious ();
2137}
2138
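/* Gaps in the recorded trace - stretches for which no instructions are
   available - show up as iterator positions for which btrace_insn_get
   returns NULL.  Both stepping directions skip over them so that replay
   only ever stops on an actual instruction.  */
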
2139/* Step one instruction in backward direction. */
2140
2141static struct target_waitstatus
2142record_btrace_single_step_backward (struct thread_info *tp)
2143{
2144 struct btrace_insn_iterator *replay;
2145 struct btrace_thread_info *btinfo;
e59fa00f 2146
52834460
MM
2147 btinfo = &tp->btrace;
2148 replay = btinfo->replay;
2149
d825d248
MM
2150 /* Start replaying if we're not already doing so. */
2151 if (replay == NULL)
2152 replay = record_btrace_start_replaying (tp);
2153
2154 /* If we can't step any further, we reached the end of the history.
2155 Skip gaps during replay. */
2156 do
2157 {
2158 unsigned int steps;
2159
2160 steps = btrace_insn_prev (replay, 1);
2161 if (steps == 0)
2162 return btrace_step_no_history ();
2163 }
2164 while (btrace_insn_get (replay) == NULL);
2165
011c71b6
MM
2166 /* Check if we're stepping a breakpoint.
2167
2168 For reverse-stepping, this check is after the step. There is logic in
2169 infrun.c that handles reverse-stepping separately. See, for example,
2170 proceed and adjust_pc_after_break.
2171
2172 This code assumes that for reverse-stepping, PC points to the last
2173 de-executed instruction, whereas for forward-stepping PC points to the
2174 next to-be-executed instruction. */
2175 if (record_btrace_replay_at_breakpoint (tp))
2176 return btrace_step_stopped ();
2177
d825d248
MM
2178 return btrace_step_spurious ();
2179}
2180
2181/* Step a single thread. */
2182
2183static struct target_waitstatus
2184record_btrace_step_thread (struct thread_info *tp)
2185{
2186 struct btrace_thread_info *btinfo;
2187 struct target_waitstatus status;
2188 enum btrace_thread_flag flags;
2189
2190 btinfo = &tp->btrace;
2191
6e4879f0
MM
2192 flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
2193 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
52834460 2194
987e68b1
MM
2195 DEBUG ("stepping thread %d (%s): %x (%s)", tp->num,
2196 target_pid_to_str (tp->ptid), flags,
2197 btrace_thread_flag_to_str (flags));
52834460 2198
6e4879f0
MM
2199 /* We can't step without an execution history. */
2200 if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
2201 return btrace_step_no_history ();
2202
52834460
MM
2203 switch (flags)
2204 {
2205 default:
2206 internal_error (__FILE__, __LINE__, _("invalid stepping type."));
2207
6e4879f0
MM
2208 case BTHR_STOP:
2209 return btrace_step_stopped_on_request ();
2210
52834460 2211 case BTHR_STEP:
d825d248
MM
2212 status = record_btrace_single_step_forward (tp);
2213 if (status.kind != TARGET_WAITKIND_SPURIOUS)
e3cfc1c7 2214 break;
52834460
MM
2215
2216 return btrace_step_stopped ();
2217
2218 case BTHR_RSTEP:
d825d248
MM
2219 status = record_btrace_single_step_backward (tp);
2220 if (status.kind != TARGET_WAITKIND_SPURIOUS)
e3cfc1c7 2221 break;
52834460
MM
2222
2223 return btrace_step_stopped ();
2224
2225 case BTHR_CONT:
e3cfc1c7
MM
2226 status = record_btrace_single_step_forward (tp);
2227 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2228 break;
52834460 2229
e3cfc1c7
MM
2230 btinfo->flags |= flags;
2231 return btrace_step_again ();
52834460
MM
2232
2233 case BTHR_RCONT:
e3cfc1c7
MM
2234 status = record_btrace_single_step_backward (tp);
2235 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2236 break;
52834460 2237
e3cfc1c7
MM
2238 btinfo->flags |= flags;
2239 return btrace_step_again ();
2240 }
d825d248 2241
e3cfc1c7
MM
2242 /* We keep threads moving at the end of their execution history. The to_wait
2243 method will stop the thread for whom the event is reported. */
2244 if (status.kind == TARGET_WAITKIND_NO_HISTORY)
2245 btinfo->flags |= flags;
52834460 2246
e3cfc1c7 2247 return status;
b2f4cfde
MM
2248}
2249
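/* In other words, the flags set by record_btrace_resume and
   record_btrace_stop drive a small per-thread state machine:

     BTHR_STOP              - report a stop with GDB_SIGNAL_0.
     BTHR_STEP, BTHR_RSTEP  - move one instruction, then normally report
                              GDB_SIGNAL_TRAP.
     BTHR_CONT, BTHR_RCONT  - move one instruction and, if nothing
                              interesting happened, keep the flag set and
                              ask to be called again (TARGET_WAITKIND_IGNORE).

   A thread that runs out of execution history also keeps its flag set; the
   to_wait method decides when to report TARGET_WAITKIND_NO_HISTORY.  */
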
e3cfc1c7
MM
2250/* A vector of threads. */
2251
2252typedef struct thread_info * tp_t;
2253DEF_VEC_P (tp_t);
2254
a6b5be76
MM
2255/* Announce further events if necessary. */
2256
2257static void
2258record_btrace_maybe_mark_async_event (const VEC (tp_t) *moving,
2259 const VEC (tp_t) *no_history)
2260{
2261 int more_moving, more_no_history;
2262
2263 more_moving = !VEC_empty (tp_t, moving);
2264 more_no_history = !VEC_empty (tp_t, no_history);
2265
2266 if (!more_moving && !more_no_history)
2267 return;
2268
2269 if (more_moving)
2270 DEBUG ("movers pending");
2271
2272 if (more_no_history)
2273 DEBUG ("no-history pending");
2274
2275 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2276}
2277
b2f4cfde
MM
2278/* The to_wait method of target record-btrace. */
2279
2280static ptid_t
2281record_btrace_wait (struct target_ops *ops, ptid_t ptid,
2282 struct target_waitstatus *status, int options)
2283{
e3cfc1c7
MM
2284 VEC (tp_t) *moving, *no_history;
2285 struct thread_info *tp, *eventing;
2286 struct cleanup *cleanups = make_cleanup (null_cleanup, NULL);
52834460
MM
2287
2288 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);
2289
b2f4cfde 2290 /* As long as we're not replaying, just forward the request. */
a52eab48
MM
2291 if ((execution_direction != EXEC_REVERSE)
2292 && !record_btrace_is_replaying (ops, minus_one_ptid))
b2f4cfde 2293 {
e75fdfca
TT
2294 ops = ops->beneath;
2295 return ops->to_wait (ops, ptid, status, options);
b2f4cfde
MM
2296 }
2297
e3cfc1c7
MM
2298 moving = NULL;
2299 no_history = NULL;
2300
2301 make_cleanup (VEC_cleanup (tp_t), &moving);
2302 make_cleanup (VEC_cleanup (tp_t), &no_history);
2303
2304 /* Keep a work list of moving threads. */
2305 ALL_NON_EXITED_THREADS (tp)
2306 if (ptid_match (tp->ptid, ptid)
2307 && ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0))
2308 VEC_safe_push (tp_t, moving, tp);
2309
2310 if (VEC_empty (tp_t, moving))
52834460 2311 {
e3cfc1c7 2312 *status = btrace_step_no_resumed ();
52834460 2313
e3cfc1c7
MM
2314 DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid),
2315 target_waitstatus_to_string (status));
2316
2317 do_cleanups (cleanups);
2318 return null_ptid;
52834460
MM
2319 }
2320
e3cfc1c7
MM
2321 /* Step moving threads one by one, one step each, until either one thread
2322 reports an event or we run out of threads to step.
2323
2324 When stepping more than one thread, chances are that some threads reach
2325 the end of their execution history earlier than others. If we reported
2326 this immediately, all-stop on top of non-stop would stop all threads and
2327 resume the same threads next time. And we would report the same thread
2328 having reached the end of its execution history again.
2329
2330 In the worst case, this would starve the other threads. But even if other
2331 threads would be allowed to make progress, this would result in far too
2332 many intermediate stops.
2333
2334 We therefore delay the reporting of "no execution history" until we have
2335 nothing else to report. By this time, all threads should have moved to
2336 either the beginning or the end of their execution history. There will
2337 be a single user-visible stop. */
2338 eventing = NULL;
2339 while ((eventing == NULL) && !VEC_empty (tp_t, moving))
2340 {
2341 unsigned int ix;
2342
2343 ix = 0;
2344 while ((eventing == NULL) && VEC_iterate (tp_t, moving, ix, tp))
2345 {
2346 *status = record_btrace_step_thread (tp);
2347
2348 switch (status->kind)
2349 {
2350 case TARGET_WAITKIND_IGNORE:
2351 ix++;
2352 break;
2353
2354 case TARGET_WAITKIND_NO_HISTORY:
2355 VEC_safe_push (tp_t, no_history,
2356 VEC_ordered_remove (tp_t, moving, ix));
2357 break;
2358
2359 default:
2360 eventing = VEC_unordered_remove (tp_t, moving, ix);
2361 break;
2362 }
2363 }
2364 }
2365
2366 if (eventing == NULL)
2367 {
2368 /* We started with at least one moving thread. This thread must have
2369 either stopped or reached the end of its execution history.
2370
2371 In the former case, EVENTING must not be NULL.
2372 In the latter case, NO_HISTORY must not be empty. */
2373 gdb_assert (!VEC_empty (tp_t, no_history));
2374
2375 /* We kept threads moving at the end of their execution history. Stop
2376 EVENTING now that we are going to report its stop. */
2377 eventing = VEC_unordered_remove (tp_t, no_history, 0);
2378 eventing->btrace.flags &= ~BTHR_MOVE;
2379
2380 *status = btrace_step_no_history ();
2381 }
2382
2383 gdb_assert (eventing != NULL);
2384
2385 /* We kept threads replaying at the end of their execution history. Stop
2386 replaying EVENTING now that we are going to report its stop. */
2387 record_btrace_stop_replaying_at_end (eventing);
52834460
MM
2388
2389 /* Stop all other threads. */
5953356c 2390 if (!target_is_non_stop_p ())
e3cfc1c7
MM
2391 ALL_NON_EXITED_THREADS (tp)
2392 record_btrace_cancel_resume (tp);
52834460 2393
a6b5be76
MM
2394 /* In async mode, we need to announce further events. */
2395 if (target_is_async_p ())
2396 record_btrace_maybe_mark_async_event (moving, no_history);
2397
52834460 2398 /* Start record histories anew from the current position. */
e3cfc1c7 2399 record_btrace_clear_histories (&eventing->btrace);
52834460
MM
2400
2401 /* We moved the replay position but did not update registers. */
e3cfc1c7
MM
2402 registers_changed_ptid (eventing->ptid);
2403
2404 DEBUG ("wait ended by thread %d (%s): %s", eventing->num,
2405 target_pid_to_str (eventing->ptid),
2406 target_waitstatus_to_string (status));
52834460 2407
e3cfc1c7
MM
2408 do_cleanups (cleanups);
2409 return eventing->ptid;
52834460
MM
2410}
2411
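/* Roughly, the above is a round-robin scheduler for replaying threads: each
   moving thread gets one instruction per iteration until one of them
   produces a reportable event, and "no execution history" is only reported
   once nothing else is left to report.  */
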
6e4879f0
MM
2412/* The to_stop method of target record-btrace. */
2413
2414static void
2415record_btrace_stop (struct target_ops *ops, ptid_t ptid)
2416{
2417 DEBUG ("stop %s", target_pid_to_str (ptid));
2418
2419 /* As long as we're not replaying, just forward the request. */
a52eab48
MM
2420 if ((execution_direction != EXEC_REVERSE)
2421 && !record_btrace_is_replaying (ops, minus_one_ptid))
6e4879f0
MM
2422 {
2423 ops = ops->beneath;
2424 ops->to_stop (ops, ptid);
2425 }
2426 else
2427 {
2428 struct thread_info *tp;
2429
2430 ALL_NON_EXITED_THREADS (tp)
2431 if (ptid_match (tp->ptid, ptid))
2432 {
2433 tp->btrace.flags &= ~BTHR_MOVE;
2434 tp->btrace.flags |= BTHR_STOP;
2435 }
2436 }
2437}
2438
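/* The BTHR_STOP request set above is picked up by record_btrace_step_thread,
   which turns it into TARGET_WAITKIND_STOPPED with GDB_SIGNAL_0 via
   btrace_step_stopped_on_request.  */
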
52834460
MM
2439/* The to_can_execute_reverse method of target record-btrace. */
2440
2441static int
19db3e69 2442record_btrace_can_execute_reverse (struct target_ops *self)
52834460
MM
2443{
2444 return 1;
2445}
2446
9e8915c6 2447/* The to_stopped_by_sw_breakpoint method of target record-btrace. */
52834460 2448
9e8915c6
PA
2449static int
2450record_btrace_stopped_by_sw_breakpoint (struct target_ops *ops)
52834460 2451{
a52eab48 2452 if (record_btrace_is_replaying (ops, minus_one_ptid))
9e8915c6
PA
2453 {
2454 struct thread_info *tp = inferior_thread ();
2455
2456 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2457 }
2458
2459 return ops->beneath->to_stopped_by_sw_breakpoint (ops->beneath);
2460}
2461
2462/* The to_supports_stopped_by_sw_breakpoint method of target
2463 record-btrace. */
2464
2465static int
2466record_btrace_supports_stopped_by_sw_breakpoint (struct target_ops *ops)
2467{
a52eab48 2468 if (record_btrace_is_replaying (ops, minus_one_ptid))
9e8915c6
PA
2469 return 1;
2470
2471 return ops->beneath->to_supports_stopped_by_sw_breakpoint (ops->beneath);
2472}
2473
2474/* The to_stopped_by_hw_breakpoint method of target record-btrace. */
2475
2476static int
2477record_btrace_stopped_by_hw_breakpoint (struct target_ops *ops)
2478{
a52eab48 2479 if (record_btrace_is_replaying (ops, minus_one_ptid))
9e8915c6
PA
2480 {
2481 struct thread_info *tp = inferior_thread ();
2482
2483 return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2484 }
2485
2486 return ops->beneath->to_stopped_by_hw_breakpoint (ops->beneath);
2487}
2488
2489/* The to_supports_stopped_by_hw_breakpoint method of target
2490 record-btrace. */
2491
2492static int
2493record_btrace_supports_stopped_by_hw_breakpoint (struct target_ops *ops)
2494{
a52eab48 2495 if (record_btrace_is_replaying (ops, minus_one_ptid))
9e8915c6 2496 return 1;
52834460 2497
9e8915c6 2498 return ops->beneath->to_supports_stopped_by_hw_breakpoint (ops->beneath);
b2f4cfde
MM
2499}
2500
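/* The four methods above share one pattern: while replaying, answer from
   the stop reason recorded in the thread's btrace_thread_info; otherwise,
   delegate to the target beneath.  */
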
e8032dde 2501/* The to_update_thread_list method of target record-btrace. */
e2887aa3
MM
2502
2503static void
e8032dde 2504record_btrace_update_thread_list (struct target_ops *ops)
e2887aa3 2505{
e8032dde 2506 /* We don't add or remove threads during replay. */
a52eab48 2507 if (record_btrace_is_replaying (ops, minus_one_ptid))
e2887aa3
MM
2508 return;
2509
2510 /* Forward the request. */
e75fdfca 2511 ops = ops->beneath;
e8032dde 2512 ops->to_update_thread_list (ops);
e2887aa3
MM
2513}
2514
2515/* The to_thread_alive method of target record-btrace. */
2516
2517static int
2518record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
2519{
2520 /* We don't add or remove threads during replay. */
a52eab48 2521 if (record_btrace_is_replaying (ops, minus_one_ptid))
e2887aa3
MM
2522 return find_thread_ptid (ptid) != NULL;
2523
2524 /* Forward the request. */
e75fdfca
TT
2525 ops = ops->beneath;
2526 return ops->to_thread_alive (ops, ptid);
e2887aa3
MM
2527}
2528
066ce621
MM
2529/* Set the replay branch trace instruction iterator. If IT is NULL, replay
2530 is stopped. */
2531
2532static void
2533record_btrace_set_replay (struct thread_info *tp,
2534 const struct btrace_insn_iterator *it)
2535{
2536 struct btrace_thread_info *btinfo;
2537
2538 btinfo = &tp->btrace;
2539
2540 if (it == NULL || it->function == NULL)
52834460 2541 record_btrace_stop_replaying (tp);
066ce621
MM
2542 else
2543 {
2544 if (btinfo->replay == NULL)
52834460 2545 record_btrace_start_replaying (tp);
066ce621
MM
2546 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
2547 return;
2548
2549 *btinfo->replay = *it;
52834460 2550 registers_changed_ptid (tp->ptid);
066ce621
MM
2551 }
2552
52834460
MM
2553 /* Start anew from the new replay position. */
2554 record_btrace_clear_histories (btinfo);
485668e5
MM
2555
2556 stop_pc = regcache_read_pc (get_current_regcache ());
2557 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
066ce621
MM
2558}
2559
2560/* The to_goto_record_begin method of target record-btrace. */
2561
2562static void
08475817 2563record_btrace_goto_begin (struct target_ops *self)
066ce621
MM
2564{
2565 struct thread_info *tp;
2566 struct btrace_insn_iterator begin;
2567
2568 tp = require_btrace_thread ();
2569
2570 btrace_insn_begin (&begin, &tp->btrace);
2571 record_btrace_set_replay (tp, &begin);
066ce621
MM
2572}
2573
2574/* The to_goto_record_end method of target record-btrace. */
2575
2576static void
307a1b91 2577record_btrace_goto_end (struct target_ops *ops)
066ce621
MM
2578{
2579 struct thread_info *tp;
2580
2581 tp = require_btrace_thread ();
2582
2583 record_btrace_set_replay (tp, NULL);
066ce621
MM
2584}
2585
2586/* The to_goto_record method of target record-btrace. */
2587
2588static void
606183ac 2589record_btrace_goto (struct target_ops *self, ULONGEST insn)
066ce621
MM
2590{
2591 struct thread_info *tp;
2592 struct btrace_insn_iterator it;
2593 unsigned int number;
2594 int found;
2595
2596 number = insn;
2597
2598 /* Check for wrap-arounds. */
2599 if (number != insn)
2600 error (_("Instruction number out of range."));
2601
2602 tp = require_btrace_thread ();
2603
2604 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
2605 if (found == 0)
2606 error (_("No such instruction."));
2607
2608 record_btrace_set_replay (tp, &it);
066ce621
MM
2609}
2610
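/* A rough example of the user commands that end up in the three
   to_goto_record* methods above, assuming recording is active:

     (gdb) record goto begin    # replay from the start of the trace
     (gdb) record goto 42       # replay from instruction number 42
     (gdb) record goto end      # stop replaying

   The instruction numbers are those shown by "record instruction-history";
   42 is just a placeholder.  */
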
797094dd
MM
2611/* The to_record_stop_replaying method of target record-btrace. */
2612
2613static void
2614record_btrace_stop_replaying_all (struct target_ops *self)
2615{
2616 struct thread_info *tp;
2617
2618 ALL_NON_EXITED_THREADS (tp)
2619 record_btrace_stop_replaying (tp);
2620}
2621
70ad5bff
MM
2622/* The to_execution_direction target method. */
2623
2624static enum exec_direction_kind
2625record_btrace_execution_direction (struct target_ops *self)
2626{
2627 return record_btrace_resume_exec_dir;
2628}
2629
aef92902
MM
2630/* The to_prepare_to_generate_core target method. */
2631
2632static void
2633record_btrace_prepare_to_generate_core (struct target_ops *self)
2634{
2635 record_btrace_generating_corefile = 1;
2636}
2637
2638/* The to_done_generating_core target method. */
2639
2640static void
2641record_btrace_done_generating_core (struct target_ops *self)
2642{
2643 record_btrace_generating_corefile = 0;
2644}
2645
afedecd3
MM
2646/* Initialize the record-btrace target ops. */
2647
2648static void
2649init_record_btrace_ops (void)
2650{
2651 struct target_ops *ops;
2652
2653 ops = &record_btrace_ops;
2654 ops->to_shortname = "record-btrace";
2655 ops->to_longname = "Branch tracing target";
2656 ops->to_doc = "Collect control-flow trace and provide the execution history.";
2657 ops->to_open = record_btrace_open;
2658 ops->to_close = record_btrace_close;
b7d2e916 2659 ops->to_async = record_btrace_async;
afedecd3
MM
2660 ops->to_detach = record_detach;
2661 ops->to_disconnect = record_disconnect;
2662 ops->to_mourn_inferior = record_mourn_inferior;
2663 ops->to_kill = record_kill;
afedecd3
MM
2664 ops->to_stop_recording = record_btrace_stop_recording;
2665 ops->to_info_record = record_btrace_info;
2666 ops->to_insn_history = record_btrace_insn_history;
2667 ops->to_insn_history_from = record_btrace_insn_history_from;
2668 ops->to_insn_history_range = record_btrace_insn_history_range;
2669 ops->to_call_history = record_btrace_call_history;
2670 ops->to_call_history_from = record_btrace_call_history_from;
2671 ops->to_call_history_range = record_btrace_call_history_range;
07bbe694 2672 ops->to_record_is_replaying = record_btrace_is_replaying;
7ff27e9b 2673 ops->to_record_will_replay = record_btrace_will_replay;
797094dd 2674 ops->to_record_stop_replaying = record_btrace_stop_replaying_all;
633785ff
MM
2675 ops->to_xfer_partial = record_btrace_xfer_partial;
2676 ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
2677 ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
1f3ef581
MM
2678 ops->to_fetch_registers = record_btrace_fetch_registers;
2679 ops->to_store_registers = record_btrace_store_registers;
2680 ops->to_prepare_to_store = record_btrace_prepare_to_store;
ac01945b
TT
2681 ops->to_get_unwinder = &record_btrace_to_get_unwinder;
2682 ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;
b2f4cfde
MM
2683 ops->to_resume = record_btrace_resume;
2684 ops->to_wait = record_btrace_wait;
6e4879f0 2685 ops->to_stop = record_btrace_stop;
e8032dde 2686 ops->to_update_thread_list = record_btrace_update_thread_list;
e2887aa3 2687 ops->to_thread_alive = record_btrace_thread_alive;
066ce621
MM
2688 ops->to_goto_record_begin = record_btrace_goto_begin;
2689 ops->to_goto_record_end = record_btrace_goto_end;
2690 ops->to_goto_record = record_btrace_goto;
52834460 2691 ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
9e8915c6
PA
2692 ops->to_stopped_by_sw_breakpoint = record_btrace_stopped_by_sw_breakpoint;
2693 ops->to_supports_stopped_by_sw_breakpoint
2694 = record_btrace_supports_stopped_by_sw_breakpoint;
2695 ops->to_stopped_by_hw_breakpoint = record_btrace_stopped_by_hw_breakpoint;
2696 ops->to_supports_stopped_by_hw_breakpoint
2697 = record_btrace_supports_stopped_by_hw_breakpoint;
70ad5bff 2698 ops->to_execution_direction = record_btrace_execution_direction;
aef92902
MM
2699 ops->to_prepare_to_generate_core = record_btrace_prepare_to_generate_core;
2700 ops->to_done_generating_core = record_btrace_done_generating_core;
afedecd3
MM
2701 ops->to_stratum = record_stratum;
2702 ops->to_magic = OPS_MAGIC;
2703}
2704
f4abbc16
MM
2705/* Start recording in BTS format. */
2706
2707static void
2708cmd_record_btrace_bts_start (char *args, int from_tty)
2709{
f4abbc16
MM
2710 if (args != NULL && *args != 0)
2711 error (_("Invalid argument."));
2712
2713 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2714
492d29ea
PA
2715 TRY
2716 {
2717 execute_command ("target record-btrace", from_tty);
2718 }
2719 CATCH (exception, RETURN_MASK_ALL)
f4abbc16
MM
2720 {
2721 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2722 throw_exception (exception);
2723 }
492d29ea 2724 END_CATCH
f4abbc16
MM
2725}
2726
b20a6524 2727/* Start recording in Intel(R) Processor Trace format. */
afedecd3
MM
2728
2729static void
b20a6524 2730cmd_record_btrace_pt_start (char *args, int from_tty)
afedecd3
MM
2731{
2732 if (args != NULL && *args != 0)
2733 error (_("Invalid argument."));
2734
b20a6524 2735 record_btrace_conf.format = BTRACE_FORMAT_PT;
f4abbc16 2736
492d29ea
PA
2737 TRY
2738 {
2739 execute_command ("target record-btrace", from_tty);
2740 }
2741 CATCH (exception, RETURN_MASK_ALL)
2742 {
2743 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2744 throw_exception (exception);
2745 }
2746 END_CATCH
afedecd3
MM
2747}
2748
b20a6524
MM
2749/* Start recording without a specified format: try Intel(R) Processor
2750 Trace first and fall back to BTS. */
2750
2751static void
2752cmd_record_btrace_start (char *args, int from_tty)
2753{
2754 if (args != NULL && *args != 0)
2755 error (_("Invalid argument."));
2756
2757 record_btrace_conf.format = BTRACE_FORMAT_PT;
2758
2759 TRY
2760 {
2761 execute_command ("target record-btrace", from_tty);
2762 }
2763 CATCH (exception, RETURN_MASK_ALL)
2764 {
2765 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2766
2767 TRY
2768 {
2769 execute_command ("target record-btrace", from_tty);
2770 }
2771 CATCH (exception, RETURN_MASK_ALL)
2772 {
2773 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2774 throw_exception (exception);
2775 }
2776 END_CATCH
2777 }
2778 END_CATCH
2779}
2780
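/* For illustration, the three commands defined above behave as follows,
   subject to what the processor supports:

     (gdb) record btrace bts    # use the Branch Trace Store format
     (gdb) record btrace pt     # use the Intel(R) Processor Trace format
     (gdb) record btrace        # try pt first, fall back to bts

   "info record" shows the format and buffer size actually in use.  */
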
67b5c0c1
MM
2781/* The "set record btrace" command. */
2782
2783static void
2784cmd_set_record_btrace (char *args, int from_tty)
2785{
2786 cmd_show_list (set_record_btrace_cmdlist, from_tty, "");
2787}
2788
2789/* The "show record btrace" command. */
2790
2791static void
2792cmd_show_record_btrace (char *args, int from_tty)
2793{
2794 cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
2795}
2796
2797/* The "show record btrace replay-memory-access" command. */
2798
2799static void
2800cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2801 struct cmd_list_element *c, const char *value)
2802{
2803 fprintf_filtered (file, _("Replay memory access is %s.\n"),
2804 replay_memory_access);
2805}
2806
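/* For example:

     (gdb) set record btrace replay-memory-access read-write
     (gdb) show record btrace replay-memory-access
     Replay memory access is read-write.

   This only affects memory accesses while replaying; branch tracing itself
   does not record data.  */
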
d33501a5
MM
2807/* The "set record btrace bts" command. */
2808
2809static void
2810cmd_set_record_btrace_bts (char *args, int from_tty)
2811{
2812 printf_unfiltered (_("\"set record btrace bts\" must be followed "
b20a6524 2813 "by an appropriate subcommand.\n"));
d33501a5
MM
2814 help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
2815 all_commands, gdb_stdout);
2816}
2817
2818/* The "show record btrace bts" command. */
2819
2820static void
2821cmd_show_record_btrace_bts (char *args, int from_tty)
2822{
2823 cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
2824}
2825
b20a6524
MM
2826/* The "set record btrace pt" command. */
2827
2828static void
2829cmd_set_record_btrace_pt (char *args, int from_tty)
2830{
2831 printf_unfiltered (_("\"set record btrace pt\" must be followed "
2832 "by an appropriate subcommand.\n"));
2833 help_list (set_record_btrace_pt_cmdlist, "set record btrace pt ",
2834 all_commands, gdb_stdout);
2835}
2836
2837/* The "show record btrace pt" command. */
2838
2839static void
2840cmd_show_record_btrace_pt (char *args, int from_tty)
2841{
2842 cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, "");
2843}
2844
2845/* The "record btrace bts buffer-size" show value function. */
2846
2847static void
2848show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
2849 struct cmd_list_element *c,
2850 const char *value)
2851{
2852 fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
2853 value);
2854}
2855
2856/* The "record btrace pt buffer-size" show value function. */
2857
2858static void
2859show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
2860 struct cmd_list_element *c,
2861 const char *value)
2862{
2863 fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
2864 value);
2865}
2866
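/* For example, to request a larger BTS buffer before starting to record
   (the size cannot be changed while recording):

     (gdb) set record btrace bts buffer-size 131072
     (gdb) record btrace bts
     (gdb) info record

   The actual buffer size may differ from the requested one; "info record"
   shows what was obtained.  */
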
afedecd3
MM
2867void _initialize_record_btrace (void);
2868
2869/* Initialize btrace commands. */
2870
2871void
2872_initialize_record_btrace (void)
2873{
f4abbc16
MM
2874 add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
2875 _("Start branch trace recording."), &record_btrace_cmdlist,
2876 "record btrace ", 0, &record_cmdlist);
afedecd3
MM
2877 add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
2878
f4abbc16
MM
2879 add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
2880 _("\
2881Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
2882The processor stores a from/to record for each branch into a cyclic buffer.\n\
2883This format may not be available on all processors."),
2884 &record_btrace_cmdlist);
2885 add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);
2886
b20a6524
MM
2887 add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
2888 _("\
2889Start branch trace recording in Intel(R) Processor Trace format.\n\n\
2890This format may not be available on all processors."),
2891 &record_btrace_cmdlist);
2892 add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);
2893
67b5c0c1
MM
2894 add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
2895 _("Set record btrace options"), &set_record_btrace_cmdlist,
2896 "set record btrace ", 0, &set_record_cmdlist);
2897
2898 add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
2899 _("Show record btrace options"), &show_record_btrace_cmdlist,
2900 "show record btrace ", 0, &show_record_cmdlist);
2901
2902 add_setshow_enum_cmd ("replay-memory-access", no_class,
2903 replay_memory_access_types, &replay_memory_access, _("\
2904Set what memory accesses are allowed during replay."), _("\
2905Show what memory accesses are allowed during replay."),
2906 _("Default is READ-ONLY.\n\n\
2907The btrace record target does not trace data.\n\
2908The memory therefore corresponds to the live target and not \
2909to the current replay position.\n\n\
2910When READ-ONLY, allow accesses to read-only memory during replay.\n\
2911When READ-WRITE, allow accesses to read-only and read-write memory during \
2912replay."),
2913 NULL, cmd_show_replay_memory_access,
2914 &set_record_btrace_cmdlist,
2915 &show_record_btrace_cmdlist);
2916
d33501a5
MM
2917 add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
2918 _("Set record btrace bts options"),
2919 &set_record_btrace_bts_cmdlist,
2920 "set record btrace bts ", 0, &set_record_btrace_cmdlist);
2921
2922 add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
2923 _("Show record btrace bts options"),
2924 &show_record_btrace_bts_cmdlist,
2925 "show record btrace bts ", 0, &show_record_btrace_cmdlist);
2926
2927 add_setshow_uinteger_cmd ("buffer-size", no_class,
2928 &record_btrace_conf.bts.size,
2929 _("Set the record/replay bts buffer size."),
2930 _("Show the record/replay bts buffer size."), _("\
2931When starting recording request a trace buffer of this size. \
2932The actual buffer size may differ from the requested size. \
2933Use \"info record\" to see the actual buffer size.\n\n\
2934Bigger buffers allow longer recording but also take more time to process \
2935the recorded execution trace.\n\n\
b20a6524
MM
2936The trace buffer size may not be changed while recording."), NULL,
2937 show_record_bts_buffer_size_value,
d33501a5
MM
2938 &set_record_btrace_bts_cmdlist,
2939 &show_record_btrace_bts_cmdlist);
2940
b20a6524
MM
2941 add_prefix_cmd ("pt", class_support, cmd_set_record_btrace_pt,
2942 _("Set record btrace pt options"),
2943 &set_record_btrace_pt_cmdlist,
2944 "set record btrace pt ", 0, &set_record_btrace_cmdlist);
2945
2946 add_prefix_cmd ("pt", class_support, cmd_show_record_btrace_pt,
2947 _("Show record btrace pt options"),
2948 &show_record_btrace_pt_cmdlist,
2949 "show record btrace pt ", 0, &show_record_btrace_cmdlist);
2950
2951 add_setshow_uinteger_cmd ("buffer-size", no_class,
2952 &record_btrace_conf.pt.size,
2953 _("Set the record/replay pt buffer size."),
2954 _("Show the record/replay pt buffer size."), _("\
2955Bigger buffers allow longer recording but also take more time to process \
2956the recorded execution.\n\
2957The actual buffer size may differ from the requested size. Use \"info record\" \
2958to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
2959 &set_record_btrace_pt_cmdlist,
2960 &show_record_btrace_pt_cmdlist);
2961
afedecd3
MM
2962 init_record_btrace_ops ();
2963 add_target (&record_btrace_ops);
0b722aec
MM
2964
2965 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
2966 xcalloc, xfree);
d33501a5
MM
2967
2968 record_btrace_conf.bts.size = 64 * 1024;
b20a6524 2969 record_btrace_conf.pt.size = 16 * 1024;
afedecd3 2970}