Split TRY_CATCH into TRY + CATCH
[deliverable/binutils-gdb.git] / gdb / record-btrace.c
1/* Branch trace support for GDB, the GNU debugger.
2
 3 Copyright (C) 2013-2015 Free Software Foundation, Inc.
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22#include "defs.h"
23#include "record.h"
24#include "gdbthread.h"
25#include "target.h"
26#include "gdbcmd.h"
27#include "disasm.h"
28#include "observer.h"
29#include "cli/cli-utils.h"
30#include "source.h"
31#include "ui-out.h"
32#include "symtab.h"
33#include "filenames.h"
 34#include "regcache.h"
 35#include "frame-unwind.h"
 36#include "hashtab.h"
 37#include "infrun.h"
38#include "event-loop.h"
39#include "inf-loop.h"
40
41/* The target_ops of record-btrace. */
42static struct target_ops record_btrace_ops;
43
44/* A new thread observer enabling branch tracing for the new thread. */
45static struct observer *record_btrace_thread_observer;
46
47/* Memory access types used in set/show record btrace replay-memory-access. */
48static const char replay_memory_access_read_only[] = "read-only";
49static const char replay_memory_access_read_write[] = "read-write";
50static const char *const replay_memory_access_types[] =
51{
52 replay_memory_access_read_only,
53 replay_memory_access_read_write,
54 NULL
55};
56
57/* The currently allowed replay memory access type. */
58static const char *replay_memory_access = replay_memory_access_read_only;
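/* "read-only" refuses memory writes while replaying and limits reads to
   read-only sections; "read-write" lifts that restriction.  Breakpoint
   insertion and removal temporarily switch to "read-write" internally.  */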
59
60/* Command lists for "set/show record btrace". */
61static struct cmd_list_element *set_record_btrace_cmdlist;
62static struct cmd_list_element *show_record_btrace_cmdlist;
 63
64/* The execution direction of the last resume we got. See record-full.c. */
65static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;
66
67/* The async event handler for reverse/replay execution. */
68static struct async_event_handler *record_btrace_async_inferior_event_handler;
69
70/* A flag indicating that we are currently generating a core file. */
71static int record_btrace_generating_corefile;
72
73/* The current branch trace configuration. */
74static struct btrace_config record_btrace_conf;
75
76/* Command list for "record btrace". */
77static struct cmd_list_element *record_btrace_cmdlist;
78
79/* Command lists for "set/show record btrace bts". */
80static struct cmd_list_element *set_record_btrace_bts_cmdlist;
81static struct cmd_list_element *show_record_btrace_bts_cmdlist;
82
83/* Print a record-btrace debug message. Use do ... while (0) to avoid
84 ambiguities when used in if statements. */
85
86#define DEBUG(msg, args...) \
87 do \
88 { \
89 if (record_debug != 0) \
90 fprintf_unfiltered (gdb_stdlog, \
91 "[record-btrace] " msg "\n", ##args); \
92 } \
93 while (0)
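/* For example, DEBUG ("open") prints "[record-btrace] open" to gdb_stdlog,
   but only while "set debug record" is non-zero.  */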
94
95
96/* Update the branch trace for the current thread and return a pointer to its
 97 thread_info.
98
99 Throws an error if there is no thread or no trace. This function never
100 returns NULL. */
101
102static struct thread_info *
103require_btrace_thread (void)
104{
105 struct thread_info *tp;
106
107 DEBUG ("require");
108
109 tp = find_thread_ptid (inferior_ptid);
110 if (tp == NULL)
111 error (_("No thread."));
112
113 btrace_fetch (tp);
114
 115 if (btrace_is_empty (tp))
116 error (_("No trace."));
117
118 return tp;
119}
120
121/* Update the branch trace for the current thread and return a pointer to its
122 branch trace information struct.
123
124 Throws an error if there is no thread or no trace. This function never
125 returns NULL. */
126
127static struct btrace_thread_info *
128require_btrace (void)
129{
130 struct thread_info *tp;
131
132 tp = require_btrace_thread ();
133
134 return &tp->btrace;
135}
136
137/* Enable branch tracing for one thread. Warn on errors. */
138
139static void
140record_btrace_enable_warn (struct thread_info *tp)
141{
142 TRY
143 {
144 btrace_enable (tp, &record_btrace_conf);
145 }
146 CATCH (error, RETURN_MASK_ERROR)
147 {
148 warning ("%s", error.message);
149 }
150 END_CATCH
151}
152
153/* Callback function to disable branch tracing for one thread. */
154
155static void
156record_btrace_disable_callback (void *arg)
157{
158 struct thread_info *tp;
159
160 tp = arg;
161
162 btrace_disable (tp);
163}
164
165/* Enable automatic tracing of new threads. */
166
167static void
168record_btrace_auto_enable (void)
169{
170 DEBUG ("attach thread observer");
171
172 record_btrace_thread_observer
173 = observer_attach_new_thread (record_btrace_enable_warn);
174}
175
176/* Disable automatic tracing of new threads. */
177
178static void
179record_btrace_auto_disable (void)
180{
 181 /* The observer may already have been detached. */
182 if (record_btrace_thread_observer == NULL)
183 return;
184
185 DEBUG ("detach thread observer");
186
187 observer_detach_new_thread (record_btrace_thread_observer);
188 record_btrace_thread_observer = NULL;
189}
190
191/* The record-btrace async event handler function. */
192
193static void
194record_btrace_handle_async_inferior_event (gdb_client_data data)
195{
196 inferior_event_handler (INF_REG_EVENT, NULL);
197}
198
199/* The to_open method of target record-btrace. */
200
201static void
 202record_btrace_open (const char *args, int from_tty)
203{
204 struct cleanup *disable_chain;
205 struct thread_info *tp;
206
207 DEBUG ("open");
208
 209 record_preopen ();
210
211 if (!target_has_execution)
212 error (_("The program is not being run."));
213
214 if (non_stop)
215 error (_("Record btrace can't debug inferior in non-stop mode."));
216
217 gdb_assert (record_btrace_thread_observer == NULL);
218
219 disable_chain = make_cleanup (null_cleanup, NULL);
 220 ALL_NON_EXITED_THREADS (tp)
221 if (args == NULL || *args == 0 || number_is_in_list (args, tp->num))
222 {
 223 btrace_enable (tp, &record_btrace_conf);
224
225 make_cleanup (record_btrace_disable_callback, tp);
226 }
227
228 record_btrace_auto_enable ();
229
230 push_target (&record_btrace_ops);
231
232 record_btrace_async_inferior_event_handler
233 = create_async_event_handler (record_btrace_handle_async_inferior_event,
234 NULL);
 235 record_btrace_generating_corefile = 0;
 236
237 observer_notify_record_changed (current_inferior (), 1);
238
239 discard_cleanups (disable_chain);
240}
241
242/* The to_stop_recording method of target record-btrace. */
243
244static void
 245record_btrace_stop_recording (struct target_ops *self)
246{
247 struct thread_info *tp;
248
249 DEBUG ("stop recording");
250
251 record_btrace_auto_disable ();
252
 253 ALL_NON_EXITED_THREADS (tp)
254 if (tp->btrace.target != NULL)
255 btrace_disable (tp);
256}
257
258/* The to_close method of target record-btrace. */
259
260static void
 261record_btrace_close (struct target_ops *self)
 262{
263 struct thread_info *tp;
264
265 if (record_btrace_async_inferior_event_handler != NULL)
266 delete_async_event_handler (&record_btrace_async_inferior_event_handler);
267
268 /* Make sure automatic recording gets disabled even if we did not stop
269 recording before closing the record-btrace target. */
270 record_btrace_auto_disable ();
271
272 /* We should have already stopped recording.
273 Tear down btrace in case we have not. */
 274 ALL_NON_EXITED_THREADS (tp)
 275 btrace_teardown (tp);
276}
277
278/* The to_async method of target record-btrace. */
279
280static void
281record_btrace_async (struct target_ops *ops,
282 void (*callback) (enum inferior_event_type event_type,
283 void *context),
284 void *context)
285{
286 if (callback != NULL)
287 mark_async_event_handler (record_btrace_async_inferior_event_handler);
288 else
289 clear_async_event_handler (record_btrace_async_inferior_event_handler);
290
291 ops->beneath->to_async (ops->beneath, callback, context);
292}
293
294/* Adjusts the size and returns a human readable size suffix. */
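/* For example, a buffer size of 2097152 bytes (2 << 20) becomes 2 with the
   suffix "MB"; a size that is not an exact multiple of 1kB is returned
   unchanged with an empty suffix.  */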
295
296static const char *
297record_btrace_adjust_size (unsigned int *size)
298{
299 unsigned int sz;
300
301 sz = *size;
302
303 if ((sz & ((1u << 30) - 1)) == 0)
304 {
305 *size = sz >> 30;
306 return "GB";
307 }
308 else if ((sz & ((1u << 20) - 1)) == 0)
309 {
310 *size = sz >> 20;
311 return "MB";
312 }
313 else if ((sz & ((1u << 10) - 1)) == 0)
314 {
315 *size = sz >> 10;
316 return "kB";
317 }
318 else
319 return "";
320}
321
322/* Print a BTS configuration. */
323
324static void
325record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
326{
327 const char *suffix;
328 unsigned int size;
329
330 size = conf->size;
331 if (size > 0)
332 {
333 suffix = record_btrace_adjust_size (&size);
334 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
335 }
336}
337
338/* Print a branch tracing configuration. */
339
340static void
341record_btrace_print_conf (const struct btrace_config *conf)
342{
343 printf_unfiltered (_("Recording format: %s.\n"),
344 btrace_format_string (conf->format));
345
346 switch (conf->format)
347 {
348 case BTRACE_FORMAT_NONE:
349 return;
350
351 case BTRACE_FORMAT_BTS:
352 record_btrace_print_bts_conf (&conf->bts);
353 return;
354 }
355
 356 internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
357}
358
359/* The to_info_record method of target record-btrace. */
360
361static void
 362record_btrace_info (struct target_ops *self)
363{
364 struct btrace_thread_info *btinfo;
 365 const struct btrace_config *conf;
 366 struct thread_info *tp;
 367 unsigned int insns, calls, gaps;
368
369 DEBUG ("info");
370
371 tp = find_thread_ptid (inferior_ptid);
372 if (tp == NULL)
373 error (_("No thread."));
374
375 btinfo = &tp->btrace;
376
377 conf = btrace_conf (btinfo);
378 if (conf != NULL)
 379 record_btrace_print_conf (conf);
 380
381 btrace_fetch (tp);
382
383 insns = 0;
384 calls = 0;
 385 gaps = 0;
 386
 387 if (!btrace_is_empty (tp))
388 {
389 struct btrace_call_iterator call;
390 struct btrace_insn_iterator insn;
391
392 btrace_call_end (&call, btinfo);
393 btrace_call_prev (&call, 1);
 394 calls = btrace_call_number (&call);
395
396 btrace_insn_end (&insn, btinfo);
 397
 398 insns = btrace_insn_number (&insn);
399 if (insns != 0)
400 {
401 /* The last instruction does not really belong to the trace. */
402 insns -= 1;
403 }
404 else
405 {
406 unsigned int steps;
407
408 /* Skip gaps at the end. */
409 do
410 {
411 steps = btrace_insn_prev (&insn, 1);
412 if (steps == 0)
413 break;
414
415 insns = btrace_insn_number (&insn);
416 }
417 while (insns == 0);
418 }
419
420 gaps = btinfo->ngaps;
 421 }
 422
423 printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
424 "for thread %d (%s).\n"), insns, calls, gaps,
425 tp->num, target_pid_to_str (tp->ptid));
426
427 if (btrace_is_replaying (tp))
428 printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
429 btrace_insn_number (btinfo->replay));
430}
431
432/* Print a decode error. */
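/* Gaps in the trace show up in the instruction and call history as
   "[decode error (<errcode>): <description>]" lines.  */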
433
434static void
435btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
436 enum btrace_format format)
437{
438 const char *errstr;
439 int is_error;
440
441 errstr = _("unknown");
442 is_error = 1;
443
444 switch (format)
445 {
446 default:
447 break;
448
449 case BTRACE_FORMAT_BTS:
450 switch (errcode)
451 {
452 default:
453 break;
454
455 case BDE_BTS_OVERFLOW:
456 errstr = _("instruction overflow");
457 break;
458
459 case BDE_BTS_INSN_SIZE:
460 errstr = _("unknown instruction");
461 break;
462 }
463 break;
464 }
465
466 ui_out_text (uiout, _("["));
467 if (is_error)
468 {
469 ui_out_text (uiout, _("decode error ("));
470 ui_out_field_int (uiout, "errcode", errcode);
471 ui_out_text (uiout, _("): "));
472 }
473 ui_out_text (uiout, errstr);
474 ui_out_text (uiout, _("]\n"));
475}
476
477/* Print an unsigned int. */
478
479static void
480ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
481{
482 ui_out_field_fmt (uiout, fld, "%u", val);
483}
484
485/* Disassemble a section of the recorded instruction trace. */
486
487static void
 488btrace_insn_history (struct ui_out *uiout,
 489 const struct btrace_thread_info *btinfo,
490 const struct btrace_insn_iterator *begin,
491 const struct btrace_insn_iterator *end, int flags)
492{
493 struct gdbarch *gdbarch;
 494 struct btrace_insn_iterator it;
 495
496 DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
497 btrace_insn_number (end));
498
499 gdbarch = target_gdbarch ();
500
 501 for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
 502 {
503 const struct btrace_insn *insn;
504
505 insn = btrace_insn_get (&it);
506
507 /* A NULL instruction indicates a gap in the trace. */
508 if (insn == NULL)
509 {
510 const struct btrace_config *conf;
511
512 conf = btrace_conf (btinfo);
 513
514 /* We have trace so we must have a configuration. */
515 gdb_assert (conf != NULL);
516
517 btrace_ui_out_decode_error (uiout, it.function->errcode,
518 conf->format);
519 }
520 else
521 {
522 /* Print the instruction index. */
523 ui_out_field_uint (uiout, "index", btrace_insn_number (&it));
524 ui_out_text (uiout, "\t");
525
526 /* Disassembly with '/m' flag may not produce the expected result.
527 See PR gdb/11833. */
528 gdb_disassembly (gdbarch, uiout, NULL, flags, 1, insn->pc,
529 insn->pc + 1);
530 }
531 }
532}
533
534/* The to_insn_history method of target record-btrace. */
535
536static void
 537record_btrace_insn_history (struct target_ops *self, int size, int flags)
538{
539 struct btrace_thread_info *btinfo;
540 struct btrace_insn_history *history;
541 struct btrace_insn_iterator begin, end;
542 struct cleanup *uiout_cleanup;
543 struct ui_out *uiout;
 544 unsigned int context, covered;
545
546 uiout = current_uiout;
547 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
548 "insn history");
 549 context = abs (size);
550 if (context == 0)
551 error (_("Bad record instruction-history-size."));
552
553 btinfo = require_btrace ();
554 history = btinfo->insn_history;
555 if (history == NULL)
 556 {
 557 struct btrace_insn_iterator *replay;
 558
 559 DEBUG ("insn-history (0x%x): %d", flags, size);
 560
561 /* If we're replaying, we start at the replay position. Otherwise, we
562 start at the tail of the trace. */
563 replay = btinfo->replay;
564 if (replay != NULL)
565 begin = *replay;
566 else
567 btrace_insn_end (&begin, btinfo);
568
569 /* We start from here and expand in the requested direction. Then we
570 expand in the other direction, as well, to fill up any remaining
571 context. */
572 end = begin;
573 if (size < 0)
574 {
575 /* We want the current position covered, as well. */
576 covered = btrace_insn_next (&end, 1);
577 covered += btrace_insn_prev (&begin, context - covered);
578 covered += btrace_insn_next (&end, context - covered);
579 }
580 else
581 {
582 covered = btrace_insn_next (&end, context);
583 covered += btrace_insn_prev (&begin, context - covered);
584 }
585 }
586 else
587 {
588 begin = history->begin;
589 end = history->end;
 590
591 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
592 btrace_insn_number (&begin), btrace_insn_number (&end));
 593
594 if (size < 0)
595 {
596 end = begin;
597 covered = btrace_insn_prev (&begin, context);
598 }
599 else
600 {
601 begin = end;
602 covered = btrace_insn_next (&end, context);
603 }
604 }
605
 606 if (covered > 0)
 607 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
608 else
609 {
610 if (size < 0)
611 printf_unfiltered (_("At the start of the branch trace record.\n"));
612 else
613 printf_unfiltered (_("At the end of the branch trace record.\n"));
614 }
 615
 616 btrace_set_insn_history (btinfo, &begin, &end);
617 do_cleanups (uiout_cleanup);
618}
619
620/* The to_insn_history_range method of target record-btrace. */
621
622static void
623record_btrace_insn_history_range (struct target_ops *self,
624 ULONGEST from, ULONGEST to, int flags)
625{
626 struct btrace_thread_info *btinfo;
627 struct btrace_insn_history *history;
628 struct btrace_insn_iterator begin, end;
629 struct cleanup *uiout_cleanup;
630 struct ui_out *uiout;
631 unsigned int low, high;
632 int found;
633
634 uiout = current_uiout;
635 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
636 "insn history");
637 low = from;
638 high = to;
 639
 640 DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);
641
642 /* Check for wrap-arounds. */
 643 if (low != from || high != to)
644 error (_("Bad range."));
645
 646 if (high < low)
647 error (_("Bad range."));
648
 649 btinfo = require_btrace ();
 650
651 found = btrace_find_insn_by_number (&begin, btinfo, low);
652 if (found == 0)
653 error (_("Range out of bounds."));
 654
655 found = btrace_find_insn_by_number (&end, btinfo, high);
656 if (found == 0)
657 {
658 /* Silently truncate the range. */
659 btrace_insn_end (&end, btinfo);
660 }
661 else
662 {
663 /* We want both begin and end to be inclusive. */
664 btrace_insn_next (&end, 1);
665 }
 666
 667 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
 668 btrace_set_insn_history (btinfo, &begin, &end);
669
670 do_cleanups (uiout_cleanup);
671}
672
673/* The to_insn_history_from method of target record-btrace. */
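/* A negative SIZE requests the SIZE instructions up to and including FROM;
   a positive SIZE requests the SIZE instructions starting at FROM.  */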
674
675static void
676record_btrace_insn_history_from (struct target_ops *self,
677 ULONGEST from, int size, int flags)
678{
679 ULONGEST begin, end, context;
680
681 context = abs (size);
682 if (context == 0)
683 error (_("Bad record instruction-history-size."));
684
685 if (size < 0)
686 {
687 end = from;
688
689 if (from < context)
690 begin = 0;
691 else
 692 begin = from - context + 1;
693 }
694 else
695 {
696 begin = from;
 697 end = from + context - 1;
698
699 /* Check for wrap-around. */
700 if (end < begin)
701 end = ULONGEST_MAX;
702 }
703
 704 record_btrace_insn_history_range (self, begin, end, flags);
705}
706
707/* Print the instruction number range for a function call history line. */
708
709static void
710btrace_call_history_insn_range (struct ui_out *uiout,
711 const struct btrace_function *bfun)
 712{
713 unsigned int begin, end, size;
714
715 size = VEC_length (btrace_insn_s, bfun->insn);
716 gdb_assert (size > 0);
 717
 718 begin = bfun->insn_offset;
 719 end = begin + size - 1;
 720
 721 ui_out_field_uint (uiout, "insn begin", begin);
 722 ui_out_text (uiout, ",");
 723 ui_out_field_uint (uiout, "insn end", end);
724}
725
726/* Compute the lowest and highest source line for the instructions in BFUN
727 and return them in PBEGIN and PEND.
728 Ignore instructions that can't be mapped to BFUN, e.g. instructions that
729 result from inlining or macro expansion. */
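/* If no instruction could be mapped, *PBEGIN (INT_MAX) ends up greater than
   *PEND (INT_MIN); callers treat that as "no source range".  */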
730
731static void
732btrace_compute_src_line_range (const struct btrace_function *bfun,
733 int *pbegin, int *pend)
734{
735 struct btrace_insn *insn;
736 struct symtab *symtab;
737 struct symbol *sym;
738 unsigned int idx;
739 int begin, end;
740
741 begin = INT_MAX;
742 end = INT_MIN;
743
744 sym = bfun->sym;
745 if (sym == NULL)
746 goto out;
747
748 symtab = symbol_symtab (sym);
749
750 for (idx = 0; VEC_iterate (btrace_insn_s, bfun->insn, idx, insn); ++idx)
751 {
752 struct symtab_and_line sal;
753
754 sal = find_pc_line (insn->pc, 0);
755 if (sal.symtab != symtab || sal.line == 0)
756 continue;
757
758 begin = min (begin, sal.line);
759 end = max (end, sal.line);
760 }
761
762 out:
763 *pbegin = begin;
764 *pend = end;
765}
766
767/* Print the source line information for a function call history line. */
768
769static void
770btrace_call_history_src_line (struct ui_out *uiout,
771 const struct btrace_function *bfun)
772{
773 struct symbol *sym;
23a7fe75 774 int begin, end;
775
776 sym = bfun->sym;
777 if (sym == NULL)
778 return;
779
780 ui_out_field_string (uiout, "file",
 781 symtab_to_filename_for_display (symbol_symtab (sym)));
 782
 783 btrace_compute_src_line_range (bfun, &begin, &end);
 784 if (end < begin)
785 return;
786
787 ui_out_text (uiout, ":");
 788 ui_out_field_int (uiout, "min line", begin);
 789
 790 if (end == begin)
791 return;
792
 793 ui_out_text (uiout, ",");
 794 ui_out_field_int (uiout, "max line", end);
795}
796
797/* Get the name of a branch trace function. */
798
799static const char *
800btrace_get_bfun_name (const struct btrace_function *bfun)
801{
802 struct minimal_symbol *msym;
803 struct symbol *sym;
804
805 if (bfun == NULL)
806 return "??";
807
808 msym = bfun->msym;
809 sym = bfun->sym;
810
811 if (sym != NULL)
812 return SYMBOL_PRINT_NAME (sym);
813 else if (msym != NULL)
 814 return MSYMBOL_PRINT_NAME (msym);
815 else
816 return "??";
817}
818
819/* Disassemble a section of the recorded function trace. */
820
821static void
 822btrace_call_history (struct ui_out *uiout,
 823 const struct btrace_thread_info *btinfo,
824 const struct btrace_call_iterator *begin,
825 const struct btrace_call_iterator *end,
826 enum record_print_flag flags)
827{
 828 struct btrace_call_iterator it;
 829
830 DEBUG ("ftrace (0x%x): [%u; %u)", flags, btrace_call_number (begin),
831 btrace_call_number (end));
 832
 833 for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
 834 {
835 const struct btrace_function *bfun;
836 struct minimal_symbol *msym;
837 struct symbol *sym;
838
839 bfun = btrace_call_get (&it);
 840 sym = bfun->sym;
 841 msym = bfun->msym;
 842
 843 /* Print the function index. */
 844 ui_out_field_uint (uiout, "index", bfun->number);
845 ui_out_text (uiout, "\t");
846
847 /* Indicate gaps in the trace. */
848 if (bfun->errcode != 0)
849 {
850 const struct btrace_config *conf;
851
852 conf = btrace_conf (btinfo);
853
854 /* We have trace so we must have a configuration. */
855 gdb_assert (conf != NULL);
856
857 btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);
858
859 continue;
860 }
861
862 if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
863 {
864 int level = bfun->level + btinfo->level, i;
865
866 for (i = 0; i < level; ++i)
867 ui_out_text (uiout, " ");
868 }
869
870 if (sym != NULL)
871 ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (sym));
872 else if (msym != NULL)
 873 ui_out_field_string (uiout, "function", MSYMBOL_PRINT_NAME (msym));
874 else if (!ui_out_is_mi_like_p (uiout))
875 ui_out_field_string (uiout, "function", "??");
876
 877 if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
 878 {
 879 ui_out_text (uiout, _("\tinst "));
 880 btrace_call_history_insn_range (uiout, bfun);
881 }
882
 883 if ((flags & RECORD_PRINT_SRC_LINE) != 0)
 884 {
 885 ui_out_text (uiout, _("\tat "));
 886 btrace_call_history_src_line (uiout, bfun);
887 }
888
889 ui_out_text (uiout, "\n");
890 }
891}
892
893/* The to_call_history method of target record-btrace. */
894
895static void
 896record_btrace_call_history (struct target_ops *self, int size, int flags)
897{
898 struct btrace_thread_info *btinfo;
899 struct btrace_call_history *history;
900 struct btrace_call_iterator begin, end;
901 struct cleanup *uiout_cleanup;
902 struct ui_out *uiout;
 903 unsigned int context, covered;
904
905 uiout = current_uiout;
906 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
907 "insn history");
 908 context = abs (size);
909 if (context == 0)
910 error (_("Bad record function-call-history-size."));
911
912 btinfo = require_btrace ();
913 history = btinfo->call_history;
914 if (history == NULL)
 915 {
 916 struct btrace_insn_iterator *replay;
 917
 918 DEBUG ("call-history (0x%x): %d", flags, size);
 919
920 /* If we're replaying, we start at the replay position. Otherwise, we
921 start at the tail of the trace. */
922 replay = btinfo->replay;
923 if (replay != NULL)
924 {
925 begin.function = replay->function;
926 begin.btinfo = btinfo;
927 }
928 else
929 btrace_call_end (&begin, btinfo);
930
931 /* We start from here and expand in the requested direction. Then we
932 expand in the other direction, as well, to fill up any remaining
933 context. */
934 end = begin;
935 if (size < 0)
936 {
937 /* We want the current position covered, as well. */
938 covered = btrace_call_next (&end, 1);
939 covered += btrace_call_prev (&begin, context - covered);
940 covered += btrace_call_next (&end, context - covered);
941 }
942 else
943 {
944 covered = btrace_call_next (&end, context);
 945 covered += btrace_call_prev (&begin, context - covered);
946 }
947 }
948 else
949 {
950 begin = history->begin;
951 end = history->end;
 952
953 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", flags, size,
954 btrace_call_number (&begin), btrace_call_number (&end));
 955
956 if (size < 0)
957 {
958 end = begin;
959 covered = btrace_call_prev (&begin, context);
960 }
961 else
962 {
963 begin = end;
964 covered = btrace_call_next (&end, context);
965 }
966 }
967
 968 if (covered > 0)
 969 btrace_call_history (uiout, btinfo, &begin, &end, flags);
970 else
971 {
972 if (size < 0)
973 printf_unfiltered (_("At the start of the branch trace record.\n"));
974 else
975 printf_unfiltered (_("At the end of the branch trace record.\n"));
976 }
 977
 978 btrace_set_call_history (btinfo, &begin, &end);
979 do_cleanups (uiout_cleanup);
980}
981
982/* The to_call_history_range method of target record-btrace. */
983
984static void
985record_btrace_call_history_range (struct target_ops *self,
986 ULONGEST from, ULONGEST to, int flags)
987{
988 struct btrace_thread_info *btinfo;
989 struct btrace_call_history *history;
990 struct btrace_call_iterator begin, end;
991 struct cleanup *uiout_cleanup;
992 struct ui_out *uiout;
993 unsigned int low, high;
994 int found;
995
996 uiout = current_uiout;
997 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
998 "func history");
999 low = from;
1000 high = to;
 1001
 1002 DEBUG ("call-history (0x%x): [%u; %u)", flags, low, high);
1003
1004 /* Check for wrap-arounds. */
 1005 if (low != from || high != to)
1006 error (_("Bad range."));
1007
 1008 if (high < low)
1009 error (_("Bad range."));
1010
 1011 btinfo = require_btrace ();
 1012
1013 found = btrace_find_call_by_number (&begin, btinfo, low);
1014 if (found == 0)
1015 error (_("Range out of bounds."));
 1016
1017 found = btrace_find_call_by_number (&end, btinfo, high);
1018 if (found == 0)
1019 {
1020 /* Silently truncate the range. */
1021 btrace_call_end (&end, btinfo);
1022 }
1023 else
1024 {
1025 /* We want both begin and end to be inclusive. */
1026 btrace_call_next (&end, 1);
1027 }
 1028
 1029 btrace_call_history (uiout, btinfo, &begin, &end, flags);
 1030 btrace_set_call_history (btinfo, &begin, &end);
1031
1032 do_cleanups (uiout_cleanup);
1033}
1034
1035/* The to_call_history_from method of target record-btrace. */
1036
1037static void
1038record_btrace_call_history_from (struct target_ops *self,
1039 ULONGEST from, int size, int flags)
1040{
1041 ULONGEST begin, end, context;
1042
1043 context = abs (size);
1044 if (context == 0)
1045 error (_("Bad record function-call-history-size."));
1046
1047 if (size < 0)
1048 {
1049 end = from;
1050
1051 if (from < context)
1052 begin = 0;
1053 else
 1054 begin = from - context + 1;
1055 }
1056 else
1057 {
1058 begin = from;
 1059 end = from + context - 1;
1060
1061 /* Check for wrap-around. */
1062 if (end < begin)
1063 end = ULONGEST_MAX;
1064 }
1065
 1066 record_btrace_call_history_range (self, begin, end, flags);
1067}
1068
1069/* The to_record_is_replaying method of target record-btrace. */
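/* Returns non-zero if any non-exited thread is currently replaying.  */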
1070
1071static int
 1072record_btrace_is_replaying (struct target_ops *self)
1073{
1074 struct thread_info *tp;
1075
 1076 ALL_NON_EXITED_THREADS (tp)
1077 if (btrace_is_replaying (tp))
1078 return 1;
1079
1080 return 0;
1081}
1082
1083/* The to_xfer_partial method of target record-btrace. */
1084
 1085static enum target_xfer_status
1086record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
1087 const char *annex, gdb_byte *readbuf,
1088 const gdb_byte *writebuf, ULONGEST offset,
 1089 ULONGEST len, ULONGEST *xfered_len)
1090{
1091 struct target_ops *t;
1092
1093 /* Filter out requests that don't make sense during replay. */
 1094 if (replay_memory_access == replay_memory_access_read_only
 1095 && !record_btrace_generating_corefile
 1096 && record_btrace_is_replaying (ops))
1097 {
1098 switch (object)
1099 {
1100 case TARGET_OBJECT_MEMORY:
1101 {
1102 struct target_section *section;
1103
1104 /* We do not allow writing memory in general. */
1105 if (writebuf != NULL)
1106 {
1107 *xfered_len = len;
 1108 return TARGET_XFER_UNAVAILABLE;
 1109 }
1110
1111 /* We allow reading readonly memory. */
1112 section = target_section_by_addr (ops, offset);
1113 if (section != NULL)
1114 {
1115 /* Check if the section we found is readonly. */
1116 if ((bfd_get_section_flags (section->the_bfd_section->owner,
1117 section->the_bfd_section)
1118 & SEC_READONLY) != 0)
1119 {
1120 /* Truncate the request to fit into this section. */
1121 len = min (len, section->endaddr - offset);
1122 break;
1123 }
1124 }
1125
 1126 *xfered_len = len;
 1127 return TARGET_XFER_UNAVAILABLE;
1128 }
1129 }
1130 }
1131
1132 /* Forward the request. */
1133 ops = ops->beneath;
1134 return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
1135 offset, len, xfered_len);
1136}
1137
1138/* The to_insert_breakpoint method of target record-btrace. */
1139
1140static int
1141record_btrace_insert_breakpoint (struct target_ops *ops,
1142 struct gdbarch *gdbarch,
1143 struct bp_target_info *bp_tgt)
1144{
1145 const char *old;
1146 int ret;
1147
1148 /* Inserting breakpoints requires accessing memory. Allow it for the
1149 duration of this function. */
1150 old = replay_memory_access;
1151 replay_memory_access = replay_memory_access_read_write;
1152
1153 ret = 0;
1154 TRY
1155 {
1156 ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);
1157 }
 1158
 1159 replay_memory_access = old;
 1160
1161 CATCH (except, RETURN_MASK_ALL)
1162 {
1163 throw_exception (except);
1164 }
1165 END_CATCH
1166
1167 return ret;
1168}
1169
1170/* The to_remove_breakpoint method of target record-btrace. */
1171
1172static int
1173record_btrace_remove_breakpoint (struct target_ops *ops,
1174 struct gdbarch *gdbarch,
1175 struct bp_target_info *bp_tgt)
1176{
1177 const char *old;
1178 int ret;
1179
1180 /* Removing breakpoints requires accessing memory. Allow it for the
1181 duration of this function. */
1182 old = replay_memory_access;
1183 replay_memory_access = replay_memory_access_read_write;
1184
1185 ret = 0;
1186 TRY
1187 {
1188 ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt);
1189 }
 1190
 1191 replay_memory_access = old;
 1192
1193 CATCH (except, RETURN_MASK_ALL)
1194 {
1195 throw_exception (except);
1196 }
1197 END_CATCH
1198
1199 return ret;
1200}
1201
1202/* The to_fetch_registers method of target record-btrace. */
1203
1204static void
1205record_btrace_fetch_registers (struct target_ops *ops,
1206 struct regcache *regcache, int regno)
1207{
1208 struct btrace_insn_iterator *replay;
1209 struct thread_info *tp;
1210
1211 tp = find_thread_ptid (inferior_ptid);
1212 gdb_assert (tp != NULL);
1213
1214 replay = tp->btrace.replay;
 1215 if (replay != NULL && !record_btrace_generating_corefile)
1216 {
1217 const struct btrace_insn *insn;
1218 struct gdbarch *gdbarch;
1219 int pcreg;
1220
1221 gdbarch = get_regcache_arch (regcache);
1222 pcreg = gdbarch_pc_regnum (gdbarch);
1223 if (pcreg < 0)
1224 return;
1225
1226 /* We can only provide the PC register. */
1227 if (regno >= 0 && regno != pcreg)
1228 return;
1229
1230 insn = btrace_insn_get (replay);
1231 gdb_assert (insn != NULL);
1232
1233 regcache_raw_supply (regcache, regno, &insn->pc);
1234 }
1235 else
1236 {
 1237 struct target_ops *t = ops->beneath;
 1238
 1239 t->to_fetch_registers (t, regcache, regno);
1240 }
1241}
1242
1243/* The to_store_registers method of target record-btrace. */
1244
1245static void
1246record_btrace_store_registers (struct target_ops *ops,
1247 struct regcache *regcache, int regno)
1248{
1249 struct target_ops *t;
1250
 1251 if (!record_btrace_generating_corefile && record_btrace_is_replaying (ops))
1252 error (_("This record target does not allow writing registers."));
1253
1254 gdb_assert (may_write_registers != 0);
1255
1256 t = ops->beneath;
1257 t->to_store_registers (t, regcache, regno);
1258}
1259
1260/* The to_prepare_to_store method of target record-btrace. */
1261
1262static void
1263record_btrace_prepare_to_store (struct target_ops *ops,
1264 struct regcache *regcache)
1265{
1266 struct target_ops *t;
1267
 1268 if (!record_btrace_generating_corefile && record_btrace_is_replaying (ops))
1269 return;
1270
1271 t = ops->beneath;
1272 t->to_prepare_to_store (t, regcache);
1273}
1274
1275/* The branch trace frame cache. */
1276
1277struct btrace_frame_cache
1278{
1279 /* The thread. */
1280 struct thread_info *tp;
1281
1282 /* The frame info. */
1283 struct frame_info *frame;
1284
1285 /* The branch trace function segment. */
1286 const struct btrace_function *bfun;
1287};
1288
1289/* A struct btrace_frame_cache hash table indexed by NEXT. */
1290
1291static htab_t bfcache;
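/* Entries are created by bfcache_new and keyed on the frame_info pointer;
   record_btrace_frame_dealloc_cache removes them again when a frame's cache
   is discarded.  */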
1292
1293/* hash_f for htab_create_alloc of bfcache. */
1294
1295static hashval_t
1296bfcache_hash (const void *arg)
1297{
1298 const struct btrace_frame_cache *cache = arg;
1299
1300 return htab_hash_pointer (cache->frame);
1301}
1302
1303/* eq_f for htab_create_alloc of bfcache. */
1304
1305static int
1306bfcache_eq (const void *arg1, const void *arg2)
1307{
1308 const struct btrace_frame_cache *cache1 = arg1;
1309 const struct btrace_frame_cache *cache2 = arg2;
1310
1311 return cache1->frame == cache2->frame;
1312}
1313
1314/* Create a new btrace frame cache. */
1315
1316static struct btrace_frame_cache *
1317bfcache_new (struct frame_info *frame)
1318{
1319 struct btrace_frame_cache *cache;
1320 void **slot;
1321
1322 cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
1323 cache->frame = frame;
1324
1325 slot = htab_find_slot (bfcache, cache, INSERT);
1326 gdb_assert (*slot == NULL);
1327 *slot = cache;
1328
1329 return cache;
1330}
1331
1332/* Extract the branch trace function from a branch trace frame. */
1333
1334static const struct btrace_function *
1335btrace_get_frame_function (struct frame_info *frame)
1336{
1337 const struct btrace_frame_cache *cache;
1338 const struct btrace_function *bfun;
1339 struct btrace_frame_cache pattern;
1340 void **slot;
1341
1342 pattern.frame = frame;
1343
1344 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1345 if (slot == NULL)
1346 return NULL;
1347
1348 cache = *slot;
1349 return cache->bfun;
1350}
1351
1352/* Implement stop_reason method for record_btrace_frame_unwind. */
1353
1354static enum unwind_stop_reason
1355record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1356 void **this_cache)
1357{
1358 const struct btrace_frame_cache *cache;
1359 const struct btrace_function *bfun;
1360
1361 cache = *this_cache;
1362 bfun = cache->bfun;
1363 gdb_assert (bfun != NULL);
1364
1365 if (bfun->up == NULL)
1366 return UNWIND_UNAVAILABLE;
1367
1368 return UNWIND_NO_REASON;
1369}
1370
1371/* Implement this_id method for record_btrace_frame_unwind. */
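/* The id is built from the frame's function start address and the number of
   the oldest function segment; the stack address is marked unavailable.  */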
1372
1373static void
1374record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
1375 struct frame_id *this_id)
1376{
1377 const struct btrace_frame_cache *cache;
1378 const struct btrace_function *bfun;
1379 CORE_ADDR code, special;
1380
1381 cache = *this_cache;
1382
1383 bfun = cache->bfun;
1384 gdb_assert (bfun != NULL);
1385
1386 while (bfun->segment.prev != NULL)
1387 bfun = bfun->segment.prev;
1388
1389 code = get_frame_func (this_frame);
1390 special = bfun->number;
1391
1392 *this_id = frame_id_build_unavailable_stack_special (code, special);
1393
1394 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1395 btrace_get_bfun_name (cache->bfun),
1396 core_addr_to_string_nz (this_id->code_addr),
1397 core_addr_to_string_nz (this_id->special_addr));
1398}
1399
1400/* Implement prev_register method for record_btrace_frame_unwind. */
1401
1402static struct value *
1403record_btrace_frame_prev_register (struct frame_info *this_frame,
1404 void **this_cache,
1405 int regnum)
1406{
1407 const struct btrace_frame_cache *cache;
1408 const struct btrace_function *bfun, *caller;
1409 const struct btrace_insn *insn;
1410 struct gdbarch *gdbarch;
1411 CORE_ADDR pc;
1412 int pcreg;
1413
1414 gdbarch = get_frame_arch (this_frame);
1415 pcreg = gdbarch_pc_regnum (gdbarch);
1416 if (pcreg < 0 || regnum != pcreg)
1417 throw_error (NOT_AVAILABLE_ERROR,
1418 _("Registers are not available in btrace record history"));
1419
1420 cache = *this_cache;
1421 bfun = cache->bfun;
1422 gdb_assert (bfun != NULL);
1423
1424 caller = bfun->up;
1425 if (caller == NULL)
1426 throw_error (NOT_AVAILABLE_ERROR,
1427 _("No caller in btrace record history"));
1428
1429 if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
1430 {
1431 insn = VEC_index (btrace_insn_s, caller->insn, 0);
1432 pc = insn->pc;
1433 }
1434 else
1435 {
1436 insn = VEC_last (btrace_insn_s, caller->insn);
1437 pc = insn->pc;
1438
1439 pc += gdb_insn_length (gdbarch, pc);
1440 }
1441
1442 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1443 btrace_get_bfun_name (bfun), bfun->level,
1444 core_addr_to_string_nz (pc));
1445
1446 return frame_unwind_got_address (this_frame, regnum, pc);
1447}
1448
1449/* Implement sniffer method for record_btrace_frame_unwind. */
1450
1451static int
1452record_btrace_frame_sniffer (const struct frame_unwind *self,
1453 struct frame_info *this_frame,
1454 void **this_cache)
1455{
1456 const struct btrace_function *bfun;
1457 struct btrace_frame_cache *cache;
 1458 struct thread_info *tp;
 1459 struct frame_info *next;
1460
1461 /* THIS_FRAME does not contain a reference to its thread. */
1462 tp = find_thread_ptid (inferior_ptid);
1463 gdb_assert (tp != NULL);
1464
1465 bfun = NULL;
1466 next = get_next_frame (this_frame);
1467 if (next == NULL)
1468 {
1469 const struct btrace_insn_iterator *replay;
1470
1471 replay = tp->btrace.replay;
1472 if (replay != NULL)
1473 bfun = replay->function;
1474 }
1475 else
1476 {
1477 const struct btrace_function *callee;
1478
1479 callee = btrace_get_frame_function (next);
1480 if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1481 bfun = callee->up;
1482 }
1483
1484 if (bfun == NULL)
1485 return 0;
1486
1487 DEBUG ("[frame] sniffed frame for %s on level %d",
1488 btrace_get_bfun_name (bfun), bfun->level);
1489
1490 /* This is our frame. Initialize the frame cache. */
1491 cache = bfcache_new (this_frame);
1492 cache->tp = tp;
1493 cache->bfun = bfun;
1494
1495 *this_cache = cache;
1496 return 1;
1497}
1498
1499/* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1500
1501static int
1502record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
1503 struct frame_info *this_frame,
1504 void **this_cache)
1505{
1506 const struct btrace_function *bfun, *callee;
1507 struct btrace_frame_cache *cache;
1508 struct frame_info *next;
1509
1510 next = get_next_frame (this_frame);
1511 if (next == NULL)
1512 return 0;
1513
1514 callee = btrace_get_frame_function (next);
1515 if (callee == NULL)
1516 return 0;
1517
1518 if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1519 return 0;
1520
1521 bfun = callee->up;
1522 if (bfun == NULL)
1523 return 0;
1524
1525 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1526 btrace_get_bfun_name (bfun), bfun->level);
1527
1528 /* This is our frame. Initialize the frame cache. */
1529 cache = bfcache_new (this_frame);
1530 cache->tp = find_thread_ptid (inferior_ptid);
1531 cache->bfun = bfun;
1532
1533 *this_cache = cache;
1534 return 1;
1535}
1536
1537static void
1538record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
1539{
1540 struct btrace_frame_cache *cache;
1541 void **slot;
1542
1543 cache = this_cache;
1544
1545 slot = htab_find_slot (bfcache, cache, NO_INSERT);
1546 gdb_assert (slot != NULL);
1547
1548 htab_remove_elt (bfcache, cache);
1549}
1550
 1551/* btrace recording does not store previous memory content, nor the stack
 1552 frames' content. Any unwinding would return erroneous results as the stack
 1553 contents no longer match the changed PC value restored from history.
1554 Therefore this unwinder reports any possibly unwound registers as
1555 <unavailable>. */
1556
 1557const struct frame_unwind record_btrace_frame_unwind =
1558{
1559 NORMAL_FRAME,
1560 record_btrace_frame_unwind_stop_reason,
1561 record_btrace_frame_this_id,
1562 record_btrace_frame_prev_register,
1563 NULL,
1564 record_btrace_frame_sniffer,
1565 record_btrace_frame_dealloc_cache
1566};
1567
1568const struct frame_unwind record_btrace_tailcall_frame_unwind =
1569{
1570 TAILCALL_FRAME,
1571 record_btrace_frame_unwind_stop_reason,
1572 record_btrace_frame_this_id,
1573 record_btrace_frame_prev_register,
1574 NULL,
1575 record_btrace_tailcall_frame_sniffer,
1576 record_btrace_frame_dealloc_cache
 1577};
 1578
1579/* Implement the to_get_unwinder method. */
1580
1581static const struct frame_unwind *
1582record_btrace_to_get_unwinder (struct target_ops *self)
1583{
1584 return &record_btrace_frame_unwind;
1585}
1586
1587/* Implement the to_get_tailcall_unwinder method. */
1588
1589static const struct frame_unwind *
1590record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
1591{
1592 return &record_btrace_tailcall_frame_unwind;
1593}
1594
1595/* Indicate that TP should be resumed according to FLAG. */
1596
1597static void
1598record_btrace_resume_thread (struct thread_info *tp,
1599 enum btrace_thread_flag flag)
1600{
1601 struct btrace_thread_info *btinfo;
1602
1603 DEBUG ("resuming %d (%s): %u", tp->num, target_pid_to_str (tp->ptid), flag);
1604
1605 btinfo = &tp->btrace;
1606
1607 if ((btinfo->flags & BTHR_MOVE) != 0)
1608 error (_("Thread already moving."));
1609
1610 /* Fetch the latest branch trace. */
1611 btrace_fetch (tp);
1612
1613 btinfo->flags |= flag;
1614}
1615
1616/* Find the thread to resume given a PTID. */
1617
1618static struct thread_info *
1619record_btrace_find_resume_thread (ptid_t ptid)
1620{
1621 struct thread_info *tp;
1622
1623 /* When asked to resume everything, we pick the current thread. */
1624 if (ptid_equal (minus_one_ptid, ptid) || ptid_is_pid (ptid))
1625 ptid = inferior_ptid;
1626
1627 return find_thread_ptid (ptid);
1628}
1629
1630/* Start replaying a thread. */
1631
1632static struct btrace_insn_iterator *
1633record_btrace_start_replaying (struct thread_info *tp)
1634{
1635 struct btrace_insn_iterator *replay;
1636 struct btrace_thread_info *btinfo;
1637 int executing;
1638
1639 btinfo = &tp->btrace;
1640 replay = NULL;
1641
1642 /* We can't start replaying without trace. */
1643 if (btinfo->begin == NULL)
1644 return NULL;
1645
1646 /* Clear the executing flag to allow changes to the current frame.
1647 We are not actually running, yet. We just started a reverse execution
1648 command or a record goto command.
1649 For the latter, EXECUTING is false and this has no effect.
1650 For the former, EXECUTING is true and we're in to_wait, about to
1651 move the thread. Since we need to recompute the stack, we temporarily
 1652 set EXECUTING to false. */
1653 executing = is_executing (tp->ptid);
1654 set_executing (tp->ptid, 0);
1655
 1656 /* GDB stores the current frame_id when stepping in order to detect steps
1657 into subroutines.
1658 Since frames are computed differently when we're replaying, we need to
1659 recompute those stored frames and fix them up so we can still detect
1660 subroutines after we started replaying. */
 1661 TRY
1662 {
1663 struct frame_info *frame;
1664 struct frame_id frame_id;
1665 int upd_step_frame_id, upd_step_stack_frame_id;
1666
1667 /* The current frame without replaying - computed via normal unwind. */
1668 frame = get_current_frame ();
1669 frame_id = get_frame_id (frame);
1670
1671 /* Check if we need to update any stepping-related frame id's. */
1672 upd_step_frame_id = frame_id_eq (frame_id,
1673 tp->control.step_frame_id);
1674 upd_step_stack_frame_id = frame_id_eq (frame_id,
1675 tp->control.step_stack_frame_id);
1676
1677 /* We start replaying at the end of the branch trace. This corresponds
1678 to the current instruction. */
1679 replay = xmalloc (sizeof (*replay));
1680 btrace_insn_end (replay, btinfo);
1681
1682 /* Skip gaps at the end of the trace. */
1683 while (btrace_insn_get (replay) == NULL)
1684 {
1685 unsigned int steps;
1686
1687 steps = btrace_insn_prev (replay, 1);
1688 if (steps == 0)
1689 error (_("No trace."));
1690 }
1691
1692 /* We're not replaying, yet. */
1693 gdb_assert (btinfo->replay == NULL);
1694 btinfo->replay = replay;
1695
1696 /* Make sure we're not using any stale registers. */
1697 registers_changed_ptid (tp->ptid);
1698
1699 /* The current frame with replaying - computed via btrace unwind. */
1700 frame = get_current_frame ();
1701 frame_id = get_frame_id (frame);
1702
1703 /* Replace stepping related frames where necessary. */
1704 if (upd_step_frame_id)
1705 tp->control.step_frame_id = frame_id;
1706 if (upd_step_stack_frame_id)
1707 tp->control.step_stack_frame_id = frame_id;
1708 }
1709
1710 /* Restore the previous execution state. */
1711 set_executing (tp->ptid, executing);
1712
 1713 CATCH (except, RETURN_MASK_ALL)
1714 {
1715 xfree (btinfo->replay);
1716 btinfo->replay = NULL;
1717
1718 registers_changed_ptid (tp->ptid);
1719
1720 throw_exception (except);
1721 }
 1722 END_CATCH
1723
1724 return replay;
1725}
1726
1727/* Stop replaying a thread. */
1728
1729static void
1730record_btrace_stop_replaying (struct thread_info *tp)
1731{
1732 struct btrace_thread_info *btinfo;
1733
1734 btinfo = &tp->btrace;
1735
1736 xfree (btinfo->replay);
1737 btinfo->replay = NULL;
1738
1739 /* Make sure we're not leaving any stale registers. */
1740 registers_changed_ptid (tp->ptid);
1741}
1742
1743/* The to_resume method of target record-btrace. */
1744
1745static void
1746record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
1747 enum gdb_signal signal)
1748{
1749 struct thread_info *tp, *other;
1750 enum btrace_thread_flag flag;
1751
1752 DEBUG ("resume %s: %s", target_pid_to_str (ptid), step ? "step" : "cont");
1753
1754 /* Store the execution direction of the last resume. */
1755 record_btrace_resume_exec_dir = execution_direction;
1756
1757 tp = record_btrace_find_resume_thread (ptid);
1758 if (tp == NULL)
1759 error (_("Cannot find thread to resume."));
1760
1761 /* Stop replaying other threads if the thread to resume is not replaying. */
1762 if (!btrace_is_replaying (tp) && execution_direction != EXEC_REVERSE)
 1763 ALL_NON_EXITED_THREADS (other)
1764 record_btrace_stop_replaying (other);
1765
 1766 /* As long as we're not replaying, just forward the request. */
 1767 if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
 1768 {
1769 ops = ops->beneath;
1770 return ops->to_resume (ops, ptid, step, signal);
1771 }
1772
1773 /* Compute the btrace thread flag for the requested move. */
1774 if (step == 0)
1775 flag = execution_direction == EXEC_REVERSE ? BTHR_RCONT : BTHR_CONT;
1776 else
1777 flag = execution_direction == EXEC_REVERSE ? BTHR_RSTEP : BTHR_STEP;
1778
1779 /* At the moment, we only move a single thread. We could also move
1780 all threads in parallel by single-stepping each resumed thread
1781 until the first runs into an event.
1782 When we do that, we would want to continue all other threads.
1783 For now, just resume one thread to not confuse to_wait. */
1784 record_btrace_resume_thread (tp, flag);
1785
1786 /* We just indicate the resume intent here. The actual stepping happens in
1787 record_btrace_wait below. */
1788
1789 /* Async support. */
1790 if (target_can_async_p ())
1791 {
1792 target_async (inferior_event_handler, 0);
1793 mark_async_event_handler (record_btrace_async_inferior_event_handler);
1794 }
1795}
1796
1797/* Find a thread to move. */
1798
1799static struct thread_info *
1800record_btrace_find_thread_to_move (ptid_t ptid)
1801{
1802 struct thread_info *tp;
1803
1804 /* First check the parameter thread. */
1805 tp = find_thread_ptid (ptid);
1806 if (tp != NULL && (tp->btrace.flags & BTHR_MOVE) != 0)
1807 return tp;
1808
1809 /* Otherwise, find one other thread that has been resumed. */
 1810 ALL_NON_EXITED_THREADS (tp)
1811 if ((tp->btrace.flags & BTHR_MOVE) != 0)
1812 return tp;
1813
1814 return NULL;
1815}
1816
1817/* Return a target_waitstatus indicating that we ran out of history. */
1818
1819static struct target_waitstatus
1820btrace_step_no_history (void)
1821{
1822 struct target_waitstatus status;
1823
1824 status.kind = TARGET_WAITKIND_NO_HISTORY;
1825
1826 return status;
1827}
1828
1829/* Return a target_waitstatus indicating that a step finished. */
1830
1831static struct target_waitstatus
1832btrace_step_stopped (void)
1833{
1834 struct target_waitstatus status;
1835
1836 status.kind = TARGET_WAITKIND_STOPPED;
1837 status.value.sig = GDB_SIGNAL_TRAP;
1838
1839 return status;
1840}
1841
1842/* Clear the record histories. */
1843
1844static void
1845record_btrace_clear_histories (struct btrace_thread_info *btinfo)
1846{
1847 xfree (btinfo->insn_history);
1848 xfree (btinfo->call_history);
1849
1850 btinfo->insn_history = NULL;
1851 btinfo->call_history = NULL;
1852}
1853
1854/* Step a single thread. */
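/* The thread's BTHR_MOVE flags select the stepping variant: BTHR_STEP and
   BTHR_RSTEP move by a single instruction, BTHR_CONT and BTHR_RCONT move
   until a breakpoint is hit or the history ends; the R variants move
   backwards through the recorded trace.  */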
1855
1856static struct target_waitstatus
1857record_btrace_step_thread (struct thread_info *tp)
1858{
1859 struct btrace_insn_iterator *replay, end;
1860 struct btrace_thread_info *btinfo;
1861 struct address_space *aspace;
1862 struct inferior *inf;
1863 enum btrace_thread_flag flags;
1864 unsigned int steps;
1865
1866 /* We can't step without an execution history. */
1867 if (btrace_is_empty (tp))
1868 return btrace_step_no_history ();
1869
1870 btinfo = &tp->btrace;
1871 replay = btinfo->replay;
1872
1873 flags = btinfo->flags & BTHR_MOVE;
1874 btinfo->flags &= ~BTHR_MOVE;
1875
1876 DEBUG ("stepping %d (%s): %u", tp->num, target_pid_to_str (tp->ptid), flags);
1877
1878 switch (flags)
1879 {
1880 default:
1881 internal_error (__FILE__, __LINE__, _("invalid stepping type."));
1882
1883 case BTHR_STEP:
1884 /* We're done if we're not replaying. */
1885 if (replay == NULL)
1886 return btrace_step_no_history ();
1887
1888 /* Skip gaps during replay. */
1889 do
1890 {
1891 steps = btrace_insn_next (replay, 1);
1892 if (steps == 0)
1893 {
1894 record_btrace_stop_replaying (tp);
1895 return btrace_step_no_history ();
1896 }
1897 }
1898 while (btrace_insn_get (replay) == NULL);
1899
1900 /* Determine the end of the instruction trace. */
1901 btrace_insn_end (&end, btinfo);
1902
1903 /* We stop replaying if we reached the end of the trace. */
1904 if (btrace_insn_cmp (replay, &end) == 0)
1905 record_btrace_stop_replaying (tp);
1906
1907 return btrace_step_stopped ();
1908
1909 case BTHR_RSTEP:
1910 /* Start replaying if we're not already doing so. */
1911 if (replay == NULL)
1912 replay = record_btrace_start_replaying (tp);
1913
1914 /* If we can't step any further, we reached the end of the history.
1915 Skip gaps during replay. */
1916 do
1917 {
1918 steps = btrace_insn_prev (replay, 1);
1919 if (steps == 0)
1920 return btrace_step_no_history ();
1921
1922 }
1923 while (btrace_insn_get (replay) == NULL);
1924
1925 return btrace_step_stopped ();
1926
1927 case BTHR_CONT:
1928 /* We're done if we're not replaying. */
1929 if (replay == NULL)
1930 return btrace_step_no_history ();
1931
 1932 inf = find_inferior_ptid (tp->ptid);
1933 aspace = inf->aspace;
1934
1935 /* Determine the end of the instruction trace. */
1936 btrace_insn_end (&end, btinfo);
1937
1938 for (;;)
1939 {
1940 const struct btrace_insn *insn;
1941
1942 /* Skip gaps during replay. */
1943 do
1944 {
1945 steps = btrace_insn_next (replay, 1);
1946 if (steps == 0)
1947 {
1948 record_btrace_stop_replaying (tp);
1949 return btrace_step_no_history ();
1950 }
1951
1952 insn = btrace_insn_get (replay);
1953 }
1954 while (insn == NULL);
1955
1956 /* We stop replaying if we reached the end of the trace. */
1957 if (btrace_insn_cmp (replay, &end) == 0)
1958 {
1959 record_btrace_stop_replaying (tp);
1960 return btrace_step_no_history ();
1961 }
1962
1963 DEBUG ("stepping %d (%s) ... %s", tp->num,
1964 target_pid_to_str (tp->ptid),
1965 core_addr_to_string_nz (insn->pc));
1966
1967 if (record_check_stopped_by_breakpoint (aspace, insn->pc,
1968 &btinfo->stop_reason))
1969 return btrace_step_stopped ();
1970 }
1971
1972 case BTHR_RCONT:
1973 /* Start replaying if we're not already doing so. */
1974 if (replay == NULL)
1975 replay = record_btrace_start_replaying (tp);
1976
1977 inf = find_inferior_ptid (tp->ptid);
1978 aspace = inf->aspace;
1979
1980 for (;;)
1981 {
1982 const struct btrace_insn *insn;
1983
1984 /* If we can't step any further, we reached the end of the history.
1985 Skip gaps during replay. */
1986 do
1987 {
1988 steps = btrace_insn_prev (replay, 1);
1989 if (steps == 0)
1990 return btrace_step_no_history ();
1991
1992 insn = btrace_insn_get (replay);
1993 }
1994 while (insn == NULL);
1995
1996 DEBUG ("reverse-stepping %d (%s) ... %s", tp->num,
1997 target_pid_to_str (tp->ptid),
1998 core_addr_to_string_nz (insn->pc));
1999
2000 if (record_check_stopped_by_breakpoint (aspace, insn->pc,
2001 &btinfo->stop_reason))
2002 return btrace_step_stopped ();
2003 }
2004 }
2005}
2006
2007/* The to_wait method of target record-btrace. */
2008
2009static ptid_t
2010record_btrace_wait (struct target_ops *ops, ptid_t ptid,
2011 struct target_waitstatus *status, int options)
2012{
2013 struct thread_info *tp, *other;
2014
2015 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);
2016
2017 /* As long as we're not replaying, just forward the request. */
2018 if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
2019 {
2020 ops = ops->beneath;
2021 return ops->to_wait (ops, ptid, status, options);
2022 }
2023
2024 /* Let's find a thread to move. */
2025 tp = record_btrace_find_thread_to_move (ptid);
2026 if (tp == NULL)
2027 {
2028 DEBUG ("wait %s: no thread", target_pid_to_str (ptid));
2029
2030 status->kind = TARGET_WAITKIND_IGNORE;
2031 return minus_one_ptid;
2032 }
2033
2034 /* We only move a single thread. We're not able to correlate threads. */
2035 *status = record_btrace_step_thread (tp);
2036
2037 /* Stop all other threads. */
2038 if (!non_stop)
2039 ALL_NON_EXITED_THREADS (other)
2040 other->btrace.flags &= ~BTHR_MOVE;
2041
2042 /* Start record histories anew from the current position. */
2043 record_btrace_clear_histories (&tp->btrace);
2044
2045 /* We moved the replay position but did not update registers. */
2046 registers_changed_ptid (tp->ptid);
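/* registers_changed_ptid discards the cached register state for this
   thread so that stale values are not reused now that the replay position
   has moved.  */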
2047
2048 return tp->ptid;
2049}
2050
2051/* The to_can_execute_reverse method of target record-btrace. */
2052
2053static int
2054 record_btrace_can_execute_reverse (struct target_ops *self)
2055{
2056 return 1;
2057}
2058
2059/* The to_stopped_by_sw_breakpoint method of target record-btrace. */
2060
2061static int
2062record_btrace_stopped_by_sw_breakpoint (struct target_ops *ops)
2063{
2064 if (record_btrace_is_replaying (ops))
2065 {
2066 struct thread_info *tp = inferior_thread ();
2067
2068 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2069 }
2070
2071 return ops->beneath->to_stopped_by_sw_breakpoint (ops->beneath);
2072}
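/* While replaying, the stop reason recorded in btrace.stop_reason (filled
   in by record_check_stopped_by_breakpoint while stepping above) is
   reported instead of asking the target beneath; the hardware breakpoint
   queries below follow the same pattern.  */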
2073
2074/* The to_supports_stopped_by_sw_breakpoint method of target
2075 record-btrace. */
2076
2077static int
2078record_btrace_supports_stopped_by_sw_breakpoint (struct target_ops *ops)
2079{
2080 if (record_btrace_is_replaying (ops))
2081 return 1;
2082
2083 return ops->beneath->to_supports_stopped_by_sw_breakpoint (ops->beneath);
2084}
2085
2086/* The to_stopped_by_hw_breakpoint method of target record-btrace. */
2087
2088static int
2089record_btrace_stopped_by_hw_breakpoint (struct target_ops *ops)
2090{
2091 if (record_btrace_is_replaying (ops))
2092 {
2093 struct thread_info *tp = inferior_thread ();
2094
2095 return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2096 }
2097
2098 return ops->beneath->to_stopped_by_hw_breakpoint (ops->beneath);
2099}
2100
2101/* The to_supports_stopped_by_hw_breakpoint method of target
2102 record-btrace. */
2103
2104static int
2105record_btrace_supports_stopped_by_hw_breakpoint (struct target_ops *ops)
2106{
2107 if (record_btrace_is_replaying (ops))
2108 return 1;
2109
2110 return ops->beneath->to_supports_stopped_by_hw_breakpoint (ops->beneath);
2111}
2112
2113/* The to_update_thread_list method of target record-btrace. */
2114
2115static void
2116 record_btrace_update_thread_list (struct target_ops *ops)
e2887aa3 2117{
2118 /* We don't add or remove threads during replay. */
2119 if (record_btrace_is_replaying (ops))
2120 return;
2121
2122 /* Forward the request. */
2123 ops = ops->beneath;
2124 ops->to_update_thread_list (ops);
2125}
2126
2127/* The to_thread_alive method of target record-btrace. */
2128
2129static int
2130record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
2131{
2132 /* We don't add or remove threads during replay. */
2133 if (record_btrace_is_replaying (ops))
2134 return find_thread_ptid (ptid) != NULL;
2135
2136 /* Forward the request. */
2137 ops = ops->beneath;
2138 return ops->to_thread_alive (ops, ptid);
2139}
2140
2141/* Set the replay branch trace instruction iterator. If IT is NULL, replay
2142 is stopped. */
2143
2144static void
2145record_btrace_set_replay (struct thread_info *tp,
2146 const struct btrace_insn_iterator *it)
2147{
2148 struct btrace_thread_info *btinfo;
2149
2150 btinfo = &tp->btrace;
2151
2152 if (it == NULL || it->function == NULL)
2153 record_btrace_stop_replaying (tp);
2154 else
2155 {
2156 if (btinfo->replay == NULL)
2157 record_btrace_start_replaying (tp);
2158 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
2159 return;
2160
2161 *btinfo->replay = *it;
2162 registers_changed_ptid (tp->ptid);
2163 }
2164
2165 /* Start anew from the new replay position. */
2166 record_btrace_clear_histories (btinfo);
2167}
2168
2169/* The to_goto_record_begin method of target record-btrace. */
2170
2171static void
2172 record_btrace_goto_begin (struct target_ops *self)
2173{
2174 struct thread_info *tp;
2175 struct btrace_insn_iterator begin;
2176
2177 tp = require_btrace_thread ();
2178
2179 btrace_insn_begin (&begin, &tp->btrace);
2180 record_btrace_set_replay (tp, &begin);
2181
2182 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
2183}
2184
2185/* The to_goto_record_end method of target record-btrace. */
2186
2187static void
2188 record_btrace_goto_end (struct target_ops *ops)
2189{
2190 struct thread_info *tp;
2191
2192 tp = require_btrace_thread ();
2193
2194 record_btrace_set_replay (tp, NULL);
2195
2196 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
2197}
2198
2199/* The to_goto_record method of target record-btrace. */
2200
2201static void
2202 record_btrace_goto (struct target_ops *self, ULONGEST insn)
2203{
2204 struct thread_info *tp;
2205 struct btrace_insn_iterator it;
2206 unsigned int number;
2207 int found;
2208
2209 number = insn;
2210
2211 /* Check for wrap-arounds. */
2212 if (number != insn)
2213 error (_("Instruction number out of range."));
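/* INSN is a ULONGEST but the lookup below takes an unsigned int; if the
   narrowing assignment above changed the value, the requested instruction
   number cannot be represented and is rejected here.  */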
2214
2215 tp = require_btrace_thread ();
2216
2217 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
2218 if (found == 0)
2219 error (_("No such instruction."));
2220
2221 record_btrace_set_replay (tp, &it);
2222
2223 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
2224}
2225
2226/* The to_execution_direction target method. */
2227
2228static enum exec_direction_kind
2229record_btrace_execution_direction (struct target_ops *self)
2230{
2231 return record_btrace_resume_exec_dir;
2232}
2233
2234/* The to_prepare_to_generate_core target method. */
2235
2236static void
2237record_btrace_prepare_to_generate_core (struct target_ops *self)
2238{
2239 record_btrace_generating_corefile = 1;
2240}
2241
2242/* The to_done_generating_core target method. */
2243
2244static void
2245record_btrace_done_generating_core (struct target_ops *self)
2246{
2247 record_btrace_generating_corefile = 0;
2248}
2249
2250/* Initialize the record-btrace target ops. */
2251
2252static void
2253init_record_btrace_ops (void)
2254{
2255 struct target_ops *ops;
2256
2257 ops = &record_btrace_ops;
2258 ops->to_shortname = "record-btrace";
2259 ops->to_longname = "Branch tracing target";
2260 ops->to_doc = "Collect control-flow trace and provide the execution history.";
2261 ops->to_open = record_btrace_open;
2262 ops->to_close = record_btrace_close;
2263 ops->to_async = record_btrace_async;
2264 ops->to_detach = record_detach;
2265 ops->to_disconnect = record_disconnect;
2266 ops->to_mourn_inferior = record_mourn_inferior;
2267 ops->to_kill = record_kill;
2268 ops->to_stop_recording = record_btrace_stop_recording;
2269 ops->to_info_record = record_btrace_info;
2270 ops->to_insn_history = record_btrace_insn_history;
2271 ops->to_insn_history_from = record_btrace_insn_history_from;
2272 ops->to_insn_history_range = record_btrace_insn_history_range;
2273 ops->to_call_history = record_btrace_call_history;
2274 ops->to_call_history_from = record_btrace_call_history_from;
2275 ops->to_call_history_range = record_btrace_call_history_range;
2276 ops->to_record_is_replaying = record_btrace_is_replaying;
2277 ops->to_xfer_partial = record_btrace_xfer_partial;
2278 ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
2279 ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
2280 ops->to_fetch_registers = record_btrace_fetch_registers;
2281 ops->to_store_registers = record_btrace_store_registers;
2282 ops->to_prepare_to_store = record_btrace_prepare_to_store;
2283 ops->to_get_unwinder = &record_btrace_to_get_unwinder;
2284 ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;
2285 ops->to_resume = record_btrace_resume;
2286 ops->to_wait = record_btrace_wait;
2287 ops->to_update_thread_list = record_btrace_update_thread_list;
2288 ops->to_thread_alive = record_btrace_thread_alive;
2289 ops->to_goto_record_begin = record_btrace_goto_begin;
2290 ops->to_goto_record_end = record_btrace_goto_end;
2291 ops->to_goto_record = record_btrace_goto;
2292 ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
2293 ops->to_stopped_by_sw_breakpoint = record_btrace_stopped_by_sw_breakpoint;
2294 ops->to_supports_stopped_by_sw_breakpoint
2295 = record_btrace_supports_stopped_by_sw_breakpoint;
2296 ops->to_stopped_by_hw_breakpoint = record_btrace_stopped_by_hw_breakpoint;
2297 ops->to_supports_stopped_by_hw_breakpoint
2298 = record_btrace_supports_stopped_by_hw_breakpoint;
2299 ops->to_execution_direction = record_btrace_execution_direction;
2300 ops->to_prepare_to_generate_core = record_btrace_prepare_to_generate_core;
2301 ops->to_done_generating_core = record_btrace_done_generating_core;
2302 ops->to_stratum = record_stratum;
2303 ops->to_magic = OPS_MAGIC;
2304}
2305
2306/* Start recording in BTS format. */
2307
2308static void
2309cmd_record_btrace_bts_start (char *args, int from_tty)
2310{
2311
2312 if (args != NULL && *args != 0)
2313 error (_("Invalid argument."));
2314
2315 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2316
2317 TRY
2318 {
2319 execute_command ("target record-btrace", from_tty);
2320 }
2321 CATCH (exception, RETURN_MASK_ALL)
2322 {
2323 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2324 throw_exception (exception);
2325 }
2326 END_CATCH
2327}
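/* Hypothetical example session:

     (gdb) record btrace bts
     (gdb) info record

   "record btrace bts" requests the BTS format explicitly; the plain
   "record btrace" command below currently defaults to the same format.  */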
2328
2329/* Alias for "target record". */
2330
2331static void
2332cmd_record_btrace_start (char *args, int from_tty)
2333{
2334
2335 if (args != NULL && *args != 0)
2336 error (_("Invalid argument."));
2337
2338 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2339
2340 TRY
2341 {
2342 execute_command ("target record-btrace", from_tty);
2343 }
2344 CATCH (exception, RETURN_MASK_ALL)
2345 {
2346 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2347 throw_exception (exception);
2348 }
2349 END_CATCH
2350}
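/* Both start commands use the TRY/CATCH/END_CATCH idiom: if switching to
   the record-btrace target throws, the requested trace format is reset to
   BTRACE_FORMAT_NONE before the exception is re-thrown to the caller.  */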
2351
2352/* The "set record btrace" command. */
2353
2354static void
2355cmd_set_record_btrace (char *args, int from_tty)
2356{
2357 cmd_show_list (set_record_btrace_cmdlist, from_tty, "");
2358}
2359
2360/* The "show record btrace" command. */
2361
2362static void
2363cmd_show_record_btrace (char *args, int from_tty)
2364{
2365 cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
2366}
2367
2368/* The "show record btrace replay-memory-access" command. */
2369
2370static void
2371cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2372 struct cmd_list_element *c, const char *value)
2373{
2374 fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
2375 replay_memory_access);
2376}
2377
2378/* The "set record btrace bts" command. */
2379
2380static void
2381cmd_set_record_btrace_bts (char *args, int from_tty)
2382{
2383 printf_unfiltered (_("\"set record btrace bts\" must be followed "
2384 "by an appropriate subcommand.\n"));
2385 help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
2386 all_commands, gdb_stdout);
2387}
2388
2389/* The "show record btrace bts" command. */
2390
2391static void
2392cmd_show_record_btrace_bts (char *args, int from_tty)
2393{
2394 cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
2395}
2396
2397void _initialize_record_btrace (void);
2398
2399/* Initialize btrace commands. */
2400
2401void
2402_initialize_record_btrace (void)
2403{
2404 add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
2405 _("Start branch trace recording."), &record_btrace_cmdlist,
2406 "record btrace ", 0, &record_cmdlist);
2407 add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
2408
2409 add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
2410 _("\
2411Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
2412The processor stores a from/to record for each branch into a cyclic buffer.\n\
2413This format may not be available on all processors."),
2414 &record_btrace_cmdlist);
2415 add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);
2416
2417 add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
2418 _("Set record options"), &set_record_btrace_cmdlist,
2419 "set record btrace ", 0, &set_record_cmdlist);
2420
2421 add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
2422 _("Show record options"), &show_record_btrace_cmdlist,
2423 "show record btrace ", 0, &show_record_cmdlist);
2424
2425 add_setshow_enum_cmd ("replay-memory-access", no_class,
2426 replay_memory_access_types, &replay_memory_access, _("\
2427Set what memory accesses are allowed during replay."), _("\
2428Show what memory accesses are allowed during replay."),
2429 _("Default is READ-ONLY.\n\n\
2430The btrace record target does not trace data.\n\
2431The memory therefore corresponds to the live target and not \
2432to the current replay position.\n\n\
2433When READ-ONLY, allow accesses to read-only memory during replay.\n\
2434When READ-WRITE, allow accesses to read-only and read-write memory during \
2435replay."),
2436 NULL, cmd_show_replay_memory_access,
2437 &set_record_btrace_cmdlist,
2438 &show_record_btrace_cmdlist);
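/* For example, "set record btrace replay-memory-access read-write" permits
   accesses to read-write memory while replaying; the default "read-only"
   restricts replay-time accesses to read-only memory.  */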
2439
2440 add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
2441 _("Set record btrace bts options"),
2442 &set_record_btrace_bts_cmdlist,
2443 "set record btrace bts ", 0, &set_record_btrace_cmdlist);
2444
2445 add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
2446 _("Show record btrace bts options"),
2447 &show_record_btrace_bts_cmdlist,
2448 "show record btrace bts ", 0, &show_record_btrace_cmdlist);
2449
2450 add_setshow_uinteger_cmd ("buffer-size", no_class,
2451 &record_btrace_conf.bts.size,
2452 _("Set the record/replay bts buffer size."),
2453 _("Show the record/replay bts buffer size."), _("\
2454When starting recording, request a trace buffer of this size. \
2455The actual buffer size may differ from the requested size. \
2456Use \"info record\" to see the actual buffer size.\n\n\
2457Bigger buffers allow longer recording but also take more time to process \
2458the recorded execution trace.\n\n\
2459The trace buffer size may not be changed while recording."), NULL, NULL,
2460 &set_record_btrace_bts_cmdlist,
2461 &show_record_btrace_bts_cmdlist);
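/* For example, "set record btrace bts buffer-size 131072" requests a larger
   trace buffer before recording is started; "info record" afterwards shows
   the size that was actually obtained.  */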
2462
2463 init_record_btrace_ops ();
2464 add_target (&record_btrace_ops);
2465
2466 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
2467 xcalloc, xfree);
2468
2469 record_btrace_conf.bts.size = 64 * 1024;
2470}