btrace, gdbserver: read branch trace incrementally
[deliverable/binutils-gdb.git] / gdb / btrace.c
CommitLineData
02d27625
MM
1/* Branch trace support for GDB, the GNU debugger.
2
ecd75fc8 3 Copyright (C) 2013-2014 Free Software Foundation, Inc.
02d27625
MM
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22#include "btrace.h"
23#include "gdbthread.h"
24#include "exceptions.h"
25#include "inferior.h"
26#include "target.h"
27#include "record.h"
28#include "symtab.h"
29#include "disasm.h"
30#include "source.h"
31#include "filenames.h"
c12a2917 32#include "xml-support.h"
02d27625
MM
33
/* Print a record debug message.  Use do ... while (0) to avoid ambiguities
   when used in if statements.  */

#define DEBUG(msg, args...)						\
  do									\
    {									\
      if (record_debug != 0)						\
        fprintf_unfiltered (gdb_stdlog,					\
			    "[btrace] " msg "\n", ##args);		\
    }									\
  while (0)

/* Like DEBUG, but additionally tagged with "[ftrace]" for messages about
   the function-level trace computation.  */

#define DEBUG_FTRACE(msg, args...) DEBUG ("[ftrace] " msg, ##args)
47
02d27625
MM
48/* Return the function name of a recorded function segment for printing.
49 This function never returns NULL. */
50
51static const char *
23a7fe75 52ftrace_print_function_name (const struct btrace_function *bfun)
02d27625
MM
53{
54 struct minimal_symbol *msym;
55 struct symbol *sym;
56
57 msym = bfun->msym;
58 sym = bfun->sym;
59
60 if (sym != NULL)
61 return SYMBOL_PRINT_NAME (sym);
62
63 if (msym != NULL)
64 return SYMBOL_PRINT_NAME (msym);
65
66 return "<unknown>";
67}
68
69/* Return the file name of a recorded function segment for printing.
70 This function never returns NULL. */
71
72static const char *
23a7fe75 73ftrace_print_filename (const struct btrace_function *bfun)
02d27625
MM
74{
75 struct symbol *sym;
76 const char *filename;
77
78 sym = bfun->sym;
79
80 if (sym != NULL)
81 filename = symtab_to_filename_for_display (sym->symtab);
82 else
83 filename = "<unknown>";
84
85 return filename;
86}
87
23a7fe75
MM
88/* Return a string representation of the address of an instruction.
89 This function never returns NULL. */
02d27625 90
23a7fe75
MM
91static const char *
92ftrace_print_insn_addr (const struct btrace_insn *insn)
02d27625 93{
23a7fe75
MM
94 if (insn == NULL)
95 return "<nil>";
96
97 return core_addr_to_string_nz (insn->pc);
02d27625
MM
98}
99
23a7fe75 100/* Print an ftrace debug status message. */
02d27625
MM
101
102static void
23a7fe75 103ftrace_debug (const struct btrace_function *bfun, const char *prefix)
02d27625 104{
23a7fe75
MM
105 const char *fun, *file;
106 unsigned int ibegin, iend;
107 int lbegin, lend, level;
108
109 fun = ftrace_print_function_name (bfun);
110 file = ftrace_print_filename (bfun);
111 level = bfun->level;
112
113 lbegin = bfun->lbegin;
114 lend = bfun->lend;
115
116 ibegin = bfun->insn_offset;
117 iend = ibegin + VEC_length (btrace_insn_s, bfun->insn);
118
119 DEBUG_FTRACE ("%s: fun = %s, file = %s, level = %d, lines = [%d; %d], "
120 "insn = [%u; %u)", prefix, fun, file, level, lbegin, lend,
121 ibegin, iend);
02d27625
MM
122}
123
23a7fe75
MM
124/* Return non-zero if BFUN does not match MFUN and FUN,
125 return zero otherwise. */
02d27625
MM
126
127static int
23a7fe75
MM
128ftrace_function_switched (const struct btrace_function *bfun,
129 const struct minimal_symbol *mfun,
130 const struct symbol *fun)
02d27625
MM
131{
132 struct minimal_symbol *msym;
133 struct symbol *sym;
134
02d27625
MM
135 msym = bfun->msym;
136 sym = bfun->sym;
137
138 /* If the minimal symbol changed, we certainly switched functions. */
139 if (mfun != NULL && msym != NULL
140 && strcmp (SYMBOL_LINKAGE_NAME (mfun), SYMBOL_LINKAGE_NAME (msym)) != 0)
141 return 1;
142
143 /* If the symbol changed, we certainly switched functions. */
144 if (fun != NULL && sym != NULL)
145 {
146 const char *bfname, *fname;
147
148 /* Check the function name. */
149 if (strcmp (SYMBOL_LINKAGE_NAME (fun), SYMBOL_LINKAGE_NAME (sym)) != 0)
150 return 1;
151
152 /* Check the location of those functions, as well. */
153 bfname = symtab_to_fullname (sym->symtab);
154 fname = symtab_to_fullname (fun->symtab);
155 if (filename_cmp (fname, bfname) != 0)
156 return 1;
157 }
158
23a7fe75
MM
159 /* If we lost symbol information, we switched functions. */
160 if (!(msym == NULL && sym == NULL) && mfun == NULL && fun == NULL)
161 return 1;
162
163 /* If we gained symbol information, we switched functions. */
164 if (msym == NULL && sym == NULL && !(mfun == NULL && fun == NULL))
165 return 1;
166
02d27625
MM
167 return 0;
168}
169
23a7fe75
MM
170/* Return non-zero if we should skip this file when generating the function
171 call history, zero otherwise.
172 We would want to do that if, say, a macro that is defined in another file
173 is expanded in this function. */
02d27625
MM
174
175static int
23a7fe75 176ftrace_skip_file (const struct btrace_function *bfun, const char *fullname)
02d27625
MM
177{
178 struct symbol *sym;
179 const char *bfile;
180
181 sym = bfun->sym;
23a7fe75
MM
182 if (sym == NULL)
183 return 1;
02d27625 184
23a7fe75
MM
185 bfile = symtab_to_fullname (sym->symtab);
186
187 return (filename_cmp (bfile, fullname) != 0);
188}
189
190/* Allocate and initialize a new branch trace function segment.
191 PREV is the chronologically preceding function segment.
192 MFUN and FUN are the symbol information we have for this function. */
193
194static struct btrace_function *
195ftrace_new_function (struct btrace_function *prev,
196 struct minimal_symbol *mfun,
197 struct symbol *fun)
198{
199 struct btrace_function *bfun;
200
201 bfun = xzalloc (sizeof (*bfun));
202
203 bfun->msym = mfun;
204 bfun->sym = fun;
205 bfun->flow.prev = prev;
206
207 /* We start with the identities of min and max, respectively. */
208 bfun->lbegin = INT_MAX;
209 bfun->lend = INT_MIN;
02d27625 210
5de9129b
MM
211 if (prev == NULL)
212 {
213 /* Start counting at one. */
214 bfun->number = 1;
215 bfun->insn_offset = 1;
216 }
217 else
23a7fe75
MM
218 {
219 gdb_assert (prev->flow.next == NULL);
220 prev->flow.next = bfun;
02d27625 221
23a7fe75
MM
222 bfun->number = prev->number + 1;
223 bfun->insn_offset = (prev->insn_offset
224 + VEC_length (btrace_insn_s, prev->insn));
225 }
226
227 return bfun;
02d27625
MM
228}
229
23a7fe75 230/* Update the UP field of a function segment. */
02d27625 231
23a7fe75
MM
232static void
233ftrace_update_caller (struct btrace_function *bfun,
234 struct btrace_function *caller,
235 enum btrace_function_flag flags)
02d27625 236{
23a7fe75
MM
237 if (bfun->up != NULL)
238 ftrace_debug (bfun, "updating caller");
02d27625 239
23a7fe75
MM
240 bfun->up = caller;
241 bfun->flags = flags;
242
243 ftrace_debug (bfun, "set caller");
244}
245
246/* Fix up the caller for all segments of a function. */
247
248static void
249ftrace_fixup_caller (struct btrace_function *bfun,
250 struct btrace_function *caller,
251 enum btrace_function_flag flags)
252{
253 struct btrace_function *prev, *next;
254
255 ftrace_update_caller (bfun, caller, flags);
256
257 /* Update all function segments belonging to the same function. */
258 for (prev = bfun->segment.prev; prev != NULL; prev = prev->segment.prev)
259 ftrace_update_caller (prev, caller, flags);
260
261 for (next = bfun->segment.next; next != NULL; next = next->segment.next)
262 ftrace_update_caller (next, caller, flags);
263}
264
265/* Add a new function segment for a call.
266 CALLER is the chronologically preceding function segment.
267 MFUN and FUN are the symbol information we have for this function. */
268
269static struct btrace_function *
270ftrace_new_call (struct btrace_function *caller,
271 struct minimal_symbol *mfun,
272 struct symbol *fun)
273{
274 struct btrace_function *bfun;
275
276 bfun = ftrace_new_function (caller, mfun, fun);
277 bfun->up = caller;
278 bfun->level = caller->level + 1;
279
280 ftrace_debug (bfun, "new call");
281
282 return bfun;
283}
284
285/* Add a new function segment for a tail call.
286 CALLER is the chronologically preceding function segment.
287 MFUN and FUN are the symbol information we have for this function. */
288
289static struct btrace_function *
290ftrace_new_tailcall (struct btrace_function *caller,
291 struct minimal_symbol *mfun,
292 struct symbol *fun)
293{
294 struct btrace_function *bfun;
02d27625 295
23a7fe75
MM
296 bfun = ftrace_new_function (caller, mfun, fun);
297 bfun->up = caller;
298 bfun->level = caller->level + 1;
299 bfun->flags |= BFUN_UP_LINKS_TO_TAILCALL;
02d27625 300
23a7fe75
MM
301 ftrace_debug (bfun, "new tail call");
302
303 return bfun;
304}
305
306/* Find the innermost caller in the back trace of BFUN with MFUN/FUN
307 symbol information. */
308
309static struct btrace_function *
310ftrace_find_caller (struct btrace_function *bfun,
311 struct minimal_symbol *mfun,
312 struct symbol *fun)
313{
314 for (; bfun != NULL; bfun = bfun->up)
315 {
316 /* Skip functions with incompatible symbol information. */
317 if (ftrace_function_switched (bfun, mfun, fun))
318 continue;
319
320 /* This is the function segment we're looking for. */
321 break;
322 }
323
324 return bfun;
325}
326
327/* Find the innermost caller in the back trace of BFUN, skipping all
328 function segments that do not end with a call instruction (e.g.
329 tail calls ending with a jump). */
330
331static struct btrace_function *
332ftrace_find_call (struct gdbarch *gdbarch, struct btrace_function *bfun)
333{
334 for (; bfun != NULL; bfun = bfun->up)
02d27625 335 {
23a7fe75 336 struct btrace_insn *last;
02d27625
MM
337 CORE_ADDR pc;
338
23a7fe75
MM
339 /* We do not allow empty function segments. */
340 gdb_assert (!VEC_empty (btrace_insn_s, bfun->insn));
341
342 last = VEC_last (btrace_insn_s, bfun->insn);
343 pc = last->pc;
02d27625 344
23a7fe75
MM
345 if (gdbarch_insn_is_call (gdbarch, pc))
346 break;
347 }
348
349 return bfun;
350}
351
/* Add a continuation segment for a function into which we return.
   PREV is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function.
   Returns the new segment, never NULL.  */

static struct btrace_function *
ftrace_new_return (struct gdbarch *gdbarch,
		   struct btrace_function *prev,
		   struct minimal_symbol *mfun,
		   struct symbol *fun)
{
  struct btrace_function *bfun, *caller;

  bfun = ftrace_new_function (prev, mfun, fun);

  /* It is important to start at PREV's caller.  Otherwise, we might find
     PREV itself, if PREV is a recursive function.  */
  caller = ftrace_find_caller (prev->up, mfun, fun);
  if (caller != NULL)
    {
      /* The caller of PREV is the preceding btrace function segment in this
	 function instance.  */
      gdb_assert (caller->segment.next == NULL);

      caller->segment.next = bfun;
      bfun->segment.prev = caller;

      /* Maintain the function level.  */
      bfun->level = caller->level;

      /* Maintain the call stack.  */
      bfun->up = caller->up;
      bfun->flags = caller->flags;

      ftrace_debug (bfun, "new return");
    }
  else
    {
      /* We did not find a caller.  This could mean that something went
	 wrong or that the call is simply not included in the trace.  */

      /* Let's search for some actual call.  */
      caller = ftrace_find_call (gdbarch, prev->up);
      if (caller == NULL)
	{
	  /* There is no call in PREV's back trace.  We assume that the
	     branch trace did not include it.  */

	  /* Let's find the topmost call function - this skips tail calls.  */
	  while (prev->up != NULL)
	    prev = prev->up;

	  /* We maintain levels for a series of returns for which we have
	     not seen the calls.
	     We start at the preceding function's level in case this has
	     already been a return for which we have not seen the call.
	     We start at level 0 otherwise, to handle tail calls correctly.  */
	  bfun->level = min (0, prev->level) - 1;

	  /* Fix up the call stack for PREV so its UP link points at the
	     newly created return segment.  */
	  ftrace_fixup_caller (prev, bfun, BFUN_UP_LINKS_TO_RET);

	  ftrace_debug (bfun, "new return - no caller");
	}
      else
	{
	  /* There is a call in PREV's back trace to which we should have
	     returned.  Let's remain at this level.  */
	  bfun->level = prev->level;

	  ftrace_debug (bfun, "new return - unknown caller");
	}
    }

  return bfun;
}
427
428/* Add a new function segment for a function switch.
429 PREV is the chronologically preceding function segment.
430 MFUN and FUN are the symbol information we have for this function. */
431
432static struct btrace_function *
433ftrace_new_switch (struct btrace_function *prev,
434 struct minimal_symbol *mfun,
435 struct symbol *fun)
436{
437 struct btrace_function *bfun;
438
439 /* This is an unexplained function switch. The call stack will likely
440 be wrong at this point. */
441 bfun = ftrace_new_function (prev, mfun, fun);
02d27625 442
23a7fe75
MM
443 /* We keep the function level. */
444 bfun->level = prev->level;
02d27625 445
23a7fe75
MM
446 ftrace_debug (bfun, "new switch");
447
448 return bfun;
449}
450
/* Update BFUN with respect to the instruction at PC.  This may create new
   function segments.
   Return the chronologically latest function segment, never NULL.  */

static struct btrace_function *
ftrace_update_function (struct gdbarch *gdbarch,
			struct btrace_function *bfun, CORE_ADDR pc)
{
  struct bound_minimal_symbol bmfun;
  struct minimal_symbol *mfun;
  struct symbol *fun;
  struct btrace_insn *last;

  /* Try to determine the function we're in.  We use both types of symbols
     to avoid surprises when we sometimes get a full symbol and sometimes
     only a minimal symbol.  */
  fun = find_pc_function (pc);
  bmfun = lookup_minimal_symbol_by_pc (pc);
  mfun = bmfun.minsym;

  if (fun == NULL && mfun == NULL)
    DEBUG_FTRACE ("no symbol at %s", core_addr_to_string_nz (pc));

  /* If we didn't have a function before, we create one.  */
  if (bfun == NULL)
    return ftrace_new_function (bfun, mfun, fun);

  /* Check the last instruction, if we have one.
     We do this check first, since it allows us to fill in the call stack
     links in addition to the normal flow links.  */
  last = NULL;
  if (!VEC_empty (btrace_insn_s, bfun->insn))
    last = VEC_last (btrace_insn_s, bfun->insn);

  if (last != NULL)
    {
      CORE_ADDR lpc;

      lpc = last->pc;

      /* Check for returns.  */
      if (gdbarch_insn_is_ret (gdbarch, lpc))
	return ftrace_new_return (gdbarch, bfun, mfun, fun);

      /* Check for calls.  */
      if (gdbarch_insn_is_call (gdbarch, lpc))
	{
	  int size;

	  size = gdb_insn_length (gdbarch, lpc);

	  /* Ignore calls to the next instruction.  They are used for PIC.  */
	  if (lpc + size != pc)
	    return ftrace_new_call (bfun, mfun, fun);
	}
    }

  /* Check if we're switching functions for some other reason.  */
  if (ftrace_function_switched (bfun, mfun, fun))
    {
      DEBUG_FTRACE ("switching from %s in %s at %s",
		    ftrace_print_insn_addr (last),
		    ftrace_print_function_name (bfun),
		    ftrace_print_filename (bfun));

      if (last != NULL)
	{
	  CORE_ADDR start, lpc;

	  start = get_pc_function_start (pc);

	  /* If we can't determine the function for PC, we treat a jump at
	     the end of the block as tail call.  */
	  if (start == 0)
	    start = pc;

	  lpc = last->pc;

	  /* Jumps indicate optimized tail calls.  */
	  if (start == pc && gdbarch_insn_is_jump (gdbarch, lpc))
	    return ftrace_new_tailcall (bfun, mfun, fun);
	}

      /* Neither a return, a call, nor a tail call: an unexplained
	 function switch.  */
      return ftrace_new_switch (bfun, mfun, fun);
    }

  /* Still in the same function; keep the current segment.  */
  return bfun;
}
539
540/* Update BFUN's source range with respect to the instruction at PC. */
541
542static void
543ftrace_update_lines (struct btrace_function *bfun, CORE_ADDR pc)
544{
545 struct symtab_and_line sal;
546 const char *fullname;
547
548 sal = find_pc_line (pc, 0);
549 if (sal.symtab == NULL || sal.line == 0)
550 {
551 DEBUG_FTRACE ("no lines at %s", core_addr_to_string_nz (pc));
552 return;
553 }
554
555 /* Check if we switched files. This could happen if, say, a macro that
556 is defined in another file is expanded here. */
557 fullname = symtab_to_fullname (sal.symtab);
558 if (ftrace_skip_file (bfun, fullname))
559 {
560 DEBUG_FTRACE ("ignoring file at %s, file=%s",
561 core_addr_to_string_nz (pc), fullname);
562 return;
563 }
564
565 /* Update the line range. */
566 bfun->lbegin = min (bfun->lbegin, sal.line);
567 bfun->lend = max (bfun->lend, sal.line);
568
569 if (record_debug > 1)
570 ftrace_debug (bfun, "update lines");
571}
572
573/* Add the instruction at PC to BFUN's instructions. */
574
575static void
576ftrace_update_insns (struct btrace_function *bfun, CORE_ADDR pc)
577{
578 struct btrace_insn *insn;
579
580 insn = VEC_safe_push (btrace_insn_s, bfun->insn, NULL);
581 insn->pc = pc;
582
583 if (record_debug > 1)
584 ftrace_debug (bfun, "update insn");
585}
586
/* Compute the function branch trace from a block branch trace BTRACE for
   a thread given by BTINFO.  Extends any function trace already present
   in BTINFO; updates BTINFO's begin/end pointers and level offset.  */

static void
btrace_compute_ftrace (struct btrace_thread_info *btinfo,
		       VEC (btrace_block_s) *btrace)
{
  struct btrace_function *begin, *end;
  struct gdbarch *gdbarch;
  unsigned int blk;
  int level;

  DEBUG ("compute ftrace");

  gdbarch = target_gdbarch ();
  begin = btinfo->begin;
  end = btinfo->end;
  /* When extending an existing trace, undo the stored level
     normalization; otherwise start at INT_MAX so the first segment's
     level always wins the min below.  */
  level = begin != NULL ? -btinfo->level : INT_MAX;
  blk = VEC_length (btrace_block_s, btrace);

  /* Blocks are stored newest-first; iterate chronologically, i.e. from
     the last vector element backwards.  */
  while (blk != 0)
    {
      btrace_block_s *block;
      CORE_ADDR pc;

      blk -= 1;

      block = VEC_index (btrace_block_s, btrace, blk);
      pc = block->begin;

      for (;;)
	{
	  int size;

	  /* We should hit the end of the block.  Warn if we went too far.  */
	  if (block->end < pc)
	    {
	      warning (_("Recorded trace may be corrupted around %s."),
		       core_addr_to_string_nz (pc));
	      break;
	    }

	  end = ftrace_update_function (gdbarch, end, pc);
	  if (begin == NULL)
	    begin = end;

	  /* Maintain the function level offset.
	     For all but the last block, we do it here.  */
	  if (blk != 0)
	    level = min (level, end->level);

	  ftrace_update_insns (end, pc);
	  ftrace_update_lines (end, pc);

	  /* We're done once we pushed the instruction at the end.  */
	  if (block->end == pc)
	    break;

	  size = gdb_insn_length (gdbarch, pc);

	  /* Make sure we terminate if we fail to compute the size.  */
	  if (size <= 0)
	    {
	      warning (_("Recorded trace may be incomplete around %s."),
		       core_addr_to_string_nz (pc));
	      break;
	    }

	  pc += size;

	  /* Maintain the function level offset.
	     For the last block, we do it here to not consider the last
	     instruction.
	     Since the last instruction corresponds to the current instruction
	     and is not really part of the execution history, it shouldn't
	     affect the level.  */
	  if (blk == 0)
	    level = min (level, end->level);
	}
    }

  btinfo->begin = begin;
  btinfo->end = end;

  /* LEVEL is the minimal function level of all btrace function segments.
     Define the global level offset to -LEVEL so all function levels are
     normalized to start at zero.  */
  btinfo->level = -level;
}
676
/* See btrace.h.  */

void
btrace_enable (struct thread_info *tp)
{
  /* Already enabled: nothing to do.  */
  if (tp->btrace.target != NULL)
    return;

  if (!target_supports_btrace ())
    error (_("Target does not support branch tracing."));

  DEBUG ("enable thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));

  tp->btrace.target = target_enable_btrace (tp->ptid);
}
692
693/* See btrace.h. */
694
695void
696btrace_disable (struct thread_info *tp)
697{
698 struct btrace_thread_info *btp = &tp->btrace;
699 int errcode = 0;
700
701 if (btp->target == NULL)
702 return;
703
704 DEBUG ("disable thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
705
706 target_disable_btrace (btp->target);
707 btp->target = NULL;
708
709 btrace_clear (tp);
710}
711
712/* See btrace.h. */
713
714void
715btrace_teardown (struct thread_info *tp)
716{
717 struct btrace_thread_info *btp = &tp->btrace;
718 int errcode = 0;
719
720 if (btp->target == NULL)
721 return;
722
723 DEBUG ("teardown thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
724
725 target_teardown_btrace (btp->target);
726 btp->target = NULL;
727
728 btrace_clear (tp);
729}
730
969c39fb
MM
/* Adjust the block trace in order to stitch old and new trace together.
   BTRACE is the new delta trace between the last and the current stop.
   BTINFO is the old branch trace until the last stop.
   May modify BTRACE as well as the existing trace in BTINFO.
   Return 0 on success, -1 otherwise.  */

static int
btrace_stitch_trace (VEC (btrace_block_s) **btrace,
		     const struct btrace_thread_info *btinfo)
{
  struct btrace_function *last_bfun;
  struct btrace_insn *last_insn;
  btrace_block_s *first_new_block;

  /* If we don't have trace, there's nothing to do.  */
  if (VEC_empty (btrace_block_s, *btrace))
    return 0;

  last_bfun = btinfo->end;
  gdb_assert (last_bfun != NULL);

  /* Beware that block trace starts with the most recent block, so the
     chronologically first block in the new trace is the last block in
     the new trace's block vector.  */
  first_new_block = VEC_last (btrace_block_s, *btrace);
  last_insn = VEC_last (btrace_insn_s, last_bfun->insn);

  /* If the current PC at the end of the block is the same as in our current
     trace, there are two explanations:
       1. we executed the instruction and some branch brought us back.
       2. we have not made any progress.
     In the first case, the delta trace vector should contain at least two
     entries.
     In the second case, the delta trace vector should contain exactly one
     entry for the partial block containing the current PC.  Remove it.  */
  if (first_new_block->end == last_insn->pc
      && VEC_length (btrace_block_s, *btrace) == 1)
    {
      VEC_pop (btrace_block_s, *btrace);
      return 0;
    }

  DEBUG ("stitching %s to %s", ftrace_print_insn_addr (last_insn),
	 core_addr_to_string_nz (first_new_block->end));

  /* Do a simple sanity check to make sure we don't accidentally end up
     with a bad block.  This should not occur in practice.  */
  if (first_new_block->end < last_insn->pc)
    {
      warning (_("Error while trying to read delta trace.  Falling back to "
		 "a full read."));
      return -1;
    }

  /* We adjust the last block to start at the end of our current trace.  */
  gdb_assert (first_new_block->begin == 0);
  first_new_block->begin = last_insn->pc;

  /* We simply pop the last insn so we can insert it again as part of
     the normal branch trace computation.
     Since instruction iterators are based on indices in the instructions
     vector, we don't leave any pointers dangling.  */
  DEBUG ("pruning insn at %s for stitching",
	 ftrace_print_insn_addr (last_insn));

  VEC_pop (btrace_insn_s, last_bfun->insn);

  /* The instructions vector may become empty temporarily if this has
     been the only instruction in this function segment.
     This violates the invariant but will be remedied shortly by
     btrace_compute_ftrace when we add the new trace.  */
  return 0;
}
804
805/* Clear the branch trace histories in BTINFO. */
806
807static void
808btrace_clear_history (struct btrace_thread_info *btinfo)
809{
810 xfree (btinfo->insn_history);
811 xfree (btinfo->call_history);
812 xfree (btinfo->replay);
813
814 btinfo->insn_history = NULL;
815 btinfo->call_history = NULL;
816 btinfo->replay = NULL;
817}
818
02d27625
MM
/* See btrace.h.  */

void
btrace_fetch (struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;
  struct btrace_target_info *tinfo;
  VEC (btrace_block_s) *btrace;
  struct cleanup *cleanup;
  int errcode;

  DEBUG ("fetch thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));

  btrace = NULL;
  btinfo = &tp->btrace;
  tinfo = btinfo->target;
  if (tinfo == NULL)
    return;

  /* There's no way we could get new trace while replaying.
     On the other hand, delta trace would return a partial record with the
     current PC, which is the replay PC, not the last PC, as expected.  */
  if (btinfo->replay != NULL)
    return;

  /* Make sure the block vector is freed on all exit paths, including
     the error throw below.  */
  cleanup = make_cleanup (VEC_cleanup (btrace_block_s), &btrace);

  /* Let's first try to extend the trace we already have.  */
  if (btinfo->end != NULL)
    {
      errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_DELTA);
      if (errcode == 0)
	{
	  /* Success.  Let's try to stitch the traces together.  */
	  errcode = btrace_stitch_trace (&btrace, btinfo);
	}
      else
	{
	  /* We failed to read delta trace.  Let's try to read new trace.  */
	  errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_NEW);

	  /* If we got any new trace, discard what we have.  */
	  if (errcode == 0 && !VEC_empty (btrace_block_s, btrace))
	    btrace_clear (tp);
	}

      /* If we were not able to read the trace, we start over.  */
      if (errcode != 0)
	{
	  btrace_clear (tp);
	  errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);
	}
    }
  else
    errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);

  /* If we were not able to read the branch trace, signal an error.  */
  if (errcode != 0)
    error (_("Failed to read branch trace."));

  /* Compute the trace, provided we have any.  */
  if (!VEC_empty (btrace_block_s, btrace))
    {
      /* The iterators in the histories would be invalidated by the new
	 trace; drop them before recomputing.  */
      btrace_clear_history (btinfo);
      btrace_compute_ftrace (btinfo, btrace);
    }

  do_cleanups (cleanup);
}
888
889/* See btrace.h. */
890
891void
892btrace_clear (struct thread_info *tp)
893{
894 struct btrace_thread_info *btinfo;
23a7fe75 895 struct btrace_function *it, *trash;
02d27625
MM
896
897 DEBUG ("clear thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
898
0b722aec
MM
899 /* Make sure btrace frames that may hold a pointer into the branch
900 trace data are destroyed. */
901 reinit_frame_cache ();
902
02d27625
MM
903 btinfo = &tp->btrace;
904
23a7fe75
MM
905 it = btinfo->begin;
906 while (it != NULL)
907 {
908 trash = it;
909 it = it->flow.next;
02d27625 910
23a7fe75
MM
911 xfree (trash);
912 }
913
914 btinfo->begin = NULL;
915 btinfo->end = NULL;
916
969c39fb 917 btrace_clear_history (btinfo);
02d27625
MM
918}
919
/* See btrace.h.  */

void
btrace_free_objfile (struct objfile *objfile)
{
  struct thread_info *tp;

  DEBUG ("free objfile");

  /* Symbol pointers in the trace would dangle once OBJFILE is gone;
     drop all threads' traces.  */
  ALL_THREADS (tp)
    btrace_clear (tp);
}
c12a2917
MM
932
933#if defined (HAVE_LIBEXPAT)
934
/* Check the btrace document version.
   Raises an XML error if the "version" attribute is not "1.0".  */

static void
check_xml_btrace_version (struct gdb_xml_parser *parser,
			  const struct gdb_xml_element *element,
			  void *user_data, VEC (gdb_xml_value_s) *attributes)
{
  const char *version = xml_find_attribute (attributes, "version")->value;

  if (strcmp (version, "1.0") != 0)
    gdb_xml_error (parser, _("Unsupported btrace version: \"%s\""), version);
}
947
948/* Parse a btrace "block" xml record. */
949
950static void
951parse_xml_btrace_block (struct gdb_xml_parser *parser,
952 const struct gdb_xml_element *element,
953 void *user_data, VEC (gdb_xml_value_s) *attributes)
954{
955 VEC (btrace_block_s) **btrace;
956 struct btrace_block *block;
957 ULONGEST *begin, *end;
958
959 btrace = user_data;
960 block = VEC_safe_push (btrace_block_s, *btrace, NULL);
961
962 begin = xml_find_attribute (attributes, "begin")->value;
963 end = xml_find_attribute (attributes, "end")->value;
964
965 block->begin = *begin;
966 block->end = *end;
967}
968
/* Attributes of a btrace "block" element.  */

static const struct gdb_xml_attribute block_attributes[] = {
  { "begin", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { "end", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

/* Attributes of the top-level "btrace" element.  */

static const struct gdb_xml_attribute btrace_attributes[] = {
  { "version", GDB_XML_AF_NONE, NULL, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

/* Child elements of "btrace": any number of optional "block" elements.  */

static const struct gdb_xml_element btrace_children[] = {
  { "block", block_attributes, NULL,
    GDB_XML_EF_REPEATABLE | GDB_XML_EF_OPTIONAL, parse_xml_btrace_block, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

/* The top-level element of a btrace document.  */

static const struct gdb_xml_element btrace_elements[] = {
  { "btrace", btrace_attributes, btrace_children, GDB_XML_EF_NONE,
    check_xml_btrace_version, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};
991
992#endif /* defined (HAVE_LIBEXPAT) */
993
/* See btrace.h.  */

VEC (btrace_block_s) *
parse_xml_btrace (const char *buffer)
{
  VEC (btrace_block_s) *btrace = NULL;
  struct cleanup *cleanup;
  int errcode;

#if defined (HAVE_LIBEXPAT)

  /* Free the partially-built vector if parsing throws.  */
  cleanup = make_cleanup (VEC_cleanup (btrace_block_s), &btrace);
  errcode = gdb_xml_parse_quick (_("btrace"), "btrace.dtd", btrace_elements,
				 buffer, &btrace);
  if (errcode != 0)
    error (_("Error parsing branch trace."));

  /* Keep parse results.  */
  discard_cleanups (cleanup);

#else  /* !defined (HAVE_LIBEXPAT) */

  error (_("Cannot process branch trace.  XML parsing is not supported."));

#endif  /* !defined (HAVE_LIBEXPAT) */

  return btrace;
}
23a7fe75
MM
1022
1023/* See btrace.h. */
1024
1025const struct btrace_insn *
1026btrace_insn_get (const struct btrace_insn_iterator *it)
1027{
1028 const struct btrace_function *bfun;
1029 unsigned int index, end;
1030
1031 index = it->index;
1032 bfun = it->function;
1033
1034 /* The index is within the bounds of this function's instruction vector. */
1035 end = VEC_length (btrace_insn_s, bfun->insn);
1036 gdb_assert (0 < end);
1037 gdb_assert (index < end);
1038
1039 return VEC_index (btrace_insn_s, bfun->insn, index);
1040}
1041
1042/* See btrace.h. */
1043
1044unsigned int
1045btrace_insn_number (const struct btrace_insn_iterator *it)
1046{
1047 const struct btrace_function *bfun;
1048
1049 bfun = it->function;
1050 return bfun->insn_offset + it->index;
1051}
1052
1053/* See btrace.h. */
1054
1055void
1056btrace_insn_begin (struct btrace_insn_iterator *it,
1057 const struct btrace_thread_info *btinfo)
1058{
1059 const struct btrace_function *bfun;
1060
1061 bfun = btinfo->begin;
1062 if (bfun == NULL)
1063 error (_("No trace."));
1064
1065 it->function = bfun;
1066 it->index = 0;
1067}
1068
1069/* See btrace.h. */
1070
1071void
1072btrace_insn_end (struct btrace_insn_iterator *it,
1073 const struct btrace_thread_info *btinfo)
1074{
1075 const struct btrace_function *bfun;
1076 unsigned int length;
1077
1078 bfun = btinfo->end;
1079 if (bfun == NULL)
1080 error (_("No trace."));
1081
1082 /* The last instruction in the last function is the current instruction.
1083 We point to it - it is one past the end of the execution trace. */
1084 length = VEC_length (btrace_insn_s, bfun->insn);
1085
1086 it->function = bfun;
1087 it->index = length - 1;
1088}
1089
/* See btrace.h.  */

/* Advance IT by at most STRIDE instructions and return the number of
   instructions actually stepped.  Stepping stops early at the last
   instruction of the trace; the iterator is left pointing at a valid
   instruction in all cases.  */

unsigned int
btrace_insn_next (struct btrace_insn_iterator *it, unsigned int stride)
{
  const struct btrace_function *bfun;
  unsigned int index, steps;

  bfun = it->function;
  steps = 0;
  index = it->index;

  while (stride != 0)
    {
      unsigned int end, space, adv;

      end = VEC_length (btrace_insn_s, bfun->insn);

      /* Function segments are never empty and the iterator always
	 points inside its segment.  */
      gdb_assert (0 < end);
      gdb_assert (index < end);

      /* Compute the number of instructions remaining in this segment.  */
      space = end - index;

      /* Advance the iterator as far as possible within this segment.  */
      adv = min (space, stride);
      stride -= adv;
      index += adv;
      steps += adv;

      /* Move to the next function if we're at the end of this one.  */
      if (index == end)
	{
	  const struct btrace_function *next;

	  next = bfun->flow.next;
	  if (next == NULL)
	    {
	      /* We stepped past the last function.

		 Let's adjust the index to point to the last instruction in
		 the previous function.  */
	      index -= 1;
	      steps -= 1;
	      break;
	    }

	  /* We now point to the first instruction in the new function.  */
	  bfun = next;
	  index = 0;
	}

      /* We did make progress.  */
      gdb_assert (adv > 0);
    }

  /* Update the iterator.  */
  it->function = bfun;
  it->index = index;

  return steps;
}
1152
/* See btrace.h.  */

/* Step IT backwards by at most STRIDE instructions and return the
   number of instructions actually stepped.  Stepping stops early at the
   first instruction of the trace.  */

unsigned int
btrace_insn_prev (struct btrace_insn_iterator *it, unsigned int stride)
{
  const struct btrace_function *bfun;
  unsigned int index, steps;

  bfun = it->function;
  steps = 0;
  index = it->index;

  while (stride != 0)
    {
      unsigned int adv;

      /* Move to the previous function if we're at the start of this one.  */
      if (index == 0)
	{
	  const struct btrace_function *prev;

	  prev = bfun->flow.prev;
	  if (prev == NULL)
	    break;

	  /* We point to one after the last instruction in the new function.  */
	  bfun = prev;
	  index = VEC_length (btrace_insn_s, bfun->insn);

	  /* There is at least one instruction in this function segment.  */
	  gdb_assert (index > 0);
	}

      /* Advance the iterator as far as possible within this segment.  */
      adv = min (index, stride);
      stride -= adv;
      index -= adv;
      steps += adv;

      /* We did make progress.  */
      gdb_assert (adv > 0);
    }

  /* Update the iterator.  */
  it->function = bfun;
  it->index = index;

  return steps;
}
1202
/* See btrace.h.  */

int
btrace_insn_cmp (const struct btrace_insn_iterator *lhs,
		 const struct btrace_insn_iterator *rhs)
{
  /* Order iterators by their global instruction numbers: negative if
     LHS comes first, zero if equal, positive if RHS comes first.  */
  return (int) (btrace_insn_number (lhs) - btrace_insn_number (rhs));
}
1216
1217/* See btrace.h. */
1218
1219int
1220btrace_find_insn_by_number (struct btrace_insn_iterator *it,
1221 const struct btrace_thread_info *btinfo,
1222 unsigned int number)
1223{
1224 const struct btrace_function *bfun;
1225 unsigned int end;
1226
1227 for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
1228 if (bfun->insn_offset <= number)
1229 break;
1230
1231 if (bfun == NULL)
1232 return 0;
1233
1234 end = bfun->insn_offset + VEC_length (btrace_insn_s, bfun->insn);
1235 if (end <= number)
1236 return 0;
1237
1238 it->function = bfun;
1239 it->index = number - bfun->insn_offset;
1240
1241 return 1;
1242}
1243
/* See btrace.h.  */

const struct btrace_function *
btrace_call_get (const struct btrace_call_iterator *it)
{
  /* Hand out the function segment the iterator currently refers to;
     this is NULL for the end iterator.  */
  return it->function;
}
1251
/* See btrace.h.  */

unsigned int
btrace_call_number (const struct btrace_call_iterator *it)
{
  const struct btrace_thread_info *btinfo;
  const struct btrace_function *bfun;
  unsigned int insns;

  btinfo = it->btinfo;
  bfun = it->function;

  /* A regular iterator carries its number in the function segment.  */
  if (bfun != NULL)
    return bfun->number;

  /* For the end iterator, i.e. bfun == NULL, we return one more than the
     number of the last function.
     NOTE(review): this dereferences BTINFO->END without a NULL check;
     presumably end iterators only exist when there is trace (see
     btrace_call_end) — confirm.  */
  bfun = btinfo->end;
  insns = VEC_length (btrace_insn_s, bfun->insn);

  /* If the function contains only a single instruction (i.e. the current
     instruction), it will be skipped and its number is already the number
     we seek.  */
  if (insns == 1)
    return bfun->number;

  /* Otherwise, return one more than the number of the last function.  */
  return bfun->number + 1;
}
1280
1281/* See btrace.h. */
1282
1283void
1284btrace_call_begin (struct btrace_call_iterator *it,
1285 const struct btrace_thread_info *btinfo)
1286{
1287 const struct btrace_function *bfun;
1288
1289 bfun = btinfo->begin;
1290 if (bfun == NULL)
1291 error (_("No trace."));
1292
1293 it->btinfo = btinfo;
1294 it->function = bfun;
1295}
1296
1297/* See btrace.h. */
1298
1299void
1300btrace_call_end (struct btrace_call_iterator *it,
1301 const struct btrace_thread_info *btinfo)
1302{
1303 const struct btrace_function *bfun;
1304
1305 bfun = btinfo->end;
1306 if (bfun == NULL)
1307 error (_("No trace."));
1308
1309 it->btinfo = btinfo;
1310 it->function = NULL;
1311}
1312
/* See btrace.h.  */

/* Advance the call iterator IT by at most STRIDE function segments and
   return the number of segments actually stepped.  The last segment is
   skipped when it contains only the current instruction.  */

unsigned int
btrace_call_next (struct btrace_call_iterator *it, unsigned int stride)
{
  const struct btrace_function *bfun;
  unsigned int steps;

  bfun = it->function;
  steps = 0;
  while (bfun != NULL)
    {
      const struct btrace_function *next;
      unsigned int insns;

      next = bfun->flow.next;
      if (next == NULL)
	{
	  /* Ignore the last function if it only contains a single
	     (i.e. the current) instruction.  The decrement cancels the
	     increment at the bottom of the loop, making the step onto
	     that segment free.
	     NOTE(review): STEPS is unsigned, so when it is 0 here it
	     temporarily wraps to UINT_MAX; the wraparound is defined for
	     unsigned arithmetic and is undone by the increment below,
	     but the STRIDE == STEPS comparison in between would match an
	     equally huge STRIDE — presumably callers never pass that.  */
	  insns = VEC_length (btrace_insn_s, bfun->insn);
	  if (insns == 1)
	    steps -= 1;
	}

      if (stride == steps)
	break;

      bfun = next;
      steps += 1;
    }

  it->function = bfun;
  return steps;
}
1348
/* See btrace.h.  */

/* Step the call iterator IT backwards by at most STRIDE function
   segments and return the number of segments actually stepped.
   Stepping stops early at the first segment of the trace.  */

unsigned int
btrace_call_prev (struct btrace_call_iterator *it, unsigned int stride)
{
  const struct btrace_thread_info *btinfo;
  const struct btrace_function *bfun;
  unsigned int steps;

  bfun = it->function;
  steps = 0;

  if (bfun == NULL)
    {
      /* We start at the end iterator and must first locate the last
	 real function segment to step onto.  */
      unsigned int insns;

      btinfo = it->btinfo;
      bfun = btinfo->end;
      if (bfun == NULL)
	return 0;

      /* Ignore the last function if it only contains a single
	 (i.e. the current) instruction.  */
      insns = VEC_length (btrace_insn_s, bfun->insn);
      if (insns == 1)
	bfun = bfun->flow.prev;

      if (bfun == NULL)
	return 0;

      /* Moving off the end iterator counts as one step.  */
      steps += 1;
    }

  while (steps < stride)
    {
      const struct btrace_function *prev;

      prev = bfun->flow.prev;
      if (prev == NULL)
	break;

      bfun = prev;
      steps += 1;
    }

  it->function = bfun;
  return steps;
}
1397
/* See btrace.h.  */

int
btrace_call_cmp (const struct btrace_call_iterator *lhs,
		 const struct btrace_call_iterator *rhs)
{
  /* Order iterators by their call numbers: negative if LHS comes first,
     zero if equal, positive if RHS comes first.  */
  return (int) (btrace_call_number (lhs) - btrace_call_number (rhs));
}
1411
1412/* See btrace.h. */
1413
1414int
1415btrace_find_call_by_number (struct btrace_call_iterator *it,
1416 const struct btrace_thread_info *btinfo,
1417 unsigned int number)
1418{
1419 const struct btrace_function *bfun;
1420
1421 for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
1422 {
1423 unsigned int bnum;
1424
1425 bnum = bfun->number;
1426 if (number == bnum)
1427 {
1428 it->btinfo = btinfo;
1429 it->function = bfun;
1430 return 1;
1431 }
1432
1433 /* Functions are ordered and numbered consecutively. We could bail out
1434 earlier. On the other hand, it is very unlikely that we search for
1435 a nonexistent function. */
1436 }
1437
1438 return 0;
1439}
1440
/* See btrace.h.  */

void
btrace_set_insn_history (struct btrace_thread_info *btinfo,
			 const struct btrace_insn_iterator *begin,
			 const struct btrace_insn_iterator *end)
{
  /* Allocate the history record lazily on first use; xzalloc
     zero-initializes it.  */
  if (btinfo->insn_history == NULL)
    btinfo->insn_history = xzalloc (sizeof (*btinfo->insn_history));

  /* Overwrite any previously stored iterator range.  */
  btinfo->insn_history->begin = *begin;
  btinfo->insn_history->end = *end;
}
1454
/* See btrace.h.  */

void
btrace_set_call_history (struct btrace_thread_info *btinfo,
			 const struct btrace_call_iterator *begin,
			 const struct btrace_call_iterator *end)
{
  /* Both iterators must walk the same branch trace.  */
  gdb_assert (begin->btinfo == end->btinfo);

  /* Allocate the history record lazily on first use; xzalloc
     zero-initializes it.  */
  if (btinfo->call_history == NULL)
    btinfo->call_history = xzalloc (sizeof (*btinfo->call_history));

  /* Overwrite any previously stored iterator range.  */
  btinfo->call_history->begin = *begin;
  btinfo->call_history->end = *end;
}
07bbe694
MM
1470
1471/* See btrace.h. */
1472
1473int
1474btrace_is_replaying (struct thread_info *tp)
1475{
1476 return tp->btrace.replay != NULL;
1477}
This page took 0.137188 seconds and 4 git commands to generate.