1 /* Branch trace support for GDB, the GNU debugger.
3 Copyright (C) 2013-2015 Free Software Foundation, Inc.
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "gdbthread.h"
31 #include "filenames.h"
32 #include "xml-support.h"
35 /* Print a record debug message. Use do ... while (0) to avoid ambiguities
36 when used in if statements. */
38 #define DEBUG(msg, args...) \
41 if (record_debug != 0) \
42 fprintf_unfiltered (gdb_stdlog, \
43 "[btrace] " msg "\n", ##args); \
47 #define DEBUG_FTRACE(msg, args...) DEBUG ("[ftrace] " msg, ##args)
49 /* Return the function name of a recorded function segment for printing.
50 This function never returns NULL. */
53 ftrace_print_function_name (const struct btrace_function
*bfun
)
55 struct minimal_symbol
*msym
;
62 return SYMBOL_PRINT_NAME (sym
);
65 return MSYMBOL_PRINT_NAME (msym
);
70 /* Return the file name of a recorded function segment for printing.
71 This function never returns NULL. */
74 ftrace_print_filename (const struct btrace_function
*bfun
)
82 filename
= symtab_to_filename_for_display (symbol_symtab (sym
));
84 filename
= "<unknown>";
89 /* Return a string representation of the address of an instruction.
90 This function never returns NULL. */
93 ftrace_print_insn_addr (const struct btrace_insn
*insn
)
98 return core_addr_to_string_nz (insn
->pc
);
101 /* Print an ftrace debug status message. */
104 ftrace_debug (const struct btrace_function
*bfun
, const char *prefix
)
106 const char *fun
, *file
;
107 unsigned int ibegin
, iend
;
110 fun
= ftrace_print_function_name (bfun
);
111 file
= ftrace_print_filename (bfun
);
114 ibegin
= bfun
->insn_offset
;
115 iend
= ibegin
+ VEC_length (btrace_insn_s
, bfun
->insn
);
117 DEBUG_FTRACE ("%s: fun = %s, file = %s, level = %d, insn = [%u; %u)",
118 prefix
, fun
, file
, level
, ibegin
, iend
);
121 /* Return non-zero if BFUN does not match MFUN and FUN,
122 return zero otherwise. */
125 ftrace_function_switched (const struct btrace_function
*bfun
,
126 const struct minimal_symbol
*mfun
,
127 const struct symbol
*fun
)
129 struct minimal_symbol
*msym
;
135 /* If the minimal symbol changed, we certainly switched functions. */
136 if (mfun
!= NULL
&& msym
!= NULL
137 && strcmp (MSYMBOL_LINKAGE_NAME (mfun
), MSYMBOL_LINKAGE_NAME (msym
)) != 0)
140 /* If the symbol changed, we certainly switched functions. */
141 if (fun
!= NULL
&& sym
!= NULL
)
143 const char *bfname
, *fname
;
145 /* Check the function name. */
146 if (strcmp (SYMBOL_LINKAGE_NAME (fun
), SYMBOL_LINKAGE_NAME (sym
)) != 0)
149 /* Check the location of those functions, as well. */
150 bfname
= symtab_to_fullname (symbol_symtab (sym
));
151 fname
= symtab_to_fullname (symbol_symtab (fun
));
152 if (filename_cmp (fname
, bfname
) != 0)
156 /* If we lost symbol information, we switched functions. */
157 if (!(msym
== NULL
&& sym
== NULL
) && mfun
== NULL
&& fun
== NULL
)
160 /* If we gained symbol information, we switched functions. */
161 if (msym
== NULL
&& sym
== NULL
&& !(mfun
== NULL
&& fun
== NULL
))
167 /* Allocate and initialize a new branch trace function segment.
168 PREV is the chronologically preceding function segment.
169 MFUN and FUN are the symbol information we have for this function. */
171 static struct btrace_function
*
172 ftrace_new_function (struct btrace_function
*prev
,
173 struct minimal_symbol
*mfun
,
176 struct btrace_function
*bfun
;
178 bfun
= xzalloc (sizeof (*bfun
));
182 bfun
->flow
.prev
= prev
;
186 /* Start counting at one. */
188 bfun
->insn_offset
= 1;
192 gdb_assert (prev
->flow
.next
== NULL
);
193 prev
->flow
.next
= bfun
;
195 bfun
->number
= prev
->number
+ 1;
196 bfun
->insn_offset
= (prev
->insn_offset
197 + VEC_length (btrace_insn_s
, prev
->insn
));
198 bfun
->level
= prev
->level
;
204 /* Update the UP field of a function segment. */
207 ftrace_update_caller (struct btrace_function
*bfun
,
208 struct btrace_function
*caller
,
209 enum btrace_function_flag flags
)
211 if (bfun
->up
!= NULL
)
212 ftrace_debug (bfun
, "updating caller");
217 ftrace_debug (bfun
, "set caller");
220 /* Fix up the caller for all segments of a function. */
223 ftrace_fixup_caller (struct btrace_function
*bfun
,
224 struct btrace_function
*caller
,
225 enum btrace_function_flag flags
)
227 struct btrace_function
*prev
, *next
;
229 ftrace_update_caller (bfun
, caller
, flags
);
231 /* Update all function segments belonging to the same function. */
232 for (prev
= bfun
->segment
.prev
; prev
!= NULL
; prev
= prev
->segment
.prev
)
233 ftrace_update_caller (prev
, caller
, flags
);
235 for (next
= bfun
->segment
.next
; next
!= NULL
; next
= next
->segment
.next
)
236 ftrace_update_caller (next
, caller
, flags
);
239 /* Add a new function segment for a call.
240 CALLER is the chronologically preceding function segment.
241 MFUN and FUN are the symbol information we have for this function. */
243 static struct btrace_function
*
244 ftrace_new_call (struct btrace_function
*caller
,
245 struct minimal_symbol
*mfun
,
248 struct btrace_function
*bfun
;
250 bfun
= ftrace_new_function (caller
, mfun
, fun
);
254 ftrace_debug (bfun
, "new call");
259 /* Add a new function segment for a tail call.
260 CALLER is the chronologically preceding function segment.
261 MFUN and FUN are the symbol information we have for this function. */
263 static struct btrace_function
*
264 ftrace_new_tailcall (struct btrace_function
*caller
,
265 struct minimal_symbol
*mfun
,
268 struct btrace_function
*bfun
;
270 bfun
= ftrace_new_function (caller
, mfun
, fun
);
273 bfun
->flags
|= BFUN_UP_LINKS_TO_TAILCALL
;
275 ftrace_debug (bfun
, "new tail call");
280 /* Find the innermost caller in the back trace of BFUN with MFUN/FUN
281 symbol information. */
283 static struct btrace_function
*
284 ftrace_find_caller (struct btrace_function
*bfun
,
285 struct minimal_symbol
*mfun
,
288 for (; bfun
!= NULL
; bfun
= bfun
->up
)
290 /* Skip functions with incompatible symbol information. */
291 if (ftrace_function_switched (bfun
, mfun
, fun
))
294 /* This is the function segment we're looking for. */
301 /* Find the innermost caller in the back trace of BFUN, skipping all
302 function segments that do not end with a call instruction (e.g.
303 tail calls ending with a jump). */
305 static struct btrace_function
*
306 ftrace_find_call (struct btrace_function
*bfun
)
308 for (; bfun
!= NULL
; bfun
= bfun
->up
)
310 struct btrace_insn
*last
;
313 if (bfun
->errcode
!= 0)
316 last
= VEC_last (btrace_insn_s
, bfun
->insn
);
318 if (last
->iclass
== BTRACE_INSN_CALL
)
325 /* Add a continuation segment for a function into which we return.
326 PREV is the chronologically preceding function segment.
327 MFUN and FUN are the symbol information we have for this function. */
329 static struct btrace_function
*
330 ftrace_new_return (struct btrace_function
*prev
,
331 struct minimal_symbol
*mfun
,
334 struct btrace_function
*bfun
, *caller
;
336 bfun
= ftrace_new_function (prev
, mfun
, fun
);
338 /* It is important to start at PREV's caller. Otherwise, we might find
339 PREV itself, if PREV is a recursive function. */
340 caller
= ftrace_find_caller (prev
->up
, mfun
, fun
);
343 /* The caller of PREV is the preceding btrace function segment in this
344 function instance. */
345 gdb_assert (caller
->segment
.next
== NULL
);
347 caller
->segment
.next
= bfun
;
348 bfun
->segment
.prev
= caller
;
350 /* Maintain the function level. */
351 bfun
->level
= caller
->level
;
353 /* Maintain the call stack. */
354 bfun
->up
= caller
->up
;
355 bfun
->flags
= caller
->flags
;
357 ftrace_debug (bfun
, "new return");
361 /* We did not find a caller. This could mean that something went
362 wrong or that the call is simply not included in the trace. */
364 /* Let's search for some actual call. */
365 caller
= ftrace_find_call (prev
->up
);
368 /* There is no call in PREV's back trace. We assume that the
369 branch trace did not include it. */
371 /* Let's find the topmost call function - this skips tail calls. */
372 while (prev
->up
!= NULL
)
375 /* We maintain levels for a series of returns for which we have
377 We start at the preceding function's level in case this has
378 already been a return for which we have not seen the call.
379 We start at level 0 otherwise, to handle tail calls correctly. */
380 bfun
->level
= min (0, prev
->level
) - 1;
382 /* Fix up the call stack for PREV. */
383 ftrace_fixup_caller (prev
, bfun
, BFUN_UP_LINKS_TO_RET
);
385 ftrace_debug (bfun
, "new return - no caller");
389 /* There is a call in PREV's back trace to which we should have
390 returned. Let's remain at this level. */
391 bfun
->level
= prev
->level
;
393 ftrace_debug (bfun
, "new return - unknown caller");
/* Add a new function segment for a function switch.
   PREV is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_switch (struct btrace_function *prev,
		   struct minimal_symbol *mfun,
		   struct symbol *fun)
{
  struct btrace_function *bfun;

  /* This is an unexplained function switch.  The call stack will likely
     be wrong at this point.  */
  bfun = ftrace_new_function (prev, mfun, fun);

  ftrace_debug (bfun, "new switch");

  return bfun;
}
420 /* Add a new function segment for a gap in the trace due to a decode error.
421 PREV is the chronologically preceding function segment.
422 ERRCODE is the format-specific error code. */
424 static struct btrace_function
*
425 ftrace_new_gap (struct btrace_function
*prev
, int errcode
)
427 struct btrace_function
*bfun
;
429 /* We hijack prev if it was empty. */
430 if (prev
!= NULL
&& prev
->errcode
== 0
431 && VEC_empty (btrace_insn_s
, prev
->insn
))
434 bfun
= ftrace_new_function (prev
, NULL
, NULL
);
436 bfun
->errcode
= errcode
;
438 ftrace_debug (bfun
, "new gap");
443 /* Update BFUN with respect to the instruction at PC. This may create new
445 Return the chronologically latest function segment, never NULL. */
447 static struct btrace_function
*
448 ftrace_update_function (struct btrace_function
*bfun
, CORE_ADDR pc
)
450 struct bound_minimal_symbol bmfun
;
451 struct minimal_symbol
*mfun
;
453 struct btrace_insn
*last
;
455 /* Try to determine the function we're in. We use both types of symbols
456 to avoid surprises when we sometimes get a full symbol and sometimes
457 only a minimal symbol. */
458 fun
= find_pc_function (pc
);
459 bmfun
= lookup_minimal_symbol_by_pc (pc
);
462 if (fun
== NULL
&& mfun
== NULL
)
463 DEBUG_FTRACE ("no symbol at %s", core_addr_to_string_nz (pc
));
465 /* If we didn't have a function or if we had a gap before, we create one. */
466 if (bfun
== NULL
|| bfun
->errcode
!= 0)
467 return ftrace_new_function (bfun
, mfun
, fun
);
469 /* Check the last instruction, if we have one.
470 We do this check first, since it allows us to fill in the call stack
471 links in addition to the normal flow links. */
473 if (!VEC_empty (btrace_insn_s
, bfun
->insn
))
474 last
= VEC_last (btrace_insn_s
, bfun
->insn
);
478 switch (last
->iclass
)
480 case BTRACE_INSN_RETURN
:
481 return ftrace_new_return (bfun
, mfun
, fun
);
483 case BTRACE_INSN_CALL
:
484 /* Ignore calls to the next instruction. They are used for PIC. */
485 if (last
->pc
+ last
->size
== pc
)
488 return ftrace_new_call (bfun
, mfun
, fun
);
490 case BTRACE_INSN_JUMP
:
494 start
= get_pc_function_start (pc
);
496 /* If we can't determine the function for PC, we treat a jump at
497 the end of the block as tail call. */
498 if (start
== 0 || start
== pc
)
499 return ftrace_new_tailcall (bfun
, mfun
, fun
);
504 /* Check if we're switching functions for some other reason. */
505 if (ftrace_function_switched (bfun
, mfun
, fun
))
507 DEBUG_FTRACE ("switching from %s in %s at %s",
508 ftrace_print_insn_addr (last
),
509 ftrace_print_function_name (bfun
),
510 ftrace_print_filename (bfun
));
512 return ftrace_new_switch (bfun
, mfun
, fun
);
518 /* Add the instruction at PC to BFUN's instructions. */
521 ftrace_update_insns (struct btrace_function
*bfun
,
522 const struct btrace_insn
*insn
)
524 VEC_safe_push (btrace_insn_s
, bfun
->insn
, insn
);
526 if (record_debug
> 1)
527 ftrace_debug (bfun
, "update insn");
530 /* Classify the instruction at PC. */
532 static enum btrace_insn_class
533 ftrace_classify_insn (struct gdbarch
*gdbarch
, CORE_ADDR pc
)
535 volatile struct gdb_exception error
;
536 enum btrace_insn_class iclass
;
538 iclass
= BTRACE_INSN_OTHER
;
539 TRY_CATCH (error
, RETURN_MASK_ERROR
)
541 if (gdbarch_insn_is_call (gdbarch
, pc
))
542 iclass
= BTRACE_INSN_CALL
;
543 else if (gdbarch_insn_is_ret (gdbarch
, pc
))
544 iclass
= BTRACE_INSN_RETURN
;
545 else if (gdbarch_insn_is_jump (gdbarch
, pc
))
546 iclass
= BTRACE_INSN_JUMP
;
552 /* Compute the function branch trace from BTS trace. */
555 btrace_compute_ftrace_bts (struct thread_info
*tp
,
556 const struct btrace_data_bts
*btrace
)
558 struct btrace_thread_info
*btinfo
;
559 struct btrace_function
*begin
, *end
;
560 struct gdbarch
*gdbarch
;
561 unsigned int blk
, ngaps
;
564 gdbarch
= target_gdbarch ();
565 btinfo
= &tp
->btrace
;
566 begin
= btinfo
->begin
;
568 ngaps
= btinfo
->ngaps
;
569 level
= begin
!= NULL
? -btinfo
->level
: INT_MAX
;
570 blk
= VEC_length (btrace_block_s
, btrace
->blocks
);
574 btrace_block_s
*block
;
579 block
= VEC_index (btrace_block_s
, btrace
->blocks
, blk
);
584 volatile struct gdb_exception error
;
585 struct btrace_insn insn
;
588 /* We should hit the end of the block. Warn if we went too far. */
591 /* Indicate the gap in the trace - unless we're at the
595 warning (_("Recorded trace may be corrupted around %s."),
596 core_addr_to_string_nz (pc
));
598 end
= ftrace_new_gap (end
, BDE_BTS_OVERFLOW
);
604 end
= ftrace_update_function (end
, pc
);
608 /* Maintain the function level offset.
609 For all but the last block, we do it here. */
611 level
= min (level
, end
->level
);
614 TRY_CATCH (error
, RETURN_MASK_ERROR
)
615 size
= gdb_insn_length (gdbarch
, pc
);
619 insn
.iclass
= ftrace_classify_insn (gdbarch
, pc
);
621 ftrace_update_insns (end
, &insn
);
623 /* We're done once we pushed the instruction at the end. */
624 if (block
->end
== pc
)
627 /* We can't continue if we fail to compute the size. */
630 warning (_("Recorded trace may be incomplete around %s."),
631 core_addr_to_string_nz (pc
));
633 /* Indicate the gap in the trace. We just added INSN so we're
634 not at the beginning. */
635 end
= ftrace_new_gap (end
, BDE_BTS_INSN_SIZE
);
643 /* Maintain the function level offset.
644 For the last block, we do it here to not consider the last
646 Since the last instruction corresponds to the current instruction
647 and is not really part of the execution history, it shouldn't
650 level
= min (level
, end
->level
);
654 btinfo
->begin
= begin
;
656 btinfo
->ngaps
= ngaps
;
658 /* LEVEL is the minimal function level of all btrace function segments.
659 Define the global level offset to -LEVEL so all function levels are
660 normalized to start at zero. */
661 btinfo
->level
= -level
;
664 /* Compute the function branch trace from a block branch trace BTRACE for
665 a thread given by BTINFO. */
668 btrace_compute_ftrace (struct thread_info
*tp
, struct btrace_data
*btrace
)
670 DEBUG ("compute ftrace");
672 switch (btrace
->format
)
674 case BTRACE_FORMAT_NONE
:
677 case BTRACE_FORMAT_BTS
:
678 btrace_compute_ftrace_bts (tp
, &btrace
->variant
.bts
);
682 internal_error (__FILE__
, __LINE__
, _("Unkown branch trace format."));
685 /* Add an entry for the current PC. */
688 btrace_add_pc (struct thread_info
*tp
)
690 struct btrace_data btrace
;
691 struct btrace_block
*block
;
692 struct regcache
*regcache
;
693 struct cleanup
*cleanup
;
696 regcache
= get_thread_regcache (tp
->ptid
);
697 pc
= regcache_read_pc (regcache
);
699 btrace_data_init (&btrace
);
700 btrace
.format
= BTRACE_FORMAT_BTS
;
701 btrace
.variant
.bts
.blocks
= NULL
;
703 cleanup
= make_cleanup_btrace_data (&btrace
);
705 block
= VEC_safe_push (btrace_block_s
, btrace
.variant
.bts
.blocks
, NULL
);
709 btrace_compute_ftrace (tp
, &btrace
);
711 do_cleanups (cleanup
);
717 btrace_enable (struct thread_info
*tp
, const struct btrace_config
*conf
)
719 if (tp
->btrace
.target
!= NULL
)
722 if (!target_supports_btrace (conf
->format
))
723 error (_("Target does not support branch tracing."));
725 DEBUG ("enable thread %d (%s)", tp
->num
, target_pid_to_str (tp
->ptid
));
727 tp
->btrace
.target
= target_enable_btrace (tp
->ptid
, conf
);
729 /* Add an entry for the current PC so we start tracing from where we
731 if (tp
->btrace
.target
!= NULL
)
737 const struct btrace_config
*
738 btrace_conf (const struct btrace_thread_info
*btinfo
)
740 if (btinfo
->target
== NULL
)
743 return target_btrace_conf (btinfo
->target
);
749 btrace_disable (struct thread_info
*tp
)
751 struct btrace_thread_info
*btp
= &tp
->btrace
;
754 if (btp
->target
== NULL
)
757 DEBUG ("disable thread %d (%s)", tp
->num
, target_pid_to_str (tp
->ptid
));
759 target_disable_btrace (btp
->target
);
768 btrace_teardown (struct thread_info
*tp
)
770 struct btrace_thread_info
*btp
= &tp
->btrace
;
773 if (btp
->target
== NULL
)
776 DEBUG ("teardown thread %d (%s)", tp
->num
, target_pid_to_str (tp
->ptid
));
778 target_teardown_btrace (btp
->target
);
784 /* Stitch branch trace in BTS format. */
787 btrace_stitch_bts (struct btrace_data_bts
*btrace
, struct thread_info
*tp
)
789 struct btrace_thread_info
*btinfo
;
790 struct btrace_function
*last_bfun
;
791 struct btrace_insn
*last_insn
;
792 btrace_block_s
*first_new_block
;
794 btinfo
= &tp
->btrace
;
795 last_bfun
= btinfo
->end
;
796 gdb_assert (last_bfun
!= NULL
);
797 gdb_assert (!VEC_empty (btrace_block_s
, btrace
->blocks
));
799 /* If the existing trace ends with a gap, we just glue the traces
800 together. We need to drop the last (i.e. chronologically first) block
801 of the new trace, though, since we can't fill in the start address.*/
802 if (VEC_empty (btrace_insn_s
, last_bfun
->insn
))
804 VEC_pop (btrace_block_s
, btrace
->blocks
);
808 /* Beware that block trace starts with the most recent block, so the
809 chronologically first block in the new trace is the last block in
810 the new trace's block vector. */
811 first_new_block
= VEC_last (btrace_block_s
, btrace
->blocks
);
812 last_insn
= VEC_last (btrace_insn_s
, last_bfun
->insn
);
814 /* If the current PC at the end of the block is the same as in our current
815 trace, there are two explanations:
816 1. we executed the instruction and some branch brought us back.
817 2. we have not made any progress.
818 In the first case, the delta trace vector should contain at least two
820 In the second case, the delta trace vector should contain exactly one
821 entry for the partial block containing the current PC. Remove it. */
822 if (first_new_block
->end
== last_insn
->pc
823 && VEC_length (btrace_block_s
, btrace
->blocks
) == 1)
825 VEC_pop (btrace_block_s
, btrace
->blocks
);
829 DEBUG ("stitching %s to %s", ftrace_print_insn_addr (last_insn
),
830 core_addr_to_string_nz (first_new_block
->end
));
832 /* Do a simple sanity check to make sure we don't accidentally end up
833 with a bad block. This should not occur in practice. */
834 if (first_new_block
->end
< last_insn
->pc
)
836 warning (_("Error while trying to read delta trace. Falling back to "
841 /* We adjust the last block to start at the end of our current trace. */
842 gdb_assert (first_new_block
->begin
== 0);
843 first_new_block
->begin
= last_insn
->pc
;
845 /* We simply pop the last insn so we can insert it again as part of
846 the normal branch trace computation.
847 Since instruction iterators are based on indices in the instructions
848 vector, we don't leave any pointers dangling. */
849 DEBUG ("pruning insn at %s for stitching",
850 ftrace_print_insn_addr (last_insn
));
852 VEC_pop (btrace_insn_s
, last_bfun
->insn
);
854 /* The instructions vector may become empty temporarily if this has
855 been the only instruction in this function segment.
856 This violates the invariant but will be remedied shortly by
857 btrace_compute_ftrace when we add the new trace. */
859 /* The only case where this would hurt is if the entire trace consisted
860 of just that one instruction. If we remove it, we might turn the now
861 empty btrace function segment into a gap. But we don't want gaps at
862 the beginning. To avoid this, we remove the entire old trace. */
863 if (last_bfun
== btinfo
->begin
&& VEC_empty (btrace_insn_s
, last_bfun
->insn
))
869 /* Adjust the block trace in order to stitch old and new trace together.
870 BTRACE is the new delta trace between the last and the current stop.
871 TP is the traced thread.
872 May modify BTRACE as well as the existing trace in TP.
873 Return 0 on success, -1 otherwise. */
876 btrace_stitch_trace (struct btrace_data
*btrace
, struct thread_info
*tp
)
878 /* If we don't have trace, there's nothing to do. */
879 if (btrace_data_empty (btrace
))
882 switch (btrace
->format
)
884 case BTRACE_FORMAT_NONE
:
887 case BTRACE_FORMAT_BTS
:
888 return btrace_stitch_bts (&btrace
->variant
.bts
, tp
);
891 internal_error (__FILE__
, __LINE__
, _("Unkown branch trace format."));
894 /* Clear the branch trace histories in BTINFO. */
897 btrace_clear_history (struct btrace_thread_info
*btinfo
)
899 xfree (btinfo
->insn_history
);
900 xfree (btinfo
->call_history
);
901 xfree (btinfo
->replay
);
903 btinfo
->insn_history
= NULL
;
904 btinfo
->call_history
= NULL
;
905 btinfo
->replay
= NULL
;
911 btrace_fetch (struct thread_info
*tp
)
913 struct btrace_thread_info
*btinfo
;
914 struct btrace_target_info
*tinfo
;
915 struct btrace_data btrace
;
916 struct cleanup
*cleanup
;
919 DEBUG ("fetch thread %d (%s)", tp
->num
, target_pid_to_str (tp
->ptid
));
921 btinfo
= &tp
->btrace
;
922 tinfo
= btinfo
->target
;
926 /* There's no way we could get new trace while replaying.
927 On the other hand, delta trace would return a partial record with the
928 current PC, which is the replay PC, not the last PC, as expected. */
929 if (btinfo
->replay
!= NULL
)
932 btrace_data_init (&btrace
);
933 cleanup
= make_cleanup_btrace_data (&btrace
);
935 /* Let's first try to extend the trace we already have. */
936 if (btinfo
->end
!= NULL
)
938 errcode
= target_read_btrace (&btrace
, tinfo
, BTRACE_READ_DELTA
);
941 /* Success. Let's try to stitch the traces together. */
942 errcode
= btrace_stitch_trace (&btrace
, tp
);
946 /* We failed to read delta trace. Let's try to read new trace. */
947 errcode
= target_read_btrace (&btrace
, tinfo
, BTRACE_READ_NEW
);
949 /* If we got any new trace, discard what we have. */
950 if (errcode
== 0 && !btrace_data_empty (&btrace
))
954 /* If we were not able to read the trace, we start over. */
958 errcode
= target_read_btrace (&btrace
, tinfo
, BTRACE_READ_ALL
);
962 errcode
= target_read_btrace (&btrace
, tinfo
, BTRACE_READ_ALL
);
964 /* If we were not able to read the branch trace, signal an error. */
966 error (_("Failed to read branch trace."));
968 /* Compute the trace, provided we have any. */
969 if (!btrace_data_empty (&btrace
))
971 btrace_clear_history (btinfo
);
972 btrace_compute_ftrace (tp
, &btrace
);
975 do_cleanups (cleanup
);
981 btrace_clear (struct thread_info
*tp
)
983 struct btrace_thread_info
*btinfo
;
984 struct btrace_function
*it
, *trash
;
986 DEBUG ("clear thread %d (%s)", tp
->num
, target_pid_to_str (tp
->ptid
));
988 /* Make sure btrace frames that may hold a pointer into the branch
989 trace data are destroyed. */
990 reinit_frame_cache ();
992 btinfo
= &tp
->btrace
;
1003 btinfo
->begin
= NULL
;
1007 btrace_clear_history (btinfo
);
1013 btrace_free_objfile (struct objfile
*objfile
)
1015 struct thread_info
*tp
;
1017 DEBUG ("free objfile");
1019 ALL_NON_EXITED_THREADS (tp
)
1023 #if defined (HAVE_LIBEXPAT)
1025 /* Check the btrace document version. */
1028 check_xml_btrace_version (struct gdb_xml_parser
*parser
,
1029 const struct gdb_xml_element
*element
,
1030 void *user_data
, VEC (gdb_xml_value_s
) *attributes
)
1032 const char *version
= xml_find_attribute (attributes
, "version")->value
;
1034 if (strcmp (version
, "1.0") != 0)
1035 gdb_xml_error (parser
, _("Unsupported btrace version: \"%s\""), version
);
1038 /* Parse a btrace "block" xml record. */
1041 parse_xml_btrace_block (struct gdb_xml_parser
*parser
,
1042 const struct gdb_xml_element
*element
,
1043 void *user_data
, VEC (gdb_xml_value_s
) *attributes
)
1045 struct btrace_data
*btrace
;
1046 struct btrace_block
*block
;
1047 ULONGEST
*begin
, *end
;
1051 switch (btrace
->format
)
1053 case BTRACE_FORMAT_BTS
:
1056 case BTRACE_FORMAT_NONE
:
1057 btrace
->format
= BTRACE_FORMAT_BTS
;
1058 btrace
->variant
.bts
.blocks
= NULL
;
1062 gdb_xml_error (parser
, _("Btrace format error."));
1065 begin
= xml_find_attribute (attributes
, "begin")->value
;
1066 end
= xml_find_attribute (attributes
, "end")->value
;
1068 block
= VEC_safe_push (btrace_block_s
, btrace
->variant
.bts
.blocks
, NULL
);
1069 block
->begin
= *begin
;
1073 static const struct gdb_xml_attribute block_attributes
[] = {
1074 { "begin", GDB_XML_AF_NONE
, gdb_xml_parse_attr_ulongest
, NULL
},
1075 { "end", GDB_XML_AF_NONE
, gdb_xml_parse_attr_ulongest
, NULL
},
1076 { NULL
, GDB_XML_AF_NONE
, NULL
, NULL
}
1079 static const struct gdb_xml_attribute btrace_attributes
[] = {
1080 { "version", GDB_XML_AF_NONE
, NULL
, NULL
},
1081 { NULL
, GDB_XML_AF_NONE
, NULL
, NULL
}
1084 static const struct gdb_xml_element btrace_children
[] = {
1085 { "block", block_attributes
, NULL
,
1086 GDB_XML_EF_REPEATABLE
| GDB_XML_EF_OPTIONAL
, parse_xml_btrace_block
, NULL
},
1087 { NULL
, NULL
, NULL
, GDB_XML_EF_NONE
, NULL
, NULL
}
1090 static const struct gdb_xml_element btrace_elements
[] = {
1091 { "btrace", btrace_attributes
, btrace_children
, GDB_XML_EF_NONE
,
1092 check_xml_btrace_version
, NULL
},
1093 { NULL
, NULL
, NULL
, GDB_XML_EF_NONE
, NULL
, NULL
}
1096 #endif /* defined (HAVE_LIBEXPAT) */
/* Parse a branch trace xml document XML into BTRACE.  */

void
parse_xml_btrace (struct btrace_data *btrace, const char *buffer)
{
  struct cleanup *cleanup;
  int errcode;

#if defined (HAVE_LIBEXPAT)

  btrace->format = BTRACE_FORMAT_NONE;

  cleanup = make_cleanup_btrace_data (btrace);
  errcode = gdb_xml_parse_quick (_("btrace"), "btrace.dtd", btrace_elements,
				 buffer, btrace);
  if (errcode != 0)
    error (_("Error parsing branch trace."));

  /* Keep parse results.  */
  discard_cleanups (cleanup);

#else  /* !defined (HAVE_LIBEXPAT) */

  error (_("Cannot process branch trace.  XML parsing is not supported."));

#endif  /* !defined (HAVE_LIBEXPAT) */
}
1126 #if defined (HAVE_LIBEXPAT)
1128 /* Parse a btrace-conf "bts" xml record. */
1131 parse_xml_btrace_conf_bts (struct gdb_xml_parser
*parser
,
1132 const struct gdb_xml_element
*element
,
1133 void *user_data
, VEC (gdb_xml_value_s
) *attributes
)
1135 struct btrace_config
*conf
;
1136 struct gdb_xml_value
*size
;
1139 conf
->format
= BTRACE_FORMAT_BTS
;
1142 size
= xml_find_attribute (attributes
, "size");
1144 conf
->bts
.size
= (unsigned int) * (ULONGEST
*) size
->value
;
1147 static const struct gdb_xml_attribute btrace_conf_bts_attributes
[] = {
1148 { "size", GDB_XML_AF_OPTIONAL
, gdb_xml_parse_attr_ulongest
, NULL
},
1149 { NULL
, GDB_XML_AF_NONE
, NULL
, NULL
}
1152 static const struct gdb_xml_element btrace_conf_children
[] = {
1153 { "bts", btrace_conf_bts_attributes
, NULL
, GDB_XML_EF_OPTIONAL
,
1154 parse_xml_btrace_conf_bts
, NULL
},
1155 { NULL
, NULL
, NULL
, GDB_XML_EF_NONE
, NULL
, NULL
}
1158 static const struct gdb_xml_attribute btrace_conf_attributes
[] = {
1159 { "version", GDB_XML_AF_NONE
, NULL
, NULL
},
1160 { NULL
, GDB_XML_AF_NONE
, NULL
, NULL
}
1163 static const struct gdb_xml_element btrace_conf_elements
[] = {
1164 { "btrace-conf", btrace_conf_attributes
, btrace_conf_children
,
1165 GDB_XML_EF_NONE
, NULL
, NULL
},
1166 { NULL
, NULL
, NULL
, GDB_XML_EF_NONE
, NULL
, NULL
}
1169 #endif /* defined (HAVE_LIBEXPAT) */
/* Parse a branch trace configuration xml document XML into CONF.  */

void
parse_xml_btrace_conf (struct btrace_config *conf, const char *xml)
{
  int errcode;

#if defined (HAVE_LIBEXPAT)

  errcode = gdb_xml_parse_quick (_("btrace-conf"), "btrace-conf.dtd",
				 btrace_conf_elements, xml, conf);
  if (errcode != 0)
    error (_("Error parsing branch trace configuration."));

#else  /* !defined (HAVE_LIBEXPAT) */

  error (_("XML parsing is not supported."));

#endif  /* !defined (HAVE_LIBEXPAT) */
}
1194 const struct btrace_insn
*
1195 btrace_insn_get (const struct btrace_insn_iterator
*it
)
1197 const struct btrace_function
*bfun
;
1198 unsigned int index
, end
;
1201 bfun
= it
->function
;
1203 /* Check if the iterator points to a gap in the trace. */
1204 if (bfun
->errcode
!= 0)
1207 /* The index is within the bounds of this function's instruction vector. */
1208 end
= VEC_length (btrace_insn_s
, bfun
->insn
);
1209 gdb_assert (0 < end
);
1210 gdb_assert (index
< end
);
1212 return VEC_index (btrace_insn_s
, bfun
->insn
, index
);
1218 btrace_insn_number (const struct btrace_insn_iterator
*it
)
1220 const struct btrace_function
*bfun
;
1222 bfun
= it
->function
;
1224 /* Return zero if the iterator points to a gap in the trace. */
1225 if (bfun
->errcode
!= 0)
1228 return bfun
->insn_offset
+ it
->index
;
1234 btrace_insn_begin (struct btrace_insn_iterator
*it
,
1235 const struct btrace_thread_info
*btinfo
)
1237 const struct btrace_function
*bfun
;
1239 bfun
= btinfo
->begin
;
1241 error (_("No trace."));
1243 it
->function
= bfun
;
1250 btrace_insn_end (struct btrace_insn_iterator
*it
,
1251 const struct btrace_thread_info
*btinfo
)
1253 const struct btrace_function
*bfun
;
1254 unsigned int length
;
1258 error (_("No trace."));
1260 length
= VEC_length (btrace_insn_s
, bfun
->insn
);
1262 /* The last function may either be a gap or it contains the current
1263 instruction, which is one past the end of the execution trace; ignore
1268 it
->function
= bfun
;
1275 btrace_insn_next (struct btrace_insn_iterator
*it
, unsigned int stride
)
1277 const struct btrace_function
*bfun
;
1278 unsigned int index
, steps
;
1280 bfun
= it
->function
;
1286 unsigned int end
, space
, adv
;
1288 end
= VEC_length (btrace_insn_s
, bfun
->insn
);
1290 /* An empty function segment represents a gap in the trace. We count
1291 it as one instruction. */
1294 const struct btrace_function
*next
;
1296 next
= bfun
->flow
.next
;
1309 gdb_assert (0 < end
);
1310 gdb_assert (index
< end
);
1312 /* Compute the number of instructions remaining in this segment. */
1313 space
= end
- index
;
1315 /* Advance the iterator as far as possible within this segment. */
1316 adv
= min (space
, stride
);
1321 /* Move to the next function if we're at the end of this one. */
1324 const struct btrace_function
*next
;
1326 next
= bfun
->flow
.next
;
1329 /* We stepped past the last function.
1331 Let's adjust the index to point to the last instruction in
1332 the previous function. */
1338 /* We now point to the first instruction in the new function. */
1343 /* We did make progress. */
1344 gdb_assert (adv
> 0);
1347 /* Update the iterator. */
1348 it
->function
= bfun
;
1357 btrace_insn_prev (struct btrace_insn_iterator
*it
, unsigned int stride
)
1359 const struct btrace_function
*bfun
;
1360 unsigned int index
, steps
;
1362 bfun
= it
->function
;
1370 /* Move to the previous function if we're at the start of this one. */
1373 const struct btrace_function
*prev
;
1375 prev
= bfun
->flow
.prev
;
1379 /* We point to one after the last instruction in the new function. */
1381 index
= VEC_length (btrace_insn_s
, bfun
->insn
);
1383 /* An empty function segment represents a gap in the trace. We count
1384 it as one instruction. */
1394 /* Advance the iterator as far as possible within this segment. */
1395 adv
= min (index
, stride
);
1401 /* We did make progress. */
1402 gdb_assert (adv
> 0);
1405 /* Update the iterator. */
1406 it
->function
= bfun
;
1415 btrace_insn_cmp (const struct btrace_insn_iterator
*lhs
,
1416 const struct btrace_insn_iterator
*rhs
)
1418 unsigned int lnum
, rnum
;
1420 lnum
= btrace_insn_number (lhs
);
1421 rnum
= btrace_insn_number (rhs
);
1423 /* A gap has an instruction number of zero. Things are getting more
1424 complicated if gaps are involved.
1426 We take the instruction number offset from the iterator's function.
1427 This is the number of the first instruction after the gap.
1429 This is OK as long as both lhs and rhs point to gaps. If only one of
1430 them does, we need to adjust the number based on the other's regular
1431 instruction number. Otherwise, a gap might compare equal to an
1434 if (lnum
== 0 && rnum
== 0)
1436 lnum
= lhs
->function
->insn_offset
;
1437 rnum
= rhs
->function
->insn_offset
;
1441 lnum
= lhs
->function
->insn_offset
;
1448 rnum
= rhs
->function
->insn_offset
;
1454 return (int) (lnum
- rnum
);
1460 btrace_find_insn_by_number (struct btrace_insn_iterator
*it
,
1461 const struct btrace_thread_info
*btinfo
,
1462 unsigned int number
)
1464 const struct btrace_function
*bfun
;
1465 unsigned int end
, length
;
1467 for (bfun
= btinfo
->end
; bfun
!= NULL
; bfun
= bfun
->flow
.prev
)
1470 if (bfun
->errcode
!= 0)
1473 if (bfun
->insn_offset
<= number
)
1480 length
= VEC_length (btrace_insn_s
, bfun
->insn
);
1481 gdb_assert (length
> 0);
1483 end
= bfun
->insn_offset
+ length
;
1487 it
->function
= bfun
;
1488 it
->index
= number
- bfun
->insn_offset
;
1495 const struct btrace_function
*
1496 btrace_call_get (const struct btrace_call_iterator
*it
)
1498 return it
->function
;
1504 btrace_call_number (const struct btrace_call_iterator
*it
)
1506 const struct btrace_thread_info
*btinfo
;
1507 const struct btrace_function
*bfun
;
1510 btinfo
= it
->btinfo
;
1511 bfun
= it
->function
;
1513 return bfun
->number
;
1515 /* For the end iterator, i.e. bfun == NULL, we return one more than the
1516 number of the last function. */
1518 insns
= VEC_length (btrace_insn_s
, bfun
->insn
);
1520 /* If the function contains only a single instruction (i.e. the current
1521 instruction), it will be skipped and its number is already the number
1524 return bfun
->number
;
1526 /* Otherwise, return one more than the number of the last function. */
1527 return bfun
->number
+ 1;
1533 btrace_call_begin (struct btrace_call_iterator
*it
,
1534 const struct btrace_thread_info
*btinfo
)
1536 const struct btrace_function
*bfun
;
1538 bfun
= btinfo
->begin
;
1540 error (_("No trace."));
1542 it
->btinfo
= btinfo
;
1543 it
->function
= bfun
;
1549 btrace_call_end (struct btrace_call_iterator
*it
,
1550 const struct btrace_thread_info
*btinfo
)
1552 const struct btrace_function
*bfun
;
1556 error (_("No trace."));
1558 it
->btinfo
= btinfo
;
1559 it
->function
= NULL
;
1565 btrace_call_next (struct btrace_call_iterator
*it
, unsigned int stride
)
1567 const struct btrace_function
*bfun
;
1570 bfun
= it
->function
;
1572 while (bfun
!= NULL
)
1574 const struct btrace_function
*next
;
1577 next
= bfun
->flow
.next
;
1580 /* Ignore the last function if it only contains a single
1581 (i.e. the current) instruction. */
1582 insns
= VEC_length (btrace_insn_s
, bfun
->insn
);
1587 if (stride
== steps
)
1594 it
->function
= bfun
;
1601 btrace_call_prev (struct btrace_call_iterator
*it
, unsigned int stride
)
1603 const struct btrace_thread_info
*btinfo
;
1604 const struct btrace_function
*bfun
;
1607 bfun
= it
->function
;
1614 btinfo
= it
->btinfo
;
1619 /* Ignore the last function if it only contains a single
1620 (i.e. the current) instruction. */
1621 insns
= VEC_length (btrace_insn_s
, bfun
->insn
);
1623 bfun
= bfun
->flow
.prev
;
1631 while (steps
< stride
)
1633 const struct btrace_function
*prev
;
1635 prev
= bfun
->flow
.prev
;
1643 it
->function
= bfun
;
1650 btrace_call_cmp (const struct btrace_call_iterator
*lhs
,
1651 const struct btrace_call_iterator
*rhs
)
1653 unsigned int lnum
, rnum
;
1655 lnum
= btrace_call_number (lhs
);
1656 rnum
= btrace_call_number (rhs
);
1658 return (int) (lnum
- rnum
);
1664 btrace_find_call_by_number (struct btrace_call_iterator
*it
,
1665 const struct btrace_thread_info
*btinfo
,
1666 unsigned int number
)
1668 const struct btrace_function
*bfun
;
1670 for (bfun
= btinfo
->end
; bfun
!= NULL
; bfun
= bfun
->flow
.prev
)
1674 bnum
= bfun
->number
;
1677 it
->btinfo
= btinfo
;
1678 it
->function
= bfun
;
1682 /* Functions are ordered and numbered consecutively. We could bail out
1683 earlier. On the other hand, it is very unlikely that we search for
1684 a nonexistent function. */
1693 btrace_set_insn_history (struct btrace_thread_info
*btinfo
,
1694 const struct btrace_insn_iterator
*begin
,
1695 const struct btrace_insn_iterator
*end
)
1697 if (btinfo
->insn_history
== NULL
)
1698 btinfo
->insn_history
= xzalloc (sizeof (*btinfo
->insn_history
));
1700 btinfo
->insn_history
->begin
= *begin
;
1701 btinfo
->insn_history
->end
= *end
;
1707 btrace_set_call_history (struct btrace_thread_info
*btinfo
,
1708 const struct btrace_call_iterator
*begin
,
1709 const struct btrace_call_iterator
*end
)
1711 gdb_assert (begin
->btinfo
== end
->btinfo
);
1713 if (btinfo
->call_history
== NULL
)
1714 btinfo
->call_history
= xzalloc (sizeof (*btinfo
->call_history
));
1716 btinfo
->call_history
->begin
= *begin
;
1717 btinfo
->call_history
->end
= *end
;
1723 btrace_is_replaying (struct thread_info
*tp
)
1725 return tp
->btrace
.replay
!= NULL
;
1731 btrace_is_empty (struct thread_info
*tp
)
1733 struct btrace_insn_iterator begin
, end
;
1734 struct btrace_thread_info
*btinfo
;
1736 btinfo
= &tp
->btrace
;
1738 if (btinfo
->begin
== NULL
)
1741 btrace_insn_begin (&begin
, btinfo
);
1742 btrace_insn_end (&end
, btinfo
);
1744 return btrace_insn_cmp (&begin
, &end
) == 0;
/* Forward the cleanup request to btrace_data_fini; ARG is the
   struct btrace_data to finalize.  */

static void
do_btrace_data_cleanup (void *arg)
{
  btrace_data_fini (arg);
}
1758 make_cleanup_btrace_data (struct btrace_data
*data
)
1760 return make_cleanup (do_btrace_data_cleanup
, data
);