/* Branch trace support for GDB, the GNU debugger.

   Copyright (C) 2013-2017 Free Software Foundation, Inc.

   Contributed by Intel Corp. <markus.t.metzger@intel.com>

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
#include "gdbthread.h"
#include "filenames.h"
#include "xml-support.h"
#include "cli/cli-utils.h"
/* Command lists for btrace maintenance commands.  */
static struct cmd_list_element *maint_btrace_cmdlist;
static struct cmd_list_element *maint_btrace_set_cmdlist;
static struct cmd_list_element *maint_btrace_show_cmdlist;
static struct cmd_list_element *maint_btrace_pt_set_cmdlist;
static struct cmd_list_element *maint_btrace_pt_show_cmdlist;
/* Control whether to skip PAD packets when computing the packet history.  */
static int maint_btrace_pt_skip_pad = 1;
/* A vector of function segments.  */
typedef struct btrace_function * bfun_s;
DEF_VEC_P (bfun_s);

static void btrace_add_pc (struct thread_info *tp);
/* Print a record debug message.  Use do ... while (0) to avoid ambiguities
   when used in if statements.  */

#define DEBUG(msg, args...) \
  do \
    { \
      if (record_debug != 0) \
	fprintf_unfiltered (gdb_stdlog, \
			    "[btrace] " msg "\n", ##args); \
    } \
  while (0)

#define DEBUG_FTRACE(msg, args...) DEBUG ("[ftrace] " msg, ##args)
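
/* For example, DEBUG ("enable thread %s (%s)", ...) writes a line prefixed
   with "[btrace] " to gdb_stdlog whenever record debugging (record_debug)
   is enabled; DEBUG_FTRACE adds a further "[ftrace] " prefix.  */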
/* Return the function name of a recorded function segment for printing.
   This function never returns NULL.  */

static const char *
ftrace_print_function_name (const struct btrace_function *bfun)
{
  struct minimal_symbol *msym;
  struct symbol *sym;

  msym = bfun->msym;
  sym = bfun->sym;

  if (sym != NULL)
    return SYMBOL_PRINT_NAME (sym);

  if (msym != NULL)
    return MSYMBOL_PRINT_NAME (msym);

  return "<unknown>";
}
/* Return the file name of a recorded function segment for printing.
   This function never returns NULL.  */

static const char *
ftrace_print_filename (const struct btrace_function *bfun)
{
  struct symbol *sym;
  const char *filename;

  sym = bfun->sym;

  if (sym != NULL)
    filename = symtab_to_filename_for_display (symbol_symtab (sym));
  else
    filename = "<unknown>";

  return filename;
}
/* Return a string representation of the address of an instruction.
   This function never returns NULL.  */

static const char *
ftrace_print_insn_addr (const struct btrace_insn *insn)
{
  if (insn == NULL)
    return "<nil>";

  return core_addr_to_string_nz (insn->pc);
}
/* Print an ftrace debug status message.  */

static void
ftrace_debug (const struct btrace_function *bfun, const char *prefix)
{
  const char *fun, *file;
  unsigned int ibegin, iend;
  int level;

  fun = ftrace_print_function_name (bfun);
  file = ftrace_print_filename (bfun);
  level = bfun->level;

  ibegin = bfun->insn_offset;
  iend = ibegin + VEC_length (btrace_insn_s, bfun->insn);

  DEBUG_FTRACE ("%s: fun = %s, file = %s, level = %d, insn = [%u; %u)",
		prefix, fun, file, level, ibegin, iend);
}
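
/* With record debugging enabled, a call such as ftrace_debug (bfun, "new call")
   prints, e.g., "[btrace] [ftrace] new call: fun = main, file = hello.c,
   level = 1, insn = [1; 9)"; the values here are illustrative only.  */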
/* Return the number of instructions in a given function call segment.  */

static unsigned int
ftrace_call_num_insn (const struct btrace_function *bfun)
{
  if (bfun == NULL)
    return 0;

  /* A gap is always counted as one instruction.  */
  if (bfun->errcode != 0)
    return 1;

  return VEC_length (btrace_insn_s, bfun->insn);
}
/* Return non-zero if BFUN does not match MFUN and FUN,
   return zero otherwise.  */

static int
ftrace_function_switched (const struct btrace_function *bfun,
			  const struct minimal_symbol *mfun,
			  const struct symbol *fun)
{
  struct minimal_symbol *msym;
  struct symbol *sym;

  msym = bfun->msym;
  sym = bfun->sym;

  /* If the minimal symbol changed, we certainly switched functions.  */
  if (mfun != NULL && msym != NULL
      && strcmp (MSYMBOL_LINKAGE_NAME (mfun), MSYMBOL_LINKAGE_NAME (msym)) != 0)
    return 1;

  /* If the symbol changed, we certainly switched functions.  */
  if (fun != NULL && sym != NULL)
    {
      const char *bfname, *fname;

      /* Check the function name.  */
      if (strcmp (SYMBOL_LINKAGE_NAME (fun), SYMBOL_LINKAGE_NAME (sym)) != 0)
	return 1;

      /* Check the location of those functions, as well.  */
      bfname = symtab_to_fullname (symbol_symtab (sym));
      fname = symtab_to_fullname (symbol_symtab (fun));

      if (filename_cmp (fname, bfname) != 0)
	return 1;
    }

  /* If we lost symbol information, we switched functions.  */
  if (!(msym == NULL && sym == NULL) && mfun == NULL && fun == NULL)
    return 1;

  /* If we gained symbol information, we switched functions.  */
  if (msym == NULL && sym == NULL && !(mfun == NULL && fun == NULL))
    return 1;

  return 0;
}
/* Allocate and initialize a new branch trace function segment.
   PREV is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_function (struct btrace_function *prev,
		     struct minimal_symbol *mfun,
		     struct symbol *fun)
{
  struct btrace_function *bfun;

  bfun = XCNEW (struct btrace_function);

  bfun->msym = mfun;
  bfun->sym = fun;
  bfun->flow.prev = prev;

  if (prev == NULL)
    {
      /* Start counting at one.  */
      bfun->number = 1;
      bfun->insn_offset = 1;
    }
  else
    {
      gdb_assert (prev->flow.next == NULL);
      prev->flow.next = bfun;

      bfun->number = prev->number + 1;
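      /* Instruction numbers are global across the trace and start at one,
	 so this segment's first instruction number is the predecessor's
	 offset plus the predecessor's instruction count (a gap counts as
	 one instruction, see ftrace_call_num_insn).  */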
      bfun->insn_offset = prev->insn_offset + ftrace_call_num_insn (prev);
      bfun->level = prev->level;
    }

  return bfun;
}
/* Update the UP field of a function segment.  */

static void
ftrace_update_caller (struct btrace_function *bfun,
		      struct btrace_function *caller,
		      enum btrace_function_flag flags)
{
  if (bfun->up != NULL)
    ftrace_debug (bfun, "updating caller");

  bfun->up = caller;
  bfun->flags = flags;

  ftrace_debug (bfun, "set caller");
  ftrace_debug (caller, "..to");
}
/* Fix up the caller for all segments of a function.  */

static void
ftrace_fixup_caller (struct btrace_function *bfun,
		     struct btrace_function *caller,
		     enum btrace_function_flag flags)
{
  struct btrace_function *prev, *next;

  ftrace_update_caller (bfun, caller, flags);

  /* Update all function segments belonging to the same function.  */
  for (prev = bfun->segment.prev; prev != NULL; prev = prev->segment.prev)
    ftrace_update_caller (prev, caller, flags);

  for (next = bfun->segment.next; next != NULL; next = next->segment.next)
    ftrace_update_caller (next, caller, flags);
}
/* Add a new function segment for a call.
   CALLER is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_call (struct btrace_function *caller,
		 struct minimal_symbol *mfun,
		 struct symbol *fun)
{
  struct btrace_function *bfun;

  bfun = ftrace_new_function (caller, mfun, fun);
  bfun->up = caller;
  bfun->level = caller->level + 1;

  ftrace_debug (bfun, "new call");

  return bfun;
}

/* Add a new function segment for a tail call.
   CALLER is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_tailcall (struct btrace_function *caller,
		     struct minimal_symbol *mfun,
		     struct symbol *fun)
{
  struct btrace_function *bfun;

  bfun = ftrace_new_function (caller, mfun, fun);
  bfun->up = caller;
  bfun->level = caller->level + 1;
  bfun->flags |= BFUN_UP_LINKS_TO_TAILCALL;

  ftrace_debug (bfun, "new tail call");

  return bfun;
}
318 /* Return the caller of BFUN or NULL if there is none. This function skips
319 tail calls in the call chain. */
320 static struct btrace_function
*
321 ftrace_get_caller (struct btrace_function
*bfun
)
323 for (; bfun
!= NULL
; bfun
= bfun
->up
)
324 if ((bfun
->flags
& BFUN_UP_LINKS_TO_TAILCALL
) == 0)
330 /* Find the innermost caller in the back trace of BFUN with MFUN/FUN
331 symbol information. */
333 static struct btrace_function
*
334 ftrace_find_caller (struct btrace_function
*bfun
,
335 struct minimal_symbol
*mfun
,
338 for (; bfun
!= NULL
; bfun
= bfun
->up
)
340 /* Skip functions with incompatible symbol information. */
341 if (ftrace_function_switched (bfun
, mfun
, fun
))
344 /* This is the function segment we're looking for. */
351 /* Find the innermost caller in the back trace of BFUN, skipping all
352 function segments that do not end with a call instruction (e.g.
353 tail calls ending with a jump). */
355 static struct btrace_function
*
356 ftrace_find_call (struct btrace_function
*bfun
)
358 for (; bfun
!= NULL
; bfun
= bfun
->up
)
360 struct btrace_insn
*last
;
363 if (bfun
->errcode
!= 0)
366 last
= VEC_last (btrace_insn_s
, bfun
->insn
);
368 if (last
->iclass
== BTRACE_INSN_CALL
)
375 /* Add a continuation segment for a function into which we return.
376 PREV is the chronologically preceding function segment.
377 MFUN and FUN are the symbol information we have for this function. */
379 static struct btrace_function
*
380 ftrace_new_return (struct btrace_function
*prev
,
381 struct minimal_symbol
*mfun
,
384 struct btrace_function
*bfun
, *caller
;
386 bfun
= ftrace_new_function (prev
, mfun
, fun
);
388 /* It is important to start at PREV's caller. Otherwise, we might find
389 PREV itself, if PREV is a recursive function. */
390 caller
= ftrace_find_caller (prev
->up
, mfun
, fun
);
393 /* The caller of PREV is the preceding btrace function segment in this
394 function instance. */
395 gdb_assert (caller
->segment
.next
== NULL
);
397 caller
->segment
.next
= bfun
;
398 bfun
->segment
.prev
= caller
;
400 /* Maintain the function level. */
401 bfun
->level
= caller
->level
;
403 /* Maintain the call stack. */
404 bfun
->up
= caller
->up
;
405 bfun
->flags
= caller
->flags
;
407 ftrace_debug (bfun
, "new return");
411 /* We did not find a caller. This could mean that something went
412 wrong or that the call is simply not included in the trace. */
414 /* Let's search for some actual call. */
415 caller
= ftrace_find_call (prev
->up
);
418 /* There is no call in PREV's back trace. We assume that the
419 branch trace did not include it. */
421 /* Let's find the topmost function and add a new caller for it.
422 This should handle a series of initial tail calls. */
423 while (prev
->up
!= NULL
)
426 bfun
->level
= prev
->level
- 1;
428 /* Fix up the call stack for PREV. */
429 ftrace_fixup_caller (prev
, bfun
, BFUN_UP_LINKS_TO_RET
);
431 ftrace_debug (bfun
, "new return - no caller");
435 /* There is a call in PREV's back trace to which we should have
436 returned but didn't. Let's start a new, separate back trace
437 from PREV's level. */
438 bfun
->level
= prev
->level
- 1;
440 /* We fix up the back trace for PREV but leave other function segments
441 on the same level as they are.
442 This should handle things like schedule () correctly where we're
443 switching contexts. */
445 prev
->flags
= BFUN_UP_LINKS_TO_RET
;
447 ftrace_debug (bfun
, "new return - unknown caller");
454 /* Add a new function segment for a function switch.
455 PREV is the chronologically preceding function segment.
456 MFUN and FUN are the symbol information we have for this function. */
458 static struct btrace_function
*
459 ftrace_new_switch (struct btrace_function
*prev
,
460 struct minimal_symbol
*mfun
,
463 struct btrace_function
*bfun
;
465 /* This is an unexplained function switch. We can't really be sure about the
466 call stack, yet the best I can think of right now is to preserve it. */
467 bfun
= ftrace_new_function (prev
, mfun
, fun
);
469 bfun
->flags
= prev
->flags
;
471 ftrace_debug (bfun
, "new switch");
476 /* Add a new function segment for a gap in the trace due to a decode error.
477 PREV is the chronologically preceding function segment.
478 ERRCODE is the format-specific error code. */
480 static struct btrace_function
*
481 ftrace_new_gap (struct btrace_function
*prev
, int errcode
)
483 struct btrace_function
*bfun
;
485 /* We hijack prev if it was empty. */
486 if (prev
!= NULL
&& prev
->errcode
== 0
487 && VEC_empty (btrace_insn_s
, prev
->insn
))
490 bfun
= ftrace_new_function (prev
, NULL
, NULL
);
492 bfun
->errcode
= errcode
;
494 ftrace_debug (bfun
, "new gap");
/* Update BFUN with respect to the instruction at PC.  This may create new
   function segments.
   Return the chronologically latest function segment, never NULL.  */
503 static struct btrace_function
*
504 ftrace_update_function (struct btrace_function
*bfun
, CORE_ADDR pc
)
506 struct bound_minimal_symbol bmfun
;
507 struct minimal_symbol
*mfun
;
509 struct btrace_insn
*last
;
511 /* Try to determine the function we're in. We use both types of symbols
512 to avoid surprises when we sometimes get a full symbol and sometimes
513 only a minimal symbol. */
514 fun
= find_pc_function (pc
);
515 bmfun
= lookup_minimal_symbol_by_pc (pc
);
518 if (fun
== NULL
&& mfun
== NULL
)
519 DEBUG_FTRACE ("no symbol at %s", core_addr_to_string_nz (pc
));
521 /* If we didn't have a function or if we had a gap before, we create one. */
522 if (bfun
== NULL
|| bfun
->errcode
!= 0)
523 return ftrace_new_function (bfun
, mfun
, fun
);
525 /* Check the last instruction, if we have one.
526 We do this check first, since it allows us to fill in the call stack
527 links in addition to the normal flow links. */
529 if (!VEC_empty (btrace_insn_s
, bfun
->insn
))
530 last
= VEC_last (btrace_insn_s
, bfun
->insn
);
534 switch (last
->iclass
)
536 case BTRACE_INSN_RETURN
:
540 /* On some systems, _dl_runtime_resolve returns to the resolved
541 function instead of jumping to it. From our perspective,
542 however, this is a tailcall.
543 If we treated it as return, we wouldn't be able to find the
544 resolved function in our stack back trace. Hence, we would
545 lose the current stack back trace and start anew with an empty
546 back trace. When the resolved function returns, we would then
547 create a stack back trace with the same function names but
548 different frame id's. This will confuse stepping. */
549 fname
= ftrace_print_function_name (bfun
);
550 if (strcmp (fname
, "_dl_runtime_resolve") == 0)
551 return ftrace_new_tailcall (bfun
, mfun
, fun
);
553 return ftrace_new_return (bfun
, mfun
, fun
);
556 case BTRACE_INSN_CALL
:
557 /* Ignore calls to the next instruction. They are used for PIC. */
558 if (last
->pc
+ last
->size
== pc
)
561 return ftrace_new_call (bfun
, mfun
, fun
);
563 case BTRACE_INSN_JUMP
:
567 start
= get_pc_function_start (pc
);
569 /* A jump to the start of a function is (typically) a tail call. */
571 return ftrace_new_tailcall (bfun
, mfun
, fun
);
573 /* If we can't determine the function for PC, we treat a jump at
574 the end of the block as tail call if we're switching functions
575 and as an intra-function branch if we don't. */
576 if (start
== 0 && ftrace_function_switched (bfun
, mfun
, fun
))
577 return ftrace_new_tailcall (bfun
, mfun
, fun
);
584 /* Check if we're switching functions for some other reason. */
585 if (ftrace_function_switched (bfun
, mfun
, fun
))
587 DEBUG_FTRACE ("switching from %s in %s at %s",
588 ftrace_print_insn_addr (last
),
589 ftrace_print_function_name (bfun
),
590 ftrace_print_filename (bfun
));
592 return ftrace_new_switch (bfun
, mfun
, fun
);
598 /* Add the instruction at PC to BFUN's instructions. */
601 ftrace_update_insns (struct btrace_function
*bfun
,
602 const struct btrace_insn
*insn
)
604 VEC_safe_push (btrace_insn_s
, bfun
->insn
, insn
);
606 if (record_debug
> 1)
607 ftrace_debug (bfun
, "update insn");
610 /* Classify the instruction at PC. */
612 static enum btrace_insn_class
613 ftrace_classify_insn (struct gdbarch
*gdbarch
, CORE_ADDR pc
)
615 enum btrace_insn_class iclass
;
617 iclass
= BTRACE_INSN_OTHER
;
620 if (gdbarch_insn_is_call (gdbarch
, pc
))
621 iclass
= BTRACE_INSN_CALL
;
622 else if (gdbarch_insn_is_ret (gdbarch
, pc
))
623 iclass
= BTRACE_INSN_RETURN
;
624 else if (gdbarch_insn_is_jump (gdbarch
, pc
))
625 iclass
= BTRACE_INSN_JUMP
;
627 CATCH (error
, RETURN_MASK_ERROR
)
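    /* If the instruction could not be analyzed, we leave it classified as
       BTRACE_INSN_OTHER.  */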
/* Try to match the back trace at LHS to the back trace at RHS.  Returns the
   number of matching function segments or zero if the back traces do not
   match.  */
640 ftrace_match_backtrace (struct btrace_function
*lhs
,
641 struct btrace_function
*rhs
)
645 for (matches
= 0; lhs
!= NULL
&& rhs
!= NULL
; ++matches
)
647 if (ftrace_function_switched (lhs
, rhs
->msym
, rhs
->sym
))
650 lhs
= ftrace_get_caller (lhs
);
651 rhs
= ftrace_get_caller (rhs
);
657 /* Add ADJUSTMENT to the level of BFUN and succeeding function segments. */
660 ftrace_fixup_level (struct btrace_function
*bfun
, int adjustment
)
665 DEBUG_FTRACE ("fixup level (%+d)", adjustment
);
666 ftrace_debug (bfun
, "..bfun");
668 for (; bfun
!= NULL
; bfun
= bfun
->flow
.next
)
669 bfun
->level
+= adjustment
;
672 /* Recompute the global level offset. Traverse the function trace and compute
673 the global level offset as the negative of the minimal function level. */
676 ftrace_compute_global_level_offset (struct btrace_thread_info
*btinfo
)
678 struct btrace_function
*bfun
, *end
;
684 bfun
= btinfo
->begin
;
688 /* The last function segment contains the current instruction, which is not
689 really part of the trace. If it contains just this one instruction, we
690 stop when we reach it; otherwise, we let the below loop run to the end. */
692 if (VEC_length (btrace_insn_s
, end
->insn
) > 1)
696 for (; bfun
!= end
; bfun
= bfun
->flow
.next
)
697 level
= std::min (level
, bfun
->level
);
699 DEBUG_FTRACE ("setting global level offset: %d", -level
);
700 btinfo
->level
= -level
;
703 /* Connect the function segments PREV and NEXT in a bottom-to-top walk as in
704 ftrace_connect_backtrace. */
707 ftrace_connect_bfun (struct btrace_function
*prev
,
708 struct btrace_function
*next
)
710 DEBUG_FTRACE ("connecting...");
711 ftrace_debug (prev
, "..prev");
712 ftrace_debug (next
, "..next");
714 /* The function segments are not yet connected. */
715 gdb_assert (prev
->segment
.next
== NULL
);
716 gdb_assert (next
->segment
.prev
== NULL
);
718 prev
->segment
.next
= next
;
719 next
->segment
.prev
= prev
;
721 /* We may have moved NEXT to a different function level. */
722 ftrace_fixup_level (next
, prev
->level
- next
->level
);
724 /* If we run out of back trace for one, let's use the other's. */
725 if (prev
->up
== NULL
)
727 if (next
->up
!= NULL
)
729 DEBUG_FTRACE ("using next's callers");
730 ftrace_fixup_caller (prev
, next
->up
, next
->flags
);
733 else if (next
->up
== NULL
)
735 if (prev
->up
!= NULL
)
737 DEBUG_FTRACE ("using prev's callers");
738 ftrace_fixup_caller (next
, prev
->up
, prev
->flags
);
743 /* PREV may have a tailcall caller, NEXT can't. If it does, fixup the up
744 link to add the tail callers to NEXT's back trace.
746 This removes NEXT->UP from NEXT's back trace. It will be added back
747 when connecting NEXT and PREV's callers - provided they exist.
749 If PREV's back trace consists of a series of tail calls without an
750 actual call, there will be no further connection and NEXT's caller will
751 be removed for good. To catch this case, we handle it here and connect
752 the top of PREV's back trace to NEXT's caller. */
753 if ((prev
->flags
& BFUN_UP_LINKS_TO_TAILCALL
) != 0)
755 struct btrace_function
*caller
;
756 btrace_function_flags flags
;
758 /* We checked NEXT->UP above so CALLER can't be NULL. */
762 DEBUG_FTRACE ("adding prev's tail calls to next");
764 ftrace_fixup_caller (next
, prev
->up
, prev
->flags
);
766 for (prev
= prev
->up
; prev
!= NULL
; prev
= prev
->up
)
768 /* At the end of PREV's back trace, continue with CALLER. */
769 if (prev
->up
== NULL
)
771 DEBUG_FTRACE ("fixing up link for tailcall chain");
772 ftrace_debug (prev
, "..top");
773 ftrace_debug (caller
, "..up");
775 ftrace_fixup_caller (prev
, caller
, flags
);
777 /* If we skipped any tail calls, this may move CALLER to a
778 different function level.
780 Note that changing CALLER's level is only OK because we
781 know that this is the last iteration of the bottom-to-top
782 walk in ftrace_connect_backtrace.
784 Otherwise we will fix up CALLER's level when we connect it
785 to PREV's caller in the next iteration. */
786 ftrace_fixup_level (caller
, prev
->level
- caller
->level
- 1);
790 /* There's nothing to do if we find a real call. */
791 if ((prev
->flags
& BFUN_UP_LINKS_TO_TAILCALL
) == 0)
793 DEBUG_FTRACE ("will fix up link in next iteration");
801 /* Connect function segments on the same level in the back trace at LHS and RHS.
802 The back traces at LHS and RHS are expected to match according to
803 ftrace_match_backtrace. */
806 ftrace_connect_backtrace (struct btrace_function
*lhs
,
807 struct btrace_function
*rhs
)
809 while (lhs
!= NULL
&& rhs
!= NULL
)
811 struct btrace_function
*prev
, *next
;
813 gdb_assert (!ftrace_function_switched (lhs
, rhs
->msym
, rhs
->sym
));
815 /* Connecting LHS and RHS may change the up link. */
819 lhs
= ftrace_get_caller (lhs
);
820 rhs
= ftrace_get_caller (rhs
);
822 ftrace_connect_bfun (prev
, next
);
826 /* Bridge the gap between two function segments left and right of a gap if their
827 respective back traces match in at least MIN_MATCHES functions.
829 Returns non-zero if the gap could be bridged, zero otherwise. */
832 ftrace_bridge_gap (struct btrace_function
*lhs
, struct btrace_function
*rhs
,
835 struct btrace_function
*best_l
, *best_r
, *cand_l
, *cand_r
;
838 DEBUG_FTRACE ("checking gap at insn %u (req matches: %d)",
839 rhs
->insn_offset
- 1, min_matches
);
  /* We search the back traces of LHS and RHS for valid connections and connect
     the two function segments that give the longest combined back trace.  */
848 for (cand_l
= lhs
; cand_l
!= NULL
; cand_l
= ftrace_get_caller (cand_l
))
849 for (cand_r
= rhs
; cand_r
!= NULL
; cand_r
= ftrace_get_caller (cand_r
))
853 matches
= ftrace_match_backtrace (cand_l
, cand_r
);
854 if (best_matches
< matches
)
856 best_matches
= matches
;
862 /* We need at least MIN_MATCHES matches. */
863 gdb_assert (min_matches
> 0);
864 if (best_matches
< min_matches
)
867 DEBUG_FTRACE ("..matches: %d", best_matches
);
869 /* We will fix up the level of BEST_R and succeeding function segments such
870 that BEST_R's level matches BEST_L's when we connect BEST_L to BEST_R.
872 This will ignore the level of RHS and following if BEST_R != RHS. I.e. if
873 BEST_R is a successor of RHS in the back trace of RHS (phases 1 and 3).
875 To catch this, we already fix up the level here where we can start at RHS
876 instead of at BEST_R. We will ignore the level fixup when connecting
877 BEST_L to BEST_R as they will already be on the same level. */
878 ftrace_fixup_level (rhs
, best_l
->level
- best_r
->level
);
880 ftrace_connect_backtrace (best_l
, best_r
);
885 /* Try to bridge gaps due to overflow or decode errors by connecting the
886 function segments that are separated by the gap. */
889 btrace_bridge_gaps (struct thread_info
*tp
, VEC (bfun_s
) **gaps
)
891 VEC (bfun_s
) *remaining
;
892 struct cleanup
*old_chain
;
895 DEBUG ("bridge gaps");
898 old_chain
= make_cleanup (VEC_cleanup (bfun_s
), &remaining
);
900 /* We require a minimum amount of matches for bridging a gap. The number of
901 required matches will be lowered with each iteration.
903 The more matches the higher our confidence that the bridging is correct.
904 For big gaps or small traces, however, it may not be feasible to require a
905 high number of matches. */
906 for (min_matches
= 5; min_matches
> 0; --min_matches
)
908 /* Let's try to bridge as many gaps as we can. In some cases, we need to
909 skip a gap and revisit it again after we closed later gaps. */
910 while (!VEC_empty (bfun_s
, *gaps
))
912 struct btrace_function
*gap
;
915 for (idx
= 0; VEC_iterate (bfun_s
, *gaps
, idx
, gap
); ++idx
)
917 struct btrace_function
*lhs
, *rhs
;
920 /* We may have a sequence of gaps if we run from one error into
921 the next as we try to re-sync onto the trace stream. Ignore
922 all but the leftmost gap in such a sequence.
924 Also ignore gaps at the beginning of the trace. */
925 lhs
= gap
->flow
.prev
;
926 if (lhs
== NULL
|| lhs
->errcode
!= 0)
929 /* Skip gaps to the right. */
930 for (rhs
= gap
->flow
.next
; rhs
!= NULL
; rhs
= rhs
->flow
.next
)
931 if (rhs
->errcode
== 0)
934 /* Ignore gaps at the end of the trace. */
938 bridged
= ftrace_bridge_gap (lhs
, rhs
, min_matches
);
940 /* Keep track of gaps we were not able to bridge and try again.
941 If we just pushed them to the end of GAPS we would risk an
942 infinite loop in case we simply cannot bridge a gap. */
944 VEC_safe_push (bfun_s
, remaining
, gap
);
947 /* Let's see if we made any progress. */
948 if (VEC_length (bfun_s
, remaining
) == VEC_length (bfun_s
, *gaps
))
951 VEC_free (bfun_s
, *gaps
);
957 /* We get here if either GAPS is empty or if GAPS equals REMAINING. */
958 if (VEC_empty (bfun_s
, *gaps
))
961 VEC_free (bfun_s
, remaining
);
964 do_cleanups (old_chain
);
966 /* We may omit this in some cases. Not sure it is worth the extra
967 complication, though. */
968 ftrace_compute_global_level_offset (&tp
->btrace
);
971 /* Compute the function branch trace from BTS trace. */
974 btrace_compute_ftrace_bts (struct thread_info
*tp
,
975 const struct btrace_data_bts
*btrace
,
978 struct btrace_thread_info
*btinfo
;
979 struct btrace_function
*begin
, *end
;
980 struct gdbarch
*gdbarch
;
984 gdbarch
= target_gdbarch ();
985 btinfo
= &tp
->btrace
;
986 begin
= btinfo
->begin
;
988 level
= begin
!= NULL
? -btinfo
->level
: INT_MAX
;
989 blk
= VEC_length (btrace_block_s
, btrace
->blocks
);
993 btrace_block_s
*block
;
998 block
= VEC_index (btrace_block_s
, btrace
->blocks
, blk
);
1003 struct btrace_insn insn
;
1006 /* We should hit the end of the block. Warn if we went too far. */
1007 if (block
->end
< pc
)
1009 /* Indicate the gap in the trace. */
1010 end
= ftrace_new_gap (end
, BDE_BTS_OVERFLOW
);
1014 VEC_safe_push (bfun_s
, *gaps
, end
);
1016 warning (_("Recorded trace may be corrupted at instruction "
1017 "%u (pc = %s)."), end
->insn_offset
- 1,
1018 core_addr_to_string_nz (pc
));
1023 end
= ftrace_update_function (end
, pc
);
1027 /* Maintain the function level offset.
1028 For all but the last block, we do it here. */
1030 level
= std::min (level
, end
->level
);
1035 size
= gdb_insn_length (gdbarch
, pc
);
1037 CATCH (error
, RETURN_MASK_ERROR
)
1044 insn
.iclass
= ftrace_classify_insn (gdbarch
, pc
);
1047 ftrace_update_insns (end
, &insn
);
1049 /* We're done once we pushed the instruction at the end. */
1050 if (block
->end
== pc
)
1053 /* We can't continue if we fail to compute the size. */
1056 /* Indicate the gap in the trace. We just added INSN so we're
1057 not at the beginning. */
1058 end
= ftrace_new_gap (end
, BDE_BTS_INSN_SIZE
);
1060 VEC_safe_push (bfun_s
, *gaps
, end
);
1062 warning (_("Recorded trace may be incomplete at instruction %u "
1063 "(pc = %s)."), end
->insn_offset
- 1,
1064 core_addr_to_string_nz (pc
));
1071 /* Maintain the function level offset.
     For the last block, we do it here to not consider the last instruction.
     Since the last instruction corresponds to the current instruction
1075 and is not really part of the execution history, it shouldn't
1076 affect the level. */
1078 level
= std::min (level
, end
->level
);
1082 btinfo
->begin
= begin
;
1085 /* LEVEL is the minimal function level of all btrace function segments.
1086 Define the global level offset to -LEVEL so all function levels are
1087 normalized to start at zero. */
1088 btinfo
->level
= -level
;
1091 #if defined (HAVE_LIBIPT)
1093 static enum btrace_insn_class
1094 pt_reclassify_insn (enum pt_insn_class iclass
)
1099 return BTRACE_INSN_CALL
;
1102 return BTRACE_INSN_RETURN
;
1105 return BTRACE_INSN_JUMP
;
1108 return BTRACE_INSN_OTHER
;
1112 /* Return the btrace instruction flags for INSN. */
1114 static btrace_insn_flags
1115 pt_btrace_insn_flags (const struct pt_insn
&insn
)
1117 btrace_insn_flags flags
= 0;
1119 if (insn
.speculative
)
1120 flags
|= BTRACE_INSN_FLAG_SPECULATIVE
;
1125 /* Return the btrace instruction for INSN. */
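/* The braced initializer below fills struct btrace_insn in declaration
   order: pc, size, iclass, and flags.  */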
1128 pt_btrace_insn (const struct pt_insn
&insn
)
1130 return {(CORE_ADDR
) insn
.ip
, (gdb_byte
) insn
.size
,
1131 pt_reclassify_insn (insn
.iclass
),
1132 pt_btrace_insn_flags (insn
)};
1136 /* Add function branch trace using DECODER. */
1139 ftrace_add_pt (struct pt_insn_decoder
*decoder
,
1140 struct btrace_function
**pbegin
,
1141 struct btrace_function
**pend
, int *plevel
,
1142 VEC (bfun_s
) **gaps
)
1144 struct btrace_function
*begin
, *end
, *upd
;
1152 struct pt_insn insn
;
1154 errcode
= pt_insn_sync_forward (decoder
);
1157 if (errcode
!= -pte_eos
)
1158 warning (_("Failed to synchronize onto the Intel Processor "
1159 "Trace stream: %s."), pt_errstr (pt_errcode (errcode
)));
1165 errcode
= pt_insn_next (decoder
, &insn
, sizeof(insn
));
1169 /* Look for gaps in the trace - unless we're at the beginning. */
1172 /* Tracing is disabled and re-enabled each time we enter the
1173 kernel. Most times, we continue from the same instruction we
1174 stopped before. This is indicated via the RESUMED instruction
1175 flag. The ENABLED instruction flag means that we continued
1176 from some other instruction. Indicate this as a trace gap. */
1179 *pend
= end
= ftrace_new_gap (end
, BDE_PT_DISABLED
);
1181 VEC_safe_push (bfun_s
, *gaps
, end
);
1183 pt_insn_get_offset (decoder
, &offset
);
1185 warning (_("Non-contiguous trace at instruction %u (offset "
1186 "= 0x%" PRIx64
", pc = 0x%" PRIx64
")."),
1187 end
->insn_offset
- 1, offset
, insn
.ip
);
1191 /* Indicate trace overflows. */
1194 *pend
= end
= ftrace_new_gap (end
, BDE_PT_OVERFLOW
);
1196 *pbegin
= begin
= end
;
1198 VEC_safe_push (bfun_s
, *gaps
, end
);
1200 pt_insn_get_offset (decoder
, &offset
);
1202 warning (_("Overflow at instruction %u (offset = 0x%" PRIx64
1203 ", pc = 0x%" PRIx64
")."), end
->insn_offset
- 1,
1207 upd
= ftrace_update_function (end
, insn
.ip
);
1213 *pbegin
= begin
= upd
;
1216 /* Maintain the function level offset. */
1217 *plevel
= std::min (*plevel
, end
->level
);
1219 btrace_insn btinsn
= pt_btrace_insn (insn
);
1220 ftrace_update_insns (end
, &btinsn
);
1223 if (errcode
== -pte_eos
)
1226 /* Indicate the gap in the trace. */
1227 *pend
= end
= ftrace_new_gap (end
, errcode
);
1229 *pbegin
= begin
= end
;
1231 VEC_safe_push (bfun_s
, *gaps
, end
);
1233 pt_insn_get_offset (decoder
, &offset
);
1235 warning (_("Decode error (%d) at instruction %u (offset = 0x%" PRIx64
1236 ", pc = 0x%" PRIx64
"): %s."), errcode
, end
->insn_offset
- 1,
1237 offset
, insn
.ip
, pt_errstr (pt_errcode (errcode
)));
/* A callback function to allow the trace decoder to read the inferior's
   memory.  */
1245 btrace_pt_readmem_callback (gdb_byte
*buffer
, size_t size
,
1246 const struct pt_asid
*asid
, uint64_t pc
,
1249 int result
, errcode
;
1251 result
= (int) size
;
1254 errcode
= target_read_code ((CORE_ADDR
) pc
, buffer
, size
);
1256 result
= -pte_nomap
;
1258 CATCH (error
, RETURN_MASK_ERROR
)
1260 result
= -pte_nomap
;
1267 /* Translate the vendor from one enum to another. */
1269 static enum pt_cpu_vendor
1270 pt_translate_cpu_vendor (enum btrace_cpu_vendor vendor
)
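  /* Presumably CV_INTEL translates to libipt's pcv_intel and any other
     vendor falls back to pcv_unknown.  */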
1282 /* Finalize the function branch trace after decode. */
1284 static void btrace_finalize_ftrace_pt (struct pt_insn_decoder
*decoder
,
1285 struct thread_info
*tp
, int level
)
1287 pt_insn_free_decoder (decoder
);
1289 /* LEVEL is the minimal function level of all btrace function segments.
1290 Define the global level offset to -LEVEL so all function levels are
1291 normalized to start at zero. */
1292 tp
->btrace
.level
= -level
;
1294 /* Add a single last instruction entry for the current PC.
1295 This allows us to compute the backtrace at the current PC using both
1296 standard unwind and btrace unwind.
1297 This extra entry is ignored by all record commands. */
1301 /* Compute the function branch trace from Intel Processor Trace
1305 btrace_compute_ftrace_pt (struct thread_info
*tp
,
1306 const struct btrace_data_pt
*btrace
,
1307 VEC (bfun_s
) **gaps
)
1309 struct btrace_thread_info
*btinfo
;
1310 struct pt_insn_decoder
*decoder
;
1311 struct pt_config config
;
1314 if (btrace
->size
== 0)
1317 btinfo
= &tp
->btrace
;
1318 level
= btinfo
->begin
!= NULL
? -btinfo
->level
: INT_MAX
;
1320 pt_config_init(&config
);
1321 config
.begin
= btrace
->data
;
1322 config
.end
= btrace
->data
+ btrace
->size
;
1324 config
.cpu
.vendor
= pt_translate_cpu_vendor (btrace
->config
.cpu
.vendor
);
1325 config
.cpu
.family
= btrace
->config
.cpu
.family
;
1326 config
.cpu
.model
= btrace
->config
.cpu
.model
;
1327 config
.cpu
.stepping
= btrace
->config
.cpu
.stepping
;
1329 errcode
= pt_cpu_errata (&config
.errata
, &config
.cpu
);
1331 error (_("Failed to configure the Intel Processor Trace decoder: %s."),
1332 pt_errstr (pt_errcode (errcode
)));
1334 decoder
= pt_insn_alloc_decoder (&config
);
1335 if (decoder
== NULL
)
1336 error (_("Failed to allocate the Intel Processor Trace decoder."));
1340 struct pt_image
*image
;
1342 image
= pt_insn_get_image(decoder
);
1344 error (_("Failed to configure the Intel Processor Trace decoder."));
1346 errcode
= pt_image_set_callback(image
, btrace_pt_readmem_callback
, NULL
);
1348 error (_("Failed to configure the Intel Processor Trace decoder: "
1349 "%s."), pt_errstr (pt_errcode (errcode
)));
1351 ftrace_add_pt (decoder
, &btinfo
->begin
, &btinfo
->end
, &level
, gaps
);
1353 CATCH (error
, RETURN_MASK_ALL
)
1355 /* Indicate a gap in the trace if we quit trace processing. */
1356 if (error
.reason
== RETURN_QUIT
&& btinfo
->end
!= NULL
)
1358 btinfo
->end
= ftrace_new_gap (btinfo
->end
, BDE_PT_USER_QUIT
);
1360 VEC_safe_push (bfun_s
, *gaps
, btinfo
->end
);
1363 btrace_finalize_ftrace_pt (decoder
, tp
, level
);
1365 throw_exception (error
);
1369 btrace_finalize_ftrace_pt (decoder
, tp
, level
);
1372 #else /* defined (HAVE_LIBIPT) */
1375 btrace_compute_ftrace_pt (struct thread_info
*tp
,
1376 const struct btrace_data_pt
*btrace
,
1377 VEC (bfun_s
) **gaps
)
1379 internal_error (__FILE__
, __LINE__
, _("Unexpected branch trace format."));
1382 #endif /* defined (HAVE_LIBIPT) */
1384 /* Compute the function branch trace from a block branch trace BTRACE for
1385 a thread given by BTINFO. */
1388 btrace_compute_ftrace_1 (struct thread_info
*tp
, struct btrace_data
*btrace
,
1389 VEC (bfun_s
) **gaps
)
1391 DEBUG ("compute ftrace");
1393 switch (btrace
->format
)
1395 case BTRACE_FORMAT_NONE
:
1398 case BTRACE_FORMAT_BTS
:
1399 btrace_compute_ftrace_bts (tp
, &btrace
->variant
.bts
, gaps
);
1402 case BTRACE_FORMAT_PT
:
1403 btrace_compute_ftrace_pt (tp
, &btrace
->variant
.pt
, gaps
);
  internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
1411 btrace_finalize_ftrace (struct thread_info
*tp
, VEC (bfun_s
) **gaps
)
1413 if (!VEC_empty (bfun_s
, *gaps
))
1415 tp
->btrace
.ngaps
+= VEC_length (bfun_s
, *gaps
);
1416 btrace_bridge_gaps (tp
, gaps
);
1421 btrace_compute_ftrace (struct thread_info
*tp
, struct btrace_data
*btrace
)
1424 struct cleanup
*old_chain
;
1427 old_chain
= make_cleanup (VEC_cleanup (bfun_s
), &gaps
);
1431 btrace_compute_ftrace_1 (tp
, btrace
, &gaps
);
1433 CATCH (error
, RETURN_MASK_ALL
)
1435 btrace_finalize_ftrace (tp
, &gaps
);
1437 throw_exception (error
);
1441 btrace_finalize_ftrace (tp
, &gaps
);
1443 do_cleanups (old_chain
);
1446 /* Add an entry for the current PC. */
1449 btrace_add_pc (struct thread_info
*tp
)
1451 struct btrace_data btrace
;
1452 struct btrace_block
*block
;
1453 struct regcache
*regcache
;
1454 struct cleanup
*cleanup
;
1457 regcache
= get_thread_regcache (tp
->ptid
);
1458 pc
= regcache_read_pc (regcache
);
1460 btrace_data_init (&btrace
);
1461 btrace
.format
= BTRACE_FORMAT_BTS
;
1462 btrace
.variant
.bts
.blocks
= NULL
;
1464 cleanup
= make_cleanup_btrace_data (&btrace
);
1466 block
= VEC_safe_push (btrace_block_s
, btrace
.variant
.bts
.blocks
, NULL
);
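  /* The new block is meant to cover just the current PC, so the synthesized
     BTS trace contains a single instruction.  */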
1470 btrace_compute_ftrace (tp
, &btrace
);
1472 do_cleanups (cleanup
);
1478 btrace_enable (struct thread_info
*tp
, const struct btrace_config
*conf
)
1480 if (tp
->btrace
.target
!= NULL
)
1483 #if !defined (HAVE_LIBIPT)
1484 if (conf
->format
== BTRACE_FORMAT_PT
)
1485 error (_("GDB does not support Intel Processor Trace."));
1486 #endif /* !defined (HAVE_LIBIPT) */
1488 if (!target_supports_btrace (conf
->format
))
1489 error (_("Target does not support branch tracing."));
1491 DEBUG ("enable thread %s (%s)", print_thread_id (tp
),
1492 target_pid_to_str (tp
->ptid
));
1494 tp
->btrace
.target
= target_enable_btrace (tp
->ptid
, conf
);
1496 /* We're done if we failed to enable tracing. */
1497 if (tp
->btrace
.target
== NULL
)
1500 /* We need to undo the enable in case of errors. */
      /* Add an entry for the current PC so we start tracing from where we
	 enabled it.
1506 If we can't access TP's registers, TP is most likely running. In this
1507 case, we can't really say where tracing was enabled so it should be
1508 safe to simply skip this step.
1510 This is not relevant for BTRACE_FORMAT_PT since the trace will already
1511 start at the PC at which tracing was enabled. */
1512 if (conf
->format
!= BTRACE_FORMAT_PT
1513 && can_access_registers_ptid (tp
->ptid
))
1516 CATCH (exception
, RETURN_MASK_ALL
)
1518 btrace_disable (tp
);
1520 throw_exception (exception
);
1527 const struct btrace_config
*
1528 btrace_conf (const struct btrace_thread_info
*btinfo
)
1530 if (btinfo
->target
== NULL
)
1533 return target_btrace_conf (btinfo
->target
);
1539 btrace_disable (struct thread_info
*tp
)
1541 struct btrace_thread_info
*btp
= &tp
->btrace
;
1544 if (btp
->target
== NULL
)
1547 DEBUG ("disable thread %s (%s)", print_thread_id (tp
),
1548 target_pid_to_str (tp
->ptid
));
1550 target_disable_btrace (btp
->target
);
1559 btrace_teardown (struct thread_info
*tp
)
1561 struct btrace_thread_info
*btp
= &tp
->btrace
;
1564 if (btp
->target
== NULL
)
1567 DEBUG ("teardown thread %s (%s)", print_thread_id (tp
),
1568 target_pid_to_str (tp
->ptid
));
1570 target_teardown_btrace (btp
->target
);
1576 /* Stitch branch trace in BTS format. */
1579 btrace_stitch_bts (struct btrace_data_bts
*btrace
, struct thread_info
*tp
)
1581 struct btrace_thread_info
*btinfo
;
1582 struct btrace_function
*last_bfun
;
1583 struct btrace_insn
*last_insn
;
1584 btrace_block_s
*first_new_block
;
1586 btinfo
= &tp
->btrace
;
1587 last_bfun
= btinfo
->end
;
1588 gdb_assert (last_bfun
!= NULL
);
1589 gdb_assert (!VEC_empty (btrace_block_s
, btrace
->blocks
));
1591 /* If the existing trace ends with a gap, we just glue the traces
1592 together. We need to drop the last (i.e. chronologically first) block
     of the new trace, though, since we can't fill in the start address.  */
1594 if (VEC_empty (btrace_insn_s
, last_bfun
->insn
))
1596 VEC_pop (btrace_block_s
, btrace
->blocks
);
1600 /* Beware that block trace starts with the most recent block, so the
1601 chronologically first block in the new trace is the last block in
1602 the new trace's block vector. */
1603 first_new_block
= VEC_last (btrace_block_s
, btrace
->blocks
);
1604 last_insn
= VEC_last (btrace_insn_s
, last_bfun
->insn
);
1606 /* If the current PC at the end of the block is the same as in our current
1607 trace, there are two explanations:
1608 1. we executed the instruction and some branch brought us back.
1609 2. we have not made any progress.
     In the first case, the delta trace vector should contain at least two
     entries.
     In the second case, the delta trace vector should contain exactly one
1613 entry for the partial block containing the current PC. Remove it. */
1614 if (first_new_block
->end
== last_insn
->pc
1615 && VEC_length (btrace_block_s
, btrace
->blocks
) == 1)
1617 VEC_pop (btrace_block_s
, btrace
->blocks
);
1621 DEBUG ("stitching %s to %s", ftrace_print_insn_addr (last_insn
),
1622 core_addr_to_string_nz (first_new_block
->end
));
1624 /* Do a simple sanity check to make sure we don't accidentally end up
1625 with a bad block. This should not occur in practice. */
1626 if (first_new_block
->end
< last_insn
->pc
)
1628 warning (_("Error while trying to read delta trace. Falling back to "
1633 /* We adjust the last block to start at the end of our current trace. */
1634 gdb_assert (first_new_block
->begin
== 0);
1635 first_new_block
->begin
= last_insn
->pc
;
1637 /* We simply pop the last insn so we can insert it again as part of
1638 the normal branch trace computation.
1639 Since instruction iterators are based on indices in the instructions
1640 vector, we don't leave any pointers dangling. */
1641 DEBUG ("pruning insn at %s for stitching",
1642 ftrace_print_insn_addr (last_insn
));
1644 VEC_pop (btrace_insn_s
, last_bfun
->insn
);
1646 /* The instructions vector may become empty temporarily if this has
1647 been the only instruction in this function segment.
1648 This violates the invariant but will be remedied shortly by
1649 btrace_compute_ftrace when we add the new trace. */
1651 /* The only case where this would hurt is if the entire trace consisted
1652 of just that one instruction. If we remove it, we might turn the now
1653 empty btrace function segment into a gap. But we don't want gaps at
1654 the beginning. To avoid this, we remove the entire old trace. */
1655 if (last_bfun
== btinfo
->begin
&& VEC_empty (btrace_insn_s
, last_bfun
->insn
))
1661 /* Adjust the block trace in order to stitch old and new trace together.
1662 BTRACE is the new delta trace between the last and the current stop.
1663 TP is the traced thread.
   May modify BTRACE as well as the existing trace in TP.
1665 Return 0 on success, -1 otherwise. */
1668 btrace_stitch_trace (struct btrace_data
*btrace
, struct thread_info
*tp
)
1670 /* If we don't have trace, there's nothing to do. */
1671 if (btrace_data_empty (btrace
))
1674 switch (btrace
->format
)
1676 case BTRACE_FORMAT_NONE
:
1679 case BTRACE_FORMAT_BTS
:
1680 return btrace_stitch_bts (&btrace
->variant
.bts
, tp
);
1682 case BTRACE_FORMAT_PT
:
1683 /* Delta reads are not supported. */
  internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
1690 /* Clear the branch trace histories in BTINFO. */
1693 btrace_clear_history (struct btrace_thread_info
*btinfo
)
1695 xfree (btinfo
->insn_history
);
1696 xfree (btinfo
->call_history
);
1697 xfree (btinfo
->replay
);
1699 btinfo
->insn_history
= NULL
;
1700 btinfo
->call_history
= NULL
;
1701 btinfo
->replay
= NULL
;
1704 /* Clear the branch trace maintenance histories in BTINFO. */
1707 btrace_maint_clear (struct btrace_thread_info
*btinfo
)
1709 switch (btinfo
->data
.format
)
1714 case BTRACE_FORMAT_BTS
:
1715 btinfo
->maint
.variant
.bts
.packet_history
.begin
= 0;
1716 btinfo
->maint
.variant
.bts
.packet_history
.end
= 0;
1719 #if defined (HAVE_LIBIPT)
1720 case BTRACE_FORMAT_PT
:
1721 xfree (btinfo
->maint
.variant
.pt
.packets
);
1723 btinfo
->maint
.variant
.pt
.packets
= NULL
;
1724 btinfo
->maint
.variant
.pt
.packet_history
.begin
= 0;
1725 btinfo
->maint
.variant
.pt
.packet_history
.end
= 0;
1727 #endif /* defined (HAVE_LIBIPT) */
1734 btrace_decode_error (enum btrace_format format
, int errcode
)
1738 case BTRACE_FORMAT_BTS
:
1741 case BDE_BTS_OVERFLOW
:
1742 return _("instruction overflow");
1744 case BDE_BTS_INSN_SIZE
:
1745 return _("unknown instruction");
1752 #if defined (HAVE_LIBIPT)
1753 case BTRACE_FORMAT_PT
:
1756 case BDE_PT_USER_QUIT
:
1757 return _("trace decode cancelled");
1759 case BDE_PT_DISABLED
:
1760 return _("disabled");
1762 case BDE_PT_OVERFLOW
:
1763 return _("overflow");
1767 return pt_errstr (pt_errcode (errcode
));
1771 #endif /* defined (HAVE_LIBIPT) */
1777 return _("unknown");
1783 btrace_fetch (struct thread_info
*tp
)
1785 struct btrace_thread_info
*btinfo
;
1786 struct btrace_target_info
*tinfo
;
1787 struct btrace_data btrace
;
1788 struct cleanup
*cleanup
;
1791 DEBUG ("fetch thread %s (%s)", print_thread_id (tp
),
1792 target_pid_to_str (tp
->ptid
));
1794 btinfo
= &tp
->btrace
;
1795 tinfo
= btinfo
->target
;
1799 /* There's no way we could get new trace while replaying.
1800 On the other hand, delta trace would return a partial record with the
1801 current PC, which is the replay PC, not the last PC, as expected. */
1802 if (btinfo
->replay
!= NULL
)
1805 /* We should not be called on running or exited threads. */
1806 gdb_assert (can_access_registers_ptid (tp
->ptid
));
1808 btrace_data_init (&btrace
);
1809 cleanup
= make_cleanup_btrace_data (&btrace
);
1811 /* Let's first try to extend the trace we already have. */
1812 if (btinfo
->end
!= NULL
)
1814 errcode
= target_read_btrace (&btrace
, tinfo
, BTRACE_READ_DELTA
);
1817 /* Success. Let's try to stitch the traces together. */
1818 errcode
= btrace_stitch_trace (&btrace
, tp
);
1822 /* We failed to read delta trace. Let's try to read new trace. */
1823 errcode
= target_read_btrace (&btrace
, tinfo
, BTRACE_READ_NEW
);
1825 /* If we got any new trace, discard what we have. */
1826 if (errcode
== 0 && !btrace_data_empty (&btrace
))
1830 /* If we were not able to read the trace, we start over. */
1834 errcode
= target_read_btrace (&btrace
, tinfo
, BTRACE_READ_ALL
);
1838 errcode
= target_read_btrace (&btrace
, tinfo
, BTRACE_READ_ALL
);
1840 /* If we were not able to read the branch trace, signal an error. */
1842 error (_("Failed to read branch trace."));
1844 /* Compute the trace, provided we have any. */
1845 if (!btrace_data_empty (&btrace
))
1847 struct btrace_function
*bfun
;
1849 /* Store the raw trace data. The stored data will be cleared in
1850 btrace_clear, so we always append the new trace. */
1851 btrace_data_append (&btinfo
->data
, &btrace
);
1852 btrace_maint_clear (btinfo
);
1854 VEC_truncate (btrace_fun_p
, btinfo
->functions
, 0);
1855 btrace_clear_history (btinfo
);
1856 btrace_compute_ftrace (tp
, &btrace
);
1858 for (bfun
= btinfo
->begin
; bfun
!= NULL
; bfun
= bfun
->flow
.next
)
1859 VEC_safe_push (btrace_fun_p
, btinfo
->functions
, bfun
);
1862 do_cleanups (cleanup
);
1868 btrace_clear (struct thread_info
*tp
)
1870 struct btrace_thread_info
*btinfo
;
1871 struct btrace_function
*it
, *trash
;
1873 DEBUG ("clear thread %s (%s)", print_thread_id (tp
),
1874 target_pid_to_str (tp
->ptid
));
1876 /* Make sure btrace frames that may hold a pointer into the branch
1877 trace data are destroyed. */
1878 reinit_frame_cache ();
1880 btinfo
= &tp
->btrace
;
1882 VEC_free (btrace_fun_p
, btinfo
->functions
);
1893 btinfo
->begin
= NULL
;
1897 /* Must clear the maint data before - it depends on BTINFO->DATA. */
1898 btrace_maint_clear (btinfo
);
1899 btrace_data_clear (&btinfo
->data
);
1900 btrace_clear_history (btinfo
);
1906 btrace_free_objfile (struct objfile
*objfile
)
1908 struct thread_info
*tp
;
1910 DEBUG ("free objfile");
1912 ALL_NON_EXITED_THREADS (tp
)
1916 #if defined (HAVE_LIBEXPAT)
1918 /* Check the btrace document version. */
1921 check_xml_btrace_version (struct gdb_xml_parser
*parser
,
1922 const struct gdb_xml_element
*element
,
1923 void *user_data
, VEC (gdb_xml_value_s
) *attributes
)
1926 = (const char *) xml_find_attribute (attributes
, "version")->value
;
1928 if (strcmp (version
, "1.0") != 0)
1929 gdb_xml_error (parser
, _("Unsupported btrace version: \"%s\""), version
);
1932 /* Parse a btrace "block" xml record. */
1935 parse_xml_btrace_block (struct gdb_xml_parser
*parser
,
1936 const struct gdb_xml_element
*element
,
1937 void *user_data
, VEC (gdb_xml_value_s
) *attributes
)
1939 struct btrace_data
*btrace
;
1940 struct btrace_block
*block
;
1941 ULONGEST
*begin
, *end
;
1943 btrace
= (struct btrace_data
*) user_data
;
1945 switch (btrace
->format
)
1947 case BTRACE_FORMAT_BTS
:
1950 case BTRACE_FORMAT_NONE
:
1951 btrace
->format
= BTRACE_FORMAT_BTS
;
1952 btrace
->variant
.bts
.blocks
= NULL
;
1956 gdb_xml_error (parser
, _("Btrace format error."));
1959 begin
= (ULONGEST
*) xml_find_attribute (attributes
, "begin")->value
;
1960 end
= (ULONGEST
*) xml_find_attribute (attributes
, "end")->value
;
1962 block
= VEC_safe_push (btrace_block_s
, btrace
->variant
.bts
.blocks
, NULL
);
1963 block
->begin
= *begin
;
1967 /* Parse a "raw" xml record. */
1970 parse_xml_raw (struct gdb_xml_parser
*parser
, const char *body_text
,
1971 gdb_byte
**pdata
, size_t *psize
)
1973 struct cleanup
*cleanup
;
1974 gdb_byte
*data
, *bin
;
1977 len
= strlen (body_text
);
1979 gdb_xml_error (parser
, _("Bad raw data size."));
1983 bin
= data
= (gdb_byte
*) xmalloc (size
);
1984 cleanup
= make_cleanup (xfree
, data
);
1986 /* We use hex encoding - see common/rsp-low.h. */
1994 if (hi
== 0 || lo
== 0)
1995 gdb_xml_error (parser
, _("Bad hex encoding."));
1997 *bin
++ = fromhex (hi
) * 16 + fromhex (lo
);
2001 discard_cleanups (cleanup
);
2007 /* Parse a btrace pt-config "cpu" xml record. */
2010 parse_xml_btrace_pt_config_cpu (struct gdb_xml_parser
*parser
,
2011 const struct gdb_xml_element
*element
,
2013 VEC (gdb_xml_value_s
) *attributes
)
2015 struct btrace_data
*btrace
;
2017 ULONGEST
*family
, *model
, *stepping
;
2019 vendor
= (const char *) xml_find_attribute (attributes
, "vendor")->value
;
2020 family
= (ULONGEST
*) xml_find_attribute (attributes
, "family")->value
;
2021 model
= (ULONGEST
*) xml_find_attribute (attributes
, "model")->value
;
2022 stepping
= (ULONGEST
*) xml_find_attribute (attributes
, "stepping")->value
;
2024 btrace
= (struct btrace_data
*) user_data
;
2026 if (strcmp (vendor
, "GenuineIntel") == 0)
2027 btrace
->variant
.pt
.config
.cpu
.vendor
= CV_INTEL
;
2029 btrace
->variant
.pt
.config
.cpu
.family
= *family
;
2030 btrace
->variant
.pt
.config
.cpu
.model
= *model
;
2031 btrace
->variant
.pt
.config
.cpu
.stepping
= *stepping
;
2034 /* Parse a btrace pt "raw" xml record. */
2037 parse_xml_btrace_pt_raw (struct gdb_xml_parser
*parser
,
2038 const struct gdb_xml_element
*element
,
2039 void *user_data
, const char *body_text
)
2041 struct btrace_data
*btrace
;
2043 btrace
= (struct btrace_data
*) user_data
;
2044 parse_xml_raw (parser
, body_text
, &btrace
->variant
.pt
.data
,
2045 &btrace
->variant
.pt
.size
);
2048 /* Parse a btrace "pt" xml record. */
2051 parse_xml_btrace_pt (struct gdb_xml_parser
*parser
,
2052 const struct gdb_xml_element
*element
,
2053 void *user_data
, VEC (gdb_xml_value_s
) *attributes
)
2055 struct btrace_data
*btrace
;
2057 btrace
= (struct btrace_data
*) user_data
;
2058 btrace
->format
= BTRACE_FORMAT_PT
;
2059 btrace
->variant
.pt
.config
.cpu
.vendor
= CV_UNKNOWN
;
2060 btrace
->variant
.pt
.data
= NULL
;
2061 btrace
->variant
.pt
.size
= 0;
2064 static const struct gdb_xml_attribute block_attributes
[] = {
2065 { "begin", GDB_XML_AF_NONE
, gdb_xml_parse_attr_ulongest
, NULL
},
2066 { "end", GDB_XML_AF_NONE
, gdb_xml_parse_attr_ulongest
, NULL
},
2067 { NULL
, GDB_XML_AF_NONE
, NULL
, NULL
}
2070 static const struct gdb_xml_attribute btrace_pt_config_cpu_attributes
[] = {
2071 { "vendor", GDB_XML_AF_NONE
, NULL
, NULL
},
2072 { "family", GDB_XML_AF_NONE
, gdb_xml_parse_attr_ulongest
, NULL
},
2073 { "model", GDB_XML_AF_NONE
, gdb_xml_parse_attr_ulongest
, NULL
},
2074 { "stepping", GDB_XML_AF_NONE
, gdb_xml_parse_attr_ulongest
, NULL
},
2075 { NULL
, GDB_XML_AF_NONE
, NULL
, NULL
}
2078 static const struct gdb_xml_element btrace_pt_config_children
[] = {
2079 { "cpu", btrace_pt_config_cpu_attributes
, NULL
, GDB_XML_EF_OPTIONAL
,
2080 parse_xml_btrace_pt_config_cpu
, NULL
},
2081 { NULL
, NULL
, NULL
, GDB_XML_EF_NONE
, NULL
, NULL
}
2084 static const struct gdb_xml_element btrace_pt_children
[] = {
2085 { "pt-config", NULL
, btrace_pt_config_children
, GDB_XML_EF_OPTIONAL
, NULL
,
2087 { "raw", NULL
, NULL
, GDB_XML_EF_OPTIONAL
, NULL
, parse_xml_btrace_pt_raw
},
2088 { NULL
, NULL
, NULL
, GDB_XML_EF_NONE
, NULL
, NULL
}
2091 static const struct gdb_xml_attribute btrace_attributes
[] = {
2092 { "version", GDB_XML_AF_NONE
, NULL
, NULL
},
2093 { NULL
, GDB_XML_AF_NONE
, NULL
, NULL
}
2096 static const struct gdb_xml_element btrace_children
[] = {
2097 { "block", block_attributes
, NULL
,
2098 GDB_XML_EF_REPEATABLE
| GDB_XML_EF_OPTIONAL
, parse_xml_btrace_block
, NULL
},
2099 { "pt", NULL
, btrace_pt_children
, GDB_XML_EF_OPTIONAL
, parse_xml_btrace_pt
,
2101 { NULL
, NULL
, NULL
, GDB_XML_EF_NONE
, NULL
, NULL
}
2104 static const struct gdb_xml_element btrace_elements
[] = {
2105 { "btrace", btrace_attributes
, btrace_children
, GDB_XML_EF_NONE
,
2106 check_xml_btrace_version
, NULL
},
2107 { NULL
, NULL
, NULL
, GDB_XML_EF_NONE
, NULL
, NULL
}
2110 #endif /* defined (HAVE_LIBEXPAT) */
2115 parse_xml_btrace (struct btrace_data
*btrace
, const char *buffer
)
2117 struct cleanup
*cleanup
;
2120 #if defined (HAVE_LIBEXPAT)
2122 btrace
->format
= BTRACE_FORMAT_NONE
;
2124 cleanup
= make_cleanup_btrace_data (btrace
);
2125 errcode
= gdb_xml_parse_quick (_("btrace"), "btrace.dtd", btrace_elements
,
2128 error (_("Error parsing branch trace."));
2130 /* Keep parse results. */
2131 discard_cleanups (cleanup
);
2133 #else /* !defined (HAVE_LIBEXPAT) */
2135 error (_("Cannot process branch trace. XML parsing is not supported."));
2137 #endif /* !defined (HAVE_LIBEXPAT) */
2140 #if defined (HAVE_LIBEXPAT)
2142 /* Parse a btrace-conf "bts" xml record. */
2145 parse_xml_btrace_conf_bts (struct gdb_xml_parser
*parser
,
2146 const struct gdb_xml_element
*element
,
2147 void *user_data
, VEC (gdb_xml_value_s
) *attributes
)
2149 struct btrace_config
*conf
;
2150 struct gdb_xml_value
*size
;
2152 conf
= (struct btrace_config
*) user_data
;
2153 conf
->format
= BTRACE_FORMAT_BTS
;
2156 size
= xml_find_attribute (attributes
, "size");
2158 conf
->bts
.size
= (unsigned int) *(ULONGEST
*) size
->value
;
2161 /* Parse a btrace-conf "pt" xml record. */
2164 parse_xml_btrace_conf_pt (struct gdb_xml_parser
*parser
,
2165 const struct gdb_xml_element
*element
,
2166 void *user_data
, VEC (gdb_xml_value_s
) *attributes
)
2168 struct btrace_config
*conf
;
2169 struct gdb_xml_value
*size
;
2171 conf
= (struct btrace_config
*) user_data
;
2172 conf
->format
= BTRACE_FORMAT_PT
;
2175 size
= xml_find_attribute (attributes
, "size");
2177 conf
->pt
.size
= (unsigned int) *(ULONGEST
*) size
->value
;
2180 static const struct gdb_xml_attribute btrace_conf_pt_attributes
[] = {
2181 { "size", GDB_XML_AF_OPTIONAL
, gdb_xml_parse_attr_ulongest
, NULL
},
2182 { NULL
, GDB_XML_AF_NONE
, NULL
, NULL
}
2185 static const struct gdb_xml_attribute btrace_conf_bts_attributes
[] = {
2186 { "size", GDB_XML_AF_OPTIONAL
, gdb_xml_parse_attr_ulongest
, NULL
},
2187 { NULL
, GDB_XML_AF_NONE
, NULL
, NULL
}
2190 static const struct gdb_xml_element btrace_conf_children
[] = {
2191 { "bts", btrace_conf_bts_attributes
, NULL
, GDB_XML_EF_OPTIONAL
,
2192 parse_xml_btrace_conf_bts
, NULL
},
2193 { "pt", btrace_conf_pt_attributes
, NULL
, GDB_XML_EF_OPTIONAL
,
2194 parse_xml_btrace_conf_pt
, NULL
},
2195 { NULL
, NULL
, NULL
, GDB_XML_EF_NONE
, NULL
, NULL
}
2198 static const struct gdb_xml_attribute btrace_conf_attributes
[] = {
2199 { "version", GDB_XML_AF_NONE
, NULL
, NULL
},
2200 { NULL
, GDB_XML_AF_NONE
, NULL
, NULL
}
2203 static const struct gdb_xml_element btrace_conf_elements
[] = {
2204 { "btrace-conf", btrace_conf_attributes
, btrace_conf_children
,
2205 GDB_XML_EF_NONE
, NULL
, NULL
},
2206 { NULL
, NULL
, NULL
, GDB_XML_EF_NONE
, NULL
, NULL
}
2209 #endif /* defined (HAVE_LIBEXPAT) */
2214 parse_xml_btrace_conf (struct btrace_config
*conf
, const char *xml
)
2218 #if defined (HAVE_LIBEXPAT)
2220 errcode
= gdb_xml_parse_quick (_("btrace-conf"), "btrace-conf.dtd",
2221 btrace_conf_elements
, xml
, conf
);
2223 error (_("Error parsing branch trace configuration."));
2225 #else /* !defined (HAVE_LIBEXPAT) */
2227 error (_("XML parsing is not supported."));
2229 #endif /* !defined (HAVE_LIBEXPAT) */
2234 const struct btrace_insn
*
2235 btrace_insn_get (const struct btrace_insn_iterator
*it
)
2237 const struct btrace_function
*bfun
;
2238 unsigned int index
, end
;
2241 bfun
= it
->function
;
2243 /* Check if the iterator points to a gap in the trace. */
2244 if (bfun
->errcode
!= 0)
2247 /* The index is within the bounds of this function's instruction vector. */
2248 end
= VEC_length (btrace_insn_s
, bfun
->insn
);
2249 gdb_assert (0 < end
);
2250 gdb_assert (index
< end
);
2252 return VEC_index (btrace_insn_s
, bfun
->insn
, index
);
2258 btrace_insn_get_error (const struct btrace_insn_iterator
*it
)
2260 return it
->function
->errcode
;
2266 btrace_insn_number (const struct btrace_insn_iterator
*it
)
2268 return it
->function
->insn_offset
+ it
->index
;
2274 btrace_insn_begin (struct btrace_insn_iterator
*it
,
2275 const struct btrace_thread_info
*btinfo
)
2277 const struct btrace_function
*bfun
;
2279 bfun
= btinfo
->begin
;
2281 error (_("No trace."));
2283 it
->function
= bfun
;
2290 btrace_insn_end (struct btrace_insn_iterator
*it
,
2291 const struct btrace_thread_info
*btinfo
)
2293 const struct btrace_function
*bfun
;
2294 unsigned int length
;
2298 error (_("No trace."));
2300 length
= VEC_length (btrace_insn_s
, bfun
->insn
);
2302 /* The last function may either be a gap or it contains the current
     instruction, which is one past the end of the execution trace; ignore it.  */
2308 it
->function
= bfun
;
/* See btrace.h.  */

unsigned int
btrace_insn_next (struct btrace_insn_iterator *it, unsigned int stride)
{
  const struct btrace_function *bfun;
  unsigned int index, steps;

  bfun = it->function;
  steps = 0;
  index = it->index;

  while (stride != 0)
    {
      unsigned int end, space, adv;

      end = VEC_length (btrace_insn_s, bfun->insn);

      /* An empty function segment represents a gap in the trace.  We count
	 it as one instruction.  */
      if (end == 0)
	{
	  const struct btrace_function *next;

	  next = bfun->flow.next;
	  if (next == NULL)
	    break;

	  stride -= 1;
	  steps += 1;

	  bfun = next;
	  index = 0;

	  continue;
	}

      gdb_assert (0 < end);
      gdb_assert (index < end);

      /* Compute the number of instructions remaining in this segment.  */
      space = end - index;

      /* Advance the iterator as far as possible within this segment.  */
      adv = std::min (space, stride);

      stride -= adv;
      index += adv;
      steps += adv;

      /* Move to the next function if we're at the end of this one.  */
      if (index == end)
	{
	  const struct btrace_function *next;

	  next = bfun->flow.next;
	  if (next == NULL)
	    {
	      /* We stepped past the last function.

		 Let's adjust the index to point to the last instruction in
		 the previous function.  */
	      index -= 1;
	      steps -= 1;
	      break;
	    }

	  /* We now point to the first instruction in the new function.  */
	  bfun = next;
	  index = 0;
	}

      /* We did make progress.  */
      gdb_assert (adv > 0);
    }

  /* Update the iterator.  */
  it->function = bfun;
  it->index = index;

  return steps;
}
/* See btrace.h.  */

unsigned int
btrace_insn_prev (struct btrace_insn_iterator *it, unsigned int stride)
{
  const struct btrace_function *bfun;
  unsigned int index, steps;

  bfun = it->function;
  steps = 0;
  index = it->index;

  while (stride != 0)
    {
      unsigned int adv;

      /* Move to the previous function if we're at the start of this one.  */
      if (index == 0)
	{
	  const struct btrace_function *prev;

	  prev = bfun->flow.prev;
	  if (prev == NULL)
	    break;

	  /* We point to one after the last instruction in the new function.  */
	  bfun = prev;
	  index = VEC_length (btrace_insn_s, bfun->insn);

	  /* An empty function segment represents a gap in the trace.  We count
	     it as one instruction.  */
	  if (index == 0)
	    {
	      stride -= 1;
	      steps += 1;

	      continue;
	    }
	}

      /* Advance the iterator as far as possible within this segment.  */
      adv = std::min (index, stride);

      stride -= adv;
      index -= adv;
      steps += adv;

      /* We did make progress.  */
      gdb_assert (adv > 0);
    }

  /* Update the iterator.  */
  it->function = bfun;
  it->index = index;

  return steps;
}
/* See btrace.h.  */

int
btrace_insn_cmp (const struct btrace_insn_iterator *lhs,
		 const struct btrace_insn_iterator *rhs)
{
  unsigned int lnum, rnum;

  lnum = btrace_insn_number (lhs);
  rnum = btrace_insn_number (rhs);

  return (int) (lnum - rnum);
}
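/* A minimal usage sketch (illustrative, not part of the original sources):
   walking the recorded instruction history with the iterator functions
   above.  TP is assumed to be a valid thread_info pointer with branch
   trace data, and PROCESS is a hypothetical callback.  Note that
   btrace_insn_get returns NULL for a gap in the trace.

       struct btrace_insn_iterator it, end;

       btrace_insn_begin (&it, &tp->btrace);
       btrace_insn_end (&end, &tp->btrace);

       while (btrace_insn_cmp (&it, &end) < 0)
	 {
	   const struct btrace_insn *insn = btrace_insn_get (&it);

	   if (insn != NULL)
	     process (insn);

	   if (btrace_insn_next (&it, 1) == 0)
	     break;
	 }
*/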
/* See btrace.h.  */

int
btrace_find_insn_by_number (struct btrace_insn_iterator *it,
			    const struct btrace_thread_info *btinfo,
			    unsigned int number)
{
  const struct btrace_function *bfun;
  unsigned int upper, lower;

  if (VEC_empty (btrace_fun_p, btinfo->functions))
    return 0;

  lower = 0;
  bfun = VEC_index (btrace_fun_p, btinfo->functions, lower);
  if (number < bfun->insn_offset)
    return 0;

  upper = VEC_length (btrace_fun_p, btinfo->functions) - 1;
  bfun = VEC_index (btrace_fun_p, btinfo->functions, upper);
  if (number >= bfun->insn_offset + ftrace_call_num_insn (bfun))
    return 0;

  /* We assume that there are no holes in the numbering.  */
  for (;;)
    {
      const unsigned int average = lower + (upper - lower) / 2;

      bfun = VEC_index (btrace_fun_p, btinfo->functions, average);

      if (number < bfun->insn_offset)
	{
	  upper = average - 1;
	  continue;
	}

      if (number >= bfun->insn_offset + ftrace_call_num_insn (bfun))
	{
	  lower = average + 1;
	  continue;
	}

      break;
    }

  it->function = bfun;
  it->index = number - bfun->insn_offset;
  return 1;
}
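/* Worked example (illustrative, not from the original sources): assume three
   function segments with insn_offset 1, 4 and 9 holding 3, 5 and 2
   instructions, i.e. covering the instruction ranges [1, 4), [4, 9) and
   [9, 11).  Looking up NUMBER == 7 first passes the overall bounds check
   against [1, 11), then bisects with average == 0 + (2 - 0) / 2 == 1; the
   middle segment's range [4, 9) contains 7, so the iterator is set to that
   segment with index 7 - 4 == 3.  The bisection relies on the assumption
   stated above that instruction numbers have no holes.  */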
/* See btrace.h.  */

const struct btrace_function *
btrace_call_get (const struct btrace_call_iterator *it)
{
  return it->function;
}
/* See btrace.h.  */

unsigned int
btrace_call_number (const struct btrace_call_iterator *it)
{
  const struct btrace_thread_info *btinfo;
  const struct btrace_function *bfun;
  unsigned int insns;

  btinfo = it->btinfo;
  bfun = it->function;
  if (bfun != NULL)
    return bfun->number;

  /* For the end iterator, i.e. bfun == NULL, we return one more than the
     number of the last function.  */
  bfun = btinfo->end;
  insns = VEC_length (btrace_insn_s, bfun->insn);

  /* If the function contains only a single instruction (i.e. the current
     instruction), it will be skipped and its number is already the number
     of the last function.  */
  if (insns == 1)
    return bfun->number;

  /* Otherwise, return one more than the number of the last function.  */
  return bfun->number + 1;
}
/* See btrace.h.  */

void
btrace_call_begin (struct btrace_call_iterator *it,
		   const struct btrace_thread_info *btinfo)
{
  const struct btrace_function *bfun;

  bfun = btinfo->begin;
  if (bfun == NULL)
    error (_("No trace."));

  it->btinfo = btinfo;
  it->function = bfun;
}
/* See btrace.h.  */

void
btrace_call_end (struct btrace_call_iterator *it,
		 const struct btrace_thread_info *btinfo)
{
  const struct btrace_function *bfun;

  bfun = btinfo->end;
  if (bfun == NULL)
    error (_("No trace."));

  it->btinfo = btinfo;
  it->function = NULL;
}
/* See btrace.h.  */

unsigned int
btrace_call_next (struct btrace_call_iterator *it, unsigned int stride)
{
  const struct btrace_function *bfun;
  unsigned int steps;

  bfun = it->function;
  steps = 0;
  while (bfun != NULL)
    {
      const struct btrace_function *next;
      unsigned int insns;

      next = bfun->flow.next;
      if (next == NULL)
	{
	  /* Ignore the last function if it only contains a single
	     (i.e. the current) instruction.  */
	  insns = VEC_length (btrace_insn_s, bfun->insn);
	  if (insns == 1)
	    steps -= 1;
	}

      if (stride == steps)
	break;

      bfun = next;
      steps += 1;
    }

  it->function = bfun;
  return steps;
}
/* See btrace.h.  */

unsigned int
btrace_call_prev (struct btrace_call_iterator *it, unsigned int stride)
{
  const struct btrace_thread_info *btinfo;
  const struct btrace_function *bfun;
  unsigned int steps;

  bfun = it->function;
  steps = 0;

  if (bfun == NULL)
    {
      unsigned int insns;

      btinfo = it->btinfo;
      bfun = btinfo->end;
      if (bfun == NULL)
	return 0;

      /* Ignore the last function if it only contains a single
	 (i.e. the current) instruction.  */
      insns = VEC_length (btrace_insn_s, bfun->insn);
      if (insns == 1)
	bfun = bfun->flow.prev;

      if (bfun == NULL)
	return 0;

      steps += 1;
    }

  while (steps < stride)
    {
      const struct btrace_function *prev;

      prev = bfun->flow.prev;
      if (prev == NULL)
	break;

      bfun = prev;
      steps += 1;
    }

  it->function = bfun;
  return steps;
}
/* See btrace.h.  */

int
btrace_call_cmp (const struct btrace_call_iterator *lhs,
		 const struct btrace_call_iterator *rhs)
{
  unsigned int lnum, rnum;

  lnum = btrace_call_number (lhs);
  rnum = btrace_call_number (rhs);

  return (int) (lnum - rnum);
}
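/* A minimal usage sketch (illustrative, not part of the original sources):
   walking the recorded function call history with the iterator functions
   above.  TP is assumed to be a valid thread_info pointer with branch
   trace data, and PROCESS_CALL is a hypothetical callback.

       struct btrace_call_iterator it, end;

       btrace_call_begin (&it, &tp->btrace);
       btrace_call_end (&end, &tp->btrace);

       while (btrace_call_cmp (&it, &end) < 0)
	 {
	   const struct btrace_function *bfun = btrace_call_get (&it);

	   process_call (bfun);

	   if (btrace_call_next (&it, 1) == 0)
	     break;
	 }
*/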
/* See btrace.h.  */

int
btrace_find_call_by_number (struct btrace_call_iterator *it,
			    const struct btrace_thread_info *btinfo,
			    unsigned int number)
{
  const struct btrace_function *bfun;

  for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
    {
      unsigned int bnum;

      bnum = bfun->number;
      if (number == bnum)
	{
	  it->btinfo = btinfo;
	  it->function = bfun;
	  return 1;
	}

      /* Functions are ordered and numbered consecutively.  We could bail out
	 earlier.  On the other hand, it is very unlikely that we search for
	 a nonexistent function.  */
    }

  return 0;
}
/* See btrace.h.  */

void
btrace_set_insn_history (struct btrace_thread_info *btinfo,
			 const struct btrace_insn_iterator *begin,
			 const struct btrace_insn_iterator *end)
{
  if (btinfo->insn_history == NULL)
    btinfo->insn_history = XCNEW (struct btrace_insn_history);

  btinfo->insn_history->begin = *begin;
  btinfo->insn_history->end = *end;
}
/* See btrace.h.  */

void
btrace_set_call_history (struct btrace_thread_info *btinfo,
			 const struct btrace_call_iterator *begin,
			 const struct btrace_call_iterator *end)
{
  gdb_assert (begin->btinfo == end->btinfo);

  if (btinfo->call_history == NULL)
    btinfo->call_history = XCNEW (struct btrace_call_history);

  btinfo->call_history->begin = *begin;
  btinfo->call_history->end = *end;
}
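/* Illustrative note (an assumption, not taken from the original sources):
   callers would typically pair the two setters above with iterators obtained
   from the begin/end or find-by-number functions, e.g.

       struct btrace_call_iterator begin, end;

       if (btrace_find_call_by_number (&begin, btinfo, first)
	   && btrace_find_call_by_number (&end, btinfo, last))
	 btrace_set_call_history (btinfo, &begin, &end);

   where FIRST and LAST are hypothetical call numbers chosen by the caller.  */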
/* See btrace.h.  */

int
btrace_is_replaying (struct thread_info *tp)
{
  return tp->btrace.replay != NULL;
}
/* See btrace.h.  */

int
btrace_is_empty (struct thread_info *tp)
{
  struct btrace_insn_iterator begin, end;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;

  if (btinfo->begin == NULL)
    return 1;

  btrace_insn_begin (&begin, btinfo);
  btrace_insn_end (&end, btinfo);

  return btrace_insn_cmp (&begin, &end) == 0;
}
/* Forward the cleanup request.  */

static void
do_btrace_data_cleanup (void *arg)
{
  btrace_data_fini ((struct btrace_data *) arg);
}
/* See btrace.h.  */

struct cleanup *
make_cleanup_btrace_data (struct btrace_data *data)
{
  return make_cleanup (do_btrace_data_cleanup, data);
}
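/* A minimal usage sketch (illustrative, not part of the original sources) of
   the cleanup helper above: tie the lifetime of a btrace_data object to a
   cleanup chain so that the trace data is released even if an error is
   thrown while it is being used.

       struct btrace_data data;
       struct cleanup *cleanup;

       btrace_data_init (&data);
       cleanup = make_cleanup_btrace_data (&data);

       ... fill and use DATA; an error () call unwinds through the cleanup
	   and releases the trace data ...

       do_cleanups (cleanup);
*/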
#if defined (HAVE_LIBIPT)
/* Print a single packet.  */

static void
pt_print_packet (const struct pt_packet *packet)
{
  switch (packet->type)
    {
    default:
      printf_unfiltered (("[??: %x]"), packet->type);
      break;

    case ppt_psb:
      printf_unfiltered (("psb"));
      break;

    case ppt_psbend:
      printf_unfiltered (("psbend"));
      break;

    case ppt_pad:
      printf_unfiltered (("pad"));
      break;

    case ppt_tip:
      printf_unfiltered (("tip %u: 0x%" PRIx64 ""),
			 packet->payload.ip.ipc,
			 packet->payload.ip.ip);
      break;

    case ppt_tip_pge:
      printf_unfiltered (("tip.pge %u: 0x%" PRIx64 ""),
			 packet->payload.ip.ipc,
			 packet->payload.ip.ip);
      break;

    case ppt_tip_pgd:
      printf_unfiltered (("tip.pgd %u: 0x%" PRIx64 ""),
			 packet->payload.ip.ipc,
			 packet->payload.ip.ip);
      break;

    case ppt_fup:
      printf_unfiltered (("fup %u: 0x%" PRIx64 ""),
			 packet->payload.ip.ipc,
			 packet->payload.ip.ip);
      break;

    case ppt_tnt_8:
      printf_unfiltered (("tnt-8 %u: 0x%" PRIx64 ""),
			 packet->payload.tnt.bit_size,
			 packet->payload.tnt.payload);
      break;

    case ppt_tnt_64:
      printf_unfiltered (("tnt-64 %u: 0x%" PRIx64 ""),
			 packet->payload.tnt.bit_size,
			 packet->payload.tnt.payload);
      break;

    case ppt_pip:
      printf_unfiltered (("pip %" PRIx64 "%s"), packet->payload.pip.cr3,
			 packet->payload.pip.nr ? (" nr") : (""));
      break;

    case ppt_tsc:
      printf_unfiltered (("tsc %" PRIx64 ""), packet->payload.tsc.tsc);
      break;

    case ppt_cbr:
      printf_unfiltered (("cbr %u"), packet->payload.cbr.ratio);
      break;

    case ppt_mode:
      switch (packet->payload.mode.leaf)
	{
	default:
	  printf_unfiltered (("mode %u"), packet->payload.mode.leaf);
	  break;

	case pt_mol_exec:
	  printf_unfiltered (("mode.exec%s%s"),
			     packet->payload.mode.bits.exec.csl
			     ? (" cs.l") : (""),
			     packet->payload.mode.bits.exec.csd
			     ? (" cs.d") : (""));
	  break;

	case pt_mol_tsx:
	  printf_unfiltered (("mode.tsx%s%s"),
			     packet->payload.mode.bits.tsx.intx
			     ? (" intx") : (""),
			     packet->payload.mode.bits.tsx.abrt
			     ? (" abrt") : (""));
	  break;
	}
      break;

    case ppt_ovf:
      printf_unfiltered (("ovf"));
      break;

    case ppt_stop:
      printf_unfiltered (("stop"));
      break;

    case ppt_vmcs:
      printf_unfiltered (("vmcs %" PRIx64 ""), packet->payload.vmcs.base);
      break;

    case ppt_tma:
      printf_unfiltered (("tma %x %x"), packet->payload.tma.ctc,
			 packet->payload.tma.fc);
      break;

    case ppt_mtc:
      printf_unfiltered (("mtc %x"), packet->payload.mtc.ctc);
      break;

    case ppt_cyc:
      printf_unfiltered (("cyc %" PRIx64 ""), packet->payload.cyc.value);
      break;

    case ppt_mnt:
      printf_unfiltered (("mnt %" PRIx64 ""), packet->payload.mnt.payload);
      break;
    }
}
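/* Sample output (illustrative): a decoded TIP packet prints as
   "tip 3: 0x4004d6" and a TNT-8 packet as "tnt-8 6: 0x2a", following the
   format strings used in the switch above.  */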
/* Decode packets into MAINT using DECODER.  */

static void
btrace_maint_decode_pt (struct btrace_maint_info *maint,
			struct pt_packet_decoder *decoder)
{
  int errcode;

  for (;;)
    {
      struct btrace_pt_packet packet;

      errcode = pt_pkt_sync_forward (decoder);
      if (errcode < 0)
	break;

      for (;;)
	{
	  pt_pkt_get_offset (decoder, &packet.offset);

	  errcode = pt_pkt_next (decoder, &packet.packet,
				 sizeof(packet.packet));
	  if (errcode < 0)
	    break;

	  if (maint_btrace_pt_skip_pad == 0 || packet.packet.type != ppt_pad)
	    {
	      packet.errcode = pt_errcode (errcode);
	      VEC_safe_push (btrace_pt_packet_s, maint->variant.pt.packets,
			     &packet);
	    }
	}

      if (errcode == -pte_eos)
	break;

      packet.errcode = pt_errcode (errcode);
      VEC_safe_push (btrace_pt_packet_s, maint->variant.pt.packets,
		     &packet);

      warning (_("Error at trace offset 0x%" PRIx64 ": %s."),
	       packet.offset, pt_errstr (packet.errcode));
    }

  if (errcode != -pte_eos)
    warning (_("Failed to synchronize onto the Intel Processor Trace "
	       "stream: %s."), pt_errstr (pt_errcode (errcode)));
}
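/* Structural note (added for clarity): the decode loop above consists of two
   nested loops.  The outer loop re-synchronizes the decoder onto the next
   synchronization point after a decode error; the inner loop reads packets
   until the decoder reports an error or the end of the stream (-pte_eos).
   PAD packets are dropped when maint_btrace_pt_skip_pad is set.  */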
/* Update the packet history in BTINFO.  */

static void
btrace_maint_update_pt_packets (struct btrace_thread_info *btinfo)
{
  volatile struct gdb_exception except;
  struct pt_packet_decoder *decoder;
  struct btrace_data_pt *pt;
  struct pt_config config;
  int errcode;

  pt = &btinfo->data.variant.pt;

  /* Nothing to do if there is no trace.  */
  if (pt->size == 0)
    return;

  memset (&config, 0, sizeof(config));

  config.size = sizeof (config);
  config.begin = pt->data;
  config.end = pt->data + pt->size;

  config.cpu.vendor = pt_translate_cpu_vendor (pt->config.cpu.vendor);
  config.cpu.family = pt->config.cpu.family;
  config.cpu.model = pt->config.cpu.model;
  config.cpu.stepping = pt->config.cpu.stepping;

  errcode = pt_cpu_errata (&config.errata, &config.cpu);
  if (errcode < 0)
    error (_("Failed to configure the Intel Processor Trace decoder: %s."),
	   pt_errstr (pt_errcode (errcode)));

  decoder = pt_pkt_alloc_decoder (&config);
  if (decoder == NULL)
    error (_("Failed to allocate the Intel Processor Trace decoder."));

  TRY
    {
      btrace_maint_decode_pt (&btinfo->maint, decoder);
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      pt_pkt_free_decoder (decoder);

      if (except.reason < 0)
	throw_exception (except);
    }
  END_CATCH

  pt_pkt_free_decoder (decoder);
}
#endif /* defined (HAVE_LIBIPT) */
/* Update the packet maintenance information for BTINFO and store the
   low and high bounds into BEGIN and END, respectively.
   Store the current iterator state into FROM and TO.  */

static void
btrace_maint_update_packets (struct btrace_thread_info *btinfo,
			     unsigned int *begin, unsigned int *end,
			     unsigned int *from, unsigned int *to)
{
  switch (btinfo->data.format)
    {
    default:
      *begin = 0;
      *end = 0;
      *from = 0;
      *to = 0;
      break;

    case BTRACE_FORMAT_BTS:
      /* Nothing to do - we operate directly on BTINFO->DATA.  */
      *begin = 0;
      *end = VEC_length (btrace_block_s, btinfo->data.variant.bts.blocks);
      *from = btinfo->maint.variant.bts.packet_history.begin;
      *to = btinfo->maint.variant.bts.packet_history.end;
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      if (VEC_empty (btrace_pt_packet_s, btinfo->maint.variant.pt.packets))
	btrace_maint_update_pt_packets (btinfo);

      *begin = 0;
      *end = VEC_length (btrace_pt_packet_s, btinfo->maint.variant.pt.packets);
      *from = btinfo->maint.variant.pt.packet_history.begin;
      *to = btinfo->maint.variant.pt.packet_history.end;
      break;
#endif /* defined (HAVE_LIBIPT) */
    }
}
/* Print packets in BTINFO from BEGIN (inclusive) until END (exclusive) and
   update the current iterator position.  */

static void
btrace_maint_print_packets (struct btrace_thread_info *btinfo,
			    unsigned int begin, unsigned int end)
{
  switch (btinfo->data.format)
    {
    default:
      break;

    case BTRACE_FORMAT_BTS:
      {
	VEC (btrace_block_s) *blocks;
	unsigned int blk;

	blocks = btinfo->data.variant.bts.blocks;
	for (blk = begin; blk < end; ++blk)
	  {
	    const btrace_block_s *block;

	    block = VEC_index (btrace_block_s, blocks, blk);

	    printf_unfiltered ("%u\tbegin: %s, end: %s\n", blk,
			       core_addr_to_string_nz (block->begin),
			       core_addr_to_string_nz (block->end));
	  }

	btinfo->maint.variant.bts.packet_history.begin = begin;
	btinfo->maint.variant.bts.packet_history.end = end;
      }
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      {
	VEC (btrace_pt_packet_s) *packets;
	unsigned int pkt;

	packets = btinfo->maint.variant.pt.packets;
	for (pkt = begin; pkt < end; ++pkt)
	  {
	    const struct btrace_pt_packet *packet;

	    packet = VEC_index (btrace_pt_packet_s, packets, pkt);

	    printf_unfiltered ("%u\t", pkt);
	    printf_unfiltered ("0x%" PRIx64 "\t", packet->offset);

	    if (packet->errcode == pte_ok)
	      pt_print_packet (&packet->packet);
	    else
	      printf_unfiltered ("[error: %s]", pt_errstr (packet->errcode));

	    printf_unfiltered ("\n");
	  }

	btinfo->maint.variant.pt.packet_history.begin = begin;
	btinfo->maint.variant.pt.packet_history.end = end;
      }
      break;
#endif /* defined (HAVE_LIBIPT) */
    }
}
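/* Sample output (illustrative): for the BTS format each line looks like
   "3	begin: 0x4004d6, end: 0x4004f1", and for the PT format like
   "3	0x1c0	tip.pge 3: 0x4004d6", following the printf formats above.  */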
/* Read a number from an argument string.  */

static unsigned int
get_uint (char **arg)
{
  char *begin, *end, *pos;
  unsigned long number;

  begin = *arg;
  pos = skip_spaces (begin);

  if (!isdigit (*pos))
    error (_("Expected positive number, got: %s."), pos);

  number = strtoul (pos, &end, 10);
  if (number > UINT_MAX)
    error (_("Number too big."));

  *arg += (end - begin);

  return (unsigned int) number;
}
/* Read a context size from an argument string.  */

static int
get_context_size (char **arg)
{
  char *pos;

  pos = skip_spaces (*arg);

  if (!isdigit (*pos))
    error (_("Expected positive number, got: %s."), pos);

  return strtol (pos, arg, 10);
}
/* Complain about junk at the end of an argument string.  */

static void
no_chunk (char *arg)
{
  if (*arg != 0)
    error (_("Junk after argument: %s."), arg);
}
/* The "maintenance btrace packet-history" command.  */

static void
maint_btrace_packet_history_cmd (char *arg, int from_tty)
{
  struct btrace_thread_info *btinfo;
  struct thread_info *tp;
  unsigned int size, begin, end, from, to;

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  size = 10;
  btinfo = &tp->btrace;

  btrace_maint_update_packets (btinfo, &begin, &end, &from, &to);
  if (begin == end)
    {
      printf_unfiltered (_("No trace.\n"));
      return;
    }

  if (arg == NULL || *arg == 0 || strcmp (arg, "+") == 0)
    {
      from = to;

      if (end - from < size)
	size = end - from;
      to = from + size;
    }
  else if (strcmp (arg, "-") == 0)
    {
      to = from;

      if (to - begin < size)
	size = to - begin;
      from = to - size;
    }
  else
    {
      from = get_uint (&arg);
      if (from >= end)
	error (_("'%u' is out of range."), from);

      arg = skip_spaces (arg);
      if (*arg == ',')
	{
	  arg = skip_spaces (++arg);

	  if (*arg == '+')
	    {
	      arg += 1;
	      size = get_context_size (&arg);

	      no_chunk (arg);

	      if (end - from < size)
		size = end - from;
	      to = from + size;
	    }
	  else if (*arg == '-')
	    {
	      arg += 1;
	      size = get_context_size (&arg);

	      no_chunk (arg);

	      /* Include the packet given as first argument.  */
	      from += 1;
	      to = from;

	      if (to - begin < size)
		size = to - begin;
	      from = to - size;
	    }
	  else
	    {
	      to = get_uint (&arg);

	      /* Include the packet at the second argument and silently
		 truncate the range.  */
	      if (to < end)
		to += 1;
	      else
		to = end;

	      no_chunk (arg);
	    }
	}
      else
	{
	  no_chunk (arg);

	  if (end - from < size)
	    size = end - from;
	  to = from + size;
	}
    }

  btrace_maint_print_packets (btinfo, from, to);
}
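/* Example invocations (illustrative), matching the argument forms parsed
   above and the help text registered in _initialize_btrace below:

       maint btrace packet-history              print the next ten packets
       maint btrace packet-history -            print the previous ten packets
       maint btrace packet-history 100          print ten packets starting at 100
       maint btrace packet-history 100,200      print packets 100 up to and including 200
       maint btrace packet-history 100,+20      print packets 100 up to, but not including, 120
       maint btrace packet-history 100,-20      print the 20 packets up to and including 100
*/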
/* The "maintenance btrace clear-packet-history" command.  */

static void
maint_btrace_clear_packet_history_cmd (char *args, int from_tty)
{
  struct btrace_thread_info *btinfo;
  struct thread_info *tp;

  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  btinfo = &tp->btrace;

  /* Must clear the maint data before - it depends on BTINFO->DATA.  */
  btrace_maint_clear (btinfo);
  btrace_data_clear (&btinfo->data);
}
/* The "maintenance btrace clear" command.  */

static void
maint_btrace_clear_cmd (char *args, int from_tty)
{
  struct btrace_thread_info *btinfo;
  struct thread_info *tp;

  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  btrace_clear (tp);
}
/* The "maintenance btrace" command.  */

static void
maint_btrace_cmd (char *args, int from_tty)
{
  help_list (maint_btrace_cmdlist, "maintenance btrace ", all_commands,
	     gdb_stdout);
}
/* The "maintenance set btrace" command.  */

static void
maint_btrace_set_cmd (char *args, int from_tty)
{
  help_list (maint_btrace_set_cmdlist, "maintenance set btrace ", all_commands,
	     gdb_stdout);
}
/* The "maintenance show btrace" command.  */

static void
maint_btrace_show_cmd (char *args, int from_tty)
{
  help_list (maint_btrace_show_cmdlist, "maintenance show btrace ",
	     all_commands, gdb_stdout);
}
/* The "maintenance set btrace pt" command.  */

static void
maint_btrace_pt_set_cmd (char *args, int from_tty)
{
  help_list (maint_btrace_pt_set_cmdlist, "maintenance set btrace pt ",
	     all_commands, gdb_stdout);
}
/* The "maintenance show btrace pt" command.  */

static void
maint_btrace_pt_show_cmd (char *args, int from_tty)
{
  help_list (maint_btrace_pt_show_cmdlist, "maintenance show btrace pt ",
	     all_commands, gdb_stdout);
}
/* The "maintenance info btrace" command.  */

static void
maint_info_btrace_cmd (char *args, int from_tty)
{
  struct btrace_thread_info *btinfo;
  struct thread_info *tp;
  const struct btrace_config *conf;

  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  btinfo = &tp->btrace;

  conf = btrace_conf (btinfo);
  if (conf == NULL)
    error (_("No btrace configuration."));

  printf_unfiltered (_("Format: %s.\n"),
		     btrace_format_string (conf->format));

  switch (conf->format)
    {
    default:
      break;

    case BTRACE_FORMAT_BTS:
      printf_unfiltered (_("Number of packets: %u.\n"),
			 VEC_length (btrace_block_s,
				     btinfo->data.variant.bts.blocks));
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      {
	struct pt_version version;

	version = pt_library_version ();
	printf_unfiltered (_("Version: %u.%u.%u%s.\n"), version.major,
			   version.minor, version.build,
			   version.ext != NULL ? version.ext : "");

	btrace_maint_update_pt_packets (btinfo);
	printf_unfiltered (_("Number of packets: %u.\n"),
			   VEC_length (btrace_pt_packet_s,
				       btinfo->maint.variant.pt.packets));
      }
      break;
#endif /* defined (HAVE_LIBIPT) */
    }
}
/* The "maint show btrace pt skip-pad" show value function.  */

static void
show_maint_btrace_pt_skip_pad (struct ui_file *file, int from_tty,
			       struct cmd_list_element *c,
			       const char *value)
{
  fprintf_filtered (file, _("Skip PAD packets is %s.\n"), value);
}
/* Initialize btrace maintenance commands.  */

void _initialize_btrace (void);
void
_initialize_btrace (void)
{
  add_cmd ("btrace", class_maintenance, maint_info_btrace_cmd,
	   _("Info about branch tracing data."), &maintenanceinfolist);

  add_prefix_cmd ("btrace", class_maintenance, maint_btrace_cmd,
		  _("Branch tracing maintenance commands."),
		  &maint_btrace_cmdlist, "maintenance btrace ",
		  0, &maintenancelist);

  add_prefix_cmd ("btrace", class_maintenance, maint_btrace_set_cmd, _("\
Set branch tracing specific variables."),
		  &maint_btrace_set_cmdlist, "maintenance set btrace ",
		  0, &maintenance_set_cmdlist);

  add_prefix_cmd ("pt", class_maintenance, maint_btrace_pt_set_cmd, _("\
Set Intel Processor Trace specific variables."),
		  &maint_btrace_pt_set_cmdlist, "maintenance set btrace pt ",
		  0, &maint_btrace_set_cmdlist);

  add_prefix_cmd ("btrace", class_maintenance, maint_btrace_show_cmd, _("\
Show branch tracing specific variables."),
		  &maint_btrace_show_cmdlist, "maintenance show btrace ",
		  0, &maintenance_show_cmdlist);

  add_prefix_cmd ("pt", class_maintenance, maint_btrace_pt_show_cmd, _("\
Show Intel Processor Trace specific variables."),
		  &maint_btrace_pt_show_cmdlist, "maintenance show btrace pt ",
		  0, &maint_btrace_show_cmdlist);

  add_setshow_boolean_cmd ("skip-pad", class_maintenance,
			   &maint_btrace_pt_skip_pad, _("\
Set whether PAD packets should be skipped in the btrace packet history."), _("\
Show whether PAD packets should be skipped in the btrace packet history."), _("\
When enabled, PAD packets are ignored in the btrace packet history."),
			   NULL, show_maint_btrace_pt_skip_pad,
			   &maint_btrace_pt_set_cmdlist,
			   &maint_btrace_pt_show_cmdlist);

  add_cmd ("packet-history", class_maintenance, maint_btrace_packet_history_cmd,
	   _("Print the raw branch tracing data.\n\
With no argument, print ten more packets after the previous ten-line print.\n\
With '-' as argument print ten packets before a previous ten-line print.\n\
One argument specifies the starting packet of a ten-line print.\n\
Two arguments with comma between specify starting and ending packets to \
print.\n\
Preceded with '+'/'-' the second argument specifies the distance from the \
first.\n"),
	   &maint_btrace_cmdlist);

  add_cmd ("clear-packet-history", class_maintenance,
	   maint_btrace_clear_packet_history_cmd,
	   _("Clears the branch tracing packet history.\n\
Discards the raw branch tracing data but not the execution history data.\n\
"),
	   &maint_btrace_cmdlist);

  add_cmd ("clear", class_maintenance, maint_btrace_clear_cmd,
	   _("Clears the branch tracing data.\n\
Discards the raw branch tracing data and the execution history data.\n\
The next 'record' command will fetch the branch tracing data anew.\n\
"),
	   &maint_btrace_cmdlist);
}