/* Branch trace support for GDB, the GNU debugger.

   Copyright (C) 2013-2017 Free Software Foundation, Inc.

   Contributed by Intel Corp. <markus.t.metzger@intel.com>

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "btrace.h"
#include "gdbthread.h"
#include "inferior.h"
#include "target.h"
#include "record.h"
#include "symtab.h"
#include "disasm.h"
#include "source.h"
#include "filenames.h"
#include "xml-support.h"
#include "regcache.h"
#include "rsp-low.h"
#include "gdbcmd.h"
#include "cli/cli-utils.h"
/* Command lists for btrace maintenance commands.  */
static struct cmd_list_element *maint_btrace_cmdlist;
static struct cmd_list_element *maint_btrace_set_cmdlist;
static struct cmd_list_element *maint_btrace_show_cmdlist;
static struct cmd_list_element *maint_btrace_pt_set_cmdlist;
static struct cmd_list_element *maint_btrace_pt_show_cmdlist;

/* Control whether to skip PAD packets when computing the packet history.  */
static int maint_btrace_pt_skip_pad = 1;

/* A vector of function segments.  */
typedef struct btrace_function * bfun_s;
DEF_VEC_P (bfun_s);

static void btrace_add_pc (struct thread_info *tp);

/* Print a record debug message.  Use do ... while (0) to avoid ambiguities
   when used in if statements.  */

#define DEBUG(msg, args...)                                            \
  do                                                                   \
    {                                                                  \
      if (record_debug != 0)                                           \
        fprintf_unfiltered (gdb_stdlog,                                \
                            "[btrace] " msg "\n", ##args);             \
    }                                                                  \
  while (0)

#define DEBUG_FTRACE(msg, args...) DEBUG ("[ftrace] " msg, ##args)
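
/* For illustration: with "set debug record 1" in effect, a call such as

     DEBUG ("enable thread %s (%s)", print_thread_id (tp),
            target_pid_to_str (tp->ptid));

   writes a "[btrace] enable thread ..." line to gdb_stdlog.  */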
/* Return the function name of a recorded function segment for printing.
   This function never returns NULL.  */

static const char *
ftrace_print_function_name (const struct btrace_function *bfun)
{
  struct minimal_symbol *msym;
  struct symbol *sym;

  msym = bfun->msym;
  sym = bfun->sym;

  if (sym != NULL)
    return SYMBOL_PRINT_NAME (sym);

  if (msym != NULL)
    return MSYMBOL_PRINT_NAME (msym);

  return "<unknown>";
}

/* Return the file name of a recorded function segment for printing.
   This function never returns NULL.  */

static const char *
ftrace_print_filename (const struct btrace_function *bfun)
{
  struct symbol *sym;
  const char *filename;

  sym = bfun->sym;

  if (sym != NULL)
    filename = symtab_to_filename_for_display (symbol_symtab (sym));
  else
    filename = "<unknown>";

  return filename;
}

/* Return a string representation of the address of an instruction.
   This function never returns NULL.  */

static const char *
ftrace_print_insn_addr (const struct btrace_insn *insn)
{
  if (insn == NULL)
    return "<nil>";

  return core_addr_to_string_nz (insn->pc);
}

/* Print an ftrace debug status message.  */

static void
ftrace_debug (const struct btrace_function *bfun, const char *prefix)
{
  const char *fun, *file;
  unsigned int ibegin, iend;
  int level;

  fun = ftrace_print_function_name (bfun);
  file = ftrace_print_filename (bfun);
  level = bfun->level;

  ibegin = bfun->insn_offset;
  iend = ibegin + VEC_length (btrace_insn_s, bfun->insn);

  DEBUG_FTRACE ("%s: fun = %s, file = %s, level = %d, insn = [%u; %u)",
                prefix, fun, file, level, ibegin, iend);
}

/* Return the number of instructions in a given function call segment.  */

static unsigned int
ftrace_call_num_insn (const struct btrace_function* bfun)
{
  if (bfun == NULL)
    return 0;

  /* A gap is always counted as one instruction.  */
  if (bfun->errcode != 0)
    return 1;

  return VEC_length (btrace_insn_s, bfun->insn);
}
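
/* Function segments are identified by their 1-based position in
   BTINFO->FUNCTIONS.  A segment number of zero - e.g. in a segment's UP
   field - means that no such segment exists.  */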
/* Return the function segment with the given NUMBER or NULL if no such segment
   exists.  BTINFO is the branch trace information for the current thread.  */

static struct btrace_function *
ftrace_find_call_by_number (const struct btrace_thread_info *btinfo,
                            unsigned int number)
{
  if (number == 0 || number > btinfo->functions.size ())
    return NULL;

  return btinfo->functions[number - 1];
}

/* Return non-zero if BFUN does not match MFUN and FUN,
   return zero otherwise.  */

static int
ftrace_function_switched (const struct btrace_function *bfun,
                          const struct minimal_symbol *mfun,
                          const struct symbol *fun)
{
  struct minimal_symbol *msym;
  struct symbol *sym;

  msym = bfun->msym;
  sym = bfun->sym;

  /* If the minimal symbol changed, we certainly switched functions.  */
  if (mfun != NULL && msym != NULL
      && strcmp (MSYMBOL_LINKAGE_NAME (mfun), MSYMBOL_LINKAGE_NAME (msym)) != 0)
    return 1;

  /* If the symbol changed, we certainly switched functions.  */
  if (fun != NULL && sym != NULL)
    {
      const char *bfname, *fname;

      /* Check the function name.  */
      if (strcmp (SYMBOL_LINKAGE_NAME (fun), SYMBOL_LINKAGE_NAME (sym)) != 0)
        return 1;

      /* Check the location of those functions, as well.  */
      bfname = symtab_to_fullname (symbol_symtab (sym));
      fname = symtab_to_fullname (symbol_symtab (fun));
      if (filename_cmp (fname, bfname) != 0)
        return 1;
    }

  /* If we lost symbol information, we switched functions.  */
  if (!(msym == NULL && sym == NULL) && mfun == NULL && fun == NULL)
    return 1;

  /* If we gained symbol information, we switched functions.  */
  if (msym == NULL && sym == NULL && !(mfun == NULL && fun == NULL))
    return 1;

  return 0;
}
/* Allocate and initialize a new branch trace function segment at the end of
   the trace.
   BTINFO is the branch trace information for the current thread.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_function (struct btrace_thread_info *btinfo,
                     struct minimal_symbol *mfun,
                     struct symbol *fun)
{
  struct btrace_function *bfun;

  bfun = XCNEW (struct btrace_function);

  bfun->msym = mfun;
  bfun->sym = fun;

  if (btinfo->functions.empty ())
    {
      /* Start counting at one.  */
      bfun->number = 1;
      bfun->insn_offset = 1;
    }
  else
    {
      struct btrace_function *prev = btinfo->functions.back ();

      gdb_assert (prev->flow.next == NULL);
      prev->flow.next = bfun;
      bfun->flow.prev = prev;

      bfun->number = prev->number + 1;
      bfun->insn_offset = prev->insn_offset + ftrace_call_num_insn (prev);
      bfun->level = prev->level;
    }

  btinfo->functions.push_back (bfun);
  return bfun;
}

/* Update the UP field of a function segment.  */

static void
ftrace_update_caller (struct btrace_function *bfun,
                      struct btrace_function *caller,
                      enum btrace_function_flag flags)
{
  if (bfun->up != 0)
    ftrace_debug (bfun, "updating caller");

  bfun->up = caller->number;
  bfun->flags = flags;

  ftrace_debug (bfun, "set caller");
  ftrace_debug (caller, "..to");
}

/* Fix up the caller for all segments of a function.  */

static void
ftrace_fixup_caller (struct btrace_function *bfun,
                     struct btrace_function *caller,
                     enum btrace_function_flag flags)
{
  struct btrace_function *prev, *next;

  ftrace_update_caller (bfun, caller, flags);

  /* Update all function segments belonging to the same function.  */
  for (prev = bfun->segment.prev; prev != NULL; prev = prev->segment.prev)
    ftrace_update_caller (prev, caller, flags);

  for (next = bfun->segment.next; next != NULL; next = next->segment.next)
    ftrace_update_caller (next, caller, flags);
}
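
/* Note the two kinds of links between function segments: FLOW links
   chain all segments in chronological execution order, while SEGMENT
   links chain the segments that belong to one and the same function
   instance, e.g. across a call and the matching return.  */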
/* Add a new function segment for a call at the end of the trace.
   BTINFO is the branch trace information for the current thread.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_call (struct btrace_thread_info *btinfo,
                 struct minimal_symbol *mfun,
                 struct symbol *fun)
{
  const unsigned int length = btinfo->functions.size ();
  struct btrace_function *bfun = ftrace_new_function (btinfo, mfun, fun);

  bfun->up = length;
  bfun->level += 1;

  ftrace_debug (bfun, "new call");

  return bfun;
}

/* Add a new function segment for a tail call at the end of the trace.
   BTINFO is the branch trace information for the current thread.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_tailcall (struct btrace_thread_info *btinfo,
                     struct minimal_symbol *mfun,
                     struct symbol *fun)
{
  const unsigned int length = btinfo->functions.size ();
  struct btrace_function *bfun = ftrace_new_function (btinfo, mfun, fun);

  bfun->up = length;
  bfun->level += 1;
  bfun->flags |= BFUN_UP_LINKS_TO_TAILCALL;

  ftrace_debug (bfun, "new tail call");

  return bfun;
}
/* Return the caller of BFUN or NULL if there is none.  This function skips
   tail calls in the call chain.  BTINFO is the branch trace information for
   the current thread.  */

static struct btrace_function *
ftrace_get_caller (struct btrace_thread_info *btinfo,
                   struct btrace_function *bfun)
{
  for (; bfun != NULL; bfun = ftrace_find_call_by_number (btinfo, bfun->up))
    if ((bfun->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
      return ftrace_find_call_by_number (btinfo, bfun->up);

  return NULL;
}

/* Find the innermost caller in the back trace of BFUN with MFUN/FUN
   symbol information.  BTINFO is the branch trace information for the current
   thread.  */

static struct btrace_function *
ftrace_find_caller (struct btrace_thread_info *btinfo,
                    struct btrace_function *bfun,
                    struct minimal_symbol *mfun,
                    struct symbol *fun)
{
  for (; bfun != NULL; bfun = ftrace_find_call_by_number (btinfo, bfun->up))
    {
      /* Skip functions with incompatible symbol information.  */
      if (ftrace_function_switched (bfun, mfun, fun))
        continue;

      /* This is the function segment we're looking for.  */
      break;
    }

  return bfun;
}
/* Find the innermost caller in the back trace of BFUN, skipping all
   function segments that do not end with a call instruction (e.g.
   tail calls ending with a jump).  BTINFO is the branch trace information for
   the current thread.  */

static struct btrace_function *
ftrace_find_call (struct btrace_thread_info *btinfo,
                  struct btrace_function *bfun)
{
  for (; bfun != NULL; bfun = ftrace_find_call_by_number (btinfo, bfun->up))
    {
      struct btrace_insn *last;

      /* Skip gaps.  */
      if (bfun->errcode != 0)
        continue;

      last = VEC_last (btrace_insn_s, bfun->insn);

      if (last->iclass == BTRACE_INSN_CALL)
        break;
    }

  return bfun;
}

/* Add a continuation segment for a function into which we return at the end of
   the trace.
   BTINFO is the branch trace information for the current thread.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_return (struct btrace_thread_info *btinfo,
                   struct minimal_symbol *mfun,
                   struct symbol *fun)
{
  struct btrace_function *prev = btinfo->functions.back ();
  struct btrace_function *bfun, *caller;

  bfun = ftrace_new_function (btinfo, mfun, fun);

  /* It is important to start at PREV's caller.  Otherwise, we might find
     PREV itself, if PREV is a recursive function.  */
  caller = ftrace_find_call_by_number (btinfo, prev->up);
  caller = ftrace_find_caller (btinfo, caller, mfun, fun);
  if (caller != NULL)
    {
      /* The caller of PREV is the preceding btrace function segment in this
         function instance.  */
      gdb_assert (caller->segment.next == NULL);

      caller->segment.next = bfun;
      bfun->segment.prev = caller;

      /* Maintain the function level.  */
      bfun->level = caller->level;

      /* Maintain the call stack.  */
      bfun->up = caller->up;
      bfun->flags = caller->flags;

      ftrace_debug (bfun, "new return");
    }
  else
    {
      /* We did not find a caller.  This could mean that something went
         wrong or that the call is simply not included in the trace.  */

      /* Let's search for some actual call.  */
      caller = ftrace_find_call_by_number (btinfo, prev->up);
      caller = ftrace_find_call (btinfo, caller);
      if (caller == NULL)
        {
          /* There is no call in PREV's back trace.  We assume that the
             branch trace did not include it.  */

          /* Let's find the topmost function and add a new caller for it.
             This should handle a series of initial tail calls.  */
          while (prev->up != 0)
            prev = ftrace_find_call_by_number (btinfo, prev->up);

          bfun->level = prev->level - 1;

          /* Fix up the call stack for PREV.  */
          ftrace_fixup_caller (prev, bfun, BFUN_UP_LINKS_TO_RET);

          ftrace_debug (bfun, "new return - no caller");
        }
      else
        {
          /* There is a call in PREV's back trace to which we should have
             returned but didn't.  Let's start a new, separate back trace
             from PREV's level.  */
          bfun->level = prev->level - 1;

          /* We fix up the back trace for PREV but leave other function segments
             on the same level as they are.
             This should handle things like schedule () correctly where we're
             switching contexts.  */
          prev->up = bfun->number;
          prev->flags = BFUN_UP_LINKS_TO_RET;

          ftrace_debug (bfun, "new return - unknown caller");
        }
    }

  return bfun;
}
/* Add a new function segment for a function switch at the end of the trace.
   BTINFO is the branch trace information for the current thread.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_switch (struct btrace_thread_info *btinfo,
                   struct minimal_symbol *mfun,
                   struct symbol *fun)
{
  struct btrace_function *prev = btinfo->functions.back ();
  struct btrace_function *bfun;

  /* This is an unexplained function switch.  We can't really be sure about the
     call stack, yet the best I can think of right now is to preserve it.  */
  bfun = ftrace_new_function (btinfo, mfun, fun);
  bfun->up = prev->up;
  bfun->flags = prev->flags;

  ftrace_debug (bfun, "new switch");

  return bfun;
}

/* Add a new function segment for a gap in the trace due to a decode error at
   the end of the trace.
   BTINFO is the branch trace information for the current thread.
   ERRCODE is the format-specific error code.  */

static struct btrace_function *
ftrace_new_gap (struct btrace_thread_info *btinfo, int errcode)
{
  struct btrace_function *bfun;

  if (btinfo->functions.empty ())
    bfun = ftrace_new_function (btinfo, NULL, NULL);
  else
    {
      /* We hijack the previous function segment if it was empty.  */
      bfun = btinfo->functions.back ();
      if (bfun->errcode != 0 || !VEC_empty (btrace_insn_s, bfun->insn))
        bfun = ftrace_new_function (btinfo, NULL, NULL);
    }

  bfun->errcode = errcode;

  ftrace_debug (bfun, "new gap");

  return bfun;
}
/* Update the current function segment at the end of the trace in BTINFO with
   respect to the instruction at PC.  This may create new function segments.
   Return the chronologically latest function segment, never NULL.  */

static struct btrace_function *
ftrace_update_function (struct btrace_thread_info *btinfo, CORE_ADDR pc)
{
  struct bound_minimal_symbol bmfun;
  struct minimal_symbol *mfun;
  struct symbol *fun;
  struct btrace_insn *last;
  struct btrace_function *bfun;

  /* Try to determine the function we're in.  We use both types of symbols
     to avoid surprises when we sometimes get a full symbol and sometimes
     only a minimal symbol.  */
  fun = find_pc_function (pc);
  bmfun = lookup_minimal_symbol_by_pc (pc);
  mfun = bmfun.minsym;

  if (fun == NULL && mfun == NULL)
    DEBUG_FTRACE ("no symbol at %s", core_addr_to_string_nz (pc));

  /* If we didn't have a function, we create one.  */
  if (btinfo->functions.empty ())
    return ftrace_new_function (btinfo, mfun, fun);

  /* If we had a gap before, we create a function.  */
  bfun = btinfo->functions.back ();
  if (bfun->errcode != 0)
    return ftrace_new_function (btinfo, mfun, fun);

  /* Check the last instruction, if we have one.
     We do this check first, since it allows us to fill in the call stack
     links in addition to the normal flow links.  */
  last = NULL;
  if (!VEC_empty (btrace_insn_s, bfun->insn))
    last = VEC_last (btrace_insn_s, bfun->insn);

  if (last != NULL)
    {
      switch (last->iclass)
        {
        case BTRACE_INSN_RETURN:
          {
            const char *fname;

            /* On some systems, _dl_runtime_resolve returns to the resolved
               function instead of jumping to it.  From our perspective,
               however, this is a tailcall.
               If we treated it as return, we wouldn't be able to find the
               resolved function in our stack back trace.  Hence, we would
               lose the current stack back trace and start anew with an empty
               back trace.  When the resolved function returns, we would then
               create a stack back trace with the same function names but
               different frame id's.  This will confuse stepping.  */
            fname = ftrace_print_function_name (bfun);
            if (strcmp (fname, "_dl_runtime_resolve") == 0)
              return ftrace_new_tailcall (btinfo, mfun, fun);

            return ftrace_new_return (btinfo, mfun, fun);
          }

        case BTRACE_INSN_CALL:
          /* Ignore calls to the next instruction.  They are used for PIC.  */
          if (last->pc + last->size == pc)
            break;

          return ftrace_new_call (btinfo, mfun, fun);

        case BTRACE_INSN_JUMP:
          {
            CORE_ADDR start;

            start = get_pc_function_start (pc);

            /* A jump to the start of a function is (typically) a tail call.  */
            if (start == pc)
              return ftrace_new_tailcall (btinfo, mfun, fun);

            /* If we can't determine the function for PC, we treat a jump at
               the end of the block as tail call if we're switching functions
               and as an intra-function branch if we don't.  */
            if (start == 0 && ftrace_function_switched (bfun, mfun, fun))
              return ftrace_new_tailcall (btinfo, mfun, fun);

            break;
          }
        }
    }

  /* Check if we're switching functions for some other reason.  */
  if (ftrace_function_switched (bfun, mfun, fun))
    {
      DEBUG_FTRACE ("switching from %s in %s at %s",
                    ftrace_print_insn_addr (last),
                    ftrace_print_function_name (bfun),
                    ftrace_print_filename (bfun));

      return ftrace_new_switch (btinfo, mfun, fun);
    }

  return bfun;
}

/* Add the instruction at PC to BFUN's instructions.  */

static void
ftrace_update_insns (struct btrace_function *bfun,
                     const struct btrace_insn *insn)
{
  VEC_safe_push (btrace_insn_s, bfun->insn, insn);

  if (record_debug > 1)
    ftrace_debug (bfun, "update insn");
}
/* Classify the instruction at PC.  */

static enum btrace_insn_class
ftrace_classify_insn (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  enum btrace_insn_class iclass;

  iclass = BTRACE_INSN_OTHER;
  TRY
    {
      if (gdbarch_insn_is_call (gdbarch, pc))
        iclass = BTRACE_INSN_CALL;
      else if (gdbarch_insn_is_ret (gdbarch, pc))
        iclass = BTRACE_INSN_RETURN;
      else if (gdbarch_insn_is_jump (gdbarch, pc))
        iclass = BTRACE_INSN_JUMP;
    }
  CATCH (error, RETURN_MASK_ERROR)
    {
    }
  END_CATCH

  return iclass;
}

/* Try to match the back trace at LHS to the back trace at RHS.  Returns the
   number of matching function segments or zero if the back traces do not
   match.  BTINFO is the branch trace information for the current thread.  */

static int
ftrace_match_backtrace (struct btrace_thread_info *btinfo,
                        struct btrace_function *lhs,
                        struct btrace_function *rhs)
{
  int matches;

  for (matches = 0; lhs != NULL && rhs != NULL; ++matches)
    {
      if (ftrace_function_switched (lhs, rhs->msym, rhs->sym))
        return 0;

      lhs = ftrace_get_caller (btinfo, lhs);
      rhs = ftrace_get_caller (btinfo, rhs);
    }

  return matches;
}

/* Add ADJUSTMENT to the level of BFUN and succeeding function segments.  */

static void
ftrace_fixup_level (struct btrace_function *bfun, int adjustment)
{
  if (adjustment == 0)
    return;

  DEBUG_FTRACE ("fixup level (%+d)", adjustment);
  ftrace_debug (bfun, "..bfun");

  for (; bfun != NULL; bfun = bfun->flow.next)
    bfun->level += adjustment;
}
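
/* Function levels describe the relative call-stack depth of a segment:
   calls increment the level, returns decrement it.  The global level
   offset stored in BTINFO->LEVEL normalizes these values so that the
   shallowest segment is displayed at level zero.  */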
/* Recompute the global level offset.  Traverse the function trace and compute
   the global level offset as the negative of the minimal function level.  */

static void
ftrace_compute_global_level_offset (struct btrace_thread_info *btinfo)
{
  int level = INT_MAX;

  if (btinfo == NULL)
    return;

  if (btinfo->functions.empty ())
    return;

  unsigned int length = btinfo->functions.size() - 1;
  for (unsigned int i = 0; i < length; ++i)
    level = std::min (level, btinfo->functions[i]->level);

  /* The last function segment contains the current instruction, which is not
     really part of the trace.  If it contains just this one instruction, we
     ignore the segment.  */
  struct btrace_function *last = btinfo->functions.back();
  if (VEC_length (btrace_insn_s, last->insn) != 1)
    level = std::min (level, last->level);

  DEBUG_FTRACE ("setting global level offset: %d", -level);
  btinfo->level = -level;
}
/* Connect the function segments PREV and NEXT in a bottom-to-top walk as in
   ftrace_connect_backtrace.  BTINFO is the branch trace information for the
   current thread.  */

static void
ftrace_connect_bfun (struct btrace_thread_info *btinfo,
                     struct btrace_function *prev,
                     struct btrace_function *next)
{
  DEBUG_FTRACE ("connecting...");
  ftrace_debug (prev, "..prev");
  ftrace_debug (next, "..next");

  /* The function segments are not yet connected.  */
  gdb_assert (prev->segment.next == NULL);
  gdb_assert (next->segment.prev == NULL);

  prev->segment.next = next;
  next->segment.prev = prev;

  /* We may have moved NEXT to a different function level.  */
  ftrace_fixup_level (next, prev->level - next->level);

  /* If we run out of back trace for one, let's use the other's.  */
  if (prev->up == 0)
    {
      const btrace_function_flags flags = next->flags;

      next = ftrace_find_call_by_number (btinfo, next->up);
      if (next != NULL)
        {
          DEBUG_FTRACE ("using next's callers");
          ftrace_fixup_caller (prev, next, flags);
        }
    }
  else if (next->up == 0)
    {
      const btrace_function_flags flags = prev->flags;

      prev = ftrace_find_call_by_number (btinfo, prev->up);
      if (prev != NULL)
        {
          DEBUG_FTRACE ("using prev's callers");
          ftrace_fixup_caller (next, prev, flags);
        }
    }
  else
    {
      /* PREV may have a tailcall caller, NEXT can't.  If it does, fixup the up
         link to add the tail callers to NEXT's back trace.

         This removes NEXT->UP from NEXT's back trace.  It will be added back
         when connecting NEXT and PREV's callers - provided they exist.

         If PREV's back trace consists of a series of tail calls without an
         actual call, there will be no further connection and NEXT's caller will
         be removed for good.  To catch this case, we handle it here and connect
         the top of PREV's back trace to NEXT's caller.  */
      if ((prev->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
        {
          struct btrace_function *caller;
          btrace_function_flags next_flags, prev_flags;

          /* We checked NEXT->UP above so CALLER can't be NULL.  */
          caller = ftrace_find_call_by_number (btinfo, next->up);
          next_flags = next->flags;
          prev_flags = prev->flags;

          DEBUG_FTRACE ("adding prev's tail calls to next");

          prev = ftrace_find_call_by_number (btinfo, prev->up);
          ftrace_fixup_caller (next, prev, prev_flags);

          for (; prev != NULL; prev = ftrace_find_call_by_number (btinfo,
                                                                  prev->up))
            {
              /* At the end of PREV's back trace, continue with CALLER.  */
              if (prev->up == 0)
                {
                  DEBUG_FTRACE ("fixing up link for tailcall chain");
                  ftrace_debug (prev, "..top");
                  ftrace_debug (caller, "..up");

                  ftrace_fixup_caller (prev, caller, next_flags);

                  /* If we skipped any tail calls, this may move CALLER to a
                     different function level.

                     Note that changing CALLER's level is only OK because we
                     know that this is the last iteration of the bottom-to-top
                     walk in ftrace_connect_backtrace.

                     Otherwise we will fix up CALLER's level when we connect it
                     to PREV's caller in the next iteration.  */
                  ftrace_fixup_level (caller, prev->level - caller->level - 1);
                  break;
                }

              /* There's nothing to do if we find a real call.  */
              if ((prev->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
                {
                  DEBUG_FTRACE ("will fix up link in next iteration");
                  break;
                }
            }
        }
    }
}

/* Connect function segments on the same level in the back trace at LHS and RHS.
   The back traces at LHS and RHS are expected to match according to
   ftrace_match_backtrace.  BTINFO is the branch trace information for the
   current thread.  */

static void
ftrace_connect_backtrace (struct btrace_thread_info *btinfo,
                          struct btrace_function *lhs,
                          struct btrace_function *rhs)
{
  while (lhs != NULL && rhs != NULL)
    {
      struct btrace_function *prev, *next;

      gdb_assert (!ftrace_function_switched (lhs, rhs->msym, rhs->sym));

      /* Connecting LHS and RHS may change the up link.  */
      prev = lhs;
      next = rhs;

      lhs = ftrace_get_caller (btinfo, lhs);
      rhs = ftrace_get_caller (btinfo, rhs);

      ftrace_connect_bfun (btinfo, prev, next);
    }
}
/* Bridge the gap between two function segments left and right of a gap if their
   respective back traces match in at least MIN_MATCHES functions.  BTINFO is
   the branch trace information for the current thread.

   Returns non-zero if the gap could be bridged, zero otherwise.  */

static int
ftrace_bridge_gap (struct btrace_thread_info *btinfo,
                   struct btrace_function *lhs, struct btrace_function *rhs,
                   int min_matches)
{
  struct btrace_function *best_l, *best_r, *cand_l, *cand_r;
  int best_matches;

  DEBUG_FTRACE ("checking gap at insn %u (req matches: %d)",
                rhs->insn_offset - 1, min_matches);

  best_matches = 0;
  best_l = NULL;
  best_r = NULL;

  /* We search the back traces of LHS and RHS for valid connections and connect
     the two function segments that give the longest combined back trace.  */

  for (cand_l = lhs; cand_l != NULL;
       cand_l = ftrace_get_caller (btinfo, cand_l))
    for (cand_r = rhs; cand_r != NULL;
         cand_r = ftrace_get_caller (btinfo, cand_r))
      {
        int matches;

        matches = ftrace_match_backtrace (btinfo, cand_l, cand_r);
        if (best_matches < matches)
          {
            best_matches = matches;
            best_l = cand_l;
            best_r = cand_r;
          }
      }

  /* We need at least MIN_MATCHES matches.  */
  gdb_assert (min_matches > 0);
  if (best_matches < min_matches)
    return 0;

  DEBUG_FTRACE ("..matches: %d", best_matches);

  /* We will fix up the level of BEST_R and succeeding function segments such
     that BEST_R's level matches BEST_L's when we connect BEST_L to BEST_R.

     This will ignore the level of RHS and following if BEST_R != RHS.  I.e. if
     BEST_R is a successor of RHS in the back trace of RHS (phases 1 and 3).

     To catch this, we already fix up the level here where we can start at RHS
     instead of at BEST_R.  We will ignore the level fixup when connecting
     BEST_L to BEST_R as they will already be on the same level.  */
  ftrace_fixup_level (rhs, best_l->level - best_r->level);

  ftrace_connect_backtrace (btinfo, best_l, best_r);

  return best_matches;
}
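
/* For illustration, consider a trace where FOO was called from MAIN, a
   gap follows, and afterwards BAR executes, also called from MAIN:

     main > foo  <gap>  main > bar

   Matching the back traces of FOO and BAR finds the common caller MAIN,
   which is enough to connect the two trace parts at that level even
   though the instructions in between were lost.  */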
/* Try to bridge gaps due to overflow or decode errors by connecting the
   function segments that are separated by the gap.  */

static void
btrace_bridge_gaps (struct thread_info *tp, VEC (bfun_s) **gaps)
{
  VEC (bfun_s) *remaining;
  struct cleanup *old_chain;
  int min_matches;

  DEBUG ("bridge gaps");

  remaining = NULL;
  old_chain = make_cleanup (VEC_cleanup (bfun_s), &remaining);

  /* We require a minimum amount of matches for bridging a gap.  The number of
     required matches will be lowered with each iteration.

     The more matches the higher our confidence that the bridging is correct.
     For big gaps or small traces, however, it may not be feasible to require a
     high number of matches.  */
  for (min_matches = 5; min_matches > 0; --min_matches)
    {
      /* Let's try to bridge as many gaps as we can.  In some cases, we need to
         skip a gap and revisit it again after we closed later gaps.  */
      while (!VEC_empty (bfun_s, *gaps))
        {
          struct btrace_function *gap;
          unsigned int idx;

          for (idx = 0; VEC_iterate (bfun_s, *gaps, idx, gap); ++idx)
            {
              struct btrace_function *lhs, *rhs;
              int bridged;

              /* We may have a sequence of gaps if we run from one error into
                 the next as we try to re-sync onto the trace stream.  Ignore
                 all but the leftmost gap in such a sequence.

                 Also ignore gaps at the beginning of the trace.  */
              lhs = gap->flow.prev;
              if (lhs == NULL || lhs->errcode != 0)
                continue;

              /* Skip gaps to the right.  */
              for (rhs = gap->flow.next; rhs != NULL; rhs = rhs->flow.next)
                if (rhs->errcode == 0)
                  break;

              /* Ignore gaps at the end of the trace.  */
              if (rhs == NULL)
                continue;

              bridged = ftrace_bridge_gap (&tp->btrace, lhs, rhs, min_matches);

              /* Keep track of gaps we were not able to bridge and try again.
                 If we just pushed them to the end of GAPS we would risk an
                 infinite loop in case we simply cannot bridge a gap.  */
              if (bridged == 0)
                VEC_safe_push (bfun_s, remaining, gap);
            }

          /* Let's see if we made any progress.  */
          if (VEC_length (bfun_s, remaining) == VEC_length (bfun_s, *gaps))
            break;

          VEC_free (bfun_s, *gaps);

          *gaps = remaining;
          remaining = NULL;
        }

      /* We get here if either GAPS is empty or if GAPS equals REMAINING.  */
      if (VEC_empty (bfun_s, *gaps))
        break;

      VEC_free (bfun_s, remaining);
      remaining = NULL;
    }

  do_cleanups (old_chain);

  /* We may omit this in some cases.  Not sure it is worth the extra
     complication, though.  */
  ftrace_compute_global_level_offset (&tp->btrace);
}
/* Compute the function branch trace from BTS trace.  */

static void
btrace_compute_ftrace_bts (struct thread_info *tp,
                           const struct btrace_data_bts *btrace,
                           VEC (bfun_s) **gaps)
{
  struct btrace_thread_info *btinfo;
  struct gdbarch *gdbarch;
  unsigned int blk;
  int level;

  gdbarch = target_gdbarch ();
  btinfo = &tp->btrace;
  blk = VEC_length (btrace_block_s, btrace->blocks);

  if (btinfo->functions.empty ())
    level = INT_MAX;
  else
    level = -btinfo->level;

  while (blk != 0)
    {
      btrace_block_s *block;
      CORE_ADDR pc;

      blk -= 1;

      block = VEC_index (btrace_block_s, btrace->blocks, blk);
      pc = block->begin;

      for (;;)
        {
          struct btrace_function *bfun;
          struct btrace_insn insn;
          int size;

          /* We should hit the end of the block.  Warn if we went too far.  */
          if (block->end < pc)
            {
              /* Indicate the gap in the trace.  */
              bfun = ftrace_new_gap (btinfo, BDE_BTS_OVERFLOW);

              VEC_safe_push (bfun_s, *gaps, bfun);

              warning (_("Recorded trace may be corrupted at instruction "
                         "%u (pc = %s)."), bfun->insn_offset - 1,
                       core_addr_to_string_nz (pc));

              break;
            }

          bfun = ftrace_update_function (btinfo, pc);

          /* Maintain the function level offset.
             For all but the last block, we do it here.  */
          if (blk != 0)
            level = std::min (level, bfun->level);

          size = 0;
          TRY
            {
              size = gdb_insn_length (gdbarch, pc);
            }
          CATCH (error, RETURN_MASK_ERROR)
            {
            }
          END_CATCH

          insn.pc = pc;
          insn.size = size;
          insn.iclass = ftrace_classify_insn (gdbarch, pc);
          insn.flags = 0;

          ftrace_update_insns (bfun, &insn);

          /* We're done once we pushed the instruction at the end.  */
          if (block->end == pc)
            break;

          /* We can't continue if we fail to compute the size.  */
          if (size <= 0)
            {
              /* Indicate the gap in the trace.  We just added INSN so we're
                 not at the beginning.  */
              bfun = ftrace_new_gap (btinfo, BDE_BTS_INSN_SIZE);

              VEC_safe_push (bfun_s, *gaps, bfun);

              warning (_("Recorded trace may be incomplete at instruction %u "
                         "(pc = %s)."), bfun->insn_offset - 1,
                       core_addr_to_string_nz (pc));

              break;
            }

          pc += size;

          /* Maintain the function level offset.
             For the last block, we do it here to not consider the last
             instruction.
             Since the last instruction corresponds to the current instruction
             and is not really part of the execution history, it shouldn't
             affect the level.  */
          if (blk == 0)
            level = std::min (level, bfun->level);
        }
    }

  /* LEVEL is the minimal function level of all btrace function segments.
     Define the global level offset to -LEVEL so all function levels are
     normalized to start at zero.  */
  btinfo->level = -level;
}
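
/* A BTS block describes a [begin; end] range of PCs that executed
   sequentially; the decoder above walks each block instruction by
   instruction to rebuild the instruction and function-call history.  */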
#if defined (HAVE_LIBIPT)

static enum btrace_insn_class
pt_reclassify_insn (enum pt_insn_class iclass)
{
  switch (iclass)
    {
    case ptic_call:
      return BTRACE_INSN_CALL;

    case ptic_return:
      return BTRACE_INSN_RETURN;

    case ptic_jump:
      return BTRACE_INSN_JUMP;

    default:
      return BTRACE_INSN_OTHER;
    }
}

/* Return the btrace instruction flags for INSN.  */

static btrace_insn_flags
pt_btrace_insn_flags (const struct pt_insn &insn)
{
  btrace_insn_flags flags = 0;

  if (insn.speculative)
    flags |= BTRACE_INSN_FLAG_SPECULATIVE;

  return flags;
}

/* Return the btrace instruction for INSN.  */

static btrace_insn
pt_btrace_insn (const struct pt_insn &insn)
{
  return {(CORE_ADDR) insn.ip, (gdb_byte) insn.size,
          pt_reclassify_insn (insn.iclass),
          pt_btrace_insn_flags (insn)};
}
/* Add function branch trace to BTINFO using DECODER.  */

static void
ftrace_add_pt (struct btrace_thread_info *btinfo,
               struct pt_insn_decoder *decoder,
               int *plevel,
               VEC (bfun_s) **gaps)
{
  struct btrace_function *bfun;
  uint64_t offset;
  int errcode;

  for (;;)
    {
      struct pt_insn insn;

      errcode = pt_insn_sync_forward (decoder);
      if (errcode < 0)
        {
          if (errcode != -pte_eos)
            warning (_("Failed to synchronize onto the Intel Processor "
                       "Trace stream: %s."), pt_errstr (pt_errcode (errcode)));
          break;
        }

      for (;;)
        {
          errcode = pt_insn_next (decoder, &insn, sizeof(insn));
          if (errcode < 0)
            break;

          /* Look for gaps in the trace - unless we're at the beginning.  */
          if (!btinfo->functions.empty ())
            {
              /* Tracing is disabled and re-enabled each time we enter the
                 kernel.  Most times, we continue from the same instruction we
                 stopped before.  This is indicated via the RESUMED instruction
                 flag.  The ENABLED instruction flag means that we continued
                 from some other instruction.  Indicate this as a trace gap.  */
              if (insn.enabled)
                {
                  bfun = ftrace_new_gap (btinfo, BDE_PT_DISABLED);

                  VEC_safe_push (bfun_s, *gaps, bfun);

                  pt_insn_get_offset (decoder, &offset);

                  warning (_("Non-contiguous trace at instruction %u (offset "
                             "= 0x%" PRIx64 ", pc = 0x%" PRIx64 ")."),
                           bfun->insn_offset - 1, offset, insn.ip);
                }
            }

          /* Indicate trace overflows.  */
          if (insn.resynced)
            {
              bfun = ftrace_new_gap (btinfo, BDE_PT_OVERFLOW);

              VEC_safe_push (bfun_s, *gaps, bfun);

              pt_insn_get_offset (decoder, &offset);

              warning (_("Overflow at instruction %u (offset = 0x%" PRIx64
                         ", pc = 0x%" PRIx64 ")."), bfun->insn_offset - 1,
                       offset, insn.ip);
            }

          bfun = ftrace_update_function (btinfo, insn.ip);

          /* Maintain the function level offset.  */
          *plevel = std::min (*plevel, bfun->level);

          btrace_insn btinsn = pt_btrace_insn (insn);
          ftrace_update_insns (bfun, &btinsn);
        }

      if (errcode == -pte_eos)
        break;

      /* Indicate the gap in the trace.  */
      bfun = ftrace_new_gap (btinfo, errcode);

      VEC_safe_push (bfun_s, *gaps, bfun);

      pt_insn_get_offset (decoder, &offset);

      warning (_("Decode error (%d) at instruction %u (offset = 0x%" PRIx64
                 ", pc = 0x%" PRIx64 "): %s."), errcode, bfun->insn_offset - 1,
               offset, insn.ip, pt_errstr (pt_errcode (errcode)));
    }
}
/* A callback function to allow the trace decoder to read the inferior's
   memory.  */

static int
btrace_pt_readmem_callback (gdb_byte *buffer, size_t size,
                            const struct pt_asid *asid, uint64_t pc,
                            void *context)
{
  int result, errcode;

  result = (int) size;
  TRY
    {
      errcode = target_read_code ((CORE_ADDR) pc, buffer, size);
      if (errcode != 0)
        result = -pte_nomap;
    }
  CATCH (error, RETURN_MASK_ERROR)
    {
      result = -pte_nomap;
    }
  END_CATCH

  return result;
}

/* Translate the vendor from one enum to another.  */

static enum pt_cpu_vendor
pt_translate_cpu_vendor (enum btrace_cpu_vendor vendor)
{
  switch (vendor)
    {
    default:
      return pcv_unknown;

    case CV_INTEL:
      return pcv_intel;
    }
}

/* Finalize the function branch trace after decode.  */

static void btrace_finalize_ftrace_pt (struct pt_insn_decoder *decoder,
                                       struct thread_info *tp, int level)
{
  pt_insn_free_decoder (decoder);

  /* LEVEL is the minimal function level of all btrace function segments.
     Define the global level offset to -LEVEL so all function levels are
     normalized to start at zero.  */
  tp->btrace.level = -level;

  /* Add a single last instruction entry for the current PC.
     This allows us to compute the backtrace at the current PC using both
     standard unwind and btrace unwind.
     This extra entry is ignored by all record commands.  */
  btrace_add_pc (tp);
}
/* Compute the function branch trace from Intel Processor Trace
   format.  */

static void
btrace_compute_ftrace_pt (struct thread_info *tp,
                          const struct btrace_data_pt *btrace,
                          VEC (bfun_s) **gaps)
{
  struct btrace_thread_info *btinfo;
  struct pt_insn_decoder *decoder;
  struct pt_config config;
  int level, errcode;

  if (btrace->size == 0)
    return;

  btinfo = &tp->btrace;
  if (btinfo->functions.empty ())
    level = INT_MAX;
  else
    level = -btinfo->level;

  pt_config_init(&config);
  config.begin = btrace->data;
  config.end = btrace->data + btrace->size;

  config.cpu.vendor = pt_translate_cpu_vendor (btrace->config.cpu.vendor);
  config.cpu.family = btrace->config.cpu.family;
  config.cpu.model = btrace->config.cpu.model;
  config.cpu.stepping = btrace->config.cpu.stepping;

  errcode = pt_cpu_errata (&config.errata, &config.cpu);
  if (errcode < 0)
    error (_("Failed to configure the Intel Processor Trace decoder: %s."),
           pt_errstr (pt_errcode (errcode)));

  decoder = pt_insn_alloc_decoder (&config);
  if (decoder == NULL)
    error (_("Failed to allocate the Intel Processor Trace decoder."));

  TRY
    {
      struct pt_image *image;

      image = pt_insn_get_image(decoder);
      if (image == NULL)
        error (_("Failed to configure the Intel Processor Trace decoder."));

      errcode = pt_image_set_callback(image, btrace_pt_readmem_callback, NULL);
      if (errcode < 0)
        error (_("Failed to configure the Intel Processor Trace decoder: "
                 "%s."), pt_errstr (pt_errcode (errcode)));

      ftrace_add_pt (btinfo, decoder, &level, gaps);
    }
  CATCH (error, RETURN_MASK_ALL)
    {
      /* Indicate a gap in the trace if we quit trace processing.  */
      if (error.reason == RETURN_QUIT && !btinfo->functions.empty ())
        {
          struct btrace_function *bfun;

          bfun = ftrace_new_gap (btinfo, BDE_PT_USER_QUIT);

          VEC_safe_push (bfun_s, *gaps, bfun);
        }

      btrace_finalize_ftrace_pt (decoder, tp, level);

      throw_exception (error);
    }
  END_CATCH

  btrace_finalize_ftrace_pt (decoder, tp, level);
}

#else /* defined (HAVE_LIBIPT)  */

static void
btrace_compute_ftrace_pt (struct thread_info *tp,
                          const struct btrace_data_pt *btrace,
                          VEC (bfun_s) **gaps)
{
  internal_error (__FILE__, __LINE__, _("Unexpected branch trace format."));
}

#endif /* defined (HAVE_LIBIPT)  */
/* Compute the function branch trace from a block branch trace BTRACE for
   a thread given by BTINFO.  */

static void
btrace_compute_ftrace_1 (struct thread_info *tp, struct btrace_data *btrace,
                         VEC (bfun_s) **gaps)
{
  DEBUG ("compute ftrace");

  switch (btrace->format)
    {
    case BTRACE_FORMAT_NONE:
      return;

    case BTRACE_FORMAT_BTS:
      btrace_compute_ftrace_bts (tp, &btrace->variant.bts, gaps);
      return;

    case BTRACE_FORMAT_PT:
      btrace_compute_ftrace_pt (tp, &btrace->variant.pt, gaps);
      return;
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
}

static void
btrace_finalize_ftrace (struct thread_info *tp, VEC (bfun_s) **gaps)
{
  if (!VEC_empty (bfun_s, *gaps))
    {
      tp->btrace.ngaps += VEC_length (bfun_s, *gaps);
      btrace_bridge_gaps (tp, gaps);
    }
}

static void
btrace_compute_ftrace (struct thread_info *tp, struct btrace_data *btrace)
{
  VEC (bfun_s) *gaps;
  struct cleanup *old_chain;

  gaps = NULL;
  old_chain = make_cleanup (VEC_cleanup (bfun_s), &gaps);

  TRY
    {
      btrace_compute_ftrace_1 (tp, btrace, &gaps);
    }
  CATCH (error, RETURN_MASK_ALL)
    {
      btrace_finalize_ftrace (tp, &gaps);

      throw_exception (error);
    }
  END_CATCH

  btrace_finalize_ftrace (tp, &gaps);

  do_cleanups (old_chain);
}
/* Add an entry for the current PC.  */

static void
btrace_add_pc (struct thread_info *tp)
{
  struct btrace_data btrace;
  struct btrace_block *block;
  struct regcache *regcache;
  struct cleanup *cleanup;
  CORE_ADDR pc;

  regcache = get_thread_regcache (tp->ptid);
  pc = regcache_read_pc (regcache);

  btrace_data_init (&btrace);
  btrace.format = BTRACE_FORMAT_BTS;
  btrace.variant.bts.blocks = NULL;

  cleanup = make_cleanup_btrace_data (&btrace);

  block = VEC_safe_push (btrace_block_s, btrace.variant.bts.blocks, NULL);
  block->begin = pc;
  block->end = pc;

  btrace_compute_ftrace (tp, &btrace);

  do_cleanups (cleanup);
}
/* See btrace.h.  */

void
btrace_enable (struct thread_info *tp, const struct btrace_config *conf)
{
  if (tp->btrace.target != NULL)
    return;

#if !defined (HAVE_LIBIPT)
  if (conf->format == BTRACE_FORMAT_PT)
    error (_("GDB does not support Intel Processor Trace."));
#endif /* !defined (HAVE_LIBIPT) */

  if (!target_supports_btrace (conf->format))
    error (_("Target does not support branch tracing."));

  DEBUG ("enable thread %s (%s)", print_thread_id (tp),
         target_pid_to_str (tp->ptid));

  tp->btrace.target = target_enable_btrace (tp->ptid, conf);

  /* We're done if we failed to enable tracing.  */
  if (tp->btrace.target == NULL)
    return;

  /* We need to undo the enable in case of errors.  */
  TRY
    {
      /* Add an entry for the current PC so we start tracing from where we
         enabled it.

         If we can't access TP's registers, TP is most likely running.  In this
         case, we can't really say where tracing was enabled so it should be
         safe to simply skip this step.

         This is not relevant for BTRACE_FORMAT_PT since the trace will already
         start at the PC at which tracing was enabled.  */
      if (conf->format != BTRACE_FORMAT_PT
          && can_access_registers_ptid (tp->ptid))
        btrace_add_pc (tp);
    }
  CATCH (exception, RETURN_MASK_ALL)
    {
      btrace_disable (tp);

      throw_exception (exception);
    }
  END_CATCH
}

/* See btrace.h.  */

const struct btrace_config *
btrace_conf (const struct btrace_thread_info *btinfo)
{
  if (btinfo->target == NULL)
    return NULL;

  return target_btrace_conf (btinfo->target);
}

/* See btrace.h.  */

void
btrace_disable (struct thread_info *tp)
{
  struct btrace_thread_info *btp = &tp->btrace;

  if (btp->target == NULL)
    return;

  DEBUG ("disable thread %s (%s)", print_thread_id (tp),
         target_pid_to_str (tp->ptid));

  target_disable_btrace (btp->target);
  btp->target = NULL;

  btrace_clear (tp);
}

/* See btrace.h.  */

void
btrace_teardown (struct thread_info *tp)
{
  struct btrace_thread_info *btp = &tp->btrace;

  if (btp->target == NULL)
    return;

  DEBUG ("teardown thread %s (%s)", print_thread_id (tp),
         target_pid_to_str (tp->ptid));

  target_teardown_btrace (btp->target);
  btp->target = NULL;

  btrace_clear (tp);
}
/* Stitch branch trace in BTS format.  */

static int
btrace_stitch_bts (struct btrace_data_bts *btrace, struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;
  struct btrace_function *last_bfun;
  struct btrace_insn *last_insn;
  btrace_block_s *first_new_block;

  btinfo = &tp->btrace;
  gdb_assert (!btinfo->functions.empty ());
  gdb_assert (!VEC_empty (btrace_block_s, btrace->blocks));

  last_bfun = btinfo->functions.back ();

  /* If the existing trace ends with a gap, we just glue the traces
     together.  We need to drop the last (i.e. chronologically first) block
     of the new trace,  though, since we can't fill in the start address.  */
  if (VEC_empty (btrace_insn_s, last_bfun->insn))
    {
      VEC_pop (btrace_block_s, btrace->blocks);
      return 0;
    }

  /* Beware that block trace starts with the most recent block, so the
     chronologically first block in the new trace is the last block in
     the new trace's block vector.  */
  first_new_block = VEC_last (btrace_block_s, btrace->blocks);
  last_insn = VEC_last (btrace_insn_s, last_bfun->insn);

  /* If the current PC at the end of the block is the same as in our current
     trace, there are two explanations:
       1. we executed the instruction and some branch brought us back.
       2. we have not made any progress.
     In the first case, the delta trace vector should contain at least two
     entries.
     In the second case, the delta trace vector should contain exactly one
     entry for the partial block containing the current PC.  Remove it.  */
  if (first_new_block->end == last_insn->pc
      && VEC_length (btrace_block_s, btrace->blocks) == 1)
    {
      VEC_pop (btrace_block_s, btrace->blocks);
      return 0;
    }

  DEBUG ("stitching %s to %s", ftrace_print_insn_addr (last_insn),
         core_addr_to_string_nz (first_new_block->end));

  /* Do a simple sanity check to make sure we don't accidentally end up
     with a bad block.  This should not occur in practice.  */
  if (first_new_block->end < last_insn->pc)
    {
      warning (_("Error while trying to read delta trace.  Falling back to "
                 "a full read."));
      return -1;
    }

  /* We adjust the last block to start at the end of our current trace.  */
  gdb_assert (first_new_block->begin == 0);
  first_new_block->begin = last_insn->pc;

  /* We simply pop the last insn so we can insert it again as part of
     the normal branch trace computation.
     Since instruction iterators are based on indices in the instructions
     vector, we don't leave any pointers dangling.  */
  DEBUG ("pruning insn at %s for stitching",
         ftrace_print_insn_addr (last_insn));

  VEC_pop (btrace_insn_s, last_bfun->insn);

  /* The instructions vector may become empty temporarily if this has
     been the only instruction in this function segment.
     This violates the invariant but will be remedied shortly by
     btrace_compute_ftrace when we add the new trace.  */

  /* The only case where this would hurt is if the entire trace consisted
     of just that one instruction.  If we remove it, we might turn the now
     empty btrace function segment into a gap.  But we don't want gaps at
     the beginning.  To avoid this, we remove the entire old trace.  */
  if (last_bfun->number == 1 && VEC_empty (btrace_insn_s, last_bfun->insn))
    btrace_clear (tp);

  return 0;
}

/* Adjust the block trace in order to stitch old and new trace together.
   BTRACE is the new delta trace between the last and the current stop.
   TP is the traced thread.
   May modify BTRACE as well as the existing trace in TP.
   Return 0 on success, -1 otherwise.  */

static int
btrace_stitch_trace (struct btrace_data *btrace, struct thread_info *tp)
{
  /* If we don't have trace, there's nothing to do.  */
  if (btrace_data_empty (btrace))
    return 0;

  switch (btrace->format)
    {
    case BTRACE_FORMAT_NONE:
      return 0;

    case BTRACE_FORMAT_BTS:
      return btrace_stitch_bts (&btrace->variant.bts, tp);

    case BTRACE_FORMAT_PT:
      /* Delta reads are not supported.  */
      return -1;
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
}
/* Clear the branch trace histories in BTINFO.  */

static void
btrace_clear_history (struct btrace_thread_info *btinfo)
{
  xfree (btinfo->insn_history);
  xfree (btinfo->call_history);
  xfree (btinfo->replay);

  btinfo->insn_history = NULL;
  btinfo->call_history = NULL;
  btinfo->replay = NULL;
}

/* Clear the branch trace maintenance histories in BTINFO.  */

static void
btrace_maint_clear (struct btrace_thread_info *btinfo)
{
  switch (btinfo->data.format)
    {
    default:
      break;

    case BTRACE_FORMAT_BTS:
      btinfo->maint.variant.bts.packet_history.begin = 0;
      btinfo->maint.variant.bts.packet_history.end = 0;
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      xfree (btinfo->maint.variant.pt.packets);

      btinfo->maint.variant.pt.packets = NULL;
      btinfo->maint.variant.pt.packet_history.begin = 0;
      btinfo->maint.variant.pt.packet_history.end = 0;
      break;
#endif /* defined (HAVE_LIBIPT) */
    }
}

/* See btrace.h.  */

const char *
btrace_decode_error (enum btrace_format format, int errcode)
{
  switch (format)
    {
    case BTRACE_FORMAT_BTS:
      switch (errcode)
        {
        case BDE_BTS_OVERFLOW:
          return _("instruction overflow");

        case BDE_BTS_INSN_SIZE:
          return _("unknown instruction");

        default:
          break;
        }
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      switch (errcode)
        {
        case BDE_PT_USER_QUIT:
          return _("trace decode cancelled");

        case BDE_PT_DISABLED:
          return _("disabled");

        case BDE_PT_OVERFLOW:
          return _("overflow");

        default:
          if (errcode < 0)
            return pt_errstr (pt_errcode (errcode));
          break;
        }
      break;
#endif /* defined (HAVE_LIBIPT) */

    default:
      break;
    }

  return _("unknown");
}
/* See btrace.h.  */

void
btrace_fetch (struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;
  struct btrace_target_info *tinfo;
  struct btrace_data btrace;
  struct cleanup *cleanup;
  int errcode;

  DEBUG ("fetch thread %s (%s)", print_thread_id (tp),
         target_pid_to_str (tp->ptid));

  btinfo = &tp->btrace;
  tinfo = btinfo->target;
  if (tinfo == NULL)
    return;

  /* There's no way we could get new trace while replaying.
     On the other hand, delta trace would return a partial record with the
     current PC, which is the replay PC, not the last PC, as expected.  */
  if (btinfo->replay != NULL)
    return;

  /* With CLI usage, TP->PTID always equals INFERIOR_PTID here.  Now that we
     can store a gdb.Record object in Python referring to a different thread
     than the current one, temporarily set INFERIOR_PTID.  */
  cleanup = save_inferior_ptid ();
  inferior_ptid = tp->ptid;

  /* We should not be called on running or exited threads.  */
  gdb_assert (can_access_registers_ptid (tp->ptid));

  btrace_data_init (&btrace);
  make_cleanup_btrace_data (&btrace);

  /* Let's first try to extend the trace we already have.  */
  if (!btinfo->functions.empty ())
    {
      errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_DELTA);
      if (errcode == 0)
        {
          /* Success.  Let's try to stitch the traces together.  */
          errcode = btrace_stitch_trace (&btrace, tp);
        }
      else
        {
          /* We failed to read delta trace.  Let's try to read new trace.  */
          errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_NEW);

          /* If we got any new trace, discard what we have.  */
          if (errcode == 0 && !btrace_data_empty (&btrace))
            btrace_clear (tp);
        }

      /* If we were not able to read the trace, we start over.  */
      if (errcode != 0)
        {
          btrace_clear (tp);
          errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);
        }
    }
  else
    errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);

  /* If we were not able to read the branch trace, signal an error.  */
  if (errcode != 0)
    error (_("Failed to read branch trace."));

  /* Compute the trace, provided we have any.  */
  if (!btrace_data_empty (&btrace))
    {
      /* Store the raw trace data.  The stored data will be cleared in
         btrace_clear, so we always append the new trace.  */
      btrace_data_append (&btinfo->data, &btrace);
      btrace_maint_clear (btinfo);

      btrace_clear_history (btinfo);
      btrace_compute_ftrace (tp, &btrace);
    }

  do_cleanups (cleanup);
}

/* See btrace.h.  */

void
btrace_clear (struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;

  DEBUG ("clear thread %s (%s)", print_thread_id (tp),
         target_pid_to_str (tp->ptid));

  /* Make sure btrace frames that may hold a pointer into the branch
     trace data are destroyed.  */
  reinit_frame_cache ();

  btinfo = &tp->btrace;
  for (auto &bfun : btinfo->functions)
    {
      VEC_free (btrace_insn_s, bfun->insn);
      xfree (bfun);
    }

  btinfo->functions.clear ();
  btinfo->ngaps = 0;

  /* Must clear the maint data before - it depends on BTINFO->DATA.  */
  btrace_maint_clear (btinfo);
  btrace_data_clear (&btinfo->data);
  btrace_clear_history (btinfo);
}

/* See btrace.h.  */

void
btrace_free_objfile (struct objfile *objfile)
{
  struct thread_info *tp;

  DEBUG ("free objfile");

  ALL_NON_EXITED_THREADS (tp)
    btrace_clear (tp);
}
#if defined (HAVE_LIBEXPAT)

/* Check the btrace document version.  */

static void
check_xml_btrace_version (struct gdb_xml_parser *parser,
                          const struct gdb_xml_element *element,
                          void *user_data, VEC (gdb_xml_value_s) *attributes)
{
  const char *version
    = (const char *) xml_find_attribute (attributes, "version")->value;

  if (strcmp (version, "1.0") != 0)
    gdb_xml_error (parser, _("Unsupported btrace version: \"%s\""), version);
}

/* Parse a btrace "block" xml record.  */

static void
parse_xml_btrace_block (struct gdb_xml_parser *parser,
                        const struct gdb_xml_element *element,
                        void *user_data, VEC (gdb_xml_value_s) *attributes)
{
  struct btrace_data *btrace;
  struct btrace_block *block;
  ULONGEST *begin, *end;

  btrace = (struct btrace_data *) user_data;

  switch (btrace->format)
    {
    case BTRACE_FORMAT_BTS:
      break;

    case BTRACE_FORMAT_NONE:
      btrace->format = BTRACE_FORMAT_BTS;
      btrace->variant.bts.blocks = NULL;
      break;

    default:
      gdb_xml_error (parser, _("Btrace format error."));
    }

  begin = (ULONGEST *) xml_find_attribute (attributes, "begin")->value;
  end = (ULONGEST *) xml_find_attribute (attributes, "end")->value;

  block = VEC_safe_push (btrace_block_s, btrace->variant.bts.blocks, NULL);
  block->begin = *begin;
  block->end = *end;
}
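
/* For reference, a minimal BTS document as described by btrace.dtd looks
   roughly like this (addresses purely for illustration):

     <btrace version="1.0">
       <block begin="0x400500" end="0x40051f"/>
     </btrace>  */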
/* Parse a "raw" xml record.  */

static void
parse_xml_raw (struct gdb_xml_parser *parser, const char *body_text,
               gdb_byte **pdata, size_t *psize)
{
  struct cleanup *cleanup;
  gdb_byte *data, *bin;
  size_t len, size;

  len = strlen (body_text);
  if (len % 2 != 0)
    gdb_xml_error (parser, _("Bad raw data size."));

  size = len / 2;

  bin = data = (gdb_byte *) xmalloc (size);
  cleanup = make_cleanup (xfree, data);

  /* We use hex encoding - see common/rsp-low.h.  */
  while (len > 0)
    {
      char hi, lo;

      hi = *body_text++;
      lo = *body_text++;

      if (hi == 0 || lo == 0)
        gdb_xml_error (parser, _("Bad hex encoding."));

      *bin++ = fromhex (hi) * 16 + fromhex (lo);
      len -= 2;
    }

  discard_cleanups (cleanup);

  *pdata = data;
  *psize = size;
}
/* Parse a btrace pt-config "cpu" xml record.  */

static void
parse_xml_btrace_pt_config_cpu (struct gdb_xml_parser *parser,
                                const struct gdb_xml_element *element,
                                void *user_data,
                                VEC (gdb_xml_value_s) *attributes)
{
  struct btrace_data *btrace;
  const char *vendor;
  ULONGEST *family, *model, *stepping;

  vendor = (const char *) xml_find_attribute (attributes, "vendor")->value;
  family = (ULONGEST *) xml_find_attribute (attributes, "family")->value;
  model = (ULONGEST *) xml_find_attribute (attributes, "model")->value;
  stepping = (ULONGEST *) xml_find_attribute (attributes, "stepping")->value;

  btrace = (struct btrace_data *) user_data;

  if (strcmp (vendor, "GenuineIntel") == 0)
    btrace->variant.pt.config.cpu.vendor = CV_INTEL;

  btrace->variant.pt.config.cpu.family = *family;
  btrace->variant.pt.config.cpu.model = *model;
  btrace->variant.pt.config.cpu.stepping = *stepping;
}

/* Parse a btrace pt "raw" xml record.  */

static void
parse_xml_btrace_pt_raw (struct gdb_xml_parser *parser,
                         const struct gdb_xml_element *element,
                         void *user_data, const char *body_text)
{
  struct btrace_data *btrace;

  btrace = (struct btrace_data *) user_data;
  parse_xml_raw (parser, body_text, &btrace->variant.pt.data,
                 &btrace->variant.pt.size);
}

/* Parse a btrace "pt" xml record.  */

static void
parse_xml_btrace_pt (struct gdb_xml_parser *parser,
                     const struct gdb_xml_element *element,
                     void *user_data, VEC (gdb_xml_value_s) *attributes)
{
  struct btrace_data *btrace;

  btrace = (struct btrace_data *) user_data;
  btrace->format = BTRACE_FORMAT_PT;
  btrace->variant.pt.config.cpu.vendor = CV_UNKNOWN;
  btrace->variant.pt.data = NULL;
  btrace->variant.pt.size = 0;
}
static const struct gdb_xml_attribute block_attributes[] = {
  { "begin", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { "end", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

static const struct gdb_xml_attribute btrace_pt_config_cpu_attributes[] = {
  { "vendor", GDB_XML_AF_NONE, NULL, NULL },
  { "family", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { "model", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { "stepping", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

static const struct gdb_xml_element btrace_pt_config_children[] = {
  { "cpu", btrace_pt_config_cpu_attributes, NULL, GDB_XML_EF_OPTIONAL,
    parse_xml_btrace_pt_config_cpu, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

static const struct gdb_xml_element btrace_pt_children[] = {
  { "pt-config", NULL, btrace_pt_config_children, GDB_XML_EF_OPTIONAL, NULL,
    NULL },
  { "raw", NULL, NULL, GDB_XML_EF_OPTIONAL, NULL, parse_xml_btrace_pt_raw },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

static const struct gdb_xml_attribute btrace_attributes[] = {
  { "version", GDB_XML_AF_NONE, NULL, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

static const struct gdb_xml_element btrace_children[] = {
  { "block", block_attributes, NULL,
    GDB_XML_EF_REPEATABLE | GDB_XML_EF_OPTIONAL, parse_xml_btrace_block, NULL },
  { "pt", NULL, btrace_pt_children, GDB_XML_EF_OPTIONAL, parse_xml_btrace_pt,
    NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

static const struct gdb_xml_element btrace_elements[] = {
  { "btrace", btrace_attributes, btrace_children, GDB_XML_EF_NONE,
    check_xml_btrace_version, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

#endif /* defined (HAVE_LIBEXPAT) */
2149 parse_xml_btrace (struct btrace_data
*btrace
, const char *buffer
)
2151 struct cleanup
*cleanup
;
2154 #if defined (HAVE_LIBEXPAT)
2156 btrace
->format
= BTRACE_FORMAT_NONE
;
2158 cleanup
= make_cleanup_btrace_data (btrace
);
2159 errcode
= gdb_xml_parse_quick (_("btrace"), "btrace.dtd", btrace_elements
,
2162 error (_("Error parsing branch trace."));
2164 /* Keep parse results. */
2165 discard_cleanups (cleanup
);
2167 #else /* !defined (HAVE_LIBEXPAT) */
2169 error (_("Cannot process branch trace. XML parsing is not supported."));
2171 #endif /* !defined (HAVE_LIBEXPAT) */
2174 #if defined (HAVE_LIBEXPAT)
2176 /* Parse a btrace-conf "bts" xml record. */
2179 parse_xml_btrace_conf_bts (struct gdb_xml_parser
*parser
,
2180 const struct gdb_xml_element
*element
,
2181 void *user_data
, VEC (gdb_xml_value_s
) *attributes
)
2183 struct btrace_config
*conf
;
2184 struct gdb_xml_value
*size
;
2186 conf
= (struct btrace_config
*) user_data
;
2187 conf
->format
= BTRACE_FORMAT_BTS
;
2190 size
= xml_find_attribute (attributes
, "size");
2192 conf
->bts
.size
= (unsigned int) *(ULONGEST
*) size
->value
;
2195 /* Parse a btrace-conf "pt" xml record. */
2198 parse_xml_btrace_conf_pt (struct gdb_xml_parser
*parser
,
2199 const struct gdb_xml_element
*element
,
2200 void *user_data
, VEC (gdb_xml_value_s
) *attributes
)
2202 struct btrace_config
*conf
;
2203 struct gdb_xml_value
*size
;
2205 conf
= (struct btrace_config
*) user_data
;
2206 conf
->format
= BTRACE_FORMAT_PT
;
2209 size
= xml_find_attribute (attributes
, "size");
2211 conf
->pt
.size
= (unsigned int) *(ULONGEST
*) size
->value
;
static const struct gdb_xml_attribute btrace_conf_pt_attributes[] = {
  { "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

static const struct gdb_xml_attribute btrace_conf_bts_attributes[] = {
  { "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};
static const struct gdb_xml_element btrace_conf_children[] = {
  { "bts", btrace_conf_bts_attributes, NULL, GDB_XML_EF_OPTIONAL,
    parse_xml_btrace_conf_bts, NULL },
  { "pt", btrace_conf_pt_attributes, NULL, GDB_XML_EF_OPTIONAL,
    parse_xml_btrace_conf_pt, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};
static const struct gdb_xml_attribute btrace_conf_attributes[] = {
  { "version", GDB_XML_AF_NONE, NULL, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};
static const struct gdb_xml_element btrace_conf_elements[] = {
  { "btrace-conf", btrace_conf_attributes, btrace_conf_children,
    GDB_XML_EF_NONE, NULL, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

#endif /* defined (HAVE_LIBEXPAT) */
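/* For illustration only: a btrace-conf document matching the element tables
   above might look as follows; the size value is made up.

     <btrace-conf version="1.0">
       <bts size="65536"/>
     </btrace-conf>  */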
/* See btrace.h.  */

void
parse_xml_btrace_conf (struct btrace_config *conf, const char *xml)
{
  int errcode;

#if defined (HAVE_LIBEXPAT)

  errcode = gdb_xml_parse_quick (_("btrace-conf"), "btrace-conf.dtd",
                                 btrace_conf_elements, xml, conf);
  if (errcode != 0)
    error (_("Error parsing branch trace configuration."));

#else  /* !defined (HAVE_LIBEXPAT) */

  error (_("XML parsing is not supported."));

#endif  /* !defined (HAVE_LIBEXPAT) */
}
/* See btrace.h.  */

const struct btrace_insn *
btrace_insn_get (const struct btrace_insn_iterator *it)
{
  const struct btrace_function *bfun;
  unsigned int index, end;

  index = it->insn_index;
  bfun = it->btinfo->functions[it->call_index];

  /* Check if the iterator points to a gap in the trace.  */
  if (bfun->errcode != 0)
    return NULL;

  /* The index is within the bounds of this function's instruction vector.  */
  end = VEC_length (btrace_insn_s, bfun->insn);
  gdb_assert (0 < end);
  gdb_assert (index < end);

  return VEC_index (btrace_insn_s, bfun->insn, index);
}
/* See btrace.h.  */

int
btrace_insn_get_error (const struct btrace_insn_iterator *it)
{
  const struct btrace_function *bfun;

  bfun = it->btinfo->functions[it->call_index];
  return bfun->errcode;
}
/* See btrace.h.  */

unsigned int
btrace_insn_number (const struct btrace_insn_iterator *it)
{
  const struct btrace_function *bfun;

  bfun = it->btinfo->functions[it->call_index];
  return bfun->insn_offset + it->insn_index;
}
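/* For illustration: a function segment with insn_offset 100 that contains
   three instructions covers the global instruction numbers [100; 103).  An
   iterator positioned at insn_index 2 within that segment thus reports
   instruction number 102.  */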
/* See btrace.h.  */

void
btrace_insn_begin (struct btrace_insn_iterator *it,
                   const struct btrace_thread_info *btinfo)
{
  if (btinfo->functions.empty ())
    error (_("No trace."));

  it->btinfo = btinfo;
  it->call_index = 0;
  it->insn_index = 0;
}
/* See btrace.h.  */

void
btrace_insn_end (struct btrace_insn_iterator *it,
                 const struct btrace_thread_info *btinfo)
{
  const struct btrace_function *bfun;
  unsigned int length;

  if (btinfo->functions.empty ())
    error (_("No trace."));

  bfun = btinfo->functions.back ();
  length = VEC_length (btrace_insn_s, bfun->insn);

  /* The last function may either be a gap or it contains the current
     instruction, which is one past the end of the execution trace; ignore
     it.  */
  if (length > 0)
    length -= 1;

  it->btinfo = btinfo;
  it->call_index = bfun->number - 1;
  it->insn_index = length;
}
/* See btrace.h.  */

unsigned int
btrace_insn_next (struct btrace_insn_iterator *it, unsigned int stride)
{
  const struct btrace_function *bfun;
  unsigned int index, steps;

  bfun = it->btinfo->functions[it->call_index];
  steps = 0;
  index = it->insn_index;

  while (stride != 0)
    {
      unsigned int end, space, adv;

      end = VEC_length (btrace_insn_s, bfun->insn);

      /* An empty function segment represents a gap in the trace.  We count
         it as one instruction.  */
      if (end == 0)
        {
          const struct btrace_function *next;

          next = bfun->flow.next;
          if (next == NULL)
            break;

          stride -= 1;
          steps += 1;

          bfun = next;
          index = 0;

          continue;
        }

      gdb_assert (0 < end);
      gdb_assert (index < end);

      /* Compute the number of instructions remaining in this segment.  */
      space = end - index;

      /* Advance the iterator as far as possible within this segment.  */
      adv = std::min (space, stride);

      stride -= adv;
      index += adv;
      steps += adv;

      /* Move to the next function if we're at the end of this one.  */
      if (index == end)
        {
          const struct btrace_function *next;

          next = bfun->flow.next;
          if (next == NULL)
            {
              /* We stepped past the last function.

                 Let's adjust the index to point to the last instruction in
                 the previous function.  */
              index -= 1;
              steps -= 1;
              break;
            }

          /* We now point to the first instruction in the new function.  */
          bfun = next;
          index = 0;
        }

      /* We did make progress.  */
      gdb_assert (adv > 0);
    }

  /* Update the iterator.  */
  it->call_index = bfun->number - 1;
  it->insn_index = index;

  return steps;
}
/* See btrace.h.  */

unsigned int
btrace_insn_prev (struct btrace_insn_iterator *it, unsigned int stride)
{
  const struct btrace_function *bfun;
  unsigned int index, steps;

  bfun = it->btinfo->functions[it->call_index];
  steps = 0;
  index = it->insn_index;

  while (stride != 0)
    {
      unsigned int adv;

      /* Move to the previous function if we're at the start of this one.  */
      if (index == 0)
        {
          const struct btrace_function *prev;

          prev = bfun->flow.prev;
          if (prev == NULL)
            break;

          /* We point to one after the last instruction in the new function.  */
          bfun = prev;
          index = VEC_length (btrace_insn_s, bfun->insn);

          /* An empty function segment represents a gap in the trace.  We count
             it as one instruction.  */
          if (index == 0)
            {
              stride -= 1;
              steps += 1;

              continue;
            }
        }

      /* Advance the iterator as far as possible within this segment.  */
      adv = std::min (index, stride);

      stride -= adv;
      index -= adv;
      steps += adv;

      /* We did make progress.  */
      gdb_assert (adv > 0);
    }

  /* Update the iterator.  */
  it->call_index = bfun->number - 1;
  it->insn_index = index;

  return steps;
}
/* See btrace.h.  */

int
btrace_insn_cmp (const struct btrace_insn_iterator *lhs,
                 const struct btrace_insn_iterator *rhs)
{
  gdb_assert (lhs->btinfo == rhs->btinfo);

  if (lhs->call_index != rhs->call_index)
    return lhs->call_index - rhs->call_index;

  return lhs->insn_index - rhs->insn_index;
}
/* See btrace.h.  */

int
btrace_find_insn_by_number (struct btrace_insn_iterator *it,
                            const struct btrace_thread_info *btinfo,
                            unsigned int number)
{
  const struct btrace_function *bfun;
  unsigned int upper, lower;

  if (btinfo->functions.empty ())
    return 0;

  lower = 0;
  bfun = btinfo->functions[lower];
  if (number < bfun->insn_offset)
    return 0;

  upper = btinfo->functions.size () - 1;
  bfun = btinfo->functions[upper];
  if (number >= bfun->insn_offset + ftrace_call_num_insn (bfun))
    return 0;

  /* We assume that there are no holes in the numbering.  */
  for (;;)
    {
      const unsigned int average = lower + (upper - lower) / 2;

      bfun = btinfo->functions[average];

      if (number < bfun->insn_offset)
        {
          upper = average - 1;
          continue;
        }

      if (number >= bfun->insn_offset + ftrace_call_num_insn (bfun))
        {
          lower = average + 1;
          continue;
        }

      break;
    }

  it->btinfo = btinfo;
  it->call_index = bfun->number - 1;
  it->insn_index = number - bfun->insn_offset;

  return 1;
}
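/* For illustration, a hypothetical round trip through the iterator functions
   above, assuming BTINFO holds a valid trace and NUMBER is in range:

     struct btrace_insn_iterator it;

     if (btrace_find_insn_by_number (&it, btinfo, number))
       {
         const struct btrace_insn *insn;

         insn = btrace_insn_get (&it);  [NULL if IT points to a gap.]
         gdb_assert (btrace_insn_number (&it) == number);
       }  */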
/* Returns true if the recording ends with a function segment that
   contains only a single (i.e. the current) instruction.  */

static bool
btrace_ends_with_single_insn (const struct btrace_thread_info *btinfo)
{
  const btrace_function *bfun;

  if (btinfo->functions.empty ())
    return false;

  bfun = btinfo->functions.back ();
  if (bfun->errcode != 0)
    return false;

  return ftrace_call_num_insn (bfun) == 1;
}
/* See btrace.h.  */

const struct btrace_function *
btrace_call_get (const struct btrace_call_iterator *it)
{
  if (it->index >= it->btinfo->functions.size ())
    return NULL;

  return it->btinfo->functions[it->index];
}
/* See btrace.h.  */

unsigned int
btrace_call_number (const struct btrace_call_iterator *it)
{
  const unsigned int length = it->btinfo->functions.size ();

  /* If the last function segment contains only a single instruction (i.e. the
     current instruction), skip it.  */
  if ((it->index == length) && btrace_ends_with_single_insn (it->btinfo))
    return length;

  return it->index + 1;
}
/* See btrace.h.  */

void
btrace_call_begin (struct btrace_call_iterator *it,
                   const struct btrace_thread_info *btinfo)
{
  if (btinfo->functions.empty ())
    error (_("No trace."));

  it->btinfo = btinfo;
  it->index = 0;
}
/* See btrace.h.  */

void
btrace_call_end (struct btrace_call_iterator *it,
                 const struct btrace_thread_info *btinfo)
{
  if (btinfo->functions.empty ())
    error (_("No trace."));

  it->btinfo = btinfo;
  it->index = btinfo->functions.size ();
}
/* See btrace.h.  */

unsigned int
btrace_call_next (struct btrace_call_iterator *it, unsigned int stride)
{
  const unsigned int length = it->btinfo->functions.size ();

  if (it->index + stride < length - 1)
    /* Default case: Simply advance the iterator.  */
    it->index += stride;
  else if (it->index + stride == length - 1)
    {
      /* We land exactly at the last function segment.  If it contains only one
         instruction (i.e. the current instruction) it is not actually part of
         the trace.  */
      if (btrace_ends_with_single_insn (it->btinfo))
        it->index = length;
      else
        it->index = length - 1;
    }
  else
    {
      /* We land past the last function segment and have to adjust the stride.
         If the last function segment contains only one instruction (i.e. the
         current instruction) it is not actually part of the trace.  */
      if (btrace_ends_with_single_insn (it->btinfo))
        stride = length - it->index - 1;
      else
        stride = length - it->index;

      it->index = length;
    }

  return stride;
}
/* See btrace.h.  */

unsigned int
btrace_call_prev (struct btrace_call_iterator *it, unsigned int stride)
{
  const unsigned int length = it->btinfo->functions.size ();
  int steps = 0;

  gdb_assert (it->index <= length);

  if (stride == 0 || it->index == 0)
    return 0;

  /* If we are at the end, the first step is a special case.  If the last
     function segment contains only one instruction (i.e. the current
     instruction) it is not actually part of the trace.  To be able to step
     over this instruction, we need at least one more function segment.  */
  if ((it->index == length) && (length > 1))
    {
      if (btrace_ends_with_single_insn (it->btinfo))
        it->index = length - 2;
      else
        it->index = length - 1;

      steps = 1;
      stride -= 1;
    }

  stride = std::min (stride, it->index);

  it->index -= stride;
  return steps + stride;
}
/* See btrace.h.  */

int
btrace_call_cmp (const struct btrace_call_iterator *lhs,
                 const struct btrace_call_iterator *rhs)
{
  gdb_assert (lhs->btinfo == rhs->btinfo);
  return (int) (lhs->index - rhs->index);
}
/* See btrace.h.  */

int
btrace_find_call_by_number (struct btrace_call_iterator *it,
                            const struct btrace_thread_info *btinfo,
                            unsigned int number)
{
  const unsigned int length = btinfo->functions.size ();

  if ((number == 0) || (number > length))
    return 0;

  it->btinfo = btinfo;
  it->index = number - 1;
  return 1;
}
/* See btrace.h.  */

void
btrace_set_insn_history (struct btrace_thread_info *btinfo,
                         const struct btrace_insn_iterator *begin,
                         const struct btrace_insn_iterator *end)
{
  if (btinfo->insn_history == NULL)
    btinfo->insn_history = XCNEW (struct btrace_insn_history);

  btinfo->insn_history->begin = *begin;
  btinfo->insn_history->end = *end;
}
/* See btrace.h.  */

void
btrace_set_call_history (struct btrace_thread_info *btinfo,
                         const struct btrace_call_iterator *begin,
                         const struct btrace_call_iterator *end)
{
  gdb_assert (begin->btinfo == end->btinfo);

  if (btinfo->call_history == NULL)
    btinfo->call_history = XCNEW (struct btrace_call_history);

  btinfo->call_history->begin = *begin;
  btinfo->call_history->end = *end;
}
/* See btrace.h.  */

int
btrace_is_replaying (struct thread_info *tp)
{
  return tp->btrace.replay != NULL;
}
/* See btrace.h.  */

int
btrace_is_empty (struct thread_info *tp)
{
  struct btrace_insn_iterator begin, end;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;

  if (btinfo->functions.empty ())
    return 1;

  btrace_insn_begin (&begin, btinfo);
  btrace_insn_end (&end, btinfo);

  return btrace_insn_cmp (&begin, &end) == 0;
}
/* Forward the cleanup request.  */

static void
do_btrace_data_cleanup (void *arg)
{
  btrace_data_fini ((struct btrace_data *) arg);
}

/* See btrace.h.  */

struct cleanup *
make_cleanup_btrace_data (struct btrace_data *data)
{
  return make_cleanup (do_btrace_data_cleanup, data);
}
#if defined (HAVE_LIBIPT)

/* Print a single packet.  */

static void
pt_print_packet (const struct pt_packet *packet)
{
  switch (packet->type)
    {
    default:
      printf_unfiltered (("[??: %x]"), packet->type);
      break;

    case ppt_psb:
      printf_unfiltered (("psb"));
      break;

    case ppt_psbend:
      printf_unfiltered (("psbend"));
      break;

    case ppt_pad:
      printf_unfiltered (("pad"));
      break;

    case ppt_tip:
      printf_unfiltered (("tip %u: 0x%" PRIx64 ""),
                         packet->payload.ip.ipc,
                         packet->payload.ip.ip);
      break;

    case ppt_tip_pge:
      printf_unfiltered (("tip.pge %u: 0x%" PRIx64 ""),
                         packet->payload.ip.ipc,
                         packet->payload.ip.ip);
      break;

    case ppt_tip_pgd:
      printf_unfiltered (("tip.pgd %u: 0x%" PRIx64 ""),
                         packet->payload.ip.ipc,
                         packet->payload.ip.ip);
      break;

    case ppt_fup:
      printf_unfiltered (("fup %u: 0x%" PRIx64 ""),
                         packet->payload.ip.ipc,
                         packet->payload.ip.ip);
      break;

    case ppt_tnt_8:
      printf_unfiltered (("tnt-8 %u: 0x%" PRIx64 ""),
                         packet->payload.tnt.bit_size,
                         packet->payload.tnt.payload);
      break;

    case ppt_tnt_64:
      printf_unfiltered (("tnt-64 %u: 0x%" PRIx64 ""),
                         packet->payload.tnt.bit_size,
                         packet->payload.tnt.payload);
      break;

    case ppt_pip:
      printf_unfiltered (("pip %" PRIx64 "%s"), packet->payload.pip.cr3,
                         packet->payload.pip.nr ? (" nr") : (""));
      break;

    case ppt_tsc:
      printf_unfiltered (("tsc %" PRIx64 ""), packet->payload.tsc.tsc);
      break;

    case ppt_cbr:
      printf_unfiltered (("cbr %u"), packet->payload.cbr.ratio);
      break;

    case ppt_mode:
      switch (packet->payload.mode.leaf)
        {
        default:
          printf_unfiltered (("mode %u"), packet->payload.mode.leaf);
          break;

        case pt_mol_exec:
          printf_unfiltered (("mode.exec%s%s"),
                             packet->payload.mode.bits.exec.csl
                             ? (" cs.l") : (""),
                             packet->payload.mode.bits.exec.csd
                             ? (" cs.d") : (""));
          break;

        case pt_mol_tsx:
          printf_unfiltered (("mode.tsx%s%s"),
                             packet->payload.mode.bits.tsx.intx
                             ? (" intx") : (""),
                             packet->payload.mode.bits.tsx.abrt
                             ? (" abrt") : (""));
          break;
        }
      break;

    case ppt_ovf:
      printf_unfiltered (("ovf"));
      break;

    case ppt_stop:
      printf_unfiltered (("stop"));
      break;

    case ppt_vmcs:
      printf_unfiltered (("vmcs %" PRIx64 ""), packet->payload.vmcs.base);
      break;

    case ppt_tma:
      printf_unfiltered (("tma %x %x"), packet->payload.tma.ctc,
                         packet->payload.tma.fc);
      break;

    case ppt_mtc:
      printf_unfiltered (("mtc %x"), packet->payload.mtc.ctc);
      break;

    case ppt_cyc:
      printf_unfiltered (("cyc %" PRIx64 ""), packet->payload.cyc.value);
      break;

    case ppt_mnt:
      printf_unfiltered (("mnt %" PRIx64 ""), packet->payload.mnt.payload);
      break;
    }
}
/* Decode packets into MAINT using DECODER.  */

static void
btrace_maint_decode_pt (struct btrace_maint_info *maint,
                        struct pt_packet_decoder *decoder)
{
  int errcode;

  for (;;)
    {
      struct btrace_pt_packet packet;

      errcode = pt_pkt_sync_forward (decoder);
      if (errcode < 0)
        break;

      for (;;)
        {
          pt_pkt_get_offset (decoder, &packet.offset);

          errcode = pt_pkt_next (decoder, &packet.packet,
                                 sizeof(packet.packet));
          if (errcode < 0)
            break;

          if (maint_btrace_pt_skip_pad == 0 || packet.packet.type != ppt_pad)
            {
              packet.errcode = pt_errcode (errcode);
              VEC_safe_push (btrace_pt_packet_s, maint->variant.pt.packets,
                             &packet);
            }
        }

      if (errcode == -pte_eos)
        break;

      packet.errcode = pt_errcode (errcode);
      VEC_safe_push (btrace_pt_packet_s, maint->variant.pt.packets,
                     &packet);

      warning (_("Error at trace offset 0x%" PRIx64 ": %s."),
               packet.offset, pt_errstr (packet.errcode));
    }

  if (errcode != -pte_eos)
    warning (_("Failed to synchronize onto the Intel Processor Trace "
               "stream: %s."), pt_errstr (pt_errcode (errcode)));
}
/* Update the packet history in BTINFO.  */

static void
btrace_maint_update_pt_packets (struct btrace_thread_info *btinfo)
{
  volatile struct gdb_exception except;
  struct pt_packet_decoder *decoder;
  struct btrace_data_pt *pt;
  struct pt_config config;
  int errcode;

  pt = &btinfo->data.variant.pt;

  /* Nothing to do if there is no trace.  */
  if (pt->size == 0)
    return;

  memset (&config, 0, sizeof(config));

  config.size = sizeof (config);
  config.begin = pt->data;
  config.end = pt->data + pt->size;

  config.cpu.vendor = pt_translate_cpu_vendor (pt->config.cpu.vendor);
  config.cpu.family = pt->config.cpu.family;
  config.cpu.model = pt->config.cpu.model;
  config.cpu.stepping = pt->config.cpu.stepping;

  errcode = pt_cpu_errata (&config.errata, &config.cpu);
  if (errcode < 0)
    error (_("Failed to configure the Intel Processor Trace decoder: %s."),
           pt_errstr (pt_errcode (errcode)));

  decoder = pt_pkt_alloc_decoder (&config);
  if (decoder == NULL)
    error (_("Failed to allocate the Intel Processor Trace decoder."));

  TRY
    {
      btrace_maint_decode_pt (&btinfo->maint, decoder);
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      pt_pkt_free_decoder (decoder);

      if (except.reason < 0)
        throw_exception (except);
    }
  END_CATCH

  pt_pkt_free_decoder (decoder);
}

#endif /* defined (HAVE_LIBIPT) */
/* Update the packet maintenance information for BTINFO and store the
   low and high bounds into BEGIN and END, respectively.
   Store the current iterator state into FROM and TO.  */

static void
btrace_maint_update_packets (struct btrace_thread_info *btinfo,
                             unsigned int *begin, unsigned int *end,
                             unsigned int *from, unsigned int *to)
{
  switch (btinfo->data.format)
    {
    default:
      *begin = 0;
      *end = 0;
      *from = 0;
      *to = 0;
      break;

    case BTRACE_FORMAT_BTS:
      /* Nothing to do - we operate directly on BTINFO->DATA.  */
      *begin = 0;
      *end = VEC_length (btrace_block_s, btinfo->data.variant.bts.blocks);
      *from = btinfo->maint.variant.bts.packet_history.begin;
      *to = btinfo->maint.variant.bts.packet_history.end;
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      if (VEC_empty (btrace_pt_packet_s, btinfo->maint.variant.pt.packets))
        btrace_maint_update_pt_packets (btinfo);

      *begin = 0;
      *end = VEC_length (btrace_pt_packet_s, btinfo->maint.variant.pt.packets);
      *from = btinfo->maint.variant.pt.packet_history.begin;
      *to = btinfo->maint.variant.pt.packet_history.end;
      break;
#endif /* defined (HAVE_LIBIPT) */
    }
}
/* Print packets in BTINFO from BEGIN (inclusive) until END (exclusive) and
   update the current iterator position.  */

static void
btrace_maint_print_packets (struct btrace_thread_info *btinfo,
                            unsigned int begin, unsigned int end)
{
  switch (btinfo->data.format)
    {
    default:
      break;

    case BTRACE_FORMAT_BTS:
      {
        VEC (btrace_block_s) *blocks;
        unsigned int blk;

        blocks = btinfo->data.variant.bts.blocks;
        for (blk = begin; blk < end; ++blk)
          {
            const btrace_block_s *block;

            block = VEC_index (btrace_block_s, blocks, blk);

            printf_unfiltered ("%u\tbegin: %s, end: %s\n", blk,
                               core_addr_to_string_nz (block->begin),
                               core_addr_to_string_nz (block->end));
          }

        btinfo->maint.variant.bts.packet_history.begin = begin;
        btinfo->maint.variant.bts.packet_history.end = end;
      }
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      {
        VEC (btrace_pt_packet_s) *packets;
        unsigned int pkt;

        packets = btinfo->maint.variant.pt.packets;
        for (pkt = begin; pkt < end; ++pkt)
          {
            const struct btrace_pt_packet *packet;

            packet = VEC_index (btrace_pt_packet_s, packets, pkt);

            printf_unfiltered ("%u\t", pkt);
            printf_unfiltered ("0x%" PRIx64 "\t", packet->offset);

            if (packet->errcode == pte_ok)
              pt_print_packet (&packet->packet);
            else
              printf_unfiltered ("[error: %s]", pt_errstr (packet->errcode));

            printf_unfiltered ("\n");
          }

        btinfo->maint.variant.pt.packet_history.begin = begin;
        btinfo->maint.variant.pt.packet_history.end = end;
      }
      break;
#endif /* defined (HAVE_LIBIPT) */
    }
}
/* Read a number from an argument string.  */

static unsigned int
get_uint (char **arg)
{
  char *begin, *end, *pos;
  unsigned long number;

  begin = *arg;
  pos = skip_spaces (begin);

  if (!isdigit (*pos))
    error (_("Expected positive number, got: %s."), pos);

  number = strtoul (pos, &end, 10);
  if (number > UINT_MAX)
    error (_("Number too big."));

  *arg += (end - begin);

  return (unsigned int) number;
}
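/* Note: get_uint advances *ARG past the digits it consumed, so callers can
   continue parsing, e.g. the "<begin>,<end>" argument form of the
   "maintenance btrace packet-history" command below.  */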
/* Read a context size from an argument string.  */

static int
get_context_size (char **arg)
{
  char *pos;

  pos = skip_spaces (*arg);

  if (!isdigit (*pos))
    error (_("Expected positive number, got: %s."), pos);

  return strtol (pos, arg, 10);
}
/* Complain about junk at the end of an argument string.  */

static void
no_chunk (char *arg)
{
  if (*arg != 0)
    error (_("Junk after argument: %s."), arg);
}
/* The "maintenance btrace packet-history" command.  */

static void
maint_btrace_packet_history_cmd (char *arg, int from_tty)
{
  struct btrace_thread_info *btinfo;
  struct thread_info *tp;
  unsigned int size, begin, end, from, to;

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  size = 10;
  btinfo = &tp->btrace;

  btrace_maint_update_packets (btinfo, &begin, &end, &from, &to);
  if (begin == end)
    {
      printf_unfiltered (_("No trace.\n"));
      return;
    }

  if (arg == NULL || *arg == 0 || strcmp (arg, "+") == 0)
    {
      from = to;

      if (end - from < size)
        size = end - from;
      to = from + size;
    }
  else if (strcmp (arg, "-") == 0)
    {
      to = from;

      if (to - begin < size)
        size = to - begin;
      from = to - size;
    }
  else
    {
      from = get_uint (&arg);
      if (from >= end)
        error (_("'%u' is out of range."), from);

      arg = skip_spaces (arg);
      if (*arg == ',')
        {
          arg = skip_spaces (++arg);

          if (*arg == '+')
            {
              arg += 1;
              size = get_context_size (&arg);

              no_chunk (arg);

              if (end - from < size)
                size = end - from;
              to = from + size;
            }
          else if (*arg == '-')
            {
              arg += 1;
              size = get_context_size (&arg);

              no_chunk (arg);

              /* Include the packet given as first argument.  */
              from += 1;
              to = from;

              if (to - begin < size)
                size = to - begin;
              from = to - size;
            }
          else
            {
              to = get_uint (&arg);

              /* Include the packet at the second argument and silently
                 truncate the range.  */
              if (to < end)
                to += 1;
              else
                to = end;

              no_chunk (arg);
            }
        }
      else
        {
          no_chunk (arg);

          if (end - from < size)
            size = end - from;
          to = from + size;
        }
    }

  btrace_maint_print_packets (btinfo, from, to);
}
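/* For illustration, some invocations accepted by the command above (the
   packet numbers are made up):

     (gdb) maint btrace packet-history          <- next ten packets
     (gdb) maint btrace packet-history -        <- previous ten packets
     (gdb) maint btrace packet-history 42       <- ten packets starting at 42
     (gdb) maint btrace packet-history 42,50    <- packets 42 through 50
     (gdb) maint btrace packet-history 42,+5    <- five packets starting at 42  */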
/* The "maintenance btrace clear-packet-history" command.  */

static void
maint_btrace_clear_packet_history_cmd (char *args, int from_tty)
{
  struct btrace_thread_info *btinfo;
  struct thread_info *tp;

  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  btinfo = &tp->btrace;

  /* Must clear the maint data before - it depends on BTINFO->DATA.  */
  btrace_maint_clear (btinfo);
  btrace_data_clear (&btinfo->data);
}
/* The "maintenance btrace clear" command.  */

static void
maint_btrace_clear_cmd (char *args, int from_tty)
{
  struct thread_info *tp;

  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  btrace_clear (tp);
}
/* The "maintenance btrace" command.  */

static void
maint_btrace_cmd (char *args, int from_tty)
{
  help_list (maint_btrace_cmdlist, "maintenance btrace ", all_commands,
             gdb_stdout);
}

/* The "maintenance set btrace" command.  */

static void
maint_btrace_set_cmd (char *args, int from_tty)
{
  help_list (maint_btrace_set_cmdlist, "maintenance set btrace ", all_commands,
             gdb_stdout);
}

/* The "maintenance show btrace" command.  */

static void
maint_btrace_show_cmd (char *args, int from_tty)
{
  help_list (maint_btrace_show_cmdlist, "maintenance show btrace ",
             all_commands, gdb_stdout);
}

/* The "maintenance set btrace pt" command.  */

static void
maint_btrace_pt_set_cmd (char *args, int from_tty)
{
  help_list (maint_btrace_pt_set_cmdlist, "maintenance set btrace pt ",
             all_commands, gdb_stdout);
}

/* The "maintenance show btrace pt" command.  */

static void
maint_btrace_pt_show_cmd (char *args, int from_tty)
{
  help_list (maint_btrace_pt_show_cmdlist, "maintenance show btrace pt ",
             all_commands, gdb_stdout);
}
/* The "maintenance info btrace" command.  */

static void
maint_info_btrace_cmd (char *args, int from_tty)
{
  struct btrace_thread_info *btinfo;
  struct thread_info *tp;
  const struct btrace_config *conf;

  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  btinfo = &tp->btrace;

  conf = btrace_conf (btinfo);
  if (conf == NULL)
    error (_("No btrace configuration."));

  printf_unfiltered (_("Format: %s.\n"),
                     btrace_format_string (conf->format));

  switch (conf->format)
    {
    default:
      break;

    case BTRACE_FORMAT_BTS:
      printf_unfiltered (_("Number of packets: %u.\n"),
                         VEC_length (btrace_block_s,
                                     btinfo->data.variant.bts.blocks));
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      {
        struct pt_version version;

        version = pt_library_version ();
        printf_unfiltered (_("Version: %u.%u.%u%s.\n"), version.major,
                           version.minor, version.build,
                           version.ext != NULL ? version.ext : "");

        btrace_maint_update_pt_packets (btinfo);
        printf_unfiltered (_("Number of packets: %u.\n"),
                           VEC_length (btrace_pt_packet_s,
                                       btinfo->maint.variant.pt.packets));
      }
      break;
#endif /* defined (HAVE_LIBIPT) */
    }
}
/* The "maint show btrace pt skip-pad" show value function.  */

static void
show_maint_btrace_pt_skip_pad (struct ui_file *file, int from_tty,
                               struct cmd_list_element *c,
                               const char *value)
{
  fprintf_filtered (file, _("Skip PAD packets is %s.\n"), value);
}
/* Initialize btrace maintenance commands.  */

void _initialize_btrace (void);

void
_initialize_btrace (void)
{
  add_cmd ("btrace", class_maintenance, maint_info_btrace_cmd,
           _("Info about branch tracing data."), &maintenanceinfolist);

  add_prefix_cmd ("btrace", class_maintenance, maint_btrace_cmd,
                  _("Branch tracing maintenance commands."),
                  &maint_btrace_cmdlist, "maintenance btrace ",
                  0, &maintenancelist);

  add_prefix_cmd ("btrace", class_maintenance, maint_btrace_set_cmd, _("\
Set branch tracing specific variables."),
                  &maint_btrace_set_cmdlist, "maintenance set btrace ",
                  0, &maintenance_set_cmdlist);

  add_prefix_cmd ("pt", class_maintenance, maint_btrace_pt_set_cmd, _("\
Set Intel Processor Trace specific variables."),
                  &maint_btrace_pt_set_cmdlist, "maintenance set btrace pt ",
                  0, &maint_btrace_set_cmdlist);

  add_prefix_cmd ("btrace", class_maintenance, maint_btrace_show_cmd, _("\
Show branch tracing specific variables."),
                  &maint_btrace_show_cmdlist, "maintenance show btrace ",
                  0, &maintenance_show_cmdlist);

  add_prefix_cmd ("pt", class_maintenance, maint_btrace_pt_show_cmd, _("\
Show Intel Processor Trace specific variables."),
                  &maint_btrace_pt_show_cmdlist, "maintenance show btrace pt ",
                  0, &maint_btrace_show_cmdlist);

  add_setshow_boolean_cmd ("skip-pad", class_maintenance,
                           &maint_btrace_pt_skip_pad, _("\
Set whether PAD packets should be skipped in the btrace packet history."), _("\
Show whether PAD packets should be skipped in the btrace packet history."), _("\
When enabled, PAD packets are ignored in the btrace packet history."),
                           NULL, show_maint_btrace_pt_skip_pad,
                           &maint_btrace_pt_set_cmdlist,
                           &maint_btrace_pt_show_cmdlist);

  add_cmd ("packet-history", class_maintenance, maint_btrace_packet_history_cmd,
           _("Print the raw branch tracing data.\n\
With no argument, print ten more packets after the previous ten-line print.\n\
With '-' as argument print ten packets before a previous ten-line print.\n\
One argument specifies the starting packet of a ten-line print.\n\
Two arguments with comma between specify starting and ending packets to \
print.\n\
Preceded with '+'/'-' the second argument specifies the distance from the \
first.\n"),
           &maint_btrace_cmdlist);

  add_cmd ("clear-packet-history", class_maintenance,
           maint_btrace_clear_packet_history_cmd,
           _("Clears the branch tracing packet history.\n\
Discards the raw branch tracing data but not the execution history data.\n"),
           &maint_btrace_cmdlist);

  add_cmd ("clear", class_maintenance, maint_btrace_clear_cmd,
           _("Clears the branch tracing data.\n\
Discards the raw branch tracing data and the execution history data.\n\
The next 'record' command will fetch the branch tracing data anew.\n"),
           &maint_btrace_cmdlist);
}