1 /* Branch trace support for GDB, the GNU debugger.
2
3 Copyright (C) 2013-2017 Free Software Foundation, Inc.
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include "btrace.h"
24 #include "gdbthread.h"
25 #include "inferior.h"
26 #include "target.h"
27 #include "record.h"
28 #include "symtab.h"
29 #include "disasm.h"
30 #include "source.h"
31 #include "filenames.h"
32 #include "xml-support.h"
33 #include "regcache.h"
34 #include "rsp-low.h"
35 #include "gdbcmd.h"
36 #include "cli/cli-utils.h"
37
38 #include <inttypes.h>
39 #include <ctype.h>
40 #include <algorithm>
41
42 /* Command lists for btrace maintenance commands. */
43 static struct cmd_list_element *maint_btrace_cmdlist;
44 static struct cmd_list_element *maint_btrace_set_cmdlist;
45 static struct cmd_list_element *maint_btrace_show_cmdlist;
46 static struct cmd_list_element *maint_btrace_pt_set_cmdlist;
47 static struct cmd_list_element *maint_btrace_pt_show_cmdlist;
48
49 /* Control whether to skip PAD packets when computing the packet history. */
50 static int maint_btrace_pt_skip_pad = 1;
51
52 /* A vector of function segments. */
53 typedef struct btrace_function * bfun_s;
54 DEF_VEC_P (bfun_s);
55
56 static void btrace_add_pc (struct thread_info *tp);
57
58 /* Print a record debug message. Use do ... while (0) to avoid ambiguities
59 when used in if statements. */
60
61 #define DEBUG(msg, args...) \
62 do \
63 { \
64 if (record_debug != 0) \
65 fprintf_unfiltered (gdb_stdlog, \
66 "[btrace] " msg "\n", ##args); \
67 } \
68 while (0)
69
70 #define DEBUG_FTRACE(msg, args...) DEBUG ("[ftrace] " msg, ##args)
71
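/* Illustration, not part of the original sources: the do ... while (0)
   wrapper lets DEBUG be used like a single statement, e.g.

     if (conf != NULL)
       DEBUG ("enable");
     else
       DEBUG ("disable");

   With a plain { ... } block instead, the ';' after DEBUG ("enable")
   would terminate the if statement and the else would fail to parse.  */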
72 /* Return the function name of a recorded function segment for printing.
73 This function never returns NULL. */
74
75 static const char *
76 ftrace_print_function_name (const struct btrace_function *bfun)
77 {
78 struct minimal_symbol *msym;
79 struct symbol *sym;
80
81 msym = bfun->msym;
82 sym = bfun->sym;
83
84 if (sym != NULL)
85 return SYMBOL_PRINT_NAME (sym);
86
87 if (msym != NULL)
88 return MSYMBOL_PRINT_NAME (msym);
89
90 return "<unknown>";
91 }
92
93 /* Return the file name of a recorded function segment for printing.
94 This function never returns NULL. */
95
96 static const char *
97 ftrace_print_filename (const struct btrace_function *bfun)
98 {
99 struct symbol *sym;
100 const char *filename;
101
102 sym = bfun->sym;
103
104 if (sym != NULL)
105 filename = symtab_to_filename_for_display (symbol_symtab (sym));
106 else
107 filename = "<unknown>";
108
109 return filename;
110 }
111
112 /* Return a string representation of the address of an instruction.
113 This function never returns NULL. */
114
115 static const char *
116 ftrace_print_insn_addr (const struct btrace_insn *insn)
117 {
118 if (insn == NULL)
119 return "<nil>";
120
121 return core_addr_to_string_nz (insn->pc);
122 }
123
124 /* Print an ftrace debug status message. */
125
126 static void
127 ftrace_debug (const struct btrace_function *bfun, const char *prefix)
128 {
129 const char *fun, *file;
130 unsigned int ibegin, iend;
131 int level;
132
133 fun = ftrace_print_function_name (bfun);
134 file = ftrace_print_filename (bfun);
135 level = bfun->level;
136
137 ibegin = bfun->insn_offset;
138 iend = ibegin + VEC_length (btrace_insn_s, bfun->insn);
139
140 DEBUG_FTRACE ("%s: fun = %s, file = %s, level = %d, insn = [%u; %u)",
141 prefix, fun, file, level, ibegin, iend);
142 }
143
144 /* Return the number of instructions in a given function call segment. */
145
146 static unsigned int
147 ftrace_call_num_insn (const struct btrace_function *bfun)
148 {
149 if (bfun == NULL)
150 return 0;
151
152 /* A gap is always counted as one instruction. */
153 if (bfun->errcode != 0)
154 return 1;
155
156 return VEC_length (btrace_insn_s, bfun->insn);
157 }
158
159 /* Return non-zero if BFUN does not match MFUN and FUN;
160 return zero otherwise. */
161
162 static int
163 ftrace_function_switched (const struct btrace_function *bfun,
164 const struct minimal_symbol *mfun,
165 const struct symbol *fun)
166 {
167 struct minimal_symbol *msym;
168 struct symbol *sym;
169
170 msym = bfun->msym;
171 sym = bfun->sym;
172
173 /* If the minimal symbol changed, we certainly switched functions. */
174 if (mfun != NULL && msym != NULL
175 && strcmp (MSYMBOL_LINKAGE_NAME (mfun), MSYMBOL_LINKAGE_NAME (msym)) != 0)
176 return 1;
177
178 /* If the symbol changed, we certainly switched functions. */
179 if (fun != NULL && sym != NULL)
180 {
181 const char *bfname, *fname;
182
183 /* Check the function name. */
184 if (strcmp (SYMBOL_LINKAGE_NAME (fun), SYMBOL_LINKAGE_NAME (sym)) != 0)
185 return 1;
186
187 /* Check the location of those functions, as well. */
188 bfname = symtab_to_fullname (symbol_symtab (sym));
189 fname = symtab_to_fullname (symbol_symtab (fun));
190 if (filename_cmp (fname, bfname) != 0)
191 return 1;
192 }
193
194 /* If we lost symbol information, we switched functions. */
195 if (!(msym == NULL && sym == NULL) && mfun == NULL && fun == NULL)
196 return 1;
197
198 /* If we gained symbol information, we switched functions. */
199 if (msym == NULL && sym == NULL && !(mfun == NULL && fun == NULL))
200 return 1;
201
202 return 0;
203 }
204
205 /* Allocate and initialize a new branch trace function segment.
206 PREV is the chronologically preceding function segment.
207 MFUN and FUN are the symbol information we have for this function. */
208
209 static struct btrace_function *
210 ftrace_new_function (struct btrace_function *prev,
211 struct minimal_symbol *mfun,
212 struct symbol *fun)
213 {
214 struct btrace_function *bfun;
215
216 bfun = XCNEW (struct btrace_function);
217
218 bfun->msym = mfun;
219 bfun->sym = fun;
220 bfun->flow.prev = prev;
221
222 if (prev == NULL)
223 {
224 /* Start counting at one. */
225 bfun->number = 1;
226 bfun->insn_offset = 1;
227 }
228 else
229 {
230 gdb_assert (prev->flow.next == NULL);
231 prev->flow.next = bfun;
232
233 bfun->number = prev->number + 1;
234 bfun->insn_offset = prev->insn_offset + ftrace_call_num_insn (prev);
235 bfun->level = prev->level;
236 }
237
238 return bfun;
239 }
240
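/* Worked example for the numbering above, illustrative only: the first
   segment gets number 1 and insn_offset 1.  If it contains three
   instructions, its successor gets number 2 and insn_offset 4; had the
   first segment been a gap, it would count as a single instruction and
   the successor would start at insn_offset 2 (see ftrace_call_num_insn).  */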
241 /* Update the UP field of a function segment. */
242
243 static void
244 ftrace_update_caller (struct btrace_function *bfun,
245 struct btrace_function *caller,
246 enum btrace_function_flag flags)
247 {
248 if (bfun->up != NULL)
249 ftrace_debug (bfun, "updating caller");
250
251 bfun->up = caller;
252 bfun->flags = flags;
253
254 ftrace_debug (bfun, "set caller");
255 ftrace_debug (caller, "..to");
256 }
257
258 /* Fix up the caller for all segments of a function. */
259
260 static void
261 ftrace_fixup_caller (struct btrace_function *bfun,
262 struct btrace_function *caller,
263 enum btrace_function_flag flags)
264 {
265 struct btrace_function *prev, *next;
266
267 ftrace_update_caller (bfun, caller, flags);
268
269 /* Update all function segments belonging to the same function. */
270 for (prev = bfun->segment.prev; prev != NULL; prev = prev->segment.prev)
271 ftrace_update_caller (prev, caller, flags);
272
273 for (next = bfun->segment.next; next != NULL; next = next->segment.next)
274 ftrace_update_caller (next, caller, flags);
275 }
276
277 /* Add a new function segment for a call.
278 CALLER is the chronologically preceding function segment.
279 MFUN and FUN are the symbol information we have for this function. */
280
281 static struct btrace_function *
282 ftrace_new_call (struct btrace_function *caller,
283 struct minimal_symbol *mfun,
284 struct symbol *fun)
285 {
286 struct btrace_function *bfun;
287
288 bfun = ftrace_new_function (caller, mfun, fun);
289 bfun->up = caller;
290 bfun->level += 1;
291
292 ftrace_debug (bfun, "new call");
293
294 return bfun;
295 }
296
297 /* Add a new function segment for a tail call.
298 CALLER is the chronologically preceding function segment.
299 MFUN and FUN are the symbol information we have for this function. */
300
301 static struct btrace_function *
302 ftrace_new_tailcall (struct btrace_function *caller,
303 struct minimal_symbol *mfun,
304 struct symbol *fun)
305 {
306 struct btrace_function *bfun;
307
308 bfun = ftrace_new_function (caller, mfun, fun);
309 bfun->up = caller;
310 bfun->level += 1;
311 bfun->flags |= BFUN_UP_LINKS_TO_TAILCALL;
312
313 ftrace_debug (bfun, "new tail call");
314
315 return bfun;
316 }
317
318 /* Return the caller of BFUN or NULL if there is none. This function skips
319 tail calls in the call chain. */
320 static struct btrace_function *
321 ftrace_get_caller (struct btrace_function *bfun)
322 {
323 for (; bfun != NULL; bfun = bfun->up)
324 if ((bfun->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
325 return bfun->up;
326
327 return NULL;
328 }
329
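/* Illustration with a hypothetical chain: suppose main calls foo and foo
   tail-calls bar.  Then bar->up is foo with BFUN_UP_LINKS_TO_TAILCALL
   set, so ftrace_get_caller (bar) skips foo and yields main - the caller
   a user would see in the back trace after the tail call.  */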
330 /* Find the innermost caller in the back trace of BFUN with MFUN/FUN
331 symbol information. */
332
333 static struct btrace_function *
334 ftrace_find_caller (struct btrace_function *bfun,
335 struct minimal_symbol *mfun,
336 struct symbol *fun)
337 {
338 for (; bfun != NULL; bfun = bfun->up)
339 {
340 /* Skip functions with incompatible symbol information. */
341 if (ftrace_function_switched (bfun, mfun, fun))
342 continue;
343
344 /* This is the function segment we're looking for. */
345 break;
346 }
347
348 return bfun;
349 }
350
351 /* Find the innermost caller in the back trace of BFUN, skipping all
352 function segments that do not end with a call instruction (e.g.
353 tail calls ending with a jump). */
354
355 static struct btrace_function *
356 ftrace_find_call (struct btrace_function *bfun)
357 {
358 for (; bfun != NULL; bfun = bfun->up)
359 {
360 struct btrace_insn *last;
361
362 /* Skip gaps. */
363 if (bfun->errcode != 0)
364 continue;
365
366 last = VEC_last (btrace_insn_s, bfun->insn);
367
368 if (last->iclass == BTRACE_INSN_CALL)
369 break;
370 }
371
372 return bfun;
373 }
374
375 /* Add a continuation segment for a function into which we return.
376 PREV is the chronologically preceding function segment.
377 MFUN and FUN are the symbol information we have for this function. */
378
379 static struct btrace_function *
380 ftrace_new_return (struct btrace_function *prev,
381 struct minimal_symbol *mfun,
382 struct symbol *fun)
383 {
384 struct btrace_function *bfun, *caller;
385
386 bfun = ftrace_new_function (prev, mfun, fun);
387
388 /* It is important to start at PREV's caller. Otherwise, we might find
389 PREV itself, if PREV is a recursive function. */
390 caller = ftrace_find_caller (prev->up, mfun, fun);
391 if (caller != NULL)
392 {
393 /* The caller of PREV is the preceding btrace function segment in this
394 function instance. */
395 gdb_assert (caller->segment.next == NULL);
396
397 caller->segment.next = bfun;
398 bfun->segment.prev = caller;
399
400 /* Maintain the function level. */
401 bfun->level = caller->level;
402
403 /* Maintain the call stack. */
404 bfun->up = caller->up;
405 bfun->flags = caller->flags;
406
407 ftrace_debug (bfun, "new return");
408 }
409 else
410 {
411 /* We did not find a caller. This could mean that something went
412 wrong or that the call is simply not included in the trace. */
413
414 /* Let's search for some actual call. */
415 caller = ftrace_find_call (prev->up);
416 if (caller == NULL)
417 {
418 /* There is no call in PREV's back trace. We assume that the
419 branch trace did not include it. */
420
421 /* Let's find the topmost function and add a new caller for it.
422 This should handle a series of initial tail calls. */
423 while (prev->up != NULL)
424 prev = prev->up;
425
426 bfun->level = prev->level - 1;
427
428 /* Fix up the call stack for PREV. */
429 ftrace_fixup_caller (prev, bfun, BFUN_UP_LINKS_TO_RET);
430
431 ftrace_debug (bfun, "new return - no caller");
432 }
433 else
434 {
435 /* There is a call in PREV's back trace to which we should have
436 returned but didn't. Let's start a new, separate back trace
437 from PREV's level. */
438 bfun->level = prev->level - 1;
439
440 /* We fix up the back trace for PREV but leave other function segments
441 on the same level as they are.
442 This should handle things like schedule () correctly where we're
443 switching contexts. */
444 prev->up = bfun;
445 prev->flags = BFUN_UP_LINKS_TO_RET;
446
447 ftrace_debug (bfun, "new return - unknown caller");
448 }
449 }
450
451 return bfun;
452 }
453
454 /* Add a new function segment for a function switch.
455 PREV is the chronologically preceding function segment.
456 MFUN and FUN are the symbol information we have for this function. */
457
458 static struct btrace_function *
459 ftrace_new_switch (struct btrace_function *prev,
460 struct minimal_symbol *mfun,
461 struct symbol *fun)
462 {
463 struct btrace_function *bfun;
464
465 /* This is an unexplained function switch. We can't really be sure about the
466 call stack, so the best we can do is to preserve it. */
467 bfun = ftrace_new_function (prev, mfun, fun);
468 bfun->up = prev->up;
469 bfun->flags = prev->flags;
470
471 ftrace_debug (bfun, "new switch");
472
473 return bfun;
474 }
475
476 /* Add a new function segment for a gap in the trace due to a decode error.
477 PREV is the chronologically preceding function segment.
478 ERRCODE is the format-specific error code. */
479
480 static struct btrace_function *
481 ftrace_new_gap (struct btrace_function *prev, int errcode)
482 {
483 struct btrace_function *bfun;
484
485 /* We hijack PREV if it was empty. */
486 if (prev != NULL && prev->errcode == 0
487 && VEC_empty (btrace_insn_s, prev->insn))
488 bfun = prev;
489 else
490 bfun = ftrace_new_function (prev, NULL, NULL);
491
492 bfun->errcode = errcode;
493
494 ftrace_debug (bfun, "new gap");
495
496 return bfun;
497 }
498
499 /* Update BFUN with respect to the instruction at PC. This may create new
500 function segments.
501 Return the chronologically latest function segment, never NULL. */
502
503 static struct btrace_function *
504 ftrace_update_function (struct btrace_function *bfun, CORE_ADDR pc)
505 {
506 struct bound_minimal_symbol bmfun;
507 struct minimal_symbol *mfun;
508 struct symbol *fun;
509 struct btrace_insn *last;
510
511 /* Try to determine the function we're in. We use both types of symbols
512 to avoid surprises when we sometimes get a full symbol and sometimes
513 only a minimal symbol. */
514 fun = find_pc_function (pc);
515 bmfun = lookup_minimal_symbol_by_pc (pc);
516 mfun = bmfun.minsym;
517
518 if (fun == NULL && mfun == NULL)
519 DEBUG_FTRACE ("no symbol at %s", core_addr_to_string_nz (pc));
520
521 /* If we didn't have a function or if we had a gap before, we create one. */
522 if (bfun == NULL || bfun->errcode != 0)
523 return ftrace_new_function (bfun, mfun, fun);
524
525 /* Check the last instruction, if we have one.
526 We do this check first, since it allows us to fill in the call stack
527 links in addition to the normal flow links. */
528 last = NULL;
529 if (!VEC_empty (btrace_insn_s, bfun->insn))
530 last = VEC_last (btrace_insn_s, bfun->insn);
531
532 if (last != NULL)
533 {
534 switch (last->iclass)
535 {
536 case BTRACE_INSN_RETURN:
537 {
538 const char *fname;
539
540 /* On some systems, _dl_runtime_resolve returns to the resolved
541 function instead of jumping to it. From our perspective,
542 however, this is a tailcall.
543 If we treated it as return, we wouldn't be able to find the
544 resolved function in our stack back trace. Hence, we would
545 lose the current stack back trace and start anew with an empty
546 back trace. When the resolved function returns, we would then
547 create a stack back trace with the same function names but
548 different frame ids. This would confuse stepping. */
549 fname = ftrace_print_function_name (bfun);
550 if (strcmp (fname, "_dl_runtime_resolve") == 0)
551 return ftrace_new_tailcall (bfun, mfun, fun);
552
553 return ftrace_new_return (bfun, mfun, fun);
554 }
555
556 case BTRACE_INSN_CALL:
557 /* Ignore calls to the next instruction. They are used for PIC. */
558 if (last->pc + last->size == pc)
559 break;
560
561 return ftrace_new_call (bfun, mfun, fun);
562
563 case BTRACE_INSN_JUMP:
564 {
565 CORE_ADDR start;
566
567 start = get_pc_function_start (pc);
568
569 /* A jump to the start of a function is (typically) a tail call. */
570 if (start == pc)
571 return ftrace_new_tailcall (bfun, mfun, fun);
572
573 /* If we can't determine the function for PC, we treat a jump at
574 the end of the block as tail call if we're switching functions
575 and as an intra-function branch if we don't. */
576 if (start == 0 && ftrace_function_switched (bfun, mfun, fun))
577 return ftrace_new_tailcall (bfun, mfun, fun);
578
579 break;
580 }
581 }
582 }
583
584 /* Check if we're switching functions for some other reason. */
585 if (ftrace_function_switched (bfun, mfun, fun))
586 {
587 DEBUG_FTRACE ("switching from %s in %s at %s",
588 ftrace_print_insn_addr (last),
589 ftrace_print_function_name (bfun),
590 ftrace_print_filename (bfun));
591
592 return ftrace_new_switch (bfun, mfun, fun);
593 }
594
595 return bfun;
596 }
597
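/* Decision summary for the classifier above, for illustration: a
   returning _dl_runtime_resolve is recorded as a tail call; a call whose
   target is the fall-through address (last->pc + last->size == pc) is a
   PIC idiom and stays within BFUN; a jump to a function start is treated
   as a tail call.  */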
599 /* Add INSN to BFUN's instructions. */
599
600 static void
601 ftrace_update_insns (struct btrace_function *bfun,
602 const struct btrace_insn *insn)
603 {
604 VEC_safe_push (btrace_insn_s, bfun->insn, insn);
605
606 if (record_debug > 1)
607 ftrace_debug (bfun, "update insn");
608 }
609
610 /* Classify the instruction at PC. */
611
612 static enum btrace_insn_class
613 ftrace_classify_insn (struct gdbarch *gdbarch, CORE_ADDR pc)
614 {
615 enum btrace_insn_class iclass;
616
617 iclass = BTRACE_INSN_OTHER;
618 TRY
619 {
620 if (gdbarch_insn_is_call (gdbarch, pc))
621 iclass = BTRACE_INSN_CALL;
622 else if (gdbarch_insn_is_ret (gdbarch, pc))
623 iclass = BTRACE_INSN_RETURN;
624 else if (gdbarch_insn_is_jump (gdbarch, pc))
625 iclass = BTRACE_INSN_JUMP;
626 }
627 CATCH (error, RETURN_MASK_ERROR)
628 {
629 }
630 END_CATCH
631
632 return iclass;
633 }
634
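/* Note, added for clarity: the TRY/CATCH above means an instruction that
   cannot be read or decoded quietly classifies as BTRACE_INSN_OTHER
   instead of aborting the trace computation.  */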
635 /* Try to match the back trace at LHS to the back trace at RHS. Returns the
636 number of matching function segments or zero if the back traces do not
637 match. */
638
639 static int
640 ftrace_match_backtrace (struct btrace_function *lhs,
641 struct btrace_function *rhs)
642 {
643 int matches;
644
645 for (matches = 0; lhs != NULL && rhs != NULL; ++matches)
646 {
647 if (ftrace_function_switched (lhs, rhs->msym, rhs->sym))
648 return 0;
649
650 lhs = ftrace_get_caller (lhs);
651 rhs = ftrace_get_caller (rhs);
652 }
653
654 return matches;
655 }
656
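/* Worked example, illustrative only: for the caller chains
     lhs: a <- b <- c and rhs: a <- b
   the pairs (a, a) and (b, b) match before RHS runs out, so the result
   is 2.  Any symbol mismatch along the way yields 0 instead.  */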
657 /* Add ADJUSTMENT to the level of BFUN and succeeding function segments. */
658
659 static void
660 ftrace_fixup_level (struct btrace_function *bfun, int adjustment)
661 {
662 if (adjustment == 0)
663 return;
664
665 DEBUG_FTRACE ("fixup level (%+d)", adjustment);
666 ftrace_debug (bfun, "..bfun");
667
668 for (; bfun != NULL; bfun = bfun->flow.next)
669 bfun->level += adjustment;
670 }
671
672 /* Recompute the global level offset. Traverse the function trace and compute
673 the global level offset as the negative of the minimal function level. */
674
675 static void
676 ftrace_compute_global_level_offset (struct btrace_thread_info *btinfo)
677 {
678 struct btrace_function *bfun, *end;
679 int level;
680
681 if (btinfo == NULL)
682 return;
683
684 bfun = btinfo->begin;
685 if (bfun == NULL)
686 return;
687
688 /* The last function segment contains the current instruction, which is not
689 really part of the trace. If it contains just this one instruction, we
690 stop when we reach it; otherwise, we let the loop below run to the end. */
691 end = btinfo->end;
692 if (VEC_length (btrace_insn_s, end->insn) > 1)
693 end = NULL;
694
695 level = INT_MAX;
696 for (; bfun != end; bfun = bfun->flow.next)
697 level = std::min (level, bfun->level);
698
699 DEBUG_FTRACE ("setting global level offset: %d", -level);
700 btinfo->level = -level;
701 }
702
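/* Numeric illustration: if the segment levels seen in the trace are
   0, -2, -1, and 0, the minimum is -2 and BTINFO->LEVEL becomes 2, so
   normalized levels (level + offset) start at zero.  */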
703 /* Connect the function segments PREV and NEXT in a bottom-to-top walk as in
704 ftrace_connect_backtrace. */
705
706 static void
707 ftrace_connect_bfun (struct btrace_function *prev,
708 struct btrace_function *next)
709 {
710 DEBUG_FTRACE ("connecting...");
711 ftrace_debug (prev, "..prev");
712 ftrace_debug (next, "..next");
713
714 /* The function segments are not yet connected. */
715 gdb_assert (prev->segment.next == NULL);
716 gdb_assert (next->segment.prev == NULL);
717
718 prev->segment.next = next;
719 next->segment.prev = prev;
720
721 /* We may have moved NEXT to a different function level. */
722 ftrace_fixup_level (next, prev->level - next->level);
723
724 /* If we run out of back trace for one, let's use the other's. */
725 if (prev->up == NULL)
726 {
727 if (next->up != NULL)
728 {
729 DEBUG_FTRACE ("using next's callers");
730 ftrace_fixup_caller (prev, next->up, next->flags);
731 }
732 }
733 else if (next->up == NULL)
734 {
735 if (prev->up != NULL)
736 {
737 DEBUG_FTRACE ("using prev's callers");
738 ftrace_fixup_caller (next, prev->up, prev->flags);
739 }
740 }
741 else
742 {
743 /* PREV may have a tailcall caller, NEXT can't. If it does, fixup the up
744 link to add the tail callers to NEXT's back trace.
745
746 This removes NEXT->UP from NEXT's back trace. It will be added back
747 when connecting NEXT and PREV's callers - provided they exist.
748
749 If PREV's back trace consists of a series of tail calls without an
750 actual call, there will be no further connection and NEXT's caller will
751 be removed for good. To catch this case, we handle it here and connect
752 the top of PREV's back trace to NEXT's caller. */
753 if ((prev->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
754 {
755 struct btrace_function *caller;
756 btrace_function_flags flags;
757
758 /* We checked NEXT->UP above so CALLER can't be NULL. */
759 caller = next->up;
760 flags = next->flags;
761
762 DEBUG_FTRACE ("adding prev's tail calls to next");
763
764 ftrace_fixup_caller (next, prev->up, prev->flags);
765
766 for (prev = prev->up; prev != NULL; prev = prev->up)
767 {
768 /* At the end of PREV's back trace, continue with CALLER. */
769 if (prev->up == NULL)
770 {
771 DEBUG_FTRACE ("fixing up link for tailcall chain");
772 ftrace_debug (prev, "..top");
773 ftrace_debug (caller, "..up");
774
775 ftrace_fixup_caller (prev, caller, flags);
776
777 /* If we skipped any tail calls, this may move CALLER to a
778 different function level.
779
780 Note that changing CALLER's level is only OK because we
781 know that this is the last iteration of the bottom-to-top
782 walk in ftrace_connect_backtrace.
783
784 Otherwise we will fix up CALLER's level when we connect it
785 to PREV's caller in the next iteration. */
786 ftrace_fixup_level (caller, prev->level - caller->level - 1);
787 break;
788 }
789
790 /* There's nothing to do if we find a real call. */
791 if ((prev->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
792 {
793 DEBUG_FTRACE ("will fix up link in next iteration");
794 break;
795 }
796 }
797 }
798 }
799 }
800
801 /* Connect function segments on the same level in the back trace at LHS and RHS.
802 The back traces at LHS and RHS are expected to match according to
803 ftrace_match_backtrace. */
804
805 static void
806 ftrace_connect_backtrace (struct btrace_function *lhs,
807 struct btrace_function *rhs)
808 {
809 while (lhs != NULL && rhs != NULL)
810 {
811 struct btrace_function *prev, *next;
812
813 gdb_assert (!ftrace_function_switched (lhs, rhs->msym, rhs->sym));
814
815 /* Connecting LHS and RHS may change the up link. */
816 prev = lhs;
817 next = rhs;
818
819 lhs = ftrace_get_caller (lhs);
820 rhs = ftrace_get_caller (rhs);
821
822 ftrace_connect_bfun (prev, next);
823 }
824 }
825
826 /* Bridge the gap between two function segments left and right of a gap if their
827 respective back traces match in at least MIN_MATCHES functions.
828
829 Returns non-zero if the gap could be bridged, zero otherwise. */
830
831 static int
832 ftrace_bridge_gap (struct btrace_function *lhs, struct btrace_function *rhs,
833 int min_matches)
834 {
835 struct btrace_function *best_l, *best_r, *cand_l, *cand_r;
836 int best_matches;
837
838 DEBUG_FTRACE ("checking gap at insn %u (req matches: %d)",
839 rhs->insn_offset - 1, min_matches);
840
841 best_matches = 0;
842 best_l = NULL;
843 best_r = NULL;
844
845 /* We search the back traces of LHS and RHS for valid connections and connect
846 the two function segments that give the longest combined back trace. */
847
848 for (cand_l = lhs; cand_l != NULL; cand_l = ftrace_get_caller (cand_l))
849 for (cand_r = rhs; cand_r != NULL; cand_r = ftrace_get_caller (cand_r))
850 {
851 int matches;
852
853 matches = ftrace_match_backtrace (cand_l, cand_r);
854 if (best_matches < matches)
855 {
856 best_matches = matches;
857 best_l = cand_l;
858 best_r = cand_r;
859 }
860 }
861
862 /* We need at least MIN_MATCHES matches. */
863 gdb_assert (min_matches > 0);
864 if (best_matches < min_matches)
865 return 0;
866
867 DEBUG_FTRACE ("..matches: %d", best_matches);
868
869 /* We will fix up the level of BEST_R and succeeding function segments such
870 that BEST_R's level matches BEST_L's when we connect BEST_L to BEST_R.
871
872 This will ignore the level of RHS and following if BEST_R != RHS. I.e. if
873 BEST_R is a successor of RHS in the back trace of RHS (phases 1 and 3).
874
875 To catch this, we already fix up the level here where we can start at RHS
876 instead of at BEST_R. We will ignore the level fixup when connecting
877 BEST_L to BEST_R as they will already be on the same level. */
878 ftrace_fixup_level (rhs, best_l->level - best_r->level);
879
880 ftrace_connect_backtrace (best_l, best_r);
881
882 return best_matches;
883 }
884
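/* Note on the candidate search above: since the comparison is strict
   (best_matches < matches), ties are resolved in favor of the innermost
   candidate pair, i.e. the one closest to the gap.  */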
885 /* Try to bridge gaps due to overflow or decode errors by connecting the
886 function segments that are separated by the gap. */
887
888 static void
889 btrace_bridge_gaps (struct thread_info *tp, VEC (bfun_s) **gaps)
890 {
891 VEC (bfun_s) *remaining;
892 struct cleanup *old_chain;
893 int min_matches;
894
895 DEBUG ("bridge gaps");
896
897 remaining = NULL;
898 old_chain = make_cleanup (VEC_cleanup (bfun_s), &remaining);
899
900 /* We require a minimum amount of matches for bridging a gap. The number of
901 required matches will be lowered with each iteration.
902
903 The more matches the higher our confidence that the bridging is correct.
904 For big gaps or small traces, however, it may not be feasible to require a
905 high number of matches. */
906 for (min_matches = 5; min_matches > 0; --min_matches)
907 {
908 /* Let's try to bridge as many gaps as we can. In some cases, we need to
909 skip a gap and revisit it again after we closed later gaps. */
910 while (!VEC_empty (bfun_s, *gaps))
911 {
912 struct btrace_function *gap;
913 unsigned int idx;
914
915 for (idx = 0; VEC_iterate (bfun_s, *gaps, idx, gap); ++idx)
916 {
917 struct btrace_function *lhs, *rhs;
918 int bridged;
919
920 /* We may have a sequence of gaps if we run from one error into
921 the next as we try to re-sync onto the trace stream. Ignore
922 all but the leftmost gap in such a sequence.
923
924 Also ignore gaps at the beginning of the trace. */
925 lhs = gap->flow.prev;
926 if (lhs == NULL || lhs->errcode != 0)
927 continue;
928
929 /* Skip gaps to the right. */
930 for (rhs = gap->flow.next; rhs != NULL; rhs = rhs->flow.next)
931 if (rhs->errcode == 0)
932 break;
933
934 /* Ignore gaps at the end of the trace. */
935 if (rhs == NULL)
936 continue;
937
938 bridged = ftrace_bridge_gap (lhs, rhs, min_matches);
939
940 /* Keep track of gaps we were not able to bridge and try again.
941 If we just pushed them to the end of GAPS we would risk an
942 infinite loop in case we simply cannot bridge a gap. */
943 if (bridged == 0)
944 VEC_safe_push (bfun_s, remaining, gap);
945 }
946
947 /* Let's see if we made any progress. */
948 if (VEC_length (bfun_s, remaining) == VEC_length (bfun_s, *gaps))
949 break;
950
951 VEC_free (bfun_s, *gaps);
952
953 *gaps = remaining;
954 remaining = NULL;
955 }
956
957 /* We get here if either GAPS is empty or if GAPS equals REMAINING. */
958 if (VEC_empty (bfun_s, *gaps))
959 break;
960
961 VEC_free (bfun_s, remaining);
962 }
963
964 do_cleanups (old_chain);
965
966 /* We may omit this in some cases. Not sure it is worth the extra
967 complication, though. */
968 ftrace_compute_global_level_offset (&tp->btrace);
969 }
970
971 /* Compute the function branch trace from BTS trace. */
972
973 static void
974 btrace_compute_ftrace_bts (struct thread_info *tp,
975 const struct btrace_data_bts *btrace,
976 VEC (bfun_s) **gaps)
977 {
978 struct btrace_thread_info *btinfo;
979 struct btrace_function *begin, *end;
980 struct gdbarch *gdbarch;
981 unsigned int blk;
982 int level;
983
984 gdbarch = target_gdbarch ();
985 btinfo = &tp->btrace;
986 begin = btinfo->begin;
987 end = btinfo->end;
988 level = begin != NULL ? -btinfo->level : INT_MAX;
989 blk = VEC_length (btrace_block_s, btrace->blocks);
990
991 while (blk != 0)
992 {
993 btrace_block_s *block;
994 CORE_ADDR pc;
995
996 blk -= 1;
997
998 block = VEC_index (btrace_block_s, btrace->blocks, blk);
999 pc = block->begin;
1000
1001 for (;;)
1002 {
1003 struct btrace_insn insn;
1004 int size;
1005
1006 /* We should hit the end of the block. Warn if we went too far. */
1007 if (block->end < pc)
1008 {
1009 /* Indicate the gap in the trace. */
1010 end = ftrace_new_gap (end, BDE_BTS_OVERFLOW);
1011 if (begin == NULL)
1012 begin = end;
1013
1014 VEC_safe_push (bfun_s, *gaps, end);
1015
1016 warning (_("Recorded trace may be corrupted at instruction "
1017 "%u (pc = %s)."), end->insn_offset - 1,
1018 core_addr_to_string_nz (pc));
1019
1020 break;
1021 }
1022
1023 end = ftrace_update_function (end, pc);
1024 if (begin == NULL)
1025 begin = end;
1026
1027 /* Maintain the function level offset.
1028 For all but the last block, we do it here. */
1029 if (blk != 0)
1030 level = std::min (level, end->level);
1031
1032 size = 0;
1033 TRY
1034 {
1035 size = gdb_insn_length (gdbarch, pc);
1036 }
1037 CATCH (error, RETURN_MASK_ERROR)
1038 {
1039 }
1040 END_CATCH
1041
1042 insn.pc = pc;
1043 insn.size = size;
1044 insn.iclass = ftrace_classify_insn (gdbarch, pc);
1045 insn.flags = 0;
1046
1047 ftrace_update_insns (end, &insn);
1048
1049 /* We're done once we pushed the instruction at the end. */
1050 if (block->end == pc)
1051 break;
1052
1053 /* We can't continue if we fail to compute the size. */
1054 if (size <= 0)
1055 {
1056 /* Indicate the gap in the trace. We just added INSN so we're
1057 not at the beginning. */
1058 end = ftrace_new_gap (end, BDE_BTS_INSN_SIZE);
1059
1060 VEC_safe_push (bfun_s, *gaps, end);
1061
1062 warning (_("Recorded trace may be incomplete at instruction %u "
1063 "(pc = %s)."), end->insn_offset - 1,
1064 core_addr_to_string_nz (pc));
1065
1066 break;
1067 }
1068
1069 pc += size;
1070
1071 /* Maintain the function level offset.
1072 For the last block, we do it here to not consider the last
1073 instruction.
1074 Since the last instruction corresponds to the current instruction
1075 and is not really part of the execution history, it shouldn't
1076 affect the level. */
1077 if (blk == 0)
1078 level = std::min (level, end->level);
1079 }
1080 }
1081
1082 btinfo->begin = begin;
1083 btinfo->end = end;
1084
1085 /* LEVEL is the minimal function level of all btrace function segments.
1086 Define the global level offset to -LEVEL so all function levels are
1087 normalized to start at zero. */
1088 btinfo->level = -level;
1089 }
1090
1091 #if defined (HAVE_LIBIPT)
1092
1093 static enum btrace_insn_class
1094 pt_reclassify_insn (enum pt_insn_class iclass)
1095 {
1096 switch (iclass)
1097 {
1098 case ptic_call:
1099 return BTRACE_INSN_CALL;
1100
1101 case ptic_return:
1102 return BTRACE_INSN_RETURN;
1103
1104 case ptic_jump:
1105 return BTRACE_INSN_JUMP;
1106
1107 default:
1108 return BTRACE_INSN_OTHER;
1109 }
1110 }
1111
1112 /* Return the btrace instruction flags for INSN. */
1113
1114 static btrace_insn_flags
1115 pt_btrace_insn_flags (const struct pt_insn *insn)
1116 {
1117 btrace_insn_flags flags = 0;
1118
1119 if (insn->speculative)
1120 flags |= BTRACE_INSN_FLAG_SPECULATIVE;
1121
1122 return flags;
1123 }
1124
1125 /* Add function branch trace using DECODER. */
1126
1127 static void
1128 ftrace_add_pt (struct pt_insn_decoder *decoder,
1129 struct btrace_function **pbegin,
1130 struct btrace_function **pend, int *plevel,
1131 VEC (bfun_s) **gaps)
1132 {
1133 struct btrace_function *begin, *end, *upd;
1134 uint64_t offset;
1135 int errcode;
1136
1137 begin = *pbegin;
1138 end = *pend;
1139 for (;;)
1140 {
1141 struct btrace_insn btinsn;
1142 struct pt_insn insn;
1143
1144 errcode = pt_insn_sync_forward (decoder);
1145 if (errcode < 0)
1146 {
1147 if (errcode != -pte_eos)
1148 warning (_("Failed to synchronize onto the Intel Processor "
1149 "Trace stream: %s."), pt_errstr (pt_errcode (errcode)));
1150 break;
1151 }
1152
1153 memset (&btinsn, 0, sizeof (btinsn));
1154 for (;;)
1155 {
1156 errcode = pt_insn_next (decoder, &insn, sizeof (insn));
1157 if (errcode < 0)
1158 break;
1159
1160 /* Look for gaps in the trace - unless we're at the beginning. */
1161 if (begin != NULL)
1162 {
1163 /* Tracing is disabled and re-enabled each time we enter the
1164 kernel. Most of the time, we continue from the same instruction
1165 at which we stopped. This is indicated via the RESUMED instruction
1166 flag. The ENABLED instruction flag means that we continued
1167 from some other instruction. Indicate this as a trace gap. */
1168 if (insn.enabled)
1169 {
1170 *pend = end = ftrace_new_gap (end, BDE_PT_DISABLED);
1171
1172 VEC_safe_push (bfun_s, *gaps, end);
1173
1174 pt_insn_get_offset (decoder, &offset);
1175
1176 warning (_("Non-contiguous trace at instruction %u (offset "
1177 "= 0x%" PRIx64 ", pc = 0x%" PRIx64 ")."),
1178 end->insn_offset - 1, offset, insn.ip);
1179 }
1180 }
1181
1182 /* Indicate trace overflows. */
1183 if (insn.resynced)
1184 {
1185 *pend = end = ftrace_new_gap (end, BDE_PT_OVERFLOW);
1186 if (begin == NULL)
1187 *pbegin = begin = end;
1188
1189 VEC_safe_push (bfun_s, *gaps, end);
1190
1191 pt_insn_get_offset (decoder, &offset);
1192
1193 warning (_("Overflow at instruction %u (offset = 0x%" PRIx64
1194 ", pc = 0x%" PRIx64 ")."), end->insn_offset - 1,
1195 offset, insn.ip);
1196 }
1197
1198 upd = ftrace_update_function (end, insn.ip);
1199 if (upd != end)
1200 {
1201 *pend = end = upd;
1202
1203 if (begin == NULL)
1204 *pbegin = begin = upd;
1205 }
1206
1207 /* Maintain the function level offset. */
1208 *plevel = std::min (*plevel, end->level);
1209
1210 btinsn.pc = (CORE_ADDR) insn.ip;
1211 btinsn.size = (gdb_byte) insn.size;
1212 btinsn.iclass = pt_reclassify_insn (insn.iclass);
1213 btinsn.flags = pt_btrace_insn_flags (&insn);
1214
1215 ftrace_update_insns (end, &btinsn);
1216 }
1217
1218 if (errcode == -pte_eos)
1219 break;
1220
1221 /* Indicate the gap in the trace. */
1222 *pend = end = ftrace_new_gap (end, errcode);
1223 if (begin == NULL)
1224 *pbegin = begin = end;
1225
1226 VEC_safe_push (bfun_s, *gaps, end);
1227
1228 pt_insn_get_offset (decoder, &offset);
1229
1230 warning (_("Decode error (%d) at instruction %u (offset = 0x%" PRIx64
1231 ", pc = 0x%" PRIx64 "): %s."), errcode, end->insn_offset - 1,
1232 offset, insn.ip, pt_errstr (pt_errcode (errcode)));
1233 }
1234 }
1235
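/* Decode loop summary, added for clarity: pt_insn_sync_forward positions
   the decoder at the next synchronization point in the trace; every
   pt_insn_next failure other than -pte_eos becomes a gap segment that
   carries the decoder's negative error code as ERRCODE.  */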
1236 /* A callback function to allow the trace decoder to read the inferior's
1237 memory. */
1238
1239 static int
1240 btrace_pt_readmem_callback (gdb_byte *buffer, size_t size,
1241 const struct pt_asid *asid, uint64_t pc,
1242 void *context)
1243 {
1244 int result, errcode;
1245
1246 result = (int) size;
1247 TRY
1248 {
1249 errcode = target_read_code ((CORE_ADDR) pc, buffer, size);
1250 if (errcode != 0)
1251 result = -pte_nomap;
1252 }
1253 CATCH (error, RETURN_MASK_ERROR)
1254 {
1255 result = -pte_nomap;
1256 }
1257 END_CATCH
1258
1259 return result;
1260 }
1261
1262 /* Translate the vendor from one enum to another. */
1263
1264 static enum pt_cpu_vendor
1265 pt_translate_cpu_vendor (enum btrace_cpu_vendor vendor)
1266 {
1267 switch (vendor)
1268 {
1269 default:
1270 return pcv_unknown;
1271
1272 case CV_INTEL:
1273 return pcv_intel;
1274 }
1275 }
1276
1277 /* Finalize the function branch trace after decode. */
1278
1279 static void btrace_finalize_ftrace_pt (struct pt_insn_decoder *decoder,
1280 struct thread_info *tp, int level)
1281 {
1282 pt_insn_free_decoder (decoder);
1283
1284 /* LEVEL is the minimal function level of all btrace function segments.
1285 Define the global level offset to -LEVEL so all function levels are
1286 normalized to start at zero. */
1287 tp->btrace.level = -level;
1288
1289 /* Add a single last instruction entry for the current PC.
1290 This allows us to compute the backtrace at the current PC using both
1291 standard unwind and btrace unwind.
1292 This extra entry is ignored by all record commands. */
1293 btrace_add_pc (tp);
1294 }
1295
1296 /* Compute the function branch trace from Intel Processor Trace
1297 format. */
1298
1299 static void
1300 btrace_compute_ftrace_pt (struct thread_info *tp,
1301 const struct btrace_data_pt *btrace,
1302 VEC (bfun_s) **gaps)
1303 {
1304 struct btrace_thread_info *btinfo;
1305 struct pt_insn_decoder *decoder;
1306 struct pt_config config;
1307 int level, errcode;
1308
1309 if (btrace->size == 0)
1310 return;
1311
1312 btinfo = &tp->btrace;
1313 level = btinfo->begin != NULL ? -btinfo->level : INT_MAX;
1314
1315 pt_config_init (&config);
1316 config.begin = btrace->data;
1317 config.end = btrace->data + btrace->size;
1318
1319 config.cpu.vendor = pt_translate_cpu_vendor (btrace->config.cpu.vendor);
1320 config.cpu.family = btrace->config.cpu.family;
1321 config.cpu.model = btrace->config.cpu.model;
1322 config.cpu.stepping = btrace->config.cpu.stepping;
1323
1324 errcode = pt_cpu_errata (&config.errata, &config.cpu);
1325 if (errcode < 0)
1326 error (_("Failed to configure the Intel Processor Trace decoder: %s."),
1327 pt_errstr (pt_errcode (errcode)));
1328
1329 decoder = pt_insn_alloc_decoder (&config);
1330 if (decoder == NULL)
1331 error (_("Failed to allocate the Intel Processor Trace decoder."));
1332
1333 TRY
1334 {
1335 struct pt_image *image;
1336
1337 image = pt_insn_get_image (decoder);
1338 if (image == NULL)
1339 error (_("Failed to configure the Intel Processor Trace decoder."));
1340
1341 errcode = pt_image_set_callback (image, btrace_pt_readmem_callback, NULL);
1342 if (errcode < 0)
1343 error (_("Failed to configure the Intel Processor Trace decoder: "
1344 "%s."), pt_errstr (pt_errcode (errcode)));
1345
1346 ftrace_add_pt (decoder, &btinfo->begin, &btinfo->end, &level, gaps);
1347 }
1348 CATCH (error, RETURN_MASK_ALL)
1349 {
1350 /* Indicate a gap in the trace if we quit trace processing. */
1351 if (error.reason == RETURN_QUIT && btinfo->end != NULL)
1352 {
1353 btinfo->end = ftrace_new_gap (btinfo->end, BDE_PT_USER_QUIT);
1354
1355 VEC_safe_push (bfun_s, *gaps, btinfo->end);
1356 }
1357
1358 btrace_finalize_ftrace_pt (decoder, tp, level);
1359
1360 throw_exception (error);
1361 }
1362 END_CATCH
1363
1364 btrace_finalize_ftrace_pt (decoder, tp, level);
1365 }
1366
1367 #else /* defined (HAVE_LIBIPT) */
1368
1369 static void
1370 btrace_compute_ftrace_pt (struct thread_info *tp,
1371 const struct btrace_data_pt *btrace,
1372 VEC (bfun_s) **gaps)
1373 {
1374 internal_error (__FILE__, __LINE__, _("Unexpected branch trace format."));
1375 }
1376
1377 #endif /* defined (HAVE_LIBIPT) */
1378
1379 /* Compute the function branch trace from a block branch trace BTRACE for
1380 a thread given by BTINFO. */
1381
1382 static void
1383 btrace_compute_ftrace_1 (struct thread_info *tp, struct btrace_data *btrace,
1384 VEC (bfun_s) **gaps)
1385 {
1386 DEBUG ("compute ftrace");
1387
1388 switch (btrace->format)
1389 {
1390 case BTRACE_FORMAT_NONE:
1391 return;
1392
1393 case BTRACE_FORMAT_BTS:
1394 btrace_compute_ftrace_bts (tp, &btrace->variant.bts, gaps);
1395 return;
1396
1397 case BTRACE_FORMAT_PT:
1398 btrace_compute_ftrace_pt (tp, &btrace->variant.pt, gaps);
1399 return;
1400 }
1401
1402 internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
1403 }
1404
1405 static void
1406 btrace_finalize_ftrace (struct thread_info *tp, VEC (bfun_s) **gaps)
1407 {
1408 if (!VEC_empty (bfun_s, *gaps))
1409 {
1410 tp->btrace.ngaps += VEC_length (bfun_s, *gaps);
1411 btrace_bridge_gaps (tp, gaps);
1412 }
1413 }
1414
1415 static void
1416 btrace_compute_ftrace (struct thread_info *tp, struct btrace_data *btrace)
1417 {
1418 VEC (bfun_s) *gaps;
1419 struct cleanup *old_chain;
1420
1421 gaps = NULL;
1422 old_chain = make_cleanup (VEC_cleanup (bfun_s), &gaps);
1423
1424 TRY
1425 {
1426 btrace_compute_ftrace_1 (tp, btrace, &gaps);
1427 }
1428 CATCH (error, RETURN_MASK_ALL)
1429 {
1430 btrace_finalize_ftrace (tp, &gaps);
1431
1432 throw_exception (error);
1433 }
1434 END_CATCH
1435
1436 btrace_finalize_ftrace (tp, &gaps);
1437
1438 do_cleanups (old_chain);
1439 }
1440
1441 /* Add an entry for the current PC. */
1442
1443 static void
1444 btrace_add_pc (struct thread_info *tp)
1445 {
1446 struct btrace_data btrace;
1447 struct btrace_block *block;
1448 struct regcache *regcache;
1449 struct cleanup *cleanup;
1450 CORE_ADDR pc;
1451
1452 regcache = get_thread_regcache (tp->ptid);
1453 pc = regcache_read_pc (regcache);
1454
1455 btrace_data_init (&btrace);
1456 btrace.format = BTRACE_FORMAT_BTS;
1457 btrace.variant.bts.blocks = NULL;
1458
1459 cleanup = make_cleanup_btrace_data (&btrace);
1460
1461 block = VEC_safe_push (btrace_block_s, btrace.variant.bts.blocks, NULL);
1462 block->begin = pc;
1463 block->end = pc;
1464
1465 btrace_compute_ftrace (tp, &btrace);
1466
1467 do_cleanups (cleanup);
1468 }
1469
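/* Illustration: for a thread stopped at pc 0x1000, the function above
   synthesizes the single-block BTS trace [0x1000; 0x1000] and feeds it
   through btrace_compute_ftrace, so the current instruction shows up as
   the last (extra) entry of the function trace.  */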
1470 /* See btrace.h. */
1471
1472 void
1473 btrace_enable (struct thread_info *tp, const struct btrace_config *conf)
1474 {
1475 if (tp->btrace.target != NULL)
1476 return;
1477
1478 #if !defined (HAVE_LIBIPT)
1479 if (conf->format == BTRACE_FORMAT_PT)
1480 error (_("GDB does not support Intel Processor Trace."));
1481 #endif /* !defined (HAVE_LIBIPT) */
1482
1483 if (!target_supports_btrace (conf->format))
1484 error (_("Target does not support branch tracing."));
1485
1486 DEBUG ("enable thread %s (%s)", print_thread_id (tp),
1487 target_pid_to_str (tp->ptid));
1488
1489 tp->btrace.target = target_enable_btrace (tp->ptid, conf);
1490
1491 /* We're done if we failed to enable tracing. */
1492 if (tp->btrace.target == NULL)
1493 return;
1494
1495 /* We need to undo the enable in case of errors. */
1496 TRY
1497 {
1498 /* Add an entry for the current PC so we start tracing from where we
1499 enabled it.
1500
1501 If we can't access TP's registers, TP is most likely running. In this
1502 case, we can't really say where tracing was enabled so it should be
1503 safe to simply skip this step.
1504
1505 This is not relevant for BTRACE_FORMAT_PT since the trace will already
1506 start at the PC at which tracing was enabled. */
1507 if (conf->format != BTRACE_FORMAT_PT
1508 && can_access_registers_ptid (tp->ptid))
1509 btrace_add_pc (tp);
1510 }
1511 CATCH (exception, RETURN_MASK_ALL)
1512 {
1513 btrace_disable (tp);
1514
1515 throw_exception (exception);
1516 }
1517 END_CATCH
1518 }
1519
1520 /* See btrace.h. */
1521
1522 const struct btrace_config *
1523 btrace_conf (const struct btrace_thread_info *btinfo)
1524 {
1525 if (btinfo->target == NULL)
1526 return NULL;
1527
1528 return target_btrace_conf (btinfo->target);
1529 }
1530
1531 /* See btrace.h. */
1532
1533 void
1534 btrace_disable (struct thread_info *tp)
1535 {
1536 struct btrace_thread_info *btp = &tp->btrace;
1537 int errcode = 0;
1538
1539 if (btp->target == NULL)
1540 return;
1541
1542 DEBUG ("disable thread %s (%s)", print_thread_id (tp),
1543 target_pid_to_str (tp->ptid));
1544
1545 target_disable_btrace (btp->target);
1546 btp->target = NULL;
1547
1548 btrace_clear (tp);
1549 }
1550
1551 /* See btrace.h. */
1552
1553 void
1554 btrace_teardown (struct thread_info *tp)
1555 {
1556 struct btrace_thread_info *btp = &tp->btrace;
1557 int errcode = 0;
1558
1559 if (btp->target == NULL)
1560 return;
1561
1562 DEBUG ("teardown thread %s (%s)", print_thread_id (tp),
1563 target_pid_to_str (tp->ptid));
1564
1565 target_teardown_btrace (btp->target);
1566 btp->target = NULL;
1567
1568 btrace_clear (tp);
1569 }
1570
1571 /* Stitch branch trace in BTS format. */
1572
1573 static int
1574 btrace_stitch_bts (struct btrace_data_bts *btrace, struct thread_info *tp)
1575 {
1576 struct btrace_thread_info *btinfo;
1577 struct btrace_function *last_bfun;
1578 struct btrace_insn *last_insn;
1579 btrace_block_s *first_new_block;
1580
1581 btinfo = &tp->btrace;
1582 last_bfun = btinfo->end;
1583 gdb_assert (last_bfun != NULL);
1584 gdb_assert (!VEC_empty (btrace_block_s, btrace->blocks));
1585
1586 /* If the existing trace ends with a gap, we just glue the traces
1587 together. We need to drop the last (i.e. chronologically first) block
1588 of the new trace, though, since we can't fill in the start address. */
1589 if (VEC_empty (btrace_insn_s, last_bfun->insn))
1590 {
1591 VEC_pop (btrace_block_s, btrace->blocks);
1592 return 0;
1593 }
1594
1595 /* Beware that block trace starts with the most recent block, so the
1596 chronologically first block in the new trace is the last block in
1597 the new trace's block vector. */
1598 first_new_block = VEC_last (btrace_block_s, btrace->blocks);
1599 last_insn = VEC_last (btrace_insn_s, last_bfun->insn);
1600
1601 /* If the current PC at the end of the block is the same as in our current
1602 trace, there are two explanations:
1603 1. we executed the instruction and some branch brought us back.
1604 2. we have not made any progress.
1605 In the first case, the delta trace vector should contain at least two
1606 entries.
1607 In the second case, the delta trace vector should contain exactly one
1608 entry for the partial block containing the current PC. Remove it. */
1609 if (first_new_block->end == last_insn->pc
1610 && VEC_length (btrace_block_s, btrace->blocks) == 1)
1611 {
1612 VEC_pop (btrace_block_s, btrace->blocks);
1613 return 0;
1614 }
1615
1616 DEBUG ("stitching %s to %s", ftrace_print_insn_addr (last_insn),
1617 core_addr_to_string_nz (first_new_block->end));
1618
1619 /* Do a simple sanity check to make sure we don't accidentally end up
1620 with a bad block. This should not occur in practice. */
1621 if (first_new_block->end < last_insn->pc)
1622 {
1623 warning (_("Error while trying to read delta trace. Falling back to "
1624 "a full read."));
1625 return -1;
1626 }
1627
1628 /* We adjust the last block to start at the end of our current trace. */
1629 gdb_assert (first_new_block->begin == 0);
1630 first_new_block->begin = last_insn->pc;
1631
1632 /* We simply pop the last insn so we can insert it again as part of
1633 the normal branch trace computation.
1634 Since instruction iterators are based on indices in the instructions
1635 vector, we don't leave any pointers dangling. */
1636 DEBUG ("pruning insn at %s for stitching",
1637 ftrace_print_insn_addr (last_insn));
1638
1639 VEC_pop (btrace_insn_s, last_bfun->insn);
1640
1641 /* The instructions vector may become empty temporarily if this has
1642 been the only instruction in this function segment.
1643 This violates the invariant but will be remedied shortly by
1644 btrace_compute_ftrace when we add the new trace. */
1645
1646 /* The only case where this would hurt is if the entire trace consisted
1647 of just that one instruction. If we remove it, we might turn the now
1648 empty btrace function segment into a gap. But we don't want gaps at
1649 the beginning. To avoid this, we remove the entire old trace. */
1650 if (last_bfun == btinfo->begin && VEC_empty (btrace_insn_s, last_bfun->insn))
1651 btrace_clear (tp);
1652
1653 return 0;
1654 }
1655
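/* Stitching example with hypothetical addresses: if the old trace ends
   with an instruction at pc 0x100 and the delta trace's chronologically
   first block arrives as begin = 0, end = 0x104, the block is adjusted
   to begin at 0x100 and the instruction at 0x100 is popped so that
   btrace_compute_ftrace re-adds it as part of the new trace.  */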
1656 /* Adjust the block trace in order to stitch old and new trace together.
1657 BTRACE is the new delta trace between the last and the current stop.
1658 TP is the traced thread.
1659 May modify BTRACE as well as the existing trace in TP.
1660 Return 0 on success, -1 otherwise. */
1661
1662 static int
1663 btrace_stitch_trace (struct btrace_data *btrace, struct thread_info *tp)
1664 {
1665 /* If we don't have trace, there's nothing to do. */
1666 if (btrace_data_empty (btrace))
1667 return 0;
1668
1669 switch (btrace->format)
1670 {
1671 case BTRACE_FORMAT_NONE:
1672 return 0;
1673
1674 case BTRACE_FORMAT_BTS:
1675 return btrace_stitch_bts (&btrace->variant.bts, tp);
1676
1677 case BTRACE_FORMAT_PT:
1678 /* Delta reads are not supported. */
1679 return -1;
1680 }
1681
1682 internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
1683 }
1684
1685 /* Clear the branch trace histories in BTINFO. */
1686
1687 static void
1688 btrace_clear_history (struct btrace_thread_info *btinfo)
1689 {
1690 xfree (btinfo->insn_history);
1691 xfree (btinfo->call_history);
1692 xfree (btinfo->replay);
1693
1694 btinfo->insn_history = NULL;
1695 btinfo->call_history = NULL;
1696 btinfo->replay = NULL;
1697 }
1698
1699 /* Clear the branch trace maintenance histories in BTINFO. */
1700
1701 static void
1702 btrace_maint_clear (struct btrace_thread_info *btinfo)
1703 {
1704 switch (btinfo->data.format)
1705 {
1706 default:
1707 break;
1708
1709 case BTRACE_FORMAT_BTS:
1710 btinfo->maint.variant.bts.packet_history.begin = 0;
1711 btinfo->maint.variant.bts.packet_history.end = 0;
1712 break;
1713
1714 #if defined (HAVE_LIBIPT)
1715 case BTRACE_FORMAT_PT:
1716 xfree (btinfo->maint.variant.pt.packets);
1717
1718 btinfo->maint.variant.pt.packets = NULL;
1719 btinfo->maint.variant.pt.packet_history.begin = 0;
1720 btinfo->maint.variant.pt.packet_history.end = 0;
1721 break;
1722 #endif /* defined (HAVE_LIBIPT) */
1723 }
1724 }
1725
1726 /* See btrace.h. */
1727
1728 const char *
1729 btrace_decode_error (enum btrace_format format, int errcode)
1730 {
1731 switch (format)
1732 {
1733 case BTRACE_FORMAT_BTS:
1734 switch (errcode)
1735 {
1736 case BDE_BTS_OVERFLOW:
1737 return _("instruction overflow");
1738
1739 case BDE_BTS_INSN_SIZE:
1740 return _("unknown instruction");
1741
1742 default:
1743 break;
1744 }
1745 break;
1746
1747 #if defined (HAVE_LIBIPT)
1748 case BTRACE_FORMAT_PT:
1749 switch (errcode)
1750 {
1751 case BDE_PT_USER_QUIT:
1752 return _("trace decode cancelled");
1753
1754 case BDE_PT_DISABLED:
1755 return _("disabled");
1756
1757 case BDE_PT_OVERFLOW:
1758 return _("overflow");
1759
1760 default:
1761 if (errcode < 0)
1762 return pt_errstr (pt_errcode (errcode));
1763 break;
1764 }
1765 break;
1766 #endif /* defined (HAVE_LIBIPT) */
1767
1768 default:
1769 break;
1770 }
1771
1772 return _("unknown");
1773 }
1774
1775 /* See btrace.h. */
1776
1777 void
1778 btrace_fetch (struct thread_info *tp)
1779 {
1780 struct btrace_thread_info *btinfo;
1781 struct btrace_target_info *tinfo;
1782 struct btrace_data btrace;
1783 struct cleanup *cleanup;
1784 int errcode;
1785
1786 DEBUG ("fetch thread %s (%s)", print_thread_id (tp),
1787 target_pid_to_str (tp->ptid));
1788
1789 btinfo = &tp->btrace;
1790 tinfo = btinfo->target;
1791 if (tinfo == NULL)
1792 return;
1793
1794 /* There's no way we could get new trace while replaying.
1795 On the other hand, delta trace would return a partial record with the
1796 current PC, which is the replay PC, not the last PC, as expected. */
1797 if (btinfo->replay != NULL)
1798 return;
1799
1800 /* We should not be called on running or exited threads. */
1801 gdb_assert (can_access_registers_ptid (tp->ptid));
1802
1803 btrace_data_init (&btrace);
1804 cleanup = make_cleanup_btrace_data (&btrace);
1805
1806 /* Let's first try to extend the trace we already have. */
1807 if (btinfo->end != NULL)
1808 {
1809 errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_DELTA);
1810 if (errcode == 0)
1811 {
1812 /* Success. Let's try to stitch the traces together. */
1813 errcode = btrace_stitch_trace (&btrace, tp);
1814 }
1815 else
1816 {
1817 /* We failed to read delta trace. Let's try to read new trace. */
1818 errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_NEW);
1819
1820 /* If we got any new trace, discard what we have. */
1821 if (errcode == 0 && !btrace_data_empty (&btrace))
1822 btrace_clear (tp);
1823 }
1824
1825 /* If we were not able to read the trace, we start over. */
1826 if (errcode != 0)
1827 {
1828 btrace_clear (tp);
1829 errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);
1830 }
1831 }
1832 else
1833 errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);
1834
1835 /* If we were not able to read the branch trace, signal an error. */
1836 if (errcode != 0)
1837 error (_("Failed to read branch trace."));
1838
1839 /* Compute the trace, provided we have any. */
1840 if (!btrace_data_empty (&btrace))
1841 {
1842 /* Store the raw trace data. The stored data will be cleared in
1843 btrace_clear, so we always append the new trace. */
1844 btrace_data_append (&btinfo->data, &btrace);
1845 btrace_maint_clear (btinfo);
1846
1847 btrace_clear_history (btinfo);
1848 btrace_compute_ftrace (tp, &btrace);
1849 }
1850
1851 do_cleanups (cleanup);
1852 }
1853
1854 /* See btrace.h. */
1855
1856 void
1857 btrace_clear (struct thread_info *tp)
1858 {
1859 struct btrace_thread_info *btinfo;
1860 struct btrace_function *it, *trash;
1861
1862 DEBUG ("clear thread %s (%s)", print_thread_id (tp),
1863 target_pid_to_str (tp->ptid));
1864
1865 /* Make sure btrace frames that may hold a pointer into the branch
1866 trace data are destroyed. */
1867 reinit_frame_cache ();
1868
1869 btinfo = &tp->btrace;
1870
1871 it = btinfo->begin;
1872 while (it != NULL)
1873 {
1874 trash = it;
1875 it = it->flow.next;
1876
1877 xfree (trash);
1878 }
1879
1880 btinfo->begin = NULL;
1881 btinfo->end = NULL;
1882 btinfo->ngaps = 0;
1883
1884 /* Must clear the maint data before - it depends on BTINFO->DATA. */
1885 btrace_maint_clear (btinfo);
1886 btrace_data_clear (&btinfo->data);
1887 btrace_clear_history (btinfo);
1888 }
1889
1890 /* See btrace.h. */
1891
1892 void
1893 btrace_free_objfile (struct objfile *objfile)
1894 {
1895 struct thread_info *tp;
1896
1897 DEBUG ("free objfile");
1898
1899 ALL_NON_EXITED_THREADS (tp)
1900 btrace_clear (tp);
1901 }
1902
1903 #if defined (HAVE_LIBEXPAT)
1904
1905 /* Check the btrace document version. */
1906
1907 static void
1908 check_xml_btrace_version (struct gdb_xml_parser *parser,
1909 const struct gdb_xml_element *element,
1910 void *user_data, VEC (gdb_xml_value_s) *attributes)
1911 {
1912 const char *version
1913 = (const char *) xml_find_attribute (attributes, "version")->value;
1914
1915 if (strcmp (version, "1.0") != 0)
1916 gdb_xml_error (parser, _("Unsupported btrace version: \"%s\""), version);
1917 }
1918
1919 /* Parse a btrace "block" xml record. */
1920
1921 static void
1922 parse_xml_btrace_block (struct gdb_xml_parser *parser,
1923 const struct gdb_xml_element *element,
1924 void *user_data, VEC (gdb_xml_value_s) *attributes)
1925 {
1926 struct btrace_data *btrace;
1927 struct btrace_block *block;
1928 ULONGEST *begin, *end;
1929
1930 btrace = (struct btrace_data *) user_data;
1931
1932 switch (btrace->format)
1933 {
1934 case BTRACE_FORMAT_BTS:
1935 break;
1936
1937 case BTRACE_FORMAT_NONE:
1938 btrace->format = BTRACE_FORMAT_BTS;
1939 btrace->variant.bts.blocks = NULL;
1940 break;
1941
1942 default:
1943 gdb_xml_error (parser, _("Btrace format error."));
1944 }
1945
1946 begin = (ULONGEST *) xml_find_attribute (attributes, "begin")->value;
1947 end = (ULONGEST *) xml_find_attribute (attributes, "end")->value;
1948
1949 block = VEC_safe_push (btrace_block_s, btrace->variant.bts.blocks, NULL);
1950 block->begin = *begin;
1951 block->end = *end;
1952 }
1953
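/* Example input for the handler above, assuming the btrace XML format:

     <block begin="0x400000" end="0x400010"/>

   appends one btrace_block with those bounds to the BTS variant.  */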
1954 /* Parse a "raw" xml record. */
1955
1956 static void
1957 parse_xml_raw (struct gdb_xml_parser *parser, const char *body_text,
1958 gdb_byte **pdata, size_t *psize)
1959 {
1960 struct cleanup *cleanup;
1961 gdb_byte *data, *bin;
1962 size_t len, size;
1963
1964 len = strlen (body_text);
1965 if (len % 2 != 0)
1966 gdb_xml_error (parser, _("Bad raw data size."));
1967
1968 size = len / 2;
1969
1970 bin = data = (gdb_byte *) xmalloc (size);
1971 cleanup = make_cleanup (xfree, data);
1972
1973 /* We use hex encoding - see common/rsp-low.h. */
1974 while (len > 0)
1975 {
1976 char hi, lo;
1977
1978 hi = *body_text++;
1979 lo = *body_text++;
1980
1981 if (hi == 0 || lo == 0)
1982 gdb_xml_error (parser, _("Bad hex encoding."));
1983
1984 *bin++ = fromhex (hi) * 16 + fromhex (lo);
1985 len -= 2;
1986 }
1987
1988 discard_cleanups (cleanup);
1989
1990 *pdata = data;
1991 *psize = size;
1992 }
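
/* For illustration: the body text "0a1bff" (six characters) decodes to
   the three bytes 0x0a, 0x1b, 0xff; an odd-length body such as "0a1"
   is rejected with "Bad raw data size.".  */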
1993
1994 /* Parse a btrace pt-config "cpu" xml record. */
1995
1996 static void
1997 parse_xml_btrace_pt_config_cpu (struct gdb_xml_parser *parser,
1998 const struct gdb_xml_element *element,
1999 void *user_data,
2000 VEC (gdb_xml_value_s) *attributes)
2001 {
2002 struct btrace_data *btrace;
2003 const char *vendor;
2004 ULONGEST *family, *model, *stepping;
2005
2006 vendor = (const char *) xml_find_attribute (attributes, "vendor")->value;
2007 family = (ULONGEST *) xml_find_attribute (attributes, "family")->value;
2008 model = (ULONGEST *) xml_find_attribute (attributes, "model")->value;
2009 stepping = (ULONGEST *) xml_find_attribute (attributes, "stepping")->value;
2010
2011 btrace = (struct btrace_data *) user_data;
2012
2013 if (strcmp (vendor, "GenuineIntel") == 0)
2014 btrace->variant.pt.config.cpu.vendor = CV_INTEL;
2015
2016 btrace->variant.pt.config.cpu.family = *family;
2017 btrace->variant.pt.config.cpu.model = *model;
2018 btrace->variant.pt.config.cpu.stepping = *stepping;
2019 }
2020
2021 /* Parse a btrace pt "raw" xml record. */
2022
2023 static void
2024 parse_xml_btrace_pt_raw (struct gdb_xml_parser *parser,
2025 const struct gdb_xml_element *element,
2026 void *user_data, const char *body_text)
2027 {
2028 struct btrace_data *btrace;
2029
2030 btrace = (struct btrace_data *) user_data;
2031 parse_xml_raw (parser, body_text, &btrace->variant.pt.data,
2032 &btrace->variant.pt.size);
2033 }
2034
2035 /* Parse a btrace "pt" xml record. */
2036
2037 static void
2038 parse_xml_btrace_pt (struct gdb_xml_parser *parser,
2039 const struct gdb_xml_element *element,
2040 void *user_data, VEC (gdb_xml_value_s) *attributes)
2041 {
2042 struct btrace_data *btrace;
2043
2044 btrace = (struct btrace_data *) user_data;
2045 btrace->format = BTRACE_FORMAT_PT;
2046 btrace->variant.pt.config.cpu.vendor = CV_UNKNOWN;
2047 btrace->variant.pt.data = NULL;
2048 btrace->variant.pt.size = 0;
2049 }
2050
2051 static const struct gdb_xml_attribute block_attributes[] = {
2052 { "begin", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
2053 { "end", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
2054 { NULL, GDB_XML_AF_NONE, NULL, NULL }
2055 };
2056
2057 static const struct gdb_xml_attribute btrace_pt_config_cpu_attributes[] = {
2058 { "vendor", GDB_XML_AF_NONE, NULL, NULL },
2059 { "family", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
2060 { "model", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
2061 { "stepping", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
2062 { NULL, GDB_XML_AF_NONE, NULL, NULL }
2063 };
2064
2065 static const struct gdb_xml_element btrace_pt_config_children[] = {
2066 { "cpu", btrace_pt_config_cpu_attributes, NULL, GDB_XML_EF_OPTIONAL,
2067 parse_xml_btrace_pt_config_cpu, NULL },
2068 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
2069 };
2070
2071 static const struct gdb_xml_element btrace_pt_children[] = {
2072 { "pt-config", NULL, btrace_pt_config_children, GDB_XML_EF_OPTIONAL, NULL,
2073 NULL },
2074 { "raw", NULL, NULL, GDB_XML_EF_OPTIONAL, NULL, parse_xml_btrace_pt_raw },
2075 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
2076 };
2077
2078 static const struct gdb_xml_attribute btrace_attributes[] = {
2079 { "version", GDB_XML_AF_NONE, NULL, NULL },
2080 { NULL, GDB_XML_AF_NONE, NULL, NULL }
2081 };
2082
2083 static const struct gdb_xml_element btrace_children[] = {
2084 { "block", block_attributes, NULL,
2085 GDB_XML_EF_REPEATABLE | GDB_XML_EF_OPTIONAL, parse_xml_btrace_block, NULL },
2086 { "pt", NULL, btrace_pt_children, GDB_XML_EF_OPTIONAL, parse_xml_btrace_pt,
2087 NULL },
2088 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
2089 };
2090
2091 static const struct gdb_xml_element btrace_elements[] = {
2092 { "btrace", btrace_attributes, btrace_children, GDB_XML_EF_NONE,
2093 check_xml_btrace_version, NULL },
2094 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
2095 };
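
/* For illustration: a complete document matching the element tables
   above; all attribute values are hypothetical, and the <raw> body is
   hex-encoded trace data (truncated here).

     <btrace version="1.0">
       <pt>
         <pt-config>
           <cpu vendor="GenuineIntel" family="6" model="62" stepping="4"/>
         </pt-config>
         <raw>0282</raw>
       </pt>
     </btrace>

   A BTS document would instead contain a sequence of <block> records
   directly below <btrace>.  */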
2096
2097 #endif /* defined (HAVE_LIBEXPAT) */
2098
2099 /* See btrace.h. */
2100
2101 void
2102 parse_xml_btrace (struct btrace_data *btrace, const char *buffer)
2103 {
2104 struct cleanup *cleanup;
2105 int errcode;
2106
2107 #if defined (HAVE_LIBEXPAT)
2108
2109 btrace->format = BTRACE_FORMAT_NONE;
2110
2111 cleanup = make_cleanup_btrace_data (btrace);
2112 errcode = gdb_xml_parse_quick (_("btrace"), "btrace.dtd", btrace_elements,
2113 buffer, btrace);
2114 if (errcode != 0)
2115 error (_("Error parsing branch trace."));
2116
2117 /* Keep parse results. */
2118 discard_cleanups (cleanup);
2119
2120 #else /* !defined (HAVE_LIBEXPAT) */
2121
2122 error (_("Cannot process branch trace. XML parsing is not supported."));
2123
2124 #endif /* !defined (HAVE_LIBEXPAT) */
2125 }
2126
2127 #if defined (HAVE_LIBEXPAT)
2128
2129 /* Parse a btrace-conf "bts" xml record. */
2130
2131 static void
2132 parse_xml_btrace_conf_bts (struct gdb_xml_parser *parser,
2133 const struct gdb_xml_element *element,
2134 void *user_data, VEC (gdb_xml_value_s) *attributes)
2135 {
2136 struct btrace_config *conf;
2137 struct gdb_xml_value *size;
2138
2139 conf = (struct btrace_config *) user_data;
2140 conf->format = BTRACE_FORMAT_BTS;
2141 conf->bts.size = 0;
2142
2143 size = xml_find_attribute (attributes, "size");
2144 if (size != NULL)
2145 conf->bts.size = (unsigned int) *(ULONGEST *) size->value;
2146 }
2147
2148 /* Parse a btrace-conf "pt" xml record. */
2149
2150 static void
2151 parse_xml_btrace_conf_pt (struct gdb_xml_parser *parser,
2152 const struct gdb_xml_element *element,
2153 void *user_data, VEC (gdb_xml_value_s) *attributes)
2154 {
2155 struct btrace_config *conf;
2156 struct gdb_xml_value *size;
2157
2158 conf = (struct btrace_config *) user_data;
2159 conf->format = BTRACE_FORMAT_PT;
2160 conf->pt.size = 0;
2161
2162 size = xml_find_attribute (attributes, "size");
2163 if (size != NULL)
2164 conf->pt.size = (unsigned int) *(ULONGEST *) size->value;
2165 }
2166
2167 static const struct gdb_xml_attribute btrace_conf_pt_attributes[] = {
2168 { "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
2169 { NULL, GDB_XML_AF_NONE, NULL, NULL }
2170 };
2171
2172 static const struct gdb_xml_attribute btrace_conf_bts_attributes[] = {
2173 { "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
2174 { NULL, GDB_XML_AF_NONE, NULL, NULL }
2175 };
2176
2177 static const struct gdb_xml_element btrace_conf_children[] = {
2178 { "bts", btrace_conf_bts_attributes, NULL, GDB_XML_EF_OPTIONAL,
2179 parse_xml_btrace_conf_bts, NULL },
2180 { "pt", btrace_conf_pt_attributes, NULL, GDB_XML_EF_OPTIONAL,
2181 parse_xml_btrace_conf_pt, NULL },
2182 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
2183 };
2184
2185 static const struct gdb_xml_attribute btrace_conf_attributes[] = {
2186 { "version", GDB_XML_AF_NONE, NULL, NULL },
2187 { NULL, GDB_XML_AF_NONE, NULL, NULL }
2188 };
2189
2190 static const struct gdb_xml_element btrace_conf_elements[] = {
2191 { "btrace-conf", btrace_conf_attributes, btrace_conf_children,
2192 GDB_XML_EF_NONE, NULL, NULL },
2193 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
2194 };
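
/* For illustration: a configuration document matching the tables above;
   the size is a hypothetical buffer size in bytes.

     <btrace-conf version="1.0">
       <bts size="65536"/>
     </btrace-conf>

   A <pt size="..."/> record would be used for Intel Processor Trace
   instead of <bts>.  */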
2195
2196 #endif /* defined (HAVE_LIBEXPAT) */
2197
2198 /* See btrace.h. */
2199
2200 void
2201 parse_xml_btrace_conf (struct btrace_config *conf, const char *xml)
2202 {
2203 int errcode;
2204
2205 #if defined (HAVE_LIBEXPAT)
2206
2207 errcode = gdb_xml_parse_quick (_("btrace-conf"), "btrace-conf.dtd",
2208 btrace_conf_elements, xml, conf);
2209 if (errcode != 0)
2210 error (_("Error parsing branch trace configuration."));
2211
2212 #else /* !defined (HAVE_LIBEXPAT) */
2213
2214 error (_("XML parsing is not supported."));
2215
2216 #endif /* !defined (HAVE_LIBEXPAT) */
2217 }
2218
2219 /* See btrace.h. */
2220
2221 const struct btrace_insn *
2222 btrace_insn_get (const struct btrace_insn_iterator *it)
2223 {
2224 const struct btrace_function *bfun;
2225 unsigned int index, end;
2226
2227 index = it->index;
2228 bfun = it->function;
2229
2230 /* Check if the iterator points to a gap in the trace. */
2231 if (bfun->errcode != 0)
2232 return NULL;
2233
2234 /* The index is within the bounds of this function's instruction vector. */
2235 end = VEC_length (btrace_insn_s, bfun->insn);
2236 gdb_assert (0 < end);
2237 gdb_assert (index < end);
2238
2239 return VEC_index (btrace_insn_s, bfun->insn, index);
2240 }
2241
2242 /* See btrace.h. */
2243
2244 int
2245 btrace_insn_get_error (const struct btrace_insn_iterator *it)
2246 {
2247 return it->function->errcode;
2248 }
2249
2250 /* See btrace.h. */
2251
2252 unsigned int
2253 btrace_insn_number (const struct btrace_insn_iterator *it)
2254 {
2255 return it->function->insn_offset + it->index;
2256 }
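
/* For illustration: with a function segment whose INSN_OFFSET is 42, the
   iterator at INDEX 3 denotes global instruction number 45.  */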
2257
2258 /* See btrace.h. */
2259
2260 void
2261 btrace_insn_begin (struct btrace_insn_iterator *it,
2262 const struct btrace_thread_info *btinfo)
2263 {
2264 const struct btrace_function *bfun;
2265
2266 bfun = btinfo->begin;
2267 if (bfun == NULL)
2268 error (_("No trace."));
2269
2270 it->function = bfun;
2271 it->index = 0;
2272 }
2273
2274 /* See btrace.h. */
2275
2276 void
2277 btrace_insn_end (struct btrace_insn_iterator *it,
2278 const struct btrace_thread_info *btinfo)
2279 {
2280 const struct btrace_function *bfun;
2281 unsigned int length;
2282
2283 bfun = btinfo->end;
2284 if (bfun == NULL)
2285 error (_("No trace."));
2286
2287 length = VEC_length (btrace_insn_s, bfun->insn);
2288
2289 /* The last function may either be a gap or contain the current
2290 instruction, which is one past the end of the execution trace; ignore
2291 it. */
2292 if (length > 0)
2293 length -= 1;
2294
2295 it->function = bfun;
2296 it->index = length;
2297 }
2298
2299 /* See btrace.h. */
2300
2301 unsigned int
2302 btrace_insn_next (struct btrace_insn_iterator *it, unsigned int stride)
2303 {
2304 const struct btrace_function *bfun;
2305 unsigned int index, steps;
2306
2307 bfun = it->function;
2308 steps = 0;
2309 index = it->index;
2310
2311 while (stride != 0)
2312 {
2313 unsigned int end, space, adv;
2314
2315 end = VEC_length (btrace_insn_s, bfun->insn);
2316
2317 /* An empty function segment represents a gap in the trace. We count
2318 it as one instruction. */
2319 if (end == 0)
2320 {
2321 const struct btrace_function *next;
2322
2323 next = bfun->flow.next;
2324 if (next == NULL)
2325 break;
2326
2327 stride -= 1;
2328 steps += 1;
2329
2330 bfun = next;
2331 index = 0;
2332
2333 continue;
2334 }
2335
2336 gdb_assert (0 < end);
2337 gdb_assert (index < end);
2338
2339 /* Compute the number of instructions remaining in this segment. */
2340 space = end - index;
2341
2342 /* Advance the iterator as far as possible within this segment. */
2343 adv = std::min (space, stride);
2344 stride -= adv;
2345 index += adv;
2346 steps += adv;
2347
2348 /* Move to the next function if we're at the end of this one. */
2349 if (index == end)
2350 {
2351 const struct btrace_function *next;
2352
2353 next = bfun->flow.next;
2354 if (next == NULL)
2355 {
2356 /* We stepped past the last function.
2357
2358 Let's adjust the index to point to the last instruction in
2359 the previous function. */
2360 index -= 1;
2361 steps -= 1;
2362 break;
2363 }
2364
2365 /* We now point to the first instruction in the new function. */
2366 bfun = next;
2367 index = 0;
2368 }
2369
2370 /* We did make progress. */
2371 gdb_assert (adv > 0);
2372 }
2373
2374 /* Update the iterator. */
2375 it->function = bfun;
2376 it->index = index;
2377
2378 return steps;
2379 }
2380
2381 /* See btrace.h. */
2382
2383 unsigned int
2384 btrace_insn_prev (struct btrace_insn_iterator *it, unsigned int stride)
2385 {
2386 const struct btrace_function *bfun;
2387 unsigned int index, steps;
2388
2389 bfun = it->function;
2390 steps = 0;
2391 index = it->index;
2392
2393 while (stride != 0)
2394 {
2395 unsigned int adv;
2396
2397 /* Move to the previous function if we're at the start of this one. */
2398 if (index == 0)
2399 {
2400 const struct btrace_function *prev;
2401
2402 prev = bfun->flow.prev;
2403 if (prev == NULL)
2404 break;
2405
2406 /* We point to one after the last instruction in the new function. */
2407 bfun = prev;
2408 index = VEC_length (btrace_insn_s, bfun->insn);
2409
2410 /* An empty function segment represents a gap in the trace. We count
2411 it as one instruction. */
2412 if (index == 0)
2413 {
2414 stride -= 1;
2415 steps += 1;
2416
2417 continue;
2418 }
2419 }
2420
2421 /* Advance the iterator as far as possible within this segment. */
2422 adv = std::min (index, stride);
2423
2424 stride -= adv;
2425 index -= adv;
2426 steps += adv;
2427
2428 /* We did make progress. */
2429 gdb_assert (adv > 0);
2430 }
2431
2432 /* Update the iterator. */
2433 it->function = bfun;
2434 it->index = index;
2435
2436 return steps;
2437 }
2438
2439 /* See btrace.h. */
2440
2441 int
2442 btrace_insn_cmp (const struct btrace_insn_iterator *lhs,
2443 const struct btrace_insn_iterator *rhs)
2444 {
2445 unsigned int lnum, rnum;
2446
2447 lnum = btrace_insn_number (lhs);
2448 rnum = btrace_insn_number (rhs);
2449
2450 return (int) (lnum - rnum);
2451 }
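
/* A usage sketch (for illustration; assuming TP is a thread_info pointer
   with a non-empty trace): iterate over all recorded instructions and
   print their addresses.  Gaps show up as btrace_insn_get returning
   NULL.

     struct btrace_insn_iterator it;

     btrace_insn_begin (&it, &tp->btrace);
     do
       {
         const struct btrace_insn *insn = btrace_insn_get (&it);

         if (insn != NULL)
           printf_unfiltered ("%s\n", core_addr_to_string_nz (insn->pc));
       }
     while (btrace_insn_next (&it, 1) != 0);  */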
2452
2453 /* See btrace.h. */
2454
2455 int
2456 btrace_find_insn_by_number (struct btrace_insn_iterator *it,
2457 const struct btrace_thread_info *btinfo,
2458 unsigned int number)
2459 {
2460 const struct btrace_function *bfun;
2461
2462 for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
2463 if (bfun->insn_offset <= number)
2464 break;
2465
2466 if (bfun == NULL)
2467 return 0;
2468
2469 if (bfun->insn_offset + ftrace_call_num_insn (bfun) <= number)
2470 return 0;
2471
2472 it->function = bfun;
2473 it->index = number - bfun->insn_offset;
2474
2475 return 1;
2476 }
2477
2478 /* See btrace.h. */
2479
2480 const struct btrace_function *
2481 btrace_call_get (const struct btrace_call_iterator *it)
2482 {
2483 return it->function;
2484 }
2485
2486 /* See btrace.h. */
2487
2488 unsigned int
2489 btrace_call_number (const struct btrace_call_iterator *it)
2490 {
2491 const struct btrace_thread_info *btinfo;
2492 const struct btrace_function *bfun;
2493 unsigned int insns;
2494
2495 btinfo = it->btinfo;
2496 bfun = it->function;
2497 if (bfun != NULL)
2498 return bfun->number;
2499
2500 /* For the end iterator, i.e. bfun == NULL, we return one more than the
2501 number of the last function. */
2502 bfun = btinfo->end;
2503 insns = VEC_length (btrace_insn_s, bfun->insn);
2504
2505 /* If the function contains only a single instruction (i.e. the current
2506 instruction), it will be skipped and its number is already the number
2507 we seek. */
2508 if (insns == 1)
2509 return bfun->number;
2510
2511 /* Otherwise, return one more than the number of the last function. */
2512 return bfun->number + 1;
2513 }
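
/* For illustration: if the last function segment has number 7 and holds
   more than just the current instruction, the end iterator reports call
   number 8; if it holds only the current instruction, that segment is
   skipped and the end iterator reports 7.  */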
2514
2515 /* See btrace.h. */
2516
2517 void
2518 btrace_call_begin (struct btrace_call_iterator *it,
2519 const struct btrace_thread_info *btinfo)
2520 {
2521 const struct btrace_function *bfun;
2522
2523 bfun = btinfo->begin;
2524 if (bfun == NULL)
2525 error (_("No trace."));
2526
2527 it->btinfo = btinfo;
2528 it->function = bfun;
2529 }
2530
2531 /* See btrace.h. */
2532
2533 void
2534 btrace_call_end (struct btrace_call_iterator *it,
2535 const struct btrace_thread_info *btinfo)
2536 {
2537 const struct btrace_function *bfun;
2538
2539 bfun = btinfo->end;
2540 if (bfun == NULL)
2541 error (_("No trace."));
2542
2543 it->btinfo = btinfo;
2544 it->function = NULL;
2545 }
2546
2547 /* See btrace.h. */
2548
2549 unsigned int
2550 btrace_call_next (struct btrace_call_iterator *it, unsigned int stride)
2551 {
2552 const struct btrace_function *bfun;
2553 unsigned int steps;
2554
2555 bfun = it->function;
2556 steps = 0;
2557 while (bfun != NULL)
2558 {
2559 const struct btrace_function *next;
2560 unsigned int insns;
2561
2562 next = bfun->flow.next;
2563 if (next == NULL)
2564 {
2565 /* Ignore the last function if it only contains a single
2566 (i.e. the current) instruction. */
2567 insns = VEC_length (btrace_insn_s, bfun->insn);
2568 if (insns == 1)
2569 steps -= 1;
2570 }
2571
2572 if (stride == steps)
2573 break;
2574
2575 bfun = next;
2576 steps += 1;
2577 }
2578
2579 it->function = bfun;
2580 return steps;
2581 }
2582
2583 /* See btrace.h. */
2584
2585 unsigned int
2586 btrace_call_prev (struct btrace_call_iterator *it, unsigned int stride)
2587 {
2588 const struct btrace_thread_info *btinfo;
2589 const struct btrace_function *bfun;
2590 unsigned int steps;
2591
2592 bfun = it->function;
2593 steps = 0;
2594
2595 if (bfun == NULL)
2596 {
2597 unsigned int insns;
2598
2599 btinfo = it->btinfo;
2600 bfun = btinfo->end;
2601 if (bfun == NULL)
2602 return 0;
2603
2604 /* Ignore the last function if it only contains a single
2605 (i.e. the current) instruction. */
2606 insns = VEC_length (btrace_insn_s, bfun->insn);
2607 if (insns == 1)
2608 bfun = bfun->flow.prev;
2609
2610 if (bfun == NULL)
2611 return 0;
2612
2613 steps += 1;
2614 }
2615
2616 while (steps < stride)
2617 {
2618 const struct btrace_function *prev;
2619
2620 prev = bfun->flow.prev;
2621 if (prev == NULL)
2622 break;
2623
2624 bfun = prev;
2625 steps += 1;
2626 }
2627
2628 it->function = bfun;
2629 return steps;
2630 }
2631
2632 /* See btrace.h. */
2633
2634 int
2635 btrace_call_cmp (const struct btrace_call_iterator *lhs,
2636 const struct btrace_call_iterator *rhs)
2637 {
2638 unsigned int lnum, rnum;
2639
2640 lnum = btrace_call_number (lhs);
2641 rnum = btrace_call_number (rhs);
2642
2643 return (int) (lnum - rnum);
2644 }
2645
2646 /* See btrace.h. */
2647
2648 int
2649 btrace_find_call_by_number (struct btrace_call_iterator *it,
2650 const struct btrace_thread_info *btinfo,
2651 unsigned int number)
2652 {
2653 const struct btrace_function *bfun;
2654
2655 for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
2656 {
2657 unsigned int bnum;
2658
2659 bnum = bfun->number;
2660 if (number == bnum)
2661 {
2662 it->btinfo = btinfo;
2663 it->function = bfun;
2664 return 1;
2665 }
2666
2667 /* Functions are ordered and numbered consecutively. We could bail out
2668 earlier. On the other hand, it is very unlikely that we search for
2669 a nonexistent function. */
2670 }
2671
2672 return 0;
2673 }
2674
2675 /* See btrace.h. */
2676
2677 void
2678 btrace_set_insn_history (struct btrace_thread_info *btinfo,
2679 const struct btrace_insn_iterator *begin,
2680 const struct btrace_insn_iterator *end)
2681 {
2682 if (btinfo->insn_history == NULL)
2683 btinfo->insn_history = XCNEW (struct btrace_insn_history);
2684
2685 btinfo->insn_history->begin = *begin;
2686 btinfo->insn_history->end = *end;
2687 }
2688
2689 /* See btrace.h. */
2690
2691 void
2692 btrace_set_call_history (struct btrace_thread_info *btinfo,
2693 const struct btrace_call_iterator *begin,
2694 const struct btrace_call_iterator *end)
2695 {
2696 gdb_assert (begin->btinfo == end->btinfo);
2697
2698 if (btinfo->call_history == NULL)
2699 btinfo->call_history = XCNEW (struct btrace_call_history);
2700
2701 btinfo->call_history->begin = *begin;
2702 btinfo->call_history->end = *end;
2703 }
2704
2705 /* See btrace.h. */
2706
2707 int
2708 btrace_is_replaying (struct thread_info *tp)
2709 {
2710 return tp->btrace.replay != NULL;
2711 }
2712
2713 /* See btrace.h. */
2714
2715 int
2716 btrace_is_empty (struct thread_info *tp)
2717 {
2718 struct btrace_insn_iterator begin, end;
2719 struct btrace_thread_info *btinfo;
2720
2721 btinfo = &tp->btrace;
2722
2723 if (btinfo->begin == NULL)
2724 return 1;
2725
2726 btrace_insn_begin (&begin, btinfo);
2727 btrace_insn_end (&end, btinfo);
2728
2729 return btrace_insn_cmp (&begin, &end) == 0;
2730 }
2731
2732 /* Forward the cleanup request. */
2733
2734 static void
2735 do_btrace_data_cleanup (void *arg)
2736 {
2737 btrace_data_fini ((struct btrace_data *) arg);
2738 }
2739
2740 /* See btrace.h. */
2741
2742 struct cleanup *
2743 make_cleanup_btrace_data (struct btrace_data *data)
2744 {
2745 return make_cleanup (do_btrace_data_cleanup, data);
2746 }
2747
2748 #if defined (HAVE_LIBIPT)
2749
2750 /* Print a single packet. */
2751
2752 static void
2753 pt_print_packet (const struct pt_packet *packet)
2754 {
2755 switch (packet->type)
2756 {
2757 default:
2758 printf_unfiltered (("[??: %x]"), packet->type);
2759 break;
2760
2761 case ppt_psb:
2762 printf_unfiltered (("psb"));
2763 break;
2764
2765 case ppt_psbend:
2766 printf_unfiltered (("psbend"));
2767 break;
2768
2769 case ppt_pad:
2770 printf_unfiltered (("pad"));
2771 break;
2772
2773 case ppt_tip:
2774 printf_unfiltered (("tip %u: 0x%" PRIx64 ""),
2775 packet->payload.ip.ipc,
2776 packet->payload.ip.ip);
2777 break;
2778
2779 case ppt_tip_pge:
2780 printf_unfiltered (("tip.pge %u: 0x%" PRIx64 ""),
2781 packet->payload.ip.ipc,
2782 packet->payload.ip.ip);
2783 break;
2784
2785 case ppt_tip_pgd:
2786 printf_unfiltered (("tip.pgd %u: 0x%" PRIx64 ""),
2787 packet->payload.ip.ipc,
2788 packet->payload.ip.ip);
2789 break;
2790
2791 case ppt_fup:
2792 printf_unfiltered (("fup %u: 0x%" PRIx64 ""),
2793 packet->payload.ip.ipc,
2794 packet->payload.ip.ip);
2795 break;
2796
2797 case ppt_tnt_8:
2798 printf_unfiltered (("tnt-8 %u: 0x%" PRIx64 ""),
2799 packet->payload.tnt.bit_size,
2800 packet->payload.tnt.payload);
2801 break;
2802
2803 case ppt_tnt_64:
2804 printf_unfiltered (("tnt-64 %u: 0x%" PRIx64 ""),
2805 packet->payload.tnt.bit_size,
2806 packet->payload.tnt.payload);
2807 break;
2808
2809 case ppt_pip:
2810 printf_unfiltered (("pip %" PRIx64 "%s"), packet->payload.pip.cr3,
2811 packet->payload.pip.nr ? (" nr") : (""));
2812 break;
2813
2814 case ppt_tsc:
2815 printf_unfiltered (("tsc %" PRIx64 ""), packet->payload.tsc.tsc);
2816 break;
2817
2818 case ppt_cbr:
2819 printf_unfiltered (("cbr %u"), packet->payload.cbr.ratio);
2820 break;
2821
2822 case ppt_mode:
2823 switch (packet->payload.mode.leaf)
2824 {
2825 default:
2826 printf_unfiltered (("mode %u"), packet->payload.mode.leaf);
2827 break;
2828
2829 case pt_mol_exec:
2830 printf_unfiltered (("mode.exec%s%s"),
2831 packet->payload.mode.bits.exec.csl
2832 ? (" cs.l") : (""),
2833 packet->payload.mode.bits.exec.csd
2834 ? (" cs.d") : (""));
2835 break;
2836
2837 case pt_mol_tsx:
2838 printf_unfiltered (("mode.tsx%s%s"),
2839 packet->payload.mode.bits.tsx.intx
2840 ? (" intx") : (""),
2841 packet->payload.mode.bits.tsx.abrt
2842 ? (" abrt") : (""));
2843 break;
2844 }
2845 break;
2846
2847 case ppt_ovf:
2848 printf_unfiltered (("ovf"));
2849 break;
2850
2851 case ppt_stop:
2852 printf_unfiltered (("stop"));
2853 break;
2854
2855 case ppt_vmcs:
2856 printf_unfiltered (("vmcs %" PRIx64 ""), packet->payload.vmcs.base);
2857 break;
2858
2859 case ppt_tma:
2860 printf_unfiltered (("tma %x %x"), packet->payload.tma.ctc,
2861 packet->payload.tma.fc);
2862 break;
2863
2864 case ppt_mtc:
2865 printf_unfiltered (("mtc %x"), packet->payload.mtc.ctc);
2866 break;
2867
2868 case ppt_cyc:
2869 printf_unfiltered (("cyc %" PRIx64 ""), packet->payload.cyc.value);
2870 break;
2871
2872 case ppt_mnt:
2873 printf_unfiltered (("mnt %" PRIx64 ""), packet->payload.mnt.payload);
2874 break;
2875 }
2876 }
2877
2878 /* Decode packets into MAINT using DECODER. */
2879
2880 static void
2881 btrace_maint_decode_pt (struct btrace_maint_info *maint,
2882 struct pt_packet_decoder *decoder)
2883 {
2884 int errcode;
2885
2886 for (;;)
2887 {
2888 struct btrace_pt_packet packet;
2889
2890 errcode = pt_pkt_sync_forward (decoder);
2891 if (errcode < 0)
2892 break;
2893
2894 for (;;)
2895 {
2896 pt_pkt_get_offset (decoder, &packet.offset);
2897
2898 errcode = pt_pkt_next (decoder, &packet.packet,
2899 sizeof (packet.packet));
2900 if (errcode < 0)
2901 break;
2902
2903 if (maint_btrace_pt_skip_pad == 0 || packet.packet.type != ppt_pad)
2904 {
2905 packet.errcode = pt_errcode (errcode);
2906 VEC_safe_push (btrace_pt_packet_s, maint->variant.pt.packets,
2907 &packet);
2908 }
2909 }
2910
2911 if (errcode == -pte_eos)
2912 break;
2913
2914 packet.errcode = pt_errcode (errcode);
2915 VEC_safe_push (btrace_pt_packet_s, maint->variant.pt.packets,
2916 &packet);
2917
2918 warning (_("Error at trace offset 0x%" PRIx64 ": %s."),
2919 packet.offset, pt_errstr (packet.errcode));
2920 }
2921
2922 if (errcode != -pte_eos)
2923 warning (_("Failed to synchronize onto the Intel Processor Trace "
2924 "stream: %s."), pt_errstr (pt_errcode (errcode)));
2925 }
2926
2927 /* Update the packet history in BTINFO. */
2928
2929 static void
2930 btrace_maint_update_pt_packets (struct btrace_thread_info *btinfo)
2931 {
2933 struct pt_packet_decoder *decoder;
2934 struct btrace_data_pt *pt;
2935 struct pt_config config;
2936 int errcode;
2937
2938 pt = &btinfo->data.variant.pt;
2939
2940 /* Nothing to do if there is no trace. */
2941 if (pt->size == 0)
2942 return;
2943
2944 memset (&config, 0, sizeof (config));
2945
2946 config.size = sizeof (config);
2947 config.begin = pt->data;
2948 config.end = pt->data + pt->size;
2949
2950 config.cpu.vendor = pt_translate_cpu_vendor (pt->config.cpu.vendor);
2951 config.cpu.family = pt->config.cpu.family;
2952 config.cpu.model = pt->config.cpu.model;
2953 config.cpu.stepping = pt->config.cpu.stepping;
2954
2955 errcode = pt_cpu_errata (&config.errata, &config.cpu);
2956 if (errcode < 0)
2957 error (_("Failed to configure the Intel Processor Trace decoder: %s."),
2958 pt_errstr (pt_errcode (errcode)));
2959
2960 decoder = pt_pkt_alloc_decoder (&config);
2961 if (decoder == NULL)
2962 error (_("Failed to allocate the Intel Processor Trace decoder."));
2963
2964 TRY
2965 {
2966 btrace_maint_decode_pt (&btinfo->maint, decoder);
2967 }
2968 CATCH (except, RETURN_MASK_ALL)
2969 {
2970 pt_pkt_free_decoder (decoder);
2971
2972 if (except.reason < 0)
2973 throw_exception (except);
2974 }
2975 END_CATCH
2976
2977 pt_pkt_free_decoder (decoder);
2978 }
2979
2980 #endif /* defined (HAVE_LIBIPT) */
2981
2982 /* Update the packet maintenance information for BTINFO and store the
2983 low and high bounds into BEGIN and END, respectively.
2984 Store the current iterator state into FROM and TO. */
2985
2986 static void
2987 btrace_maint_update_packets (struct btrace_thread_info *btinfo,
2988 unsigned int *begin, unsigned int *end,
2989 unsigned int *from, unsigned int *to)
2990 {
2991 switch (btinfo->data.format)
2992 {
2993 default:
2994 *begin = 0;
2995 *end = 0;
2996 *from = 0;
2997 *to = 0;
2998 break;
2999
3000 case BTRACE_FORMAT_BTS:
3001 /* Nothing to do - we operate directly on BTINFO->DATA. */
3002 *begin = 0;
3003 *end = VEC_length (btrace_block_s, btinfo->data.variant.bts.blocks);
3004 *from = btinfo->maint.variant.bts.packet_history.begin;
3005 *to = btinfo->maint.variant.bts.packet_history.end;
3006 break;
3007
3008 #if defined (HAVE_LIBIPT)
3009 case BTRACE_FORMAT_PT:
3010 if (VEC_empty (btrace_pt_packet_s, btinfo->maint.variant.pt.packets))
3011 btrace_maint_update_pt_packets (btinfo);
3012
3013 *begin = 0;
3014 *end = VEC_length (btrace_pt_packet_s, btinfo->maint.variant.pt.packets);
3015 *from = btinfo->maint.variant.pt.packet_history.begin;
3016 *to = btinfo->maint.variant.pt.packet_history.end;
3017 break;
3018 #endif /* defined (HAVE_LIBIPT) */
3019 }
3020 }
3021
3022 /* Print packets in BTINFO from BEGIN (inclusive) until END (exclusive) and
3023 update the current iterator position. */
3024
3025 static void
3026 btrace_maint_print_packets (struct btrace_thread_info *btinfo,
3027 unsigned int begin, unsigned int end)
3028 {
3029 switch (btinfo->data.format)
3030 {
3031 default:
3032 break;
3033
3034 case BTRACE_FORMAT_BTS:
3035 {
3036 VEC (btrace_block_s) *blocks;
3037 unsigned int blk;
3038
3039 blocks = btinfo->data.variant.bts.blocks;
3040 for (blk = begin; blk < end; ++blk)
3041 {
3042 const btrace_block_s *block;
3043
3044 block = VEC_index (btrace_block_s, blocks, blk);
3045
3046 printf_unfiltered ("%u\tbegin: %s, end: %s\n", blk,
3047 core_addr_to_string_nz (block->begin),
3048 core_addr_to_string_nz (block->end));
3049 }
3050
3051 btinfo->maint.variant.bts.packet_history.begin = begin;
3052 btinfo->maint.variant.bts.packet_history.end = end;
3053 }
3054 break;
3055
3056 #if defined (HAVE_LIBIPT)
3057 case BTRACE_FORMAT_PT:
3058 {
3059 VEC (btrace_pt_packet_s) *packets;
3060 unsigned int pkt;
3061
3062 packets = btinfo->maint.variant.pt.packets;
3063 for (pkt = begin; pkt < end; ++pkt)
3064 {
3065 const struct btrace_pt_packet *packet;
3066
3067 packet = VEC_index (btrace_pt_packet_s, packets, pkt);
3068
3069 printf_unfiltered ("%u\t", pkt);
3070 printf_unfiltered ("0x%" PRIx64 "\t", packet->offset);
3071
3072 if (packet->errcode == pte_ok)
3073 pt_print_packet (&packet->packet);
3074 else
3075 printf_unfiltered ("[error: %s]", pt_errstr (packet->errcode));
3076
3077 printf_unfiltered ("\n");
3078 }
3079
3080 btinfo->maint.variant.pt.packet_history.begin = begin;
3081 btinfo->maint.variant.pt.packet_history.end = end;
3082 }
3083 break;
3084 #endif /* defined (HAVE_LIBIPT) */
3085 }
3086 }
3087
3088 /* Read a number from an argument string. */
3089
3090 static unsigned int
3091 get_uint (char **arg)
3092 {
3093 char *begin, *end, *pos;
3094 unsigned long number;
3095
3096 begin = *arg;
3097 pos = skip_spaces (begin);
3098
3099 if (!isdigit (*pos))
3100 error (_("Expected positive number, got: %s."), pos);
3101
3102 number = strtoul (pos, &end, 10);
3103 if (number > UINT_MAX)
3104 error (_("Number too big."));
3105
3106 *arg += (end - begin);
3107
3108 return (unsigned int) number;
3109 }
3110
3111 /* Read a context size from an argument string. */
3112
3113 static int
3114 get_context_size (char **arg)
3115 {
3116 char *pos;
3118
3119 pos = skip_spaces (*arg);
3120
3121 if (!isdigit (*pos))
3122 error (_("Expected positive number, got: %s."), pos);
3123
3124 return strtol (pos, arg, 10);
3125 }
3126
3127 /* Complain about junk at the end of an argument string. */
3128
3129 static void
3130 no_chunk (char *arg)
3131 {
3132 if (*arg != 0)
3133 error (_("Junk after argument: %s."), arg);
3134 }
3135
3136 /* The "maintenance btrace packet-history" command. */
3137
3138 static void
3139 maint_btrace_packet_history_cmd (char *arg, int from_tty)
3140 {
3141 struct btrace_thread_info *btinfo;
3142 struct thread_info *tp;
3143 unsigned int size, begin, end, from, to;
3144
3145 tp = find_thread_ptid (inferior_ptid);
3146 if (tp == NULL)
3147 error (_("No thread."));
3148
3149 size = 10;
3150 btinfo = &tp->btrace;
3151
3152 btrace_maint_update_packets (btinfo, &begin, &end, &from, &to);
3153 if (begin == end)
3154 {
3155 printf_unfiltered (_("No trace.\n"));
3156 return;
3157 }
3158
3159 if (arg == NULL || *arg == 0 || strcmp (arg, "+") == 0)
3160 {
3161 from = to;
3162
3163 if (end - from < size)
3164 size = end - from;
3165 to = from + size;
3166 }
3167 else if (strcmp (arg, "-") == 0)
3168 {
3169 to = from;
3170
3171 if (to - begin < size)
3172 size = to - begin;
3173 from = to - size;
3174 }
3175 else
3176 {
3177 from = get_uint (&arg);
3178 if (end <= from)
3179 error (_("'%u' is out of range."), from);
3180
3181 arg = skip_spaces (arg);
3182 if (*arg == ',')
3183 {
3184 arg = skip_spaces (++arg);
3185
3186 if (*arg == '+')
3187 {
3188 arg += 1;
3189 size = get_context_size (&arg);
3190
3191 no_chunk (arg);
3192
3193 if (end - from < size)
3194 size = end - from;
3195 to = from + size;
3196 }
3197 else if (*arg == '-')
3198 {
3199 arg += 1;
3200 size = get_context_size (&arg);
3201
3202 no_chunk (arg);
3203
3204 /* Include the packet given as first argument. */
3205 from += 1;
3206 to = from;
3207
3208 if (to - begin < size)
3209 size = to - begin;
3210 from = to - size;
3211 }
3212 else
3213 {
3214 to = get_uint (&arg);
3215
3216 /* Include the packet at the second argument and silently
3217 truncate the range. */
3218 if (to < end)
3219 to += 1;
3220 else
3221 to = end;
3222
3223 no_chunk (arg);
3224 }
3225 }
3226 else
3227 {
3228 no_chunk (arg);
3229
3230 if (end - from < size)
3231 size = end - from;
3232 to = from + size;
3233 }
3234
3235 dont_repeat ();
3236 }
3237
3238 btrace_maint_print_packets (btinfo, from, to);
3239 }
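
/* For illustration, hypothetical invocations assuming enough packets:
   "maint btrace packet-history 10" prints packets 10..19,
   "maint btrace packet-history 10,20" prints packets 10..20,
   "maint btrace packet-history 10,+5" prints packets 10..14, and
   "maint btrace packet-history 20,-5" prints packets 16..20.  */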
3240
3241 /* The "maintenance btrace clear-packet-history" command. */
3242
3243 static void
3244 maint_btrace_clear_packet_history_cmd (char *args, int from_tty)
3245 {
3246 struct btrace_thread_info *btinfo;
3247 struct thread_info *tp;
3248
3249 if (args != NULL && *args != 0)
3250 error (_("Invalid argument."));
3251
3252 tp = find_thread_ptid (inferior_ptid);
3253 if (tp == NULL)
3254 error (_("No thread."));
3255
3256 btinfo = &tp->btrace;
3257
3258 /* Must clear the maint data before clearing BTINFO->DATA, which it depends on. */
3259 btrace_maint_clear (btinfo);
3260 btrace_data_clear (&btinfo->data);
3261 }
3262
3263 /* The "maintenance btrace clear" command. */
3264
3265 static void
3266 maint_btrace_clear_cmd (char *args, int from_tty)
3267 {
3268 struct btrace_thread_info *btinfo;
3269 struct thread_info *tp;
3270
3271 if (args != NULL && *args != 0)
3272 error (_("Invalid argument."));
3273
3274 tp = find_thread_ptid (inferior_ptid);
3275 if (tp == NULL)
3276 error (_("No thread."));
3277
3278 btrace_clear (tp);
3279 }
3280
3281 /* The "maintenance btrace" command. */
3282
3283 static void
3284 maint_btrace_cmd (char *args, int from_tty)
3285 {
3286 help_list (maint_btrace_cmdlist, "maintenance btrace ", all_commands,
3287 gdb_stdout);
3288 }
3289
3290 /* The "maintenance set btrace" command. */
3291
3292 static void
3293 maint_btrace_set_cmd (char *args, int from_tty)
3294 {
3295 help_list (maint_btrace_set_cmdlist, "maintenance set btrace ", all_commands,
3296 gdb_stdout);
3297 }
3298
3299 /* The "maintenance show btrace" command. */
3300
3301 static void
3302 maint_btrace_show_cmd (char *args, int from_tty)
3303 {
3304 help_list (maint_btrace_show_cmdlist, "maintenance show btrace ",
3305 all_commands, gdb_stdout);
3306 }
3307
3308 /* The "maintenance set btrace pt" command. */
3309
3310 static void
3311 maint_btrace_pt_set_cmd (char *args, int from_tty)
3312 {
3313 help_list (maint_btrace_pt_set_cmdlist, "maintenance set btrace pt ",
3314 all_commands, gdb_stdout);
3315 }
3316
3317 /* The "maintenance show btrace pt" command. */
3318
3319 static void
3320 maint_btrace_pt_show_cmd (char *args, int from_tty)
3321 {
3322 help_list (maint_btrace_pt_show_cmdlist, "maintenance show btrace pt ",
3323 all_commands, gdb_stdout);
3324 }
3325
3326 /* The "maintenance info btrace" command. */
3327
3328 static void
3329 maint_info_btrace_cmd (char *args, int from_tty)
3330 {
3331 struct btrace_thread_info *btinfo;
3332 struct thread_info *tp;
3333 const struct btrace_config *conf;
3334
3335 if (args != NULL && *args != 0)
3336 error (_("Invalid argument."));
3337
3338 tp = find_thread_ptid (inferior_ptid);
3339 if (tp == NULL)
3340 error (_("No thread."));
3341
3342 btinfo = &tp->btrace;
3343
3344 conf = btrace_conf (btinfo);
3345 if (conf == NULL)
3346 error (_("No btrace configuration."));
3347
3348 printf_unfiltered (_("Format: %s.\n"),
3349 btrace_format_string (conf->format));
3350
3351 switch (conf->format)
3352 {
3353 default:
3354 break;
3355
3356 case BTRACE_FORMAT_BTS:
3357 printf_unfiltered (_("Number of packets: %u.\n"),
3358 VEC_length (btrace_block_s,
3359 btinfo->data.variant.bts.blocks));
3360 break;
3361
3362 #if defined (HAVE_LIBIPT)
3363 case BTRACE_FORMAT_PT:
3364 {
3365 struct pt_version version;
3366
3367 version = pt_library_version ();
3368 printf_unfiltered (_("Version: %u.%u.%u%s.\n"), version.major,
3369 version.minor, version.build,
3370 version.ext != NULL ? version.ext : "");
3371
3372 btrace_maint_update_pt_packets (btinfo);
3373 printf_unfiltered (_("Number of packets: %u.\n"),
3374 VEC_length (btrace_pt_packet_s,
3375 btinfo->maint.variant.pt.packets));
3376 }
3377 break;
3378 #endif /* defined (HAVE_LIBIPT) */
3379 }
3380 }
3381
3382 /* The "maint show btrace pt skip-pad" show value function. */
3383
3384 static void
3385 show_maint_btrace_pt_skip_pad (struct ui_file *file, int from_tty,
3386 struct cmd_list_element *c,
3387 const char *value)
3388 {
3389 fprintf_filtered (file, _("Skip PAD packets is %s.\n"), value);
3390 }
3391
3392
3393 /* Initialize btrace maintenance commands. */
3394
3395 void _initialize_btrace (void);
3396 void
3397 _initialize_btrace (void)
3398 {
3399 add_cmd ("btrace", class_maintenance, maint_info_btrace_cmd,
3400 _("Info about branch tracing data."), &maintenanceinfolist);
3401
3402 add_prefix_cmd ("btrace", class_maintenance, maint_btrace_cmd,
3403 _("Branch tracing maintenance commands."),
3404 &maint_btrace_cmdlist, "maintenance btrace ",
3405 0, &maintenancelist);
3406
3407 add_prefix_cmd ("btrace", class_maintenance, maint_btrace_set_cmd, _("\
3408 Set branch tracing specific variables."),
3409 &maint_btrace_set_cmdlist, "maintenance set btrace ",
3410 0, &maintenance_set_cmdlist);
3411
3412 add_prefix_cmd ("pt", class_maintenance, maint_btrace_pt_set_cmd, _("\
3413 Set Intel Processor Trace specific variables."),
3414 &maint_btrace_pt_set_cmdlist, "maintenance set btrace pt ",
3415 0, &maint_btrace_set_cmdlist);
3416
3417 add_prefix_cmd ("btrace", class_maintenance, maint_btrace_show_cmd, _("\
3418 Show branch tracing specific variables."),
3419 &maint_btrace_show_cmdlist, "maintenance show btrace ",
3420 0, &maintenance_show_cmdlist);
3421
3422 add_prefix_cmd ("pt", class_maintenance, maint_btrace_pt_show_cmd, _("\
3423 Show Intel Processor Trace specific variables."),
3424 &maint_btrace_pt_show_cmdlist, "maintenance show btrace pt ",
3425 0, &maint_btrace_show_cmdlist);
3426
3427 add_setshow_boolean_cmd ("skip-pad", class_maintenance,
3428 &maint_btrace_pt_skip_pad, _("\
3429 Set whether PAD packets should be skipped in the btrace packet history."), _("\
3430 Show whether PAD packets should be skipped in the btrace packet history."),_("\
3431 When enabled, PAD packets are ignored in the btrace packet history."),
3432 NULL, show_maint_btrace_pt_skip_pad,
3433 &maint_btrace_pt_set_cmdlist,
3434 &maint_btrace_pt_show_cmdlist);
3435
3436 add_cmd ("packet-history", class_maintenance, maint_btrace_packet_history_cmd,
3437 _("Print the raw branch tracing data.\n\
3438 With no argument, print ten more packets after the previous ten-line print.\n\
3439 With '-' as argument, print ten packets before the previous ten-line print.\n\
3440 One argument specifies the starting packet of a ten-line print.\n\
3441 Two arguments, separated by a comma, specify the starting and ending packets \
3442 to print.\n\
3443 When preceded by '+'/'-', the second argument specifies the distance from \
3444 the first.\n"),
3445 &maint_btrace_cmdlist);
3446
3447 add_cmd ("clear-packet-history", class_maintenance,
3448 maint_btrace_clear_packet_history_cmd,
3449 _("Clears the branch tracing packet history.\n\
3450 Discards the raw branch tracing data but not the execution history data.\n\
3451 "),
3452 &maint_btrace_cmdlist);
3453
3454 add_cmd ("clear", class_maintenance, maint_btrace_clear_cmd,
3455 _("Clears the branch tracing data.\n\
3456 Discards the raw branch tracing data and the execution history data.\n\
3457 The next 'record' command will fetch the branch tracing data anew.\n\
3458 "),
3459 &maint_btrace_cmdlist);
3460
3461 }