Don't memset non-POD types: struct btrace_insn
1 /* Branch trace support for GDB, the GNU debugger.
2
3 Copyright (C) 2013-2017 Free Software Foundation, Inc.
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include "btrace.h"
24 #include "gdbthread.h"
25 #include "inferior.h"
26 #include "target.h"
27 #include "record.h"
28 #include "symtab.h"
29 #include "disasm.h"
30 #include "source.h"
31 #include "filenames.h"
32 #include "xml-support.h"
33 #include "regcache.h"
34 #include "rsp-low.h"
35 #include "gdbcmd.h"
36 #include "cli/cli-utils.h"
37
38 #include <inttypes.h>
39 #include <ctype.h>
40 #include <algorithm>
41
42 /* Command lists for btrace maintenance commands. */
43 static struct cmd_list_element *maint_btrace_cmdlist;
44 static struct cmd_list_element *maint_btrace_set_cmdlist;
45 static struct cmd_list_element *maint_btrace_show_cmdlist;
46 static struct cmd_list_element *maint_btrace_pt_set_cmdlist;
47 static struct cmd_list_element *maint_btrace_pt_show_cmdlist;
48
49 /* Control whether to skip PAD packets when computing the packet history. */
50 static int maint_btrace_pt_skip_pad = 1;
51
52 /* A vector of function segments. */
53 typedef struct btrace_function * bfun_s;
54 DEF_VEC_P (bfun_s);
55
56 static void btrace_add_pc (struct thread_info *tp);
57
58 /* Print a record debug message. Use do ... while (0) to avoid ambiguities
59 when used in if statements. */
60
61 #define DEBUG(msg, args...) \
62 do \
63 { \
64 if (record_debug != 0) \
65 fprintf_unfiltered (gdb_stdlog, \
66 "[btrace] " msg "\n", ##args); \
67 } \
68 while (0)
69
70 #define DEBUG_FTRACE(msg, args...) DEBUG ("[ftrace] " msg, ##args)
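/* Illustrative note (not part of the original file): the do ... while (0)
   wrapper matters when the macro is used as the sole statement of an
   if/else.  A sketch of the usage it protects:

     if (record_debug)
       DEBUG ("tracing");    expands to do { ... } while (0);
     else                    a single statement, so the else still
       something_else ();    binds to the if above

   A plain braced expansion followed by ';' would end the if statement
   and orphan the else.  */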
71
72 /* Return the function name of a recorded function segment for printing.
73 This function never returns NULL. */
74
75 static const char *
76 ftrace_print_function_name (const struct btrace_function *bfun)
77 {
78 struct minimal_symbol *msym;
79 struct symbol *sym;
80
81 msym = bfun->msym;
82 sym = bfun->sym;
83
84 if (sym != NULL)
85 return SYMBOL_PRINT_NAME (sym);
86
87 if (msym != NULL)
88 return MSYMBOL_PRINT_NAME (msym);
89
90 return "<unknown>";
91 }
92
93 /* Return the file name of a recorded function segment for printing.
94 This function never returns NULL. */
95
96 static const char *
97 ftrace_print_filename (const struct btrace_function *bfun)
98 {
99 struct symbol *sym;
100 const char *filename;
101
102 sym = bfun->sym;
103
104 if (sym != NULL)
105 filename = symtab_to_filename_for_display (symbol_symtab (sym));
106 else
107 filename = "<unknown>";
108
109 return filename;
110 }
111
112 /* Return a string representation of the address of an instruction.
113 This function never returns NULL. */
114
115 static const char *
116 ftrace_print_insn_addr (const struct btrace_insn *insn)
117 {
118 if (insn == NULL)
119 return "<nil>";
120
121 return core_addr_to_string_nz (insn->pc);
122 }
123
124 /* Print an ftrace debug status message. */
125
126 static void
127 ftrace_debug (const struct btrace_function *bfun, const char *prefix)
128 {
129 const char *fun, *file;
130 unsigned int ibegin, iend;
131 int level;
132
133 fun = ftrace_print_function_name (bfun);
134 file = ftrace_print_filename (bfun);
135 level = bfun->level;
136
137 ibegin = bfun->insn_offset;
138 iend = ibegin + VEC_length (btrace_insn_s, bfun->insn);
139
140 DEBUG_FTRACE ("%s: fun = %s, file = %s, level = %d, insn = [%u; %u)",
141 prefix, fun, file, level, ibegin, iend);
142 }
143
144 /* Return the number of instructions in a given function call segment. */
145
146 static unsigned int
147 ftrace_call_num_insn (const struct btrace_function* bfun)
148 {
149 if (bfun == NULL)
150 return 0;
151
152 /* A gap is always counted as one instruction. */
153 if (bfun->errcode != 0)
154 return 1;
155
156 return VEC_length (btrace_insn_s, bfun->insn);
157 }
158
159 /* Return non-zero if BFUN does not match MFUN and FUN,
160 return zero otherwise. */
161
162 static int
163 ftrace_function_switched (const struct btrace_function *bfun,
164 const struct minimal_symbol *mfun,
165 const struct symbol *fun)
166 {
167 struct minimal_symbol *msym;
168 struct symbol *sym;
169
170 msym = bfun->msym;
171 sym = bfun->sym;
172
173 /* If the minimal symbol changed, we certainly switched functions. */
174 if (mfun != NULL && msym != NULL
175 && strcmp (MSYMBOL_LINKAGE_NAME (mfun), MSYMBOL_LINKAGE_NAME (msym)) != 0)
176 return 1;
177
178 /* If the symbol changed, we certainly switched functions. */
179 if (fun != NULL && sym != NULL)
180 {
181 const char *bfname, *fname;
182
183 /* Check the function name. */
184 if (strcmp (SYMBOL_LINKAGE_NAME (fun), SYMBOL_LINKAGE_NAME (sym)) != 0)
185 return 1;
186
187 /* Check the location of those functions, as well. */
188 bfname = symtab_to_fullname (symbol_symtab (sym));
189 fname = symtab_to_fullname (symbol_symtab (fun));
190 if (filename_cmp (fname, bfname) != 0)
191 return 1;
192 }
193
194 /* If we lost symbol information, we switched functions. */
195 if (!(msym == NULL && sym == NULL) && mfun == NULL && fun == NULL)
196 return 1;
197
198 /* If we gained symbol information, we switched functions. */
199 if (msym == NULL && sym == NULL && !(mfun == NULL && fun == NULL))
200 return 1;
201
202 return 0;
203 }
204
205 /* Allocate and initialize a new branch trace function segment.
206 PREV is the chronologically preceding function segment.
207 MFUN and FUN are the symbol information we have for this function. */
208
209 static struct btrace_function *
210 ftrace_new_function (struct btrace_function *prev,
211 struct minimal_symbol *mfun,
212 struct symbol *fun)
213 {
214 struct btrace_function *bfun;
215
216 bfun = XCNEW (struct btrace_function);
217
218 bfun->msym = mfun;
219 bfun->sym = fun;
220 bfun->flow.prev = prev;
221
222 if (prev == NULL)
223 {
224 /* Start counting at one. */
225 bfun->number = 1;
226 bfun->insn_offset = 1;
227 }
228 else
229 {
230 gdb_assert (prev->flow.next == NULL);
231 prev->flow.next = bfun;
232
233 bfun->number = prev->number + 1;
234 bfun->insn_offset = prev->insn_offset + ftrace_call_num_insn (prev);
235 bfun->level = prev->level;
236 }
237
238 return bfun;
239 }
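/* Worked example (hypothetical segments, for illustration): building
   segments A (3 insns), then B (2 insns), then C via ftrace_new_function
   yields A->number == 1 and A->insn_offset == 1, B->number == 2 and
   B->insn_offset == 4, C->number == 3 and C->insn_offset == 6.  A gap
   segment contributes exactly one instruction to the offset, per
   ftrace_call_num_insn above.  */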
240
241 /* Update the UP field of a function segment. */
242
243 static void
244 ftrace_update_caller (struct btrace_function *bfun,
245 struct btrace_function *caller,
246 enum btrace_function_flag flags)
247 {
248 if (bfun->up != NULL)
249 ftrace_debug (bfun, "updating caller");
250
251 bfun->up = caller;
252 bfun->flags = flags;
253
254 ftrace_debug (bfun, "set caller");
255 ftrace_debug (caller, "..to");
256 }
257
258 /* Fix up the caller for all segments of a function. */
259
260 static void
261 ftrace_fixup_caller (struct btrace_function *bfun,
262 struct btrace_function *caller,
263 enum btrace_function_flag flags)
264 {
265 struct btrace_function *prev, *next;
266
267 ftrace_update_caller (bfun, caller, flags);
268
269 /* Update all function segments belonging to the same function. */
270 for (prev = bfun->segment.prev; prev != NULL; prev = prev->segment.prev)
271 ftrace_update_caller (prev, caller, flags);
272
273 for (next = bfun->segment.next; next != NULL; next = next->segment.next)
274 ftrace_update_caller (next, caller, flags);
275 }
276
277 /* Add a new function segment for a call.
278 CALLER is the chronologically preceding function segment.
279 MFUN and FUN are the symbol information we have for this function. */
280
281 static struct btrace_function *
282 ftrace_new_call (struct btrace_function *caller,
283 struct minimal_symbol *mfun,
284 struct symbol *fun)
285 {
286 struct btrace_function *bfun;
287
288 bfun = ftrace_new_function (caller, mfun, fun);
289 bfun->up = caller;
290 bfun->level += 1;
291
292 ftrace_debug (bfun, "new call");
293
294 return bfun;
295 }
296
297 /* Add a new function segment for a tail call.
298 CALLER is the chronologically preceding function segment.
299 MFUN and FUN are the symbol information we have for this function. */
300
301 static struct btrace_function *
302 ftrace_new_tailcall (struct btrace_function *caller,
303 struct minimal_symbol *mfun,
304 struct symbol *fun)
305 {
306 struct btrace_function *bfun;
307
308 bfun = ftrace_new_function (caller, mfun, fun);
309 bfun->up = caller;
310 bfun->level += 1;
311 bfun->flags |= BFUN_UP_LINKS_TO_TAILCALL;
312
313 ftrace_debug (bfun, "new tail call");
314
315 return bfun;
316 }
317
318 /* Return the caller of BFUN or NULL if there is none. This function skips
319 tail calls in the call chain. */
320 static struct btrace_function *
321 ftrace_get_caller (struct btrace_function *bfun)
322 {
323 for (; bfun != NULL; bfun = bfun->up)
324 if ((bfun->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
325 return bfun->up;
326
327 return NULL;
328 }
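/* For illustration (hypothetical up chain, not from the source): given
   bar->up == foo and foo->up == main, where bar's up link is flagged
   BFUN_UP_LINKS_TO_TAILCALL, ftrace_get_caller (bar) skips the tail
   call and returns main; without the flag it would return foo.  */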
329
330 /* Find the innermost caller in the back trace of BFUN with MFUN/FUN
331 symbol information. */
332
333 static struct btrace_function *
334 ftrace_find_caller (struct btrace_function *bfun,
335 struct minimal_symbol *mfun,
336 struct symbol *fun)
337 {
338 for (; bfun != NULL; bfun = bfun->up)
339 {
340 /* Skip functions with incompatible symbol information. */
341 if (ftrace_function_switched (bfun, mfun, fun))
342 continue;
343
344 /* This is the function segment we're looking for. */
345 break;
346 }
347
348 return bfun;
349 }
350
351 /* Find the innermost caller in the back trace of BFUN, skipping all
352 function segments that do not end with a call instruction (e.g.
353 tail calls ending with a jump). */
354
355 static struct btrace_function *
356 ftrace_find_call (struct btrace_function *bfun)
357 {
358 for (; bfun != NULL; bfun = bfun->up)
359 {
360 struct btrace_insn *last;
361
362 /* Skip gaps. */
363 if (bfun->errcode != 0)
364 continue;
365
366 last = VEC_last (btrace_insn_s, bfun->insn);
367
368 if (last->iclass == BTRACE_INSN_CALL)
369 break;
370 }
371
372 return bfun;
373 }
374
375 /* Add a continuation segment for a function into which we return.
376 PREV is the chronologically preceding function segment.
377 MFUN and FUN are the symbol information we have for this function. */
378
379 static struct btrace_function *
380 ftrace_new_return (struct btrace_function *prev,
381 struct minimal_symbol *mfun,
382 struct symbol *fun)
383 {
384 struct btrace_function *bfun, *caller;
385
386 bfun = ftrace_new_function (prev, mfun, fun);
387
388 /* It is important to start at PREV's caller. Otherwise, we might find
389 PREV itself, if PREV is a recursive function. */
390 caller = ftrace_find_caller (prev->up, mfun, fun);
391 if (caller != NULL)
392 {
393 /* The caller of PREV is the preceding btrace function segment in this
394 function instance. */
395 gdb_assert (caller->segment.next == NULL);
396
397 caller->segment.next = bfun;
398 bfun->segment.prev = caller;
399
400 /* Maintain the function level. */
401 bfun->level = caller->level;
402
403 /* Maintain the call stack. */
404 bfun->up = caller->up;
405 bfun->flags = caller->flags;
406
407 ftrace_debug (bfun, "new return");
408 }
409 else
410 {
411 /* We did not find a caller. This could mean that something went
412 wrong or that the call is simply not included in the trace. */
413
414 /* Let's search for some actual call. */
415 caller = ftrace_find_call (prev->up);
416 if (caller == NULL)
417 {
418 /* There is no call in PREV's back trace. We assume that the
419 branch trace did not include it. */
420
421 /* Let's find the topmost function and add a new caller for it.
422 This should handle a series of initial tail calls. */
423 while (prev->up != NULL)
424 prev = prev->up;
425
426 bfun->level = prev->level - 1;
427
428 /* Fix up the call stack for PREV. */
429 ftrace_fixup_caller (prev, bfun, BFUN_UP_LINKS_TO_RET);
430
431 ftrace_debug (bfun, "new return - no caller");
432 }
433 else
434 {
435 /* There is a call in PREV's back trace to which we should have
436 returned but didn't. Let's start a new, separate back trace
437 from PREV's level. */
438 bfun->level = prev->level - 1;
439
440 /* We fix up the back trace for PREV but leave other function segments
441 on the same level as they are.
442 This should handle things like schedule () correctly where we're
443 switching contexts. */
444 prev->up = bfun;
445 prev->flags = BFUN_UP_LINKS_TO_RET;
446
447 ftrace_debug (bfun, "new return - unknown caller");
448 }
449 }
450
451 return bfun;
452 }
453
454 /* Add a new function segment for a function switch.
455 PREV is the chronologically preceding function segment.
456 MFUN and FUN are the symbol information we have for this function. */
457
458 static struct btrace_function *
459 ftrace_new_switch (struct btrace_function *prev,
460 struct minimal_symbol *mfun,
461 struct symbol *fun)
462 {
463 struct btrace_function *bfun;
464
 465 /* This is an unexplained function switch. We cannot really be sure about the
 466 call stack; the best we can do right now is to preserve it. */
467 bfun = ftrace_new_function (prev, mfun, fun);
468 bfun->up = prev->up;
469 bfun->flags = prev->flags;
470
471 ftrace_debug (bfun, "new switch");
472
473 return bfun;
474 }
475
476 /* Add a new function segment for a gap in the trace due to a decode error.
477 PREV is the chronologically preceding function segment.
478 ERRCODE is the format-specific error code. */
479
480 static struct btrace_function *
481 ftrace_new_gap (struct btrace_function *prev, int errcode)
482 {
483 struct btrace_function *bfun;
484
 485 /* We hijack PREV if it was empty. */
486 if (prev != NULL && prev->errcode == 0
487 && VEC_empty (btrace_insn_s, prev->insn))
488 bfun = prev;
489 else
490 bfun = ftrace_new_function (prev, NULL, NULL);
491
492 bfun->errcode = errcode;
493
494 ftrace_debug (bfun, "new gap");
495
496 return bfun;
497 }
498
499 /* Update BFUN with respect to the instruction at PC. This may create new
500 function segments.
501 Return the chronologically latest function segment, never NULL. */
502
503 static struct btrace_function *
504 ftrace_update_function (struct btrace_function *bfun, CORE_ADDR pc)
505 {
506 struct bound_minimal_symbol bmfun;
507 struct minimal_symbol *mfun;
508 struct symbol *fun;
509 struct btrace_insn *last;
510
511 /* Try to determine the function we're in. We use both types of symbols
512 to avoid surprises when we sometimes get a full symbol and sometimes
513 only a minimal symbol. */
514 fun = find_pc_function (pc);
515 bmfun = lookup_minimal_symbol_by_pc (pc);
516 mfun = bmfun.minsym;
517
518 if (fun == NULL && mfun == NULL)
519 DEBUG_FTRACE ("no symbol at %s", core_addr_to_string_nz (pc));
520
521 /* If we didn't have a function or if we had a gap before, we create one. */
522 if (bfun == NULL || bfun->errcode != 0)
523 return ftrace_new_function (bfun, mfun, fun);
524
525 /* Check the last instruction, if we have one.
526 We do this check first, since it allows us to fill in the call stack
527 links in addition to the normal flow links. */
528 last = NULL;
529 if (!VEC_empty (btrace_insn_s, bfun->insn))
530 last = VEC_last (btrace_insn_s, bfun->insn);
531
532 if (last != NULL)
533 {
534 switch (last->iclass)
535 {
536 case BTRACE_INSN_RETURN:
537 {
538 const char *fname;
539
540 /* On some systems, _dl_runtime_resolve returns to the resolved
541 function instead of jumping to it. From our perspective,
542 however, this is a tailcall.
543 If we treated it as return, we wouldn't be able to find the
544 resolved function in our stack back trace. Hence, we would
545 lose the current stack back trace and start anew with an empty
546 back trace. When the resolved function returns, we would then
547 create a stack back trace with the same function names but
548 different frame id's. This will confuse stepping. */
549 fname = ftrace_print_function_name (bfun);
550 if (strcmp (fname, "_dl_runtime_resolve") == 0)
551 return ftrace_new_tailcall (bfun, mfun, fun);
552
553 return ftrace_new_return (bfun, mfun, fun);
554 }
555
556 case BTRACE_INSN_CALL:
557 /* Ignore calls to the next instruction. They are used for PIC. */
558 if (last->pc + last->size == pc)
559 break;
560
561 return ftrace_new_call (bfun, mfun, fun);
562
563 case BTRACE_INSN_JUMP:
564 {
565 CORE_ADDR start;
566
567 start = get_pc_function_start (pc);
568
569 /* A jump to the start of a function is (typically) a tail call. */
570 if (start == pc)
571 return ftrace_new_tailcall (bfun, mfun, fun);
572
573 /* If we can't determine the function for PC, we treat a jump at
574 the end of the block as tail call if we're switching functions
575 and as an intra-function branch if we don't. */
576 if (start == 0 && ftrace_function_switched (bfun, mfun, fun))
577 return ftrace_new_tailcall (bfun, mfun, fun);
578
579 break;
580 }
581 }
582 }
583
584 /* Check if we're switching functions for some other reason. */
585 if (ftrace_function_switched (bfun, mfun, fun))
586 {
587 DEBUG_FTRACE ("switching from %s in %s at %s",
588 ftrace_print_insn_addr (last),
589 ftrace_print_function_name (bfun),
590 ftrace_print_filename (bfun));
591
592 return ftrace_new_switch (bfun, mfun, fun);
593 }
594
595 return bfun;
596 }
597
 598 /* Add INSN to BFUN's instructions. */
599
600 static void
601 ftrace_update_insns (struct btrace_function *bfun,
602 const struct btrace_insn *insn)
603 {
604 VEC_safe_push (btrace_insn_s, bfun->insn, insn);
605
606 if (record_debug > 1)
607 ftrace_debug (bfun, "update insn");
608 }
609
610 /* Classify the instruction at PC. */
611
612 static enum btrace_insn_class
613 ftrace_classify_insn (struct gdbarch *gdbarch, CORE_ADDR pc)
614 {
615 enum btrace_insn_class iclass;
616
617 iclass = BTRACE_INSN_OTHER;
618 TRY
619 {
620 if (gdbarch_insn_is_call (gdbarch, pc))
621 iclass = BTRACE_INSN_CALL;
622 else if (gdbarch_insn_is_ret (gdbarch, pc))
623 iclass = BTRACE_INSN_RETURN;
624 else if (gdbarch_insn_is_jump (gdbarch, pc))
625 iclass = BTRACE_INSN_JUMP;
626 }
627 CATCH (error, RETURN_MASK_ERROR)
628 {
629 }
630 END_CATCH
631
632 return iclass;
633 }
634
635 /* Try to match the back trace at LHS to the back trace at RHS. Returns the
636 number of matching function segments or zero if the back traces do not
637 match. */
638
639 static int
640 ftrace_match_backtrace (struct btrace_function *lhs,
641 struct btrace_function *rhs)
642 {
643 int matches;
644
645 for (matches = 0; lhs != NULL && rhs != NULL; ++matches)
646 {
647 if (ftrace_function_switched (lhs, rhs->msym, rhs->sym))
648 return 0;
649
650 lhs = ftrace_get_caller (lhs);
651 rhs = ftrace_get_caller (rhs);
652 }
653
654 return matches;
655 }
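/* For illustration (hypothetical back traces): with LHS's back trace
   main > foo > bar and RHS's back trace foo > bar, starting at the two
   bar segments compares bar/bar and foo/foo successfully before RHS
   runs out of callers, so the function returns 2.  A symbol mismatch at
   any level returns 0 instead.  */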
656
657 /* Add ADJUSTMENT to the level of BFUN and succeeding function segments. */
658
659 static void
660 ftrace_fixup_level (struct btrace_function *bfun, int adjustment)
661 {
662 if (adjustment == 0)
663 return;
664
665 DEBUG_FTRACE ("fixup level (%+d)", adjustment);
666 ftrace_debug (bfun, "..bfun");
667
668 for (; bfun != NULL; bfun = bfun->flow.next)
669 bfun->level += adjustment;
670 }
671
672 /* Recompute the global level offset. Traverse the function trace and compute
673 the global level offset as the negative of the minimal function level. */
674
675 static void
676 ftrace_compute_global_level_offset (struct btrace_thread_info *btinfo)
677 {
678 struct btrace_function *bfun, *end;
679 int level;
680
681 if (btinfo == NULL)
682 return;
683
684 bfun = btinfo->begin;
685 if (bfun == NULL)
686 return;
687
688 /* The last function segment contains the current instruction, which is not
689 really part of the trace. If it contains just this one instruction, we
 690 stop when we reach it; otherwise, we let the loop below run to the end. */
691 end = btinfo->end;
692 if (VEC_length (btrace_insn_s, end->insn) > 1)
693 end = NULL;
694
695 level = INT_MAX;
696 for (; bfun != end; bfun = bfun->flow.next)
697 level = std::min (level, bfun->level);
698
699 DEBUG_FTRACE ("setting global level offset: %d", -level);
700 btinfo->level = -level;
701 }
702
703 /* Connect the function segments PREV and NEXT in a bottom-to-top walk as in
704 ftrace_connect_backtrace. */
705
706 static void
707 ftrace_connect_bfun (struct btrace_function *prev,
708 struct btrace_function *next)
709 {
710 DEBUG_FTRACE ("connecting...");
711 ftrace_debug (prev, "..prev");
712 ftrace_debug (next, "..next");
713
714 /* The function segments are not yet connected. */
715 gdb_assert (prev->segment.next == NULL);
716 gdb_assert (next->segment.prev == NULL);
717
718 prev->segment.next = next;
719 next->segment.prev = prev;
720
721 /* We may have moved NEXT to a different function level. */
722 ftrace_fixup_level (next, prev->level - next->level);
723
724 /* If we run out of back trace for one, let's use the other's. */
725 if (prev->up == NULL)
726 {
727 if (next->up != NULL)
728 {
729 DEBUG_FTRACE ("using next's callers");
730 ftrace_fixup_caller (prev, next->up, next->flags);
731 }
732 }
733 else if (next->up == NULL)
734 {
735 if (prev->up != NULL)
736 {
737 DEBUG_FTRACE ("using prev's callers");
738 ftrace_fixup_caller (next, prev->up, prev->flags);
739 }
740 }
741 else
742 {
743 /* PREV may have a tailcall caller, NEXT can't. If it does, fixup the up
744 link to add the tail callers to NEXT's back trace.
745
746 This removes NEXT->UP from NEXT's back trace. It will be added back
747 when connecting NEXT and PREV's callers - provided they exist.
748
749 If PREV's back trace consists of a series of tail calls without an
750 actual call, there will be no further connection and NEXT's caller will
751 be removed for good. To catch this case, we handle it here and connect
752 the top of PREV's back trace to NEXT's caller. */
753 if ((prev->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
754 {
755 struct btrace_function *caller;
756 btrace_function_flags flags;
757
758 /* We checked NEXT->UP above so CALLER can't be NULL. */
759 caller = next->up;
760 flags = next->flags;
761
762 DEBUG_FTRACE ("adding prev's tail calls to next");
763
764 ftrace_fixup_caller (next, prev->up, prev->flags);
765
766 for (prev = prev->up; prev != NULL; prev = prev->up)
767 {
768 /* At the end of PREV's back trace, continue with CALLER. */
769 if (prev->up == NULL)
770 {
771 DEBUG_FTRACE ("fixing up link for tailcall chain");
772 ftrace_debug (prev, "..top");
773 ftrace_debug (caller, "..up");
774
775 ftrace_fixup_caller (prev, caller, flags);
776
777 /* If we skipped any tail calls, this may move CALLER to a
778 different function level.
779
780 Note that changing CALLER's level is only OK because we
781 know that this is the last iteration of the bottom-to-top
782 walk in ftrace_connect_backtrace.
783
784 Otherwise we will fix up CALLER's level when we connect it
785 to PREV's caller in the next iteration. */
786 ftrace_fixup_level (caller, prev->level - caller->level - 1);
787 break;
788 }
789
790 /* There's nothing to do if we find a real call. */
791 if ((prev->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
792 {
793 DEBUG_FTRACE ("will fix up link in next iteration");
794 break;
795 }
796 }
797 }
798 }
799 }
800
801 /* Connect function segments on the same level in the back trace at LHS and RHS.
802 The back traces at LHS and RHS are expected to match according to
803 ftrace_match_backtrace. */
804
805 static void
806 ftrace_connect_backtrace (struct btrace_function *lhs,
807 struct btrace_function *rhs)
808 {
809 while (lhs != NULL && rhs != NULL)
810 {
811 struct btrace_function *prev, *next;
812
813 gdb_assert (!ftrace_function_switched (lhs, rhs->msym, rhs->sym));
814
815 /* Connecting LHS and RHS may change the up link. */
816 prev = lhs;
817 next = rhs;
818
819 lhs = ftrace_get_caller (lhs);
820 rhs = ftrace_get_caller (rhs);
821
822 ftrace_connect_bfun (prev, next);
823 }
824 }
825
826 /* Bridge the gap between two function segments left and right of a gap if their
827 respective back traces match in at least MIN_MATCHES functions.
828
829 Returns non-zero if the gap could be bridged, zero otherwise. */
830
831 static int
832 ftrace_bridge_gap (struct btrace_function *lhs, struct btrace_function *rhs,
833 int min_matches)
834 {
835 struct btrace_function *best_l, *best_r, *cand_l, *cand_r;
836 int best_matches;
837
838 DEBUG_FTRACE ("checking gap at insn %u (req matches: %d)",
839 rhs->insn_offset - 1, min_matches);
840
841 best_matches = 0;
842 best_l = NULL;
843 best_r = NULL;
844
845 /* We search the back traces of LHS and RHS for valid connections and connect
 846 the two function segments that give the longest combined back trace. */
847
848 for (cand_l = lhs; cand_l != NULL; cand_l = ftrace_get_caller (cand_l))
849 for (cand_r = rhs; cand_r != NULL; cand_r = ftrace_get_caller (cand_r))
850 {
851 int matches;
852
853 matches = ftrace_match_backtrace (cand_l, cand_r);
854 if (best_matches < matches)
855 {
856 best_matches = matches;
857 best_l = cand_l;
858 best_r = cand_r;
859 }
860 }
861
862 /* We need at least MIN_MATCHES matches. */
863 gdb_assert (min_matches > 0);
864 if (best_matches < min_matches)
865 return 0;
866
867 DEBUG_FTRACE ("..matches: %d", best_matches);
868
869 /* We will fix up the level of BEST_R and succeeding function segments such
870 that BEST_R's level matches BEST_L's when we connect BEST_L to BEST_R.
871
872 This will ignore the level of RHS and following if BEST_R != RHS. I.e. if
873 BEST_R is a successor of RHS in the back trace of RHS (phases 1 and 3).
874
875 To catch this, we already fix up the level here where we can start at RHS
876 instead of at BEST_R. We will ignore the level fixup when connecting
877 BEST_L to BEST_R as they will already be on the same level. */
878 ftrace_fixup_level (rhs, best_l->level - best_r->level);
879
880 ftrace_connect_backtrace (best_l, best_r);
881
882 return best_matches;
883 }
884
885 /* Try to bridge gaps due to overflow or decode errors by connecting the
886 function segments that are separated by the gap. */
887
888 static void
889 btrace_bridge_gaps (struct thread_info *tp, VEC (bfun_s) **gaps)
890 {
891 VEC (bfun_s) *remaining;
892 struct cleanup *old_chain;
893 int min_matches;
894
895 DEBUG ("bridge gaps");
896
897 remaining = NULL;
898 old_chain = make_cleanup (VEC_cleanup (bfun_s), &remaining);
899
 900 /* We require a minimum number of matches for bridging a gap. The number of
901 required matches will be lowered with each iteration.
902
903 The more matches the higher our confidence that the bridging is correct.
904 For big gaps or small traces, however, it may not be feasible to require a
905 high number of matches. */
906 for (min_matches = 5; min_matches > 0; --min_matches)
907 {
908 /* Let's try to bridge as many gaps as we can. In some cases, we need to
909 skip a gap and revisit it again after we closed later gaps. */
910 while (!VEC_empty (bfun_s, *gaps))
911 {
912 struct btrace_function *gap;
913 unsigned int idx;
914
915 for (idx = 0; VEC_iterate (bfun_s, *gaps, idx, gap); ++idx)
916 {
917 struct btrace_function *lhs, *rhs;
918 int bridged;
919
920 /* We may have a sequence of gaps if we run from one error into
921 the next as we try to re-sync onto the trace stream. Ignore
922 all but the leftmost gap in such a sequence.
923
924 Also ignore gaps at the beginning of the trace. */
925 lhs = gap->flow.prev;
926 if (lhs == NULL || lhs->errcode != 0)
927 continue;
928
929 /* Skip gaps to the right. */
930 for (rhs = gap->flow.next; rhs != NULL; rhs = rhs->flow.next)
931 if (rhs->errcode == 0)
932 break;
933
934 /* Ignore gaps at the end of the trace. */
935 if (rhs == NULL)
936 continue;
937
938 bridged = ftrace_bridge_gap (lhs, rhs, min_matches);
939
940 /* Keep track of gaps we were not able to bridge and try again.
941 If we just pushed them to the end of GAPS we would risk an
942 infinite loop in case we simply cannot bridge a gap. */
943 if (bridged == 0)
944 VEC_safe_push (bfun_s, remaining, gap);
945 }
946
947 /* Let's see if we made any progress. */
948 if (VEC_length (bfun_s, remaining) == VEC_length (bfun_s, *gaps))
949 break;
950
951 VEC_free (bfun_s, *gaps);
952
953 *gaps = remaining;
954 remaining = NULL;
955 }
956
957 /* We get here if either GAPS is empty or if GAPS equals REMAINING. */
958 if (VEC_empty (bfun_s, *gaps))
959 break;
960
961 VEC_free (bfun_s, remaining);
962 }
963
964 do_cleanups (old_chain);
965
966 /* We may omit this in some cases. Not sure it is worth the extra
967 complication, though. */
968 ftrace_compute_global_level_offset (&tp->btrace);
969 }
970
971 /* Compute the function branch trace from BTS trace. */
972
973 static void
974 btrace_compute_ftrace_bts (struct thread_info *tp,
975 const struct btrace_data_bts *btrace,
976 VEC (bfun_s) **gaps)
977 {
978 struct btrace_thread_info *btinfo;
979 struct btrace_function *begin, *end;
980 struct gdbarch *gdbarch;
981 unsigned int blk;
982 int level;
983
984 gdbarch = target_gdbarch ();
985 btinfo = &tp->btrace;
986 begin = btinfo->begin;
987 end = btinfo->end;
988 level = begin != NULL ? -btinfo->level : INT_MAX;
989 blk = VEC_length (btrace_block_s, btrace->blocks);
990
991 while (blk != 0)
992 {
993 btrace_block_s *block;
994 CORE_ADDR pc;
995
996 blk -= 1;
997
998 block = VEC_index (btrace_block_s, btrace->blocks, blk);
999 pc = block->begin;
1000
1001 for (;;)
1002 {
1003 struct btrace_insn insn;
1004 int size;
1005
1006 /* We should hit the end of the block. Warn if we went too far. */
1007 if (block->end < pc)
1008 {
1009 /* Indicate the gap in the trace. */
1010 end = ftrace_new_gap (end, BDE_BTS_OVERFLOW);
1011 if (begin == NULL)
1012 begin = end;
1013
1014 VEC_safe_push (bfun_s, *gaps, end);
1015
1016 warning (_("Recorded trace may be corrupted at instruction "
1017 "%u (pc = %s)."), end->insn_offset - 1,
1018 core_addr_to_string_nz (pc));
1019
1020 break;
1021 }
1022
1023 end = ftrace_update_function (end, pc);
1024 if (begin == NULL)
1025 begin = end;
1026
1027 /* Maintain the function level offset.
1028 For all but the last block, we do it here. */
1029 if (blk != 0)
1030 level = std::min (level, end->level);
1031
1032 size = 0;
1033 TRY
1034 {
1035 size = gdb_insn_length (gdbarch, pc);
1036 }
1037 CATCH (error, RETURN_MASK_ERROR)
1038 {
1039 }
1040 END_CATCH
1041
1042 insn.pc = pc;
1043 insn.size = size;
1044 insn.iclass = ftrace_classify_insn (gdbarch, pc);
1045 insn.flags = 0;
1046
1047 ftrace_update_insns (end, &insn);
1048
1049 /* We're done once we pushed the instruction at the end. */
1050 if (block->end == pc)
1051 break;
1052
1053 /* We can't continue if we fail to compute the size. */
1054 if (size <= 0)
1055 {
1056 /* Indicate the gap in the trace. We just added INSN so we're
1057 not at the beginning. */
1058 end = ftrace_new_gap (end, BDE_BTS_INSN_SIZE);
1059
1060 VEC_safe_push (bfun_s, *gaps, end);
1061
1062 warning (_("Recorded trace may be incomplete at instruction %u "
1063 "(pc = %s)."), end->insn_offset - 1,
1064 core_addr_to_string_nz (pc));
1065
1066 break;
1067 }
1068
1069 pc += size;
1070
1071 /* Maintain the function level offset.
1072 For the last block, we do it here to not consider the last
1073 instruction.
1074 Since the last instruction corresponds to the current instruction
1075 and is not really part of the execution history, it shouldn't
1076 affect the level. */
1077 if (blk == 0)
1078 level = std::min (level, end->level);
1079 }
1080 }
1081
1082 btinfo->begin = begin;
1083 btinfo->end = end;
1084
1085 /* LEVEL is the minimal function level of all btrace function segments.
1086 Define the global level offset to -LEVEL so all function levels are
1087 normalized to start at zero. */
1088 btinfo->level = -level;
1089 }
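/* Worked example (assumed levels, for illustration): if the decoded
   segments end up at levels -2, -1 and 0, LEVEL is -2 and btinfo->level
   becomes 2, so the normalized function levels presented to the user
   start at zero.  */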
1090
1091 #if defined (HAVE_LIBIPT)
1092
1093 static enum btrace_insn_class
1094 pt_reclassify_insn (enum pt_insn_class iclass)
1095 {
1096 switch (iclass)
1097 {
1098 case ptic_call:
1099 return BTRACE_INSN_CALL;
1100
1101 case ptic_return:
1102 return BTRACE_INSN_RETURN;
1103
1104 case ptic_jump:
1105 return BTRACE_INSN_JUMP;
1106
1107 default:
1108 return BTRACE_INSN_OTHER;
1109 }
1110 }
1111
1112 /* Return the btrace instruction flags for INSN. */
1113
1114 static btrace_insn_flags
1115 pt_btrace_insn_flags (const struct pt_insn &insn)
1116 {
1117 btrace_insn_flags flags = 0;
1118
1119 if (insn.speculative)
1120 flags |= BTRACE_INSN_FLAG_SPECULATIVE;
1121
1122 return flags;
1123 }
1124
1125 /* Return the btrace instruction for INSN. */
1126
1127 static btrace_insn
1128 pt_btrace_insn (const struct pt_insn &insn)
1129 {
1130 return {(CORE_ADDR) insn.ip, (gdb_byte) insn.size,
1131 pt_reclassify_insn (insn.iclass),
1132 pt_btrace_insn_flags (insn)};
1133 }
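/* Note on the commit subject ("Don't memset non-POD types: struct
   btrace_insn"): pt_btrace_insn builds the instruction via aggregate
   initialization.  A hypothetical sketch of the pattern this avoids:

     struct btrace_insn insn;
     memset (&insn, 0, sizeof (insn));   undefined for non-POD types
     insn.pc = (CORE_ADDR) insn_ip;

   Brace initialization is well-defined C++ even if btrace_insn gains
   non-trivial members later.  */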
1134
1135
1136 /* Add function branch trace using DECODER. */
1137
1138 static void
1139 ftrace_add_pt (struct pt_insn_decoder *decoder,
1140 struct btrace_function **pbegin,
1141 struct btrace_function **pend, int *plevel,
1142 VEC (bfun_s) **gaps)
1143 {
1144 struct btrace_function *begin, *end, *upd;
1145 uint64_t offset;
1146 int errcode;
1147
1148 begin = *pbegin;
1149 end = *pend;
1150 for (;;)
1151 {
1152 struct pt_insn insn;
1153
1154 errcode = pt_insn_sync_forward (decoder);
1155 if (errcode < 0)
1156 {
1157 if (errcode != -pte_eos)
1158 warning (_("Failed to synchronize onto the Intel Processor "
1159 "Trace stream: %s."), pt_errstr (pt_errcode (errcode)));
1160 break;
1161 }
1162
1163 for (;;)
1164 {
 1165 errcode = pt_insn_next (decoder, &insn, sizeof (insn));
1166 if (errcode < 0)
1167 break;
1168
1169 /* Look for gaps in the trace - unless we're at the beginning. */
1170 if (begin != NULL)
1171 {
1172 /* Tracing is disabled and re-enabled each time we enter the
 1173 kernel. Most of the time, we continue from the same instruction
 1174 where we stopped before. This is indicated via the RESUMED instruction
1175 flag. The ENABLED instruction flag means that we continued
1176 from some other instruction. Indicate this as a trace gap. */
1177 if (insn.enabled)
1178 {
1179 *pend = end = ftrace_new_gap (end, BDE_PT_DISABLED);
1180
1181 VEC_safe_push (bfun_s, *gaps, end);
1182
1183 pt_insn_get_offset (decoder, &offset);
1184
1185 warning (_("Non-contiguous trace at instruction %u (offset "
1186 "= 0x%" PRIx64 ", pc = 0x%" PRIx64 ")."),
1187 end->insn_offset - 1, offset, insn.ip);
1188 }
1189 }
1190
1191 /* Indicate trace overflows. */
1192 if (insn.resynced)
1193 {
1194 *pend = end = ftrace_new_gap (end, BDE_PT_OVERFLOW);
1195 if (begin == NULL)
1196 *pbegin = begin = end;
1197
1198 VEC_safe_push (bfun_s, *gaps, end);
1199
1200 pt_insn_get_offset (decoder, &offset);
1201
1202 warning (_("Overflow at instruction %u (offset = 0x%" PRIx64
1203 ", pc = 0x%" PRIx64 ")."), end->insn_offset - 1,
1204 offset, insn.ip);
1205 }
1206
1207 upd = ftrace_update_function (end, insn.ip);
1208 if (upd != end)
1209 {
1210 *pend = end = upd;
1211
1212 if (begin == NULL)
1213 *pbegin = begin = upd;
1214 }
1215
1216 /* Maintain the function level offset. */
1217 *plevel = std::min (*plevel, end->level);
1218
1219 btrace_insn btinsn = pt_btrace_insn (insn);
1220 ftrace_update_insns (end, &btinsn);
1221 }
1222
1223 if (errcode == -pte_eos)
1224 break;
1225
1226 /* Indicate the gap in the trace. */
1227 *pend = end = ftrace_new_gap (end, errcode);
1228 if (begin == NULL)
1229 *pbegin = begin = end;
1230
1231 VEC_safe_push (bfun_s, *gaps, end);
1232
1233 pt_insn_get_offset (decoder, &offset);
1234
1235 warning (_("Decode error (%d) at instruction %u (offset = 0x%" PRIx64
1236 ", pc = 0x%" PRIx64 "): %s."), errcode, end->insn_offset - 1,
1237 offset, insn.ip, pt_errstr (pt_errcode (errcode)));
1238 }
1239 }
1240
1241 /* A callback function to allow the trace decoder to read the inferior's
1242 memory. */
1243
1244 static int
1245 btrace_pt_readmem_callback (gdb_byte *buffer, size_t size,
1246 const struct pt_asid *asid, uint64_t pc,
1247 void *context)
1248 {
1249 int result, errcode;
1250
1251 result = (int) size;
1252 TRY
1253 {
1254 errcode = target_read_code ((CORE_ADDR) pc, buffer, size);
1255 if (errcode != 0)
1256 result = -pte_nomap;
1257 }
1258 CATCH (error, RETURN_MASK_ERROR)
1259 {
1260 result = -pte_nomap;
1261 }
1262 END_CATCH
1263
1264 return result;
1265 }
1266
1267 /* Translate the vendor from one enum to another. */
1268
1269 static enum pt_cpu_vendor
1270 pt_translate_cpu_vendor (enum btrace_cpu_vendor vendor)
1271 {
1272 switch (vendor)
1273 {
1274 default:
1275 return pcv_unknown;
1276
1277 case CV_INTEL:
1278 return pcv_intel;
1279 }
1280 }
1281
1282 /* Finalize the function branch trace after decode. */
1283
1284 static void btrace_finalize_ftrace_pt (struct pt_insn_decoder *decoder,
1285 struct thread_info *tp, int level)
1286 {
1287 pt_insn_free_decoder (decoder);
1288
1289 /* LEVEL is the minimal function level of all btrace function segments.
1290 Define the global level offset to -LEVEL so all function levels are
1291 normalized to start at zero. */
1292 tp->btrace.level = -level;
1293
1294 /* Add a single last instruction entry for the current PC.
1295 This allows us to compute the backtrace at the current PC using both
1296 standard unwind and btrace unwind.
1297 This extra entry is ignored by all record commands. */
1298 btrace_add_pc (tp);
1299 }
1300
1301 /* Compute the function branch trace from Intel Processor Trace
1302 format. */
1303
1304 static void
1305 btrace_compute_ftrace_pt (struct thread_info *tp,
1306 const struct btrace_data_pt *btrace,
1307 VEC (bfun_s) **gaps)
1308 {
1309 struct btrace_thread_info *btinfo;
1310 struct pt_insn_decoder *decoder;
1311 struct pt_config config;
1312 int level, errcode;
1313
1314 if (btrace->size == 0)
1315 return;
1316
1317 btinfo = &tp->btrace;
1318 level = btinfo->begin != NULL ? -btinfo->level : INT_MAX;
1319
 1320 pt_config_init (&config);
1321 config.begin = btrace->data;
1322 config.end = btrace->data + btrace->size;
1323
1324 config.cpu.vendor = pt_translate_cpu_vendor (btrace->config.cpu.vendor);
1325 config.cpu.family = btrace->config.cpu.family;
1326 config.cpu.model = btrace->config.cpu.model;
1327 config.cpu.stepping = btrace->config.cpu.stepping;
1328
1329 errcode = pt_cpu_errata (&config.errata, &config.cpu);
1330 if (errcode < 0)
1331 error (_("Failed to configure the Intel Processor Trace decoder: %s."),
1332 pt_errstr (pt_errcode (errcode)));
1333
1334 decoder = pt_insn_alloc_decoder (&config);
1335 if (decoder == NULL)
1336 error (_("Failed to allocate the Intel Processor Trace decoder."));
1337
1338 TRY
1339 {
1340 struct pt_image *image;
1341
 1342 image = pt_insn_get_image (decoder);
1343 if (image == NULL)
1344 error (_("Failed to configure the Intel Processor Trace decoder."));
1345
 1346 errcode = pt_image_set_callback (image, btrace_pt_readmem_callback, NULL);
1347 if (errcode < 0)
1348 error (_("Failed to configure the Intel Processor Trace decoder: "
1349 "%s."), pt_errstr (pt_errcode (errcode)));
1350
1351 ftrace_add_pt (decoder, &btinfo->begin, &btinfo->end, &level, gaps);
1352 }
1353 CATCH (error, RETURN_MASK_ALL)
1354 {
1355 /* Indicate a gap in the trace if we quit trace processing. */
1356 if (error.reason == RETURN_QUIT && btinfo->end != NULL)
1357 {
1358 btinfo->end = ftrace_new_gap (btinfo->end, BDE_PT_USER_QUIT);
1359
1360 VEC_safe_push (bfun_s, *gaps, btinfo->end);
1361 }
1362
1363 btrace_finalize_ftrace_pt (decoder, tp, level);
1364
1365 throw_exception (error);
1366 }
1367 END_CATCH
1368
1369 btrace_finalize_ftrace_pt (decoder, tp, level);
1370 }
1371
1372 #else /* defined (HAVE_LIBIPT) */
1373
1374 static void
1375 btrace_compute_ftrace_pt (struct thread_info *tp,
1376 const struct btrace_data_pt *btrace,
1377 VEC (bfun_s) **gaps)
1378 {
1379 internal_error (__FILE__, __LINE__, _("Unexpected branch trace format."));
1380 }
1381
1382 #endif /* defined (HAVE_LIBIPT) */
1383
1384 /* Compute the function branch trace from a block branch trace BTRACE for
 1385 thread TP. */
1386
1387 static void
1388 btrace_compute_ftrace_1 (struct thread_info *tp, struct btrace_data *btrace,
1389 VEC (bfun_s) **gaps)
1390 {
1391 DEBUG ("compute ftrace");
1392
1393 switch (btrace->format)
1394 {
1395 case BTRACE_FORMAT_NONE:
1396 return;
1397
1398 case BTRACE_FORMAT_BTS:
1399 btrace_compute_ftrace_bts (tp, &btrace->variant.bts, gaps);
1400 return;
1401
1402 case BTRACE_FORMAT_PT:
1403 btrace_compute_ftrace_pt (tp, &btrace->variant.pt, gaps);
1404 return;
1405 }
1406
 1407 internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
1408 }
1409
1410 static void
1411 btrace_finalize_ftrace (struct thread_info *tp, VEC (bfun_s) **gaps)
1412 {
1413 if (!VEC_empty (bfun_s, *gaps))
1414 {
1415 tp->btrace.ngaps += VEC_length (bfun_s, *gaps);
1416 btrace_bridge_gaps (tp, gaps);
1417 }
1418 }
1419
1420 static void
1421 btrace_compute_ftrace (struct thread_info *tp, struct btrace_data *btrace)
1422 {
1423 VEC (bfun_s) *gaps;
1424 struct cleanup *old_chain;
1425
1426 gaps = NULL;
1427 old_chain = make_cleanup (VEC_cleanup (bfun_s), &gaps);
1428
1429 TRY
1430 {
1431 btrace_compute_ftrace_1 (tp, btrace, &gaps);
1432 }
1433 CATCH (error, RETURN_MASK_ALL)
1434 {
1435 btrace_finalize_ftrace (tp, &gaps);
1436
1437 throw_exception (error);
1438 }
1439 END_CATCH
1440
1441 btrace_finalize_ftrace (tp, &gaps);
1442
1443 do_cleanups (old_chain);
1444 }
1445
1446 /* Add an entry for the current PC. */
1447
1448 static void
1449 btrace_add_pc (struct thread_info *tp)
1450 {
1451 struct btrace_data btrace;
1452 struct btrace_block *block;
1453 struct regcache *regcache;
1454 struct cleanup *cleanup;
1455 CORE_ADDR pc;
1456
1457 regcache = get_thread_regcache (tp->ptid);
1458 pc = regcache_read_pc (regcache);
1459
1460 btrace_data_init (&btrace);
1461 btrace.format = BTRACE_FORMAT_BTS;
1462 btrace.variant.bts.blocks = NULL;
1463
1464 cleanup = make_cleanup_btrace_data (&btrace);
1465
1466 block = VEC_safe_push (btrace_block_s, btrace.variant.bts.blocks, NULL);
1467 block->begin = pc;
1468 block->end = pc;
1469
1470 btrace_compute_ftrace (tp, &btrace);
1471
1472 do_cleanups (cleanup);
1473 }
1474
1475 /* See btrace.h. */
1476
1477 void
1478 btrace_enable (struct thread_info *tp, const struct btrace_config *conf)
1479 {
1480 if (tp->btrace.target != NULL)
1481 return;
1482
1483 #if !defined (HAVE_LIBIPT)
1484 if (conf->format == BTRACE_FORMAT_PT)
1485 error (_("GDB does not support Intel Processor Trace."));
1486 #endif /* !defined (HAVE_LIBIPT) */
1487
1488 if (!target_supports_btrace (conf->format))
1489 error (_("Target does not support branch tracing."));
1490
1491 DEBUG ("enable thread %s (%s)", print_thread_id (tp),
1492 target_pid_to_str (tp->ptid));
1493
1494 tp->btrace.target = target_enable_btrace (tp->ptid, conf);
1495
1496 /* We're done if we failed to enable tracing. */
1497 if (tp->btrace.target == NULL)
1498 return;
1499
1500 /* We need to undo the enable in case of errors. */
1501 TRY
1502 {
1503 /* Add an entry for the current PC so we start tracing from where we
1504 enabled it.
1505
1506 If we can't access TP's registers, TP is most likely running. In this
1507 case, we can't really say where tracing was enabled so it should be
1508 safe to simply skip this step.
1509
1510 This is not relevant for BTRACE_FORMAT_PT since the trace will already
1511 start at the PC at which tracing was enabled. */
1512 if (conf->format != BTRACE_FORMAT_PT
1513 && can_access_registers_ptid (tp->ptid))
1514 btrace_add_pc (tp);
1515 }
1516 CATCH (exception, RETURN_MASK_ALL)
1517 {
1518 btrace_disable (tp);
1519
1520 throw_exception (exception);
1521 }
1522 END_CATCH
1523 }
1524
1525 /* See btrace.h. */
1526
1527 const struct btrace_config *
1528 btrace_conf (const struct btrace_thread_info *btinfo)
1529 {
1530 if (btinfo->target == NULL)
1531 return NULL;
1532
1533 return target_btrace_conf (btinfo->target);
1534 }
1535
1536 /* See btrace.h. */
1537
1538 void
1539 btrace_disable (struct thread_info *tp)
1540 {
1541 struct btrace_thread_info *btp = &tp->btrace;
1542 int errcode = 0;
1543
1544 if (btp->target == NULL)
1545 return;
1546
1547 DEBUG ("disable thread %s (%s)", print_thread_id (tp),
1548 target_pid_to_str (tp->ptid));
1549
1550 target_disable_btrace (btp->target);
1551 btp->target = NULL;
1552
1553 btrace_clear (tp);
1554 }
1555
1556 /* See btrace.h. */
1557
1558 void
1559 btrace_teardown (struct thread_info *tp)
1560 {
1561 struct btrace_thread_info *btp = &tp->btrace;
1562 int errcode = 0;
1563
1564 if (btp->target == NULL)
1565 return;
1566
1567 DEBUG ("teardown thread %s (%s)", print_thread_id (tp),
1568 target_pid_to_str (tp->ptid));
1569
1570 target_teardown_btrace (btp->target);
1571 btp->target = NULL;
1572
1573 btrace_clear (tp);
1574 }
1575
1576 /* Stitch branch trace in BTS format. */
1577
1578 static int
1579 btrace_stitch_bts (struct btrace_data_bts *btrace, struct thread_info *tp)
1580 {
1581 struct btrace_thread_info *btinfo;
1582 struct btrace_function *last_bfun;
1583 struct btrace_insn *last_insn;
1584 btrace_block_s *first_new_block;
1585
1586 btinfo = &tp->btrace;
1587 last_bfun = btinfo->end;
1588 gdb_assert (last_bfun != NULL);
1589 gdb_assert (!VEC_empty (btrace_block_s, btrace->blocks));
1590
1591 /* If the existing trace ends with a gap, we just glue the traces
1592 together. We need to drop the last (i.e. chronologically first) block
 1593 of the new trace, though, since we can't fill in the start address. */
1594 if (VEC_empty (btrace_insn_s, last_bfun->insn))
1595 {
1596 VEC_pop (btrace_block_s, btrace->blocks);
1597 return 0;
1598 }
1599
1600 /* Beware that block trace starts with the most recent block, so the
1601 chronologically first block in the new trace is the last block in
1602 the new trace's block vector. */
1603 first_new_block = VEC_last (btrace_block_s, btrace->blocks);
1604 last_insn = VEC_last (btrace_insn_s, last_bfun->insn);
1605
1606 /* If the current PC at the end of the block is the same as in our current
1607 trace, there are two explanations:
1608 1. we executed the instruction and some branch brought us back.
1609 2. we have not made any progress.
1610 In the first case, the delta trace vector should contain at least two
1611 entries.
1612 In the second case, the delta trace vector should contain exactly one
1613 entry for the partial block containing the current PC. Remove it. */
1614 if (first_new_block->end == last_insn->pc
1615 && VEC_length (btrace_block_s, btrace->blocks) == 1)
1616 {
1617 VEC_pop (btrace_block_s, btrace->blocks);
1618 return 0;
1619 }
1620
1621 DEBUG ("stitching %s to %s", ftrace_print_insn_addr (last_insn),
1622 core_addr_to_string_nz (first_new_block->end));
1623
1624 /* Do a simple sanity check to make sure we don't accidentally end up
1625 with a bad block. This should not occur in practice. */
1626 if (first_new_block->end < last_insn->pc)
1627 {
1628 warning (_("Error while trying to read delta trace. Falling back to "
1629 "a full read."));
1630 return -1;
1631 }
1632
1633 /* We adjust the last block to start at the end of our current trace. */
1634 gdb_assert (first_new_block->begin == 0);
1635 first_new_block->begin = last_insn->pc;
1636
1637 /* We simply pop the last insn so we can insert it again as part of
1638 the normal branch trace computation.
1639 Since instruction iterators are based on indices in the instructions
1640 vector, we don't leave any pointers dangling. */
1641 DEBUG ("pruning insn at %s for stitching",
1642 ftrace_print_insn_addr (last_insn));
1643
1644 VEC_pop (btrace_insn_s, last_bfun->insn);
1645
1646 /* The instructions vector may become empty temporarily if this has
1647 been the only instruction in this function segment.
1648 This violates the invariant but will be remedied shortly by
1649 btrace_compute_ftrace when we add the new trace. */
1650
1651 /* The only case where this would hurt is if the entire trace consisted
1652 of just that one instruction. If we remove it, we might turn the now
1653 empty btrace function segment into a gap. But we don't want gaps at
1654 the beginning. To avoid this, we remove the entire old trace. */
1655 if (last_bfun == btinfo->begin && VEC_empty (btrace_insn_s, last_bfun->insn))
1656 btrace_clear (tp);
1657
1658 return 0;
1659 }
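/* Illustrative example (hypothetical addresses): if the old trace ends
   with an instruction at pc 0x1000 and the chronologically first block
   of the delta trace is [0, 0x1000], the code above rewrites that block
   to start at 0x1000 and pops the 0x1000 instruction so it is re-added
   during the normal trace computation, extending the old trace without
   a seam.  */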
1660
1661 /* Adjust the block trace in order to stitch old and new trace together.
1662 BTRACE is the new delta trace between the last and the current stop.
1663 TP is the traced thread.
 1664 May modify BTRACE as well as the existing trace in TP.
1665 Return 0 on success, -1 otherwise. */
1666
1667 static int
1668 btrace_stitch_trace (struct btrace_data *btrace, struct thread_info *tp)
1669 {
1670 /* If we don't have trace, there's nothing to do. */
1671 if (btrace_data_empty (btrace))
1672 return 0;
1673
1674 switch (btrace->format)
1675 {
1676 case BTRACE_FORMAT_NONE:
1677 return 0;
1678
1679 case BTRACE_FORMAT_BTS:
1680 return btrace_stitch_bts (&btrace->variant.bts, tp);
1681
1682 case BTRACE_FORMAT_PT:
1683 /* Delta reads are not supported. */
1684 return -1;
1685 }
1686
 1687 internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
1688 }
1689
1690 /* Clear the branch trace histories in BTINFO. */
1691
1692 static void
1693 btrace_clear_history (struct btrace_thread_info *btinfo)
1694 {
1695 xfree (btinfo->insn_history);
1696 xfree (btinfo->call_history);
1697 xfree (btinfo->replay);
1698
1699 btinfo->insn_history = NULL;
1700 btinfo->call_history = NULL;
1701 btinfo->replay = NULL;
1702 }
1703
1704 /* Clear the branch trace maintenance histories in BTINFO. */
1705
1706 static void
1707 btrace_maint_clear (struct btrace_thread_info *btinfo)
1708 {
1709 switch (btinfo->data.format)
1710 {
1711 default:
1712 break;
1713
1714 case BTRACE_FORMAT_BTS:
1715 btinfo->maint.variant.bts.packet_history.begin = 0;
1716 btinfo->maint.variant.bts.packet_history.end = 0;
1717 break;
1718
1719 #if defined (HAVE_LIBIPT)
1720 case BTRACE_FORMAT_PT:
1721 xfree (btinfo->maint.variant.pt.packets);
1722
1723 btinfo->maint.variant.pt.packets = NULL;
1724 btinfo->maint.variant.pt.packet_history.begin = 0;
1725 btinfo->maint.variant.pt.packet_history.end = 0;
1726 break;
1727 #endif /* defined (HAVE_LIBIPT) */
1728 }
1729 }
1730
1731 /* See btrace.h. */
1732
1733 const char *
1734 btrace_decode_error (enum btrace_format format, int errcode)
1735 {
1736 switch (format)
1737 {
1738 case BTRACE_FORMAT_BTS:
1739 switch (errcode)
1740 {
1741 case BDE_BTS_OVERFLOW:
1742 return _("instruction overflow");
1743
1744 case BDE_BTS_INSN_SIZE:
1745 return _("unknown instruction");
1746
1747 default:
1748 break;
1749 }
1750 break;
1751
1752 #if defined (HAVE_LIBIPT)
1753 case BTRACE_FORMAT_PT:
1754 switch (errcode)
1755 {
1756 case BDE_PT_USER_QUIT:
1757 return _("trace decode cancelled");
1758
1759 case BDE_PT_DISABLED:
1760 return _("disabled");
1761
1762 case BDE_PT_OVERFLOW:
1763 return _("overflow");
1764
1765 default:
1766 if (errcode < 0)
1767 return pt_errstr (pt_errcode (errcode));
1768 break;
1769 }
1770 break;
1771 #endif /* defined (HAVE_LIBIPT) */
1772
1773 default:
1774 break;
1775 }
1776
1777 return _("unknown");
1778 }
1779
1780 /* See btrace.h. */
1781
1782 void
1783 btrace_fetch (struct thread_info *tp)
1784 {
1785 struct btrace_thread_info *btinfo;
1786 struct btrace_target_info *tinfo;
1787 struct btrace_data btrace;
1788 struct cleanup *cleanup;
1789 int errcode;
1790
1791 DEBUG ("fetch thread %s (%s)", print_thread_id (tp),
1792 target_pid_to_str (tp->ptid));
1793
1794 btinfo = &tp->btrace;
1795 tinfo = btinfo->target;
1796 if (tinfo == NULL)
1797 return;
1798
1799 /* There's no way we could get new trace while replaying.
1800 On the other hand, delta trace would return a partial record with the
1801 current PC, which is the replay PC, not the last PC, as expected. */
1802 if (btinfo->replay != NULL)
1803 return;
1804
1805 /* We should not be called on running or exited threads. */
1806 gdb_assert (can_access_registers_ptid (tp->ptid));
1807
1808 btrace_data_init (&btrace);
1809 cleanup = make_cleanup_btrace_data (&btrace);
1810
1811 /* Let's first try to extend the trace we already have. */
1812 if (btinfo->end != NULL)
1813 {
1814 errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_DELTA);
1815 if (errcode == 0)
1816 {
1817 /* Success. Let's try to stitch the traces together. */
1818 errcode = btrace_stitch_trace (&btrace, tp);
1819 }
1820 else
1821 {
1822 /* We failed to read delta trace. Let's try to read new trace. */
1823 errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_NEW);
1824
1825 /* If we got any new trace, discard what we have. */
1826 if (errcode == 0 && !btrace_data_empty (&btrace))
1827 btrace_clear (tp);
1828 }
1829
1830 /* If we were not able to read the trace, we start over. */
1831 if (errcode != 0)
1832 {
1833 btrace_clear (tp);
1834 errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);
1835 }
1836 }
1837 else
1838 errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);
1839
1840 /* If we were not able to read the branch trace, signal an error. */
1841 if (errcode != 0)
1842 error (_("Failed to read branch trace."));
1843
1844 /* Compute the trace, provided we have any. */
1845 if (!btrace_data_empty (&btrace))
1846 {
1847 struct btrace_function *bfun;
1848
1849 /* Store the raw trace data. The stored data will be cleared in
1850 btrace_clear, so we always append the new trace. */
1851 btrace_data_append (&btinfo->data, &btrace);
1852 btrace_maint_clear (btinfo);
1853
1854 VEC_truncate (btrace_fun_p, btinfo->functions, 0);
1855 btrace_clear_history (btinfo);
1856 btrace_compute_ftrace (tp, &btrace);
1857
1858 for (bfun = btinfo->begin; bfun != NULL; bfun = bfun->flow.next)
1859 VEC_safe_push (btrace_fun_p, btinfo->functions, bfun);
1860 }
1861
1862 do_cleanups (cleanup);
1863 }
1864
1865 /* See btrace.h. */
1866
1867 void
1868 btrace_clear (struct thread_info *tp)
1869 {
1870 struct btrace_thread_info *btinfo;
1871 struct btrace_function *it, *trash;
1872
1873 DEBUG ("clear thread %s (%s)", print_thread_id (tp),
1874 target_pid_to_str (tp->ptid));
1875
1876 /* Make sure btrace frames that may hold a pointer into the branch
1877 trace data are destroyed. */
1878 reinit_frame_cache ();
1879
1880 btinfo = &tp->btrace;
1881
1882 VEC_free (btrace_fun_p, btinfo->functions);
1883
1884 it = btinfo->begin;
1885 while (it != NULL)
1886 {
1887 trash = it;
1888 it = it->flow.next;
1889
1890 xfree (trash);
1891 }
1892
1893 btinfo->begin = NULL;
1894 btinfo->end = NULL;
1895 btinfo->ngaps = 0;
1896
1897 /* Must clear the maint data before - it depends on BTINFO->DATA. */
1898 btrace_maint_clear (btinfo);
1899 btrace_data_clear (&btinfo->data);
1900 btrace_clear_history (btinfo);
1901 }
1902
1903 /* See btrace.h. */
1904
1905 void
1906 btrace_free_objfile (struct objfile *objfile)
1907 {
1908 struct thread_info *tp;
1909
1910 DEBUG ("free objfile");
1911
1912 ALL_NON_EXITED_THREADS (tp)
1913 btrace_clear (tp);
1914 }
1915
1916 #if defined (HAVE_LIBEXPAT)
1917
1918 /* Check the btrace document version. */
1919
1920 static void
1921 check_xml_btrace_version (struct gdb_xml_parser *parser,
1922 const struct gdb_xml_element *element,
1923 void *user_data, VEC (gdb_xml_value_s) *attributes)
1924 {
1925 const char *version
1926 = (const char *) xml_find_attribute (attributes, "version")->value;
1927
1928 if (strcmp (version, "1.0") != 0)
1929 gdb_xml_error (parser, _("Unsupported btrace version: \"%s\""), version);
1930 }
1931
1932 /* Parse a btrace "block" xml record. */
1933
1934 static void
1935 parse_xml_btrace_block (struct gdb_xml_parser *parser,
1936 const struct gdb_xml_element *element,
1937 void *user_data, VEC (gdb_xml_value_s) *attributes)
1938 {
1939 struct btrace_data *btrace;
1940 struct btrace_block *block;
1941 ULONGEST *begin, *end;
1942
1943 btrace = (struct btrace_data *) user_data;
1944
1945 switch (btrace->format)
1946 {
1947 case BTRACE_FORMAT_BTS:
1948 break;
1949
1950 case BTRACE_FORMAT_NONE:
1951 btrace->format = BTRACE_FORMAT_BTS;
1952 btrace->variant.bts.blocks = NULL;
1953 break;
1954
1955 default:
1956 gdb_xml_error (parser, _("Btrace format error."));
1957 }
1958
1959 begin = (ULONGEST *) xml_find_attribute (attributes, "begin")->value;
1960 end = (ULONGEST *) xml_find_attribute (attributes, "end")->value;
1961
1962 block = VEC_safe_push (btrace_block_s, btrace->variant.bts.blocks, NULL);
1963 block->begin = *begin;
1964 block->end = *end;
1965 }
1966
1967 /* Parse a "raw" xml record. */
1968
1969 static void
1970 parse_xml_raw (struct gdb_xml_parser *parser, const char *body_text,
1971 gdb_byte **pdata, size_t *psize)
1972 {
1973 struct cleanup *cleanup;
1974 gdb_byte *data, *bin;
1975 size_t len, size;
1976
1977 len = strlen (body_text);
1978 if (len % 2 != 0)
1979 gdb_xml_error (parser, _("Bad raw data size."));
1980
1981 size = len / 2;
1982
1983 bin = data = (gdb_byte *) xmalloc (size);
1984 cleanup = make_cleanup (xfree, data);
1985
1986 /* We use hex encoding - see common/rsp-low.h. */
1987 while (len > 0)
1988 {
1989 char hi, lo;
1990
1991 hi = *body_text++;
1992 lo = *body_text++;
1993
1994 if (hi == 0 || lo == 0)
1995 gdb_xml_error (parser, _("Bad hex encoding."));
1996
1997 *bin++ = fromhex (hi) * 16 + fromhex (lo);
1998 len -= 2;
1999 }
2000
2001 discard_cleanups (cleanup);
2002
2003 *pdata = data;
2004 *psize = size;
2005 }
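
/* For illustration only: a compiled-out sketch of what the decoding loop
   above produces.  The function name and the PARSER argument are
   hypothetical; "0102ff" stands in for a real element body.  */
#if 0
static void
example_parse_raw (struct gdb_xml_parser *parser)
{
  gdb_byte *data;
  size_t size;

  /* Each byte is encoded as two hex digits, so "0102ff" yields three
     bytes: 0x01, 0x02, 0xff.  */
  parse_xml_raw (parser, "0102ff", &data, &size);

  gdb_assert (size == 3);
  gdb_assert (data[0] == 0x01 && data[1] == 0x02 && data[2] == 0xff);

  xfree (data);
}
#endif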
2006
2007 /* Parse a btrace pt-config "cpu" xml record. */
2008
2009 static void
2010 parse_xml_btrace_pt_config_cpu (struct gdb_xml_parser *parser,
2011 const struct gdb_xml_element *element,
2012 void *user_data,
2013 VEC (gdb_xml_value_s) *attributes)
2014 {
2015 struct btrace_data *btrace;
2016 const char *vendor;
2017 ULONGEST *family, *model, *stepping;
2018
2019 vendor = (const char *) xml_find_attribute (attributes, "vendor")->value;
2020 family = (ULONGEST *) xml_find_attribute (attributes, "family")->value;
2021 model = (ULONGEST *) xml_find_attribute (attributes, "model")->value;
2022 stepping = (ULONGEST *) xml_find_attribute (attributes, "stepping")->value;
2023
2024 btrace = (struct btrace_data *) user_data;
2025
2026 if (strcmp (vendor, "GenuineIntel") == 0)
2027 btrace->variant.pt.config.cpu.vendor = CV_INTEL;
2028
2029 btrace->variant.pt.config.cpu.family = *family;
2030 btrace->variant.pt.config.cpu.model = *model;
2031 btrace->variant.pt.config.cpu.stepping = *stepping;
2032 }
2033
2034 /* Parse a btrace pt "raw" xml record. */
2035
2036 static void
2037 parse_xml_btrace_pt_raw (struct gdb_xml_parser *parser,
2038 const struct gdb_xml_element *element,
2039 void *user_data, const char *body_text)
2040 {
2041 struct btrace_data *btrace;
2042
2043 btrace = (struct btrace_data *) user_data;
2044 parse_xml_raw (parser, body_text, &btrace->variant.pt.data,
2045 &btrace->variant.pt.size);
2046 }
2047
2048 /* Parse a btrace "pt" xml record. */
2049
2050 static void
2051 parse_xml_btrace_pt (struct gdb_xml_parser *parser,
2052 const struct gdb_xml_element *element,
2053 void *user_data, VEC (gdb_xml_value_s) *attributes)
2054 {
2055 struct btrace_data *btrace;
2056
2057 btrace = (struct btrace_data *) user_data;
2058 btrace->format = BTRACE_FORMAT_PT;
2059 btrace->variant.pt.config.cpu.vendor = CV_UNKNOWN;
2060 btrace->variant.pt.data = NULL;
2061 btrace->variant.pt.size = 0;
2062 }
2063
2064 static const struct gdb_xml_attribute block_attributes[] = {
2065 { "begin", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
2066 { "end", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
2067 { NULL, GDB_XML_AF_NONE, NULL, NULL }
2068 };
2069
2070 static const struct gdb_xml_attribute btrace_pt_config_cpu_attributes[] = {
2071 { "vendor", GDB_XML_AF_NONE, NULL, NULL },
2072 { "family", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
2073 { "model", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
2074 { "stepping", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
2075 { NULL, GDB_XML_AF_NONE, NULL, NULL }
2076 };
2077
2078 static const struct gdb_xml_element btrace_pt_config_children[] = {
2079 { "cpu", btrace_pt_config_cpu_attributes, NULL, GDB_XML_EF_OPTIONAL,
2080 parse_xml_btrace_pt_config_cpu, NULL },
2081 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
2082 };
2083
2084 static const struct gdb_xml_element btrace_pt_children[] = {
2085 { "pt-config", NULL, btrace_pt_config_children, GDB_XML_EF_OPTIONAL, NULL,
2086 NULL },
2087 { "raw", NULL, NULL, GDB_XML_EF_OPTIONAL, NULL, parse_xml_btrace_pt_raw },
2088 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
2089 };
2090
2091 static const struct gdb_xml_attribute btrace_attributes[] = {
2092 { "version", GDB_XML_AF_NONE, NULL, NULL },
2093 { NULL, GDB_XML_AF_NONE, NULL, NULL }
2094 };
2095
2096 static const struct gdb_xml_element btrace_children[] = {
2097 { "block", block_attributes, NULL,
2098 GDB_XML_EF_REPEATABLE | GDB_XML_EF_OPTIONAL, parse_xml_btrace_block, NULL },
2099 { "pt", NULL, btrace_pt_children, GDB_XML_EF_OPTIONAL, parse_xml_btrace_pt,
2100 NULL },
2101 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
2102 };
2103
2104 static const struct gdb_xml_element btrace_elements[] = {
2105 { "btrace", btrace_attributes, btrace_children, GDB_XML_EF_NONE,
2106 check_xml_btrace_version, NULL },
2107 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
2108 };
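
/* For illustration, a minimal document accepted by the element tables
   above - a BTS trace with two blocks (addresses hypothetical):

     <btrace version="1.0">
       <block begin="0x400500" end="0x400520"/>
       <block begin="0x400400" end="0x400416"/>
     </btrace>

   A pt trace instead wraps an optional <pt-config> element and a
   hex-encoded <raw> body inside a single <pt> element.  */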
2109
2110 #endif /* defined (HAVE_LIBEXPAT) */
2111
2112 /* See btrace.h. */
2113
2114 void
2115 parse_xml_btrace (struct btrace_data *btrace, const char *buffer)
2116 {
2117 struct cleanup *cleanup;
2118 int errcode;
2119
2120 #if defined (HAVE_LIBEXPAT)
2121
2122 btrace->format = BTRACE_FORMAT_NONE;
2123
2124 cleanup = make_cleanup_btrace_data (btrace);
2125 errcode = gdb_xml_parse_quick (_("btrace"), "btrace.dtd", btrace_elements,
2126 buffer, btrace);
2127 if (errcode != 0)
2128 error (_("Error parsing branch trace."));
2129
2130 /* Keep parse results. */
2131 discard_cleanups (cleanup);
2132
2133 #else /* !defined (HAVE_LIBEXPAT) */
2134
2135 error (_("Cannot process branch trace. XML parsing is not supported."));
2136
2137 #endif /* !defined (HAVE_LIBEXPAT) */
2138 }
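
/* For illustration only: a compiled-out sketch of the expected call
   sequence.  The function name and XML_BUFFER are hypothetical; on a
   parse error the cleanup installed above frees the partial data.  */
#if 0
static void
example_parse_btrace (const char *xml_buffer)
{
  struct btrace_data btrace;

  btrace_data_init (&btrace);
  parse_xml_btrace (&btrace, xml_buffer);

  /* Use BTRACE, then release it.  */
  btrace_data_fini (&btrace);
}
#endif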
2139
2140 #if defined (HAVE_LIBEXPAT)
2141
2142 /* Parse a btrace-conf "bts" xml record. */
2143
2144 static void
2145 parse_xml_btrace_conf_bts (struct gdb_xml_parser *parser,
2146 const struct gdb_xml_element *element,
2147 void *user_data, VEC (gdb_xml_value_s) *attributes)
2148 {
2149 struct btrace_config *conf;
2150 struct gdb_xml_value *size;
2151
2152 conf = (struct btrace_config *) user_data;
2153 conf->format = BTRACE_FORMAT_BTS;
2154 conf->bts.size = 0;
2155
2156 size = xml_find_attribute (attributes, "size");
2157 if (size != NULL)
2158 conf->bts.size = (unsigned int) *(ULONGEST *) size->value;
2159 }
2160
2161 /* Parse a btrace-conf "pt" xml record. */
2162
2163 static void
2164 parse_xml_btrace_conf_pt (struct gdb_xml_parser *parser,
2165 const struct gdb_xml_element *element,
2166 void *user_data, VEC (gdb_xml_value_s) *attributes)
2167 {
2168 struct btrace_config *conf;
2169 struct gdb_xml_value *size;
2170
2171 conf = (struct btrace_config *) user_data;
2172 conf->format = BTRACE_FORMAT_PT;
2173 conf->pt.size = 0;
2174
2175 size = xml_find_attribute (attributes, "size");
2176 if (size != NULL)
2177 conf->pt.size = (unsigned int) *(ULONGEST *) size->value;
2178 }
2179
2180 static const struct gdb_xml_attribute btrace_conf_pt_attributes[] = {
2181 { "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
2182 { NULL, GDB_XML_AF_NONE, NULL, NULL }
2183 };
2184
2185 static const struct gdb_xml_attribute btrace_conf_bts_attributes[] = {
2186 { "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
2187 { NULL, GDB_XML_AF_NONE, NULL, NULL }
2188 };
2189
2190 static const struct gdb_xml_element btrace_conf_children[] = {
2191 { "bts", btrace_conf_bts_attributes, NULL, GDB_XML_EF_OPTIONAL,
2192 parse_xml_btrace_conf_bts, NULL },
2193 { "pt", btrace_conf_pt_attributes, NULL, GDB_XML_EF_OPTIONAL,
2194 parse_xml_btrace_conf_pt, NULL },
2195 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
2196 };
2197
2198 static const struct gdb_xml_attribute btrace_conf_attributes[] = {
2199 { "version", GDB_XML_AF_NONE, NULL, NULL },
2200 { NULL, GDB_XML_AF_NONE, NULL, NULL }
2201 };
2202
2203 static const struct gdb_xml_element btrace_conf_elements[] = {
2204 { "btrace-conf", btrace_conf_attributes, btrace_conf_children,
2205 GDB_XML_EF_NONE, NULL, NULL },
2206 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
2207 };
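
/* For illustration, configuration documents matching the tables above
   (sizes hypothetical):

     <btrace-conf version="1.0">
       <bts size="65536"/>
     </btrace-conf>

   or, for Intel Processor Trace:

     <btrace-conf version="1.0">
       <pt size="16777216"/>
     </btrace-conf>  */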
2208
2209 #endif /* defined (HAVE_LIBEXPAT) */
2210
2211 /* See btrace.h. */
2212
2213 void
2214 parse_xml_btrace_conf (struct btrace_config *conf, const char *xml)
2215 {
2216 int errcode;
2217
2218 #if defined (HAVE_LIBEXPAT)
2219
2220 errcode = gdb_xml_parse_quick (_("btrace-conf"), "btrace-conf.dtd",
2221 btrace_conf_elements, xml, conf);
2222 if (errcode != 0)
2223 error (_("Error parsing branch trace configuration."));
2224
2225 #else /* !defined (HAVE_LIBEXPAT) */
2226
2227 error (_("XML parsing is not supported."));
2228
2229 #endif /* !defined (HAVE_LIBEXPAT) */
2230 }
2231
2232 /* See btrace.h. */
2233
2234 const struct btrace_insn *
2235 btrace_insn_get (const struct btrace_insn_iterator *it)
2236 {
2237 const struct btrace_function *bfun;
2238 unsigned int index, end;
2239
2240 index = it->index;
2241 bfun = it->function;
2242
2243 /* Check if the iterator points to a gap in the trace. */
2244 if (bfun->errcode != 0)
2245 return NULL;
2246
2247 /* The index is within the bounds of this function's instruction vector. */
2248 end = VEC_length (btrace_insn_s, bfun->insn);
2249 gdb_assert (0 < end);
2250 gdb_assert (index < end);
2251
2252 return VEC_index (btrace_insn_s, bfun->insn, index);
2253 }
2254
2255 /* See btrace.h. */
2256
2257 int
2258 btrace_insn_get_error (const struct btrace_insn_iterator *it)
2259 {
2260 return it->function->errcode;
2261 }
2262
2263 /* See btrace.h. */
2264
2265 unsigned int
2266 btrace_insn_number (const struct btrace_insn_iterator *it)
2267 {
2268 return it->function->insn_offset + it->index;
2269 }
2270
2271 /* See btrace.h. */
2272
2273 void
2274 btrace_insn_begin (struct btrace_insn_iterator *it,
2275 const struct btrace_thread_info *btinfo)
2276 {
2277 const struct btrace_function *bfun;
2278
2279 bfun = btinfo->begin;
2280 if (bfun == NULL)
2281 error (_("No trace."));
2282
2283 it->function = bfun;
2284 it->index = 0;
2285 }
2286
2287 /* See btrace.h. */
2288
2289 void
2290 btrace_insn_end (struct btrace_insn_iterator *it,
2291 const struct btrace_thread_info *btinfo)
2292 {
2293 const struct btrace_function *bfun;
2294 unsigned int length;
2295
2296 bfun = btinfo->end;
2297 if (bfun == NULL)
2298 error (_("No trace."));
2299
2300 length = VEC_length (btrace_insn_s, bfun->insn);
2301
2302 /* The last function may either be a gap or contain the current
2303 instruction, which is one past the end of the execution trace; ignore
2304 it. */
2305 if (length > 0)
2306 length -= 1;
2307
2308 it->function = bfun;
2309 it->index = length;
2310 }
2311
2312 /* See btrace.h. */
2313
2314 unsigned int
2315 btrace_insn_next (struct btrace_insn_iterator *it, unsigned int stride)
2316 {
2317 const struct btrace_function *bfun;
2318 unsigned int index, steps;
2319
2320 bfun = it->function;
2321 steps = 0;
2322 index = it->index;
2323
2324 while (stride != 0)
2325 {
2326 unsigned int end, space, adv;
2327
2328 end = VEC_length (btrace_insn_s, bfun->insn);
2329
2330 /* An empty function segment represents a gap in the trace. We count
2331 it as one instruction. */
2332 if (end == 0)
2333 {
2334 const struct btrace_function *next;
2335
2336 next = bfun->flow.next;
2337 if (next == NULL)
2338 break;
2339
2340 stride -= 1;
2341 steps += 1;
2342
2343 bfun = next;
2344 index = 0;
2345
2346 continue;
2347 }
2348
2349 gdb_assert (0 < end);
2350 gdb_assert (index < end);
2351
2352 /* Compute the number of instructions remaining in this segment. */
2353 space = end - index;
2354
2355 /* Advance the iterator as far as possible within this segment. */
2356 adv = std::min (space, stride);
2357 stride -= adv;
2358 index += adv;
2359 steps += adv;
2360
2361 /* Move to the next function if we're at the end of this one. */
2362 if (index == end)
2363 {
2364 const struct btrace_function *next;
2365
2366 next = bfun->flow.next;
2367 if (next == NULL)
2368 {
2369 /* We stepped past the last function.
2370
2371 Let's adjust the index to point to the last instruction in
2372 the previous function. */
2373 index -= 1;
2374 steps -= 1;
2375 break;
2376 }
2377
2378 /* We now point to the first instruction in the new function. */
2379 bfun = next;
2380 index = 0;
2381 }
2382
2383 /* We did make progress. */
2384 gdb_assert (adv > 0);
2385 }
2386
2387 /* Update the iterator. */
2388 it->function = bfun;
2389 it->index = index;
2390
2391 return steps;
2392 }
2393
2394 /* See btrace.h. */
2395
2396 unsigned int
2397 btrace_insn_prev (struct btrace_insn_iterator *it, unsigned int stride)
2398 {
2399 const struct btrace_function *bfun;
2400 unsigned int index, steps;
2401
2402 bfun = it->function;
2403 steps = 0;
2404 index = it->index;
2405
2406 while (stride != 0)
2407 {
2408 unsigned int adv;
2409
2410 /* Move to the previous function if we're at the start of this one. */
2411 if (index == 0)
2412 {
2413 const struct btrace_function *prev;
2414
2415 prev = bfun->flow.prev;
2416 if (prev == NULL)
2417 break;
2418
2419 /* We point to one after the last instruction in the new function. */
2420 bfun = prev;
2421 index = VEC_length (btrace_insn_s, bfun->insn);
2422
2423 /* An empty function segment represents a gap in the trace. We count
2424 it as one instruction. */
2425 if (index == 0)
2426 {
2427 stride -= 1;
2428 steps += 1;
2429
2430 continue;
2431 }
2432 }
2433
2434 /* Advance the iterator as far as possible within this segment. */
2435 adv = std::min (index, stride);
2436
2437 stride -= adv;
2438 index -= adv;
2439 steps += adv;
2440
2441 /* We did make progress. */
2442 gdb_assert (adv > 0);
2443 }
2444
2445 /* Update the iterator. */
2446 it->function = bfun;
2447 it->index = index;
2448
2449 return steps;
2450 }
2451
2452 /* See btrace.h. */
2453
2454 int
2455 btrace_insn_cmp (const struct btrace_insn_iterator *lhs,
2456 const struct btrace_insn_iterator *rhs)
2457 {
2458 unsigned int lnum, rnum;
2459
2460 lnum = btrace_insn_number (lhs);
2461 rnum = btrace_insn_number (rhs);
2462
2463 return (int) (lnum - rnum);
2464 }
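
/* For illustration only: a compiled-out sketch that walks the recorded
   instruction history up to the current instruction using the iterator
   functions above.  The function name is hypothetical; BTINFO is assumed
   to hold previously fetched trace.  */
#if 0
static void
example_walk_insns (const struct btrace_thread_info *btinfo)
{
  struct btrace_insn_iterator it, end;

  btrace_insn_begin (&it, btinfo);
  btrace_insn_end (&end, btinfo);

  while (btrace_insn_cmp (&it, &end) < 0)
    {
      const struct btrace_insn *insn = btrace_insn_get (&it);

      /* INSN is NULL if the iterator points into a gap.  */
      if (insn != NULL)
	printf_unfiltered ("%u\t%s\n", btrace_insn_number (&it),
			   core_addr_to_string_nz (insn->pc));

      if (btrace_insn_next (&it, 1) == 0)
	break;
    }
}
#endif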
2465
2466 /* See btrace.h. */
2467
2468 int
2469 btrace_find_insn_by_number (struct btrace_insn_iterator *it,
2470 const struct btrace_thread_info *btinfo,
2471 unsigned int number)
2472 {
2473 const struct btrace_function *bfun;
2474 unsigned int upper, lower;
2475
2476 if (VEC_empty (btrace_fun_p, btinfo->functions))
2477 return 0;
2478
2479 lower = 0;
2480 bfun = VEC_index (btrace_fun_p, btinfo->functions, lower);
2481 if (number < bfun->insn_offset)
2482 return 0;
2483
2484 upper = VEC_length (btrace_fun_p, btinfo->functions) - 1;
2485 bfun = VEC_index (btrace_fun_p, btinfo->functions, upper);
2486 if (number >= bfun->insn_offset + ftrace_call_num_insn (bfun))
2487 return 0;
2488
2489 /* We assume that there are no holes in the numbering. */
2490 for (;;)
2491 {
2492 const unsigned int average = lower + (upper - lower) / 2;
2493
2494 bfun = VEC_index (btrace_fun_p, btinfo->functions, average);
2495
2496 if (number < bfun->insn_offset)
2497 {
2498 upper = average - 1;
2499 continue;
2500 }
2501
2502 if (number >= bfun->insn_offset + ftrace_call_num_insn (bfun))
2503 {
2504 lower = average + 1;
2505 continue;
2506 }
2507
2508 break;
2509 }
2510
2511 it->function = bfun;
2512 it->index = number - bfun->insn_offset;
2513 return 1;
2514 }
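
/* For illustration only: a compiled-out sketch of random access via the
   binary search above.  The function name is hypothetical.  */
#if 0
static void
example_goto_insn (const struct btrace_thread_info *btinfo,
		   unsigned int number)
{
  struct btrace_insn_iterator it;

  if (btrace_find_insn_by_number (&it, btinfo, number) == 0)
    return;  /* NUMBER lies outside the recorded trace.  */

  /* On success, btrace_insn_number (&it) == NUMBER.  */
}
#endif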
2515
2516 /* See btrace.h. */
2517
2518 const struct btrace_function *
2519 btrace_call_get (const struct btrace_call_iterator *it)
2520 {
2521 return it->function;
2522 }
2523
2524 /* See btrace.h. */
2525
2526 unsigned int
2527 btrace_call_number (const struct btrace_call_iterator *it)
2528 {
2529 const struct btrace_thread_info *btinfo;
2530 const struct btrace_function *bfun;
2531 unsigned int insns;
2532
2533 btinfo = it->btinfo;
2534 bfun = it->function;
2535 if (bfun != NULL)
2536 return bfun->number;
2537
2538 /* For the end iterator, i.e. bfun == NULL, we return one more than the
2539 number of the last function. */
2540 bfun = btinfo->end;
2541 insns = VEC_length (btrace_insn_s, bfun->insn);
2542
2543 /* If the function contains only a single instruction (i.e. the current
2544 instruction), it will be skipped and its number is already the number
2545 we seek. */
2546 if (insns == 1)
2547 return bfun->number;
2548
2549 /* Otherwise, return one more than the number of the last function. */
2550 return bfun->number + 1;
2551 }
2552
2553 /* See btrace.h. */
2554
2555 void
2556 btrace_call_begin (struct btrace_call_iterator *it,
2557 const struct btrace_thread_info *btinfo)
2558 {
2559 const struct btrace_function *bfun;
2560
2561 bfun = btinfo->begin;
2562 if (bfun == NULL)
2563 error (_("No trace."));
2564
2565 it->btinfo = btinfo;
2566 it->function = bfun;
2567 }
2568
2569 /* See btrace.h. */
2570
2571 void
2572 btrace_call_end (struct btrace_call_iterator *it,
2573 const struct btrace_thread_info *btinfo)
2574 {
2575 const struct btrace_function *bfun;
2576
2577 bfun = btinfo->end;
2578 if (bfun == NULL)
2579 error (_("No trace."));
2580
2581 it->btinfo = btinfo;
2582 it->function = NULL;
2583 }
2584
2585 /* See btrace.h. */
2586
2587 unsigned int
2588 btrace_call_next (struct btrace_call_iterator *it, unsigned int stride)
2589 {
2590 const struct btrace_function *bfun;
2591 unsigned int steps;
2592
2593 bfun = it->function;
2594 steps = 0;
2595 while (bfun != NULL)
2596 {
2597 const struct btrace_function *next;
2598 unsigned int insns;
2599
2600 next = bfun->flow.next;
2601 if (next == NULL)
2602 {
2603 /* Ignore the last function if it only contains a single
2604 (i.e. the current) instruction. */
2605 insns = VEC_length (btrace_insn_s, bfun->insn);
2606 if (insns == 1)
2607 steps -= 1;
2608 }
2609
2610 if (stride == steps)
2611 break;
2612
2613 bfun = next;
2614 steps += 1;
2615 }
2616
2617 it->function = bfun;
2618 return steps;
2619 }
2620
2621 /* See btrace.h. */
2622
2623 unsigned int
2624 btrace_call_prev (struct btrace_call_iterator *it, unsigned int stride)
2625 {
2626 const struct btrace_thread_info *btinfo;
2627 const struct btrace_function *bfun;
2628 unsigned int steps;
2629
2630 bfun = it->function;
2631 steps = 0;
2632
2633 if (bfun == NULL)
2634 {
2635 unsigned int insns;
2636
2637 btinfo = it->btinfo;
2638 bfun = btinfo->end;
2639 if (bfun == NULL)
2640 return 0;
2641
2642 /* Ignore the last function if it only contains a single
2643 (i.e. the current) instruction. */
2644 insns = VEC_length (btrace_insn_s, bfun->insn);
2645 if (insns == 1)
2646 bfun = bfun->flow.prev;
2647
2648 if (bfun == NULL)
2649 return 0;
2650
2651 steps += 1;
2652 }
2653
2654 while (steps < stride)
2655 {
2656 const struct btrace_function *prev;
2657
2658 prev = bfun->flow.prev;
2659 if (prev == NULL)
2660 break;
2661
2662 bfun = prev;
2663 steps += 1;
2664 }
2665
2666 it->function = bfun;
2667 return steps;
2668 }
2669
2670 /* See btrace.h. */
2671
2672 int
2673 btrace_call_cmp (const struct btrace_call_iterator *lhs,
2674 const struct btrace_call_iterator *rhs)
2675 {
2676 unsigned int lnum, rnum;
2677
2678 lnum = btrace_call_number (lhs);
2679 rnum = btrace_call_number (rhs);
2680
2681 return (int) (lnum - rnum);
2682 }
2683
2684 /* See btrace.h. */
2685
2686 int
2687 btrace_find_call_by_number (struct btrace_call_iterator *it,
2688 const struct btrace_thread_info *btinfo,
2689 unsigned int number)
2690 {
2691 const struct btrace_function *bfun;
2692
2693 for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
2694 {
2695 unsigned int bnum;
2696
2697 bnum = bfun->number;
2698 if (number == bnum)
2699 {
2700 it->btinfo = btinfo;
2701 it->function = bfun;
2702 return 1;
2703 }
2704
2705 /* Functions are ordered and numbered consecutively. We could bail out
2706 earlier. On the other hand, it is very unlikely that we search for
2707 a nonexistent function. */
2708 }
2709
2710 return 0;
2711 }
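
/* For illustration only: a compiled-out sketch that walks the call
   history using the iterator functions above.  The function name is
   hypothetical; BTINFO is assumed to hold previously fetched trace.  */
#if 0
static void
example_walk_calls (const struct btrace_thread_info *btinfo)
{
  struct btrace_call_iterator it, end;

  btrace_call_begin (&it, btinfo);
  btrace_call_end (&end, btinfo);

  while (btrace_call_cmp (&it, &end) < 0)
    {
      const struct btrace_function *bfun = btrace_call_get (&it);

      printf_unfiltered ("%u\t%s\n", bfun->number,
			 ftrace_print_function_name (bfun));

      if (btrace_call_next (&it, 1) == 0)
	break;
    }
}
#endif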
2712
2713 /* See btrace.h. */
2714
2715 void
2716 btrace_set_insn_history (struct btrace_thread_info *btinfo,
2717 const struct btrace_insn_iterator *begin,
2718 const struct btrace_insn_iterator *end)
2719 {
2720 if (btinfo->insn_history == NULL)
2721 btinfo->insn_history = XCNEW (struct btrace_insn_history);
2722
2723 btinfo->insn_history->begin = *begin;
2724 btinfo->insn_history->end = *end;
2725 }
2726
2727 /* See btrace.h. */
2728
2729 void
2730 btrace_set_call_history (struct btrace_thread_info *btinfo,
2731 const struct btrace_call_iterator *begin,
2732 const struct btrace_call_iterator *end)
2733 {
2734 gdb_assert (begin->btinfo == end->btinfo);
2735
2736 if (btinfo->call_history == NULL)
2737 btinfo->call_history = XCNEW (struct btrace_call_history);
2738
2739 btinfo->call_history->begin = *begin;
2740 btinfo->call_history->end = *end;
2741 }
2742
2743 /* See btrace.h. */
2744
2745 int
2746 btrace_is_replaying (struct thread_info *tp)
2747 {
2748 return tp->btrace.replay != NULL;
2749 }
2750
2751 /* See btrace.h. */
2752
2753 int
2754 btrace_is_empty (struct thread_info *tp)
2755 {
2756 struct btrace_insn_iterator begin, end;
2757 struct btrace_thread_info *btinfo;
2758
2759 btinfo = &tp->btrace;
2760
2761 if (btinfo->begin == NULL)
2762 return 1;
2763
2764 btrace_insn_begin (&begin, btinfo);
2765 btrace_insn_end (&end, btinfo);
2766
2767 return btrace_insn_cmp (&begin, &end) == 0;
2768 }
2769
2770 /* Forward the cleanup request. */
2771
2772 static void
2773 do_btrace_data_cleanup (void *arg)
2774 {
2775 btrace_data_fini ((struct btrace_data *) arg);
2776 }
2777
2778 /* See btrace.h. */
2779
2780 struct cleanup *
2781 make_cleanup_btrace_data (struct btrace_data *data)
2782 {
2783 return make_cleanup (do_btrace_data_cleanup, data);
2784 }
2785
2786 #if defined (HAVE_LIBIPT)
2787
2788 /* Print a single packet. */
2789
2790 static void
2791 pt_print_packet (const struct pt_packet *packet)
2792 {
2793 switch (packet->type)
2794 {
2795 default:
2796 printf_unfiltered (("[??: %x]"), packet->type);
2797 break;
2798
2799 case ppt_psb:
2800 printf_unfiltered (("psb"));
2801 break;
2802
2803 case ppt_psbend:
2804 printf_unfiltered (("psbend"));
2805 break;
2806
2807 case ppt_pad:
2808 printf_unfiltered (("pad"));
2809 break;
2810
2811 case ppt_tip:
2812 printf_unfiltered (("tip %u: 0x%" PRIx64 ""),
2813 packet->payload.ip.ipc,
2814 packet->payload.ip.ip);
2815 break;
2816
2817 case ppt_tip_pge:
2818 printf_unfiltered (("tip.pge %u: 0x%" PRIx64 ""),
2819 packet->payload.ip.ipc,
2820 packet->payload.ip.ip);
2821 break;
2822
2823 case ppt_tip_pgd:
2824 printf_unfiltered (("tip.pgd %u: 0x%" PRIx64 ""),
2825 packet->payload.ip.ipc,
2826 packet->payload.ip.ip);
2827 break;
2828
2829 case ppt_fup:
2830 printf_unfiltered (("fup %u: 0x%" PRIx64 ""),
2831 packet->payload.ip.ipc,
2832 packet->payload.ip.ip);
2833 break;
2834
2835 case ppt_tnt_8:
2836 printf_unfiltered (("tnt-8 %u: 0x%" PRIx64 ""),
2837 packet->payload.tnt.bit_size,
2838 packet->payload.tnt.payload);
2839 break;
2840
2841 case ppt_tnt_64:
2842 printf_unfiltered (("tnt-64 %u: 0x%" PRIx64 ""),
2843 packet->payload.tnt.bit_size,
2844 packet->payload.tnt.payload);
2845 break;
2846
2847 case ppt_pip:
2848 printf_unfiltered (("pip %" PRIx64 "%s"), packet->payload.pip.cr3,
2849 packet->payload.pip.nr ? (" nr") : (""));
2850 break;
2851
2852 case ppt_tsc:
2853 printf_unfiltered (("tsc %" PRIx64 ""), packet->payload.tsc.tsc);
2854 break;
2855
2856 case ppt_cbr:
2857 printf_unfiltered (("cbr %u"), packet->payload.cbr.ratio);
2858 break;
2859
2860 case ppt_mode:
2861 switch (packet->payload.mode.leaf)
2862 {
2863 default:
2864 printf_unfiltered (("mode %u"), packet->payload.mode.leaf);
2865 break;
2866
2867 case pt_mol_exec:
2868 printf_unfiltered (("mode.exec%s%s"),
2869 packet->payload.mode.bits.exec.csl
2870 ? (" cs.l") : (""),
2871 packet->payload.mode.bits.exec.csd
2872 ? (" cs.d") : (""));
2873 break;
2874
2875 case pt_mol_tsx:
2876 printf_unfiltered (("mode.tsx%s%s"),
2877 packet->payload.mode.bits.tsx.intx
2878 ? (" intx") : (""),
2879 packet->payload.mode.bits.tsx.abrt
2880 ? (" abrt") : (""));
2881 break;
2882 }
2883 break;
2884
2885 case ppt_ovf:
2886 printf_unfiltered (("ovf"));
2887 break;
2888
2889 case ppt_stop:
2890 printf_unfiltered (("stop"));
2891 break;
2892
2893 case ppt_vmcs:
2894 printf_unfiltered (("vmcs %" PRIx64 ""), packet->payload.vmcs.base);
2895 break;
2896
2897 case ppt_tma:
2898 printf_unfiltered (("tma %x %x"), packet->payload.tma.ctc,
2899 packet->payload.tma.fc);
2900 break;
2901
2902 case ppt_mtc:
2903 printf_unfiltered (("mtc %x"), packet->payload.mtc.ctc);
2904 break;
2905
2906 case ppt_cyc:
2907 printf_unfiltered (("cyc %" PRIx64 ""), packet->payload.cyc.value);
2908 break;
2909
2910 case ppt_mnt:
2911 printf_unfiltered (("mnt %" PRIx64 ""), packet->payload.mnt.payload);
2912 break;
2913 }
2914 }
2915
2916 /* Decode packets into MAINT using DECODER. */
2917
2918 static void
2919 btrace_maint_decode_pt (struct btrace_maint_info *maint,
2920 struct pt_packet_decoder *decoder)
2921 {
2922 int errcode;
2923
2924 for (;;)
2925 {
2926 struct btrace_pt_packet packet;
2927
2928 errcode = pt_pkt_sync_forward (decoder);
2929 if (errcode < 0)
2930 break;
2931
2932 for (;;)
2933 {
2934 pt_pkt_get_offset (decoder, &packet.offset);
2935
2936 errcode = pt_pkt_next (decoder, &packet.packet,
2937 sizeof (packet.packet));
2938 if (errcode < 0)
2939 break;
2940
2941 if (maint_btrace_pt_skip_pad == 0 || packet.packet.type != ppt_pad)
2942 {
2943 packet.errcode = pt_errcode (errcode);
2944 VEC_safe_push (btrace_pt_packet_s, maint->variant.pt.packets,
2945 &packet);
2946 }
2947 }
2948
2949 if (errcode == -pte_eos)
2950 break;
2951
2952 packet.errcode = pt_errcode (errcode);
2953 VEC_safe_push (btrace_pt_packet_s, maint->variant.pt.packets,
2954 &packet);
2955
2956 warning (_("Error at trace offset 0x%" PRIx64 ": %s."),
2957 packet.offset, pt_errstr (packet.errcode));
2958 }
2959
2960 if (errcode != -pte_eos)
2961 warning (_("Failed to synchronize onto the Intel Processor Trace "
2962 "stream: %s."), pt_errstr (pt_errcode (errcode)));
2963 }
2964
2965 /* Update the packet history in BTINFO. */
2966
2967 static void
2968 btrace_maint_update_pt_packets (struct btrace_thread_info *btinfo)
2969 {
2971 struct pt_packet_decoder *decoder;
2972 struct btrace_data_pt *pt;
2973 struct pt_config config;
2974 int errcode;
2975
2976 pt = &btinfo->data.variant.pt;
2977
2978 /* Nothing to do if there is no trace. */
2979 if (pt->size == 0)
2980 return;
2981
2982 memset (&config, 0, sizeof (config));
2983
2984 config.size = sizeof (config);
2985 config.begin = pt->data;
2986 config.end = pt->data + pt->size;
2987
2988 config.cpu.vendor = pt_translate_cpu_vendor (pt->config.cpu.vendor);
2989 config.cpu.family = pt->config.cpu.family;
2990 config.cpu.model = pt->config.cpu.model;
2991 config.cpu.stepping = pt->config.cpu.stepping;
2992
2993 errcode = pt_cpu_errata (&config.errata, &config.cpu);
2994 if (errcode < 0)
2995 error (_("Failed to configure the Intel Processor Trace decoder: %s."),
2996 pt_errstr (pt_errcode (errcode)));
2997
2998 decoder = pt_pkt_alloc_decoder (&config);
2999 if (decoder == NULL)
3000 error (_("Failed to allocate the Intel Processor Trace decoder."));
3001
3002 TRY
3003 {
3004 btrace_maint_decode_pt (&btinfo->maint, decoder);
3005 }
3006 CATCH (except, RETURN_MASK_ALL)
3007 {
3008 pt_pkt_free_decoder (decoder);
3009
3010 if (except.reason < 0)
3011 throw_exception (except);
3012 }
3013 END_CATCH
3014
3015 pt_pkt_free_decoder (decoder);
3016 }
3017
3018 #endif /* defined (HAVE_LIBIPT) */
3019
3020 /* Update the packet maintenance information for BTINFO and store the
3021 low and high bounds into BEGIN and END, respectively.
3022 Store the current iterator state into FROM and TO. */
3023
3024 static void
3025 btrace_maint_update_packets (struct btrace_thread_info *btinfo,
3026 unsigned int *begin, unsigned int *end,
3027 unsigned int *from, unsigned int *to)
3028 {
3029 switch (btinfo->data.format)
3030 {
3031 default:
3032 *begin = 0;
3033 *end = 0;
3034 *from = 0;
3035 *to = 0;
3036 break;
3037
3038 case BTRACE_FORMAT_BTS:
3039 /* Nothing to do - we operate directly on BTINFO->DATA. */
3040 *begin = 0;
3041 *end = VEC_length (btrace_block_s, btinfo->data.variant.bts.blocks);
3042 *from = btinfo->maint.variant.bts.packet_history.begin;
3043 *to = btinfo->maint.variant.bts.packet_history.end;
3044 break;
3045
3046 #if defined (HAVE_LIBIPT)
3047 case BTRACE_FORMAT_PT:
3048 if (VEC_empty (btrace_pt_packet_s, btinfo->maint.variant.pt.packets))
3049 btrace_maint_update_pt_packets (btinfo);
3050
3051 *begin = 0;
3052 *end = VEC_length (btrace_pt_packet_s, btinfo->maint.variant.pt.packets);
3053 *from = btinfo->maint.variant.pt.packet_history.begin;
3054 *to = btinfo->maint.variant.pt.packet_history.end;
3055 break;
3056 #endif /* defined (HAVE_LIBIPT) */
3057 }
3058 }
3059
3060 /* Print packets in BTINFO from BEGIN (inclusive) until END (exclusive) and
3061 update the current iterator position. */
3062
3063 static void
3064 btrace_maint_print_packets (struct btrace_thread_info *btinfo,
3065 unsigned int begin, unsigned int end)
3066 {
3067 switch (btinfo->data.format)
3068 {
3069 default:
3070 break;
3071
3072 case BTRACE_FORMAT_BTS:
3073 {
3074 VEC (btrace_block_s) *blocks;
3075 unsigned int blk;
3076
3077 blocks = btinfo->data.variant.bts.blocks;
3078 for (blk = begin; blk < end; ++blk)
3079 {
3080 const btrace_block_s *block;
3081
3082 block = VEC_index (btrace_block_s, blocks, blk);
3083
3084 printf_unfiltered ("%u\tbegin: %s, end: %s\n", blk,
3085 core_addr_to_string_nz (block->begin),
3086 core_addr_to_string_nz (block->end));
3087 }
3088
3089 btinfo->maint.variant.bts.packet_history.begin = begin;
3090 btinfo->maint.variant.bts.packet_history.end = end;
3091 }
3092 break;
3093
3094 #if defined (HAVE_LIBIPT)
3095 case BTRACE_FORMAT_PT:
3096 {
3097 VEC (btrace_pt_packet_s) *packets;
3098 unsigned int pkt;
3099
3100 packets = btinfo->maint.variant.pt.packets;
3101 for (pkt = begin; pkt < end; ++pkt)
3102 {
3103 const struct btrace_pt_packet *packet;
3104
3105 packet = VEC_index (btrace_pt_packet_s, packets, pkt);
3106
3107 printf_unfiltered ("%u\t", pkt);
3108 printf_unfiltered ("0x%" PRIx64 "\t", packet->offset);
3109
3110 if (packet->errcode == pte_ok)
3111 pt_print_packet (&packet->packet);
3112 else
3113 printf_unfiltered ("[error: %s]", pt_errstr (packet->errcode));
3114
3115 printf_unfiltered ("\n");
3116 }
3117
3118 btinfo->maint.variant.pt.packet_history.begin = begin;
3119 btinfo->maint.variant.pt.packet_history.end = end;
3120 }
3121 break;
3122 #endif /* defined (HAVE_LIBIPT) */
3123 }
3124 }
3125
3126 /* Read a number from an argument string. */
3127
3128 static unsigned int
3129 get_uint (char **arg)
3130 {
3131 char *begin, *end, *pos;
3132 unsigned long number;
3133
3134 begin = *arg;
3135 pos = skip_spaces (begin);
3136
3137 if (!isdigit (*pos))
3138 error (_("Expected positive number, got: %s."), pos);
3139
3140 number = strtoul (pos, &end, 10);
3141 if (number > UINT_MAX)
3142 error (_("Number too big."));
3143
3144 *arg += (end - begin);
3145
3146 return (unsigned int) number;
3147 }
3148
3149 /* Read a context size from an argument string. */
3150
3151 static int
3152 get_context_size (char **arg)
3153 {
3154 char *pos;
3156
3157 pos = skip_spaces (*arg);
3158
3159 if (!isdigit (*pos))
3160 error (_("Expected positive number, got: %s."), pos);
3161
3162 return strtol (pos, arg, 10);
3163 }
3164
3165 /* Complain about junk at the end of an argument string. */
3166
3167 static void
3168 no_chunk (char *arg)
3169 {
3170 if (*arg != 0)
3171 error (_("Junk after argument: %s."), arg);
3172 }
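
/* For illustration, the argument forms accepted by the packet-history
   command below (packet numbers hypothetical):

     (none) or "+"  - print the ten packets after the previous print
     "-"            - print the ten packets before the previous print
     "100"          - print ten packets starting at packet 100
     "100,120"      - print packets 100 through 120
     "100,+10"      - print packets 100 through 109
     "100,-10"      - print packets 91 through 100  */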
3173
3174 /* The "maintenance btrace packet-history" command. */
3175
3176 static void
3177 maint_btrace_packet_history_cmd (char *arg, int from_tty)
3178 {
3179 struct btrace_thread_info *btinfo;
3180 struct thread_info *tp;
3181 unsigned int size, begin, end, from, to;
3182
3183 tp = find_thread_ptid (inferior_ptid);
3184 if (tp == NULL)
3185 error (_("No thread."));
3186
3187 size = 10;
3188 btinfo = &tp->btrace;
3189
3190 btrace_maint_update_packets (btinfo, &begin, &end, &from, &to);
3191 if (begin == end)
3192 {
3193 printf_unfiltered (_("No trace.\n"));
3194 return;
3195 }
3196
3197 if (arg == NULL || *arg == 0 || strcmp (arg, "+") == 0)
3198 {
3199 from = to;
3200
3201 if (end - from < size)
3202 size = end - from;
3203 to = from + size;
3204 }
3205 else if (strcmp (arg, "-") == 0)
3206 {
3207 to = from;
3208
3209 if (to - begin < size)
3210 size = to - begin;
3211 from = to - size;
3212 }
3213 else
3214 {
3215 from = get_uint (&arg);
3216 if (end <= from)
3217 error (_("'%u' is out of range."), from);
3218
3219 arg = skip_spaces (arg);
3220 if (*arg == ',')
3221 {
3222 arg = skip_spaces (++arg);
3223
3224 if (*arg == '+')
3225 {
3226 arg += 1;
3227 size = get_context_size (&arg);
3228
3229 no_chunk (arg);
3230
3231 if (end - from < size)
3232 size = end - from;
3233 to = from + size;
3234 }
3235 else if (*arg == '-')
3236 {
3237 arg += 1;
3238 size = get_context_size (&arg);
3239
3240 no_chunk (arg);
3241
3242 /* Include the packet given as first argument. */
3243 from += 1;
3244 to = from;
3245
3246 if (to - begin < size)
3247 size = to - begin;
3248 from = to - size;
3249 }
3250 else
3251 {
3252 to = get_uint (&arg);
3253
3254 /* Include the packet at the second argument and silently
3255 truncate the range. */
3256 if (to < end)
3257 to += 1;
3258 else
3259 to = end;
3260
3261 no_chunk (arg);
3262 }
3263 }
3264 else
3265 {
3266 no_chunk (arg);
3267
3268 if (end - from < size)
3269 size = end - from;
3270 to = from + size;
3271 }
3272
3273 dont_repeat ();
3274 }
3275
3276 btrace_maint_print_packets (btinfo, from, to);
3277 }
3278
3279 /* The "maintenance btrace clear-packet-history" command. */
3280
3281 static void
3282 maint_btrace_clear_packet_history_cmd (char *args, int from_tty)
3283 {
3284 struct btrace_thread_info *btinfo;
3285 struct thread_info *tp;
3286
3287 if (args != NULL && *args != 0)
3288 error (_("Invalid argument."));
3289
3290 tp = find_thread_ptid (inferior_ptid);
3291 if (tp == NULL)
3292 error (_("No thread."));
3293
3294 btinfo = &tp->btrace;
3295
3296 /* Must clear the maint data first - it references BTINFO->DATA, which is
3297 cleared next. */
3297 btrace_maint_clear (btinfo);
3298 btrace_data_clear (&btinfo->data);
3299 }
3300
3301 /* The "maintenance btrace clear" command. */
3302
3303 static void
3304 maint_btrace_clear_cmd (char *args, int from_tty)
3305 {
3306 struct btrace_thread_info *btinfo;
3307 struct thread_info *tp;
3308
3309 if (args != NULL && *args != 0)
3310 error (_("Invalid argument."));
3311
3312 tp = find_thread_ptid (inferior_ptid);
3313 if (tp == NULL)
3314 error (_("No thread."));
3315
3316 btrace_clear (tp);
3317 }
3318
3319 /* The "maintenance btrace" command. */
3320
3321 static void
3322 maint_btrace_cmd (char *args, int from_tty)
3323 {
3324 help_list (maint_btrace_cmdlist, "maintenance btrace ", all_commands,
3325 gdb_stdout);
3326 }
3327
3328 /* The "maintenance set btrace" command. */
3329
3330 static void
3331 maint_btrace_set_cmd (char *args, int from_tty)
3332 {
3333 help_list (maint_btrace_set_cmdlist, "maintenance set btrace ", all_commands,
3334 gdb_stdout);
3335 }
3336
3337 /* The "maintenance show btrace" command. */
3338
3339 static void
3340 maint_btrace_show_cmd (char *args, int from_tty)
3341 {
3342 help_list (maint_btrace_show_cmdlist, "maintenance show btrace ",
3343 all_commands, gdb_stdout);
3344 }
3345
3346 /* The "maintenance set btrace pt" command. */
3347
3348 static void
3349 maint_btrace_pt_set_cmd (char *args, int from_tty)
3350 {
3351 help_list (maint_btrace_pt_set_cmdlist, "maintenance set btrace pt ",
3352 all_commands, gdb_stdout);
3353 }
3354
3355 /* The "maintenance show btrace pt" command. */
3356
3357 static void
3358 maint_btrace_pt_show_cmd (char *args, int from_tty)
3359 {
3360 help_list (maint_btrace_pt_show_cmdlist, "maintenance show btrace pt ",
3361 all_commands, gdb_stdout);
3362 }
3363
3364 /* The "maintenance info btrace" command. */
3365
3366 static void
3367 maint_info_btrace_cmd (char *args, int from_tty)
3368 {
3369 struct btrace_thread_info *btinfo;
3370 struct thread_info *tp;
3371 const struct btrace_config *conf;
3372
3373 if (args != NULL && *args != 0)
3374 error (_("Invalid argument."));
3375
3376 tp = find_thread_ptid (inferior_ptid);
3377 if (tp == NULL)
3378 error (_("No thread."));
3379
3380 btinfo = &tp->btrace;
3381
3382 conf = btrace_conf (btinfo);
3383 if (conf == NULL)
3384 error (_("No btrace configuration."));
3385
3386 printf_unfiltered (_("Format: %s.\n"),
3387 btrace_format_string (conf->format));
3388
3389 switch (conf->format)
3390 {
3391 default:
3392 break;
3393
3394 case BTRACE_FORMAT_BTS:
3395 printf_unfiltered (_("Number of packets: %u.\n"),
3396 VEC_length (btrace_block_s,
3397 btinfo->data.variant.bts.blocks));
3398 break;
3399
3400 #if defined (HAVE_LIBIPT)
3401 case BTRACE_FORMAT_PT:
3402 {
3403 struct pt_version version;
3404
3405 version = pt_library_version ();
3406 printf_unfiltered (_("Version: %u.%u.%u%s.\n"), version.major,
3407 version.minor, version.build,
3408 version.ext != NULL ? version.ext : "");
3409
3410 btrace_maint_update_pt_packets (btinfo);
3411 printf_unfiltered (_("Number of packets: %u.\n"),
3412 VEC_length (btrace_pt_packet_s,
3413 btinfo->maint.variant.pt.packets));
3414 }
3415 break;
3416 #endif /* defined (HAVE_LIBIPT) */
3417 }
3418 }
3419
3420 /* The "maint show btrace pt skip-pad" show value function. */
3421
3422 static void
3423 show_maint_btrace_pt_skip_pad (struct ui_file *file, int from_tty,
3424 struct cmd_list_element *c,
3425 const char *value)
3426 {
3427 fprintf_filtered (file, _("Skip PAD packets is %s.\n"), value);
3428 }
3429
3430
3431 /* Initialize btrace maintenance commands. */
3432
3433 void _initialize_btrace (void);
3434 void
3435 _initialize_btrace (void)
3436 {
3437 add_cmd ("btrace", class_maintenance, maint_info_btrace_cmd,
3438 _("Info about branch tracing data."), &maintenanceinfolist);
3439
3440 add_prefix_cmd ("btrace", class_maintenance, maint_btrace_cmd,
3441 _("Branch tracing maintenance commands."),
3442 &maint_btrace_cmdlist, "maintenance btrace ",
3443 0, &maintenancelist);
3444
3445 add_prefix_cmd ("btrace", class_maintenance, maint_btrace_set_cmd, _("\
3446 Set branch tracing specific variables."),
3447 &maint_btrace_set_cmdlist, "maintenance set btrace ",
3448 0, &maintenance_set_cmdlist);
3449
3450 add_prefix_cmd ("pt", class_maintenance, maint_btrace_pt_set_cmd, _("\
3451 Set Intel Processor Trace specific variables."),
3452 &maint_btrace_pt_set_cmdlist, "maintenance set btrace pt ",
3453 0, &maint_btrace_set_cmdlist);
3454
3455 add_prefix_cmd ("btrace", class_maintenance, maint_btrace_show_cmd, _("\
3456 Show branch tracing specific variables."),
3457 &maint_btrace_show_cmdlist, "maintenance show btrace ",
3458 0, &maintenance_show_cmdlist);
3459
3460 add_prefix_cmd ("pt", class_maintenance, maint_btrace_pt_show_cmd, _("\
3461 Show Intel Processor Trace specific variables."),
3462 &maint_btrace_pt_show_cmdlist, "maintenance show btrace pt ",
3463 0, &maint_btrace_show_cmdlist);
3464
3465 add_setshow_boolean_cmd ("skip-pad", class_maintenance,
3466 &maint_btrace_pt_skip_pad, _("\
3467 Set whether PAD packets should be skipped in the btrace packet history."), _("\
3468 Show whether PAD packets should be skipped in the btrace packet history."),_("\
3469 When enabled, PAD packets are ignored in the btrace packet history."),
3470 NULL, show_maint_btrace_pt_skip_pad,
3471 &maint_btrace_pt_set_cmdlist,
3472 &maint_btrace_pt_show_cmdlist);
3473
3474 add_cmd ("packet-history", class_maintenance, maint_btrace_packet_history_cmd,
3475 _("Print the raw branch tracing data.\n\
3476 With no argument, print ten more packets after the previous ten-line print.\n\
3477 With '-' as argument, print ten packets before the previous ten-line print.\n\
3478 One argument specifies the starting packet of a ten-line print.\n\
3479 Two comma-separated arguments specify the starting and ending packets to \
3480 print.\n\
3481 With a '+' or '-' prefix, the second argument specifies the distance from \
3482 the first.\n"),
3483 &maint_btrace_cmdlist);
3484
3485 add_cmd ("clear-packet-history", class_maintenance,
3486 maint_btrace_clear_packet_history_cmd,
3487 _("Clears the branch tracing packet history.\n\
3488 Discards the raw branch tracing data but not the execution history data.\n\
3489 "),
3490 &maint_btrace_cmdlist);
3491
3492 add_cmd ("clear", class_maintenance, maint_btrace_clear_cmd,
3493 _("Clears the branch tracing data.\n\
3494 Discards the raw branch tracing data and the execution history data.\n\
3495 The next 'record' command will fetch the branch tracing data anew.\n\
3496 "),
3497 &maint_btrace_cmdlist);
3498
3499 }