btrace: Use std::vector in struct btrace_thread_information.
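This revision replaces the VEC-based list of function segments in struct
btrace_thread_info with a std::vector.  The code below relies on the new
member via btinfo->functions.clear () and btinfo->functions.push_back ().
A minimal sketch of the new member (the exact declaration lives in
btrace.h, which is not part of this file):

  /* Vector of pointers to decoded function segments in execution flow
     order.  */
  std::vector<btrace_function *> functions;
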
/* Branch trace support for GDB, the GNU debugger.

   Copyright (C) 2013-2017 Free Software Foundation, Inc.

   Contributed by Intel Corp. <markus.t.metzger@intel.com>

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "btrace.h"
#include "gdbthread.h"
#include "inferior.h"
#include "target.h"
#include "record.h"
#include "symtab.h"
#include "disasm.h"
#include "source.h"
#include "filenames.h"
#include "xml-support.h"
#include "regcache.h"
#include "rsp-low.h"
#include "gdbcmd.h"
#include "cli/cli-utils.h"

#include <inttypes.h>
#include <ctype.h>
#include <algorithm>

/* Command lists for btrace maintenance commands.  */
static struct cmd_list_element *maint_btrace_cmdlist;
static struct cmd_list_element *maint_btrace_set_cmdlist;
static struct cmd_list_element *maint_btrace_show_cmdlist;
static struct cmd_list_element *maint_btrace_pt_set_cmdlist;
static struct cmd_list_element *maint_btrace_pt_show_cmdlist;

/* Control whether to skip PAD packets when computing the packet history.  */
static int maint_btrace_pt_skip_pad = 1;

/* A vector of function segments.  */
typedef struct btrace_function *bfun_s;
DEF_VEC_P (bfun_s);

static void btrace_add_pc (struct thread_info *tp);

/* Print a record debug message.  Use do ... while (0) to avoid ambiguities
   when used in if statements.  */

#define DEBUG(msg, args...) \
  do \
    { \
      if (record_debug != 0) \
        fprintf_unfiltered (gdb_stdlog, \
                            "[btrace] " msg "\n", ##args); \
    } \
  while (0)

#define DEBUG_FTRACE(msg, args...) DEBUG ("[ftrace] " msg, ##args)

/* Return the function name of a recorded function segment for printing.
   This function never returns NULL.  */

static const char *
ftrace_print_function_name (const struct btrace_function *bfun)
{
  struct minimal_symbol *msym;
  struct symbol *sym;

  msym = bfun->msym;
  sym = bfun->sym;

  if (sym != NULL)
    return SYMBOL_PRINT_NAME (sym);

  if (msym != NULL)
    return MSYMBOL_PRINT_NAME (msym);

  return "<unknown>";
}

/* Return the file name of a recorded function segment for printing.
   This function never returns NULL.  */

static const char *
ftrace_print_filename (const struct btrace_function *bfun)
{
  struct symbol *sym;
  const char *filename;

  sym = bfun->sym;

  if (sym != NULL)
    filename = symtab_to_filename_for_display (symbol_symtab (sym));
  else
    filename = "<unknown>";

  return filename;
}

/* Return a string representation of the address of an instruction.
   This function never returns NULL.  */

static const char *
ftrace_print_insn_addr (const struct btrace_insn *insn)
{
  if (insn == NULL)
    return "<nil>";

  return core_addr_to_string_nz (insn->pc);
}

/* Print an ftrace debug status message.  */

static void
ftrace_debug (const struct btrace_function *bfun, const char *prefix)
{
  const char *fun, *file;
  unsigned int ibegin, iend;
  int level;

  fun = ftrace_print_function_name (bfun);
  file = ftrace_print_filename (bfun);
  level = bfun->level;

  ibegin = bfun->insn_offset;
  iend = ibegin + VEC_length (btrace_insn_s, bfun->insn);

  DEBUG_FTRACE ("%s: fun = %s, file = %s, level = %d, insn = [%u; %u)",
                prefix, fun, file, level, ibegin, iend);
}

/* Return the number of instructions in a given function call segment.  */

static unsigned int
ftrace_call_num_insn (const struct btrace_function *bfun)
{
  if (bfun == NULL)
    return 0;

  /* A gap is always counted as one instruction.  */
  if (bfun->errcode != 0)
    return 1;

  return VEC_length (btrace_insn_s, bfun->insn);
}

/* Return non-zero if BFUN does not match MFUN and FUN,
   return zero otherwise.  */

static int
ftrace_function_switched (const struct btrace_function *bfun,
                          const struct minimal_symbol *mfun,
                          const struct symbol *fun)
{
  struct minimal_symbol *msym;
  struct symbol *sym;

  msym = bfun->msym;
  sym = bfun->sym;

  /* If the minimal symbol changed, we certainly switched functions.  */
  if (mfun != NULL && msym != NULL
      && strcmp (MSYMBOL_LINKAGE_NAME (mfun), MSYMBOL_LINKAGE_NAME (msym)) != 0)
    return 1;

  /* If the symbol changed, we certainly switched functions.  */
  if (fun != NULL && sym != NULL)
    {
      const char *bfname, *fname;

      /* Check the function name.  */
      if (strcmp (SYMBOL_LINKAGE_NAME (fun), SYMBOL_LINKAGE_NAME (sym)) != 0)
        return 1;

      /* Check the location of those functions, as well.  */
      bfname = symtab_to_fullname (symbol_symtab (sym));
      fname = symtab_to_fullname (symbol_symtab (fun));
      if (filename_cmp (fname, bfname) != 0)
        return 1;
    }

  /* If we lost symbol information, we switched functions.  */
  if (!(msym == NULL && sym == NULL) && mfun == NULL && fun == NULL)
    return 1;

  /* If we gained symbol information, we switched functions.  */
  if (msym == NULL && sym == NULL && !(mfun == NULL && fun == NULL))
    return 1;

  return 0;
}

/* Allocate and initialize a new branch trace function segment.
   PREV is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_function (struct btrace_function *prev,
                     struct minimal_symbol *mfun,
                     struct symbol *fun)
{
  struct btrace_function *bfun;

  bfun = XCNEW (struct btrace_function);

  bfun->msym = mfun;
  bfun->sym = fun;
  bfun->flow.prev = prev;

  if (prev == NULL)
    {
      /* Start counting at one.  */
      bfun->number = 1;
      bfun->insn_offset = 1;
    }
  else
    {
      gdb_assert (prev->flow.next == NULL);
      prev->flow.next = bfun;

      bfun->number = prev->number + 1;
      bfun->insn_offset = prev->insn_offset + ftrace_call_num_insn (prev);
      bfun->level = prev->level;
    }

  return bfun;
}

/* Update the UP field of a function segment.  */

static void
ftrace_update_caller (struct btrace_function *bfun,
                      struct btrace_function *caller,
                      enum btrace_function_flag flags)
{
  if (bfun->up != NULL)
    ftrace_debug (bfun, "updating caller");

  bfun->up = caller;
  bfun->flags = flags;

  ftrace_debug (bfun, "set caller");
  ftrace_debug (caller, "..to");
}

/* Fix up the caller for all segments of a function.  */

static void
ftrace_fixup_caller (struct btrace_function *bfun,
                     struct btrace_function *caller,
                     enum btrace_function_flag flags)
{
  struct btrace_function *prev, *next;

  ftrace_update_caller (bfun, caller, flags);

  /* Update all function segments belonging to the same function.  */
  for (prev = bfun->segment.prev; prev != NULL; prev = prev->segment.prev)
    ftrace_update_caller (prev, caller, flags);

  for (next = bfun->segment.next; next != NULL; next = next->segment.next)
    ftrace_update_caller (next, caller, flags);
}

/* Add a new function segment for a call.
   CALLER is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_call (struct btrace_function *caller,
                 struct minimal_symbol *mfun,
                 struct symbol *fun)
{
  struct btrace_function *bfun;

  bfun = ftrace_new_function (caller, mfun, fun);
  bfun->up = caller;
  bfun->level += 1;

  ftrace_debug (bfun, "new call");

  return bfun;
}

/* Add a new function segment for a tail call.
   CALLER is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_tailcall (struct btrace_function *caller,
                     struct minimal_symbol *mfun,
                     struct symbol *fun)
{
  struct btrace_function *bfun;

  bfun = ftrace_new_function (caller, mfun, fun);
  bfun->up = caller;
  bfun->level += 1;
  bfun->flags |= BFUN_UP_LINKS_TO_TAILCALL;

  ftrace_debug (bfun, "new tail call");

  return bfun;
}

/* Return the caller of BFUN or NULL if there is none.  This function skips
   tail calls in the call chain.  */

static struct btrace_function *
ftrace_get_caller (struct btrace_function *bfun)
{
  for (; bfun != NULL; bfun = bfun->up)
    if ((bfun->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
      return bfun->up;

  return NULL;
}

/* Find the innermost caller in the back trace of BFUN with MFUN/FUN
   symbol information.  */

static struct btrace_function *
ftrace_find_caller (struct btrace_function *bfun,
                    struct minimal_symbol *mfun,
                    struct symbol *fun)
{
  for (; bfun != NULL; bfun = bfun->up)
    {
      /* Skip functions with incompatible symbol information.  */
      if (ftrace_function_switched (bfun, mfun, fun))
        continue;

      /* This is the function segment we're looking for.  */
      break;
    }

  return bfun;
}

/* Find the innermost caller in the back trace of BFUN, skipping all
   function segments that do not end with a call instruction (e.g.
   tail calls ending with a jump).  */

static struct btrace_function *
ftrace_find_call (struct btrace_function *bfun)
{
  for (; bfun != NULL; bfun = bfun->up)
    {
      struct btrace_insn *last;

      /* Skip gaps.  */
      if (bfun->errcode != 0)
        continue;

      last = VEC_last (btrace_insn_s, bfun->insn);

      if (last->iclass == BTRACE_INSN_CALL)
        break;
    }

  return bfun;
}

/* Add a continuation segment for a function into which we return.
   PREV is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_return (struct btrace_function *prev,
                   struct minimal_symbol *mfun,
                   struct symbol *fun)
{
  struct btrace_function *bfun, *caller;

  bfun = ftrace_new_function (prev, mfun, fun);

  /* It is important to start at PREV's caller.  Otherwise, we might find
     PREV itself, if PREV is a recursive function.  */
  caller = ftrace_find_caller (prev->up, mfun, fun);
  if (caller != NULL)
    {
      /* The caller of PREV is the preceding btrace function segment in this
         function instance.  */
      gdb_assert (caller->segment.next == NULL);

      caller->segment.next = bfun;
      bfun->segment.prev = caller;

      /* Maintain the function level.  */
      bfun->level = caller->level;

      /* Maintain the call stack.  */
      bfun->up = caller->up;
      bfun->flags = caller->flags;

      ftrace_debug (bfun, "new return");
    }
  else
    {
      /* We did not find a caller.  This could mean that something went
         wrong or that the call is simply not included in the trace.  */

      /* Let's search for some actual call.  */
      caller = ftrace_find_call (prev->up);
      if (caller == NULL)
        {
          /* There is no call in PREV's back trace.  We assume that the
             branch trace did not include it.  */

          /* Let's find the topmost function and add a new caller for it.
             This should handle a series of initial tail calls.  */
          while (prev->up != NULL)
            prev = prev->up;

          bfun->level = prev->level - 1;

          /* Fix up the call stack for PREV.  */
          ftrace_fixup_caller (prev, bfun, BFUN_UP_LINKS_TO_RET);

          ftrace_debug (bfun, "new return - no caller");
        }
      else
        {
          /* There is a call in PREV's back trace to which we should have
             returned but didn't.  Let's start a new, separate back trace
             from PREV's level.  */
          bfun->level = prev->level - 1;

          /* We fix up the back trace for PREV but leave other function
             segments on the same level as they are.
             This should handle things like schedule () correctly where we're
             switching contexts.  */
          prev->up = bfun;
          prev->flags = BFUN_UP_LINKS_TO_RET;

          ftrace_debug (bfun, "new return - unknown caller");
        }
    }

  return bfun;
}

/* Add a new function segment for a function switch.
   PREV is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_switch (struct btrace_function *prev,
                   struct minimal_symbol *mfun,
                   struct symbol *fun)
{
  struct btrace_function *bfun;

  /* This is an unexplained function switch.  We can't really be sure about the
     call stack, yet the best I can think of right now is to preserve it.  */
  bfun = ftrace_new_function (prev, mfun, fun);
  bfun->up = prev->up;
  bfun->flags = prev->flags;

  ftrace_debug (bfun, "new switch");

  return bfun;
}

/* Add a new function segment for a gap in the trace due to a decode error.
   PREV is the chronologically preceding function segment.
   ERRCODE is the format-specific error code.  */

static struct btrace_function *
ftrace_new_gap (struct btrace_function *prev, int errcode)
{
  struct btrace_function *bfun;

  /* We hijack PREV if it was empty.  */
  if (prev != NULL && prev->errcode == 0
      && VEC_empty (btrace_insn_s, prev->insn))
    bfun = prev;
  else
    bfun = ftrace_new_function (prev, NULL, NULL);

  bfun->errcode = errcode;

  ftrace_debug (bfun, "new gap");

  return bfun;
}

/* Update BFUN with respect to the instruction at PC.  This may create new
   function segments.
   Return the chronologically latest function segment, never NULL.  */

static struct btrace_function *
ftrace_update_function (struct btrace_function *bfun, CORE_ADDR pc)
{
  struct bound_minimal_symbol bmfun;
  struct minimal_symbol *mfun;
  struct symbol *fun;
  struct btrace_insn *last;

  /* Try to determine the function we're in.  We use both types of symbols
     to avoid surprises when we sometimes get a full symbol and sometimes
     only a minimal symbol.  */
  fun = find_pc_function (pc);
  bmfun = lookup_minimal_symbol_by_pc (pc);
  mfun = bmfun.minsym;

  if (fun == NULL && mfun == NULL)
    DEBUG_FTRACE ("no symbol at %s", core_addr_to_string_nz (pc));

  /* If we didn't have a function or if we had a gap before, we create one.  */
  if (bfun == NULL || bfun->errcode != 0)
    return ftrace_new_function (bfun, mfun, fun);

  /* Check the last instruction, if we have one.
     We do this check first, since it allows us to fill in the call stack
     links in addition to the normal flow links.  */
  last = NULL;
  if (!VEC_empty (btrace_insn_s, bfun->insn))
    last = VEC_last (btrace_insn_s, bfun->insn);

  if (last != NULL)
    {
      switch (last->iclass)
        {
        case BTRACE_INSN_RETURN:
          {
            const char *fname;

            /* On some systems, _dl_runtime_resolve returns to the resolved
               function instead of jumping to it.  From our perspective,
               however, this is a tailcall.
               If we treated it as return, we wouldn't be able to find the
               resolved function in our stack back trace.  Hence, we would
               lose the current stack back trace and start anew with an empty
               back trace.  When the resolved function returns, we would then
               create a stack back trace with the same function names but
               different frame ids.  This will confuse stepping.  */
            fname = ftrace_print_function_name (bfun);
            if (strcmp (fname, "_dl_runtime_resolve") == 0)
              return ftrace_new_tailcall (bfun, mfun, fun);

            return ftrace_new_return (bfun, mfun, fun);
          }

        case BTRACE_INSN_CALL:
          /* Ignore calls to the next instruction.  They are used for PIC.  */
          if (last->pc + last->size == pc)
            break;

          return ftrace_new_call (bfun, mfun, fun);

        case BTRACE_INSN_JUMP:
          {
            CORE_ADDR start;

            start = get_pc_function_start (pc);

            /* A jump to the start of a function is (typically) a tail call.  */
            if (start == pc)
              return ftrace_new_tailcall (bfun, mfun, fun);

            /* If we can't determine the function for PC, we treat a jump at
               the end of the block as a tail call if we're switching functions
               and as an intra-function branch if we're not.  */
            if (start == 0 && ftrace_function_switched (bfun, mfun, fun))
              return ftrace_new_tailcall (bfun, mfun, fun);

            break;
          }
        }
    }

  /* Check if we're switching functions for some other reason.  */
  if (ftrace_function_switched (bfun, mfun, fun))
    {
      DEBUG_FTRACE ("switching from %s in %s at %s",
                    ftrace_print_insn_addr (last),
                    ftrace_print_function_name (bfun),
                    ftrace_print_filename (bfun));

      return ftrace_new_switch (bfun, mfun, fun);
    }

  return bfun;
}

/* Add the instruction at PC to BFUN's instructions.  */

static void
ftrace_update_insns (struct btrace_function *bfun,
                     const struct btrace_insn *insn)
{
  VEC_safe_push (btrace_insn_s, bfun->insn, insn);

  if (record_debug > 1)
    ftrace_debug (bfun, "update insn");
}

/* Classify the instruction at PC.  */

static enum btrace_insn_class
ftrace_classify_insn (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  enum btrace_insn_class iclass;

  iclass = BTRACE_INSN_OTHER;
  TRY
    {
      if (gdbarch_insn_is_call (gdbarch, pc))
        iclass = BTRACE_INSN_CALL;
      else if (gdbarch_insn_is_ret (gdbarch, pc))
        iclass = BTRACE_INSN_RETURN;
      else if (gdbarch_insn_is_jump (gdbarch, pc))
        iclass = BTRACE_INSN_JUMP;
    }
  CATCH (error, RETURN_MASK_ERROR)
    {
    }
  END_CATCH

  return iclass;
}

/* Try to match the back trace at LHS to the back trace at RHS.  Returns the
   number of matching function segments or zero if the back traces do not
   match.  */

static int
ftrace_match_backtrace (struct btrace_function *lhs,
                        struct btrace_function *rhs)
{
  int matches;

  for (matches = 0; lhs != NULL && rhs != NULL; ++matches)
    {
      if (ftrace_function_switched (lhs, rhs->msym, rhs->sym))
        return 0;

      lhs = ftrace_get_caller (lhs);
      rhs = ftrace_get_caller (rhs);
    }

  return matches;
}

/* Add ADJUSTMENT to the level of BFUN and succeeding function segments.  */

static void
ftrace_fixup_level (struct btrace_function *bfun, int adjustment)
{
  if (adjustment == 0)
    return;

  DEBUG_FTRACE ("fixup level (%+d)", adjustment);
  ftrace_debug (bfun, "..bfun");

  for (; bfun != NULL; bfun = bfun->flow.next)
    bfun->level += adjustment;
}

/* Recompute the global level offset.  Traverse the function trace and compute
   the global level offset as the negative of the minimal function level.  */

static void
ftrace_compute_global_level_offset (struct btrace_thread_info *btinfo)
{
  struct btrace_function *bfun, *end;
  int level;

  if (btinfo == NULL)
    return;

  bfun = btinfo->begin;
  if (bfun == NULL)
    return;

  /* The last function segment contains the current instruction, which is not
     really part of the trace.  If it contains just this one instruction, we
     stop when we reach it; otherwise, we let the below loop run to the end.  */
  end = btinfo->end;
  if (VEC_length (btrace_insn_s, end->insn) > 1)
    end = NULL;

  level = INT_MAX;
  for (; bfun != end; bfun = bfun->flow.next)
    level = std::min (level, bfun->level);

  DEBUG_FTRACE ("setting global level offset: %d", -level);
  btinfo->level = -level;
}

/* Connect the function segments PREV and NEXT in a bottom-to-top walk as in
   ftrace_connect_backtrace.  */

static void
ftrace_connect_bfun (struct btrace_function *prev,
                     struct btrace_function *next)
{
  DEBUG_FTRACE ("connecting...");
  ftrace_debug (prev, "..prev");
  ftrace_debug (next, "..next");

  /* The function segments are not yet connected.  */
  gdb_assert (prev->segment.next == NULL);
  gdb_assert (next->segment.prev == NULL);

  prev->segment.next = next;
  next->segment.prev = prev;

  /* We may have moved NEXT to a different function level.  */
  ftrace_fixup_level (next, prev->level - next->level);

  /* If we run out of back trace for one, let's use the other's.  */
  if (prev->up == NULL)
    {
      if (next->up != NULL)
        {
          DEBUG_FTRACE ("using next's callers");
          ftrace_fixup_caller (prev, next->up, next->flags);
        }
    }
  else if (next->up == NULL)
    {
      if (prev->up != NULL)
        {
          DEBUG_FTRACE ("using prev's callers");
          ftrace_fixup_caller (next, prev->up, prev->flags);
        }
    }
  else
    {
      /* PREV may have a tailcall caller, NEXT can't.  If it does, fixup the up
         link to add the tail callers to NEXT's back trace.

         This removes NEXT->UP from NEXT's back trace.  It will be added back
         when connecting NEXT and PREV's callers - provided they exist.

         If PREV's back trace consists of a series of tail calls without an
         actual call, there will be no further connection and NEXT's caller will
         be removed for good.  To catch this case, we handle it here and connect
         the top of PREV's back trace to NEXT's caller.  */
      if ((prev->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
        {
          struct btrace_function *caller;
          btrace_function_flags flags;

          /* We checked NEXT->UP above so CALLER can't be NULL.  */
          caller = next->up;
          flags = next->flags;

          DEBUG_FTRACE ("adding prev's tail calls to next");

          ftrace_fixup_caller (next, prev->up, prev->flags);

          for (prev = prev->up; prev != NULL; prev = prev->up)
            {
              /* At the end of PREV's back trace, continue with CALLER.  */
              if (prev->up == NULL)
                {
                  DEBUG_FTRACE ("fixing up link for tailcall chain");
                  ftrace_debug (prev, "..top");
                  ftrace_debug (caller, "..up");

                  ftrace_fixup_caller (prev, caller, flags);

                  /* If we skipped any tail calls, this may move CALLER to a
                     different function level.

                     Note that changing CALLER's level is only OK because we
                     know that this is the last iteration of the bottom-to-top
                     walk in ftrace_connect_backtrace.

                     Otherwise we will fix up CALLER's level when we connect it
                     to PREV's caller in the next iteration.  */
                  ftrace_fixup_level (caller, prev->level - caller->level - 1);
                  break;
                }

              /* There's nothing to do if we find a real call.  */
              if ((prev->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
                {
                  DEBUG_FTRACE ("will fix up link in next iteration");
                  break;
                }
            }
        }
    }
}

/* Connect function segments on the same level in the back trace at LHS and RHS.
   The back traces at LHS and RHS are expected to match according to
   ftrace_match_backtrace.  */

static void
ftrace_connect_backtrace (struct btrace_function *lhs,
                          struct btrace_function *rhs)
{
  while (lhs != NULL && rhs != NULL)
    {
      struct btrace_function *prev, *next;

      gdb_assert (!ftrace_function_switched (lhs, rhs->msym, rhs->sym));

      /* Connecting LHS and RHS may change the up link.  */
      prev = lhs;
      next = rhs;

      lhs = ftrace_get_caller (lhs);
      rhs = ftrace_get_caller (rhs);

      ftrace_connect_bfun (prev, next);
    }
}

/* Bridge the gap between two function segments left and right of a gap if their
   respective back traces match in at least MIN_MATCHES functions.

   Returns non-zero if the gap could be bridged, zero otherwise.  */

static int
ftrace_bridge_gap (struct btrace_function *lhs, struct btrace_function *rhs,
                   int min_matches)
{
  struct btrace_function *best_l, *best_r, *cand_l, *cand_r;
  int best_matches;

  DEBUG_FTRACE ("checking gap at insn %u (req matches: %d)",
                rhs->insn_offset - 1, min_matches);

  best_matches = 0;
  best_l = NULL;
  best_r = NULL;

  /* We search the back traces of LHS and RHS for valid connections and connect
     the two function segments that give the longest combined back trace.  */

  for (cand_l = lhs; cand_l != NULL; cand_l = ftrace_get_caller (cand_l))
    for (cand_r = rhs; cand_r != NULL; cand_r = ftrace_get_caller (cand_r))
      {
        int matches;

        matches = ftrace_match_backtrace (cand_l, cand_r);
        if (best_matches < matches)
          {
            best_matches = matches;
            best_l = cand_l;
            best_r = cand_r;
          }
      }

  /* We need at least MIN_MATCHES matches.  */
  gdb_assert (min_matches > 0);
  if (best_matches < min_matches)
    return 0;

  DEBUG_FTRACE ("..matches: %d", best_matches);

  /* We will fix up the level of BEST_R and succeeding function segments such
     that BEST_R's level matches BEST_L's when we connect BEST_L to BEST_R.

     This will ignore the level of RHS and following if BEST_R != RHS.  I.e. if
     BEST_R is a successor of RHS in the back trace of RHS (phases 1 and 3).

     To catch this, we already fix up the level here where we can start at RHS
     instead of at BEST_R.  We will ignore the level fixup when connecting
     BEST_L to BEST_R as they will already be on the same level.  */
  ftrace_fixup_level (rhs, best_l->level - best_r->level);

  ftrace_connect_backtrace (best_l, best_r);

  return best_matches;
}

/* Try to bridge gaps due to overflow or decode errors by connecting the
   function segments that are separated by the gap.  */

static void
btrace_bridge_gaps (struct thread_info *tp, VEC (bfun_s) **gaps)
{
  VEC (bfun_s) *remaining;
  struct cleanup *old_chain;
  int min_matches;

  DEBUG ("bridge gaps");

  remaining = NULL;
  old_chain = make_cleanup (VEC_cleanup (bfun_s), &remaining);

  /* We require a minimum amount of matches for bridging a gap.  The number of
     required matches will be lowered with each iteration.

     The more matches the higher our confidence that the bridging is correct.
     For big gaps or small traces, however, it may not be feasible to require a
     high number of matches.  */
  for (min_matches = 5; min_matches > 0; --min_matches)
    {
      /* Let's try to bridge as many gaps as we can.  In some cases, we need to
         skip a gap and revisit it again after we closed later gaps.  */
      while (!VEC_empty (bfun_s, *gaps))
        {
          struct btrace_function *gap;
          unsigned int idx;

          for (idx = 0; VEC_iterate (bfun_s, *gaps, idx, gap); ++idx)
            {
              struct btrace_function *lhs, *rhs;
              int bridged;

              /* We may have a sequence of gaps if we run from one error into
                 the next as we try to re-sync onto the trace stream.  Ignore
                 all but the leftmost gap in such a sequence.

                 Also ignore gaps at the beginning of the trace.  */
              lhs = gap->flow.prev;
              if (lhs == NULL || lhs->errcode != 0)
                continue;

              /* Skip gaps to the right.  */
              for (rhs = gap->flow.next; rhs != NULL; rhs = rhs->flow.next)
                if (rhs->errcode == 0)
                  break;

              /* Ignore gaps at the end of the trace.  */
              if (rhs == NULL)
                continue;

              bridged = ftrace_bridge_gap (lhs, rhs, min_matches);

              /* Keep track of gaps we were not able to bridge and try again.
                 If we just pushed them to the end of GAPS we would risk an
                 infinite loop in case we simply cannot bridge a gap.  */
              if (bridged == 0)
                VEC_safe_push (bfun_s, remaining, gap);
            }

          /* Let's see if we made any progress.  */
          if (VEC_length (bfun_s, remaining) == VEC_length (bfun_s, *gaps))
            break;

          VEC_free (bfun_s, *gaps);

          *gaps = remaining;
          remaining = NULL;
        }

      /* We get here if either GAPS is empty or if GAPS equals REMAINING.  */
      if (VEC_empty (bfun_s, *gaps))
        break;

      VEC_free (bfun_s, remaining);
    }

  do_cleanups (old_chain);

  /* We may omit this in some cases.  Not sure it is worth the extra
     complication, though.  */
  ftrace_compute_global_level_offset (&tp->btrace);
}

/* Compute the function branch trace from BTS trace.  */

static void
btrace_compute_ftrace_bts (struct thread_info *tp,
                           const struct btrace_data_bts *btrace,
                           VEC (bfun_s) **gaps)
{
  struct btrace_thread_info *btinfo;
  struct btrace_function *begin, *end;
  struct gdbarch *gdbarch;
  unsigned int blk;
  int level;

  gdbarch = target_gdbarch ();
  btinfo = &tp->btrace;
  begin = btinfo->begin;
  end = btinfo->end;
  level = begin != NULL ? -btinfo->level : INT_MAX;
  blk = VEC_length (btrace_block_s, btrace->blocks);

  while (blk != 0)
    {
      btrace_block_s *block;
      CORE_ADDR pc;

      blk -= 1;

      block = VEC_index (btrace_block_s, btrace->blocks, blk);
      pc = block->begin;

      for (;;)
        {
          struct btrace_insn insn;
          int size;

          /* We should hit the end of the block.  Warn if we went too far.  */
          if (block->end < pc)
            {
              /* Indicate the gap in the trace.  */
              end = ftrace_new_gap (end, BDE_BTS_OVERFLOW);
              if (begin == NULL)
                begin = end;

              VEC_safe_push (bfun_s, *gaps, end);

              warning (_("Recorded trace may be corrupted at instruction "
                         "%u (pc = %s)."), end->insn_offset - 1,
                       core_addr_to_string_nz (pc));

              break;
            }

          end = ftrace_update_function (end, pc);
          if (begin == NULL)
            begin = end;

          /* Maintain the function level offset.
             For all but the last block, we do it here.  */
          if (blk != 0)
            level = std::min (level, end->level);

          size = 0;
          TRY
            {
              size = gdb_insn_length (gdbarch, pc);
            }
          CATCH (error, RETURN_MASK_ERROR)
            {
            }
          END_CATCH

          insn.pc = pc;
          insn.size = size;
          insn.iclass = ftrace_classify_insn (gdbarch, pc);
          insn.flags = 0;

          ftrace_update_insns (end, &insn);

          /* We're done once we pushed the instruction at the end.  */
          if (block->end == pc)
            break;

          /* We can't continue if we fail to compute the size.  */
          if (size <= 0)
            {
              /* Indicate the gap in the trace.  We just added INSN so we're
                 not at the beginning.  */
              end = ftrace_new_gap (end, BDE_BTS_INSN_SIZE);

              VEC_safe_push (bfun_s, *gaps, end);

              warning (_("Recorded trace may be incomplete at instruction %u "
                         "(pc = %s)."), end->insn_offset - 1,
                       core_addr_to_string_nz (pc));

              break;
            }

          pc += size;

          /* Maintain the function level offset.
             For the last block, we do it here to not consider the last
             instruction.
             Since the last instruction corresponds to the current instruction
             and is not really part of the execution history, it shouldn't
             affect the level.  */
          if (blk == 0)
            level = std::min (level, end->level);
        }
    }

  btinfo->begin = begin;
  btinfo->end = end;

  /* LEVEL is the minimal function level of all btrace function segments.
     Define the global level offset to -LEVEL so all function levels are
     normalized to start at zero.  */
  btinfo->level = -level;
}

#if defined (HAVE_LIBIPT)

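/* Translate a libipt instruction classification into the corresponding
   btrace instruction class.  */
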
static enum btrace_insn_class
pt_reclassify_insn (enum pt_insn_class iclass)
{
  switch (iclass)
    {
    case ptic_call:
      return BTRACE_INSN_CALL;

    case ptic_return:
      return BTRACE_INSN_RETURN;

    case ptic_jump:
      return BTRACE_INSN_JUMP;

    default:
      return BTRACE_INSN_OTHER;
    }
}

/* Return the btrace instruction flags for INSN.  */

static btrace_insn_flags
pt_btrace_insn_flags (const struct pt_insn &insn)
{
  btrace_insn_flags flags = 0;

  if (insn.speculative)
    flags |= BTRACE_INSN_FLAG_SPECULATIVE;

  return flags;
}

/* Return the btrace instruction for INSN.  */

static btrace_insn
pt_btrace_insn (const struct pt_insn &insn)
{
  return {(CORE_ADDR) insn.ip, (gdb_byte) insn.size,
          pt_reclassify_insn (insn.iclass),
          pt_btrace_insn_flags (insn)};
}

/* Add function branch trace using DECODER.  */

static void
ftrace_add_pt (struct pt_insn_decoder *decoder,
               struct btrace_function **pbegin,
               struct btrace_function **pend, int *plevel,
               VEC (bfun_s) **gaps)
{
  struct btrace_function *begin, *end, *upd;
  uint64_t offset;
  int errcode;

  begin = *pbegin;
  end = *pend;
  for (;;)
    {
      struct pt_insn insn;

      errcode = pt_insn_sync_forward (decoder);
      if (errcode < 0)
        {
          if (errcode != -pte_eos)
            warning (_("Failed to synchronize onto the Intel Processor "
                       "Trace stream: %s."), pt_errstr (pt_errcode (errcode)));
          break;
        }

      for (;;)
        {
          errcode = pt_insn_next (decoder, &insn, sizeof (insn));
          if (errcode < 0)
            break;

          /* Look for gaps in the trace - unless we're at the beginning.  */
          if (begin != NULL)
            {
              /* Tracing is disabled and re-enabled each time we enter the
                 kernel.  Most times, we continue from the same instruction we
                 stopped before.  This is indicated via the RESUMED instruction
                 flag.  The ENABLED instruction flag means that we continued
                 from some other instruction.  Indicate this as a trace gap.  */
              if (insn.enabled)
                {
                  *pend = end = ftrace_new_gap (end, BDE_PT_DISABLED);

                  VEC_safe_push (bfun_s, *gaps, end);

                  pt_insn_get_offset (decoder, &offset);

                  warning (_("Non-contiguous trace at instruction %u (offset "
                             "= 0x%" PRIx64 ", pc = 0x%" PRIx64 ")."),
                           end->insn_offset - 1, offset, insn.ip);
                }
            }

          /* Indicate trace overflows.  */
          if (insn.resynced)
            {
              *pend = end = ftrace_new_gap (end, BDE_PT_OVERFLOW);
              if (begin == NULL)
                *pbegin = begin = end;

              VEC_safe_push (bfun_s, *gaps, end);

              pt_insn_get_offset (decoder, &offset);

              warning (_("Overflow at instruction %u (offset = 0x%" PRIx64
                         ", pc = 0x%" PRIx64 ")."), end->insn_offset - 1,
                       offset, insn.ip);
            }

          upd = ftrace_update_function (end, insn.ip);
          if (upd != end)
            {
              *pend = end = upd;

              if (begin == NULL)
                *pbegin = begin = upd;
            }

          /* Maintain the function level offset.  */
          *plevel = std::min (*plevel, end->level);

          btrace_insn btinsn = pt_btrace_insn (insn);
          ftrace_update_insns (end, &btinsn);
        }

      if (errcode == -pte_eos)
        break;

      /* Indicate the gap in the trace.  */
      *pend = end = ftrace_new_gap (end, errcode);
      if (begin == NULL)
        *pbegin = begin = end;

      VEC_safe_push (bfun_s, *gaps, end);

      pt_insn_get_offset (decoder, &offset);

      warning (_("Decode error (%d) at instruction %u (offset = 0x%" PRIx64
                 ", pc = 0x%" PRIx64 "): %s."), errcode, end->insn_offset - 1,
               offset, insn.ip, pt_errstr (pt_errcode (errcode)));
    }
}

/* A callback function to allow the trace decoder to read the inferior's
   memory.  */

static int
btrace_pt_readmem_callback (gdb_byte *buffer, size_t size,
                            const struct pt_asid *asid, uint64_t pc,
                            void *context)
{
  int result, errcode;

  result = (int) size;
  TRY
    {
      errcode = target_read_code ((CORE_ADDR) pc, buffer, size);
      if (errcode != 0)
        result = -pte_nomap;
    }
  CATCH (error, RETURN_MASK_ERROR)
    {
      result = -pte_nomap;
    }
  END_CATCH

  return result;
}

/* Translate the vendor from one enum to another.  */

static enum pt_cpu_vendor
pt_translate_cpu_vendor (enum btrace_cpu_vendor vendor)
{
  switch (vendor)
    {
    default:
      return pcv_unknown;

    case CV_INTEL:
      return pcv_intel;
    }
}

/* Finalize the function branch trace after decode.  */

static void
btrace_finalize_ftrace_pt (struct pt_insn_decoder *decoder,
                           struct thread_info *tp, int level)
{
  pt_insn_free_decoder (decoder);

  /* LEVEL is the minimal function level of all btrace function segments.
     Define the global level offset to -LEVEL so all function levels are
     normalized to start at zero.  */
  tp->btrace.level = -level;

  /* Add a single last instruction entry for the current PC.
     This allows us to compute the backtrace at the current PC using both
     standard unwind and btrace unwind.
     This extra entry is ignored by all record commands.  */
  btrace_add_pc (tp);
}

/* Compute the function branch trace from Intel Processor Trace
   format.  */

static void
btrace_compute_ftrace_pt (struct thread_info *tp,
                          const struct btrace_data_pt *btrace,
                          VEC (bfun_s) **gaps)
{
  struct btrace_thread_info *btinfo;
  struct pt_insn_decoder *decoder;
  struct pt_config config;
  int level, errcode;

  if (btrace->size == 0)
    return;

  btinfo = &tp->btrace;
  level = btinfo->begin != NULL ? -btinfo->level : INT_MAX;

  pt_config_init (&config);
  config.begin = btrace->data;
  config.end = btrace->data + btrace->size;

  config.cpu.vendor = pt_translate_cpu_vendor (btrace->config.cpu.vendor);
  config.cpu.family = btrace->config.cpu.family;
  config.cpu.model = btrace->config.cpu.model;
  config.cpu.stepping = btrace->config.cpu.stepping;

  errcode = pt_cpu_errata (&config.errata, &config.cpu);
  if (errcode < 0)
    error (_("Failed to configure the Intel Processor Trace decoder: %s."),
           pt_errstr (pt_errcode (errcode)));

  decoder = pt_insn_alloc_decoder (&config);
  if (decoder == NULL)
    error (_("Failed to allocate the Intel Processor Trace decoder."));

  TRY
    {
      struct pt_image *image;

      image = pt_insn_get_image (decoder);
      if (image == NULL)
        error (_("Failed to configure the Intel Processor Trace decoder."));

      errcode = pt_image_set_callback (image, btrace_pt_readmem_callback, NULL);
      if (errcode < 0)
        error (_("Failed to configure the Intel Processor Trace decoder: "
                 "%s."), pt_errstr (pt_errcode (errcode)));

      ftrace_add_pt (decoder, &btinfo->begin, &btinfo->end, &level, gaps);
    }
  CATCH (error, RETURN_MASK_ALL)
    {
      /* Indicate a gap in the trace if we quit trace processing.  */
      if (error.reason == RETURN_QUIT && btinfo->end != NULL)
        {
          btinfo->end = ftrace_new_gap (btinfo->end, BDE_PT_USER_QUIT);

          VEC_safe_push (bfun_s, *gaps, btinfo->end);
        }

      btrace_finalize_ftrace_pt (decoder, tp, level);

      throw_exception (error);
    }
  END_CATCH

  btrace_finalize_ftrace_pt (decoder, tp, level);
}

#else /* defined (HAVE_LIBIPT) */

static void
btrace_compute_ftrace_pt (struct thread_info *tp,
                          const struct btrace_data_pt *btrace,
                          VEC (bfun_s) **gaps)
{
  internal_error (__FILE__, __LINE__, _("Unexpected branch trace format."));
}

#endif /* defined (HAVE_LIBIPT) */

/* Compute the function branch trace from a block branch trace BTRACE for
   a thread given by BTINFO.  */

static void
btrace_compute_ftrace_1 (struct thread_info *tp, struct btrace_data *btrace,
                         VEC (bfun_s) **gaps)
{
  DEBUG ("compute ftrace");

  switch (btrace->format)
    {
    case BTRACE_FORMAT_NONE:
      return;

    case BTRACE_FORMAT_BTS:
      btrace_compute_ftrace_bts (tp, &btrace->variant.bts, gaps);
      return;

    case BTRACE_FORMAT_PT:
      btrace_compute_ftrace_pt (tp, &btrace->variant.pt, gaps);
      return;
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
}

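/* Update TP's gap count and try to bridge the gaps found during decode,
   if any.  */
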
static void
btrace_finalize_ftrace (struct thread_info *tp, VEC (bfun_s) **gaps)
{
  if (!VEC_empty (bfun_s, *gaps))
    {
      tp->btrace.ngaps += VEC_length (bfun_s, *gaps);
      btrace_bridge_gaps (tp, gaps);
    }
}

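/* Compute the function branch trace from BTRACE for TP.  Finalize the
   trace even if decode is interrupted, then re-throw the error.  */
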
static void
btrace_compute_ftrace (struct thread_info *tp, struct btrace_data *btrace)
{
  VEC (bfun_s) *gaps;
  struct cleanup *old_chain;

  gaps = NULL;
  old_chain = make_cleanup (VEC_cleanup (bfun_s), &gaps);

  TRY
    {
      btrace_compute_ftrace_1 (tp, btrace, &gaps);
    }
  CATCH (error, RETURN_MASK_ALL)
    {
      btrace_finalize_ftrace (tp, &gaps);

      throw_exception (error);
    }
  END_CATCH

  btrace_finalize_ftrace (tp, &gaps);

  do_cleanups (old_chain);
}

/* Add an entry for the current PC.  */

static void
btrace_add_pc (struct thread_info *tp)
{
  struct btrace_data btrace;
  struct btrace_block *block;
  struct regcache *regcache;
  struct cleanup *cleanup;
  CORE_ADDR pc;

  regcache = get_thread_regcache (tp->ptid);
  pc = regcache_read_pc (regcache);

  btrace_data_init (&btrace);
  btrace.format = BTRACE_FORMAT_BTS;
  btrace.variant.bts.blocks = NULL;

  cleanup = make_cleanup_btrace_data (&btrace);

  block = VEC_safe_push (btrace_block_s, btrace.variant.bts.blocks, NULL);
  block->begin = pc;
  block->end = pc;

  btrace_compute_ftrace (tp, &btrace);

  do_cleanups (cleanup);
}

/* See btrace.h.  */

void
btrace_enable (struct thread_info *tp, const struct btrace_config *conf)
{
  if (tp->btrace.target != NULL)
    return;

#if !defined (HAVE_LIBIPT)
  if (conf->format == BTRACE_FORMAT_PT)
    error (_("GDB does not support Intel Processor Trace."));
#endif /* !defined (HAVE_LIBIPT) */

  if (!target_supports_btrace (conf->format))
    error (_("Target does not support branch tracing."));

  DEBUG ("enable thread %s (%s)", print_thread_id (tp),
         target_pid_to_str (tp->ptid));

  tp->btrace.target = target_enable_btrace (tp->ptid, conf);

  /* We're done if we failed to enable tracing.  */
  if (tp->btrace.target == NULL)
    return;

  /* We need to undo the enable in case of errors.  */
  TRY
    {
      /* Add an entry for the current PC so we start tracing from where we
         enabled it.

         If we can't access TP's registers, TP is most likely running.  In this
         case, we can't really say where tracing was enabled so it should be
         safe to simply skip this step.

         This is not relevant for BTRACE_FORMAT_PT since the trace will already
         start at the PC at which tracing was enabled.  */
      if (conf->format != BTRACE_FORMAT_PT
          && can_access_registers_ptid (tp->ptid))
        btrace_add_pc (tp);
    }
  CATCH (exception, RETURN_MASK_ALL)
    {
      btrace_disable (tp);

      throw_exception (exception);
    }
  END_CATCH
}

/* See btrace.h.  */

const struct btrace_config *
btrace_conf (const struct btrace_thread_info *btinfo)
{
  if (btinfo->target == NULL)
    return NULL;

  return target_btrace_conf (btinfo->target);
}

/* See btrace.h.  */

void
btrace_disable (struct thread_info *tp)
{
  struct btrace_thread_info *btp = &tp->btrace;

  if (btp->target == NULL)
    return;

  DEBUG ("disable thread %s (%s)", print_thread_id (tp),
         target_pid_to_str (tp->ptid));

  target_disable_btrace (btp->target);
  btp->target = NULL;

  btrace_clear (tp);
}

/* See btrace.h.  */

void
btrace_teardown (struct thread_info *tp)
{
  struct btrace_thread_info *btp = &tp->btrace;

  if (btp->target == NULL)
    return;

  DEBUG ("teardown thread %s (%s)", print_thread_id (tp),
         target_pid_to_str (tp->ptid));

  target_teardown_btrace (btp->target);
  btp->target = NULL;

  btrace_clear (tp);
}

/* Stitch branch trace in BTS format.  */

static int
btrace_stitch_bts (struct btrace_data_bts *btrace, struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;
  struct btrace_function *last_bfun;
  struct btrace_insn *last_insn;
  btrace_block_s *first_new_block;

  btinfo = &tp->btrace;
  last_bfun = btinfo->end;
  gdb_assert (last_bfun != NULL);
  gdb_assert (!VEC_empty (btrace_block_s, btrace->blocks));

  /* If the existing trace ends with a gap, we just glue the traces
     together.  We need to drop the last (i.e. chronologically first) block
     of the new trace, though, since we can't fill in the start address.  */
  if (VEC_empty (btrace_insn_s, last_bfun->insn))
    {
      VEC_pop (btrace_block_s, btrace->blocks);
      return 0;
    }

  /* Beware that block trace starts with the most recent block, so the
     chronologically first block in the new trace is the last block in
     the new trace's block vector.  */
  first_new_block = VEC_last (btrace_block_s, btrace->blocks);
  last_insn = VEC_last (btrace_insn_s, last_bfun->insn);

  /* If the current PC at the end of the block is the same as in our current
     trace, there are two explanations:
       1. we executed the instruction and some branch brought us back.
       2. we have not made any progress.
     In the first case, the delta trace vector should contain at least two
     entries.
     In the second case, the delta trace vector should contain exactly one
     entry for the partial block containing the current PC.  Remove it.  */
  if (first_new_block->end == last_insn->pc
      && VEC_length (btrace_block_s, btrace->blocks) == 1)
    {
      VEC_pop (btrace_block_s, btrace->blocks);
      return 0;
    }

  DEBUG ("stitching %s to %s", ftrace_print_insn_addr (last_insn),
         core_addr_to_string_nz (first_new_block->end));

  /* Do a simple sanity check to make sure we don't accidentally end up
     with a bad block.  This should not occur in practice.  */
  if (first_new_block->end < last_insn->pc)
    {
      warning (_("Error while trying to read delta trace.  Falling back to "
                 "a full read."));
      return -1;
    }

  /* We adjust the last block to start at the end of our current trace.  */
  gdb_assert (first_new_block->begin == 0);
  first_new_block->begin = last_insn->pc;

  /* We simply pop the last insn so we can insert it again as part of
     the normal branch trace computation.
     Since instruction iterators are based on indices in the instructions
     vector, we don't leave any pointers dangling.  */
  DEBUG ("pruning insn at %s for stitching",
         ftrace_print_insn_addr (last_insn));

  VEC_pop (btrace_insn_s, last_bfun->insn);

  /* The instructions vector may become empty temporarily if this has
     been the only instruction in this function segment.
     This violates the invariant but will be remedied shortly by
     btrace_compute_ftrace when we add the new trace.  */

  /* The only case where this would hurt is if the entire trace consisted
     of just that one instruction.  If we remove it, we might turn the now
     empty btrace function segment into a gap.  But we don't want gaps at
     the beginning.  To avoid this, we remove the entire old trace.  */
  if (last_bfun == btinfo->begin && VEC_empty (btrace_insn_s, last_bfun->insn))
    btrace_clear (tp);

  return 0;
}

/* Adjust the block trace in order to stitch old and new trace together.
   BTRACE is the new delta trace between the last and the current stop.
   TP is the traced thread.
   May modify BTRACE as well as the existing trace in TP.
   Return 0 on success, -1 otherwise.  */

static int
btrace_stitch_trace (struct btrace_data *btrace, struct thread_info *tp)
{
  /* If we don't have trace, there's nothing to do.  */
  if (btrace_data_empty (btrace))
    return 0;

  switch (btrace->format)
    {
    case BTRACE_FORMAT_NONE:
      return 0;

    case BTRACE_FORMAT_BTS:
      return btrace_stitch_bts (&btrace->variant.bts, tp);

    case BTRACE_FORMAT_PT:
      /* Delta reads are not supported.  */
      return -1;
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
}

/* Clear the branch trace histories in BTINFO.  */

static void
btrace_clear_history (struct btrace_thread_info *btinfo)
{
  xfree (btinfo->insn_history);
  xfree (btinfo->call_history);
  xfree (btinfo->replay);

  btinfo->insn_history = NULL;
  btinfo->call_history = NULL;
  btinfo->replay = NULL;
}

/* Clear the branch trace maintenance histories in BTINFO.  */

static void
btrace_maint_clear (struct btrace_thread_info *btinfo)
{
  switch (btinfo->data.format)
    {
    default:
      break;

    case BTRACE_FORMAT_BTS:
      btinfo->maint.variant.bts.packet_history.begin = 0;
      btinfo->maint.variant.bts.packet_history.end = 0;
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      xfree (btinfo->maint.variant.pt.packets);

      btinfo->maint.variant.pt.packets = NULL;
      btinfo->maint.variant.pt.packet_history.begin = 0;
      btinfo->maint.variant.pt.packet_history.end = 0;
      break;
#endif /* defined (HAVE_LIBIPT) */
    }
}

/* See btrace.h.  */

const char *
btrace_decode_error (enum btrace_format format, int errcode)
{
  switch (format)
    {
    case BTRACE_FORMAT_BTS:
      switch (errcode)
        {
        case BDE_BTS_OVERFLOW:
          return _("instruction overflow");

        case BDE_BTS_INSN_SIZE:
          return _("unknown instruction");

        default:
          break;
        }
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      switch (errcode)
        {
        case BDE_PT_USER_QUIT:
          return _("trace decode cancelled");

        case BDE_PT_DISABLED:
          return _("disabled");

        case BDE_PT_OVERFLOW:
          return _("overflow");

        default:
          if (errcode < 0)
            return pt_errstr (pt_errcode (errcode));
          break;
        }
      break;
#endif /* defined (HAVE_LIBIPT) */

    default:
      break;
    }

  return _("unknown");
}

/* See btrace.h.  */

void
btrace_fetch (struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;
  struct btrace_target_info *tinfo;
  struct btrace_data btrace;
  struct cleanup *cleanup;
  int errcode;

  DEBUG ("fetch thread %s (%s)", print_thread_id (tp),
         target_pid_to_str (tp->ptid));

  btinfo = &tp->btrace;
  tinfo = btinfo->target;
  if (tinfo == NULL)
    return;

  /* There's no way we could get new trace while replaying.
     On the other hand, delta trace would return a partial record with the
     current PC, which is the replay PC, not the last PC, as expected.  */
  if (btinfo->replay != NULL)
    return;

  /* With CLI usage, TP->PTID always equals INFERIOR_PTID here.  Now that we
     can store a gdb.Record object in Python referring to a different thread
     than the current one, temporarily set INFERIOR_PTID.  */
  cleanup = save_inferior_ptid ();
  inferior_ptid = tp->ptid;

  /* We should not be called on running or exited threads.  */
  gdb_assert (can_access_registers_ptid (tp->ptid));

  btrace_data_init (&btrace);
  make_cleanup_btrace_data (&btrace);

  /* Let's first try to extend the trace we already have.  */
  if (btinfo->end != NULL)
    {
      errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_DELTA);
      if (errcode == 0)
        {
          /* Success.  Let's try to stitch the traces together.  */
          errcode = btrace_stitch_trace (&btrace, tp);
        }
      else
        {
          /* We failed to read delta trace.  Let's try to read new trace.  */
          errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_NEW);

          /* If we got any new trace, discard what we have.  */
          if (errcode == 0 && !btrace_data_empty (&btrace))
            btrace_clear (tp);
        }

      /* If we were not able to read the trace, we start over.  */
      if (errcode != 0)
        {
          btrace_clear (tp);
          errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);
        }
    }
  else
    errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);

  /* If we were not able to read the branch trace, signal an error.  */
  if (errcode != 0)
    error (_("Failed to read branch trace."));

  /* Compute the trace, provided we have any.  */
  if (!btrace_data_empty (&btrace))
    {
      struct btrace_function *bfun;

      /* Store the raw trace data.  The stored data will be cleared in
         btrace_clear, so we always append the new trace.  */
      btrace_data_append (&btinfo->data, &btrace);
      btrace_maint_clear (btinfo);

      btinfo->functions.clear ();
      btrace_clear_history (btinfo);
      btrace_compute_ftrace (tp, &btrace);

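      /* Populate the new function segment vector with all function
         segments in execution flow order.  */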
      for (bfun = btinfo->begin; bfun != NULL; bfun = bfun->flow.next)
        btinfo->functions.push_back (bfun);
    }

  do_cleanups (cleanup);
}

/* See btrace.h.  */

void
btrace_clear (struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;
  struct btrace_function *it, *trash;

  DEBUG ("clear thread %s (%s)", print_thread_id (tp),
         target_pid_to_str (tp->ptid));

  /* Make sure btrace frames that may hold a pointer into the branch
     trace data are destroyed.  */
  reinit_frame_cache ();

  btinfo = &tp->btrace;
  btinfo->functions.clear ();

  it = btinfo->begin;
  while (it != NULL)
    {
      trash = it;
      it = it->flow.next;

      VEC_free (btrace_insn_s, trash->insn);
      xfree (trash);
    }

  btinfo->begin = NULL;
  btinfo->end = NULL;
  btinfo->ngaps = 0;

  /* Must clear the maint data before - it depends on BTINFO->DATA.  */
  btrace_maint_clear (btinfo);
  btrace_data_clear (&btinfo->data);
  btrace_clear_history (btinfo);
}

/* See btrace.h.  */

void
btrace_free_objfile (struct objfile *objfile)
{
  struct thread_info *tp;

  DEBUG ("free objfile");

  ALL_NON_EXITED_THREADS (tp)
    btrace_clear (tp);
}

1922 #if defined (HAVE_LIBEXPAT)
1923
1924 /* Check the btrace document version. */
1925
1926 static void
1927 check_xml_btrace_version (struct gdb_xml_parser *parser,
1928 const struct gdb_xml_element *element,
1929 void *user_data, VEC (gdb_xml_value_s) *attributes)
1930 {
1931 const char *version
1932 = (const char *) xml_find_attribute (attributes, "version")->value;
1933
1934 if (strcmp (version, "1.0") != 0)
1935 gdb_xml_error (parser, _("Unsupported btrace version: \"%s\""), version);
1936 }
1937
1938 /* Parse a btrace "block" xml record. */
1939
1940 static void
1941 parse_xml_btrace_block (struct gdb_xml_parser *parser,
1942 const struct gdb_xml_element *element,
1943 void *user_data, VEC (gdb_xml_value_s) *attributes)
1944 {
1945 struct btrace_data *btrace;
1946 struct btrace_block *block;
1947 ULONGEST *begin, *end;
1948
1949 btrace = (struct btrace_data *) user_data;
1950
1951 switch (btrace->format)
1952 {
1953 case BTRACE_FORMAT_BTS:
1954 break;
1955
1956 case BTRACE_FORMAT_NONE:
1957 btrace->format = BTRACE_FORMAT_BTS;
1958 btrace->variant.bts.blocks = NULL;
1959 break;
1960
1961 default:
1962 gdb_xml_error (parser, _("Btrace format error."));
1963 }
1964
1965 begin = (ULONGEST *) xml_find_attribute (attributes, "begin")->value;
1966 end = (ULONGEST *) xml_find_attribute (attributes, "end")->value;
1967
1968 block = VEC_safe_push (btrace_block_s, btrace->variant.bts.blocks, NULL);
1969 block->begin = *begin;
1970 block->end = *end;
1971 }
1972
1973 /* Parse a "raw" xml record. */
1974
1975 static void
1976 parse_xml_raw (struct gdb_xml_parser *parser, const char *body_text,
1977 gdb_byte **pdata, size_t *psize)
1978 {
1979 struct cleanup *cleanup;
1980 gdb_byte *data, *bin;
1981 size_t len, size;
1982
1983 len = strlen (body_text);
1984 if (len % 2 != 0)
1985 gdb_xml_error (parser, _("Bad raw data size."));
1986
1987 size = len / 2;
1988
1989 bin = data = (gdb_byte *) xmalloc (size);
1990 cleanup = make_cleanup (xfree, data);
1991
1992 /* We use hex encoding - see common/rsp-low.h. */
1993 while (len > 0)
1994 {
1995 char hi, lo;
1996
1997 hi = *body_text++;
1998 lo = *body_text++;
1999
2000 if (hi == 0 || lo == 0)
2001 gdb_xml_error (parser, _("Bad hex encoding."));
2002
2003 *bin++ = fromhex (hi) * 16 + fromhex (lo);
2004 len -= 2;
2005 }
2006
2007 discard_cleanups (cleanup);
2008
2009 *pdata = data;
2010 *psize = size;
2011 }
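/* Illustrative example (not from the source): the body text "0a1f"
   decodes to the two bytes 0x0a and 0x1f, while an odd-length body
   such as "0a1" is rejected with "Bad raw data size.".  */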
2012
2013 /* Parse a btrace pt-config "cpu" xml record. */
2014
2015 static void
2016 parse_xml_btrace_pt_config_cpu (struct gdb_xml_parser *parser,
2017 const struct gdb_xml_element *element,
2018 void *user_data,
2019 VEC (gdb_xml_value_s) *attributes)
2020 {
2021 struct btrace_data *btrace;
2022 const char *vendor;
2023 ULONGEST *family, *model, *stepping;
2024
2025 vendor = (const char *) xml_find_attribute (attributes, "vendor")->value;
2026 family = (ULONGEST *) xml_find_attribute (attributes, "family")->value;
2027 model = (ULONGEST *) xml_find_attribute (attributes, "model")->value;
2028 stepping = (ULONGEST *) xml_find_attribute (attributes, "stepping")->value;
2029
2030 btrace = (struct btrace_data *) user_data;
2031
2032 if (strcmp (vendor, "GenuineIntel") == 0)
2033 btrace->variant.pt.config.cpu.vendor = CV_INTEL;
2034
2035 btrace->variant.pt.config.cpu.family = *family;
2036 btrace->variant.pt.config.cpu.model = *model;
2037 btrace->variant.pt.config.cpu.stepping = *stepping;
2038 }
2039
2040 /* Parse a btrace pt "raw" xml record. */
2041
2042 static void
2043 parse_xml_btrace_pt_raw (struct gdb_xml_parser *parser,
2044 const struct gdb_xml_element *element,
2045 void *user_data, const char *body_text)
2046 {
2047 struct btrace_data *btrace;
2048
2049 btrace = (struct btrace_data *) user_data;
2050 parse_xml_raw (parser, body_text, &btrace->variant.pt.data,
2051 &btrace->variant.pt.size);
2052 }
2053
2054 /* Parse a btrace "pt" xml record. */
2055
2056 static void
2057 parse_xml_btrace_pt (struct gdb_xml_parser *parser,
2058 const struct gdb_xml_element *element,
2059 void *user_data, VEC (gdb_xml_value_s) *attributes)
2060 {
2061 struct btrace_data *btrace;
2062
2063 btrace = (struct btrace_data *) user_data;
2064 btrace->format = BTRACE_FORMAT_PT;
2065 btrace->variant.pt.config.cpu.vendor = CV_UNKNOWN;
2066 btrace->variant.pt.data = NULL;
2067 btrace->variant.pt.size = 0;
2068 }
2069
2070 static const struct gdb_xml_attribute block_attributes[] = {
2071 { "begin", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
2072 { "end", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
2073 { NULL, GDB_XML_AF_NONE, NULL, NULL }
2074 };
2075
2076 static const struct gdb_xml_attribute btrace_pt_config_cpu_attributes[] = {
2077 { "vendor", GDB_XML_AF_NONE, NULL, NULL },
2078 { "family", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
2079 { "model", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
2080 { "stepping", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
2081 { NULL, GDB_XML_AF_NONE, NULL, NULL }
2082 };
2083
2084 static const struct gdb_xml_element btrace_pt_config_children[] = {
2085 { "cpu", btrace_pt_config_cpu_attributes, NULL, GDB_XML_EF_OPTIONAL,
2086 parse_xml_btrace_pt_config_cpu, NULL },
2087 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
2088 };
2089
2090 static const struct gdb_xml_element btrace_pt_children[] = {
2091 { "pt-config", NULL, btrace_pt_config_children, GDB_XML_EF_OPTIONAL, NULL,
2092 NULL },
2093 { "raw", NULL, NULL, GDB_XML_EF_OPTIONAL, NULL, parse_xml_btrace_pt_raw },
2094 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
2095 };
2096
2097 static const struct gdb_xml_attribute btrace_attributes[] = {
2098 { "version", GDB_XML_AF_NONE, NULL, NULL },
2099 { NULL, GDB_XML_AF_NONE, NULL, NULL }
2100 };
2101
2102 static const struct gdb_xml_element btrace_children[] = {
2103 { "block", block_attributes, NULL,
2104 GDB_XML_EF_REPEATABLE | GDB_XML_EF_OPTIONAL, parse_xml_btrace_block, NULL },
2105 { "pt", NULL, btrace_pt_children, GDB_XML_EF_OPTIONAL, parse_xml_btrace_pt,
2106 NULL },
2107 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
2108 };
2109
2110 static const struct gdb_xml_element btrace_elements[] = {
2111 { "btrace", btrace_attributes, btrace_children, GDB_XML_EF_NONE,
2112 check_xml_btrace_version, NULL },
2113 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
2114 };
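/* Illustrative sketch of a document accepted by the elements above
   (hypothetical values; the authoritative grammar is btrace.dtd):

     <btrace version="1.0">
       <block begin="0x400000" end="0x400010"/>
     </btrace>

   A "pt" element would instead carry an optional <pt-config> holding
   a <cpu> record plus a hex-encoded <raw> body.  */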
2115
2116 #endif /* defined (HAVE_LIBEXPAT) */
2117
2118 /* See btrace.h. */
2119
2120 void
2121 parse_xml_btrace (struct btrace_data *btrace, const char *buffer)
2122 {
2123 struct cleanup *cleanup;
2124 int errcode;
2125
2126 #if defined (HAVE_LIBEXPAT)
2127
2128 btrace->format = BTRACE_FORMAT_NONE;
2129
2130 cleanup = make_cleanup_btrace_data (btrace);
2131 errcode = gdb_xml_parse_quick (_("btrace"), "btrace.dtd", btrace_elements,
2132 buffer, btrace);
2133 if (errcode != 0)
2134 error (_("Error parsing branch trace."));
2135
2136 /* Keep parse results. */
2137 discard_cleanups (cleanup);
2138
2139 #else /* !defined (HAVE_LIBEXPAT) */
2140
2141 error (_("Cannot process branch trace. XML parsing is not supported."));
2142
2143 #endif /* !defined (HAVE_LIBEXPAT) */
2144 }
2145
2146 #if defined (HAVE_LIBEXPAT)
2147
2148 /* Parse a btrace-conf "bts" xml record. */
2149
2150 static void
2151 parse_xml_btrace_conf_bts (struct gdb_xml_parser *parser,
2152 const struct gdb_xml_element *element,
2153 void *user_data, VEC (gdb_xml_value_s) *attributes)
2154 {
2155 struct btrace_config *conf;
2156 struct gdb_xml_value *size;
2157
2158 conf = (struct btrace_config *) user_data;
2159 conf->format = BTRACE_FORMAT_BTS;
2160 conf->bts.size = 0;
2161
2162 size = xml_find_attribute (attributes, "size");
2163 if (size != NULL)
2164 conf->bts.size = (unsigned int) *(ULONGEST *) size->value;
2165 }
2166
2167 /* Parse a btrace-conf "pt" xml record. */
2168
2169 static void
2170 parse_xml_btrace_conf_pt (struct gdb_xml_parser *parser,
2171 const struct gdb_xml_element *element,
2172 void *user_data, VEC (gdb_xml_value_s) *attributes)
2173 {
2174 struct btrace_config *conf;
2175 struct gdb_xml_value *size;
2176
2177 conf = (struct btrace_config *) user_data;
2178 conf->format = BTRACE_FORMAT_PT;
2179 conf->pt.size = 0;
2180
2181 size = xml_find_attribute (attributes, "size");
2182 if (size != NULL)
2183 conf->pt.size = (unsigned int) *(ULONGEST *) size->value;
2184 }
2185
2186 static const struct gdb_xml_attribute btrace_conf_pt_attributes[] = {
2187 { "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
2188 { NULL, GDB_XML_AF_NONE, NULL, NULL }
2189 };
2190
2191 static const struct gdb_xml_attribute btrace_conf_bts_attributes[] = {
2192 { "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
2193 { NULL, GDB_XML_AF_NONE, NULL, NULL }
2194 };
2195
2196 static const struct gdb_xml_element btrace_conf_children[] = {
2197 { "bts", btrace_conf_bts_attributes, NULL, GDB_XML_EF_OPTIONAL,
2198 parse_xml_btrace_conf_bts, NULL },
2199 { "pt", btrace_conf_pt_attributes, NULL, GDB_XML_EF_OPTIONAL,
2200 parse_xml_btrace_conf_pt, NULL },
2201 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
2202 };
2203
2204 static const struct gdb_xml_attribute btrace_conf_attributes[] = {
2205 { "version", GDB_XML_AF_NONE, NULL, NULL },
2206 { NULL, GDB_XML_AF_NONE, NULL, NULL }
2207 };
2208
2209 static const struct gdb_xml_element btrace_conf_elements[] = {
2210 { "btrace-conf", btrace_conf_attributes, btrace_conf_children,
2211 GDB_XML_EF_NONE, NULL, NULL },
2212 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
2213 };
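/* Illustrative sketch of a configuration document accepted by the
   elements above (hypothetical values; see btrace-conf.dtd):

     <btrace-conf version="1.0">
       <bts size="65536"/>
     </btrace-conf>

   The "size" attributes are optional; when absent, the corresponding
   field remains zero.  */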
2214
2215 #endif /* defined (HAVE_LIBEXPAT) */
2216
2217 /* See btrace.h. */
2218
2219 void
2220 parse_xml_btrace_conf (struct btrace_config *conf, const char *xml)
2221 {
2222 int errcode;
2223
2224 #if defined (HAVE_LIBEXPAT)
2225
2226 errcode = gdb_xml_parse_quick (_("btrace-conf"), "btrace-conf.dtd",
2227 btrace_conf_elements, xml, conf);
2228 if (errcode != 0)
2229 error (_("Error parsing branch trace configuration."));
2230
2231 #else /* !defined (HAVE_LIBEXPAT) */
2232
2233 error (_("XML parsing is not supported."));
2234
2235 #endif /* !defined (HAVE_LIBEXPAT) */
2236 }
2237
2238 /* See btrace.h. */
2239
2240 const struct btrace_insn *
2241 btrace_insn_get (const struct btrace_insn_iterator *it)
2242 {
2243 const struct btrace_function *bfun;
2244 unsigned int index, end;
2245
2246 index = it->index;
2247 bfun = it->function;
2248
2249 /* Check if the iterator points to a gap in the trace. */
2250 if (bfun->errcode != 0)
2251 return NULL;
2252
2253 /* The index is within the bounds of this function's instruction vector. */
2254 end = VEC_length (btrace_insn_s, bfun->insn);
2255 gdb_assert (0 < end);
2256 gdb_assert (index < end);
2257
2258 return VEC_index (btrace_insn_s, bfun->insn, index);
2259 }
2260
2261 /* See btrace.h. */
2262
2263 int
2264 btrace_insn_get_error (const struct btrace_insn_iterator *it)
2265 {
2266 return it->function->errcode;
2267 }
2268
2269 /* See btrace.h. */
2270
2271 unsigned int
2272 btrace_insn_number (const struct btrace_insn_iterator *it)
2273 {
2274 return it->function->insn_offset + it->index;
2275 }
2276
2277 /* See btrace.h. */
2278
2279 void
2280 btrace_insn_begin (struct btrace_insn_iterator *it,
2281 const struct btrace_thread_info *btinfo)
2282 {
2283 const struct btrace_function *bfun;
2284
2285 bfun = btinfo->begin;
2286 if (bfun == NULL)
2287 error (_("No trace."));
2288
2289 it->function = bfun;
2290 it->index = 0;
2291 }
2292
2293 /* See btrace.h. */
2294
2295 void
2296 btrace_insn_end (struct btrace_insn_iterator *it,
2297 const struct btrace_thread_info *btinfo)
2298 {
2299 const struct btrace_function *bfun;
2300 unsigned int length;
2301
2302 bfun = btinfo->end;
2303 if (bfun == NULL)
2304 error (_("No trace."));
2305
2306 length = VEC_length (btrace_insn_s, bfun->insn);
2307
2308 /* The last function may either be a gap or contain the current
2309 instruction, which is one past the end of the execution trace; ignore
2310 it. */
2311 if (length > 0)
2312 length -= 1;
2313
2314 it->function = bfun;
2315 it->index = length;
2316 }
2317
2318 /* See btrace.h. */
2319
2320 unsigned int
2321 btrace_insn_next (struct btrace_insn_iterator *it, unsigned int stride)
2322 {
2323 const struct btrace_function *bfun;
2324 unsigned int index, steps;
2325
2326 bfun = it->function;
2327 steps = 0;
2328 index = it->index;
2329
2330 while (stride != 0)
2331 {
2332 unsigned int end, space, adv;
2333
2334 end = VEC_length (btrace_insn_s, bfun->insn);
2335
2336 /* An empty function segment represents a gap in the trace. We count
2337 it as one instruction. */
2338 if (end == 0)
2339 {
2340 const struct btrace_function *next;
2341
2342 next = bfun->flow.next;
2343 if (next == NULL)
2344 break;
2345
2346 stride -= 1;
2347 steps += 1;
2348
2349 bfun = next;
2350 index = 0;
2351
2352 continue;
2353 }
2354
2355 gdb_assert (0 < end);
2356 gdb_assert (index < end);
2357
2358 /* Compute the number of instructions remaining in this segment. */
2359 space = end - index;
2360
2361 /* Advance the iterator as far as possible within this segment. */
2362 adv = std::min (space, stride);
2363 stride -= adv;
2364 index += adv;
2365 steps += adv;
2366
2367 /* Move to the next function if we're at the end of this one. */
2368 if (index == end)
2369 {
2370 const struct btrace_function *next;
2371
2372 next = bfun->flow.next;
2373 if (next == NULL)
2374 {
2375 /* We stepped past the last function.
2376
2377 Let's adjust the index to point to the last instruction in
2378 the previous function. */
2379 index -= 1;
2380 steps -= 1;
2381 break;
2382 }
2383
2384 /* We now point to the first instruction in the new function. */
2385 bfun = next;
2386 index = 0;
2387 }
2388
2389 /* We did make progress. */
2390 gdb_assert (adv > 0);
2391 }
2392
2393 /* Update the iterator. */
2394 it->function = bfun;
2395 it->index = index;
2396
2397 return steps;
2398 }
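/* Illustrative sketch of a hypothetical caller (BTINFO and N are
   assumed to be in scope): a short return value signals that the end
   of the trace was reached early.

     struct btrace_insn_iterator it;

     btrace_insn_begin (&it, btinfo);
     if (btrace_insn_next (&it, n) != n)
       ;  /* Fewer than N instructions were left.  */

   btrace_insn_prev below works symmetrically toward the beginning.  */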
2399
2400 /* See btrace.h. */
2401
2402 unsigned int
2403 btrace_insn_prev (struct btrace_insn_iterator *it, unsigned int stride)
2404 {
2405 const struct btrace_function *bfun;
2406 unsigned int index, steps;
2407
2408 bfun = it->function;
2409 steps = 0;
2410 index = it->index;
2411
2412 while (stride != 0)
2413 {
2414 unsigned int adv;
2415
2416 /* Move to the previous function if we're at the start of this one. */
2417 if (index == 0)
2418 {
2419 const struct btrace_function *prev;
2420
2421 prev = bfun->flow.prev;
2422 if (prev == NULL)
2423 break;
2424
2425 /* We point to one after the last instruction in the new function. */
2426 bfun = prev;
2427 index = VEC_length (btrace_insn_s, bfun->insn);
2428
2429 /* An empty function segment represents a gap in the trace. We count
2430 it as one instruction. */
2431 if (index == 0)
2432 {
2433 stride -= 1;
2434 steps += 1;
2435
2436 continue;
2437 }
2438 }
2439
2440 /* Advance the iterator as far as possible within this segment. */
2441 adv = std::min (index, stride);
2442
2443 stride -= adv;
2444 index -= adv;
2445 steps += adv;
2446
2447 /* We did make progress. */
2448 gdb_assert (adv > 0);
2449 }
2450
2451 /* Update the iterator. */
2452 it->function = bfun;
2453 it->index = index;
2454
2455 return steps;
2456 }
2457
2458 /* See btrace.h. */
2459
2460 int
2461 btrace_insn_cmp (const struct btrace_insn_iterator *lhs,
2462 const struct btrace_insn_iterator *rhs)
2463 {
2464 unsigned int lnum, rnum;
2465
2466 lnum = btrace_insn_number (lhs);
2467 rnum = btrace_insn_number (rhs);
2468
2469 return (int) (lnum - rnum);
2470 }
2471
2472 /* See btrace.h. */
2473
2474 int
2475 btrace_find_insn_by_number (struct btrace_insn_iterator *it,
2476 const struct btrace_thread_info *btinfo,
2477 unsigned int number)
2478 {
2479 const struct btrace_function *bfun;
2480 unsigned int upper, lower;
2481
2482 if (btinfo->functions.empty ())
2483 return 0;
2484
2485 lower = 0;
2486 bfun = btinfo->functions[lower];
2487 if (number < bfun->insn_offset)
2488 return 0;
2489
2490 upper = btinfo->functions.size () - 1;
2491 bfun = btinfo->functions[upper];
2492 if (number >= bfun->insn_offset + ftrace_call_num_insn (bfun))
2493 return 0;
2494
2495 /* We assume that there are no holes in the numbering. */
2496 for (;;)
2497 {
2498 const unsigned int average = lower + (upper - lower) / 2;
2499
2500 bfun = btinfo->functions[average];
2501
2502 if (number < bfun->insn_offset)
2503 {
2504 upper = average - 1;
2505 continue;
2506 }
2507
2508 if (number >= bfun->insn_offset + ftrace_call_num_insn (bfun))
2509 {
2510 lower = average + 1;
2511 continue;
2512 }
2513
2514 break;
2515 }
2516
2517 it->function = bfun;
2518 it->index = number - bfun->insn_offset;
2519 return 1;
2520 }
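/* Illustrative sketch of a hypothetical caller: position an iterator
   on instruction NUMBER or report failure.

     struct btrace_insn_iterator it;

     if (btrace_find_insn_by_number (&it, btinfo, number) == 0)
       error (_("No such instruction."));

   The binary search above relies on the instruction numbering being
   consecutive and free of holes.  */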
2521
2522 /* See btrace.h. */
2523
2524 const struct btrace_function *
2525 btrace_call_get (const struct btrace_call_iterator *it)
2526 {
2527 return it->function;
2528 }
2529
2530 /* See btrace.h. */
2531
2532 unsigned int
2533 btrace_call_number (const struct btrace_call_iterator *it)
2534 {
2535 const struct btrace_thread_info *btinfo;
2536 const struct btrace_function *bfun;
2537 unsigned int insns;
2538
2539 btinfo = it->btinfo;
2540 bfun = it->function;
2541 if (bfun != NULL)
2542 return bfun->number;
2543
2544 /* For the end iterator, i.e. bfun == NULL, we return one more than the
2545 number of the last function. */
2546 bfun = btinfo->end;
2547 insns = VEC_length (btrace_insn_s, bfun->insn);
2548
2549 /* If the function contains only a single instruction (i.e. the current
2550 instruction), it will be skipped and its number is already the number
2551 we seek. */
2552 if (insns == 1)
2553 return bfun->number;
2554
2555 /* Otherwise, return one more than the number of the last function. */
2556 return bfun->number + 1;
2557 }
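/* For example: if the last function segment is number 42 and contains
   more than the single current instruction, the end iterator yields
   43; if it contains only the current instruction, that segment is
   skipped and the end iterator yields 42.  */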
2558
2559 /* See btrace.h. */
2560
2561 void
2562 btrace_call_begin (struct btrace_call_iterator *it,
2563 const struct btrace_thread_info *btinfo)
2564 {
2565 const struct btrace_function *bfun;
2566
2567 bfun = btinfo->begin;
2568 if (bfun == NULL)
2569 error (_("No trace."));
2570
2571 it->btinfo = btinfo;
2572 it->function = bfun;
2573 }
2574
2575 /* See btrace.h. */
2576
2577 void
2578 btrace_call_end (struct btrace_call_iterator *it,
2579 const struct btrace_thread_info *btinfo)
2580 {
2581 const struct btrace_function *bfun;
2582
2583 bfun = btinfo->end;
2584 if (bfun == NULL)
2585 error (_("No trace."));
2586
2587 it->btinfo = btinfo;
2588 it->function = NULL;
2589 }
2590
2591 /* See btrace.h. */
2592
2593 unsigned int
2594 btrace_call_next (struct btrace_call_iterator *it, unsigned int stride)
2595 {
2596 const struct btrace_function *bfun;
2597 unsigned int steps;
2598
2599 bfun = it->function;
2600 steps = 0;
2601 while (bfun != NULL)
2602 {
2603 const struct btrace_function *next;
2604 unsigned int insns;
2605
2606 next = bfun->flow.next;
2607 if (next == NULL)
2608 {
2609 /* Ignore the last function if it only contains a single
2610 (i.e. the current) instruction. */
2611 insns = VEC_length (btrace_insn_s, bfun->insn);
2612 if (insns == 1)
2613 steps -= 1;
2614 }
2615
2616 if (stride == steps)
2617 break;
2618
2619 bfun = next;
2620 steps += 1;
2621 }
2622
2623 it->function = bfun;
2624 return steps;
2625 }
2626
2627 /* See btrace.h. */
2628
2629 unsigned int
2630 btrace_call_prev (struct btrace_call_iterator *it, unsigned int stride)
2631 {
2632 const struct btrace_thread_info *btinfo;
2633 const struct btrace_function *bfun;
2634 unsigned int steps;
2635
2636 bfun = it->function;
2637 steps = 0;
2638
2639 if (bfun == NULL)
2640 {
2641 unsigned int insns;
2642
2643 btinfo = it->btinfo;
2644 bfun = btinfo->end;
2645 if (bfun == NULL)
2646 return 0;
2647
2648 /* Ignore the last function if it only contains a single
2649 (i.e. the current) instruction. */
2650 insns = VEC_length (btrace_insn_s, bfun->insn);
2651 if (insns == 1)
2652 bfun = bfun->flow.prev;
2653
2654 if (bfun == NULL)
2655 return 0;
2656
2657 steps += 1;
2658 }
2659
2660 while (steps < stride)
2661 {
2662 const struct btrace_function *prev;
2663
2664 prev = bfun->flow.prev;
2665 if (prev == NULL)
2666 break;
2667
2668 bfun = prev;
2669 steps += 1;
2670 }
2671
2672 it->function = bfun;
2673 return steps;
2674 }
2675
2676 /* See btrace.h. */
2677
2678 int
2679 btrace_call_cmp (const struct btrace_call_iterator *lhs,
2680 const struct btrace_call_iterator *rhs)
2681 {
2682 unsigned int lnum, rnum;
2683
2684 lnum = btrace_call_number (lhs);
2685 rnum = btrace_call_number (rhs);
2686
2687 return (int) (lnum - rnum);
2688 }
2689
2690 /* See btrace.h. */
2691
2692 int
2693 btrace_find_call_by_number (struct btrace_call_iterator *it,
2694 const struct btrace_thread_info *btinfo,
2695 unsigned int number)
2696 {
2697 const struct btrace_function *bfun;
2698
2699 for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
2700 {
2701 unsigned int bnum;
2702
2703 bnum = bfun->number;
2704 if (number == bnum)
2705 {
2706 it->btinfo = btinfo;
2707 it->function = bfun;
2708 return 1;
2709 }
2710
2711 /* Functions are ordered and numbered consecutively. We could bail out
2712 earlier. On the other hand, it is very unlikely that we search for
2713 a nonexistent function. */
2714 }
2715
2716 return 0;
2717 }
2718
2719 /* See btrace.h. */
2720
2721 void
2722 btrace_set_insn_history (struct btrace_thread_info *btinfo,
2723 const struct btrace_insn_iterator *begin,
2724 const struct btrace_insn_iterator *end)
2725 {
2726 if (btinfo->insn_history == NULL)
2727 btinfo->insn_history = XCNEW (struct btrace_insn_history);
2728
2729 btinfo->insn_history->begin = *begin;
2730 btinfo->insn_history->end = *end;
2731 }
2732
2733 /* See btrace.h. */
2734
2735 void
2736 btrace_set_call_history (struct btrace_thread_info *btinfo,
2737 const struct btrace_call_iterator *begin,
2738 const struct btrace_call_iterator *end)
2739 {
2740 gdb_assert (begin->btinfo == end->btinfo);
2741
2742 if (btinfo->call_history == NULL)
2743 btinfo->call_history = XCNEW (struct btrace_call_history);
2744
2745 btinfo->call_history->begin = *begin;
2746 btinfo->call_history->end = *end;
2747 }
2748
2749 /* See btrace.h. */
2750
2751 int
2752 btrace_is_replaying (struct thread_info *tp)
2753 {
2754 return tp->btrace.replay != NULL;
2755 }
2756
2757 /* See btrace.h. */
2758
2759 int
2760 btrace_is_empty (struct thread_info *tp)
2761 {
2762 struct btrace_insn_iterator begin, end;
2763 struct btrace_thread_info *btinfo;
2764
2765 btinfo = &tp->btrace;
2766
2767 if (btinfo->begin == NULL)
2768 return 1;
2769
2770 btrace_insn_begin (&begin, btinfo);
2771 btrace_insn_end (&end, btinfo);
2772
2773 return btrace_insn_cmp (&begin, &end) == 0;
2774 }
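/* For example: a trace that holds only the single current instruction
   yields equal begin and end iterators and therefore counts as
   empty.  */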
2775
2776 /* Forward the cleanup request. */
2777
2778 static void
2779 do_btrace_data_cleanup (void *arg)
2780 {
2781 btrace_data_fini ((struct btrace_data *) arg);
2782 }
2783
2784 /* See btrace.h. */
2785
2786 struct cleanup *
2787 make_cleanup_btrace_data (struct btrace_data *data)
2788 {
2789 return make_cleanup (do_btrace_data_cleanup, data);
2790 }
2791
2792 #if defined (HAVE_LIBIPT)
2793
2794 /* Print a single packet. */
2795
2796 static void
2797 pt_print_packet (const struct pt_packet *packet)
2798 {
2799 switch (packet->type)
2800 {
2801 default:
2802 printf_unfiltered (("[??: %x]"), packet->type);
2803 break;
2804
2805 case ppt_psb:
2806 printf_unfiltered (("psb"));
2807 break;
2808
2809 case ppt_psbend:
2810 printf_unfiltered (("psbend"));
2811 break;
2812
2813 case ppt_pad:
2814 printf_unfiltered (("pad"));
2815 break;
2816
2817 case ppt_tip:
2818 printf_unfiltered (("tip %u: 0x%" PRIx64 ""),
2819 packet->payload.ip.ipc,
2820 packet->payload.ip.ip);
2821 break;
2822
2823 case ppt_tip_pge:
2824 printf_unfiltered (("tip.pge %u: 0x%" PRIx64 ""),
2825 packet->payload.ip.ipc,
2826 packet->payload.ip.ip);
2827 break;
2828
2829 case ppt_tip_pgd:
2830 printf_unfiltered (("tip.pgd %u: 0x%" PRIx64 ""),
2831 packet->payload.ip.ipc,
2832 packet->payload.ip.ip);
2833 break;
2834
2835 case ppt_fup:
2836 printf_unfiltered (("fup %u: 0x%" PRIx64 ""),
2837 packet->payload.ip.ipc,
2838 packet->payload.ip.ip);
2839 break;
2840
2841 case ppt_tnt_8:
2842 printf_unfiltered (("tnt-8 %u: 0x%" PRIx64 ""),
2843 packet->payload.tnt.bit_size,
2844 packet->payload.tnt.payload);
2845 break;
2846
2847 case ppt_tnt_64:
2848 printf_unfiltered (("tnt-64 %u: 0x%" PRIx64 ""),
2849 packet->payload.tnt.bit_size,
2850 packet->payload.tnt.payload);
2851 break;
2852
2853 case ppt_pip:
2854 printf_unfiltered (("pip %" PRIx64 "%s"), packet->payload.pip.cr3,
2855 packet->payload.pip.nr ? (" nr") : (""));
2856 break;
2857
2858 case ppt_tsc:
2859 printf_unfiltered (("tsc %" PRIx64 ""), packet->payload.tsc.tsc);
2860 break;
2861
2862 case ppt_cbr:
2863 printf_unfiltered (("cbr %u"), packet->payload.cbr.ratio);
2864 break;
2865
2866 case ppt_mode:
2867 switch (packet->payload.mode.leaf)
2868 {
2869 default:
2870 printf_unfiltered (("mode %u"), packet->payload.mode.leaf);
2871 break;
2872
2873 case pt_mol_exec:
2874 printf_unfiltered (("mode.exec%s%s"),
2875 packet->payload.mode.bits.exec.csl
2876 ? (" cs.l") : (""),
2877 packet->payload.mode.bits.exec.csd
2878 ? (" cs.d") : (""));
2879 break;
2880
2881 case pt_mol_tsx:
2882 printf_unfiltered (("mode.tsx%s%s"),
2883 packet->payload.mode.bits.tsx.intx
2884 ? (" intx") : (""),
2885 packet->payload.mode.bits.tsx.abrt
2886 ? (" abrt") : (""));
2887 break;
2888 }
2889 break;
2890
2891 case ppt_ovf:
2892 printf_unfiltered (("ovf"));
2893 break;
2894
2895 case ppt_stop:
2896 printf_unfiltered (("stop"));
2897 break;
2898
2899 case ppt_vmcs:
2900 printf_unfiltered (("vmcs %" PRIx64 ""), packet->payload.vmcs.base);
2901 break;
2902
2903 case ppt_tma:
2904 printf_unfiltered (("tma %x %x"), packet->payload.tma.ctc,
2905 packet->payload.tma.fc);
2906 break;
2907
2908 case ppt_mtc:
2909 printf_unfiltered (("mtc %x"), packet->payload.mtc.ctc);
2910 break;
2911
2912 case ppt_cyc:
2913 printf_unfiltered (("cyc %" PRIx64 ""), packet->payload.cyc.value);
2914 break;
2915
2916 case ppt_mnt:
2917 printf_unfiltered (("mnt %" PRIx64 ""), packet->payload.mnt.payload);
2918 break;
2919 }
2920 }
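/* Illustrative output lines (hypothetical payloads): "psb", "pad",
   "tip 3: 0x401000", "tnt-8 6: 0x2a".  */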
2921
2922 /* Decode packets into MAINT using DECODER. */
2923
2924 static void
2925 btrace_maint_decode_pt (struct btrace_maint_info *maint,
2926 struct pt_packet_decoder *decoder)
2927 {
2928 int errcode;
2929
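  /* The outer loop synchronizes onto the next PSB packet; the inner
     loop then reads packets until the end of the stream or a decode
     error.  A non-eos error is recorded as a packet with a non-zero
     error code, and decoding resumes at the next synchronization
     point.  */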
2930 for (;;)
2931 {
2932 struct btrace_pt_packet packet;
2933
2934 errcode = pt_pkt_sync_forward (decoder);
2935 if (errcode < 0)
2936 break;
2937
2938 for (;;)
2939 {
2940 pt_pkt_get_offset (decoder, &packet.offset);
2941
2942 errcode = pt_pkt_next (decoder, &packet.packet,
2943 sizeof (packet.packet));
2944 if (errcode < 0)
2945 break;
2946
2947 if (maint_btrace_pt_skip_pad == 0 || packet.packet.type != ppt_pad)
2948 {
2949 packet.errcode = pt_errcode (errcode);
2950 VEC_safe_push (btrace_pt_packet_s, maint->variant.pt.packets,
2951 &packet);
2952 }
2953 }
2954
2955 if (errcode == -pte_eos)
2956 break;
2957
2958 packet.errcode = pt_errcode (errcode);
2959 VEC_safe_push (btrace_pt_packet_s, maint->variant.pt.packets,
2960 &packet);
2961
2962 warning (_("Error at trace offset 0x%" PRIx64 ": %s."),
2963 packet.offset, pt_errstr (packet.errcode));
2964 }
2965
2966 if (errcode != -pte_eos)
2967 warning (_("Failed to synchronize onto the Intel Processor Trace "
2968 "stream: %s."), pt_errstr (pt_errcode (errcode)));
2969 }
2970
2971 /* Update the packet history in BTINFO. */
2972
2973 static void
2974 btrace_maint_update_pt_packets (struct btrace_thread_info *btinfo)
2975 {
2977 struct pt_packet_decoder *decoder;
2978 struct btrace_data_pt *pt;
2979 struct pt_config config;
2980 int errcode;
2981
2982 pt = &btinfo->data.variant.pt;
2983
2984 /* Nothing to do if there is no trace. */
2985 if (pt->size == 0)
2986 return;
2987
2988 memset (&config, 0, sizeof (config));
2989
2990 config.size = sizeof (config);
2991 config.begin = pt->data;
2992 config.end = pt->data + pt->size;
2993
2994 config.cpu.vendor = pt_translate_cpu_vendor (pt->config.cpu.vendor);
2995 config.cpu.family = pt->config.cpu.family;
2996 config.cpu.model = pt->config.cpu.model;
2997 config.cpu.stepping = pt->config.cpu.stepping;
2998
2999 errcode = pt_cpu_errata (&config.errata, &config.cpu);
3000 if (errcode < 0)
3001 error (_("Failed to configure the Intel Processor Trace decoder: %s."),
3002 pt_errstr (pt_errcode (errcode)));
3003
3004 decoder = pt_pkt_alloc_decoder (&config);
3005 if (decoder == NULL)
3006 error (_("Failed to allocate the Intel Processor Trace decoder."));
3007
3008 TRY
3009 {
3010 btrace_maint_decode_pt (&btinfo->maint, decoder);
3011 }
3012 CATCH (except, RETURN_MASK_ALL)
3013 {
3014 pt_pkt_free_decoder (decoder);
3015
3016 if (except.reason < 0)
3017 throw_exception (except);
3018 }
3019 END_CATCH
3020
3021 pt_pkt_free_decoder (decoder);
3022 }
3023
3024 #endif /* defined (HAVE_LIBIPT) */
3025
3026 /* Update the packet maintenance information for BTINFO and store the
3027 low and high bounds into BEGIN and END, respectively.
3028 Store the current iterator state into FROM and TO. */
3029
3030 static void
3031 btrace_maint_update_packets (struct btrace_thread_info *btinfo,
3032 unsigned int *begin, unsigned int *end,
3033 unsigned int *from, unsigned int *to)
3034 {
3035 switch (btinfo->data.format)
3036 {
3037 default:
3038 *begin = 0;
3039 *end = 0;
3040 *from = 0;
3041 *to = 0;
3042 break;
3043
3044 case BTRACE_FORMAT_BTS:
3045 /* Nothing to do - we operate directly on BTINFO->DATA. */
3046 *begin = 0;
3047 *end = VEC_length (btrace_block_s, btinfo->data.variant.bts.blocks);
3048 *from = btinfo->maint.variant.bts.packet_history.begin;
3049 *to = btinfo->maint.variant.bts.packet_history.end;
3050 break;
3051
3052 #if defined (HAVE_LIBIPT)
3053 case BTRACE_FORMAT_PT:
3054 if (VEC_empty (btrace_pt_packet_s, btinfo->maint.variant.pt.packets))
3055 btrace_maint_update_pt_packets (btinfo);
3056
3057 *begin = 0;
3058 *end = VEC_length (btrace_pt_packet_s, btinfo->maint.variant.pt.packets);
3059 *from = btinfo->maint.variant.pt.packet_history.begin;
3060 *to = btinfo->maint.variant.pt.packet_history.end;
3061 break;
3062 #endif /* defined (HAVE_LIBIPT) */
3063 }
3064 }
3065
3066 /* Print packets in BTINFO from BEGIN (inclusive) until END (exclusive) and
3067 update the current iterator position. */
3068
3069 static void
3070 btrace_maint_print_packets (struct btrace_thread_info *btinfo,
3071 unsigned int begin, unsigned int end)
3072 {
3073 switch (btinfo->data.format)
3074 {
3075 default:
3076 break;
3077
3078 case BTRACE_FORMAT_BTS:
3079 {
3080 VEC (btrace_block_s) *blocks;
3081 unsigned int blk;
3082
3083 blocks = btinfo->data.variant.bts.blocks;
3084 for (blk = begin; blk < end; ++blk)
3085 {
3086 const btrace_block_s *block;
3087
3088 block = VEC_index (btrace_block_s, blocks, blk);
3089
3090 printf_unfiltered ("%u\tbegin: %s, end: %s\n", blk,
3091 core_addr_to_string_nz (block->begin),
3092 core_addr_to_string_nz (block->end));
3093 }
3094
3095 btinfo->maint.variant.bts.packet_history.begin = begin;
3096 btinfo->maint.variant.bts.packet_history.end = end;
3097 }
3098 break;
3099
3100 #if defined (HAVE_LIBIPT)
3101 case BTRACE_FORMAT_PT:
3102 {
3103 VEC (btrace_pt_packet_s) *packets;
3104 unsigned int pkt;
3105
3106 packets = btinfo->maint.variant.pt.packets;
3107 for (pkt = begin; pkt < end; ++pkt)
3108 {
3109 const struct btrace_pt_packet *packet;
3110
3111 packet = VEC_index (btrace_pt_packet_s, packets, pkt);
3112
3113 printf_unfiltered ("%u\t", pkt);
3114 printf_unfiltered ("0x%" PRIx64 "\t", packet->offset);
3115
3116 if (packet->errcode == pte_ok)
3117 pt_print_packet (&packet->packet);
3118 else
3119 printf_unfiltered ("[error: %s]", pt_errstr (packet->errcode));
3120
3121 printf_unfiltered ("\n");
3122 }
3123
3124 btinfo->maint.variant.pt.packet_history.begin = begin;
3125 btinfo->maint.variant.pt.packet_history.end = end;
3126 }
3127 break;
3128 #endif /* defined (HAVE_LIBIPT) */
3129 }
3130 }
3131
3132 /* Read a number from an argument string. */
3133
3134 static unsigned int
3135 get_uint (char **arg)
3136 {
3137 char *begin, *end, *pos;
3138 unsigned long number;
3139
3140 begin = *arg;
3141 pos = skip_spaces (begin);
3142
3143 if (!isdigit (*pos))
3144 error (_("Expected positive number, got: %s."), pos);
3145
3146 number = strtoul (pos, &end, 10);
3147 if (number > UINT_MAX)
3148 error (_("Number too big."));
3149
3150 *arg += (end - begin);
3151
3152 return (unsigned int) number;
3153 }
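/* For example: with *ARG pointing at " 42,+10", get_uint returns 42
   and advances *ARG to point at ",+10".  */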
3154
3155 /* Read a context size from an argument string. */
3156
3157 static int
3158 get_context_size (char **arg)
3159 {
3160 char *pos;
3162
3163 pos = skip_spaces (*arg);
3164
3165 if (!isdigit (*pos))
3166 error (_("Expected positive number, got: %s."), pos);
3167
3168 return strtol (pos, arg, 10);
3169 }
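/* Unlike get_uint, the value is parsed as a signed long, implicitly
   narrowed to int, and strtol itself advances *ARG past the
   digits.  */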
3170
3171 /* Complain about junk at the end of an argument string. */
3172
3173 static void
3174 no_chunk (char *arg)
3175 {
3176 if (*arg != 0)
3177 error (_("Junk after argument: %s."), arg);
3178 }
3179
3180 /* The "maintenance btrace packet-history" command. */
3181
3182 static void
3183 maint_btrace_packet_history_cmd (char *arg, int from_tty)
3184 {
3185 struct btrace_thread_info *btinfo;
3186 struct thread_info *tp;
3187 unsigned int size, begin, end, from, to;
3188
3189 tp = find_thread_ptid (inferior_ptid);
3190 if (tp == NULL)
3191 error (_("No thread."));
3192
3193 size = 10;
3194 btinfo = &tp->btrace;
3195
3196 btrace_maint_update_packets (btinfo, &begin, &end, &from, &to);
3197 if (begin == end)
3198 {
3199 printf_unfiltered (_("No trace.\n"));
3200 return;
3201 }
3202
3203 if (arg == NULL || *arg == 0 || strcmp (arg, "+") == 0)
3204 {
3205 from = to;
3206
3207 if (end - from < size)
3208 size = end - from;
3209 to = from + size;
3210 }
3211 else if (strcmp (arg, "-") == 0)
3212 {
3213 to = from;
3214
3215 if (to - begin < size)
3216 size = to - begin;
3217 from = to - size;
3218 }
3219 else
3220 {
3221 from = get_uint (&arg);
3222 if (end <= from)
3223 error (_("'%u' is out of range."), from);
3224
3225 arg = skip_spaces (arg);
3226 if (*arg == ',')
3227 {
3228 arg = skip_spaces (++arg);
3229
3230 if (*arg == '+')
3231 {
3232 arg += 1;
3233 size = get_context_size (&arg);
3234
3235 no_chunk (arg);
3236
3237 if (end - from < size)
3238 size = end - from;
3239 to = from + size;
3240 }
3241 else if (*arg == '-')
3242 {
3243 arg += 1;
3244 size = get_context_size (&arg);
3245
3246 no_chunk (arg);
3247
3248 /* Include the packet given as first argument. */
3249 from += 1;
3250 to = from;
3251
3252 if (to - begin < size)
3253 size = to - begin;
3254 from = to - size;
3255 }
3256 else
3257 {
3258 to = get_uint (&arg);
3259
3260 /* Include the packet at the second argument and silently
3261 truncate the range. */
3262 if (to < end)
3263 to += 1;
3264 else
3265 to = end;
3266
3267 no_chunk (arg);
3268 }
3269 }
3270 else
3271 {
3272 no_chunk (arg);
3273
3274 if (end - from < size)
3275 size = end - from;
3276 to = from + size;
3277 }
3278
3279 dont_repeat ();
3280 }
3281
3282 btrace_maint_print_packets (btinfo, from, to);
3283 }
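/* Illustrative invocations (hypothetical packet numbers; ten is the
   default context size):

     (gdb) maint btrace packet-history          <- next ten packets
     (gdb) maint btrace packet-history -        <- previous ten packets
     (gdb) maint btrace packet-history 10       <- ten packets from 10
     (gdb) maint btrace packet-history 10,20    <- packets 10 through 20
     (gdb) maint btrace packet-history 10,+5    <- five packets from 10
     (gdb) maint btrace packet-history 10,-5    <- five packets ending at 10  */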
3284
3285 /* The "maintenance btrace clear-packet-history" command. */
3286
3287 static void
3288 maint_btrace_clear_packet_history_cmd (char *args, int from_tty)
3289 {
3290 struct btrace_thread_info *btinfo;
3291 struct thread_info *tp;
3292
3293 if (args != NULL && *args != 0)
3294 error (_("Invalid argument."));
3295
3296 tp = find_thread_ptid (inferior_ptid);
3297 if (tp == NULL)
3298 error (_("No thread."));
3299
3300 btinfo = &tp->btrace;
3301
3302 /* Must clear the maint data before clearing the trace data - it depends on BTINFO->DATA. */
3303 btrace_maint_clear (btinfo);
3304 btrace_data_clear (&btinfo->data);
3305 }
3306
3307 /* The "maintenance btrace clear" command. */
3308
3309 static void
3310 maint_btrace_clear_cmd (char *args, int from_tty)
3311 {
3312 struct btrace_thread_info *btinfo;
3313 struct thread_info *tp;
3314
3315 if (args != NULL && *args != 0)
3316 error (_("Invalid argument."));
3317
3318 tp = find_thread_ptid (inferior_ptid);
3319 if (tp == NULL)
3320 error (_("No thread."));
3321
3322 btrace_clear (tp);
3323 }
3324
3325 /* The "maintenance btrace" command. */
3326
3327 static void
3328 maint_btrace_cmd (char *args, int from_tty)
3329 {
3330 help_list (maint_btrace_cmdlist, "maintenance btrace ", all_commands,
3331 gdb_stdout);
3332 }
3333
3334 /* The "maintenance set btrace" command. */
3335
3336 static void
3337 maint_btrace_set_cmd (char *args, int from_tty)
3338 {
3339 help_list (maint_btrace_set_cmdlist, "maintenance set btrace ", all_commands,
3340 gdb_stdout);
3341 }
3342
3343 /* The "maintenance show btrace" command. */
3344
3345 static void
3346 maint_btrace_show_cmd (char *args, int from_tty)
3347 {
3348 help_list (maint_btrace_show_cmdlist, "maintenance show btrace ",
3349 all_commands, gdb_stdout);
3350 }
3351
3352 /* The "maintenance set btrace pt" command. */
3353
3354 static void
3355 maint_btrace_pt_set_cmd (char *args, int from_tty)
3356 {
3357 help_list (maint_btrace_pt_set_cmdlist, "maintenance set btrace pt ",
3358 all_commands, gdb_stdout);
3359 }
3360
3361 /* The "maintenance show btrace pt" command. */
3362
3363 static void
3364 maint_btrace_pt_show_cmd (char *args, int from_tty)
3365 {
3366 help_list (maint_btrace_pt_show_cmdlist, "maintenance show btrace pt ",
3367 all_commands, gdb_stdout);
3368 }
3369
3370 /* The "maintenance info btrace" command. */
3371
3372 static void
3373 maint_info_btrace_cmd (char *args, int from_tty)
3374 {
3375 struct btrace_thread_info *btinfo;
3376 struct thread_info *tp;
3377 const struct btrace_config *conf;
3378
3379 if (args != NULL && *args != 0)
3380 error (_("Invalid argument."));
3381
3382 tp = find_thread_ptid (inferior_ptid);
3383 if (tp == NULL)
3384 error (_("No thread."));
3385
3386 btinfo = &tp->btrace;
3387
3388 conf = btrace_conf (btinfo);
3389 if (conf == NULL)
3390 error (_("No btrace configuration."));
3391
3392 printf_unfiltered (_("Format: %s.\n"),
3393 btrace_format_string (conf->format));
3394
3395 switch (conf->format)
3396 {
3397 default:
3398 break;
3399
3400 case BTRACE_FORMAT_BTS:
3401 printf_unfiltered (_("Number of packets: %u.\n"),
3402 VEC_length (btrace_block_s,
3403 btinfo->data.variant.bts.blocks));
3404 break;
3405
3406 #if defined (HAVE_LIBIPT)
3407 case BTRACE_FORMAT_PT:
3408 {
3409 struct pt_version version;
3410
3411 version = pt_library_version ();
3412 printf_unfiltered (_("Version: %u.%u.%u%s.\n"), version.major,
3413 version.minor, version.build,
3414 version.ext != NULL ? version.ext : "");
3415
3416 btrace_maint_update_pt_packets (btinfo);
3417 printf_unfiltered (_("Number of packets: %u.\n"),
3418 VEC_length (btrace_pt_packet_s,
3419 btinfo->maint.variant.pt.packets));
3420 }
3421 break;
3422 #endif /* defined (HAVE_LIBIPT) */
3423 }
3424 }
3425
3426 /* The "maint show btrace pt skip-pad" show value function. */
3427
3428 static void
3429 show_maint_btrace_pt_skip_pad (struct ui_file *file, int from_tty,
3430 struct cmd_list_element *c,
3431 const char *value)
3432 {
3433 fprintf_filtered (file, _("Skip PAD packets is %s.\n"), value);
3434 }
3435
3436
3437 /* Initialize btrace maintenance commands. */
3438
3439 void _initialize_btrace (void);
3440 void
3441 _initialize_btrace (void)
3442 {
3443 add_cmd ("btrace", class_maintenance, maint_info_btrace_cmd,
3444 _("Info about branch tracing data."), &maintenanceinfolist);
3445
3446 add_prefix_cmd ("btrace", class_maintenance, maint_btrace_cmd,
3447 _("Branch tracing maintenance commands."),
3448 &maint_btrace_cmdlist, "maintenance btrace ",
3449 0, &maintenancelist);
3450
3451 add_prefix_cmd ("btrace", class_maintenance, maint_btrace_set_cmd, _("\
3452 Set branch tracing specific variables."),
3453 &maint_btrace_set_cmdlist, "maintenance set btrace ",
3454 0, &maintenance_set_cmdlist);
3455
3456 add_prefix_cmd ("pt", class_maintenance, maint_btrace_pt_set_cmd, _("\
3457 Set Intel Processor Trace specific variables."),
3458 &maint_btrace_pt_set_cmdlist, "maintenance set btrace pt ",
3459 0, &maint_btrace_set_cmdlist);
3460
3461 add_prefix_cmd ("btrace", class_maintenance, maint_btrace_show_cmd, _("\
3462 Show branch tracing specific variables."),
3463 &maint_btrace_show_cmdlist, "maintenance show btrace ",
3464 0, &maintenance_show_cmdlist);
3465
3466 add_prefix_cmd ("pt", class_maintenance, maint_btrace_pt_show_cmd, _("\
3467 Show Intel Processor Trace specific variables."),
3468 &maint_btrace_pt_show_cmdlist, "maintenance show btrace pt ",
3469 0, &maint_btrace_show_cmdlist);
3470
3471 add_setshow_boolean_cmd ("skip-pad", class_maintenance,
3472 &maint_btrace_pt_skip_pad, _("\
3473 Set whether PAD packets should be skipped in the btrace packet history."), _("\
3474 Show whether PAD packets should be skipped in the btrace packet history."), _("\
3475 When enabled, PAD packets are ignored in the btrace packet history."),
3476 NULL, show_maint_btrace_pt_skip_pad,
3477 &maint_btrace_pt_set_cmdlist,
3478 &maint_btrace_pt_show_cmdlist);
3479
3480 add_cmd ("packet-history", class_maintenance, maint_btrace_packet_history_cmd,
3481 _("Print the raw branch tracing data.\n\
3482 With no argument, print ten more packets after the previous ten-line print.\n\
3483 With '-' as argument, print ten packets before the previous ten-line print.\n\
3484 One argument specifies the starting packet of a ten-line print.\n\
3485 Two arguments separated by a comma specify the starting and ending packets \
3486 to print.\n\
3487 Preceded by '+'/'-', the second argument specifies the distance from the \
3488 first.\n"),
3489 &maint_btrace_cmdlist);
3490
3491 add_cmd ("clear-packet-history", class_maintenance,
3492 maint_btrace_clear_packet_history_cmd,
3493 _("Clears the branch tracing packet history.\n\
3494 Discards the raw branch tracing data but not the execution history data.\n\
3495 "),
3496 &maint_btrace_cmdlist);
3497
3498 add_cmd ("clear", class_maintenance, maint_btrace_clear_cmd,
3499 _("Clears the branch tracing data.\n\
3500 Discards the raw branch tracing data and the execution history data.\n\
3501 The next 'record' command will fetch the branch tracing data anew.\n\
3502 "),
3503 &maint_btrace_cmdlist);
3504
3505 }