btrace: update tail call heuristic
binutils-gdb.git / gdb / btrace.c
1 /* Branch trace support for GDB, the GNU debugger.
2
3 Copyright (C) 2013-2016 Free Software Foundation, Inc.
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include "btrace.h"
24 #include "gdbthread.h"
25 #include "inferior.h"
26 #include "target.h"
27 #include "record.h"
28 #include "symtab.h"
29 #include "disasm.h"
30 #include "source.h"
31 #include "filenames.h"
32 #include "xml-support.h"
33 #include "regcache.h"
34 #include "rsp-low.h"
35 #include "gdbcmd.h"
36 #include "cli/cli-utils.h"
37
38 #include <inttypes.h>
39 #include <ctype.h>
40 #include <algorithm>
41
42 /* Command lists for btrace maintenance commands. */
43 static struct cmd_list_element *maint_btrace_cmdlist;
44 static struct cmd_list_element *maint_btrace_set_cmdlist;
45 static struct cmd_list_element *maint_btrace_show_cmdlist;
46 static struct cmd_list_element *maint_btrace_pt_set_cmdlist;
47 static struct cmd_list_element *maint_btrace_pt_show_cmdlist;
48
49 /* Control whether to skip PAD packets when computing the packet history. */
50 static int maint_btrace_pt_skip_pad = 1;
51
52 static void btrace_add_pc (struct thread_info *tp);
53
54 /* Print a record debug message. Use do ... while (0) to avoid ambiguities
55 when used in if statements. */
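/* For example, in "if (cond) DEBUG ("msg"); else ...", the do/while
   consumes the trailing semicolon so the "else" stays attached to the
   caller's "if" rather than causing a syntax error. */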
56
57 #define DEBUG(msg, args...) \
58 do \
59 { \
60 if (record_debug != 0) \
61 fprintf_unfiltered (gdb_stdlog, \
62 "[btrace] " msg "\n", ##args); \
63 } \
64 while (0)
65
66 #define DEBUG_FTRACE(msg, args...) DEBUG ("[ftrace] " msg, ##args)
67
68 /* Return the function name of a recorded function segment for printing.
69 This function never returns NULL. */
70
71 static const char *
72 ftrace_print_function_name (const struct btrace_function *bfun)
73 {
74 struct minimal_symbol *msym;
75 struct symbol *sym;
76
77 msym = bfun->msym;
78 sym = bfun->sym;
79
80 if (sym != NULL)
81 return SYMBOL_PRINT_NAME (sym);
82
83 if (msym != NULL)
84 return MSYMBOL_PRINT_NAME (msym);
85
86 return "<unknown>";
87 }
88
89 /* Return the file name of a recorded function segment for printing.
90 This function never returns NULL. */
91
92 static const char *
93 ftrace_print_filename (const struct btrace_function *bfun)
94 {
95 struct symbol *sym;
96 const char *filename;
97
98 sym = bfun->sym;
99
100 if (sym != NULL)
101 filename = symtab_to_filename_for_display (symbol_symtab (sym));
102 else
103 filename = "<unknown>";
104
105 return filename;
106 }
107
108 /* Return a string representation of the address of an instruction.
109 This function never returns NULL. */
110
111 static const char *
112 ftrace_print_insn_addr (const struct btrace_insn *insn)
113 {
114 if (insn == NULL)
115 return "<nil>";
116
117 return core_addr_to_string_nz (insn->pc);
118 }
119
120 /* Print an ftrace debug status message. */
121
122 static void
123 ftrace_debug (const struct btrace_function *bfun, const char *prefix)
124 {
125 const char *fun, *file;
126 unsigned int ibegin, iend;
127 int level;
128
129 fun = ftrace_print_function_name (bfun);
130 file = ftrace_print_filename (bfun);
131 level = bfun->level;
132
133 ibegin = bfun->insn_offset;
134 iend = ibegin + VEC_length (btrace_insn_s, bfun->insn);
135
136 DEBUG_FTRACE ("%s: fun = %s, file = %s, level = %d, insn = [%u; %u)",
137 prefix, fun, file, level, ibegin, iend);
138 }
139
140 /* Return non-zero if BFUN does not match MFUN and FUN,
141 return zero otherwise. */
142
143 static int
144 ftrace_function_switched (const struct btrace_function *bfun,
145 const struct minimal_symbol *mfun,
146 const struct symbol *fun)
147 {
148 struct minimal_symbol *msym;
149 struct symbol *sym;
150
151 msym = bfun->msym;
152 sym = bfun->sym;
153
154 /* If the minimal symbol changed, we certainly switched functions. */
155 if (mfun != NULL && msym != NULL
156 && strcmp (MSYMBOL_LINKAGE_NAME (mfun), MSYMBOL_LINKAGE_NAME (msym)) != 0)
157 return 1;
158
159 /* If the symbol changed, we certainly switched functions. */
160 if (fun != NULL && sym != NULL)
161 {
162 const char *bfname, *fname;
163
164 /* Check the function name. */
165 if (strcmp (SYMBOL_LINKAGE_NAME (fun), SYMBOL_LINKAGE_NAME (sym)) != 0)
166 return 1;
167
168 /* Check the location of those functions, as well. */
169 bfname = symtab_to_fullname (symbol_symtab (sym));
170 fname = symtab_to_fullname (symbol_symtab (fun));
171 if (filename_cmp (fname, bfname) != 0)
172 return 1;
173 }
174
175 /* If we lost symbol information, we switched functions. */
176 if (!(msym == NULL && sym == NULL) && mfun == NULL && fun == NULL)
177 return 1;
178
179 /* If we gained symbol information, we switched functions. */
180 if (msym == NULL && sym == NULL && !(mfun == NULL && fun == NULL))
181 return 1;
182
183 return 0;
184 }
185
186 /* Allocate and initialize a new branch trace function segment.
187 PREV is the chronologically preceding function segment.
188 MFUN and FUN are the symbol information we have for this function. */
189
190 static struct btrace_function *
191 ftrace_new_function (struct btrace_function *prev,
192 struct minimal_symbol *mfun,
193 struct symbol *fun)
194 {
195 struct btrace_function *bfun;
196
197 bfun = XCNEW (struct btrace_function);
198
199 bfun->msym = mfun;
200 bfun->sym = fun;
201 bfun->flow.prev = prev;
202
203 if (prev == NULL)
204 {
205 /* Start counting at one. */
206 bfun->number = 1;
207 bfun->insn_offset = 1;
208 }
209 else
210 {
211 gdb_assert (prev->flow.next == NULL);
212 prev->flow.next = bfun;
213
214 bfun->number = prev->number + 1;
215 bfun->insn_offset = (prev->insn_offset
216 + VEC_length (btrace_insn_s, prev->insn));
217 bfun->level = prev->level;
218 }
219
220 return bfun;
221 }
222
223 /* Update the UP field of a function segment. */
224
225 static void
226 ftrace_update_caller (struct btrace_function *bfun,
227 struct btrace_function *caller,
228 enum btrace_function_flag flags)
229 {
230 if (bfun->up != NULL)
231 ftrace_debug (bfun, "updating caller");
232
233 bfun->up = caller;
234 bfun->flags = flags;
235
236 ftrace_debug (bfun, "set caller");
237 }
238
239 /* Fix up the caller for all segments of a function. */
240
241 static void
242 ftrace_fixup_caller (struct btrace_function *bfun,
243 struct btrace_function *caller,
244 enum btrace_function_flag flags)
245 {
246 struct btrace_function *prev, *next;
247
248 ftrace_update_caller (bfun, caller, flags);
249
250 /* Update all function segments belonging to the same function. */
251 for (prev = bfun->segment.prev; prev != NULL; prev = prev->segment.prev)
252 ftrace_update_caller (prev, caller, flags);
253
254 for (next = bfun->segment.next; next != NULL; next = next->segment.next)
255 ftrace_update_caller (next, caller, flags);
256 }
257
258 /* Add a new function segment for a call.
259 CALLER is the chronologically preceding function segment.
260 MFUN and FUN are the symbol information we have for this function. */
261
262 static struct btrace_function *
263 ftrace_new_call (struct btrace_function *caller,
264 struct minimal_symbol *mfun,
265 struct symbol *fun)
266 {
267 struct btrace_function *bfun;
268
269 bfun = ftrace_new_function (caller, mfun, fun);
270 bfun->up = caller;
271 bfun->level += 1;
272
273 ftrace_debug (bfun, "new call");
274
275 return bfun;
276 }
277
278 /* Add a new function segment for a tail call.
279 CALLER is the chronologically preceding function segment.
280 MFUN and FUN are the symbol information we have for this function. */
281
282 static struct btrace_function *
283 ftrace_new_tailcall (struct btrace_function *caller,
284 struct minimal_symbol *mfun,
285 struct symbol *fun)
286 {
287 struct btrace_function *bfun;
288
289 bfun = ftrace_new_function (caller, mfun, fun);
290 bfun->up = caller;
291 bfun->level += 1;
292 bfun->flags |= BFUN_UP_LINKS_TO_TAILCALL;
293
294 ftrace_debug (bfun, "new tail call");
295
296 return bfun;
297 }
298
299 /* Find the innermost caller in the back trace of BFUN with MFUN/FUN
300 symbol information. */
301
302 static struct btrace_function *
303 ftrace_find_caller (struct btrace_function *bfun,
304 struct minimal_symbol *mfun,
305 struct symbol *fun)
306 {
307 for (; bfun != NULL; bfun = bfun->up)
308 {
309 /* Skip functions with incompatible symbol information. */
310 if (ftrace_function_switched (bfun, mfun, fun))
311 continue;
312
313 /* This is the function segment we're looking for. */
314 break;
315 }
316
317 return bfun;
318 }
319
320 /* Find the innermost caller in the back trace of BFUN, skipping all
321 function segments that do not end with a call instruction (e.g.
322 tail calls ending with a jump). */
323
324 static struct btrace_function *
325 ftrace_find_call (struct btrace_function *bfun)
326 {
327 for (; bfun != NULL; bfun = bfun->up)
328 {
329 struct btrace_insn *last;
330
331 /* Skip gaps. */
332 if (bfun->errcode != 0)
333 continue;
334
335 last = VEC_last (btrace_insn_s, bfun->insn);
336
337 if (last->iclass == BTRACE_INSN_CALL)
338 break;
339 }
340
341 return bfun;
342 }
343
344 /* Add a continuation segment for a function into which we return.
345 PREV is the chronologically preceding function segment.
346 MFUN and FUN are the symbol information we have for this function. */
347
348 static struct btrace_function *
349 ftrace_new_return (struct btrace_function *prev,
350 struct minimal_symbol *mfun,
351 struct symbol *fun)
352 {
353 struct btrace_function *bfun, *caller;
354
355 bfun = ftrace_new_function (prev, mfun, fun);
356
357 /* It is important to start at PREV's caller. Otherwise, we might find
358 PREV itself, if PREV is a recursive function. */
359 caller = ftrace_find_caller (prev->up, mfun, fun);
360 if (caller != NULL)
361 {
362 /* The caller of PREV is the preceding btrace function segment in this
363 function instance. */
364 gdb_assert (caller->segment.next == NULL);
365
366 caller->segment.next = bfun;
367 bfun->segment.prev = caller;
368
369 /* Maintain the function level. */
370 bfun->level = caller->level;
371
372 /* Maintain the call stack. */
373 bfun->up = caller->up;
374 bfun->flags = caller->flags;
375
376 ftrace_debug (bfun, "new return");
377 }
378 else
379 {
380 /* We did not find a caller. This could mean that something went
381 wrong or that the call is simply not included in the trace. */
382
383 /* Let's search for some actual call. */
384 caller = ftrace_find_call (prev->up);
385 if (caller == NULL)
386 {
387 /* There is no call in PREV's back trace. We assume that the
388 branch trace did not include it. */
389
390 /* Let's find the topmost call function - this skips tail calls. */
391 while (prev->up != NULL)
392 prev = prev->up;
393
394 /* We maintain levels for a series of returns for which we have
395 not seen the calls.
396 We start at the preceding function's level in case this has
397 already been a return for which we have not seen the call.
398 We start at level 0 otherwise, to handle tail calls correctly. */
399 bfun->level = std::min (0, prev->level) - 1;
400
401 /* Fix up the call stack for PREV. */
402 ftrace_fixup_caller (prev, bfun, BFUN_UP_LINKS_TO_RET);
403
404 ftrace_debug (bfun, "new return - no caller");
405 }
406 else
407 {
408 /* There is a call in PREV's back trace to which we should have
409 returned. Let's remain at this level. */
410 bfun->level = prev->level;
411
412 ftrace_debug (bfun, "new return - unknown caller");
413 }
414 }
415
416 return bfun;
417 }
418
419 /* Add a new function segment for a function switch.
420 PREV is the chronologically preceding function segment.
421 MFUN and FUN are the symbol information we have for this function. */
422
423 static struct btrace_function *
424 ftrace_new_switch (struct btrace_function *prev,
425 struct minimal_symbol *mfun,
426 struct symbol *fun)
427 {
428 struct btrace_function *bfun;
429
430 /* This is an unexplained function switch. The call stack will likely
431 be wrong at this point. */
432 bfun = ftrace_new_function (prev, mfun, fun);
433
434 ftrace_debug (bfun, "new switch");
435
436 return bfun;
437 }
438
439 /* Add a new function segment for a gap in the trace due to a decode error.
440 PREV is the chronologically preceding function segment.
441 ERRCODE is the format-specific error code. */
442
443 static struct btrace_function *
444 ftrace_new_gap (struct btrace_function *prev, int errcode)
445 {
446 struct btrace_function *bfun;
447
448 /* We hijack PREV if it was empty. */
449 if (prev != NULL && prev->errcode == 0
450 && VEC_empty (btrace_insn_s, prev->insn))
451 bfun = prev;
452 else
453 bfun = ftrace_new_function (prev, NULL, NULL);
454
455 bfun->errcode = errcode;
456
457 ftrace_debug (bfun, "new gap");
458
459 return bfun;
460 }
461
462 /* Update BFUN with respect to the instruction at PC. This may create new
463 function segments.
464 Return the chronologically latest function segment, never NULL. */
465
466 static struct btrace_function *
467 ftrace_update_function (struct btrace_function *bfun, CORE_ADDR pc)
468 {
469 struct bound_minimal_symbol bmfun;
470 struct minimal_symbol *mfun;
471 struct symbol *fun;
472 struct btrace_insn *last;
473
474 /* Try to determine the function we're in. We use both types of symbols
475 to avoid surprises when we sometimes get a full symbol and sometimes
476 only a minimal symbol. */
477 fun = find_pc_function (pc);
478 bmfun = lookup_minimal_symbol_by_pc (pc);
479 mfun = bmfun.minsym;
480
481 if (fun == NULL && mfun == NULL)
482 DEBUG_FTRACE ("no symbol at %s", core_addr_to_string_nz (pc));
483
484 /* If we didn't have a function or if we had a gap before, we create one. */
485 if (bfun == NULL || bfun->errcode != 0)
486 return ftrace_new_function (bfun, mfun, fun);
487
488 /* Check the last instruction, if we have one.
489 We do this check first, since it allows us to fill in the call stack
490 links in addition to the normal flow links. */
491 last = NULL;
492 if (!VEC_empty (btrace_insn_s, bfun->insn))
493 last = VEC_last (btrace_insn_s, bfun->insn);
494
495 if (last != NULL)
496 {
497 switch (last->iclass)
498 {
499 case BTRACE_INSN_RETURN:
500 {
501 const char *fname;
502
503 /* On some systems, _dl_runtime_resolve returns to the resolved
504 function instead of jumping to it. From our perspective,
505 however, this is a tailcall.
506 If we treated it as a return, we wouldn't be able to find the
507 resolved function in our stack back trace. Hence, we would
508 lose the current stack back trace and start anew with an empty
509 back trace. When the resolved function returns, we would then
510 create a stack back trace with the same function names but
511 different frame id's. This will confuse stepping. */
512 fname = ftrace_print_function_name (bfun);
513 if (strcmp (fname, "_dl_runtime_resolve") == 0)
514 return ftrace_new_tailcall (bfun, mfun, fun);
515
516 return ftrace_new_return (bfun, mfun, fun);
517 }
518
519 case BTRACE_INSN_CALL:
520 /* Ignore calls to the next instruction. They are used for PIC. */
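/* On 32-bit x86, for example, position-independent code obtains the
   current PC with a "call 1f; 1: pop %ebx" sequence; the call targets
   the very next instruction and does not create a new frame. */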
521 if (last->pc + last->size == pc)
522 break;
523
524 return ftrace_new_call (bfun, mfun, fun);
525
526 case BTRACE_INSN_JUMP:
527 {
528 CORE_ADDR start;
529
530 start = get_pc_function_start (pc);
531
532 /* A jump to the start of a function is (typically) a tail call. */
533 if (start == pc)
534 return ftrace_new_tailcall (bfun, mfun, fun);
535
536 /* If we can't determine the function for PC, we treat a jump at
537 the end of the block as a tail call if we're switching functions
538 and as an intra-function branch if we're not. */
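/* This covers, for example, a tail call into a stripped shared
   object: there are no symbols for PC, so START stays zero, and
   losing the symbol information counts as a function switch. */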
539 if (start == 0 && ftrace_function_switched (bfun, mfun, fun))
540 return ftrace_new_tailcall (bfun, mfun, fun);
541
542 break;
543 }
544 }
545 }
546
547 /* Check if we're switching functions for some other reason. */
548 if (ftrace_function_switched (bfun, mfun, fun))
549 {
550 DEBUG_FTRACE ("switching from %s in %s at %s",
551 ftrace_print_insn_addr (last),
552 ftrace_print_function_name (bfun),
553 ftrace_print_filename (bfun));
554
555 return ftrace_new_switch (bfun, mfun, fun);
556 }
557
558 return bfun;
559 }
560
561 /* Add the instruction INSN to BFUN's instructions. */
562
563 static void
564 ftrace_update_insns (struct btrace_function *bfun,
565 const struct btrace_insn *insn)
566 {
567 VEC_safe_push (btrace_insn_s, bfun->insn, insn);
568
569 if (record_debug > 1)
570 ftrace_debug (bfun, "update insn");
571 }
572
573 /* Classify the instruction at PC. */
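/* If classification fails, e.g. because the memory at PC cannot be
   read, the instruction defaults to BTRACE_INSN_OTHER. */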
574
575 static enum btrace_insn_class
576 ftrace_classify_insn (struct gdbarch *gdbarch, CORE_ADDR pc)
577 {
578 enum btrace_insn_class iclass;
579
580 iclass = BTRACE_INSN_OTHER;
581 TRY
582 {
583 if (gdbarch_insn_is_call (gdbarch, pc))
584 iclass = BTRACE_INSN_CALL;
585 else if (gdbarch_insn_is_ret (gdbarch, pc))
586 iclass = BTRACE_INSN_RETURN;
587 else if (gdbarch_insn_is_jump (gdbarch, pc))
588 iclass = BTRACE_INSN_JUMP;
589 }
590 CATCH (error, RETURN_MASK_ERROR)
591 {
592 }
593 END_CATCH
594
595 return iclass;
596 }
597
598 /* Compute the function branch trace from BTS trace. */
599
600 static void
601 btrace_compute_ftrace_bts (struct thread_info *tp,
602 const struct btrace_data_bts *btrace)
603 {
604 struct btrace_thread_info *btinfo;
605 struct btrace_function *begin, *end;
606 struct gdbarch *gdbarch;
607 unsigned int blk, ngaps;
608 int level;
609
610 gdbarch = target_gdbarch ();
611 btinfo = &tp->btrace;
612 begin = btinfo->begin;
613 end = btinfo->end;
614 ngaps = btinfo->ngaps;
615 level = begin != NULL ? -btinfo->level : INT_MAX;
616 blk = VEC_length (btrace_block_s, btrace->blocks);
617
618 while (blk != 0)
619 {
620 btrace_block_s *block;
621 CORE_ADDR pc;
622
623 blk -= 1;
624
625 block = VEC_index (btrace_block_s, btrace->blocks, blk);
626 pc = block->begin;
627
628 for (;;)
629 {
630 struct btrace_insn insn;
631 int size;
632
633 /* We should hit the end of the block. Warn if we went too far. */
634 if (block->end < pc)
635 {
636 /* Indicate the gap in the trace. */
637 end = ftrace_new_gap (end, BDE_BTS_OVERFLOW);
638 if (begin == NULL)
639 begin = end;
640
641 ngaps += 1;
642
643 warning (_("Recorded trace may be corrupted at instruction "
644 "%u (pc = %s)."), end->insn_offset - 1,
645 core_addr_to_string_nz (pc));
646
647 break;
648 }
649
650 end = ftrace_update_function (end, pc);
651 if (begin == NULL)
652 begin = end;
653
654 /* Maintain the function level offset.
655 For all but the last block, we do it here. */
656 if (blk != 0)
657 level = std::min (level, end->level);
658
659 size = 0;
660 TRY
661 {
662 size = gdb_insn_length (gdbarch, pc);
663 }
664 CATCH (error, RETURN_MASK_ERROR)
665 {
666 }
667 END_CATCH
668
669 insn.pc = pc;
670 insn.size = size;
671 insn.iclass = ftrace_classify_insn (gdbarch, pc);
672 insn.flags = 0;
673
674 ftrace_update_insns (end, &insn);
675
676 /* We're done once we pushed the instruction at the end. */
677 if (block->end == pc)
678 break;
679
680 /* We can't continue if we fail to compute the size. */
681 if (size <= 0)
682 {
683 /* Indicate the gap in the trace. We just added INSN so we're
684 not at the beginning. */
685 end = ftrace_new_gap (end, BDE_BTS_INSN_SIZE);
686 ngaps += 1;
687
688 warning (_("Recorded trace may be incomplete at instruction %u "
689 "(pc = %s)."), end->insn_offset - 1,
690 core_addr_to_string_nz (pc));
691
692 break;
693 }
694
695 pc += size;
696
697 /* Maintain the function level offset.
698 For the last block, we do it here to not consider the last
699 instruction.
700 Since the last instruction corresponds to the current instruction
701 and is not really part of the execution history, it shouldn't
702 affect the level. */
703 if (blk == 0)
704 level = std::min (level, end->level);
705 }
706 }
707
708 btinfo->begin = begin;
709 btinfo->end = end;
710 btinfo->ngaps = ngaps;
711
712 /* LEVEL is the minimal function level of all btrace function segments.
713 Define the global level offset to -LEVEL so all function levels are
714 normalized to start at zero. */
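/* For example, if the trace begins inside a function that then returns
   to an unseen caller, that caller's segment has level -1; LEVEL is -1,
   the offset becomes 1, and the caller is printed at level zero. */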
715 btinfo->level = -level;
716 }
717
718 #if defined (HAVE_LIBIPT)
719
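/* Return the GDB branch trace instruction class for the libipt
   instruction class ICLASS. */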
720 static enum btrace_insn_class
721 pt_reclassify_insn (enum pt_insn_class iclass)
722 {
723 switch (iclass)
724 {
725 case ptic_call:
726 return BTRACE_INSN_CALL;
727
728 case ptic_return:
729 return BTRACE_INSN_RETURN;
730
731 case ptic_jump:
732 return BTRACE_INSN_JUMP;
733
734 default:
735 return BTRACE_INSN_OTHER;
736 }
737 }
738
739 /* Return the btrace instruction flags for INSN. */
740
741 static btrace_insn_flags
742 pt_btrace_insn_flags (const struct pt_insn *insn)
743 {
744 btrace_insn_flags flags = 0;
745
746 if (insn->speculative)
747 flags |= BTRACE_INSN_FLAG_SPECULATIVE;
748
749 return flags;
750 }
751
752 /* Add function branch trace using DECODER. */
753
754 static void
755 ftrace_add_pt (struct pt_insn_decoder *decoder,
756 struct btrace_function **pbegin,
757 struct btrace_function **pend, int *plevel,
758 unsigned int *ngaps)
759 {
760 struct btrace_function *begin, *end, *upd;
761 uint64_t offset;
762 int errcode;
763
764 begin = *pbegin;
765 end = *pend;
766 for (;;)
767 {
768 struct btrace_insn btinsn;
769 struct pt_insn insn;
770
771 errcode = pt_insn_sync_forward (decoder);
772 if (errcode < 0)
773 {
774 if (errcode != -pte_eos)
775 warning (_("Failed to synchronize onto the Intel Processor "
776 "Trace stream: %s."), pt_errstr (pt_errcode (errcode)));
777 break;
778 }
779
780 memset (&btinsn, 0, sizeof (btinsn));
781 for (;;)
782 {
783 errcode = pt_insn_next (decoder, &insn, sizeof (insn));
784 if (errcode < 0)
785 break;
786
787 /* Look for gaps in the trace - unless we're at the beginning. */
788 if (begin != NULL)
789 {
790 /* Tracing is disabled and re-enabled each time we enter the
791 kernel. Most times, we continue from the same instruction we
792 stopped before. This is indicated via the RESUMED instruction
793 flag. The ENABLED instruction flag means that we continued
794 from some other instruction. Indicate this as a trace gap. */
795 if (insn.enabled)
796 {
797 *pend = end = ftrace_new_gap (end, BDE_PT_DISABLED);
798 *ngaps += 1;
799
800 pt_insn_get_offset (decoder, &offset);
801
802 warning (_("Non-contiguous trace at instruction %u (offset "
803 "= 0x%" PRIx64 ", pc = 0x%" PRIx64 ")."),
804 end->insn_offset - 1, offset, insn.ip);
805 }
806 }
807
808 /* Indicate trace overflows. */
809 if (insn.resynced)
810 {
811 *pend = end = ftrace_new_gap (end, BDE_PT_OVERFLOW);
812 if (begin == NULL)
813 *pbegin = begin = end;
814
815 *ngaps += 1;
816
817 pt_insn_get_offset (decoder, &offset);
818
819 warning (_("Overflow at instruction %u (offset = 0x%" PRIx64
820 ", pc = 0x%" PRIx64 ")."), end->insn_offset - 1,
821 offset, insn.ip);
822 }
823
824 upd = ftrace_update_function (end, insn.ip);
825 if (upd != end)
826 {
827 *pend = end = upd;
828
829 if (begin == NULL)
830 *pbegin = begin = upd;
831 }
832
833 /* Maintain the function level offset. */
834 *plevel = std::min (*plevel, end->level);
835
836 btinsn.pc = (CORE_ADDR) insn.ip;
837 btinsn.size = (gdb_byte) insn.size;
838 btinsn.iclass = pt_reclassify_insn (insn.iclass);
839 btinsn.flags = pt_btrace_insn_flags (&insn);
840
841 ftrace_update_insns (end, &btinsn);
842 }
843
844 if (errcode == -pte_eos)
845 break;
846
847 /* Indicate the gap in the trace. */
848 *pend = end = ftrace_new_gap (end, errcode);
849 if (begin == NULL)
850 *pbegin = begin = end;
851 *ngaps += 1;
852
853 pt_insn_get_offset (decoder, &offset);
854
855 warning (_("Decode error (%d) at instruction %u (offset = 0x%" PRIx64
856 ", pc = 0x%" PRIx64 "): %s."), errcode, end->insn_offset - 1,
857 offset, insn.ip, pt_errstr (pt_errcode (errcode)));
858 }
859 }
860
861 /* A callback function to allow the trace decoder to read the inferior's
862 memory. */
863
864 static int
865 btrace_pt_readmem_callback (gdb_byte *buffer, size_t size,
866 const struct pt_asid *asid, uint64_t pc,
867 void *context)
868 {
869 int result, errcode;
870
871 result = (int) size;
872 TRY
873 {
874 errcode = target_read_code ((CORE_ADDR) pc, buffer, size);
875 if (errcode != 0)
876 result = -pte_nomap;
877 }
878 CATCH (error, RETURN_MASK_ERROR)
879 {
880 result = -pte_nomap;
881 }
882 END_CATCH
883
884 return result;
885 }
886
887 /* Translate the vendor from one enum to another. */
888
889 static enum pt_cpu_vendor
890 pt_translate_cpu_vendor (enum btrace_cpu_vendor vendor)
891 {
892 switch (vendor)
893 {
894 default:
895 return pcv_unknown;
896
897 case CV_INTEL:
898 return pcv_intel;
899 }
900 }
901
902 /* Finalize the function branch trace after decode. */
903
904 static void btrace_finalize_ftrace_pt (struct pt_insn_decoder *decoder,
905 struct thread_info *tp, int level)
906 {
907 pt_insn_free_decoder (decoder);
908
909 /* LEVEL is the minimal function level of all btrace function segments.
910 Define the global level offset to -LEVEL so all function levels are
911 normalized to start at zero. */
912 tp->btrace.level = -level;
913
914 /* Add a single last instruction entry for the current PC.
915 This allows us to compute the backtrace at the current PC using both
916 standard unwind and btrace unwind.
917 This extra entry is ignored by all record commands. */
918 btrace_add_pc (tp);
919 }
920
921 /* Compute the function branch trace from Intel Processor Trace
922 format. */
923
924 static void
925 btrace_compute_ftrace_pt (struct thread_info *tp,
926 const struct btrace_data_pt *btrace)
927 {
928 struct btrace_thread_info *btinfo;
929 struct pt_insn_decoder *decoder;
930 struct pt_config config;
931 int level, errcode;
932
933 if (btrace->size == 0)
934 return;
935
936 btinfo = &tp->btrace;
937 level = btinfo->begin != NULL ? -btinfo->level : INT_MAX;
938
939 pt_config_init (&config);
940 config.begin = btrace->data;
941 config.end = btrace->data + btrace->size;
942
943 config.cpu.vendor = pt_translate_cpu_vendor (btrace->config.cpu.vendor);
944 config.cpu.family = btrace->config.cpu.family;
945 config.cpu.model = btrace->config.cpu.model;
946 config.cpu.stepping = btrace->config.cpu.stepping;
947
948 errcode = pt_cpu_errata (&config.errata, &config.cpu);
949 if (errcode < 0)
950 error (_("Failed to configure the Intel Processor Trace decoder: %s."),
951 pt_errstr (pt_errcode (errcode)));
952
953 decoder = pt_insn_alloc_decoder (&config);
954 if (decoder == NULL)
955 error (_("Failed to allocate the Intel Processor Trace decoder."));
956
957 TRY
958 {
959 struct pt_image *image;
960
961 image = pt_insn_get_image (decoder);
962 if (image == NULL)
963 error (_("Failed to configure the Intel Processor Trace decoder."));
964
965 errcode = pt_image_set_callback (image, btrace_pt_readmem_callback, NULL);
966 if (errcode < 0)
967 error (_("Failed to configure the Intel Processor Trace decoder: "
968 "%s."), pt_errstr (pt_errcode (errcode)));
969
970 ftrace_add_pt (decoder, &btinfo->begin, &btinfo->end, &level,
971 &btinfo->ngaps);
972 }
973 CATCH (error, RETURN_MASK_ALL)
974 {
975 /* Indicate a gap in the trace if we quit trace processing. */
976 if (error.reason == RETURN_QUIT && btinfo->end != NULL)
977 {
978 btinfo->end = ftrace_new_gap (btinfo->end, BDE_PT_USER_QUIT);
979 btinfo->ngaps++;
980 }
981
982 btrace_finalize_ftrace_pt (decoder, tp, level);
983
984 throw_exception (error);
985 }
986 END_CATCH
987
988 btrace_finalize_ftrace_pt (decoder, tp, level);
989 }
990
991 #else /* defined (HAVE_LIBIPT) */
992
993 static void
994 btrace_compute_ftrace_pt (struct thread_info *tp,
995 const struct btrace_data_pt *btrace)
996 {
997 internal_error (__FILE__, __LINE__, _("Unexpected branch trace format."));
998 }
999
1000 #endif /* defined (HAVE_LIBIPT) */
1001
1002 /* Compute the function branch trace from the branch trace data BTRACE
1003 for the thread TP. */
1004
1005 static void
1006 btrace_compute_ftrace (struct thread_info *tp, struct btrace_data *btrace)
1007 {
1008 DEBUG ("compute ftrace");
1009
1010 switch (btrace->format)
1011 {
1012 case BTRACE_FORMAT_NONE:
1013 return;
1014
1015 case BTRACE_FORMAT_BTS:
1016 btrace_compute_ftrace_bts (tp, &btrace->variant.bts);
1017 return;
1018
1019 case BTRACE_FORMAT_PT:
1020 btrace_compute_ftrace_pt (tp, &btrace->variant.pt);
1021 return;
1022 }
1023
1024 internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
1025 }
1026
1027 /* Add an entry for the current PC. */
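/* This fabricates a single-instruction BTS block [PC; PC] and feeds it
   through btrace_compute_ftrace so the current instruction becomes part
   of the function-level trace. */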
1028
1029 static void
1030 btrace_add_pc (struct thread_info *tp)
1031 {
1032 struct btrace_data btrace;
1033 struct btrace_block *block;
1034 struct regcache *regcache;
1035 struct cleanup *cleanup;
1036 CORE_ADDR pc;
1037
1038 regcache = get_thread_regcache (tp->ptid);
1039 pc = regcache_read_pc (regcache);
1040
1041 btrace_data_init (&btrace);
1042 btrace.format = BTRACE_FORMAT_BTS;
1043 btrace.variant.bts.blocks = NULL;
1044
1045 cleanup = make_cleanup_btrace_data (&btrace);
1046
1047 block = VEC_safe_push (btrace_block_s, btrace.variant.bts.blocks, NULL);
1048 block->begin = pc;
1049 block->end = pc;
1050
1051 btrace_compute_ftrace (tp, &btrace);
1052
1053 do_cleanups (cleanup);
1054 }
1055
1056 /* See btrace.h. */
1057
1058 void
1059 btrace_enable (struct thread_info *tp, const struct btrace_config *conf)
1060 {
1061 if (tp->btrace.target != NULL)
1062 return;
1063
1064 #if !defined (HAVE_LIBIPT)
1065 if (conf->format == BTRACE_FORMAT_PT)
1066 error (_("GDB does not support Intel Processor Trace."));
1067 #endif /* !defined (HAVE_LIBIPT) */
1068
1069 if (!target_supports_btrace (conf->format))
1070 error (_("Target does not support branch tracing."));
1071
1072 DEBUG ("enable thread %s (%s)", print_thread_id (tp),
1073 target_pid_to_str (tp->ptid));
1074
1075 tp->btrace.target = target_enable_btrace (tp->ptid, conf);
1076
1077 /* Add an entry for the current PC so we start tracing from where we
1078 enabled it. */
1079 if (tp->btrace.target != NULL)
1080 btrace_add_pc (tp);
1081 }
1082
1083 /* See btrace.h. */
1084
1085 const struct btrace_config *
1086 btrace_conf (const struct btrace_thread_info *btinfo)
1087 {
1088 if (btinfo->target == NULL)
1089 return NULL;
1090
1091 return target_btrace_conf (btinfo->target);
1092 }
1093
1094 /* See btrace.h. */
1095
1096 void
1097 btrace_disable (struct thread_info *tp)
1098 {
1099 struct btrace_thread_info *btp = &tp->btrace;
1100 int errcode = 0;
1101
1102 if (btp->target == NULL)
1103 return;
1104
1105 DEBUG ("disable thread %s (%s)", print_thread_id (tp),
1106 target_pid_to_str (tp->ptid));
1107
1108 target_disable_btrace (btp->target);
1109 btp->target = NULL;
1110
1111 btrace_clear (tp);
1112 }
1113
1114 /* See btrace.h. */
1115
1116 void
1117 btrace_teardown (struct thread_info *tp)
1118 {
1119 struct btrace_thread_info *btp = &tp->btrace;
1120 int errcode = 0;
1121
1122 if (btp->target == NULL)
1123 return;
1124
1125 DEBUG ("teardown thread %s (%s)", print_thread_id (tp),
1126 target_pid_to_str (tp->ptid));
1127
1128 target_teardown_btrace (btp->target);
1129 btp->target = NULL;
1130
1131 btrace_clear (tp);
1132 }
1133
1134 /* Stitch branch trace in BTS format. */
1135
1136 static int
1137 btrace_stitch_bts (struct btrace_data_bts *btrace, struct thread_info *tp)
1138 {
1139 struct btrace_thread_info *btinfo;
1140 struct btrace_function *last_bfun;
1141 struct btrace_insn *last_insn;
1142 btrace_block_s *first_new_block;
1143
1144 btinfo = &tp->btrace;
1145 last_bfun = btinfo->end;
1146 gdb_assert (last_bfun != NULL);
1147 gdb_assert (!VEC_empty (btrace_block_s, btrace->blocks));
1148
1149 /* If the existing trace ends with a gap, we just glue the traces
1150 together. We need to drop the last (i.e. chronologically first) block
1151 of the new trace, though, since we can't fill in the start address. */
1152 if (VEC_empty (btrace_insn_s, last_bfun->insn))
1153 {
1154 VEC_pop (btrace_block_s, btrace->blocks);
1155 return 0;
1156 }
1157
1158 /* Beware that block trace starts with the most recent block, so the
1159 chronologically first block in the new trace is the last block in
1160 the new trace's block vector. */
1161 first_new_block = VEC_last (btrace_block_s, btrace->blocks);
1162 last_insn = VEC_last (btrace_insn_s, last_bfun->insn);
1163
1164 /* If the current PC at the end of the block is the same as in our current
1165 trace, there are two explanations:
1166 1. we executed the instruction and some branch brought us back.
1167 2. we have not made any progress.
1168 In the first case, the delta trace vector should contain at least two
1169 entries.
1170 In the second case, the delta trace vector should contain exactly one
1171 entry for the partial block containing the current PC. Remove it. */
1172 if (first_new_block->end == last_insn->pc
1173 && VEC_length (btrace_block_s, btrace->blocks) == 1)
1174 {
1175 VEC_pop (btrace_block_s, btrace->blocks);
1176 return 0;
1177 }
1178
1179 DEBUG ("stitching %s to %s", ftrace_print_insn_addr (last_insn),
1180 core_addr_to_string_nz (first_new_block->end));
1181
1182 /* Do a simple sanity check to make sure we don't accidentally end up
1183 with a bad block. This should not occur in practice. */
1184 if (first_new_block->end < last_insn->pc)
1185 {
1186 warning (_("Error while trying to read delta trace. Falling back to "
1187 "a full read."));
1188 return -1;
1189 }
1190
1191 /* We adjust the last block to start at the end of our current trace. */
1192 gdb_assert (first_new_block->begin == 0);
1193 first_new_block->begin = last_insn->pc;
1194
1195 /* We simply pop the last insn so we can insert it again as part of
1196 the normal branch trace computation.
1197 Since instruction iterators are based on indices in the instructions
1198 vector, we don't leave any pointers dangling. */
1199 DEBUG ("pruning insn at %s for stitching",
1200 ftrace_print_insn_addr (last_insn));
1201
1202 VEC_pop (btrace_insn_s, last_bfun->insn);
1203
1204 /* The instructions vector may become empty temporarily if this has
1205 been the only instruction in this function segment.
1206 This violates the invariant but will be remedied shortly by
1207 btrace_compute_ftrace when we add the new trace. */
1208
1209 /* The only case where this would hurt is if the entire trace consisted
1210 of just that one instruction. If we remove it, we might turn the now
1211 empty btrace function segment into a gap. But we don't want gaps at
1212 the beginning. To avoid this, we remove the entire old trace. */
1213 if (last_bfun == btinfo->begin && VEC_empty (btrace_insn_s, last_bfun->insn))
1214 btrace_clear (tp);
1215
1216 return 0;
1217 }
1218
1219 /* Adjust the block trace in order to stitch old and new trace together.
1220 BTRACE is the new delta trace between the last and the current stop.
1221 TP is the traced thread.
1222 May modify BTRACE as well as the existing trace in TP.
1223 Return 0 on success, -1 otherwise. */
1224
1225 static int
1226 btrace_stitch_trace (struct btrace_data *btrace, struct thread_info *tp)
1227 {
1228 /* If we don't have trace, there's nothing to do. */
1229 if (btrace_data_empty (btrace))
1230 return 0;
1231
1232 switch (btrace->format)
1233 {
1234 case BTRACE_FORMAT_NONE:
1235 return 0;
1236
1237 case BTRACE_FORMAT_BTS:
1238 return btrace_stitch_bts (&btrace->variant.bts, tp);
1239
1240 case BTRACE_FORMAT_PT:
1241 /* Delta reads are not supported. */
1242 return -1;
1243 }
1244
1245 internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
1246 }
1247
1248 /* Clear the branch trace histories in BTINFO. */
1249
1250 static void
1251 btrace_clear_history (struct btrace_thread_info *btinfo)
1252 {
1253 xfree (btinfo->insn_history);
1254 xfree (btinfo->call_history);
1255 xfree (btinfo->replay);
1256
1257 btinfo->insn_history = NULL;
1258 btinfo->call_history = NULL;
1259 btinfo->replay = NULL;
1260 }
1261
1262 /* Clear the branch trace maintenance histories in BTINFO. */
1263
1264 static void
1265 btrace_maint_clear (struct btrace_thread_info *btinfo)
1266 {
1267 switch (btinfo->data.format)
1268 {
1269 default:
1270 break;
1271
1272 case BTRACE_FORMAT_BTS:
1273 btinfo->maint.variant.bts.packet_history.begin = 0;
1274 btinfo->maint.variant.bts.packet_history.end = 0;
1275 break;
1276
1277 #if defined (HAVE_LIBIPT)
1278 case BTRACE_FORMAT_PT:
1279 xfree (btinfo->maint.variant.pt.packets);
1280
1281 btinfo->maint.variant.pt.packets = NULL;
1282 btinfo->maint.variant.pt.packet_history.begin = 0;
1283 btinfo->maint.variant.pt.packet_history.end = 0;
1284 break;
1285 #endif /* defined (HAVE_LIBIPT) */
1286 }
1287 }
1288
1289 /* See btrace.h. */
1290
1291 void
1292 btrace_fetch (struct thread_info *tp)
1293 {
1294 struct btrace_thread_info *btinfo;
1295 struct btrace_target_info *tinfo;
1296 struct btrace_data btrace;
1297 struct cleanup *cleanup;
1298 int errcode;
1299
1300 DEBUG ("fetch thread %s (%s)", print_thread_id (tp),
1301 target_pid_to_str (tp->ptid));
1302
1303 btinfo = &tp->btrace;
1304 tinfo = btinfo->target;
1305 if (tinfo == NULL)
1306 return;
1307
1308 /* There's no way we could get new trace while replaying.
1309 On the other hand, delta trace would return a partial record with the
1310 current PC, which is the replay PC, not the last PC, as expected. */
1311 if (btinfo->replay != NULL)
1312 return;
1313
1314 btrace_data_init (&btrace);
1315 cleanup = make_cleanup_btrace_data (&btrace);
1316
1317 /* Let's first try to extend the trace we already have. */
1318 if (btinfo->end != NULL)
1319 {
1320 errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_DELTA);
1321 if (errcode == 0)
1322 {
1323 /* Success. Let's try to stitch the traces together. */
1324 errcode = btrace_stitch_trace (&btrace, tp);
1325 }
1326 else
1327 {
1328 /* We failed to read delta trace. Let's try to read new trace. */
1329 errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_NEW);
1330
1331 /* If we got any new trace, discard what we have. */
1332 if (errcode == 0 && !btrace_data_empty (&btrace))
1333 btrace_clear (tp);
1334 }
1335
1336 /* If we were not able to read the trace, we start over. */
1337 if (errcode != 0)
1338 {
1339 btrace_clear (tp);
1340 errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);
1341 }
1342 }
1343 else
1344 errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);
1345
1346 /* If we were not able to read the branch trace, signal an error. */
1347 if (errcode != 0)
1348 error (_("Failed to read branch trace."));
1349
1350 /* Compute the trace, provided we have any. */
1351 if (!btrace_data_empty (&btrace))
1352 {
1353 /* Store the raw trace data. The stored data will be cleared in
1354 btrace_clear, so we always append the new trace. */
1355 btrace_data_append (&btinfo->data, &btrace);
1356 btrace_maint_clear (btinfo);
1357
1358 btrace_clear_history (btinfo);
1359 btrace_compute_ftrace (tp, &btrace);
1360 }
1361
1362 do_cleanups (cleanup);
1363 }
1364
1365 /* See btrace.h. */
1366
1367 void
1368 btrace_clear (struct thread_info *tp)
1369 {
1370 struct btrace_thread_info *btinfo;
1371 struct btrace_function *it, *trash;
1372
1373 DEBUG ("clear thread %s (%s)", print_thread_id (tp),
1374 target_pid_to_str (tp->ptid));
1375
1376 /* Make sure btrace frames that may hold a pointer into the branch
1377 trace data are destroyed. */
1378 reinit_frame_cache ();
1379
1380 btinfo = &tp->btrace;
1381
1382 it = btinfo->begin;
1383 while (it != NULL)
1384 {
1385 trash = it;
1386 it = it->flow.next;
1387
1388 xfree (trash);
1389 }
1390
1391 btinfo->begin = NULL;
1392 btinfo->end = NULL;
1393 btinfo->ngaps = 0;
1394
1395 /* Must clear the maint data before BTINFO->DATA - it depends on it. */
1396 btrace_maint_clear (btinfo);
1397 btrace_data_clear (&btinfo->data);
1398 btrace_clear_history (btinfo);
1399 }
1400
1401 /* See btrace.h. */
1402
1403 void
1404 btrace_free_objfile (struct objfile *objfile)
1405 {
1406 struct thread_info *tp;
1407
1408 DEBUG ("free objfile");
1409
1410 ALL_NON_EXITED_THREADS (tp)
1411 btrace_clear (tp);
1412 }
1413
1414 #if defined (HAVE_LIBEXPAT)
1415
1416 /* Check the btrace document version. */
1417
1418 static void
1419 check_xml_btrace_version (struct gdb_xml_parser *parser,
1420 const struct gdb_xml_element *element,
1421 void *user_data, VEC (gdb_xml_value_s) *attributes)
1422 {
1423 const char *version
1424 = (const char *) xml_find_attribute (attributes, "version")->value;
1425
1426 if (strcmp (version, "1.0") != 0)
1427 gdb_xml_error (parser, _("Unsupported btrace version: \"%s\""), version);
1428 }
1429
1430 /* Parse a btrace "block" xml record. */
1431
1432 static void
1433 parse_xml_btrace_block (struct gdb_xml_parser *parser,
1434 const struct gdb_xml_element *element,
1435 void *user_data, VEC (gdb_xml_value_s) *attributes)
1436 {
1437 struct btrace_data *btrace;
1438 struct btrace_block *block;
1439 ULONGEST *begin, *end;
1440
1441 btrace = (struct btrace_data *) user_data;
1442
1443 switch (btrace->format)
1444 {
1445 case BTRACE_FORMAT_BTS:
1446 break;
1447
1448 case BTRACE_FORMAT_NONE:
1449 btrace->format = BTRACE_FORMAT_BTS;
1450 btrace->variant.bts.blocks = NULL;
1451 break;
1452
1453 default:
1454 gdb_xml_error (parser, _("Btrace format error."));
1455 }
1456
1457 begin = (ULONGEST *) xml_find_attribute (attributes, "begin")->value;
1458 end = (ULONGEST *) xml_find_attribute (attributes, "end")->value;
1459
1460 block = VEC_safe_push (btrace_block_s, btrace->variant.bts.blocks, NULL);
1461 block->begin = *begin;
1462 block->end = *end;
1463 }
1464
1465 /* Parse a "raw" xml record. */
1466
1467 static void
1468 parse_xml_raw (struct gdb_xml_parser *parser, const char *body_text,
1469 gdb_byte **pdata, size_t *psize)
1470 {
1471 struct cleanup *cleanup;
1472 gdb_byte *data, *bin;
1473 size_t len, size;
1474
1475 len = strlen (body_text);
1476 if (len % 2 != 0)
1477 gdb_xml_error (parser, _("Bad raw data size."));
1478
1479 size = len / 2;
1480
1481 bin = data = (gdb_byte *) xmalloc (size);
1482 cleanup = make_cleanup (xfree, data);
1483
1484 /* We use hex encoding - see common/rsp-low.h. */
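/* For example, the character pair "c3" decodes to the single byte
   0xc3. */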
1485 while (len > 0)
1486 {
1487 char hi, lo;
1488
1489 hi = *body_text++;
1490 lo = *body_text++;
1491
1492 if (hi == 0 || lo == 0)
1493 gdb_xml_error (parser, _("Bad hex encoding."));
1494
1495 *bin++ = fromhex (hi) * 16 + fromhex (lo);
1496 len -= 2;
1497 }
1498
1499 discard_cleanups (cleanup);
1500
1501 *pdata = data;
1502 *psize = size;
1503 }
1504
1505 /* Parse a btrace pt-config "cpu" xml record. */
1506
1507 static void
1508 parse_xml_btrace_pt_config_cpu (struct gdb_xml_parser *parser,
1509 const struct gdb_xml_element *element,
1510 void *user_data,
1511 VEC (gdb_xml_value_s) *attributes)
1512 {
1513 struct btrace_data *btrace;
1514 const char *vendor;
1515 ULONGEST *family, *model, *stepping;
1516
1517 vendor = (const char *) xml_find_attribute (attributes, "vendor")->value;
1518 family = (ULONGEST *) xml_find_attribute (attributes, "family")->value;
1519 model = (ULONGEST *) xml_find_attribute (attributes, "model")->value;
1520 stepping = (ULONGEST *) xml_find_attribute (attributes, "stepping")->value;
1521
1522 btrace = (struct btrace_data *) user_data;
1523
1524 if (strcmp (vendor, "GenuineIntel") == 0)
1525 btrace->variant.pt.config.cpu.vendor = CV_INTEL;
1526
1527 btrace->variant.pt.config.cpu.family = *family;
1528 btrace->variant.pt.config.cpu.model = *model;
1529 btrace->variant.pt.config.cpu.stepping = *stepping;
1530 }
1531
1532 /* Parse a btrace pt "raw" xml record. */
1533
1534 static void
1535 parse_xml_btrace_pt_raw (struct gdb_xml_parser *parser,
1536 const struct gdb_xml_element *element,
1537 void *user_data, const char *body_text)
1538 {
1539 struct btrace_data *btrace;
1540
1541 btrace = (struct btrace_data *) user_data;
1542 parse_xml_raw (parser, body_text, &btrace->variant.pt.data,
1543 &btrace->variant.pt.size);
1544 }
1545
1546 /* Parse a btrace "pt" xml record. */
1547
1548 static void
1549 parse_xml_btrace_pt (struct gdb_xml_parser *parser,
1550 const struct gdb_xml_element *element,
1551 void *user_data, VEC (gdb_xml_value_s) *attributes)
1552 {
1553 struct btrace_data *btrace;
1554
1555 btrace = (struct btrace_data *) user_data;
1556 btrace->format = BTRACE_FORMAT_PT;
1557 btrace->variant.pt.config.cpu.vendor = CV_UNKNOWN;
1558 btrace->variant.pt.data = NULL;
1559 btrace->variant.pt.size = 0;
1560 }
1561
1562 static const struct gdb_xml_attribute block_attributes[] = {
1563 { "begin", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
1564 { "end", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
1565 { NULL, GDB_XML_AF_NONE, NULL, NULL }
1566 };
1567
1568 static const struct gdb_xml_attribute btrace_pt_config_cpu_attributes[] = {
1569 { "vendor", GDB_XML_AF_NONE, NULL, NULL },
1570 { "family", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
1571 { "model", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
1572 { "stepping", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
1573 { NULL, GDB_XML_AF_NONE, NULL, NULL }
1574 };
1575
1576 static const struct gdb_xml_element btrace_pt_config_children[] = {
1577 { "cpu", btrace_pt_config_cpu_attributes, NULL, GDB_XML_EF_OPTIONAL,
1578 parse_xml_btrace_pt_config_cpu, NULL },
1579 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
1580 };
1581
1582 static const struct gdb_xml_element btrace_pt_children[] = {
1583 { "pt-config", NULL, btrace_pt_config_children, GDB_XML_EF_OPTIONAL, NULL,
1584 NULL },
1585 { "raw", NULL, NULL, GDB_XML_EF_OPTIONAL, NULL, parse_xml_btrace_pt_raw },
1586 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
1587 };
1588
1589 static const struct gdb_xml_attribute btrace_attributes[] = {
1590 { "version", GDB_XML_AF_NONE, NULL, NULL },
1591 { NULL, GDB_XML_AF_NONE, NULL, NULL }
1592 };
1593
1594 static const struct gdb_xml_element btrace_children[] = {
1595 { "block", block_attributes, NULL,
1596 GDB_XML_EF_REPEATABLE | GDB_XML_EF_OPTIONAL, parse_xml_btrace_block, NULL },
1597 { "pt", NULL, btrace_pt_children, GDB_XML_EF_OPTIONAL, parse_xml_btrace_pt,
1598 NULL },
1599 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
1600 };
1601
1602 static const struct gdb_xml_element btrace_elements[] = {
1603 { "btrace", btrace_attributes, btrace_children, GDB_XML_EF_NONE,
1604 check_xml_btrace_version, NULL },
1605 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
1606 };
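/* A minimal BTS document accepted by these elements looks like this
   (a sketch; the actual document is produced by the target):

   <btrace version="1.0">
     <block begin="0x400000" end="0x400010"/>
   </btrace>  */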
1607
1608 #endif /* defined (HAVE_LIBEXPAT) */
1609
1610 /* See btrace.h. */
1611
1612 void
1613 parse_xml_btrace (struct btrace_data *btrace, const char *buffer)
1614 {
1615 struct cleanup *cleanup;
1616 int errcode;
1617
1618 #if defined (HAVE_LIBEXPAT)
1619
1620 btrace->format = BTRACE_FORMAT_NONE;
1621
1622 cleanup = make_cleanup_btrace_data (btrace);
1623 errcode = gdb_xml_parse_quick (_("btrace"), "btrace.dtd", btrace_elements,
1624 buffer, btrace);
1625 if (errcode != 0)
1626 error (_("Error parsing branch trace."));
1627
1628 /* Keep parse results. */
1629 discard_cleanups (cleanup);
1630
1631 #else /* !defined (HAVE_LIBEXPAT) */
1632
1633 error (_("Cannot process branch trace. XML parsing is not supported."));
1634
1635 #endif /* !defined (HAVE_LIBEXPAT) */
1636 }
1637
1638 #if defined (HAVE_LIBEXPAT)
1639
1640 /* Parse a btrace-conf "bts" xml record. */
1641
1642 static void
1643 parse_xml_btrace_conf_bts (struct gdb_xml_parser *parser,
1644 const struct gdb_xml_element *element,
1645 void *user_data, VEC (gdb_xml_value_s) *attributes)
1646 {
1647 struct btrace_config *conf;
1648 struct gdb_xml_value *size;
1649
1650 conf = (struct btrace_config *) user_data;
1651 conf->format = BTRACE_FORMAT_BTS;
1652 conf->bts.size = 0;
1653
1654 size = xml_find_attribute (attributes, "size");
1655 if (size != NULL)
1656 conf->bts.size = (unsigned int) *(ULONGEST *) size->value;
1657 }
1658
1659 /* Parse a btrace-conf "pt" xml record. */
1660
1661 static void
1662 parse_xml_btrace_conf_pt (struct gdb_xml_parser *parser,
1663 const struct gdb_xml_element *element,
1664 void *user_data, VEC (gdb_xml_value_s) *attributes)
1665 {
1666 struct btrace_config *conf;
1667 struct gdb_xml_value *size;
1668
1669 conf = (struct btrace_config *) user_data;
1670 conf->format = BTRACE_FORMAT_PT;
1671 conf->pt.size = 0;
1672
1673 size = xml_find_attribute (attributes, "size");
1674 if (size != NULL)
1675 conf->pt.size = (unsigned int) *(ULONGEST *) size->value;
1676 }
1677
1678 static const struct gdb_xml_attribute btrace_conf_pt_attributes[] = {
1679 { "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
1680 { NULL, GDB_XML_AF_NONE, NULL, NULL }
1681 };
1682
1683 static const struct gdb_xml_attribute btrace_conf_bts_attributes[] = {
1684 { "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
1685 { NULL, GDB_XML_AF_NONE, NULL, NULL }
1686 };
1687
1688 static const struct gdb_xml_element btrace_conf_children[] = {
1689 { "bts", btrace_conf_bts_attributes, NULL, GDB_XML_EF_OPTIONAL,
1690 parse_xml_btrace_conf_bts, NULL },
1691 { "pt", btrace_conf_pt_attributes, NULL, GDB_XML_EF_OPTIONAL,
1692 parse_xml_btrace_conf_pt, NULL },
1693 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
1694 };
1695
1696 static const struct gdb_xml_attribute btrace_conf_attributes[] = {
1697 { "version", GDB_XML_AF_NONE, NULL, NULL },
1698 { NULL, GDB_XML_AF_NONE, NULL, NULL }
1699 };
1700
1701 static const struct gdb_xml_element btrace_conf_elements[] = {
1702 { "btrace-conf", btrace_conf_attributes, btrace_conf_children,
1703 GDB_XML_EF_NONE, NULL, NULL },
1704 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
1705 };
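/* A minimal configuration document accepted by these elements looks
   like this (a sketch; the actual document is produced by the target):

   <btrace-conf version="1.0">
     <bts size="65536"/>
   </btrace-conf>  */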
1706
1707 #endif /* defined (HAVE_LIBEXPAT) */
1708
1709 /* See btrace.h. */
1710
1711 void
1712 parse_xml_btrace_conf (struct btrace_config *conf, const char *xml)
1713 {
1714 int errcode;
1715
1716 #if defined (HAVE_LIBEXPAT)
1717
1718 errcode = gdb_xml_parse_quick (_("btrace-conf"), "btrace-conf.dtd",
1719 btrace_conf_elements, xml, conf);
1720 if (errcode != 0)
1721 error (_("Error parsing branch trace configuration."));
1722
1723 #else /* !defined (HAVE_LIBEXPAT) */
1724
1725 error (_("XML parsing is not supported."));
1726
1727 #endif /* !defined (HAVE_LIBEXPAT) */
1728 }
1729
1730 /* See btrace.h. */
1731
1732 const struct btrace_insn *
1733 btrace_insn_get (const struct btrace_insn_iterator *it)
1734 {
1735 const struct btrace_function *bfun;
1736 unsigned int index, end;
1737
1738 index = it->index;
1739 bfun = it->function;
1740
1741 /* Check if the iterator points to a gap in the trace. */
1742 if (bfun->errcode != 0)
1743 return NULL;
1744
1745 /* The index is within the bounds of this function's instruction vector. */
1746 end = VEC_length (btrace_insn_s, bfun->insn);
1747 gdb_assert (0 < end);
1748 gdb_assert (index < end);
1749
1750 return VEC_index (btrace_insn_s, bfun->insn, index);
1751 }
1752
1753 /* See btrace.h. */
1754
1755 unsigned int
1756 btrace_insn_number (const struct btrace_insn_iterator *it)
1757 {
1758 const struct btrace_function *bfun;
1759
1760 bfun = it->function;
1761
1762 /* Return zero if the iterator points to a gap in the trace. */
1763 if (bfun->errcode != 0)
1764 return 0;
1765
1766 return bfun->insn_offset + it->index;
1767 }
1768
1769 /* See btrace.h. */
1770
1771 void
1772 btrace_insn_begin (struct btrace_insn_iterator *it,
1773 const struct btrace_thread_info *btinfo)
1774 {
1775 const struct btrace_function *bfun;
1776
1777 bfun = btinfo->begin;
1778 if (bfun == NULL)
1779 error (_("No trace."));
1780
1781 it->function = bfun;
1782 it->index = 0;
1783 }
1784
1785 /* See btrace.h. */
1786
1787 void
1788 btrace_insn_end (struct btrace_insn_iterator *it,
1789 const struct btrace_thread_info *btinfo)
1790 {
1791 const struct btrace_function *bfun;
1792 unsigned int length;
1793
1794 bfun = btinfo->end;
1795 if (bfun == NULL)
1796 error (_("No trace."));
1797
1798 length = VEC_length (btrace_insn_s, bfun->insn);
1799
1800 /* The last function may either be a gap or it contains the current
1801 instruction, which is one past the end of the execution trace; ignore
1802 it. */
1803 if (length > 0)
1804 length -= 1;
1805
1806 it->function = bfun;
1807 it->index = length;
1808 }
1809
1810 /* See btrace.h. */
1811
1812 unsigned int
1813 btrace_insn_next (struct btrace_insn_iterator *it, unsigned int stride)
1814 {
1815 const struct btrace_function *bfun;
1816 unsigned int index, steps;
1817
1818 bfun = it->function;
1819 steps = 0;
1820 index = it->index;
1821
1822 while (stride != 0)
1823 {
1824 unsigned int end, space, adv;
1825
1826 end = VEC_length (btrace_insn_s, bfun->insn);
1827
1828 /* An empty function segment represents a gap in the trace. We count
1829 it as one instruction. */
1830 if (end == 0)
1831 {
1832 const struct btrace_function *next;
1833
1834 next = bfun->flow.next;
1835 if (next == NULL)
1836 break;
1837
1838 stride -= 1;
1839 steps += 1;
1840
1841 bfun = next;
1842 index = 0;
1843
1844 continue;
1845 }
1846
1847 gdb_assert (0 < end);
1848 gdb_assert (index < end);
1849
1850 /* Compute the number of instructions remaining in this segment. */
1851 space = end - index;
1852
1853 /* Advance the iterator as far as possible within this segment. */
1854 adv = std::min (space, stride);
1855 stride -= adv;
1856 index += adv;
1857 steps += adv;
1858
1859 /* Move to the next function if we're at the end of this one. */
1860 if (index == end)
1861 {
1862 const struct btrace_function *next;
1863
1864 next = bfun->flow.next;
1865 if (next == NULL)
1866 {
1867 /* We stepped past the last function.
1868
1869 Let's adjust the index to point to the last instruction in
1870 the previous function. */
1871 index -= 1;
1872 steps -= 1;
1873 break;
1874 }
1875
1876 /* We now point to the first instruction in the new function. */
1877 bfun = next;
1878 index = 0;
1879 }
1880
1881 /* We did make progress. */
1882 gdb_assert (adv > 0);
1883 }
1884
1885 /* Update the iterator. */
1886 it->function = bfun;
1887 it->index = index;
1888
1889 return steps;
1890 }
1891
1892 /* See btrace.h. */
1893
1894 unsigned int
1895 btrace_insn_prev (struct btrace_insn_iterator *it, unsigned int stride)
1896 {
1897 const struct btrace_function *bfun;
1898 unsigned int index, steps;
1899
1900 bfun = it->function;
1901 steps = 0;
1902 index = it->index;
1903
1904 while (stride != 0)
1905 {
1906 unsigned int adv;
1907
1908 /* Move to the previous function if we're at the start of this one. */
1909 if (index == 0)
1910 {
1911 const struct btrace_function *prev;
1912
1913 prev = bfun->flow.prev;
1914 if (prev == NULL)
1915 break;
1916
1917 /* We point to one after the last instruction in the new function. */
1918 bfun = prev;
1919 index = VEC_length (btrace_insn_s, bfun->insn);
1920
1921 /* An empty function segment represents a gap in the trace. We count
1922 it as one instruction. */
1923 if (index == 0)
1924 {
1925 stride -= 1;
1926 steps += 1;
1927
1928 continue;
1929 }
1930 }
1931
1932 /* Advance the iterator as far as possible within this segment. */
1933 adv = std::min (index, stride);
1934
1935 stride -= adv;
1936 index -= adv;
1937 steps += adv;
1938
1939 /* We did make progress. */
1940 gdb_assert (adv > 0);
1941 }
1942
1943 /* Update the iterator. */
1944 it->function = bfun;
1945 it->index = index;
1946
1947 return steps;
1948 }
1949
1950 /* See btrace.h. */
1951
1952 int
1953 btrace_insn_cmp (const struct btrace_insn_iterator *lhs,
1954 const struct btrace_insn_iterator *rhs)
1955 {
1956 unsigned int lnum, rnum;
1957
1958 lnum = btrace_insn_number (lhs);
1959 rnum = btrace_insn_number (rhs);
1960
1961 /* A gap has an instruction number of zero. Things are getting more
1962 complicated if gaps are involved.
1963
1964 We take the instruction number offset from the iterator's function.
1965 This is the number of the first instruction after the gap.
1966
1967 This is OK as long as both lhs and rhs point to gaps. If only one of
1968 them does, we need to adjust the number based on the other's regular
1969 instruction number. Otherwise, a gap might compare equal to an
1970 instruction. */
1971
1972 if (lnum == 0 && rnum == 0)
1973 {
1974 lnum = lhs->function->insn_offset;
1975 rnum = rhs->function->insn_offset;
1976 }
1977 else if (lnum == 0)
1978 {
1979 lnum = lhs->function->insn_offset;
1980
1981 if (lnum == rnum)
1982 lnum -= 1;
1983 }
1984 else if (rnum == 0)
1985 {
1986 rnum = rhs->function->insn_offset;
1987
1988 if (rnum == lnum)
1989 rnum -= 1;
1990 }
1991
1992 return (int) (lnum - rnum);
1993 }
1994
1995 /* See btrace.h. */
1996
1997 int
1998 btrace_find_insn_by_number (struct btrace_insn_iterator *it,
1999 const struct btrace_thread_info *btinfo,
2000 unsigned int number)
2001 {
2002 const struct btrace_function *bfun;
2003 unsigned int end, length;
2004
2005 for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
2006 {
2007 /* Skip gaps. */
2008 if (bfun->errcode != 0)
2009 continue;
2010
2011 if (bfun->insn_offset <= number)
2012 break;
2013 }
2014
2015 if (bfun == NULL)
2016 return 0;
2017
2018 length = VEC_length (btrace_insn_s, bfun->insn);
2019 gdb_assert (length > 0);
2020
2021 end = bfun->insn_offset + length;
2022 if (end <= number)
2023 return 0;
2024
2025 it->function = bfun;
2026 it->index = number - bfun->insn_offset;
2027
2028 return 1;
2029 }
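
/* A minimal illustrative sketch (EXAMPLE_GOTO_INSN is a hypothetical
   helper, not part of this file): positioning an iterator at a
   user-supplied instruction number, e.g. when parsing a history range.
   Instruction numbers are one-based; zero denotes a gap and is never
   found.  */

static void
example_goto_insn (const struct btrace_thread_info *btinfo,
		   unsigned int number)
{
  struct btrace_insn_iterator it;

  if (btrace_find_insn_by_number (&it, btinfo, number) == 0)
    error (_("Instruction %u not found."), number);

  /* IT now refers to instruction NUMBER.  */
}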
2030
2031 /* See btrace.h. */
2032
2033 const struct btrace_function *
2034 btrace_call_get (const struct btrace_call_iterator *it)
2035 {
2036 return it->function;
2037 }
2038
2039 /* See btrace.h. */
2040
2041 unsigned int
2042 btrace_call_number (const struct btrace_call_iterator *it)
2043 {
2044 const struct btrace_thread_info *btinfo;
2045 const struct btrace_function *bfun;
2046 unsigned int insns;
2047
2048 btinfo = it->btinfo;
2049 bfun = it->function;
2050 if (bfun != NULL)
2051 return bfun->number;
2052
2053 /* For the end iterator, i.e. bfun == NULL, we return one more than the
2054 number of the last function. */
2055 bfun = btinfo->end;
2056 insns = VEC_length (btrace_insn_s, bfun->insn);
2057
2058 /* If the last function contains only a single instruction (i.e. the
2059 current instruction), the call iteration skips it, so its number is
2060 already the number we seek. */
2061 if (insns == 1)
2062 return bfun->number;
2063
2064 /* Otherwise, return one more than the number of the last function. */
2065 return bfun->number + 1;
2066 }
2067
2068 /* See btrace.h. */
2069
2070 void
2071 btrace_call_begin (struct btrace_call_iterator *it,
2072 const struct btrace_thread_info *btinfo)
2073 {
2074 const struct btrace_function *bfun;
2075
2076 bfun = btinfo->begin;
2077 if (bfun == NULL)
2078 error (_("No trace."));
2079
2080 it->btinfo = btinfo;
2081 it->function = bfun;
2082 }
2083
2084 /* See btrace.h. */
2085
2086 void
2087 btrace_call_end (struct btrace_call_iterator *it,
2088 const struct btrace_thread_info *btinfo)
2089 {
2090 const struct btrace_function *bfun;
2091
2092 bfun = btinfo->end;
2093 if (bfun == NULL)
2094 error (_("No trace."));
2095
2096 it->btinfo = btinfo;
2097 it->function = NULL;
2098 }
2099
2100 /* See btrace.h. */
2101
2102 unsigned int
2103 btrace_call_next (struct btrace_call_iterator *it, unsigned int stride)
2104 {
2105 const struct btrace_function *bfun;
2106 unsigned int steps;
2107
2108 bfun = it->function;
2109 steps = 0;
2110 while (bfun != NULL)
2111 {
2112 const struct btrace_function *next;
2113 unsigned int insns;
2114
2115 next = bfun->flow.next;
2116 if (next == NULL)
2117 {
2118 /* Ignore the last function if it only contains a single
2119 (i.e. the current) instruction. */
2120 insns = VEC_length (btrace_insn_s, bfun->insn);
2121 if (insns == 1)
2122 steps -= 1;
2123 }
2124
2125 if (stride == steps)
2126 break;
2127
2128 bfun = next;
2129 steps += 1;
2130 }
2131
2132 it->function = bfun;
2133 return steps;
2134 }
2135
2136 /* See btrace.h. */
2137
2138 unsigned int
2139 btrace_call_prev (struct btrace_call_iterator *it, unsigned int stride)
2140 {
2141 const struct btrace_thread_info *btinfo;
2142 const struct btrace_function *bfun;
2143 unsigned int steps;
2144
2145 bfun = it->function;
2146 steps = 0;
2147
2148 if (bfun == NULL)
2149 {
2150 unsigned int insns;
2151
2152 btinfo = it->btinfo;
2153 bfun = btinfo->end;
2154 if (bfun == NULL)
2155 return 0;
2156
2157 /* Ignore the last function if it only contains a single
2158 (i.e. the current) instruction. */
2159 insns = VEC_length (btrace_insn_s, bfun->insn);
2160 if (insns == 1)
2161 bfun = bfun->flow.prev;
2162
2163 if (bfun == NULL)
2164 return 0;
2165
2166 steps += 1;
2167 }
2168
2169 while (steps < stride)
2170 {
2171 const struct btrace_function *prev;
2172
2173 prev = bfun->flow.prev;
2174 if (prev == NULL)
2175 break;
2176
2177 bfun = prev;
2178 steps += 1;
2179 }
2180
2181 it->function = bfun;
2182 return steps;
2183 }
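
/* A minimal illustrative sketch (EXAMPLE_DUMP_CALLS is a hypothetical
   helper, not part of this file): walking the call history front to back
   using the iterator functions above.  Assumes BTINFO holds fetched
   trace; btrace_call_begin throws an error otherwise.  */

static void
example_dump_calls (const struct btrace_thread_info *btinfo)
{
  struct btrace_call_iterator it;
  const struct btrace_function *bfun;

  btrace_call_begin (&it, btinfo);

  while ((bfun = btrace_call_get (&it)) != NULL)
    {
      printf_unfiltered ("%u\t%s\n", bfun->number,
			 ftrace_print_function_name (bfun));

      if (btrace_call_next (&it, 1) == 0)
	break;
    }
}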
2184
2185 /* See btrace.h. */
2186
2187 int
2188 btrace_call_cmp (const struct btrace_call_iterator *lhs,
2189 const struct btrace_call_iterator *rhs)
2190 {
2191 unsigned int lnum, rnum;
2192
2193 lnum = btrace_call_number (lhs);
2194 rnum = btrace_call_number (rhs);
2195
2196 return (int) (lnum - rnum);
2197 }
2198
2199 /* See btrace.h. */
2200
2201 int
2202 btrace_find_call_by_number (struct btrace_call_iterator *it,
2203 const struct btrace_thread_info *btinfo,
2204 unsigned int number)
2205 {
2206 const struct btrace_function *bfun;
2207
2208 for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
2209 {
2210 unsigned int bnum;
2211
2212 bnum = bfun->number;
2213 if (number == bnum)
2214 {
2215 it->btinfo = btinfo;
2216 it->function = bfun;
2217 return 1;
2218 }
2219
2220 /* Functions are ordered and numbered consecutively. We could bail out
2221 earlier. On the other hand, it is very unlikely that we search for
2222 a nonexistent function. */
2223 }
2224
2225 return 0;
2226 }
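
/* A minimal illustrative sketch (EXAMPLE_GOTO_CALL is a hypothetical
   helper, not part of this file): positioning a call iterator at a known
   function segment number, e.g. to start a call-history listing.  */

static void
example_goto_call (const struct btrace_thread_info *btinfo,
		   unsigned int number)
{
  struct btrace_call_iterator it;

  if (btrace_find_call_by_number (&it, btinfo, number) == 0)
    error (_("No function call with number %u."), number);

  /* IT now refers to function segment NUMBER.  */
}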
2227
2228 /* See btrace.h. */
2229
2230 void
2231 btrace_set_insn_history (struct btrace_thread_info *btinfo,
2232 const struct btrace_insn_iterator *begin,
2233 const struct btrace_insn_iterator *end)
2234 {
2235 if (btinfo->insn_history == NULL)
2236 btinfo->insn_history = XCNEW (struct btrace_insn_history);
2237
2238 btinfo->insn_history->begin = *begin;
2239 btinfo->insn_history->end = *end;
2240 }
2241
2242 /* See btrace.h. */
2243
2244 void
2245 btrace_set_call_history (struct btrace_thread_info *btinfo,
2246 const struct btrace_call_iterator *begin,
2247 const struct btrace_call_iterator *end)
2248 {
2249 gdb_assert (begin->btinfo == end->btinfo);
2250
2251 if (btinfo->call_history == NULL)
2252 btinfo->call_history = XCNEW (struct btrace_call_history);
2253
2254 btinfo->call_history->begin = *begin;
2255 btinfo->call_history->end = *end;
2256 }
2257
2258 /* See btrace.h. */
2259
2260 int
2261 btrace_is_replaying (struct thread_info *tp)
2262 {
2263 return tp->btrace.replay != NULL;
2264 }
2265
2266 /* See btrace.h. */
2267
2268 int
2269 btrace_is_empty (struct thread_info *tp)
2270 {
2271 struct btrace_insn_iterator begin, end;
2272 struct btrace_thread_info *btinfo;
2273
2274 btinfo = &tp->btrace;
2275
2276 if (btinfo->begin == NULL)
2277 return 1;
2278
2279 btrace_insn_begin (&begin, btinfo);
2280 btrace_insn_end (&end, btinfo);
2281
2282 return btrace_insn_cmp (&begin, &end) == 0;
2283 }
2284
2285 /* Forward the cleanup request. */
2286
2287 static void
2288 do_btrace_data_cleanup (void *arg)
2289 {
2290 btrace_data_fini ((struct btrace_data *) arg);
2291 }
2292
2293 /* See btrace.h. */
2294
2295 struct cleanup *
2296 make_cleanup_btrace_data (struct btrace_data *data)
2297 {
2298 return make_cleanup (do_btrace_data_cleanup, data);
2299 }
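
/* A minimal illustrative sketch (EXAMPLE_USE_BTRACE_DATA is a
   hypothetical helper, not part of this file): the typical pattern for
   the cleanup above.  btrace_data_init and do_cleanups are existing GDB
   functions; the fetch step is elided.  */

static void
example_use_btrace_data (void)
{
  struct btrace_data data;
  struct cleanup *cleanup;

  btrace_data_init (&data);
  cleanup = make_cleanup_btrace_data (&data);

  /* ... fill and use DATA; if an error is thrown here, the cleanup
     still releases DATA's resources ...  */

  do_cleanups (cleanup);
}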
2300
2301 #if defined (HAVE_LIBIPT)
2302
2303 /* Print a single packet. */
2304
2305 static void
2306 pt_print_packet (const struct pt_packet *packet)
2307 {
2308 switch (packet->type)
2309 {
2310 default:
2311 printf_unfiltered (("[??: %x]"), packet->type);
2312 break;
2313
2314 case ppt_psb:
2315 printf_unfiltered (("psb"));
2316 break;
2317
2318 case ppt_psbend:
2319 printf_unfiltered (("psbend"));
2320 break;
2321
2322 case ppt_pad:
2323 printf_unfiltered (("pad"));
2324 break;
2325
2326 case ppt_tip:
2327 printf_unfiltered (("tip %u: 0x%" PRIx64 ""),
2328 packet->payload.ip.ipc,
2329 packet->payload.ip.ip);
2330 break;
2331
2332 case ppt_tip_pge:
2333 printf_unfiltered (("tip.pge %u: 0x%" PRIx64 ""),
2334 packet->payload.ip.ipc,
2335 packet->payload.ip.ip);
2336 break;
2337
2338 case ppt_tip_pgd:
2339 printf_unfiltered (("tip.pgd %u: 0x%" PRIx64 ""),
2340 packet->payload.ip.ipc,
2341 packet->payload.ip.ip);
2342 break;
2343
2344 case ppt_fup:
2345 printf_unfiltered (("fup %u: 0x%" PRIx64 ""),
2346 packet->payload.ip.ipc,
2347 packet->payload.ip.ip);
2348 break;
2349
2350 case ppt_tnt_8:
2351 printf_unfiltered (("tnt-8 %u: 0x%" PRIx64 ""),
2352 packet->payload.tnt.bit_size,
2353 packet->payload.tnt.payload);
2354 break;
2355
2356 case ppt_tnt_64:
2357 printf_unfiltered (("tnt-64 %u: 0x%" PRIx64 ""),
2358 packet->payload.tnt.bit_size,
2359 packet->payload.tnt.payload);
2360 break;
2361
2362 case ppt_pip:
2363 printf_unfiltered (("pip %" PRIx64 "%s"), packet->payload.pip.cr3,
2364 packet->payload.pip.nr ? (" nr") : (""));
2365 break;
2366
2367 case ppt_tsc:
2368 printf_unfiltered (("tsc %" PRIx64 ""), packet->payload.tsc.tsc);
2369 break;
2370
2371 case ppt_cbr:
2372 printf_unfiltered (("cbr %u"), packet->payload.cbr.ratio);
2373 break;
2374
2375 case ppt_mode:
2376 switch (packet->payload.mode.leaf)
2377 {
2378 default:
2379 printf_unfiltered (("mode %u"), packet->payload.mode.leaf);
2380 break;
2381
2382 case pt_mol_exec:
2383 printf_unfiltered (("mode.exec%s%s"),
2384 packet->payload.mode.bits.exec.csl
2385 ? (" cs.l") : (""),
2386 packet->payload.mode.bits.exec.csd
2387 ? (" cs.d") : (""));
2388 break;
2389
2390 case pt_mol_tsx:
2391 printf_unfiltered (("mode.tsx%s%s"),
2392 packet->payload.mode.bits.tsx.intx
2393 ? (" intx") : (""),
2394 packet->payload.mode.bits.tsx.abrt
2395 ? (" abrt") : (""));
2396 break;
2397 }
2398 break;
2399
2400 case ppt_ovf:
2401 printf_unfiltered (("ovf"));
2402 break;
2403
2404 case ppt_stop:
2405 printf_unfiltered (("stop"));
2406 break;
2407
2408 case ppt_vmcs:
2409 printf_unfiltered (("vmcs %" PRIx64 ""), packet->payload.vmcs.base);
2410 break;
2411
2412 case ppt_tma:
2413 printf_unfiltered (("tma %x %x"), packet->payload.tma.ctc,
2414 packet->payload.tma.fc);
2415 break;
2416
2417 case ppt_mtc:
2418 printf_unfiltered (("mtc %x"), packet->payload.mtc.ctc);
2419 break;
2420
2421 case ppt_cyc:
2422 printf_unfiltered (("cyc %" PRIx64 ""), packet->payload.cyc.value);
2423 break;
2424
2425 case ppt_mnt:
2426 printf_unfiltered (("mnt %" PRIx64 ""), packet->payload.mnt.payload);
2427 break;
2428 }
2429 }
2430
2431 /* Decode packets into MAINT using DECODER. */
2432
2433 static void
2434 btrace_maint_decode_pt (struct btrace_maint_info *maint,
2435 struct pt_packet_decoder *decoder)
2436 {
2437 int errcode;
2438
2439 for (;;)
2440 {
2441 struct btrace_pt_packet packet;
2442
2443 errcode = pt_pkt_sync_forward (decoder);
2444 if (errcode < 0)
2445 break;
2446
2447 for (;;)
2448 {
2449 pt_pkt_get_offset (decoder, &packet.offset);
2450
2451 errcode = pt_pkt_next (decoder, &packet.packet,
2452 sizeof (packet.packet));
2453 if (errcode < 0)
2454 break;
2455
2456 if (maint_btrace_pt_skip_pad == 0 || packet.packet.type != ppt_pad)
2457 {
2458 packet.errcode = pt_errcode (errcode);
2459 VEC_safe_push (btrace_pt_packet_s, maint->variant.pt.packets,
2460 &packet);
2461 }
2462 }
2463
2464 if (errcode == -pte_eos)
2465 break;
2466
2467 packet.errcode = pt_errcode (errcode);
2468 VEC_safe_push (btrace_pt_packet_s, maint->variant.pt.packets,
2469 &packet);
2470
2471 warning (_("Error at trace offset 0x%" PRIx64 ": %s."),
2472 packet.offset, pt_errstr (packet.errcode));
2473 }
2474
2475 if (errcode != -pte_eos)
2476 warning (_("Failed to synchronize onto the Intel Processor Trace "
2477 "stream: %s."), pt_errstr (pt_errcode (errcode)));
2478 }
2479
2480 /* Update the packet history in BTINFO. */
2481
2482 static void
2483 btrace_maint_update_pt_packets (struct btrace_thread_info *btinfo)
2484 {
2486 struct pt_packet_decoder *decoder;
2487 struct btrace_data_pt *pt;
2488 struct pt_config config;
2489 int errcode;
2490
2491 pt = &btinfo->data.variant.pt;
2492
2493 /* Nothing to do if there is no trace. */
2494 if (pt->size == 0)
2495 return;
2496
2497 memset (&config, 0, sizeof (config));
2498
2499 config.size = sizeof (config);
2500 config.begin = pt->data;
2501 config.end = pt->data + pt->size;
2502
2503 config.cpu.vendor = pt_translate_cpu_vendor (pt->config.cpu.vendor);
2504 config.cpu.family = pt->config.cpu.family;
2505 config.cpu.model = pt->config.cpu.model;
2506 config.cpu.stepping = pt->config.cpu.stepping;
2507
2508 errcode = pt_cpu_errata (&config.errata, &config.cpu);
2509 if (errcode < 0)
2510 error (_("Failed to configure the Intel Processor Trace decoder: %s."),
2511 pt_errstr (pt_errcode (errcode)));
2512
2513 decoder = pt_pkt_alloc_decoder (&config);
2514 if (decoder == NULL)
2515 error (_("Failed to allocate the Intel Processor Trace decoder."));
2516
2517 TRY
2518 {
2519 btrace_maint_decode_pt (&btinfo->maint, decoder);
2520 }
2521 CATCH (except, RETURN_MASK_ALL)
2522 {
2523 pt_pkt_free_decoder (decoder);
2524
2525 if (except.reason < 0)
2526 throw_exception (except);
2527 }
2528 END_CATCH
2529
2530 pt_pkt_free_decoder (decoder);
2531 }
2532
2533 #endif /* defined (HAVE_LIBIPT) */
2534
2535 /* Update the packet maintenance information for BTINFO and store the
2536 low and high bounds into BEGIN and END, respectively.
2537 Store the current iterator state into FROM and TO. */
2538
2539 static void
2540 btrace_maint_update_packets (struct btrace_thread_info *btinfo,
2541 unsigned int *begin, unsigned int *end,
2542 unsigned int *from, unsigned int *to)
2543 {
2544 switch (btinfo->data.format)
2545 {
2546 default:
2547 *begin = 0;
2548 *end = 0;
2549 *from = 0;
2550 *to = 0;
2551 break;
2552
2553 case BTRACE_FORMAT_BTS:
2554 /* Nothing to do - we operate directly on BTINFO->DATA. */
2555 *begin = 0;
2556 *end = VEC_length (btrace_block_s, btinfo->data.variant.bts.blocks);
2557 *from = btinfo->maint.variant.bts.packet_history.begin;
2558 *to = btinfo->maint.variant.bts.packet_history.end;
2559 break;
2560
2561 #if defined (HAVE_LIBIPT)
2562 case BTRACE_FORMAT_PT:
2563 if (VEC_empty (btrace_pt_packet_s, btinfo->maint.variant.pt.packets))
2564 btrace_maint_update_pt_packets (btinfo);
2565
2566 *begin = 0;
2567 *end = VEC_length (btrace_pt_packet_s, btinfo->maint.variant.pt.packets);
2568 *from = btinfo->maint.variant.pt.packet_history.begin;
2569 *to = btinfo->maint.variant.pt.packet_history.end;
2570 break;
2571 #endif /* defined (HAVE_LIBIPT) */
2572 }
2573 }
2574
2575 /* Print packets in BTINFO from BEGIN (inclusive) until END (exclusive) and
2576 update the current iterator position. */
2577
2578 static void
2579 btrace_maint_print_packets (struct btrace_thread_info *btinfo,
2580 unsigned int begin, unsigned int end)
2581 {
2582 switch (btinfo->data.format)
2583 {
2584 default:
2585 break;
2586
2587 case BTRACE_FORMAT_BTS:
2588 {
2589 VEC (btrace_block_s) *blocks;
2590 unsigned int blk;
2591
2592 blocks = btinfo->data.variant.bts.blocks;
2593 for (blk = begin; blk < end; ++blk)
2594 {
2595 const btrace_block_s *block;
2596
2597 block = VEC_index (btrace_block_s, blocks, blk);
2598
2599 printf_unfiltered ("%u\tbegin: %s, end: %s\n", blk,
2600 core_addr_to_string_nz (block->begin),
2601 core_addr_to_string_nz (block->end));
2602 }
2603
2604 btinfo->maint.variant.bts.packet_history.begin = begin;
2605 btinfo->maint.variant.bts.packet_history.end = end;
2606 }
2607 break;
2608
2609 #if defined (HAVE_LIBIPT)
2610 case BTRACE_FORMAT_PT:
2611 {
2612 VEC (btrace_pt_packet_s) *packets;
2613 unsigned int pkt;
2614
2615 packets = btinfo->maint.variant.pt.packets;
2616 for (pkt = begin; pkt < end; ++pkt)
2617 {
2618 const struct btrace_pt_packet *packet;
2619
2620 packet = VEC_index (btrace_pt_packet_s, packets, pkt);
2621
2622 printf_unfiltered ("%u\t", pkt);
2623 printf_unfiltered ("0x%" PRIx64 "\t", packet->offset);
2624
2625 if (packet->errcode == pte_ok)
2626 pt_print_packet (&packet->packet);
2627 else
2628 printf_unfiltered ("[error: %s]", pt_errstr (packet->errcode));
2629
2630 printf_unfiltered ("\n");
2631 }
2632
2633 btinfo->maint.variant.pt.packet_history.begin = begin;
2634 btinfo->maint.variant.pt.packet_history.end = end;
2635 }
2636 break;
2637 #endif /* defined (HAVE_LIBIPT) */
2638 }
2639 }
2640
2641 /* Read a number from an argument string. */
2642
2643 static unsigned int
2644 get_uint (char **arg)
2645 {
2646 char *begin, *end, *pos;
2647 unsigned long number;
2648
2649 begin = *arg;
2650 pos = skip_spaces (begin);
2651
2652 if (!isdigit (*pos))
2653 error (_("Expected positive number, got: %s."), pos);
2654
2655 number = strtoul (pos, &end, 10);
2656 if (number > UINT_MAX)
2657 error (_("Number too big."));
2658
2659 *arg += (end - begin);
2660
2661 return (unsigned int) number;
2662 }
2663
2664 /* Read a context size from an argument string. */
2665
2666 static int
2667 get_context_size (char **arg)
2668 {
2669 char *pos;
2671
2672 pos = skip_spaces (*arg);
2673
2674 if (!isdigit (*pos))
2675 error (_("Expected positive number, got: %s."), pos);
2676
2677 return strtol (pos, arg, 10);
2678 }
2679
2680 /* Complain about junk at the end of an argument string. */
2681
2682 static void
2683 no_chunk (char *arg)
2684 {
2685 if (*arg != 0)
2686 error (_("Junk after argument: %s."), arg);
2687 }
2688
2689 /* The "maintenance btrace packet-history" command. */
2690
2691 static void
2692 maint_btrace_packet_history_cmd (char *arg, int from_tty)
2693 {
2694 struct btrace_thread_info *btinfo;
2695 struct thread_info *tp;
2696 unsigned int size, begin, end, from, to;
2697
2698 tp = find_thread_ptid (inferior_ptid);
2699 if (tp == NULL)
2700 error (_("No thread."));
2701
2702 size = 10;
2703 btinfo = &tp->btrace;
2704
2705 btrace_maint_update_packets (btinfo, &begin, &end, &from, &to);
2706 if (begin == end)
2707 {
2708 printf_unfiltered (_("No trace.\n"));
2709 return;
2710 }
2711
2712 if (arg == NULL || *arg == 0 || strcmp (arg, "+") == 0)
2713 {
2714 from = to;
2715
2716 if (end - from < size)
2717 size = end - from;
2718 to = from + size;
2719 }
2720 else if (strcmp (arg, "-") == 0)
2721 {
2722 to = from;
2723
2724 if (to - begin < size)
2725 size = to - begin;
2726 from = to - size;
2727 }
2728 else
2729 {
2730 from = get_uint (&arg);
2731 if (end <= from)
2732 error (_("'%u' is out of range."), from);
2733
2734 arg = skip_spaces (arg);
2735 if (*arg == ',')
2736 {
2737 arg = skip_spaces (++arg);
2738
2739 if (*arg == '+')
2740 {
2741 arg += 1;
2742 size = get_context_size (&arg);
2743
2744 no_chunk (arg);
2745
2746 if (end - from < size)
2747 size = end - from;
2748 to = from + size;
2749 }
2750 else if (*arg == '-')
2751 {
2752 arg += 1;
2753 size = get_context_size (&arg);
2754
2755 no_chunk (arg);
2756
2757 /* Include the packet given as first argument. */
2758 from += 1;
2759 to = from;
2760
2761 if (to - begin < size)
2762 size = to - begin;
2763 from = to - size;
2764 }
2765 else
2766 {
2767 to = get_uint (&arg);
2768
2769 /* Include the packet at the second argument and silently
2770 truncate the range. */
2771 if (to < end)
2772 to += 1;
2773 else
2774 to = end;
2775
2776 no_chunk (arg);
2777 }
2778 }
2779 else
2780 {
2781 no_chunk (arg);
2782
2783 if (end - from < size)
2784 size = end - from;
2785 to = from + size;
2786 }
2787
2788 dont_repeat ();
2789 }
2790
2791 btrace_maint_print_packets (btinfo, from, to);
2792 }
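
/* A sketch of the argument forms accepted by the command above (the
   packet numbers are illustrative, not from an actual session):

     (gdb) maint btrace packet-history            <- next ten packets
     (gdb) maint btrace packet-history -          <- previous ten packets
     (gdb) maint btrace packet-history 100        <- ten packets from 100
     (gdb) maint btrace packet-history 100,119    <- packets 100 to 119
     (gdb) maint btrace packet-history 100,+20    <- twenty packets from 100
     (gdb) maint btrace packet-history 100,-20    <- twenty packets ending
                                                     at 100  */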
2793
2794 /* The "maintenance btrace clear-packet-history" command. */
2795
2796 static void
2797 maint_btrace_clear_packet_history_cmd (char *args, int from_tty)
2798 {
2799 struct btrace_thread_info *btinfo;
2800 struct thread_info *tp;
2801
2802 if (args != NULL && *args != 0)
2803 error (_("Invalid argument."));
2804
2805 tp = find_thread_ptid (inferior_ptid);
2806 if (tp == NULL)
2807 error (_("No thread."));
2808
2809 btinfo = &tp->btrace;
2810
2811 /* Must clear the maint data before - it depends on BTINFO->DATA. */
2812 btrace_maint_clear (btinfo);
2813 btrace_data_clear (&btinfo->data);
2814 }
2815
2816 /* The "maintenance btrace clear" command. */
2817
2818 static void
2819 maint_btrace_clear_cmd (char *args, int from_tty)
2820 {
2821 struct btrace_thread_info *btinfo;
2822 struct thread_info *tp;
2823
2824 if (args != NULL && *args != 0)
2825 error (_("Invalid argument."));
2826
2827 tp = find_thread_ptid (inferior_ptid);
2828 if (tp == NULL)
2829 error (_("No thread."));
2830
2831 btrace_clear (tp);
2832 }
2833
2834 /* The "maintenance btrace" command. */
2835
2836 static void
2837 maint_btrace_cmd (char *args, int from_tty)
2838 {
2839 help_list (maint_btrace_cmdlist, "maintenance btrace ", all_commands,
2840 gdb_stdout);
2841 }
2842
2843 /* The "maintenance set btrace" command. */
2844
2845 static void
2846 maint_btrace_set_cmd (char *args, int from_tty)
2847 {
2848 help_list (maint_btrace_set_cmdlist, "maintenance set btrace ", all_commands,
2849 gdb_stdout);
2850 }
2851
2852 /* The "maintenance show btrace" command. */
2853
2854 static void
2855 maint_btrace_show_cmd (char *args, int from_tty)
2856 {
2857 help_list (maint_btrace_show_cmdlist, "maintenance show btrace ",
2858 all_commands, gdb_stdout);
2859 }
2860
2861 /* The "maintenance set btrace pt" command. */
2862
2863 static void
2864 maint_btrace_pt_set_cmd (char *args, int from_tty)
2865 {
2866 help_list (maint_btrace_pt_set_cmdlist, "maintenance set btrace pt ",
2867 all_commands, gdb_stdout);
2868 }
2869
2870 /* The "maintenance show btrace pt" command. */
2871
2872 static void
2873 maint_btrace_pt_show_cmd (char *args, int from_tty)
2874 {
2875 help_list (maint_btrace_pt_show_cmdlist, "maintenance show btrace pt ",
2876 all_commands, gdb_stdout);
2877 }
2878
2879 /* The "maintenance info btrace" command. */
2880
2881 static void
2882 maint_info_btrace_cmd (char *args, int from_tty)
2883 {
2884 struct btrace_thread_info *btinfo;
2885 struct thread_info *tp;
2886 const struct btrace_config *conf;
2887
2888 if (args != NULL && *args != 0)
2889 error (_("Invalid argument."));
2890
2891 tp = find_thread_ptid (inferior_ptid);
2892 if (tp == NULL)
2893 error (_("No thread."));
2894
2895 btinfo = &tp->btrace;
2896
2897 conf = btrace_conf (btinfo);
2898 if (conf == NULL)
2899 error (_("No btrace configuration."));
2900
2901 printf_unfiltered (_("Format: %s.\n"),
2902 btrace_format_string (conf->format));
2903
2904 switch (conf->format)
2905 {
2906 default:
2907 break;
2908
2909 case BTRACE_FORMAT_BTS:
2910 printf_unfiltered (_("Number of packets: %u.\n"),
2911 VEC_length (btrace_block_s,
2912 btinfo->data.variant.bts.blocks));
2913 break;
2914
2915 #if defined (HAVE_LIBIPT)
2916 case BTRACE_FORMAT_PT:
2917 {
2918 struct pt_version version;
2919
2920 version = pt_library_version ();
2921 printf_unfiltered (_("Version: %u.%u.%u%s.\n"), version.major,
2922 version.minor, version.build,
2923 version.ext != NULL ? version.ext : "");
2924
2925 btrace_maint_update_pt_packets (btinfo);
2926 printf_unfiltered (_("Number of packets: %u.\n"),
2927 VEC_length (btrace_pt_packet_s,
2928 btinfo->maint.variant.pt.packets));
2929 }
2930 break;
2931 #endif /* defined (HAVE_LIBIPT) */
2932 }
2933 }
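
/* Illustrative output of the command above for a BTS recording (the
   exact format name and packet count depend on the configuration and
   the trace; values here are made up):

     (gdb) maint info btrace
     Format: Branch Trace Store.
     Number of packets: 123.  */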
2934
2935 /* The "maint show btrace pt skip-pad" show value function. */
2936
2937 static void
2938 show_maint_btrace_pt_skip_pad (struct ui_file *file, int from_tty,
2939 struct cmd_list_element *c,
2940 const char *value)
2941 {
2942 fprintf_filtered (file, _("Skip PAD packets is %s.\n"), value);
2943 }
2944
2945
2946 /* Initialize btrace maintenance commands. */
2947
2948 void _initialize_btrace (void);
2949 void
2950 _initialize_btrace (void)
2951 {
2952 add_cmd ("btrace", class_maintenance, maint_info_btrace_cmd,
2953 _("Info about branch tracing data."), &maintenanceinfolist);
2954
2955 add_prefix_cmd ("btrace", class_maintenance, maint_btrace_cmd,
2956 _("Branch tracing maintenance commands."),
2957 &maint_btrace_cmdlist, "maintenance btrace ",
2958 0, &maintenancelist);
2959
2960 add_prefix_cmd ("btrace", class_maintenance, maint_btrace_set_cmd, _("\
2961 Set branch tracing specific variables."),
2962 &maint_btrace_set_cmdlist, "maintenance set btrace ",
2963 0, &maintenance_set_cmdlist);
2964
2965 add_prefix_cmd ("pt", class_maintenance, maint_btrace_pt_set_cmd, _("\
2966 Set Intel Processor Trace specific variables."),
2967 &maint_btrace_pt_set_cmdlist, "maintenance set btrace pt ",
2968 0, &maint_btrace_set_cmdlist);
2969
2970 add_prefix_cmd ("btrace", class_maintenance, maint_btrace_show_cmd, _("\
2971 Show branch tracing specific variables."),
2972 &maint_btrace_show_cmdlist, "maintenance show btrace ",
2973 0, &maintenance_show_cmdlist);
2974
2975 add_prefix_cmd ("pt", class_maintenance, maint_btrace_pt_show_cmd, _("\
2976 Show Intel Processor Trace specific variables."),
2977 &maint_btrace_pt_show_cmdlist, "maintenance show btrace pt ",
2978 0, &maint_btrace_show_cmdlist);
2979
2980 add_setshow_boolean_cmd ("skip-pad", class_maintenance,
2981 &maint_btrace_pt_skip_pad, _("\
2982 Set whether PAD packets should be skipped in the btrace packet history."), _("\
2983 Show whether PAD packets should be skipped in the btrace packet history."),_("\
2984 When enabled, PAD packets are ignored in the btrace packet history."),
2985 NULL, show_maint_btrace_pt_skip_pad,
2986 &maint_btrace_pt_set_cmdlist,
2987 &maint_btrace_pt_show_cmdlist);
2988
2989 add_cmd ("packet-history", class_maintenance, maint_btrace_packet_history_cmd,
2990 _("Print the raw branch tracing data.\n\
2991 With no argument, print ten more packets after the previous ten-packet print.\n\
2992 With '-' as argument, print ten packets before the previous ten-packet print.\n\
2993 One argument specifies the starting packet of a ten-packet print.\n\
2994 Two arguments separated by a comma specify the starting and ending packets \
2995 to print.\n\
2996 When preceded by '+' or '-', the second argument specifies the distance \
2997 from the first.\n"),
2998 &maint_btrace_cmdlist);
2999
3000 add_cmd ("clear-packet-history", class_maintenance,
3001 maint_btrace_clear_packet_history_cmd,
3002 _("Clears the branch tracing packet history.\n\
3003 Discards the raw branch tracing data but not the execution history data.\n\
3004 "),
3005 &maint_btrace_cmdlist);
3006
3007 add_cmd ("clear", class_maintenance, maint_btrace_clear_cmd,
3008 _("Clears the branch tracing data.\n\
3009 Discards the raw branch tracing data and the execution history data.\n\
3010 The next 'record' command will fetch the branch tracing data anew.\n\
3011 "),
3012 &maint_btrace_cmdlist);
3013
3014 }