btrace: preserve function level for unexpected returns
1 /* Branch trace support for GDB, the GNU debugger.
2
3 Copyright (C) 2013-2016 Free Software Foundation, Inc.
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include "btrace.h"
24 #include "gdbthread.h"
25 #include "inferior.h"
26 #include "target.h"
27 #include "record.h"
28 #include "symtab.h"
29 #include "disasm.h"
30 #include "source.h"
31 #include "filenames.h"
32 #include "xml-support.h"
33 #include "regcache.h"
34 #include "rsp-low.h"
35 #include "gdbcmd.h"
36 #include "cli/cli-utils.h"
37
38 #include <inttypes.h>
39 #include <ctype.h>
40 #include <algorithm>
41
42 /* Command lists for btrace maintenance commands. */
43 static struct cmd_list_element *maint_btrace_cmdlist;
44 static struct cmd_list_element *maint_btrace_set_cmdlist;
45 static struct cmd_list_element *maint_btrace_show_cmdlist;
46 static struct cmd_list_element *maint_btrace_pt_set_cmdlist;
47 static struct cmd_list_element *maint_btrace_pt_show_cmdlist;
48
49 /* Control whether to skip PAD packets when computing the packet history. */
50 static int maint_btrace_pt_skip_pad = 1;
51
52 static void btrace_add_pc (struct thread_info *tp);
53
54 /* Print a record debug message. Use do ... while (0) to avoid ambiguities
55 when used in if statements. */
56
57 #define DEBUG(msg, args...) \
58 do \
59 { \
60 if (record_debug != 0) \
61 fprintf_unfiltered (gdb_stdlog, \
62 "[btrace] " msg "\n", ##args); \
63 } \
64 while (0)
65
66 #define DEBUG_FTRACE(msg, args...) DEBUG ("[ftrace] " msg, ##args)
67
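/* Example usage (illustrative; the thread id and PC shown are made up):

     DEBUG ("enable thread %s", print_thread_id (tp));
     DEBUG_FTRACE ("no symbol at %s", core_addr_to_string_nz (pc));

   With "set debug record 1", these print to gdb_stdlog as

     [btrace] enable thread 1.1
     [btrace] [ftrace] no symbol at 0x400500  */
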
68 /* Return the function name of a recorded function segment for printing.
69 This function never returns NULL. */
70
71 static const char *
72 ftrace_print_function_name (const struct btrace_function *bfun)
73 {
74 struct minimal_symbol *msym;
75 struct symbol *sym;
76
77 msym = bfun->msym;
78 sym = bfun->sym;
79
80 if (sym != NULL)
81 return SYMBOL_PRINT_NAME (sym);
82
83 if (msym != NULL)
84 return MSYMBOL_PRINT_NAME (msym);
85
86 return "<unknown>";
87 }
88
89 /* Return the file name of a recorded function segment for printing.
90 This function never returns NULL. */
91
92 static const char *
93 ftrace_print_filename (const struct btrace_function *bfun)
94 {
95 struct symbol *sym;
96 const char *filename;
97
98 sym = bfun->sym;
99
100 if (sym != NULL)
101 filename = symtab_to_filename_for_display (symbol_symtab (sym));
102 else
103 filename = "<unknown>";
104
105 return filename;
106 }
107
108 /* Return a string representation of the address of an instruction.
109 This function never returns NULL. */
110
111 static const char *
112 ftrace_print_insn_addr (const struct btrace_insn *insn)
113 {
114 if (insn == NULL)
115 return "<nil>";
116
117 return core_addr_to_string_nz (insn->pc);
118 }
119
120 /* Print an ftrace debug status message. */
121
122 static void
123 ftrace_debug (const struct btrace_function *bfun, const char *prefix)
124 {
125 const char *fun, *file;
126 unsigned int ibegin, iend;
127 int level;
128
129 fun = ftrace_print_function_name (bfun);
130 file = ftrace_print_filename (bfun);
131 level = bfun->level;
132
133 ibegin = bfun->insn_offset;
134 iend = ibegin + VEC_length (btrace_insn_s, bfun->insn);
135
136 DEBUG_FTRACE ("%s: fun = %s, file = %s, level = %d, insn = [%u; %u)",
137 prefix, fun, file, level, ibegin, iend);
138 }
139
140 /* Return non-zero if BFUN does not match MFUN and FUN;
141 return zero otherwise. */
142
143 static int
144 ftrace_function_switched (const struct btrace_function *bfun,
145 const struct minimal_symbol *mfun,
146 const struct symbol *fun)
147 {
148 struct minimal_symbol *msym;
149 struct symbol *sym;
150
151 msym = bfun->msym;
152 sym = bfun->sym;
153
154 /* If the minimal symbol changed, we certainly switched functions. */
155 if (mfun != NULL && msym != NULL
156 && strcmp (MSYMBOL_LINKAGE_NAME (mfun), MSYMBOL_LINKAGE_NAME (msym)) != 0)
157 return 1;
158
159 /* If the symbol changed, we certainly switched functions. */
160 if (fun != NULL && sym != NULL)
161 {
162 const char *bfname, *fname;
163
164 /* Check the function name. */
165 if (strcmp (SYMBOL_LINKAGE_NAME (fun), SYMBOL_LINKAGE_NAME (sym)) != 0)
166 return 1;
167
168 /* Check the location of those functions, as well. */
169 bfname = symtab_to_fullname (symbol_symtab (sym));
170 fname = symtab_to_fullname (symbol_symtab (fun));
171 if (filename_cmp (fname, bfname) != 0)
172 return 1;
173 }
174
175 /* If we lost symbol information, we switched functions. */
176 if (!(msym == NULL && sym == NULL) && mfun == NULL && fun == NULL)
177 return 1;
178
179 /* If we gained symbol information, we switched functions. */
180 if (msym == NULL && sym == NULL && !(mfun == NULL && fun == NULL))
181 return 1;
182
183 return 0;
184 }
185
186 /* Allocate and initialize a new branch trace function segment.
187 PREV is the chronologically preceding function segment.
188 MFUN and FUN are the symbol information we have for this function. */
189
190 static struct btrace_function *
191 ftrace_new_function (struct btrace_function *prev,
192 struct minimal_symbol *mfun,
193 struct symbol *fun)
194 {
195 struct btrace_function *bfun;
196
197 bfun = XCNEW (struct btrace_function);
198
199 bfun->msym = mfun;
200 bfun->sym = fun;
201 bfun->flow.prev = prev;
202
203 if (prev == NULL)
204 {
205 /* Start counting at one. */
206 bfun->number = 1;
207 bfun->insn_offset = 1;
208 }
209 else
210 {
211 gdb_assert (prev->flow.next == NULL);
212 prev->flow.next = bfun;
213
214 bfun->number = prev->number + 1;
215 bfun->insn_offset = (prev->insn_offset
216 + VEC_length (btrace_insn_s, prev->insn));
217 bfun->level = prev->level;
218 }
219
220 return bfun;
221 }
222
223 /* Update the UP field of a function segment. */
224
225 static void
226 ftrace_update_caller (struct btrace_function *bfun,
227 struct btrace_function *caller,
228 enum btrace_function_flag flags)
229 {
230 if (bfun->up != NULL)
231 ftrace_debug (bfun, "updating caller");
232
233 bfun->up = caller;
234 bfun->flags = flags;
235
236 ftrace_debug (bfun, "set caller");
237 }
238
239 /* Fix up the caller for all segments of a function. */
240
241 static void
242 ftrace_fixup_caller (struct btrace_function *bfun,
243 struct btrace_function *caller,
244 enum btrace_function_flag flags)
245 {
246 struct btrace_function *prev, *next;
247
248 ftrace_update_caller (bfun, caller, flags);
249
250 /* Update all function segments belonging to the same function. */
251 for (prev = bfun->segment.prev; prev != NULL; prev = prev->segment.prev)
252 ftrace_update_caller (prev, caller, flags);
253
254 for (next = bfun->segment.next; next != NULL; next = next->segment.next)
255 ftrace_update_caller (next, caller, flags);
256 }
257
258 /* Add a new function segment for a call.
259 CALLER is the chronologically preceding function segment.
260 MFUN and FUN are the symbol information we have for this function. */
261
262 static struct btrace_function *
263 ftrace_new_call (struct btrace_function *caller,
264 struct minimal_symbol *mfun,
265 struct symbol *fun)
266 {
267 struct btrace_function *bfun;
268
269 bfun = ftrace_new_function (caller, mfun, fun);
270 bfun->up = caller;
271 bfun->level += 1;
272
273 ftrace_debug (bfun, "new call");
274
275 return bfun;
276 }
277
278 /* Add a new function segment for a tail call.
279 CALLER is the chronologically preceding function segment.
280 MFUN and FUN are the symbol information we have for this function. */
281
282 static struct btrace_function *
283 ftrace_new_tailcall (struct btrace_function *caller,
284 struct minimal_symbol *mfun,
285 struct symbol *fun)
286 {
287 struct btrace_function *bfun;
288
289 bfun = ftrace_new_function (caller, mfun, fun);
290 bfun->up = caller;
291 bfun->level += 1;
292 bfun->flags |= BFUN_UP_LINKS_TO_TAILCALL;
293
294 ftrace_debug (bfun, "new tail call");
295
296 return bfun;
297 }
298
299 /* Find the innermost caller in the back trace of BFUN with MFUN/FUN
300 symbol information. */
301
302 static struct btrace_function *
303 ftrace_find_caller (struct btrace_function *bfun,
304 struct minimal_symbol *mfun,
305 struct symbol *fun)
306 {
307 for (; bfun != NULL; bfun = bfun->up)
308 {
309 /* Skip functions with incompatible symbol information. */
310 if (ftrace_function_switched (bfun, mfun, fun))
311 continue;
312
313 /* This is the function segment we're looking for. */
314 break;
315 }
316
317 return bfun;
318 }
319
320 /* Find the innermost caller in the back trace of BFUN, skipping all
321 function segments that do not end with a call instruction (e.g.
322 tail calls ending with a jump). */
323
324 static struct btrace_function *
325 ftrace_find_call (struct btrace_function *bfun)
326 {
327 for (; bfun != NULL; bfun = bfun->up)
328 {
329 struct btrace_insn *last;
330
331 /* Skip gaps. */
332 if (bfun->errcode != 0)
333 continue;
334
335 last = VEC_last (btrace_insn_s, bfun->insn);
336
337 if (last->iclass == BTRACE_INSN_CALL)
338 break;
339 }
340
341 return bfun;
342 }
343
344 /* Add a continuation segment for a function into which we return.
345 PREV is the chronologically preceding function segment.
346 MFUN and FUN are the symbol information we have for this function. */
347
348 static struct btrace_function *
349 ftrace_new_return (struct btrace_function *prev,
350 struct minimal_symbol *mfun,
351 struct symbol *fun)
352 {
353 struct btrace_function *bfun, *caller;
354
355 bfun = ftrace_new_function (prev, mfun, fun);
356
357 /* It is important to start at PREV's caller. Otherwise, we might find
358 PREV itself, if PREV is a recursive function. */
359 caller = ftrace_find_caller (prev->up, mfun, fun);
360 if (caller != NULL)
361 {
362 /* The caller of PREV is the preceding btrace function segment in this
363 function instance. */
364 gdb_assert (caller->segment.next == NULL);
365
366 caller->segment.next = bfun;
367 bfun->segment.prev = caller;
368
369 /* Maintain the function level. */
370 bfun->level = caller->level;
371
372 /* Maintain the call stack. */
373 bfun->up = caller->up;
374 bfun->flags = caller->flags;
375
376 ftrace_debug (bfun, "new return");
377 }
378 else
379 {
380 /* We did not find a caller. This could mean that something went
381 wrong or that the call is simply not included in the trace. */
382
383 /* Let's search for some actual call. */
384 caller = ftrace_find_call (prev->up);
385 if (caller == NULL)
386 {
387 /* There is no call in PREV's back trace. We assume that the
388 branch trace did not include it. */
389
390 /* Let's find the topmost function and add a new caller for it.
391 This should handle a series of initial tail calls. */
392 while (prev->up != NULL)
393 prev = prev->up;
394
395 bfun->level = prev->level - 1;
396
397 /* Fix up the call stack for PREV. */
398 ftrace_fixup_caller (prev, bfun, BFUN_UP_LINKS_TO_RET);
399
400 ftrace_debug (bfun, "new return - no caller");
401 }
402 else
403 {
404 /* There is a call in PREV's back trace to which we should have
405 returned but didn't. Let's start a new, separate back trace
406 from PREV's level. */
407 bfun->level = prev->level - 1;
408
409 /* We fix up the back trace for PREV but leave other function segments
410 on the same level as they are.
411 This should handle things like schedule () correctly where we're
412 switching contexts. */
413 prev->up = bfun;
414 prev->flags = BFUN_UP_LINKS_TO_RET;
415
416 ftrace_debug (bfun, "new return - unknown caller");
417 }
418 }
419
420 return bfun;
421 }
422
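/* Worked example for ftrace_new_return (illustrative; the function names
   and levels are hypothetical):

   Assume the trace starts inside FOO at level 0 and we then see FOO
   return, but FOO's caller was never traced.  ftrace_find_caller and
   ftrace_find_call both come up empty, so the new segment BFUN is placed
   one level below FOO (bfun->level = prev->level - 1 = -1) and FOO's up
   link is fixed up to point at BFUN via ftrace_fixup_caller with
   BFUN_UP_LINKS_TO_RET.  The negative level is later normalized away by
   the global level offset (see btrace_compute_ftrace_bts).  */
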
423 /* Add a new function segment for a function switch.
424 PREV is the chronologically preceding function segment.
425 MFUN and FUN are the symbol information we have for this function. */
426
427 static struct btrace_function *
428 ftrace_new_switch (struct btrace_function *prev,
429 struct minimal_symbol *mfun,
430 struct symbol *fun)
431 {
432 struct btrace_function *bfun;
433
434 /* This is an unexplained function switch. The call stack will likely
435 be wrong at this point. */
436 bfun = ftrace_new_function (prev, mfun, fun);
437
438 ftrace_debug (bfun, "new switch");
439
440 return bfun;
441 }
442
443 /* Add a new function segment for a gap in the trace due to a decode error.
444 PREV is the chronologically preceding function segment.
445 ERRCODE is the format-specific error code. */
446
447 static struct btrace_function *
448 ftrace_new_gap (struct btrace_function *prev, int errcode)
449 {
450 struct btrace_function *bfun;
451
452 /* We hijack prev if it was empty. */
453 if (prev != NULL && prev->errcode == 0
454 && VEC_empty (btrace_insn_s, prev->insn))
455 bfun = prev;
456 else
457 bfun = ftrace_new_function (prev, NULL, NULL);
458
459 bfun->errcode = errcode;
460
461 ftrace_debug (bfun, "new gap");
462
463 return bfun;
464 }
465
466 /* Update BFUN with respect to the instruction at PC. This may create new
467 function segments.
468 Return the chronologically latest function segment, never NULL. */
469
470 static struct btrace_function *
471 ftrace_update_function (struct btrace_function *bfun, CORE_ADDR pc)
472 {
473 struct bound_minimal_symbol bmfun;
474 struct minimal_symbol *mfun;
475 struct symbol *fun;
476 struct btrace_insn *last;
477
478 /* Try to determine the function we're in. We use both types of symbols
479 to avoid surprises when we sometimes get a full symbol and sometimes
480 only a minimal symbol. */
481 fun = find_pc_function (pc);
482 bmfun = lookup_minimal_symbol_by_pc (pc);
483 mfun = bmfun.minsym;
484
485 if (fun == NULL && mfun == NULL)
486 DEBUG_FTRACE ("no symbol at %s", core_addr_to_string_nz (pc));
487
488 /* Create a new function segment if we don't have one yet or if the preceding segment is a gap. */
489 if (bfun == NULL || bfun->errcode != 0)
490 return ftrace_new_function (bfun, mfun, fun);
491
492 /* Check the last instruction, if we have one.
493 We do this check first, since it allows us to fill in the call stack
494 links in addition to the normal flow links. */
495 last = NULL;
496 if (!VEC_empty (btrace_insn_s, bfun->insn))
497 last = VEC_last (btrace_insn_s, bfun->insn);
498
499 if (last != NULL)
500 {
501 switch (last->iclass)
502 {
503 case BTRACE_INSN_RETURN:
504 {
505 const char *fname;
506
507 /* On some systems, _dl_runtime_resolve returns to the resolved
508 function instead of jumping to it. From our perspective,
509 however, this is a tailcall.
510 If we treated it as a return, we wouldn't be able to find the
511 resolved function in our stack back trace. Hence, we would
512 lose the current stack back trace and start anew with an empty
513 back trace. When the resolved function returns, we would then
514 create a stack back trace with the same function names but
515 different frame ids. This will confuse stepping. */
516 fname = ftrace_print_function_name (bfun);
517 if (strcmp (fname, "_dl_runtime_resolve") == 0)
518 return ftrace_new_tailcall (bfun, mfun, fun);
519
520 return ftrace_new_return (bfun, mfun, fun);
521 }
522
523 case BTRACE_INSN_CALL:
524 /* Ignore calls to the next instruction. They are used for PIC. */
525 if (last->pc + last->size == pc)
526 break;
527
528 return ftrace_new_call (bfun, mfun, fun);
529
530 case BTRACE_INSN_JUMP:
531 {
532 CORE_ADDR start;
533
534 start = get_pc_function_start (pc);
535
536 /* A jump to the start of a function is (typically) a tail call. */
537 if (start == pc)
538 return ftrace_new_tailcall (bfun, mfun, fun);
539
540 /* If we can't determine the function for PC, we treat a jump at
541 the end of the block as a tail call if we're switching functions
542 and as an intra-function branch if we aren't. */
543 if (start == 0 && ftrace_function_switched (bfun, mfun, fun))
544 return ftrace_new_tailcall (bfun, mfun, fun);
545
546 break;
547 }
548 }
549 }
550
551 /* Check if we're switching functions for some other reason. */
552 if (ftrace_function_switched (bfun, mfun, fun))
553 {
554 DEBUG_FTRACE ("switching from %s in %s at %s",
555 ftrace_print_insn_addr (last),
556 ftrace_print_function_name (bfun),
557 ftrace_print_filename (bfun));
558
559 return ftrace_new_switch (bfun, mfun, fun);
560 }
561
562 return bfun;
563 }
564
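/* Example (illustrative; the function names are hypothetical): if BFUN
   is a segment of "foo" whose last recorded instruction is classified
   as BTRACE_INSN_CALL into "bar", ftrace_update_function returns a new
   segment for "bar" one level deeper via ftrace_new_call.  When "bar"
   later returns, ftrace_new_return creates a continuation segment of
   "foo" at "foo"'s original level.  */
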
565 /* Add INSN to BFUN's instructions. */
566
567 static void
568 ftrace_update_insns (struct btrace_function *bfun,
569 const struct btrace_insn *insn)
570 {
571 VEC_safe_push (btrace_insn_s, bfun->insn, insn);
572
573 if (record_debug > 1)
574 ftrace_debug (bfun, "update insn");
575 }
576
577 /* Classify the instruction at PC. */
578
579 static enum btrace_insn_class
580 ftrace_classify_insn (struct gdbarch *gdbarch, CORE_ADDR pc)
581 {
582 enum btrace_insn_class iclass;
583
584 iclass = BTRACE_INSN_OTHER;
585 TRY
586 {
587 if (gdbarch_insn_is_call (gdbarch, pc))
588 iclass = BTRACE_INSN_CALL;
589 else if (gdbarch_insn_is_ret (gdbarch, pc))
590 iclass = BTRACE_INSN_RETURN;
591 else if (gdbarch_insn_is_jump (gdbarch, pc))
592 iclass = BTRACE_INSN_JUMP;
593 }
594 CATCH (error, RETURN_MASK_ERROR)
595 {
596 }
597 END_CATCH
598
599 return iclass;
600 }
601
602 /* Compute the function branch trace from BTS trace. */
603
604 static void
605 btrace_compute_ftrace_bts (struct thread_info *tp,
606 const struct btrace_data_bts *btrace)
607 {
608 struct btrace_thread_info *btinfo;
609 struct btrace_function *begin, *end;
610 struct gdbarch *gdbarch;
611 unsigned int blk, ngaps;
612 int level;
613
614 gdbarch = target_gdbarch ();
615 btinfo = &tp->btrace;
616 begin = btinfo->begin;
617 end = btinfo->end;
618 ngaps = btinfo->ngaps;
619 level = begin != NULL ? -btinfo->level : INT_MAX;
620 blk = VEC_length (btrace_block_s, btrace->blocks);
621
622 while (blk != 0)
623 {
624 btrace_block_s *block;
625 CORE_ADDR pc;
626
627 blk -= 1;
628
629 block = VEC_index (btrace_block_s, btrace->blocks, blk);
630 pc = block->begin;
631
632 for (;;)
633 {
634 struct btrace_insn insn;
635 int size;
636
637 /* We should hit the end of the block. Warn if we went too far. */
638 if (block->end < pc)
639 {
640 /* Indicate the gap in the trace. */
641 end = ftrace_new_gap (end, BDE_BTS_OVERFLOW);
642 if (begin == NULL)
643 begin = end;
644
645 ngaps += 1;
646
647 warning (_("Recorded trace may be corrupted at instruction "
648 "%u (pc = %s)."), end->insn_offset - 1,
649 core_addr_to_string_nz (pc));
650
651 break;
652 }
653
654 end = ftrace_update_function (end, pc);
655 if (begin == NULL)
656 begin = end;
657
658 /* Maintain the function level offset.
659 For all but the last block, we do it here. */
660 if (blk != 0)
661 level = std::min (level, end->level);
662
663 size = 0;
664 TRY
665 {
666 size = gdb_insn_length (gdbarch, pc);
667 }
668 CATCH (error, RETURN_MASK_ERROR)
669 {
670 }
671 END_CATCH
672
673 insn.pc = pc;
674 insn.size = size;
675 insn.iclass = ftrace_classify_insn (gdbarch, pc);
676 insn.flags = 0;
677
678 ftrace_update_insns (end, &insn);
679
680 /* We're done once we pushed the instruction at the end. */
681 if (block->end == pc)
682 break;
683
684 /* We can't continue if we fail to compute the size. */
685 if (size <= 0)
686 {
687 /* Indicate the gap in the trace. We just added INSN so we're
688 not at the beginning. */
689 end = ftrace_new_gap (end, BDE_BTS_INSN_SIZE);
690 ngaps += 1;
691
692 warning (_("Recorded trace may be incomplete at instruction %u "
693 "(pc = %s)."), end->insn_offset - 1,
694 core_addr_to_string_nz (pc));
695
696 break;
697 }
698
699 pc += size;
700
701 /* Maintain the function level offset.
702 For the last block, we do it here to not consider the last
703 instruction.
704 Since the last instruction corresponds to the current instruction
705 and is not really part of the execution history, it shouldn't
706 affect the level. */
707 if (blk == 0)
708 level = std::min (level, end->level);
709 }
710 }
711
712 btinfo->begin = begin;
713 btinfo->end = end;
714 btinfo->ngaps = ngaps;
715
716 /* LEVEL is the minimal function level of all btrace function segments.
717 Define the global level offset to -LEVEL so all function levels are
718 normalized to start at zero. */
719 btinfo->level = -level;
720 }
721
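/* Example (illustrative; the addresses are hypothetical): a BTS block
   [begin = 0x400500, end = 0x400507] is decoded by the loop above
   roughly as

     pc = block->begin;
     for (;;)
       {
	 ...update the function segment and append the insn at pc...
	 if (block->end == pc)
	   break;
	 pc += gdb_insn_length (gdbarch, pc);
       }

   i.e. it stops once the instruction starting at block->end has been
   pushed; running past block->end indicates corrupted trace and is
   recorded as a gap.  */
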
722 #if defined (HAVE_LIBIPT)
723
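/* Translate an Intel Processor Trace instruction class to a btrace
   instruction class.  */
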
724 static enum btrace_insn_class
725 pt_reclassify_insn (enum pt_insn_class iclass)
726 {
727 switch (iclass)
728 {
729 case ptic_call:
730 return BTRACE_INSN_CALL;
731
732 case ptic_return:
733 return BTRACE_INSN_RETURN;
734
735 case ptic_jump:
736 return BTRACE_INSN_JUMP;
737
738 default:
739 return BTRACE_INSN_OTHER;
740 }
741 }
742
743 /* Return the btrace instruction flags for INSN. */
744
745 static btrace_insn_flags
746 pt_btrace_insn_flags (const struct pt_insn *insn)
747 {
748 btrace_insn_flags flags = 0;
749
750 if (insn->speculative)
751 flags |= BTRACE_INSN_FLAG_SPECULATIVE;
752
753 return flags;
754 }
755
756 /* Add function branch trace using DECODER. */
757
758 static void
759 ftrace_add_pt (struct pt_insn_decoder *decoder,
760 struct btrace_function **pbegin,
761 struct btrace_function **pend, int *plevel,
762 unsigned int *ngaps)
763 {
764 struct btrace_function *begin, *end, *upd;
765 uint64_t offset;
766 int errcode;
767
768 begin = *pbegin;
769 end = *pend;
770 for (;;)
771 {
772 struct btrace_insn btinsn;
773 struct pt_insn insn;
774
775 errcode = pt_insn_sync_forward (decoder);
776 if (errcode < 0)
777 {
778 if (errcode != -pte_eos)
779 warning (_("Failed to synchronize onto the Intel Processor "
780 "Trace stream: %s."), pt_errstr (pt_errcode (errcode)));
781 break;
782 }
783
784 memset (&btinsn, 0, sizeof (btinsn));
785 for (;;)
786 {
787 errcode = pt_insn_next (decoder, &insn, sizeof (insn));
788 if (errcode < 0)
789 break;
790
791 /* Look for gaps in the trace - unless we're at the beginning. */
792 if (begin != NULL)
793 {
794 /* Tracing is disabled and re-enabled each time we enter the
795 kernel. Most times, we continue from the same instruction we
796 stopped before. This is indicated via the RESUMED instruction
797 flag. The ENABLED instruction flag means that we continued
798 from some other instruction. Indicate this as a trace gap. */
799 if (insn.enabled)
800 {
801 *pend = end = ftrace_new_gap (end, BDE_PT_DISABLED);
802 *ngaps += 1;
803
804 pt_insn_get_offset (decoder, &offset);
805
806 warning (_("Non-contiguous trace at instruction %u (offset "
807 "= 0x%" PRIx64 ", pc = 0x%" PRIx64 ")."),
808 end->insn_offset - 1, offset, insn.ip);
809 }
810 }
811
812 /* Indicate trace overflows. */
813 if (insn.resynced)
814 {
815 *pend = end = ftrace_new_gap (end, BDE_PT_OVERFLOW);
816 if (begin == NULL)
817 *pbegin = begin = end;
818
819 *ngaps += 1;
820
821 pt_insn_get_offset (decoder, &offset);
822
823 warning (_("Overflow at instruction %u (offset = 0x%" PRIx64
824 ", pc = 0x%" PRIx64 ")."), end->insn_offset - 1,
825 offset, insn.ip);
826 }
827
828 upd = ftrace_update_function (end, insn.ip);
829 if (upd != end)
830 {
831 *pend = end = upd;
832
833 if (begin == NULL)
834 *pbegin = begin = upd;
835 }
836
837 /* Maintain the function level offset. */
838 *plevel = std::min (*plevel, end->level);
839
840 btinsn.pc = (CORE_ADDR) insn.ip;
841 btinsn.size = (gdb_byte) insn.size;
842 btinsn.iclass = pt_reclassify_insn (insn.iclass);
843 btinsn.flags = pt_btrace_insn_flags (&insn);
844
845 ftrace_update_insns (end, &btinsn);
846 }
847
848 if (errcode == -pte_eos)
849 break;
850
851 /* Indicate the gap in the trace. */
852 *pend = end = ftrace_new_gap (end, errcode);
853 if (begin == NULL)
854 *pbegin = begin = end;
855 *ngaps += 1;
856
857 pt_insn_get_offset (decoder, &offset);
858
859 warning (_("Decode error (%d) at instruction %u (offset = 0x%" PRIx64
860 ", pc = 0x%" PRIx64 "): %s."), errcode, end->insn_offset - 1,
861 offset, insn.ip, pt_errstr (pt_errcode (errcode)));
862 }
863 }
864
865 /* A callback function to allow the trace decoder to read the inferior's
866 memory. */
867
868 static int
869 btrace_pt_readmem_callback (gdb_byte *buffer, size_t size,
870 const struct pt_asid *asid, uint64_t pc,
871 void *context)
872 {
873 int result, errcode;
874
875 result = (int) size;
876 TRY
877 {
878 errcode = target_read_code ((CORE_ADDR) pc, buffer, size);
879 if (errcode != 0)
880 result = -pte_nomap;
881 }
882 CATCH (error, RETURN_MASK_ERROR)
883 {
884 result = -pte_nomap;
885 }
886 END_CATCH
887
888 return result;
889 }
890
891 /* Translate a btrace CPU vendor to the corresponding libipt CPU vendor. */
892
893 static enum pt_cpu_vendor
894 pt_translate_cpu_vendor (enum btrace_cpu_vendor vendor)
895 {
896 switch (vendor)
897 {
898 default:
899 return pcv_unknown;
900
901 case CV_INTEL:
902 return pcv_intel;
903 }
904 }
905
906 /* Finalize the function branch trace after decode. */
907
908 static void
909 btrace_finalize_ftrace_pt (struct pt_insn_decoder *decoder,
                               struct thread_info *tp, int level)
910 {
911 pt_insn_free_decoder (decoder);
912
913 /* LEVEL is the minimal function level of all btrace function segments.
914 Define the global level offset to -LEVEL so all function levels are
915 normalized to start at zero. */
916 tp->btrace.level = -level;
917
918 /* Add a single last instruction entry for the current PC.
919 This allows us to compute the backtrace at the current PC using both
920 standard unwind and btrace unwind.
921 This extra entry is ignored by all record commands. */
922 btrace_add_pc (tp);
923 }
924
925 /* Compute the function branch trace from Intel Processor Trace
926 format. */
927
928 static void
929 btrace_compute_ftrace_pt (struct thread_info *tp,
930 const struct btrace_data_pt *btrace)
931 {
932 struct btrace_thread_info *btinfo;
933 struct pt_insn_decoder *decoder;
934 struct pt_config config;
935 int level, errcode;
936
937 if (btrace->size == 0)
938 return;
939
940 btinfo = &tp->btrace;
941 level = btinfo->begin != NULL ? -btinfo->level : INT_MAX;
942
943 pt_config_init (&config);
944 config.begin = btrace->data;
945 config.end = btrace->data + btrace->size;
946
947 config.cpu.vendor = pt_translate_cpu_vendor (btrace->config.cpu.vendor);
948 config.cpu.family = btrace->config.cpu.family;
949 config.cpu.model = btrace->config.cpu.model;
950 config.cpu.stepping = btrace->config.cpu.stepping;
951
952 errcode = pt_cpu_errata (&config.errata, &config.cpu);
953 if (errcode < 0)
954 error (_("Failed to configure the Intel Processor Trace decoder: %s."),
955 pt_errstr (pt_errcode (errcode)));
956
957 decoder = pt_insn_alloc_decoder (&config);
958 if (decoder == NULL)
959 error (_("Failed to allocate the Intel Processor Trace decoder."));
960
961 TRY
962 {
963 struct pt_image *image;
964
965 image = pt_insn_get_image (decoder);
966 if (image == NULL)
967 error (_("Failed to configure the Intel Processor Trace decoder."));
968
969 errcode = pt_image_set_callback (image, btrace_pt_readmem_callback, NULL);
970 if (errcode < 0)
971 error (_("Failed to configure the Intel Processor Trace decoder: "
972 "%s."), pt_errstr (pt_errcode (errcode)));
973
974 ftrace_add_pt (decoder, &btinfo->begin, &btinfo->end, &level,
975 &btinfo->ngaps);
976 }
977 CATCH (error, RETURN_MASK_ALL)
978 {
979 /* Indicate a gap in the trace if we quit trace processing. */
980 if (error.reason == RETURN_QUIT && btinfo->end != NULL)
981 {
982 btinfo->end = ftrace_new_gap (btinfo->end, BDE_PT_USER_QUIT);
983 btinfo->ngaps++;
984 }
985
986 btrace_finalize_ftrace_pt (decoder, tp, level);
987
988 throw_exception (error);
989 }
990 END_CATCH
991
992 btrace_finalize_ftrace_pt (decoder, tp, level);
993 }
994
995 #else /* defined (HAVE_LIBIPT) */
996
997 static void
998 btrace_compute_ftrace_pt (struct thread_info *tp,
999 const struct btrace_data_pt *btrace)
1000 {
1001 internal_error (__FILE__, __LINE__, _("Unexpected branch trace format."));
1002 }
1003
1004 #endif /* defined (HAVE_LIBIPT) */
1005
1006 /* Compute the function branch trace from a branch trace BTRACE for
1007 thread TP. */
1008
1009 static void
1010 btrace_compute_ftrace (struct thread_info *tp, struct btrace_data *btrace)
1011 {
1012 DEBUG ("compute ftrace");
1013
1014 switch (btrace->format)
1015 {
1016 case BTRACE_FORMAT_NONE:
1017 return;
1018
1019 case BTRACE_FORMAT_BTS:
1020 btrace_compute_ftrace_bts (tp, &btrace->variant.bts);
1021 return;
1022
1023 case BTRACE_FORMAT_PT:
1024 btrace_compute_ftrace_pt (tp, &btrace->variant.pt);
1025 return;
1026 }
1027
1028 internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
1029 }
1030
1031 /* Add an entry for the current PC. */
1032
1033 static void
1034 btrace_add_pc (struct thread_info *tp)
1035 {
1036 struct btrace_data btrace;
1037 struct btrace_block *block;
1038 struct regcache *regcache;
1039 struct cleanup *cleanup;
1040 CORE_ADDR pc;
1041
1042 regcache = get_thread_regcache (tp->ptid);
1043 pc = regcache_read_pc (regcache);
1044
1045 btrace_data_init (&btrace);
1046 btrace.format = BTRACE_FORMAT_BTS;
1047 btrace.variant.bts.blocks = NULL;
1048
1049 cleanup = make_cleanup_btrace_data (&btrace);
1050
1051 block = VEC_safe_push (btrace_block_s, btrace.variant.bts.blocks, NULL);
1052 block->begin = pc;
1053 block->end = pc;
1054
1055 btrace_compute_ftrace (tp, &btrace);
1056
1057 do_cleanups (cleanup);
1058 }
1059
1060 /* See btrace.h. */
1061
1062 void
1063 btrace_enable (struct thread_info *tp, const struct btrace_config *conf)
1064 {
1065 if (tp->btrace.target != NULL)
1066 return;
1067
1068 #if !defined (HAVE_LIBIPT)
1069 if (conf->format == BTRACE_FORMAT_PT)
1070 error (_("GDB does not support Intel Processor Trace."));
1071 #endif /* !defined (HAVE_LIBIPT) */
1072
1073 if (!target_supports_btrace (conf->format))
1074 error (_("Target does not support branch tracing."));
1075
1076 DEBUG ("enable thread %s (%s)", print_thread_id (tp),
1077 target_pid_to_str (tp->ptid));
1078
1079 tp->btrace.target = target_enable_btrace (tp->ptid, conf);
1080
1081 /* Add an entry for the current PC so we start tracing from where we
1082 enabled it. */
1083 if (tp->btrace.target != NULL)
1084 btrace_add_pc (tp);
1085 }
1086
1087 /* See btrace.h. */
1088
1089 const struct btrace_config *
1090 btrace_conf (const struct btrace_thread_info *btinfo)
1091 {
1092 if (btinfo->target == NULL)
1093 return NULL;
1094
1095 return target_btrace_conf (btinfo->target);
1096 }
1097
1098 /* See btrace.h. */
1099
1100 void
1101 btrace_disable (struct thread_info *tp)
1102 {
1103 struct btrace_thread_info *btp = &tp->btrace;
1105
1106 if (btp->target == NULL)
1107 return;
1108
1109 DEBUG ("disable thread %s (%s)", print_thread_id (tp),
1110 target_pid_to_str (tp->ptid));
1111
1112 target_disable_btrace (btp->target);
1113 btp->target = NULL;
1114
1115 btrace_clear (tp);
1116 }
1117
1118 /* See btrace.h. */
1119
1120 void
1121 btrace_teardown (struct thread_info *tp)
1122 {
1123 struct btrace_thread_info *btp = &tp->btrace;
1125
1126 if (btp->target == NULL)
1127 return;
1128
1129 DEBUG ("teardown thread %s (%s)", print_thread_id (tp),
1130 target_pid_to_str (tp->ptid));
1131
1132 target_teardown_btrace (btp->target);
1133 btp->target = NULL;
1134
1135 btrace_clear (tp);
1136 }
1137
1138 /* Stitch branch trace in BTS format. */
1139
1140 static int
1141 btrace_stitch_bts (struct btrace_data_bts *btrace, struct thread_info *tp)
1142 {
1143 struct btrace_thread_info *btinfo;
1144 struct btrace_function *last_bfun;
1145 struct btrace_insn *last_insn;
1146 btrace_block_s *first_new_block;
1147
1148 btinfo = &tp->btrace;
1149 last_bfun = btinfo->end;
1150 gdb_assert (last_bfun != NULL);
1151 gdb_assert (!VEC_empty (btrace_block_s, btrace->blocks));
1152
1153 /* If the existing trace ends with a gap, we just glue the traces
1154 together. We need to drop the last (i.e. chronologically first) block
1155 of the new trace, though, since we can't fill in the start address. */
1156 if (VEC_empty (btrace_insn_s, last_bfun->insn))
1157 {
1158 VEC_pop (btrace_block_s, btrace->blocks);
1159 return 0;
1160 }
1161
1162 /* Beware that block trace starts with the most recent block, so the
1163 chronologically first block in the new trace is the last block in
1164 the new trace's block vector. */
1165 first_new_block = VEC_last (btrace_block_s, btrace->blocks);
1166 last_insn = VEC_last (btrace_insn_s, last_bfun->insn);
1167
1168 /* If the current PC at the end of the block is the same as in our current
1169 trace, there are two explanations:
1170 1. we executed the instruction and some branch brought us back.
1171 2. we have not made any progress.
1172 In the first case, the delta trace vector should contain at least two
1173 entries.
1174 In the second case, the delta trace vector should contain exactly one
1175 entry for the partial block containing the current PC. Remove it. */
1176 if (first_new_block->end == last_insn->pc
1177 && VEC_length (btrace_block_s, btrace->blocks) == 1)
1178 {
1179 VEC_pop (btrace_block_s, btrace->blocks);
1180 return 0;
1181 }
1182
1183 DEBUG ("stitching %s to %s", ftrace_print_insn_addr (last_insn),
1184 core_addr_to_string_nz (first_new_block->end));
1185
1186 /* Do a simple sanity check to make sure we don't accidentally end up
1187 with a bad block. This should not occur in practice. */
1188 if (first_new_block->end < last_insn->pc)
1189 {
1190 warning (_("Error while trying to read delta trace. Falling back to "
1191 "a full read."));
1192 return -1;
1193 }
1194
1195 /* We adjust the last block to start at the end of our current trace. */
1196 gdb_assert (first_new_block->begin == 0);
1197 first_new_block->begin = last_insn->pc;
1198
1199 /* We simply pop the last insn so we can insert it again as part of
1200 the normal branch trace computation.
1201 Since instruction iterators are based on indices in the instructions
1202 vector, we don't leave any pointers dangling. */
1203 DEBUG ("pruning insn at %s for stitching",
1204 ftrace_print_insn_addr (last_insn));
1205
1206 VEC_pop (btrace_insn_s, last_bfun->insn);
1207
1208 /* The instructions vector may become empty temporarily if this has
1209 been the only instruction in this function segment.
1210 This violates the invariant but will be remedied shortly by
1211 btrace_compute_ftrace when we add the new trace. */
1212
1213 /* The only case where this would hurt is if the entire trace consisted
1214 of just that one instruction. If we remove it, we might turn the now
1215 empty btrace function segment into a gap. But we don't want gaps at
1216 the beginning. To avoid this, we remove the entire old trace. */
1217 if (last_bfun == btinfo->begin && VEC_empty (btrace_insn_s, last_bfun->insn))
1218 btrace_clear (tp);
1219
1220 return 0;
1221 }
1222
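/* Worked example for btrace_stitch_bts (illustrative; the addresses are
   hypothetical): suppose the existing trace ends with last_insn->pc =
   0x400510 and a delta read returns the blocks

     { [0x400530; 0x400540], [0; 0x400520] }.

   The chronologically first block is the last vector entry
   [0; 0x400520]; its unknown begin address (0) is adjusted to 0x400510
   and the instruction at 0x400510 is popped so that recomputing the
   function trace re-inserts it as part of the stitched block.  */
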
1223 /* Adjust the block trace in order to stitch old and new trace together.
1224 BTRACE is the new delta trace between the last and the current stop.
1225 TP is the traced thread.
1226 May modify BTRACE as well as the existing trace in TP.
1227 Return 0 on success, -1 otherwise. */
1228
1229 static int
1230 btrace_stitch_trace (struct btrace_data *btrace, struct thread_info *tp)
1231 {
1232 /* If we don't have trace, there's nothing to do. */
1233 if (btrace_data_empty (btrace))
1234 return 0;
1235
1236 switch (btrace->format)
1237 {
1238 case BTRACE_FORMAT_NONE:
1239 return 0;
1240
1241 case BTRACE_FORMAT_BTS:
1242 return btrace_stitch_bts (&btrace->variant.bts, tp);
1243
1244 case BTRACE_FORMAT_PT:
1245 /* Delta reads are not supported. */
1246 return -1;
1247 }
1248
1249 internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
1250 }
1251
1252 /* Clear the branch trace histories in BTINFO. */
1253
1254 static void
1255 btrace_clear_history (struct btrace_thread_info *btinfo)
1256 {
1257 xfree (btinfo->insn_history);
1258 xfree (btinfo->call_history);
1259 xfree (btinfo->replay);
1260
1261 btinfo->insn_history = NULL;
1262 btinfo->call_history = NULL;
1263 btinfo->replay = NULL;
1264 }
1265
1266 /* Clear the branch trace maintenance histories in BTINFO. */
1267
1268 static void
1269 btrace_maint_clear (struct btrace_thread_info *btinfo)
1270 {
1271 switch (btinfo->data.format)
1272 {
1273 default:
1274 break;
1275
1276 case BTRACE_FORMAT_BTS:
1277 btinfo->maint.variant.bts.packet_history.begin = 0;
1278 btinfo->maint.variant.bts.packet_history.end = 0;
1279 break;
1280
1281 #if defined (HAVE_LIBIPT)
1282 case BTRACE_FORMAT_PT:
1283 xfree (btinfo->maint.variant.pt.packets);
1284
1285 btinfo->maint.variant.pt.packets = NULL;
1286 btinfo->maint.variant.pt.packet_history.begin = 0;
1287 btinfo->maint.variant.pt.packet_history.end = 0;
1288 break;
1289 #endif /* defined (HAVE_LIBIPT) */
1290 }
1291 }
1292
1293 /* See btrace.h. */
1294
1295 void
1296 btrace_fetch (struct thread_info *tp)
1297 {
1298 struct btrace_thread_info *btinfo;
1299 struct btrace_target_info *tinfo;
1300 struct btrace_data btrace;
1301 struct cleanup *cleanup;
1302 int errcode;
1303
1304 DEBUG ("fetch thread %s (%s)", print_thread_id (tp),
1305 target_pid_to_str (tp->ptid));
1306
1307 btinfo = &tp->btrace;
1308 tinfo = btinfo->target;
1309 if (tinfo == NULL)
1310 return;
1311
1312 /* There's no way we could get new trace while replaying.
1313 On the other hand, delta trace would return a partial record with the
1314 current PC, which is the replay PC, not the last PC, as expected. */
1315 if (btinfo->replay != NULL)
1316 return;
1317
1318 btrace_data_init (&btrace);
1319 cleanup = make_cleanup_btrace_data (&btrace);
1320
1321 /* Let's first try to extend the trace we already have. */
1322 if (btinfo->end != NULL)
1323 {
1324 errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_DELTA);
1325 if (errcode == 0)
1326 {
1327 /* Success. Let's try to stitch the traces together. */
1328 errcode = btrace_stitch_trace (&btrace, tp);
1329 }
1330 else
1331 {
1332 /* We failed to read delta trace. Let's try to read new trace. */
1333 errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_NEW);
1334
1335 /* If we got any new trace, discard what we have. */
1336 if (errcode == 0 && !btrace_data_empty (&btrace))
1337 btrace_clear (tp);
1338 }
1339
1340 /* If we were not able to read the trace, we start over. */
1341 if (errcode != 0)
1342 {
1343 btrace_clear (tp);
1344 errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);
1345 }
1346 }
1347 else
1348 errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);
1349
1350 /* If we were not able to read the branch trace, signal an error. */
1351 if (errcode != 0)
1352 error (_("Failed to read branch trace."));
1353
1354 /* Compute the trace, provided we have any. */
1355 if (!btrace_data_empty (&btrace))
1356 {
1357 /* Store the raw trace data. The stored data will be cleared in
1358 btrace_clear, so we always append the new trace. */
1359 btrace_data_append (&btinfo->data, &btrace);
1360 btrace_maint_clear (btinfo);
1361
1362 btrace_clear_history (btinfo);
1363 btrace_compute_ftrace (tp, &btrace);
1364 }
1365
1366 do_cleanups (cleanup);
1367 }
1368
1369 /* See btrace.h. */
1370
1371 void
1372 btrace_clear (struct thread_info *tp)
1373 {
1374 struct btrace_thread_info *btinfo;
1375 struct btrace_function *it, *trash;
1376
1377 DEBUG ("clear thread %s (%s)", print_thread_id (tp),
1378 target_pid_to_str (tp->ptid));
1379
1380 /* Make sure btrace frames that may hold a pointer into the branch
1381 trace data are destroyed. */
1382 reinit_frame_cache ();
1383
1384 btinfo = &tp->btrace;
1385
1386 it = btinfo->begin;
1387 while (it != NULL)
1388 {
1389 trash = it;
1390 it = it->flow.next;
1391
1392 xfree (trash);
1393 }
1394
1395 btinfo->begin = NULL;
1396 btinfo->end = NULL;
1397 btinfo->ngaps = 0;
1398
1399 /* Clear the maint data first - it depends on BTINFO->DATA. */
1400 btrace_maint_clear (btinfo);
1401 btrace_data_clear (&btinfo->data);
1402 btrace_clear_history (btinfo);
1403 }
1404
1405 /* See btrace.h. */
1406
1407 void
1408 btrace_free_objfile (struct objfile *objfile)
1409 {
1410 struct thread_info *tp;
1411
1412 DEBUG ("free objfile");
1413
1414 ALL_NON_EXITED_THREADS (tp)
1415 btrace_clear (tp);
1416 }
1417
1418 #if defined (HAVE_LIBEXPAT)
1419
1420 /* Check the btrace document version. */
1421
1422 static void
1423 check_xml_btrace_version (struct gdb_xml_parser *parser,
1424 const struct gdb_xml_element *element,
1425 void *user_data, VEC (gdb_xml_value_s) *attributes)
1426 {
1427 const char *version
1428 = (const char *) xml_find_attribute (attributes, "version")->value;
1429
1430 if (strcmp (version, "1.0") != 0)
1431 gdb_xml_error (parser, _("Unsupported btrace version: \"%s\""), version);
1432 }
1433
1434 /* Parse a btrace "block" xml record. */
1435
1436 static void
1437 parse_xml_btrace_block (struct gdb_xml_parser *parser,
1438 const struct gdb_xml_element *element,
1439 void *user_data, VEC (gdb_xml_value_s) *attributes)
1440 {
1441 struct btrace_data *btrace;
1442 struct btrace_block *block;
1443 ULONGEST *begin, *end;
1444
1445 btrace = (struct btrace_data *) user_data;
1446
1447 switch (btrace->format)
1448 {
1449 case BTRACE_FORMAT_BTS:
1450 break;
1451
1452 case BTRACE_FORMAT_NONE:
1453 btrace->format = BTRACE_FORMAT_BTS;
1454 btrace->variant.bts.blocks = NULL;
1455 break;
1456
1457 default:
1458 gdb_xml_error (parser, _("Btrace format error."));
1459 }
1460
1461 begin = (ULONGEST *) xml_find_attribute (attributes, "begin")->value;
1462 end = (ULONGEST *) xml_find_attribute (attributes, "end")->value;
1463
1464 block = VEC_safe_push (btrace_block_s, btrace->variant.bts.blocks, NULL);
1465 block->begin = *begin;
1466 block->end = *end;
1467 }
1468
1469 /* Parse a "raw" xml record. */
1470
1471 static void
1472 parse_xml_raw (struct gdb_xml_parser *parser, const char *body_text,
1473 gdb_byte **pdata, size_t *psize)
1474 {
1475 struct cleanup *cleanup;
1476 gdb_byte *data, *bin;
1477 size_t len, size;
1478
1479 len = strlen (body_text);
1480 if (len % 2 != 0)
1481 gdb_xml_error (parser, _("Bad raw data size."));
1482
1483 size = len / 2;
1484
1485 bin = data = (gdb_byte *) xmalloc (size);
1486 cleanup = make_cleanup (xfree, data);
1487
1488 /* We use hex encoding - see common/rsp-low.h. */
1489 while (len > 0)
1490 {
1491 char hi, lo;
1492
1493 hi = *body_text++;
1494 lo = *body_text++;
1495
1496 if (hi == 0 || lo == 0)
1497 gdb_xml_error (parser, _("Bad hex encoding."));
1498
1499 *bin++ = fromhex (hi) * 16 + fromhex (lo);
1500 len -= 2;
1501 }
1502
1503 discard_cleanups (cleanup);
1504
1505 *pdata = data;
1506 *psize = size;
1507 }
1508
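/* Example (illustrative): the body text "48656c6c6f" decodes to the five
   bytes 0x48 0x65 0x6c 0x6c 0x6f ("Hello"); each byte is assembled as

     *bin++ = fromhex (hi) * 16 + fromhex (lo);

   e.g. fromhex ('4') * 16 + fromhex ('8') == 0x48.  */
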
1509 /* Parse a btrace pt-config "cpu" xml record. */
1510
1511 static void
1512 parse_xml_btrace_pt_config_cpu (struct gdb_xml_parser *parser,
1513 const struct gdb_xml_element *element,
1514 void *user_data,
1515 VEC (gdb_xml_value_s) *attributes)
1516 {
1517 struct btrace_data *btrace;
1518 const char *vendor;
1519 ULONGEST *family, *model, *stepping;
1520
1521 vendor = (const char *) xml_find_attribute (attributes, "vendor")->value;
1522 family = (ULONGEST *) xml_find_attribute (attributes, "family")->value;
1523 model = (ULONGEST *) xml_find_attribute (attributes, "model")->value;
1524 stepping = (ULONGEST *) xml_find_attribute (attributes, "stepping")->value;
1525
1526 btrace = (struct btrace_data *) user_data;
1527
1528 if (strcmp (vendor, "GenuineIntel") == 0)
1529 btrace->variant.pt.config.cpu.vendor = CV_INTEL;
1530
1531 btrace->variant.pt.config.cpu.family = *family;
1532 btrace->variant.pt.config.cpu.model = *model;
1533 btrace->variant.pt.config.cpu.stepping = *stepping;
1534 }
1535
1536 /* Parse a btrace pt "raw" xml record. */
1537
1538 static void
1539 parse_xml_btrace_pt_raw (struct gdb_xml_parser *parser,
1540 const struct gdb_xml_element *element,
1541 void *user_data, const char *body_text)
1542 {
1543 struct btrace_data *btrace;
1544
1545 btrace = (struct btrace_data *) user_data;
1546 parse_xml_raw (parser, body_text, &btrace->variant.pt.data,
1547 &btrace->variant.pt.size);
1548 }
1549
1550 /* Parse a btrace "pt" xml record. */
1551
1552 static void
1553 parse_xml_btrace_pt (struct gdb_xml_parser *parser,
1554 const struct gdb_xml_element *element,
1555 void *user_data, VEC (gdb_xml_value_s) *attributes)
1556 {
1557 struct btrace_data *btrace;
1558
1559 btrace = (struct btrace_data *) user_data;
1560 btrace->format = BTRACE_FORMAT_PT;
1561 btrace->variant.pt.config.cpu.vendor = CV_UNKNOWN;
1562 btrace->variant.pt.data = NULL;
1563 btrace->variant.pt.size = 0;
1564 }
1565
1566 static const struct gdb_xml_attribute block_attributes[] = {
1567 { "begin", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
1568 { "end", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
1569 { NULL, GDB_XML_AF_NONE, NULL, NULL }
1570 };
1571
1572 static const struct gdb_xml_attribute btrace_pt_config_cpu_attributes[] = {
1573 { "vendor", GDB_XML_AF_NONE, NULL, NULL },
1574 { "family", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
1575 { "model", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
1576 { "stepping", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
1577 { NULL, GDB_XML_AF_NONE, NULL, NULL }
1578 };
1579
1580 static const struct gdb_xml_element btrace_pt_config_children[] = {
1581 { "cpu", btrace_pt_config_cpu_attributes, NULL, GDB_XML_EF_OPTIONAL,
1582 parse_xml_btrace_pt_config_cpu, NULL },
1583 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
1584 };
1585
1586 static const struct gdb_xml_element btrace_pt_children[] = {
1587 { "pt-config", NULL, btrace_pt_config_children, GDB_XML_EF_OPTIONAL, NULL,
1588 NULL },
1589 { "raw", NULL, NULL, GDB_XML_EF_OPTIONAL, NULL, parse_xml_btrace_pt_raw },
1590 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
1591 };
1592
1593 static const struct gdb_xml_attribute btrace_attributes[] = {
1594 { "version", GDB_XML_AF_NONE, NULL, NULL },
1595 { NULL, GDB_XML_AF_NONE, NULL, NULL }
1596 };
1597
1598 static const struct gdb_xml_element btrace_children[] = {
1599 { "block", block_attributes, NULL,
1600 GDB_XML_EF_REPEATABLE | GDB_XML_EF_OPTIONAL, parse_xml_btrace_block, NULL },
1601 { "pt", NULL, btrace_pt_children, GDB_XML_EF_OPTIONAL, parse_xml_btrace_pt,
1602 NULL },
1603 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
1604 };
1605
1606 static const struct gdb_xml_element btrace_elements[] = {
1607 { "btrace", btrace_attributes, btrace_children, GDB_XML_EF_NONE,
1608 check_xml_btrace_version, NULL },
1609 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
1610 };
1611
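/* Example of a <btrace> document accepted by the elements above
   (illustrative; the addresses and raw payload are made up):

     <btrace version="1.0">
       <block begin="0x400500" end="0x400520"/>
       <block begin="0x400530" end="0x400540"/>
     </btrace>

   or, for the Intel Processor Trace format:

     <btrace version="1.0">
       <pt>
	 <pt-config>
	   <cpu vendor="GenuineIntel" family="6" model="61" stepping="4"/>
	 </pt-config>
	 <raw>028202...</raw>
       </pt>
     </btrace>  */
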
1612 #endif /* defined (HAVE_LIBEXPAT) */
1613
1614 /* See btrace.h. */
1615
1616 void
1617 parse_xml_btrace (struct btrace_data *btrace, const char *buffer)
1618 {
1619 struct cleanup *cleanup;
1620 int errcode;
1621
1622 #if defined (HAVE_LIBEXPAT)
1623
1624 btrace->format = BTRACE_FORMAT_NONE;
1625
1626 cleanup = make_cleanup_btrace_data (btrace);
1627 errcode = gdb_xml_parse_quick (_("btrace"), "btrace.dtd", btrace_elements,
1628 buffer, btrace);
1629 if (errcode != 0)
1630 error (_("Error parsing branch trace."));
1631
1632 /* Keep parse results. */
1633 discard_cleanups (cleanup);
1634
1635 #else /* !defined (HAVE_LIBEXPAT) */
1636
1637 error (_("Cannot process branch trace. XML parsing is not supported."));
1638
1639 #endif /* !defined (HAVE_LIBEXPAT) */
1640 }
1641
1642 #if defined (HAVE_LIBEXPAT)
1643
1644 /* Parse a btrace-conf "bts" xml record. */
1645
1646 static void
1647 parse_xml_btrace_conf_bts (struct gdb_xml_parser *parser,
1648 const struct gdb_xml_element *element,
1649 void *user_data, VEC (gdb_xml_value_s) *attributes)
1650 {
1651 struct btrace_config *conf;
1652 struct gdb_xml_value *size;
1653
1654 conf = (struct btrace_config *) user_data;
1655 conf->format = BTRACE_FORMAT_BTS;
1656 conf->bts.size = 0;
1657
1658 size = xml_find_attribute (attributes, "size");
1659 if (size != NULL)
1660 conf->bts.size = (unsigned int) *(ULONGEST *) size->value;
1661 }
1662
1663 /* Parse a btrace-conf "pt" xml record. */
1664
1665 static void
1666 parse_xml_btrace_conf_pt (struct gdb_xml_parser *parser,
1667 const struct gdb_xml_element *element,
1668 void *user_data, VEC (gdb_xml_value_s) *attributes)
1669 {
1670 struct btrace_config *conf;
1671 struct gdb_xml_value *size;
1672
1673 conf = (struct btrace_config *) user_data;
1674 conf->format = BTRACE_FORMAT_PT;
1675 conf->pt.size = 0;
1676
1677 size = xml_find_attribute (attributes, "size");
1678 if (size != NULL)
1679 conf->pt.size = (unsigned int) *(ULONGEST *) size->value;
1680 }
1681
1682 static const struct gdb_xml_attribute btrace_conf_pt_attributes[] = {
1683 { "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
1684 { NULL, GDB_XML_AF_NONE, NULL, NULL }
1685 };
1686
1687 static const struct gdb_xml_attribute btrace_conf_bts_attributes[] = {
1688 { "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
1689 { NULL, GDB_XML_AF_NONE, NULL, NULL }
1690 };
1691
1692 static const struct gdb_xml_element btrace_conf_children[] = {
1693 { "bts", btrace_conf_bts_attributes, NULL, GDB_XML_EF_OPTIONAL,
1694 parse_xml_btrace_conf_bts, NULL },
1695 { "pt", btrace_conf_pt_attributes, NULL, GDB_XML_EF_OPTIONAL,
1696 parse_xml_btrace_conf_pt, NULL },
1697 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
1698 };
1699
1700 static const struct gdb_xml_attribute btrace_conf_attributes[] = {
1701 { "version", GDB_XML_AF_NONE, NULL, NULL },
1702 { NULL, GDB_XML_AF_NONE, NULL, NULL }
1703 };
1704
1705 static const struct gdb_xml_element btrace_conf_elements[] = {
1706 { "btrace-conf", btrace_conf_attributes, btrace_conf_children,
1707 GDB_XML_EF_NONE, NULL, NULL },
1708 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
1709 };
1710
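/* Example of a <btrace-conf> document matched by the elements above
   (illustrative; the size is made up):

     <btrace-conf version="1.0">
       <bts size="65536"/>
     </btrace-conf>  */
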
1711 #endif /* defined (HAVE_LIBEXPAT) */
1712
1713 /* See btrace.h. */
1714
1715 void
1716 parse_xml_btrace_conf (struct btrace_config *conf, const char *xml)
1717 {
1718 int errcode;
1719
1720 #if defined (HAVE_LIBEXPAT)
1721
1722 errcode = gdb_xml_parse_quick (_("btrace-conf"), "btrace-conf.dtd",
1723 btrace_conf_elements, xml, conf);
1724 if (errcode != 0)
1725 error (_("Error parsing branch trace configuration."));
1726
1727 #else /* !defined (HAVE_LIBEXPAT) */
1728
1729 error (_("XML parsing is not supported."));
1730
1731 #endif /* !defined (HAVE_LIBEXPAT) */
1732 }
1733
1734 /* See btrace.h. */
1735
1736 const struct btrace_insn *
1737 btrace_insn_get (const struct btrace_insn_iterator *it)
1738 {
1739 const struct btrace_function *bfun;
1740 unsigned int index, end;
1741
1742 index = it->index;
1743 bfun = it->function;
1744
1745 /* Check if the iterator points to a gap in the trace. */
1746 if (bfun->errcode != 0)
1747 return NULL;
1748
1749 /* The index is within the bounds of this function's instruction vector. */
1750 end = VEC_length (btrace_insn_s, bfun->insn);
1751 gdb_assert (0 < end);
1752 gdb_assert (index < end);
1753
1754 return VEC_index (btrace_insn_s, bfun->insn, index);
1755 }
1756
1757 /* See btrace.h. */
1758
1759 unsigned int
1760 btrace_insn_number (const struct btrace_insn_iterator *it)
1761 {
1762 const struct btrace_function *bfun;
1763
1764 bfun = it->function;
1765
1766 /* Return zero if the iterator points to a gap in the trace. */
1767 if (bfun->errcode != 0)
1768 return 0;
1769
1770 return bfun->insn_offset + it->index;
1771 }
1772
1773 /* See btrace.h. */
1774
1775 void
1776 btrace_insn_begin (struct btrace_insn_iterator *it,
1777 const struct btrace_thread_info *btinfo)
1778 {
1779 const struct btrace_function *bfun;
1780
1781 bfun = btinfo->begin;
1782 if (bfun == NULL)
1783 error (_("No trace."));
1784
1785 it->function = bfun;
1786 it->index = 0;
1787 }
1788
1789 /* See btrace.h. */
1790
1791 void
1792 btrace_insn_end (struct btrace_insn_iterator *it,
1793 const struct btrace_thread_info *btinfo)
1794 {
1795 const struct btrace_function *bfun;
1796 unsigned int length;
1797
1798 bfun = btinfo->end;
1799 if (bfun == NULL)
1800 error (_("No trace."));
1801
1802 length = VEC_length (btrace_insn_s, bfun->insn);
1803
1804 /* The last function may either be a gap or it contains the current
1805 instruction, which is one past the end of the execution trace; ignore
1806 it. */
1807 if (length > 0)
1808 length -= 1;
1809
1810 it->function = bfun;
1811 it->index = length;
1812 }
1813
1814 /* See btrace.h. */
1815
1816 unsigned int
1817 btrace_insn_next (struct btrace_insn_iterator *it, unsigned int stride)
1818 {
1819 const struct btrace_function *bfun;
1820 unsigned int index, steps;
1821
1822 bfun = it->function;
1823 steps = 0;
1824 index = it->index;
1825
1826 while (stride != 0)
1827 {
1828 unsigned int end, space, adv;
1829
1830 end = VEC_length (btrace_insn_s, bfun->insn);
1831
1832 /* An empty function segment represents a gap in the trace. We count
1833 it as one instruction. */
1834 if (end == 0)
1835 {
1836 const struct btrace_function *next;
1837
1838 next = bfun->flow.next;
1839 if (next == NULL)
1840 break;
1841
1842 stride -= 1;
1843 steps += 1;
1844
1845 bfun = next;
1846 index = 0;
1847
1848 continue;
1849 }
1850
1851 gdb_assert (0 < end);
1852 gdb_assert (index < end);
1853
1854 /* Compute the number of instructions remaining in this segment. */
1855 space = end - index;
1856
1857 /* Advance the iterator as far as possible within this segment. */
1858 adv = std::min (space, stride);
1859 stride -= adv;
1860 index += adv;
1861 steps += adv;
1862
1863 /* Move to the next function if we're at the end of this one. */
1864 if (index == end)
1865 {
1866 const struct btrace_function *next;
1867
1868 next = bfun->flow.next;
1869 if (next == NULL)
1870 {
1871 /* We stepped past the last function.
1872
1873 Let's adjust the index to point to the last instruction in
1874 the previous function. */
1875 index -= 1;
1876 steps -= 1;
1877 break;
1878 }
1879
1880 /* We now point to the first instruction in the new function. */
1881 bfun = next;
1882 index = 0;
1883 }
1884
1885 /* We did make progress. */
1886 gdb_assert (adv > 0);
1887 }
1888
1889 /* Update the iterator. */
1890 it->function = bfun;
1891 it->index = index;
1892
1893 return steps;
1894 }
1895
1896 /* See btrace.h. */
1897
1898 unsigned int
1899 btrace_insn_prev (struct btrace_insn_iterator *it, unsigned int stride)
1900 {
1901 const struct btrace_function *bfun;
1902 unsigned int index, steps;
1903
1904 bfun = it->function;
1905 steps = 0;
1906 index = it->index;
1907
1908 while (stride != 0)
1909 {
1910 unsigned int adv;
1911
1912 /* Move to the previous function if we're at the start of this one. */
1913 if (index == 0)
1914 {
1915 const struct btrace_function *prev;
1916
1917 prev = bfun->flow.prev;
1918 if (prev == NULL)
1919 break;
1920
1921 /* We point to one after the last instruction in the new function. */
1922 bfun = prev;
1923 index = VEC_length (btrace_insn_s, bfun->insn);
1924
1925 /* An empty function segment represents a gap in the trace. We count
1926 it as one instruction. */
1927 if (index == 0)
1928 {
1929 stride -= 1;
1930 steps += 1;
1931
1932 continue;
1933 }
1934 }
1935
1936 /* Advance the iterator as far as possible within this segment. */
1937 adv = std::min (index, stride);
1938
1939 stride -= adv;
1940 index -= adv;
1941 steps += adv;
1942
1943 /* We did make progress. */
1944 gdb_assert (adv > 0);
1945 }
1946
1947 /* Update the iterator. */
1948 it->function = bfun;
1949 it->index = index;
1950
1951 return steps;
1952 }
1953
1954 /* See btrace.h. */
1955
1956 int
1957 btrace_insn_cmp (const struct btrace_insn_iterator *lhs,
1958 const struct btrace_insn_iterator *rhs)
1959 {
1960 unsigned int lnum, rnum;
1961
1962 lnum = btrace_insn_number (lhs);
1963 rnum = btrace_insn_number (rhs);
1964
1965 /* A gap has an instruction number of zero. Things get more
1966 complicated when gaps are involved.
1967
1968 We take the instruction number offset from the iterator's function.
1969 This is the number of the first instruction after the gap.
1970
1971 This is OK as long as both lhs and rhs point to gaps. If only one of
1972 them does, we need to adjust the number based on the other's regular
1973 instruction number. Otherwise, a gap might compare equal to an
1974 instruction. */
1975
1976 if (lnum == 0 && rnum == 0)
1977 {
1978 lnum = lhs->function->insn_offset;
1979 rnum = rhs->function->insn_offset;
1980 }
1981 else if (lnum == 0)
1982 {
1983 lnum = lhs->function->insn_offset;
1984
1985 if (lnum == rnum)
1986 lnum -= 1;
1987 }
1988 else if (rnum == 0)
1989 {
1990 rnum = rhs->function->insn_offset;
1991
1992 if (rnum == lnum)
1993 rnum -= 1;
1994 }
1995
1996 return (int) (lnum - rnum);
1997 }
1998
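/* Example for the gap handling in btrace_insn_cmp (illustrative): let
   LHS point to a gap whose function has insn_offset 10 and RHS to the
   real instruction number 10 following that gap.  Both would naively
   compare equal at 10; the adjustment lowers the gap's number to 9 so
   the gap orders strictly before the instruction.  */
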
1999 /* See btrace.h. */
2000
2001 int
2002 btrace_find_insn_by_number (struct btrace_insn_iterator *it,
2003 const struct btrace_thread_info *btinfo,
2004 unsigned int number)
2005 {
2006 const struct btrace_function *bfun;
2007 unsigned int end, length;
2008
2009 for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
2010 {
2011 /* Skip gaps. */
2012 if (bfun->errcode != 0)
2013 continue;
2014
2015 if (bfun->insn_offset <= number)
2016 break;
2017 }
2018
2019 if (bfun == NULL)
2020 return 0;
2021
2022 length = VEC_length (btrace_insn_s, bfun->insn);
2023 gdb_assert (length > 0);
2024
2025 end = bfun->insn_offset + length;
2026 if (end <= number)
2027 return 0;
2028
2029 it->function = bfun;
2030 it->index = number - bfun->insn_offset;
2031
2032 return 1;
2033 }
2034
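/* Example (illustrative, not part of GDB): looking up an instruction by its
   global number with the function above.  The helper name is hypothetical.
   Note that the lookup fails both for out-of-range numbers and for numbers
   that fall into a trace gap.  */

static int
example_goto_insn (const struct btrace_thread_info *btinfo,
		   unsigned int number)
{
  struct btrace_insn_iterator it;

  if (!btrace_find_insn_by_number (&it, btinfo, number))
    return 0;

  /* On success, the iterator is positioned exactly at NUMBER.  */
  gdb_assert (btrace_insn_number (&it) == number);
  return 1;
}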
2035 /* See btrace.h. */
2036
2037 const struct btrace_function *
2038 btrace_call_get (const struct btrace_call_iterator *it)
2039 {
2040 return it->function;
2041 }
2042
2043 /* See btrace.h. */
2044
2045 unsigned int
2046 btrace_call_number (const struct btrace_call_iterator *it)
2047 {
2048 const struct btrace_thread_info *btinfo;
2049 const struct btrace_function *bfun;
2050 unsigned int insns;
2051
2052 btinfo = it->btinfo;
2053 bfun = it->function;
2054 if (bfun != NULL)
2055 return bfun->number;
2056
2057 /* For the end iterator, i.e. bfun == NULL, we normally return one more
2058 than the number of the last function. */
2059 bfun = btinfo->end;
2060 insns = VEC_length (btrace_insn_s, bfun->insn);
2061
2062 /* If the last function contains only a single instruction (i.e. the
2063 current instruction), it is skipped in the call history, so its own
2064 number is already the number we seek. */
2065 if (insns == 1)
2066 return bfun->number;
2067
2068 /* Otherwise, return one more than the number of the last function. */
2069 return bfun->number + 1;
2070 }
2071
2072 /* See btrace.h. */
2073
2074 void
2075 btrace_call_begin (struct btrace_call_iterator *it,
2076 const struct btrace_thread_info *btinfo)
2077 {
2078 const struct btrace_function *bfun;
2079
2080 bfun = btinfo->begin;
2081 if (bfun == NULL)
2082 error (_("No trace."));
2083
2084 it->btinfo = btinfo;
2085 it->function = bfun;
2086 }
2087
2088 /* See btrace.h. */
2089
2090 void
2091 btrace_call_end (struct btrace_call_iterator *it,
2092 const struct btrace_thread_info *btinfo)
2093 {
2094 const struct btrace_function *bfun;
2095
2096 bfun = btinfo->end;
2097 if (bfun == NULL)
2098 error (_("No trace."));
2099
2100 it->btinfo = btinfo;
2101 it->function = NULL;
2102 }
2103
2104 /* See btrace.h. */
2105
2106 unsigned int
2107 btrace_call_next (struct btrace_call_iterator *it, unsigned int stride)
2108 {
2109 const struct btrace_function *bfun;
2110 unsigned int steps;
2111
2112 bfun = it->function;
2113 steps = 0;
2114 while (bfun != NULL)
2115 {
2116 const struct btrace_function *next;
2117 unsigned int insns;
2118
2119 next = bfun->flow.next;
2120 if (next == NULL)
2121 {
2122 /* Ignore the last function if it only contains a single
2123 (i.e. the current) instruction. With STEPS == 0 the decrement
wraps around; the STEPS += 1 below brings it back to zero. */
2124 insns = VEC_length (btrace_insn_s, bfun->insn);
2125 if (insns == 1)
2126 steps -= 1;
2127 }
2128
2129 if (stride == steps)
2130 break;
2131
2132 bfun = next;
2133 steps += 1;
2134 }
2135
2136 it->function = bfun;
2137 return steps;
2138 }
2139
2140 /* See btrace.h. */
2141
2142 unsigned int
2143 btrace_call_prev (struct btrace_call_iterator *it, unsigned int stride)
2144 {
2145 const struct btrace_thread_info *btinfo;
2146 const struct btrace_function *bfun;
2147 unsigned int steps;
2148
2149 bfun = it->function;
2150 steps = 0;
2151
2152 if (bfun == NULL)
2153 {
2154 unsigned int insns;
2155
2156 btinfo = it->btinfo;
2157 bfun = btinfo->end;
2158 if (bfun == NULL)
2159 return 0;
2160
2161 /* Ignore the last function if it only contains a single
2162 (i.e. the current) instruction. */
2163 insns = VEC_length (btrace_insn_s, bfun->insn);
2164 if (insns == 1)
2165 bfun = bfun->flow.prev;
2166
2167 if (bfun == NULL)
2168 return 0;
2169
2170 steps += 1;
2171 }
2172
2173 while (steps < stride)
2174 {
2175 const struct btrace_function *prev;
2176
2177 prev = bfun->flow.prev;
2178 if (prev == NULL)
2179 break;
2180
2181 bfun = prev;
2182 steps += 1;
2183 }
2184
2185 it->function = bfun;
2186 return steps;
2187 }
2188
2189 /* See btrace.h. */
2190
2191 int
2192 btrace_call_cmp (const struct btrace_call_iterator *lhs,
2193 const struct btrace_call_iterator *rhs)
2194 {
2195 unsigned int lnum, rnum;
2196
2197 lnum = btrace_call_number (lhs);
2198 rnum = btrace_call_number (rhs);
2199
2200 return (int) (lnum - rnum);
2201 }
2202
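/* Example (illustrative, not part of GDB): iterating over the call history
   from begin to end with the functions above.  The helper name is
   hypothetical; note that btrace_call_begin errors out if there is no
   trace.  */

static void
example_walk_calls (const struct btrace_thread_info *btinfo)
{
  struct btrace_call_iterator it, end;

  btrace_call_begin (&it, btinfo);
  btrace_call_end (&end, btinfo);

  while (btrace_call_cmp (&it, &end) < 0)
    {
      const struct btrace_function *bfun;

      bfun = btrace_call_get (&it);
      DEBUG ("call %u", bfun->number);

      if (btrace_call_next (&it, 1) == 0)
	break;
    }
}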
2203 /* See btrace.h. */
2204
2205 int
2206 btrace_find_call_by_number (struct btrace_call_iterator *it,
2207 const struct btrace_thread_info *btinfo,
2208 unsigned int number)
2209 {
2210 const struct btrace_function *bfun;
2211
2212 for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
2213 {
2214 unsigned int bnum;
2215
2216 bnum = bfun->number;
2217 if (number == bnum)
2218 {
2219 it->btinfo = btinfo;
2220 it->function = bfun;
2221 return 1;
2222 }
2223
2224 /* Functions are ordered and numbered consecutively, so we could bail
2225 out earlier. On the other hand, it is very unlikely that we would
2226 search for a nonexistent function. */
2227 }
2228
2229 return 0;
2230 }
2231
2232 /* See btrace.h. */
2233
2234 void
2235 btrace_set_insn_history (struct btrace_thread_info *btinfo,
2236 const struct btrace_insn_iterator *begin,
2237 const struct btrace_insn_iterator *end)
2238 {
2239 if (btinfo->insn_history == NULL)
2240 btinfo->insn_history = XCNEW (struct btrace_insn_history);
2241
2242 btinfo->insn_history->begin = *begin;
2243 btinfo->insn_history->end = *end;
2244 }
2245
2246 /* See btrace.h. */
2247
2248 void
2249 btrace_set_call_history (struct btrace_thread_info *btinfo,
2250 const struct btrace_call_iterator *begin,
2251 const struct btrace_call_iterator *end)
2252 {
2253 gdb_assert (begin->btinfo == end->btinfo);
2254
2255 if (btinfo->call_history == NULL)
2256 btinfo->call_history = XCNEW (struct btrace_call_history);
2257
2258 btinfo->call_history->begin = *begin;
2259 btinfo->call_history->end = *end;
2260 }
2261
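/* Example (illustrative, not part of GDB): recording the first ten
   instructions as the current instruction history.  The helper name is
   hypothetical.  */

static void
example_set_insn_history (struct btrace_thread_info *btinfo)
{
  struct btrace_insn_iterator begin, end;

  btrace_insn_begin (&begin, btinfo);
  end = begin;
  btrace_insn_next (&end, 10);

  btrace_set_insn_history (btinfo, &begin, &end);
}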
2262 /* See btrace.h. */
2263
2264 int
2265 btrace_is_replaying (struct thread_info *tp)
2266 {
2267 return tp->btrace.replay != NULL;
2268 }
2269
2270 /* See btrace.h. */
2271
2272 int
2273 btrace_is_empty (struct thread_info *tp)
2274 {
2275 struct btrace_insn_iterator begin, end;
2276 struct btrace_thread_info *btinfo;
2277
2278 btinfo = &tp->btrace;
2279
2280 if (btinfo->begin == NULL)
2281 return 1;
2282
2283 btrace_insn_begin (&begin, btinfo);
2284 btrace_insn_end (&end, btinfo);
2285
2286 return btrace_insn_cmp (&begin, &end) == 0;
2287 }
2288
2289 /* Forward the cleanup request. */
2290
2291 static void
2292 do_btrace_data_cleanup (void *arg)
2293 {
2294 btrace_data_fini ((struct btrace_data *) arg);
2295 }
2296
2297 /* See btrace.h. */
2298
2299 struct cleanup *
2300 make_cleanup_btrace_data (struct btrace_data *data)
2301 {
2302 return make_cleanup (do_btrace_data_cleanup, data);
2303 }
2304
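/* Example (illustrative, not part of GDB): the intended cleanup protocol.
   The helper name is hypothetical, the code that fills DATA is elided, and
   btrace_data_init is assumed from btrace-common.  */

static void
example_with_cleanup (void)
{
  struct btrace_data data;
  struct cleanup *cleanup;

  btrace_data_init (&data);
  cleanup = make_cleanup_btrace_data (&data);

  /* ... fill DATA; if an exception is thrown here, the cleanup still
     releases it ...  */

  do_cleanups (cleanup);
}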
2305 #if defined (HAVE_LIBIPT)
2306
2307 /* Print a single packet. */
2308
2309 static void
2310 pt_print_packet (const struct pt_packet *packet)
2311 {
2312 switch (packet->type)
2313 {
2314 default:
2315 printf_unfiltered (("[??: %x]"), packet->type);
2316 break;
2317
2318 case ppt_psb:
2319 printf_unfiltered (("psb"));
2320 break;
2321
2322 case ppt_psbend:
2323 printf_unfiltered (("psbend"));
2324 break;
2325
2326 case ppt_pad:
2327 printf_unfiltered (("pad"));
2328 break;
2329
2330 case ppt_tip:
2331 printf_unfiltered (("tip %u: 0x%" PRIx64 ""),
2332 packet->payload.ip.ipc,
2333 packet->payload.ip.ip);
2334 break;
2335
2336 case ppt_tip_pge:
2337 printf_unfiltered (("tip.pge %u: 0x%" PRIx64 ""),
2338 packet->payload.ip.ipc,
2339 packet->payload.ip.ip);
2340 break;
2341
2342 case ppt_tip_pgd:
2343 printf_unfiltered (("tip.pgd %u: 0x%" PRIx64 ""),
2344 packet->payload.ip.ipc,
2345 packet->payload.ip.ip);
2346 break;
2347
2348 case ppt_fup:
2349 printf_unfiltered (("fup %u: 0x%" PRIx64 ""),
2350 packet->payload.ip.ipc,
2351 packet->payload.ip.ip);
2352 break;
2353
2354 case ppt_tnt_8:
2355 printf_unfiltered (("tnt-8 %u: 0x%" PRIx64 ""),
2356 packet->payload.tnt.bit_size,
2357 packet->payload.tnt.payload);
2358 break;
2359
2360 case ppt_tnt_64:
2361 printf_unfiltered (("tnt-64 %u: 0x%" PRIx64 ""),
2362 packet->payload.tnt.bit_size,
2363 packet->payload.tnt.payload);
2364 break;
2365
2366 case ppt_pip:
2367 printf_unfiltered (("pip %" PRIx64 "%s"), packet->payload.pip.cr3,
2368 packet->payload.pip.nr ? (" nr") : (""));
2369 break;
2370
2371 case ppt_tsc:
2372 printf_unfiltered (("tsc %" PRIx64 ""), packet->payload.tsc.tsc);
2373 break;
2374
2375 case ppt_cbr:
2376 printf_unfiltered (("cbr %u"), packet->payload.cbr.ratio);
2377 break;
2378
2379 case ppt_mode:
2380 switch (packet->payload.mode.leaf)
2381 {
2382 default:
2383 printf_unfiltered (("mode %u"), packet->payload.mode.leaf);
2384 break;
2385
2386 case pt_mol_exec:
2387 printf_unfiltered (("mode.exec%s%s"),
2388 packet->payload.mode.bits.exec.csl
2389 ? (" cs.l") : (""),
2390 packet->payload.mode.bits.exec.csd
2391 ? (" cs.d") : (""));
2392 break;
2393
2394 case pt_mol_tsx:
2395 printf_unfiltered (("mode.tsx%s%s"),
2396 packet->payload.mode.bits.tsx.intx
2397 ? (" intx") : (""),
2398 packet->payload.mode.bits.tsx.abrt
2399 ? (" abrt") : (""));
2400 break;
2401 }
2402 break;
2403
2404 case ppt_ovf:
2405 printf_unfiltered (("ovf"));
2406 break;
2407
2408 case ppt_stop:
2409 printf_unfiltered (("stop"));
2410 break;
2411
2412 case ppt_vmcs:
2413 printf_unfiltered (("vmcs %" PRIx64 ""), packet->payload.vmcs.base);
2414 break;
2415
2416 case ppt_tma:
2417 printf_unfiltered (("tma %x %x"), packet->payload.tma.ctc,
2418 packet->payload.tma.fc);
2419 break;
2420
2421 case ppt_mtc:
2422 printf_unfiltered (("mtc %x"), packet->payload.mtc.ctc);
2423 break;
2424
2425 case ppt_cyc:
2426 printf_unfiltered (("cyc %" PRIx64 ""), packet->payload.cyc.value);
2427 break;
2428
2429 case ppt_mnt:
2430 printf_unfiltered (("mnt %" PRIx64 ""), packet->payload.mnt.payload);
2431 break;
2432 }
2433 }
2434
2435 /* Decode packets into MAINT using DECODER. */
2436
2437 static void
2438 btrace_maint_decode_pt (struct btrace_maint_info *maint,
2439 struct pt_packet_decoder *decoder)
2440 {
2441 int errcode;
2442
2443 for (;;)
2444 {
2445 struct btrace_pt_packet packet;
2446
2447 errcode = pt_pkt_sync_forward (decoder);
2448 if (errcode < 0)
2449 break;
2450
2451 for (;;)
2452 {
2453 pt_pkt_get_offset (decoder, &packet.offset);
2454
2455 errcode = pt_pkt_next (decoder, &packet.packet,
2456 sizeof (packet.packet));
2457 if (errcode < 0)
2458 break;
2459
2460 if (maint_btrace_pt_skip_pad == 0 || packet.packet.type != ppt_pad)
2461 {
2462 packet.errcode = pt_errcode (errcode);
2463 VEC_safe_push (btrace_pt_packet_s, maint->variant.pt.packets,
2464 &packet);
2465 }
2466 }
2467
2468 if (errcode == -pte_eos)
2469 break;
2470
2471 packet.errcode = pt_errcode (errcode);
2472 VEC_safe_push (btrace_pt_packet_s, maint->variant.pt.packets,
2473 &packet);
2474
2475 warning (_("Error at trace offset 0x%" PRIx64 ": %s."),
2476 packet.offset, pt_errstr (packet.errcode));
2477 }
2478
2479 if (errcode != -pte_eos)
2480 warning (_("Failed to synchronize onto the Intel Processor Trace "
2481 "stream: %s."), pt_errstr (pt_errcode (errcode)));
2482 }
2483
2484 /* Update the packet history in BTINFO. */
2485
2486 static void
2487 btrace_maint_update_pt_packets (struct btrace_thread_info *btinfo)
2488 {
2490 struct pt_packet_decoder *decoder;
2491 struct btrace_data_pt *pt;
2492 struct pt_config config;
2493 int errcode;
2494
2495 pt = &btinfo->data.variant.pt;
2496
2497 /* Nothing to do if there is no trace. */
2498 if (pt->size == 0)
2499 return;
2500
2501 memset (&config, 0, sizeof (config));
2502
2503 config.size = sizeof (config);
2504 config.begin = pt->data;
2505 config.end = pt->data + pt->size;
2506
2507 config.cpu.vendor = pt_translate_cpu_vendor (pt->config.cpu.vendor);
2508 config.cpu.family = pt->config.cpu.family;
2509 config.cpu.model = pt->config.cpu.model;
2510 config.cpu.stepping = pt->config.cpu.stepping;
2511
2512 errcode = pt_cpu_errata (&config.errata, &config.cpu);
2513 if (errcode < 0)
2514 error (_("Failed to configure the Intel Processor Trace decoder: %s."),
2515 pt_errstr (pt_errcode (errcode)));
2516
2517 decoder = pt_pkt_alloc_decoder (&config);
2518 if (decoder == NULL)
2519 error (_("Failed to allocate the Intel Processor Trace decoder."));
2520
2521 TRY
2522 {
2523 btrace_maint_decode_pt (&btinfo->maint, decoder);
2524 }
2525 CATCH (except, RETURN_MASK_ALL)
2526 {
2527 pt_pkt_free_decoder (decoder);
2528
2529 if (except.reason < 0)
2530 throw_exception (except);
2531 }
2532 END_CATCH
2533
2534 pt_pkt_free_decoder (decoder);
2535 }
2536
2537 #endif /* defined (HAVE_LIBIPT) */
2538
2539 /* Update the packet maintenance information for BTINFO and store the
2540 low and high bounds into BEGIN and END, respectively.
2541 Store the current iterator state into FROM and TO. */
2542
2543 static void
2544 btrace_maint_update_packets (struct btrace_thread_info *btinfo,
2545 unsigned int *begin, unsigned int *end,
2546 unsigned int *from, unsigned int *to)
2547 {
2548 switch (btinfo->data.format)
2549 {
2550 default:
2551 *begin = 0;
2552 *end = 0;
2553 *from = 0;
2554 *to = 0;
2555 break;
2556
2557 case BTRACE_FORMAT_BTS:
2558 /* Nothing to do - we operate directly on BTINFO->DATA. */
2559 *begin = 0;
2560 *end = VEC_length (btrace_block_s, btinfo->data.variant.bts.blocks);
2561 *from = btinfo->maint.variant.bts.packet_history.begin;
2562 *to = btinfo->maint.variant.bts.packet_history.end;
2563 break;
2564
2565 #if defined (HAVE_LIBIPT)
2566 case BTRACE_FORMAT_PT:
2567 if (VEC_empty (btrace_pt_packet_s, btinfo->maint.variant.pt.packets))
2568 btrace_maint_update_pt_packets (btinfo);
2569
2570 *begin = 0;
2571 *end = VEC_length (btrace_pt_packet_s, btinfo->maint.variant.pt.packets);
2572 *from = btinfo->maint.variant.pt.packet_history.begin;
2573 *to = btinfo->maint.variant.pt.packet_history.end;
2574 break;
2575 #endif /* defined (HAVE_LIBIPT) */
2576 }
2577 }
2578
2579 /* Print packets in BTINFO from BEGIN (inclusive) until END (exclusive) and
2580 update the current iterator position. */
2581
2582 static void
2583 btrace_maint_print_packets (struct btrace_thread_info *btinfo,
2584 unsigned int begin, unsigned int end)
2585 {
2586 switch (btinfo->data.format)
2587 {
2588 default:
2589 break;
2590
2591 case BTRACE_FORMAT_BTS:
2592 {
2593 VEC (btrace_block_s) *blocks;
2594 unsigned int blk;
2595
2596 blocks = btinfo->data.variant.bts.blocks;
2597 for (blk = begin; blk < end; ++blk)
2598 {
2599 const btrace_block_s *block;
2600
2601 block = VEC_index (btrace_block_s, blocks, blk);
2602
2603 printf_unfiltered ("%u\tbegin: %s, end: %s\n", blk,
2604 core_addr_to_string_nz (block->begin),
2605 core_addr_to_string_nz (block->end));
2606 }
2607
2608 btinfo->maint.variant.bts.packet_history.begin = begin;
2609 btinfo->maint.variant.bts.packet_history.end = end;
2610 }
2611 break;
2612
2613 #if defined (HAVE_LIBIPT)
2614 case BTRACE_FORMAT_PT:
2615 {
2616 VEC (btrace_pt_packet_s) *packets;
2617 unsigned int pkt;
2618
2619 packets = btinfo->maint.variant.pt.packets;
2620 for (pkt = begin; pkt < end; ++pkt)
2621 {
2622 const struct btrace_pt_packet *packet;
2623
2624 packet = VEC_index (btrace_pt_packet_s, packets, pkt);
2625
2626 printf_unfiltered ("%u\t", pkt);
2627 printf_unfiltered ("0x%" PRIx64 "\t", packet->offset);
2628
2629 if (packet->errcode == pte_ok)
2630 pt_print_packet (&packet->packet);
2631 else
2632 printf_unfiltered ("[error: %s]", pt_errstr (packet->errcode));
2633
2634 printf_unfiltered ("\n");
2635 }
2636
2637 btinfo->maint.variant.pt.packet_history.begin = begin;
2638 btinfo->maint.variant.pt.packet_history.end = end;
2639 }
2640 break;
2641 #endif /* defined (HAVE_LIBIPT) */
2642 }
2643 }
2644
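/* Example output (illustrative; the addresses are made up) for the BTS
   format:

     (gdb) maint btrace packet-history 0,+3
     0	begin: 0x400540, end: 0x400552
     1	begin: 0x400560, end: 0x40056a
     2	begin: 0x400570, end: 0x400571  */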
2645 /* Read a number from an argument string. */
2646
2647 static unsigned int
2648 get_uint (char **arg)
2649 {
2650 char *begin, *end, *pos;
2651 unsigned long number;
2652
2653 begin = *arg;
2654 pos = skip_spaces (begin);
2655
2656 if (!isdigit (*pos))
2657 error (_("Expected positive number, got: %s."), pos);
2658
2659 number = strtoul (pos, &end, 10);
2660 if (number > UINT_MAX)
2661 error (_("Number too big."));
2662
2663 *arg += (end - begin);
2664
2665 return (unsigned int) number;
2666 }
2667
2668 /* Read a context size from an argument string. */
2669
2670 static int
2671 get_context_size (char **arg)
2672 {
2673 char *pos;
2674 int number;
2675
2676 pos = skip_spaces (*arg);
2677
2678 if (!isdigit (*pos))
2679 error (_("Expected positive number, got: %s."), pos);
2680
2681 return strtol (pos, arg, 10);
2682 }
2683
2684 /* Complain about junk at the end of an argument string. */
2685
2686 static void
2687 no_chunk (char *arg)
2688 {
2689 if (*arg != 0)
2690 error (_("Junk after argument: %s."), arg);
2691 }
2692
2693 /* The "maintenance btrace packet-history" command. */
2694
2695 static void
2696 maint_btrace_packet_history_cmd (char *arg, int from_tty)
2697 {
2698 struct btrace_thread_info *btinfo;
2699 struct thread_info *tp;
2700 unsigned int size, begin, end, from, to;
2701
2702 tp = find_thread_ptid (inferior_ptid);
2703 if (tp == NULL)
2704 error (_("No thread."));
2705
2706 size = 10;
2707 btinfo = &tp->btrace;
2708
2709 btrace_maint_update_packets (btinfo, &begin, &end, &from, &to);
2710 if (begin == end)
2711 {
2712 printf_unfiltered (_("No trace.\n"));
2713 return;
2714 }
2715
2716 if (arg == NULL || *arg == 0 || strcmp (arg, "+") == 0)
2717 {
2718 from = to;
2719
2720 if (end - from < size)
2721 size = end - from;
2722 to = from + size;
2723 }
2724 else if (strcmp (arg, "-") == 0)
2725 {
2726 to = from;
2727
2728 if (to - begin < size)
2729 size = to - begin;
2730 from = to - size;
2731 }
2732 else
2733 {
2734 from = get_uint (&arg);
2735 if (end <= from)
2736 error (_("'%u' is out of range."), from);
2737
2738 arg = skip_spaces (arg);
2739 if (*arg == ',')
2740 {
2741 arg = skip_spaces (++arg);
2742
2743 if (*arg == '+')
2744 {
2745 arg += 1;
2746 size = get_context_size (&arg);
2747
2748 no_chunk (arg);
2749
2750 if (end - from < size)
2751 size = end - from;
2752 to = from + size;
2753 }
2754 else if (*arg == '-')
2755 {
2756 arg += 1;
2757 size = get_context_size (&arg);
2758
2759 no_chunk (arg);
2760
2761 /* Include the packet given as first argument. */
2762 from += 1;
2763 to = from;
2764
2765 if (to - begin < size)
2766 size = to - begin;
2767 from = to - size;
2768 }
2769 else
2770 {
2771 to = get_uint (&arg);
2772
2773 /* Include the packet at the second argument and silently
2774 truncate the range. */
2775 if (to < end)
2776 to += 1;
2777 else
2778 to = end;
2779
2780 no_chunk (arg);
2781 }
2782 }
2783 else
2784 {
2785 no_chunk (arg);
2786
2787 if (end - from < size)
2788 size = end - from;
2789 to = from + size;
2790 }
2791
2792 dont_repeat ();
2793 }
2794
2795 btrace_maint_print_packets (btinfo, from, to);
2796 }
2797
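/* Usage examples (illustrative) for the command above:

     (gdb) maint btrace packet-history		; the next ten packets
     (gdb) maint btrace packet-history -	; the previous ten packets
     (gdb) maint btrace packet-history 42	; ten packets starting at 42
     (gdb) maint btrace packet-history 42,50	; packets 42 to 50, inclusive
     (gdb) maint btrace packet-history 42,+5	; five packets starting at 42
     (gdb) maint btrace packet-history 42,-5	; five packets ending at 42  */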
2798 /* The "maintenance btrace clear-packet-history" command. */
2799
2800 static void
2801 maint_btrace_clear_packet_history_cmd (char *args, int from_tty)
2802 {
2803 struct btrace_thread_info *btinfo;
2804 struct thread_info *tp;
2805
2806 if (args != NULL && *args != 0)
2807 error (_("Invalid argument."));
2808
2809 tp = find_thread_ptid (inferior_ptid);
2810 if (tp == NULL)
2811 error (_("No thread."));
2812
2813 btinfo = &tp->btrace;
2814
2815 /* Must clear the maint data first -- it depends on BTINFO->DATA. */
2816 btrace_maint_clear (btinfo);
2817 btrace_data_clear (&btinfo->data);
2818 }
2819
2820 /* The "maintenance btrace clear" command. */
2821
2822 static void
2823 maint_btrace_clear_cmd (char *args, int from_tty)
2824 {
2825 struct btrace_thread_info *btinfo;
2826 struct thread_info *tp;
2827
2828 if (args != NULL && *args != 0)
2829 error (_("Invalid argument."));
2830
2831 tp = find_thread_ptid (inferior_ptid);
2832 if (tp == NULL)
2833 error (_("No thread."));
2834
2835 btrace_clear (tp);
2836 }
2837
2838 /* The "maintenance btrace" command. */
2839
2840 static void
2841 maint_btrace_cmd (char *args, int from_tty)
2842 {
2843 help_list (maint_btrace_cmdlist, "maintenance btrace ", all_commands,
2844 gdb_stdout);
2845 }
2846
2847 /* The "maintenance set btrace" command. */
2848
2849 static void
2850 maint_btrace_set_cmd (char *args, int from_tty)
2851 {
2852 help_list (maint_btrace_set_cmdlist, "maintenance set btrace ", all_commands,
2853 gdb_stdout);
2854 }
2855
2856 /* The "maintenance show btrace" command. */
2857
2858 static void
2859 maint_btrace_show_cmd (char *args, int from_tty)
2860 {
2861 help_list (maint_btrace_show_cmdlist, "maintenance show btrace ",
2862 all_commands, gdb_stdout);
2863 }
2864
2865 /* The "maintenance set btrace pt" command. */
2866
2867 static void
2868 maint_btrace_pt_set_cmd (char *args, int from_tty)
2869 {
2870 help_list (maint_btrace_pt_set_cmdlist, "maintenance set btrace pt ",
2871 all_commands, gdb_stdout);
2872 }
2873
2874 /* The "maintenance show btrace pt" command. */
2875
2876 static void
2877 maint_btrace_pt_show_cmd (char *args, int from_tty)
2878 {
2879 help_list (maint_btrace_pt_show_cmdlist, "maintenance show btrace pt ",
2880 all_commands, gdb_stdout);
2881 }
2882
2883 /* The "maintenance info btrace" command. */
2884
2885 static void
2886 maint_info_btrace_cmd (char *args, int from_tty)
2887 {
2888 struct btrace_thread_info *btinfo;
2889 struct thread_info *tp;
2890 const struct btrace_config *conf;
2891
2892 if (args != NULL && *args != 0)
2893 error (_("Invalid argument."));
2894
2895 tp = find_thread_ptid (inferior_ptid);
2896 if (tp == NULL)
2897 error (_("No thread."));
2898
2899 btinfo = &tp->btrace;
2900
2901 conf = btrace_conf (btinfo);
2902 if (conf == NULL)
2903 error (_("No btrace configuration."));
2904
2905 printf_unfiltered (_("Format: %s.\n"),
2906 btrace_format_string (conf->format));
2907
2908 switch (conf->format)
2909 {
2910 default:
2911 break;
2912
2913 case BTRACE_FORMAT_BTS:
2914 printf_unfiltered (_("Number of packets: %u.\n"),
2915 VEC_length (btrace_block_s,
2916 btinfo->data.variant.bts.blocks));
2917 break;
2918
2919 #if defined (HAVE_LIBIPT)
2920 case BTRACE_FORMAT_PT:
2921 {
2922 struct pt_version version;
2923
2924 version = pt_library_version ();
2925 printf_unfiltered (_("Version: %u.%u.%u%s.\n"), version.major,
2926 version.minor, version.build,
2927 version.ext != NULL ? version.ext : "");
2928
2929 btrace_maint_update_pt_packets (btinfo);
2930 printf_unfiltered (_("Number of packets: %u.\n"),
2931 VEC_length (btrace_pt_packet_s,
2932 btinfo->maint.variant.pt.packets));
2933 }
2934 break;
2935 #endif /* defined (HAVE_LIBIPT) */
2936 }
2937 }
2938
2939 /* The "maint show btrace pt skip-pad" show value function. */
2940
2941 static void
2942 show_maint_btrace_pt_skip_pad (struct ui_file *file, int from_tty,
2943 struct cmd_list_element *c,
2944 const char *value)
2945 {
2946 fprintf_filtered (file, _("Skip PAD packets is %s.\n"), value);
2947 }
2948
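/* Example (illustrative):

     (gdb) maint set btrace pt skip-pad off
     (gdb) maint show btrace pt skip-pad
     Skip PAD packets is off.  */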
2949
2950 /* Initialize btrace maintenance commands. */
2951
2952 void _initialize_btrace (void);
2953 void
2954 _initialize_btrace (void)
2955 {
2956 add_cmd ("btrace", class_maintenance, maint_info_btrace_cmd,
2957 _("Info about branch tracing data."), &maintenanceinfolist);
2958
2959 add_prefix_cmd ("btrace", class_maintenance, maint_btrace_cmd,
2960 _("Branch tracing maintenance commands."),
2961 &maint_btrace_cmdlist, "maintenance btrace ",
2962 0, &maintenancelist);
2963
2964 add_prefix_cmd ("btrace", class_maintenance, maint_btrace_set_cmd, _("\
2965 Set branch tracing specific variables."),
2966 &maint_btrace_set_cmdlist, "maintenance set btrace ",
2967 0, &maintenance_set_cmdlist);
2968
2969 add_prefix_cmd ("pt", class_maintenance, maint_btrace_pt_set_cmd, _("\
2970 Set Intel Processor Trace specific variables."),
2971 &maint_btrace_pt_set_cmdlist, "maintenance set btrace pt ",
2972 0, &maint_btrace_set_cmdlist);
2973
2974 add_prefix_cmd ("btrace", class_maintenance, maint_btrace_show_cmd, _("\
2975 Show branch tracing specific variables."),
2976 &maint_btrace_show_cmdlist, "maintenance show btrace ",
2977 0, &maintenance_show_cmdlist);
2978
2979 add_prefix_cmd ("pt", class_maintenance, maint_btrace_pt_show_cmd, _("\
2980 Show Intel Processor Trace specific variables."),
2981 &maint_btrace_pt_show_cmdlist, "maintenance show btrace pt ",
2982 0, &maint_btrace_show_cmdlist);
2983
2984 add_setshow_boolean_cmd ("skip-pad", class_maintenance,
2985 &maint_btrace_pt_skip_pad, _("\
2986 Set whether PAD packets should be skipped in the btrace packet history."), _("\
2987 Show whether PAD packets should be skipped in the btrace packet history."),_("\
2988 When enabled, PAD packets are ignored in the btrace packet history."),
2989 NULL, show_maint_btrace_pt_skip_pad,
2990 &maint_btrace_pt_set_cmdlist,
2991 &maint_btrace_pt_show_cmdlist);
2992
2993 add_cmd ("packet-history", class_maintenance, maint_btrace_packet_history_cmd,
2994 _("Print the raw branch tracing data.\n\
2995 With no argument, print ten more packets after the previous ten-line print.\n\
2996 With '-' as argument, print the ten packets before the previous ten-line print.\n\
2997 One argument specifies the starting packet of a ten-line print.\n\
2998 Two arguments separated by a comma specify the starting and ending packets \
2999 to print.\n\
3000 When preceded by '+' or '-', the second argument specifies the distance \
3001 from the first.\n"),
3002 &maint_btrace_cmdlist);
3003
3004 add_cmd ("clear-packet-history", class_maintenance,
3005 maint_btrace_clear_packet_history_cmd,
3006 _("Clears the branch tracing packet history.\n\
3007 Discards the raw branch tracing data but not the execution history data.\n\
3008 "),
3009 &maint_btrace_cmdlist);
3010
3011 add_cmd ("clear", class_maintenance, maint_btrace_clear_cmd,
3012 _("Clears the branch tracing data.\n\
3013 Discards the raw branch tracing data and the execution history data.\n\
3014 The next 'record' command will fetch the branch tracing data anew.\n\
3015 "),
3016 &maint_btrace_cmdlist);
3017
3018 }