/* Branch trace support for GDB, the GNU debugger.

   Copyright (C) 2013-2016 Free Software Foundation, Inc.

   Contributed by Intel Corp. <markus.t.metzger@intel.com>

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "btrace.h"
#include "gdbthread.h"
#include "inferior.h"
#include "target.h"
#include "record.h"
#include "symtab.h"
#include "disasm.h"
#include "source.h"
#include "filenames.h"
#include "xml-support.h"
#include "regcache.h"
#include "rsp-low.h"
#include "gdbcmd.h"
#include "cli/cli-utils.h"

#include <inttypes.h>
#include <ctype.h>

/* Command lists for btrace maintenance commands.  */
static struct cmd_list_element *maint_btrace_cmdlist;
static struct cmd_list_element *maint_btrace_set_cmdlist;
static struct cmd_list_element *maint_btrace_show_cmdlist;
static struct cmd_list_element *maint_btrace_pt_set_cmdlist;
static struct cmd_list_element *maint_btrace_pt_show_cmdlist;

/* Control whether to skip PAD packets when computing the packet history.  */
static int maint_btrace_pt_skip_pad = 1;

static void btrace_add_pc (struct thread_info *tp);

/* Print a record debug message.  Use do ... while (0) to avoid ambiguities
   when used in if statements.  */

#define DEBUG(msg, args...) \
  do \
    { \
      if (record_debug != 0) \
        fprintf_unfiltered (gdb_stdlog, \
                            "[btrace] " msg "\n", ##args); \
    } \
  while (0)

#define DEBUG_FTRACE(msg, args...) DEBUG ("[ftrace] " msg, ##args)

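/* Editor's illustrative note (not in the original sources): wrapping the
   macro body in do ... while (0) makes DEBUG expand to a single statement,
   so a use such as

     if (some_condition)
       DEBUG ("tracing %d", n);
     else
       other_action ();

   parses as intended.  With a bare { ... } block, the semicolon after
   DEBUG (...) would terminate the if statement and orphan the else.  */
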
/* Return the function name of a recorded function segment for printing.
   This function never returns NULL.  */

static const char *
ftrace_print_function_name (const struct btrace_function *bfun)
{
  struct minimal_symbol *msym;
  struct symbol *sym;

  msym = bfun->msym;
  sym = bfun->sym;

  if (sym != NULL)
    return SYMBOL_PRINT_NAME (sym);

  if (msym != NULL)
    return MSYMBOL_PRINT_NAME (msym);

  return "<unknown>";
}

/* Return the file name of a recorded function segment for printing.
   This function never returns NULL.  */

static const char *
ftrace_print_filename (const struct btrace_function *bfun)
{
  struct symbol *sym;
  const char *filename;

  sym = bfun->sym;

  if (sym != NULL)
    filename = symtab_to_filename_for_display (symbol_symtab (sym));
  else
    filename = "<unknown>";

  return filename;
}

/* Return a string representation of the address of an instruction.
   This function never returns NULL.  */

static const char *
ftrace_print_insn_addr (const struct btrace_insn *insn)
{
  if (insn == NULL)
    return "<nil>";

  return core_addr_to_string_nz (insn->pc);
}

/* Print an ftrace debug status message.  */

static void
ftrace_debug (const struct btrace_function *bfun, const char *prefix)
{
  const char *fun, *file;
  unsigned int ibegin, iend;
  int level;

  fun = ftrace_print_function_name (bfun);
  file = ftrace_print_filename (bfun);
  level = bfun->level;

  ibegin = bfun->insn_offset;
  iend = ibegin + VEC_length (btrace_insn_s, bfun->insn);

  DEBUG_FTRACE ("%s: fun = %s, file = %s, level = %d, insn = [%u; %u)",
                prefix, fun, file, level, ibegin, iend);
}

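/* Editor's illustrative note: with "set debug record 1", a call such as
   ftrace_debug (bfun, "new call") prints a line of the form

     [btrace] [ftrace] new call: fun = main, file = /src/test.c, level = 0, insn = [1; 5)

   where "[1; 5)" is the half-open range of instruction numbers in this
   segment (the example values are made up).  */
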
/* Return non-zero if BFUN does not match MFUN and FUN,
   return zero otherwise.  */

static int
ftrace_function_switched (const struct btrace_function *bfun,
                          const struct minimal_symbol *mfun,
                          const struct symbol *fun)
{
  struct minimal_symbol *msym;
  struct symbol *sym;

  msym = bfun->msym;
  sym = bfun->sym;

  /* If the minimal symbol changed, we certainly switched functions.  */
  if (mfun != NULL && msym != NULL
      && strcmp (MSYMBOL_LINKAGE_NAME (mfun), MSYMBOL_LINKAGE_NAME (msym)) != 0)
    return 1;

  /* If the symbol changed, we certainly switched functions.  */
  if (fun != NULL && sym != NULL)
    {
      const char *bfname, *fname;

      /* Check the function name.  */
      if (strcmp (SYMBOL_LINKAGE_NAME (fun), SYMBOL_LINKAGE_NAME (sym)) != 0)
        return 1;

      /* Check the location of those functions, as well.  */
      bfname = symtab_to_fullname (symbol_symtab (sym));
      fname = symtab_to_fullname (symbol_symtab (fun));
      if (filename_cmp (fname, bfname) != 0)
        return 1;
    }

  /* If we lost symbol information, we switched functions.  */
  if (!(msym == NULL && sym == NULL) && mfun == NULL && fun == NULL)
    return 1;

  /* If we gained symbol information, we switched functions.  */
  if (msym == NULL && sym == NULL && !(mfun == NULL && fun == NULL))
    return 1;

  return 0;
}

/* Allocate and initialize a new branch trace function segment.
   PREV is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_function (struct btrace_function *prev,
                     struct minimal_symbol *mfun,
                     struct symbol *fun)
{
  struct btrace_function *bfun;

  bfun = XCNEW (struct btrace_function);

  bfun->msym = mfun;
  bfun->sym = fun;
  bfun->flow.prev = prev;

  if (prev == NULL)
    {
      /* Start counting at one.  */
      bfun->number = 1;
      bfun->insn_offset = 1;
    }
  else
    {
      gdb_assert (prev->flow.next == NULL);
      prev->flow.next = bfun;

      bfun->number = prev->number + 1;
      bfun->insn_offset = (prev->insn_offset
                           + VEC_length (btrace_insn_s, prev->insn));
      bfun->level = prev->level;
    }

  return bfun;
}

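/* Editor's illustrative note: function segments and instructions are both
   numbered starting at one.  For a trace that runs three instructions in
   "main" and then enters "foo", the "main" segment gets number 1 and
   insn_offset 1, while the "foo" segment gets number 2 and insn_offset 4
   (1 plus the 3 instructions of the preceding segment).  */
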
/* Update the UP field of a function segment.  */

static void
ftrace_update_caller (struct btrace_function *bfun,
                      struct btrace_function *caller,
                      enum btrace_function_flag flags)
{
  if (bfun->up != NULL)
    ftrace_debug (bfun, "updating caller");

  bfun->up = caller;
  bfun->flags = flags;

  ftrace_debug (bfun, "set caller");
}

/* Fix up the caller for all segments of a function.  */

static void
ftrace_fixup_caller (struct btrace_function *bfun,
                     struct btrace_function *caller,
                     enum btrace_function_flag flags)
{
  struct btrace_function *prev, *next;

  ftrace_update_caller (bfun, caller, flags);

  /* Update all function segments belonging to the same function.  */
  for (prev = bfun->segment.prev; prev != NULL; prev = prev->segment.prev)
    ftrace_update_caller (prev, caller, flags);

  for (next = bfun->segment.next; next != NULL; next = next->segment.next)
    ftrace_update_caller (next, caller, flags);
}

/* Add a new function segment for a call.
   CALLER is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_call (struct btrace_function *caller,
                 struct minimal_symbol *mfun,
                 struct symbol *fun)
{
  struct btrace_function *bfun;

  bfun = ftrace_new_function (caller, mfun, fun);
  bfun->up = caller;
  bfun->level += 1;

  ftrace_debug (bfun, "new call");

  return bfun;
}

/* Add a new function segment for a tail call.
   CALLER is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_tailcall (struct btrace_function *caller,
                     struct minimal_symbol *mfun,
                     struct symbol *fun)
{
  struct btrace_function *bfun;

  bfun = ftrace_new_function (caller, mfun, fun);
  bfun->up = caller;
  bfun->level += 1;
  bfun->flags |= BFUN_UP_LINKS_TO_TAILCALL;

  ftrace_debug (bfun, "new tail call");

  return bfun;
}

/* Find the innermost caller in the back trace of BFUN with MFUN/FUN
   symbol information.  */

static struct btrace_function *
ftrace_find_caller (struct btrace_function *bfun,
                    struct minimal_symbol *mfun,
                    struct symbol *fun)
{
  for (; bfun != NULL; bfun = bfun->up)
    {
      /* Skip functions with incompatible symbol information.  */
      if (ftrace_function_switched (bfun, mfun, fun))
        continue;

      /* This is the function segment we're looking for.  */
      break;
    }

  return bfun;
}

/* Find the innermost caller in the back trace of BFUN, skipping all
   function segments that do not end with a call instruction (e.g.
   tail calls ending with a jump).  */

static struct btrace_function *
ftrace_find_call (struct btrace_function *bfun)
{
  for (; bfun != NULL; bfun = bfun->up)
    {
      struct btrace_insn *last;

      /* Skip gaps.  */
      if (bfun->errcode != 0)
        continue;

      last = VEC_last (btrace_insn_s, bfun->insn);

      if (last->iclass == BTRACE_INSN_CALL)
        break;
    }

  return bfun;
}

/* Add a continuation segment for a function into which we return.
   PREV is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_return (struct btrace_function *prev,
                   struct minimal_symbol *mfun,
                   struct symbol *fun)
{
  struct btrace_function *bfun, *caller;

  bfun = ftrace_new_function (prev, mfun, fun);

  /* It is important to start at PREV's caller.  Otherwise, we might find
     PREV itself, if PREV is a recursive function.  */
  caller = ftrace_find_caller (prev->up, mfun, fun);
  if (caller != NULL)
    {
      /* The caller of PREV is the preceding btrace function segment in this
         function instance.  */
      gdb_assert (caller->segment.next == NULL);

      caller->segment.next = bfun;
      bfun->segment.prev = caller;

      /* Maintain the function level.  */
      bfun->level = caller->level;

      /* Maintain the call stack.  */
      bfun->up = caller->up;
      bfun->flags = caller->flags;

      ftrace_debug (bfun, "new return");
    }
  else
    {
      /* We did not find a caller.  This could mean that something went
         wrong or that the call is simply not included in the trace.  */

      /* Let's search for some actual call.  */
      caller = ftrace_find_call (prev->up);
      if (caller == NULL)
        {
          /* There is no call in PREV's back trace.  We assume that the
             branch trace did not include it.  */

          /* Let's find the topmost call function - this skips tail calls.  */
          while (prev->up != NULL)
            prev = prev->up;

          /* We maintain levels for a series of returns for which we have
             not seen the calls.
             We start at the preceding function's level in case this has
             already been a return for which we have not seen the call.
             We start at level 0 otherwise, to handle tail calls correctly.  */
          bfun->level = min (0, prev->level) - 1;

          /* Fix up the call stack for PREV.  */
          ftrace_fixup_caller (prev, bfun, BFUN_UP_LINKS_TO_RET);

          ftrace_debug (bfun, "new return - no caller");
        }
      else
        {
          /* There is a call in PREV's back trace to which we should have
             returned.  Let's remain at this level.  */
          bfun->level = prev->level;

          ftrace_debug (bfun, "new return - unknown caller");
        }
    }

  return bfun;
}

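/* Editor's illustrative note: levels go negative when the trace starts
   below the outermost frame we later return from.  If tracing starts
   inside "bar" (level 0) and the trace then returns twice without having
   recorded the corresponding calls, the two continuation segments get
   levels -1 and -2; btrace_compute_ftrace_bts later normalizes all levels
   to start at zero via the global level offset.  */
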
/* Add a new function segment for a function switch.
   PREV is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_switch (struct btrace_function *prev,
                   struct minimal_symbol *mfun,
                   struct symbol *fun)
{
  struct btrace_function *bfun;

  /* This is an unexplained function switch.  The call stack will likely
     be wrong at this point.  */
  bfun = ftrace_new_function (prev, mfun, fun);

  ftrace_debug (bfun, "new switch");

  return bfun;
}

/* Add a new function segment for a gap in the trace due to a decode error.
   PREV is the chronologically preceding function segment.
   ERRCODE is the format-specific error code.  */

static struct btrace_function *
ftrace_new_gap (struct btrace_function *prev, int errcode)
{
  struct btrace_function *bfun;

  /* We hijack prev if it was empty.  */
  if (prev != NULL && prev->errcode == 0
      && VEC_empty (btrace_insn_s, prev->insn))
    bfun = prev;
  else
    bfun = ftrace_new_function (prev, NULL, NULL);

  bfun->errcode = errcode;

  ftrace_debug (bfun, "new gap");

  return bfun;
}

/* Update BFUN with respect to the instruction at PC.  This may create new
   function segments.
   Return the chronologically latest function segment, never NULL.  */

static struct btrace_function *
ftrace_update_function (struct btrace_function *bfun, CORE_ADDR pc)
{
  struct bound_minimal_symbol bmfun;
  struct minimal_symbol *mfun;
  struct symbol *fun;
  struct btrace_insn *last;

  /* Try to determine the function we're in.  We use both types of symbols
     to avoid surprises when we sometimes get a full symbol and sometimes
     only a minimal symbol.  */
  fun = find_pc_function (pc);
  bmfun = lookup_minimal_symbol_by_pc (pc);
  mfun = bmfun.minsym;

  if (fun == NULL && mfun == NULL)
    DEBUG_FTRACE ("no symbol at %s", core_addr_to_string_nz (pc));

  /* If we didn't have a function or if we had a gap before, we create one.  */
  if (bfun == NULL || bfun->errcode != 0)
    return ftrace_new_function (bfun, mfun, fun);

  /* Check the last instruction, if we have one.
     We do this check first, since it allows us to fill in the call stack
     links in addition to the normal flow links.  */
  last = NULL;
  if (!VEC_empty (btrace_insn_s, bfun->insn))
    last = VEC_last (btrace_insn_s, bfun->insn);

  if (last != NULL)
    {
      switch (last->iclass)
        {
        case BTRACE_INSN_RETURN:
          {
            const char *fname;

            /* On some systems, _dl_runtime_resolve returns to the resolved
               function instead of jumping to it.  From our perspective,
               however, this is a tailcall.
               If we treated it as return, we wouldn't be able to find the
               resolved function in our stack back trace.  Hence, we would
               lose the current stack back trace and start anew with an empty
               back trace.  When the resolved function returns, we would then
               create a stack back trace with the same function names but
               different frame id's.  This will confuse stepping.  */
            fname = ftrace_print_function_name (bfun);
            if (strcmp (fname, "_dl_runtime_resolve") == 0)
              return ftrace_new_tailcall (bfun, mfun, fun);

            return ftrace_new_return (bfun, mfun, fun);
          }

        case BTRACE_INSN_CALL:
          /* Ignore calls to the next instruction.  They are used for PIC.  */
          if (last->pc + last->size == pc)
            break;

          return ftrace_new_call (bfun, mfun, fun);

        case BTRACE_INSN_JUMP:
          {
            CORE_ADDR start;

            start = get_pc_function_start (pc);

            /* If we can't determine the function for PC, we treat a jump at
               the end of the block as a tail call.  */
            if (start == 0 || start == pc)
              return ftrace_new_tailcall (bfun, mfun, fun);
          }
        }
    }

  /* Check if we're switching functions for some other reason.  */
  if (ftrace_function_switched (bfun, mfun, fun))
    {
      DEBUG_FTRACE ("switching from %s in %s at %s",
                    ftrace_print_insn_addr (last),
                    ftrace_print_function_name (bfun),
                    ftrace_print_filename (bfun));

      return ftrace_new_switch (bfun, mfun, fun);
    }

  return bfun;
}

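/* Editor's illustrative note (not from the original sources): the
   "call to the next instruction" case above covers the common idiom for
   loading the program counter in 32-bit position-independent code, e.g.

       call 1f
     1: popl %ebx

   Here the CALL's return address equals the address of the POP, so
   last->pc + last->size == pc.  No new frame is entered, hence it must
   not be recorded as a function call.  */
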
/* Add instruction INSN to BFUN's instructions.  */

static void
ftrace_update_insns (struct btrace_function *bfun,
                     const struct btrace_insn *insn)
{
  VEC_safe_push (btrace_insn_s, bfun->insn, insn);

  if (record_debug > 1)
    ftrace_debug (bfun, "update insn");
}

/* Classify the instruction at PC.  */

static enum btrace_insn_class
ftrace_classify_insn (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  enum btrace_insn_class iclass;

  iclass = BTRACE_INSN_OTHER;
  TRY
    {
      if (gdbarch_insn_is_call (gdbarch, pc))
        iclass = BTRACE_INSN_CALL;
      else if (gdbarch_insn_is_ret (gdbarch, pc))
        iclass = BTRACE_INSN_RETURN;
      else if (gdbarch_insn_is_jump (gdbarch, pc))
        iclass = BTRACE_INSN_JUMP;
    }
  CATCH (error, RETURN_MASK_ERROR)
    {
    }
  END_CATCH

  return iclass;
}

/* Compute the function branch trace from BTS trace.  */

static void
btrace_compute_ftrace_bts (struct thread_info *tp,
                           const struct btrace_data_bts *btrace)
{
  struct btrace_thread_info *btinfo;
  struct btrace_function *begin, *end;
  struct gdbarch *gdbarch;
  unsigned int blk, ngaps;
  int level;

  gdbarch = target_gdbarch ();
  btinfo = &tp->btrace;
  begin = btinfo->begin;
  end = btinfo->end;
  ngaps = btinfo->ngaps;
  level = begin != NULL ? -btinfo->level : INT_MAX;
  blk = VEC_length (btrace_block_s, btrace->blocks);

  while (blk != 0)
    {
      btrace_block_s *block;
      CORE_ADDR pc;

      blk -= 1;

      block = VEC_index (btrace_block_s, btrace->blocks, blk);
      pc = block->begin;

      for (;;)
        {
          struct btrace_insn insn;
          int size;

          /* We should hit the end of the block.  Warn if we went too far.  */
          if (block->end < pc)
            {
              /* Indicate the gap in the trace - unless we're at the
                 beginning.  */
              if (begin != NULL)
                {
                  warning (_("Recorded trace may be corrupted around %s."),
                           core_addr_to_string_nz (pc));

                  end = ftrace_new_gap (end, BDE_BTS_OVERFLOW);
                  ngaps += 1;
                }
              break;
            }

          end = ftrace_update_function (end, pc);
          if (begin == NULL)
            begin = end;

          /* Maintain the function level offset.
             For all but the last block, we do it here.  */
          if (blk != 0)
            level = min (level, end->level);

          size = 0;
          TRY
            {
              size = gdb_insn_length (gdbarch, pc);
            }
          CATCH (error, RETURN_MASK_ERROR)
            {
            }
          END_CATCH

          insn.pc = pc;
          insn.size = size;
          insn.iclass = ftrace_classify_insn (gdbarch, pc);
          insn.flags = 0;

          ftrace_update_insns (end, &insn);

          /* We're done once we pushed the instruction at the end.  */
          if (block->end == pc)
            break;

          /* We can't continue if we fail to compute the size.  */
          if (size <= 0)
            {
              warning (_("Recorded trace may be incomplete around %s."),
                       core_addr_to_string_nz (pc));

              /* Indicate the gap in the trace.  We just added INSN so we're
                 not at the beginning.  */
              end = ftrace_new_gap (end, BDE_BTS_INSN_SIZE);
              ngaps += 1;

              break;
            }

          pc += size;

          /* Maintain the function level offset.
             For the last block, we do it here to not consider the last
             instruction.
             Since the last instruction corresponds to the current instruction
             and is not really part of the execution history, it shouldn't
             affect the level.  */
          if (blk == 0)
            level = min (level, end->level);
        }
    }

  btinfo->begin = begin;
  btinfo->end = end;
  btinfo->ngaps = ngaps;

  /* LEVEL is the minimal function level of all btrace function segments.
     Define the global level offset to -LEVEL so all function levels are
     normalized to start at zero.  */
  btinfo->level = -level;
}

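/* Editor's illustrative note: if the recorded segments ended up with
   levels -2, -1, and 0 (two returns without recorded calls), LEVEL is -2
   and btinfo->level becomes 2, so user-visible levels are 0, 1, and 2.  */
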
#if defined (HAVE_LIBIPT)

static enum btrace_insn_class
pt_reclassify_insn (enum pt_insn_class iclass)
{
  switch (iclass)
    {
    case ptic_call:
      return BTRACE_INSN_CALL;

    case ptic_return:
      return BTRACE_INSN_RETURN;

    case ptic_jump:
      return BTRACE_INSN_JUMP;

    default:
      return BTRACE_INSN_OTHER;
    }
}

/* Return the btrace instruction flags for INSN.  */

static enum btrace_insn_flag
pt_btrace_insn_flags (const struct pt_insn *insn)
{
  enum btrace_insn_flag flags = 0;

  if (insn->speculative)
    flags |= BTRACE_INSN_FLAG_SPECULATIVE;

  return flags;
}

/* Add function branch trace using DECODER.  */

static void
ftrace_add_pt (struct pt_insn_decoder *decoder,
               struct btrace_function **pbegin,
               struct btrace_function **pend, int *plevel,
               unsigned int *ngaps)
{
  struct btrace_function *begin, *end, *upd;
  uint64_t offset;
  int errcode, nerrors;

  begin = *pbegin;
  end = *pend;
  nerrors = 0;
  for (;;)
    {
      struct btrace_insn btinsn;
      struct pt_insn insn;

      errcode = pt_insn_sync_forward (decoder);
      if (errcode < 0)
        {
          if (errcode != -pte_eos)
            warning (_("Failed to synchronize onto the Intel Processor "
                       "Trace stream: %s."), pt_errstr (pt_errcode (errcode)));
          break;
        }

      memset (&btinsn, 0, sizeof (btinsn));
      for (;;)
        {
          errcode = pt_insn_next (decoder, &insn, sizeof (insn));
          if (errcode < 0)
            break;

          /* Look for gaps in the trace - unless we're at the beginning.  */
          if (begin != NULL)
            {
              /* Tracing is disabled and re-enabled each time we enter the
                 kernel.  Most times, we continue from the same instruction we
                 stopped before.  This is indicated via the RESUMED instruction
                 flag.  The ENABLED instruction flag means that we continued
                 from some other instruction.  Indicate this as a trace gap.  */
              if (insn.enabled)
                *pend = end = ftrace_new_gap (end, BDE_PT_DISABLED);

              /* Indicate trace overflows.  */
              if (insn.resynced)
                *pend = end = ftrace_new_gap (end, BDE_PT_OVERFLOW);
            }

          upd = ftrace_update_function (end, insn.ip);
          if (upd != end)
            {
              *pend = end = upd;

              if (begin == NULL)
                *pbegin = begin = upd;
            }

          /* Maintain the function level offset.  */
          *plevel = min (*plevel, end->level);

          btinsn.pc = (CORE_ADDR) insn.ip;
          btinsn.size = (gdb_byte) insn.size;
          btinsn.iclass = pt_reclassify_insn (insn.iclass);
          btinsn.flags = pt_btrace_insn_flags (&insn);

          ftrace_update_insns (end, &btinsn);
        }

      if (errcode == -pte_eos)
        break;

      /* If the gap is at the very beginning, we ignore it - we will have
         less trace, but we won't have any holes in the trace.  */
      if (begin == NULL)
        continue;

      pt_insn_get_offset (decoder, &offset);

      warning (_("Failed to decode Intel Processor Trace near trace "
                 "offset 0x%" PRIx64 " at recorded PC 0x%" PRIx64 ": %s."),
               offset, insn.ip, pt_errstr (pt_errcode (errcode)));

      /* Indicate the gap in the trace.  */
      *pend = end = ftrace_new_gap (end, errcode);
      *ngaps += 1;

      /* Count decode errors for the summary warning below.  */
      nerrors += 1;
    }

  if (nerrors > 0)
    warning (_("The recorded execution trace may have gaps."));
}

/* A callback function to allow the trace decoder to read the inferior's
   memory.  */

static int
btrace_pt_readmem_callback (gdb_byte *buffer, size_t size,
                            const struct pt_asid *asid, uint64_t pc,
                            void *context)
{
  int result, errcode;

  result = (int) size;
  TRY
    {
      errcode = target_read_code ((CORE_ADDR) pc, buffer, size);
      if (errcode != 0)
        result = -pte_nomap;
    }
  CATCH (error, RETURN_MASK_ERROR)
    {
      result = -pte_nomap;
    }
  END_CATCH

  return result;
}

/* Translate the vendor from one enum to another.  */

static enum pt_cpu_vendor
pt_translate_cpu_vendor (enum btrace_cpu_vendor vendor)
{
  switch (vendor)
    {
    default:
      return pcv_unknown;

    case CV_INTEL:
      return pcv_intel;
    }
}

/* Finalize the function branch trace after decode.  */

static void
btrace_finalize_ftrace_pt (struct pt_insn_decoder *decoder,
                           struct thread_info *tp, int level)
{
  pt_insn_free_decoder (decoder);

  /* LEVEL is the minimal function level of all btrace function segments.
     Define the global level offset to -LEVEL so all function levels are
     normalized to start at zero.  */
  tp->btrace.level = -level;

  /* Add a single last instruction entry for the current PC.
     This allows us to compute the backtrace at the current PC using both
     standard unwind and btrace unwind.
     This extra entry is ignored by all record commands.  */
  btrace_add_pc (tp);
}

/* Compute the function branch trace from Intel Processor Trace
   format.  */

static void
btrace_compute_ftrace_pt (struct thread_info *tp,
                          const struct btrace_data_pt *btrace)
{
  struct btrace_thread_info *btinfo;
  struct pt_insn_decoder *decoder;
  struct pt_config config;
  int level, errcode;

  if (btrace->size == 0)
    return;

  btinfo = &tp->btrace;
  level = btinfo->begin != NULL ? -btinfo->level : INT_MAX;

  pt_config_init (&config);
  config.begin = btrace->data;
  config.end = btrace->data + btrace->size;

  config.cpu.vendor = pt_translate_cpu_vendor (btrace->config.cpu.vendor);
  config.cpu.family = btrace->config.cpu.family;
  config.cpu.model = btrace->config.cpu.model;
  config.cpu.stepping = btrace->config.cpu.stepping;

  errcode = pt_cpu_errata (&config.errata, &config.cpu);
  if (errcode < 0)
    error (_("Failed to configure the Intel Processor Trace decoder: %s."),
           pt_errstr (pt_errcode (errcode)));

  decoder = pt_insn_alloc_decoder (&config);
  if (decoder == NULL)
    error (_("Failed to allocate the Intel Processor Trace decoder."));

  TRY
    {
      struct pt_image *image;

      image = pt_insn_get_image (decoder);
      if (image == NULL)
        error (_("Failed to configure the Intel Processor Trace decoder."));

      errcode = pt_image_set_callback (image, btrace_pt_readmem_callback,
                                       NULL);
      if (errcode < 0)
        error (_("Failed to configure the Intel Processor Trace decoder: "
                 "%s."), pt_errstr (pt_errcode (errcode)));

      ftrace_add_pt (decoder, &btinfo->begin, &btinfo->end, &level,
                     &btinfo->ngaps);
    }
  CATCH (error, RETURN_MASK_ALL)
    {
      /* Indicate a gap in the trace if we quit trace processing.  */
      if (error.reason == RETURN_QUIT && btinfo->end != NULL)
        {
          btinfo->end = ftrace_new_gap (btinfo->end, BDE_PT_USER_QUIT);
          btinfo->ngaps++;
        }

      btrace_finalize_ftrace_pt (decoder, tp, level);

      throw_exception (error);
    }
  END_CATCH

  btrace_finalize_ftrace_pt (decoder, tp, level);
}

#else /* defined (HAVE_LIBIPT) */

static void
btrace_compute_ftrace_pt (struct thread_info *tp,
                          const struct btrace_data_pt *btrace)
{
  internal_error (__FILE__, __LINE__, _("Unexpected branch trace format."));
}

#endif /* defined (HAVE_LIBIPT) */

/* Compute the function branch trace from a block branch trace BTRACE for
   thread TP.  */

static void
btrace_compute_ftrace (struct thread_info *tp, struct btrace_data *btrace)
{
  DEBUG ("compute ftrace");

  switch (btrace->format)
    {
    case BTRACE_FORMAT_NONE:
      return;

    case BTRACE_FORMAT_BTS:
      btrace_compute_ftrace_bts (tp, &btrace->variant.bts);
      return;

    case BTRACE_FORMAT_PT:
      btrace_compute_ftrace_pt (tp, &btrace->variant.pt);
      return;
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
}

/* Add an entry for the current PC.  */

static void
btrace_add_pc (struct thread_info *tp)
{
  struct btrace_data btrace;
  struct btrace_block *block;
  struct regcache *regcache;
  struct cleanup *cleanup;
  CORE_ADDR pc;

  regcache = get_thread_regcache (tp->ptid);
  pc = regcache_read_pc (regcache);

  btrace_data_init (&btrace);
  btrace.format = BTRACE_FORMAT_BTS;
  btrace.variant.bts.blocks = NULL;

  cleanup = make_cleanup_btrace_data (&btrace);

  block = VEC_safe_push (btrace_block_s, btrace.variant.bts.blocks, NULL);
  block->begin = pc;
  block->end = pc;

  btrace_compute_ftrace (tp, &btrace);

  do_cleanups (cleanup);
}

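/* Editor's illustrative note: the synthetic block above has begin == end
   == PC, so btrace_compute_ftrace_bts decodes exactly one instruction.
   That instruction becomes the extra "current PC" entry described in
   btrace_finalize_ftrace_pt.  */
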
/* See btrace.h.  */

void
btrace_enable (struct thread_info *tp, const struct btrace_config *conf)
{
  if (tp->btrace.target != NULL)
    return;

#if !defined (HAVE_LIBIPT)
  if (conf->format == BTRACE_FORMAT_PT)
    error (_("GDB does not support Intel Processor Trace."));
#endif /* !defined (HAVE_LIBIPT) */

  if (!target_supports_btrace (conf->format))
    error (_("Target does not support branch tracing."));

  DEBUG ("enable thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));

  tp->btrace.target = target_enable_btrace (tp->ptid, conf);

  /* Add an entry for the current PC so we start tracing from where we
     enabled it.  */
  if (tp->btrace.target != NULL)
    btrace_add_pc (tp);
}

/* See btrace.h.  */

const struct btrace_config *
btrace_conf (const struct btrace_thread_info *btinfo)
{
  if (btinfo->target == NULL)
    return NULL;

  return target_btrace_conf (btinfo->target);
}

/* See btrace.h.  */

void
btrace_disable (struct thread_info *tp)
{
  struct btrace_thread_info *btp = &tp->btrace;
  int errcode = 0;

  if (btp->target == NULL)
    return;

  DEBUG ("disable thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));

  target_disable_btrace (btp->target);
  btp->target = NULL;

  btrace_clear (tp);
}

/* See btrace.h.  */

void
btrace_teardown (struct thread_info *tp)
{
  struct btrace_thread_info *btp = &tp->btrace;
  int errcode = 0;

  if (btp->target == NULL)
    return;

  DEBUG ("teardown thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));

  target_teardown_btrace (btp->target);
  btp->target = NULL;

  btrace_clear (tp);
}

/* Stitch branch trace in BTS format.  */

static int
btrace_stitch_bts (struct btrace_data_bts *btrace, struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;
  struct btrace_function *last_bfun;
  struct btrace_insn *last_insn;
  btrace_block_s *first_new_block;

  btinfo = &tp->btrace;
  last_bfun = btinfo->end;
  gdb_assert (last_bfun != NULL);
  gdb_assert (!VEC_empty (btrace_block_s, btrace->blocks));

  /* If the existing trace ends with a gap, we just glue the traces
     together.  We need to drop the last (i.e. chronologically first) block
     of the new trace, though, since we can't fill in the start address.  */
  if (VEC_empty (btrace_insn_s, last_bfun->insn))
    {
      VEC_pop (btrace_block_s, btrace->blocks);
      return 0;
    }

  /* Beware that block trace starts with the most recent block, so the
     chronologically first block in the new trace is the last block in
     the new trace's block vector.  */
  first_new_block = VEC_last (btrace_block_s, btrace->blocks);
  last_insn = VEC_last (btrace_insn_s, last_bfun->insn);

  /* If the current PC at the end of the block is the same as in our current
     trace, there are two explanations:
       1. we executed the instruction and some branch brought us back.
       2. we have not made any progress.
     In the first case, the delta trace vector should contain at least two
     entries.
     In the second case, the delta trace vector should contain exactly one
     entry for the partial block containing the current PC.  Remove it.  */
  if (first_new_block->end == last_insn->pc
      && VEC_length (btrace_block_s, btrace->blocks) == 1)
    {
      VEC_pop (btrace_block_s, btrace->blocks);
      return 0;
    }

  DEBUG ("stitching %s to %s", ftrace_print_insn_addr (last_insn),
         core_addr_to_string_nz (first_new_block->end));

  /* Do a simple sanity check to make sure we don't accidentally end up
     with a bad block.  This should not occur in practice.  */
  if (first_new_block->end < last_insn->pc)
    {
      warning (_("Error while trying to read delta trace.  Falling back to "
                 "a full read."));
      return -1;
    }

  /* We adjust the last block to start at the end of our current trace.  */
  gdb_assert (first_new_block->begin == 0);
  first_new_block->begin = last_insn->pc;

  /* We simply pop the last insn so we can insert it again as part of
     the normal branch trace computation.
     Since instruction iterators are based on indices in the instructions
     vector, we don't leave any pointers dangling.  */
  DEBUG ("pruning insn at %s for stitching",
         ftrace_print_insn_addr (last_insn));

  VEC_pop (btrace_insn_s, last_bfun->insn);

  /* The instructions vector may become empty temporarily if this has
     been the only instruction in this function segment.
     This violates the invariant but will be remedied shortly by
     btrace_compute_ftrace when we add the new trace.  */

  /* The only case where this would hurt is if the entire trace consisted
     of just that one instruction.  If we remove it, we might turn the now
     empty btrace function segment into a gap.  But we don't want gaps at
     the beginning.  To avoid this, we remove the entire old trace.  */
  if (last_bfun == btinfo->begin && VEC_empty (btrace_insn_s, last_bfun->insn))
    btrace_clear (tp);

  return 0;
}

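/* Editor's illustrative note: suppose the old trace ends at PC 0x4005d0.
   If the delta read returns a single block whose end equals that PC, no
   progress was made and the block is dropped.  Otherwise the
   chronologically first new block, whose begin address is unknown
   (reported as 0), has its begin patched to 0x4005d0, and the last old
   instruction is popped so it is re-decoded as part of the new trace.
   (The address is made up for illustration.)  */
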
/* Adjust the block trace in order to stitch old and new trace together.
   BTRACE is the new delta trace between the last and the current stop.
   TP is the traced thread.
   May modify BTRACE as well as the existing trace in TP.
   Return 0 on success, -1 otherwise.  */

static int
btrace_stitch_trace (struct btrace_data *btrace, struct thread_info *tp)
{
  /* If we don't have trace, there's nothing to do.  */
  if (btrace_data_empty (btrace))
    return 0;

  switch (btrace->format)
    {
    case BTRACE_FORMAT_NONE:
      return 0;

    case BTRACE_FORMAT_BTS:
      return btrace_stitch_bts (&btrace->variant.bts, tp);

    case BTRACE_FORMAT_PT:
      /* Delta reads are not supported.  */
      return -1;
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
}

/* Clear the branch trace histories in BTINFO.  */

static void
btrace_clear_history (struct btrace_thread_info *btinfo)
{
  xfree (btinfo->insn_history);
  xfree (btinfo->call_history);
  xfree (btinfo->replay);

  btinfo->insn_history = NULL;
  btinfo->call_history = NULL;
  btinfo->replay = NULL;
}

/* Clear the branch trace maintenance histories in BTINFO.  */

static void
btrace_maint_clear (struct btrace_thread_info *btinfo)
{
  switch (btinfo->data.format)
    {
    default:
      break;

    case BTRACE_FORMAT_BTS:
      btinfo->maint.variant.bts.packet_history.begin = 0;
      btinfo->maint.variant.bts.packet_history.end = 0;
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      xfree (btinfo->maint.variant.pt.packets);

      btinfo->maint.variant.pt.packets = NULL;
      btinfo->maint.variant.pt.packet_history.begin = 0;
      btinfo->maint.variant.pt.packet_history.end = 0;
      break;
#endif /* defined (HAVE_LIBIPT) */
    }
}

/* See btrace.h.  */

void
btrace_fetch (struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;
  struct btrace_target_info *tinfo;
  struct btrace_data btrace;
  struct cleanup *cleanup;
  int errcode;

  DEBUG ("fetch thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));

  btinfo = &tp->btrace;
  tinfo = btinfo->target;
  if (tinfo == NULL)
    return;

  /* There's no way we could get new trace while replaying.
     On the other hand, delta trace would return a partial record with the
     current PC, which is the replay PC, not the last PC, as expected.  */
  if (btinfo->replay != NULL)
    return;

  btrace_data_init (&btrace);
  cleanup = make_cleanup_btrace_data (&btrace);

  /* Let's first try to extend the trace we already have.  */
  if (btinfo->end != NULL)
    {
      errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_DELTA);
      if (errcode == 0)
        {
          /* Success.  Let's try to stitch the traces together.  */
          errcode = btrace_stitch_trace (&btrace, tp);
        }
      else
        {
          /* We failed to read delta trace.  Let's try to read new trace.  */
          errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_NEW);

          /* If we got any new trace, discard what we have.  */
          if (errcode == 0 && !btrace_data_empty (&btrace))
            btrace_clear (tp);
        }

      /* If we were not able to read the trace, we start over.  */
      if (errcode != 0)
        {
          btrace_clear (tp);
          errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);
        }
    }
  else
    errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);

  /* If we were not able to read the branch trace, signal an error.  */
  if (errcode != 0)
    error (_("Failed to read branch trace."));

  /* Compute the trace, provided we have any.  */
  if (!btrace_data_empty (&btrace))
    {
      /* Store the raw trace data.  The stored data will be cleared in
         btrace_clear, so we always append the new trace.  */
      btrace_data_append (&btinfo->data, &btrace);
      btrace_maint_clear (btinfo);

      btrace_clear_history (btinfo);
      btrace_compute_ftrace (tp, &btrace);
    }

  do_cleanups (cleanup);
}

/* See btrace.h.  */

void
btrace_clear (struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;
  struct btrace_function *it, *trash;

  DEBUG ("clear thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));

  /* Make sure btrace frames that may hold a pointer into the branch
     trace data are destroyed.  */
  reinit_frame_cache ();

  btinfo = &tp->btrace;

  it = btinfo->begin;
  while (it != NULL)
    {
      trash = it;
      it = it->flow.next;

      xfree (trash);
    }

  btinfo->begin = NULL;
  btinfo->end = NULL;
  btinfo->ngaps = 0;

  /* Must clear the maint data before - it depends on BTINFO->DATA.  */
  btrace_maint_clear (btinfo);
  btrace_data_clear (&btinfo->data);
  btrace_clear_history (btinfo);
}

/* See btrace.h.  */

void
btrace_free_objfile (struct objfile *objfile)
{
  struct thread_info *tp;

  DEBUG ("free objfile");

  ALL_NON_EXITED_THREADS (tp)
    btrace_clear (tp);
}

#if defined (HAVE_LIBEXPAT)

/* Check the btrace document version.  */

static void
check_xml_btrace_version (struct gdb_xml_parser *parser,
                          const struct gdb_xml_element *element,
                          void *user_data, VEC (gdb_xml_value_s) *attributes)
{
  const char *version
    = (const char *) xml_find_attribute (attributes, "version")->value;

  if (strcmp (version, "1.0") != 0)
    gdb_xml_error (parser, _("Unsupported btrace version: \"%s\""), version);
}

/* Parse a btrace "block" xml record.  */

static void
parse_xml_btrace_block (struct gdb_xml_parser *parser,
                        const struct gdb_xml_element *element,
                        void *user_data, VEC (gdb_xml_value_s) *attributes)
{
  struct btrace_data *btrace;
  struct btrace_block *block;
  ULONGEST *begin, *end;

  btrace = (struct btrace_data *) user_data;

  switch (btrace->format)
    {
    case BTRACE_FORMAT_BTS:
      break;

    case BTRACE_FORMAT_NONE:
      btrace->format = BTRACE_FORMAT_BTS;
      btrace->variant.bts.blocks = NULL;
      break;

    default:
      gdb_xml_error (parser, _("Btrace format error."));
    }

  begin = (ULONGEST *) xml_find_attribute (attributes, "begin")->value;
  end = (ULONGEST *) xml_find_attribute (attributes, "end")->value;

  block = VEC_safe_push (btrace_block_s, btrace->variant.bts.blocks, NULL);
  block->begin = *begin;
  block->end = *end;
}

/* Parse a "raw" xml record.  */

static void
parse_xml_raw (struct gdb_xml_parser *parser, const char *body_text,
               gdb_byte **pdata, size_t *psize)
{
  struct cleanup *cleanup;
  gdb_byte *data, *bin;
  size_t len, size;

  len = strlen (body_text);
  if (len % 2 != 0)
    gdb_xml_error (parser, _("Bad raw data size."));

  size = len / 2;

  bin = data = (gdb_byte *) xmalloc (size);
  cleanup = make_cleanup (xfree, data);

  /* We use hex encoding - see common/rsp-low.h.  */
  while (len > 0)
    {
      char hi, lo;

      hi = *body_text++;
      lo = *body_text++;

      if (hi == 0 || lo == 0)
        gdb_xml_error (parser, _("Bad hex encoding."));

      *bin++ = fromhex (hi) * 16 + fromhex (lo);
      len -= 2;
    }

  discard_cleanups (cleanup);

  *pdata = data;
  *psize = size;
}

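/* Editor's illustrative note: the body text "0a1f" decodes to the two
   bytes 0x0a and 0x1f; an odd-length body such as "0a1" is rejected with
   "Bad raw data size.".  */
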
/* Parse a btrace pt-config "cpu" xml record.  */

static void
parse_xml_btrace_pt_config_cpu (struct gdb_xml_parser *parser,
                                const struct gdb_xml_element *element,
                                void *user_data,
                                VEC (gdb_xml_value_s) *attributes)
{
  struct btrace_data *btrace;
  const char *vendor;
  ULONGEST *family, *model, *stepping;

  vendor = (const char *) xml_find_attribute (attributes, "vendor")->value;
  family = (ULONGEST *) xml_find_attribute (attributes, "family")->value;
  model = (ULONGEST *) xml_find_attribute (attributes, "model")->value;
  stepping = (ULONGEST *) xml_find_attribute (attributes, "stepping")->value;

  btrace = (struct btrace_data *) user_data;

  if (strcmp (vendor, "GenuineIntel") == 0)
    btrace->variant.pt.config.cpu.vendor = CV_INTEL;

  btrace->variant.pt.config.cpu.family = *family;
  btrace->variant.pt.config.cpu.model = *model;
  btrace->variant.pt.config.cpu.stepping = *stepping;
}

/* Parse a btrace pt "raw" xml record.  */

static void
parse_xml_btrace_pt_raw (struct gdb_xml_parser *parser,
                         const struct gdb_xml_element *element,
                         void *user_data, const char *body_text)
{
  struct btrace_data *btrace;

  btrace = (struct btrace_data *) user_data;
  parse_xml_raw (parser, body_text, &btrace->variant.pt.data,
                 &btrace->variant.pt.size);
}

/* Parse a btrace "pt" xml record.  */

static void
parse_xml_btrace_pt (struct gdb_xml_parser *parser,
                     const struct gdb_xml_element *element,
                     void *user_data, VEC (gdb_xml_value_s) *attributes)
{
  struct btrace_data *btrace;

  btrace = (struct btrace_data *) user_data;
  btrace->format = BTRACE_FORMAT_PT;
  btrace->variant.pt.config.cpu.vendor = CV_UNKNOWN;
  btrace->variant.pt.data = NULL;
  btrace->variant.pt.size = 0;
}

static const struct gdb_xml_attribute block_attributes[] = {
  { "begin", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { "end", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

static const struct gdb_xml_attribute btrace_pt_config_cpu_attributes[] = {
  { "vendor", GDB_XML_AF_NONE, NULL, NULL },
  { "family", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { "model", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { "stepping", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

static const struct gdb_xml_element btrace_pt_config_children[] = {
  { "cpu", btrace_pt_config_cpu_attributes, NULL, GDB_XML_EF_OPTIONAL,
    parse_xml_btrace_pt_config_cpu, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

static const struct gdb_xml_element btrace_pt_children[] = {
  { "pt-config", NULL, btrace_pt_config_children, GDB_XML_EF_OPTIONAL, NULL,
    NULL },
  { "raw", NULL, NULL, GDB_XML_EF_OPTIONAL, NULL, parse_xml_btrace_pt_raw },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

static const struct gdb_xml_attribute btrace_attributes[] = {
  { "version", GDB_XML_AF_NONE, NULL, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

static const struct gdb_xml_element btrace_children[] = {
  { "block", block_attributes, NULL,
    GDB_XML_EF_REPEATABLE | GDB_XML_EF_OPTIONAL, parse_xml_btrace_block, NULL },
  { "pt", NULL, btrace_pt_children, GDB_XML_EF_OPTIONAL, parse_xml_btrace_pt,
    NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

static const struct gdb_xml_element btrace_elements[] = {
  { "btrace", btrace_attributes, btrace_children, GDB_XML_EF_NONE,
    check_xml_btrace_version, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

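/* Editor's illustrative note: per the element tables above, a BTS document
   accepted by this parser looks like

     <btrace version="1.0">
       <block begin="0x400000" end="0x400010"/>
       <block begin="0x400020" end="0x400028"/>
     </btrace>

   while a PT document carries a <pt> element with optional <pt-config>
   and hex-encoded <raw> children.  (The addresses are made up.)  */
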
#endif /* defined (HAVE_LIBEXPAT) */

/* See btrace.h.  */

void
parse_xml_btrace (struct btrace_data *btrace, const char *buffer)
{
  struct cleanup *cleanup;
  int errcode;

#if defined (HAVE_LIBEXPAT)

  btrace->format = BTRACE_FORMAT_NONE;

  cleanup = make_cleanup_btrace_data (btrace);
  errcode = gdb_xml_parse_quick (_("btrace"), "btrace.dtd", btrace_elements,
                                 buffer, btrace);
  if (errcode != 0)
    error (_("Error parsing branch trace."));

  /* Keep parse results.  */
  discard_cleanups (cleanup);

#else  /* !defined (HAVE_LIBEXPAT) */

  error (_("Cannot process branch trace.  XML parsing is not supported."));

#endif  /* !defined (HAVE_LIBEXPAT) */
}

#if defined (HAVE_LIBEXPAT)

/* Parse a btrace-conf "bts" xml record.  */

static void
parse_xml_btrace_conf_bts (struct gdb_xml_parser *parser,
                           const struct gdb_xml_element *element,
                           void *user_data, VEC (gdb_xml_value_s) *attributes)
{
  struct btrace_config *conf;
  struct gdb_xml_value *size;

  conf = (struct btrace_config *) user_data;
  conf->format = BTRACE_FORMAT_BTS;
  conf->bts.size = 0;

  size = xml_find_attribute (attributes, "size");
  if (size != NULL)
    conf->bts.size = (unsigned int) *(ULONGEST *) size->value;
}

/* Parse a btrace-conf "pt" xml record.  */

static void
parse_xml_btrace_conf_pt (struct gdb_xml_parser *parser,
                          const struct gdb_xml_element *element,
                          void *user_data, VEC (gdb_xml_value_s) *attributes)
{
  struct btrace_config *conf;
  struct gdb_xml_value *size;

  conf = (struct btrace_config *) user_data;
  conf->format = BTRACE_FORMAT_PT;
  conf->pt.size = 0;

  size = xml_find_attribute (attributes, "size");
  if (size != NULL)
    conf->pt.size = (unsigned int) *(ULONGEST *) size->value;
}

static const struct gdb_xml_attribute btrace_conf_pt_attributes[] = {
  { "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

static const struct gdb_xml_attribute btrace_conf_bts_attributes[] = {
  { "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

static const struct gdb_xml_element btrace_conf_children[] = {
  { "bts", btrace_conf_bts_attributes, NULL, GDB_XML_EF_OPTIONAL,
    parse_xml_btrace_conf_bts, NULL },
  { "pt", btrace_conf_pt_attributes, NULL, GDB_XML_EF_OPTIONAL,
    parse_xml_btrace_conf_pt, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

static const struct gdb_xml_attribute btrace_conf_attributes[] = {
  { "version", GDB_XML_AF_NONE, NULL, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

static const struct gdb_xml_element btrace_conf_elements[] = {
  { "btrace-conf", btrace_conf_attributes, btrace_conf_children,
    GDB_XML_EF_NONE, NULL, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

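/* Editor's illustrative note: a configuration document matching the
   tables above could be

     <btrace-conf version="1.0">
       <bts size="65536"/>
     </btrace-conf>

   where the optional size attribute gives the requested trace buffer
   size in bytes (the value is an example).  */
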
#endif /* defined (HAVE_LIBEXPAT) */

/* See btrace.h.  */

void
parse_xml_btrace_conf (struct btrace_config *conf, const char *xml)
{
  int errcode;

#if defined (HAVE_LIBEXPAT)

  errcode = gdb_xml_parse_quick (_("btrace-conf"), "btrace-conf.dtd",
                                 btrace_conf_elements, xml, conf);
  if (errcode != 0)
    error (_("Error parsing branch trace configuration."));

#else  /* !defined (HAVE_LIBEXPAT) */

  error (_("XML parsing is not supported."));

#endif  /* !defined (HAVE_LIBEXPAT) */
}

/* See btrace.h.  */

const struct btrace_insn *
btrace_insn_get (const struct btrace_insn_iterator *it)
{
  const struct btrace_function *bfun;
  unsigned int index, end;

  index = it->index;
  bfun = it->function;

  /* Check if the iterator points to a gap in the trace.  */
  if (bfun->errcode != 0)
    return NULL;

  /* The index is within the bounds of this function's instruction vector.  */
  end = VEC_length (btrace_insn_s, bfun->insn);
  gdb_assert (0 < end);
  gdb_assert (index < end);

  return VEC_index (btrace_insn_s, bfun->insn, index);
}

/* See btrace.h.  */

unsigned int
btrace_insn_number (const struct btrace_insn_iterator *it)
{
  const struct btrace_function *bfun;

  bfun = it->function;

  /* Return zero if the iterator points to a gap in the trace.  */
  if (bfun->errcode != 0)
    return 0;

  return bfun->insn_offset + it->index;
}

/* See btrace.h.  */

void
btrace_insn_begin (struct btrace_insn_iterator *it,
                   const struct btrace_thread_info *btinfo)
{
  const struct btrace_function *bfun;

  bfun = btinfo->begin;
  if (bfun == NULL)
    error (_("No trace."));

  it->function = bfun;
  it->index = 0;
}

/* See btrace.h.  */

void
btrace_insn_end (struct btrace_insn_iterator *it,
                 const struct btrace_thread_info *btinfo)
{
  const struct btrace_function *bfun;
  unsigned int length;

  bfun = btinfo->end;
  if (bfun == NULL)
    error (_("No trace."));

  length = VEC_length (btrace_insn_s, bfun->insn);

  /* The last function may either be a gap or contain the current
     instruction, which is one past the end of the execution trace; ignore
     it.  */
  if (length > 0)
    length -= 1;

  it->function = bfun;
  it->index = length;
}

/* See btrace.h.  */

unsigned int
btrace_insn_next (struct btrace_insn_iterator *it, unsigned int stride)
{
  const struct btrace_function *bfun;
  unsigned int index, steps;

  bfun = it->function;
  steps = 0;
  index = it->index;

  while (stride != 0)
    {
      unsigned int end, space, adv;

      end = VEC_length (btrace_insn_s, bfun->insn);

      /* An empty function segment represents a gap in the trace.  We count
         it as one instruction.  */
      if (end == 0)
        {
          const struct btrace_function *next;

          next = bfun->flow.next;
          if (next == NULL)
            break;

          stride -= 1;
          steps += 1;

          bfun = next;
          index = 0;

          continue;
        }

      gdb_assert (0 < end);
      gdb_assert (index < end);

      /* Compute the number of instructions remaining in this segment.  */
      space = end - index;

      /* Advance the iterator as far as possible within this segment.  */
      adv = min (space, stride);
      stride -= adv;
      index += adv;
      steps += adv;

      /* Move to the next function if we're at the end of this one.  */
      if (index == end)
        {
          const struct btrace_function *next;

          next = bfun->flow.next;
          if (next == NULL)
            {
              /* We stepped past the last function.

                 Let's adjust the index to point to the last instruction in
                 the previous function.  */
              index -= 1;
              steps -= 1;
              break;
            }

          /* We now point to the first instruction in the new function.  */
          bfun = next;
          index = 0;
        }

      /* We did make progress.  */
      gdb_assert (adv > 0);
    }

  /* Update the iterator.  */
  it->function = bfun;
  it->index = index;

  return steps;
}

/* See btrace.h.  */

unsigned int
btrace_insn_prev (struct btrace_insn_iterator *it, unsigned int stride)
{
  const struct btrace_function *bfun;
  unsigned int index, steps;

  bfun = it->function;
  steps = 0;
  index = it->index;

  while (stride != 0)
    {
      unsigned int adv;

      /* Move to the previous function if we're at the start of this one.  */
      if (index == 0)
        {
          const struct btrace_function *prev;

          prev = bfun->flow.prev;
          if (prev == NULL)
            break;

          /* We point to one after the last instruction in the new function.  */
          bfun = prev;
          index = VEC_length (btrace_insn_s, bfun->insn);

          /* An empty function segment represents a gap in the trace.  We count
             it as one instruction.  */
          if (index == 0)
            {
              stride -= 1;
              steps += 1;

              continue;
            }
        }

      /* Advance the iterator as far as possible within this segment.  */
      adv = min (index, stride);

      stride -= adv;
      index -= adv;
      steps += adv;

      /* We did make progress.  */
      gdb_assert (adv > 0);
    }

  /* Update the iterator.  */
  it->function = bfun;
  it->index = index;

  return steps;
}

/* See btrace.h.  */

int
btrace_insn_cmp (const struct btrace_insn_iterator *lhs,
                 const struct btrace_insn_iterator *rhs)
{
  unsigned int lnum, rnum;

  lnum = btrace_insn_number (lhs);
  rnum = btrace_insn_number (rhs);

  /* A gap has an instruction number of zero.  Things get more complicated
     when gaps are involved.

     We take the instruction number offset from the iterator's function.
     This is the number of the first instruction after the gap.

     This is OK as long as both lhs and rhs point to gaps.  If only one of
     them does, we need to adjust the number based on the other's regular
     instruction number.  Otherwise, a gap might compare equal to an
     instruction.  */

  if (lnum == 0 && rnum == 0)
    {
      lnum = lhs->function->insn_offset;
      rnum = rhs->function->insn_offset;
    }
  else if (lnum == 0)
    {
      lnum = lhs->function->insn_offset;

      if (lnum == rnum)
        lnum -= 1;
    }
  else if (rnum == 0)
    {
      rnum = rhs->function->insn_offset;

      if (rnum == lnum)
        rnum -= 1;
    }

  return (int) (lnum - rnum);
}

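/* Editor's illustrative note: if LHS points to a gap that precedes the
   instruction numbered 10 (insn_offset 10) and RHS points to that
   instruction itself, the gap's effective number is adjusted to 9, so
   the comparison yields "LHS before RHS" rather than equality.  */
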
1966 /* See btrace.h. */
1967
1968 int
1969 btrace_find_insn_by_number (struct btrace_insn_iterator *it,
1970 const struct btrace_thread_info *btinfo,
1971 unsigned int number)
1972 {
1973 const struct btrace_function *bfun;
1974 unsigned int end, length;
1975
1976 for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
1977 {
1978 /* Skip gaps. */
1979 if (bfun->errcode != 0)
1980 continue;
1981
1982 if (bfun->insn_offset <= number)
1983 break;
1984 }
1985
1986 if (bfun == NULL)
1987 return 0;
1988
1989 length = VEC_length (btrace_insn_s, bfun->insn);
1990 gdb_assert (length > 0);
1991
1992 end = bfun->insn_offset + length;
1993 if (end <= number)
1994 return 0;
1995
1996 it->function = bfun;
1997 it->index = number - bfun->insn_offset;
1998
1999 return 1;
2000 }
2001
2002 /* See btrace.h. */
2003
2004 const struct btrace_function *
2005 btrace_call_get (const struct btrace_call_iterator *it)
2006 {
2007 return it->function;
2008 }
2009
2010 /* See btrace.h. */
2011
2012 unsigned int
2013 btrace_call_number (const struct btrace_call_iterator *it)
2014 {
2015 const struct btrace_thread_info *btinfo;
2016 const struct btrace_function *bfun;
2017 unsigned int insns;
2018
2019 btinfo = it->btinfo;
2020 bfun = it->function;
2021 if (bfun != NULL)
2022 return bfun->number;
2023
2024 /* For the end iterator, i.e. bfun == NULL, we return one more than the
2025 number of the last function. */
2026 bfun = btinfo->end;
2027 insns = VEC_length (btrace_insn_s, bfun->insn);
2028
2029 /* If the function contains only a single instruction (i.e. the current
2030 instruction), it will be skipped and its number is already the number
2031 we seek. */
2032 if (insns == 1)
2033 return bfun->number;
2034
2035 /* Otherwise, return one more than the number of the last function. */
2036 return bfun->number + 1;
2037 }
2038
2039 /* See btrace.h. */
2040
2041 void
2042 btrace_call_begin (struct btrace_call_iterator *it,
2043 const struct btrace_thread_info *btinfo)
2044 {
2045 const struct btrace_function *bfun;
2046
2047 bfun = btinfo->begin;
2048 if (bfun == NULL)
2049 error (_("No trace."));
2050
2051 it->btinfo = btinfo;
2052 it->function = bfun;
2053 }
2054
2055 /* See btrace.h. */
2056
2057 void
2058 btrace_call_end (struct btrace_call_iterator *it,
2059 const struct btrace_thread_info *btinfo)
2060 {
2061 const struct btrace_function *bfun;
2062
2063 bfun = btinfo->end;
2064 if (bfun == NULL)
2065 error (_("No trace."));
2066
2067 it->btinfo = btinfo;
2068 it->function = NULL;
2069 }
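
/* An illustrative sketch: obtain a pair of iterators spanning the
   whole call history.  Both functions throw "No trace." if the thread
   has no branch trace.

     struct btrace_call_iterator begin, end;

     btrace_call_begin (&begin, btinfo);
     btrace_call_end (&end, btinfo);  */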
2070
2071 /* See btrace.h. */
2072
2073 unsigned int
2074 btrace_call_next (struct btrace_call_iterator *it, unsigned int stride)
2075 {
2076 const struct btrace_function *bfun;
2077 unsigned int steps;
2078
2079 bfun = it->function;
2080 steps = 0;
2081 while (bfun != NULL)
2082 {
2083 const struct btrace_function *next;
2084 unsigned int insns;
2085
2086 next = bfun->flow.next;
2087 if (next == NULL)
2088 {
2089 /* Ignore the last function if it only contains a single
2090 (i.e. the current) instruction. */
2091 insns = VEC_length (btrace_insn_s, bfun->insn);
2092 if (insns == 1)
2093 steps -= 1;
2094 }
2095
2096 if (stride == steps)
2097 break;
2098
2099 bfun = next;
2100 steps += 1;
2101 }
2102
2103 it->function = bfun;
2104 return steps;
2105 }
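
/* An illustrative sketch, assuming BEGIN and END were set up as above
   and PROCESS is a hypothetical callback: visit every function segment
   in the call history.

     struct btrace_call_iterator it;

     for (it = begin; btrace_call_cmp (&it, &end) < 0;
          btrace_call_next (&it, 1))
       process (btrace_call_get (&it));

   btrace_call_cmp is defined below.  */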
2106
2107 /* See btrace.h. */
2108
2109 unsigned int
2110 btrace_call_prev (struct btrace_call_iterator *it, unsigned int stride)
2111 {
2112 const struct btrace_thread_info *btinfo;
2113 const struct btrace_function *bfun;
2114 unsigned int steps;
2115
2116 bfun = it->function;
2117 steps = 0;
2118
2119 if (bfun == NULL)
2120 {
2121 unsigned int insns;
2122
2123 btinfo = it->btinfo;
2124 bfun = btinfo->end;
2125 if (bfun == NULL)
2126 return 0;
2127
2128 /* Ignore the last function if it only contains a single
2129 (i.e. the current) instruction. */
2130 insns = VEC_length (btrace_insn_s, bfun->insn);
2131 if (insns == 1)
2132 bfun = bfun->flow.prev;
2133
2134 if (bfun == NULL)
2135 return 0;
2136
2137 steps += 1;
2138 }
2139
2140 while (steps < stride)
2141 {
2142 const struct btrace_function *prev;
2143
2144 prev = bfun->flow.prev;
2145 if (prev == NULL)
2146 break;
2147
2148 bfun = prev;
2149 steps += 1;
2150 }
2151
2152 it->function = bfun;
2153 return steps;
2154 }
2155
2156 /* See btrace.h. */
2157
2158 int
2159 btrace_call_cmp (const struct btrace_call_iterator *lhs,
2160 const struct btrace_call_iterator *rhs)
2161 {
2162 unsigned int lnum, rnum;
2163
2164 lnum = btrace_call_number (lhs);
2165 rnum = btrace_call_number (rhs);
2166
2167 return (int) (lnum - rnum);
2168 }
2169
2170 /* See btrace.h. */
2171
2172 int
2173 btrace_find_call_by_number (struct btrace_call_iterator *it,
2174 const struct btrace_thread_info *btinfo,
2175 unsigned int number)
2176 {
2177 const struct btrace_function *bfun;
2178
2179 for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
2180 {
2181 unsigned int bnum;
2182
2183 bnum = bfun->number;
2184 if (number == bnum)
2185 {
2186 it->btinfo = btinfo;
2187 it->function = bfun;
2188 return 1;
2189 }
2190
2191 /* Functions are ordered and numbered consecutively, so we could bail
2192 out as soon as BNUM drops below NUMBER. On the other hand, it is very
2193 unlikely that we search for a nonexistent function. */
2194 }
2195
2196 return 0;
2197 }
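
/* An illustrative sketch (the error message is hypothetical): jump
   directly to function call number N.

     struct btrace_call_iterator it;

     if (btrace_find_call_by_number (&it, btinfo, n) == 0)
       error (_("No function call with number %u."), n);  */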
2198
2199 /* See btrace.h. */
2200
2201 void
2202 btrace_set_insn_history (struct btrace_thread_info *btinfo,
2203 const struct btrace_insn_iterator *begin,
2204 const struct btrace_insn_iterator *end)
2205 {
2206 if (btinfo->insn_history == NULL)
2207 btinfo->insn_history = XCNEW (struct btrace_insn_history);
2208
2209 btinfo->insn_history->begin = *begin;
2210 btinfo->insn_history->end = *end;
2211 }
2212
2213 /* See btrace.h. */
2214
2215 void
2216 btrace_set_call_history (struct btrace_thread_info *btinfo,
2217 const struct btrace_call_iterator *begin,
2218 const struct btrace_call_iterator *end)
2219 {
2220 gdb_assert (begin->btinfo == end->btinfo);
2221
2222 if (btinfo->call_history == NULL)
2223 btinfo->call_history = XCNEW (struct btrace_call_history);
2224
2225 btinfo->call_history->begin = *begin;
2226 btinfo->call_history->end = *end;
2227 }
2228
2229 /* See btrace.h. */
2230
2231 int
2232 btrace_is_replaying (struct thread_info *tp)
2233 {
2234 return tp->btrace.replay != NULL;
2235 }
2236
2237 /* See btrace.h. */
2238
2239 int
2240 btrace_is_empty (struct thread_info *tp)
2241 {
2242 struct btrace_insn_iterator begin, end;
2243 struct btrace_thread_info *btinfo;
2244
2245 btinfo = &tp->btrace;
2246
2247 if (btinfo->begin == NULL)
2248 return 1;
2249
2250 btrace_insn_begin (&begin, btinfo);
2251 btrace_insn_end (&end, btinfo);
2252
2253 return btrace_insn_cmp (&begin, &end) == 0;
2254 }
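
/* An illustrative sketch: guard a history command against missing or
   empty trace data before iterating.

     if (btrace_is_empty (tp))
       error (_("No trace."));

   The message mirrors the one thrown by btrace_call_begin above.  */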
2255
2256 /* Forward the cleanup request. */
2257
2258 static void
2259 do_btrace_data_cleanup (void *arg)
2260 {
2261 btrace_data_fini ((struct btrace_data *) arg);
2262 }
2263
2264 /* See btrace.h. */
2265
2266 struct cleanup *
2267 make_cleanup_btrace_data (struct btrace_data *data)
2268 {
2269 return make_cleanup (do_btrace_data_cleanup, data);
2270 }
2271
2272 #if defined (HAVE_LIBIPT)
2273
2274 /* Print a single packet. */
2275
2276 static void
2277 pt_print_packet (const struct pt_packet *packet)
2278 {
2279 switch (packet->type)
2280 {
2281 default:
2282 printf_unfiltered (("[??: %x]"), packet->type);
2283 break;
2284
2285 case ppt_psb:
2286 printf_unfiltered (("psb"));
2287 break;
2288
2289 case ppt_psbend:
2290 printf_unfiltered (("psbend"));
2291 break;
2292
2293 case ppt_pad:
2294 printf_unfiltered (("pad"));
2295 break;
2296
2297 case ppt_tip:
2298 printf_unfiltered (("tip %u: 0x%" PRIx64 ""),
2299 packet->payload.ip.ipc,
2300 packet->payload.ip.ip);
2301 break;
2302
2303 case ppt_tip_pge:
2304 printf_unfiltered (("tip.pge %u: 0x%" PRIx64 ""),
2305 packet->payload.ip.ipc,
2306 packet->payload.ip.ip);
2307 break;
2308
2309 case ppt_tip_pgd:
2310 printf_unfiltered (("tip.pgd %u: 0x%" PRIx64 ""),
2311 packet->payload.ip.ipc,
2312 packet->payload.ip.ip);
2313 break;
2314
2315 case ppt_fup:
2316 printf_unfiltered (("fup %u: 0x%" PRIx64 ""),
2317 packet->payload.ip.ipc,
2318 packet->payload.ip.ip);
2319 break;
2320
2321 case ppt_tnt_8:
2322 printf_unfiltered (("tnt-8 %u: 0x%" PRIx64 ""),
2323 packet->payload.tnt.bit_size,
2324 packet->payload.tnt.payload);
2325 break;
2326
2327 case ppt_tnt_64:
2328 printf_unfiltered (("tnt-64 %u: 0x%" PRIx64 ""),
2329 packet->payload.tnt.bit_size,
2330 packet->payload.tnt.payload);
2331 break;
2332
2333 case ppt_pip:
2334 printf_unfiltered (("pip %" PRIx64 "%s"), packet->payload.pip.cr3,
2335 packet->payload.pip.nr ? (" nr") : (""));
2336 break;
2337
2338 case ppt_tsc:
2339 printf_unfiltered (("tsc %" PRIx64 ""), packet->payload.tsc.tsc);
2340 break;
2341
2342 case ppt_cbr:
2343 printf_unfiltered (("cbr %u"), packet->payload.cbr.ratio);
2344 break;
2345
2346 case ppt_mode:
2347 switch (packet->payload.mode.leaf)
2348 {
2349 default:
2350 printf_unfiltered (("mode %u"), packet->payload.mode.leaf);
2351 break;
2352
2353 case pt_mol_exec:
2354 printf_unfiltered (("mode.exec%s%s"),
2355 packet->payload.mode.bits.exec.csl
2356 ? (" cs.l") : (""),
2357 packet->payload.mode.bits.exec.csd
2358 ? (" cs.d") : (""));
2359 break;
2360
2361 case pt_mol_tsx:
2362 printf_unfiltered (("mode.tsx%s%s"),
2363 packet->payload.mode.bits.tsx.intx
2364 ? (" intx") : (""),
2365 packet->payload.mode.bits.tsx.abrt
2366 ? (" abrt") : (""));
2367 break;
2368 }
2369 break;
2370
2371 case ppt_ovf:
2372 printf_unfiltered (("ovf"));
2373 break;
2374
2375 case ppt_stop:
2376 printf_unfiltered (("stop"));
2377 break;
2378
2379 case ppt_vmcs:
2380 printf_unfiltered (("vmcs %" PRIx64 ""), packet->payload.vmcs.base);
2381 break;
2382
2383 case ppt_tma:
2384 printf_unfiltered (("tma %x %x"), packet->payload.tma.ctc,
2385 packet->payload.tma.fc);
2386 break;
2387
2388 case ppt_mtc:
2389 printf_unfiltered (("mtc %x"), packet->payload.mtc.ctc);
2390 break;
2391
2392 case ppt_cyc:
2393 printf_unfiltered (("cyc %" PRIx64 ""), packet->payload.cyc.value);
2394 break;
2395
2396 case ppt_mnt:
2397 printf_unfiltered (("mnt %" PRIx64 ""), packet->payload.mnt.payload);
2398 break;
2399 }
2400 }
2401
2402 /* Decode packets into MAINT using DECODER. */
2403
2404 static void
2405 btrace_maint_decode_pt (struct btrace_maint_info *maint,
2406 struct pt_packet_decoder *decoder)
2407 {
2408 int errcode;
2409
2410 for (;;)
2411 {
2412 struct btrace_pt_packet packet;
2413
2414 errcode = pt_pkt_sync_forward (decoder);
2415 if (errcode < 0)
2416 break;
2417
2418 for (;;)
2419 {
2420 pt_pkt_get_offset (decoder, &packet.offset);
2421
2422 errcode = pt_pkt_next (decoder, &packet.packet,
2423 sizeof (packet.packet));
2424 if (errcode < 0)
2425 break;
2426
2427 if (maint_btrace_pt_skip_pad == 0 || packet.packet.type != ppt_pad)
2428 {
2429 packet.errcode = pt_errcode (errcode);
2430 VEC_safe_push (btrace_pt_packet_s, maint->variant.pt.packets,
2431 &packet);
2432 }
2433 }
2434
2435 if (errcode == -pte_eos)
2436 break;
2437
2438 packet.errcode = pt_errcode (errcode);
2439 VEC_safe_push (btrace_pt_packet_s, maint->variant.pt.packets,
2440 &packet);
2441
2442 warning (_("Error at trace offset 0x%" PRIx64 ": %s."),
2443 packet.offset, pt_errstr (packet.errcode));
2444 }
2445
2446 if (errcode != -pte_eos)
2447 warning (_("Failed to synchronize onto the Intel Processor Trace "
2448 "stream: %s."), pt_errstr (pt_errcode (errcode)));
2449 }
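
/* The loop above follows the usual libipt packet decoder pattern:
   synchronize onto the next PSB with pt_pkt_sync_forward, then drain
   packets with pt_pkt_next.  A skeleton sketch, with PKT being a
   struct pt_packet:

     while (pt_pkt_sync_forward (decoder) >= 0)
       while (pt_pkt_next (decoder, &pkt, sizeof (pkt)) >= 0)
         ...

   -pte_eos signals the end of the trace buffer; any other decode error
   is recorded as an error packet and decoding resumes at the next
   synchronization point.  */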
2450
2451 /* Update the packet history in BTINFO. */
2452
2453 static void
2454 btrace_maint_update_pt_packets (struct btrace_thread_info *btinfo)
2455 {
2457 struct pt_packet_decoder *decoder;
2458 struct btrace_data_pt *pt;
2459 struct pt_config config;
2460 int errcode;
2461
2462 pt = &btinfo->data.variant.pt;
2463
2464 /* Nothing to do if there is no trace. */
2465 if (pt->size == 0)
2466 return;
2467
2468 memset (&config, 0, sizeof (config));
2469
2470 config.size = sizeof (config);
2471 config.begin = pt->data;
2472 config.end = pt->data + pt->size;
2473
2474 config.cpu.vendor = pt_translate_cpu_vendor (pt->config.cpu.vendor);
2475 config.cpu.family = pt->config.cpu.family;
2476 config.cpu.model = pt->config.cpu.model;
2477 config.cpu.stepping = pt->config.cpu.stepping;
2478
2479 errcode = pt_cpu_errata (&config.errata, &config.cpu);
2480 if (errcode < 0)
2481 error (_("Failed to configure the Intel Processor Trace decoder: %s."),
2482 pt_errstr (pt_errcode (errcode)));
2483
2484 decoder = pt_pkt_alloc_decoder (&config);
2485 if (decoder == NULL)
2486 error (_("Failed to allocate the Intel Processor Trace decoder."));
2487
2488 TRY
2489 {
2490 btrace_maint_decode_pt (&btinfo->maint, decoder);
2491 }
2492 CATCH (except, RETURN_MASK_ALL)
2493 {
2494 pt_pkt_free_decoder (decoder);
2495
2496 if (except.reason < 0)
2497 throw_exception (except);
2498 }
2499 END_CATCH
2500
2501 pt_pkt_free_decoder (decoder);
2502 }
2503
2504 #endif /* defined (HAVE_LIBIPT) */
2505
2506 /* Update the packet maintenance information for BTINFO and store the
2507 low and high bounds into BEGIN and END, respectively.
2508 Store the current iterator state into FROM and TO. */
2509
2510 static void
2511 btrace_maint_update_packets (struct btrace_thread_info *btinfo,
2512 unsigned int *begin, unsigned int *end,
2513 unsigned int *from, unsigned int *to)
2514 {
2515 switch (btinfo->data.format)
2516 {
2517 default:
2518 *begin = 0;
2519 *end = 0;
2520 *from = 0;
2521 *to = 0;
2522 break;
2523
2524 case BTRACE_FORMAT_BTS:
2525 /* Nothing to do - we operate directly on BTINFO->DATA. */
2526 *begin = 0;
2527 *end = VEC_length (btrace_block_s, btinfo->data.variant.bts.blocks);
2528 *from = btinfo->maint.variant.bts.packet_history.begin;
2529 *to = btinfo->maint.variant.bts.packet_history.end;
2530 break;
2531
2532 #if defined (HAVE_LIBIPT)
2533 case BTRACE_FORMAT_PT:
2534 if (VEC_empty (btrace_pt_packet_s, btinfo->maint.variant.pt.packets))
2535 btrace_maint_update_pt_packets (btinfo);
2536
2537 *begin = 0;
2538 *end = VEC_length (btrace_pt_packet_s, btinfo->maint.variant.pt.packets);
2539 *from = btinfo->maint.variant.pt.packet_history.begin;
2540 *to = btinfo->maint.variant.pt.packet_history.end;
2541 break;
2542 #endif /* defined (HAVE_LIBIPT) */
2543 }
2544 }
2545
2546 /* Print packets in BTINFO from BEGIN (inclusive) until END (exclusive) and
2547 update the current iterator position. */
2548
2549 static void
2550 btrace_maint_print_packets (struct btrace_thread_info *btinfo,
2551 unsigned int begin, unsigned int end)
2552 {
2553 switch (btinfo->data.format)
2554 {
2555 default:
2556 break;
2557
2558 case BTRACE_FORMAT_BTS:
2559 {
2560 VEC (btrace_block_s) *blocks;
2561 unsigned int blk;
2562
2563 blocks = btinfo->data.variant.bts.blocks;
2564 for (blk = begin; blk < end; ++blk)
2565 {
2566 const btrace_block_s *block;
2567
2568 block = VEC_index (btrace_block_s, blocks, blk);
2569
2570 printf_unfiltered ("%u\tbegin: %s, end: %s\n", blk,
2571 core_addr_to_string_nz (block->begin),
2572 core_addr_to_string_nz (block->end));
2573 }
2574
2575 btinfo->maint.variant.bts.packet_history.begin = begin;
2576 btinfo->maint.variant.bts.packet_history.end = end;
2577 }
2578 break;
2579
2580 #if defined (HAVE_LIBIPT)
2581 case BTRACE_FORMAT_PT:
2582 {
2583 VEC (btrace_pt_packet_s) *packets;
2584 unsigned int pkt;
2585
2586 packets = btinfo->maint.variant.pt.packets;
2587 for (pkt = begin; pkt < end; ++pkt)
2588 {
2589 const struct btrace_pt_packet *packet;
2590
2591 packet = VEC_index (btrace_pt_packet_s, packets, pkt);
2592
2593 printf_unfiltered ("%u\t", pkt);
2594 printf_unfiltered ("0x%" PRIx64 "\t", packet->offset);
2595
2596 if (packet->errcode == pte_ok)
2597 pt_print_packet (&packet->packet);
2598 else
2599 printf_unfiltered ("[error: %s]", pt_errstr (packet->errcode));
2600
2601 printf_unfiltered ("\n");
2602 }
2603
2604 btinfo->maint.variant.pt.packet_history.begin = begin;
2605 btinfo->maint.variant.pt.packet_history.end = end;
2606 }
2607 break;
2608 #endif /* defined (HAVE_LIBIPT) */
2609 }
2610 }
2611
2612 /* Read a number from an argument string. */
2613
2614 static unsigned int
2615 get_uint (char **arg)
2616 {
2617 char *begin, *end, *pos;
2618 unsigned long number;
2619
2620 begin = *arg;
2621 pos = skip_spaces (begin);
2622
2623 if (!isdigit (*pos))
2624 error (_("Expected positive number, got: %s."), pos);
2625
2626 number = strtoul (pos, &end, 10);
2627 if (number > UINT_MAX)
2628 error (_("Number too big."));
2629
2630 *arg += (end - begin);
2631
2632 return (unsigned int) number;
2633 }
2634
2635 /* Read a context size from an argument string. */
2636
2637 static int
2638 get_context_size (char **arg)
2639 {
2640 char *pos;
2642
2643 pos = skip_spaces (*arg);
2644
2645 if (!isdigit (*pos))
2646 error (_("Expected positive number, got: %s."), pos);
2647
2648 return strtol (pos, arg, 10);
2649 }
2650
2651 /* Complain about junk at the end of an argument string. */
2652
2653 static void
2654 no_chunk (char *arg)
2655 {
2656 if (*arg != 0)
2657 error (_("Junk after argument: %s."), arg);
2658 }
2659
2660 /* The "maintenance btrace packet-history" command. */
2661
2662 static void
2663 maint_btrace_packet_history_cmd (char *arg, int from_tty)
2664 {
2665 struct btrace_thread_info *btinfo;
2666 struct thread_info *tp;
2667 unsigned int size, begin, end, from, to;
2668
2669 tp = find_thread_ptid (inferior_ptid);
2670 if (tp == NULL)
2671 error (_("No thread."));
2672
2673 size = 10;
2674 btinfo = &tp->btrace;
2675
2676 btrace_maint_update_packets (btinfo, &begin, &end, &from, &to);
2677 if (begin == end)
2678 {
2679 printf_unfiltered (_("No trace.\n"));
2680 return;
2681 }
2682
2683 if (arg == NULL || *arg == 0 || strcmp (arg, "+") == 0)
2684 {
2685 from = to;
2686
2687 if (end - from < size)
2688 size = end - from;
2689 to = from + size;
2690 }
2691 else if (strcmp (arg, "-") == 0)
2692 {
2693 to = from;
2694
2695 if (to - begin < size)
2696 size = to - begin;
2697 from = to - size;
2698 }
2699 else
2700 {
2701 from = get_uint (&arg);
2702 if (end <= from)
2703 error (_("'%u' is out of range."), from);
2704
2705 arg = skip_spaces (arg);
2706 if (*arg == ',')
2707 {
2708 arg = skip_spaces (++arg);
2709
2710 if (*arg == '+')
2711 {
2712 arg += 1;
2713 size = get_context_size (&arg);
2714
2715 no_chunk (arg);
2716
2717 if (end - from < size)
2718 size = end - from;
2719 to = from + size;
2720 }
2721 else if (*arg == '-')
2722 {
2723 arg += 1;
2724 size = get_context_size (&arg);
2725
2726 no_chunk (arg);
2727
2728 /* Include the packet given as first argument. */
2729 from += 1;
2730 to = from;
2731
2732 if (to - begin < size)
2733 size = to - begin;
2734 from = to - size;
2735 }
2736 else
2737 {
2738 to = get_uint (&arg);
2739
2740 /* Include the packet at the second argument and silently
2741 truncate the range. */
2742 if (to < end)
2743 to += 1;
2744 else
2745 to = end;
2746
2747 no_chunk (arg);
2748 }
2749 }
2750 else
2751 {
2752 no_chunk (arg);
2753
2754 if (end - from < size)
2755 size = end - from;
2756 to = from + size;
2757 }
2758
2759 dont_repeat ();
2760 }
2761
2762 btrace_maint_print_packets (btinfo, from, to);
2763 }
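
/* Illustrative invocations (packet numbers are hypothetical):

     (gdb) maint btrace packet-history           ten packets after the last print
     (gdb) maint btrace packet-history -         ten packets before the last print
     (gdb) maint btrace packet-history 100       ten packets starting at packet 100
     (gdb) maint btrace packet-history 100,120   packets 100 up to and including 120
     (gdb) maint btrace packet-history 100,+20   twenty packets starting at packet 100
     (gdb) maint btrace packet-history 100,-20   twenty packets ending at packet 100  */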
2764
2765 /* The "maintenance btrace clear-packet-history" command. */
2766
2767 static void
2768 maint_btrace_clear_packet_history_cmd (char *args, int from_tty)
2769 {
2770 struct btrace_thread_info *btinfo;
2771 struct thread_info *tp;
2772
2773 if (args != NULL && *args != 0)
2774 error (_("Invalid argument."));
2775
2776 tp = find_thread_ptid (inferior_ptid);
2777 if (tp == NULL)
2778 error (_("No thread."));
2779
2780 btinfo = &tp->btrace;
2781
2782 /* Must clear the maint data first; it depends on BTINFO->DATA. */
2783 btrace_maint_clear (btinfo);
2784 btrace_data_clear (&btinfo->data);
2785 }
2786
2787 /* The "maintenance btrace clear" command. */
2788
2789 static void
2790 maint_btrace_clear_cmd (char *args, int from_tty)
2791 {
2792 struct btrace_thread_info *btinfo;
2793 struct thread_info *tp;
2794
2795 if (args != NULL && *args != 0)
2796 error (_("Invalid argument."));
2797
2798 tp = find_thread_ptid (inferior_ptid);
2799 if (tp == NULL)
2800 error (_("No thread."));
2801
2802 btrace_clear (tp);
2803 }
2804
2805 /* The "maintenance btrace" command. */
2806
2807 static void
2808 maint_btrace_cmd (char *args, int from_tty)
2809 {
2810 help_list (maint_btrace_cmdlist, "maintenance btrace ", all_commands,
2811 gdb_stdout);
2812 }
2813
2814 /* The "maintenance set btrace" command. */
2815
2816 static void
2817 maint_btrace_set_cmd (char *args, int from_tty)
2818 {
2819 help_list (maint_btrace_set_cmdlist, "maintenance set btrace ", all_commands,
2820 gdb_stdout);
2821 }
2822
2823 /* The "maintenance show btrace" command. */
2824
2825 static void
2826 maint_btrace_show_cmd (char *args, int from_tty)
2827 {
2828 help_list (maint_btrace_show_cmdlist, "maintenance show btrace ",
2829 all_commands, gdb_stdout);
2830 }
2831
2832 /* The "maintenance set btrace pt" command. */
2833
2834 static void
2835 maint_btrace_pt_set_cmd (char *args, int from_tty)
2836 {
2837 help_list (maint_btrace_pt_set_cmdlist, "maintenance set btrace pt ",
2838 all_commands, gdb_stdout);
2839 }
2840
2841 /* The "maintenance show btrace pt" command. */
2842
2843 static void
2844 maint_btrace_pt_show_cmd (char *args, int from_tty)
2845 {
2846 help_list (maint_btrace_pt_show_cmdlist, "maintenance show btrace pt ",
2847 all_commands, gdb_stdout);
2848 }
2849
2850 /* The "maintenance info btrace" command. */
2851
2852 static void
2853 maint_info_btrace_cmd (char *args, int from_tty)
2854 {
2855 struct btrace_thread_info *btinfo;
2856 struct thread_info *tp;
2857 const struct btrace_config *conf;
2858
2859 if (args != NULL && *args != 0)
2860 error (_("Invalid argument."));
2861
2862 tp = find_thread_ptid (inferior_ptid);
2863 if (tp == NULL)
2864 error (_("No thread."));
2865
2866 btinfo = &tp->btrace;
2867
2868 conf = btrace_conf (btinfo);
2869 if (conf == NULL)
2870 error (_("No btrace configuration."));
2871
2872 printf_unfiltered (_("Format: %s.\n"),
2873 btrace_format_string (conf->format));
2874
2875 switch (conf->format)
2876 {
2877 default:
2878 break;
2879
2880 case BTRACE_FORMAT_BTS:
2881 printf_unfiltered (_("Number of packets: %u.\n"),
2882 VEC_length (btrace_block_s,
2883 btinfo->data.variant.bts.blocks));
2884 break;
2885
2886 #if defined (HAVE_LIBIPT)
2887 case BTRACE_FORMAT_PT:
2888 {
2889 struct pt_version version;
2890
2891 version = pt_library_version ();
2892 printf_unfiltered (_("Version: %u.%u.%u%s.\n"), version.major,
2893 version.minor, version.build,
2894 version.ext != NULL ? version.ext : "");
2895
2896 btrace_maint_update_pt_packets (btinfo);
2897 printf_unfiltered (_("Number of packets: %u.\n"),
2898 VEC_length (btrace_pt_packet_s,
2899 btinfo->maint.variant.pt.packets));
2900 }
2901 break;
2902 #endif /* defined (HAVE_LIBIPT) */
2903 }
2904 }
2905
2906 /* The "maint show btrace pt skip-pad" show value function. */
2907
2908 static void
2909 show_maint_btrace_pt_skip_pad (struct ui_file *file, int from_tty,
2910 struct cmd_list_element *c,
2911 const char *value)
2912 {
2913 fprintf_filtered (file, _("Skip PAD packets is %s.\n"), value);
2914 }
2915
2916
2917 /* Initialize btrace maintenance commands. */
2918
2919 void _initialize_btrace (void);
2920 void
2921 _initialize_btrace (void)
2922 {
2923 add_cmd ("btrace", class_maintenance, maint_info_btrace_cmd,
2924 _("Info about branch tracing data."), &maintenanceinfolist);
2925
2926 add_prefix_cmd ("btrace", class_maintenance, maint_btrace_cmd,
2927 _("Branch tracing maintenance commands."),
2928 &maint_btrace_cmdlist, "maintenance btrace ",
2929 0, &maintenancelist);
2930
2931 add_prefix_cmd ("btrace", class_maintenance, maint_btrace_set_cmd, _("\
2932 Set branch tracing specific variables."),
2933 &maint_btrace_set_cmdlist, "maintenance set btrace ",
2934 0, &maintenance_set_cmdlist);
2935
2936 add_prefix_cmd ("pt", class_maintenance, maint_btrace_pt_set_cmd, _("\
2937 Set Intel Processor Trace specific variables."),
2938 &maint_btrace_pt_set_cmdlist, "maintenance set btrace pt ",
2939 0, &maint_btrace_set_cmdlist);
2940
2941 add_prefix_cmd ("btrace", class_maintenance, maint_btrace_show_cmd, _("\
2942 Show branch tracing specific variables."),
2943 &maint_btrace_show_cmdlist, "maintenance show btrace ",
2944 0, &maintenance_show_cmdlist);
2945
2946 add_prefix_cmd ("pt", class_maintenance, maint_btrace_pt_show_cmd, _("\
2947 Show Intel Processor Trace specific variables."),
2948 &maint_btrace_pt_show_cmdlist, "maintenance show btrace pt ",
2949 0, &maint_btrace_show_cmdlist);
2950
2951 add_setshow_boolean_cmd ("skip-pad", class_maintenance,
2952 &maint_btrace_pt_skip_pad, _("\
2953 Set whether PAD packets should be skipped in the btrace packet history."), _("\
2954 Show whether PAD packets should be skipped in the btrace packet history."), _("\
2955 When enabled, PAD packets are ignored in the btrace packet history."),
2956 NULL, show_maint_btrace_pt_skip_pad,
2957 &maint_btrace_pt_set_cmdlist,
2958 &maint_btrace_pt_show_cmdlist);
2959
2960 add_cmd ("packet-history", class_maintenance, maint_btrace_packet_history_cmd,
2961 _("Print the raw branch tracing data.\n\
2962 With no argument, print ten more packets after the previous ten-line print.\n\
2963 With '-' as argument, print ten packets before the previous ten-line print.\n\
2964 One argument specifies the starting packet of a ten-line print.\n\
2965 Two arguments separated by a comma specify the starting and ending packets \
2966 to print.\n\
2967 If the second argument is preceded by '+' or '-', it specifies the distance \
2968 from the first argument.\n"),
2969 &maint_btrace_cmdlist);
2970
2971 add_cmd ("clear-packet-history", class_maintenance,
2972 maint_btrace_clear_packet_history_cmd,
2973 _("Clears the branch tracing packet history.\n\
2974 Discards the raw branch tracing data but not the execution history data.\n\
2975 "),
2976 &maint_btrace_cmdlist);
2977
2978 add_cmd ("clear", class_maintenance, maint_btrace_clear_cmd,
2979 _("Clears the branch tracing data.\n\
2980 Discards the raw branch tracing data and the execution history data.\n\
2981 The next 'record' command will fetch the branch tracing data anew.\n\
2982 "),
2983 &maint_btrace_cmdlist);
2984
2985 }