btrace: add format argument to supports_btrace
[deliverable/binutils-gdb.git] / gdb / btrace.c
1 /* Branch trace support for GDB, the GNU debugger.
2
3 Copyright (C) 2013-2015 Free Software Foundation, Inc.
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include "btrace.h"
24 #include "gdbthread.h"
25 #include "inferior.h"
26 #include "target.h"
27 #include "record.h"
28 #include "symtab.h"
29 #include "disasm.h"
30 #include "source.h"
31 #include "filenames.h"
32 #include "xml-support.h"
33 #include "regcache.h"
34
/* Print a record debug message.  Use do ... while (0) to avoid ambiguities
   when used in if statements.  Only prints when the "record" debug flag
   (record_debug) is non-zero; output goes to gdb_stdlog with a "[btrace]"
   prefix.  */

#define DEBUG(msg, args...) \
  do \
    { \
      if (record_debug != 0) \
        fprintf_unfiltered (gdb_stdlog, \
                            "[btrace] " msg "\n", ##args); \
    } \
  while (0)

/* Like DEBUG, but with an additional "[ftrace]" prefix for messages about
   function-trace (call history) computation.  */

#define DEBUG_FTRACE(msg, args...) DEBUG ("[ftrace] " msg, ##args)
48
49 /* Return the function name of a recorded function segment for printing.
50 This function never returns NULL. */
51
52 static const char *
53 ftrace_print_function_name (const struct btrace_function *bfun)
54 {
55 struct minimal_symbol *msym;
56 struct symbol *sym;
57
58 msym = bfun->msym;
59 sym = bfun->sym;
60
61 if (sym != NULL)
62 return SYMBOL_PRINT_NAME (sym);
63
64 if (msym != NULL)
65 return MSYMBOL_PRINT_NAME (msym);
66
67 return "<unknown>";
68 }
69
70 /* Return the file name of a recorded function segment for printing.
71 This function never returns NULL. */
72
73 static const char *
74 ftrace_print_filename (const struct btrace_function *bfun)
75 {
76 struct symbol *sym;
77 const char *filename;
78
79 sym = bfun->sym;
80
81 if (sym != NULL)
82 filename = symtab_to_filename_for_display (symbol_symtab (sym));
83 else
84 filename = "<unknown>";
85
86 return filename;
87 }
88
89 /* Return a string representation of the address of an instruction.
90 This function never returns NULL. */
91
92 static const char *
93 ftrace_print_insn_addr (const struct btrace_insn *insn)
94 {
95 if (insn == NULL)
96 return "<nil>";
97
98 return core_addr_to_string_nz (insn->pc);
99 }
100
101 /* Print an ftrace debug status message. */
102
103 static void
104 ftrace_debug (const struct btrace_function *bfun, const char *prefix)
105 {
106 const char *fun, *file;
107 unsigned int ibegin, iend;
108 int lbegin, lend, level;
109
110 fun = ftrace_print_function_name (bfun);
111 file = ftrace_print_filename (bfun);
112 level = bfun->level;
113
114 lbegin = bfun->lbegin;
115 lend = bfun->lend;
116
117 ibegin = bfun->insn_offset;
118 iend = ibegin + VEC_length (btrace_insn_s, bfun->insn);
119
120 DEBUG_FTRACE ("%s: fun = %s, file = %s, level = %d, lines = [%d; %d], "
121 "insn = [%u; %u)", prefix, fun, file, level, lbegin, lend,
122 ibegin, iend);
123 }
124
/* Return non-zero if BFUN does not match MFUN and FUN,
   return zero otherwise.
   A mismatch means the instruction stream has moved into a different
   function than the one BFUN was created for.  Either symbol may be
   NULL on either side; gaining or losing symbol information also
   counts as a switch.  */

static int
ftrace_function_switched (const struct btrace_function *bfun,
                          const struct minimal_symbol *mfun,
                          const struct symbol *fun)
{
  struct minimal_symbol *msym;
  struct symbol *sym;

  msym = bfun->msym;
  sym = bfun->sym;

  /* If the minimal symbol changed, we certainly switched functions.  */
  if (mfun != NULL && msym != NULL
      && strcmp (MSYMBOL_LINKAGE_NAME (mfun), MSYMBOL_LINKAGE_NAME (msym)) != 0)
    return 1;

  /* If the symbol changed, we certainly switched functions.  */
  if (fun != NULL && sym != NULL)
    {
      const char *bfname, *fname;

      /* Check the function name.  */
      if (strcmp (SYMBOL_LINKAGE_NAME (fun), SYMBOL_LINKAGE_NAME (sym)) != 0)
        return 1;

      /* Check the location of those functions, as well.  Identically-named
         static functions in different files are distinct functions.  */
      bfname = symtab_to_fullname (symbol_symtab (sym));
      fname = symtab_to_fullname (symbol_symtab (fun));
      if (filename_cmp (fname, bfname) != 0)
        return 1;
    }

  /* If we lost symbol information, we switched functions.  */
  if (!(msym == NULL && sym == NULL) && mfun == NULL && fun == NULL)
    return 1;

  /* If we gained symbol information, we switched functions.  */
  if (msym == NULL && sym == NULL && !(mfun == NULL && fun == NULL))
    return 1;

  /* Otherwise assume we are still in the same function.  */
  return 0;
}
170
171 /* Return non-zero if we should skip this file when generating the function
172 call history, zero otherwise.
173 We would want to do that if, say, a macro that is defined in another file
174 is expanded in this function. */
175
176 static int
177 ftrace_skip_file (const struct btrace_function *bfun, const char *fullname)
178 {
179 struct symbol *sym;
180 const char *bfile;
181
182 sym = bfun->sym;
183 if (sym == NULL)
184 return 1;
185
186 bfile = symtab_to_fullname (symbol_symtab (sym));
187
188 return (filename_cmp (bfile, fullname) != 0);
189 }
190
/* Allocate and initialize a new branch trace function segment.
   PREV is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function.
   The new segment is appended to the flow chain after PREV; its segment
   number and instruction offset continue PREV's numbering (both are
   1-based).  Returns the newly allocated segment (owned by the trace;
   freed in btrace_clear).  */

static struct btrace_function *
ftrace_new_function (struct btrace_function *prev,
                     struct minimal_symbol *mfun,
                     struct symbol *fun)
{
  struct btrace_function *bfun;

  bfun = xzalloc (sizeof (*bfun));

  bfun->msym = mfun;
  bfun->sym = fun;
  bfun->flow.prev = prev;

  /* We start with the identities of min and max, respectively.  */
  bfun->lbegin = INT_MAX;
  bfun->lend = INT_MIN;

  if (prev == NULL)
    {
      /* Start counting at one.  */
      bfun->number = 1;
      bfun->insn_offset = 1;
    }
  else
    {
      /* PREV must be the most recent segment; we only append at the end.  */
      gdb_assert (prev->flow.next == NULL);
      prev->flow.next = bfun;

      bfun->number = prev->number + 1;
      bfun->insn_offset = (prev->insn_offset
                           + VEC_length (btrace_insn_s, prev->insn));
    }

  return bfun;
}
230
231 /* Update the UP field of a function segment. */
232
233 static void
234 ftrace_update_caller (struct btrace_function *bfun,
235 struct btrace_function *caller,
236 enum btrace_function_flag flags)
237 {
238 if (bfun->up != NULL)
239 ftrace_debug (bfun, "updating caller");
240
241 bfun->up = caller;
242 bfun->flags = flags;
243
244 ftrace_debug (bfun, "set caller");
245 }
246
247 /* Fix up the caller for all segments of a function. */
248
249 static void
250 ftrace_fixup_caller (struct btrace_function *bfun,
251 struct btrace_function *caller,
252 enum btrace_function_flag flags)
253 {
254 struct btrace_function *prev, *next;
255
256 ftrace_update_caller (bfun, caller, flags);
257
258 /* Update all function segments belonging to the same function. */
259 for (prev = bfun->segment.prev; prev != NULL; prev = prev->segment.prev)
260 ftrace_update_caller (prev, caller, flags);
261
262 for (next = bfun->segment.next; next != NULL; next = next->segment.next)
263 ftrace_update_caller (next, caller, flags);
264 }
265
266 /* Add a new function segment for a call.
267 CALLER is the chronologically preceding function segment.
268 MFUN and FUN are the symbol information we have for this function. */
269
270 static struct btrace_function *
271 ftrace_new_call (struct btrace_function *caller,
272 struct minimal_symbol *mfun,
273 struct symbol *fun)
274 {
275 struct btrace_function *bfun;
276
277 bfun = ftrace_new_function (caller, mfun, fun);
278 bfun->up = caller;
279 bfun->level = caller->level + 1;
280
281 ftrace_debug (bfun, "new call");
282
283 return bfun;
284 }
285
286 /* Add a new function segment for a tail call.
287 CALLER is the chronologically preceding function segment.
288 MFUN and FUN are the symbol information we have for this function. */
289
290 static struct btrace_function *
291 ftrace_new_tailcall (struct btrace_function *caller,
292 struct minimal_symbol *mfun,
293 struct symbol *fun)
294 {
295 struct btrace_function *bfun;
296
297 bfun = ftrace_new_function (caller, mfun, fun);
298 bfun->up = caller;
299 bfun->level = caller->level + 1;
300 bfun->flags |= BFUN_UP_LINKS_TO_TAILCALL;
301
302 ftrace_debug (bfun, "new tail call");
303
304 return bfun;
305 }
306
307 /* Find the innermost caller in the back trace of BFUN with MFUN/FUN
308 symbol information. */
309
310 static struct btrace_function *
311 ftrace_find_caller (struct btrace_function *bfun,
312 struct minimal_symbol *mfun,
313 struct symbol *fun)
314 {
315 for (; bfun != NULL; bfun = bfun->up)
316 {
317 /* Skip functions with incompatible symbol information. */
318 if (ftrace_function_switched (bfun, mfun, fun))
319 continue;
320
321 /* This is the function segment we're looking for. */
322 break;
323 }
324
325 return bfun;
326 }
327
328 /* Find the innermost caller in the back trace of BFUN, skipping all
329 function segments that do not end with a call instruction (e.g.
330 tail calls ending with a jump). */
331
332 static struct btrace_function *
333 ftrace_find_call (struct gdbarch *gdbarch, struct btrace_function *bfun)
334 {
335 for (; bfun != NULL; bfun = bfun->up)
336 {
337 struct btrace_insn *last;
338 CORE_ADDR pc;
339
340 /* We do not allow empty function segments. */
341 gdb_assert (!VEC_empty (btrace_insn_s, bfun->insn));
342
343 last = VEC_last (btrace_insn_s, bfun->insn);
344 pc = last->pc;
345
346 if (gdbarch_insn_is_call (gdbarch, pc))
347 break;
348 }
349
350 return bfun;
351 }
352
/* Add a continuation segment for a function into which we return.
   PREV is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function.
   Returns the new segment.  If the matching caller segment is found,
   the new segment is linked into that function's segment chain and
   inherits its level and call stack; otherwise we heuristically repair
   the levels (see below).  */

static struct btrace_function *
ftrace_new_return (struct gdbarch *gdbarch,
                   struct btrace_function *prev,
                   struct minimal_symbol *mfun,
                   struct symbol *fun)
{
  struct btrace_function *bfun, *caller;

  bfun = ftrace_new_function (prev, mfun, fun);

  /* It is important to start at PREV's caller.  Otherwise, we might find
     PREV itself, if PREV is a recursive function.  */
  caller = ftrace_find_caller (prev->up, mfun, fun);
  if (caller != NULL)
    {
      /* The caller of PREV is the preceding btrace function segment in this
         function instance.  */
      gdb_assert (caller->segment.next == NULL);

      caller->segment.next = bfun;
      bfun->segment.prev = caller;

      /* Maintain the function level.  */
      bfun->level = caller->level;

      /* Maintain the call stack.  */
      bfun->up = caller->up;
      bfun->flags = caller->flags;

      ftrace_debug (bfun, "new return");
    }
  else
    {
      /* We did not find a caller.  This could mean that something went
         wrong or that the call is simply not included in the trace.  */

      /* Let's search for some actual call.  */
      caller = ftrace_find_call (gdbarch, prev->up);
      if (caller == NULL)
        {
          /* There is no call in PREV's back trace.  We assume that the
             branch trace did not include it.  */

          /* Let's find the topmost call function - this skips tail calls.  */
          while (prev->up != NULL)
            prev = prev->up;

          /* We maintain levels for a series of returns for which we have
             not seen the calls.
             We start at the preceding function's level in case this has
             already been a return for which we have not seen the call.
             We start at level 0 otherwise, to handle tail calls correctly.  */
          bfun->level = min (0, prev->level) - 1;

          /* Fix up the call stack for PREV: all its segments now link up
             to the new segment via a return edge.  */
          ftrace_fixup_caller (prev, bfun, BFUN_UP_LINKS_TO_RET);

          ftrace_debug (bfun, "new return - no caller");
        }
      else
        {
          /* There is a call in PREV's back trace to which we should have
             returned.  Let's remain at this level.  */
          bfun->level = prev->level;

          ftrace_debug (bfun, "new return - unknown caller");
        }
    }

  return bfun;
}
428
429 /* Add a new function segment for a function switch.
430 PREV is the chronologically preceding function segment.
431 MFUN and FUN are the symbol information we have for this function. */
432
433 static struct btrace_function *
434 ftrace_new_switch (struct btrace_function *prev,
435 struct minimal_symbol *mfun,
436 struct symbol *fun)
437 {
438 struct btrace_function *bfun;
439
440 /* This is an unexplained function switch. The call stack will likely
441 be wrong at this point. */
442 bfun = ftrace_new_function (prev, mfun, fun);
443
444 /* We keep the function level. */
445 bfun->level = prev->level;
446
447 ftrace_debug (bfun, "new switch");
448
449 return bfun;
450 }
451
/* Update BFUN with respect to the instruction at PC.  This may create new
   function segments.
   Return the chronologically latest function segment, never NULL.
   The checks are ordered: returns and calls are detected from the last
   recorded instruction first, since they let us maintain call stack
   links; only then do we fall back to symbol-based switch detection.  */

static struct btrace_function *
ftrace_update_function (struct gdbarch *gdbarch,
                        struct btrace_function *bfun, CORE_ADDR pc)
{
  struct bound_minimal_symbol bmfun;
  struct minimal_symbol *mfun;
  struct symbol *fun;
  struct btrace_insn *last;

  /* Try to determine the function we're in.  We use both types of symbols
     to avoid surprises when we sometimes get a full symbol and sometimes
     only a minimal symbol.  */
  fun = find_pc_function (pc);
  bmfun = lookup_minimal_symbol_by_pc (pc);
  mfun = bmfun.minsym;

  if (fun == NULL && mfun == NULL)
    DEBUG_FTRACE ("no symbol at %s", core_addr_to_string_nz (pc));

  /* If we didn't have a function before, we create one.  */
  if (bfun == NULL)
    return ftrace_new_function (bfun, mfun, fun);

  /* Check the last instruction, if we have one.
     We do this check first, since it allows us to fill in the call stack
     links in addition to the normal flow links.  */
  last = NULL;
  if (!VEC_empty (btrace_insn_s, bfun->insn))
    last = VEC_last (btrace_insn_s, bfun->insn);

  if (last != NULL)
    {
      CORE_ADDR lpc;

      lpc = last->pc;

      /* Check for returns.  */
      if (gdbarch_insn_is_ret (gdbarch, lpc))
        return ftrace_new_return (gdbarch, bfun, mfun, fun);

      /* Check for calls.  */
      if (gdbarch_insn_is_call (gdbarch, lpc))
        {
          int size;

          size = gdb_insn_length (gdbarch, lpc);

          /* Ignore calls to the next instruction.  They are used for PIC.  */
          if (lpc + size != pc)
            return ftrace_new_call (bfun, mfun, fun);
        }
    }

  /* Check if we're switching functions for some other reason.  */
  if (ftrace_function_switched (bfun, mfun, fun))
    {
      DEBUG_FTRACE ("switching from %s in %s at %s",
                    ftrace_print_insn_addr (last),
                    ftrace_print_function_name (bfun),
                    ftrace_print_filename (bfun));

      if (last != NULL)
        {
          CORE_ADDR start, lpc;

          start = get_pc_function_start (pc);

          /* If we can't determine the function for PC, we treat a jump at
             the end of the block as tail call.  */
          if (start == 0)
            start = pc;

          lpc = last->pc;

          /* Jumps indicate optimized tail calls.  PC must be the first
             instruction of the new function for this to apply.  */
          if (start == pc && gdbarch_insn_is_jump (gdbarch, lpc))
            return ftrace_new_tailcall (bfun, mfun, fun);
        }

      /* Otherwise record an unexplained switch.  */
      return ftrace_new_switch (bfun, mfun, fun);
    }

  /* Still in the same function segment.  */
  return bfun;
}
540
541 /* Update BFUN's source range with respect to the instruction at PC. */
542
543 static void
544 ftrace_update_lines (struct btrace_function *bfun, CORE_ADDR pc)
545 {
546 struct symtab_and_line sal;
547 const char *fullname;
548
549 sal = find_pc_line (pc, 0);
550 if (sal.symtab == NULL || sal.line == 0)
551 {
552 DEBUG_FTRACE ("no lines at %s", core_addr_to_string_nz (pc));
553 return;
554 }
555
556 /* Check if we switched files. This could happen if, say, a macro that
557 is defined in another file is expanded here. */
558 fullname = symtab_to_fullname (sal.symtab);
559 if (ftrace_skip_file (bfun, fullname))
560 {
561 DEBUG_FTRACE ("ignoring file at %s, file=%s",
562 core_addr_to_string_nz (pc), fullname);
563 return;
564 }
565
566 /* Update the line range. */
567 bfun->lbegin = min (bfun->lbegin, sal.line);
568 bfun->lend = max (bfun->lend, sal.line);
569
570 if (record_debug > 1)
571 ftrace_debug (bfun, "update lines");
572 }
573
574 /* Add the instruction at PC to BFUN's instructions. */
575
576 static void
577 ftrace_update_insns (struct btrace_function *bfun, CORE_ADDR pc)
578 {
579 struct btrace_insn *insn;
580
581 insn = VEC_safe_push (btrace_insn_s, bfun->insn, NULL);
582 insn->pc = pc;
583
584 if (record_debug > 1)
585 ftrace_debug (bfun, "update insn");
586 }
587
/* Compute the function branch trace from BTS trace.
   BTINFO holds the existing trace, if any; new segments are appended
   after BTINFO->end.  BTRACE's blocks are processed in reverse vector
   order since block trace starts with the most recent block.  On
   completion, BTINFO->level is the negated minimum function level so
   that all levels are normalized to start at zero.  */

static void
btrace_compute_ftrace_bts (struct btrace_thread_info *btinfo,
                           const struct btrace_data_bts *btrace)
{
  struct btrace_function *begin, *end;
  struct gdbarch *gdbarch;
  unsigned int blk;
  int level;

  gdbarch = target_gdbarch ();
  begin = btinfo->begin;
  end = btinfo->end;
  /* Resume from the existing level offset, or start from "no minimum"
     when there is no trace yet.  */
  level = begin != NULL ? -btinfo->level : INT_MAX;
  blk = VEC_length (btrace_block_s, btrace->blocks);

  while (blk != 0)
    {
      btrace_block_s *block;
      CORE_ADDR pc;

      blk -= 1;

      block = VEC_index (btrace_block_s, btrace->blocks, blk);
      pc = block->begin;

      /* Walk the instructions of this block from begin to end.  */
      for (;;)
        {
          int size;

          /* We should hit the end of the block.  Warn if we went too far.  */
          if (block->end < pc)
            {
              warning (_("Recorded trace may be corrupted around %s."),
                       core_addr_to_string_nz (pc));
              break;
            }

          end = ftrace_update_function (gdbarch, end, pc);
          if (begin == NULL)
            begin = end;

          /* Maintain the function level offset.
             For all but the last block, we do it here.  */
          if (blk != 0)
            level = min (level, end->level);

          ftrace_update_insns (end, pc);
          ftrace_update_lines (end, pc);

          /* We're done once we pushed the instruction at the end.  */
          if (block->end == pc)
            break;

          size = gdb_insn_length (gdbarch, pc);

          /* Make sure we terminate if we fail to compute the size.  */
          if (size <= 0)
            {
              warning (_("Recorded trace may be incomplete around %s."),
                       core_addr_to_string_nz (pc));
              break;
            }

          pc += size;

          /* Maintain the function level offset.
             For the last block, we do it here to not consider the last
             instruction.
             Since the last instruction corresponds to the current instruction
             and is not really part of the execution history, it shouldn't
             affect the level.  */
          if (blk == 0)
            level = min (level, end->level);
        }
    }

  btinfo->begin = begin;
  btinfo->end = end;

  /* LEVEL is the minimal function level of all btrace function segments.
     Define the global level offset to -LEVEL so all function levels are
     normalized to start at zero.  */
  btinfo->level = -level;
}
674
675 /* Compute the function branch trace from a block branch trace BTRACE for
676 a thread given by BTINFO. */
677
678 static void
679 btrace_compute_ftrace (struct btrace_thread_info *btinfo,
680 struct btrace_data *btrace)
681 {
682 DEBUG ("compute ftrace");
683
684 switch (btrace->format)
685 {
686 case BTRACE_FORMAT_NONE:
687 return;
688
689 case BTRACE_FORMAT_BTS:
690 btrace_compute_ftrace_bts (btinfo, &btrace->variant.bts);
691 return;
692 }
693
694 internal_error (__FILE__, __LINE__, _("Unkown branch trace format."));
695 }
696
/* Add an entry for the current PC.
   Builds a minimal one-block BTS trace covering just the current PC of
   TP and feeds it into the ftrace computation, so tracing starts from
   the point where it was enabled.  */

static void
btrace_add_pc (struct thread_info *tp)
{
  struct btrace_data btrace;
  struct btrace_block *block;
  struct regcache *regcache;
  struct cleanup *cleanup;
  CORE_ADDR pc;

  regcache = get_thread_regcache (tp->ptid);
  pc = regcache_read_pc (regcache);

  btrace_data_init (&btrace);
  btrace.format = BTRACE_FORMAT_BTS;
  btrace.variant.bts.blocks = NULL;

  /* Free the temporary trace data on any exit path, including errors
     thrown by the computation below.  */
  cleanup = make_cleanup_btrace_data (&btrace);

  /* A degenerate block that begins and ends at the current PC.  */
  block = VEC_safe_push (btrace_block_s, btrace.variant.bts.blocks, NULL);
  block->begin = pc;
  block->end = pc;

  btrace_compute_ftrace (&tp->btrace, &btrace);

  do_cleanups (cleanup);
}
725
726 /* See btrace.h. */
727
728 void
729 btrace_enable (struct thread_info *tp)
730 {
731 if (tp->btrace.target != NULL)
732 return;
733
734 if (!target_supports_btrace (BTRACE_FORMAT_BTS))
735 error (_("Target does not support branch tracing."));
736
737 DEBUG ("enable thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
738
739 tp->btrace.target = target_enable_btrace (tp->ptid);
740
741 /* Add an entry for the current PC so we start tracing from where we
742 enabled it. */
743 if (tp->btrace.target != NULL)
744 btrace_add_pc (tp);
745 }
746
747 /* See btrace.h. */
748
749 void
750 btrace_disable (struct thread_info *tp)
751 {
752 struct btrace_thread_info *btp = &tp->btrace;
753 int errcode = 0;
754
755 if (btp->target == NULL)
756 return;
757
758 DEBUG ("disable thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
759
760 target_disable_btrace (btp->target);
761 btp->target = NULL;
762
763 btrace_clear (tp);
764 }
765
766 /* See btrace.h. */
767
768 void
769 btrace_teardown (struct thread_info *tp)
770 {
771 struct btrace_thread_info *btp = &tp->btrace;
772 int errcode = 0;
773
774 if (btp->target == NULL)
775 return;
776
777 DEBUG ("teardown thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
778
779 target_teardown_btrace (btp->target);
780 btp->target = NULL;
781
782 btrace_clear (tp);
783 }
784
/* Stitch branch trace in BTS format.
   BTRACE is the new delta trace; BTINFO holds the existing trace ending
   at BTINFO->end.  Adjusts BTRACE so that btrace_compute_ftrace can
   append it seamlessly to the existing trace.
   Return 0 on success, -1 if the delta trace looks inconsistent and the
   caller should fall back to a full read.  */

static int
btrace_stitch_bts (struct btrace_data_bts *btrace,
                   const struct btrace_thread_info *btinfo)
{
  struct btrace_function *last_bfun;
  struct btrace_insn *last_insn;
  btrace_block_s *first_new_block;

  last_bfun = btinfo->end;
  gdb_assert (last_bfun != NULL);

  /* Beware that block trace starts with the most recent block, so the
     chronologically first block in the new trace is the last block in
     the new trace's block vector.  */
  gdb_assert (!VEC_empty (btrace_block_s, btrace->blocks));
  first_new_block = VEC_last (btrace_block_s, btrace->blocks);
  last_insn = VEC_last (btrace_insn_s, last_bfun->insn);

  /* If the current PC at the end of the block is the same as in our current
     trace, there are two explanations:
       1. we executed the instruction and some branch brought us back.
       2. we have not made any progress.
     In the first case, the delta trace vector should contain at least two
     entries.
     In the second case, the delta trace vector should contain exactly one
     entry for the partial block containing the current PC.  Remove it.  */
  if (first_new_block->end == last_insn->pc
      && VEC_length (btrace_block_s, btrace->blocks) == 1)
    {
      VEC_pop (btrace_block_s, btrace->blocks);
      return 0;
    }

  DEBUG ("stitching %s to %s", ftrace_print_insn_addr (last_insn),
         core_addr_to_string_nz (first_new_block->end));

  /* Do a simple sanity check to make sure we don't accidentally end up
     with a bad block.  This should not occur in practice.  */
  if (first_new_block->end < last_insn->pc)
    {
      warning (_("Error while trying to read delta trace.  Falling back to "
                 "a full read."));
      return -1;
    }

  /* We adjust the last block to start at the end of our current trace.  */
  gdb_assert (first_new_block->begin == 0);
  first_new_block->begin = last_insn->pc;

  /* We simply pop the last insn so we can insert it again as part of
     the normal branch trace computation.
     Since instruction iterators are based on indices in the instructions
     vector, we don't leave any pointers dangling.  */
  DEBUG ("pruning insn at %s for stitching",
         ftrace_print_insn_addr (last_insn));

  VEC_pop (btrace_insn_s, last_bfun->insn);

  /* The instructions vector may become empty temporarily if this has
     been the only instruction in this function segment.
     This violates the invariant but will be remedied shortly by
     btrace_compute_ftrace when we add the new trace.  */
  return 0;
}
851
852 /* Adjust the block trace in order to stitch old and new trace together.
853 BTRACE is the new delta trace between the last and the current stop.
854 BTINFO is the old branch trace until the last stop.
855 May modifx BTRACE as well as the existing trace in BTINFO.
856 Return 0 on success, -1 otherwise. */
857
858 static int
859 btrace_stitch_trace (struct btrace_data *btrace,
860 const struct btrace_thread_info *btinfo)
861 {
862 /* If we don't have trace, there's nothing to do. */
863 if (btrace_data_empty (btrace))
864 return 0;
865
866 switch (btrace->format)
867 {
868 case BTRACE_FORMAT_NONE:
869 return 0;
870
871 case BTRACE_FORMAT_BTS:
872 return btrace_stitch_bts (&btrace->variant.bts, btinfo);
873 }
874
875 internal_error (__FILE__, __LINE__, _("Unkown branch trace format."));
876 }
877
878 /* Clear the branch trace histories in BTINFO. */
879
880 static void
881 btrace_clear_history (struct btrace_thread_info *btinfo)
882 {
883 xfree (btinfo->insn_history);
884 xfree (btinfo->call_history);
885 xfree (btinfo->replay);
886
887 btinfo->insn_history = NULL;
888 btinfo->call_history = NULL;
889 btinfo->replay = NULL;
890 }
891
/* See btrace.h.
   Reads new branch trace for TP from the target.  Tries a delta read
   first, then a full new read, then a complete re-read; signals an
   error if all attempts fail.  */

void
btrace_fetch (struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;
  struct btrace_target_info *tinfo;
  struct btrace_data btrace;
  struct cleanup *cleanup;
  int errcode;

  DEBUG ("fetch thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));

  btinfo = &tp->btrace;
  tinfo = btinfo->target;
  /* Nothing to fetch if recording is not enabled for this thread.  */
  if (tinfo == NULL)
    return;

  /* There's no way we could get new trace while replaying.
     On the other hand, delta trace would return a partial record with the
     current PC, which is the replay PC, not the last PC, as expected.  */
  if (btinfo->replay != NULL)
    return;

  btrace_data_init (&btrace);
  cleanup = make_cleanup_btrace_data (&btrace);

  /* Let's first try to extend the trace we already have.  */
  if (btinfo->end != NULL)
    {
      errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_DELTA);
      if (errcode == 0)
        {
          /* Success.  Let's try to stitch the traces together.  */
          errcode = btrace_stitch_trace (&btrace, btinfo);
        }
      else
        {
          /* We failed to read delta trace.  Let's try to read new trace.  */
          errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_NEW);

          /* If we got any new trace, discard what we have.  */
          if (errcode == 0 && !btrace_data_empty (&btrace))
            btrace_clear (tp);
        }

      /* If we were not able to read the trace, we start over.  */
      if (errcode != 0)
        {
          btrace_clear (tp);
          errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);
        }
    }
  else
    errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);

  /* If we were not able to read the branch trace, signal an error.  */
  if (errcode != 0)
    error (_("Failed to read branch trace."));

  /* Compute the trace, provided we have any.  */
  if (!btrace_data_empty (&btrace))
    {
      /* The histories are indices into the old trace; invalidate them
         before recomputing.  */
      btrace_clear_history (btinfo);
      btrace_compute_ftrace (btinfo, &btrace);
    }

  do_cleanups (cleanup);
}
961
962 /* See btrace.h. */
963
964 void
965 btrace_clear (struct thread_info *tp)
966 {
967 struct btrace_thread_info *btinfo;
968 struct btrace_function *it, *trash;
969
970 DEBUG ("clear thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
971
972 /* Make sure btrace frames that may hold a pointer into the branch
973 trace data are destroyed. */
974 reinit_frame_cache ();
975
976 btinfo = &tp->btrace;
977
978 it = btinfo->begin;
979 while (it != NULL)
980 {
981 trash = it;
982 it = it->flow.next;
983
984 xfree (trash);
985 }
986
987 btinfo->begin = NULL;
988 btinfo->end = NULL;
989
990 btrace_clear_history (btinfo);
991 }
992
/* See btrace.h.
   Branch trace segments keep pointers to symbols (bfun->sym/msym) that
   may belong to OBJFILE; clear every live thread's trace rather than
   keep stale references.  */

void
btrace_free_objfile (struct objfile *objfile)
{
  struct thread_info *tp;

  DEBUG ("free objfile");

  ALL_NON_EXITED_THREADS (tp)
    btrace_clear (tp);
}
1005
1006 #if defined (HAVE_LIBEXPAT)
1007
1008 /* Check the btrace document version. */
1009
1010 static void
1011 check_xml_btrace_version (struct gdb_xml_parser *parser,
1012 const struct gdb_xml_element *element,
1013 void *user_data, VEC (gdb_xml_value_s) *attributes)
1014 {
1015 const char *version = xml_find_attribute (attributes, "version")->value;
1016
1017 if (strcmp (version, "1.0") != 0)
1018 gdb_xml_error (parser, _("Unsupported btrace version: \"%s\""), version);
1019 }
1020
/* Parse a btrace "block" xml record.
   USER_DATA is the struct btrace_data being filled in.  The first block
   element lazily switches the data from BTRACE_FORMAT_NONE to
   BTRACE_FORMAT_BTS; any other pre-existing format is an error.  */

static void
parse_xml_btrace_block (struct gdb_xml_parser *parser,
                        const struct gdb_xml_element *element,
                        void *user_data, VEC (gdb_xml_value_s) *attributes)
{
  struct btrace_data *btrace;
  struct btrace_block *block;
  ULONGEST *begin, *end;

  btrace = user_data;

  switch (btrace->format)
    {
    case BTRACE_FORMAT_BTS:
      break;

    case BTRACE_FORMAT_NONE:
      /* First block: initialize the BTS variant.  */
      btrace->format = BTRACE_FORMAT_BTS;
      btrace->variant.bts.blocks = NULL;
      break;

    default:
      gdb_xml_error (parser, _("Btrace format error."));
    }

  /* Both attributes are required by the DTD, so the lookups succeed.  */
  begin = xml_find_attribute (attributes, "begin")->value;
  end = xml_find_attribute (attributes, "end")->value;

  block = VEC_safe_push (btrace_block_s, btrace->variant.bts.blocks, NULL);
  block->begin = *begin;
  block->end = *end;
}
1055
/* Attributes of a btrace "block" element: required begin/end addresses.  */

static const struct gdb_xml_attribute block_attributes[] = {
  { "begin", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { "end", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

/* Attributes of the top-level "btrace" element.  */

static const struct gdb_xml_attribute btrace_attributes[] = {
  { "version", GDB_XML_AF_NONE, NULL, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

/* Child elements of "btrace": zero or more "block" records.  */

static const struct gdb_xml_element btrace_children[] = {
  { "block", block_attributes, NULL,
    GDB_XML_EF_REPEATABLE | GDB_XML_EF_OPTIONAL, parse_xml_btrace_block, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

/* Top-level element table for the btrace document.  */

static const struct gdb_xml_element btrace_elements[] = {
  { "btrace", btrace_attributes, btrace_children, GDB_XML_EF_NONE,
    check_xml_btrace_version, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};
1078
1079 #endif /* defined (HAVE_LIBEXPAT) */
1080
/* See btrace.h.
   Parses BUFFER (a btrace XML document) into BTRACE.  Errors if XML
   support was not compiled in or if parsing fails.  */

void
parse_xml_btrace (struct btrace_data *btrace, const char *buffer)
{
  struct cleanup *cleanup;
  int errcode;

#if defined (HAVE_LIBEXPAT)

  btrace->format = BTRACE_FORMAT_NONE;

  /* Free any partially-parsed data if parsing errors out below.  */
  cleanup = make_cleanup_btrace_data (btrace);
  errcode = gdb_xml_parse_quick (_("btrace"), "btrace.dtd", btrace_elements,
                                 buffer, btrace);
  if (errcode != 0)
    error (_("Error parsing branch trace."));

  /* Keep parse results.  */
  discard_cleanups (cleanup);

#else /* !defined (HAVE_LIBEXPAT) */

  error (_("Cannot process branch trace. XML parsing is not supported."));

#endif /* !defined (HAVE_LIBEXPAT) */
}
1108
1109 /* See btrace.h. */
1110
1111 const struct btrace_insn *
1112 btrace_insn_get (const struct btrace_insn_iterator *it)
1113 {
1114 const struct btrace_function *bfun;
1115 unsigned int index, end;
1116
1117 index = it->index;
1118 bfun = it->function;
1119
1120 /* The index is within the bounds of this function's instruction vector. */
1121 end = VEC_length (btrace_insn_s, bfun->insn);
1122 gdb_assert (0 < end);
1123 gdb_assert (index < end);
1124
1125 return VEC_index (btrace_insn_s, bfun->insn, index);
1126 }
1127
1128 /* See btrace.h. */
1129
1130 unsigned int
1131 btrace_insn_number (const struct btrace_insn_iterator *it)
1132 {
1133 const struct btrace_function *bfun;
1134
1135 bfun = it->function;
1136 return bfun->insn_offset + it->index;
1137 }
1138
1139 /* See btrace.h. */
1140
1141 void
1142 btrace_insn_begin (struct btrace_insn_iterator *it,
1143 const struct btrace_thread_info *btinfo)
1144 {
1145 const struct btrace_function *bfun;
1146
1147 bfun = btinfo->begin;
1148 if (bfun == NULL)
1149 error (_("No trace."));
1150
1151 it->function = bfun;
1152 it->index = 0;
1153 }
1154
1155 /* See btrace.h. */
1156
1157 void
1158 btrace_insn_end (struct btrace_insn_iterator *it,
1159 const struct btrace_thread_info *btinfo)
1160 {
1161 const struct btrace_function *bfun;
1162 unsigned int length;
1163
1164 bfun = btinfo->end;
1165 if (bfun == NULL)
1166 error (_("No trace."));
1167
1168 /* The last instruction in the last function is the current instruction.
1169 We point to it - it is one past the end of the execution trace. */
1170 length = VEC_length (btrace_insn_s, bfun->insn);
1171
1172 it->function = bfun;
1173 it->index = length - 1;
1174 }
1175
/* See btrace.h.

   Advance IT by at most STRIDE instructions towards the end of the trace,
   crossing function-segment boundaries as needed.  Return the number of
   instructions actually stepped, which is smaller than STRIDE when the end
   of the trace is reached.  */

unsigned int
btrace_insn_next (struct btrace_insn_iterator *it, unsigned int stride)
{
  const struct btrace_function *bfun;
  unsigned int index, steps;

  bfun = it->function;
  steps = 0;
  index = it->index;

  while (stride != 0)
    {
      unsigned int end, space, adv;

      end = VEC_length (btrace_insn_s, bfun->insn);

      /* The iterator must be inside a non-empty segment on entry to each
	 round of the loop.  */
      gdb_assert (0 < end);
      gdb_assert (index < end);

      /* Compute the number of instructions remaining in this segment.  */
      space = end - index;

      /* Advance the iterator as far as possible within this segment.  */
      adv = min (space, stride);
      stride -= adv;
      index += adv;
      steps += adv;

      /* Move to the next function if we're at the end of this one.  */
      if (index == end)
	{
	  const struct btrace_function *next;

	  next = bfun->flow.next;
	  if (next == NULL)
	    {
	      /* We stepped past the last function.

		 Let's adjust the index to point to the last instruction in
		 the previous function.  */
	      index -= 1;
	      steps -= 1;
	      break;
	    }

	  /* We now point to the first instruction in the new function.  */
	  bfun = next;
	  index = 0;
	}

      /* We did make progress.  */
      gdb_assert (adv > 0);
    }

  /* Update the iterator.  */
  it->function = bfun;
  it->index = index;

  return steps;
}
1238
/* See btrace.h.

   Move IT back by at most STRIDE instructions towards the beginning of
   the trace, crossing function-segment boundaries as needed.  Return the
   number of instructions actually stepped, which is smaller than STRIDE
   when the beginning of the trace is reached.  */

unsigned int
btrace_insn_prev (struct btrace_insn_iterator *it, unsigned int stride)
{
  const struct btrace_function *bfun;
  unsigned int index, steps;

  bfun = it->function;
  steps = 0;
  index = it->index;

  while (stride != 0)
    {
      unsigned int adv;

      /* Move to the previous function if we're at the start of this one.  */
      if (index == 0)
	{
	  const struct btrace_function *prev;

	  prev = bfun->flow.prev;
	  if (prev == NULL)
	    break;

	  /* We point to one after the last instruction in the new function.  */
	  bfun = prev;
	  index = VEC_length (btrace_insn_s, bfun->insn);

	  /* There is at least one instruction in this function segment.  */
	  gdb_assert (index > 0);
	}

      /* Advance the iterator as far as possible within this segment.  */
      adv = min (index, stride);
      stride -= adv;
      index -= adv;
      steps += adv;

      /* We did make progress.  */
      gdb_assert (adv > 0);
    }

  /* Update the iterator.  */
  it->function = bfun;
  it->index = index;

  return steps;
}
1288
/* See btrace.h.

   Order two instruction iterators by their absolute instruction numbers:
   negative if LHS precedes RHS, zero if equal, positive otherwise.  */

int
btrace_insn_cmp (const struct btrace_insn_iterator *lhs,
		 const struct btrace_insn_iterator *rhs)
{
  return (int) (btrace_insn_number (lhs) - btrace_insn_number (rhs));
}
1302
1303 /* See btrace.h. */
1304
1305 int
1306 btrace_find_insn_by_number (struct btrace_insn_iterator *it,
1307 const struct btrace_thread_info *btinfo,
1308 unsigned int number)
1309 {
1310 const struct btrace_function *bfun;
1311 unsigned int end;
1312
1313 for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
1314 if (bfun->insn_offset <= number)
1315 break;
1316
1317 if (bfun == NULL)
1318 return 0;
1319
1320 end = bfun->insn_offset + VEC_length (btrace_insn_s, bfun->insn);
1321 if (end <= number)
1322 return 0;
1323
1324 it->function = bfun;
1325 it->index = number - bfun->insn_offset;
1326
1327 return 1;
1328 }
1329
/* See btrace.h.

   Return the function segment the call iterator IT points at, or NULL for
   the end iterator (see btrace_call_end).  */

const struct btrace_function *
btrace_call_get (const struct btrace_call_iterator *it)
{
  return it->function;
}
1337
/* See btrace.h.

   Return the number of the function segment IT points at.  For the end
   iterator (function == NULL), return one past the number of the last
   segment - unless that segment holds only the current instruction, in
   which case it is skipped and its own number is returned.  */

unsigned int
btrace_call_number (const struct btrace_call_iterator *it)
{
  const struct btrace_thread_info *btinfo;
  const struct btrace_function *bfun;
  unsigned int insns;

  btinfo = it->btinfo;
  bfun = it->function;
  if (bfun != NULL)
    return bfun->number;

  /* For the end iterator, i.e. bfun == NULL, we return one more than the
     number of the last function.  */
  bfun = btinfo->end;
  insns = VEC_length (btrace_insn_s, bfun->insn);

  /* If the function contains only a single instruction (i.e. the current
     instruction), it will be skipped and its number is already the number
     we seek.  */
  if (insns == 1)
    return bfun->number;

  /* Otherwise, return one more than the number of the last function.  */
  return bfun->number + 1;
}
1366
1367 /* See btrace.h. */
1368
1369 void
1370 btrace_call_begin (struct btrace_call_iterator *it,
1371 const struct btrace_thread_info *btinfo)
1372 {
1373 const struct btrace_function *bfun;
1374
1375 bfun = btinfo->begin;
1376 if (bfun == NULL)
1377 error (_("No trace."));
1378
1379 it->btinfo = btinfo;
1380 it->function = bfun;
1381 }
1382
1383 /* See btrace.h. */
1384
1385 void
1386 btrace_call_end (struct btrace_call_iterator *it,
1387 const struct btrace_thread_info *btinfo)
1388 {
1389 const struct btrace_function *bfun;
1390
1391 bfun = btinfo->end;
1392 if (bfun == NULL)
1393 error (_("No trace."));
1394
1395 it->btinfo = btinfo;
1396 it->function = NULL;
1397 }
1398
/* See btrace.h.

   Advance IT by at most STRIDE function segments towards the end of the
   trace and return the number of segments actually advanced.  */

unsigned int
btrace_call_next (struct btrace_call_iterator *it, unsigned int stride)
{
  const struct btrace_function *bfun;
  unsigned int steps;

  bfun = it->function;
  steps = 0;
  while (bfun != NULL)
    {
      const struct btrace_function *next;
      unsigned int insns;

      next = bfun->flow.next;
      if (next == NULL)
	{
	  /* Ignore the last function if it only contains a single
	     (i.e. the current) instruction.  */
	  insns = VEC_length (btrace_insn_s, bfun->insn);
	  if (insns == 1)
	    /* NOTE(review): if STEPS is zero here, this unsigned
	       subtraction wraps around; the "steps += 1" below brings it
	       back to zero once BFUN becomes NULL - confirm this is the
	       intended way of skipping the last segment.  */
	    steps -= 1;
	}

      if (stride == steps)
	break;

      bfun = next;
      steps += 1;
    }

  it->function = bfun;
  return steps;
}
1434
/* See btrace.h.

   Move IT back by at most STRIDE function segments towards the beginning
   of the trace and return the number of segments actually stepped.  */

unsigned int
btrace_call_prev (struct btrace_call_iterator *it, unsigned int stride)
{
  const struct btrace_thread_info *btinfo;
  const struct btrace_function *bfun;
  unsigned int steps;

  bfun = it->function;
  steps = 0;

  /* Stepping back from the end iterator first has to materialize a real
     segment to stand on.  */
  if (bfun == NULL)
    {
      unsigned int insns;

      btinfo = it->btinfo;
      bfun = btinfo->end;
      if (bfun == NULL)
	return 0;

      /* Ignore the last function if it only contains a single
	 (i.e. the current) instruction.  */
      insns = VEC_length (btrace_insn_s, bfun->insn);
      if (insns == 1)
	bfun = bfun->flow.prev;

      if (bfun == NULL)
	return 0;

      /* Leaving the end iterator counts as one step.  */
      steps += 1;
    }

  while (steps < stride)
    {
      const struct btrace_function *prev;

      prev = bfun->flow.prev;
      if (prev == NULL)
	break;

      bfun = prev;
      steps += 1;
    }

  it->function = bfun;
  return steps;
}
1483
/* See btrace.h.

   Order two call iterators by their function-segment numbers: negative if
   LHS precedes RHS, zero if equal, positive otherwise.  */

int
btrace_call_cmp (const struct btrace_call_iterator *lhs,
		 const struct btrace_call_iterator *rhs)
{
  return (int) (btrace_call_number (lhs) - btrace_call_number (rhs));
}
1497
1498 /* See btrace.h. */
1499
1500 int
1501 btrace_find_call_by_number (struct btrace_call_iterator *it,
1502 const struct btrace_thread_info *btinfo,
1503 unsigned int number)
1504 {
1505 const struct btrace_function *bfun;
1506
1507 for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
1508 {
1509 unsigned int bnum;
1510
1511 bnum = bfun->number;
1512 if (number == bnum)
1513 {
1514 it->btinfo = btinfo;
1515 it->function = bfun;
1516 return 1;
1517 }
1518
1519 /* Functions are ordered and numbered consecutively. We could bail out
1520 earlier. On the other hand, it is very unlikely that we search for
1521 a nonexistent function. */
1522 }
1523
1524 return 0;
1525 }
1526
/* See btrace.h.

   Remember the instruction iterator range [BEGIN; END] in BTINFO's
   instruction history, allocating the history record lazily on first
   use.  */

void
btrace_set_insn_history (struct btrace_thread_info *btinfo,
			 const struct btrace_insn_iterator *begin,
			 const struct btrace_insn_iterator *end)
{
  if (btinfo->insn_history == NULL)
    btinfo->insn_history = xzalloc (sizeof (*btinfo->insn_history));

  btinfo->insn_history->begin = *begin;
  btinfo->insn_history->end = *end;
}
1540
/* See btrace.h.

   Remember the call iterator range [BEGIN; END] in BTINFO's call history,
   allocating the history record lazily on first use.  Both iterators must
   refer to the same thread's trace.  */

void
btrace_set_call_history (struct btrace_thread_info *btinfo,
			 const struct btrace_call_iterator *begin,
			 const struct btrace_call_iterator *end)
{
  gdb_assert (begin->btinfo == end->btinfo);

  if (btinfo->call_history == NULL)
    btinfo->call_history = xzalloc (sizeof (*btinfo->call_history));

  btinfo->call_history->begin = *begin;
  btinfo->call_history->end = *end;
}
1556
1557 /* See btrace.h. */
1558
1559 int
1560 btrace_is_replaying (struct thread_info *tp)
1561 {
1562 return tp->btrace.replay != NULL;
1563 }
1564
1565 /* See btrace.h. */
1566
1567 int
1568 btrace_is_empty (struct thread_info *tp)
1569 {
1570 struct btrace_insn_iterator begin, end;
1571 struct btrace_thread_info *btinfo;
1572
1573 btinfo = &tp->btrace;
1574
1575 if (btinfo->begin == NULL)
1576 return 1;
1577
1578 btrace_insn_begin (&begin, btinfo);
1579 btrace_insn_end (&end, btinfo);
1580
1581 return btrace_insn_cmp (&begin, &end) == 0;
1582 }
1583
/* Cleanup callback: forward the request to btrace_data_fini.  ARG is the
   struct btrace_data registered via make_cleanup_btrace_data.  */

static void
do_btrace_data_cleanup (void *arg)
{
  struct btrace_data *data = arg;

  btrace_data_fini (data);
}
1591
1592 /* See btrace.h. */
1593
1594 struct cleanup *
1595 make_cleanup_btrace_data (struct btrace_data *data)
1596 {
1597 return make_cleanup (do_btrace_data_cleanup, data);
1598 }