Fix gdb.fortran/array-element.exp failures.
[deliverable/binutils-gdb.git] / gdb / btrace.c
CommitLineData
02d27625
MM
1/* Branch trace support for GDB, the GNU debugger.
2
ecd75fc8 3 Copyright (C) 2013-2014 Free Software Foundation, Inc.
02d27625
MM
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
d41f6d8e 22#include "defs.h"
02d27625
MM
23#include "btrace.h"
24#include "gdbthread.h"
25#include "exceptions.h"
26#include "inferior.h"
27#include "target.h"
28#include "record.h"
29#include "symtab.h"
30#include "disasm.h"
31#include "source.h"
32#include "filenames.h"
c12a2917 33#include "xml-support.h"
6e07b1d2 34#include "regcache.h"
02d27625
MM
35
36/* Print a record debug message. Use do ... while (0) to avoid ambiguities
37 when used in if statements. */
38
39#define DEBUG(msg, args...) \
40 do \
41 { \
42 if (record_debug != 0) \
43 fprintf_unfiltered (gdb_stdlog, \
44 "[btrace] " msg "\n", ##args); \
45 } \
46 while (0)
47
48#define DEBUG_FTRACE(msg, args...) DEBUG ("[ftrace] " msg, ##args)
49
02d27625
MM
50/* Return the function name of a recorded function segment for printing.
51 This function never returns NULL. */
52
53static const char *
23a7fe75 54ftrace_print_function_name (const struct btrace_function *bfun)
02d27625
MM
55{
56 struct minimal_symbol *msym;
57 struct symbol *sym;
58
59 msym = bfun->msym;
60 sym = bfun->sym;
61
62 if (sym != NULL)
63 return SYMBOL_PRINT_NAME (sym);
64
65 if (msym != NULL)
efd66ac6 66 return MSYMBOL_PRINT_NAME (msym);
02d27625
MM
67
68 return "<unknown>";
69}
70
71/* Return the file name of a recorded function segment for printing.
72 This function never returns NULL. */
73
74static const char *
23a7fe75 75ftrace_print_filename (const struct btrace_function *bfun)
02d27625
MM
76{
77 struct symbol *sym;
78 const char *filename;
79
80 sym = bfun->sym;
81
82 if (sym != NULL)
83 filename = symtab_to_filename_for_display (sym->symtab);
84 else
85 filename = "<unknown>";
86
87 return filename;
88}
89
23a7fe75
MM
90/* Return a string representation of the address of an instruction.
91 This function never returns NULL. */
02d27625 92
23a7fe75
MM
93static const char *
94ftrace_print_insn_addr (const struct btrace_insn *insn)
02d27625 95{
23a7fe75
MM
96 if (insn == NULL)
97 return "<nil>";
98
99 return core_addr_to_string_nz (insn->pc);
02d27625
MM
100}
101
23a7fe75 102/* Print an ftrace debug status message. */
02d27625
MM
103
104static void
23a7fe75 105ftrace_debug (const struct btrace_function *bfun, const char *prefix)
02d27625 106{
23a7fe75
MM
107 const char *fun, *file;
108 unsigned int ibegin, iend;
109 int lbegin, lend, level;
110
111 fun = ftrace_print_function_name (bfun);
112 file = ftrace_print_filename (bfun);
113 level = bfun->level;
114
115 lbegin = bfun->lbegin;
116 lend = bfun->lend;
117
118 ibegin = bfun->insn_offset;
119 iend = ibegin + VEC_length (btrace_insn_s, bfun->insn);
120
121 DEBUG_FTRACE ("%s: fun = %s, file = %s, level = %d, lines = [%d; %d], "
122 "insn = [%u; %u)", prefix, fun, file, level, lbegin, lend,
123 ibegin, iend);
02d27625
MM
124}
125
23a7fe75
MM
126/* Return non-zero if BFUN does not match MFUN and FUN,
127 return zero otherwise. */
02d27625
MM
128
129static int
23a7fe75
MM
130ftrace_function_switched (const struct btrace_function *bfun,
131 const struct minimal_symbol *mfun,
132 const struct symbol *fun)
02d27625
MM
133{
134 struct minimal_symbol *msym;
135 struct symbol *sym;
136
02d27625
MM
137 msym = bfun->msym;
138 sym = bfun->sym;
139
140 /* If the minimal symbol changed, we certainly switched functions. */
141 if (mfun != NULL && msym != NULL
efd66ac6 142 && strcmp (MSYMBOL_LINKAGE_NAME (mfun), MSYMBOL_LINKAGE_NAME (msym)) != 0)
02d27625
MM
143 return 1;
144
145 /* If the symbol changed, we certainly switched functions. */
146 if (fun != NULL && sym != NULL)
147 {
148 const char *bfname, *fname;
149
150 /* Check the function name. */
151 if (strcmp (SYMBOL_LINKAGE_NAME (fun), SYMBOL_LINKAGE_NAME (sym)) != 0)
152 return 1;
153
154 /* Check the location of those functions, as well. */
155 bfname = symtab_to_fullname (sym->symtab);
156 fname = symtab_to_fullname (fun->symtab);
157 if (filename_cmp (fname, bfname) != 0)
158 return 1;
159 }
160
23a7fe75
MM
161 /* If we lost symbol information, we switched functions. */
162 if (!(msym == NULL && sym == NULL) && mfun == NULL && fun == NULL)
163 return 1;
164
165 /* If we gained symbol information, we switched functions. */
166 if (msym == NULL && sym == NULL && !(mfun == NULL && fun == NULL))
167 return 1;
168
02d27625
MM
169 return 0;
170}
171
23a7fe75
MM
172/* Return non-zero if we should skip this file when generating the function
173 call history, zero otherwise.
174 We would want to do that if, say, a macro that is defined in another file
175 is expanded in this function. */
02d27625
MM
176
177static int
23a7fe75 178ftrace_skip_file (const struct btrace_function *bfun, const char *fullname)
02d27625
MM
179{
180 struct symbol *sym;
181 const char *bfile;
182
183 sym = bfun->sym;
23a7fe75
MM
184 if (sym == NULL)
185 return 1;
02d27625 186
23a7fe75
MM
187 bfile = symtab_to_fullname (sym->symtab);
188
189 return (filename_cmp (bfile, fullname) != 0);
190}
191
192/* Allocate and initialize a new branch trace function segment.
193 PREV is the chronologically preceding function segment.
194 MFUN and FUN are the symbol information we have for this function. */
195
196static struct btrace_function *
197ftrace_new_function (struct btrace_function *prev,
198 struct minimal_symbol *mfun,
199 struct symbol *fun)
200{
201 struct btrace_function *bfun;
202
203 bfun = xzalloc (sizeof (*bfun));
204
205 bfun->msym = mfun;
206 bfun->sym = fun;
207 bfun->flow.prev = prev;
208
209 /* We start with the identities of min and max, respectively. */
210 bfun->lbegin = INT_MAX;
211 bfun->lend = INT_MIN;
02d27625 212
5de9129b
MM
213 if (prev == NULL)
214 {
215 /* Start counting at one. */
216 bfun->number = 1;
217 bfun->insn_offset = 1;
218 }
219 else
23a7fe75
MM
220 {
221 gdb_assert (prev->flow.next == NULL);
222 prev->flow.next = bfun;
02d27625 223
23a7fe75
MM
224 bfun->number = prev->number + 1;
225 bfun->insn_offset = (prev->insn_offset
226 + VEC_length (btrace_insn_s, prev->insn));
227 }
228
229 return bfun;
02d27625
MM
230}
231
23a7fe75 232/* Update the UP field of a function segment. */
02d27625 233
23a7fe75
MM
234static void
235ftrace_update_caller (struct btrace_function *bfun,
236 struct btrace_function *caller,
237 enum btrace_function_flag flags)
02d27625 238{
23a7fe75
MM
239 if (bfun->up != NULL)
240 ftrace_debug (bfun, "updating caller");
02d27625 241
23a7fe75
MM
242 bfun->up = caller;
243 bfun->flags = flags;
244
245 ftrace_debug (bfun, "set caller");
246}
247
248/* Fix up the caller for all segments of a function. */
249
250static void
251ftrace_fixup_caller (struct btrace_function *bfun,
252 struct btrace_function *caller,
253 enum btrace_function_flag flags)
254{
255 struct btrace_function *prev, *next;
256
257 ftrace_update_caller (bfun, caller, flags);
258
259 /* Update all function segments belonging to the same function. */
260 for (prev = bfun->segment.prev; prev != NULL; prev = prev->segment.prev)
261 ftrace_update_caller (prev, caller, flags);
262
263 for (next = bfun->segment.next; next != NULL; next = next->segment.next)
264 ftrace_update_caller (next, caller, flags);
265}
266
267/* Add a new function segment for a call.
268 CALLER is the chronologically preceding function segment.
269 MFUN and FUN are the symbol information we have for this function. */
270
271static struct btrace_function *
272ftrace_new_call (struct btrace_function *caller,
273 struct minimal_symbol *mfun,
274 struct symbol *fun)
275{
276 struct btrace_function *bfun;
277
278 bfun = ftrace_new_function (caller, mfun, fun);
279 bfun->up = caller;
280 bfun->level = caller->level + 1;
281
282 ftrace_debug (bfun, "new call");
283
284 return bfun;
285}
286
287/* Add a new function segment for a tail call.
288 CALLER is the chronologically preceding function segment.
289 MFUN and FUN are the symbol information we have for this function. */
290
291static struct btrace_function *
292ftrace_new_tailcall (struct btrace_function *caller,
293 struct minimal_symbol *mfun,
294 struct symbol *fun)
295{
296 struct btrace_function *bfun;
02d27625 297
23a7fe75
MM
298 bfun = ftrace_new_function (caller, mfun, fun);
299 bfun->up = caller;
300 bfun->level = caller->level + 1;
301 bfun->flags |= BFUN_UP_LINKS_TO_TAILCALL;
02d27625 302
23a7fe75
MM
303 ftrace_debug (bfun, "new tail call");
304
305 return bfun;
306}
307
308/* Find the innermost caller in the back trace of BFUN with MFUN/FUN
309 symbol information. */
310
311static struct btrace_function *
312ftrace_find_caller (struct btrace_function *bfun,
313 struct minimal_symbol *mfun,
314 struct symbol *fun)
315{
316 for (; bfun != NULL; bfun = bfun->up)
317 {
318 /* Skip functions with incompatible symbol information. */
319 if (ftrace_function_switched (bfun, mfun, fun))
320 continue;
321
322 /* This is the function segment we're looking for. */
323 break;
324 }
325
326 return bfun;
327}
328
329/* Find the innermost caller in the back trace of BFUN, skipping all
330 function segments that do not end with a call instruction (e.g.
331 tail calls ending with a jump). */
332
333static struct btrace_function *
334ftrace_find_call (struct gdbarch *gdbarch, struct btrace_function *bfun)
335{
336 for (; bfun != NULL; bfun = bfun->up)
02d27625 337 {
23a7fe75 338 struct btrace_insn *last;
02d27625
MM
339 CORE_ADDR pc;
340
23a7fe75
MM
341 /* We do not allow empty function segments. */
342 gdb_assert (!VEC_empty (btrace_insn_s, bfun->insn));
343
344 last = VEC_last (btrace_insn_s, bfun->insn);
345 pc = last->pc;
02d27625 346
23a7fe75
MM
347 if (gdbarch_insn_is_call (gdbarch, pc))
348 break;
349 }
350
351 return bfun;
352}
353
/* Add a continuation segment for a function into which we return.
   PREV is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function.
   Return the new function segment, never NULL.  */

static struct btrace_function *
ftrace_new_return (struct gdbarch *gdbarch,
		   struct btrace_function *prev,
		   struct minimal_symbol *mfun,
		   struct symbol *fun)
{
  struct btrace_function *bfun, *caller;

  bfun = ftrace_new_function (prev, mfun, fun);

  /* It is important to start at PREV's caller.  Otherwise, we might find
     PREV itself, if PREV is a recursive function.  */
  caller = ftrace_find_caller (prev->up, mfun, fun);
  if (caller != NULL)
    {
      /* The caller of PREV is the preceding btrace function segment in this
	 function instance.  */
      gdb_assert (caller->segment.next == NULL);

      caller->segment.next = bfun;
      bfun->segment.prev = caller;

      /* Maintain the function level.  */
      bfun->level = caller->level;

      /* Maintain the call stack.  */
      bfun->up = caller->up;
      bfun->flags = caller->flags;

      ftrace_debug (bfun, "new return");
    }
  else
    {
      /* We did not find a caller.  This could mean that something went
	 wrong or that the call is simply not included in the trace.  */

      /* Let's search for some actual call.  */
      caller = ftrace_find_call (gdbarch, prev->up);
      if (caller == NULL)
	{
	  /* There is no call in PREV's back trace.  We assume that the
	     branch trace did not include it.  */

	  /* Let's find the topmost call function - this skips tail calls.  */
	  while (prev->up != NULL)
	    prev = prev->up;

	  /* We maintain levels for a series of returns for which we have
	     not seen the calls.
	     We start at the preceding function's level in case this has
	     already been a return for which we have not seen the call.
	     We start at level 0 otherwise, to handle tail calls correctly.  */
	  bfun->level = min (0, prev->level) - 1;

	  /* Fix up the call stack for PREV.  All segments of PREV's
	     function now point at BFUN as their caller.  */
	  ftrace_fixup_caller (prev, bfun, BFUN_UP_LINKS_TO_RET);

	  ftrace_debug (bfun, "new return - no caller");
	}
      else
	{
	  /* There is a call in PREV's back trace to which we should have
	     returned.  Let's remain at this level.  */
	  bfun->level = prev->level;

	  ftrace_debug (bfun, "new return - unknown caller");
	}
    }

  return bfun;
}
429
430/* Add a new function segment for a function switch.
431 PREV is the chronologically preceding function segment.
432 MFUN and FUN are the symbol information we have for this function. */
433
434static struct btrace_function *
435ftrace_new_switch (struct btrace_function *prev,
436 struct minimal_symbol *mfun,
437 struct symbol *fun)
438{
439 struct btrace_function *bfun;
440
441 /* This is an unexplained function switch. The call stack will likely
442 be wrong at this point. */
443 bfun = ftrace_new_function (prev, mfun, fun);
02d27625 444
23a7fe75
MM
445 /* We keep the function level. */
446 bfun->level = prev->level;
02d27625 447
23a7fe75
MM
448 ftrace_debug (bfun, "new switch");
449
450 return bfun;
451}
452
/* Update BFUN with respect to the instruction at PC.  This may create new
   function segments.
   Return the chronologically latest function segment, never NULL.
   The checks below are ordered deliberately: return/call detection on the
   previous instruction runs before the symbol-based switch check, so call
   stack links are filled in whenever possible.  */

static struct btrace_function *
ftrace_update_function (struct gdbarch *gdbarch,
			struct btrace_function *bfun, CORE_ADDR pc)
{
  struct bound_minimal_symbol bmfun;
  struct minimal_symbol *mfun;
  struct symbol *fun;
  struct btrace_insn *last;

  /* Try to determine the function we're in.  We use both types of symbols
     to avoid surprises when we sometimes get a full symbol and sometimes
     only a minimal symbol.  */
  fun = find_pc_function (pc);
  bmfun = lookup_minimal_symbol_by_pc (pc);
  mfun = bmfun.minsym;

  if (fun == NULL && mfun == NULL)
    DEBUG_FTRACE ("no symbol at %s", core_addr_to_string_nz (pc));

  /* If we didn't have a function before, we create one.  */
  if (bfun == NULL)
    return ftrace_new_function (bfun, mfun, fun);

  /* Check the last instruction, if we have one.
     We do this check first, since it allows us to fill in the call stack
     links in addition to the normal flow links.  */
  last = NULL;
  if (!VEC_empty (btrace_insn_s, bfun->insn))
    last = VEC_last (btrace_insn_s, bfun->insn);

  if (last != NULL)
    {
      CORE_ADDR lpc;

      lpc = last->pc;

      /* Check for returns.  */
      if (gdbarch_insn_is_ret (gdbarch, lpc))
	return ftrace_new_return (gdbarch, bfun, mfun, fun);

      /* Check for calls.  */
      if (gdbarch_insn_is_call (gdbarch, lpc))
	{
	  int size;

	  size = gdb_insn_length (gdbarch, lpc);

	  /* Ignore calls to the next instruction.  They are used for PIC.  */
	  if (lpc + size != pc)
	    return ftrace_new_call (bfun, mfun, fun);
	}
    }

  /* Check if we're switching functions for some other reason.  */
  if (ftrace_function_switched (bfun, mfun, fun))
    {
      DEBUG_FTRACE ("switching from %s in %s at %s",
		    ftrace_print_insn_addr (last),
		    ftrace_print_function_name (bfun),
		    ftrace_print_filename (bfun));

      if (last != NULL)
	{
	  CORE_ADDR start, lpc;

	  start = get_pc_function_start (pc);

	  /* If we can't determine the function for PC, we treat a jump at
	     the end of the block as tail call.  */
	  if (start == 0)
	    start = pc;

	  lpc = last->pc;

	  /* Jumps indicate optimized tail calls.  */
	  if (start == pc && gdbarch_insn_is_jump (gdbarch, lpc))
	    return ftrace_new_tailcall (bfun, mfun, fun);
	}

      return ftrace_new_switch (bfun, mfun, fun);
    }

  return bfun;
}
541
542/* Update BFUN's source range with respect to the instruction at PC. */
543
544static void
545ftrace_update_lines (struct btrace_function *bfun, CORE_ADDR pc)
546{
547 struct symtab_and_line sal;
548 const char *fullname;
549
550 sal = find_pc_line (pc, 0);
551 if (sal.symtab == NULL || sal.line == 0)
552 {
553 DEBUG_FTRACE ("no lines at %s", core_addr_to_string_nz (pc));
554 return;
555 }
556
557 /* Check if we switched files. This could happen if, say, a macro that
558 is defined in another file is expanded here. */
559 fullname = symtab_to_fullname (sal.symtab);
560 if (ftrace_skip_file (bfun, fullname))
561 {
562 DEBUG_FTRACE ("ignoring file at %s, file=%s",
563 core_addr_to_string_nz (pc), fullname);
564 return;
565 }
566
567 /* Update the line range. */
568 bfun->lbegin = min (bfun->lbegin, sal.line);
569 bfun->lend = max (bfun->lend, sal.line);
570
571 if (record_debug > 1)
572 ftrace_debug (bfun, "update lines");
573}
574
575/* Add the instruction at PC to BFUN's instructions. */
576
577static void
578ftrace_update_insns (struct btrace_function *bfun, CORE_ADDR pc)
579{
580 struct btrace_insn *insn;
581
582 insn = VEC_safe_push (btrace_insn_s, bfun->insn, NULL);
583 insn->pc = pc;
584
585 if (record_debug > 1)
586 ftrace_debug (bfun, "update insn");
587}
588
/* Compute the function branch trace from a block branch trace BTRACE for
   a thread given by BTINFO.
   BTRACE lists blocks with the most recent block first; we iterate it
   back to front to process instructions in execution order.  The computed
   segments are appended to any existing trace in BTINFO.  */

static void
btrace_compute_ftrace (struct btrace_thread_info *btinfo,
		       VEC (btrace_block_s) *btrace)
{
  struct btrace_function *begin, *end;
  struct gdbarch *gdbarch;
  unsigned int blk;
  int level;

  DEBUG ("compute ftrace");

  gdbarch = target_gdbarch ();
  begin = btinfo->begin;
  end = btinfo->end;
  /* Resume from the existing level offset, or start from "no level seen
     yet" (INT_MAX) for a fresh trace.  */
  level = begin != NULL ? -btinfo->level : INT_MAX;
  blk = VEC_length (btrace_block_s, btrace);

  while (blk != 0)
    {
      btrace_block_s *block;
      CORE_ADDR pc;

      blk -= 1;

      block = VEC_index (btrace_block_s, btrace, blk);
      pc = block->begin;

      for (;;)
	{
	  int size;

	  /* We should hit the end of the block.  Warn if we went too far.  */
	  if (block->end < pc)
	    {
	      warning (_("Recorded trace may be corrupted around %s."),
		       core_addr_to_string_nz (pc));
	      break;
	    }

	  end = ftrace_update_function (gdbarch, end, pc);
	  if (begin == NULL)
	    begin = end;

	  /* Maintain the function level offset.
	     For all but the last block, we do it here.  */
	  if (blk != 0)
	    level = min (level, end->level);

	  ftrace_update_insns (end, pc);
	  ftrace_update_lines (end, pc);

	  /* We're done once we pushed the instruction at the end.  */
	  if (block->end == pc)
	    break;

	  size = gdb_insn_length (gdbarch, pc);

	  /* Make sure we terminate if we fail to compute the size.  */
	  if (size <= 0)
	    {
	      warning (_("Recorded trace may be incomplete around %s."),
		       core_addr_to_string_nz (pc));
	      break;
	    }

	  pc += size;

	  /* Maintain the function level offset.
	     For the last block, we do it here to not consider the last
	     instruction.
	     Since the last instruction corresponds to the current instruction
	     and is not really part of the execution history, it shouldn't
	     affect the level.  */
	  if (blk == 0)
	    level = min (level, end->level);
	}
    }

  btinfo->begin = begin;
  btinfo->end = end;

  /* LEVEL is the minimal function level of all btrace function segments.
     Define the global level offset to -LEVEL so all function levels are
     normalized to start at zero.  */
  btinfo->level = -level;
}
678
6e07b1d2
MM
679/* Add an entry for the current PC. */
680
681static void
682btrace_add_pc (struct thread_info *tp)
683{
684 VEC (btrace_block_s) *btrace;
685 struct btrace_block *block;
686 struct regcache *regcache;
687 struct cleanup *cleanup;
688 CORE_ADDR pc;
689
690 regcache = get_thread_regcache (tp->ptid);
691 pc = regcache_read_pc (regcache);
692
693 btrace = NULL;
694 cleanup = make_cleanup (VEC_cleanup (btrace_block_s), &btrace);
695
696 block = VEC_safe_push (btrace_block_s, btrace, NULL);
697 block->begin = pc;
698 block->end = pc;
699
700 btrace_compute_ftrace (&tp->btrace, btrace);
701
702 do_cleanups (cleanup);
703}
704
02d27625
MM
705/* See btrace.h. */
706
707void
708btrace_enable (struct thread_info *tp)
709{
710 if (tp->btrace.target != NULL)
711 return;
712
713 if (!target_supports_btrace ())
714 error (_("Target does not support branch tracing."));
715
716 DEBUG ("enable thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
717
718 tp->btrace.target = target_enable_btrace (tp->ptid);
6e07b1d2
MM
719
720 /* Add an entry for the current PC so we start tracing from where we
721 enabled it. */
722 if (tp->btrace.target != NULL)
723 btrace_add_pc (tp);
02d27625
MM
724}
725
726/* See btrace.h. */
727
728void
729btrace_disable (struct thread_info *tp)
730{
731 struct btrace_thread_info *btp = &tp->btrace;
732 int errcode = 0;
733
734 if (btp->target == NULL)
735 return;
736
737 DEBUG ("disable thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
738
739 target_disable_btrace (btp->target);
740 btp->target = NULL;
741
742 btrace_clear (tp);
743}
744
745/* See btrace.h. */
746
747void
748btrace_teardown (struct thread_info *tp)
749{
750 struct btrace_thread_info *btp = &tp->btrace;
751 int errcode = 0;
752
753 if (btp->target == NULL)
754 return;
755
756 DEBUG ("teardown thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
757
758 target_teardown_btrace (btp->target);
759 btp->target = NULL;
760
761 btrace_clear (tp);
762}
763
969c39fb
MM
/* Adjust the block trace in order to stitch old and new trace together.
   BTRACE is the new delta trace between the last and the current stop.
   BTINFO is the old branch trace until the last stop.
   May modify BTRACE as well as the existing trace in BTINFO.
   Return 0 on success, -1 otherwise.  */

static int
btrace_stitch_trace (VEC (btrace_block_s) **btrace,
		     const struct btrace_thread_info *btinfo)
{
  struct btrace_function *last_bfun;
  struct btrace_insn *last_insn;
  btrace_block_s *first_new_block;

  /* If we don't have trace, there's nothing to do.  */
  if (VEC_empty (btrace_block_s, *btrace))
    return 0;

  /* The caller only attempts stitching when there is existing trace.  */
  last_bfun = btinfo->end;
  gdb_assert (last_bfun != NULL);

  /* Beware that block trace starts with the most recent block, so the
     chronologically first block in the new trace is the last block in
     the new trace's block vector.  */
  first_new_block = VEC_last (btrace_block_s, *btrace);
  last_insn = VEC_last (btrace_insn_s, last_bfun->insn);

  /* If the current PC at the end of the block is the same as in our current
     trace, there are two explanations:
       1. we executed the instruction and some branch brought us back.
       2. we have not made any progress.
     In the first case, the delta trace vector should contain at least two
     entries.
     In the second case, the delta trace vector should contain exactly one
     entry for the partial block containing the current PC.  Remove it.  */
  if (first_new_block->end == last_insn->pc
      && VEC_length (btrace_block_s, *btrace) == 1)
    {
      VEC_pop (btrace_block_s, *btrace);
      return 0;
    }

  DEBUG ("stitching %s to %s", ftrace_print_insn_addr (last_insn),
	 core_addr_to_string_nz (first_new_block->end));

  /* Do a simple sanity check to make sure we don't accidentally end up
     with a bad block.  This should not occur in practice.  */
  if (first_new_block->end < last_insn->pc)
    {
      warning (_("Error while trying to read delta trace.  Falling back to "
		 "a full read."));
      return -1;
    }

  /* We adjust the last block to start at the end of our current trace.  */
  gdb_assert (first_new_block->begin == 0);
  first_new_block->begin = last_insn->pc;

  /* We simply pop the last insn so we can insert it again as part of
     the normal branch trace computation.
     Since instruction iterators are based on indices in the instructions
     vector, we don't leave any pointers dangling.  */
  DEBUG ("pruning insn at %s for stitching",
	 ftrace_print_insn_addr (last_insn));

  VEC_pop (btrace_insn_s, last_bfun->insn);

  /* The instructions vector may become empty temporarily if this has
     been the only instruction in this function segment.
     This violates the invariant but will be remedied shortly by
     btrace_compute_ftrace when we add the new trace.  */
  return 0;
}
837
838/* Clear the branch trace histories in BTINFO. */
839
840static void
841btrace_clear_history (struct btrace_thread_info *btinfo)
842{
843 xfree (btinfo->insn_history);
844 xfree (btinfo->call_history);
845 xfree (btinfo->replay);
846
847 btinfo->insn_history = NULL;
848 btinfo->call_history = NULL;
849 btinfo->replay = NULL;
850}
851
02d27625
MM
/* See btrace.h.

   Reads new trace from the target and updates TP's branch trace.
   Tries the cheapest read first (DELTA), falling back to NEW and
   finally to a full re-read (ALL) if stitching is not possible.  */

void
btrace_fetch (struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;
  struct btrace_target_info *tinfo;
  VEC (btrace_block_s) *btrace;
  struct cleanup *cleanup;
  int errcode;

  DEBUG ("fetch thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));

  btrace = NULL;
  btinfo = &tp->btrace;
  tinfo = btinfo->target;
  if (tinfo == NULL)
    return;

  /* There's no way we could get new trace while replaying.
     On the other hand, delta trace would return a partial record with the
     current PC, which is the replay PC, not the last PC, as expected.  */
  if (btinfo->replay != NULL)
    return;

  cleanup = make_cleanup (VEC_cleanup (btrace_block_s), &btrace);

  /* Let's first try to extend the trace we already have.  */
  if (btinfo->end != NULL)
    {
      errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_DELTA);
      if (errcode == 0)
	{
	  /* Success.  Let's try to stitch the traces together.  */
	  errcode = btrace_stitch_trace (&btrace, btinfo);
	}
      else
	{
	  /* We failed to read delta trace.  Let's try to read new trace.  */
	  errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_NEW);

	  /* If we got any new trace, discard what we have.  */
	  if (errcode == 0 && !VEC_empty (btrace_block_s, btrace))
	    btrace_clear (tp);
	}

      /* If we were not able to read the trace, we start over.  */
      if (errcode != 0)
	{
	  btrace_clear (tp);
	  errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);
	}
    }
  else
    errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);

  /* If we were not able to read the branch trace, signal an error.  */
  if (errcode != 0)
    error (_("Failed to read branch trace."));

  /* Compute the trace, provided we have any.  */
  if (!VEC_empty (btrace_block_s, btrace))
    {
      btrace_clear_history (btinfo);
      btrace_compute_ftrace (btinfo, btrace);
    }

  do_cleanups (cleanup);
}
921
922/* See btrace.h. */
923
924void
925btrace_clear (struct thread_info *tp)
926{
927 struct btrace_thread_info *btinfo;
23a7fe75 928 struct btrace_function *it, *trash;
02d27625
MM
929
930 DEBUG ("clear thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
931
0b722aec
MM
932 /* Make sure btrace frames that may hold a pointer into the branch
933 trace data are destroyed. */
934 reinit_frame_cache ();
935
02d27625
MM
936 btinfo = &tp->btrace;
937
23a7fe75
MM
938 it = btinfo->begin;
939 while (it != NULL)
940 {
941 trash = it;
942 it = it->flow.next;
02d27625 943
23a7fe75
MM
944 xfree (trash);
945 }
946
947 btinfo->begin = NULL;
948 btinfo->end = NULL;
949
969c39fb 950 btrace_clear_history (btinfo);
02d27625
MM
951}
952
953/* See btrace.h. */
954
955void
956btrace_free_objfile (struct objfile *objfile)
957{
958 struct thread_info *tp;
959
960 DEBUG ("free objfile");
961
034f788c 962 ALL_NON_EXITED_THREADS (tp)
02d27625
MM
963 btrace_clear (tp);
964}
c12a2917
MM
965
966#if defined (HAVE_LIBEXPAT)
967
968/* Check the btrace document version. */
969
970static void
971check_xml_btrace_version (struct gdb_xml_parser *parser,
972 const struct gdb_xml_element *element,
973 void *user_data, VEC (gdb_xml_value_s) *attributes)
974{
975 const char *version = xml_find_attribute (attributes, "version")->value;
976
977 if (strcmp (version, "1.0") != 0)
978 gdb_xml_error (parser, _("Unsupported btrace version: \"%s\""), version);
979}
980
981/* Parse a btrace "block" xml record. */
982
983static void
984parse_xml_btrace_block (struct gdb_xml_parser *parser,
985 const struct gdb_xml_element *element,
986 void *user_data, VEC (gdb_xml_value_s) *attributes)
987{
988 VEC (btrace_block_s) **btrace;
989 struct btrace_block *block;
990 ULONGEST *begin, *end;
991
992 btrace = user_data;
993 block = VEC_safe_push (btrace_block_s, *btrace, NULL);
994
995 begin = xml_find_attribute (attributes, "begin")->value;
996 end = xml_find_attribute (attributes, "end")->value;
997
998 block->begin = *begin;
999 block->end = *end;
1000}
1001
/* Attributes of a "block" element: the required begin/end addresses,
   parsed as unsigned integers.  */

static const struct gdb_xml_attribute block_attributes[] = {
  { "begin", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { "end", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

/* Attributes of the top-level "btrace" element; the version string is
   validated by check_xml_btrace_version.  */

static const struct gdb_xml_attribute btrace_attributes[] = {
  { "version", GDB_XML_AF_NONE, NULL, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

/* Child elements of "btrace": zero or more "block" records, each
   handled by parse_xml_btrace_block.  */

static const struct gdb_xml_element btrace_children[] = {
  { "block", block_attributes, NULL,
    GDB_XML_EF_REPEATABLE | GDB_XML_EF_OPTIONAL, parse_xml_btrace_block, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

/* The root element of a btrace document.  */

static const struct gdb_xml_element btrace_elements[] = {
  { "btrace", btrace_attributes, btrace_children, GDB_XML_EF_NONE,
    check_xml_btrace_version, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};
1024
1025#endif /* defined (HAVE_LIBEXPAT) */
1026
/* See btrace.h.

   Parse BUFFER, an XML branch trace document, into a newly allocated
   block vector owned by the caller.  Raises an error if parsing fails
   or if XML support was not compiled in.  */

VEC (btrace_block_s) *
parse_xml_btrace (const char *buffer)
{
  VEC (btrace_block_s) *btrace = NULL;
  struct cleanup *cleanup;
  int errcode;

#if defined (HAVE_LIBEXPAT)

  cleanup = make_cleanup (VEC_cleanup (btrace_block_s), &btrace);
  errcode = gdb_xml_parse_quick (_("btrace"), "btrace.dtd", btrace_elements,
				 buffer, &btrace);
  if (errcode != 0)
    error (_("Error parsing branch trace."));

  /* Keep parse results.  */
  discard_cleanups (cleanup);

#else  /* !defined (HAVE_LIBEXPAT) */

  error (_("Cannot process branch trace.  XML parsing is not supported."));

#endif  /* !defined (HAVE_LIBEXPAT) */

  return btrace;
}
23a7fe75
MM
1055
1056/* See btrace.h. */
1057
1058const struct btrace_insn *
1059btrace_insn_get (const struct btrace_insn_iterator *it)
1060{
1061 const struct btrace_function *bfun;
1062 unsigned int index, end;
1063
1064 index = it->index;
1065 bfun = it->function;
1066
1067 /* The index is within the bounds of this function's instruction vector. */
1068 end = VEC_length (btrace_insn_s, bfun->insn);
1069 gdb_assert (0 < end);
1070 gdb_assert (index < end);
1071
1072 return VEC_index (btrace_insn_s, bfun->insn, index);
1073}
1074
1075/* See btrace.h. */
1076
1077unsigned int
1078btrace_insn_number (const struct btrace_insn_iterator *it)
1079{
1080 const struct btrace_function *bfun;
1081
1082 bfun = it->function;
1083 return bfun->insn_offset + it->index;
1084}
1085
1086/* See btrace.h. */
1087
1088void
1089btrace_insn_begin (struct btrace_insn_iterator *it,
1090 const struct btrace_thread_info *btinfo)
1091{
1092 const struct btrace_function *bfun;
1093
1094 bfun = btinfo->begin;
1095 if (bfun == NULL)
1096 error (_("No trace."));
1097
1098 it->function = bfun;
1099 it->index = 0;
1100}
1101
1102/* See btrace.h. */
1103
1104void
1105btrace_insn_end (struct btrace_insn_iterator *it,
1106 const struct btrace_thread_info *btinfo)
1107{
1108 const struct btrace_function *bfun;
1109 unsigned int length;
1110
1111 bfun = btinfo->end;
1112 if (bfun == NULL)
1113 error (_("No trace."));
1114
1115 /* The last instruction in the last function is the current instruction.
1116 We point to it - it is one past the end of the execution trace. */
1117 length = VEC_length (btrace_insn_s, bfun->insn);
1118
1119 it->function = bfun;
1120 it->index = length - 1;
1121}
1122
1123/* See btrace.h. */
1124
unsigned int
btrace_insn_next (struct btrace_insn_iterator *it, unsigned int stride)
{
  const struct btrace_function *bfun;
  unsigned int index, steps;

  bfun = it->function;
  steps = 0;
  index = it->index;

  /* Advance segment by segment until STRIDE instructions have been
     consumed or we run off the end of the trace.  */
  while (stride != 0)
    {
      unsigned int end, space, adv;

      end = VEC_length (btrace_insn_s, bfun->insn);

      /* Every segment holds at least one instruction and the iterator
	 always points inside its segment.  */
      gdb_assert (0 < end);
      gdb_assert (index < end);

      /* Compute the number of instructions remaining in this segment.  */
      space = end - index;

      /* Advance the iterator as far as possible within this segment.  */
      adv = min (space, stride);
      stride -= adv;
      index += adv;
      steps += adv;

      /* Move to the next function if we're at the end of this one.  */
      if (index == end)
	{
	  const struct btrace_function *next;

	  next = bfun->flow.next;
	  if (next == NULL)
	    {
	      /* We stepped past the last function.

		 Let's adjust the index to point to the last instruction in
		 the previous function.  */
	      index -= 1;
	      steps -= 1;
	      break;
	    }

	  /* We now point to the first instruction in the new function.  */
	  bfun = next;
	  index = 0;
	}

      /* We did make progress.  */
      gdb_assert (adv > 0);
    }

  /* Update the iterator.  */
  it->function = bfun;
  it->index = index;

  /* Return the number of instructions actually stepped (may be less
     than STRIDE if the end of the trace was reached).  */
  return steps;
}
1185
1186/* See btrace.h. */
1187
unsigned int
btrace_insn_prev (struct btrace_insn_iterator *it, unsigned int stride)
{
  const struct btrace_function *bfun;
  unsigned int index, steps;

  bfun = it->function;
  steps = 0;
  index = it->index;

  /* Step backwards segment by segment until STRIDE instructions have
     been consumed or we hit the beginning of the trace.  */
  while (stride != 0)
    {
      unsigned int adv;

      /* Move to the previous function if we're at the start of this one.  */
      if (index == 0)
	{
	  const struct btrace_function *prev;

	  prev = bfun->flow.prev;
	  if (prev == NULL)
	    break;

	  /* We point to one after the last instruction in the new function.  */
	  bfun = prev;
	  index = VEC_length (btrace_insn_s, bfun->insn);

	  /* There is at least one instruction in this function segment.  */
	  gdb_assert (index > 0);
	}

      /* Advance the iterator as far as possible within this segment.  */
      adv = min (index, stride);
      stride -= adv;
      index -= adv;
      steps += adv;

      /* We did make progress.  */
      gdb_assert (adv > 0);
    }

  /* Update the iterator.  */
  it->function = bfun;
  it->index = index;

  /* Return the number of instructions actually stepped back (may be
     less than STRIDE if the beginning of the trace was reached).  */
  return steps;
}
1235
1236/* See btrace.h. */
1237
int
btrace_insn_cmp (const struct btrace_insn_iterator *lhs,
		 const struct btrace_insn_iterator *rhs)
{
  unsigned int lnum = btrace_insn_number (lhs);
  unsigned int rnum = btrace_insn_number (rhs);

  /* Unsigned subtraction then cast: negative if LHS precedes RHS, zero
     if equal, positive if LHS follows RHS.  */
  return (int) (lnum - rnum);
}
1249
1250/* See btrace.h. */
1251
1252int
1253btrace_find_insn_by_number (struct btrace_insn_iterator *it,
1254 const struct btrace_thread_info *btinfo,
1255 unsigned int number)
1256{
1257 const struct btrace_function *bfun;
1258 unsigned int end;
1259
1260 for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
1261 if (bfun->insn_offset <= number)
1262 break;
1263
1264 if (bfun == NULL)
1265 return 0;
1266
1267 end = bfun->insn_offset + VEC_length (btrace_insn_s, bfun->insn);
1268 if (end <= number)
1269 return 0;
1270
1271 it->function = bfun;
1272 it->index = number - bfun->insn_offset;
1273
1274 return 1;
1275}
1276
1277/* See btrace.h. */
1278
1279const struct btrace_function *
1280btrace_call_get (const struct btrace_call_iterator *it)
1281{
1282 return it->function;
1283}
1284
1285/* See btrace.h. */
1286
unsigned int
btrace_call_number (const struct btrace_call_iterator *it)
{
  const struct btrace_thread_info *btinfo;
  const struct btrace_function *bfun;
  unsigned int insns;

  btinfo = it->btinfo;
  bfun = it->function;
  /* A non-NULL function carries its own number.  */
  if (bfun != NULL)
    return bfun->number;

  /* For the end iterator, i.e. bfun == NULL, we return one more than the
     number of the last function.  */
  bfun = btinfo->end;
  insns = VEC_length (btrace_insn_s, bfun->insn);

  /* If the function contains only a single instruction (i.e. the current
     instruction), it will be skipped and its number is already the number
     we seek.  */
  if (insns == 1)
    return bfun->number;

  /* Otherwise, return one more than the number of the last function.  */
  return bfun->number + 1;
}
1313
1314/* See btrace.h. */
1315
1316void
1317btrace_call_begin (struct btrace_call_iterator *it,
1318 const struct btrace_thread_info *btinfo)
1319{
1320 const struct btrace_function *bfun;
1321
1322 bfun = btinfo->begin;
1323 if (bfun == NULL)
1324 error (_("No trace."));
1325
1326 it->btinfo = btinfo;
1327 it->function = bfun;
1328}
1329
1330/* See btrace.h. */
1331
1332void
1333btrace_call_end (struct btrace_call_iterator *it,
1334 const struct btrace_thread_info *btinfo)
1335{
1336 const struct btrace_function *bfun;
1337
1338 bfun = btinfo->end;
1339 if (bfun == NULL)
1340 error (_("No trace."));
1341
1342 it->btinfo = btinfo;
1343 it->function = NULL;
1344}
1345
1346/* See btrace.h. */
1347
unsigned int
btrace_call_next (struct btrace_call_iterator *it, unsigned int stride)
{
  const struct btrace_function *bfun;
  unsigned int steps;

  bfun = it->function;
  steps = 0;
  while (bfun != NULL)
    {
      const struct btrace_function *next;
      unsigned int insns;

      next = bfun->flow.next;
      if (next == NULL)
	{
	  /* Ignore the last function if it only contains a single
	     (i.e. the current) instruction.  */
	  insns = VEC_length (btrace_insn_s, bfun->insn);
	  if (insns == 1)
	    steps -= 1;
	  /* NOTE: STEPS may wrap around to UINT_MAX here when it is
	     still zero; the subsequent STEPS += 1 wraps it back, so the
	     accounting works out - relies on well-defined unsigned
	     wraparound.  */
	}

      if (stride == steps)
	break;

      bfun = next;
      steps += 1;
    }

  it->function = bfun;
  /* Return the number of calls actually stepped.  */
  return steps;
}
1381
1382/* See btrace.h. */
1383
unsigned int
btrace_call_prev (struct btrace_call_iterator *it, unsigned int stride)
{
  const struct btrace_thread_info *btinfo;
  const struct btrace_function *bfun;
  unsigned int steps;

  bfun = it->function;
  steps = 0;

  /* Stepping back from the end iterator needs special handling: start
     from the last function segment of the trace.  */
  if (bfun == NULL)
    {
      unsigned int insns;

      btinfo = it->btinfo;
      bfun = btinfo->end;
      if (bfun == NULL)
	return 0;

      /* Ignore the last function if it only contains a single
	 (i.e. the current) instruction.  */
      insns = VEC_length (btrace_insn_s, bfun->insn);
      if (insns == 1)
	bfun = bfun->flow.prev;

      if (bfun == NULL)
	return 0;

      steps += 1;
    }

  /* Keep walking to the previous segment until STRIDE calls have been
     stepped or we reach the beginning of the trace.  */
  while (steps < stride)
    {
      const struct btrace_function *prev;

      prev = bfun->flow.prev;
      if (prev == NULL)
	break;

      bfun = prev;
      steps += 1;
    }

  it->function = bfun;
  /* Return the number of calls actually stepped back.  */
  return steps;
}
1430
1431/* See btrace.h. */
1432
int
btrace_call_cmp (const struct btrace_call_iterator *lhs,
		 const struct btrace_call_iterator *rhs)
{
  unsigned int lnum = btrace_call_number (lhs);
  unsigned int rnum = btrace_call_number (rhs);

  /* Unsigned subtraction then cast: negative if LHS precedes RHS, zero
     if equal, positive if LHS follows RHS.  */
  return (int) (lnum - rnum);
}
1444
1445/* See btrace.h. */
1446
1447int
1448btrace_find_call_by_number (struct btrace_call_iterator *it,
1449 const struct btrace_thread_info *btinfo,
1450 unsigned int number)
1451{
1452 const struct btrace_function *bfun;
1453
1454 for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
1455 {
1456 unsigned int bnum;
1457
1458 bnum = bfun->number;
1459 if (number == bnum)
1460 {
1461 it->btinfo = btinfo;
1462 it->function = bfun;
1463 return 1;
1464 }
1465
1466 /* Functions are ordered and numbered consecutively. We could bail out
1467 earlier. On the other hand, it is very unlikely that we search for
1468 a nonexistent function. */
1469 }
1470
1471 return 0;
1472}
1473
1474/* See btrace.h. */
1475
1476void
1477btrace_set_insn_history (struct btrace_thread_info *btinfo,
1478 const struct btrace_insn_iterator *begin,
1479 const struct btrace_insn_iterator *end)
1480{
1481 if (btinfo->insn_history == NULL)
1482 btinfo->insn_history = xzalloc (sizeof (*btinfo->insn_history));
1483
1484 btinfo->insn_history->begin = *begin;
1485 btinfo->insn_history->end = *end;
1486}
1487
1488/* See btrace.h. */
1489
1490void
1491btrace_set_call_history (struct btrace_thread_info *btinfo,
1492 const struct btrace_call_iterator *begin,
1493 const struct btrace_call_iterator *end)
1494{
1495 gdb_assert (begin->btinfo == end->btinfo);
1496
1497 if (btinfo->call_history == NULL)
1498 btinfo->call_history = xzalloc (sizeof (*btinfo->call_history));
1499
1500 btinfo->call_history->begin = *begin;
1501 btinfo->call_history->end = *end;
1502}
07bbe694
MM
1503
1504/* See btrace.h. */
1505
1506int
1507btrace_is_replaying (struct thread_info *tp)
1508{
1509 return tp->btrace.replay != NULL;
1510}
6e07b1d2
MM
1511
1512/* See btrace.h. */
1513
1514int
1515btrace_is_empty (struct thread_info *tp)
1516{
1517 struct btrace_insn_iterator begin, end;
1518 struct btrace_thread_info *btinfo;
1519
1520 btinfo = &tp->btrace;
1521
1522 if (btinfo->begin == NULL)
1523 return 1;
1524
1525 btrace_insn_begin (&begin, btinfo);
1526 btrace_insn_end (&end, btinfo);
1527
1528 return btrace_insn_cmp (&begin, &end) == 0;
1529}
This page took 0.258814 seconds and 4 git commands to generate.