record-btrace: extend unwinder
gdb/btrace.c (binutils-gdb.git)
1/* Branch trace support for GDB, the GNU debugger.
2
 3 Copyright (C) 2013-2014 Free Software Foundation, Inc.
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22#include "btrace.h"
23#include "gdbthread.h"
24#include "exceptions.h"
25#include "inferior.h"
26#include "target.h"
27#include "record.h"
28#include "symtab.h"
29#include "disasm.h"
30#include "source.h"
31#include "filenames.h"
32#include "xml-support.h"
33
34/* Print a record debug message. Use do ... while (0) to avoid ambiguities
35 when used in if statements. */
36
37#define DEBUG(msg, args...) \
38 do \
39 { \
40 if (record_debug != 0) \
41 fprintf_unfiltered (gdb_stdlog, \
42 "[btrace] " msg "\n", ##args); \
43 } \
44 while (0)
45
46#define DEBUG_FTRACE(msg, args...) DEBUG ("[ftrace] " msg, ##args)
47
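/* For illustration, consider a hypothetical caller (the names below are
   made up):

     if (verbose)
       DEBUG ("tracing enabled");
     else
       do_something_else ();

   If DEBUG expanded to a bare brace block, the semicolon after the macro
   call would end the "if" statement and leave the "else" dangling.  The
   do ... while (0) wrapper turns "DEBUG (...);" into a single statement,
   so it nests cleanly inside if/else.  */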
48/* Return the function name of a recorded function segment for printing.
49 This function never returns NULL. */
50
51static const char *
52ftrace_print_function_name (const struct btrace_function *bfun)
53{
54 struct minimal_symbol *msym;
55 struct symbol *sym;
56
57 msym = bfun->msym;
58 sym = bfun->sym;
59
60 if (sym != NULL)
61 return SYMBOL_PRINT_NAME (sym);
62
63 if (msym != NULL)
64 return SYMBOL_PRINT_NAME (msym);
65
66 return "<unknown>";
67}
68
69/* Return the file name of a recorded function segment for printing.
70 This function never returns NULL. */
71
72static const char *
73ftrace_print_filename (const struct btrace_function *bfun)
74{
75 struct symbol *sym;
76 const char *filename;
77
78 sym = bfun->sym;
79
80 if (sym != NULL)
81 filename = symtab_to_filename_for_display (sym->symtab);
82 else
83 filename = "<unknown>";
84
85 return filename;
86}
87
88/* Return a string representation of the address of an instruction.
89 This function never returns NULL. */
 90
91static const char *
92ftrace_print_insn_addr (const struct btrace_insn *insn)
93{
94 if (insn == NULL)
95 return "<nil>";
96
97 return core_addr_to_string_nz (insn->pc);
98}
99
100/* Print an ftrace debug status message.  */
101
102static void
103ftrace_debug (const struct btrace_function *bfun, const char *prefix)
104{
105 const char *fun, *file;
106 unsigned int ibegin, iend;
107 int lbegin, lend, level;
108
109 fun = ftrace_print_function_name (bfun);
110 file = ftrace_print_filename (bfun);
111 level = bfun->level;
112
113 lbegin = bfun->lbegin;
114 lend = bfun->lend;
115
116 ibegin = bfun->insn_offset;
117 iend = ibegin + VEC_length (btrace_insn_s, bfun->insn);
118
119 DEBUG_FTRACE ("%s: fun = %s, file = %s, level = %d, lines = [%d; %d], "
120 "insn = [%u; %u)", prefix, fun, file, level, lbegin, lend,
121 ibegin, iend);
122}
123
124/* Return non-zero if BFUN does not match MFUN and FUN,
125 return zero otherwise. */
126
127static int
128ftrace_function_switched (const struct btrace_function *bfun,
129 const struct minimal_symbol *mfun,
130 const struct symbol *fun)
131{
132 struct minimal_symbol *msym;
133 struct symbol *sym;
134
135 msym = bfun->msym;
136 sym = bfun->sym;
137
138 /* If the minimal symbol changed, we certainly switched functions. */
139 if (mfun != NULL && msym != NULL
140 && strcmp (SYMBOL_LINKAGE_NAME (mfun), SYMBOL_LINKAGE_NAME (msym)) != 0)
141 return 1;
142
143 /* If the symbol changed, we certainly switched functions. */
144 if (fun != NULL && sym != NULL)
145 {
146 const char *bfname, *fname;
147
148 /* Check the function name. */
149 if (strcmp (SYMBOL_LINKAGE_NAME (fun), SYMBOL_LINKAGE_NAME (sym)) != 0)
150 return 1;
151
152 /* Check the location of those functions, as well. */
153 bfname = symtab_to_fullname (sym->symtab);
154 fname = symtab_to_fullname (fun->symtab);
155 if (filename_cmp (fname, bfname) != 0)
156 return 1;
157 }
158
159 /* If we lost symbol information, we switched functions. */
160 if (!(msym == NULL && sym == NULL) && mfun == NULL && fun == NULL)
161 return 1;
162
163 /* If we gained symbol information, we switched functions. */
164 if (msym == NULL && sym == NULL && !(mfun == NULL && fun == NULL))
165 return 1;
166
167 return 0;
168}
169
170/* Return non-zero if we should skip this file when generating the function
171 call history, zero otherwise.
172 We would want to do that if, say, a macro that is defined in another file
173 is expanded in this function. */
174
175static int
23a7fe75 176ftrace_skip_file (const struct btrace_function *bfun, const char *fullname)
02d27625
MM
177{
178 struct symbol *sym;
179 const char *bfile;
180
181 sym = bfun->sym;
182 if (sym == NULL)
183 return 1;
 184
185 bfile = symtab_to_fullname (sym->symtab);
186
187 return (filename_cmp (bfile, fullname) != 0);
188}
189
190/* Allocate and initialize a new branch trace function segment.
191 PREV is the chronologically preceding function segment.
192 MFUN and FUN are the symbol information we have for this function. */
193
194static struct btrace_function *
195ftrace_new_function (struct btrace_function *prev,
196 struct minimal_symbol *mfun,
197 struct symbol *fun)
198{
199 struct btrace_function *bfun;
200
201 bfun = xzalloc (sizeof (*bfun));
202
203 bfun->msym = mfun;
204 bfun->sym = fun;
205 bfun->flow.prev = prev;
206
207 /* We start with the identities of min and max, respectively. */
208 bfun->lbegin = INT_MAX;
209 bfun->lend = INT_MIN;
 210
211 if (prev == NULL)
212 {
213 /* Start counting at one. */
214 bfun->number = 1;
215 bfun->insn_offset = 1;
216 }
217 else
218 {
219 gdb_assert (prev->flow.next == NULL);
220 prev->flow.next = bfun;
 221
222 bfun->number = prev->number + 1;
223 bfun->insn_offset = (prev->insn_offset
224 + VEC_length (btrace_insn_s, prev->insn));
225 }
226
227 return bfun;
228}
229
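/* Example with made-up numbers: if the first function segment of a trace
   gathers three instructions, it has number 1 and insn_offset 1.  A
   segment created after it then gets number 2 and insn_offset 4
   (1 + 3), keeping instruction numbers contiguous across segments.  */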
230/* Update the UP field of a function segment.  */
 231
232static void
233ftrace_update_caller (struct btrace_function *bfun,
234 struct btrace_function *caller,
235 enum btrace_function_flag flags)
236{
237 if (bfun->up != NULL)
238 ftrace_debug (bfun, "updating caller");
 239
240 bfun->up = caller;
241 bfun->flags = flags;
242
243 ftrace_debug (bfun, "set caller");
244}
245
246/* Fix up the caller for all segments of a function. */
247
248static void
249ftrace_fixup_caller (struct btrace_function *bfun,
250 struct btrace_function *caller,
251 enum btrace_function_flag flags)
252{
253 struct btrace_function *prev, *next;
254
255 ftrace_update_caller (bfun, caller, flags);
256
257 /* Update all function segments belonging to the same function. */
258 for (prev = bfun->segment.prev; prev != NULL; prev = prev->segment.prev)
259 ftrace_update_caller (prev, caller, flags);
260
261 for (next = bfun->segment.next; next != NULL; next = next->segment.next)
262 ftrace_update_caller (next, caller, flags);
263}
264
265/* Add a new function segment for a call.
266 CALLER is the chronologically preceding function segment.
267 MFUN and FUN are the symbol information we have for this function. */
268
269static struct btrace_function *
270ftrace_new_call (struct btrace_function *caller,
271 struct minimal_symbol *mfun,
272 struct symbol *fun)
273{
274 struct btrace_function *bfun;
275
276 bfun = ftrace_new_function (caller, mfun, fun);
277 bfun->up = caller;
278 bfun->level = caller->level + 1;
279
280 ftrace_debug (bfun, "new call");
281
282 return bfun;
283}
284
285/* Add a new function segment for a tail call.
286 CALLER is the chronologically preceding function segment.
287 MFUN and FUN are the symbol information we have for this function. */
288
289static struct btrace_function *
290ftrace_new_tailcall (struct btrace_function *caller,
291 struct minimal_symbol *mfun,
292 struct symbol *fun)
293{
294 struct btrace_function *bfun;
 295
296 bfun = ftrace_new_function (caller, mfun, fun);
297 bfun->up = caller;
298 bfun->level = caller->level + 1;
299 bfun->flags |= BFUN_UP_LINKS_TO_TAILCALL;
 300
301 ftrace_debug (bfun, "new tail call");
302
303 return bfun;
304}
305
306/* Find the innermost caller in the back trace of BFUN with MFUN/FUN
307 symbol information. */
308
309static struct btrace_function *
310ftrace_find_caller (struct btrace_function *bfun,
311 struct minimal_symbol *mfun,
312 struct symbol *fun)
313{
314 for (; bfun != NULL; bfun = bfun->up)
315 {
316 /* Skip functions with incompatible symbol information. */
317 if (ftrace_function_switched (bfun, mfun, fun))
318 continue;
319
320 /* This is the function segment we're looking for. */
321 break;
322 }
323
324 return bfun;
325}
326
327/* Find the innermost caller in the back trace of BFUN, skipping all
328 function segments that do not end with a call instruction (e.g.
329 tail calls ending with a jump). */
330
331static struct btrace_function *
332ftrace_find_call (struct gdbarch *gdbarch, struct btrace_function *bfun)
333{
334 for (; bfun != NULL; bfun = bfun->up)
 335 {
 336 struct btrace_insn *last;
337 CORE_ADDR pc;
338
339 /* We do not allow empty function segments. */
340 gdb_assert (!VEC_empty (btrace_insn_s, bfun->insn));
341
342 last = VEC_last (btrace_insn_s, bfun->insn);
343 pc = last->pc;
 344
345 if (gdbarch_insn_is_call (gdbarch, pc))
346 break;
347 }
348
349 return bfun;
350}
351
352/* Add a continuation segment for a function into which we return.
353 PREV is the chronologically preceding function segment.
354 MFUN and FUN are the symbol information we have for this function. */
355
356static struct btrace_function *
357ftrace_new_return (struct gdbarch *gdbarch,
358 struct btrace_function *prev,
359 struct minimal_symbol *mfun,
360 struct symbol *fun)
361{
362 struct btrace_function *bfun, *caller;
363
364 bfun = ftrace_new_function (prev, mfun, fun);
365
366 /* It is important to start at PREV's caller. Otherwise, we might find
367 PREV itself, if PREV is a recursive function. */
368 caller = ftrace_find_caller (prev->up, mfun, fun);
369 if (caller != NULL)
370 {
371 /* The caller of PREV is the preceding btrace function segment in this
372 function instance. */
373 gdb_assert (caller->segment.next == NULL);
374
375 caller->segment.next = bfun;
376 bfun->segment.prev = caller;
377
378 /* Maintain the function level. */
379 bfun->level = caller->level;
380
381 /* Maintain the call stack. */
382 bfun->up = caller->up;
383 bfun->flags = caller->flags;
384
385 ftrace_debug (bfun, "new return");
386 }
387 else
388 {
389 /* We did not find a caller. This could mean that something went
390 wrong or that the call is simply not included in the trace. */
 391
392 /* Let's search for some actual call. */
393 caller = ftrace_find_call (gdbarch, prev->up);
394 if (caller == NULL)
 395 {
396 /* There is no call in PREV's back trace. We assume that the
397 branch trace did not include it. */
398
399 /* Let's find the topmost call function - this skips tail calls. */
400 while (prev->up != NULL)
401 prev = prev->up;
 402
403 /* We maintain levels for a series of returns for which we have
404 not seen the calls.
405 We start at the preceding function's level in case this has
406 already been a return for which we have not seen the call.
407 We start at level 0 otherwise, to handle tail calls correctly. */
408 bfun->level = min (0, prev->level) - 1;
409
410 /* Fix up the call stack for PREV. */
411 ftrace_fixup_caller (prev, bfun, BFUN_UP_LINKS_TO_RET);
412
413 ftrace_debug (bfun, "new return - no caller");
414 }
415 else
 416 {
417 /* There is a call in PREV's back trace to which we should have
418 returned. Let's remain at this level. */
419 bfun->level = prev->level;
 420
 421 ftrace_debug (bfun, "new return - unknown caller");
 422 }
423 }
424
425 return bfun;
426}
427
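/* Sketch of the level handling above, using a made-up trace: if the
   recording starts inside a callee and the first control transfer we see
   is a return, no caller is on record.  With PREV at level 0 and no call
   in its back trace, the new segment gets level min (0, 0) - 1 = -1; a
   second unmatched return would yield min (0, -1) - 1 = -2, and so on.
   These negative levels are compensated in btrace_compute_ftrace, which
   turns the minimum into the global level offset.  */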
428/* Add a new function segment for a function switch.
429 PREV is the chronologically preceding function segment.
430 MFUN and FUN are the symbol information we have for this function. */
431
432static struct btrace_function *
433ftrace_new_switch (struct btrace_function *prev,
434 struct minimal_symbol *mfun,
435 struct symbol *fun)
436{
437 struct btrace_function *bfun;
438
439 /* This is an unexplained function switch. The call stack will likely
440 be wrong at this point. */
441 bfun = ftrace_new_function (prev, mfun, fun);
 442
443 /* We keep the function level. */
444 bfun->level = prev->level;
 445
446 ftrace_debug (bfun, "new switch");
447
448 return bfun;
449}
450
451/* Update BFUN with respect to the instruction at PC. This may create new
452 function segments.
453 Return the chronologically latest function segment, never NULL. */
454
455static struct btrace_function *
456ftrace_update_function (struct gdbarch *gdbarch,
457 struct btrace_function *bfun, CORE_ADDR pc)
458{
459 struct bound_minimal_symbol bmfun;
460 struct minimal_symbol *mfun;
461 struct symbol *fun;
462 struct btrace_insn *last;
463
464 /* Try to determine the function we're in. We use both types of symbols
465 to avoid surprises when we sometimes get a full symbol and sometimes
466 only a minimal symbol. */
467 fun = find_pc_function (pc);
468 bmfun = lookup_minimal_symbol_by_pc (pc);
469 mfun = bmfun.minsym;
470
471 if (fun == NULL && mfun == NULL)
472 DEBUG_FTRACE ("no symbol at %s", core_addr_to_string_nz (pc));
473
474 /* If we didn't have a function before, we create one. */
475 if (bfun == NULL)
476 return ftrace_new_function (bfun, mfun, fun);
477
478 /* Check the last instruction, if we have one.
479 We do this check first, since it allows us to fill in the call stack
480 links in addition to the normal flow links. */
481 last = NULL;
482 if (!VEC_empty (btrace_insn_s, bfun->insn))
483 last = VEC_last (btrace_insn_s, bfun->insn);
484
485 if (last != NULL)
486 {
487 CORE_ADDR lpc;
488
489 lpc = last->pc;
490
491 /* Check for returns. */
492 if (gdbarch_insn_is_ret (gdbarch, lpc))
493 return ftrace_new_return (gdbarch, bfun, mfun, fun);
494
495 /* Check for calls. */
496 if (gdbarch_insn_is_call (gdbarch, lpc))
 497 {
498 int size;
499
500 size = gdb_insn_length (gdbarch, lpc);
501
502 /* Ignore calls to the next instruction. They are used for PIC. */
503 if (lpc + size != pc)
504 return ftrace_new_call (bfun, mfun, fun);
 505 }
506 }
507
508 /* Check if we're switching functions for some other reason. */
509 if (ftrace_function_switched (bfun, mfun, fun))
510 {
511 DEBUG_FTRACE ("switching from %s in %s at %s",
512 ftrace_print_insn_addr (last),
513 ftrace_print_function_name (bfun),
514 ftrace_print_filename (bfun));
 515
 516 if (last != NULL)
 517 {
518 CORE_ADDR start, lpc;
519
520 start = get_pc_function_start (pc);
521
522 /* If we can't determine the function for PC, we treat a jump at
523 the end of the block as tail call. */
524 if (start == 0)
525 start = pc;
526
527 lpc = last->pc;
528
529 /* Jumps indicate optimized tail calls. */
530 if (start == pc && gdbarch_insn_is_jump (gdbarch, lpc))
531 return ftrace_new_tailcall (bfun, mfun, fun);
532 }
533
534 return ftrace_new_switch (bfun, mfun, fun);
535 }
536
537 return bfun;
538}
539
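/* Summarizing the classification above for a hypothetical transition
   from the last instruction LPC to the new PC: a return at LPC starts a
   return segment; a call at LPC starts a call segment unless it targets
   the immediately following instruction (a PIC idiom for reading the
   current address); a jump at LPC that lands on a function start (or on
   an address with no known function) is treated as an optimized tail
   call; any other change of symbol information becomes an unexplained
   function switch.  */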
540/* Update BFUN's source range with respect to the instruction at PC. */
541
542static void
543ftrace_update_lines (struct btrace_function *bfun, CORE_ADDR pc)
544{
545 struct symtab_and_line sal;
546 const char *fullname;
547
548 sal = find_pc_line (pc, 0);
549 if (sal.symtab == NULL || sal.line == 0)
550 {
551 DEBUG_FTRACE ("no lines at %s", core_addr_to_string_nz (pc));
552 return;
553 }
554
555 /* Check if we switched files. This could happen if, say, a macro that
556 is defined in another file is expanded here. */
557 fullname = symtab_to_fullname (sal.symtab);
558 if (ftrace_skip_file (bfun, fullname))
559 {
560 DEBUG_FTRACE ("ignoring file at %s, file=%s",
561 core_addr_to_string_nz (pc), fullname);
562 return;
563 }
564
565 /* Update the line range. */
566 bfun->lbegin = min (bfun->lbegin, sal.line);
567 bfun->lend = max (bfun->lend, sal.line);
568
569 if (record_debug > 1)
570 ftrace_debug (bfun, "update lines");
571}
572
573/* Add the instruction at PC to BFUN's instructions. */
574
575static void
576ftrace_update_insns (struct btrace_function *bfun, CORE_ADDR pc)
577{
578 struct btrace_insn *insn;
579
580 insn = VEC_safe_push (btrace_insn_s, bfun->insn, NULL);
581 insn->pc = pc;
582
583 if (record_debug > 1)
584 ftrace_debug (bfun, "update insn");
585}
586
587/* Compute the function branch trace from a block branch trace BTRACE for
588 a thread given by BTINFO. */
589
590static void
591btrace_compute_ftrace (struct btrace_thread_info *btinfo,
592 VEC (btrace_block_s) *btrace)
593{
594 struct btrace_function *begin, *end;
595 struct gdbarch *gdbarch;
596 unsigned int blk;
597 int level;
598
599 DEBUG ("compute ftrace");
600
601 gdbarch = target_gdbarch ();
602 begin = NULL;
603 end = NULL;
604 level = INT_MAX;
605 blk = VEC_length (btrace_block_s, btrace);
606
607 while (blk != 0)
608 {
609 btrace_block_s *block;
610 CORE_ADDR pc;
611
612 blk -= 1;
613
614 block = VEC_index (btrace_block_s, btrace, blk);
615 pc = block->begin;
616
617 for (;;)
618 {
619 int size;
620
621 /* We should hit the end of the block. Warn if we went too far. */
622 if (block->end < pc)
623 {
624 warning (_("Recorded trace may be corrupted around %s."),
625 core_addr_to_string_nz (pc));
626 break;
627 }
628
629 end = ftrace_update_function (gdbarch, end, pc);
630 if (begin == NULL)
631 begin = end;
632
633 /* Maintain the function level offset.
634 For all but the last block, we do it here. */
635 if (blk != 0)
636 level = min (level, end->level);
637
638 ftrace_update_insns (end, pc);
639 ftrace_update_lines (end, pc);
640
641 /* We're done once we pushed the instruction at the end. */
642 if (block->end == pc)
643 break;
644
645 size = gdb_insn_length (gdbarch, pc);
646
647 /* Make sure we terminate if we fail to compute the size. */
648 if (size <= 0)
649 {
650 warning (_("Recorded trace may be incomplete around %s."),
651 core_addr_to_string_nz (pc));
652 break;
653 }
654
655 pc += size;
656
657 /* Maintain the function level offset.
658 For the last block, we do it here to not consider the last
659 instruction.
660 Since the last instruction corresponds to the current instruction
661 and is not really part of the execution history, it shouldn't
662 affect the level. */
663 if (blk == 0)
664 level = min (level, end->level);
 665 }
666 }
667
668 btinfo->begin = begin;
669 btinfo->end = end;
670
671 /* LEVEL is the minimal function level of all btrace function segments.
672 Define the global level offset to -LEVEL so all function levels are
673 normalized to start at zero. */
674 btinfo->level = -level;
675}
676
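/* Worked example for the level normalization above, with a made-up
   trace: if the segments ended up at levels 0, -1 and -2 because the
   trace began in a nested call and only the returns were seen, LEVEL is
   -2 and btinfo->level becomes 2.  Adding that offset to any segment's
   level yields a depth that starts at zero for the outermost recorded
   frame.  */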
677/* See btrace.h. */
678
679void
680btrace_enable (struct thread_info *tp)
681{
682 if (tp->btrace.target != NULL)
683 return;
684
685 if (!target_supports_btrace ())
686 error (_("Target does not support branch tracing."));
687
688 DEBUG ("enable thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
689
690 tp->btrace.target = target_enable_btrace (tp->ptid);
691}
692
693/* See btrace.h. */
694
695void
696btrace_disable (struct thread_info *tp)
697{
698 struct btrace_thread_info *btp = &tp->btrace;
699 int errcode = 0;
700
701 if (btp->target == NULL)
702 return;
703
704 DEBUG ("disable thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
705
706 target_disable_btrace (btp->target);
707 btp->target = NULL;
708
709 btrace_clear (tp);
710}
711
712/* See btrace.h. */
713
714void
715btrace_teardown (struct thread_info *tp)
716{
717 struct btrace_thread_info *btp = &tp->btrace;
718 int errcode = 0;
719
720 if (btp->target == NULL)
721 return;
722
723 DEBUG ("teardown thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
724
725 target_teardown_btrace (btp->target);
726 btp->target = NULL;
727
728 btrace_clear (tp);
729}
730
731/* See btrace.h. */
732
733void
734btrace_fetch (struct thread_info *tp)
735{
736 struct btrace_thread_info *btinfo;
737 VEC (btrace_block_s) *btrace;
 738 struct cleanup *cleanup;
739
740 DEBUG ("fetch thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
741
742 btinfo = &tp->btrace;
743 if (btinfo->target == NULL)
744 return;
745
 746 btrace = target_read_btrace (btinfo->target, BTRACE_READ_NEW);
 747 cleanup = make_cleanup (VEC_cleanup (btrace_block_s), &btrace);
 748
749 if (!VEC_empty (btrace_block_s, btrace))
750 {
751 btrace_clear (tp);
752 btrace_compute_ftrace (btinfo, btrace);
753 }
 754
 755 do_cleanups (cleanup);
756}
757
758/* See btrace.h. */
759
760void
761btrace_clear (struct thread_info *tp)
762{
763 struct btrace_thread_info *btinfo;
 764 struct btrace_function *it, *trash;
765
766 DEBUG ("clear thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
767
768 /* Make sure btrace frames that may hold a pointer into the branch
769 trace data are destroyed. */
770 reinit_frame_cache ();
771
772 btinfo = &tp->btrace;
773
774 it = btinfo->begin;
775 while (it != NULL)
776 {
777 trash = it;
778 it = it->flow.next;
 779
780 xfree (trash);
781 }
782
783 btinfo->begin = NULL;
784 btinfo->end = NULL;
785
786 xfree (btinfo->insn_history);
787 xfree (btinfo->call_history);
 788 xfree (btinfo->replay);
789
790 btinfo->insn_history = NULL;
791 btinfo->call_history = NULL;
 792 btinfo->replay = NULL;
793}
794
795/* See btrace.h. */
796
797void
798btrace_free_objfile (struct objfile *objfile)
799{
800 struct thread_info *tp;
801
802 DEBUG ("free objfile");
803
804 ALL_THREADS (tp)
805 btrace_clear (tp);
806}
807
808#if defined (HAVE_LIBEXPAT)
809
810/* Check the btrace document version. */
811
812static void
813check_xml_btrace_version (struct gdb_xml_parser *parser,
814 const struct gdb_xml_element *element,
815 void *user_data, VEC (gdb_xml_value_s) *attributes)
816{
817 const char *version = xml_find_attribute (attributes, "version")->value;
818
819 if (strcmp (version, "1.0") != 0)
820 gdb_xml_error (parser, _("Unsupported btrace version: \"%s\""), version);
821}
822
823/* Parse a btrace "block" xml record. */
824
825static void
826parse_xml_btrace_block (struct gdb_xml_parser *parser,
827 const struct gdb_xml_element *element,
828 void *user_data, VEC (gdb_xml_value_s) *attributes)
829{
830 VEC (btrace_block_s) **btrace;
831 struct btrace_block *block;
832 ULONGEST *begin, *end;
833
834 btrace = user_data;
835 block = VEC_safe_push (btrace_block_s, *btrace, NULL);
836
837 begin = xml_find_attribute (attributes, "begin")->value;
838 end = xml_find_attribute (attributes, "end")->value;
839
840 block->begin = *begin;
841 block->end = *end;
842}
843
844static const struct gdb_xml_attribute block_attributes[] = {
845 { "begin", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
846 { "end", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
847 { NULL, GDB_XML_AF_NONE, NULL, NULL }
848};
849
850static const struct gdb_xml_attribute btrace_attributes[] = {
851 { "version", GDB_XML_AF_NONE, NULL, NULL },
852 { NULL, GDB_XML_AF_NONE, NULL, NULL }
853};
854
855static const struct gdb_xml_element btrace_children[] = {
856 { "block", block_attributes, NULL,
857 GDB_XML_EF_REPEATABLE | GDB_XML_EF_OPTIONAL, parse_xml_btrace_block, NULL },
858 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
859};
860
861static const struct gdb_xml_element btrace_elements[] = {
862 { "btrace", btrace_attributes, btrace_children, GDB_XML_EF_NONE,
863 check_xml_btrace_version, NULL },
864 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
865};
866
867#endif /* defined (HAVE_LIBEXPAT) */
868
869/* See btrace.h. */
870
871VEC (btrace_block_s) *
872parse_xml_btrace (const char *buffer)
873{
874 VEC (btrace_block_s) *btrace = NULL;
875 struct cleanup *cleanup;
876 int errcode;
877
878#if defined (HAVE_LIBEXPAT)
879
880 cleanup = make_cleanup (VEC_cleanup (btrace_block_s), &btrace);
881 errcode = gdb_xml_parse_quick (_("btrace"), "btrace.dtd", btrace_elements,
882 buffer, &btrace);
883 if (errcode != 0)
884 {
885 do_cleanups (cleanup);
886 return NULL;
887 }
888
889 /* Keep parse results. */
890 discard_cleanups (cleanup);
891
892#else /* !defined (HAVE_LIBEXPAT) */
893
894 error (_("Cannot process branch trace. XML parsing is not supported."));
895
896#endif /* !defined (HAVE_LIBEXPAT) */
897
898 return btrace;
899}
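/* An example of the kind of document this function accepts (addresses
   are made up):

     <btrace version="1.0">
       <block begin="0x4004d6" end="0x4004e1"/>
       <block begin="0x4004b0" end="0x4004c7"/>
     </btrace>

   Each block describes a range of instructions that was executed
   sequentially from "begin" to "end".  The block containing the current
   instruction is expected to come first, which is why
   btrace_compute_ftrace walks the resulting vector from the back.  */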
900
901/* See btrace.h. */
902
903const struct btrace_insn *
904btrace_insn_get (const struct btrace_insn_iterator *it)
905{
906 const struct btrace_function *bfun;
907 unsigned int index, end;
908
909 index = it->index;
910 bfun = it->function;
911
912 /* The index is within the bounds of this function's instruction vector. */
913 end = VEC_length (btrace_insn_s, bfun->insn);
914 gdb_assert (0 < end);
915 gdb_assert (index < end);
916
917 return VEC_index (btrace_insn_s, bfun->insn, index);
918}
919
920/* See btrace.h. */
921
922unsigned int
923btrace_insn_number (const struct btrace_insn_iterator *it)
924{
925 const struct btrace_function *bfun;
926
927 bfun = it->function;
928 return bfun->insn_offset + it->index;
929}
930
931/* See btrace.h. */
932
933void
934btrace_insn_begin (struct btrace_insn_iterator *it,
935 const struct btrace_thread_info *btinfo)
936{
937 const struct btrace_function *bfun;
938
939 bfun = btinfo->begin;
940 if (bfun == NULL)
941 error (_("No trace."));
942
943 it->function = bfun;
944 it->index = 0;
945}
946
947/* See btrace.h. */
948
949void
950btrace_insn_end (struct btrace_insn_iterator *it,
951 const struct btrace_thread_info *btinfo)
952{
953 const struct btrace_function *bfun;
954 unsigned int length;
955
956 bfun = btinfo->end;
957 if (bfun == NULL)
958 error (_("No trace."));
959
960 /* The last instruction in the last function is the current instruction.
961 We point to it - it is one past the end of the execution trace. */
962 length = VEC_length (btrace_insn_s, bfun->insn);
963
964 it->function = bfun;
965 it->index = length - 1;
966}
967
968/* See btrace.h. */
969
970unsigned int
971btrace_insn_next (struct btrace_insn_iterator *it, unsigned int stride)
972{
973 const struct btrace_function *bfun;
974 unsigned int index, steps;
975
976 bfun = it->function;
977 steps = 0;
978 index = it->index;
979
980 while (stride != 0)
981 {
982 unsigned int end, space, adv;
983
984 end = VEC_length (btrace_insn_s, bfun->insn);
985
986 gdb_assert (0 < end);
987 gdb_assert (index < end);
988
989 /* Compute the number of instructions remaining in this segment. */
990 space = end - index;
991
992 /* Advance the iterator as far as possible within this segment. */
993 adv = min (space, stride);
994 stride -= adv;
995 index += adv;
996 steps += adv;
997
998 /* Move to the next function if we're at the end of this one. */
999 if (index == end)
1000 {
1001 const struct btrace_function *next;
1002
1003 next = bfun->flow.next;
1004 if (next == NULL)
1005 {
1006 /* We stepped past the last function.
1007
1008 Let's adjust the index to point to the last instruction in
1009 the previous function. */
1010 index -= 1;
1011 steps -= 1;
1012 break;
1013 }
1014
1015 /* We now point to the first instruction in the new function. */
1016 bfun = next;
1017 index = 0;
1018 }
1019
1020 /* We did make progress. */
1021 gdb_assert (adv > 0);
1022 }
1023
1024 /* Update the iterator. */
1025 it->function = bfun;
1026 it->index = index;
1027
1028 return steps;
1029}
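/* A sketch of how the iterator functions combine (hypothetical user
   code, error handling omitted):

     struct btrace_insn_iterator it;

     btrace_insn_begin (&it, btinfo);
     do
       {
         const struct btrace_insn *insn = btrace_insn_get (&it);

         consume (insn->pc);
       }
     while (btrace_insn_next (&it, 1) != 0);

   btrace_insn_next returns the number of instructions actually stepped,
   so the loop ends once the iterator sits on the last instruction and
   can no longer advance.  */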
1030
1031/* See btrace.h. */
1032
1033unsigned int
1034btrace_insn_prev (struct btrace_insn_iterator *it, unsigned int stride)
1035{
1036 const struct btrace_function *bfun;
1037 unsigned int index, steps;
1038
1039 bfun = it->function;
1040 steps = 0;
1041 index = it->index;
1042
1043 while (stride != 0)
1044 {
1045 unsigned int adv;
1046
1047 /* Move to the previous function if we're at the start of this one. */
1048 if (index == 0)
1049 {
1050 const struct btrace_function *prev;
1051
1052 prev = bfun->flow.prev;
1053 if (prev == NULL)
1054 break;
1055
1056 /* We point to one after the last instruction in the new function. */
1057 bfun = prev;
1058 index = VEC_length (btrace_insn_s, bfun->insn);
1059
1060 /* There is at least one instruction in this function segment. */
1061 gdb_assert (index > 0);
1062 }
1063
1064 /* Advance the iterator as far as possible within this segment. */
1065 adv = min (index, stride);
1066 stride -= adv;
1067 index -= adv;
1068 steps += adv;
1069
1070 /* We did make progress. */
1071 gdb_assert (adv > 0);
1072 }
1073
1074 /* Update the iterator. */
1075 it->function = bfun;
1076 it->index = index;
1077
1078 return steps;
1079}
1080
1081/* See btrace.h. */
1082
1083int
1084btrace_insn_cmp (const struct btrace_insn_iterator *lhs,
1085 const struct btrace_insn_iterator *rhs)
1086{
1087 unsigned int lnum, rnum;
1088
1089 lnum = btrace_insn_number (lhs);
1090 rnum = btrace_insn_number (rhs);
1091
1092 return (int) (lnum - rnum);
1093}
1094
1095/* See btrace.h. */
1096
1097int
1098btrace_find_insn_by_number (struct btrace_insn_iterator *it,
1099 const struct btrace_thread_info *btinfo,
1100 unsigned int number)
1101{
1102 const struct btrace_function *bfun;
1103 unsigned int end;
1104
1105 for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
1106 if (bfun->insn_offset <= number)
1107 break;
1108
1109 if (bfun == NULL)
1110 return 0;
1111
1112 end = bfun->insn_offset + VEC_length (btrace_insn_s, bfun->insn);
1113 if (end <= number)
1114 return 0;
1115
1116 it->function = bfun;
1117 it->index = number - bfun->insn_offset;
1118
1119 return 1;
1120}
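/* Example with made-up numbers: a segment with insn_offset 5 holding
   four instructions covers instruction numbers 5 to 8.  Looking up
   number 7 selects that segment with index 7 - 5 = 2; looking up
   number 9 computes end = 5 + 4 = 9 <= 9 and therefore fails.  */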
1121
1122/* See btrace.h. */
1123
1124const struct btrace_function *
1125btrace_call_get (const struct btrace_call_iterator *it)
1126{
1127 return it->function;
1128}
1129
1130/* See btrace.h. */
1131
1132unsigned int
1133btrace_call_number (const struct btrace_call_iterator *it)
1134{
1135 const struct btrace_thread_info *btinfo;
1136 const struct btrace_function *bfun;
1137 unsigned int insns;
1138
1139 btinfo = it->btinfo;
1140 bfun = it->function;
1141 if (bfun != NULL)
1142 return bfun->number;
1143
1144 /* For the end iterator, i.e. bfun == NULL, we return one more than the
1145 number of the last function. */
1146 bfun = btinfo->end;
1147 insns = VEC_length (btrace_insn_s, bfun->insn);
1148
1149 /* If the function contains only a single instruction (i.e. the current
1150 instruction), it will be skipped and its number is already the number
1151 we seek. */
1152 if (insns == 1)
1153 return bfun->number;
1154
1155 /* Otherwise, return one more than the number of the last function. */
1156 return bfun->number + 1;
1157}
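/* Example for the end iterator, with made-up numbers: if the trace holds
   five function segments and the last one contains only the current
   instruction, that segment is skipped by the call history and the end
   iterator reports number 5.  If the last segment contains more than one
   instruction, the end iterator reports 6, one past the last function.  */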
1158
1159/* See btrace.h. */
1160
1161void
1162btrace_call_begin (struct btrace_call_iterator *it,
1163 const struct btrace_thread_info *btinfo)
1164{
1165 const struct btrace_function *bfun;
1166
1167 bfun = btinfo->begin;
1168 if (bfun == NULL)
1169 error (_("No trace."));
1170
1171 it->btinfo = btinfo;
1172 it->function = bfun;
1173}
1174
1175/* See btrace.h. */
1176
1177void
1178btrace_call_end (struct btrace_call_iterator *it,
1179 const struct btrace_thread_info *btinfo)
1180{
1181 const struct btrace_function *bfun;
1182
1183 bfun = btinfo->end;
1184 if (bfun == NULL)
1185 error (_("No trace."));
1186
1187 it->btinfo = btinfo;
1188 it->function = NULL;
1189}
1190
1191/* See btrace.h. */
1192
1193unsigned int
1194btrace_call_next (struct btrace_call_iterator *it, unsigned int stride)
1195{
1196 const struct btrace_function *bfun;
1197 unsigned int steps;
1198
1199 bfun = it->function;
1200 steps = 0;
1201 while (bfun != NULL)
1202 {
1203 const struct btrace_function *next;
1204 unsigned int insns;
1205
1206 next = bfun->flow.next;
1207 if (next == NULL)
1208 {
1209 /* Ignore the last function if it only contains a single
1210 (i.e. the current) instruction. */
1211 insns = VEC_length (btrace_insn_s, bfun->insn);
1212 if (insns == 1)
1213 steps -= 1;
1214 }
1215
1216 if (stride == steps)
1217 break;
1218
1219 bfun = next;
1220 steps += 1;
1221 }
1222
1223 it->function = bfun;
1224 return steps;
1225}
1226
1227/* See btrace.h. */
1228
1229unsigned int
1230btrace_call_prev (struct btrace_call_iterator *it, unsigned int stride)
1231{
1232 const struct btrace_thread_info *btinfo;
1233 const struct btrace_function *bfun;
1234 unsigned int steps;
1235
1236 bfun = it->function;
1237 steps = 0;
1238
1239 if (bfun == NULL)
1240 {
1241 unsigned int insns;
1242
1243 btinfo = it->btinfo;
1244 bfun = btinfo->end;
1245 if (bfun == NULL)
1246 return 0;
1247
1248 /* Ignore the last function if it only contains a single
1249 (i.e. the current) instruction. */
1250 insns = VEC_length (btrace_insn_s, bfun->insn);
1251 if (insns == 1)
1252 bfun = bfun->flow.prev;
1253
1254 if (bfun == NULL)
1255 return 0;
1256
1257 steps += 1;
1258 }
1259
1260 while (steps < stride)
1261 {
1262 const struct btrace_function *prev;
1263
1264 prev = bfun->flow.prev;
1265 if (prev == NULL)
1266 break;
1267
1268 bfun = prev;
1269 steps += 1;
1270 }
1271
1272 it->function = bfun;
1273 return steps;
1274}
1275
1276/* See btrace.h. */
1277
1278int
1279btrace_call_cmp (const struct btrace_call_iterator *lhs,
1280 const struct btrace_call_iterator *rhs)
1281{
1282 unsigned int lnum, rnum;
1283
1284 lnum = btrace_call_number (lhs);
1285 rnum = btrace_call_number (rhs);
1286
1287 return (int) (lnum - rnum);
1288}
1289
1290/* See btrace.h. */
1291
1292int
1293btrace_find_call_by_number (struct btrace_call_iterator *it,
1294 const struct btrace_thread_info *btinfo,
1295 unsigned int number)
1296{
1297 const struct btrace_function *bfun;
1298
1299 for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
1300 {
1301 unsigned int bnum;
1302
1303 bnum = bfun->number;
1304 if (number == bnum)
1305 {
1306 it->btinfo = btinfo;
1307 it->function = bfun;
1308 return 1;
1309 }
1310
1311 /* Functions are ordered and numbered consecutively. We could bail out
1312 earlier. On the other hand, it is very unlikely that we search for
1313 a nonexistent function. */
1314 }
1315
1316 return 0;
1317}
1318
1319/* See btrace.h. */
1320
1321void
1322btrace_set_insn_history (struct btrace_thread_info *btinfo,
1323 const struct btrace_insn_iterator *begin,
1324 const struct btrace_insn_iterator *end)
1325{
1326 if (btinfo->insn_history == NULL)
1327 btinfo->insn_history = xzalloc (sizeof (*btinfo->insn_history));
1328
1329 btinfo->insn_history->begin = *begin;
1330 btinfo->insn_history->end = *end;
1331}
1332
1333/* See btrace.h. */
1334
1335void
1336btrace_set_call_history (struct btrace_thread_info *btinfo,
1337 const struct btrace_call_iterator *begin,
1338 const struct btrace_call_iterator *end)
1339{
1340 gdb_assert (begin->btinfo == end->btinfo);
1341
1342 if (btinfo->call_history == NULL)
1343 btinfo->call_history = xzalloc (sizeof (*btinfo->call_history));
1344
1345 btinfo->call_history->begin = *begin;
1346 btinfo->call_history->end = *end;
1347}
1348
1349/* See btrace.h. */
1350
1351int
1352btrace_is_replaying (struct thread_info *tp)
1353{
1354 return tp->btrace.replay != NULL;
1355}