record btrace: add configuration struct
[deliverable/binutils-gdb.git] / gdb / btrace.c
CommitLineData
02d27625
MM
1/* Branch trace support for GDB, the GNU debugger.
2
32d0add0 3 Copyright (C) 2013-2015 Free Software Foundation, Inc.
02d27625
MM
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
d41f6d8e 22#include "defs.h"
02d27625
MM
23#include "btrace.h"
24#include "gdbthread.h"
02d27625
MM
25#include "inferior.h"
26#include "target.h"
27#include "record.h"
28#include "symtab.h"
29#include "disasm.h"
30#include "source.h"
31#include "filenames.h"
c12a2917 32#include "xml-support.h"
6e07b1d2 33#include "regcache.h"
02d27625
MM
34
/* Print a record debug message.  Use do ... while (0) to avoid ambiguities
   when used in if statements.  Only prints when the "set debug record"
   variable RECORD_DEBUG is non-zero.  */

#define DEBUG(msg, args...)						\
  do									\
    {									\
      if (record_debug != 0)						\
	fprintf_unfiltered (gdb_stdlog,					\
			    "[btrace] " msg "\n", ##args);		\
    }									\
  while (0)

/* Like DEBUG, but additionally tags the message with "[ftrace]" for
   function-trace computation messages.  */

#define DEBUG_FTRACE(msg, args...) DEBUG ("[ftrace] " msg, ##args)
48
02d27625
MM
49/* Return the function name of a recorded function segment for printing.
50 This function never returns NULL. */
51
52static const char *
23a7fe75 53ftrace_print_function_name (const struct btrace_function *bfun)
02d27625
MM
54{
55 struct minimal_symbol *msym;
56 struct symbol *sym;
57
58 msym = bfun->msym;
59 sym = bfun->sym;
60
61 if (sym != NULL)
62 return SYMBOL_PRINT_NAME (sym);
63
64 if (msym != NULL)
efd66ac6 65 return MSYMBOL_PRINT_NAME (msym);
02d27625
MM
66
67 return "<unknown>";
68}
69
70/* Return the file name of a recorded function segment for printing.
71 This function never returns NULL. */
72
73static const char *
23a7fe75 74ftrace_print_filename (const struct btrace_function *bfun)
02d27625
MM
75{
76 struct symbol *sym;
77 const char *filename;
78
79 sym = bfun->sym;
80
81 if (sym != NULL)
08be3fe3 82 filename = symtab_to_filename_for_display (symbol_symtab (sym));
02d27625
MM
83 else
84 filename = "<unknown>";
85
86 return filename;
87}
88
23a7fe75
MM
89/* Return a string representation of the address of an instruction.
90 This function never returns NULL. */
02d27625 91
23a7fe75
MM
92static const char *
93ftrace_print_insn_addr (const struct btrace_insn *insn)
02d27625 94{
23a7fe75
MM
95 if (insn == NULL)
96 return "<nil>";
97
98 return core_addr_to_string_nz (insn->pc);
02d27625
MM
99}
100
23a7fe75 101/* Print an ftrace debug status message. */
02d27625
MM
102
103static void
23a7fe75 104ftrace_debug (const struct btrace_function *bfun, const char *prefix)
02d27625 105{
23a7fe75
MM
106 const char *fun, *file;
107 unsigned int ibegin, iend;
108 int lbegin, lend, level;
109
110 fun = ftrace_print_function_name (bfun);
111 file = ftrace_print_filename (bfun);
112 level = bfun->level;
113
114 lbegin = bfun->lbegin;
115 lend = bfun->lend;
116
117 ibegin = bfun->insn_offset;
118 iend = ibegin + VEC_length (btrace_insn_s, bfun->insn);
119
120 DEBUG_FTRACE ("%s: fun = %s, file = %s, level = %d, lines = [%d; %d], "
121 "insn = [%u; %u)", prefix, fun, file, level, lbegin, lend,
122 ibegin, iend);
02d27625
MM
123}
124
23a7fe75
MM
/* Return non-zero if BFUN does not match MFUN and FUN,
   return zero otherwise.
   A NULL MFUN or FUN is treated as "no information", not as a mismatch,
   except where both sides lost or gained symbol information entirely.  */

static int
ftrace_function_switched (const struct btrace_function *bfun,
			  const struct minimal_symbol *mfun,
			  const struct symbol *fun)
{
  struct minimal_symbol *msym;
  struct symbol *sym;

  msym = bfun->msym;
  sym = bfun->sym;

  /* If the minimal symbol changed, we certainly switched functions.  */
  if (mfun != NULL && msym != NULL
      && strcmp (MSYMBOL_LINKAGE_NAME (mfun), MSYMBOL_LINKAGE_NAME (msym)) != 0)
    return 1;

  /* If the symbol changed, we certainly switched functions.  */
  if (fun != NULL && sym != NULL)
    {
      const char *bfname, *fname;

      /* Check the function name.  */
      if (strcmp (SYMBOL_LINKAGE_NAME (fun), SYMBOL_LINKAGE_NAME (sym)) != 0)
	return 1;

      /* Check the location of those functions, as well.  Identically-named
	 static functions may live in different files.  */
      bfname = symtab_to_fullname (symbol_symtab (sym));
      fname = symtab_to_fullname (symbol_symtab (fun));
      if (filename_cmp (fname, bfname) != 0)
	return 1;
    }

  /* If we lost symbol information, we switched functions.  */
  if (!(msym == NULL && sym == NULL) && mfun == NULL && fun == NULL)
    return 1;

  /* If we gained symbol information, we switched functions.  */
  if (msym == NULL && sym == NULL && !(mfun == NULL && fun == NULL))
    return 1;

  return 0;
}
170
23a7fe75
MM
171/* Return non-zero if we should skip this file when generating the function
172 call history, zero otherwise.
173 We would want to do that if, say, a macro that is defined in another file
174 is expanded in this function. */
02d27625
MM
175
176static int
23a7fe75 177ftrace_skip_file (const struct btrace_function *bfun, const char *fullname)
02d27625
MM
178{
179 struct symbol *sym;
180 const char *bfile;
181
182 sym = bfun->sym;
23a7fe75
MM
183 if (sym == NULL)
184 return 1;
02d27625 185
08be3fe3 186 bfile = symtab_to_fullname (symbol_symtab (sym));
23a7fe75
MM
187
188 return (filename_cmp (bfile, fullname) != 0);
189}
190
/* Allocate and initialize a new branch trace function segment.
   PREV is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function.
   The new segment is appended to PREV's flow list; PREV must not already
   have a flow successor.  */

static struct btrace_function *
ftrace_new_function (struct btrace_function *prev,
		     struct minimal_symbol *mfun,
		     struct symbol *fun)
{
  struct btrace_function *bfun;

  bfun = xzalloc (sizeof (*bfun));

  bfun->msym = mfun;
  bfun->sym = fun;
  bfun->flow.prev = prev;

  /* We start with the identities of min and max, respectively, so the
     first ftrace_update_lines call establishes the real range.  */
  bfun->lbegin = INT_MAX;
  bfun->lend = INT_MIN;

  if (prev == NULL)
    {
      /* Start counting at one.  */
      bfun->number = 1;
      bfun->insn_offset = 1;
    }
  else
    {
      gdb_assert (prev->flow.next == NULL);
      prev->flow.next = bfun;

      /* Function and instruction numbering continue from PREV.  */
      bfun->number = prev->number + 1;
      bfun->insn_offset = (prev->insn_offset
			   + VEC_length (btrace_insn_s, prev->insn));
    }

  return bfun;
}
230
23a7fe75 231/* Update the UP field of a function segment. */
02d27625 232
23a7fe75
MM
233static void
234ftrace_update_caller (struct btrace_function *bfun,
235 struct btrace_function *caller,
236 enum btrace_function_flag flags)
02d27625 237{
23a7fe75
MM
238 if (bfun->up != NULL)
239 ftrace_debug (bfun, "updating caller");
02d27625 240
23a7fe75
MM
241 bfun->up = caller;
242 bfun->flags = flags;
243
244 ftrace_debug (bfun, "set caller");
245}
246
247/* Fix up the caller for all segments of a function. */
248
249static void
250ftrace_fixup_caller (struct btrace_function *bfun,
251 struct btrace_function *caller,
252 enum btrace_function_flag flags)
253{
254 struct btrace_function *prev, *next;
255
256 ftrace_update_caller (bfun, caller, flags);
257
258 /* Update all function segments belonging to the same function. */
259 for (prev = bfun->segment.prev; prev != NULL; prev = prev->segment.prev)
260 ftrace_update_caller (prev, caller, flags);
261
262 for (next = bfun->segment.next; next != NULL; next = next->segment.next)
263 ftrace_update_caller (next, caller, flags);
264}
265
266/* Add a new function segment for a call.
267 CALLER is the chronologically preceding function segment.
268 MFUN and FUN are the symbol information we have for this function. */
269
270static struct btrace_function *
271ftrace_new_call (struct btrace_function *caller,
272 struct minimal_symbol *mfun,
273 struct symbol *fun)
274{
275 struct btrace_function *bfun;
276
277 bfun = ftrace_new_function (caller, mfun, fun);
278 bfun->up = caller;
279 bfun->level = caller->level + 1;
280
281 ftrace_debug (bfun, "new call");
282
283 return bfun;
284}
285
286/* Add a new function segment for a tail call.
287 CALLER is the chronologically preceding function segment.
288 MFUN and FUN are the symbol information we have for this function. */
289
290static struct btrace_function *
291ftrace_new_tailcall (struct btrace_function *caller,
292 struct minimal_symbol *mfun,
293 struct symbol *fun)
294{
295 struct btrace_function *bfun;
02d27625 296
23a7fe75
MM
297 bfun = ftrace_new_function (caller, mfun, fun);
298 bfun->up = caller;
299 bfun->level = caller->level + 1;
300 bfun->flags |= BFUN_UP_LINKS_TO_TAILCALL;
02d27625 301
23a7fe75
MM
302 ftrace_debug (bfun, "new tail call");
303
304 return bfun;
305}
306
307/* Find the innermost caller in the back trace of BFUN with MFUN/FUN
308 symbol information. */
309
310static struct btrace_function *
311ftrace_find_caller (struct btrace_function *bfun,
312 struct minimal_symbol *mfun,
313 struct symbol *fun)
314{
315 for (; bfun != NULL; bfun = bfun->up)
316 {
317 /* Skip functions with incompatible symbol information. */
318 if (ftrace_function_switched (bfun, mfun, fun))
319 continue;
320
321 /* This is the function segment we're looking for. */
322 break;
323 }
324
325 return bfun;
326}
327
328/* Find the innermost caller in the back trace of BFUN, skipping all
329 function segments that do not end with a call instruction (e.g.
330 tail calls ending with a jump). */
331
332static struct btrace_function *
333ftrace_find_call (struct gdbarch *gdbarch, struct btrace_function *bfun)
334{
335 for (; bfun != NULL; bfun = bfun->up)
02d27625 336 {
23a7fe75 337 struct btrace_insn *last;
02d27625
MM
338 CORE_ADDR pc;
339
23a7fe75
MM
340 /* We do not allow empty function segments. */
341 gdb_assert (!VEC_empty (btrace_insn_s, bfun->insn));
342
343 last = VEC_last (btrace_insn_s, bfun->insn);
344 pc = last->pc;
02d27625 345
23a7fe75
MM
346 if (gdbarch_insn_is_call (gdbarch, pc))
347 break;
348 }
349
350 return bfun;
351}
352
/* Add a continuation segment for a function into which we return.
   PREV is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function.
   Links the new segment to its earlier segment of the same function
   instance where possible, and fixes up levels and the call stack when
   the corresponding call is missing from the trace.  */

static struct btrace_function *
ftrace_new_return (struct gdbarch *gdbarch,
		   struct btrace_function *prev,
		   struct minimal_symbol *mfun,
		   struct symbol *fun)
{
  struct btrace_function *bfun, *caller;

  bfun = ftrace_new_function (prev, mfun, fun);

  /* It is important to start at PREV's caller.  Otherwise, we might find
     PREV itself, if PREV is a recursive function.  */
  caller = ftrace_find_caller (prev->up, mfun, fun);
  if (caller != NULL)
    {
      /* The caller of PREV is the preceding btrace function segment in this
	 function instance.  */
      gdb_assert (caller->segment.next == NULL);

      caller->segment.next = bfun;
      bfun->segment.prev = caller;

      /* Maintain the function level.  */
      bfun->level = caller->level;

      /* Maintain the call stack.  */
      bfun->up = caller->up;
      bfun->flags = caller->flags;

      ftrace_debug (bfun, "new return");
    }
  else
    {
      /* We did not find a caller.  This could mean that something went
	 wrong or that the call is simply not included in the trace.  */

      /* Let's search for some actual call.  */
      caller = ftrace_find_call (gdbarch, prev->up);
      if (caller == NULL)
	{
	  /* There is no call in PREV's back trace.  We assume that the
	     branch trace did not include it.  */

	  /* Let's find the topmost call function - this skips tail calls.  */
	  while (prev->up != NULL)
	    prev = prev->up;

	  /* We maintain levels for a series of returns for which we have
	     not seen the calls.
	     We start at the preceding function's level in case this has
	     already been a return for which we have not seen the call.
	     We start at level 0 otherwise, to handle tail calls correctly.  */
	  bfun->level = min (0, prev->level) - 1;

	  /* Fix up the call stack for PREV.  */
	  ftrace_fixup_caller (prev, bfun, BFUN_UP_LINKS_TO_RET);

	  ftrace_debug (bfun, "new return - no caller");
	}
      else
	{
	  /* There is a call in PREV's back trace to which we should have
	     returned.  Let's remain at this level.  */
	  bfun->level = prev->level;

	  ftrace_debug (bfun, "new return - unknown caller");
	}
    }

  return bfun;
}
428
429/* Add a new function segment for a function switch.
430 PREV is the chronologically preceding function segment.
431 MFUN and FUN are the symbol information we have for this function. */
432
433static struct btrace_function *
434ftrace_new_switch (struct btrace_function *prev,
435 struct minimal_symbol *mfun,
436 struct symbol *fun)
437{
438 struct btrace_function *bfun;
439
440 /* This is an unexplained function switch. The call stack will likely
441 be wrong at this point. */
442 bfun = ftrace_new_function (prev, mfun, fun);
02d27625 443
23a7fe75
MM
444 /* We keep the function level. */
445 bfun->level = prev->level;
02d27625 446
23a7fe75
MM
447 ftrace_debug (bfun, "new switch");
448
449 return bfun;
450}
451
/* Update BFUN with respect to the instruction at PC.  This may create new
   function segments.
   Return the chronologically latest function segment, never NULL.
   The previous instruction's kind (return/call/jump) decides which kind
   of new segment, if any, is created.  */

static struct btrace_function *
ftrace_update_function (struct gdbarch *gdbarch,
			struct btrace_function *bfun, CORE_ADDR pc)
{
  struct bound_minimal_symbol bmfun;
  struct minimal_symbol *mfun;
  struct symbol *fun;
  struct btrace_insn *last;

  /* Try to determine the function we're in.  We use both types of symbols
     to avoid surprises when we sometimes get a full symbol and sometimes
     only a minimal symbol.  */
  fun = find_pc_function (pc);
  bmfun = lookup_minimal_symbol_by_pc (pc);
  mfun = bmfun.minsym;

  if (fun == NULL && mfun == NULL)
    DEBUG_FTRACE ("no symbol at %s", core_addr_to_string_nz (pc));

  /* If we didn't have a function before, we create one.  */
  if (bfun == NULL)
    return ftrace_new_function (bfun, mfun, fun);

  /* Check the last instruction, if we have one.
     We do this check first, since it allows us to fill in the call stack
     links in addition to the normal flow links.  */
  last = NULL;
  if (!VEC_empty (btrace_insn_s, bfun->insn))
    last = VEC_last (btrace_insn_s, bfun->insn);

  if (last != NULL)
    {
      CORE_ADDR lpc;

      lpc = last->pc;

      /* Check for returns.  */
      if (gdbarch_insn_is_ret (gdbarch, lpc))
	return ftrace_new_return (gdbarch, bfun, mfun, fun);

      /* Check for calls.  */
      if (gdbarch_insn_is_call (gdbarch, lpc))
	{
	  int size;

	  size = gdb_insn_length (gdbarch, lpc);

	  /* Ignore calls to the next instruction.  They are used for PIC.  */
	  if (lpc + size != pc)
	    return ftrace_new_call (bfun, mfun, fun);
	}
    }

  /* Check if we're switching functions for some other reason.  */
  if (ftrace_function_switched (bfun, mfun, fun))
    {
      DEBUG_FTRACE ("switching from %s in %s at %s",
		    ftrace_print_insn_addr (last),
		    ftrace_print_function_name (bfun),
		    ftrace_print_filename (bfun));

      if (last != NULL)
	{
	  CORE_ADDR start, lpc;

	  start = get_pc_function_start (pc);

	  /* If we can't determine the function for PC, we treat a jump at
	     the end of the block as tail call.  */
	  if (start == 0)
	    start = pc;

	  lpc = last->pc;

	  /* Jumps indicate optimized tail calls.  */
	  if (start == pc && gdbarch_insn_is_jump (gdbarch, lpc))
	    return ftrace_new_tailcall (bfun, mfun, fun);
	}

      return ftrace_new_switch (bfun, mfun, fun);
    }

  return bfun;
}
540
/* Update BFUN's source range with respect to the instruction at PC.
   Widens [lbegin, lend] to cover PC's source line; lines from other
   files (e.g. expanded macros) are ignored.  */

static void
ftrace_update_lines (struct btrace_function *bfun, CORE_ADDR pc)
{
  struct symtab_and_line sal;
  const char *fullname;

  sal = find_pc_line (pc, 0);
  if (sal.symtab == NULL || sal.line == 0)
    {
      DEBUG_FTRACE ("no lines at %s", core_addr_to_string_nz (pc));
      return;
    }

  /* Check if we switched files.  This could happen if, say, a macro that
     is defined in another file is expanded here.  */
  fullname = symtab_to_fullname (sal.symtab);
  if (ftrace_skip_file (bfun, fullname))
    {
      DEBUG_FTRACE ("ignoring file at %s, file=%s",
		    core_addr_to_string_nz (pc), fullname);
      return;
    }

  /* Update the line range.  */
  bfun->lbegin = min (bfun->lbegin, sal.line);
  bfun->lend = max (bfun->lend, sal.line);

  if (record_debug > 1)
    ftrace_debug (bfun, "update lines");
}
573
574/* Add the instruction at PC to BFUN's instructions. */
575
576static void
577ftrace_update_insns (struct btrace_function *bfun, CORE_ADDR pc)
578{
579 struct btrace_insn *insn;
580
581 insn = VEC_safe_push (btrace_insn_s, bfun->insn, NULL);
582 insn->pc = pc;
583
584 if (record_debug > 1)
585 ftrace_debug (bfun, "update insn");
586}
587
/* Compute the function branch trace from BTS trace.
   BTINFO holds (and receives) the computed function segments; BTRACE is
   the raw block trace.  Blocks are stored most-recent-first, so we walk
   the block vector backwards to process them in chronological order.  */

static void
btrace_compute_ftrace_bts (struct btrace_thread_info *btinfo,
			   const struct btrace_data_bts *btrace)
{
  struct btrace_function *begin, *end;
  struct gdbarch *gdbarch;
  unsigned int blk;
  int level;

  gdbarch = target_gdbarch ();
  begin = btinfo->begin;
  end = btinfo->end;
  /* Continue from the existing level offset, or start fresh.  */
  level = begin != NULL ? -btinfo->level : INT_MAX;
  blk = VEC_length (btrace_block_s, btrace->blocks);

  while (blk != 0)
    {
      btrace_block_s *block;
      CORE_ADDR pc;

      blk -= 1;

      block = VEC_index (btrace_block_s, btrace->blocks, blk);
      pc = block->begin;

      for (;;)
	{
	  int size;

	  /* We should hit the end of the block.  Warn if we went too far.  */
	  if (block->end < pc)
	    {
	      warning (_("Recorded trace may be corrupted around %s."),
		       core_addr_to_string_nz (pc));
	      break;
	    }

	  end = ftrace_update_function (gdbarch, end, pc);
	  if (begin == NULL)
	    begin = end;

	  /* Maintain the function level offset.
	     For all but the last block, we do it here.  */
	  if (blk != 0)
	    level = min (level, end->level);

	  ftrace_update_insns (end, pc);
	  ftrace_update_lines (end, pc);

	  /* We're done once we pushed the instruction at the end.  */
	  if (block->end == pc)
	    break;

	  size = gdb_insn_length (gdbarch, pc);

	  /* Make sure we terminate if we fail to compute the size.  */
	  if (size <= 0)
	    {
	      warning (_("Recorded trace may be incomplete around %s."),
		       core_addr_to_string_nz (pc));
	      break;
	    }

	  pc += size;

	  /* Maintain the function level offset.
	     For the last block, we do it here to not consider the last
	     instruction.
	     Since the last instruction corresponds to the current instruction
	     and is not really part of the execution history, it shouldn't
	     affect the level.  */
	  if (blk == 0)
	    level = min (level, end->level);
	}
    }

  btinfo->begin = begin;
  btinfo->end = end;

  /* LEVEL is the minimal function level of all btrace function segments.
     Define the global level offset to -LEVEL so all function levels are
     normalized to start at zero.  */
  btinfo->level = -level;
}
674
734b0e4b
MM
675/* Compute the function branch trace from a block branch trace BTRACE for
676 a thread given by BTINFO. */
677
678static void
679btrace_compute_ftrace (struct btrace_thread_info *btinfo,
680 struct btrace_data *btrace)
681{
682 DEBUG ("compute ftrace");
683
684 switch (btrace->format)
685 {
686 case BTRACE_FORMAT_NONE:
687 return;
688
689 case BTRACE_FORMAT_BTS:
690 btrace_compute_ftrace_bts (btinfo, &btrace->variant.bts);
691 return;
692 }
693
694 internal_error (__FILE__, __LINE__, _("Unkown branch trace format."));
695}
696
6e07b1d2
MM
/* Add an entry for the current PC.
   Builds a minimal one-block BTS trace covering just the current PC of TP
   and feeds it through btrace_compute_ftrace, so tracing starts from the
   point where it was enabled.  */

static void
btrace_add_pc (struct thread_info *tp)
{
  struct btrace_data btrace;
  struct btrace_block *block;
  struct regcache *regcache;
  struct cleanup *cleanup;
  CORE_ADDR pc;

  regcache = get_thread_regcache (tp->ptid);
  pc = regcache_read_pc (regcache);

  btrace_data_init (&btrace);
  btrace.format = BTRACE_FORMAT_BTS;
  btrace.variant.bts.blocks = NULL;

  /* Make sure the trace data is freed even on error.  */
  cleanup = make_cleanup_btrace_data (&btrace);

  /* A single block spanning exactly the current PC.  */
  block = VEC_safe_push (btrace_block_s, btrace.variant.bts.blocks, NULL);
  block->begin = pc;
  block->end = pc;

  btrace_compute_ftrace (&tp->btrace, &btrace);

  do_cleanups (cleanup);
}
725
02d27625
MM
/* See btrace.h.  */

void
btrace_enable (struct thread_info *tp, const struct btrace_config *conf)
{
  /* Already enabled for this thread; nothing to do.  */
  if (tp->btrace.target != NULL)
    return;

  if (!target_supports_btrace (conf->format))
    error (_("Target does not support branch tracing."));

  DEBUG ("enable thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));

  tp->btrace.target = target_enable_btrace (tp->ptid, conf);

  /* Add an entry for the current PC so we start tracing from where we
     enabled it.  */
  if (tp->btrace.target != NULL)
    btrace_add_pc (tp);
}
746
747/* See btrace.h. */
748
f4abbc16
MM
749const struct btrace_config *
750btrace_conf (const struct btrace_thread_info *btinfo)
751{
752 if (btinfo->target == NULL)
753 return NULL;
754
755 return target_btrace_conf (btinfo->target);
756}
757
758/* See btrace.h. */
759
02d27625
MM
760void
761btrace_disable (struct thread_info *tp)
762{
763 struct btrace_thread_info *btp = &tp->btrace;
764 int errcode = 0;
765
766 if (btp->target == NULL)
767 return;
768
769 DEBUG ("disable thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
770
771 target_disable_btrace (btp->target);
772 btp->target = NULL;
773
774 btrace_clear (tp);
775}
776
777/* See btrace.h. */
778
779void
780btrace_teardown (struct thread_info *tp)
781{
782 struct btrace_thread_info *btp = &tp->btrace;
783 int errcode = 0;
784
785 if (btp->target == NULL)
786 return;
787
788 DEBUG ("teardown thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
789
790 target_teardown_btrace (btp->target);
791 btp->target = NULL;
792
793 btrace_clear (tp);
794}
795
/* Stitch branch trace in BTS format.
   BTRACE is the new delta trace; BTINFO holds the existing trace.
   Return 0 on success (possibly after trimming BTRACE), -1 to request a
   full re-read of the trace.  */

static int
btrace_stitch_bts (struct btrace_data_bts *btrace,
		   const struct btrace_thread_info *btinfo)
{
  struct btrace_function *last_bfun;
  struct btrace_insn *last_insn;
  btrace_block_s *first_new_block;

  last_bfun = btinfo->end;
  gdb_assert (last_bfun != NULL);

  /* Beware that block trace starts with the most recent block, so the
     chronologically first block in the new trace is the last block in
     the new trace's block vector.  */
  gdb_assert (!VEC_empty (btrace_block_s, btrace->blocks));
  first_new_block = VEC_last (btrace_block_s, btrace->blocks);
  last_insn = VEC_last (btrace_insn_s, last_bfun->insn);

  /* If the current PC at the end of the block is the same as in our current
     trace, there are two explanations:
       1. we executed the instruction and some branch brought us back.
       2. we have not made any progress.
     In the first case, the delta trace vector should contain at least two
     entries.
     In the second case, the delta trace vector should contain exactly one
     entry for the partial block containing the current PC.  Remove it.  */
  if (first_new_block->end == last_insn->pc
      && VEC_length (btrace_block_s, btrace->blocks) == 1)
    {
      VEC_pop (btrace_block_s, btrace->blocks);
      return 0;
    }

  DEBUG ("stitching %s to %s", ftrace_print_insn_addr (last_insn),
	 core_addr_to_string_nz (first_new_block->end));

  /* Do a simple sanity check to make sure we don't accidentally end up
     with a bad block.  This should not occur in practice.  */
  if (first_new_block->end < last_insn->pc)
    {
      warning (_("Error while trying to read delta trace.  Falling back to "
		 "a full read."));
      return -1;
    }

  /* We adjust the last block to start at the end of our current trace.  */
  gdb_assert (first_new_block->begin == 0);
  first_new_block->begin = last_insn->pc;

  /* We simply pop the last insn so we can insert it again as part of
     the normal branch trace computation.
     Since instruction iterators are based on indices in the instructions
     vector, we don't leave any pointers dangling.  */
  DEBUG ("pruning insn at %s for stitching",
	 ftrace_print_insn_addr (last_insn));

  VEC_pop (btrace_insn_s, last_bfun->insn);

  /* The instructions vector may become empty temporarily if this has
     been the only instruction in this function segment.
     This violates the invariant but will be remedied shortly by
     btrace_compute_ftrace when we add the new trace.  */
  return 0;
}
862
734b0e4b
MM
863/* Adjust the block trace in order to stitch old and new trace together.
864 BTRACE is the new delta trace between the last and the current stop.
865 BTINFO is the old branch trace until the last stop.
866 May modifx BTRACE as well as the existing trace in BTINFO.
867 Return 0 on success, -1 otherwise. */
868
869static int
870btrace_stitch_trace (struct btrace_data *btrace,
871 const struct btrace_thread_info *btinfo)
872{
873 /* If we don't have trace, there's nothing to do. */
874 if (btrace_data_empty (btrace))
875 return 0;
876
877 switch (btrace->format)
878 {
879 case BTRACE_FORMAT_NONE:
880 return 0;
881
882 case BTRACE_FORMAT_BTS:
883 return btrace_stitch_bts (&btrace->variant.bts, btinfo);
884 }
885
886 internal_error (__FILE__, __LINE__, _("Unkown branch trace format."));
887}
888
969c39fb
MM
889/* Clear the branch trace histories in BTINFO. */
890
891static void
892btrace_clear_history (struct btrace_thread_info *btinfo)
893{
894 xfree (btinfo->insn_history);
895 xfree (btinfo->call_history);
896 xfree (btinfo->replay);
897
898 btinfo->insn_history = NULL;
899 btinfo->call_history = NULL;
900 btinfo->replay = NULL;
901}
902
02d27625
MM
/* See btrace.h.
   Reading strategy: try a delta read first and stitch it onto the existing
   trace; fall back to a new read, and finally to a full read, clearing the
   old trace when it can no longer be extended.  */

void
btrace_fetch (struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;
  struct btrace_target_info *tinfo;
  struct btrace_data btrace;
  struct cleanup *cleanup;
  int errcode;

  DEBUG ("fetch thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));

  btinfo = &tp->btrace;
  tinfo = btinfo->target;
  if (tinfo == NULL)
    return;

  /* There's no way we could get new trace while replaying.
     On the other hand, delta trace would return a partial record with the
     current PC, which is the replay PC, not the last PC, as expected.  */
  if (btinfo->replay != NULL)
    return;

  btrace_data_init (&btrace);
  cleanup = make_cleanup_btrace_data (&btrace);

  /* Let's first try to extend the trace we already have.  */
  if (btinfo->end != NULL)
    {
      errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_DELTA);
      if (errcode == 0)
	{
	  /* Success.  Let's try to stitch the traces together.  */
	  errcode = btrace_stitch_trace (&btrace, btinfo);
	}
      else
	{
	  /* We failed to read delta trace.  Let's try to read new trace.  */
	  errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_NEW);

	  /* If we got any new trace, discard what we have.  */
	  if (errcode == 0 && !btrace_data_empty (&btrace))
	    btrace_clear (tp);
	}

      /* If we were not able to read the trace, we start over.  */
      if (errcode != 0)
	{
	  btrace_clear (tp);
	  errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);
	}
    }
  else
    errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);

  /* If we were not able to read the branch trace, signal an error.  */
  if (errcode != 0)
    error (_("Failed to read branch trace."));

  /* Compute the trace, provided we have any.  */
  if (!btrace_data_empty (&btrace))
    {
      btrace_clear_history (btinfo);
      btrace_compute_ftrace (btinfo, &btrace);
    }

  do_cleanups (cleanup);
}
972
973/* See btrace.h. */
974
975void
976btrace_clear (struct thread_info *tp)
977{
978 struct btrace_thread_info *btinfo;
23a7fe75 979 struct btrace_function *it, *trash;
02d27625
MM
980
981 DEBUG ("clear thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
982
0b722aec
MM
983 /* Make sure btrace frames that may hold a pointer into the branch
984 trace data are destroyed. */
985 reinit_frame_cache ();
986
02d27625
MM
987 btinfo = &tp->btrace;
988
23a7fe75
MM
989 it = btinfo->begin;
990 while (it != NULL)
991 {
992 trash = it;
993 it = it->flow.next;
02d27625 994
23a7fe75
MM
995 xfree (trash);
996 }
997
998 btinfo->begin = NULL;
999 btinfo->end = NULL;
1000
969c39fb 1001 btrace_clear_history (btinfo);
02d27625
MM
1002}
1003
/* See btrace.h.  */

void
btrace_free_objfile (struct objfile *objfile)
{
  struct thread_info *tp;

  DEBUG ("free objfile");

  /* Function segments store symbol pointers (msym/sym); discard every
     thread's trace so none of them dangle once OBJFILE goes away.  */
  ALL_NON_EXITED_THREADS (tp)
    btrace_clear (tp);
}
c12a2917
MM
1016
1017#if defined (HAVE_LIBEXPAT)
1018
1019/* Check the btrace document version. */
1020
1021static void
1022check_xml_btrace_version (struct gdb_xml_parser *parser,
1023 const struct gdb_xml_element *element,
1024 void *user_data, VEC (gdb_xml_value_s) *attributes)
1025{
1026 const char *version = xml_find_attribute (attributes, "version")->value;
1027
1028 if (strcmp (version, "1.0") != 0)
1029 gdb_xml_error (parser, _("Unsupported btrace version: \"%s\""), version);
1030}
1031
/* Parse a btrace "block" xml record.
   USER_DATA is the struct btrace_data being filled in.  The first block
   record switches the data to BTS format; any other pre-existing format
   is an error.  */

static void
parse_xml_btrace_block (struct gdb_xml_parser *parser,
			const struct gdb_xml_element *element,
			void *user_data, VEC (gdb_xml_value_s) *attributes)
{
  struct btrace_data *btrace;
  struct btrace_block *block;
  ULONGEST *begin, *end;

  btrace = user_data;

  switch (btrace->format)
    {
    case BTRACE_FORMAT_BTS:
      /* Already in BTS format; just append.  */
      break;

    case BTRACE_FORMAT_NONE:
      /* First block: establish BTS format.  */
      btrace->format = BTRACE_FORMAT_BTS;
      btrace->variant.bts.blocks = NULL;
      break;

    default:
      gdb_xml_error (parser, _("Btrace format error."));
    }

  begin = xml_find_attribute (attributes, "begin")->value;
  end = xml_find_attribute (attributes, "end")->value;

  block = VEC_safe_push (btrace_block_s, btrace->variant.bts.blocks, NULL);
  block->begin = *begin;
  block->end = *end;
}
1066
/* Attributes of a btrace "block" record: begin/end addresses.  */

static const struct gdb_xml_attribute block_attributes[] = {
  { "begin", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { "end", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

/* Attributes of the top-level "btrace" record: the document version.  */

static const struct gdb_xml_attribute btrace_attributes[] = {
  { "version", GDB_XML_AF_NONE, NULL, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

/* Child elements of "btrace": zero or more "block" records, handled by
   parse_xml_btrace_block.  */

static const struct gdb_xml_element btrace_children[] = {
  { "block", block_attributes, NULL,
    GDB_XML_EF_REPEATABLE | GDB_XML_EF_OPTIONAL, parse_xml_btrace_block, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

/* Top-level element of a btrace document; the version is checked by
   check_xml_btrace_version.  */

static const struct gdb_xml_element btrace_elements[] = {
  { "btrace", btrace_attributes, btrace_children, GDB_XML_EF_NONE,
    check_xml_btrace_version, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};
1089
1090#endif /* defined (HAVE_LIBEXPAT) */
1091
/* See btrace.h.  */

void
parse_xml_btrace (struct btrace_data *btrace, const char *buffer)
{
  struct cleanup *cleanup;
  int errcode;

#if defined (HAVE_LIBEXPAT)

  btrace->format = BTRACE_FORMAT_NONE;

  /* Dispose of partially parsed data if the parse below errors out.  */
  cleanup = make_cleanup_btrace_data (btrace);
  errcode = gdb_xml_parse_quick (_("btrace"), "btrace.dtd", btrace_elements,
				 buffer, btrace);
  if (errcode != 0)
    error (_("Error parsing branch trace."));

  /* Keep parse results.  */
  discard_cleanups (cleanup);

#else /* !defined (HAVE_LIBEXPAT) */

  error (_("Cannot process branch trace. XML parsing is not supported."));

#endif /* !defined (HAVE_LIBEXPAT) */
}
23a7fe75 1119
f4abbc16
MM
1120#if defined (HAVE_LIBEXPAT)
1121
1122/* Parse a btrace-conf "bts" xml record. */
1123
1124static void
1125parse_xml_btrace_conf_bts (struct gdb_xml_parser *parser,
1126 const struct gdb_xml_element *element,
1127 void *user_data, VEC (gdb_xml_value_s) *attributes)
1128{
1129 struct btrace_config *conf;
1130
1131 conf = user_data;
1132 conf->format = BTRACE_FORMAT_BTS;
1133}
1134
/* Child elements of "btrace-conf": an optional "bts" record selecting the
   BTS trace format.  */

static const struct gdb_xml_element btrace_conf_children[] = {
  { "bts", NULL, NULL, GDB_XML_EF_OPTIONAL, parse_xml_btrace_conf_bts, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

/* Attributes of the toplevel "btrace-conf" element.  The version attribute
   is accepted but not validated here.  */

static const struct gdb_xml_attribute btrace_conf_attributes[] = {
  { "version", GDB_XML_AF_NONE, NULL, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

/* The toplevel "btrace-conf" element of a configuration document.  */

static const struct gdb_xml_element btrace_conf_elements[] = {
  { "btrace-conf", btrace_conf_attributes, btrace_conf_children,
    GDB_XML_EF_NONE, NULL, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};
1150
1151#endif /* defined (HAVE_LIBEXPAT) */
1152
/* See btrace.h.  */

void
parse_xml_btrace_conf (struct btrace_config *conf, const char *xml)
{
  int errcode;

#if defined (HAVE_LIBEXPAT)

  /* The parser writes directly into CONF via parse_xml_btrace_conf_bts.  */
  errcode = gdb_xml_parse_quick (_("btrace-conf"), "btrace-conf.dtd",
				 btrace_conf_elements, xml, conf);
  if (errcode != 0)
    error (_("Error parsing branch trace configuration."));

#else /* !defined (HAVE_LIBEXPAT) */

  error (_("XML parsing is not supported."));

#endif /* !defined (HAVE_LIBEXPAT) */
}
1173
23a7fe75
MM
1174/* See btrace.h. */
1175
1176const struct btrace_insn *
1177btrace_insn_get (const struct btrace_insn_iterator *it)
1178{
1179 const struct btrace_function *bfun;
1180 unsigned int index, end;
1181
1182 index = it->index;
1183 bfun = it->function;
1184
1185 /* The index is within the bounds of this function's instruction vector. */
1186 end = VEC_length (btrace_insn_s, bfun->insn);
1187 gdb_assert (0 < end);
1188 gdb_assert (index < end);
1189
1190 return VEC_index (btrace_insn_s, bfun->insn, index);
1191}
1192
1193/* See btrace.h. */
1194
1195unsigned int
1196btrace_insn_number (const struct btrace_insn_iterator *it)
1197{
1198 const struct btrace_function *bfun;
1199
1200 bfun = it->function;
1201 return bfun->insn_offset + it->index;
1202}
1203
1204/* See btrace.h. */
1205
1206void
1207btrace_insn_begin (struct btrace_insn_iterator *it,
1208 const struct btrace_thread_info *btinfo)
1209{
1210 const struct btrace_function *bfun;
1211
1212 bfun = btinfo->begin;
1213 if (bfun == NULL)
1214 error (_("No trace."));
1215
1216 it->function = bfun;
1217 it->index = 0;
1218}
1219
1220/* See btrace.h. */
1221
1222void
1223btrace_insn_end (struct btrace_insn_iterator *it,
1224 const struct btrace_thread_info *btinfo)
1225{
1226 const struct btrace_function *bfun;
1227 unsigned int length;
1228
1229 bfun = btinfo->end;
1230 if (bfun == NULL)
1231 error (_("No trace."));
1232
1233 /* The last instruction in the last function is the current instruction.
1234 We point to it - it is one past the end of the execution trace. */
1235 length = VEC_length (btrace_insn_s, bfun->insn);
1236
1237 it->function = bfun;
1238 it->index = length - 1;
1239}
1240
/* See btrace.h.  Advance IT by at most STRIDE instructions and return the
   number of instructions actually stepped; this is smaller than STRIDE if
   the end of the trace is reached first.  */

unsigned int
btrace_insn_next (struct btrace_insn_iterator *it, unsigned int stride)
{
  const struct btrace_function *bfun;
  unsigned int index, steps;

  bfun = it->function;
  steps = 0;
  index = it->index;

  while (stride != 0)
    {
      unsigned int end, space, adv;

      end = VEC_length (btrace_insn_s, bfun->insn);

      /* Each segment holds at least one instruction and the iterator is
	 always within bounds on entry.  */
      gdb_assert (0 < end);
      gdb_assert (index < end);

      /* Compute the number of instructions remaining in this segment.  */
      space = end - index;

      /* Advance the iterator as far as possible within this segment.  */
      adv = min (space, stride);
      stride -= adv;
      index += adv;
      steps += adv;

      /* Move to the next function if we're at the end of this one.  */
      if (index == end)
	{
	  const struct btrace_function *next;

	  next = bfun->flow.next;
	  if (next == NULL)
	    {
	      /* We stepped past the last function.

		 Let's adjust the index to point to the last instruction in
		 the previous function.  */
	      index -= 1;
	      steps -= 1;
	      break;
	    }

	  /* We now point to the first instruction in the new function.  */
	  bfun = next;
	  index = 0;
	}

      /* We did make progress.  */
      gdb_assert (adv > 0);
    }

  /* Update the iterator.  */
  it->function = bfun;
  it->index = index;

  return steps;
}
1303
/* See btrace.h.  Step IT backwards by at most STRIDE instructions and
   return the number of instructions actually stepped; this is smaller
   than STRIDE if the beginning of the trace is reached first.  */

unsigned int
btrace_insn_prev (struct btrace_insn_iterator *it, unsigned int stride)
{
  const struct btrace_function *bfun;
  unsigned int index, steps;

  bfun = it->function;
  steps = 0;
  index = it->index;

  while (stride != 0)
    {
      unsigned int adv;

      /* Move to the previous function if we're at the start of this one.  */
      if (index == 0)
	{
	  const struct btrace_function *prev;

	  prev = bfun->flow.prev;
	  if (prev == NULL)
	    break;

	  /* We point to one after the last instruction in the new function.  */
	  bfun = prev;
	  index = VEC_length (btrace_insn_s, bfun->insn);

	  /* There is at least one instruction in this function segment.  */
	  gdb_assert (index > 0);
	}

      /* Advance the iterator as far as possible within this segment.  */
      adv = min (index, stride);
      stride -= adv;
      index -= adv;
      steps += adv;

      /* We did make progress.  */
      gdb_assert (adv > 0);
    }

  /* Update the iterator.  */
  it->function = bfun;
  it->index = index;

  return steps;
}
1353
/* See btrace.h.  Returns a negative value if LHS precedes RHS, zero if
   they point at the same instruction, and a positive value otherwise.  */

int
btrace_insn_cmp (const struct btrace_insn_iterator *lhs,
		 const struct btrace_insn_iterator *rhs)
{
  unsigned int lnum, rnum;

  lnum = btrace_insn_number (lhs);
  rnum = btrace_insn_number (rhs);

  /* Compare the numbers directly instead of casting their unsigned
     difference to int: for distances greater than INT_MAX the conversion
     is implementation-defined and can yield the wrong sign.  */
  if (lnum < rnum)
    return -1;
  if (lnum > rnum)
    return 1;
  return 0;
}
1367
1368/* See btrace.h. */
1369
1370int
1371btrace_find_insn_by_number (struct btrace_insn_iterator *it,
1372 const struct btrace_thread_info *btinfo,
1373 unsigned int number)
1374{
1375 const struct btrace_function *bfun;
1376 unsigned int end;
1377
1378 for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
1379 if (bfun->insn_offset <= number)
1380 break;
1381
1382 if (bfun == NULL)
1383 return 0;
1384
1385 end = bfun->insn_offset + VEC_length (btrace_insn_s, bfun->insn);
1386 if (end <= number)
1387 return 0;
1388
1389 it->function = bfun;
1390 it->index = number - bfun->insn_offset;
1391
1392 return 1;
1393}
1394
/* See btrace.h.  */

const struct btrace_function *
btrace_call_get (const struct btrace_call_iterator *it)
{
  /* NULL for the end iterator (see btrace_call_end).  */
  return it->function;
}
1402
/* See btrace.h.  Returns the number of the function segment IT points at;
   for the end iterator, the number one past the last reported segment.  */

unsigned int
btrace_call_number (const struct btrace_call_iterator *it)
{
  const struct btrace_thread_info *btinfo;
  const struct btrace_function *bfun;
  unsigned int insns;

  btinfo = it->btinfo;
  bfun = it->function;
  if (bfun != NULL)
    return bfun->number;

  /* For the end iterator, i.e. bfun == NULL, we return one more than the
     number of the last function.
     NOTE(review): assumes btinfo->end is non-NULL here; iterators appear
     to be created only when a trace exists (see btrace_call_end).  */
  bfun = btinfo->end;
  insns = VEC_length (btrace_insn_s, bfun->insn);

  /* If the function contains only a single instruction (i.e. the current
     instruction), it will be skipped and its number is already the number
     we seek.  */
  if (insns == 1)
    return bfun->number;

  /* Otherwise, return one more than the number of the last function.  */
  return bfun->number + 1;
}
1431
1432/* See btrace.h. */
1433
1434void
1435btrace_call_begin (struct btrace_call_iterator *it,
1436 const struct btrace_thread_info *btinfo)
1437{
1438 const struct btrace_function *bfun;
1439
1440 bfun = btinfo->begin;
1441 if (bfun == NULL)
1442 error (_("No trace."));
1443
1444 it->btinfo = btinfo;
1445 it->function = bfun;
1446}
1447
1448/* See btrace.h. */
1449
1450void
1451btrace_call_end (struct btrace_call_iterator *it,
1452 const struct btrace_thread_info *btinfo)
1453{
1454 const struct btrace_function *bfun;
1455
1456 bfun = btinfo->end;
1457 if (bfun == NULL)
1458 error (_("No trace."));
1459
1460 it->btinfo = btinfo;
1461 it->function = NULL;
1462}
1463
/* See btrace.h.  Advance IT by at most STRIDE function segments and return
   the number of segments actually stepped.  */

unsigned int
btrace_call_next (struct btrace_call_iterator *it, unsigned int stride)
{
  const struct btrace_function *bfun;
  unsigned int steps;

  bfun = it->function;
  steps = 0;
  while (bfun != NULL)
    {
      const struct btrace_function *next;
      unsigned int insns;

      next = bfun->flow.next;
      if (next == NULL)
	{
	  /* Ignore the last function if it only contains a single
	     (i.e. the current) instruction.
	     NOTE(review): when STEPS is zero this wraps to UINT_MAX —
	     well-defined for unsigned and corrected by the final
	     "steps += 1" below, but worth keeping in mind.  */
	  insns = VEC_length (btrace_insn_s, bfun->insn);
	  if (insns == 1)
	    steps -= 1;
	}

      if (stride == steps)
	break;

      bfun = next;
      steps += 1;
    }

  /* BFUN is NULL if we stepped onto the end iterator.  */
  it->function = bfun;
  return steps;
}
1499
/* See btrace.h.  Step IT backwards by at most STRIDE function segments and
   return the number of segments actually stepped.  */

unsigned int
btrace_call_prev (struct btrace_call_iterator *it, unsigned int stride)
{
  const struct btrace_thread_info *btinfo;
  const struct btrace_function *bfun;
  unsigned int steps;

  bfun = it->function;
  steps = 0;

  if (bfun == NULL)
    {
      unsigned int insns;

      /* IT is the end iterator; restart from the last function segment.  */
      btinfo = it->btinfo;
      bfun = btinfo->end;
      if (bfun == NULL)
	return 0;

      /* Ignore the last function if it only contains a single
	 (i.e. the current) instruction.  */
      insns = VEC_length (btrace_insn_s, bfun->insn);
      if (insns == 1)
	bfun = bfun->flow.prev;

      if (bfun == NULL)
	return 0;

      /* Leaving the end iterator counts as one step.  */
      steps += 1;
    }

  while (steps < stride)
    {
      const struct btrace_function *prev;

      prev = bfun->flow.prev;
      if (prev == NULL)
	break;

      bfun = prev;
      steps += 1;
    }

  it->function = bfun;
  return steps;
}
1548
/* See btrace.h.  Returns a negative value if LHS precedes RHS, zero if
   they point at the same function segment, and a positive value
   otherwise.  */

int
btrace_call_cmp (const struct btrace_call_iterator *lhs,
		 const struct btrace_call_iterator *rhs)
{
  unsigned int lnum, rnum;

  lnum = btrace_call_number (lhs);
  rnum = btrace_call_number (rhs);

  /* Compare the numbers directly instead of casting their unsigned
     difference to int: for distances greater than INT_MAX the conversion
     is implementation-defined and can yield the wrong sign.  */
  if (lnum < rnum)
    return -1;
  if (lnum > rnum)
    return 1;
  return 0;
}
1562
1563/* See btrace.h. */
1564
1565int
1566btrace_find_call_by_number (struct btrace_call_iterator *it,
1567 const struct btrace_thread_info *btinfo,
1568 unsigned int number)
1569{
1570 const struct btrace_function *bfun;
1571
1572 for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
1573 {
1574 unsigned int bnum;
1575
1576 bnum = bfun->number;
1577 if (number == bnum)
1578 {
1579 it->btinfo = btinfo;
1580 it->function = bfun;
1581 return 1;
1582 }
1583
1584 /* Functions are ordered and numbered consecutively. We could bail out
1585 earlier. On the other hand, it is very unlikely that we search for
1586 a nonexistent function. */
1587 }
1588
1589 return 0;
1590}
1591
1592/* See btrace.h. */
1593
1594void
1595btrace_set_insn_history (struct btrace_thread_info *btinfo,
1596 const struct btrace_insn_iterator *begin,
1597 const struct btrace_insn_iterator *end)
1598{
1599 if (btinfo->insn_history == NULL)
1600 btinfo->insn_history = xzalloc (sizeof (*btinfo->insn_history));
1601
1602 btinfo->insn_history->begin = *begin;
1603 btinfo->insn_history->end = *end;
1604}
1605
1606/* See btrace.h. */
1607
1608void
1609btrace_set_call_history (struct btrace_thread_info *btinfo,
1610 const struct btrace_call_iterator *begin,
1611 const struct btrace_call_iterator *end)
1612{
1613 gdb_assert (begin->btinfo == end->btinfo);
1614
1615 if (btinfo->call_history == NULL)
1616 btinfo->call_history = xzalloc (sizeof (*btinfo->call_history));
1617
1618 btinfo->call_history->begin = *begin;
1619 btinfo->call_history->end = *end;
1620}
07bbe694
MM
1621
/* See btrace.h.  */

int
btrace_is_replaying (struct thread_info *tp)
{
  /* A non-NULL replay iterator means TP is replaying its execution
     history rather than executing live.  */
  return tp->btrace.replay != NULL;
}
6e07b1d2
MM
1629
1630/* See btrace.h. */
1631
1632int
1633btrace_is_empty (struct thread_info *tp)
1634{
1635 struct btrace_insn_iterator begin, end;
1636 struct btrace_thread_info *btinfo;
1637
1638 btinfo = &tp->btrace;
1639
1640 if (btinfo->begin == NULL)
1641 return 1;
1642
1643 btrace_insn_begin (&begin, btinfo);
1644 btrace_insn_end (&end, btinfo);
1645
1646 return btrace_insn_cmp (&begin, &end) == 0;
1647}
734b0e4b
MM
1648
/* Forward the cleanup request.  ARG is the struct btrace_data registered
   via make_cleanup_btrace_data.  */

static void
do_btrace_data_cleanup (void *arg)
{
  btrace_data_fini (arg);
}
1656
/* See btrace.h.  */

struct cleanup *
make_cleanup_btrace_data (struct btrace_data *data)
{
  /* Arrange for btrace_data_fini to run on DATA unless the caller
     discards the cleanup after a successful parse.  */
  return make_cleanup (do_btrace_data_cleanup, data);
}
This page took 0.282618 seconds and 4 git commands to generate.