btrace: compute line range when printing
gdb/btrace.c
/* Branch trace support for GDB, the GNU debugger.

   Copyright (C) 2013-2015 Free Software Foundation, Inc.

   Contributed by Intel Corp. <markus.t.metzger@intel.com>

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program. If not, see <http://www.gnu.org/licenses/>. */

#include "defs.h"
#include "btrace.h"
#include "gdbthread.h"
#include "inferior.h"
#include "target.h"
#include "record.h"
#include "symtab.h"
#include "disasm.h"
#include "source.h"
#include "filenames.h"
#include "xml-support.h"
#include "regcache.h"

/* Print a record debug message. Use do ... while (0) to avoid ambiguities
   when used in if statements. */

#define DEBUG(msg, args...) \
  do \
    { \
      if (record_debug != 0) \
        fprintf_unfiltered (gdb_stdlog, \
                            "[btrace] " msg "\n", ##args); \
    } \
  while (0)

#define DEBUG_FTRACE(msg, args...) DEBUG ("[ftrace] " msg, ##args)
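
/* For example, the call
     DEBUG ("enable thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
   from btrace_enable below prints a "[btrace]"-prefixed line to gdb_stdlog
   whenever record debugging is enabled. */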

/* Return the function name of a recorded function segment for printing.
   This function never returns NULL. */

static const char *
ftrace_print_function_name (const struct btrace_function *bfun)
{
  struct minimal_symbol *msym;
  struct symbol *sym;

  msym = bfun->msym;
  sym = bfun->sym;

  if (sym != NULL)
    return SYMBOL_PRINT_NAME (sym);

  if (msym != NULL)
    return MSYMBOL_PRINT_NAME (msym);

  return "<unknown>";
}

/* Return the file name of a recorded function segment for printing.
   This function never returns NULL. */

static const char *
ftrace_print_filename (const struct btrace_function *bfun)
{
  struct symbol *sym;
  const char *filename;

  sym = bfun->sym;

  if (sym != NULL)
    filename = symtab_to_filename_for_display (symbol_symtab (sym));
  else
    filename = "<unknown>";

  return filename;
}

/* Return a string representation of the address of an instruction.
   This function never returns NULL. */

static const char *
ftrace_print_insn_addr (const struct btrace_insn *insn)
{
  if (insn == NULL)
    return "<nil>";

  return core_addr_to_string_nz (insn->pc);
}

/* Print an ftrace debug status message. */

static void
ftrace_debug (const struct btrace_function *bfun, const char *prefix)
{
  const char *fun, *file;
  unsigned int ibegin, iend;
  int level;

  fun = ftrace_print_function_name (bfun);
  file = ftrace_print_filename (bfun);
  level = bfun->level;

  ibegin = bfun->insn_offset;
  iend = ibegin + VEC_length (btrace_insn_s, bfun->insn);

  DEBUG_FTRACE ("%s: fun = %s, file = %s, level = %d, insn = [%u; %u)",
                prefix, fun, file, level, ibegin, iend);
}

/* Return non-zero if BFUN does not match MFUN and FUN,
   return zero otherwise. */

static int
ftrace_function_switched (const struct btrace_function *bfun,
                          const struct minimal_symbol *mfun,
                          const struct symbol *fun)
{
  struct minimal_symbol *msym;
  struct symbol *sym;

  msym = bfun->msym;
  sym = bfun->sym;

  /* If the minimal symbol changed, we certainly switched functions. */
  if (mfun != NULL && msym != NULL
      && strcmp (MSYMBOL_LINKAGE_NAME (mfun), MSYMBOL_LINKAGE_NAME (msym)) != 0)
    return 1;

  /* If the symbol changed, we certainly switched functions. */
  if (fun != NULL && sym != NULL)
    {
      const char *bfname, *fname;

      /* Check the function name. */
      if (strcmp (SYMBOL_LINKAGE_NAME (fun), SYMBOL_LINKAGE_NAME (sym)) != 0)
        return 1;

      /* Check the location of those functions, as well. */
      bfname = symtab_to_fullname (symbol_symtab (sym));
      fname = symtab_to_fullname (symbol_symtab (fun));
      if (filename_cmp (fname, bfname) != 0)
        return 1;
    }

  /* If we lost symbol information, we switched functions. */
  if (!(msym == NULL && sym == NULL) && mfun == NULL && fun == NULL)
    return 1;

  /* If we gained symbol information, we switched functions. */
  if (msym == NULL && sym == NULL && !(mfun == NULL && fun == NULL))
    return 1;

  return 0;
}

/* Allocate and initialize a new branch trace function segment.
   PREV is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function. */

static struct btrace_function *
ftrace_new_function (struct btrace_function *prev,
                     struct minimal_symbol *mfun,
                     struct symbol *fun)
{
  struct btrace_function *bfun;

  bfun = xzalloc (sizeof (*bfun));

  bfun->msym = mfun;
  bfun->sym = fun;
  bfun->flow.prev = prev;

  if (prev == NULL)
    {
      /* Start counting at one. */
      bfun->number = 1;
      bfun->insn_offset = 1;
    }
  else
    {
      gdb_assert (prev->flow.next == NULL);
      prev->flow.next = bfun;

      bfun->number = prev->number + 1;
      bfun->insn_offset = (prev->insn_offset
                           + VEC_length (btrace_insn_s, prev->insn));
      bfun->level = prev->level;
    }

  return bfun;
}

/* Update the UP field of a function segment. */

static void
ftrace_update_caller (struct btrace_function *bfun,
                      struct btrace_function *caller,
                      enum btrace_function_flag flags)
{
  if (bfun->up != NULL)
    ftrace_debug (bfun, "updating caller");

  bfun->up = caller;
  bfun->flags = flags;

  ftrace_debug (bfun, "set caller");
}

/* Fix up the caller for all segments of a function. */

static void
ftrace_fixup_caller (struct btrace_function *bfun,
                     struct btrace_function *caller,
                     enum btrace_function_flag flags)
{
  struct btrace_function *prev, *next;

  ftrace_update_caller (bfun, caller, flags);

  /* Update all function segments belonging to the same function. */
  for (prev = bfun->segment.prev; prev != NULL; prev = prev->segment.prev)
    ftrace_update_caller (prev, caller, flags);

  for (next = bfun->segment.next; next != NULL; next = next->segment.next)
    ftrace_update_caller (next, caller, flags);
}

/* Add a new function segment for a call.
   CALLER is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function. */

static struct btrace_function *
ftrace_new_call (struct btrace_function *caller,
                 struct minimal_symbol *mfun,
                 struct symbol *fun)
{
  struct btrace_function *bfun;

  bfun = ftrace_new_function (caller, mfun, fun);
  bfun->up = caller;
  bfun->level += 1;

  ftrace_debug (bfun, "new call");

  return bfun;
}

/* Add a new function segment for a tail call.
   CALLER is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function. */

static struct btrace_function *
ftrace_new_tailcall (struct btrace_function *caller,
                     struct minimal_symbol *mfun,
                     struct symbol *fun)
{
  struct btrace_function *bfun;

  bfun = ftrace_new_function (caller, mfun, fun);
  bfun->up = caller;
  bfun->level += 1;
  bfun->flags |= BFUN_UP_LINKS_TO_TAILCALL;

  ftrace_debug (bfun, "new tail call");

  return bfun;
}

/* Find the innermost caller in the back trace of BFUN with MFUN/FUN
   symbol information. */

static struct btrace_function *
ftrace_find_caller (struct btrace_function *bfun,
                    struct minimal_symbol *mfun,
                    struct symbol *fun)
{
  for (; bfun != NULL; bfun = bfun->up)
    {
      /* Skip functions with incompatible symbol information. */
      if (ftrace_function_switched (bfun, mfun, fun))
        continue;

      /* This is the function segment we're looking for. */
      break;
    }

  return bfun;
}

/* Find the innermost caller in the back trace of BFUN, skipping all
   function segments that do not end with a call instruction (e.g.
   tail calls ending with a jump). */

static struct btrace_function *
ftrace_find_call (struct btrace_function *bfun)
{
  for (; bfun != NULL; bfun = bfun->up)
    {
      struct btrace_insn *last;

      /* Skip gaps. */
      if (bfun->errcode != 0)
        continue;

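      /* A non-gap function segment is never empty (this invariant may only
         be violated temporarily while stitching traces), so there is always
         a last instruction. */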
      last = VEC_last (btrace_insn_s, bfun->insn);

      if (last->iclass == BTRACE_INSN_CALL)
        break;
    }

  return bfun;
}

/* Add a continuation segment for a function into which we return.
   PREV is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function. */

static struct btrace_function *
ftrace_new_return (struct btrace_function *prev,
                   struct minimal_symbol *mfun,
                   struct symbol *fun)
{
  struct btrace_function *bfun, *caller;

  bfun = ftrace_new_function (prev, mfun, fun);

  /* It is important to start at PREV's caller. Otherwise, we might find
     PREV itself, if PREV is a recursive function. */
  caller = ftrace_find_caller (prev->up, mfun, fun);
  if (caller != NULL)
    {
      /* The caller of PREV is the preceding btrace function segment in this
         function instance. */
      gdb_assert (caller->segment.next == NULL);

      caller->segment.next = bfun;
      bfun->segment.prev = caller;

      /* Maintain the function level. */
      bfun->level = caller->level;

      /* Maintain the call stack. */
      bfun->up = caller->up;
      bfun->flags = caller->flags;

      ftrace_debug (bfun, "new return");
    }
  else
    {
      /* We did not find a caller. This could mean that something went
         wrong or that the call is simply not included in the trace. */

      /* Let's search for some actual call. */
      caller = ftrace_find_call (prev->up);
      if (caller == NULL)
        {
          /* There is no call in PREV's back trace. We assume that the
             branch trace did not include it. */

          /* Let's find the topmost call function - this skips tail calls. */
          while (prev->up != NULL)
            prev = prev->up;

          /* We maintain levels for a series of returns for which we have
             not seen the calls.
             We start at the preceding function's level in case this has
             already been a return for which we have not seen the call.
             We start at level 0 otherwise, to handle tail calls correctly. */
          bfun->level = min (0, prev->level) - 1;

          /* Fix up the call stack for PREV. */
          ftrace_fixup_caller (prev, bfun, BFUN_UP_LINKS_TO_RET);

          ftrace_debug (bfun, "new return - no caller");
        }
      else
        {
          /* There is a call in PREV's back trace to which we should have
             returned. Let's remain at this level. */
          bfun->level = prev->level;

          ftrace_debug (bfun, "new return - unknown caller");
        }
    }

  return bfun;
}

/* Add a new function segment for a function switch.
   PREV is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function. */

static struct btrace_function *
ftrace_new_switch (struct btrace_function *prev,
                   struct minimal_symbol *mfun,
                   struct symbol *fun)
{
  struct btrace_function *bfun;

  /* This is an unexplained function switch. The call stack will likely
     be wrong at this point. */
  bfun = ftrace_new_function (prev, mfun, fun);

  ftrace_debug (bfun, "new switch");

  return bfun;
}

/* Add a new function segment for a gap in the trace due to a decode error.
   PREV is the chronologically preceding function segment.
   ERRCODE is the format-specific error code. */

static struct btrace_function *
ftrace_new_gap (struct btrace_function *prev, int errcode)
{
  struct btrace_function *bfun;

  /* We hijack prev if it was empty. */
  if (prev != NULL && prev->errcode == 0
      && VEC_empty (btrace_insn_s, prev->insn))
    bfun = prev;
  else
    bfun = ftrace_new_function (prev, NULL, NULL);

  bfun->errcode = errcode;

  ftrace_debug (bfun, "new gap");

  return bfun;
}

/* Update BFUN with respect to the instruction at PC. This may create new
   function segments.
   Return the chronologically latest function segment, never NULL. */

static struct btrace_function *
ftrace_update_function (struct btrace_function *bfun, CORE_ADDR pc)
{
  struct bound_minimal_symbol bmfun;
  struct minimal_symbol *mfun;
  struct symbol *fun;
  struct btrace_insn *last;

  /* Try to determine the function we're in. We use both types of symbols
     to avoid surprises when we sometimes get a full symbol and sometimes
     only a minimal symbol. */
  fun = find_pc_function (pc);
  bmfun = lookup_minimal_symbol_by_pc (pc);
  mfun = bmfun.minsym;

  if (fun == NULL && mfun == NULL)
    DEBUG_FTRACE ("no symbol at %s", core_addr_to_string_nz (pc));

  /* If we didn't have a function or if we had a gap before, we create one. */
  if (bfun == NULL || bfun->errcode != 0)
    return ftrace_new_function (bfun, mfun, fun);

  /* Check the last instruction, if we have one.
     We do this check first, since it allows us to fill in the call stack
     links in addition to the normal flow links. */
  last = NULL;
  if (!VEC_empty (btrace_insn_s, bfun->insn))
    last = VEC_last (btrace_insn_s, bfun->insn);

  if (last != NULL)
    {
      switch (last->iclass)
        {
        case BTRACE_INSN_RETURN:
          return ftrace_new_return (bfun, mfun, fun);

        case BTRACE_INSN_CALL:
          /* Ignore calls to the next instruction. They are used for PIC. */
          if (last->pc + last->size == pc)
            break;

          return ftrace_new_call (bfun, mfun, fun);

        case BTRACE_INSN_JUMP:
          {
            CORE_ADDR start;

            start = get_pc_function_start (pc);

            /* If we can't determine the function for PC, we treat a jump at
               the end of the block as tail call. */
            if (start == 0 || start == pc)
              return ftrace_new_tailcall (bfun, mfun, fun);
          }
        }
    }

  /* Check if we're switching functions for some other reason. */
  if (ftrace_function_switched (bfun, mfun, fun))
    {
      DEBUG_FTRACE ("switching from %s in %s at %s",
                    ftrace_print_insn_addr (last),
                    ftrace_print_function_name (bfun),
                    ftrace_print_filename (bfun));

      return ftrace_new_switch (bfun, mfun, fun);
    }

  return bfun;
}

/* Add the instruction INSN to BFUN's instructions. */

static void
ftrace_update_insns (struct btrace_function *bfun,
                     const struct btrace_insn *insn)
{
  VEC_safe_push (btrace_insn_s, bfun->insn, insn);

  if (record_debug > 1)
    ftrace_debug (bfun, "update insn");
}

/* Classify the instruction at PC. */

static enum btrace_insn_class
ftrace_classify_insn (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  volatile struct gdb_exception error;
  enum btrace_insn_class iclass;

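  /* If the classification fails below (e.g. if we cannot read the memory
     at PC), TRY_CATCH swallows the error and we return the default,
     BTRACE_INSN_OTHER. */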
  iclass = BTRACE_INSN_OTHER;
  TRY_CATCH (error, RETURN_MASK_ERROR)
    {
      if (gdbarch_insn_is_call (gdbarch, pc))
        iclass = BTRACE_INSN_CALL;
      else if (gdbarch_insn_is_ret (gdbarch, pc))
        iclass = BTRACE_INSN_RETURN;
      else if (gdbarch_insn_is_jump (gdbarch, pc))
        iclass = BTRACE_INSN_JUMP;
    }

  return iclass;
}

/* Compute the function branch trace from BTS trace. */

static void
btrace_compute_ftrace_bts (struct thread_info *tp,
                           const struct btrace_data_bts *btrace)
{
  struct btrace_thread_info *btinfo;
  struct btrace_function *begin, *end;
  struct gdbarch *gdbarch;
  unsigned int blk, ngaps;
  int level;

  gdbarch = target_gdbarch ();
  btinfo = &tp->btrace;
  begin = btinfo->begin;
  end = btinfo->end;
  ngaps = btinfo->ngaps;
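  /* LEVEL tracks the minimal function level seen so far. Resume from the
     existing trace's minimum, if we have trace; start at INT_MAX otherwise
     so that the first function segment defines the minimum. */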
  level = begin != NULL ? -btinfo->level : INT_MAX;
  blk = VEC_length (btrace_block_s, btrace->blocks);

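  /* The blocks are stored with the most recent block first; walk the
     vector back to front to process the blocks in chronological order. */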
  while (blk != 0)
    {
      btrace_block_s *block;
      CORE_ADDR pc;

      blk -= 1;

      block = VEC_index (btrace_block_s, btrace->blocks, blk);
      pc = block->begin;

      for (;;)
        {
          volatile struct gdb_exception error;
          struct btrace_insn insn;
          int size;

          /* We should hit the end of the block. Warn if we went too far. */
          if (block->end < pc)
            {
              /* Indicate the gap in the trace - unless we're at the
                 beginning. */
              if (begin != NULL)
                {
                  warning (_("Recorded trace may be corrupted around %s."),
                           core_addr_to_string_nz (pc));

                  end = ftrace_new_gap (end, BDE_BTS_OVERFLOW);
                  ngaps += 1;
                }
              break;
            }

          end = ftrace_update_function (end, pc);
          if (begin == NULL)
            begin = end;

          /* Maintain the function level offset.
             For all but the last block, we do it here. */
          if (blk != 0)
            level = min (level, end->level);

          size = 0;
          TRY_CATCH (error, RETURN_MASK_ERROR)
            size = gdb_insn_length (gdbarch, pc);

          insn.pc = pc;
          insn.size = size;
          insn.iclass = ftrace_classify_insn (gdbarch, pc);

          ftrace_update_insns (end, &insn);

          /* We're done once we pushed the instruction at the end. */
          if (block->end == pc)
            break;

          /* We can't continue if we fail to compute the size. */
          if (size <= 0)
            {
              warning (_("Recorded trace may be incomplete around %s."),
                       core_addr_to_string_nz (pc));

              /* Indicate the gap in the trace. We just added INSN so we're
                 not at the beginning. */
              end = ftrace_new_gap (end, BDE_BTS_INSN_SIZE);
              ngaps += 1;

              break;
            }

          pc += size;

          /* Maintain the function level offset.
             For the last block, we do it here to not consider the last
             instruction.
             Since the last instruction corresponds to the current instruction
             and is not really part of the execution history, it shouldn't
             affect the level. */
          if (blk == 0)
            level = min (level, end->level);
        }
    }

  btinfo->begin = begin;
  btinfo->end = end;
  btinfo->ngaps = ngaps;

  /* LEVEL is the minimal function level of all btrace function segments.
     Define the global level offset to -LEVEL so all function levels are
     normalized to start at zero. */
  btinfo->level = -level;
}

/* Compute the function branch trace from a block branch trace BTRACE for
   thread TP. */

static void
btrace_compute_ftrace (struct thread_info *tp, struct btrace_data *btrace)
{
  DEBUG ("compute ftrace");

  switch (btrace->format)
    {
    case BTRACE_FORMAT_NONE:
      return;

    case BTRACE_FORMAT_BTS:
      btrace_compute_ftrace_bts (tp, &btrace->variant.bts);
      return;
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
}

/* Add an entry for the current PC. */

static void
btrace_add_pc (struct thread_info *tp)
{
  struct btrace_data btrace;
  struct btrace_block *block;
  struct regcache *regcache;
  struct cleanup *cleanup;
  CORE_ADDR pc;

  regcache = get_thread_regcache (tp->ptid);
  pc = regcache_read_pc (regcache);

  btrace_data_init (&btrace);
  btrace.format = BTRACE_FORMAT_BTS;
  btrace.variant.bts.blocks = NULL;

  cleanup = make_cleanup_btrace_data (&btrace);

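  /* A single block with begin == end == PC represents exactly one
     instruction: the one at the current PC. */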
  block = VEC_safe_push (btrace_block_s, btrace.variant.bts.blocks, NULL);
  block->begin = pc;
  block->end = pc;

  btrace_compute_ftrace (tp, &btrace);

  do_cleanups (cleanup);
}

/* See btrace.h. */

void
btrace_enable (struct thread_info *tp, const struct btrace_config *conf)
{
  if (tp->btrace.target != NULL)
    return;

  if (!target_supports_btrace (conf->format))
    error (_("Target does not support branch tracing."));

  DEBUG ("enable thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));

  tp->btrace.target = target_enable_btrace (tp->ptid, conf);

  /* Add an entry for the current PC so we start tracing from where we
     enabled it. */
  if (tp->btrace.target != NULL)
    btrace_add_pc (tp);
}

/* See btrace.h. */

const struct btrace_config *
btrace_conf (const struct btrace_thread_info *btinfo)
{
  if (btinfo->target == NULL)
    return NULL;

  return target_btrace_conf (btinfo->target);
}

/* See btrace.h. */

void
btrace_disable (struct thread_info *tp)
{
  struct btrace_thread_info *btp = &tp->btrace;

  if (btp->target == NULL)
    return;

  DEBUG ("disable thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));

  target_disable_btrace (btp->target);
  btp->target = NULL;

  btrace_clear (tp);
}

/* See btrace.h. */

void
btrace_teardown (struct thread_info *tp)
{
  struct btrace_thread_info *btp = &tp->btrace;

  if (btp->target == NULL)
    return;

  DEBUG ("teardown thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));

  target_teardown_btrace (btp->target);
  btp->target = NULL;

  btrace_clear (tp);
}

/* Stitch branch trace in BTS format. */

static int
btrace_stitch_bts (struct btrace_data_bts *btrace, struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;
  struct btrace_function *last_bfun;
  struct btrace_insn *last_insn;
  btrace_block_s *first_new_block;

  btinfo = &tp->btrace;
  last_bfun = btinfo->end;
  gdb_assert (last_bfun != NULL);
  gdb_assert (!VEC_empty (btrace_block_s, btrace->blocks));

  /* If the existing trace ends with a gap, we just glue the traces
     together. We need to drop the last (i.e. chronologically first) block
     of the new trace, though, since we can't fill in the start address. */
  if (VEC_empty (btrace_insn_s, last_bfun->insn))
    {
      VEC_pop (btrace_block_s, btrace->blocks);
      return 0;
    }

  /* Beware that block trace starts with the most recent block, so the
     chronologically first block in the new trace is the last block in
     the new trace's block vector. */
  first_new_block = VEC_last (btrace_block_s, btrace->blocks);
  last_insn = VEC_last (btrace_insn_s, last_bfun->insn);

  /* If the current PC at the end of the block is the same as in our current
     trace, there are two explanations:
       1. we executed the instruction and some branch brought us back.
       2. we have not made any progress.
     In the first case, the delta trace vector should contain at least two
     entries.
     In the second case, the delta trace vector should contain exactly one
     entry for the partial block containing the current PC. Remove it. */
  if (first_new_block->end == last_insn->pc
      && VEC_length (btrace_block_s, btrace->blocks) == 1)
    {
      VEC_pop (btrace_block_s, btrace->blocks);
      return 0;
    }

  DEBUG ("stitching %s to %s", ftrace_print_insn_addr (last_insn),
         core_addr_to_string_nz (first_new_block->end));

  /* Do a simple sanity check to make sure we don't accidentally end up
     with a bad block. This should not occur in practice. */
  if (first_new_block->end < last_insn->pc)
    {
      warning (_("Error while trying to read delta trace. Falling back to "
                 "a full read."));
      return -1;
    }

  /* We adjust the last block to start at the end of our current trace. */
  gdb_assert (first_new_block->begin == 0);
  first_new_block->begin = last_insn->pc;

  /* We simply pop the last insn so we can insert it again as part of
     the normal branch trace computation.
     Since instruction iterators are based on indices in the instructions
     vector, we don't leave any pointers dangling. */
  DEBUG ("pruning insn at %s for stitching",
         ftrace_print_insn_addr (last_insn));

  VEC_pop (btrace_insn_s, last_bfun->insn);

  /* The instructions vector may become empty temporarily if this has
     been the only instruction in this function segment.
     This violates the invariant but will be remedied shortly by
     btrace_compute_ftrace when we add the new trace. */

  /* The only case where this would hurt is if the entire trace consisted
     of just that one instruction. If we remove it, we might turn the now
     empty btrace function segment into a gap. But we don't want gaps at
     the beginning. To avoid this, we remove the entire old trace. */
  if (last_bfun == btinfo->begin && VEC_empty (btrace_insn_s, last_bfun->insn))
    btrace_clear (tp);

  return 0;
}

/* Adjust the block trace in order to stitch old and new trace together.
   BTRACE is the new delta trace between the last and the current stop.
   TP is the traced thread.
   May modify BTRACE as well as the existing trace in TP.
   Return 0 on success, -1 otherwise. */

static int
btrace_stitch_trace (struct btrace_data *btrace, struct thread_info *tp)
{
  /* If we don't have trace, there's nothing to do. */
  if (btrace_data_empty (btrace))
    return 0;

  switch (btrace->format)
    {
    case BTRACE_FORMAT_NONE:
      return 0;

    case BTRACE_FORMAT_BTS:
      return btrace_stitch_bts (&btrace->variant.bts, tp);
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
}

/* Clear the branch trace histories in BTINFO. */

static void
btrace_clear_history (struct btrace_thread_info *btinfo)
{
  xfree (btinfo->insn_history);
  xfree (btinfo->call_history);
  xfree (btinfo->replay);

  btinfo->insn_history = NULL;
  btinfo->call_history = NULL;
  btinfo->replay = NULL;
}

/* See btrace.h. */

void
btrace_fetch (struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;
  struct btrace_target_info *tinfo;
  struct btrace_data btrace;
  struct cleanup *cleanup;
  int errcode;

  DEBUG ("fetch thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));

  btinfo = &tp->btrace;
  tinfo = btinfo->target;
  if (tinfo == NULL)
    return;

  /* There's no way we could get new trace while replaying.
     On the other hand, delta trace would return a partial record with the
     current PC, which is the replay PC, not the last PC, as expected. */
  if (btinfo->replay != NULL)
    return;

  btrace_data_init (&btrace);
  cleanup = make_cleanup_btrace_data (&btrace);

  /* Let's first try to extend the trace we already have. */
  if (btinfo->end != NULL)
    {
      errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_DELTA);
      if (errcode == 0)
        {
          /* Success. Let's try to stitch the traces together. */
          errcode = btrace_stitch_trace (&btrace, tp);
        }
      else
        {
          /* We failed to read delta trace. Let's try to read new trace. */
          errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_NEW);

          /* If we got any new trace, discard what we have. */
          if (errcode == 0 && !btrace_data_empty (&btrace))
            btrace_clear (tp);
        }

      /* If we were not able to read the trace, we start over. */
      if (errcode != 0)
        {
          btrace_clear (tp);
          errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);
        }
    }
  else
    errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);

  /* If we were not able to read the branch trace, signal an error. */
  if (errcode != 0)
    error (_("Failed to read branch trace."));

  /* Compute the trace, provided we have any. */
  if (!btrace_data_empty (&btrace))
    {
      btrace_clear_history (btinfo);
      btrace_compute_ftrace (tp, &btrace);
    }

  do_cleanups (cleanup);
}

/* See btrace.h. */

void
btrace_clear (struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;
  struct btrace_function *it, *trash;

  DEBUG ("clear thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));

  /* Make sure btrace frames that may hold a pointer into the branch
     trace data are destroyed. */
  reinit_frame_cache ();

  btinfo = &tp->btrace;

  it = btinfo->begin;
  while (it != NULL)
    {
      trash = it;
      it = it->flow.next;

      xfree (trash);
    }

  btinfo->begin = NULL;
  btinfo->end = NULL;
  btinfo->ngaps = 0;

  btrace_clear_history (btinfo);
}

/* See btrace.h. */

void
btrace_free_objfile (struct objfile *objfile)
{
  struct thread_info *tp;

  DEBUG ("free objfile");

  ALL_NON_EXITED_THREADS (tp)
    btrace_clear (tp);
}

#if defined (HAVE_LIBEXPAT)

/* Check the btrace document version. */

static void
check_xml_btrace_version (struct gdb_xml_parser *parser,
                          const struct gdb_xml_element *element,
                          void *user_data, VEC (gdb_xml_value_s) *attributes)
{
  const char *version = xml_find_attribute (attributes, "version")->value;

  if (strcmp (version, "1.0") != 0)
    gdb_xml_error (parser, _("Unsupported btrace version: \"%s\""), version);
}

/* Parse a btrace "block" xml record. */

static void
parse_xml_btrace_block (struct gdb_xml_parser *parser,
                        const struct gdb_xml_element *element,
                        void *user_data, VEC (gdb_xml_value_s) *attributes)
{
  struct btrace_data *btrace;
  struct btrace_block *block;
  ULONGEST *begin, *end;

  btrace = user_data;

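  /* The first block record switches the data from BTRACE_FORMAT_NONE to
     BTRACE_FORMAT_BTS; subsequent block records expect it to stay BTS. */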
  switch (btrace->format)
    {
    case BTRACE_FORMAT_BTS:
      break;

    case BTRACE_FORMAT_NONE:
      btrace->format = BTRACE_FORMAT_BTS;
      btrace->variant.bts.blocks = NULL;
      break;

    default:
      gdb_xml_error (parser, _("Btrace format error."));
    }

  begin = xml_find_attribute (attributes, "begin")->value;
  end = xml_find_attribute (attributes, "end")->value;

  block = VEC_safe_push (btrace_block_s, btrace->variant.bts.blocks, NULL);
  block->begin = *begin;
  block->end = *end;
}

static const struct gdb_xml_attribute block_attributes[] = {
  { "begin", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { "end", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

static const struct gdb_xml_attribute btrace_attributes[] = {
  { "version", GDB_XML_AF_NONE, NULL, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

static const struct gdb_xml_element btrace_children[] = {
  { "block", block_attributes, NULL,
    GDB_XML_EF_REPEATABLE | GDB_XML_EF_OPTIONAL, parse_xml_btrace_block, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

static const struct gdb_xml_element btrace_elements[] = {
  { "btrace", btrace_attributes, btrace_children, GDB_XML_EF_NONE,
    check_xml_btrace_version, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

#endif /* defined (HAVE_LIBEXPAT) */

/* See btrace.h. */

void
parse_xml_btrace (struct btrace_data *btrace, const char *buffer)
{
  struct cleanup *cleanup;
  int errcode;

#if defined (HAVE_LIBEXPAT)

  btrace->format = BTRACE_FORMAT_NONE;

  cleanup = make_cleanup_btrace_data (btrace);
  errcode = gdb_xml_parse_quick (_("btrace"), "btrace.dtd", btrace_elements,
                                 buffer, btrace);
  if (errcode != 0)
    error (_("Error parsing branch trace."));

  /* Keep parse results. */
  discard_cleanups (cleanup);

#else /* !defined (HAVE_LIBEXPAT) */

  error (_("Cannot process branch trace. XML parsing is not supported."));

#endif /* !defined (HAVE_LIBEXPAT) */
}

#if defined (HAVE_LIBEXPAT)

/* Parse a btrace-conf "bts" xml record. */

static void
parse_xml_btrace_conf_bts (struct gdb_xml_parser *parser,
                           const struct gdb_xml_element *element,
                           void *user_data, VEC (gdb_xml_value_s) *attributes)
{
  struct btrace_config *conf;
  struct gdb_xml_value *size;

  conf = user_data;
  conf->format = BTRACE_FORMAT_BTS;
  conf->bts.size = 0;

  size = xml_find_attribute (attributes, "size");
  if (size != NULL)
    conf->bts.size = (unsigned int) *(ULONGEST *) size->value;
}

static const struct gdb_xml_attribute btrace_conf_bts_attributes[] = {
  { "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

static const struct gdb_xml_element btrace_conf_children[] = {
  { "bts", btrace_conf_bts_attributes, NULL, GDB_XML_EF_OPTIONAL,
    parse_xml_btrace_conf_bts, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

static const struct gdb_xml_attribute btrace_conf_attributes[] = {
  { "version", GDB_XML_AF_NONE, NULL, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

static const struct gdb_xml_element btrace_conf_elements[] = {
  { "btrace-conf", btrace_conf_attributes, btrace_conf_children,
    GDB_XML_EF_NONE, NULL, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

#endif /* defined (HAVE_LIBEXPAT) */

/* See btrace.h. */

void
parse_xml_btrace_conf (struct btrace_config *conf, const char *xml)
{
  int errcode;

#if defined (HAVE_LIBEXPAT)

  errcode = gdb_xml_parse_quick (_("btrace-conf"), "btrace-conf.dtd",
                                 btrace_conf_elements, xml, conf);
  if (errcode != 0)
    error (_("Error parsing branch trace configuration."));

#else /* !defined (HAVE_LIBEXPAT) */

  error (_("XML parsing is not supported."));

#endif /* !defined (HAVE_LIBEXPAT) */
}

/* See btrace.h. */

const struct btrace_insn *
btrace_insn_get (const struct btrace_insn_iterator *it)
{
  const struct btrace_function *bfun;
  unsigned int index, end;

  index = it->index;
  bfun = it->function;

  /* Check if the iterator points to a gap in the trace. */
  if (bfun->errcode != 0)
    return NULL;

  /* The index is within the bounds of this function's instruction vector. */
  end = VEC_length (btrace_insn_s, bfun->insn);
  gdb_assert (0 < end);
  gdb_assert (index < end);

  return VEC_index (btrace_insn_s, bfun->insn, index);
}

/* See btrace.h. */

unsigned int
btrace_insn_number (const struct btrace_insn_iterator *it)
{
  const struct btrace_function *bfun;

  bfun = it->function;

  /* Return zero if the iterator points to a gap in the trace. */
  if (bfun->errcode != 0)
    return 0;

  return bfun->insn_offset + it->index;
}

/* See btrace.h. */

void
btrace_insn_begin (struct btrace_insn_iterator *it,
                   const struct btrace_thread_info *btinfo)
{
  const struct btrace_function *bfun;

  bfun = btinfo->begin;
  if (bfun == NULL)
    error (_("No trace."));

  it->function = bfun;
  it->index = 0;
}

/* See btrace.h. */

void
btrace_insn_end (struct btrace_insn_iterator *it,
                 const struct btrace_thread_info *btinfo)
{
  const struct btrace_function *bfun;
  unsigned int length;

  bfun = btinfo->end;
  if (bfun == NULL)
    error (_("No trace."));

  length = VEC_length (btrace_insn_s, bfun->insn);

  /* The last function may either be a gap or it contains the current
     instruction, which is one past the end of the execution trace; ignore
     it. */
  if (length > 0)
    length -= 1;

  it->function = bfun;
  it->index = length;
}

/* See btrace.h. */

unsigned int
btrace_insn_next (struct btrace_insn_iterator *it, unsigned int stride)
{
  const struct btrace_function *bfun;
  unsigned int index, steps;

  bfun = it->function;
  steps = 0;
  index = it->index;

  while (stride != 0)
    {
      unsigned int end, space, adv;

      end = VEC_length (btrace_insn_s, bfun->insn);

      /* An empty function segment represents a gap in the trace. We count
         it as one instruction. */
      if (end == 0)
        {
          const struct btrace_function *next;

          next = bfun->flow.next;
          if (next == NULL)
            break;

          stride -= 1;
          steps += 1;

          bfun = next;
          index = 0;

          continue;
        }

      gdb_assert (0 < end);
      gdb_assert (index < end);

      /* Compute the number of instructions remaining in this segment. */
      space = end - index;

      /* Advance the iterator as far as possible within this segment. */
      adv = min (space, stride);
      stride -= adv;
      index += adv;
      steps += adv;

      /* Move to the next function if we're at the end of this one. */
      if (index == end)
        {
          const struct btrace_function *next;

          next = bfun->flow.next;
          if (next == NULL)
            {
              /* We stepped past the last function.

                 Let's adjust the index to point to the last instruction in
                 the previous function. */
              index -= 1;
              steps -= 1;
              break;
            }

          /* We now point to the first instruction in the new function. */
          bfun = next;
          index = 0;
        }

      /* We did make progress. */
      gdb_assert (adv > 0);
    }

  /* Update the iterator. */
  it->function = bfun;
  it->index = index;

  return steps;
}

/* See btrace.h. */

unsigned int
btrace_insn_prev (struct btrace_insn_iterator *it, unsigned int stride)
{
  const struct btrace_function *bfun;
  unsigned int index, steps;

  bfun = it->function;
  steps = 0;
  index = it->index;

  while (stride != 0)
    {
      unsigned int adv;

      /* Move to the previous function if we're at the start of this one. */
      if (index == 0)
        {
          const struct btrace_function *prev;

          prev = bfun->flow.prev;
          if (prev == NULL)
            break;

          /* We point to one after the last instruction in the new function. */
          bfun = prev;
          index = VEC_length (btrace_insn_s, bfun->insn);

          /* An empty function segment represents a gap in the trace. We count
             it as one instruction. */
          if (index == 0)
            {
              stride -= 1;
              steps += 1;

              continue;
            }
        }

      /* Advance the iterator as far as possible within this segment. */
      adv = min (index, stride);

      stride -= adv;
      index -= adv;
      steps += adv;

      /* We did make progress. */
      gdb_assert (adv > 0);
    }

  /* Update the iterator. */
  it->function = bfun;
  it->index = index;

  return steps;
}

/* See btrace.h. */

int
btrace_insn_cmp (const struct btrace_insn_iterator *lhs,
                 const struct btrace_insn_iterator *rhs)
{
  unsigned int lnum, rnum;

  lnum = btrace_insn_number (lhs);
  rnum = btrace_insn_number (rhs);

  /* A gap has an instruction number of zero. Things are getting more
     complicated if gaps are involved.

     We take the instruction number offset from the iterator's function.
     This is the number of the first instruction after the gap.

     This is OK as long as both lhs and rhs point to gaps. If only one of
     them does, we need to adjust the number based on the other's regular
     instruction number. Otherwise, a gap might compare equal to an
     instruction. */

  if (lnum == 0 && rnum == 0)
    {
      lnum = lhs->function->insn_offset;
      rnum = rhs->function->insn_offset;
    }
  else if (lnum == 0)
    {
      lnum = lhs->function->insn_offset;

      if (lnum == rnum)
        lnum -= 1;
    }
  else if (rnum == 0)
    {
      rnum = rhs->function->insn_offset;

      if (rnum == lnum)
        rnum -= 1;
    }

  return (int) (lnum - rnum);
}

/* See btrace.h. */

int
btrace_find_insn_by_number (struct btrace_insn_iterator *it,
                            const struct btrace_thread_info *btinfo,
                            unsigned int number)
{
  const struct btrace_function *bfun;
  unsigned int end, length;

  for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
    {
      /* Skip gaps. */
      if (bfun->errcode != 0)
        continue;

      if (bfun->insn_offset <= number)
        break;
    }

  if (bfun == NULL)
    return 0;

  length = VEC_length (btrace_insn_s, bfun->insn);
  gdb_assert (length > 0);

  end = bfun->insn_offset + length;
  if (end <= number)
    return 0;

  it->function = bfun;
  it->index = number - bfun->insn_offset;

  return 1;
}

/* See btrace.h. */

const struct btrace_function *
btrace_call_get (const struct btrace_call_iterator *it)
{
  return it->function;
}

/* See btrace.h. */

unsigned int
btrace_call_number (const struct btrace_call_iterator *it)
{
  const struct btrace_thread_info *btinfo;
  const struct btrace_function *bfun;
  unsigned int insns;

  btinfo = it->btinfo;
  bfun = it->function;
  if (bfun != NULL)
    return bfun->number;

  /* For the end iterator, i.e. bfun == NULL, we return one more than the
     number of the last function. */
  bfun = btinfo->end;
  insns = VEC_length (btrace_insn_s, bfun->insn);

  /* If the function contains only a single instruction (i.e. the current
     instruction), it will be skipped and its number is already the number
     we seek. */
  if (insns == 1)
    return bfun->number;

  /* Otherwise, return one more than the number of the last function. */
  return bfun->number + 1;
}

/* See btrace.h. */

void
btrace_call_begin (struct btrace_call_iterator *it,
                   const struct btrace_thread_info *btinfo)
{
  const struct btrace_function *bfun;

  bfun = btinfo->begin;
  if (bfun == NULL)
    error (_("No trace."));

  it->btinfo = btinfo;
  it->function = bfun;
}

/* See btrace.h. */

void
btrace_call_end (struct btrace_call_iterator *it,
                 const struct btrace_thread_info *btinfo)
{
  const struct btrace_function *bfun;

  bfun = btinfo->end;
  if (bfun == NULL)
    error (_("No trace."));

  it->btinfo = btinfo;
  it->function = NULL;
}

/* See btrace.h. */

unsigned int
btrace_call_next (struct btrace_call_iterator *it, unsigned int stride)
{
  const struct btrace_function *bfun;
  unsigned int steps;

  bfun = it->function;
  steps = 0;
  while (bfun != NULL)
    {
      const struct btrace_function *next;
      unsigned int insns;

      next = bfun->flow.next;
      if (next == NULL)
        {
          /* Ignore the last function if it only contains a single
             (i.e. the current) instruction. */
          insns = VEC_length (btrace_insn_s, bfun->insn);
          if (insns == 1)
            steps -= 1;
        }

      if (stride == steps)
        break;

      bfun = next;
      steps += 1;
    }

  it->function = bfun;
  return steps;
}

/* See btrace.h. */

unsigned int
btrace_call_prev (struct btrace_call_iterator *it, unsigned int stride)
{
  const struct btrace_thread_info *btinfo;
  const struct btrace_function *bfun;
  unsigned int steps;

  bfun = it->function;
  steps = 0;

  if (bfun == NULL)
    {
      unsigned int insns;

      btinfo = it->btinfo;
      bfun = btinfo->end;
      if (bfun == NULL)
        return 0;

      /* Ignore the last function if it only contains a single
         (i.e. the current) instruction. */
      insns = VEC_length (btrace_insn_s, bfun->insn);
      if (insns == 1)
        bfun = bfun->flow.prev;

      if (bfun == NULL)
        return 0;

      steps += 1;
    }

  while (steps < stride)
    {
      const struct btrace_function *prev;

      prev = bfun->flow.prev;
      if (prev == NULL)
        break;

      bfun = prev;
      steps += 1;
    }

  it->function = bfun;
  return steps;
}

/* See btrace.h. */

int
btrace_call_cmp (const struct btrace_call_iterator *lhs,
                 const struct btrace_call_iterator *rhs)
{
  unsigned int lnum, rnum;

  lnum = btrace_call_number (lhs);
  rnum = btrace_call_number (rhs);

  return (int) (lnum - rnum);
}

/* See btrace.h. */

int
btrace_find_call_by_number (struct btrace_call_iterator *it,
                            const struct btrace_thread_info *btinfo,
                            unsigned int number)
{
  const struct btrace_function *bfun;

  for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
    {
      unsigned int bnum;

      bnum = bfun->number;
      if (number == bnum)
        {
          it->btinfo = btinfo;
          it->function = bfun;
          return 1;
        }

      /* Functions are ordered and numbered consecutively. We could bail out
         earlier. On the other hand, it is very unlikely that we search for
         a nonexistent function. */
    }

  return 0;
}

/* See btrace.h. */

void
btrace_set_insn_history (struct btrace_thread_info *btinfo,
                         const struct btrace_insn_iterator *begin,
                         const struct btrace_insn_iterator *end)
{
  if (btinfo->insn_history == NULL)
    btinfo->insn_history = xzalloc (sizeof (*btinfo->insn_history));

  btinfo->insn_history->begin = *begin;
  btinfo->insn_history->end = *end;
}

/* See btrace.h. */

void
btrace_set_call_history (struct btrace_thread_info *btinfo,
                         const struct btrace_call_iterator *begin,
                         const struct btrace_call_iterator *end)
{
  gdb_assert (begin->btinfo == end->btinfo);

  if (btinfo->call_history == NULL)
    btinfo->call_history = xzalloc (sizeof (*btinfo->call_history));

  btinfo->call_history->begin = *begin;
  btinfo->call_history->end = *end;
}

/* See btrace.h. */

int
btrace_is_replaying (struct thread_info *tp)
{
  return tp->btrace.replay != NULL;
}

/* See btrace.h. */

int
btrace_is_empty (struct thread_info *tp)
{
  struct btrace_insn_iterator begin, end;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;

  if (btinfo->begin == NULL)
    return 1;

  btrace_insn_begin (&begin, btinfo);
  btrace_insn_end (&end, btinfo);

  return btrace_insn_cmp (&begin, &end) == 0;
}

/* Forward the cleanup request. */

static void
do_btrace_data_cleanup (void *arg)
{
  btrace_data_fini (arg);
}

/* See btrace.h. */

struct cleanup *
make_cleanup_btrace_data (struct btrace_data *data)
{
  return make_cleanup (do_btrace_data_cleanup, data);
}