btrace: update btrace_compute_ftrace parameters
[deliverable/binutils-gdb.git] / gdb / btrace.c
1 /* Branch trace support for GDB, the GNU debugger.
2
3 Copyright (C) 2013-2015 Free Software Foundation, Inc.
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include "btrace.h"
24 #include "gdbthread.h"
25 #include "inferior.h"
26 #include "target.h"
27 #include "record.h"
28 #include "symtab.h"
29 #include "disasm.h"
30 #include "source.h"
31 #include "filenames.h"
32 #include "xml-support.h"
33 #include "regcache.h"
34
/* Print a record debug message.  Use do ... while (0) to avoid ambiguities
   when used in if statements.  Only prints when the "record" debug flag
   (record_debug) is non-zero.  */

#define DEBUG(msg, args...) \
  do \
    { \
      if (record_debug != 0) \
	fprintf_unfiltered (gdb_stdlog, \
			    "[btrace] " msg "\n", ##args); \
    } \
  while (0)

/* Like DEBUG, but additionally tag the message with "[ftrace] " to mark
   output from the function-trace computation.  */

#define DEBUG_FTRACE(msg, args...) DEBUG ("[ftrace] " msg, ##args)
48
49 /* Return the function name of a recorded function segment for printing.
50 This function never returns NULL. */
51
52 static const char *
53 ftrace_print_function_name (const struct btrace_function *bfun)
54 {
55 struct minimal_symbol *msym;
56 struct symbol *sym;
57
58 msym = bfun->msym;
59 sym = bfun->sym;
60
61 if (sym != NULL)
62 return SYMBOL_PRINT_NAME (sym);
63
64 if (msym != NULL)
65 return MSYMBOL_PRINT_NAME (msym);
66
67 return "<unknown>";
68 }
69
70 /* Return the file name of a recorded function segment for printing.
71 This function never returns NULL. */
72
73 static const char *
74 ftrace_print_filename (const struct btrace_function *bfun)
75 {
76 struct symbol *sym;
77 const char *filename;
78
79 sym = bfun->sym;
80
81 if (sym != NULL)
82 filename = symtab_to_filename_for_display (symbol_symtab (sym));
83 else
84 filename = "<unknown>";
85
86 return filename;
87 }
88
89 /* Return a string representation of the address of an instruction.
90 This function never returns NULL. */
91
92 static const char *
93 ftrace_print_insn_addr (const struct btrace_insn *insn)
94 {
95 if (insn == NULL)
96 return "<nil>";
97
98 return core_addr_to_string_nz (insn->pc);
99 }
100
101 /* Print an ftrace debug status message. */
102
103 static void
104 ftrace_debug (const struct btrace_function *bfun, const char *prefix)
105 {
106 const char *fun, *file;
107 unsigned int ibegin, iend;
108 int lbegin, lend, level;
109
110 fun = ftrace_print_function_name (bfun);
111 file = ftrace_print_filename (bfun);
112 level = bfun->level;
113
114 lbegin = bfun->lbegin;
115 lend = bfun->lend;
116
117 ibegin = bfun->insn_offset;
118 iend = ibegin + VEC_length (btrace_insn_s, bfun->insn);
119
120 DEBUG_FTRACE ("%s: fun = %s, file = %s, level = %d, lines = [%d; %d], "
121 "insn = [%u; %u)", prefix, fun, file, level, lbegin, lend,
122 ibegin, iend);
123 }
124
/* Return non-zero if BFUN does not match MFUN and FUN,
   return zero otherwise.

   BFUN is an existing recorded function segment; MFUN and FUN are the
   minimal symbol and the full debug symbol found for the instruction
   currently being processed.  A non-zero result indicates that the
   instruction belongs to a different function than BFUN.  */

static int
ftrace_function_switched (const struct btrace_function *bfun,
			  const struct minimal_symbol *mfun,
			  const struct symbol *fun)
{
  struct minimal_symbol *msym;
  struct symbol *sym;

  msym = bfun->msym;
  sym = bfun->sym;

  /* If the minimal symbol changed, we certainly switched functions.  */
  if (mfun != NULL && msym != NULL
      && strcmp (MSYMBOL_LINKAGE_NAME (mfun), MSYMBOL_LINKAGE_NAME (msym)) != 0)
    return 1;

  /* If the symbol changed, we certainly switched functions.  */
  if (fun != NULL && sym != NULL)
    {
      const char *bfname, *fname;

      /* Check the function name.  */
      if (strcmp (SYMBOL_LINKAGE_NAME (fun), SYMBOL_LINKAGE_NAME (sym)) != 0)
	return 1;

      /* Check the location of those functions, as well.  Identically named
	 functions (e.g. file-static ones) may live in different files.  */
      bfname = symtab_to_fullname (symbol_symtab (sym));
      fname = symtab_to_fullname (symbol_symtab (fun));
      if (filename_cmp (fname, bfname) != 0)
	return 1;
    }

  /* If we lost symbol information, we switched functions.  */
  if (!(msym == NULL && sym == NULL) && mfun == NULL && fun == NULL)
    return 1;

  /* If we gained symbol information, we switched functions.  */
  if (msym == NULL && sym == NULL && !(mfun == NULL && fun == NULL))
    return 1;

  return 0;
}
170
171 /* Return non-zero if we should skip this file when generating the function
172 call history, zero otherwise.
173 We would want to do that if, say, a macro that is defined in another file
174 is expanded in this function. */
175
176 static int
177 ftrace_skip_file (const struct btrace_function *bfun, const char *fullname)
178 {
179 struct symbol *sym;
180 const char *bfile;
181
182 sym = bfun->sym;
183 if (sym == NULL)
184 return 1;
185
186 bfile = symtab_to_fullname (symbol_symtab (sym));
187
188 return (filename_cmp (bfile, fullname) != 0);
189 }
190
/* Allocate and initialize a new branch trace function segment.
   PREV is the chronologically preceding function segment; may be NULL for
   the very first segment.
   MFUN and FUN are the symbol information we have for this function.
   The new segment is appended to PREV's flow chain and receives the next
   segment number and instruction offset.  */

static struct btrace_function *
ftrace_new_function (struct btrace_function *prev,
		     struct minimal_symbol *mfun,
		     struct symbol *fun)
{
  struct btrace_function *bfun;

  /* xzalloc zero-initializes; all links other than flow.prev start NULL.  */
  bfun = xzalloc (sizeof (*bfun));

  bfun->msym = mfun;
  bfun->sym = fun;
  bfun->flow.prev = prev;

  /* We start with the identities of min and max, respectively.  The range
     is narrowed later as lines are added by ftrace_update_lines.  */
  bfun->lbegin = INT_MAX;
  bfun->lend = INT_MIN;

  if (prev == NULL)
    {
      /* Start counting at one.  */
      bfun->number = 1;
      bfun->insn_offset = 1;
    }
  else
    {
      /* PREV must be the chronologically last segment so far.  */
      gdb_assert (prev->flow.next == NULL);
      prev->flow.next = bfun;

      bfun->number = prev->number + 1;
      bfun->insn_offset = (prev->insn_offset
			   + VEC_length (btrace_insn_s, prev->insn));
    }

  return bfun;
}
230
231 /* Update the UP field of a function segment. */
232
233 static void
234 ftrace_update_caller (struct btrace_function *bfun,
235 struct btrace_function *caller,
236 enum btrace_function_flag flags)
237 {
238 if (bfun->up != NULL)
239 ftrace_debug (bfun, "updating caller");
240
241 bfun->up = caller;
242 bfun->flags = flags;
243
244 ftrace_debug (bfun, "set caller");
245 }
246
247 /* Fix up the caller for all segments of a function. */
248
249 static void
250 ftrace_fixup_caller (struct btrace_function *bfun,
251 struct btrace_function *caller,
252 enum btrace_function_flag flags)
253 {
254 struct btrace_function *prev, *next;
255
256 ftrace_update_caller (bfun, caller, flags);
257
258 /* Update all function segments belonging to the same function. */
259 for (prev = bfun->segment.prev; prev != NULL; prev = prev->segment.prev)
260 ftrace_update_caller (prev, caller, flags);
261
262 for (next = bfun->segment.next; next != NULL; next = next->segment.next)
263 ftrace_update_caller (next, caller, flags);
264 }
265
266 /* Add a new function segment for a call.
267 CALLER is the chronologically preceding function segment.
268 MFUN and FUN are the symbol information we have for this function. */
269
270 static struct btrace_function *
271 ftrace_new_call (struct btrace_function *caller,
272 struct minimal_symbol *mfun,
273 struct symbol *fun)
274 {
275 struct btrace_function *bfun;
276
277 bfun = ftrace_new_function (caller, mfun, fun);
278 bfun->up = caller;
279 bfun->level = caller->level + 1;
280
281 ftrace_debug (bfun, "new call");
282
283 return bfun;
284 }
285
286 /* Add a new function segment for a tail call.
287 CALLER is the chronologically preceding function segment.
288 MFUN and FUN are the symbol information we have for this function. */
289
290 static struct btrace_function *
291 ftrace_new_tailcall (struct btrace_function *caller,
292 struct minimal_symbol *mfun,
293 struct symbol *fun)
294 {
295 struct btrace_function *bfun;
296
297 bfun = ftrace_new_function (caller, mfun, fun);
298 bfun->up = caller;
299 bfun->level = caller->level + 1;
300 bfun->flags |= BFUN_UP_LINKS_TO_TAILCALL;
301
302 ftrace_debug (bfun, "new tail call");
303
304 return bfun;
305 }
306
307 /* Find the innermost caller in the back trace of BFUN with MFUN/FUN
308 symbol information. */
309
310 static struct btrace_function *
311 ftrace_find_caller (struct btrace_function *bfun,
312 struct minimal_symbol *mfun,
313 struct symbol *fun)
314 {
315 for (; bfun != NULL; bfun = bfun->up)
316 {
317 /* Skip functions with incompatible symbol information. */
318 if (ftrace_function_switched (bfun, mfun, fun))
319 continue;
320
321 /* This is the function segment we're looking for. */
322 break;
323 }
324
325 return bfun;
326 }
327
328 /* Find the innermost caller in the back trace of BFUN, skipping all
329 function segments that do not end with a call instruction (e.g.
330 tail calls ending with a jump). */
331
332 static struct btrace_function *
333 ftrace_find_call (struct gdbarch *gdbarch, struct btrace_function *bfun)
334 {
335 for (; bfun != NULL; bfun = bfun->up)
336 {
337 struct btrace_insn *last;
338 CORE_ADDR pc;
339
340 /* We do not allow empty function segments. */
341 gdb_assert (!VEC_empty (btrace_insn_s, bfun->insn));
342
343 last = VEC_last (btrace_insn_s, bfun->insn);
344 pc = last->pc;
345
346 if (gdbarch_insn_is_call (gdbarch, pc))
347 break;
348 }
349
350 return bfun;
351 }
352
/* Add a continuation segment for a function into which we return.
   PREV is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function.
   GDBARCH is used to inspect instructions when searching the back trace
   for a call.  */

static struct btrace_function *
ftrace_new_return (struct gdbarch *gdbarch,
		   struct btrace_function *prev,
		   struct minimal_symbol *mfun,
		   struct symbol *fun)
{
  struct btrace_function *bfun, *caller;

  bfun = ftrace_new_function (prev, mfun, fun);

  /* It is important to start at PREV's caller.  Otherwise, we might find
     PREV itself, if PREV is a recursive function.  */
  caller = ftrace_find_caller (prev->up, mfun, fun);
  if (caller != NULL)
    {
      /* The caller of PREV is the preceding btrace function segment in this
	 function instance.  */
      gdb_assert (caller->segment.next == NULL);

      /* Link the two segments of the same function instance.  */
      caller->segment.next = bfun;
      bfun->segment.prev = caller;

      /* Maintain the function level.  */
      bfun->level = caller->level;

      /* Maintain the call stack.  */
      bfun->up = caller->up;
      bfun->flags = caller->flags;

      ftrace_debug (bfun, "new return");
    }
  else
    {
      /* We did not find a caller.  This could mean that something went
	 wrong or that the call is simply not included in the trace.  */

      /* Let's search for some actual call.  */
      caller = ftrace_find_call (gdbarch, prev->up);
      if (caller == NULL)
	{
	  /* There is no call in PREV's back trace.  We assume that the
	     branch trace did not include it.  */

	  /* Let's find the topmost call function - this skips tail calls.  */
	  while (prev->up != NULL)
	    prev = prev->up;

	  /* We maintain levels for a series of returns for which we have
	     not seen the calls.
	     We start at the preceding function's level in case this has
	     already been a return for which we have not seen the call.
	     We start at level 0 otherwise, to handle tail calls correctly.  */
	  bfun->level = min (0, prev->level) - 1;

	  /* Fix up the call stack for PREV: every segment of PREV's
	     function now returns to BFUN.  */
	  ftrace_fixup_caller (prev, bfun, BFUN_UP_LINKS_TO_RET);

	  ftrace_debug (bfun, "new return - no caller");
	}
      else
	{
	  /* There is a call in PREV's back trace to which we should have
	     returned.  Let's remain at this level.  */
	  bfun->level = prev->level;

	  ftrace_debug (bfun, "new return - unknown caller");
	}
    }

  return bfun;
}
428
429 /* Add a new function segment for a function switch.
430 PREV is the chronologically preceding function segment.
431 MFUN and FUN are the symbol information we have for this function. */
432
433 static struct btrace_function *
434 ftrace_new_switch (struct btrace_function *prev,
435 struct minimal_symbol *mfun,
436 struct symbol *fun)
437 {
438 struct btrace_function *bfun;
439
440 /* This is an unexplained function switch. The call stack will likely
441 be wrong at this point. */
442 bfun = ftrace_new_function (prev, mfun, fun);
443
444 /* We keep the function level. */
445 bfun->level = prev->level;
446
447 ftrace_debug (bfun, "new switch");
448
449 return bfun;
450 }
451
/* Update BFUN with respect to the instruction at PC.  This may create new
   function segments.
   Return the chronologically latest function segment, never NULL.  */

static struct btrace_function *
ftrace_update_function (struct gdbarch *gdbarch,
			struct btrace_function *bfun, CORE_ADDR pc)
{
  struct bound_minimal_symbol bmfun;
  struct minimal_symbol *mfun;
  struct symbol *fun;
  struct btrace_insn *last;

  /* Try to determine the function we're in.  We use both types of symbols
     to avoid surprises when we sometimes get a full symbol and sometimes
     only a minimal symbol.  */
  fun = find_pc_function (pc);
  bmfun = lookup_minimal_symbol_by_pc (pc);
  mfun = bmfun.minsym;

  if (fun == NULL && mfun == NULL)
    DEBUG_FTRACE ("no symbol at %s", core_addr_to_string_nz (pc));

  /* If we didn't have a function before, we create one.  */
  if (bfun == NULL)
    return ftrace_new_function (bfun, mfun, fun);

  /* Check the last instruction, if we have one.
     We do this check first, since it allows us to fill in the call stack
     links in addition to the normal flow links.  */
  last = NULL;
  if (!VEC_empty (btrace_insn_s, bfun->insn))
    last = VEC_last (btrace_insn_s, bfun->insn);

  if (last != NULL)
    {
      CORE_ADDR lpc;

      lpc = last->pc;

      /* Check for returns.  */
      if (gdbarch_insn_is_ret (gdbarch, lpc))
	return ftrace_new_return (gdbarch, bfun, mfun, fun);

      /* Check for calls.  */
      if (gdbarch_insn_is_call (gdbarch, lpc))
	{
	  int size;

	  size = gdb_insn_length (gdbarch, lpc);

	  /* Ignore calls to the next instruction.  They are used for PIC.  */
	  if (lpc + size != pc)
	    return ftrace_new_call (bfun, mfun, fun);
	}
    }

  /* Check if we're switching functions for some other reason.  */
  if (ftrace_function_switched (bfun, mfun, fun))
    {
      DEBUG_FTRACE ("switching from %s in %s at %s",
		    ftrace_print_insn_addr (last),
		    ftrace_print_function_name (bfun),
		    ftrace_print_filename (bfun));

      if (last != NULL)
	{
	  CORE_ADDR start, lpc;

	  start = get_pc_function_start (pc);

	  /* If we can't determine the function for PC, we treat a jump at
	     the end of the block as tail call.  */
	  if (start == 0)
	    start = pc;

	  lpc = last->pc;

	  /* Jumps indicate optimized tail calls: PC is the start of the
	     new function and the previous instruction was a jump.  */
	  if (start == pc && gdbarch_insn_is_jump (gdbarch, lpc))
	    return ftrace_new_tailcall (bfun, mfun, fun);
	}

      /* Anything else is an unexplained switch.  */
      return ftrace_new_switch (bfun, mfun, fun);
    }

  return bfun;
}
540
541 /* Update BFUN's source range with respect to the instruction at PC. */
542
543 static void
544 ftrace_update_lines (struct btrace_function *bfun, CORE_ADDR pc)
545 {
546 struct symtab_and_line sal;
547 const char *fullname;
548
549 sal = find_pc_line (pc, 0);
550 if (sal.symtab == NULL || sal.line == 0)
551 {
552 DEBUG_FTRACE ("no lines at %s", core_addr_to_string_nz (pc));
553 return;
554 }
555
556 /* Check if we switched files. This could happen if, say, a macro that
557 is defined in another file is expanded here. */
558 fullname = symtab_to_fullname (sal.symtab);
559 if (ftrace_skip_file (bfun, fullname))
560 {
561 DEBUG_FTRACE ("ignoring file at %s, file=%s",
562 core_addr_to_string_nz (pc), fullname);
563 return;
564 }
565
566 /* Update the line range. */
567 bfun->lbegin = min (bfun->lbegin, sal.line);
568 bfun->lend = max (bfun->lend, sal.line);
569
570 if (record_debug > 1)
571 ftrace_debug (bfun, "update lines");
572 }
573
574 /* Add the instruction at PC to BFUN's instructions. */
575
576 static void
577 ftrace_update_insns (struct btrace_function *bfun, CORE_ADDR pc)
578 {
579 struct btrace_insn *insn;
580
581 insn = VEC_safe_push (btrace_insn_s, bfun->insn, NULL);
582 insn->pc = pc;
583
584 if (record_debug > 1)
585 ftrace_debug (bfun, "update insn");
586 }
587
/* Compute the function branch trace from BTS trace.
   TP is the thread whose branch trace info is updated; BTRACE holds the
   raw block trace.  Beware that the block vector is ordered with the most
   recent block first, so we iterate it backwards.  */

static void
btrace_compute_ftrace_bts (struct thread_info *tp,
			   const struct btrace_data_bts *btrace)
{
  struct btrace_thread_info *btinfo;
  struct btrace_function *begin, *end;
  struct gdbarch *gdbarch;
  unsigned int blk;
  int level;

  gdbarch = target_gdbarch ();
  btinfo = &tp->btrace;
  begin = btinfo->begin;
  end = btinfo->end;
  /* When extending an existing trace, start from its stored level offset;
     otherwise start at the identity of min.  */
  level = begin != NULL ? -btinfo->level : INT_MAX;
  blk = VEC_length (btrace_block_s, btrace->blocks);

  while (blk != 0)
    {
      btrace_block_s *block;
      CORE_ADDR pc;

      blk -= 1;

      block = VEC_index (btrace_block_s, btrace->blocks, blk);
      pc = block->begin;

      /* Walk the instructions from the block's begin to its end.  */
      for (;;)
	{
	  int size;

	  /* We should hit the end of the block.  Warn if we went too far.  */
	  if (block->end < pc)
	    {
	      warning (_("Recorded trace may be corrupted around %s."),
		       core_addr_to_string_nz (pc));
	      break;
	    }

	  end = ftrace_update_function (gdbarch, end, pc);
	  if (begin == NULL)
	    begin = end;

	  /* Maintain the function level offset.
	     For all but the last block, we do it here.  */
	  if (blk != 0)
	    level = min (level, end->level);

	  ftrace_update_insns (end, pc);
	  ftrace_update_lines (end, pc);

	  /* We're done once we pushed the instruction at the end.  */
	  if (block->end == pc)
	    break;

	  size = gdb_insn_length (gdbarch, pc);

	  /* Make sure we terminate if we fail to compute the size.  */
	  if (size <= 0)
	    {
	      warning (_("Recorded trace may be incomplete around %s."),
		       core_addr_to_string_nz (pc));
	      break;
	    }

	  pc += size;

	  /* Maintain the function level offset.
	     For the last block, we do it here to not consider the last
	     instruction.
	     Since the last instruction corresponds to the current instruction
	     and is not really part of the execution history, it shouldn't
	     affect the level.  */
	  if (blk == 0)
	    level = min (level, end->level);
	}
    }

  btinfo->begin = begin;
  btinfo->end = end;

  /* LEVEL is the minimal function level of all btrace function segments.
     Define the global level offset to -LEVEL so all function levels are
     normalized to start at zero.  */
  btinfo->level = -level;
}
676
677 /* Compute the function branch trace from a block branch trace BTRACE for
678 a thread given by BTINFO. */
679
680 static void
681 btrace_compute_ftrace (struct thread_info *tp, struct btrace_data *btrace)
682 {
683 DEBUG ("compute ftrace");
684
685 switch (btrace->format)
686 {
687 case BTRACE_FORMAT_NONE:
688 return;
689
690 case BTRACE_FORMAT_BTS:
691 btrace_compute_ftrace_bts (tp, &btrace->variant.bts);
692 return;
693 }
694
695 internal_error (__FILE__, __LINE__, _("Unkown branch trace format."));
696 }
697
/* Add an entry for the current PC.
   Builds a minimal BTS trace containing a single block that begins and
   ends at TP's current PC and runs it through the normal function trace
   computation, so recording starts at the point tracing was enabled.  */

static void
btrace_add_pc (struct thread_info *tp)
{
  struct btrace_data btrace;
  struct btrace_block *block;
  struct regcache *regcache;
  struct cleanup *cleanup;
  CORE_ADDR pc;

  regcache = get_thread_regcache (tp->ptid);
  pc = regcache_read_pc (regcache);

  btrace_data_init (&btrace);
  btrace.format = BTRACE_FORMAT_BTS;
  btrace.variant.bts.blocks = NULL;

  /* Make sure the trace data is freed on all paths.  */
  cleanup = make_cleanup_btrace_data (&btrace);

  /* A single empty block: begin == end == PC.  */
  block = VEC_safe_push (btrace_block_s, btrace.variant.bts.blocks, NULL);
  block->begin = pc;
  block->end = pc;

  btrace_compute_ftrace (tp, &btrace);

  do_cleanups (cleanup);
}
726
/* See btrace.h.  */

void
btrace_enable (struct thread_info *tp, const struct btrace_config *conf)
{
  /* Tracing is already enabled for this thread; nothing to do.  */
  if (tp->btrace.target != NULL)
    return;

  if (!target_supports_btrace (conf->format))
    error (_("Target does not support branch tracing."));

  DEBUG ("enable thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));

  tp->btrace.target = target_enable_btrace (tp->ptid, conf);

  /* Add an entry for the current PC so we start tracing from where we
     enabled it.  */
  if (tp->btrace.target != NULL)
    btrace_add_pc (tp);
}
747
748 /* See btrace.h. */
749
750 const struct btrace_config *
751 btrace_conf (const struct btrace_thread_info *btinfo)
752 {
753 if (btinfo->target == NULL)
754 return NULL;
755
756 return target_btrace_conf (btinfo->target);
757 }
758
759 /* See btrace.h. */
760
761 void
762 btrace_disable (struct thread_info *tp)
763 {
764 struct btrace_thread_info *btp = &tp->btrace;
765 int errcode = 0;
766
767 if (btp->target == NULL)
768 return;
769
770 DEBUG ("disable thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
771
772 target_disable_btrace (btp->target);
773 btp->target = NULL;
774
775 btrace_clear (tp);
776 }
777
778 /* See btrace.h. */
779
780 void
781 btrace_teardown (struct thread_info *tp)
782 {
783 struct btrace_thread_info *btp = &tp->btrace;
784 int errcode = 0;
785
786 if (btp->target == NULL)
787 return;
788
789 DEBUG ("teardown thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
790
791 target_teardown_btrace (btp->target);
792 btp->target = NULL;
793
794 btrace_clear (tp);
795 }
796
/* Stitch branch trace in BTS format.
   BTRACE is the new delta trace; BTINFO holds the trace recorded so far.
   Adjusts both so that btrace_compute_ftrace can append the delta to the
   existing trace seamlessly.
   Return 0 on success, -1 if the delta trace looks inconsistent and a
   full re-read is required.  */

static int
btrace_stitch_bts (struct btrace_data_bts *btrace,
		   const struct btrace_thread_info *btinfo)
{
  struct btrace_function *last_bfun;
  struct btrace_insn *last_insn;
  btrace_block_s *first_new_block;

  last_bfun = btinfo->end;
  gdb_assert (last_bfun != NULL);

  /* Beware that block trace starts with the most recent block, so the
     chronologically first block in the new trace is the last block in
     the new trace's block vector.  */
  gdb_assert (!VEC_empty (btrace_block_s, btrace->blocks));
  first_new_block = VEC_last (btrace_block_s, btrace->blocks);
  last_insn = VEC_last (btrace_insn_s, last_bfun->insn);

  /* If the current PC at the end of the block is the same as in our current
     trace, there are two explanations:
       1. we executed the instruction and some branch brought us back.
       2. we have not made any progress.
     In the first case, the delta trace vector should contain at least two
     entries.
     In the second case, the delta trace vector should contain exactly one
     entry for the partial block containing the current PC.  Remove it.  */
  if (first_new_block->end == last_insn->pc
      && VEC_length (btrace_block_s, btrace->blocks) == 1)
    {
      VEC_pop (btrace_block_s, btrace->blocks);
      return 0;
    }

  DEBUG ("stitching %s to %s", ftrace_print_insn_addr (last_insn),
	 core_addr_to_string_nz (first_new_block->end));

  /* Do a simple sanity check to make sure we don't accidentally end up
     with a bad block.  This should not occur in practice.  */
  if (first_new_block->end < last_insn->pc)
    {
      warning (_("Error while trying to read delta trace.  Falling back to "
		 "a full read."));
      return -1;
    }

  /* We adjust the last block to start at the end of our current trace.  */
  gdb_assert (first_new_block->begin == 0);
  first_new_block->begin = last_insn->pc;

  /* We simply pop the last insn so we can insert it again as part of
     the normal branch trace computation.
     Since instruction iterators are based on indices in the instructions
     vector, we don't leave any pointers dangling.  */
  DEBUG ("pruning insn at %s for stitching",
	 ftrace_print_insn_addr (last_insn));

  VEC_pop (btrace_insn_s, last_bfun->insn);

  /* The instructions vector may become empty temporarily if this has
     been the only instruction in this function segment.
     This violates the invariant but will be remedied shortly by
     btrace_compute_ftrace when we add the new trace.  */
  return 0;
}
863
864 /* Adjust the block trace in order to stitch old and new trace together.
865 BTRACE is the new delta trace between the last and the current stop.
866 BTINFO is the old branch trace until the last stop.
867 May modifx BTRACE as well as the existing trace in BTINFO.
868 Return 0 on success, -1 otherwise. */
869
870 static int
871 btrace_stitch_trace (struct btrace_data *btrace,
872 const struct btrace_thread_info *btinfo)
873 {
874 /* If we don't have trace, there's nothing to do. */
875 if (btrace_data_empty (btrace))
876 return 0;
877
878 switch (btrace->format)
879 {
880 case BTRACE_FORMAT_NONE:
881 return 0;
882
883 case BTRACE_FORMAT_BTS:
884 return btrace_stitch_bts (&btrace->variant.bts, btinfo);
885 }
886
887 internal_error (__FILE__, __LINE__, _("Unkown branch trace format."));
888 }
889
890 /* Clear the branch trace histories in BTINFO. */
891
892 static void
893 btrace_clear_history (struct btrace_thread_info *btinfo)
894 {
895 xfree (btinfo->insn_history);
896 xfree (btinfo->call_history);
897 xfree (btinfo->replay);
898
899 btinfo->insn_history = NULL;
900 btinfo->call_history = NULL;
901 btinfo->replay = NULL;
902 }
903
/* See btrace.h.  */

void
btrace_fetch (struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;
  struct btrace_target_info *tinfo;
  struct btrace_data btrace;
  struct cleanup *cleanup;
  int errcode;

  DEBUG ("fetch thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));

  btinfo = &tp->btrace;
  tinfo = btinfo->target;
  /* Tracing is not enabled for this thread; nothing to fetch.  */
  if (tinfo == NULL)
    return;

  /* There's no way we could get new trace while replaying.
     On the other hand, delta trace would return a partial record with the
     current PC, which is the replay PC, not the last PC, as expected.  */
  if (btinfo->replay != NULL)
    return;

  btrace_data_init (&btrace);
  cleanup = make_cleanup_btrace_data (&btrace);

  /* Let's first try to extend the trace we already have.
     Strategy: delta read + stitch; fall back to a new read (discarding the
     old trace if the new read returns anything); finally fall back to a
     full read starting from scratch.  */
  if (btinfo->end != NULL)
    {
      errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_DELTA);
      if (errcode == 0)
	{
	  /* Success.  Let's try to stitch the traces together.  */
	  errcode = btrace_stitch_trace (&btrace, btinfo);
	}
      else
	{
	  /* We failed to read delta trace.  Let's try to read new trace.  */
	  errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_NEW);

	  /* If we got any new trace, discard what we have.  */
	  if (errcode == 0 && !btrace_data_empty (&btrace))
	    btrace_clear (tp);
	}

      /* If we were not able to read the trace, we start over.  */
      if (errcode != 0)
	{
	  btrace_clear (tp);
	  errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);
	}
    }
  else
    errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);

  /* If we were not able to read the branch trace, signal an error.  */
  if (errcode != 0)
    error (_("Failed to read branch trace."));

  /* Compute the trace, provided we have any.  */
  if (!btrace_data_empty (&btrace))
    {
      btrace_clear_history (btinfo);
      btrace_compute_ftrace (tp, &btrace);
    }

  do_cleanups (cleanup);
}
973
974 /* See btrace.h. */
975
976 void
977 btrace_clear (struct thread_info *tp)
978 {
979 struct btrace_thread_info *btinfo;
980 struct btrace_function *it, *trash;
981
982 DEBUG ("clear thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
983
984 /* Make sure btrace frames that may hold a pointer into the branch
985 trace data are destroyed. */
986 reinit_frame_cache ();
987
988 btinfo = &tp->btrace;
989
990 it = btinfo->begin;
991 while (it != NULL)
992 {
993 trash = it;
994 it = it->flow.next;
995
996 xfree (trash);
997 }
998
999 btinfo->begin = NULL;
1000 btinfo->end = NULL;
1001
1002 btrace_clear_history (btinfo);
1003 }
1004
/* See btrace.h.  */

void
btrace_free_objfile (struct objfile *objfile)
{
  struct thread_info *tp;

  DEBUG ("free objfile");

  /* An objfile is being discarded; the recorded trace may hold symbol
     pointers into it, so drop the trace of every live thread.  */
  ALL_NON_EXITED_THREADS (tp)
    btrace_clear (tp);
}
1017
1018 #if defined (HAVE_LIBEXPAT)
1019
1020 /* Check the btrace document version. */
1021
1022 static void
1023 check_xml_btrace_version (struct gdb_xml_parser *parser,
1024 const struct gdb_xml_element *element,
1025 void *user_data, VEC (gdb_xml_value_s) *attributes)
1026 {
1027 const char *version = xml_find_attribute (attributes, "version")->value;
1028
1029 if (strcmp (version, "1.0") != 0)
1030 gdb_xml_error (parser, _("Unsupported btrace version: \"%s\""), version);
1031 }
1032
/* Parse a btrace "block" xml record.
   USER_DATA is the struct btrace_data being filled in; the parsed begin
   and end addresses are appended as a new block to its BTS block vector.  */

static void
parse_xml_btrace_block (struct gdb_xml_parser *parser,
			const struct gdb_xml_element *element,
			void *user_data, VEC (gdb_xml_value_s) *attributes)
{
  struct btrace_data *btrace;
  struct btrace_block *block;
  ULONGEST *begin, *end;

  btrace = user_data;

  switch (btrace->format)
    {
    case BTRACE_FORMAT_BTS:
      break;

    case BTRACE_FORMAT_NONE:
      /* First block: switch the so-far empty data to BTS format.  */
      btrace->format = BTRACE_FORMAT_BTS;
      btrace->variant.bts.blocks = NULL;
      break;

    default:
      gdb_xml_error (parser, _("Btrace format error."));
    }

  /* Both attributes are mandatory per block_attributes below.  */
  begin = xml_find_attribute (attributes, "begin")->value;
  end = xml_find_attribute (attributes, "end")->value;

  block = VEC_safe_push (btrace_block_s, btrace->variant.bts.blocks, NULL);
  block->begin = *begin;
  block->end = *end;
}
1067
/* Attributes of a btrace "block" record: begin and end addresses, both
   required and parsed as unsigned integers.  */

static const struct gdb_xml_attribute block_attributes[] = {
  { "begin", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { "end", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

/* Attributes of the top-level "btrace" element: the document version.  */

static const struct gdb_xml_attribute btrace_attributes[] = {
  { "version", GDB_XML_AF_NONE, NULL, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

/* Child elements of "btrace": any number of "block" records.  */

static const struct gdb_xml_element btrace_children[] = {
  { "block", block_attributes, NULL,
    GDB_XML_EF_REPEATABLE | GDB_XML_EF_OPTIONAL, parse_xml_btrace_block, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

/* The top-level elements of a btrace xml document.  */

static const struct gdb_xml_element btrace_elements[] = {
  { "btrace", btrace_attributes, btrace_children, GDB_XML_EF_NONE,
    check_xml_btrace_version, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};
1090
1091 #endif /* defined (HAVE_LIBEXPAT) */
1092
/* See btrace.h.  */

void
parse_xml_btrace (struct btrace_data *btrace, const char *buffer)
{
  struct cleanup *cleanup;
  int errcode;

#if defined (HAVE_LIBEXPAT)

  btrace->format = BTRACE_FORMAT_NONE;

  /* Free partially parsed data if parsing fails below.  */
  cleanup = make_cleanup_btrace_data (btrace);
  errcode = gdb_xml_parse_quick (_("btrace"), "btrace.dtd", btrace_elements,
				 buffer, btrace);
  if (errcode != 0)
    error (_("Error parsing branch trace."));

  /* Keep parse results.  */
  discard_cleanups (cleanup);

#else  /* !defined (HAVE_LIBEXPAT) */

  error (_("Cannot process branch trace.  XML parsing is not supported."));

#endif  /* !defined (HAVE_LIBEXPAT) */
}
1120
1121 #if defined (HAVE_LIBEXPAT)
1122
/* Parse a btrace-conf "bts" xml record.
   USER_DATA is the struct btrace_config being filled in.  Sets the format
   to BTS and records the optional buffer size attribute (0 if absent).  */

static void
parse_xml_btrace_conf_bts (struct gdb_xml_parser *parser,
			   const struct gdb_xml_element *element,
			   void *user_data, VEC (gdb_xml_value_s) *attributes)
{
  struct btrace_config *conf;
  struct gdb_xml_value *size;

  conf = user_data;
  conf->format = BTRACE_FORMAT_BTS;
  conf->bts.size = 0;

  /* The "size" attribute is optional; see btrace_conf_bts_attributes.  */
  size = xml_find_attribute (attributes, "size");
  if (size != NULL)
    conf->bts.size = (unsigned int) * (ULONGEST *) size->value;
}
1141
/* The XML attributes of a btrace-conf "bts" element: an optional buffer
   size, parsed as ULONGEST.  */

static const struct gdb_xml_attribute btrace_conf_bts_attributes[] = {
  { "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};
1146
/* The child elements of "btrace-conf": an optional "bts" element handled
   by parse_xml_btrace_conf_bts.  */

static const struct gdb_xml_element btrace_conf_children[] = {
  { "bts", btrace_conf_bts_attributes, NULL, GDB_XML_EF_OPTIONAL,
    parse_xml_btrace_conf_bts, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};
1152
/* The XML attributes of the toplevel "btrace-conf" element.  The version
   attribute is accepted but has no parser or handler here.  */

static const struct gdb_xml_attribute btrace_conf_attributes[] = {
  { "version", GDB_XML_AF_NONE, NULL, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};
1157
/* The toplevel element descriptor for a btrace configuration document.  */

static const struct gdb_xml_element btrace_conf_elements[] = {
  { "btrace-conf", btrace_conf_attributes, btrace_conf_children,
    GDB_XML_EF_NONE, NULL, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};
1163
1164 #endif /* defined (HAVE_LIBEXPAT) */
1165
1166 /* See btrace.h. */
1167
void
parse_xml_btrace_conf (struct btrace_config *conf, const char *xml)
{
  int errcode;

#if defined (HAVE_LIBEXPAT)

  /* The element handlers fill in CONF as the parser's user data.  */
  errcode = gdb_xml_parse_quick (_("btrace-conf"), "btrace-conf.dtd",
                                 btrace_conf_elements, xml, conf);
  if (errcode != 0)
    error (_("Error parsing branch trace configuration."));

#else /* !defined (HAVE_LIBEXPAT) */

  error (_("XML parsing is not supported."));

#endif /* !defined (HAVE_LIBEXPAT) */
}
1186
1187 /* See btrace.h. */
1188
1189 const struct btrace_insn *
1190 btrace_insn_get (const struct btrace_insn_iterator *it)
1191 {
1192 const struct btrace_function *bfun;
1193 unsigned int index, end;
1194
1195 index = it->index;
1196 bfun = it->function;
1197
1198 /* The index is within the bounds of this function's instruction vector. */
1199 end = VEC_length (btrace_insn_s, bfun->insn);
1200 gdb_assert (0 < end);
1201 gdb_assert (index < end);
1202
1203 return VEC_index (btrace_insn_s, bfun->insn, index);
1204 }
1205
1206 /* See btrace.h. */
1207
1208 unsigned int
1209 btrace_insn_number (const struct btrace_insn_iterator *it)
1210 {
1211 const struct btrace_function *bfun;
1212
1213 bfun = it->function;
1214 return bfun->insn_offset + it->index;
1215 }
1216
1217 /* See btrace.h. */
1218
1219 void
1220 btrace_insn_begin (struct btrace_insn_iterator *it,
1221 const struct btrace_thread_info *btinfo)
1222 {
1223 const struct btrace_function *bfun;
1224
1225 bfun = btinfo->begin;
1226 if (bfun == NULL)
1227 error (_("No trace."));
1228
1229 it->function = bfun;
1230 it->index = 0;
1231 }
1232
1233 /* See btrace.h. */
1234
1235 void
1236 btrace_insn_end (struct btrace_insn_iterator *it,
1237 const struct btrace_thread_info *btinfo)
1238 {
1239 const struct btrace_function *bfun;
1240 unsigned int length;
1241
1242 bfun = btinfo->end;
1243 if (bfun == NULL)
1244 error (_("No trace."));
1245
1246 /* The last instruction in the last function is the current instruction.
1247 We point to it - it is one past the end of the execution trace. */
1248 length = VEC_length (btrace_insn_s, bfun->insn);
1249
1250 it->function = bfun;
1251 it->index = length - 1;
1252 }
1253
1254 /* See btrace.h. */
1255
unsigned int
btrace_insn_next (struct btrace_insn_iterator *it, unsigned int stride)
{
  const struct btrace_function *bfun;
  unsigned int index, steps;

  bfun = it->function;
  steps = 0;
  index = it->index;

  /* Walk forward segment by segment until STRIDE instructions have been
     consumed or we run out of trace.  */
  while (stride != 0)
    {
      unsigned int end, space, adv;

      end = VEC_length (btrace_insn_s, bfun->insn);

      /* Every function segment holds at least one instruction and the
	 iterator points inside its segment.  */
      gdb_assert (0 < end);
      gdb_assert (index < end);

      /* Compute the number of instructions remaining in this segment.  */
      space = end - index;

      /* Advance the iterator as far as possible within this segment.  */
      adv = min (space, stride);
      stride -= adv;
      index += adv;
      steps += adv;

      /* Move to the next function if we're at the end of this one.  */
      if (index == end)
	{
	  const struct btrace_function *next;

	  next = bfun->flow.next;
	  if (next == NULL)
	    {
	      /* We stepped past the last function.

		 Let's adjust the index to point to the last instruction in
		 the previous function.  */
	      index -= 1;
	      steps -= 1;
	      break;
	    }

	  /* We now point to the first instruction in the new function.  */
	  bfun = next;
	  index = 0;
	}

      /* We did make progress.  */
      gdb_assert (adv > 0);
    }

  /* Update the iterator.  */
  it->function = bfun;
  it->index = index;

  /* Return the number of instructions actually stepped, which may be less
     than STRIDE when the end of the trace is reached.  */
  return steps;
}
1316
1317 /* See btrace.h. */
1318
unsigned int
btrace_insn_prev (struct btrace_insn_iterator *it, unsigned int stride)
{
  const struct btrace_function *bfun;
  unsigned int index, steps;

  bfun = it->function;
  steps = 0;
  index = it->index;

  /* Walk backward segment by segment until STRIDE instructions have been
     consumed or we hit the beginning of the trace.  */
  while (stride != 0)
    {
      unsigned int adv;

      /* Move to the previous function if we're at the start of this one.  */
      if (index == 0)
	{
	  const struct btrace_function *prev;

	  prev = bfun->flow.prev;
	  if (prev == NULL)
	    break;

	  /* We point to one after the last instruction in the new function.  */
	  bfun = prev;
	  index = VEC_length (btrace_insn_s, bfun->insn);

	  /* There is at least one instruction in this function segment.  */
	  gdb_assert (index > 0);
	}

      /* Advance the iterator as far as possible within this segment.  */
      adv = min (index, stride);
      stride -= adv;
      index -= adv;
      steps += adv;

      /* We did make progress.  */
      gdb_assert (adv > 0);
    }

  /* Update the iterator.  */
  it->function = bfun;
  it->index = index;

  /* Return the number of instructions actually stepped back, which may be
     less than STRIDE when the beginning of the trace is reached.  */
  return steps;
}
1366
1367 /* See btrace.h. */
1368
int
btrace_insn_cmp (const struct btrace_insn_iterator *lhs,
		 const struct btrace_insn_iterator *rhs)
{
  /* Compare the global instruction numbers.  Unsigned subtraction cast to
     int yields a negative, zero, or positive ordering result.  */
  return (int) (btrace_insn_number (lhs) - btrace_insn_number (rhs));
}
1380
1381 /* See btrace.h. */
1382
1383 int
1384 btrace_find_insn_by_number (struct btrace_insn_iterator *it,
1385 const struct btrace_thread_info *btinfo,
1386 unsigned int number)
1387 {
1388 const struct btrace_function *bfun;
1389 unsigned int end;
1390
1391 for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
1392 if (bfun->insn_offset <= number)
1393 break;
1394
1395 if (bfun == NULL)
1396 return 0;
1397
1398 end = bfun->insn_offset + VEC_length (btrace_insn_s, bfun->insn);
1399 if (end <= number)
1400 return 0;
1401
1402 it->function = bfun;
1403 it->index = number - bfun->insn_offset;
1404
1405 return 1;
1406 }
1407
1408 /* See btrace.h. */
1409
const struct btrace_function *
btrace_call_get (const struct btrace_call_iterator *it)
{
  /* The end iterator stores NULL; callers get the function segment
     directly otherwise.  */
  return it->function;
}
1415
1416 /* See btrace.h. */
1417
1418 unsigned int
1419 btrace_call_number (const struct btrace_call_iterator *it)
1420 {
1421 const struct btrace_thread_info *btinfo;
1422 const struct btrace_function *bfun;
1423 unsigned int insns;
1424
1425 btinfo = it->btinfo;
1426 bfun = it->function;
1427 if (bfun != NULL)
1428 return bfun->number;
1429
1430 /* For the end iterator, i.e. bfun == NULL, we return one more than the
1431 number of the last function. */
1432 bfun = btinfo->end;
1433 insns = VEC_length (btrace_insn_s, bfun->insn);
1434
1435 /* If the function contains only a single instruction (i.e. the current
1436 instruction), it will be skipped and its number is already the number
1437 we seek. */
1438 if (insns == 1)
1439 return bfun->number;
1440
1441 /* Otherwise, return one more than the number of the last function. */
1442 return bfun->number + 1;
1443 }
1444
1445 /* See btrace.h. */
1446
1447 void
1448 btrace_call_begin (struct btrace_call_iterator *it,
1449 const struct btrace_thread_info *btinfo)
1450 {
1451 const struct btrace_function *bfun;
1452
1453 bfun = btinfo->begin;
1454 if (bfun == NULL)
1455 error (_("No trace."));
1456
1457 it->btinfo = btinfo;
1458 it->function = bfun;
1459 }
1460
1461 /* See btrace.h. */
1462
1463 void
1464 btrace_call_end (struct btrace_call_iterator *it,
1465 const struct btrace_thread_info *btinfo)
1466 {
1467 const struct btrace_function *bfun;
1468
1469 bfun = btinfo->end;
1470 if (bfun == NULL)
1471 error (_("No trace."));
1472
1473 it->btinfo = btinfo;
1474 it->function = NULL;
1475 }
1476
1477 /* See btrace.h. */
1478
unsigned int
btrace_call_next (struct btrace_call_iterator *it, unsigned int stride)
{
  const struct btrace_function *bfun;
  unsigned int steps;

  bfun = it->function;
  steps = 0;
  while (bfun != NULL)
    {
      const struct btrace_function *next;
      unsigned int insns;

      next = bfun->flow.next;
      if (next == NULL)
	{
	  /* Ignore the last function if it only contains a single
	     (i.e. the current) instruction.  */
	  insns = VEC_length (btrace_insn_s, bfun->insn);
	  if (insns == 1)
	    /* STEPS is unsigned and may transiently wrap here when it is
	       still zero; the increment at the bottom of the loop undoes
	       the wrap before the function returns.  */
	    steps -= 1;
	}

      if (stride == steps)
	break;

      bfun = next;
      steps += 1;
    }

  /* BFUN is NULL if we stepped onto (or past) the end iterator.  */
  it->function = bfun;
  return steps;
}
1512
1513 /* See btrace.h. */
1514
unsigned int
btrace_call_prev (struct btrace_call_iterator *it, unsigned int stride)
{
  const struct btrace_thread_info *btinfo;
  const struct btrace_function *bfun;
  unsigned int steps;

  bfun = it->function;
  steps = 0;

  if (bfun == NULL)
    {
      unsigned int insns;

      /* Starting from the end iterator: step onto the last real call
	 segment first.  */
      btinfo = it->btinfo;
      bfun = btinfo->end;
      if (bfun == NULL)
	return 0;

      /* Ignore the last function if it only contains a single
	 (i.e. the current) instruction.  */
      insns = VEC_length (btrace_insn_s, bfun->insn);
      if (insns == 1)
	bfun = bfun->flow.prev;

      if (bfun == NULL)
	return 0;

      steps += 1;
    }

  /* Continue stepping back until STRIDE is reached or we hit the
     beginning of the trace.  */
  while (steps < stride)
    {
      const struct btrace_function *prev;

      prev = bfun->flow.prev;
      if (prev == NULL)
	break;

      bfun = prev;
      steps += 1;
    }

  it->function = bfun;
  return steps;
}
1561
1562 /* See btrace.h. */
1563
int
btrace_call_cmp (const struct btrace_call_iterator *lhs,
		 const struct btrace_call_iterator *rhs)
{
  /* Compare the call numbers.  Unsigned subtraction cast to int yields a
     negative, zero, or positive ordering result.  */
  return (int) (btrace_call_number (lhs) - btrace_call_number (rhs));
}
1575
1576 /* See btrace.h. */
1577
1578 int
1579 btrace_find_call_by_number (struct btrace_call_iterator *it,
1580 const struct btrace_thread_info *btinfo,
1581 unsigned int number)
1582 {
1583 const struct btrace_function *bfun;
1584
1585 for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
1586 {
1587 unsigned int bnum;
1588
1589 bnum = bfun->number;
1590 if (number == bnum)
1591 {
1592 it->btinfo = btinfo;
1593 it->function = bfun;
1594 return 1;
1595 }
1596
1597 /* Functions are ordered and numbered consecutively. We could bail out
1598 earlier. On the other hand, it is very unlikely that we search for
1599 a nonexistent function. */
1600 }
1601
1602 return 0;
1603 }
1604
1605 /* See btrace.h. */
1606
void
btrace_set_insn_history (struct btrace_thread_info *btinfo,
			 const struct btrace_insn_iterator *begin,
			 const struct btrace_insn_iterator *end)
{
  /* Allocate the history record lazily on first use; xzalloc
     zero-initializes it.  */
  if (btinfo->insn_history == NULL)
    btinfo->insn_history = xzalloc (sizeof (*btinfo->insn_history));

  /* Record the [BEGIN; END) range by value.  */
  btinfo->insn_history->begin = *begin;
  btinfo->insn_history->end = *end;
}
1618
1619 /* See btrace.h. */
1620
void
btrace_set_call_history (struct btrace_thread_info *btinfo,
			 const struct btrace_call_iterator *begin,
			 const struct btrace_call_iterator *end)
{
  /* Both iterators must refer to the same thread's trace.  */
  gdb_assert (begin->btinfo == end->btinfo);

  /* Allocate the history record lazily on first use; xzalloc
     zero-initializes it.  */
  if (btinfo->call_history == NULL)
    btinfo->call_history = xzalloc (sizeof (*btinfo->call_history));

  /* Record the [BEGIN; END) range by value.  */
  btinfo->call_history->begin = *begin;
  btinfo->call_history->end = *end;
}
1634
1635 /* See btrace.h. */
1636
1637 int
1638 btrace_is_replaying (struct thread_info *tp)
1639 {
1640 return tp->btrace.replay != NULL;
1641 }
1642
1643 /* See btrace.h. */
1644
1645 int
1646 btrace_is_empty (struct thread_info *tp)
1647 {
1648 struct btrace_insn_iterator begin, end;
1649 struct btrace_thread_info *btinfo;
1650
1651 btinfo = &tp->btrace;
1652
1653 if (btinfo->begin == NULL)
1654 return 1;
1655
1656 btrace_insn_begin (&begin, btinfo);
1657 btrace_insn_end (&end, btinfo);
1658
1659 return btrace_insn_cmp (&begin, &end) == 0;
1660 }
1661
1662 /* Forward the cleanup request. */
1663
/* Forward the cleanup request to btrace_data_fini.  ARG is the
   struct btrace_data to dispose of.  */

static void
do_btrace_data_cleanup (void *arg)
{
  struct btrace_data *data = arg;

  btrace_data_fini (data);
}
1669
1670 /* See btrace.h. */
1671
struct cleanup *
make_cleanup_btrace_data (struct btrace_data *data)
{
  /* Register a cleanup that finalizes DATA via do_btrace_data_cleanup.
     The caller discards it to keep the data.  */
  return make_cleanup (do_btrace_data_cleanup, data);
}
This page took 0.0634710000000001 seconds and 5 git commands to generate.