gdb/btrace.c
1 /* Branch trace support for GDB, the GNU debugger.
2
3 Copyright (C) 2013-2015 Free Software Foundation, Inc.
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include "btrace.h"
24 #include "gdbthread.h"
25 #include "inferior.h"
26 #include "target.h"
27 #include "record.h"
28 #include "symtab.h"
29 #include "disasm.h"
30 #include "source.h"
31 #include "filenames.h"
32 #include "xml-support.h"
33 #include "regcache.h"
34
35 /* Print a record debug message. Use do ... while (0) to avoid ambiguities
36 when used in if statements. */
37
38 #define DEBUG(msg, args...) \
39 do \
40 { \
41 if (record_debug != 0) \
42 fprintf_unfiltered (gdb_stdlog, \
43 "[btrace] " msg "\n", ##args); \
44 } \
45 while (0)
46
47 #define DEBUG_FTRACE(msg, args...) DEBUG ("[ftrace] " msg, ##args)
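
/* A sketch of why the do ... while (0) wrapper above matters, using a
   hypothetical caller:

     if (cond)
       DEBUG ("resuming");
     else
       handle_other ();

   Because the macro expands to a single do ... while (0) statement, the
   "else" still binds to the "if". With a bare braced block instead, the
   ";" after DEBUG (...) would terminate the "if" and orphan the "else". */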
48
49 /* Return the function name of a recorded function segment for printing.
50 This function never returns NULL. */
51
52 static const char *
53 ftrace_print_function_name (const struct btrace_function *bfun)
54 {
55 struct minimal_symbol *msym;
56 struct symbol *sym;
57
58 msym = bfun->msym;
59 sym = bfun->sym;
60
61 if (sym != NULL)
62 return SYMBOL_PRINT_NAME (sym);
63
64 if (msym != NULL)
65 return MSYMBOL_PRINT_NAME (msym);
66
67 return "<unknown>";
68 }
69
70 /* Return the file name of a recorded function segment for printing.
71 This function never returns NULL. */
72
73 static const char *
74 ftrace_print_filename (const struct btrace_function *bfun)
75 {
76 struct symbol *sym;
77 const char *filename;
78
79 sym = bfun->sym;
80
81 if (sym != NULL)
82 filename = symtab_to_filename_for_display (symbol_symtab (sym));
83 else
84 filename = "<unknown>";
85
86 return filename;
87 }
88
89 /* Return a string representation of the address of an instruction.
90 This function never returns NULL. */
91
92 static const char *
93 ftrace_print_insn_addr (const struct btrace_insn *insn)
94 {
95 if (insn == NULL)
96 return "<nil>";
97
98 return core_addr_to_string_nz (insn->pc);
99 }
100
101 /* Print an ftrace debug status message. */
102
103 static void
104 ftrace_debug (const struct btrace_function *bfun, const char *prefix)
105 {
106 const char *fun, *file;
107 unsigned int ibegin, iend;
108 int level;
109
110 fun = ftrace_print_function_name (bfun);
111 file = ftrace_print_filename (bfun);
112 level = bfun->level;
113
114 ibegin = bfun->insn_offset;
115 iend = ibegin + VEC_length (btrace_insn_s, bfun->insn);
116
117 DEBUG_FTRACE ("%s: fun = %s, file = %s, level = %d, insn = [%u; %u)",
118 prefix, fun, file, level, ibegin, iend);
119 }
120
121 /* Return non-zero if BFUN does not match MFUN and FUN,
122 return zero otherwise. */
123
124 static int
125 ftrace_function_switched (const struct btrace_function *bfun,
126 const struct minimal_symbol *mfun,
127 const struct symbol *fun)
128 {
129 struct minimal_symbol *msym;
130 struct symbol *sym;
131
132 msym = bfun->msym;
133 sym = bfun->sym;
134
135 /* If the minimal symbol changed, we certainly switched functions. */
136 if (mfun != NULL && msym != NULL
137 && strcmp (MSYMBOL_LINKAGE_NAME (mfun), MSYMBOL_LINKAGE_NAME (msym)) != 0)
138 return 1;
139
140 /* If the symbol changed, we certainly switched functions. */
141 if (fun != NULL && sym != NULL)
142 {
143 const char *bfname, *fname;
144
145 /* Check the function name. */
146 if (strcmp (SYMBOL_LINKAGE_NAME (fun), SYMBOL_LINKAGE_NAME (sym)) != 0)
147 return 1;
148
149 /* Check the location of those functions, as well. */
150 bfname = symtab_to_fullname (symbol_symtab (sym));
151 fname = symtab_to_fullname (symbol_symtab (fun));
152 if (filename_cmp (fname, bfname) != 0)
153 return 1;
154 }
155
156 /* If we lost symbol information, we switched functions. */
157 if (!(msym == NULL && sym == NULL) && mfun == NULL && fun == NULL)
158 return 1;
159
160 /* If we gained symbol information, we switched functions. */
161 if (msym == NULL && sym == NULL && !(mfun == NULL && fun == NULL))
162 return 1;
163
164 return 0;
165 }
166
167 /* Allocate and initialize a new branch trace function segment.
168 PREV is the chronologically preceding function segment.
169 MFUN and FUN are the symbol information we have for this function. */
170
171 static struct btrace_function *
172 ftrace_new_function (struct btrace_function *prev,
173 struct minimal_symbol *mfun,
174 struct symbol *fun)
175 {
176 struct btrace_function *bfun;
177
178 bfun = xzalloc (sizeof (*bfun));
179
180 bfun->msym = mfun;
181 bfun->sym = fun;
182 bfun->flow.prev = prev;
183
184 if (prev == NULL)
185 {
186 /* Start counting at one. */
187 bfun->number = 1;
188 bfun->insn_offset = 1;
189 }
190 else
191 {
192 gdb_assert (prev->flow.next == NULL);
193 prev->flow.next = bfun;
194
195 bfun->number = prev->number + 1;
196 bfun->insn_offset = (prev->insn_offset
197 + VEC_length (btrace_insn_s, prev->insn));
198 bfun->level = prev->level;
199 }
200
201 return bfun;
202 }
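
/* Worked example for the numbering above: three consecutive function
   segments holding 2, 3, and 1 instructions are assigned the
   (number, insn_offset) pairs (1, 1), (2, 3), and (3, 6). Instruction
   numbering thus starts at one and stays contiguous across segments. */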
203
204 /* Update the UP field of a function segment. */
205
206 static void
207 ftrace_update_caller (struct btrace_function *bfun,
208 struct btrace_function *caller,
209 enum btrace_function_flag flags)
210 {
211 if (bfun->up != NULL)
212 ftrace_debug (bfun, "updating caller");
213
214 bfun->up = caller;
215 bfun->flags = flags;
216
217 ftrace_debug (bfun, "set caller");
218 }
219
220 /* Fix up the caller for all segments of a function. */
221
222 static void
223 ftrace_fixup_caller (struct btrace_function *bfun,
224 struct btrace_function *caller,
225 enum btrace_function_flag flags)
226 {
227 struct btrace_function *prev, *next;
228
229 ftrace_update_caller (bfun, caller, flags);
230
231 /* Update all function segments belonging to the same function. */
232 for (prev = bfun->segment.prev; prev != NULL; prev = prev->segment.prev)
233 ftrace_update_caller (prev, caller, flags);
234
235 for (next = bfun->segment.next; next != NULL; next = next->segment.next)
236 ftrace_update_caller (next, caller, flags);
237 }
238
239 /* Add a new function segment for a call.
240 CALLER is the chronologically preceding function segment.
241 MFUN and FUN are the symbol information we have for this function. */
242
243 static struct btrace_function *
244 ftrace_new_call (struct btrace_function *caller,
245 struct minimal_symbol *mfun,
246 struct symbol *fun)
247 {
248 struct btrace_function *bfun;
249
250 bfun = ftrace_new_function (caller, mfun, fun);
251 bfun->up = caller;
252 bfun->level += 1;
253
254 ftrace_debug (bfun, "new call");
255
256 return bfun;
257 }
258
259 /* Add a new function segment for a tail call.
260 CALLER is the chronologically preceding function segment.
261 MFUN and FUN are the symbol information we have for this function. */
262
263 static struct btrace_function *
264 ftrace_new_tailcall (struct btrace_function *caller,
265 struct minimal_symbol *mfun,
266 struct symbol *fun)
267 {
268 struct btrace_function *bfun;
269
270 bfun = ftrace_new_function (caller, mfun, fun);
271 bfun->up = caller;
272 bfun->level += 1;
273 bfun->flags |= BFUN_UP_LINKS_TO_TAILCALL;
274
275 ftrace_debug (bfun, "new tail call");
276
277 return bfun;
278 }
279
280 /* Find the innermost caller in the back trace of BFUN with MFUN/FUN
281 symbol information. */
282
283 static struct btrace_function *
284 ftrace_find_caller (struct btrace_function *bfun,
285 struct minimal_symbol *mfun,
286 struct symbol *fun)
287 {
288 for (; bfun != NULL; bfun = bfun->up)
289 {
290 /* Skip functions with incompatible symbol information. */
291 if (ftrace_function_switched (bfun, mfun, fun))
292 continue;
293
294 /* This is the function segment we're looking for. */
295 break;
296 }
297
298 return bfun;
299 }
300
301 /* Find the innermost caller in the back trace of BFUN, skipping all
302 function segments that do not end with a call instruction (e.g.
303 tail calls ending with a jump). */
304
305 static struct btrace_function *
306 ftrace_find_call (struct btrace_function *bfun)
307 {
308 for (; bfun != NULL; bfun = bfun->up)
309 {
310 struct btrace_insn *last;
311
312 /* Skip gaps. */
313 if (bfun->errcode != 0)
314 continue;
315
316 last = VEC_last (btrace_insn_s, bfun->insn);
317
318 if (last->iclass == BTRACE_INSN_CALL)
319 break;
320 }
321
322 return bfun;
323 }
324
325 /* Add a continuation segment for a function into which we return.
326 PREV is the chronologically preceding function segment.
327 MFUN and FUN are the symbol information we have for this function. */
328
329 static struct btrace_function *
330 ftrace_new_return (struct btrace_function *prev,
331 struct minimal_symbol *mfun,
332 struct symbol *fun)
333 {
334 struct btrace_function *bfun, *caller;
335
336 bfun = ftrace_new_function (prev, mfun, fun);
337
338 /* It is important to start at PREV's caller. Otherwise, we might find
339 PREV itself, if PREV is a recursive function. */
340 caller = ftrace_find_caller (prev->up, mfun, fun);
341 if (caller != NULL)
342 {
343 /* The caller of PREV is the preceding btrace function segment in this
344 function instance. */
345 gdb_assert (caller->segment.next == NULL);
346
347 caller->segment.next = bfun;
348 bfun->segment.prev = caller;
349
350 /* Maintain the function level. */
351 bfun->level = caller->level;
352
353 /* Maintain the call stack. */
354 bfun->up = caller->up;
355 bfun->flags = caller->flags;
356
357 ftrace_debug (bfun, "new return");
358 }
359 else
360 {
361 /* We did not find a caller. This could mean that something went
362 wrong or that the call is simply not included in the trace. */
363
364 /* Let's search for some actual call. */
365 caller = ftrace_find_call (prev->up);
366 if (caller == NULL)
367 {
368 /* There is no call in PREV's back trace. We assume that the
369 branch trace did not include it. */
370
371 /* Let's find the topmost call function - this skips tail calls. */
372 while (prev->up != NULL)
373 prev = prev->up;
374
375 /* We maintain levels for a series of returns for which we have
376 not seen the calls.
377 We start at the preceding function's level in case this has
378 already been a return for which we have not seen the call.
379 We start at level 0 otherwise, to handle tail calls correctly. */
380 bfun->level = min (0, prev->level) - 1;
381
382 /* Fix up the call stack for PREV. */
383 ftrace_fixup_caller (prev, bfun, BFUN_UP_LINKS_TO_RET);
384
385 ftrace_debug (bfun, "new return - no caller");
386 }
387 else
388 {
389 /* There is a call in PREV's back trace to which we should have
390 returned. Let's remain at this level. */
391 bfun->level = prev->level;
392
393 ftrace_debug (bfun, "new return - unknown caller");
394 }
395 }
396
397 return bfun;
398 }
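
/* Example of the level bookkeeping above: if the topmost known segment
   executes at level 0 and we see a return whose call is missing from the
   trace, the new segment gets level min (0, 0) - 1 = -1; a further
   unmatched return yields -2. These negative levels are compensated
   later via the global level offset (see btrace_compute_ftrace_bts). */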
399
400 /* Add a new function segment for a function switch.
401 PREV is the chronologically preceding function segment.
402 MFUN and FUN are the symbol information we have for this function. */
403
404 static struct btrace_function *
405 ftrace_new_switch (struct btrace_function *prev,
406 struct minimal_symbol *mfun,
407 struct symbol *fun)
408 {
409 struct btrace_function *bfun;
410
411 /* This is an unexplained function switch. The call stack will likely
412 be wrong at this point. */
413 bfun = ftrace_new_function (prev, mfun, fun);
414
415 ftrace_debug (bfun, "new switch");
416
417 return bfun;
418 }
419
420 /* Add a new function segment for a gap in the trace due to a decode error.
421 PREV is the chronologically preceding function segment.
422 ERRCODE is the format-specific error code. */
423
424 static struct btrace_function *
425 ftrace_new_gap (struct btrace_function *prev, int errcode)
426 {
427 struct btrace_function *bfun;
428
429 /* We hijack prev if it was empty. */
430 if (prev != NULL && prev->errcode == 0
431 && VEC_empty (btrace_insn_s, prev->insn))
432 bfun = prev;
433 else
434 bfun = ftrace_new_function (prev, NULL, NULL);
435
436 bfun->errcode = errcode;
437
438 ftrace_debug (bfun, "new gap");
439
440 return bfun;
441 }
442
443 /* Update BFUN with respect to the instruction at PC. This may create new
444 function segments.
445 Return the chronologically latest function segment, never NULL. */
446
447 static struct btrace_function *
448 ftrace_update_function (struct btrace_function *bfun, CORE_ADDR pc)
449 {
450 struct bound_minimal_symbol bmfun;
451 struct minimal_symbol *mfun;
452 struct symbol *fun;
453 struct btrace_insn *last;
454
455 /* Try to determine the function we're in. We use both types of symbols
456 to avoid surprises when we sometimes get a full symbol and sometimes
457 only a minimal symbol. */
458 fun = find_pc_function (pc);
459 bmfun = lookup_minimal_symbol_by_pc (pc);
460 mfun = bmfun.minsym;
461
462 if (fun == NULL && mfun == NULL)
463 DEBUG_FTRACE ("no symbol at %s", core_addr_to_string_nz (pc));
464
465 /* If we didn't have a function before, or the last segment was a gap,
create a new function segment. */
466 if (bfun == NULL || bfun->errcode != 0)
467 return ftrace_new_function (bfun, mfun, fun);
468
469 /* Check the last instruction, if we have one.
470 We do this check first, since it allows us to fill in the call stack
471 links in addition to the normal flow links. */
472 last = NULL;
473 if (!VEC_empty (btrace_insn_s, bfun->insn))
474 last = VEC_last (btrace_insn_s, bfun->insn);
475
476 if (last != NULL)
477 {
478 switch (last->iclass)
479 {
480 case BTRACE_INSN_RETURN:
481 {
482 const char *fname;
483
484 /* On some systems, _dl_runtime_resolve returns to the resolved
485 function instead of jumping to it. From our perspective,
486 however, this is a tailcall.
487 If we treated it as return, we wouldn't be able to find the
488 resolved function in our stack back trace. Hence, we would
489 lose the current stack back trace and start anew with an empty
490 back trace. When the resolved function returns, we would then
491 create a stack back trace with the same function names but
492 different frame id's. This will confuse stepping. */
493 fname = ftrace_print_function_name (bfun);
494 if (strcmp (fname, "_dl_runtime_resolve") == 0)
495 return ftrace_new_tailcall (bfun, mfun, fun);
496
497 return ftrace_new_return (bfun, mfun, fun);
498 }
499
500 case BTRACE_INSN_CALL:
501 /* Ignore calls to the next instruction. They are used for PIC. */
502 if (last->pc + last->size == pc)
503 break;
504
505 return ftrace_new_call (bfun, mfun, fun);
506
507 case BTRACE_INSN_JUMP:
508 {
509 CORE_ADDR start;
510
511 start = get_pc_function_start (pc);
512
513 /* If we can't determine the function for PC, we treat a jump at
514 the end of the block as tail call. */
515 if (start == 0 || start == pc)
516 return ftrace_new_tailcall (bfun, mfun, fun);
517 }
518 }
519 }
520
521 /* Check if we're switching functions for some other reason. */
522 if (ftrace_function_switched (bfun, mfun, fun))
523 {
524 DEBUG_FTRACE ("switching from %s in %s at %s",
525 ftrace_print_insn_addr (last),
526 ftrace_print_function_name (bfun),
527 ftrace_print_filename (bfun));
528
529 return ftrace_new_switch (bfun, mfun, fun);
530 }
531
532 return bfun;
533 }
534
535 /* Add the instruction INSN to BFUN's instructions. */
536
537 static void
538 ftrace_update_insns (struct btrace_function *bfun,
539 const struct btrace_insn *insn)
540 {
541 VEC_safe_push (btrace_insn_s, bfun->insn, insn);
542
543 if (record_debug > 1)
544 ftrace_debug (bfun, "update insn");
545 }
546
547 /* Classify the instruction at PC. */
548
549 static enum btrace_insn_class
550 ftrace_classify_insn (struct gdbarch *gdbarch, CORE_ADDR pc)
551 {
552 enum btrace_insn_class iclass;
553
554 iclass = BTRACE_INSN_OTHER;
555 TRY
556 {
557 if (gdbarch_insn_is_call (gdbarch, pc))
558 iclass = BTRACE_INSN_CALL;
559 else if (gdbarch_insn_is_ret (gdbarch, pc))
560 iclass = BTRACE_INSN_RETURN;
561 else if (gdbarch_insn_is_jump (gdbarch, pc))
562 iclass = BTRACE_INSN_JUMP;
563 }
564 CATCH (error, RETURN_MASK_ERROR)
565 {
566 }
567 END_CATCH
568
569 return iclass;
570 }
571
572 /* Compute the function branch trace from BTS trace. */
573
574 static void
575 btrace_compute_ftrace_bts (struct thread_info *tp,
576 const struct btrace_data_bts *btrace)
577 {
578 struct btrace_thread_info *btinfo;
579 struct btrace_function *begin, *end;
580 struct gdbarch *gdbarch;
581 unsigned int blk, ngaps;
582 int level;
583
584 gdbarch = target_gdbarch ();
585 btinfo = &tp->btrace;
586 begin = btinfo->begin;
587 end = btinfo->end;
588 ngaps = btinfo->ngaps;
589 level = begin != NULL ? -btinfo->level : INT_MAX;
590 blk = VEC_length (btrace_block_s, btrace->blocks);
591
592 while (blk != 0)
593 {
594 btrace_block_s *block;
595 CORE_ADDR pc;
596
597 blk -= 1;
598
599 block = VEC_index (btrace_block_s, btrace->blocks, blk);
600 pc = block->begin;
601
602 for (;;)
603 {
604 struct btrace_insn insn;
605 int size;
606
607 /* We should hit the end of the block. Warn if we went too far. */
608 if (block->end < pc)
609 {
610 /* Indicate the gap in the trace - unless we're at the
611 beginning. */
612 if (begin != NULL)
613 {
614 warning (_("Recorded trace may be corrupted around %s."),
615 core_addr_to_string_nz (pc));
616
617 end = ftrace_new_gap (end, BDE_BTS_OVERFLOW);
618 ngaps += 1;
619 }
620 break;
621 }
622
623 end = ftrace_update_function (end, pc);
624 if (begin == NULL)
625 begin = end;
626
627 /* Maintain the function level offset.
628 For all but the last block, we do it here. */
629 if (blk != 0)
630 level = min (level, end->level);
631
632 size = 0;
633 TRY
634 {
635 size = gdb_insn_length (gdbarch, pc);
636 }
637 CATCH (error, RETURN_MASK_ERROR)
638 {
639 }
640 END_CATCH
641
642 insn.pc = pc;
643 insn.size = size;
644 insn.iclass = ftrace_classify_insn (gdbarch, pc);
645
646 ftrace_update_insns (end, &insn);
647
648 /* We're done once we pushed the instruction at the end. */
649 if (block->end == pc)
650 break;
651
652 /* We can't continue if we fail to compute the size. */
653 if (size <= 0)
654 {
655 warning (_("Recorded trace may be incomplete around %s."),
656 core_addr_to_string_nz (pc));
657
658 /* Indicate the gap in the trace. We just added INSN so we're
659 not at the beginning. */
660 end = ftrace_new_gap (end, BDE_BTS_INSN_SIZE);
661 ngaps += 1;
662
663 break;
664 }
665
666 pc += size;
667
668 /* Maintain the function level offset.
669 For the last block, we do it here to not consider the last
670 instruction.
671 Since the last instruction corresponds to the current instruction
672 and is not really part of the execution history, it shouldn't
673 affect the level. */
674 if (blk == 0)
675 level = min (level, end->level);
676 }
677 }
678
679 btinfo->begin = begin;
680 btinfo->end = end;
681 btinfo->ngaps = ngaps;
682
683 /* LEVEL is the minimal function level of all btrace function segments.
684 Define the global level offset to -LEVEL so all function levels are
685 normalized to start at zero. */
686 btinfo->level = -level;
687 }
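
/* Example of the normalization above: if unmatched returns pushed the
   minimal segment level down to -2, we store btinfo->level = 2, so
   adding this offset to any segment's level yields a call-stack depth
   that starts at zero. */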
688
689 /* Compute the function branch trace from a block branch trace BTRACE for
690 thread TP. */
691
692 static void
693 btrace_compute_ftrace (struct thread_info *tp, struct btrace_data *btrace)
694 {
695 DEBUG ("compute ftrace");
696
697 switch (btrace->format)
698 {
699 case BTRACE_FORMAT_NONE:
700 return;
701
702 case BTRACE_FORMAT_BTS:
703 btrace_compute_ftrace_bts (tp, &btrace->variant.bts);
704 return;
705 }
706
707 internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
708 }
709
710 /* Add an entry for the current PC. */
711
712 static void
713 btrace_add_pc (struct thread_info *tp)
714 {
715 struct btrace_data btrace;
716 struct btrace_block *block;
717 struct regcache *regcache;
718 struct cleanup *cleanup;
719 CORE_ADDR pc;
720
721 regcache = get_thread_regcache (tp->ptid);
722 pc = regcache_read_pc (regcache);
723
724 btrace_data_init (&btrace);
725 btrace.format = BTRACE_FORMAT_BTS;
726 btrace.variant.bts.blocks = NULL;
727
728 cleanup = make_cleanup_btrace_data (&btrace);
729
730 block = VEC_safe_push (btrace_block_s, btrace.variant.bts.blocks, NULL);
731 block->begin = pc;
732 block->end = pc;
733
734 btrace_compute_ftrace (tp, &btrace);
735
736 do_cleanups (cleanup);
737 }
738
739 /* See btrace.h. */
740
741 void
742 btrace_enable (struct thread_info *tp, const struct btrace_config *conf)
743 {
744 if (tp->btrace.target != NULL)
745 return;
746
747 if (!target_supports_btrace (conf->format))
748 error (_("Target does not support branch tracing."));
749
750 DEBUG ("enable thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
751
752 tp->btrace.target = target_enable_btrace (tp->ptid, conf);
753
754 /* Add an entry for the current PC so we start tracing from where we
755 enabled it. */
756 if (tp->btrace.target != NULL)
757 btrace_add_pc (tp);
758 }
759
760 /* See btrace.h. */
761
762 const struct btrace_config *
763 btrace_conf (const struct btrace_thread_info *btinfo)
764 {
765 if (btinfo->target == NULL)
766 return NULL;
767
768 return target_btrace_conf (btinfo->target);
769 }
770
771 /* See btrace.h. */
772
773 void
774 btrace_disable (struct thread_info *tp)
775 {
776 struct btrace_thread_info *btp = &tp->btrace;
777 int errcode = 0;
778
779 if (btp->target == NULL)
780 return;
781
782 DEBUG ("disable thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
783
784 target_disable_btrace (btp->target);
785 btp->target = NULL;
786
787 btrace_clear (tp);
788 }
789
790 /* See btrace.h. */
791
792 void
793 btrace_teardown (struct thread_info *tp)
794 {
795 struct btrace_thread_info *btp = &tp->btrace;
796 int errcode = 0;
797
798 if (btp->target == NULL)
799 return;
800
801 DEBUG ("teardown thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
802
803 target_teardown_btrace (btp->target);
804 btp->target = NULL;
805
806 btrace_clear (tp);
807 }
808
809 /* Stitch branch trace in BTS format. */
810
811 static int
812 btrace_stitch_bts (struct btrace_data_bts *btrace, struct thread_info *tp)
813 {
814 struct btrace_thread_info *btinfo;
815 struct btrace_function *last_bfun;
816 struct btrace_insn *last_insn;
817 btrace_block_s *first_new_block;
818
819 btinfo = &tp->btrace;
820 last_bfun = btinfo->end;
821 gdb_assert (last_bfun != NULL);
822 gdb_assert (!VEC_empty (btrace_block_s, btrace->blocks));
823
824 /* If the existing trace ends with a gap, we just glue the traces
825 together. We need to drop the last (i.e. chronologically first) block
826 of the new trace, though, since we can't fill in the start address. */
827 if (VEC_empty (btrace_insn_s, last_bfun->insn))
828 {
829 VEC_pop (btrace_block_s, btrace->blocks);
830 return 0;
831 }
832
833 /* Beware that block trace starts with the most recent block, so the
834 chronologically first block in the new trace is the last block in
835 the new trace's block vector. */
836 first_new_block = VEC_last (btrace_block_s, btrace->blocks);
837 last_insn = VEC_last (btrace_insn_s, last_bfun->insn);
838
839 /* If the current PC at the end of the block is the same as in our current
840 trace, there are two explanations:
841 1. we executed the instruction and some branch brought us back.
842 2. we have not made any progress.
843 In the first case, the delta trace vector should contain at least two
844 entries.
845 In the second case, the delta trace vector should contain exactly one
846 entry for the partial block containing the current PC. Remove it. */
847 if (first_new_block->end == last_insn->pc
848 && VEC_length (btrace_block_s, btrace->blocks) == 1)
849 {
850 VEC_pop (btrace_block_s, btrace->blocks);
851 return 0;
852 }
853
854 DEBUG ("stitching %s to %s", ftrace_print_insn_addr (last_insn),
855 core_addr_to_string_nz (first_new_block->end));
856
857 /* Do a simple sanity check to make sure we don't accidentally end up
858 with a bad block. This should not occur in practice. */
859 if (first_new_block->end < last_insn->pc)
860 {
861 warning (_("Error while trying to read delta trace. Falling back to "
862 "a full read."));
863 return -1;
864 }
865
866 /* We adjust the last block to start at the end of our current trace. */
867 gdb_assert (first_new_block->begin == 0);
868 first_new_block->begin = last_insn->pc;
869
870 /* We simply pop the last insn so we can insert it again as part of
871 the normal branch trace computation.
872 Since instruction iterators are based on indices in the instructions
873 vector, we don't leave any pointers dangling. */
874 DEBUG ("pruning insn at %s for stitching",
875 ftrace_print_insn_addr (last_insn));
876
877 VEC_pop (btrace_insn_s, last_bfun->insn);
878
879 /* The instructions vector may become empty temporarily if this has
880 been the only instruction in this function segment.
881 This violates the invariant but will be remedied shortly by
882 btrace_compute_ftrace when we add the new trace. */
883
884 /* The only case where this would hurt is if the entire trace consisted
885 of just that one instruction. If we remove it, we might turn the now
886 empty btrace function segment into a gap. But we don't want gaps at
887 the beginning. To avoid this, we remove the entire old trace. */
888 if (last_bfun == btinfo->begin && VEC_empty (btrace_insn_s, last_bfun->insn))
889 btrace_clear (tp);
890
891 return 0;
892 }
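
/* Stitching example: suppose the old trace ends with an instruction at
   pc 0x4005d0. A delta read returning the single block [0, 0x4005d0]
   means no progress was made; the block is dropped. A delta whose
   chronologically first block ends past 0x4005d0 instead has its
   unknown begin (zero) patched to 0x4005d0, and the last old
   instruction is popped so it is re-added during recomputation. */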
893
894 /* Adjust the block trace in order to stitch old and new trace together.
895 BTRACE is the new delta trace between the last and the current stop.
896 TP is the traced thread.
897 May modify BTRACE as well as the existing trace in TP.
898 Return 0 on success, -1 otherwise. */
899
900 static int
901 btrace_stitch_trace (struct btrace_data *btrace, struct thread_info *tp)
902 {
903 /* If we don't have trace, there's nothing to do. */
904 if (btrace_data_empty (btrace))
905 return 0;
906
907 switch (btrace->format)
908 {
909 case BTRACE_FORMAT_NONE:
910 return 0;
911
912 case BTRACE_FORMAT_BTS:
913 return btrace_stitch_bts (&btrace->variant.bts, tp);
914 }
915
916 internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
917 }
918
919 /* Clear the branch trace histories in BTINFO. */
920
921 static void
922 btrace_clear_history (struct btrace_thread_info *btinfo)
923 {
924 xfree (btinfo->insn_history);
925 xfree (btinfo->call_history);
926 xfree (btinfo->replay);
927
928 btinfo->insn_history = NULL;
929 btinfo->call_history = NULL;
930 btinfo->replay = NULL;
931 }
932
933 /* See btrace.h. */
934
935 void
936 btrace_fetch (struct thread_info *tp)
937 {
938 struct btrace_thread_info *btinfo;
939 struct btrace_target_info *tinfo;
940 struct btrace_data btrace;
941 struct cleanup *cleanup;
942 int errcode;
943
944 DEBUG ("fetch thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
945
946 btinfo = &tp->btrace;
947 tinfo = btinfo->target;
948 if (tinfo == NULL)
949 return;
950
951 /* There's no way we could get new trace while replaying.
952 On the other hand, delta trace would return a partial record with the
953 current PC, which is the replay PC, not the last PC, as expected. */
954 if (btinfo->replay != NULL)
955 return;
956
957 btrace_data_init (&btrace);
958 cleanup = make_cleanup_btrace_data (&btrace);
959
960 /* Let's first try to extend the trace we already have. */
961 if (btinfo->end != NULL)
962 {
963 errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_DELTA);
964 if (errcode == 0)
965 {
966 /* Success. Let's try to stitch the traces together. */
967 errcode = btrace_stitch_trace (&btrace, tp);
968 }
969 else
970 {
971 /* We failed to read delta trace. Let's try to read new trace. */
972 errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_NEW);
973
974 /* If we got any new trace, discard what we have. */
975 if (errcode == 0 && !btrace_data_empty (&btrace))
976 btrace_clear (tp);
977 }
978
979 /* If we were not able to read the trace, we start over. */
980 if (errcode != 0)
981 {
982 btrace_clear (tp);
983 errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);
984 }
985 }
986 else
987 errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);
988
989 /* If we were not able to read the branch trace, signal an error. */
990 if (errcode != 0)
991 error (_("Failed to read branch trace."));
992
993 /* Compute the trace, provided we have any. */
994 if (!btrace_data_empty (&btrace))
995 {
996 btrace_clear_history (btinfo);
997 btrace_compute_ftrace (tp, &btrace);
998 }
999
1000 do_cleanups (cleanup);
1001 }
1002
1003 /* See btrace.h. */
1004
1005 void
1006 btrace_clear (struct thread_info *tp)
1007 {
1008 struct btrace_thread_info *btinfo;
1009 struct btrace_function *it, *trash;
1010
1011 DEBUG ("clear thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
1012
1013 /* Make sure btrace frames that may hold a pointer into the branch
1014 trace data are destroyed. */
1015 reinit_frame_cache ();
1016
1017 btinfo = &tp->btrace;
1018
1019 it = btinfo->begin;
1020 while (it != NULL)
1021 {
1022 trash = it;
1023 it = it->flow.next;
1024
1025 xfree (trash);
1026 }
1027
1028 btinfo->begin = NULL;
1029 btinfo->end = NULL;
1030 btinfo->ngaps = 0;
1031
1032 btrace_clear_history (btinfo);
1033 }
1034
1035 /* See btrace.h. */
1036
1037 void
1038 btrace_free_objfile (struct objfile *objfile)
1039 {
1040 struct thread_info *tp;
1041
1042 DEBUG ("free objfile");
1043
1044 ALL_NON_EXITED_THREADS (tp)
1045 btrace_clear (tp);
1046 }
1047
1048 #if defined (HAVE_LIBEXPAT)
1049
1050 /* Check the btrace document version. */
1051
1052 static void
1053 check_xml_btrace_version (struct gdb_xml_parser *parser,
1054 const struct gdb_xml_element *element,
1055 void *user_data, VEC (gdb_xml_value_s) *attributes)
1056 {
1057 const char *version = xml_find_attribute (attributes, "version")->value;
1058
1059 if (strcmp (version, "1.0") != 0)
1060 gdb_xml_error (parser, _("Unsupported btrace version: \"%s\""), version);
1061 }
1062
1063 /* Parse a btrace "block" xml record. */
1064
1065 static void
1066 parse_xml_btrace_block (struct gdb_xml_parser *parser,
1067 const struct gdb_xml_element *element,
1068 void *user_data, VEC (gdb_xml_value_s) *attributes)
1069 {
1070 struct btrace_data *btrace;
1071 struct btrace_block *block;
1072 ULONGEST *begin, *end;
1073
1074 btrace = user_data;
1075
1076 switch (btrace->format)
1077 {
1078 case BTRACE_FORMAT_BTS:
1079 break;
1080
1081 case BTRACE_FORMAT_NONE:
1082 btrace->format = BTRACE_FORMAT_BTS;
1083 btrace->variant.bts.blocks = NULL;
1084 break;
1085
1086 default:
1087 gdb_xml_error (parser, _("Btrace format error."));
1088 }
1089
1090 begin = xml_find_attribute (attributes, "begin")->value;
1091 end = xml_find_attribute (attributes, "end")->value;
1092
1093 block = VEC_safe_push (btrace_block_s, btrace->variant.bts.blocks, NULL);
1094 block->begin = *begin;
1095 block->end = *end;
1096 }
1097
1098 static const struct gdb_xml_attribute block_attributes[] = {
1099 { "begin", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
1100 { "end", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
1101 { NULL, GDB_XML_AF_NONE, NULL, NULL }
1102 };
1103
1104 static const struct gdb_xml_attribute btrace_attributes[] = {
1105 { "version", GDB_XML_AF_NONE, NULL, NULL },
1106 { NULL, GDB_XML_AF_NONE, NULL, NULL }
1107 };
1108
1109 static const struct gdb_xml_element btrace_children[] = {
1110 { "block", block_attributes, NULL,
1111 GDB_XML_EF_REPEATABLE | GDB_XML_EF_OPTIONAL, parse_xml_btrace_block, NULL },
1112 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
1113 };
1114
1115 static const struct gdb_xml_element btrace_elements[] = {
1116 { "btrace", btrace_attributes, btrace_children, GDB_XML_EF_NONE,
1117 check_xml_btrace_version, NULL },
1118 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
1119 };
1120
1121 #endif /* defined (HAVE_LIBEXPAT) */
1122
1123 /* See btrace.h. */
1124
1125 void
1126 parse_xml_btrace (struct btrace_data *btrace, const char *buffer)
1127 {
1128 struct cleanup *cleanup;
1129 int errcode;
1130
1131 #if defined (HAVE_LIBEXPAT)
1132
1133 btrace->format = BTRACE_FORMAT_NONE;
1134
1135 cleanup = make_cleanup_btrace_data (btrace);
1136 errcode = gdb_xml_parse_quick (_("btrace"), "btrace.dtd", btrace_elements,
1137 buffer, btrace);
1138 if (errcode != 0)
1139 error (_("Error parsing branch trace."));
1140
1141 /* Keep parse results. */
1142 discard_cleanups (cleanup);
1143
1144 #else /* !defined (HAVE_LIBEXPAT) */
1145
1146 error (_("Cannot process branch trace. XML parsing is not supported."));
1147
1148 #endif /* !defined (HAVE_LIBEXPAT) */
1149 }
1150
1151 #if defined (HAVE_LIBEXPAT)
1152
1153 /* Parse a btrace-conf "bts" xml record. */
1154
1155 static void
1156 parse_xml_btrace_conf_bts (struct gdb_xml_parser *parser,
1157 const struct gdb_xml_element *element,
1158 void *user_data, VEC (gdb_xml_value_s) *attributes)
1159 {
1160 struct btrace_config *conf;
1161 struct gdb_xml_value *size;
1162
1163 conf = user_data;
1164 conf->format = BTRACE_FORMAT_BTS;
1165 conf->bts.size = 0;
1166
1167 size = xml_find_attribute (attributes, "size");
1168 if (size != NULL)
1169 conf->bts.size = (unsigned int) * (ULONGEST *) size->value;
1170 }
1171
1172 static const struct gdb_xml_attribute btrace_conf_bts_attributes[] = {
1173 { "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
1174 { NULL, GDB_XML_AF_NONE, NULL, NULL }
1175 };
1176
1177 static const struct gdb_xml_element btrace_conf_children[] = {
1178 { "bts", btrace_conf_bts_attributes, NULL, GDB_XML_EF_OPTIONAL,
1179 parse_xml_btrace_conf_bts, NULL },
1180 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
1181 };
1182
1183 static const struct gdb_xml_attribute btrace_conf_attributes[] = {
1184 { "version", GDB_XML_AF_NONE, NULL, NULL },
1185 { NULL, GDB_XML_AF_NONE, NULL, NULL }
1186 };
1187
1188 static const struct gdb_xml_element btrace_conf_elements[] = {
1189 { "btrace-conf", btrace_conf_attributes, btrace_conf_children,
1190 GDB_XML_EF_NONE, NULL, NULL },
1191 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
1192 };
1193
1194 #endif /* defined (HAVE_LIBEXPAT) */
1195
1196 /* See btrace.h. */
1197
1198 void
1199 parse_xml_btrace_conf (struct btrace_config *conf, const char *xml)
1200 {
1201 int errcode;
1202
1203 #if defined (HAVE_LIBEXPAT)
1204
1205 errcode = gdb_xml_parse_quick (_("btrace-conf"), "btrace-conf.dtd",
1206 btrace_conf_elements, xml, conf);
1207 if (errcode != 0)
1208 error (_("Error parsing branch trace configuration."));
1209
1210 #else /* !defined (HAVE_LIBEXPAT) */
1211
1212 error (_("XML parsing is not supported."));
1213
1214 #endif /* !defined (HAVE_LIBEXPAT) */
1215 }
1216
1217 /* See btrace.h. */
1218
1219 const struct btrace_insn *
1220 btrace_insn_get (const struct btrace_insn_iterator *it)
1221 {
1222 const struct btrace_function *bfun;
1223 unsigned int index, end;
1224
1225 index = it->index;
1226 bfun = it->function;
1227
1228 /* Check if the iterator points to a gap in the trace. */
1229 if (bfun->errcode != 0)
1230 return NULL;
1231
1232 /* The index is within the bounds of this function's instruction vector. */
1233 end = VEC_length (btrace_insn_s, bfun->insn);
1234 gdb_assert (0 < end);
1235 gdb_assert (index < end);
1236
1237 return VEC_index (btrace_insn_s, bfun->insn, index);
1238 }
1239
1240 /* See btrace.h. */
1241
1242 unsigned int
1243 btrace_insn_number (const struct btrace_insn_iterator *it)
1244 {
1245 const struct btrace_function *bfun;
1246
1247 bfun = it->function;
1248
1249 /* Return zero if the iterator points to a gap in the trace. */
1250 if (bfun->errcode != 0)
1251 return 0;
1252
1253 return bfun->insn_offset + it->index;
1254 }
1255
1256 /* See btrace.h. */
1257
1258 void
1259 btrace_insn_begin (struct btrace_insn_iterator *it,
1260 const struct btrace_thread_info *btinfo)
1261 {
1262 const struct btrace_function *bfun;
1263
1264 bfun = btinfo->begin;
1265 if (bfun == NULL)
1266 error (_("No trace."));
1267
1268 it->function = bfun;
1269 it->index = 0;
1270 }
1271
1272 /* See btrace.h. */
1273
1274 void
1275 btrace_insn_end (struct btrace_insn_iterator *it,
1276 const struct btrace_thread_info *btinfo)
1277 {
1278 const struct btrace_function *bfun;
1279 unsigned int length;
1280
1281 bfun = btinfo->end;
1282 if (bfun == NULL)
1283 error (_("No trace."));
1284
1285 length = VEC_length (btrace_insn_s, bfun->insn);
1286
1287 /* The last function may either be a gap or contain the current
1288 instruction, which is one past the end of the execution trace;
1289 ignore it. */
1290 if (length > 0)
1291 length -= 1;
1292
1293 it->function = bfun;
1294 it->index = length;
1295 }
1296
1297 /* See btrace.h. */
1298
1299 unsigned int
1300 btrace_insn_next (struct btrace_insn_iterator *it, unsigned int stride)
1301 {
1302 const struct btrace_function *bfun;
1303 unsigned int index, steps;
1304
1305 bfun = it->function;
1306 steps = 0;
1307 index = it->index;
1308
1309 while (stride != 0)
1310 {
1311 unsigned int end, space, adv;
1312
1313 end = VEC_length (btrace_insn_s, bfun->insn);
1314
1315 /* An empty function segment represents a gap in the trace. We count
1316 it as one instruction. */
1317 if (end == 0)
1318 {
1319 const struct btrace_function *next;
1320
1321 next = bfun->flow.next;
1322 if (next == NULL)
1323 break;
1324
1325 stride -= 1;
1326 steps += 1;
1327
1328 bfun = next;
1329 index = 0;
1330
1331 continue;
1332 }
1333
1334 gdb_assert (0 < end);
1335 gdb_assert (index < end);
1336
1337 /* Compute the number of instructions remaining in this segment. */
1338 space = end - index;
1339
1340 /* Advance the iterator as far as possible within this segment. */
1341 adv = min (space, stride);
1342 stride -= adv;
1343 index += adv;
1344 steps += adv;
1345
1346 /* Move to the next function if we're at the end of this one. */
1347 if (index == end)
1348 {
1349 const struct btrace_function *next;
1350
1351 next = bfun->flow.next;
1352 if (next == NULL)
1353 {
1354 /* We stepped past the last function.
1355
1356 Let's adjust the index to point to the last instruction in
1357 the previous function. */
1358 index -= 1;
1359 steps -= 1;
1360 break;
1361 }
1362
1363 /* We now point to the first instruction in the new function. */
1364 bfun = next;
1365 index = 0;
1366 }
1367
1368 /* We did make progress. */
1369 gdb_assert (adv > 0);
1370 }
1371
1372 /* Update the iterator. */
1373 it->function = bfun;
1374 it->index = index;
1375
1376 return steps;
1377 }
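
/* Example: advancing by a stride of 3 from the start of a 2-instruction
   segment that is followed by a gap and another segment consumes two
   steps in the first segment, counts the gap as one step, and leaves
   the iterator on the first instruction after the gap, returning 3. */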
1378
1379 /* See btrace.h. */
1380
1381 unsigned int
1382 btrace_insn_prev (struct btrace_insn_iterator *it, unsigned int stride)
1383 {
1384 const struct btrace_function *bfun;
1385 unsigned int index, steps;
1386
1387 bfun = it->function;
1388 steps = 0;
1389 index = it->index;
1390
1391 while (stride != 0)
1392 {
1393 unsigned int adv;
1394
1395 /* Move to the previous function if we're at the start of this one. */
1396 if (index == 0)
1397 {
1398 const struct btrace_function *prev;
1399
1400 prev = bfun->flow.prev;
1401 if (prev == NULL)
1402 break;
1403
1404 /* We point to one after the last instruction in the new function. */
1405 bfun = prev;
1406 index = VEC_length (btrace_insn_s, bfun->insn);
1407
1408 /* An empty function segment represents a gap in the trace. We count
1409 it as one instruction. */
1410 if (index == 0)
1411 {
1412 stride -= 1;
1413 steps += 1;
1414
1415 continue;
1416 }
1417 }
1418
1419 /* Advance the iterator as far as possible within this segment. */
1420 adv = min (index, stride);
1421
1422 stride -= adv;
1423 index -= adv;
1424 steps += adv;
1425
1426 /* We did make progress. */
1427 gdb_assert (adv > 0);
1428 }
1429
1430 /* Update the iterator. */
1431 it->function = bfun;
1432 it->index = index;
1433
1434 return steps;
1435 }
1436
1437 /* See btrace.h. */
1438
1439 int
1440 btrace_insn_cmp (const struct btrace_insn_iterator *lhs,
1441 const struct btrace_insn_iterator *rhs)
1442 {
1443 unsigned int lnum, rnum;
1444
1445 lnum = btrace_insn_number (lhs);
1446 rnum = btrace_insn_number (rhs);
1447
1448 /* A gap has an instruction number of zero. Things get more
1449 complicated when gaps are involved.
1450
1451 We take the instruction number offset from the iterator's function.
1452 This is the number of the first instruction after the gap.
1453
1454 This is OK as long as both lhs and rhs point to gaps. If only one of
1455 them does, we need to adjust the number based on the other's regular
1456 instruction number. Otherwise, a gap might compare equal to an
1457 instruction. */
1458
1459 if (lnum == 0 && rnum == 0)
1460 {
1461 lnum = lhs->function->insn_offset;
1462 rnum = rhs->function->insn_offset;
1463 }
1464 else if (lnum == 0)
1465 {
1466 lnum = lhs->function->insn_offset;
1467
1468 if (lnum == rnum)
1469 lnum -= 1;
1470 }
1471 else if (rnum == 0)
1472 {
1473 rnum = rhs->function->insn_offset;
1474
1475 if (rnum == lnum)
1476 rnum -= 1;
1477 }
1478
1479 return (int) (lnum - rnum);
1480 }
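
/* Example: if LHS points to a gap whose segment has insn_offset 10 and
   RHS points to the regular instruction number 10, LNUM is adjusted
   from 10 to 9, so the gap orders strictly before the instruction that
   follows it. */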
1481
1482 /* See btrace.h. */
1483
1484 int
1485 btrace_find_insn_by_number (struct btrace_insn_iterator *it,
1486 const struct btrace_thread_info *btinfo,
1487 unsigned int number)
1488 {
1489 const struct btrace_function *bfun;
1490 unsigned int end, length;
1491
1492 for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
1493 {
1494 /* Skip gaps. */
1495 if (bfun->errcode != 0)
1496 continue;
1497
1498 if (bfun->insn_offset <= number)
1499 break;
1500 }
1501
1502 if (bfun == NULL)
1503 return 0;
1504
1505 length = VEC_length (btrace_insn_s, bfun->insn);
1506 gdb_assert (length > 0);
1507
1508 end = bfun->insn_offset + length;
1509 if (end <= number)
1510 return 0;
1511
1512 it->function = bfun;
1513 it->index = number - bfun->insn_offset;
1514
1515 return 1;
1516 }
1517
1518 /* See btrace.h. */
1519
1520 const struct btrace_function *
1521 btrace_call_get (const struct btrace_call_iterator *it)
1522 {
1523 return it->function;
1524 }
1525
1526 /* See btrace.h. */
1527
1528 unsigned int
1529 btrace_call_number (const struct btrace_call_iterator *it)
1530 {
1531 const struct btrace_thread_info *btinfo;
1532 const struct btrace_function *bfun;
1533 unsigned int insns;
1534
1535 btinfo = it->btinfo;
1536 bfun = it->function;
1537 if (bfun != NULL)
1538 return bfun->number;
1539
1540 /* For the end iterator, i.e. bfun == NULL, we return one more than the
1541 number of the last function. */
1542 bfun = btinfo->end;
1543 insns = VEC_length (btrace_insn_s, bfun->insn);
1544
1545 /* If the function contains only a single instruction (i.e. the current
1546 instruction), it will be skipped and its number is already the number
1547 we seek. */
1548 if (insns == 1)
1549 return bfun->number;
1550
1551 /* Otherwise, return one more than the number of the last function. */
1552 return bfun->number + 1;
1553 }
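
/* Example: given segments numbered 1..3 where segment 3 holds only the
   current instruction, the end iterator's number is 3 (the skipped
   segment's own number); if segment 3 held more instructions, it would
   be 4, one past the last segment. */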
1554
1555 /* See btrace.h. */
1556
1557 void
1558 btrace_call_begin (struct btrace_call_iterator *it,
1559 const struct btrace_thread_info *btinfo)
1560 {
1561 const struct btrace_function *bfun;
1562
1563 bfun = btinfo->begin;
1564 if (bfun == NULL)
1565 error (_("No trace."));
1566
1567 it->btinfo = btinfo;
1568 it->function = bfun;
1569 }
1570
1571 /* See btrace.h. */
1572
1573 void
1574 btrace_call_end (struct btrace_call_iterator *it,
1575 const struct btrace_thread_info *btinfo)
1576 {
1577 const struct btrace_function *bfun;
1578
1579 bfun = btinfo->end;
1580 if (bfun == NULL)
1581 error (_("No trace."));
1582
1583 it->btinfo = btinfo;
1584 it->function = NULL;
1585 }
1586
1587 /* See btrace.h. */
1588
1589 unsigned int
1590 btrace_call_next (struct btrace_call_iterator *it, unsigned int stride)
1591 {
1592 const struct btrace_function *bfun;
1593 unsigned int steps;
1594
1595 bfun = it->function;
1596 steps = 0;
1597 while (bfun != NULL)
1598 {
1599 const struct btrace_function *next;
1600 unsigned int insns;
1601
1602 next = bfun->flow.next;
1603 if (next == NULL)
1604 {
1605 /* Ignore the last function if it only contains a single
1606 (i.e. the current) instruction. */
1607 insns = VEC_length (btrace_insn_s, bfun->insn);
1608 if (insns == 1)
1609 steps -= 1;
1610 }
1611
1612 if (stride == steps)
1613 break;
1614
1615 bfun = next;
1616 steps += 1;
1617 }
1618
1619 it->function = bfun;
1620 return steps;
1621 }
1622
1623 /* See btrace.h. */
1624
1625 unsigned int
1626 btrace_call_prev (struct btrace_call_iterator *it, unsigned int stride)
1627 {
1628 const struct btrace_thread_info *btinfo;
1629 const struct btrace_function *bfun;
1630 unsigned int steps;
1631
1632 bfun = it->function;
1633 steps = 0;
1634
1635 if (bfun == NULL)
1636 {
1637 unsigned int insns;
1638
1639 btinfo = it->btinfo;
1640 bfun = btinfo->end;
1641 if (bfun == NULL)
1642 return 0;
1643
1644 /* Ignore the last function if it only contains a single
1645 (i.e. the current) instruction. */
1646 insns = VEC_length (btrace_insn_s, bfun->insn);
1647 if (insns == 1)
1648 bfun = bfun->flow.prev;
1649
1650 if (bfun == NULL)
1651 return 0;
1652
1653 steps += 1;
1654 }
1655
1656 while (steps < stride)
1657 {
1658 const struct btrace_function *prev;
1659
1660 prev = bfun->flow.prev;
1661 if (prev == NULL)
1662 break;
1663
1664 bfun = prev;
1665 steps += 1;
1666 }
1667
1668 it->function = bfun;
1669 return steps;
1670 }
1671
1672 /* See btrace.h. */
1673
1674 int
1675 btrace_call_cmp (const struct btrace_call_iterator *lhs,
1676 const struct btrace_call_iterator *rhs)
1677 {
1678 unsigned int lnum, rnum;
1679
1680 lnum = btrace_call_number (lhs);
1681 rnum = btrace_call_number (rhs);
1682
1683 return (int) (lnum - rnum);
1684 }
1685
1686 /* See btrace.h. */
1687
1688 int
1689 btrace_find_call_by_number (struct btrace_call_iterator *it,
1690 const struct btrace_thread_info *btinfo,
1691 unsigned int number)
1692 {
1693 const struct btrace_function *bfun;
1694
1695 for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
1696 {
1697 unsigned int bnum;
1698
1699 bnum = bfun->number;
1700 if (number == bnum)
1701 {
1702 it->btinfo = btinfo;
1703 it->function = bfun;
1704 return 1;
1705 }
1706
1707 /* Functions are ordered and numbered consecutively. We could bail out
1708 earlier. On the other hand, it is very unlikely that we search for
1709 a nonexistent function. */
1710 }
1711
1712 return 0;
1713 }
1714
1715 /* See btrace.h. */
1716
1717 void
1718 btrace_set_insn_history (struct btrace_thread_info *btinfo,
1719 const struct btrace_insn_iterator *begin,
1720 const struct btrace_insn_iterator *end)
1721 {
1722 if (btinfo->insn_history == NULL)
1723 btinfo->insn_history = xzalloc (sizeof (*btinfo->insn_history));
1724
1725 btinfo->insn_history->begin = *begin;
1726 btinfo->insn_history->end = *end;
1727 }
1728
1729 /* See btrace.h. */
1730
1731 void
1732 btrace_set_call_history (struct btrace_thread_info *btinfo,
1733 const struct btrace_call_iterator *begin,
1734 const struct btrace_call_iterator *end)
1735 {
1736 gdb_assert (begin->btinfo == end->btinfo);
1737
1738 if (btinfo->call_history == NULL)
1739 btinfo->call_history = xzalloc (sizeof (*btinfo->call_history));
1740
1741 btinfo->call_history->begin = *begin;
1742 btinfo->call_history->end = *end;
1743 }
1744
1745 /* See btrace.h. */
1746
1747 int
1748 btrace_is_replaying (struct thread_info *tp)
1749 {
1750 return tp->btrace.replay != NULL;
1751 }
1752
1753 /* See btrace.h. */
1754
1755 int
1756 btrace_is_empty (struct thread_info *tp)
1757 {
1758 struct btrace_insn_iterator begin, end;
1759 struct btrace_thread_info *btinfo;
1760
1761 btinfo = &tp->btrace;
1762
1763 if (btinfo->begin == NULL)
1764 return 1;
1765
1766 btrace_insn_begin (&begin, btinfo);
1767 btrace_insn_end (&end, btinfo);
1768
1769 return btrace_insn_cmp (&begin, &end) == 0;
1770 }
1771
1772 /* Forward the cleanup request. */
1773
1774 static void
1775 do_btrace_data_cleanup (void *arg)
1776 {
1777 btrace_data_fini (arg);
1778 }
1779
1780 /* See btrace.h. */
1781
1782 struct cleanup *
1783 make_cleanup_btrace_data (struct btrace_data *data)
1784 {
1785 return make_cleanup (do_btrace_data_cleanup, data);
1786 }