gdb/btrace.c (binutils-gdb.git, commit 1d060d3eb8efcb5fc94377571e7e9f9ceecf62e0)
1 /* Branch trace support for GDB, the GNU debugger.
2
3 Copyright (C) 2013-2014 Free Software Foundation, Inc.
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "btrace.h"
23 #include "gdbthread.h"
24 #include "exceptions.h"
25 #include "inferior.h"
26 #include "target.h"
27 #include "record.h"
28 #include "symtab.h"
29 #include "disasm.h"
30 #include "source.h"
31 #include "filenames.h"
32 #include "xml-support.h"
33
34 /* Print a record debug message. Use do ... while (0) to avoid ambiguities
35 when used in if statements. */
36
37 #define DEBUG(msg, args...) \
38 do \
39 { \
40 if (record_debug != 0) \
41 fprintf_unfiltered (gdb_stdlog, \
42 "[btrace] " msg "\n", ##args); \
43 } \
44 while (0)
45
46 #define DEBUG_FTRACE(msg, args...) DEBUG ("[ftrace] " msg, ##args)
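
/* The do/while (0) wrapper matters when the macro is used as the body of an
   if/else.  A minimal sketch (the condition and the else branch are
   placeholders, not code from this file):

     if (from_tty)
       DEBUG ("enabled");
     else
       print_something_else ();

   Without the wrapper, DEBUG would expand to a bare `if (record_debug != 0)
   ...' and the `else' above would bind to that inner `if'.  With it,
   `DEBUG ("enabled");' is a single statement and the construct parses as
   written.  */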
47
48 /* Return the function name of a recorded function segment for printing.
49 This function never returns NULL. */
50
51 static const char *
52 ftrace_print_function_name (const struct btrace_function *bfun)
53 {
54 struct minimal_symbol *msym;
55 struct symbol *sym;
56
57 msym = bfun->msym;
58 sym = bfun->sym;
59
60 if (sym != NULL)
61 return SYMBOL_PRINT_NAME (sym);
62
63 if (msym != NULL)
64 return SYMBOL_PRINT_NAME (msym);
65
66 return "<unknown>";
67 }
68
69 /* Return the file name of a recorded function segment for printing.
70 This function never returns NULL. */
71
72 static const char *
73 ftrace_print_filename (const struct btrace_function *bfun)
74 {
75 struct symbol *sym;
76 const char *filename;
77
78 sym = bfun->sym;
79
80 if (sym != NULL)
81 filename = symtab_to_filename_for_display (sym->symtab);
82 else
83 filename = "<unknown>";
84
85 return filename;
86 }
87
88 /* Return a string representation of the address of an instruction.
89 This function never returns NULL. */
90
91 static const char *
92 ftrace_print_insn_addr (const struct btrace_insn *insn)
93 {
94 if (insn == NULL)
95 return "<nil>";
96
97 return core_addr_to_string_nz (insn->pc);
98 }
99
100 /* Print an ftrace debug status message. */
101
102 static void
103 ftrace_debug (const struct btrace_function *bfun, const char *prefix)
104 {
105 const char *fun, *file;
106 unsigned int ibegin, iend;
107 int lbegin, lend, level;
108
109 fun = ftrace_print_function_name (bfun);
110 file = ftrace_print_filename (bfun);
111 level = bfun->level;
112
113 lbegin = bfun->lbegin;
114 lend = bfun->lend;
115
116 ibegin = bfun->insn_offset;
117 iend = ibegin + VEC_length (btrace_insn_s, bfun->insn);
118
119 DEBUG_FTRACE ("%s: fun = %s, file = %s, level = %d, lines = [%d; %d], "
120 "insn = [%u; %u)", prefix, fun, file, level, lbegin, lend,
121 ibegin, iend);
122 }
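
/* With `set debug record 1', the message above is printed to gdb_stdlog
   roughly as follows (the values are made up for illustration):

     [btrace] [ftrace] new call: fun = main, file = hello.c, level = 1,
       lines = [23; 27], insn = [42; 58)

   Note that the line range is inclusive while the instruction range is
   half-open, matching the "[%d; %d]" and "[%u; %u)" conversions above.  */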
123
124 /* Return non-zero if BFUN does not match MFUN and FUN,
125 and zero otherwise. */
126
127 static int
128 ftrace_function_switched (const struct btrace_function *bfun,
129 const struct minimal_symbol *mfun,
130 const struct symbol *fun)
131 {
132 struct minimal_symbol *msym;
133 struct symbol *sym;
134
135 msym = bfun->msym;
136 sym = bfun->sym;
137
138 /* If the minimal symbol changed, we certainly switched functions. */
139 if (mfun != NULL && msym != NULL
140 && strcmp (SYMBOL_LINKAGE_NAME (mfun), SYMBOL_LINKAGE_NAME (msym)) != 0)
141 return 1;
142
143 /* If the symbol changed, we certainly switched functions. */
144 if (fun != NULL && sym != NULL)
145 {
146 const char *bfname, *fname;
147
148 /* Check the function name. */
149 if (strcmp (SYMBOL_LINKAGE_NAME (fun), SYMBOL_LINKAGE_NAME (sym)) != 0)
150 return 1;
151
152 /* Check the location of those functions, as well. */
153 bfname = symtab_to_fullname (sym->symtab);
154 fname = symtab_to_fullname (fun->symtab);
155 if (filename_cmp (fname, bfname) != 0)
156 return 1;
157 }
158
159 /* If we lost symbol information, we switched functions. */
160 if (!(msym == NULL && sym == NULL) && mfun == NULL && fun == NULL)
161 return 1;
162
163 /* If we gained symbol information, we switched functions. */
164 if (msym == NULL && sym == NULL && !(mfun == NULL && fun == NULL))
165 return 1;
166
167 return 0;
168 }
169
170 /* Return non-zero if we should skip this file when generating the function
171 call history, zero otherwise.
172 We would want to do that if, say, a macro that is defined in another file
173 is expanded in this function. */
174
175 static int
176 ftrace_skip_file (const struct btrace_function *bfun, const char *fullname)
177 {
178 struct symbol *sym;
179 const char *bfile;
180
181 sym = bfun->sym;
182 if (sym == NULL)
183 return 1;
184
185 bfile = symtab_to_fullname (sym->symtab);
186
187 return (filename_cmp (bfile, fullname) != 0);
188 }
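
/* For example (file names are purely illustrative): if foo () in foo.c
   expands a macro defined in util.h, find_pc_line will attribute some of
   foo's instructions to util.h.  Comparing full names lets the caller drop
   those lines instead of widening foo's source range:

     ftrace_skip_file (bfun_for_foo, "/src/util.h")  returns 1  (skip)
     ftrace_skip_file (bfun_for_foo, "/src/foo.c")   returns 0  (keep)  */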
189
190 /* Allocate and initialize a new branch trace function segment.
191 PREV is the chronologically preceding function segment.
192 MFUN and FUN are the symbol information we have for this function. */
193
194 static struct btrace_function *
195 ftrace_new_function (struct btrace_function *prev,
196 struct minimal_symbol *mfun,
197 struct symbol *fun)
198 {
199 struct btrace_function *bfun;
200
201 bfun = xzalloc (sizeof (*bfun));
202
203 bfun->msym = mfun;
204 bfun->sym = fun;
205 bfun->flow.prev = prev;
206
207 /* We start with the identities of min and max, respectively. */
208 bfun->lbegin = INT_MAX;
209 bfun->lend = INT_MIN;
210
211 if (prev == NULL)
212 {
213 /* Start counting at one. */
214 bfun->number = 1;
215 bfun->insn_offset = 1;
216 }
217 else
218 {
219 gdb_assert (prev->flow.next == NULL);
220 prev->flow.next = bfun;
221
222 bfun->number = prev->number + 1;
223 bfun->insn_offset = (prev->insn_offset
224 + VEC_length (btrace_insn_s, prev->insn));
225 }
226
227 return bfun;
228 }
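
/* Both NUMBER and INSN_OFFSET grow monotonically along the flow chain.  A
   sketch with made-up segment sizes: if the first segment ends up holding
   3 instructions and the second holds 5, we get

     number = 1, insn_offset = 1   (instructions 1..3)
     number = 2, insn_offset = 4   (instructions 4..8)

   which is what later allows btrace_insn_number to compute a global,
   1-based instruction number as insn_offset + index.  */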
229
230 /* Update the UP field of a function segment. */
231
232 static void
233 ftrace_update_caller (struct btrace_function *bfun,
234 struct btrace_function *caller,
235 enum btrace_function_flag flags)
236 {
237 if (bfun->up != NULL)
238 ftrace_debug (bfun, "updating caller");
239
240 bfun->up = caller;
241 bfun->flags = flags;
242
243 ftrace_debug (bfun, "set caller");
244 }
245
246 /* Fix up the caller for all segments of a function. */
247
248 static void
249 ftrace_fixup_caller (struct btrace_function *bfun,
250 struct btrace_function *caller,
251 enum btrace_function_flag flags)
252 {
253 struct btrace_function *prev, *next;
254
255 ftrace_update_caller (bfun, caller, flags);
256
257 /* Update all function segments belonging to the same function. */
258 for (prev = bfun->segment.prev; prev != NULL; prev = prev->segment.prev)
259 ftrace_update_caller (prev, caller, flags);
260
261 for (next = bfun->segment.next; next != NULL; next = next->segment.next)
262 ftrace_update_caller (next, caller, flags);
263 }
264
265 /* Add a new function segment for a call.
266 CALLER is the chronologically preceding function segment.
267 MFUN and FUN are the symbol information we have for this function. */
268
269 static struct btrace_function *
270 ftrace_new_call (struct btrace_function *caller,
271 struct minimal_symbol *mfun,
272 struct symbol *fun)
273 {
274 struct btrace_function *bfun;
275
276 bfun = ftrace_new_function (caller, mfun, fun);
277 bfun->up = caller;
278 bfun->level = caller->level + 1;
279
280 ftrace_debug (bfun, "new call");
281
282 return bfun;
283 }
284
285 /* Add a new function segment for a tail call.
286 CALLER is the chronologically preceding function segment.
287 MFUN and FUN are the symbol information we have for this function. */
288
289 static struct btrace_function *
290 ftrace_new_tailcall (struct btrace_function *caller,
291 struct minimal_symbol *mfun,
292 struct symbol *fun)
293 {
294 struct btrace_function *bfun;
295
296 bfun = ftrace_new_function (caller, mfun, fun);
297 bfun->up = caller;
298 bfun->level = caller->level + 1;
299 bfun->flags |= BFUN_UP_LINKS_TO_TAILCALL;
300
301 ftrace_debug (bfun, "new tail call");
302
303 return bfun;
304 }
305
306 /* Find the innermost caller in the back trace of BFUN with MFUN/FUN
307 symbol information. */
308
309 static struct btrace_function *
310 ftrace_find_caller (struct btrace_function *bfun,
311 struct minimal_symbol *mfun,
312 struct symbol *fun)
313 {
314 for (; bfun != NULL; bfun = bfun->up)
315 {
316 /* Skip functions with incompatible symbol information. */
317 if (ftrace_function_switched (bfun, mfun, fun))
318 continue;
319
320 /* This is the function segment we're looking for. */
321 break;
322 }
323
324 return bfun;
325 }
326
327 /* Find the innermost caller in the back trace of BFUN, skipping all
328 function segments that do not end with a call instruction (e.g.
329 tail calls ending with a jump). */
330
331 static struct btrace_function *
332 ftrace_find_call (struct gdbarch *gdbarch, struct btrace_function *bfun)
333 {
334 for (; bfun != NULL; bfun = bfun->up)
335 {
336 struct btrace_insn *last;
337 CORE_ADDR pc;
338
339 /* We do not allow empty function segments. */
340 gdb_assert (!VEC_empty (btrace_insn_s, bfun->insn));
341
342 last = VEC_last (btrace_insn_s, bfun->insn);
343 pc = last->pc;
344
345 if (gdbarch_insn_is_call (gdbarch, pc))
346 break;
347 }
348
349 return bfun;
350 }
351
352 /* Add a continuation segment for a function into which we return.
353 PREV is the chronologically preceding function segment.
354 MFUN and FUN are the symbol information we have for this function. */
355
356 static struct btrace_function *
357 ftrace_new_return (struct gdbarch *gdbarch,
358 struct btrace_function *prev,
359 struct minimal_symbol *mfun,
360 struct symbol *fun)
361 {
362 struct btrace_function *bfun, *caller;
363
364 bfun = ftrace_new_function (prev, mfun, fun);
365
366 /* It is important to start at PREV's caller. Otherwise, we might find
367 PREV itself, if PREV is a recursive function. */
368 caller = ftrace_find_caller (prev->up, mfun, fun);
369 if (caller != NULL)
370 {
371 /* The caller of PREV is the preceding btrace function segment in this
372 function instance. */
373 gdb_assert (caller->segment.next == NULL);
374
375 caller->segment.next = bfun;
376 bfun->segment.prev = caller;
377
378 /* Maintain the function level. */
379 bfun->level = caller->level;
380
381 /* Maintain the call stack. */
382 bfun->up = caller->up;
383 bfun->flags = caller->flags;
384
385 ftrace_debug (bfun, "new return");
386 }
387 else
388 {
389 /* We did not find a caller. This could mean that something went
390 wrong or that the call is simply not included in the trace. */
391
392 /* Let's search for some actual call. */
393 caller = ftrace_find_call (gdbarch, prev->up);
394 if (caller == NULL)
395 {
396 /* There is no call in PREV's back trace. We assume that the
397 branch trace did not include it. */
398
399 /* Let's find the topmost function on the call stack - this skips tail calls. */
400 while (prev->up != NULL)
401 prev = prev->up;
402
403 /* We maintain levels for a series of returns for which we have
404 not seen the calls.
405 We start at the preceding function's level in case this has
406 already been a return for which we have not seen the call.
407 We start at level 0 otherwise, to handle tail calls correctly. */
408 bfun->level = min (0, prev->level) - 1;
409
410 /* Fix up the call stack for PREV. */
411 ftrace_fixup_caller (prev, bfun, BFUN_UP_LINKS_TO_RET);
412
413 ftrace_debug (bfun, "new return - no caller");
414 }
415 else
416 {
417 /* There is a call in PREV's back trace to which we should have
418 returned. Let's remain at this level. */
419 bfun->level = prev->level;
420
421 ftrace_debug (bfun, "new return - unknown caller");
422 }
423 }
424
425 return bfun;
426 }
427
428 /* Add a new function segment for a function switch.
429 PREV is the chronologically preceding function segment.
430 MFUN and FUN are the symbol information we have for this function. */
431
432 static struct btrace_function *
433 ftrace_new_switch (struct btrace_function *prev,
434 struct minimal_symbol *mfun,
435 struct symbol *fun)
436 {
437 struct btrace_function *bfun;
438
439 /* This is an unexplained function switch. The call stack will likely
440 be wrong at this point. */
441 bfun = ftrace_new_function (prev, mfun, fun);
442
443 /* We keep the function level. */
444 bfun->level = prev->level;
445
446 ftrace_debug (bfun, "new switch");
447
448 return bfun;
449 }
450
451 /* Update BFUN with respect to the instruction at PC. This may create new
452 function segments.
453 Return the chronologically latest function segment, never NULL. */
454
455 static struct btrace_function *
456 ftrace_update_function (struct gdbarch *gdbarch,
457 struct btrace_function *bfun, CORE_ADDR pc)
458 {
459 struct bound_minimal_symbol bmfun;
460 struct minimal_symbol *mfun;
461 struct symbol *fun;
462 struct btrace_insn *last;
463
464 /* Try to determine the function we're in. We use both types of symbols
465 to avoid surprises when we sometimes get a full symbol and sometimes
466 only a minimal symbol. */
467 fun = find_pc_function (pc);
468 bmfun = lookup_minimal_symbol_by_pc (pc);
469 mfun = bmfun.minsym;
470
471 if (fun == NULL && mfun == NULL)
472 DEBUG_FTRACE ("no symbol at %s", core_addr_to_string_nz (pc));
473
474 /* If we didn't have a function before, we create one. */
475 if (bfun == NULL)
476 return ftrace_new_function (bfun, mfun, fun);
477
478 /* Check the last instruction, if we have one.
479 We do this check first, since it allows us to fill in the call stack
480 links in addition to the normal flow links. */
481 last = NULL;
482 if (!VEC_empty (btrace_insn_s, bfun->insn))
483 last = VEC_last (btrace_insn_s, bfun->insn);
484
485 if (last != NULL)
486 {
487 CORE_ADDR lpc;
488
489 lpc = last->pc;
490
491 /* Check for returns. */
492 if (gdbarch_insn_is_ret (gdbarch, lpc))
493 return ftrace_new_return (gdbarch, bfun, mfun, fun);
494
495 /* Check for calls. */
496 if (gdbarch_insn_is_call (gdbarch, lpc))
497 {
498 int size;
499
500 size = gdb_insn_length (gdbarch, lpc);
501
502 /* Ignore calls to the next instruction. They are used for PIC. */
503 if (lpc + size != pc)
504 return ftrace_new_call (bfun, mfun, fun);
505 }
506 }
507
508 /* Check if we're switching functions for some other reason. */
509 if (ftrace_function_switched (bfun, mfun, fun))
510 {
511 DEBUG_FTRACE ("switching from %s in %s at %s",
512 ftrace_print_insn_addr (last),
513 ftrace_print_function_name (bfun),
514 ftrace_print_filename (bfun));
515
516 if (last != NULL)
517 {
518 CORE_ADDR start, lpc;
519
520 start = get_pc_function_start (pc);
521
522 /* If we can't determine the function for PC, we treat a jump at
523 the end of the block as a tail call. */
524 if (start == 0)
525 start = pc;
526
527 lpc = last->pc;
528
529 /* Jumps indicate optimized tail calls. */
530 if (start == pc && gdbarch_insn_is_jump (gdbarch, lpc))
531 return ftrace_new_tailcall (bfun, mfun, fun);
532 }
533
534 return ftrace_new_switch (bfun, mfun, fun);
535 }
536
537 return bfun;
538 }
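
/* The "calls to the next instruction" case above refers to the common
   32-bit PIC idiom for loading the current PC, for example on x86
   (illustrative assembly, not produced by this file):

     call 1f
   1: popl %ebx        # %ebx now holds the address of label 1

   Such a call never leaves the current function, so it must not open a
   new function segment.  */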
539
540 /* Update BFUN's source range with respect to the instruction at PC. */
541
542 static void
543 ftrace_update_lines (struct btrace_function *bfun, CORE_ADDR pc)
544 {
545 struct symtab_and_line sal;
546 const char *fullname;
547
548 sal = find_pc_line (pc, 0);
549 if (sal.symtab == NULL || sal.line == 0)
550 {
551 DEBUG_FTRACE ("no lines at %s", core_addr_to_string_nz (pc));
552 return;
553 }
554
555 /* Check if we switched files. This could happen if, say, a macro that
556 is defined in another file is expanded here. */
557 fullname = symtab_to_fullname (sal.symtab);
558 if (ftrace_skip_file (bfun, fullname))
559 {
560 DEBUG_FTRACE ("ignoring file at %s, file=%s",
561 core_addr_to_string_nz (pc), fullname);
562 return;
563 }
564
565 /* Update the line range. */
566 bfun->lbegin = min (bfun->lbegin, sal.line);
567 bfun->lend = max (bfun->lend, sal.line);
568
569 if (record_debug > 1)
570 ftrace_debug (bfun, "update lines");
571 }
572
573 /* Add the instruction at PC to BFUN's instructions. */
574
575 static void
576 ftrace_update_insns (struct btrace_function *bfun, CORE_ADDR pc)
577 {
578 struct btrace_insn *insn;
579
580 insn = VEC_safe_push (btrace_insn_s, bfun->insn, NULL);
581 insn->pc = pc;
582
583 if (record_debug > 1)
584 ftrace_debug (bfun, "update insn");
585 }
586
587 /* Compute the function branch trace from a block branch trace BTRACE for
588 a thread given by BTINFO. */
589
590 static void
591 btrace_compute_ftrace (struct btrace_thread_info *btinfo,
592 VEC (btrace_block_s) *btrace)
593 {
594 struct btrace_function *begin, *end;
595 struct gdbarch *gdbarch;
596 unsigned int blk;
597 int level;
598
599 DEBUG ("compute ftrace");
600
601 gdbarch = target_gdbarch ();
602 begin = NULL;
603 end = NULL;
604 level = INT_MAX;
605 blk = VEC_length (btrace_block_s, btrace);
606
607 while (blk != 0)
608 {
609 btrace_block_s *block;
610 CORE_ADDR pc;
611
612 blk -= 1;
613
614 block = VEC_index (btrace_block_s, btrace, blk);
615 pc = block->begin;
616
617 for (;;)
618 {
619 int size;
620
621 /* We should hit the end of the block. Warn if we went too far. */
622 if (block->end < pc)
623 {
624 warning (_("Recorded trace may be corrupted around %s."),
625 core_addr_to_string_nz (pc));
626 break;
627 }
628
629 end = ftrace_update_function (gdbarch, end, pc);
630 if (begin == NULL)
631 begin = end;
632
633 /* Maintain the function level offset.
634 For all but the last block, we do it here. */
635 if (blk != 0)
636 level = min (level, end->level);
637
638 ftrace_update_insns (end, pc);
639 ftrace_update_lines (end, pc);
640
641 /* We're done once we pushed the instruction at the end. */
642 if (block->end == pc)
643 break;
644
645 size = gdb_insn_length (gdbarch, pc);
646
647 /* Make sure we terminate if we fail to compute the size. */
648 if (size <= 0)
649 {
650 warning (_("Recorded trace may be incomplete around %s."),
651 core_addr_to_string_nz (pc));
652 break;
653 }
654
655 pc += size;
656
657 /* Maintain the function level offset.
658 For the last block, we do it here to not consider the last
659 instruction.
660 Since the last instruction corresponds to the current instruction
661 and is not really part of the execution history, it shouldn't
662 affect the level. */
663 if (blk == 0)
664 level = min (level, end->level);
665 }
666 }
667
668 btinfo->begin = begin;
669 btinfo->end = end;
670
671 /* LEVEL is the minimal function level of all btrace function segments.
672 Define the global level offset to -LEVEL so all function levels are
673 normalized to start at zero. */
674 btinfo->level = -level;
675 }
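
/* A small worked example of the level normalization (function names are
   illustrative): suppose the trace starts inside callee () and later
   returns into caller (), for which no call was recorded.  Then

     callee ()  gets level  0
     caller ()  gets level -1  (ftrace_new_return without a known caller)

   LEVEL ends up as -1, btinfo->level becomes 1, and the normalized depths
   are 1 for callee () and 0 for caller ().  */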
676
677 /* See btrace.h. */
678
679 void
680 btrace_enable (struct thread_info *tp)
681 {
682 if (tp->btrace.target != NULL)
683 return;
684
685 if (!target_supports_btrace ())
686 error (_("Target does not support branch tracing."));
687
688 DEBUG ("enable thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
689
690 tp->btrace.target = target_enable_btrace (tp->ptid);
691 }
692
693 /* See btrace.h. */
694
695 void
696 btrace_disable (struct thread_info *tp)
697 {
698 struct btrace_thread_info *btp = &tp->btrace;
699 int errcode = 0;
700
701 if (btp->target == NULL)
702 return;
703
704 DEBUG ("disable thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
705
706 target_disable_btrace (btp->target);
707 btp->target = NULL;
708
709 btrace_clear (tp);
710 }
711
712 /* See btrace.h. */
713
714 void
715 btrace_teardown (struct thread_info *tp)
716 {
717 struct btrace_thread_info *btp = &tp->btrace;
718 int errcode = 0;
719
720 if (btp->target == NULL)
721 return;
722
723 DEBUG ("teardown thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
724
725 target_teardown_btrace (btp->target);
726 btp->target = NULL;
727
728 btrace_clear (tp);
729 }
730
731 /* See btrace.h. */
732
733 void
734 btrace_fetch (struct thread_info *tp)
735 {
736 struct btrace_thread_info *btinfo;
737 VEC (btrace_block_s) *btrace;
738 struct cleanup *cleanup;
739
740 DEBUG ("fetch thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
741
742 btinfo = &tp->btrace;
743 if (btinfo->target == NULL)
744 return;
745
746 btrace = target_read_btrace (btinfo->target, BTRACE_READ_NEW);
747 cleanup = make_cleanup (VEC_cleanup (btrace_block_s), &btrace);
748
749 if (!VEC_empty (btrace_block_s, btrace))
750 {
751 btrace_clear (tp);
752 btrace_compute_ftrace (btinfo, btrace);
753 }
754
755 do_cleanups (cleanup);
756 }
757
758 /* See btrace.h. */
759
760 void
761 btrace_clear (struct thread_info *tp)
762 {
763 struct btrace_thread_info *btinfo;
764 struct btrace_function *it, *trash;
765
766 DEBUG ("clear thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
767
768 btinfo = &tp->btrace;
769
770 it = btinfo->begin;
771 while (it != NULL)
772 {
773 trash = it;
774 it = it->flow.next;
775
776 xfree (trash);
777 }
778
779 btinfo->begin = NULL;
780 btinfo->end = NULL;
781
782 xfree (btinfo->insn_history);
783 xfree (btinfo->call_history);
784
785 btinfo->insn_history = NULL;
786 btinfo->call_history = NULL;
787 }
788
789 /* See btrace.h. */
790
791 void
792 btrace_free_objfile (struct objfile *objfile)
793 {
794 struct thread_info *tp;
795
796 DEBUG ("free objfile");
797
798 ALL_THREADS (tp)
799 btrace_clear (tp);
800 }
801
802 #if defined (HAVE_LIBEXPAT)
803
804 /* Check the btrace document version. */
805
806 static void
807 check_xml_btrace_version (struct gdb_xml_parser *parser,
808 const struct gdb_xml_element *element,
809 void *user_data, VEC (gdb_xml_value_s) *attributes)
810 {
811 const char *version = xml_find_attribute (attributes, "version")->value;
812
813 if (strcmp (version, "1.0") != 0)
814 gdb_xml_error (parser, _("Unsupported btrace version: \"%s\""), version);
815 }
816
817 /* Parse a btrace "block" xml record. */
818
819 static void
820 parse_xml_btrace_block (struct gdb_xml_parser *parser,
821 const struct gdb_xml_element *element,
822 void *user_data, VEC (gdb_xml_value_s) *attributes)
823 {
824 VEC (btrace_block_s) **btrace;
825 struct btrace_block *block;
826 ULONGEST *begin, *end;
827
828 btrace = user_data;
829 block = VEC_safe_push (btrace_block_s, *btrace, NULL);
830
831 begin = xml_find_attribute (attributes, "begin")->value;
832 end = xml_find_attribute (attributes, "end")->value;
833
834 block->begin = *begin;
835 block->end = *end;
836 }
837
838 static const struct gdb_xml_attribute block_attributes[] = {
839 { "begin", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
840 { "end", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
841 { NULL, GDB_XML_AF_NONE, NULL, NULL }
842 };
843
844 static const struct gdb_xml_attribute btrace_attributes[] = {
845 { "version", GDB_XML_AF_NONE, NULL, NULL },
846 { NULL, GDB_XML_AF_NONE, NULL, NULL }
847 };
848
849 static const struct gdb_xml_element btrace_children[] = {
850 { "block", block_attributes, NULL,
851 GDB_XML_EF_REPEATABLE | GDB_XML_EF_OPTIONAL, parse_xml_btrace_block, NULL },
852 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
853 };
854
855 static const struct gdb_xml_element btrace_elements[] = {
856 { "btrace", btrace_attributes, btrace_children, GDB_XML_EF_NONE,
857 check_xml_btrace_version, NULL },
858 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
859 };
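
/* A document accepted by the element tables above would look roughly like
   this (the addresses are made up):

     <btrace version="1.0">
       <block begin="0x400500" end="0x400520"/>
       <block begin="0x400400" end="0x40041f"/>
     </btrace>

   Each <block> element yields one btrace_block_s whose begin/end
   attributes are parsed as ULONGEST values.  */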
860
861 #endif /* defined (HAVE_LIBEXPAT) */
862
863 /* See btrace.h. */
864
865 VEC (btrace_block_s) *
866 parse_xml_btrace (const char *buffer)
867 {
868 VEC (btrace_block_s) *btrace = NULL;
869 struct cleanup *cleanup;
870 int errcode;
871
872 #if defined (HAVE_LIBEXPAT)
873
874 cleanup = make_cleanup (VEC_cleanup (btrace_block_s), &btrace);
875 errcode = gdb_xml_parse_quick (_("btrace"), "btrace.dtd", btrace_elements,
876 buffer, &btrace);
877 if (errcode != 0)
878 {
879 do_cleanups (cleanup);
880 return NULL;
881 }
882
883 /* Keep parse results. */
884 discard_cleanups (cleanup);
885
886 #else /* !defined (HAVE_LIBEXPAT) */
887
888 error (_("Cannot process branch trace. XML parsing is not supported."));
889
890 #endif /* !defined (HAVE_LIBEXPAT) */
891
892 return btrace;
893 }
894
895 /* See btrace.h. */
896
897 const struct btrace_insn *
898 btrace_insn_get (const struct btrace_insn_iterator *it)
899 {
900 const struct btrace_function *bfun;
901 unsigned int index, end;
902
903 index = it->index;
904 bfun = it->function;
905
906 /* The index is within the bounds of this function's instruction vector. */
907 end = VEC_length (btrace_insn_s, bfun->insn);
908 gdb_assert (0 < end);
909 gdb_assert (index < end);
910
911 return VEC_index (btrace_insn_s, bfun->insn, index);
912 }
913
914 /* See btrace.h. */
915
916 unsigned int
917 btrace_insn_number (const struct btrace_insn_iterator *it)
918 {
919 const struct btrace_function *bfun;
920
921 bfun = it->function;
922 return bfun->insn_offset + it->index;
923 }
924
925 /* See btrace.h. */
926
927 void
928 btrace_insn_begin (struct btrace_insn_iterator *it,
929 const struct btrace_thread_info *btinfo)
930 {
931 const struct btrace_function *bfun;
932
933 bfun = btinfo->begin;
934 if (bfun == NULL)
935 error (_("No trace."));
936
937 it->function = bfun;
938 it->index = 0;
939 }
940
941 /* See btrace.h. */
942
943 void
944 btrace_insn_end (struct btrace_insn_iterator *it,
945 const struct btrace_thread_info *btinfo)
946 {
947 const struct btrace_function *bfun;
948 unsigned int length;
949
950 bfun = btinfo->end;
951 if (bfun == NULL)
952 error (_("No trace."));
953
954 /* The last instruction in the last function is the current instruction.
955 We point to it - it is one past the end of the execution trace. */
956 length = VEC_length (btrace_insn_s, bfun->insn);
957
958 it->function = bfun;
959 it->index = length - 1;
960 }
961
962 /* See btrace.h. */
963
964 unsigned int
965 btrace_insn_next (struct btrace_insn_iterator *it, unsigned int stride)
966 {
967 const struct btrace_function *bfun;
968 unsigned int index, steps;
969
970 bfun = it->function;
971 steps = 0;
972 index = it->index;
973
974 while (stride != 0)
975 {
976 unsigned int end, space, adv;
977
978 end = VEC_length (btrace_insn_s, bfun->insn);
979
980 gdb_assert (0 < end);
981 gdb_assert (index < end);
982
983 /* Compute the number of instructions remaining in this segment. */
984 space = end - index;
985
986 /* Advance the iterator as far as possible within this segment. */
987 adv = min (space, stride);
988 stride -= adv;
989 index += adv;
990 steps += adv;
991
992 /* Move to the next function if we're at the end of this one. */
993 if (index == end)
994 {
995 const struct btrace_function *next;
996
997 next = bfun->flow.next;
998 if (next == NULL)
999 {
1000 /* We stepped past the last function.
1001
1002 Let's adjust the index to point to the last instruction in
1003 the previous function. */
1004 index -= 1;
1005 steps -= 1;
1006 break;
1007 }
1008
1009 /* We now point to the first instruction in the new function. */
1010 bfun = next;
1011 index = 0;
1012 }
1013
1014 /* We did make progress. */
1015 gdb_assert (adv > 0);
1016 }
1017
1018 /* Update the iterator. */
1019 it->function = bfun;
1020 it->index = index;
1021
1022 return steps;
1023 }
1024
1025 /* See btrace.h. */
1026
1027 unsigned int
1028 btrace_insn_prev (struct btrace_insn_iterator *it, unsigned int stride)
1029 {
1030 const struct btrace_function *bfun;
1031 unsigned int index, steps;
1032
1033 bfun = it->function;
1034 steps = 0;
1035 index = it->index;
1036
1037 while (stride != 0)
1038 {
1039 unsigned int adv;
1040
1041 /* Move to the previous function if we're at the start of this one. */
1042 if (index == 0)
1043 {
1044 const struct btrace_function *prev;
1045
1046 prev = bfun->flow.prev;
1047 if (prev == NULL)
1048 break;
1049
1050 /* We point to one after the last instruction in the new function. */
1051 bfun = prev;
1052 index = VEC_length (btrace_insn_s, bfun->insn);
1053
1054 /* There is at least one instruction in this function segment. */
1055 gdb_assert (index > 0);
1056 }
1057
1058 /* Advance the iterator as far as possible within this segment. */
1059 adv = min (index, stride);
1060 stride -= adv;
1061 index -= adv;
1062 steps += adv;
1063
1064 /* We did make progress. */
1065 gdb_assert (adv > 0);
1066 }
1067
1068 /* Update the iterator. */
1069 it->function = bfun;
1070 it->index = index;
1071
1072 return steps;
1073 }
1074
1075 /* See btrace.h. */
1076
1077 int
1078 btrace_insn_cmp (const struct btrace_insn_iterator *lhs,
1079 const struct btrace_insn_iterator *rhs)
1080 {
1081 unsigned int lnum, rnum;
1082
1083 lnum = btrace_insn_number (lhs);
1084 rnum = btrace_insn_number (rhs);
1085
1086 return (int) (lnum - rnum);
1087 }
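
/* The instruction iterators above are typically combined as follows.  A
   minimal sketch, assuming BTINFO refers to a thread with a non-empty
   branch trace (error handling omitted):

     struct btrace_insn_iterator it, end;

     btrace_insn_begin (&it, btinfo);
     btrace_insn_end (&end, btinfo);

     while (btrace_insn_cmp (&it, &end) < 0)
       {
         const struct btrace_insn *insn = btrace_insn_get (&it);

         ... use insn->pc ...

         if (btrace_insn_next (&it, 1) == 0)
           break;
       }

   Since the end iterator points at the current instruction, this walks the
   recorded history up to, but not including, that instruction.  */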
1088
1089 /* See btrace.h. */
1090
1091 int
1092 btrace_find_insn_by_number (struct btrace_insn_iterator *it,
1093 const struct btrace_thread_info *btinfo,
1094 unsigned int number)
1095 {
1096 const struct btrace_function *bfun;
1097 unsigned int end;
1098
1099 for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
1100 if (bfun->insn_offset <= number)
1101 break;
1102
1103 if (bfun == NULL)
1104 return 0;
1105
1106 end = bfun->insn_offset + VEC_length (btrace_insn_s, bfun->insn);
1107 if (end <= number)
1108 return 0;
1109
1110 it->function = bfun;
1111 it->index = number - bfun->insn_offset;
1112
1113 return 1;
1114 }
1115
1116 /* See btrace.h. */
1117
1118 const struct btrace_function *
1119 btrace_call_get (const struct btrace_call_iterator *it)
1120 {
1121 return it->function;
1122 }
1123
1124 /* See btrace.h. */
1125
1126 unsigned int
1127 btrace_call_number (const struct btrace_call_iterator *it)
1128 {
1129 const struct btrace_thread_info *btinfo;
1130 const struct btrace_function *bfun;
1131 unsigned int insns;
1132
1133 btinfo = it->btinfo;
1134 bfun = it->function;
1135 if (bfun != NULL)
1136 return bfun->number;
1137
1138 /* For the end iterator, i.e. bfun == NULL, we return one more than the
1139 number of the last function. */
1140 bfun = btinfo->end;
1141 insns = VEC_length (btrace_insn_s, bfun->insn);
1142
1143 /* If the function contains only a single instruction (i.e. the current
1144 instruction), it will be skipped and its number is already the number
1145 we seek. */
1146 if (insns == 1)
1147 return bfun->number;
1148
1149 /* Otherwise, return one more than the number of the last function. */
1150 return bfun->number + 1;
1151 }
1152
1153 /* See btrace.h. */
1154
1155 void
1156 btrace_call_begin (struct btrace_call_iterator *it,
1157 const struct btrace_thread_info *btinfo)
1158 {
1159 const struct btrace_function *bfun;
1160
1161 bfun = btinfo->begin;
1162 if (bfun == NULL)
1163 error (_("No trace."));
1164
1165 it->btinfo = btinfo;
1166 it->function = bfun;
1167 }
1168
1169 /* See btrace.h. */
1170
1171 void
1172 btrace_call_end (struct btrace_call_iterator *it,
1173 const struct btrace_thread_info *btinfo)
1174 {
1175 const struct btrace_function *bfun;
1176
1177 bfun = btinfo->end;
1178 if (bfun == NULL)
1179 error (_("No trace."));
1180
1181 it->btinfo = btinfo;
1182 it->function = NULL;
1183 }
1184
1185 /* See btrace.h. */
1186
1187 unsigned int
1188 btrace_call_next (struct btrace_call_iterator *it, unsigned int stride)
1189 {
1190 const struct btrace_function *bfun;
1191 unsigned int steps;
1192
1193 bfun = it->function;
1194 steps = 0;
1195 while (bfun != NULL)
1196 {
1197 const struct btrace_function *next;
1198 unsigned int insns;
1199
1200 next = bfun->flow.next;
1201 if (next == NULL)
1202 {
1203 /* Ignore the last function if it only contains a single
1204 (i.e. the current) instruction. */
1205 insns = VEC_length (btrace_insn_s, bfun->insn);
1206 if (insns == 1)
1207 steps -= 1;
1208 }
1209
1210 if (stride == steps)
1211 break;
1212
1213 bfun = next;
1214 steps += 1;
1215 }
1216
1217 it->function = bfun;
1218 return steps;
1219 }
1220
1221 /* See btrace.h. */
1222
1223 unsigned int
1224 btrace_call_prev (struct btrace_call_iterator *it, unsigned int stride)
1225 {
1226 const struct btrace_thread_info *btinfo;
1227 const struct btrace_function *bfun;
1228 unsigned int steps;
1229
1230 bfun = it->function;
1231 steps = 0;
1232
1233 if (bfun == NULL)
1234 {
1235 unsigned int insns;
1236
1237 btinfo = it->btinfo;
1238 bfun = btinfo->end;
1239 if (bfun == NULL)
1240 return 0;
1241
1242 /* Ignore the last function if it only contains a single
1243 (i.e. the current) instruction. */
1244 insns = VEC_length (btrace_insn_s, bfun->insn);
1245 if (insns == 1)
1246 bfun = bfun->flow.prev;
1247
1248 if (bfun == NULL)
1249 return 0;
1250
1251 steps += 1;
1252 }
1253
1254 while (steps < stride)
1255 {
1256 const struct btrace_function *prev;
1257
1258 prev = bfun->flow.prev;
1259 if (prev == NULL)
1260 break;
1261
1262 bfun = prev;
1263 steps += 1;
1264 }
1265
1266 it->function = bfun;
1267 return steps;
1268 }
1269
1270 /* See btrace.h. */
1271
1272 int
1273 btrace_call_cmp (const struct btrace_call_iterator *lhs,
1274 const struct btrace_call_iterator *rhs)
1275 {
1276 unsigned int lnum, rnum;
1277
1278 lnum = btrace_call_number (lhs);
1279 rnum = btrace_call_number (rhs);
1280
1281 return (int) (lnum - rnum);
1282 }
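
/* The call iterators follow the same begin/end pattern.  A minimal sketch
   (again assuming BTINFO holds a non-empty trace):

     struct btrace_call_iterator it, end;

     btrace_call_begin (&it, btinfo);
     btrace_call_end (&end, btinfo);

     while (btrace_call_cmp (&it, &end) < 0)
       {
         const struct btrace_function *bfun = btrace_call_get (&it);

         ... print bfun->number, ftrace_print_function_name (bfun) ...

         if (btrace_call_next (&it, 1) == 0)
           break;
       }

   btrace_call_next returns the number of steps actually taken, so the loop
   also terminates if the iterator cannot advance.  */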
1283
1284 /* See btrace.h. */
1285
1286 int
1287 btrace_find_call_by_number (struct btrace_call_iterator *it,
1288 const struct btrace_thread_info *btinfo,
1289 unsigned int number)
1290 {
1291 const struct btrace_function *bfun;
1292
1293 for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
1294 {
1295 unsigned int bnum;
1296
1297 bnum = bfun->number;
1298 if (number == bnum)
1299 {
1300 it->btinfo = btinfo;
1301 it->function = bfun;
1302 return 1;
1303 }
1304
1305 /* Functions are ordered and numbered consecutively. We could bail out
1306 earlier. On the other hand, it is very unlikely that we search for
1307 a nonexistent function. */
1308 }
1309
1310 return 0;
1311 }
1312
1313 /* See btrace.h. */
1314
1315 void
1316 btrace_set_insn_history (struct btrace_thread_info *btinfo,
1317 const struct btrace_insn_iterator *begin,
1318 const struct btrace_insn_iterator *end)
1319 {
1320 if (btinfo->insn_history == NULL)
1321 btinfo->insn_history = xzalloc (sizeof (*btinfo->insn_history));
1322
1323 btinfo->insn_history->begin = *begin;
1324 btinfo->insn_history->end = *end;
1325 }
1326
1327 /* See btrace.h. */
1328
1329 void
1330 btrace_set_call_history (struct btrace_thread_info *btinfo,
1331 const struct btrace_call_iterator *begin,
1332 const struct btrace_call_iterator *end)
1333 {
1334 gdb_assert (begin->btinfo == end->btinfo);
1335
1336 if (btinfo->call_history == NULL)
1337 btinfo->call_history = xzalloc (sizeof (*btinfo->call_history));
1338
1339 btinfo->call_history->begin = *begin;
1340 btinfo->call_history->end = *end;
1341 }